
Commit 835481d9 authored by Rusty Russell, committed by Ingo Molnar

cpumask: convert struct cpufreq_policy to cpumask_var_t



Impact: use new cpumask API to reduce memory usage

This is part of an effort to reduce structure sizes for machines
configured with large NR_CPUS.  cpumask_t gets replaced by
cpumask_var_t, which is either struct cpumask[1] (small NR_CPUS) or
struct cpumask * (large NR_CPUS).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Dave Jones <davej@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 5cb0535f
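
For readers unfamiliar with the new API, here is a minimal sketch of the cpumask_var_t pattern the hunks below move to. The struct and function names (my_policy, my_driver_init, my_driver_exit) are hypothetical and not from this patch; the point is that when CONFIG_CPUMASK_OFFSTACK is set the mask is a separately allocated object, so it must be explicitly allocated and freed and is always handled through a pointer.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

/* Hypothetical stand-in for a structure holding a CPU mask. */
struct my_policy {
	cpumask_var_t cpus;		/* was: cpumask_t cpus; */
};

static int my_driver_init(struct my_policy *pol, int cpu)
{
	int i;

	/* No-op for small NR_CPUS, a real allocation when offstack. */
	if (!alloc_cpumask_var(&pol->cpus, GFP_KERNEL))
		return -ENOMEM;

	/* old: pol->cpus = cpumask_of_cpu(cpu);  (full struct copy) */
	cpumask_copy(pol->cpus, cpumask_of(cpu));

	/* old: cpus_weight(pol->cpus); the new helpers take pointers */
	if (cpumask_weight(pol->cpus) == 1)
		printk(KERN_DEBUG "policy of cpu %d covers one CPU\n", cpu);

	/* old: for_each_cpu_mask_nr(i, pol->cpus) */
	for_each_cpu(i, pol->cpus)
		printk(KERN_DEBUG "cpu %d belongs to this policy\n", i);

	return 0;
}

static void my_driver_exit(struct my_policy *pol)
{
	free_cpumask_var(pol->cpus);
}

In the cpufreq case the allocation of policy->cpus lives in the core rather than in the drivers (the core hunks are not part of the excerpt below); the driver changes here only switch to the pointer-taking helpers.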
+5 −5
@@ -411,7 +411,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,

#ifdef CONFIG_HOTPLUG_CPU
	/* cpufreq holds the hotplug lock, so we are safe from here on */
-	cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+	cpumask_and(&online_policy_cpus, cpu_online_mask, policy->cpus);
#else
	online_policy_cpus = policy->cpus;
#endif
@@ -626,15 +626,15 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-		cpumask_copy(&policy->cpus, perf->shared_cpu_map);
+		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
-	cpumask_copy(&policy->related_cpus, perf->shared_cpu_map);
+	cpumask_copy(policy->related_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
-	if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
+	if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-		policy->cpus = per_cpu(cpu_core_map, cpu);
+		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
	}
#endif

+4 −4
@@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
		return 0;

	/* notifiers */
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}
@@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
	/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
	 * Developer's Manual, Volume 3
	 */
-	for_each_cpu_mask_nr(i, policy->cpus)
+	for_each_cpu(i, policy->cpus)
		cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);

	/* notifiers */
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}
@@ -203,7 +203,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
	unsigned int i;

#ifdef CONFIG_SMP
-	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
+	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
#endif

	/* Errata workaround */
+3 −3
@@ -1199,10 +1199,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
	set_cpus_allowed_ptr(current, &oldmask);

	if (cpu_family == CPU_HW_PSTATE)
-		pol->cpus = cpumask_of_cpu(pol->cpu);
+		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
	else
-		pol->cpus = per_cpu(cpu_core_map, pol->cpu);
-	data->available_cores = &(pol->cpus);
+		cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
+	data->available_cores = pol->cpus;

	/* Take a crude guess here.
	 * That guess was in microseconds, so multiply with 1000 */
+1 −1
@@ -53,7 +53,7 @@ struct powernow_k8_data {
	/* we need to keep track of associated cores, but let cpufreq
	 * handle hotplug events - so just point at cpufreq pol->cpus
	 * structure */
-	cpumask_t *available_cores;
+	struct cpumask *available_cores;
};


+7 −7
@@ -492,8 +492,8 @@ static int centrino_target (struct cpufreq_policy *policy,
	}

	first_cpu = 1;
-	for_each_cpu_mask_nr(j, policy->cpus) {
-		const cpumask_t *mask;
+	for_each_cpu(j, policy->cpus) {
+		const struct cpumask *mask;

		/* cpufreq holds the hotplug lock, so we are safe here */
		if (!cpu_online(j))
@@ -504,9 +504,9 @@ static int centrino_target (struct cpufreq_policy *policy,
		 * Make sure we are running on CPU that wants to change freq
		 */
		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-			mask = &policy->cpus;
+			mask = policy->cpus;
		else
-			mask = &cpumask_of_cpu(j);
+			mask = cpumask_of(j);

		set_cpus_allowed_ptr(current, mask);
		preempt_disable();
@@ -538,7 +538,7 @@ static int centrino_target (struct cpufreq_policy *policy,
			dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
				target_freq, freqs.old, freqs.new, msr);

-			for_each_cpu_mask_nr(k, policy->cpus) {
+			for_each_cpu(k, policy->cpus) {
				if (!cpu_online(k))
					continue;
				freqs.cpu = k;
@@ -563,7 +563,7 @@ static int centrino_target (struct cpufreq_policy *policy,
		preempt_enable();
	}

-	for_each_cpu_mask_nr(k, policy->cpus) {
+	for_each_cpu(k, policy->cpus) {
		if (!cpu_online(k))
			continue;
		freqs.cpu = k;
@@ -586,7 +586,7 @@ static int centrino_target (struct cpufreq_policy *policy,
		tmp = freqs.new;
		freqs.new = freqs.old;
		freqs.old = tmp;
-		for_each_cpu_mask_nr(j, policy->cpus) {
+		for_each_cpu(j, policy->cpus) {
			if (!cpu_online(j))
				continue;
			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);