Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 79f55997 authored by Li Zefan, committed by Rusty Russell
Browse files

cpumask: use zalloc_cpumask_var() where possible



Remove open-coded zalloc_cpumask_var() and zalloc_cpumask_var_node().

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
parent a724eada
Loading
Loading
Loading
Loading
+2 −5
Original line number Diff line number Diff line
@@ -227,17 +227,14 @@ static struct irq_cfg *get_one_free_irq_cfg(int node)

	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
	if (cfg) {
		if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
		if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
			kfree(cfg);
			cfg = NULL;
		} else if (!alloc_cpumask_var_node(&cfg->old_domain,
		} else if (!zalloc_cpumask_var_node(&cfg->old_domain,
							  GFP_ATOMIC, node)) {
			free_cpumask_var(cfg->domain);
			kfree(cfg);
			cfg = NULL;
		} else {
			cpumask_clear(cfg->domain);
			cpumask_clear(cfg->old_domain);
		}
	}

+2 −4
Original line number Diff line number Diff line
@@ -555,10 +555,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
void __init init_c1e_mask(void)
{
	/* If we're using c1e_idle, we need to allocate c1e_mask. */
	if (pm_idle == c1e_idle) {
		alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
		cpumask_clear(c1e_mask);
	}
	if (pm_idle == c1e_idle)
		zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
+3 −6
Original line number Diff line number Diff line
@@ -1059,12 +1059,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
#endif
	current_thread_info()->cpu = 0;  /* needed? */
	for_each_possible_cpu(i) {
		alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
		cpumask_clear(per_cpu(cpu_core_map, i));
		cpumask_clear(per_cpu(cpu_sibling_map, i));
		cpumask_clear(cpu_data(i).llc_shared_map);
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

+1 −2
Original line number Diff line number Diff line
@@ -511,7 +511,7 @@ int acpi_processor_preregister_performance(
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);
@@ -558,7 +558,6 @@ int acpi_processor_preregister_performance(
	 * Now that we have _PSD data from all CPUs, lets setup P-state 
	 * domain info.
	 */
	cpumask_clear(covered_cpus);
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
+1 −2
Original line number Diff line number Diff line
@@ -77,7 +77,7 @@ static int acpi_processor_update_tsd_coord(void)
	struct acpi_tsd_package *pdomain, *match_pdomain;
	struct acpi_processor_throttling *pthrottling, *match_pthrottling;

	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
@@ -105,7 +105,6 @@ static int acpi_processor_update_tsd_coord(void)
	if (retval)
		goto err_ret;

	cpumask_clear(covered_cpus);
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
Loading