Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fb1fece5 authored by KOSAKI Motohiro, committed by David S. Miller
Browse files

sparc: convert old cpumask API into new one



Adapt to the new cpumask API. Almost every change is trivial; the most
important changes are the removal of direct = assignments like the following:

 cpumask_t cpu_mask = *mm_cpumask(mm);
 cpus_allowed = current->cpus_allowed;

Because cpumask_var_t is not safe to copy with the = operator, such usages
might block future kernel core improvements.

No functional change.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 55dd23ec
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -68,17 +68,17 @@ BTFIXUPDEF_BLACKBOX(load_current)

#define smp_cross_call(func,mask,arg1,arg2,arg3,arg4) BTFIXUP_CALL(smp_cross_call)(func,mask,arg1,arg2,arg3,arg4)

static inline void xc0(smpfunc_t func) { smp_cross_call(func, cpu_online_map, 0, 0, 0, 0); }
static inline void xc0(smpfunc_t func) { smp_cross_call(func, *cpu_online_mask, 0, 0, 0, 0); }
static inline void xc1(smpfunc_t func, unsigned long arg1)
{ smp_cross_call(func, cpu_online_map, arg1, 0, 0, 0); }
{ smp_cross_call(func, *cpu_online_mask, arg1, 0, 0, 0); }
static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
{ smp_cross_call(func, cpu_online_map, arg1, arg2, 0, 0); }
{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0); }
static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
			   unsigned long arg3)
{ smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, 0); }
{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, 0); }
static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
			   unsigned long arg3, unsigned long arg4)
{ smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, arg4); }
{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, arg4); }

extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+2 −2
Original line number Diff line number Diff line
@@ -202,7 +202,7 @@ static struct cpuinfo_tree *build_cpuinfo_tree(void)
	new_tree->total_nodes = n;
	memcpy(&new_tree->level, tmp_level, sizeof(tmp_level));

	prev_cpu = cpu = first_cpu(cpu_online_map);
	prev_cpu = cpu = cpumask_first(cpu_online_mask);

	/* Initialize all levels in the tree with the first CPU */
	for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) {
@@ -381,7 +381,7 @@ static int simple_map_to_cpu(unsigned int index)
	}

	/* Impossible, since num_online_cpus() <= num_possible_cpus() */
	return first_cpu(cpu_online_map);
	return cpumask_first(cpu_online_mask);
}

static int _map_to_cpu(unsigned int index)
+7 −7
Original line number Diff line number Diff line
@@ -497,7 +497,7 @@ static void dr_cpu_init_response(struct ds_data *resp, u64 req_num,
	tag->num_records = ncpus;

	i = 0;
	for_each_cpu_mask(cpu, *mask) {
	for_each_cpu(cpu, mask) {
		ent[i].cpu = cpu;
		ent[i].result = DR_CPU_RES_OK;
		ent[i].stat = default_stat;
@@ -534,7 +534,7 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp,
	int resp_len, ncpus, cpu;
	unsigned long flags;

	ncpus = cpus_weight(*mask);
	ncpus = cpumask_weight(mask);
	resp_len = dr_cpu_size_response(ncpus);
	resp = kzalloc(resp_len, GFP_KERNEL);
	if (!resp)
@@ -547,7 +547,7 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp,
	mdesc_populate_present_mask(mask);
	mdesc_fill_in_cpu_data(mask);

	for_each_cpu_mask(cpu, *mask) {
	for_each_cpu(cpu, mask) {
		int err;

		printk(KERN_INFO "ds-%llu: Starting cpu %d...\n",
@@ -593,7 +593,7 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
	int resp_len, ncpus, cpu;
	unsigned long flags;

	ncpus = cpus_weight(*mask);
	ncpus = cpumask_weight(mask);
	resp_len = dr_cpu_size_response(ncpus);
	resp = kzalloc(resp_len, GFP_KERNEL);
	if (!resp)
@@ -603,7 +603,7 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
			     resp_len, ncpus, mask,
			     DR_CPU_STAT_UNCONFIGURED);

	for_each_cpu_mask(cpu, *mask) {
	for_each_cpu(cpu, mask) {
		int err;

		printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n",
@@ -649,13 +649,13 @@ static void __cpuinit dr_cpu_data(struct ds_info *dp,

	purge_dups(cpu_list, tag->num_records);

	cpus_clear(mask);
	cpumask_clear(&mask);
	for (i = 0; i < tag->num_records; i++) {
		if (cpu_list[i] == CPU_SENTINEL)
			continue;

		if (cpu_list[i] < nr_cpu_ids)
			cpu_set(cpu_list[i], mask);
			cpumask_set_cpu(cpu_list[i], &mask);
	}

	if (tag->type == DR_CPU_CONFIGURE)
+3 −3
Original line number Diff line number Diff line
@@ -224,13 +224,13 @@ static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpus_equal(mask, cpu_online_map)) {
	if (cpumask_equal(&mask, cpu_online_mask)) {
		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);
		cpuid = cpus_empty(tmp) ? map_to_cpu(irq) : first_cpu(tmp);
		cpumask_and(&tmp, cpu_online_mask, &mask);
		cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
	}

	return cpuid;
+10 −10
Original line number Diff line number Diff line
@@ -107,11 +107,11 @@ void __cpuinit leon_callin(void)
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		mb();

	local_irq_enable();
	cpu_set(cpuid, cpu_online_map);
	set_cpu_online(cpuid, true);
}

/*
@@ -272,21 +272,21 @@ void __init leon_smp_done(void)
	local_flush_cache_all();

	/* Free unneeded trap tables */
	if (!cpu_isset(1, cpu_present_map)) {
	if (!cpu_present(1)) {
		ClearPageReserved(virt_to_page(&trapbase_cpu1));
		init_page_count(virt_to_page(&trapbase_cpu1));
		free_page((unsigned long)&trapbase_cpu1);
		totalram_pages++;
		num_physpages++;
	}
	if (!cpu_isset(2, cpu_present_map)) {
	if (!cpu_present(2)) {
		ClearPageReserved(virt_to_page(&trapbase_cpu2));
		init_page_count(virt_to_page(&trapbase_cpu2));
		free_page((unsigned long)&trapbase_cpu2);
		totalram_pages++;
		num_physpages++;
	}
	if (!cpu_isset(3, cpu_present_map)) {
	if (!cpu_present(3)) {
		ClearPageReserved(virt_to_page(&trapbase_cpu3));
		init_page_count(virt_to_page(&trapbase_cpu3));
		free_page((unsigned long)&trapbase_cpu3);
@@ -440,10 +440,10 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
		{
			register int i;

			cpu_clear(smp_processor_id(), mask);
			cpus_and(mask, cpu_online_map, mask);
			cpumask_clear_cpu(smp_processor_id(), &mask);
			cpumask_and(&mask, cpu_online_mask, &mask);
			for (i = 0; i <= high; i++) {
				if (cpu_isset(i, mask)) {
				if (cpumask_test_cpu(i, &mask)) {
					ccall_info.processors_in[i] = 0;
					ccall_info.processors_out[i] = 0;
					set_cpu_int(i, LEON3_IRQ_CROSS_CALL);
@@ -457,7 +457,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,

			i = 0;
			do {
				if (!cpu_isset(i, mask))
				if (!cpumask_test_cpu(i, &mask))
					continue;

				while (!ccall_info.processors_in[i])
@@ -466,7 +466,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,

			i = 0;
			do {
				if (!cpu_isset(i, mask))
				if (!cpumask_test_cpu(i, &mask))
					continue;

				while (!ccall_info.processors_out[i])
Loading