
Commit 104699c0 authored by KOSAKI Motohiro, committed by Benjamin Herrenschmidt

powerpc: Convert old cpumask API into new one



Adapt to the new API.

Almost all of the changes are trivial. The most important change is the line
below, because the task->cpus_allowed implementation is planned to change.

-       ctx->cpus_allowed = current->cpus_allowed;

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 48404f2e
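
For context: the conversion this commit applies is mechanical. The old cpus_*/cpu_* helpers, which take a cpumask_t by value, are replaced by the cpumask_* helpers, which take struct cpumask pointers. A minimal sketch of the before/after pattern, using a hypothetical example_mask that is not part of this commit:

#include <linux/cpumask.h>

static cpumask_t example_mask;	/* hypothetical mask, for illustration only */

static int old_api_example(int cpu)
{
	example_mask = CPU_MASK_NONE;		/* old API: whole-mask assignment */
	cpu_set(cpu, example_mask);		/* old API: mask passed by value */
	return cpu_isset(cpu, example_mask);
}

static int new_api_example(int cpu)
{
	cpumask_clear(&example_mask);		/* new API: operate through a pointer */
	cpumask_set_cpu(cpu, &example_mask);
	return cpumask_test_cpu(cpu, &example_mask);
}
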
+6 −6
@@ -37,16 +37,16 @@ extern cpumask_t threads_core_mask;
  * This can typically be used for things like IPI for tlb invalidations
  * since those need to be done only once per core/TLB
  */
-static inline cpumask_t cpu_thread_mask_to_cores(cpumask_t threads)
+static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
 {
 	cpumask_t	tmp, res;
 	int		i;
 
-	res = CPU_MASK_NONE;
+	cpumask_clear(&res);
 	for (i = 0; i < NR_CPUS; i += threads_per_core) {
-		cpus_shift_left(tmp, threads_core_mask, i);
-		if (cpus_intersects(threads, tmp))
-			cpu_set(i, res);
+		cpumask_shift_left(&tmp, &threads_core_mask, i);
+		if (cpumask_intersects(threads, &tmp))
+			cpumask_set_cpu(i, &res);
 	}
 	return res;
 }
@@ -58,7 +58,7 @@ static inline int cpu_nr_cores(void)
 
 static inline cpumask_t cpu_online_cores_map(void)
 {
-	return cpu_thread_mask_to_cores(cpu_online_map);
+	return cpu_thread_mask_to_cores(cpu_online_mask);
 }
 
 #ifdef CONFIG_SMP
+1 −1
@@ -76,7 +76,7 @@ extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
 extern cpumask_t cpus_in_sr;
 static inline int kexec_sr_activated(int cpu)
 {
-	return cpu_isset(cpu,cpus_in_sr);
+	return cpumask_test_cpu(cpu, &cpus_in_sr);
 }
 
 struct kimage;
+16 −16
@@ -64,9 +64,9 @@ void crash_ipi_callback(struct pt_regs *regs)
 		return;
 
 	hard_irq_disable();
-	if (!cpu_isset(cpu, cpus_in_crash))
+	if (!cpumask_test_cpu(cpu, &cpus_in_crash))
 		crash_save_cpu(regs, cpu);
-	cpu_set(cpu, cpus_in_crash);
+	cpumask_set_cpu(cpu, &cpus_in_crash);
 
 	/*
 	 * Entered via soft-reset - could be the kdump
@@ -77,8 +77,8 @@ void crash_ipi_callback(struct pt_regs *regs)
 	 * Tell the kexec CPU that entered via soft-reset and ready
 	 * to go down.
 	 */
-	if (cpu_isset(cpu, cpus_in_sr)) {
-		cpu_clear(cpu, cpus_in_sr);
+	if (cpumask_test_cpu(cpu, &cpus_in_sr)) {
+		cpumask_clear_cpu(cpu, &cpus_in_sr);
 		atomic_inc(&enter_on_soft_reset);
 	}
 
@@ -87,7 +87,7 @@ void crash_ipi_callback(struct pt_regs *regs)
 	 * This barrier is needed to make sure that all CPUs are stopped.
 	 * If not, soft-reset will be invoked to bring other CPUs.
 	 */
-	while (!cpu_isset(crashing_cpu, cpus_in_crash))
+	while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash))
 		cpu_relax();
 
 	if (ppc_md.kexec_cpu_down)
@@ -109,7 +109,7 @@ static void crash_soft_reset_check(int cpu)
 {
 	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
 
-	cpu_clear(cpu, cpus_in_sr);
+	cpumask_clear_cpu(cpu, &cpus_in_sr);
 	while (atomic_read(&enter_on_soft_reset) != ncpus)
 		cpu_relax();
 }
@@ -132,7 +132,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 	 */
 	printk(KERN_EMERG "Sending IPI to other cpus...\n");
 	msecs = 10000;
-	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+	while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
 		cpu_relax();
 		mdelay(1);
 	}
@@ -144,20 +144,20 @@ static void crash_kexec_prepare_cpus(int cpu)
 	 * user to do soft reset such that we get all.
 	 * Soft-reset will be used until better mechanism is implemented.
 	 */
-	if (cpus_weight(cpus_in_crash) < ncpus) {
+	if (cpumask_weight(&cpus_in_crash) < ncpus) {
 		printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
-			ncpus - cpus_weight(cpus_in_crash));
+			ncpus - cpumask_weight(&cpus_in_crash));
 		printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
-		cpus_in_sr = CPU_MASK_NONE;
+		cpumask_clear(&cpus_in_sr);
 		atomic_set(&enter_on_soft_reset, 0);
-		while (cpus_weight(cpus_in_crash) < ncpus)
+		while (cpumask_weight(&cpus_in_crash) < ncpus)
 			cpu_relax();
 	}
 	/*
 	 * Make sure all CPUs are entered via soft-reset if the kdump is
 	 * invoked using soft-reset.
 	 */
-	if (cpu_isset(cpu, cpus_in_sr))
+	if (cpumask_test_cpu(cpu, &cpus_in_sr))
 		crash_soft_reset_check(cpu);
 	/* Leave the IPI callback set */
 }
@@ -210,7 +210,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 			 * exited using 'x'(exit and recover) or
 			 * kexec_should_crash() failed for all running tasks.
 			 */
-			cpu_clear(cpu, cpus_in_sr);
+			cpumask_clear_cpu(cpu, &cpus_in_sr);
 			local_irq_restore(flags);
 			return;
 		}
@@ -224,7 +224,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 		 * then start kexec boot.
 		 */
 		crash_soft_reset_check(cpu);
-		cpu_set(crashing_cpu, cpus_in_crash);
+		cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
 		if (ppc_md.kexec_cpu_down)
 			ppc_md.kexec_cpu_down(1, 0);
 		machine_kexec(kexec_crash_image);
@@ -253,7 +253,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 
 void crash_kexec_secondary(struct pt_regs *regs)
 {
-	cpus_in_sr = CPU_MASK_NONE;
+	cpumask_clear(&cpus_in_sr);
 }
 #endif	/* CONFIG_SMP */
 
@@ -345,7 +345,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crashing_cpu = smp_processor_id();
 	crash_save_cpu(regs, crashing_cpu);
 	crash_kexec_prepare_cpus(crashing_cpu);
-	cpu_set(crashing_cpu, cpus_in_crash);
+	cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
 	crash_kexec_wait_realmode(crashing_cpu);
 
 	machine_kexec_mask_interrupts();
+2 −2
@@ -381,7 +381,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
 	int i;
 
 	threads_per_core = tpc;
-	threads_core_mask = CPU_MASK_NONE;
+	cpumask_clear(&threads_core_mask);
 
 	/* This implementation only supports power of 2 number of threads
 	 * for simplicity and performance
@@ -390,7 +390,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
 	BUG_ON(tpc != (1 << threads_shift));
 
 	for (i = 0; i < tpc; i++)
-		cpu_set(i, threads_core_mask);
+		cpumask_set_cpu(i, &threads_core_mask);
 
 	printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
 	       tpc, tpc > 1 ? "s" : "");
+2 −2
@@ -513,7 +513,7 @@ int cpu_first_thread_of_core(int core)
 }
 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
 
-/* Must be called when no change can occur to cpu_present_map,
+/* Must be called when no change can occur to cpu_present_mask,
  * i.e. during cpu online or offline.
  */
 static struct device_node *cpu_to_l2cache(int cpu)
@@ -614,7 +614,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	 * se we pin us down to CPU 0 for a short while
 	 */
 	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
-	cpumask_copy(old_mask, &current->cpus_allowed);
+	cpumask_copy(old_mask, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
 
 	if (smp_ops && smp_ops->setup_cpu)
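
The smp_cpus_done() hunk above likewise replaces the direct field access &current->cpus_allowed with the tsk_cpus_allowed() accessor, for the same reason the commit message highlights the quoted cpus_allowed assignment: callers that go through the accessor keep working if the field's implementation changes later. A minimal sketch of the accessor pattern, with a hypothetical helper that is not part of this commit:

#include <linux/sched.h>
#include <linux/cpumask.h>

/* Hypothetical helper: how many CPUs may this task run on?
 * Goes through tsk_cpus_allowed() instead of reading task->cpus_allowed directly.
 */
static unsigned int task_allowed_cpu_count(struct task_struct *tsk)
{
	return cpumask_weight(tsk_cpus_allowed(tsk));
}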