
Commit 54b11e6d authored by Rusty Russell

cpumask: smp_call_function_many()



Impact: Implementation change to remove cpumask_t from stack.

Actually change smp_call_function_mask() to smp_call_function_many().
We avoid cpumasks on the stack in this version.
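
A caller-side sketch of the new interface (the wrapper below is purely illustrative and not part of this patch): callers now pass a const struct cpumask * rather than a cpumask_t by value, and preemption must be disabled around the call, as the kerneldoc in the diff below requires.

	/* Illustrative only: run func(arg) on the other online CPUs in *mask. */
	static void example_call_on_mask(const struct cpumask *mask,
					 void (*func)(void *), void *arg)
	{
		preempt_disable();
		smp_call_function_many(mask, func, arg, true);	/* wait for completion */
		preempt_enable();
	}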

(S390 has its own version, but that's going away apparently).

We have to do some dancing to figure out if 0 or 1 other cpus are in
the mask supplied and the online mask without allocating a tmp
cpumask.  It's still fairly cheap.
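
Condensed from the kernel/smp.c hunk below, the "dancing" is two scans over the intersection of @mask and cpu_online_mask, skipping the calling CPU:

	/* First target CPU, ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == smp_processor_id())
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		return;			/* zero other CPUs: nothing to do */

	/* Is there a second target CPU? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == smp_processor_id())
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
	if (next_cpu >= nr_cpu_ids) {
		/* Exactly one other CPU: the single-CPU call is cheaper. */
		smp_call_function_single(cpu, func, info, wait);
		return;
	}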

We allocate the cpumask at the end of the call_function_data
structure: if allocation fails we fall back to smp_call_function_single
rather than using the baroque quiescing code (which needs a cpumask on
stack).
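
Concretely (excerpted from the kernel/smp.c hunk below), the mask lives in a flexible array member at the tail of the structure, sized with cpumask_size() at allocation time, and the allocation-failure path degenerates to one smp_call_function_single() per target CPU:

	struct call_function_data {
		struct call_single_data csd;
		spinlock_t lock;
		unsigned int refs;
		struct rcu_head rcu_head;
		unsigned long cpumask_bits[];	/* sized with cpumask_size() */
	};

	data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC);
	if (unlikely(!data)) {
		/* Slow path: no allocation needed, call each target CPU in turn. */
		for_each_online_cpu(cpu) {
			if (cpu == smp_processor_id())
				continue;
			if (cpumask_test_cpu(cpu, mask))
				smp_call_function_single(cpu, func, info, wait);
		}
		return;
	}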

(Thanks to Hiroshi Shimamoto for spotting several bugs in previous versions!)

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Cc: npiggin@suse.de
Cc: axboe@kernel.dk
parent 3fa41520
include/linux/smp.h  +8 −7
@@ -67,15 +67,16 @@ extern void smp_cpus_done(unsigned int max_cpus);
  * Call a function on all other processors
  */
 int smp_call_function(void(*func)(void *info), void *info, int wait);
-/* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */
-int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
-				int wait);
+void smp_call_function_many(const struct cpumask *mask,
+			    void (*func)(void *info), void *info, bool wait);
 
-static inline void smp_call_function_many(const struct cpumask *mask,
-					  void (*func)(void *info), void *info,
-					  int wait)
+/* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */
+static inline int
+smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
+		       int wait)
 {
-	smp_call_function_mask(*mask, func, info, wait);
+	smp_call_function_many(&mask, func, info, wait);
+	return 0;
 }
 
 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
kernel/smp.c  +49 −90
@@ -24,8 +24,8 @@ struct call_function_data {
 	struct call_single_data csd;
 	spinlock_t lock;
 	unsigned int refs;
-	cpumask_t cpumask;
 	struct rcu_head rcu_head;
+	unsigned long cpumask_bits[];
 };
 
 struct call_single_queue {
@@ -110,13 +110,13 @@ void generic_smp_call_function_interrupt(void)
 	list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
 		int refs;
 
-		if (!cpu_isset(cpu, data->cpumask))
+		if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits)))
 			continue;
 
 		data->csd.func(data->csd.info);
 
 		spin_lock(&data->lock);
-		cpu_clear(cpu, data->cpumask);
+		cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits));
 		WARN_ON(data->refs == 0);
 		data->refs--;
 		refs = data->refs;
@@ -266,51 +266,13 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
 	generic_exec_single(cpu, data);
 }
 
-/* Dummy function */
-static void quiesce_dummy(void *unused)
-{
-}
-
-/*
- * Ensure stack based data used in call function mask is safe to free.
- *
- * This is needed by smp_call_function_mask when using on-stack data, because
- * a single call function queue is shared by all CPUs, and any CPU may pick up
- * the data item on the queue at any time before it is deleted. So we need to
- * ensure that all CPUs have transitioned through a quiescent state after
- * this call.
- *
- * This is a very slow function, implemented by sending synchronous IPIs to
- * all possible CPUs. For this reason, we have to alloc data rather than use
- * stack based data even in the case of synchronous calls. The stack based
- * data is then just used for deadlock/oom fallback which will be very rare.
- *
- * If a faster scheme can be made, we could go back to preferring stack based
- * data -- the data allocation/free is non-zero cost.
- */
-static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
-{
-	struct call_single_data data;
-	int cpu;
-
-	data.func = quiesce_dummy;
-	data.info = NULL;
-
-	for_each_cpu_mask(cpu, mask) {
-		data.flags = CSD_FLAG_WAIT;
-		generic_exec_single(cpu, &data);
-	}
-}
-
 /**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.
+ * smp_call_function_many(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on (only runs on online subset).
  * @func: The function to run. This must be fast and non-blocking.
  * @info: An arbitrary pointer to pass to the function.
  * @wait: If true, wait (atomically) until function has completed on other CPUs.
  *
- * Returns 0 on success, else a negative status code.
- *
  * If @wait is true, then returns once @func has returned. Note that @wait
  * will be implicitly turned on in case of allocation failures, since
  * we fall back to on-stack allocation.
@@ -319,53 +281,57 @@ static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
  * hardware interrupt handler or from a bottom half handler. Preemption
  * must be disabled when calling this function.
  */
-int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
-			   int wait)
+void smp_call_function_many(const struct cpumask *mask,
+			    void (*func)(void *), void *info,
+			    bool wait)
 {
-	struct call_function_data d;
-	struct call_function_data *data = NULL;
-	cpumask_t allbutself;
+	struct call_function_data *data;
 	unsigned long flags;
-	int cpu, num_cpus;
-	int slowpath = 0;
+	int cpu, next_cpu;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
 
-	cpu = smp_processor_id();
-	allbutself = cpu_online_map;
-	cpu_clear(cpu, allbutself);
-	cpus_and(mask, mask, allbutself);
-	num_cpus = cpus_weight(mask);
-
-	/*
-	 * If zero CPUs, return. If just a single CPU, turn this request
-	 * into a targetted single call instead since it's faster.
-	 */
-	if (!num_cpus)
-		return 0;
-	else if (num_cpus == 1) {
-		cpu = first_cpu(mask);
-		return smp_call_function_single(cpu, func, info, wait);
+	/* So, what's a CPU they want?  Ignoring this one. */
+	cpu = cpumask_first_and(mask, cpu_online_mask);
+	if (cpu == smp_processor_id())
+		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
+	/* No online cpus?  We're done. */
+	if (cpu >= nr_cpu_ids)
+		return;
+
+	/* Do we have another CPU which isn't us? */
+	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
+	if (next_cpu == smp_processor_id())
+		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
+
+	/* Fastpath: do that cpu by itself. */
+	if (next_cpu >= nr_cpu_ids) {
+		smp_call_function_single(cpu, func, info, wait);
+		return;
 	}
 
-	data = kmalloc(sizeof(*data), GFP_ATOMIC);
-	if (data) {
-		data->csd.flags = CSD_FLAG_ALLOC;
-		if (wait)
-			data->csd.flags |= CSD_FLAG_WAIT;
-	} else {
-		data = &d;
-		data->csd.flags = CSD_FLAG_WAIT;
-		wait = 1;
-		slowpath = 1;
+	data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC);
+	if (unlikely(!data)) {
+		/* Slow path. */
+		for_each_online_cpu(cpu) {
+			if (cpu == smp_processor_id())
+				continue;
+			if (cpumask_test_cpu(cpu, mask))
+				smp_call_function_single(cpu, func, info, wait);
+		}
+		return;
 	}
 
 	spin_lock_init(&data->lock);
+	data->csd.flags = CSD_FLAG_ALLOC;
+	if (wait)
+		data->csd.flags |= CSD_FLAG_WAIT;
 	data->csd.func = func;
 	data->csd.info = info;
-	data->refs = num_cpus;
-	data->cpumask = mask;
+	cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits));
+	data->refs = cpumask_weight(to_cpumask(data->cpumask_bits));
 
 	spin_lock_irqsave(&call_function_lock, flags);
 	list_add_tail_rcu(&data->csd.list, &call_function_queue);
@@ -377,18 +343,13 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	smp_mb();
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi(mask);
+	arch_send_call_function_ipi(*to_cpumask(data->cpumask_bits));
 
 	/* optionally wait for the CPUs to complete */
-	if (wait) {
+	if (wait)
 		csd_flag_wait(&data->csd);
-		if (unlikely(slowpath))
-			smp_call_function_mask_quiesce_stack(mask);
-	}
-
-	return 0;
 }
-EXPORT_SYMBOL(smp_call_function_mask);
+EXPORT_SYMBOL(smp_call_function_many);
 
 /**
  * smp_call_function(): Run a function on all other CPUs.
@@ -396,7 +357,7 @@ EXPORT_SYMBOL(smp_call_function_mask);
  * @info: An arbitrary pointer to pass to the function.
  * @wait: If true, wait (atomically) until function has completed on other CPUs.
  *
- * Returns 0 on success, else a negative status code.
+ * Returns 0.
  *
  * If @wait is true, then returns once @func has returned; otherwise
  * it returns just before the target cpu calls @func. In case of allocation
@@ -407,12 +368,10 @@ EXPORT_SYMBOL(smp_call_function_mask);
  */
 int smp_call_function(void (*func)(void *), void *info, int wait)
 {
-	int ret;
-
 	preempt_disable();
-	ret = smp_call_function_mask(cpu_online_map, func, info, wait);
+	smp_call_function_many(cpu_online_mask, func, info, wait);
 	preempt_enable();
 
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(smp_call_function);