
Commit 9f5a5621 authored by Thomas Gleixner

smp: Convert smplocks to raw_spinlocks



Convert locks that must not become sleeping locks on preempt-rt to
raw_spinlocks.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
parent d209d74d
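
For context, here is a minimal, self-contained sketch of the conversion pattern this patch applies; it is not code from kernel/smp.c. The demo_queue structure and demo_enqueue() helper are hypothetical names invented for illustration; only the raw_spinlock API calls are real kernel interfaces. On preempt-rt a plain spinlock_t is substituted by a sleeping rtmutex-based lock, so a lock taken from hard-irq/IPI context, as the locks in this file are, must stay a raw_spinlock_t and keep busy-waiting.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for the queues converted in the diff below. */
struct demo_queue {
	struct list_head	list;
	raw_spinlock_t		lock;	/* was: spinlock_t lock; */
};

static struct demo_queue demo = {
	.list	= LIST_HEAD_INIT(demo.list),
	/* was: .lock = __SPIN_LOCK_UNLOCKED(demo.lock), */
	.lock	= __RAW_SPIN_LOCK_UNLOCKED(demo.lock),
};

static void demo_enqueue(struct list_head *item)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() always spins, even on preempt-rt,
	 * which is what makes it usable in the interrupt paths from
	 * which these queues are manipulated.
	 */
	raw_spin_lock_irqsave(&demo.lock, flags);
	list_add_tail(item, &demo.list);
	raw_spin_unlock_irqrestore(&demo.lock, flags);
}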
kernel/smp.c +16 −16
@@ -16,11 +16,11 @@ static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
 
 static struct {
 	struct list_head	queue;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 } call_function __cacheline_aligned_in_smp =
 	{
 		.queue		= LIST_HEAD_INIT(call_function.queue),
-		.lock		= __SPIN_LOCK_UNLOCKED(call_function.lock),
+		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
 	};
 
 enum {
@@ -35,7 +35,7 @@ struct call_function_data {
 
 struct call_single_queue {
 	struct list_head	list;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 };
 
 static DEFINE_PER_CPU(struct call_function_data, cfd_data);
@@ -80,7 +80,7 @@ static int __cpuinit init_call_single_data(void)
 	for_each_possible_cpu(i) {
 		struct call_single_queue *q = &per_cpu(call_single_queue, i);
 
-		spin_lock_init(&q->lock);
+		raw_spin_lock_init(&q->lock);
 		INIT_LIST_HEAD(&q->list);
 	}
 
@@ -141,10 +141,10 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 	unsigned long flags;
 	int ipi;
 
-	spin_lock_irqsave(&dst->lock, flags);
+	raw_spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
 	list_add_tail(&data->list, &dst->list);
-	spin_unlock_irqrestore(&dst->lock, flags);
+	raw_spin_unlock_irqrestore(&dst->lock, flags);
 
 	/*
 	 * The list addition should be visible before sending the IPI
@@ -201,9 +201,9 @@ void generic_smp_call_function_interrupt(void)
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
 		if (!refs) {
-			spin_lock(&call_function.lock);
+			raw_spin_lock(&call_function.lock);
 			list_del_rcu(&data->csd.list);
-			spin_unlock(&call_function.lock);
+			raw_spin_unlock(&call_function.lock);
 		}
 
 		if (refs)
@@ -230,9 +230,9 @@ void generic_smp_call_function_single_interrupt(void)
 	 */
 	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
 
-	spin_lock(&q->lock);
+	raw_spin_lock(&q->lock);
 	list_replace_init(&q->list, &list);
-	spin_unlock(&q->lock);
+	raw_spin_unlock(&q->lock);
 
 	while (!list_empty(&list)) {
 		struct call_single_data *data;
@@ -449,14 +449,14 @@ void smp_call_function_many(const struct cpumask *mask,
 	cpumask_clear_cpu(this_cpu, data->cpumask);
 	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
-	spin_lock_irqsave(&call_function.lock, flags);
+	raw_spin_lock_irqsave(&call_function.lock, flags);
 	/*
 	 * Place entry at the _HEAD_ of the list, so that any cpu still
 	 * observing the entry in generic_smp_call_function_interrupt()
 	 * will not miss any other list entries:
 	 */
 	list_add_rcu(&data->csd.list, &call_function.queue);
-	spin_unlock_irqrestore(&call_function.lock, flags);
+	raw_spin_unlock_irqrestore(&call_function.lock, flags);
 
 	/*
 	 * Make the list addition visible before sending the ipi.
@@ -501,20 +501,20 @@ EXPORT_SYMBOL(smp_call_function);
 
 void ipi_call_lock(void)
 {
-	spin_lock(&call_function.lock);
+	raw_spin_lock(&call_function.lock);
 }
 
 void ipi_call_unlock(void)
 {
-	spin_unlock(&call_function.lock);
+	raw_spin_unlock(&call_function.lock);
}
 
 void ipi_call_lock_irq(void)
 {
-	spin_lock_irq(&call_function.lock);
+	raw_spin_lock_irq(&call_function.lock);
 }
 
 void ipi_call_unlock_irq(void)
 {
-	spin_unlock_irq(&call_function.lock);
+	raw_spin_unlock_irq(&call_function.lock);
 }