
Commit fb0527bd authored by Peter Zijlstra, committed by Ingo Molnar

locking/mutexes: Introduce cancelable MCS lock for adaptive spinning



Since we want a task waiting for a mutex_lock() to go to sleep and
reschedule on need_resched(), we must be able to abort the
mcs_spin_lock() around the adaptive spin.

Therefore, implement a cancelable MCS lock.
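
As a rough orientation sketch (not part of the patch), the intended call
pattern in a sleeping-lock slowpath looks like the mutex.c hunk further
below: queue with osq_lock(), spin, and fall back to blocking whenever
queueing fails or need_resched() fires. Details of the owner spin are
elided here:

	if (!osq_lock(&lock->osq))
		goto slowpath;		/* could not even queue without rescheduling */

	for (;;) {
		/* ... optimistically spin on the current lock owner ... */

		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	osq_unlock(&lock->osq);

slowpath:
	/* take lock->wait_lock and block on the wait list, as before */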

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: chegu_vinod@hp.com
Cc: paulmck@linux.vnet.ibm.com
Cc: Waiman.Long@hp.com
Cc: torvalds@linux-foundation.org
Cc: tglx@linutronix.de
Cc: riel@redhat.com
Cc: akpm@linux-foundation.org
Cc: davidlohr@hp.com
Cc: hpa@zytor.com
Cc: andi@firstfloor.org
Cc: aswin@hp.com
Cc: scott.norton@hp.com
Cc: Jason Low <jason.low2@hp.com>
Link: http://lkml.kernel.org/n/tip-62hcl5wxydmjzd182zhvk89m@git.kernel.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1d8fe7dc
+2 −2
@@ -46,7 +46,7 @@
  * - detects multi-task circular deadlocks and prints out all affected
  *   locks and tasks (and only those tasks)
  */
-struct mcs_spinlock;
+struct optimistic_spin_queue;
 struct mutex {
 	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
 	atomic_t		count;
@@ -56,7 +56,7 @@ struct mutex {
 	struct task_struct	*owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	struct mcs_spinlock	*mcs_lock;	/* Spinner MCS lock */
+	struct optimistic_spin_queue	*osq;	/* Spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char 		*name;
+1 −1


-obj-y += mutex.o semaphore.o rwsem.o lglock.o
+obj-y += mutex.o semaphore.o rwsem.o lglock.o mcs_spinlock.o
 
 ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_lockdep.o = -pg
+178 −0

#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include "mcs_spinlock.h"

#ifdef CONFIG_SMP

/*
 * An MCS-like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 *
 * Using a single mcs node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node);

/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
static inline struct optimistic_spin_queue *
osq_wait_next(struct optimistic_spin_queue **lock,
	      struct optimistic_spin_queue *node,
	      struct optimistic_spin_queue *prev)
{
	struct optimistic_spin_queue *next = NULL;

	for (;;) {
		if (*lock == node && cmpxchg(lock, node, prev) == node) {
			/*
			 * We were the last queued, we moved @lock back. @prev
			 * will now observe @lock and will complete its
			 * unlock()/unqueue().
			 */
			break;
		}

		/*
		 * We must xchg() the @node->next value, because if we were to
		 * leave it in, a concurrent unlock()/unqueue() from
		 * @node->next might complete Step-A and think its @prev is
		 * still valid.
		 *
		 * If the concurrent unlock()/unqueue() wins the race, we'll
		 * wait for either @lock to point to us, through its Step-B, or
		 * wait for a new @node->next from its Step-C.
		 */
		if (node->next) {
			next = xchg(&node->next, NULL);
			if (next)
				break;
		}

		arch_mutex_cpu_relax();
	}

	return next;
}

bool osq_lock(struct optimistic_spin_queue **lock)
{
	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_queue *prev, *next;

	node->locked = 0;
	node->next = NULL;

	node->prev = prev = xchg(lock, node);
	if (likely(prev == NULL))
		return true;

	ACCESS_ONCE(prev->next) = node;

	/*
	 * Normally @prev is untouchable after the above store, because at that
	 * moment unlock() can proceed and wipe the node element from the stack.
	 *
	 * However, since our nodes are static per-cpu storage, we're
	 * guaranteed their existence -- this allows us to apply
	 * cmpxchg in an attempt to undo our queueing.
	 */

	while (!smp_load_acquire(&node->locked)) {
		/*
		 * If we need to reschedule, bail so we can block.
		 */
		if (need_resched())
			goto unqueue;

		arch_mutex_cpu_relax();
	}
	return true;

unqueue:
	/*
	 * Step - A  -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this will make @prev's
	 * unlock()/unqueue() wait for a next pointer since @lock points to us
	 * (or later).
	 */

	for (;;) {
		if (prev->next == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;

		/*
		 * We can only fail the cmpxchg() racing against an unlock(),
		 * in which case we should observe @node->locked becoming
		 * true.
		 */
		if (smp_load_acquire(&node->locked))
			return true;

		arch_mutex_cpu_relax();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in which
		 * case its step-C will write us a new @node->prev pointer.
		 */
		prev = ACCESS_ONCE(node->prev);
	}

	/*
	 * Step - B -- stabilize @next
	 *
	 * Similar to unlock(), wait for @node->next or move @lock from @node
	 * back to @prev.
	 */

	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;

	/*
	 * Step - C -- unlink
	 *
	 * @prev is stable because it's still waiting for a new @prev->next
	 * pointer, @next is stable because our @node->next pointer is NULL and
	 * it will wait in Step-A.
	 */

	ACCESS_ONCE(next->prev) = prev;
	ACCESS_ONCE(prev->next) = next;

	return false;
}

void osq_unlock(struct optimistic_spin_queue **lock)
{
	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_queue *next;

	/*
	 * Fast path for the uncontended case.
	 */
	if (likely(cmpxchg(lock, node, NULL) == node))
		return;

	/*
	 * Second most likely case.
	 */
	next = xchg(&node->next, NULL);
	if (next) {
		ACCESS_ONCE(next->locked) = 1;
		return;
	}

	next = osq_wait_next(lock, node, NULL);
	if (next)
		ACCESS_ONCE(next->locked) = 1;
}

#endif
+15 −0
@@ -111,4 +111,19 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 	arch_mcs_spin_unlock_contended(&next->locked);
 }
 
+/*
+ * Cancellable version of the MCS lock above.
+ *
+ * Intended for adaptive spinning of sleeping locks:
+ * mutex_lock()/rwsem_down_{read,write}() etc.
+ */
+
+struct optimistic_spin_queue {
+	struct optimistic_spin_queue *next, *prev;
+	int locked; /* 1 if lock acquired */
+};
+
+extern bool osq_lock(struct optimistic_spin_queue **lock);
+extern void osq_unlock(struct optimistic_spin_queue **lock);
+
 #endif /* __LINUX_MCS_SPINLOCK_H */
+6 −4
@@ -53,7 +53,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	INIT_LIST_HEAD(&lock->wait_list);
 	mutex_clear_owner(lock);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	lock->mcs_lock = NULL;
+	lock->osq = NULL;
 #endif
 
 	debug_mutex_init(lock, name, key);
@@ -403,7 +403,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	if (!mutex_can_spin_on_owner(lock))
 		goto slowpath;
 
-	mcs_spin_lock(&lock->mcs_lock, &node);
+	if (!osq_lock(&lock->osq))
+		goto slowpath;
+
 	for (;;) {
 		struct task_struct *owner;
 
@@ -442,7 +444,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			}
 
 			mutex_set_owner(lock);
-			mcs_spin_unlock(&lock->mcs_lock, &node);
+			osq_unlock(&lock->osq);
 			preempt_enable();
 			return 0;
 		}
@@ -464,7 +466,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 */
 		arch_mutex_cpu_relax();
 	}
-	mcs_spin_unlock(&lock->mcs_lock, &node);
+	osq_unlock(&lock->osq);
 slowpath:
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);