Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d84b6728 authored by Davidlohr Bueso, committed by Ingo Molnar
Browse files

locking/mcs: Better differentiate between MCS variants



We have two flavors of the MCS spinlock: standard and cancelable (OSQ).
While each one is independent of the other, we currently mix and match
them. This patch:

  - Moves the OSQ code out of mcs_spinlock.h (which only deals with the traditional
    version) into include/linux/osq_lock.h. No unnecessary code is added to the
    more global header file; any locks that make use of OSQ must include
    it anyway.

  - Renames mcs_spinlock.c to osq_lock.c. This file only contains osq code.

  - Introduces a CONFIG_LOCK_SPIN_ON_OWNER in order to only build osq_lock
    if there is support for it.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Jason Low <jason.low2@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mikulas Patocka <mpatocka@redhat.com>
Cc: Waiman Long <Waiman.Long@hp.com>
Link: http://lkml.kernel.org/r/1420573509-24774-5-git-send-email-dave@stgolabs.net


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4bd19084
Loading
Loading
Loading
Loading
+10 −2
Original line number Original line Diff line number Diff line
@@ -5,8 +5,11 @@
 * An MCS like lock especially tailored for optimistic spinning for sleeping
 * An MCS like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 * lock implementations (mutex, rwsem, etc).
 */
 */

struct optimistic_spin_node {
#define OSQ_UNLOCKED_VAL (0)
	struct optimistic_spin_node *next, *prev;
	int locked; /* 1 if lock acquired */
	int cpu; /* encoded CPU # + 1 value */
};


struct optimistic_spin_queue {
struct optimistic_spin_queue {
	/*
	/*
@@ -16,6 +19,8 @@ struct optimistic_spin_queue {
	atomic_t tail;
	atomic_t tail;
};
};


#define OSQ_UNLOCKED_VAL (0)

/* Init macro and function. */
/* Init macro and function. */
#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }


@@ -24,4 +29,7 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock)
	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
}
}


extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);

#endif
#endif
+4 −0
Original line number Original line Diff line number Diff line
@@ -231,6 +231,10 @@ config RWSEM_SPIN_ON_OWNER
       def_bool y
       def_bool y
       depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
       depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW


config LOCK_SPIN_ON_OWNER
       def_bool y
       depends on MUTEX_SPIN_ON_OWNER || RWSEM_SPIN_ON_OWNER

config ARCH_USE_QUEUE_RWLOCK
config ARCH_USE_QUEUE_RWLOCK
	bool
	bool


+2 −1
Original line number Original line Diff line number Diff line


obj-y += mutex.o semaphore.o rwsem.o mcs_spinlock.o
obj-y += mutex.o semaphore.o rwsem.o


ifdef CONFIG_FUNCTION_TRACER
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_lockdep.o = -pg
CFLAGS_REMOVE_lockdep.o = -pg
@@ -14,6 +14,7 @@ ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
endif
endif
obj-$(CONFIG_SMP) += spinlock.o
obj-$(CONFIG_SMP) += spinlock.o
obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
obj-$(CONFIG_SMP) += lglock.o
obj-$(CONFIG_SMP) += lglock.o
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+0 −16
Original line number Original line Diff line number Diff line
@@ -108,20 +108,4 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
	arch_mcs_spin_unlock_contended(&next->locked);
	arch_mcs_spin_unlock_contended(&next->locked);
}
}


/*
 * Cancellable version of the MCS lock above.
 *
 * Intended for adaptive spinning of sleeping locks:
 * mutex_lock()/rwsem_down_{read,write}() etc.
 */

struct optimistic_spin_node {
	struct optimistic_spin_node *next, *prev;
	int locked; /* 1 if lock acquired */
	int cpu; /* encoded CPU # value */
};

extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);

#endif /* __LINUX_MCS_SPINLOCK_H */
#endif /* __LINUX_MCS_SPINLOCK_H */
+1 −6
Original line number Original line Diff line number Diff line
#include <linux/percpu.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched.h>
#include "mcs_spinlock.h"
#include <linux/osq_lock.h>

#ifdef CONFIG_SMP


/*
/*
 * An MCS like lock especially tailored for optimistic spinning for sleeping
 * An MCS like lock especially tailored for optimistic spinning for sleeping
@@ -203,6 +201,3 @@ void osq_unlock(struct optimistic_spin_queue *lock)
	if (next)
	if (next)
		ACCESS_ONCE(next->locked) = 1;
		ACCESS_ONCE(next->locked) = 1;
}
}

#endif