
Commit 8ee62b18 authored by Jason Low, committed by Ingo Molnar

locking/rwsem: Convert sem->count to 'atomic_long_t'



Convert the rwsem count variable to an atomic_long_t since we use it
as an atomic variable. This also allows us to remove the
rwsem_atomic_{add,update}() "abstraction", which would now be an unnecessary
level of indirection. In follow-up patches, we also remove the
rwsem_atomic_{add,update}() definitions across the various architectures.
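
[ Editor's note: the conversion is mechanical; plain loads and the old helpers become the corresponding atomic_long_*() operations on &sem->count. A minimal before/after sketch of the pattern, using only operations that appear in the diff below:

	/* before: 'count' is a plain long */
	count = READ_ONCE(sem->count);
	old   = cmpxchg_acquire(&sem->count, count, count + RWSEM_ACTIVE_WRITE_BIAS);
	count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);

	/* after: 'count' is an atomic_long_t */
	count = atomic_long_read(&sem->count);
	old   = atomic_long_cmpxchg_acquire(&sem->count, count, count + RWSEM_ACTIVE_WRITE_BIAS);
	count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);

Besides dropping the per-arch indirection, atomic_long_t documents the concurrent-access intent of the field at the type level. ]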

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Jason Low <jason.low2@hpe.com>
[ Build warning fixes on various architectures. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Jason Low <jason.low2@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: Terry Rudd <terry.rudd@hpe.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Waiman Long <Waiman.Long@hpe.com>
Link: http://lkml.kernel.org/r/1465017963-4839-2-git-send-email-jason.low2@hpe.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 055ce0fd
+13 −13
@@ -25,8 +25,8 @@ static inline void __down_read(struct rw_semaphore *sem)
 {
 	long oldcount;
 #ifndef	CONFIG_SMP
-	oldcount = sem->count;
-	sem->count += RWSEM_ACTIVE_READ_BIAS;
+	oldcount = sem->count.counter;
+	sem->count.counter += RWSEM_ACTIVE_READ_BIAS;
 #else
 	long temp;
 	__asm__ __volatile__(
@@ -52,13 +52,13 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
 	long old, new, res;

-	res = sem->count;
+	res = atomic_long_read(&sem->count);
 	do {
 		new = res + RWSEM_ACTIVE_READ_BIAS;
 		if (new <= 0)
 			break;
 		old = res;
-		res = cmpxchg(&sem->count, old, new);
+		res = atomic_long_cmpxchg(&sem->count, old, new);
 	} while (res != old);
 	return res >= 0 ? 1 : 0;
 }
@@ -67,8 +67,8 @@ static inline long ___down_write(struct rw_semaphore *sem)
 {
 	long oldcount;
 #ifndef	CONFIG_SMP
-	oldcount = sem->count;
-	sem->count += RWSEM_ACTIVE_WRITE_BIAS;
+	oldcount = sem->count.counter;
+	sem->count.counter += RWSEM_ACTIVE_WRITE_BIAS;
 #else
 	long temp;
 	__asm__ __volatile__(
@@ -106,7 +106,7 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
  */
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-	long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+	long ret = atomic_long_cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
 			   RWSEM_ACTIVE_WRITE_BIAS);
 	if (ret == RWSEM_UNLOCKED_VALUE)
 		return 1;
@@ -117,8 +117,8 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
 	long oldcount;
 #ifndef	CONFIG_SMP
-	oldcount = sem->count;
-	sem->count -= RWSEM_ACTIVE_READ_BIAS;
+	oldcount = sem->count.counter;
+	sem->count.counter -= RWSEM_ACTIVE_READ_BIAS;
 #else
 	long temp;
 	__asm__ __volatile__(
@@ -142,8 +142,8 @@ static inline void __up_write(struct rw_semaphore *sem)
 {
 	long count;
 #ifndef	CONFIG_SMP
-	sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
-	count = sem->count;
+	sem->count.counter -= RWSEM_ACTIVE_WRITE_BIAS;
+	count = sem->count.counter;
 #else
 	long temp;
 	__asm__ __volatile__(
@@ -171,8 +171,8 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 {
 	long oldcount;
 #ifndef	CONFIG_SMP
-	oldcount = sem->count;
-	sem->count -= RWSEM_WAITING_BIAS;
+	oldcount = sem->count.counter;
+	sem->count.counter -= RWSEM_WAITING_BIAS;
 #else
 	long temp;
 	__asm__ __volatile__(
+12 −12
@@ -40,7 +40,7 @@
 static inline void
 __down_read (struct rw_semaphore *sem)
 {
-	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);
+	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);

 	if (result < 0)
 		rwsem_down_read_failed(sem);
@@ -55,9 +55,9 @@ ___down_write (struct rw_semaphore *sem)
 	long old, new;

 	do {
-		old = sem->count;
+		old = atomic_long_read(&sem->count);
 		new = old + RWSEM_ACTIVE_WRITE_BIAS;
-	} while (cmpxchg_acq(&sem->count, old, new) != old);
+	} while (atomic_long_cmpxchg_acquire(&sem->count, old, new) != old);

 	return old;
 }
@@ -85,7 +85,7 @@ __down_write_killable (struct rw_semaphore *sem)
 static inline void
 __up_read (struct rw_semaphore *sem)
 {
-	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);
+	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count.counter, -1);

 	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
 		rwsem_wake(sem);
@@ -100,9 +100,9 @@ __up_write (struct rw_semaphore *sem)
 	long old, new;

 	do {
-		old = sem->count;
+		old = atomic_long_read(&sem->count);
 		new = old - RWSEM_ACTIVE_WRITE_BIAS;
-	} while (cmpxchg_rel(&sem->count, old, new) != old);
+	} while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);

 	if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
 		rwsem_wake(sem);
@@ -115,8 +115,8 @@ static inline int
 __down_read_trylock (struct rw_semaphore *sem)
 {
 	long tmp;
-	while ((tmp = sem->count) >= 0) {
-		if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
+	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
+		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, tmp+1)) {
 			return 1;
 		}
 	}
@@ -129,8 +129,8 @@ __down_read_trylock (struct rw_semaphore *sem)
 static inline int
 __down_write_trylock (struct rw_semaphore *sem)
 {
-	long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
-			      RWSEM_ACTIVE_WRITE_BIAS);
+	long tmp = atomic_long_cmpxchg_acquire(&sem->count,
+			RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS);
 	return tmp == RWSEM_UNLOCKED_VALUE;
 }

@@ -143,9 +143,9 @@ __downgrade_write (struct rw_semaphore *sem)
 	long old, new;

 	do {
-		old = sem->count;
+		old = atomic_long_read(&sem->count);
 		new = old - RWSEM_WAITING_BIAS;
-	} while (cmpxchg_rel(&sem->count, old, new) != old);
+	} while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);

 	if (old < 0)
 		rwsem_downgrade_wake(sem);
+3 −3
@@ -41,8 +41,8 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
 	long tmp;

-	while ((tmp = sem->count) >= 0) {
-		if (tmp == cmpxchg_acquire(&sem->count, tmp,
+	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
+		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
 				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
 			return 1;
 		}
@@ -79,7 +79,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
 	long tmp;

-	tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
+	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
 		      RWSEM_ACTIVE_WRITE_BIAS);
 	return tmp == RWSEM_UNLOCKED_VALUE;
 }
+5 −3
@@ -23,10 +23,11 @@ struct rw_semaphore;

 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
 #include <linux/rwsem-spinlock.h> /* use a generic implementation */
+#define __RWSEM_INIT_COUNT(name)	.count = RWSEM_UNLOCKED_VALUE
 #else
 /* All arch specific implementations share the same struct */
 struct rw_semaphore {
-	long count;
+	atomic_long_t count;
 	struct list_head wait_list;
 	raw_spinlock_t wait_lock;
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
@@ -54,9 +55,10 @@ extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
 /* In all implementations count != 0 means locked */
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
-	return sem->count != 0;
+	return atomic_long_read(&sem->count) != 0;
 }

+#define __RWSEM_INIT_COUNT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
 #endif

 /* Common initializer macros and functions */
@@ -74,7 +76,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 #endif

 #define __RWSEM_INITIALIZER(name)				\
-	{ .count = RWSEM_UNLOCKED_VALUE,			\
+	{ __RWSEM_INIT_COUNT(name),				\
 	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
 	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock)	\
 	  __RWSEM_OPT_INIT(name)				\
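
[ Editor's note: the initializer is split into __RWSEM_INIT_COUNT() because the CONFIG_RWSEM_GENERIC_SPINLOCK variant keeps a non-atomic count while the shared struct above now uses atomic_long_t. A sketch of what a static definition expands to in the atomic case ('my_sem' is a hypothetical example name):

	static DECLARE_RWSEM(my_sem);	/* uses __RWSEM_INITIALIZER(my_sem) */
	/*
	 * => .count     = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE),
	 *    .wait_list = LIST_HEAD_INIT((my_sem).wait_list),
	 *    .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(my_sem.wait_lock), ...
	 */
]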
+17 −15
@@ -80,7 +80,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
 	lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
-	sem->count = RWSEM_UNLOCKED_VALUE;
+	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
 	raw_spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
@@ -153,10 +153,11 @@ __rwsem_mark_wake(struct rw_semaphore *sem,
 	if (wake_type != RWSEM_WAKE_READ_OWNED) {
 		adjustment = RWSEM_ACTIVE_READ_BIAS;
  try_reader_grant:
-		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
+		oldcount = atomic_long_add_return(adjustment, &sem->count) - adjustment;
+
 		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
 			/* A writer stole the lock. Undo our reader grant. */
-			if (rwsem_atomic_update(-adjustment, sem) &
+			if (atomic_long_sub_return(adjustment, &sem->count) &
 						RWSEM_ACTIVE_MASK)
 				goto out;
 			/* Last active locker left. Retry waking readers. */
@@ -186,7 +187,7 @@ __rwsem_mark_wake(struct rw_semaphore *sem,
 		adjustment -= RWSEM_WAITING_BIAS;

 	if (adjustment)
-		rwsem_atomic_add(adjustment, sem);
+		atomic_long_add(adjustment, &sem->count);

 	next = sem->wait_list.next;
 	loop = woken;
@@ -233,7 +234,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 	list_add_tail(&waiter.list, &sem->wait_list);

 	/* we're now waiting on the lock, but no longer actively locking */
-	count = rwsem_atomic_update(adjustment, sem);
+	count = atomic_long_add_return(adjustment, &sem->count);

 	/* If there are no active locks, wake the front queued process(es).
 	 *
@@ -282,7 +283,8 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
 			RWSEM_ACTIVE_WRITE_BIAS :
 			RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;

-	if (cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count) == RWSEM_WAITING_BIAS) {
+	if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
+							== RWSEM_WAITING_BIAS) {
 		rwsem_set_owner(sem);
 		return true;
 	}
@@ -296,13 +298,13 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
  */
 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 {
-	long old, count = READ_ONCE(sem->count);
+	long old, count = atomic_long_read(&sem->count);

 	while (true) {
 		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
 			return false;

-		old = cmpxchg_acquire(&sem->count, count,
+		old = atomic_long_cmpxchg_acquire(&sem->count, count,
 				      count + RWSEM_ACTIVE_WRITE_BIAS);
 		if (old == count) {
 			rwsem_set_owner(sem);
@@ -324,7 +326,7 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	rcu_read_lock();
 	owner = READ_ONCE(sem->owner);
 	if (!owner) {
-		long count = READ_ONCE(sem->count);
+		long count = atomic_long_read(&sem->count);
 		/*
 		 * If sem->owner is not set, yet we have just recently entered the
 		 * slowpath with the lock being active, then there is a possibility
@@ -375,7 +377,7 @@ bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
 	 * held by readers. Check the counter to verify the
 	 * state.
 	 */
-	count = READ_ONCE(sem->count);
+	count = atomic_long_read(&sem->count);
 	return (count == 0 || count == RWSEM_WAITING_BIAS);
 }

@@ -460,7 +462,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 	WAKE_Q(wake_q);

 	/* undo write bias from down_write operation, stop active locking */
-	count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem);
+	count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);

 	/* do optimistic spinning and steal lock if possible */
 	if (rwsem_optimistic_spin(sem))
@@ -483,7 +485,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)

 	/* we're now waiting on the lock, but no longer actively locking */
 	if (waiting) {
-		count = READ_ONCE(sem->count);
+		count = atomic_long_read(&sem->count);

 		/*
 		 * If there were already threads queued before us and there are
@@ -505,7 +507,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 		}

 	} else
-		count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
+		count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);

 	/* wait until we successfully acquire the lock */
 	set_current_state(state);
@@ -521,7 +523,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)

 			schedule();
 			set_current_state(state);
-		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);
+		} while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);

 		raw_spin_lock_irq(&sem->wait_lock);
 	}
@@ -536,7 +538,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 	raw_spin_lock_irq(&sem->wait_lock);
 	list_del(&waiter.list);
 	if (list_empty(&sem->wait_list))
-		rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem);
+		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
 	else
 		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 	raw_spin_unlock_irq(&sem->wait_lock);
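
[ Editor's note: as the hunks above show, the removed helpers map directly onto the generic atomic API; a sketch summarizing the substitutions made in this file:

	rwsem_atomic_add(adj, sem);		/* -> atomic_long_add(adj, &sem->count); */
	count = rwsem_atomic_update(adj, sem);	/* -> count = atomic_long_add_return(adj, &sem->count); */
	count = rwsem_atomic_update(-adj, sem);	/* -> count = atomic_long_sub_return(adj, &sem->count); */

Where the return value is unused, the patch simply uses atomic_long_add() with a negative argument instead. ]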