
Commit a8a217c2 authored by Will Deacon, committed by Ingo Molnar

locking/core: Remove {read,spin,write}_can_lock()



Outside of the locking code itself, {read,spin,write}_can_lock() have no
users in tree. Apparmor (the last remaining user of write_can_lock()) got
moved over to lockdep by the previous patch.

This patch removes the use of {read,spin,write}_can_lock() from the
BUILD_LOCK_OPS macro, deferring to the trylock operation for testing the
lock status, and subsequently removes the unused macros altogether. They
aren't guaranteed to work in a concurrent environment and can give
incorrect results in the case of qrwlock.
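
For context (not part of this patch), the snapshot that a *_can_lock() test used to provide can instead be obtained from the corresponding trylock operation, which is what BUILD_LOCK_OPS now defers to. A minimal sketch, using the stock spin_trylock()/spin_unlock() API with a hypothetical helper name:

#include <linux/types.h>
#include <linux/spinlock.h>

/*
 * Illustrative only: probe the lock with a trylock and release it at
 * once. The answer is merely a snapshot; another CPU may take or drop
 * the lock immediately afterwards, which is precisely why the old
 * *_can_lock() macros were unreliable in a concurrent environment.
 */
static bool spin_appears_unlocked(spinlock_t *lock)
{
	if (spin_trylock(lock)) {
		spin_unlock(lock);
		return true;	/* lock was free at the moment we probed */
	}
	return false;		/* lock was held when we probed */
}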

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1507055129-12300-2-git-send-email-will.deacon@arm.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 26c4eb19
+0 −10
@@ -54,16 +54,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 /***********************************************************/
 
-static inline int arch_read_can_lock(arch_rwlock_t *lock)
-{
-	return (lock->lock & 1) == 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *lock)
-{
-	return lock->lock == 0;
-}
-
 static inline void arch_read_lock(arch_rwlock_t *lock)
 {
 	long regx;
+0 −3
@@ -410,9 +410,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 
 #endif
 
-#define arch_read_can_lock(x)	((x)->counter > 0)
-#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
-
 #define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
 
+0 −6
@@ -193,9 +193,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	dsb_sev();
 }
 
-/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x)		(ACCESS_ONCE((x)->lock) == 0)
-
 /*
  * Read locks are a bit more hairy:
  *  - Exclusively load the lock value.
@@ -273,9 +270,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	}
 }
 
-/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x)		(ACCESS_ONCE((x)->lock) < 0x80000000)
-
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
+0 −10
@@ -48,16 +48,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	__raw_spin_unlock_asm(&lock->lock);
 }
 
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
-	return __raw_uncached_fetch_asm(&rw->lock) > 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
-	return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
-}
-
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	__raw_read_lock_asm(&rw->lock);
+0 −10
@@ -86,16 +86,6 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
 	return temp;
 }
 
-static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
-{
-	return rwlock->lock == 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
-{
-	return rwlock->lock == 0;
-}
-
 /*  Stuffs a -1 in the lock value?  */
 static inline void arch_write_lock(arch_rwlock_t *lock)
 {