include/asm-generic/qrwlock.h +2 −2

@@ -49,7 +49,7 @@
 /*
  * External function declarations
  */
-extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
+extern void queued_read_lock_slowpath(struct qrwlock *lock);
 extern void queued_write_lock_slowpath(struct qrwlock *lock);
 
 /**
@@ -118,7 +118,7 @@ static inline void queued_read_lock(struct qrwlock *lock)
 		return;
 
 	/* The slowpath will decrement the reader count, if necessary. */
-	queued_read_lock_slowpath(lock, cnts);
+	queued_read_lock_slowpath(lock);
 }
 
 /**

kernel/locking/qrwlock.c +12 −38

@@ -22,29 +22,11 @@
 #include <linux/hardirq.h>
 #include <asm/qrwlock.h>
 
-/**
- * rspin_until_writer_unlock - inc reader count & spin until writer is gone
- * @lock  : Pointer to queue rwlock structure
- * @writer: Current queue rwlock writer status byte
- *
- * In interrupt context or at the head of the queue, the reader will just
- * increment the reader count & wait until the writer releases the lock.
- */
-static __always_inline void
-rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
-{
-	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
-		cpu_relax_lowlatency();
-		cnts = atomic_read_acquire(&lock->cnts);
-	}
-}
-
 /**
  * queued_read_lock_slowpath - acquire read lock of a queue rwlock
  * @lock: Pointer to queue rwlock structure
- * @cnts: Current qrwlock lock value
  */
-void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
+void queued_read_lock_slowpath(struct qrwlock *lock)
 {
 	/*
 	 * Readers come here when they cannot get the lock without waiting
@@ -52,13 +34,12 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 	if (unlikely(in_interrupt())) {
 		/*
 		 * Readers in interrupt context will get the lock immediately
-		 * if the writer is just waiting (not holding the lock yet).
-		 * The rspin_until_writer_unlock() function returns immediately
-		 * in this case. Otherwise, they will spin (with ACQUIRE
-		 * semantics) until the lock is available without waiting in
-		 * the queue.
+		 * if the writer is just waiting (not holding the lock yet),
+		 * so spin with ACQUIRE semantics until the lock is available
+		 * without waiting in the queue.
 		 */
-		rspin_until_writer_unlock(lock, cnts);
+		atomic_cond_read_acquire(&lock->cnts,
+					 (VAL & _QW_WMASK) != _QW_LOCKED);
 		return;
 	}
 	atomic_sub(_QR_BIAS, &lock->cnts);
@@ -67,14 +48,14 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 	 * Put the reader into the wait queue
 	 */
 	arch_spin_lock(&lock->wait_lock);
+	atomic_add(_QR_BIAS, &lock->cnts);
 
 	/*
 	 * The ACQUIRE semantics of the following spinning code ensure
 	 * that accesses can't leak upwards out of our subsequent critical
 	 * section in the case that the lock is currently held for write.
 	 */
-	cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts);
-	rspin_until_writer_unlock(lock, cnts);
+	atomic_cond_read_acquire(&lock->cnts, (VAL & _QW_WMASK) != _QW_LOCKED);
 
 	/*
 	 * Signal the next one in queue to become queue head
@@ -89,8 +70,6 @@ EXPORT_SYMBOL(queued_read_lock_slowpath);
  */
 void queued_write_lock_slowpath(struct qrwlock *lock)
 {
-	u32 cnts;
-
 	/* Put the writer into the wait queue */
 	arch_spin_lock(&lock->wait_lock);
 
@@ -112,15 +91,10 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 	}
 
 	/* When no more readers, set the locked flag */
-	for (;;) {
-		cnts = atomic_read(&lock->cnts);
-		if ((cnts == _QW_WAITING) &&
-		    (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
-					    _QW_LOCKED) == _QW_WAITING))
-			break;
-
-		cpu_relax_lowlatency();
-	}
+	do {
+		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
+	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
+					_QW_LOCKED) != _QW_WAITING);
 unlock:
 	arch_spin_unlock(&lock->wait_lock);
 }
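The change is one idea applied at three spin sites: replace each open-coded loop with atomic_cond_read_acquire(&lock->cnts, cond), which spins until the condition on the lock word holds and gives the final, successful read ACQUIRE semantics, so the caller's critical section cannot float above it. As a rough userspace model of that contract (C11 atomics; the constant values and function names in these sketches are illustrative stand-ins, not the kernel's definitions), here is the interrupt-context reader's wait:

#include <stdatomic.h>
#include <stdint.h>

#define QW_WMASK	0x1ffU	/* illustrative writer mask, not the kernel's */
#define QW_LOCKED	0x0ffU	/* illustrative "writer holds the lock" value */

/*
 * Sketch of atomic_cond_read_acquire(&lock->cnts,
 * (VAL & _QW_WMASK) != _QW_LOCKED): poll the lock word with cheap
 * relaxed loads, then upgrade the read that finally saw the writer
 * gone to ACQUIRE, so the reader's critical section cannot be
 * reordered above it.
 */
static uint32_t cond_read_acquire_writer_gone(_Atomic uint32_t *cnts)
{
	uint32_t val;

	do {
		val = atomic_load_explicit(cnts, memory_order_relaxed);
		/* a cpu_relax()-style pause would sit here */
	} while ((val & QW_WMASK) == QW_LOCKED);

	atomic_thread_fence(memory_order_acquire);
	return val;
}

In the kernel, atomic_cond_read_acquire() is a thin wrapper around smp_cond_load_acquire() on the lock word, which an architecture can implement with something smarter than pure polling; arm64, for example, can wait for the cache line to change with WFE. That is the payoff of funnelling all three sites through one primitive.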
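The queued-reader tail changes shape too: rather than folding the reader-count increment into atomic_fetch_add_acquire() and handing the result to rspin_until_writer_unlock(), the patch re-adds the bias with a plain atomic_add() once wait_lock is held and lets the conditional read that follows carry the ACQUIRE, exactly as the retained comment describes. A sketch of that tail in the same illustrative C11 model (QR_BIAS and qr_read_lock_tail() are made-up names for this fragment):

#include <stdatomic.h>
#include <stdint.h>

#define QR_BIAS		0x200U	/* illustrative per-reader increment */
#define QW_WMASK	0x1ffU	/* illustrative writer mask */
#define QW_LOCKED	0x0ffU	/* illustrative "writer holds the lock" value */

/*
 * Model of the patched reader tail, entered with wait_lock held:
 *
 *	atomic_add(_QR_BIAS, &lock->cnts);
 *	...
 *	atomic_cond_read_acquire(&lock->cnts, (VAL & _QW_WMASK) != _QW_LOCKED);
 *
 * The bias is re-added with a fully relaxed RMW; the ACQUIRE that used
 * to live on atomic_fetch_add_acquire() now sits on the spin, i.e. on
 * the read that actually observes the writer's release.
 */
static void qr_read_lock_tail(_Atomic uint32_t *cnts)
{
	atomic_fetch_add_explicit(cnts, QR_BIAS, memory_order_relaxed);

	while ((atomic_load_explicit(cnts, memory_order_relaxed) &
		QW_WMASK) == QW_LOCKED)
		;	/* pause/wait hint would go here */
	atomic_thread_fence(memory_order_acquire);
}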
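Finally, the writer tail splits waiting from claiming: spin with ACQUIRE until the lock word is exactly _QW_WAITING, then try to swing it to _QW_LOCKED with a relaxed cmpxchg, repeating the wait if a reader slipped in between. A minimal sketch under the same assumptions (qw_write_lock_tail() is likewise a made-up name):

#include <stdatomic.h>
#include <stdint.h>

#define QW_WAITING	0x100U	/* illustrative "a writer is waiting" value */
#define QW_LOCKED	0x0ffU	/* illustrative "writer holds the lock" value */

/*
 * Model of the patched writer tail:
 *
 *	do {
 *		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
 *	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
 *					_QW_LOCKED) != _QW_WAITING);
 */
static void qw_write_lock_tail(_Atomic uint32_t *cnts)
{
	uint32_t expected;

	do {
		/* Wait until all readers are gone and only WAITING is set. */
		while (atomic_load_explicit(cnts, memory_order_relaxed) !=
		       QW_WAITING)
			;	/* pause/wait hint would go here */
		atomic_thread_fence(memory_order_acquire);

		/*
		 * Claim the lock. The patch leaves this cmpxchg relaxed and
		 * relies on the conditional read above for the ordering.
		 */
		expected = QW_WAITING;
	} while (!atomic_compare_exchange_weak_explicit(cnts, &expected,
							QW_LOCKED,
							memory_order_relaxed,
							memory_order_relaxed));
}

Moving the ACQUIRE onto the conditional read is what allows the cmpxchg to drop from _acquire to _relaxed, and with rspin_until_writer_unlock() and the open-coded for (;;) loop both gone, the file shrinks by the 38-deleted, 12-added lines shown in the diffstat.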