Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 49d5d32d authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "locking/atomic: Add atomic_cond_read_acquire()"

parents 2892dfee 75a7ea0c
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -243,4 +243,7 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
#define atomic_long_inc_not_zero(l) \
	ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))

/*
 * New in this commit: forward cond_read_acquire to the word-sized atomic
 * implementation.  ATOMIC_LONG_PFX presumably resolves to atomic_ or
 * atomic64_ depending on BITS_PER_LONG -- macro defined outside this hunk.
 */
#define atomic_long_cond_read_acquire(v, c) \
	ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c))

#endif  /*  _ASM_GENERIC_ATOMIC_LONG_H  */
+1 −11
Original line number Diff line number Diff line
@@ -146,23 +146,13 @@ static inline void queued_read_unlock(struct qrwlock *lock)
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}

/**
 * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: the write byte address of a queue rwlock
 *
 * NOTE(review): this helper is removed by this commit (diff +/- markers
 * were lost in extraction); its role is replaced by the lock->wmode union
 * field added to struct qrwlock.
 */
static inline u8 *__qrwlock_write_byte(struct qrwlock *lock)
{
	/* writer byte is byte 0 on little-endian, byte 3 on big-endian */
	return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN);
}

/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 *
 * Clears the writer-mode byte with release ordering so all stores made in
 * the critical section are visible before the lock is observed as free.
 *
 * NOTE(review): the two stores below are the removed (old) and added (new)
 * versions of the same line from a diff whose +/- markers were lost in
 * extraction; only the &lock->wmode form remains after this commit.
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
	smp_store_release(__qrwlock_write_byte(lock), 0);	/* removed line */
	smp_store_release(&lock->wmode, 0);			/* added line */
}

/*
+13 −2
Original line number Diff line number Diff line
@@ -9,12 +9,23 @@
 */

/*
 * Queue read/write lock.  The anonymous union (added by this commit, moved
 * here from the private struct __qrwlock in qrwlock.c) lets the slowpath
 * address the writer-mode byte directly while cnts remains the atomic
 * whole-word view used by the fast paths.
 */
typedef struct qrwlock {
	union {
		atomic_t cnts;
		struct {
#ifdef __LITTLE_ENDIAN
			u8 wmode;	/* Writer mode   */
			u8 rcnts[3];	/* Reader counts */
#else
			u8 rcnts[3];	/* Reader counts */
			u8 wmode;	/* Writer mode   */
#endif
		};
	};
	arch_spinlock_t		wait_lock;
} arch_rwlock_t;

/*
 * NOTE(review): the two .cnts lines below are the removed (old) and added
 * (new) versions of the same initializer from a diff whose +/- markers were
 * lost in extraction; the braced form initializes the anonymous union and
 * is the one that remains after this commit.
 */
#define	__ARCH_RW_LOCK_UNLOCKED {		\
	.cnts = ATOMIC_INIT(0),			\
	{ .cnts = ATOMIC_INIT(0), },		\
	.wait_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
}

+4 −0
Original line number Diff line number Diff line
@@ -627,6 +627,8 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif

/*
 * New in this commit: spin until condition expression c (evaluated against
 * the loaded value) holds, then return the value with acquire ordering.
 * Thin wrapper around smp_cond_load_acquire() on the atomic's counter.
 */
#define atomic_cond_read_acquire(v, c)	smp_cond_load_acquire(&(v)->counter, (c))

#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
@@ -1023,6 +1025,8 @@ static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v
}
#endif

/* 64-bit counterpart of atomic_cond_read_acquire(), added by this commit */
#define atomic64_cond_read_acquire(v, c)	smp_cond_load_acquire(&(v)->counter, (c))

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */
+2 −24
Original line number Diff line number Diff line
@@ -22,26 +22,6 @@
#include <linux/hardirq.h>
#include <asm/qrwlock.h>

/*
 * This internal data structure is used for optimizing access to some of
 * the subfields within the atomic_t cnts.
 *
 * NOTE(review): removed by this commit (diff +/- markers lost in
 * extraction); the identical union layout now lives directly in the
 * public struct qrwlock in qrwlock_types.h, making this private mirror
 * -- and the casts to it -- unnecessary.
 */
struct __qrwlock {
	union {
		atomic_t cnts;
		struct {
#ifdef __LITTLE_ENDIAN
			u8 wmode;	/* Writer mode   */
			u8 rcnts[3];	/* Reader counts */
#else
			u8 rcnts[3];	/* Reader counts */
			u8 wmode;	/* Writer mode   */
#endif
		};
	};
	arch_spinlock_t	lock;
};

/**
 * rspin_until_writer_unlock - inc reader count & spin until writer is gone
 * @lock  : Pointer to queue rwlock structure
@@ -124,10 +104,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
	 * or wait for a previous writer to go away.
	 */
	for (;;) {
		struct __qrwlock *l = (struct __qrwlock *)lock;

		if (!READ_ONCE(l->wmode) &&
		   (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
		if (!READ_ONCE(lock->wmode) &&
		   (cmpxchg_relaxed(&lock->wmode, 0, _QW_WAITING) == 0))
			break;

		cpu_relax_lowlatency();