Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 60815cf2 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull ACCESS_ONCE cleanup preparation from Christian Borntraeger:
 "kernel: Provide READ_ONCE and ASSIGN_ONCE

  As discussed on LKML http://marc.info/?i=54611D86.4040306%40de.ibm.com
  ACCESS_ONCE might fail with specific compilers for non-scalar
  accesses.

  Here is a set of patches to tackle that problem.

  The first patch introduces READ_ONCE and ASSIGN_ONCE.  If the data
  structure is larger than the machine word size memcpy is used and a
  warning is emitted.  The next patches fix up several in-tree users of
  ACCESS_ONCE on non-scalar types.

  This does not yet contain a patch that forces ACCESS_ONCE to work only
  on scalar types.  This is targeted for the next merge window as Linux
  next already contains new offenders regarding ACCESS_ONCE vs.
  non-scalar types"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux:
  s390/kvm: REPLACE barrier fixup with READ_ONCE
  arm/spinlock: Replace ACCESS_ONCE with READ_ONCE
  arm64/spinlock: Replace ACCESS_ONCE with READ_ONCE
  mips/gup: Replace ACCESS_ONCE with READ_ONCE
  x86/gup: Replace ACCESS_ONCE with READ_ONCE
  x86/spinlock: Replace ACCESS_ONCE with READ_ONCE
  mm: replace ACCESS_ONCE with READ_ONCE or barriers
  kernel: Provide READ_ONCE and ASSIGN_ONCE
parents bfc7249c 5de72a22
Loading
Loading
Loading
Loading
+2 −2
Original line number Original line Diff line number Diff line
@@ -120,12 +120,12 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)


static inline int arch_spin_is_locked(arch_spinlock_t *lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
{
	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}
}


static inline int arch_spin_is_contended(arch_spinlock_t *lock)
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	struct __raw_tickets tickets = READ_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
	return (tickets.next - tickets.owner) > 1;
}
}
#define arch_spin_is_contended	arch_spin_is_contended
#define arch_spin_is_contended	arch_spin_is_contended
+2 −2
Original line number Original line Diff line number Diff line
@@ -99,12 +99,12 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)


static inline int arch_spin_is_locked(arch_spinlock_t *lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
{
	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}
}


static inline int arch_spin_is_contended(arch_spinlock_t *lock)
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
{
	arch_spinlock_t lockval = ACCESS_ONCE(*lock);
	arch_spinlock_t lockval = READ_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
	return (lockval.next - lockval.owner) > 1;
}
}
#define arch_spin_is_contended	arch_spin_is_contended
#define arch_spin_is_contended	arch_spin_is_contended
+1 −1
Original line number Original line Diff line number Diff line
@@ -30,7 +30,7 @@ static inline pte_t gup_get_pte(pte_t *ptep)


	return pte;
	return pte;
#else
#else
	return ACCESS_ONCE(*ptep);
	return READ_ONCE(*ptep);
#endif
#endif
}
}


+6 −12
Original line number Original line Diff line number Diff line
@@ -227,12 +227,10 @@ static void ipte_lock_simple(struct kvm_vcpu *vcpu)
		goto out;
		goto out;
	ic = &vcpu->kvm->arch.sca->ipte_control;
	ic = &vcpu->kvm->arch.sca->ipte_control;
	do {
	do {
		old = *ic;
		old = READ_ONCE(*ic);
		barrier();
		while (old.k) {
		while (old.k) {
			cond_resched();
			cond_resched();
			old = *ic;
			old = READ_ONCE(*ic);
			barrier();
		}
		}
		new = old;
		new = old;
		new.k = 1;
		new.k = 1;
@@ -251,8 +249,7 @@ static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
		goto out;
		goto out;
	ic = &vcpu->kvm->arch.sca->ipte_control;
	ic = &vcpu->kvm->arch.sca->ipte_control;
	do {
	do {
		old = *ic;
		old = READ_ONCE(*ic);
		barrier();
		new = old;
		new = old;
		new.k = 0;
		new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
@@ -267,12 +264,10 @@ static void ipte_lock_siif(struct kvm_vcpu *vcpu)


	ic = &vcpu->kvm->arch.sca->ipte_control;
	ic = &vcpu->kvm->arch.sca->ipte_control;
	do {
	do {
		old = *ic;
		old = READ_ONCE(*ic);
		barrier();
		while (old.kg) {
		while (old.kg) {
			cond_resched();
			cond_resched();
			old = *ic;
			old = READ_ONCE(*ic);
			barrier();
		}
		}
		new = old;
		new = old;
		new.k = 1;
		new.k = 1;
@@ -286,8 +281,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)


	ic = &vcpu->kvm->arch.sca->ipte_control;
	ic = &vcpu->kvm->arch.sca->ipte_control;
	do {
	do {
		old = *ic;
		old = READ_ONCE(*ic);
		barrier();
		new = old;
		new = old;
		new.kh--;
		new.kh--;
		if (!new.kh)
		if (!new.kh)
+4 −4
Original line number Original line Diff line number Diff line
@@ -92,7 +92,7 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
		unsigned count = SPIN_THRESHOLD;
		unsigned count = SPIN_THRESHOLD;


		do {
		do {
			if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
			if (READ_ONCE(lock->tickets.head) == inc.tail)
				goto out;
				goto out;
			cpu_relax();
			cpu_relax();
		} while (--count);
		} while (--count);
@@ -105,7 +105,7 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
{
	arch_spinlock_t old, new;
	arch_spinlock_t old, new;


	old.tickets = ACCESS_ONCE(lock->tickets);
	old.tickets = READ_ONCE(lock->tickets);
	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
		return 0;
		return 0;


@@ -162,14 +162,14 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)


static inline int arch_spin_is_locked(arch_spinlock_t *lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
	struct __raw_tickets tmp = READ_ONCE(lock->tickets);


	return tmp.tail != tmp.head;
	return tmp.tail != tmp.head;
}
}


static inline int arch_spin_is_contended(arch_spinlock_t *lock)
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
	struct __raw_tickets tmp = READ_ONCE(lock->tickets);


	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
}
Loading