arch/x86/include/asm/cmpxchg.h  +70 −70

@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
 	__compiletime_error("Bad argument size for cmpxchg");
 extern void __xadd_wrong_size(void)
 	__compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+	__compiletime_error("Bad argument size for add");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size it set to

@@ -32,59 +34,46 @@ extern void __xadd_wrong_size(void)
 #endif
 
 /*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
- * Since this is generally used to protect other memory information, we
- * use "asm volatile" and "memory" clobbers to prevent gcc from moving
- * information around.
+ * An exchange-type operation, which takes a value and a pointer, and
+ * returns a the old value.
  */
-#define __xchg(x, ptr, size)					\
-({								\
-	__typeof(*(ptr)) __x = (x);				\
-	switch (size) {						\
-	case __X86_CASE_B:					\
-	{							\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);	\
-		asm volatile("xchgb %0,%1"			\
-			     : "=q" (__x), "+m" (*__ptr)	\
-			     : "0" (__x)			\
-			     : "memory");			\
-		break;						\
-	}							\
-	case __X86_CASE_W:					\
-	{							\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);	\
-		asm volatile("xchgw %0,%1"			\
-			     : "=r" (__x), "+m" (*__ptr)	\
-			     : "0" (__x)			\
-			     : "memory");			\
-		break;						\
-	}							\
-	case __X86_CASE_L:					\
-	{							\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);	\
-		asm volatile("xchgl %0,%1"			\
-			     : "=r" (__x), "+m" (*__ptr)	\
-			     : "0" (__x)			\
-			     : "memory");			\
-		break;						\
-	}							\
-	case __X86_CASE_Q:					\
-	{							\
-		volatile u64 *__ptr = (volatile u64 *)(ptr);	\
-		asm volatile("xchgq %0,%1"			\
-			     : "=r" (__x), "+m" (*__ptr)	\
-			     : "0" (__x)			\
-			     : "memory");			\
-		break;						\
-	}							\
-	default:						\
-		__xchg_wrong_size();				\
-	}							\
-	__x;							\
-})
+#define __xchg_op(ptr, arg, op, lock)				\
+	({							\
+		__typeof__ (*(ptr)) __ret = (arg);		\
+		switch (sizeof(*(ptr))) {			\
+		case __X86_CASE_B:				\
+			asm volatile (lock #op "b %b0, %1\n"	\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");	\
+			break;					\
+		case __X86_CASE_W:				\
+			asm volatile (lock #op "w %w0, %1\n"	\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");	\
+			break;					\
+		case __X86_CASE_L:				\
+			asm volatile (lock #op "l %0, %1\n"	\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");	\
+			break;					\
+		case __X86_CASE_Q:				\
+			asm volatile (lock #op "q %q0, %1\n"	\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");	\
+			break;					\
+		default:					\
+			__ ## op ## _wrong_size();		\
+		}						\
+		__ret;						\
+	})
 
-#define xchg(ptr, v)						\
-	__xchg((v), (ptr), sizeof(*ptr))
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Since this is generally used to protect other memory information, we
+ * use "asm volatile" and "memory" clobbers to prevent gcc from moving
+ * information around.
+ */
+#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
 
 /*
  * Atomic compare and exchange. Compare OLD with MEM, if identical,
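As a rough illustration of what the new __xchg_op() boils down to, the sketch below shows the 4-byte case of xchg() as standalone userspace code (GCC-style inline asm on x86; an approximation for illustration, not the kernel macro itself). The xchg instruction swaps its register and memory operands and is implicitly locked, which is why the lock argument passed for xchg is the empty string.

#include <stdint.h>

/*
 * Minimal sketch, not the kernel macro: roughly what xchg(ptr, v)
 * expands to for a 4-byte operand.  No "lock" prefix is needed
 * because xchg locks the bus/cache line implicitly.
 */
static inline uint32_t xchg32(volatile uint32_t *ptr, uint32_t val)
{
	asm volatile("xchgl %0, %1"
		     : "+r" (val), "+m" (*ptr)
		     : : "memory");
	return val;	/* the value *ptr held before the swap */
}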
@@ -165,46 +154,57 @@ extern void __xadd_wrong_size(void)
 	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
-#define __xadd(ptr, inc, lock)					\
+/*
+ * xadd() adds "inc" to "*ptr" and atomically returns the previous
+ * value of "*ptr".
+ *
+ * xadd() is locked when multiple CPUs are online
+ * xadd_sync() is always locked
+ * xadd_local() is never locked
+ */
+#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
+#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
+#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
+#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
+
+#define __add(ptr, inc, lock)					\
 	({							\
 		__typeof__ (*(ptr)) __ret = (inc);		\
 		switch (sizeof(*(ptr))) {			\
 		case __X86_CASE_B:				\
-			asm volatile (lock "xaddb %b0, %1\n"	\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");	\
+			asm volatile (lock "addb %b1, %0\n"	\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");	\
 			break;					\
 		case __X86_CASE_W:				\
-			asm volatile (lock "xaddw %w0, %1\n"	\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");	\
+			asm volatile (lock "addw %w1, %0\n"	\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");	\
 			break;					\
 		case __X86_CASE_L:				\
-			asm volatile (lock "xaddl %0, %1\n"	\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");	\
+			asm volatile (lock "addl %1, %0\n"	\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");	\
 			break;					\
 		case __X86_CASE_Q:				\
-			asm volatile (lock "xaddq %q0, %1\n"	\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");	\
+			asm volatile (lock "addq %1, %0\n"	\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");	\
 			break;					\
 		default:					\
-			__xadd_wrong_size();			\
+			__add_wrong_size();			\
 		}						\
 		__ret;						\
 	})
 
 /*
- * xadd() adds "inc" to "*ptr" and atomically returns the previous
- * value of "*ptr".
+ * add_*() adds "inc" to "*ptr"
  *
- * xadd() is locked when multiple CPUs are online
- * xadd_sync() is always locked
- * xadd_local() is never locked
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
  */
-#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
-#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
-#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
 
+#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
 
 #endif	/* ASM_X86_CMPXCHG_H */

arch/x86/include/asm/spinlock.h  +1 −14

@@ -79,23 +79,10 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
-	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
-		     : "+m" (lock->head_tail)
-		     :
-		     : "memory", "cc");
+	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
 }
-#else
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
-{
-	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
-		     : "+m" (lock->head_tail)
-		     :
-		     : "memory", "cc");
-}
-#endif
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
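The split between the two families of helpers is only about the return value: xadd() is a fetch-and-add and hands back what *ptr contained before, while __add()/add_smp() discard the old value, which is exactly what the reworked __ticket_spin_unlock() above needs. A userspace sketch of both operations, 4-byte case only, using GCC-style inline asm on x86 (an approximation for illustration, not the kernel macros):

#include <stdint.h>

/* Sketch of xadd(): atomically add "inc" to *ptr and return the value
 * *ptr held before the addition (xadd leaves it in the register operand). */
static inline uint32_t xadd32(volatile uint32_t *ptr, uint32_t inc)
{
	asm volatile("lock; xaddl %0, %1"
		     : "+r" (inc), "+m" (*ptr)
		     : : "memory", "cc");
	return inc;
}

/* Sketch of add_smp()/__add(): atomically add "inc" to *ptr, discarding
 * the old value -- no output register is needed, which is the point of
 * having this helper alongside xadd(). */
static inline void add32(volatile uint32_t *ptr, uint32_t inc)
{
	asm volatile("lock; addl %1, %0"
		     : "+m" (*ptr)
		     : "ri" (inc)
		     : "memory", "cc");
}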
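To show why the unlock path can get away with an add whose result is ignored, here is an illustrative ticket lock in portable C11 atomics. The type and field names are invented for the example and do not match the kernel's arch_spinlock_t; C11 has no add-without-fetch, so the unlock simply discards the returned value where the kernel now uses __add().

#include <stdatomic.h>

/* Illustrative ticket lock, loosely modelled on tickets.head/tickets.tail. */
struct ticket_lock {
	atomic_ushort head;	/* ticket currently being served */
	atomic_ushort tail;	/* next ticket to hand out */
};

static inline void ticket_lock(struct ticket_lock *lk)
{
	/* xadd()-style: take a ticket and learn the previous tail value. */
	unsigned short me = atomic_fetch_add_explicit(&lk->tail, 1,
						      memory_order_acquire);
	while (atomic_load_explicit(&lk->head, memory_order_acquire) != me)
		;	/* spin until our ticket comes up */
}

static inline void ticket_unlock(struct ticket_lock *lk)
{
	/* The old value of head is irrelevant on unlock, which is why
	 * __ticket_spin_unlock() can use __add() instead of xadd(). */
	atomic_fetch_add_explicit(&lk->head, 1, memory_order_release);
}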