
Commit 91bbefe6 authored by Peter Zijlstra, committed by Ingo Molnar

arch,mips: Convert smp_mb__*()



MIPS is interesting and has hardware variants that reorder over ll/sc
as well as those that do not.

Implement the 2 new barrier functions as per the old barriers.
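As a rough sketch of the calling convention the new pair is meant for (illustrative only, not part of this patch; the free_irqno() hunk below is the in-tree example), a caller that uses a non-value-returning atomic bit operation for signalling brackets it with the generic barriers. The chan_busy bitmap and mark_channel_idle() helper here are invented for the example:

#include <linux/atomic.h>
#include <linux/bitops.h>

static DECLARE_BITMAP(chan_busy, 32);	/* hypothetical bitmap of busy channels */

static void mark_channel_idle(unsigned int chan)
{
	/* Order earlier stores before the atomic RMW below ... */
	smp_mb__before_atomic();
	clear_bit(chan, chan_busy);	/* atomic, but carries no barrier of its own */
	/* ... and order the RMW before whatever follows. */
	smp_mb__after_atomic();
}

On MIPS both calls boil down to smp_mb__before_llsc() and smp_llsc_mb(), exactly as the old per-operation names did, per the barrier definitions added further down.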

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-9ph49jbae3hol9v721sbc2g6@git.kernel.org


Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: "Maciej W. Rozycki" <macro@codesourcery.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org
Cc: linux-mips@linux-mips.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 40074dec
+0 −9
@@ -761,13 +761,4 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #endif /* CONFIG_64BIT */
 
-/*
- * atomic*_return operations are serializing but not the non-*_return
- * versions.
- */
-#define smp_mb__before_atomic_dec()	smp_mb__before_llsc()
-#define smp_mb__after_atomic_dec()	smp_llsc_mb()
-#define smp_mb__before_atomic_inc()	smp_mb__before_llsc()
-#define smp_mb__after_atomic_inc()	smp_llsc_mb()
-
 #endif /* _ASM_ATOMIC_H */
+3 −0
@@ -195,4 +195,7 @@ do { \
 	___p1;								\
 })
 
+#define smp_mb__before_atomic()	smp_mb__before_llsc()
+#define smp_mb__after_atomic()	smp_llsc_mb()
+
 #endif /* __ASM_BARRIER_H */
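For MIPS the new pair expands to the same ll/sc barriers the removed per-operation macros used, so a caller that previously wrote smp_mb__before_atomic_dec() keeps its ordering when it switches to the generic name. A sketch of such a caller (struct obj and its fields are invented for the example, not taken from this patch):

#include <linux/types.h>
#include <linux/atomic.h>

struct obj {
	atomic_t users;			/* hypothetical reference count */
	bool dying;
};

static void obj_mark_dying(struct obj *o)
{
	o->dying = true;
	/* was: smp_mb__before_atomic_dec() */
	smp_mb__before_atomic();	/* order the ->dying store before the decrement */
	atomic_dec(&o->users);
}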
+2 −9
@@ -37,13 +37,6 @@
 #define __EXT		"dext	 "
 #endif
 
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit()	smp_mb__before_llsc()
-#define smp_mb__after_clear_bit()	smp_llsc_mb()
-
-
 /*
  * These are the "slower" versions of the functions and are in bitops.c.
  * These functions call raw_local_irq_{save,restore}().
@@ -120,7 +113,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
  *
  * clear_bit() is atomic and may not be reordered.  However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
@@ -175,7 +168,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
  */
 static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(nr, addr);
 }
 
+2 −2
@@ -62,9 +62,9 @@ void __init alloc_legacy_irqno(void)
 
 void free_irqno(unsigned int irq)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(irq, irq_map);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 /*