
Commit 398e692f authored by Lennert Buytenhek, committed by Russell King

[ARM] 4298/1: fix memory barriers for DMA coherent and SMP platforms



This patch:
- Switches mb/rmb/wmb back to being full-blown DMBs on ARM SMP systems,
  since mb/rmb/wmb are required to order Normal memory accesses as well.
- Enables the use of DMB and ISB on XSC3 (which is an ARMv5TE ISA core
  but conforms to the ARMv6 memory ordering model and supports the
  various ARMv6 barriers.)
- Makes DMA coherent platforms (only ixp23xx at the moment) map
  mb/rmb/wmb to dmb(), as on DMA coherent platforms, DMA consistent
  mappings are done as Normal mappings, which are weakly ordered.

Signed-off-by: Lennert Buytenhek <buytenh@wantstofly.org>
Acked-by: David Howells <dhowells@redhat.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 9a4d93d4
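
For orientation before the diff: the following is a minimal, self-contained sketch of how the reworked uniprocessor mb() selection behaves, not the kernel header itself. CONFIG_SMP stands in for the kernel Kconfig symbol, and my_arch_is_coherent() is a hypothetical stand-in for arch_is_coherent() from <asm/memory.h> (which is why the patch adds that include).

	#include <stdio.h>

	/* Stand-ins that print instead of emitting instructions, so the
	 * selection logic can be observed from ordinary user space. */
	#define dmb()      puts("dmb      (real data memory barrier)")
	#define barrier()  puts("barrier  (compiler-only barrier)")

	/* Hypothetical stand-in for arch_is_coherent(); pretend we are on
	 * a DMA coherent platform such as ixp23xx. */
	static int my_arch_is_coherent(void) { return 1; }

	#ifndef CONFIG_SMP
	/* UP: a compiler barrier suffices, except on DMA coherent platforms,
	 * where DMA consistent mappings are weakly ordered Normal memory. */
	#define mb() do { if (my_arch_is_coherent()) dmb(); else barrier(); } while (0)
	#else
	/* SMP: mb() must order Normal memory accesses, so always a real DMB. */
	#define mb() dmb()
	#endif

	int main(void)
	{
		mb();	/* UP build on a coherent platform: prints the dmb line */
		return 0;
	}

Built without -DCONFIG_SMP this prints the dmb line (coherent platform); with -DCONFIG_SMP it also prints dmb, matching the mapping the commit message describes.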
include/asm-arm/system.h +18 −16
@@ -3,6 +3,7 @@
 
 #ifdef __KERNEL__
 
+#include <asm/memory.h>
 
 #define CPU_ARCH_UNKNOWN	0
 #define CPU_ARCH_ARMv3		1
@@ -154,7 +155,7 @@ extern unsigned int user_debug;
 #define vectors_high()	(0)
 #endif
 
-#if __LINUX_ARM_ARCH__ >= 6
+#if defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ >= 6
 #define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
 				    : : "r" (0) : "memory")
 #define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
@@ -168,22 +169,23 @@ extern unsigned int user_debug;
 #define dmb() __asm__ __volatile__ ("" : : : "memory")
 #endif
 
-#define mb()			barrier()
-#define rmb()			barrier()
-#define wmb()			barrier()
-#define read_barrier_depends()	do { } while(0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()		dmb()
-#define smp_rmb()		dmb()
-#define smp_wmb()		dmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#else
+#ifndef CONFIG_SMP
+#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#endif /* CONFIG_SMP */
+#else
+#define mb()		dmb()
+#define rmb()		dmb()
+#define wmb()		dmb()
+#define smp_mb()	dmb()
+#define smp_rmb()	dmb()
+#define smp_wmb()	dmb()
+#endif
+#define read_barrier_depends()		do { } while(0)
+#define smp_read_barrier_depends()	do { } while(0)
 
 #define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
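
As a closing illustration of why the uniprocessor coherent case needs a real barrier: a sketch of a driver-style producer handing a buffer to a device. The names (ring_desc, DESC_READY, publish) are hypothetical; the DMB encoding used (CP15 c7, c10, 5) is the ARMv6 form alongside the dsb()/isb() encodings visible in the hunk above, so this only assembles for an ARM target.

	#include <stdint.h>

	#define DESC_READY 1u		/* hypothetical "device owns buffer" flag */

	/* ARMv6 CP15 data memory barrier (c7, c10, 5); after this patch, wmb()
	 * expands to a dmb() like this on DMA coherent platforms rather than
	 * to a compiler-only barrier. */
	#define wmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
					    : : "r" (0) : "memory")

	struct ring_desc {		/* hypothetical DMA ring descriptor */
		volatile uint32_t status;
	};

	void publish(struct ring_desc *d, uint8_t *buf, uint8_t byte)
	{
		buf[0] = byte;		/* 1. fill the DMA buffer (Normal memory) */
		wmb();			/* 2. order the fill before the handoff; a
					 *    compiler barrier alone would let the
					 *    CPU reorder the two stores as seen
					 *    by the device */
		d->status = DESC_READY;	/* 3. tell the device the buffer is ready */
	}

On a non-coherent platform the DMA consistent mapping is strongly ordered, so a compiler-only barrier() remains sufficient, which is exactly why the new mb()/rmb()/wmb() definitions key off arch_is_coherent().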