
Commit 4f3db074 authored by Arnaldo Carvalho de Melo

perf tools: Move arm(64) barrier.h stuff to tools/arch/arm*/include/asm/barrier.h

We will need it for atomic.h, so move it from the ad-hoc tools/perf/
place to a tools/ subset of the kernel arch/ hierarchy.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: David Ahern <dsahern@gmail.com>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-cgfhreaejd7ohitdjccu9k2o@git.kernel.org


Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 163e589d
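
Context for the change (not part of the commit): the barriers being consolidated here are the ones tools code needs when reading the perf mmap ring buffer. Below is a minimal, hypothetical consumer sketch, assuming it is built under tools/ so that <asm/barrier.h> resolves to tools/include/asm/barrier.h and thus to the per-arch headers added below; the function name and buffer handling are illustrative only.

/*
 * Hypothetical sketch (not from this commit): copy freshly written bytes
 * out of a mmap'ed perf ring buffer, using the mb()/rmb() macros that the
 * per-arch tools/arch/... barrier.h headers provide.
 */
#include <stddef.h>
#include <linux/types.h>
#include <linux/perf_event.h>
#include <asm/barrier.h>

/* 'data' is the 'size'-byte data area that follows the metadata page;
 * 'size' is assumed to be a power of two.  Returns bytes copied to 'dst'. */
static size_t ring_buffer_read(struct perf_event_mmap_page *meta,
			       unsigned char *data, size_t size,
			       unsigned char *dst, size_t dst_len)
{
	__u64 head = meta->data_head;
	__u64 tail = meta->data_tail;
	size_t n, i;

	rmb();		/* read data_head before reading the data it covers */

	n = head - tail;
	if (n > dst_len)
		n = dst_len;

	for (i = 0; i < n; i++)
		dst[i] = data[(tail + i) & (size - 1)];

	mb();		/* finish consuming before publishing the new tail */
	meta->data_tail = tail + n;

	return n;
}

The rmb()/mb() pairing is meant to mirror the ordering the perf mmap ABI documents for data_head/data_tail.
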
tools/arch/arm/include/asm/barrier.h  +12 −0
#ifndef _TOOLS_LINUX_ASM_ARM_BARRIER_H
#define _TOOLS_LINUX_ASM_ARM_BARRIER_H

/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define mb()		((void(*)(void))0xffff0fa0)()
#define wmb()		((void(*)(void))0xffff0fa0)()
#define rmb()		((void(*)(void))0xffff0fa0)()

#endif /* _TOOLS_LINUX_ASM_ARM_BARRIER_H */
tools/arch/arm64/include/asm/barrier.h  +16 −0
#ifndef _TOOLS_LINUX_ASM_AARCH64_BARRIER_H
#define _TOOLS_LINUX_ASM_AARCH64_BARRIER_H

/*
 * From tools/perf/perf-sys.h, last modified in:
 * f428ebd184c82a7914b2aa7e9f868918aaf7ea78 perf tools: Fix AAAAARGH64 memory barriers
 *
 * XXX: arch/arm64/include/asm/barrier.h in the kernel sources use dsb, is this
 * a case like for arm32 where we do things differently in userspace?
 */

#define mb()		asm volatile("dmb ish" ::: "memory")
#define wmb()		asm volatile("dmb ishst" ::: "memory")
#define rmb()		asm volatile("dmb ishld" ::: "memory")

#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */
tools/include/asm/barrier.h  +4 −0
 #if defined(__i386__) || defined(__x86_64__)
 #include "../../arch/x86/include/asm/barrier.h"
+#elif defined(__arm__)
+#include "../../arch/arm/include/asm/barrier.h"
+#elif defined(__aarch64__)
+#include "../../arch/arm64/include/asm/barrier.h"
 #elif defined(__powerpc__)
 #include "../../arch/powerpc/include/asm/barrier.h"
 #elif defined(__s390__)
tools/perf/MANIFEST  +1 −0
 tools/perf
 tools/arch/alpha/include/asm/barrier.h
+tools/arch/arm/include/asm/barrier.h
 tools/arch/ia64/include/asm/barrier.h
 tools/arch/powerpc/include/asm/barrier.h
 tools/arch/s390/include/asm/barrier.h
tools/perf/perf-sys.h  +0 −10
@@ -70,20 +70,10 @@
 #endif

 #ifdef __arm__
-/*
- * Use the __kuser_memory_barrier helper in the CPU helper page. See
- * arch/arm/kernel/entry-armv.S in the kernel source for details.
- */
-#define mb()		((void(*)(void))0xffff0fa0)()
-#define wmb()		((void(*)(void))0xffff0fa0)()
-#define rmb()		((void(*)(void))0xffff0fa0)()
 #define CPUINFO_PROC	{"model name", "Processor"}
 #endif

 #ifdef __aarch64__
-#define mb()		asm volatile("dmb ish" ::: "memory")
-#define wmb()		asm volatile("dmb ishst" ::: "memory")
-#define rmb()		asm volatile("dmb ishld" ::: "memory")
 #define cpu_relax()	asm volatile("yield" ::: "memory")
 #endif