
Commit d7b48fec authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Two kprobes fixes and a handful of tooling fixes"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf tools: Make sparc64 arch point to sparc
  perf symbols: Define EM_AARCH64 for older OSes
  perf top: Fix SIGBUS on sparc64
  perf tools: Fix probing for PERF_FLAG_FD_CLOEXEC flag
  perf tools: Fix pthread_attr_setaffinity_np build error
  perf tools: Define _GNU_SOURCE on pthread_attr_setaffinity_np feature check
  perf bench: Fix order of arguments to memcpy_alloc_mem
  kprobes/x86: Check for invalid ftrace location in __recover_probed_insn()
  kprobes/x86: Use 5-byte NOP when the code might be modified by ftrace
parents 2ea51b88 021f5f12
+40 −14
@@ -223,10 +223,22 @@ static unsigned long
 __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 {
 	struct kprobe *kp;
+	unsigned long faddr;
 
 	kp = get_kprobe((void *)addr);
-	/* There is no probe, return original address */
-	if (!kp)
+	faddr = ftrace_location(addr);
+	/*
+	 * Addresses inside the ftrace location are refused by
+	 * arch_check_ftrace_location(). Something went terribly wrong
+	 * if such an address is checked here.
+	 */
+	if (WARN_ON(faddr && faddr != addr))
+		return 0UL;
+	/*
+	 * Use the current code if it is not modified by Kprobe
+	 * and it cannot be modified by ftrace.
+	 */
+	if (!kp && !faddr)
 		return addr;
 
 	/*
@@ -236,13 +248,22 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 	 * that instruction. In that case, we can't recover the instruction
 	 * from the kp->ainsn.insn.
 	 *
-	 *  On the other hand, kp->opcode has a copy of the first byte of
-	 *  the probed instruction, which is overwritten by int3. And
-	 *  the instruction at kp->addr is not modified by kprobes except
-	 *  for the first byte, we can recover the original instruction
+	 * On the other hand, in case on normal Kprobe, kp->opcode has a copy
+	 * of the first byte of the probed instruction, which is overwritten
+	 * by int3. And the instruction at kp->addr is not modified by kprobes
+	 * except for the first byte, we can recover the original instruction
 	 * from it and kp->opcode.
+	 *
+	 * In case of Kprobes using ftrace, we do not have a copy of
+	 * the original instruction. In fact, the ftrace location might
+	 * be modified at anytime and even could be in an inconsistent state.
+	 * Fortunately, we know that the original code is the ideal 5-byte
+	 * long NOP.
 	 */
-	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-	buf[0] = kp->opcode;
+	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	if (faddr)
+		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
+	else
+		buf[0] = kp->opcode;
 	return (unsigned long)buf;
 }
@@ -251,6 +272,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
  * Recover the probed instruction at addr for further analysis.
  * Caller must lock kprobes by kprobe_mutex, or disable preemption
  * for preventing to release referencing kprobes.
+ * Returns zero if the instruction can not get recovered.
  */
 unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 {
@@ -285,6 +307,8 @@ static int can_probe(unsigned long paddr)
 		 * normally used, we just go through if there is no kprobe.
 		 */
 		__addr = recover_probed_instruction(buf, addr);
+		if (!__addr)
+			return 0;
 		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
 		insn_get_length(&insn);
 
@@ -333,6 +357,8 @@ int __copy_instruction(u8 *dest, u8 *src)
 	unsigned long recovered_insn =
 		recover_probed_instruction(buf, (unsigned long)src);
 
+	if (!recovered_insn)
+		return 0;
 	kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
 	insn_get_length(&insn);
 	/* Another subsystem puts a breakpoint, failed to recover */
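
The hunks above change __recover_probed_insn() so that an address falling inside an ftrace-patched instruction is rejected with a 0UL return (while an ftrace-owned location is recovered as the ideal 5-byte NOP), and they make can_probe() and __copy_instruction() bail out when recovery returns zero. A minimal sketch of the resulting caller pattern follows; decode_and_check() is a hypothetical helper written only for illustration (it is not part of the patch), and it assumes the arch-local x86 kprobes declarations (recover_probed_instruction(), kprobe_opcode_t, MAX_INSN_SIZE) plus the instruction decoder from <asm/insn.h>.

/*
 * Sketch only: mirrors the zero-return checks added to can_probe(),
 * __copy_instruction() and can_optimize() in this series.
 */
static int decode_and_check(unsigned long addr)
{
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	struct insn insn;
	unsigned long recovered;

	/* 0 means the original instruction could not be recovered: refuse */
	recovered = recover_probed_instruction(buf, addr);
	if (!recovered)
		return 0;

	kernel_insn_init(&insn, (void *)recovered, MAX_INSN_SIZE);
	insn_get_length(&insn);
	return insn.length != 0;
}
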
+2 −0
@@ -259,6 +259,8 @@ static int can_optimize(unsigned long paddr)
 			 */
 			return 0;
 		recovered_insn = recover_probed_instruction(buf, addr);
+		if (!recovered_insn)
+			return 0;
 		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
 		insn_get_length(&insn);
 		/* Another subsystem puts a breakpoint */
+2 −2
@@ -289,7 +289,7 @@ static u64 do_memcpy_cycle(const struct routine *r, size_t len, bool prefault)
 	memcpy_t fn = r->fn.memcpy;
 	int i;
 
-	memcpy_alloc_mem(&src, &dst, len);
+	memcpy_alloc_mem(&dst, &src, len);
 
 	if (prefault)
 		fn(dst, src, len);
@@ -312,7 +312,7 @@ static double do_memcpy_gettimeofday(const struct routine *r, size_t len,
 	void *src = NULL, *dst = NULL;
 	int i;
 
-	memcpy_alloc_mem(&src, &dst, len);
+	memcpy_alloc_mem(&dst, &src, len);
 
 	if (prefault)
 		fn(dst, src, len);
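
The two bench hunks above only swap the first two arguments at the call sites so that they match memcpy_alloc_mem()'s parameter order (destination pointer first, source pointer second). Because both parameters are pointer-to-pointer with the same type, the swapped calls compiled without any warning, and whatever setup the helper performs on each buffer landed on the wrong one. The stand-alone sketch below illustrates the pitfall; alloc_pair() is a hypothetical stand-in written for this note, not the actual perf helper.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical stand-in for a (dst, src) allocator: both out-parameters have
 * the same type, so passing them in the wrong order still compiles cleanly.
 */
static void alloc_pair(void **dst, void **src, size_t len)
{
	*dst = malloc(len);
	*src = malloc(len);
	memset(*src, 1, len);	/* touch the source so it is really backed */
}

int main(void)
{
	void *src = NULL, *dst = NULL;
	size_t len = 4096;

	/*
	 * Correct order, mirroring the fixed call sites: dst first, src second.
	 * alloc_pair(&src, &dst, len) would also compile, but would initialize
	 * the wrong buffer.
	 */
	alloc_pair(&dst, &src, len);

	memcpy(dst, src, len);
	printf("first byte after copy: %d\n", ((unsigned char *)dst)[0]);

	free(dst);
	free(src);
	return 0;
}
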
+4 −0
@@ -21,6 +21,10 @@ ifeq ($(RAW_ARCH),x86_64)
   endif
 endif
 
+ifeq ($(RAW_ARCH),sparc64)
+  ARCH ?= sparc
+endif
+
 ARCH ?= $(RAW_ARCH)
 
 LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
+1 −1
@@ -49,7 +49,7 @@ test-hello.bin:
 	$(BUILD)
 
 test-pthread-attr-setaffinity-np.bin:
-	$(BUILD) -Werror -lpthread
+	$(BUILD) -D_GNU_SOURCE -Werror -lpthread
 
 test-stackprotector-all.bin:
 	$(BUILD) -Werror -fstack-protector-all
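
The last hunk adds -D_GNU_SOURCE to the feature probe's compile line: pthread_attr_setaffinity_np() is a GNU extension, and glibc only declares it (along with cpu_set_t and the CPU_* macros) when _GNU_SOURCE is defined, so with -Werror the probe fails on the implicit declaration even on systems that do provide the function. A minimal probe of this shape is sketched below purely for illustration; it is not claimed to be the exact test-pthread-attr-setaffinity-np.c shipped with perf, and it defines _GNU_SOURCE in the source, whereas the Makefile change passes it on the command line instead.

#define _GNU_SOURCE		/* the hunk above supplies this as -D_GNU_SOURCE */
#include <pthread.h>
#include <sched.h>

int main(void)
{
	pthread_attr_t attr;
	cpu_set_t cs;

	CPU_ZERO(&cs);
	CPU_SET(0, &cs);

	pthread_attr_init(&attr);
	/* only declared by <pthread.h> when _GNU_SOURCE is set */
	pthread_attr_setaffinity_np(&attr, sizeof(cs), &cs);
	pthread_attr_destroy(&attr);

	return 0;
}
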