arch/x86/kernel/kprobes/core.c  +40 −14

@@ -223,10 +223,22 @@ static unsigned long
 __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 {
 	struct kprobe *kp;
+	unsigned long faddr;
 
 	kp = get_kprobe((void *)addr);
-	/* There is no probe, return original address */
-	if (!kp)
+	faddr = ftrace_location(addr);
+	/*
+	 * Addresses inside the ftrace location are refused by
+	 * arch_check_ftrace_location(). Something went terribly wrong
+	 * if such an address is checked here.
+	 */
+	if (WARN_ON(faddr && faddr != addr))
+		return 0UL;
+	/*
+	 * Use the current code if it is not modified by Kprobe
+	 * and it cannot be modified by ftrace.
+	 */
+	if (!kp && !faddr)
 		return addr;
 
 	/*
@@ -236,13 +248,22 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 	 * that instruction. In that case, we can't recover the instruction
 	 * from the kp->ainsn.insn.
 	 *
-	 * On the other hand, kp->opcode has a copy of the first byte of
-	 * the probed instruction, which is overwritten by int3. And
-	 * the instruction at kp->addr is not modified by kprobes except
-	 * for the first byte, we can recover the original instruction
+	 * On the other hand, in case on normal Kprobe, kp->opcode has a copy
+	 * of the first byte of the probed instruction, which is overwritten
+	 * by int3. And the instruction at kp->addr is not modified by kprobes
+	 * except for the first byte, we can recover the original instruction
 	 * from it and kp->opcode.
+	 *
+	 * In case of Kprobes using ftrace, we do not have a copy of
+	 * the original instruction. In fact, the ftrace location might
+	 * be modified at anytime and even could be in an inconsistent state.
+	 * Fortunately, we know that the original code is the ideal 5-byte
+	 * long NOP.
 	 */
-	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-	buf[0] = kp->opcode;
+	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	if (faddr)
+		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
+	else
+		buf[0] = kp->opcode;
 	return (unsigned long)buf;
 }

@@ -251,6 +272,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
  * Recover the probed instruction at addr for further analysis.
  * Caller must lock kprobes by kprobe_mutex, or disable preemption
  * for preventing to release referencing kprobes.
+ * Returns zero if the instruction can not get recovered.
  */
 unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 {

@@ -285,6 +307,8 @@ static int can_probe(unsigned long paddr)
 		 * normally used, we just go through if there is no kprobe.
 		 */
 		__addr = recover_probed_instruction(buf, addr);
+		if (!__addr)
+			return 0;
 		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
 		insn_get_length(&insn);

@@ -333,6 +357,8 @@ int __copy_instruction(u8 *dest, u8 *src)
 	unsigned long recovered_insn =
 		recover_probed_instruction(buf, (unsigned long)src);
+	if (!recovered_insn)
+		return 0;
 	kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
 	insn_get_length(&insn);
 	/* Another subsystem puts a breakpoint, failed to recover */
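The three-way decision above is easy to lose in the diff, so here is a minimal userspace sketch of it. Everything in it is illustrative, not kernel API: fake_kprobe stands in for struct kprobe, nop5 for ideal_nops[NOP_ATOMIC5], and the kp/is_ftrace_loc parameters for the get_kprobe()/ftrace_location() lookups; the WARN_ON sanity check from the patch is omitted.

#include <stdio.h>
#include <string.h>

#define MAX_INSN_SIZE 15	/* max x86 instruction length, as in the kernel */

/* the ideal 5-byte NOP used at ftrace call sites on x86 */
static const unsigned char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

struct fake_kprobe {
	unsigned char opcode;	/* original first byte, saved before the int3 */
};

static unsigned long recover(unsigned char *buf, const unsigned char *addr,
			     const struct fake_kprobe *kp, int is_ftrace_loc)
{
	/* Case 1: nobody modified the text, decode it in place. */
	if (!kp && !is_ftrace_loc)
		return (unsigned long)addr;

	memcpy(buf, addr, MAX_INSN_SIZE);
	if (is_ftrace_loc)
		/* Case 2: ftrace site; the live bytes may be mid-update,
		 * but the pristine content is known to be the 5-byte NOP. */
		memcpy(buf, nop5, sizeof(nop5));
	else
		/* Case 3: normal kprobe; only the first byte (int3)
		 * differs from the original instruction. */
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}

int main(void)
{
	unsigned char text[MAX_INSN_SIZE] = { 0xcc };	/* int3 planted */
	unsigned char buf[MAX_INSN_SIZE];
	struct fake_kprobe kp = { .opcode = 0x55 };	/* was: push %rbp */
	unsigned char *p;

	p = (unsigned char *)recover(buf, text, &kp, 0);
	printf("recovered first byte: 0x%02x\n", p[0]);	/* prints 0x55 */
	return 0;
}

The zero return for the unrecoverable case is the new contract that the early-return checks in can_probe(), __copy_instruction() and, below, can_optimize() rely on.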
arch/x86/kernel/kprobes/opt.c  +2 −0

@@ -259,6 +259,8 @@ static int can_optimize(unsigned long paddr)
 		 */
 		return 0;
 	recovered_insn = recover_probed_instruction(buf, addr);
+	if (!recovered_insn)
+		return 0;
 	kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
 	insn_get_length(&insn);
 	/* Another subsystem puts a breakpoint */
tools/perf/bench/mem-memcpy.c  +2 −2

@@ -289,7 +289,7 @@ static u64 do_memcpy_cycle(const struct routine *r, size_t len, bool prefault)
 	memcpy_t fn = r->fn.memcpy;
 	int i;
 
-	memcpy_alloc_mem(&src, &dst, len);
+	memcpy_alloc_mem(&dst, &src, len);
 
 	if (prefault)
 		fn(dst, src, len);

@@ -312,7 +312,7 @@ static double do_memcpy_gettimeofday(const struct routine *r, size_t len,
 	void *src = NULL, *dst = NULL;
 	int i;
 
-	memcpy_alloc_mem(&src, &dst, len);
+	memcpy_alloc_mem(&dst, &src, len);
 
 	if (prefault)
 		fn(dst, src, len);
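Both arguments are void **, which is why this bug compiled silently: calling a helper declared destination-first with (&src, &dst) just swaps the two roles without any type error. A self-contained illustration of the bug class; alloc_pair() is a made-up stand-in for perf's memcpy_alloc_mem(), and the asymmetric memset merely represents whatever per-buffer setup the real helper performs.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Helper declared destination-first, as the fixed call sites expect. */
static void alloc_pair(void **dst, void **src, size_t len)
{
	*dst = calloc(1, len);
	*src = calloc(1, len);
	if (!*dst || !*src)
		exit(1);
	/* only the source is pre-touched -- an asymmetry that makes
	 * the argument order observable */
	memset(*src, 0xAA, len);
}

int main(void)
{
	void *src = NULL, *dst = NULL;
	size_t len = 16;

	alloc_pair(&dst, &src, len);	/* fixed order, as in the patch */
	printf("src[0]=0x%02x dst[0]=0x%02x\n",
	       ((unsigned char *)src)[0], ((unsigned char *)dst)[0]);

	alloc_pair(&src, &dst, len);	/* buggy order: roles silently
					 * swapped, compiles cleanly
					 * (leaks the first pair; demo only) */
	printf("src[0]=0x%02x dst[0]=0x%02x\n",
	       ((unsigned char *)src)[0], ((unsigned char *)dst)[0]);
	return 0;
}

The first line prints src pre-touched and dst zeroed; the second shows the roles exchanged, which is exactly what the two call sites above were doing before the fix.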
tools/perf/config/Makefile.arch  +4 −0

@@ -21,6 +21,10 @@ ifeq ($(RAW_ARCH),x86_64)
   endif
 endif
 
+ifeq ($(RAW_ARCH),sparc64)
+  ARCH ?= sparc
+endif
+
 ARCH ?= $(RAW_ARCH)
 
 LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
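A note on the LP64 probe visible at the edge of this hunk: it pipes the token __LP64__ through the preprocessor, and compilers predefine __LP64__ to 1 on LP64 targets (x86_64, sparc64, ...), so the probe's last line of output is "1" on 64-bit ABIs and the unexpanded token otherwise. The same predefine can be checked from C directly; a standalone sketch:

#include <stdio.h>

int main(void)
{
#ifdef __LP64__
	/* LP64 ABI: long and pointers are 8 bytes */
	printf("LP64 target\n");
#else
	/* ILP32 ABI: long and pointers are 4 bytes */
	printf("32-bit target\n");
#endif
	printf("sizeof(long)=%zu sizeof(void *)=%zu\n",
	       sizeof(long), sizeof(void *));
	return 0;
}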
tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c  +2 −1

@@ -5,10 +5,11 @@ int main(void)
 {
 	int ret = 0;
 	pthread_attr_t thread_attr;
+	cpu_set_t cs;
 
 	pthread_attr_init(&thread_attr);
 	/* don't care abt exact args, just the API itself in libpthread */
-	ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL);
+	ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs);
 
 	return ret;
 }
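For trying the probe by hand outside the perf build, here is a self-contained variant. The build command and the CPU_ZERO/CPU_SET calls are additions here, not part of the feature check. The likely reason the old (0, NULL) arguments were a problem: glibc declares pthread_attr_setaffinity_np() with a nonnull attribute on the cpuset argument, so with -Werror the probe could fail to compile even on toolchains that do provide the API; passing a real cpu_set_t keeps the call well-formed.

/* build: cc test.c -o test -lpthread */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>	/* cpu_set_t, CPU_ZERO, CPU_SET */

int main(void)
{
	pthread_attr_t thread_attr;
	cpu_set_t cs;

	CPU_ZERO(&cs);
	CPU_SET(0, &cs);	/* any valid set works; only the API matters */

	pthread_attr_init(&thread_attr);
	return pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs);
}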