
Commit 80006dbe authored by Masami Hiramatsu, committed by Ingo Molnar

kprobes/x86: Remove jprobe implementation



Remove the arch-dependent setjmp/longjmp functions
and the now-unused jprobe fields in kprobe_ctlblk
from arch/x86.

Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: linux-arch@vger.kernel.org
Link: https://lore.kernel.org/lkml/152942433578.15209.14034551799624757792.stgit@devbox


Signed-off-by: Ingo Molnar <mingo@kernel.org>
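
Note: with jprobes removed, existing jprobe users are expected to migrate to plain kprobes (or ftrace). The fragment below is a minimal, hypothetical migration sketch and is not part of this commit: the probed symbol, the handler name, and the x86-64 assumption are illustrative. Unlike a jprobe entry function, a kprobe pre_handler does not receive the probed function's arguments directly; it reads them from pt_regs.

/*
 * Hypothetical migration sketch, not part of this commit.
 * Assumes an x86-64 kernel; "_do_fork" and the handler name are
 * examples only.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/* On x86-64 the first two integer arguments are in %rdi and %rsi. */
	pr_info("%s: arg0=%lx arg1=%lx\n", p->symbol_name, regs->di, regs->si);
	return 0;	/* 0: continue with normal single-step processing */
}

static struct kprobe kp = {
	.symbol_name	= "_do_fork",	/* example probe target */
	.pre_handler	= my_pre_handler,
};

static int __init probe_example_init(void)
{
	return register_kprobe(&kp);
}

static void __exit probe_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(probe_example_init);
module_exit(probe_example_exit);
MODULE_LICENSE("GPL");

Returning 0 from the pre_handler keeps the normal single-step path, which is the common case for observation-only probes.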
parent 5a6cf77f
+0 −3
@@ -111,9 +111,6 @@ struct kprobe_ctlblk {
 	unsigned long kprobe_status;
 	unsigned long kprobe_old_flags;
 	unsigned long kprobe_saved_flags;
-	unsigned long *jprobe_saved_sp;
-	struct pt_regs jprobe_saved_regs;
-	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
 	struct prev_kprobe prev_kprobe;
 };
 

+3 −93
@@ -66,8 +66,6 @@
 
 #include "common.h"
 
-void jprobe_return_end(void);
-
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
@@ -690,10 +688,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
 			/*
 			 * If we have no pre-handler or it returned 0, we
 			 * continue with normal processing.  If we have a
-			 * pre-handler and it returned non-zero, it prepped
-			 * for calling the break_handler below on re-entry
-			 * for jprobe processing, so get out doing nothing
-			 * more here.
+			 * pre-handler and it returned non-zero, that means
+			 * user handler setup registers to exit to another
+			 * instruction, we must skip the single stepping.
 			 */
 			if (!p->pre_handler || !p->pre_handler(p, regs))
 				setup_singlestep(p, regs, kcb, 0);
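
Note: the rewritten comment documents the post-jprobes convention: a pre_handler that returns non-zero has already set the registers up to resume at another instruction, so the trap handler must skip single-stepping the original instruction. Below is a minimal, hypothetical sketch of such a handler; it is not part of this commit, the function names are invented, and in-tree users of this convention (e.g. function error injection) perform additional fixup that is omitted here.

/* Hypothetical sketch only: redirect execution away from the probed address. */
#include <linux/errno.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

static long my_replacement_func(void)
{
	return -EINVAL;		/* illustrative replacement behaviour */
}

static int redirecting_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/* Exit to another instruction instead of the probed one. */
	regs->ip = (unsigned long)my_replacement_func;

	/*
	 * Non-zero return: registers were set up to resume elsewhere, so
	 * kprobe_int3_handler() must not call setup_singlestep().
	 */
	return 1;
}
NOKPROBE_SYMBOL(redirecting_pre_handler);
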
@@ -1083,93 +1080,6 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
 }
 NOKPROBE_SYMBOL(kprobe_exceptions_notify);
 
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-	struct jprobe *jp = container_of(p, struct jprobe, kp);
-	unsigned long addr;
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-	kcb->jprobe_saved_regs = *regs;
-	kcb->jprobe_saved_sp = stack_addr(regs);
-	addr = (unsigned long)(kcb->jprobe_saved_sp);
-
-	/*
-	 * As Linus pointed out, gcc assumes that the callee
-	 * owns the argument space and could overwrite it, e.g.
-	 * tailcall optimization. So, to be absolutely safe
-	 * we also save and restore enough stack bytes to cover
-	 * the argument area.
-	 * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
-	 * raw stack chunk with redzones:
-	 */
-	__memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
-	regs->ip = (unsigned long)(jp->entry);
-
-	/*
-	 * jprobes use jprobe_return() which skips the normal return
-	 * path of the function, and this messes up the accounting of the
-	 * function graph tracer to get messed up.
-	 *
-	 * Pause function graph tracing while performing the jprobe function.
-	 */
-	pause_graph_tracing();
-	return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void jprobe_return(void)
-{
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-	/* Unpoison stack redzones in the frames we are going to jump over. */
-	kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp);
-
-	asm volatile (
-#ifdef CONFIG_X86_64
-			"       xchg   %%rbx,%%rsp	\n"
-#else
-			"       xchgl   %%ebx,%%esp	\n"
-#endif
-			"       int3			\n"
-			"       .globl jprobe_return_end\n"
-			"       jprobe_return_end:	\n"
-			"       nop			\n"::"b"
-			(kcb->jprobe_saved_sp):"memory");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-NOKPROBE_SYMBOL(jprobe_return_end);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-	u8 *addr = (u8 *) (regs->ip - 1);
-	struct jprobe *jp = container_of(p, struct jprobe, kp);
-	void *saved_sp = kcb->jprobe_saved_sp;
-
-	if ((addr > (u8 *) jprobe_return) &&
-	    (addr < (u8 *) jprobe_return_end)) {
-		if (stack_addr(regs) != saved_sp) {
-			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
-			printk(KERN_ERR
-			       "current sp %p does not match saved sp %p\n",
-			       stack_addr(regs), saved_sp);
-			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
-			show_regs(saved_regs);
-			printk(KERN_ERR "Current registers\n");
-			show_regs(regs);
-			BUG();
-		}
-		/* It's OK to start function graph tracing again */
-		unpause_graph_tracing();
-		*regs = kcb->jprobe_saved_regs;
-		__memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
-		preempt_enable_no_resched();
-		return 1;
-	}
-	return 0;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
 	bool is_in_entry_trampoline_section = false;