
Commit a7b0133e authored by Masami Hiramatsu, committed by Ingo Molnar

kprobes/x86: Use text_poke_bp() instead of text_poke_smp*()



Use text_poke_bp() instead of text_poke_smp*() for optimizing
kprobes. Since the number of kprobes is usually small (fewer than
100) and text_poke_bp() is much lighter than text_poke_smp()
[which uses stop_machine()], batch processing is no longer
worthwhile and is simply dropped.

Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Reviewed-by: Jiri Kosina <jkosina@suse.cz>
Cc: H. Peter Anvin <hpa@linux.intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Jason Baron <jbaron@akamai.com>
Cc: yrl.pp-manager.tt@hitachi.com
Cc: Borislav Petkov <bpetkov@suse.de>
Link: http://lkml.kernel.org/r/20130718114750.26675.9174.stgit@mhiramat-M0-7522


Signed-off-by: Ingo Molnar <mingo@kernel.org>
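For illustration, the per-probe call pattern changes roughly as sketched below. This is not a standalone compilable unit; the identifiers (jump_poke_params, insn_buf, op, c, RELATIVEJUMP_SIZE) are the ones that appear in the diff that follows, and the comments describe the int3-based patching scheme text_poke_bp() relies on.

	/* Before: fill a preallocated parameter entry per probe, then patch
	 * every queued entry in one stop_machine()-based pass. */
	jump_poke_params[c].addr   = op->kp.addr;	/* instruction being overwritten  */
	jump_poke_params[c].opcode = insn_buf;		/* jmp rel32 built for this probe */
	jump_poke_params[c].len    = RELATIVEJUMP_SIZE;
	text_poke_smp_batch(jump_poke_params, c);

	/* After: patch each probe directly. text_poke_bp() installs a temporary
	 * int3 at the first byte, syncs cores, writes the remaining bytes, then
	 * replaces the int3 with the first byte of the new jump; the last
	 * argument is the address a CPU resumes at if it hits the transient int3. */
	text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE, op->optinsn.insn);

With no stop_machine() round needed, the MAX_OPTIMIZE_PROBES-sized parameter and buffer arrays, and arch_init_optprobes() which allocated them, go away.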
parent c7e85c42
arch/x86/kernel/kprobes/common.h  +0 −5
@@ -82,14 +82,9 @@ extern void synthesize_reljump(void *from, void *to);
 extern void synthesize_relcall(void *from, void *to);
 
 #ifdef	CONFIG_OPTPROBES
-extern int arch_init_optprobes(void);
 extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter);
 extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr);
 #else	/* !CONFIG_OPTPROBES */
-static inline int arch_init_optprobes(void)
-{
-	return 0;
-}
 static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
 {
 	return 0;
arch/x86/kernel/kprobes/core.c  +1 −1
@@ -1068,7 +1068,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 
 int __init arch_init_kprobes(void)
 {
-	return arch_init_optprobes();
+	return 0;
 }
 
 int __kprobes arch_trampoline_kprobe(struct kprobe *p)
arch/x86/kernel/kprobes/opt.c  +22 −78
@@ -371,19 +371,21 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
 	return 0;
 }
 
-#define MAX_OPTIMIZE_PROBES 256
-static struct text_poke_param *jump_poke_params;
-static struct jump_poke_buffer {
-	u8 buf[RELATIVEJUMP_SIZE];
-} *jump_poke_bufs;
-
-static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
-					    u8 *insn_buf,
-					    struct optimized_kprobe *op)
+/*
+ * Replace breakpoints (int3) with relative jumps.
+ * Caller must call with locking kprobe_mutex and text_mutex.
+ */
+void __kprobes arch_optimize_kprobes(struct list_head *oplist)
 {
-	s32 rel = (s32)((long)op->optinsn.insn -
-			((long)op->kp.addr + RELATIVEJUMP_SIZE));
+	struct optimized_kprobe *op, *tmp;
+	u8 insn_buf[RELATIVEJUMP_SIZE];
+
+	list_for_each_entry_safe(op, tmp, oplist, list) {
+		s32 rel = (s32)((long)op->optinsn.insn -
+			((long)op->kp.addr + RELATIVEJUMP_SIZE));
+
+		WARN_ON(kprobe_disabled(&op->kp));
 
-	/* Backup instructions which will be replaced by jump address */
-	memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
-	       RELATIVE_ADDR_SIZE);
+		/* Backup instructions which will be replaced by jump address */
+		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
+		       RELATIVE_ADDR_SIZE);
@@ -391,44 +393,23 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
-	insn_buf[0] = RELATIVEJUMP_OPCODE;
-	*(s32 *)(&insn_buf[1]) = rel;
+		insn_buf[0] = RELATIVEJUMP_OPCODE;
+		*(s32 *)(&insn_buf[1]) = rel;
 
-	tprm->addr = op->kp.addr;
-	tprm->opcode = insn_buf;
-	tprm->len = RELATIVEJUMP_SIZE;
-}
+		text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
+			     op->optinsn.insn);
 
-/*
- * Replace breakpoints (int3) with relative jumps.
- * Caller must call with locking kprobe_mutex and text_mutex.
- */
-void __kprobes arch_optimize_kprobes(struct list_head *oplist)
-{
-	struct optimized_kprobe *op, *tmp;
-	int c = 0;
-
-	list_for_each_entry_safe(op, tmp, oplist, list) {
-		WARN_ON(kprobe_disabled(&op->kp));
-		/* Setup param */
-		setup_optimize_kprobe(&jump_poke_params[c],
-				      jump_poke_bufs[c].buf, op);
 		list_del_init(&op->list);
-		if (++c >= MAX_OPTIMIZE_PROBES)
-			break;
 	}
-
-	text_poke_smp_batch(jump_poke_params, c);
 }
 
-static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm,
-					      u8 *insn_buf,
-					      struct optimized_kprobe *op)
+/* Replace a relative jump with a breakpoint (int3).  */
+void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
 {
+	u8 insn_buf[RELATIVEJUMP_SIZE];
+
 	/* Set int3 to first byte for kprobes */
 	insn_buf[0] = BREAKPOINT_INSTRUCTION;
 	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
-
-	tprm->addr = op->kp.addr;
-	tprm->opcode = insn_buf;
-	tprm->len = RELATIVEJUMP_SIZE;
+	text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
+		     op->optinsn.insn);
 }
 
 /*
@@ -439,29 +420,11 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist,
 				    struct list_head *done_list)
 {
 	struct optimized_kprobe *op, *tmp;
-	int c = 0;
 
 	list_for_each_entry_safe(op, tmp, oplist, list) {
-		/* Setup param */
-		setup_unoptimize_kprobe(&jump_poke_params[c],
-					jump_poke_bufs[c].buf, op);
+		arch_unoptimize_kprobe(op);
 		list_move(&op->list, done_list);
-		if (++c >= MAX_OPTIMIZE_PROBES)
-			break;
 	}
-
-	text_poke_smp_batch(jump_poke_params, c);
 }
-
-/* Replace a relative jump with a breakpoint (int3).  */
-void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
-{
-	u8 buf[RELATIVEJUMP_SIZE];
-
-	/* Set int3 to first byte for kprobes */
-	buf[0] = BREAKPOINT_INSTRUCTION;
-	memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
-	text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE);
-}
 
 int  __kprobes
@@ -481,22 +444,3 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
 	}
 	return 0;
 }
-
-int __kprobes arch_init_optprobes(void)
-{
-	/* Allocate code buffer and parameter array */
-	jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) *
-				 MAX_OPTIMIZE_PROBES, GFP_KERNEL);
-	if (!jump_poke_bufs)
-		return -ENOMEM;
-
-	jump_poke_params = kmalloc(sizeof(struct text_poke_param) *
-				   MAX_OPTIMIZE_PROBES, GFP_KERNEL);
-	if (!jump_poke_params) {
-		kfree(jump_poke_bufs);
-		jump_poke_bufs = NULL;
-		return -ENOMEM;
-	}
-
-	return 0;
-}