
Commit 8533bbe9 authored by Masami Hiramatsu, committed by Ingo Molnar

x86: prepare kprobes code for x86 unification



This patch cleans up the kprobes code on x86 in preparation for unification.
It is based on Arjan's previous work.

- Remove spurious whitespace changes
- Add harmless includes
- Make the 32/64 files more identical
- Generalize structure field and local variable names
- Wrap stack address accesses in a macro
- Modify the bitmap-building macro (see the sketch after this list)
- Merge the fixup code into is_riprel() and rename it to fix_riprel()
- Set MAX_INSN_SIZE to 16 on both architectures
- Use u32 for bitmaps on both architectures
- Clarify some comments
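
The bitmap-building macro referenced above packs sixteen per-opcode attribute
bits into each half of a u32 word, two table rows per word. As a reading aid,
here is a minimal user-space sketch of the same technique; opcode_has_attr()
and demo_is_boostable are illustrative names, not kernel identifiers:

#include <stdio.h>
#include <stdint.h>

/*
 * Pack 16 single-bit opcode attributes into one value.  (row % 32) is 0
 * for rows 0x00, 0x20, ... and 16 for rows 0x10, 0x30, ..., so each pair
 * of rows lands in the low and high halves of one 32-bit word; the
 * alternating '|' and ',' separators in the kernel tables below OR each
 * pair of rows into a single array element.
 */
#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))

static const uint32_t demo_is_boostable[256 / 32] = {
	/* Rows 0x00 and 0x10 copied from twobyte_is_boostable in the
	 * diff below; the remaining rows are left zero for brevity. */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) |
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) ,
};

#undef W

/* Look up one opcode: word index is opcode / 32, bit index is opcode % 32.
 * The kernel does the same lookup via test_bit(). */
static int opcode_has_attr(const uint32_t table[256 / 32], uint8_t opcode)
{
	return (table[opcode / 32] >> (opcode % 32)) & 1;
}

int main(void)
{
	/* Bit b6 of row 0x00 is set, so two-byte opcode 0x0f 0x06 (clts)
	 * is reported boostable; 0x0f 0x05 (syscall) is not. */
	printf("0x06 -> %d\n", opcode_has_attr(demo_is_boostable, 0x06));
	printf("0x05 -> %d\n", opcode_has_attr(demo_is_boostable, 0x05));
	return 0;
}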

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Jim Keniston <jkenisto@us.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent da07ab03
+199 −122
@@ -29,10 +29,15 @@

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>

@@ -41,30 +46,16 @@ void jprobe_return_end(void);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only current task, but
			     doesn't switch kernel stack.*/
	{NULL, NULL}	/* Terminator */
};
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

/* insert a jmp code */
static __always_inline void set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		long raddr;
	} __attribute__((packed)) *jop;
	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (long)(to) - ((long)(from) + 5);
	jop->op = RELATIVEJUMP_INSTRUCTION;
}

/*
 * returns non-zero if opcodes can be boosted.
 * "&regs->sp" looks wrong, but it's correct for x86_32.  x86_32 CPUs
 * don't save the ss and esp registers if the CPU is already in kernel
 * mode when it traps.  So for kprobes, regs->sp and regs->ss are not
 * the [nonexistent] saved stack pointer and ss register, but rather
 * the top 8 bytes of the pre-int3 stack.  So &regs->sp happens to
 * point to the top of the pre-int3 stack.
 */
static __always_inline int can_boost(kprobe_opcode_t *opcodes)
{
#define stack_addr(regs) ((unsigned long *)&regs->sp)

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
@@ -73,11 +64,11 @@ static __always_inline int can_boost(kprobe_opcode_t *opcodes)
	 << (row % 32))
	/*
	 * Undefined/reserved opcodes, conditional jump, Opcode Extension
	 * Groups, and some special opcodes can not be boost.
	 * Groups, and some special opcodes can not boost.
	 */
	static const unsigned long twobyte_is_boostable[256 / 32] = {
static const u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
		/*      -------------------------------         */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
@@ -94,12 +85,82 @@ static __always_inline int can_boost(kprobe_opcode_t *opcodes)
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
		/*      -------------------------------         */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
static const u32 onebyte_has_modrm[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      -----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 30 */
	W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 90 */
	W(0xa0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* a0 */
	W(0xb0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
static const u32 twobyte_has_modrm[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      -----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1) | /* 0f */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0) , /* 1f */
	W(0x20, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 2f */
	W(0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 3f */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 4f */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 5f */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 6f */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1) , /* 7f */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 8f */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 9f */
	W(0xa0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) | /* af */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* bf */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* cf */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* df */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* ef */
	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* ff */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only current task, but
			      doesn't switch kernel stack.*/
	{NULL, NULL}	/* Terminator */
};
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
static __always_inline void set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		s32 raddr;
	} __attribute__((packed)) * jop;
	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
	jop->op = RELATIVEJUMP_INSTRUCTION;
}

/*
 * returns non-zero if opcode is boostable.
 */
static __always_inline int can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
@@ -109,7 +170,8 @@ retry:
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes, twobyte_is_boostable);
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
@@ -132,12 +194,13 @@ retry:
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags can be boost */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* can't boost CS override and call */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
@@ -145,9 +208,9 @@ retry:
/*
 * returns non-zero if opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
	switch (opcode) {
	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
@@ -157,20 +220,24 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
	return 0;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on i386. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
	if (can_boost(p->addr)) {
	if (can_boost(p->addr))
		p->ainsn.boostable = 0;
	} else {
	else
		p->ainsn.boostable = -1;

	p->opcode = *p->addr;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	arch_copy_kprobe(p);
	return 0;
}

@@ -195,26 +262,26 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
	kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
	kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->opcode))
		kcb->kprobe_saved_eflags &= ~IF_MASK;
	if (is_IF_modifier(p->ainsn.insn))
		kcb->kprobe_saved_flags &= ~IF_MASK;
}

static __always_inline void clear_btf(void)
@@ -245,7 +312,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	unsigned long *sara = (unsigned long *)&regs->sp;
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

@@ -280,7 +347,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->flags &= ~TF_MASK;
				regs->flags |= kcb->kprobe_saved_eflags;
				regs->flags |= kcb->kprobe_saved_flags;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
@@ -301,7 +368,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
			 * another cpu right after we hit, no further
			 * handling of this interrupt is appropriate
			 */
				regs->ip -= sizeof(kprobe_opcode_t);
				regs->ip = (unsigned long)addr;
				ret = 1;
				goto no_kprobe;
			}
@@ -325,7 +392,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
			 * Back up over the (now missing) int3 and run
			 * the original instruction.
			 */
			regs->ip -= sizeof(kprobe_opcode_t);
			regs->ip = (unsigned long)addr;
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
@@ -359,16 +426,18 @@ no_kprobe:
}

/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here. When a retprobed function returns, this probe is hit and
 * trampoline_probe_handler() runs, calling the kretprobe's handler.
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which calls the kretprobe's handler.
 */
 void __kprobes kretprobe_trampoline_holder(void)
 {
	asm volatile ( ".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
			"	pushf\n"
			/* skip cs, ip, orig_ax */
			/*
			 * Skip cs, ip, orig_ax.
			 * trampoline_handler() will plug in these values
			 */
			"	subl $12, %esp\n"
			"	pushl %fs\n"
			"	pushl %ds\n"
@@ -382,10 +451,10 @@ no_kprobe:
			"	pushl %ebx\n"
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* move flags to cs */
			/* Move flags to cs */
			"	movl 52(%esp), %edx\n"
			"	movl %edx, 48(%esp)\n"
			/* save true return address on flags */
			/* Replace saved flags with true return address. */
			"	movl %eax, 52(%esp)\n"
			"	popl %ebx\n"
			"	popl %ecx\n"
@@ -394,7 +463,7 @@ no_kprobe:
			"	popl %edi\n"
			"	popl %ebp\n"
			"	popl %eax\n"
			/* skip ip, orig_ax, es, ds, fs */
			/* Skip ip, orig_ax, es, ds, fs */
			"	addl $20, %esp\n"
			"	popf\n"
			"	ret\n");
@@ -417,20 +486,20 @@ void *__kprobes trampoline_handler(struct pt_regs *regs)
	/* fixup registers */
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->ip = trampoline_address;
	regs->orig_ax = 0xffffffff;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because an multiple functions in the call path
	 * have a return probe installed on them, and/or more then one return
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
@@ -457,6 +526,7 @@ void *__kprobes trampoline_handler(struct pt_regs *regs)
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
@@ -488,20 +558,24 @@ void *__kprobes trampoline_handler(struct pt_regs *regs)
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * This function also checks instruction size for preparing direct execution.
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = (unsigned long *)&regs->sp;
	unsigned long copy_eip = (unsigned long)p->ainsn.insn;
	unsigned long orig_eip = (unsigned long)p->addr;
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	regs->flags &= ~TF_MASK;
	switch (p->ainsn.insn[0]) {
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kcb->kprobe_old_eflags;
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
@@ -513,23 +587,26 @@ static void __kprobes resume_execution(struct kprobe *p,
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_eip + (*tos - copy_eip);
		*tos = orig_ip + (*tos - copy_ip);
		break;
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_eip + (*tos - copy_eip);
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
	case 0xff:
		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable
			 */
			*tos = orig_eip + (*tos - copy_eip);
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
			   ((p->ainsn.insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
			/* ip is correct. And this is boostable */
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct. And this is boostable
			 */
			p->ainsn.boostable = 1;
			goto no_change;
		}
@@ -538,21 +615,21 @@ static void __kprobes resume_execution(struct kprobe *p,
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_eip) &&
		    (regs->ip - copy_eip) + 5 < MAX_INSN_SIZE) {
		if ((regs->ip > copy_ip) &&
		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
			/*
			 * These instructions can be executed directly if it
			 * jumps back to correct address.
			 */
			set_jmp_op((void *)regs->ip,
				   (void *)orig_eip + (regs->ip - copy_eip));
				   (void *)orig_ip + (regs->ip - copy_ip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip = orig_eip + (regs->ip - copy_eip);
	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
@@ -578,7 +655,7 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
	}

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_eflags;
	regs->flags |= kcb->kprobe_saved_flags;
	trace_hardirqs_fixup_flags(regs->flags);

	/* Restore back the original saved kprobes variables and continue. */
@@ -617,7 +694,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		regs->flags |= kcb->kprobe_old_eflags;
		regs->flags |= kcb->kprobe_old_flags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
@@ -628,7 +705,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accouting
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);
@@ -651,7 +728,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
			return 1;

		/*
		 * fixup_exception() could not handle it,
		 * fixup routine could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
@@ -662,7 +739,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
}

/*
 * Wrapper routine to for handling exceptions.
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
@@ -703,11 +780,11 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_esp = &regs->sp;
	addr = (unsigned long)(kcb->jprobe_saved_esp);
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * TBD: As Linus pointed out, gcc assumes that the callee
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
@@ -730,21 +807,20 @@ void __kprobes jprobe_return(void)
		      "       .globl jprobe_return_end	\n"
		      "       jprobe_return_end:	\n"
		      "       nop			\n"::"b"
		      (kcb->jprobe_saved_esp):"memory");
		      (kcb->jprobe_saved_sp):"memory");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if (&regs->sp != kcb->jprobe_saved_esp) {
		if (stack_addr(regs) != kcb->jprobe_saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk("current sp %p does not match saved sp %p\n",
			       &regs->sp, kcb->jprobe_saved_esp);
			       stack_addr(regs), kcb->jprobe_saved_sp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
@@ -752,20 +828,21 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
		       kcb->jprobes_stack,
		       MIN_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
int __init arch_init_kprobes(void)
{
	return 0;
}

int __init arch_init_kprobes(void)
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
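
As a side note on the "boost" logic in resume_execution() above: boosting
plants a 5-byte relative jump (opcode 0xe9, which RELATIVEJUMP_INSTRUCTION
presumably expands to) after the copied instruction, and the rel32
displacement is counted from the first byte after the 5-byte jump, hence the
"+ 5" in set_jmp_op(). A hedged stand-alone sketch of the same encoding, with
emit_jmp() as an illustrative name:

#include <stdint.h>
#include <string.h>

#define JMP_REL32_OPCODE 0xe9	/* jmp rel32: 1 opcode byte + 4-byte displacement */

/*
 * Write "jmp <to>" at address 'from'.  The displacement is relative to
 * the address following the whole 5-byte instruction, so it is
 * to - (from + 5), matching set_jmp_op() in the diff above.  In real use
 * 'from' must of course point to writable, executable memory.
 */
static void emit_jmp(void *from, void *to)
{
	uint8_t *p = from;
	int32_t raddr = (int32_t)((long)to - ((long)from + 5));

	p[0] = JMP_REL32_OPCODE;
	memcpy(p + 1, &raddr, sizeof(raddr));	/* unaligned-safe store */
}

The kernel writes through a packed struct instead of memcpy(); both emit the
same five bytes, and avoiding struct padding is why __arch_jmp_op is declared
__attribute__((packed)).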
+212 −197

File changed (preview size limit exceeded; changes collapsed).

+0 −1
@@ -25,7 +25,6 @@
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>

#include <asm/system.h>
#include <asm/desc.h>
+0 −1
@@ -25,7 +25,6 @@
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>

#include <asm/system.h>
#include <asm/pgalloc.h>
+20 −12
@@ -2,7 +2,6 @@
#define _ASM_KPROBES_H
/*
 *  Kernel Probes (KProbes)
 *  include/asm-i386/kprobes.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -23,14 +22,17 @@
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation ( includes suggestions from
 *		Rusty Russell).
 * 2004-Oct	Prasanna S Panchamukhi <prasanna@in.ibm.com> and Jim Keniston
 *		kenistoj@us.ibm.com adopted from i386.
 */
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>

#define  __ARCH_WANT_KPROBES_INSN_SLOT

struct kprobe;
struct pt_regs;
struct kprobe;

typedef u8 kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION	0xcc
@@ -38,9 +40,11 @@ typedef u8 kprobe_opcode_t;
#define MAX_INSN_SIZE 16
#define MAX_STACK_SIZE 64
#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
	(((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
	(((unsigned long)current_thread_info()) + THREAD_SIZE \
	 - (unsigned long)(ADDR))) \
	? (MAX_STACK_SIZE) \
	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
	: (((unsigned long)current_thread_info()) + THREAD_SIZE \
	   - (unsigned long)(ADDR)))

#define ARCH_SUPPORTS_KRETPROBES
#define flush_insn_slot(p)	do { } while (0)
@@ -55,8 +59,12 @@ struct arch_specific_insn {
	/* copy of the original instruction */
	kprobe_opcode_t *insn;
	/*
	 * If this flag is not 0, this kprobe can be boost when its
	 * post_handler and break_handler is not set.
	 * boostable = -1: This instruction type is not boostable.
	 * boostable = 0: This instruction type is boostable.
	 * boostable = 1: This instruction has been boosted: we have
	 * added a relative jump after the instruction copy in insn,
	 * so no single-step and fixup are needed (unless there's
	 * a post_handler or break_handler).
	 */
	int boostable;
};
@@ -64,16 +72,16 @@ struct arch_specific_insn {
struct prev_kprobe {
	struct kprobe *kp;
	unsigned long status;
	unsigned long old_eflags;
	unsigned long saved_eflags;
	unsigned long old_flags;
	unsigned long saved_flags;
};

/* per-cpu kprobe control block */
struct kprobe_ctlblk {
	unsigned long kprobe_status;
	unsigned long kprobe_old_eflags;
	unsigned long kprobe_saved_eflags;
	unsigned long *jprobe_saved_esp;
	unsigned long kprobe_old_flags;
	unsigned long kprobe_saved_flags;
	unsigned long *jprobe_saved_sp;
	struct pt_regs jprobe_saved_regs;
	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
	struct prev_kprobe prev_kprobe;
@@ -88,7 +96,7 @@ static inline void restore_interrupts(struct pt_regs *regs)
		local_irq_enable();
}

extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_exceptions_notify(struct notifier_block *self,
				    unsigned long val, void *data);
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
#endif				/* _ASM_KPROBES_H */
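
One last reading aid, for the MIN_STACK_SIZE change above: jprobes save at
most MAX_STACK_SIZE bytes of stack, clamped to however many bytes actually
remain between ADDR (the probed stack pointer) and the upper boundary of the
current thread's stack; the commit merely adds the previously missing
(unsigned long) cast of ADDR. A simplified restatement under that reading,
with min_stack_size() as an illustrative name:

#include <stddef.h>

#define MAX_STACK_SIZE 64

/*
 * Save min(MAX_STACK_SIZE, stack_end - addr) bytes.  x86 stacks grow
 * downward, so the live data sits between the stack pointer 'addr' and
 * the upper stack boundary 'stack_end'; the kernel computes stack_end
 * as (unsigned long)current_thread_info() + THREAD_SIZE.
 */
static size_t min_stack_size(unsigned long addr, unsigned long stack_end)
{
	size_t remaining = stack_end - addr;

	return remaining < MAX_STACK_SIZE ? remaining : MAX_STACK_SIZE;
}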