Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 49a2a1b8 authored by Anil S Keshavamurthy, committed by Linus Torvalds
Browse files

[PATCH] kprobes: changed from using spinlock to mutex



Since the Kprobes runtime exception handlers are now lock free — this code path
now uses RCU to walk through the list — there is no need for
register/unregister{_kprobe} to use spin_lock_irqsave()/spin_unlock_irqrestore().
Serialization during registration/unregistration is now possible using just a
mutex.

In the above process, this patch also fixes a minor memory leak for x86_64 and
powerpc.

Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 41dead49
Loading
Loading
Loading
Loading
+1 −5
Original line number Diff line number Diff line
@@ -57,14 +57,10 @@ static inline int is_IF_modifier(kprobe_opcode_t opcode)
}

/*
 * Arch hook called at kprobe registration time.  As shown in this hunk
 * no per-arch preparation work is needed; always succeeds (returns 0).
 */
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	return 0;
}

/*
 * Save a copy of the probed instruction: the full instruction bytes go
 * into the per-probe slot p->ainsn.insn, and the first opcode unit is
 * cached in p->opcode (used later when arming/disarming the probe).
 *
 * NOTE(review): this span is a diff rendered without its +/- markers.
 * "return 0;" inside a void function is a leftover line from the old
 * int-returning variant this commit removes; the code as shown does not
 * compile.  Confirm against the applied tree.
 */
void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
+6 −8
Original line number Diff line number Diff line
@@ -60,15 +60,15 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}
	return ret;
}

/*
 * Copy the probed instruction into the insn slot and stash the original
 * opcode in p->opcode.
 *
 * NOTE(review): diff-rendering artifact — "ret" is not declared in this
 * function and a void function cannot "return ret".  These are removed
 * lines of the pre-patch int-returning version shown interleaved with
 * kept context; do not treat this span as compilable code.
 */
void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	if (!ret) {
		memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
	}

	return ret;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
@@ -85,9 +85,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)

/*
 * Tear down a kprobe: return its instruction slot to the pool of
 * executable insn pages.
 *
 * NOTE(review): the down()/up() pair on kprobe_mutex (a semaphore used
 * as a mutex) is the pre-patch serialization; per the commit message
 * above, this commit moves serialization to a mutex held by the generic
 * register/unregister path — these lines likely render on the removal
 * side of the diff.
 */
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	down(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn);
	up(&kprobe_mutex);
}

static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+1 −5
Original line number Diff line number Diff line
@@ -42,15 +42,11 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/*
 * Arch hook called at kprobe registration time.  No arch-specific
 * preparation is required here; always succeeds (returns 0).
 */
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	return 0;
}

/*
 * Record a copy of the probed instruction: insn[0] holds the original
 * word, insn[1] is set to BREAKPOINT_INSTRUCTION_2, and p->opcode
 * caches the original opcode for arming/disarming.
 *
 * NOTE(review): "return 0;" in a void function is a diff artifact — a
 * removed line of the old int-returning version rendered without its
 * '-' marker; this span as shown does not compile.
 */
void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	p->ainsn.insn[0] = *p->addr;
	p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
	p->opcode = *p->addr;
	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
+2 −5
Original line number Diff line number Diff line
@@ -42,8 +42,8 @@
#include <asm/pgtable.h>
#include <asm/kdebug.h>

/* Pre-patch serialization primitive: a semaphore used as a mutex.
 * NOTE(review): the commit shown on this page removes it in favour of a
 * mutex in the generic kprobes code; the '-' diff marker has been
 * stripped by the page rendering. */
static DECLARE_MUTEX(kprobe_mutex);
void jprobe_return_end(void);
/* Forward declaration of the copy hook; presumably removed by this
 * commit when its body is folded into arch_prepare_kprobe() — confirm
 * against the applied tree. */
void __kprobes arch_copy_kprobe(struct kprobe *p);

/* Per-CPU kprobe state referenced by the exception-handling path. */
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -69,12 +69,11 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn)
/*
 * Allocate an executable instruction slot for the probe and copy the
 * original instruction into it via arch_copy_kprobe().
 * Returns 0 on success, -ENOMEM when no insn slot is available.
 *
 * NOTE(review): the down()/up() around get_insn_slot() is the old
 * semaphore-based serialization that this commit removes (the generic
 * code now holds a mutex instead, per the commit message) — these lines
 * likely render on the removal side of the diff.
 */
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on x86_64. */
	down(&kprobe_mutex);
	p->ainsn.insn = get_insn_slot();
	up(&kprobe_mutex);
	if (!p->ainsn.insn) {
		return -ENOMEM;
	}
	arch_copy_kprobe(p);
	return 0;
}

@@ -223,9 +222,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)

/*
 * Tear down a kprobe: return its executable instruction slot to the
 * allocator.
 *
 * NOTE(review): the down()/up() pair is the pre-patch semaphore
 * serialization this commit eliminates (serialization moves to a mutex
 * in the generic register/unregister path, per the commit message).
 */
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	down(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn);
	up(&kprobe_mutex);
}

static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+0 −5
Original line number Diff line number Diff line
@@ -110,11 +110,6 @@ struct arch_specific_insn {
 	unsigned short target_br_reg;
};

/* ia64 has no separate instruction-copy step, so this hook is an empty
 * inline stub.  NOTE(review): this hunk is all deletions (+0 −5) — the
 * stub is being removed by the commit shown above. */
static inline void arch_copy_kprobe(struct kprobe *p)
{
}

extern int kprobe_exceptions_notify(struct notifier_block *self,
				    unsigned long val, void *data);

Loading