
Commit 09897d78 authored by Ingo Molnar

Merge branch 'uprobes/core' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc into perf/core

Pull uprobes cleanups from Oleg Nesterov.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents e98a6e59 ad439356
arch/powerpc/include/asm/uprobes.h  +2 −3
@@ -36,9 +36,8 @@ typedef ppc_opcode_t uprobe_opcode_t;

struct arch_uprobe {
	union {
-		u8	insn[MAX_UINSN_BYTES];
-		u8	ixol[MAX_UINSN_BYTES];
-		u32	ainsn;
+		u32	insn;
+		u32	ixol;
	};
};
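
For illustration only (not part of this patch): powerpc instructions are always 4 bytes, so a bare u32 can stand in for the old byte arrays. A minimal userspace sketch of how the two sides use such a union, with names modelled on the kernel's but otherwise invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t u32;

/* Mirrors the union after this change: one 4-byte word, two names. */
struct arch_uprobe {
	union {
		u32	insn;	/* original instruction, saved by generic code */
		u32	ixol;	/* copy to single-step out of line */
	};
};

/* Generic-code style: copy raw bytes without knowing the member's type. */
static void save_original(struct arch_uprobe *auprobe, const void *src)
{
	memcpy(&auprobe->insn, src, sizeof(auprobe->insn));
}

int main(void)
{
	struct arch_uprobe ap;
	u32 probed = 0x7c0802a6;	/* mflr r0, as an example opcode */

	save_original(&ap, &probed);
	/* Arch-code style: hand the whole word to emulate_step()-like code. */
	printf("opcode seen by the emulator: 0x%08x\n", (unsigned int)ap.insn);
	return 0;
}

The same word is an opaque byte buffer to the generic side and an opcode to the arch side; the union just gives it both names.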

arch/powerpc/kernel/uprobes.c  +1 −1
@@ -186,7 +186,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
	 * emulate_step() returns 1 if the insn was successfully emulated.
	 * For all other cases, we need to single-step in hardware.
	 */
-	ret = emulate_step(regs, auprobe->ainsn);
+	ret = emulate_step(regs, auprobe->insn);
	if (ret > 0)
		return true;

include/linux/uprobes.h  +20 −32
@@ -26,16 +26,13 @@

#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/types.h>

struct vm_area_struct;
struct mm_struct;
struct inode;
struct notifier_block;

-#ifdef CONFIG_ARCH_SUPPORTS_UPROBES
-# include <asm/uprobes.h>
-#endif

#define UPROBE_HANDLER_REMOVE		1
#define UPROBE_HANDLER_MASK		1

@@ -60,6 +57,8 @@ struct uprobe_consumer {
};

#ifdef CONFIG_UPROBES
+#include <asm/uprobes.h>
+
enum uprobe_task_state {
	UTASK_RUNNING,
	UTASK_SSTEP,
@@ -72,35 +71,28 @@ enum uprobe_task_state {
 */
struct uprobe_task {
	enum uprobe_task_state		state;
-	struct arch_uprobe_task		autask;

-	struct return_instance		*return_instances;
-	unsigned int			depth;
-	struct uprobe			*active_uprobe;
-
-	unsigned long			xol_vaddr;
-	unsigned long			vaddr;
+	union {
+		struct {
+			struct arch_uprobe_task	autask;
+			unsigned long		vaddr;
+		};
+
+		struct {
+			struct callback_head	dup_xol_work;
+			unsigned long		dup_xol_addr;
+		};
+	};
+
+	struct uprobe			*active_uprobe;
+	unsigned long			xol_vaddr;
+
+	struct return_instance		*return_instances;
+	unsigned int			depth;
};

-/*
- * On a breakpoint hit, thread contests for a slot.  It frees the
- * slot after singlestep. Currently a fixed number of slots are
- * allocated.
- */
-struct xol_area {
-	wait_queue_head_t 	wq;		/* if all slots are busy */
-	atomic_t 		slot_count;	/* number of in-use slots */
-	unsigned long 		*bitmap;	/* 0 = free slot */
-	struct page 		*page;
-
-	/*
-	 * We keep the vma's vm_start rather than a pointer to the vma
-	 * itself.  The probed process or a naughty kernel module could make
-	 * the vma go away, and we must handle that reasonably gracefully.
-	 */
-	unsigned long 		vaddr;		/* Page(s) of instruction slots */
-};
+struct xol_area;

struct uprobes_state {
	struct xol_area		*xol_area;
};
@@ -109,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign
extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
+extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
@@ -120,7 +113,6 @@ extern void uprobe_end_dup_mmap(void);
extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t);
extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
-extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
@@ -176,10 +168,6 @@ static inline bool uprobe_deny_signal(void)
{
	return false;
}
-static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
-{
-	return 0;
-}
static inline void uprobe_free_utask(struct task_struct *t)
{
}
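
One point the patch relies on but does not spell out: the two anonymous structs inside uprobe_task's new union are never live at the same time. autask/vaddr only matter while a task single-steps over a probed instruction; dup_xol_work/dup_xol_addr only matter while a forked child waits for its XOL area to be duplicated, so overlaying them is safe and shrinks the struct. A stripped-down userspace sketch of that layout (the member types are stand-ins, not the kernel definitions):

#include <stdio.h>

/* Stand-ins for the kernel types referenced by uprobe_task. */
struct arch_uprobe_task { unsigned long saved_scratch; };
struct callback_head    { struct callback_head *next; void (*func)(struct callback_head *); };

struct uprobe_task_sketch {
	int state;

	union {
		/* live only while single-stepping a probed instruction */
		struct {
			struct arch_uprobe_task	autask;
			unsigned long		vaddr;
		};
		/* live only while a fork()ed child waits for its XOL area */
		struct {
			struct callback_head	dup_xol_work;
			unsigned long		dup_xol_addr;
		};
	};

	unsigned long xol_vaddr;
};

int main(void)
{
	/* The union costs max(group sizes), not their sum. */
	printf("sizeof(uprobe_task_sketch) = %zu\n", sizeof(struct uprobe_task_sketch));
	return 0;
}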
kernel/events/uprobes.c  +43 −17
@@ -73,6 +73,17 @@ struct uprobe {
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	unsigned long		flags;

+	/*
+	 * The generic code assumes that it has two members of unknown type
+	 * owned by the arch-specific code:
+	 *
+	 * 	insn -	copy_insn() saves the original instruction here for
+	 *		arch_uprobe_analyze_insn().
+	 *
+	 *	ixol -	potentially modified instruction to execute out of
+	 *		line, copied to xol_area by xol_get_insn_slot().
+	 */
	struct arch_uprobe	arch;
};
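
The comment added above is the key to the mechanical changes further down in this file: arch.insn may be a u8[MAX_UINSN_BYTES] array on one architecture and a plain u32 on another (see the powerpc hunk earlier), so generic code has to take its address with & and its length with sizeof() rather than rely on array-to-pointer decay and MAX_UINSN_BYTES. A hedged userspace illustration, with both layouts mocked up rather than taken from the kernel headers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint8_t  u8;
typedef uint32_t u32;
#define MAX_UINSN_BYTES 16

/* x86-like: variable-length instructions, stored as a byte array. */
struct arch_uprobe_bytes { u8 insn[MAX_UINSN_BYTES]; };
/* powerpc-like: fixed 4-byte instructions, stored as one word. */
struct arch_uprobe_word  { u32 insn; };

/* Generic helper in the style of copy_insn(): it only needs an address
 * and a size, so it works for either layout. */
static void copy_insn_like(void *dst, size_t size, const void *src)
{
	memcpy(dst, src, size);
}

int main(void)
{
	struct arch_uprobe_bytes a;
	struct arch_uprobe_word  b;
	const u8 probed[MAX_UINSN_BYTES] = { 0x0f, 0x1f, 0x00 };	/* arbitrary bytes */

	/* a.insn decays to a pointer, but b.insn is an integer value;
	 * only the &... form works for both layouts, hence the patch. */
	copy_insn_like(&a.insn, sizeof(a.insn), probed);
	copy_insn_like(&b.insn, sizeof(b.insn), probed);

	printf("byte-array copy: %zu bytes, word copy: %zu bytes\n",
	       sizeof(a.insn), sizeof(b.insn));
	return 0;
}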

@@ -85,6 +96,29 @@ struct return_instance {
	struct return_instance	*next;		/* keep as stack */
};

+/*
+ * Execute out of line area: anonymous executable mapping installed
+ * by the probed task to execute the copy of the original instruction
+ * mangled by set_swbp().
+ *
+ * On a breakpoint hit, thread contests for a slot.  It frees the
+ * slot after singlestep. Currently a fixed number of slots are
+ * allocated.
+ */
+struct xol_area {
+	wait_queue_head_t 	wq;		/* if all slots are busy */
+	atomic_t 		slot_count;	/* number of in-use slots */
+	unsigned long 		*bitmap;	/* 0 = free slot */
+	struct page 		*page;
+
+	/*
+	 * We keep the vma's vm_start rather than a pointer to the vma
+	 * itself.  The probed process or a naughty kernel module could make
+	 * the vma go away, and we must handle that reasonably gracefully.
+	 */
+	unsigned long 		vaddr;		/* Page(s) of instruction slots */
+};
+
/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
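
A side note on the xol_area comment above, not taken from the patch: the slot machinery it describes boils down to a fixed number of per-page slots tracked by a bitmap, claimed and released with atomic bit operations (the kernel sleeps on xol_area->wq when all slots are busy instead of failing). A rough userspace sketch of that idea; the slot size and the helper names are made up for the example:

#include <stdio.h>

#define SLOT_SIZE	16	/* arbitrary bytes per slot, for this sketch only */
#define PAGE_SIZE_	4096
#define NR_SLOTS	(PAGE_SIZE_ / SLOT_SIZE)	/* fixed number of slots */

static unsigned long slot_bitmap[NR_SLOTS / (8 * sizeof(unsigned long))];

/* Claim the first free slot, or return -1 if all are busy. */
static int xol_take_slot(void)
{
	for (int slot = 0; slot < NR_SLOTS; slot++) {
		unsigned long *word = &slot_bitmap[slot / (8 * sizeof(unsigned long))];
		unsigned long bit  = 1UL << (slot % (8 * sizeof(unsigned long)));

		/* atomically set the bit; success iff it was previously clear */
		if (!(__atomic_fetch_or(word, bit, __ATOMIC_ACQ_REL) & bit))
			return slot;
	}
	return -1;
}

static void xol_free_slot(int slot)
{
	unsigned long *word = &slot_bitmap[slot / (8 * sizeof(unsigned long))];
	unsigned long bit  = 1UL << (slot % (8 * sizeof(unsigned long)));

	__atomic_fetch_and(word, ~bit, __ATOMIC_RELEASE);
}

int main(void)
{
	int s1 = xol_take_slot(), s2 = xol_take_slot();

	printf("claimed slots %d and %d of %d\n", s1, s2, NR_SLOTS);
	xol_free_slot(s1);
	xol_free_slot(s2);
	return 0;
}
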
@@ -330,7 +364,7 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
-	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
+	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
}

static int match_uprobe(struct uprobe *l, struct uprobe *r)
@@ -529,8 +563,8 @@ static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
-	void *insn = uprobe->arch.insn;
-	int size = MAX_UINSN_BYTES;
+	void *insn = &uprobe->arch.insn;
+	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
@@ -569,7 +603,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
		goto out;

	ret = -ENOTSUPP;
-	if (is_trap_insn((uprobe_opcode_t *)uprobe->arch.insn))
+	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
@@ -1264,7 +1298,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)

	/* Initialize the slot */
	copy_to_page(area->page, xol_vaddr,
-			uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
+			&uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
	/*
	 * We probably need flush_icache_user_range() but it needs vma.
	 * This should work on supported architectures too.
@@ -1403,12 +1437,10 @@ static void uprobe_warn(struct task_struct *t, const char *msg)

static void dup_xol_work(struct callback_head *work)
{
-	kfree(work);
-
	if (current->flags & PF_EXITING)
		return;

-	if (!__create_xol_area(current->utask->vaddr))
+	if (!__create_xol_area(current->utask->dup_xol_addr))
		uprobe_warn(current, "dup xol area");
}

@@ -1419,7 +1451,6 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
	struct uprobe_task *utask = current->utask;
	struct mm_struct *mm = current->mm;
-	struct callback_head *work;
	struct xol_area *area;

	t->utask = NULL;
@@ -1441,14 +1472,9 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
	if (mm == t->mm)
		return;

-	/* TODO: move it into the union in uprobe_task */
-	work = kmalloc(sizeof(*work), GFP_KERNEL);
-	if (!work)
-		return uprobe_warn(t, "dup xol area");
-
-	t->utask->vaddr = area->vaddr;
-	init_task_work(work, dup_xol_work);
-	task_work_add(t, work, true);
+	t->utask->dup_xol_addr = area->vaddr;
+	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
+	task_work_add(t, &t->utask->dup_xol_work, true);
}

/*
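
A closing observation on the uprobe_copy_process() change above (mine, not the commit's): with dup_xol_work embedded in uprobe_task, queueing the deferred XOL-area setup needs no allocation and therefore has no failure path left to warn about. A userspace caricature of the embedded-work-node pattern; the names are invented and this is not the kernel's task_work API:

#include <stddef.h>
#include <stdio.h>

struct callback_head {
	struct callback_head	*next;
	void			(*func)(struct callback_head *);
};

struct task {
	const char		*name;
	unsigned long		dup_xol_addr;
	struct callback_head	dup_xol_work;	/* embedded: no allocation needed */
};

static struct callback_head *pending;	/* per-task list in the kernel; global here */

/* Queueing cannot fail: the node already exists inside struct task. */
static void task_work_add_like(struct task *t, void (*fn)(struct callback_head *))
{
	t->dup_xol_work.func = fn;
	t->dup_xol_work.next = pending;
	pending = &t->dup_xol_work;
}

static void run_pending(void)
{
	for (struct callback_head *cb = pending; cb; cb = cb->next)
		cb->func(cb);
	pending = NULL;
}

/* container_of-style recovery of the task from its embedded work node. */
static void dup_xol_work_fn(struct callback_head *cb)
{
	struct task *t = (struct task *)((char *)cb - offsetof(struct task, dup_xol_work));

	printf("%s: would __create_xol_area(0x%lx)\n", t->name, t->dup_xol_addr);
}

int main(void)
{
	struct task child = { .name = "child", .dup_xol_addr = 0x12345000UL };

	task_work_add_like(&child, dup_xol_work_fn);
	run_pending();		/* runs on "return to userspace" in the kernel */
	return 0;
}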