
Commit 5669ccee authored by Oleg Nesterov

uprobes: Introduce copy_to_page()



Extract the kmap_atomic/memcpy/kunmap_atomic code from
xol_get_insn_slot() into the new simple helper, copy_to_page().
It will have more users soon.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Anton Arapov <anton@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
parent 98763a1b
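
For context, copy_to_page() is the write-side twin of the existing copy_from_page() helper in kernel/events/uprobes.c. The sketch below shows the pair as they stand after this commit; copy_to_page() matches the hunk below, while copy_from_page()'s body is reconstructed from the surrounding code rather than quoted in this diff, so treat it as illustrative:

#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic() */
#include <linux/mm.h>		/* struct page, PAGE_MASK */
#include <linux/string.h>	/* memcpy() */

/* Read-side helper, already present before this commit. */
static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

/* Write-side twin introduced by this commit. */
static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

Both helpers mask the page-aligned bits of vaddr (vaddr & ~PAGE_MASK) to find the offset within the page, so callers can pass the full virtual address instead of computing the offset themselves, which is exactly the open-coded sequence removed from xol_get_insn_slot() below.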
kernel/events/uprobes.c  +8 −6
@@ -194,6 +194,13 @@ static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
 	kunmap_atomic(kaddr);
 }
 
+static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
+{
+	void *kaddr = kmap_atomic(page);
+	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
+	kunmap_atomic(kaddr);
+}
+
 static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
 {
 	uprobe_opcode_t old_opcode;
@@ -1227,9 +1234,7 @@ static unsigned long xol_take_insn_slot(struct xol_area *area)
 static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
 {
 	struct xol_area *area;
-	unsigned long offset;
 	unsigned long xol_vaddr;
-	void *vaddr;
 
 	area = get_xol_area();
 	if (!area)
@@ -1240,10 +1245,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
 		return 0;
 
 	/* Initialize the slot */
-	offset = xol_vaddr & ~PAGE_MASK;
-	vaddr = kmap_atomic(area->page);
-	memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
-	kunmap_atomic(vaddr);
+	copy_to_page(area->page, xol_vaddr, uprobe->arch.insn, MAX_UINSN_BYTES);
 	/*
 	 * We probably need flush_icache_user_range() but it needs vma.
 	 * This should work on supported architectures too.
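
The only arithmetic the new helper hides from its callers is the in-page offset computation. A standalone user-space sketch of that math, with a hypothetical 4 KiB page size, an arbitrary example address, and a plain buffer standing in for struct page:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL		/* assumed 4 KiB pages */
#define PAGE_MASK (~(PAGE_SIZE - 1))	/* mirrors the kernel's definition */

/* User-space stand-in for copy_to_page(); "page" is just a buffer here. */
static void copy_to_page_demo(unsigned char *page, unsigned long vaddr,
			      const void *src, int len)
{
	/* vaddr & ~PAGE_MASK keeps only the low bits: the offset within the page. */
	memcpy(page + (vaddr & ~PAGE_MASK), src, len);
}

int main(void)
{
	static unsigned char page[PAGE_SIZE];
	const unsigned char insn[] = { 0x90, 0x90 };	/* two dummy bytes */
	unsigned long vaddr = 0x7f1234567037UL;		/* arbitrary example address */

	copy_to_page_demo(page, vaddr, insn, sizeof(insn));
	printf("wrote %zu bytes at page offset %#lx\n",
	       sizeof(insn), vaddr & ~PAGE_MASK);	/* offset is 0x37 here */
	return 0;
}

At the real call site in xol_get_insn_slot() the source is uprobe->arch.insn, the length is MAX_UINSN_BYTES, and the destination is the xol area page.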