Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 19d36ccd authored by Andi Kleen, committed by Linus Torvalds
Browse files

x86: Fix alternatives and kprobes to remap write-protected kernel text



Reenable kprobes and alternative patching when the kernel text is write
protected by DEBUG_RODATA

Add a general utility function to change write protected text.  The new
function remaps the code using vmap to write it and takes care of CPU
synchronization.  It also does CLFLUSH to make icache recovery faster.

There are some limitations on when the function can be used, see the
comment.

This is a newer version that also changes the paravirt_ops code.
text_poke also supports multi byte patching now.

Contains bug fixes from Zach Amsden and suggestions from Mathieu
Desnoyers.

Cc: Jan Beulich <jbeulich@novell.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Mathieu Desnoyers <compudj@krystal.dyndns.org>
Cc: Zach Amsden <zach@vmware.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f51c9452
Loading
Loading
Loading
Loading
+34 −6
Original line number Original line Diff line number Diff line
@@ -2,8 +2,12 @@
#include <linux/sched.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/list.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/sections.h>
#include <asm/pgtable.h>


#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;
static int smp_alt_once;
@@ -150,7 +154,7 @@ static void nop_out(void *insns, unsigned int len)
		unsigned int noplen = len;
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		text_poke(insns, noptable[noplen], noplen);
		insns += noplen;
		insns += noplen;
		len -= noplen;
		len -= noplen;
	}
	}
@@ -202,7 +206,7 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
			continue;
			continue;
		if (*ptr > text_end)
		if (*ptr > text_end)
			continue;
			continue;
		**ptr = 0xf0; /* lock prefix */
		text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */
	};
	};
}
}


@@ -360,10 +364,6 @@ void apply_paravirt(struct paravirt_patch_site *start,
		/* Pad the rest with nops */
		/* Pad the rest with nops */
		nop_out(p->instr + used, p->len - used);
		nop_out(p->instr + used, p->len - used);
	}
	}

	/* Sync to be conservative, in case we patched following
	 * instructions */
	sync_core();
}
}
extern struct paravirt_patch_site __start_parainstructions[],
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
	__stop_parainstructions[];
@@ -406,3 +406,31 @@ void __init alternative_instructions(void)
 	apply_paravirt(__parainstructions, __parainstructions_end);
 	apply_paravirt(__parainstructions, __parainstructions_end);
	local_irq_restore(flags);
	local_irq_restore(flags);
}
}

/*
 * Warning:
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these instructions.
 * And on the local CPU you need to be protected against NMI or MCE handlers
 * seeing an inconsistent instruction while you patch.
 */
void __kprobes text_poke(void *oaddr, unsigned char *opcode, int len)
{
	u8 *vbase = NULL;	/* base address returned by vmap(), if used */
	u8 *addr = oaddr;

	/*
	 * If the text is write-protected (DEBUG_RODATA), create a temporary
	 * writable alias of the two pages that may contain the patch site
	 * and write through that instead.
	 */
	if (!pte_write(*lookup_address((unsigned long)addr))) {
		struct page *p[2] = { virt_to_page(addr), virt_to_page(addr+PAGE_SIZE) };
		vbase = vmap(p, 2, VM_MAP, PAGE_KERNEL);
		if (!vbase)
			return;
		/* Point at the same offset within the page in the alias. */
		addr = vbase + ((unsigned long)oaddr) % PAGE_SIZE;
	}
	memcpy(addr, opcode, len);
	sync_core();
	/* Not strictly needed, but can speed CPU recovery up. Ignore cross cacheline
	   case. */
	if (cpu_has_clflush)
		asm("clflush (%0) " :: "r" (oaddr) : "memory");
	/*
	 * vunmap() must be given the exact address vmap() returned, not the
	 * offset pointer we wrote through — pass the saved base.
	 */
	if (vbase)
		vunmap(vbase);
}
+3 −6
Original line number Original line Diff line number Diff line
@@ -35,6 +35,7 @@
#include <asm/cacheflush.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/desc.h>
#include <asm/uaccess.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>


void jprobe_return_end(void);
void jprobe_return_end(void);


@@ -169,16 +170,12 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)


void __kprobes arch_arm_kprobe(struct kprobe *p)
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
}


void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
{
	*p->addr = p->opcode;
	text_poke(p->addr, &p->opcode, 1);
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
}


void __kprobes arch_remove_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
+13 −4
Original line number Original line Diff line number Diff line
@@ -124,20 +124,28 @@ unsigned paravirt_patch_ignore(unsigned len)
	return len;
	return len;
}
}


/*
 * In-memory image of a 5-byte rel32 call/jmp: one opcode byte (0xe8/0xe9)
 * followed by a 32-bit displacement.  Packed so sizeof(struct branch) == 5
 * and it can be written over a patch site in one text_poke().
 */
struct branch {
	unsigned char opcode;
	u32 delta;
} __attribute__((packed));

unsigned paravirt_patch_call(void *target, u16 tgt_clobbers,
unsigned paravirt_patch_call(void *target, u16 tgt_clobbers,
			     void *site, u16 site_clobbers,
			     void *site, u16 site_clobbers,
			     unsigned len)
			     unsigned len)
{
{
	unsigned char *call = site;
	unsigned char *call = site;
	unsigned long delta = (unsigned long)target - (unsigned long)(call+5);
	unsigned long delta = (unsigned long)target - (unsigned long)(call+5);
	struct branch b;


	if (tgt_clobbers & ~site_clobbers)
	if (tgt_clobbers & ~site_clobbers)
		return len;	/* target would clobber too much for this site */
		return len;	/* target would clobber too much for this site */
	if (len < 5)
	if (len < 5)
		return len;	/* call too long for patch site */
		return len;	/* call too long for patch site */


	*call++ = 0xe8;		/* call */
	b.opcode = 0xe8; /* call */
	*(unsigned long *)call = delta;
	b.delta = delta;
	BUILD_BUG_ON(sizeof(b) != 5);
	text_poke(call, (unsigned char *)&b, 5);


	return 5;
	return 5;
}
}
@@ -150,8 +158,9 @@ unsigned paravirt_patch_jmp(void *target, void *site, unsigned len)
	if (len < 5)
	if (len < 5)
		return len;	/* call too long for patch site */
		return len;	/* call too long for patch site */


	*jmp++ = 0xe9;		/* jmp */
	b.opcode = 0xe9;	/* jmp */
	*(unsigned long *)jmp = delta;
	b.delta = delta;
	text_poke(call, (unsigned char *)&b, 5);


	return 5;
	return 5;
}
}
+3 −11
Original line number Original line Diff line number Diff line
@@ -800,17 +800,9 @@ void mark_rodata_ro(void)
	unsigned long start = PFN_ALIGN(_text);
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;
	unsigned long size = PFN_ALIGN(_etext) - start;


#ifndef CONFIG_KPROBES
#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() <= 1)
#endif
	{
	change_page_attr(virt_to_page(start),
	change_page_attr(virt_to_page(start),
	                 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
	                 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
	printk("Write protecting the kernel text: %luk\n", size >> 10);
	printk("Write protecting the kernel text: %luk\n", size >> 10);
	}
#endif
	start += size;
	start += size;
	size = (unsigned long)__end_rodata - start;
	size = (unsigned long)__end_rodata - start;
	change_page_attr(virt_to_page(start),
	change_page_attr(virt_to_page(start),
+3 −7
Original line number Original line Diff line number Diff line
@@ -39,9 +39,9 @@
#include <linux/module.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kdebug.h>


#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>


void jprobe_return_end(void);
void jprobe_return_end(void);
static void __kprobes arch_copy_kprobe(struct kprobe *p);
static void __kprobes arch_copy_kprobe(struct kprobe *p);
@@ -209,16 +209,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)


void __kprobes arch_arm_kprobe(struct kprobe *p)
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
}


void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
{
	*p->addr = p->opcode;
	text_poke(p->addr, &p->opcode, 1);
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
}


void __kprobes arch_remove_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
Loading