Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 602033ed authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Revert most of "x86: Fix alternatives and kprobes to remap write-protected kernel text"



This reverts most of commit 19d36ccd.

The way to fix the DEBUG_RODATA interactions with KPROBES and CPU hotplug is to
just not mark the text as being write-protected in the first place.
Both of those facilities depend on rewriting instructions.

Having "helpful" debug facilities that just cause more problems is not
being helpful.  It just adds complexity and bugs. Not worth it.

Reported-by: default avatarRafael J. Wysocki <rjw@sisk.pl>
Cc: Andi Kleen <ak@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 07d4e9af
Loading
Loading
Loading
Loading
+2 −12
Original line number Original line Diff line number Diff line
@@ -430,22 +430,12 @@ void __init alternative_instructions(void)
 * And on the local CPU you need to be protected again NMI or MCE handlers
 * And on the local CPU you need to be protected again NMI or MCE handlers
 * seeing an inconsistent instruction while you patch.
 * seeing an inconsistent instruction while you patch.
 */
 */
/*
 * Overwrite len bytes of kernel text at addr with the bytes in opcode.
 *
 * Post-revert form: the address is written through directly.  The old
 * vmap()-based remapping (which handled write-protected text by mapping
 * the affected pages writable elsewhere) is gone, because with this
 * commit mark_rodata_ro() no longer write-protects kernel text when
 * KPROBES or CPU hotplug — both of which must rewrite instructions —
 * are configured.
 *
 * NOTE(review): callers are expected to provide the cross-CPU and
 * NMI/MCE protection described in the comment above this function;
 * nothing here synchronizes other CPUs.
 */
void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
{
	memcpy(addr, opcode, len);
	/* Make sure this CPU doesn't execute a stale prefetched copy. */
	sync_core();
	/* Not strictly needed, but can speed CPU recovery up. Ignore cross cacheline
	   case. */
	if (cpu_has_clflush)
		asm("clflush (%0) " :: "r" (addr) : "memory");
}
+11 −3
Original line number Original line Diff line number Diff line
@@ -800,9 +800,17 @@ void mark_rodata_ro(void)
	unsigned long start = PFN_ALIGN(_text);
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;
	unsigned long size = PFN_ALIGN(_etext) - start;


#ifndef CONFIG_KPROBES
#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() <= 1)
#endif
	{
		change_page_attr(virt_to_page(start),
		change_page_attr(virt_to_page(start),
		                 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
		                 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
		printk("Write protecting the kernel text: %luk\n", size >> 10);
		printk("Write protecting the kernel text: %luk\n", size >> 10);
	}
#endif
	start += size;
	start += size;
	size = (unsigned long)__end_rodata - start;
	size = (unsigned long)__end_rodata - start;
	change_page_attr(virt_to_page(start),
	change_page_attr(virt_to_page(start),
+10 −0
Original line number Original line Diff line number Diff line
@@ -600,6 +600,16 @@ void mark_rodata_ro(void)
{
{
	unsigned long start = (unsigned long)_stext, end;
	unsigned long start = (unsigned long)_stext, end;


#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() > 1)
		start = (unsigned long)_etext;
#endif

#ifdef CONFIG_KPROBES
	start = (unsigned long)__start_rodata;
#endif
	
	end = (unsigned long)__end_rodata;
	end = (unsigned long)__end_rodata;
	start = (start + PAGE_SIZE - 1) & PAGE_MASK;
	start = (start + PAGE_SIZE - 1) & PAGE_MASK;
	end &= PAGE_MASK;
	end &= PAGE_MASK;