
Commit d7dd2ff1 authored and committed by James Bottomley

[PARISC] only make executable areas executable



Currently parisc has the whole kernel marked as RWX, meaning any
kernel page at all is eligible to be executed.  This can cause a
theoretical problem on systems with combined I/D TLB because the act
of referencing a page causes a TLB insertion with an executable bit.
This TLB entry may be used by the CPU as the basis for speculating the
page into the I-Cache.  If this speculated page is subsequently used
for a user process, there is the possibility we will get a stale
I-cache line picked up as the binary executes.

As a point of good practice, only mark actual kernel text pages as
executable.  The same has to be done for init_text pages, but they're
converted to data pages (and the I-Cache flushed) when the init memory
is released.

Signed-off-by: James Bottomley <James.Bottomley@suse.de>
parent e38f5b74
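The core idea above -- give executable protection only to pages that actually hold kernel text -- can be sketched as follows. This is illustrative commentary, not code from this commit; the helper name kernel_page_prot() and the use of the linker-provided _text/_etext symbols are assumptions.

#include <asm/pgtable.h>	/* PAGE_KERNEL, PAGE_KERNEL_EXEC */
#include <asm/sections.h>	/* _text, _etext from the linker script */

/* Hypothetical helper: choose the protection for one kernel page.
 * Only addresses inside the kernel text range get an executable
 * protection; data, bss and everything else stay non-executable. */
static pgprot_t kernel_page_prot(unsigned long addr)
{
	if (addr >= (unsigned long)_text && addr < (unsigned long)_etext)
		return PAGE_KERNEL_EXEC;	/* kernel text: read + execute */

	return PAGE_KERNEL;			/* everything else: read/write, no exec */
}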
+7 −2
@@ -177,7 +177,10 @@ struct vm_area_struct;

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |  _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_KERNEL	(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define _PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXEC)
+#define _PAGE_KERNEL_RWX	(_PAGE_KERNEL_EXEC | _PAGE_WRITE)
+#define _PAGE_KERNEL		(_PAGE_KERNEL_RO | _PAGE_WRITE)

/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
 * are page-aligned, we don't care about the PAGE_OFFSET bits, except

@@ -208,7 +211,9 @@ struct vm_area_struct;
#define PAGE_COPY       PAGE_EXECREAD
#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
-#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
+#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
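Reading the macro refactoring above (commentary, not part of the patch): _PAGE_KERNEL_RWX reassembles exactly the bit pattern of the old _PAGE_KERNEL, while the new plain _PAGE_KERNEL drops _PAGE_EXEC. A hypothetical compile-time check of that equivalence could look like this:

#include <linux/kernel.h>	/* BUILD_BUG_ON */

/* Hypothetical sanity check, not in the patch: confirm the refactored
 * macros keep the old RWX bit pattern and drop EXEC from plain KERNEL.
 * BUILD_BUG_ON is purely compile-time, so this never runs. */
static void __init check_kernel_page_prots(void)
{
	/* _PAGE_KERNEL_RWX == the pre-patch _PAGE_KERNEL */
	BUILD_BUG_ON(_PAGE_KERNEL_RWX !=
		     (_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ |
		      _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED));
	/* plain kernel mappings are no longer executable */
	BUILD_BUG_ON(_PAGE_KERNEL & _PAGE_EXEC);
	/* executable kernel text is read + execute, not writable */
	BUILD_BUG_ON(_PAGE_KERNEL_EXEC & _PAGE_WRITE);
}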


+3 −0
@@ -692,6 +692,9 @@ ENTRY(fault_vector_11)
END(fault_vector_11)

#endif
+	/* Fault vector is separately protected and *must* be on its own page */
+	.align		PAGE_SIZE
+ENTRY(end_fault_vector)

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code
+3 −2
@@ -106,8 +106,9 @@ $bss_loop:
#endif


-	/* Now initialize the PTEs themselves */
-	ldo		0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+	/* Now initialize the PTEs themselves.  We use RWX for
+	 * everything ... it will get remapped correctly later */
+	ldo		0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
	ldi		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
	load32		PA(pg0),%r1


+9 −1
@@ -61,8 +61,10 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/bug.h>
+#include <linux/mm.h>
#include <linux/slab.h>

+#include <asm/pgtable.h>
#include <asm/unwind.h>

#if 0

@@ -214,7 +216,13 @@ void *module_alloc(unsigned long size)
{
	if (size == 0)
		return NULL;
-	return vmalloc(size);
+	/* using RWX means less protection for modules, but it's
+	 * easier than trying to map the text, data, init_text and
+	 * init_data correctly */
+	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+				    GFP_KERNEL | __GFP_HIGHMEM,
+				    PAGE_KERNEL_RWX, -1,
+				    __builtin_return_address(0));
}

#ifndef CONFIG_64BIT
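For context on the call above: in kernels of this era __vmalloc_node_range() had roughly the prototype below. The argument names are approximate and their roles are read off the call site, so treat this as a sketch rather than an authoritative declaration.

/* From <linux/vmalloc.h>, approximately: allocate 'size' bytes with the
 * given alignment inside [start, end) of the vmalloc area, using
 * 'gfp_mask' for the backing pages, mapping them with protection 'prot',
 * on NUMA node 'node' (-1 = any node), recording 'caller' for
 * /proc/vmallocinfo. */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			   unsigned long start, unsigned long end,
			   gfp_t gfp_mask, pgprot_t prot, int node,
			   void *caller);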
+1 −0
@@ -134,6 +134,7 @@ SECTIONS
	. = ALIGN(16384);
	__init_begin = .;
	INIT_TEXT_SECTION(16384)
+	. = ALIGN(PAGE_SIZE);
	INIT_DATA_SECTION(16)
	/* we have to discard exit text and such at runtime, not link time */
	.exit.text :