Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 671b87d2 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "arm: mm: program ptes for access restriction"

parents 68bd8358 a6a2dfde
Loading
Loading
Loading
Loading
+23 −0
Original line number Diff line number Diff line
@@ -75,6 +75,29 @@ config DEBUG_USER
	      8 - SIGSEGV faults
	     16 - SIGBUS faults

config FORCE_PAGES
	bool "Force lowmem to be mapped with 4K pages"
	help
	  There are some advanced debug features that can only be done when
	  memory is mapped with pages instead of sections. Enable this option
	  to always map lowmem with pages. This may have a performance
	  cost due to increased TLB pressure.

	  If unsure, say N.

config FREE_PAGES_RDONLY
	bool "Set pages as read only while on the buddy list"
	select FORCE_PAGES
	select PAGE_POISONING
	help
	  Pages are always mapped in the kernel. This means that anyone
	  can write to the page if they have the address. Enable this option
	  to mark pages as read only to trigger a fault if any code attempts
	  to write to a page on the buddy list. This may have a performance
	  impact.

	  If unsure, say N.

# These options are only for real kernel hackers who want to get their hands dirty.
config DEBUG_LL
	bool "Kernel low-level debugging functions (read help!)"
+8 −0
Original line number Diff line number Diff line
@@ -520,4 +520,12 @@ static inline void set_kernel_text_ro(void) { }
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len);

#ifdef CONFIG_FREE_PAGES_RDONLY
/*
 * Flip the R/W permission of the single kernel page containing address 'a'.
 * Used to catch stray writes to pages sitting on the buddy free list.
 * Argument is parenthesized so expressions like p + 1 cast correctly.
 */
#define mark_addr_rdonly(a)	set_memory_ro((unsigned long)(a), 1)
#define mark_addr_rdwrite(a)	set_memory_rw((unsigned long)(a), 1)
#else
/* Expand to a harmless single statement so call sites remain valid syntax. */
#define mark_addr_rdonly(a)	do { } while (0)
#define mark_addr_rdwrite(a)	do { } while (0)
#endif

#endif
+56 −3
Original line number Diff line number Diff line
@@ -672,6 +672,9 @@ struct section_perm {
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
	pteval_t ptemask;
	pteval_t pteprot;
	pteval_t pteclear;
};

static struct section_perm nx_perms[] = {
@@ -681,6 +684,8 @@ static struct section_perm nx_perms[] = {
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
		.ptemask = ~L_PTE_XN,
		.pteprot = L_PTE_XN,
	},
	/* Make init RW (set NX). */
	{
@@ -688,6 +693,8 @@ static struct section_perm nx_perms[] = {
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
		.ptemask = ~L_PTE_XN,
		.pteprot = L_PTE_XN,
	},
#ifdef CONFIG_DEBUG_RODATA
	/* Make rodata NX (set RO in ro_perms below). */
@@ -696,6 +703,8 @@ static struct section_perm nx_perms[] = {
		.end    = (unsigned long)__init_begin,
		.mask   = ~PMD_SECT_XN,
		.prot   = PMD_SECT_XN,
		.ptemask = ~L_PTE_XN,
		.pteprot = L_PTE_XN,
	},
#endif
};
@@ -714,6 +723,8 @@ static struct section_perm ro_perms[] = {
		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear  = PMD_SECT_AP_WRITE,
#endif
		.ptemask = ~L_PTE_RDONLY,
		.pteprot = L_PTE_RDONLY,
	},
};
#endif
@@ -723,6 +734,37 @@ static struct section_perm ro_perms[] = {
 * copied into each mm). During startup, this is the init_mm. It is only
 * safe to call this with preemption disabled, as under stop_machine().
 */
/* Mask/value pair threaded through apply_to_page_range() to __pte_update(). */
struct pte_data {
	pteval_t mask;	/* bits of the existing PTE value to preserve */
	pteval_t val;	/* protection bits to OR in after masking */
};

/*
 * apply_to_page_range() callback: rewrite one PTE as
 * (old & mask) | val, where mask/val come from the pte_data cookie.
 *
 * 'token' and 'addr' are unused but required by the callback signature.
 * Always returns 0 so the walk never aborts early.
 */
static int __pte_update(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *d)
{
	struct pte_data *data = d;
	pte_t pte;

	/* Build the new PTE value directly; the old dead-store init of
	 * 'pte' from *ptep was immediately overwritten and has been removed.
	 */
	pte = __pte((pte_val(*ptep) & data->mask) | data->val);
	set_pte_ext(ptep, pte, 0);

	return 0;
}

/*
 * Apply a mask/prot pair to every PTE in one section-sized (SECTION_SIZE)
 * region starting at 'addr', then flush the TLB for that range.
 * Walks the current active_mm's tables via apply_to_page_range().
 */
static inline void pte_update(unsigned long addr, pteval_t mask,
				  pteval_t prot)
{
	struct mm_struct *mm = current->active_mm;
	struct pte_data data = {
		.mask = mask,
		.val  = prot,
	};

	apply_to_page_range(mm, addr, SECTION_SIZE, __pte_update, &data);
	flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot)
{
@@ -771,9 +813,20 @@ static inline bool arch_has_strict_perms(void)
									\
		for (addr = perms[i].start;				\
		     addr < perms[i].end;				\
		     addr += SECTION_SIZE)				\
		     addr += SECTION_SIZE) {				\
			pmd_t *pmd;					\
			struct mm_struct *mm;				\
									\
			mm = current->active_mm;			\
			pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), \
						addr), addr);		\
			if (pmd_bad(*pmd))				\
				section_update(addr, perms[i].mask,	\
					       perms[i].field);		\
			else						\
				pte_update(addr, perms[i].ptemask,	\
					       perms[i].pte##field);	\
		}							\
	}								\
}

+120 −4
Original line number Diff line number Diff line
@@ -373,11 +373,13 @@ int set_memory_##_name(unsigned long addr, int numpages) \
	unsigned long size = PAGE_SIZE*numpages; \
	unsigned end = start + size; \
\
	if (!IS_ENABLED(CONFIG_FORCE_PAGES)) { \
		if (start < MODULES_VADDR || start >= MODULES_END) \
			return -EINVAL;\
\
		if (end < MODULES_VADDR || end >= MODULES_END) \
			return -EINVAL; \
	} \
\
	apply_to_page_range(&init_mm, start, size, callback, NULL); \
	flush_tlb_kernel_range(start, end); \
@@ -1588,6 +1590,119 @@ void __init early_paging_init(const struct machine_desc *mdesc,

#endif

#ifdef CONFIG_FORCE_PAGES
/*
 * Remap a PMD into pages.
 * We split a single pmd here — none of this two-pmd nonsense.
 */
/*
 * Split the section mapping covered by [addr, end) into 4K page mappings.
 * 'pfn' is the physical frame backing 'addr'; 'type' supplies the pte/l1
 * protections to program. Allocates a new PTE table from early memory when
 * the walked pmd has none, otherwise reuses the existing one.
 */
static noinline void __init split_pmd(pmd_t *pmd, unsigned long addr,
				unsigned long end, unsigned long pfn,
				const struct mem_type *type)
{
	pte_t *pte, *start_pte;
	pmd_t *base_pmd;

	/* Re-walk init_mm for addr rather than trusting the passed pmd,
	 * which may be the second entry of a non-LPAE pmd pair. */
	base_pmd = pmd_offset(
			pud_offset(pgd_offset(&init_mm, addr), addr), addr);

	if (pmd_none(*base_pmd) || pmd_bad(*base_pmd)) {
		/* No PTE table yet (still a section): allocate one,
		 * including the ARM hardware table that follows the
		 * Linux view. */
		start_pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
#ifndef CONFIG_ARM_LPAE
		/*
		 * Following is needed when new pte is allocated for pmd[1]
		 * cases, which may happen when base (start) address falls
		 * under pmd[1].
		 */
		if (addr & SECTION_SIZE)
			start_pte += pte_index(addr);
#endif
	} else {
		/* Table already exists; point at the entry for addr. */
		start_pte = pte_offset_kernel(base_pmd, addr);
	}

	pte = start_pte;

	/* Fill one PTE per 4K page across [addr, end). */
	do {
		set_pte_ext(pte, pfn_pte(pfn, type->prot_pte), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	/* Repoint the pmd at the hardware half of the new PTE table,
	 * then make it visible and drop stale TLB entries. */
	*pmd = __pmd((__pa(start_pte) + PTE_HWTABLE_OFF) | type->prot_l1);
	mb(); /* let pmd be programmed */
	flush_pmd_entry(pmd);
	flush_tlb_all();
}

/*
 * It's significantly easier to remap as pages later after all memory is
 * mapped. Everything is sections so all we have to do is split
 */
/*
 * Walk every memblock memory region below arm_lowmem_limit and split each
 * section-mapped pmd into 4K page mappings via split_pmd(). Called from
 * paging_init() after map_lowmem(), when lowmem is still all sections.
 */
static void __init remap_pages(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		phys_addr_t phys_start = reg->base;
		phys_addr_t phys_end = reg->base + reg->size;
		unsigned long addr = (unsigned long)__va(phys_start);
		unsigned long end = (unsigned long)__va(phys_end);
		pmd_t *pmd = NULL;
		unsigned long next;
		unsigned long pfn = __phys_to_pfn(phys_start);
		bool fixup = false;
		unsigned long saved_start = addr;

		/* Only lowmem is remapped; clamp or stop at the limit.
		 * NOTE(review): the 'break' assumes memblock regions are
		 * sorted by base address — confirm for this kernel. */
		if (phys_start > arm_lowmem_limit)
			break;
		if (phys_end > arm_lowmem_limit)
			end = (unsigned long)__va(arm_lowmem_limit);
		if (phys_start >= phys_end)
			break;

		pmd = pmd_offset(
			pud_offset(pgd_offset(&init_mm, addr), addr), addr);

#ifndef	CONFIG_ARM_LPAE
		/* Non-LPAE pmds come in pairs; a start/end that lands on
		 * the odd section of a pair needs a placeholder so the
		 * pair is fully populated. */
		if (addr & SECTION_SIZE) {
			fixup = true;
			pmd_empty_section_gap((addr - SECTION_SIZE) & PMD_MASK);
			pmd++;
		}

		if (end & SECTION_SIZE)
			pmd_empty_section_gap(end);
#endif

		/* Split one section per iteration; already page-mapped
		 * pmds (pmd_none/pmd_bad false... i.e. table entries)
		 * are handled inside split_pmd via its own re-walk. */
		do {
			next = addr + SECTION_SIZE;

			if (pmd_none(*pmd) || pmd_bad(*pmd))
				split_pmd(pmd, addr, next, pfn,
						&mem_types[MT_MEMORY_RWX]);
			pmd++;
			pfn += SECTION_SIZE >> PAGE_SHIFT;

		} while (addr = next, addr < end);

		if (fixup) {
			/*
			 * Put a faulting page table here to avoid detecting no
			 * pmd when accessing an odd section boundary. This
			 * needs to be faulting to help catch errors and avoid
			 * speculation
			 */
			pmd = pmd_off_k(saved_start);
			pmd[0] = pmd[1] & ~1;
		}
	}
}
#else
/* No-op when CONFIG_FORCE_PAGES is disabled: lowmem stays section-mapped. */
static void __init remap_pages(void)
{

}
#endif

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
@@ -1600,6 +1715,7 @@ void __init paging_init(const struct machine_desc *mdesc)
	prepare_page_table();
	map_lowmem();
	dma_contiguous_remap();
	remap_pages();
	early_ioremap_reset();
	devicemaps_init(mdesc);
	kmap_init();