Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f6f6f4e8 authored by Shiraz Hashim's avatar Shiraz Hashim Committed by Charan Teja Reddy
Browse files

arm: mm: program ptes for access restriction



CONFIG_RODATA enforces strict kernel mapping permissions,
marking regions read-only, non-executable, and so on. It
assumes, however, that all memory regions are SECTION_SIZE
aligned and section mapped for performance reasons.

With CONFIG_FORCE_PAGES, we force all kernel mappings as
page mapped thus breaking CONFIG_RODATA.

Add support for applying permissions at the page (pte)
level when CONFIG_RODATA does not find a section mapping.

Change-Id: I8dbf5c3741836bc63a231d8a471cf0306662993b
Signed-off-by: default avatarShiraz Hashim <shashim@codeaurora.org>
Signed-off-by: default avatarCharan Teja Reddy <charante@codeaurora.org>
parent c3d0d491
Loading
Loading
Loading
Loading
+54 −4
Original line number Diff line number Diff line
@@ -600,6 +600,9 @@ struct section_perm {
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
	pteval_t ptemask;
	pteval_t pteprot;
	pteval_t pteclear;
};

/* First section-aligned location at or after __start_rodata. */
@@ -613,6 +616,8 @@ static struct section_perm nx_perms[] = {
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
		.ptemask = ~L_PTE_XN,
		.pteprot = L_PTE_XN,
	},
	/* Make init RW (set NX). */
	{
@@ -621,6 +626,8 @@ static struct section_perm nx_perms[] = {
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
		.ptemask = ~L_PTE_XN,
		.pteprot = L_PTE_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
@@ -629,6 +636,8 @@ static struct section_perm nx_perms[] = {
		.end    = (unsigned long)__init_begin,
		.mask   = ~PMD_SECT_XN,
		.prot   = PMD_SECT_XN,
		.ptemask = ~L_PTE_XN,
		.pteprot = L_PTE_XN,
	},
};

@@ -646,6 +655,8 @@ static struct section_perm ro_perms[] = {
		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear  = PMD_SECT_AP_WRITE,
#endif
		.ptemask = ~L_PTE_RDONLY,
		.pteprot = L_PTE_RDONLY,
	},
};

@@ -654,6 +665,35 @@ static struct section_perm ro_perms[] = {
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
/*
 * Mask/value pair handed to __pte_update() through apply_to_page_range():
 * each visited pte is rewritten as (pte & mask) | val.
 */
struct pte_data {
	pteval_t mask;	/* bits of the existing pte to preserve */
	pteval_t val;	/* protection bits to OR in after masking */
};

/*
 * apply_to_page_range() callback: rewrite a single kernel pte so that
 * only the bits kept by d->mask survive and d->val bits are set.
 * @token and @addr are unused but required by the callback signature.
 * Always returns 0 so the walk covers the whole range.
 */
static int __pte_update(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *d)
{
	struct pte_data *data = d;
	pte_t pte;

	/* Original read of *ptep into pte was a dead store; build it once. */
	pte = __pte((pte_val(*ptep) & data->mask) | data->val);
	set_pte_ext(ptep, pte, 0);

	return 0;
}

/*
 * Apply @mask/@prot to every pte covering the SECTION_SIZE region that
 * starts at @addr in @mm, then invalidate the matching kernel TLB
 * entries so the new permissions take effect immediately.
 */
static inline void pte_update(unsigned long addr, pteval_t mask,
				  pteval_t prot, struct mm_struct *mm)
{
	struct pte_data data = {
		.mask	= mask,
		.val	= prot,
	};

	apply_to_page_range(mm, addr, SECTION_SIZE, __pte_update, &data);
	flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
@@ -702,11 +742,21 @@ void set_section_perms(struct section_perm *perms, int n, bool set,

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
		     addr += SECTION_SIZE) {
			pmd_t *pmd;

			pmd = pmd_offset(pud_offset(pgd_offset(mm, addr),
						addr), addr);
			if (pmd_bad(*pmd))
				section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
					set ? perms[i].prot : perms[i].clear,
					mm);
			else
				pte_update(addr, perms[i].ptemask,
				     set ? perms[i].pteprot : perms[i].pteclear,
				     mm);
		}
	}

}

/**