Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4fad4d37 authored by Laura Abbott's avatar Laura Abbott Committed by Neil Leeder
Browse files

mmu: arm64: fix ability to write to protected memory



Use the mem_text_address_writeable function when
CONFIG_KERNEL_TEXT_RDONLY is enabled. If the address is
covered by a section mapping, rewrite the pmd; otherwise
modify the individual page table entry (pte) instead.

Change-Id: I04390a9b7376b299161842e87150802da2d4d728
Signed-off-by: default avatarNeil Leeder <nleeder@codeaurora.org>
parent de4cd7bb
Loading
Loading
Loading
Loading
+15 −6
Original line number Diff line number Diff line
@@ -75,7 +75,9 @@ static struct cachepolicy cache_policies[] __initdata = {
#ifdef CONFIG_STRICT_MEMORY_RWX
/*
 * State saved by mem_text_address_writeable() so that
 * mem_text_address_restore() can undo the temporary write permission.
 */
static struct {
	pmd_t *pmd;		/* pmd entry covering the target address */
	pte_t *pte;		/* pte entry, used when the pmd is not a section mapping */
	pmd_t saved_pmd;	/* original pmd value, restored on the restore path */
	pte_t saved_pte;	/* original pte value, restored on the restore path */
	bool made_writeable;	/* set once the mapping was actually made writeable */
} mem_unprotect;

@@ -126,11 +128,15 @@ void mem_text_address_writeable(u64 addr)
	mem_unprotect.pmd = pmd_offset(pud, addr);
	addr_aligned = addr & PAGE_MASK;
	mem_unprotect.saved_pmd = *mem_unprotect.pmd;
	if ((mem_unprotect.saved_pmd & PMD_TYPE_MASK) != PMD_TYPE_SECT)
		return;

	if ((mem_unprotect.saved_pmd & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
		set_pmd(mem_unprotect.pmd,
			__pmd(__pa(addr_aligned) | prot_sect_kernel));
	} else {
		mem_unprotect.pte = pte_offset_kernel(mem_unprotect.pmd, addr);
		mem_unprotect.saved_pte = *mem_unprotect.pte;
		set_pte(mem_unprotect.pte, pfn_pte(__pa(addr) >> PAGE_SHIFT,
						   PAGE_KERNEL_EXEC));
	}
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	mem_unprotect.made_writeable = 1;
@@ -140,7 +146,10 @@ void mem_text_address_writeable(u64 addr)
/*
 * Undo the temporary write permission granted by
 * mem_text_address_writeable(): put back the saved pmd (section mapping)
 * or the saved pte (page mapping) and flush the TLB for the page.
 *
 * @addr: the address previously passed to mem_text_address_writeable()
 */
void mem_text_address_restore(u64 addr)
{
	if (mem_unprotect.made_writeable) {
		/*
		 * Use set_pmd()/set_pte() rather than bare pointer stores so
		 * the restore gets the same barrier/maintenance semantics as
		 * the modification path in mem_text_address_writeable().
		 */
		if ((mem_unprotect.saved_pmd & PMD_TYPE_MASK) == PMD_TYPE_SECT)
			set_pmd(mem_unprotect.pmd, mem_unprotect.saved_pmd);
		else
			set_pte(mem_unprotect.pte, mem_unprotect.saved_pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		/*
		 * Clear the flag so an unpaired second restore call cannot
		 * replay a stale saved entry.
		 */
		mem_unprotect.made_writeable = 0;
	}
}