Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7d36604b authored by Mitchel Humpherys's avatar Mitchel Humpherys
Browse files

soc: qcom: Add kernel_protect library and initcall



It's good security practice to make your executable code read-only.  On
hypervisor-enabled targets, this can be trivially accomplished by
removing the writable attribute from all stage-2 mappings of the kernel
text.  Add a small library and initcall to do this.

Due to constraints on the hypervisor, this needs to happen before all of
the cores are brought out of reset, so make it an early_initcall.

Change-Id: I2d3ee4ad69402d98f0f6a9078c58e66cd227d222
Signed-off-by: default avatarMitchel Humpherys <mitchelh@codeaurora.org>
parent ad9fe5a4
Loading
Loading
Loading
Loading
+22 −0
Original line number Diff line number Diff line
@@ -671,6 +671,28 @@ config MSM_PACMAN
	  driver allows reconfiguration of the Bus Access Manager Low Speed
	  peripheral (BLSP) ownership.

config MSM_KERNEL_PROTECT
	bool "Protect kernel text by removing write permissions in stage-2"
	depends on !FUNCTION_TRACER
	help
	  On hypervisor-enabled targets, this option will make a call into
	  the hypervisor to request that the kernel text be remapped
	  without write permissions.  This protects against malicious
	  devices rewriting kernel code.

	  Note that this will BREAK any runtime patching of the kernel text
	  (i.e. anything that uses apply_alternatives,
	  aarch64_insn_patch_text_nosync, etc. including the various CPU
	  errata workarounds in arch/arm64/kernel/cpu_errata.c).

config MSM_KERNEL_PROTECT_TEST
	bool "Bootup test of kernel protection (INTENTIONAL CRASH)"
	depends on MSM_KERNEL_PROTECT
	help
	  Attempts to write to the kernel text after making the kernel text
	  read-only.  This test is FATAL whether it passes or fails!
	  Success is signaled by a stage-2 fault.

source "drivers/soc/qcom/memshare/Kconfig"

endif # ARCH_MSM
+1 −0
Original line number Diff line number Diff line
@@ -83,3 +83,4 @@ obj-$(CONFIG_MSM_SERVICE_LOCATOR) += service-locator.o
obj-$(CONFIG_MSM_PACMAN)        += msm_pacman.o
obj-$(CONFIG_MSM_QBT1000) += qbt1000.o
obj-$(CONFIG_MSM_SCM_XPU) += scm-xpu.o
obj-$(CONFIG_MSM_KERNEL_PROTECT) += kernel_protect.o
+107 −0
Original line number Diff line number Diff line
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/printk.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <soc/qcom/secure_buffer.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>


#ifdef CONFIG_MSM_KERNEL_PROTECT_TEST

/*
 * We're going to crash the system so we need to make sure debug messages
 * in the main msm_protect_kernel initcall make it to the serial console.
 */
#undef pr_debug
#define pr_debug pr_err

/*
 * Tests stage-2 protection by trying to overwrite the first byte of
 * __alloc_pages_nodemask.  Either outcome is fatal by design: a stage-2
 * fault means the protection works; surviving the write means it does
 * not, and we BUG() so the failure is impossible to miss.
 */
static void msm_protect_kernel_test(void)
{
	/*
	 * There's nothing special about __alloc_pages_nodemask, we just
	 * need something that lives in the regular (non-init) kernel
	 * text section that we know will never be compiled out.
	 */
	char *addr = (char *)__alloc_pages_nodemask;

	pr_err("Checking whether the kernel text is writable...\n");
	pr_err("A BUG means it is writable (this is bad)\n");
	pr_err("A stage-2 fault means it's not writable (this is good, but we'll still crash)\n");
	/*
	 * We can't simply do a `*addr = 0' since the kernel text might be
	 * read-only in stage-1.  We have to ensure the address is writable
	 * in stage-1 first, otherwise we'll just get a stage-1 fault and
	 * we'll never know if our stage-2 protection is actually working.
	 */
	if (set_memory_rw(round_down((u64)addr, PAGE_SIZE), 1)) {
		pr_err("Couldn't set memory as RW.  Can't perform check!\n");
		return;
	}
	pr_err("Writing now...\n");
	*addr = 0;
	pr_err("If we're still alive right now then kernel protection did NOT work.\n");
	BUG();
}

#else

/* No-op stub so the initcall below can call this unconditionally. */
static void msm_protect_kernel_test(void)
{
}

#endif

/*
 * Ask the hypervisor to remap the kernel text region (stage-2) without
 * write permission, then optionally run the destructive self-test.
 *
 * Returns 0 on success or the error from hyp_assign_phys().
 * NOTE(review): a nonzero return from an early_initcall is still reported
 * by the initcall machinery, so the "relatively silent" failure below is
 * only silent at this driver's log level — confirm that's intended.
 */
static int __init msm_protect_kernel(void)
{
	int ret;
	u32 vmid_hlos = VMID_HLOS;
	int dest_perms = PERM_READ | PERM_EXEC;
	/*
	 * Although the kernel image is mapped with section mappings, the
	 * start and end of the .text segment are assumed to fall on
	 * PAGE_SIZE boundaries, so round outward to be safe.
	 */
	phys_addr_t kernel_x_start_rounded = round_down(__pa(_stext),
							PAGE_SIZE);
	phys_addr_t kernel_x_end = round_up(__pa(_etext), PAGE_SIZE);
	void *virt_start = phys_to_virt(kernel_x_start_rounded);
	void *virt_end = phys_to_virt(kernel_x_end);

	pr_debug("assigning from phys: %pa to %pa\n",
		 &kernel_x_start_rounded, &kernel_x_end);
	pr_debug("virtual: %p to %p\n", virt_start, virt_end);
	/* Reassign the text range to HLOS itself, dropping PERM_WRITE. */
	ret = hyp_assign_phys(kernel_x_start_rounded,
			      kernel_x_end - kernel_x_start_rounded,
			      &vmid_hlos, 1, &vmid_hlos, &dest_perms, 1);
	if (ret)
		/*
		 * We want to fail relatively silently since not all
		 * platforms support the hyp_assign_phys call.
		 */
		pr_debug("Couldn't protect the kernel region: %d\n", ret);

	msm_protect_kernel_test();

	return ret;
}

/*
 * The assign call only works if it happens before we go into SMP mode.  It
 * needs to be an early_initcall so that it happens before we bring the
 * other cores out of reset.
 */
early_initcall(msm_protect_kernel);