
Commit b0599e28 authored by Ingo Molnar

Merge branch 'x86/mm' into efi/core



This commit in x86/mm changed EFI code:

   116fef64: x86/mm/dump_pagetables: Add the EFI pagetable to the debugfs 'page_tables' directory

So merge in that commit plus its dependencies, before continuing with
EFI work.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents f779ca74 116fef64
arch/x86/include/asm/mem_encrypt.h: +1 −0
@@ -22,6 +22,7 @@
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 
 extern u64 sme_me_mask;
+extern bool sev_enabled;
 
 void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
 			 unsigned long decrypted_kernel_vaddr,
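The header now exports sev_enabled because the early SME/SEV setup code that sets it moves out of mem_encrypt.c in this series (the mem_encrypt.c hunk below drops its `static`). For context, a minimal userspace sketch of how the mask and flag combine, modeled on the kernel's sme_active()/sev_active() helpers of this era; it is an illustration, not the verbatim kernel source:

/*
 * Sketch: how sme_me_mask and sev_enabled distinguish SME from SEV.
 * Modeled on the 4.16-era helpers in arch/x86/mm/mem_encrypt.c;
 * simplified for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t sme_me_mask;	/* encryption (C-bit) mask; zero = no encryption */
static bool sev_enabled;	/* set during early boot when running as a SEV guest */

/* SME: host-side encryption, active when the mask is set and SEV is not */
static bool sme_active(void)
{
	return sme_me_mask && !sev_enabled;
}

/* SEV: guest-side encryption, active when both are set */
static bool sev_active(void)
{
	return sme_me_mask && sev_enabled;
}

int main(void)
{
	/* Pretend we are a SEV guest; bit 47 is a common C-bit position, but it varies. */
	sme_me_mask = 1ULL << 47;
	sev_enabled = true;

	printf("SME active: %d, SEV active: %d\n", sme_active(), sev_active());
	return 0;
}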
arch/x86/mm/Makefile: +9 −5
 # SPDX-License-Identifier: GPL-2.0
-# Kernel does not boot with instrumentation of tlb.c and mem_encrypt.c
+# Kernel does not boot with instrumentation of tlb.c and mem_encrypt*.c
 KCOV_INSTRUMENT_tlb.o			:= n
 KCOV_INSTRUMENT_mem_encrypt.o		:= n
+KCOV_INSTRUMENT_mem_encrypt_identity.o	:= n
 
 KASAN_SANITIZE_mem_encrypt.o		:= n
+KASAN_SANITIZE_mem_encrypt_identity.o	:= n
 
 ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_mem_encrypt.o		= -pg
+CFLAGS_REMOVE_mem_encrypt_identity.o	= -pg
 endif
 
 obj-y	:=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
@@ -47,4 +50,5 @@ obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
 obj-$(CONFIG_PAGE_TABLE_ISOLATION)		+= pti.o
 
 obj-$(CONFIG_AMD_MEM_ENCRYPT)	+= mem_encrypt.o
+obj-$(CONFIG_AMD_MEM_ENCRYPT)	+= mem_encrypt_identity.o
 obj-$(CONFIG_AMD_MEM_ENCRYPT)	+= mem_encrypt_boot.o
arch/x86/mm/debug_pagetables.c: +32 −0
@@ -72,6 +72,31 @@ static const struct file_operations ptdump_curusr_fops = {
 };
 #endif
 
+#if defined(CONFIG_EFI) && defined(CONFIG_X86_64)
+extern pgd_t *efi_pgd;
+static struct dentry *pe_efi;
+
+static int ptdump_show_efi(struct seq_file *m, void *v)
+{
+	if (efi_pgd)
+		ptdump_walk_pgd_level_debugfs(m, efi_pgd, false);
+	return 0;
+}
+
+static int ptdump_open_efi(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, ptdump_show_efi, NULL);
+}
+
+static const struct file_operations ptdump_efi_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ptdump_open_efi,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+#endif
+
 static struct dentry *dir, *pe_knl, *pe_curknl;
 
 static int __init pt_dump_debug_init(void)
@@ -96,6 +121,13 @@ static int __init pt_dump_debug_init(void)
 	if (!pe_curusr)
 		goto err;
 #endif
+
+#if defined(CONFIG_EFI) && defined(CONFIG_X86_64)
+	pe_efi = debugfs_create_file("efi", 0400, dir, NULL, &ptdump_efi_fops);
+	if (!pe_efi)
+		goto err;
+#endif
+
 	return 0;
 err:
 	debugfs_remove_recursive(dir);
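With the new entry in place, the EFI page-table layout can be read from userspace once debugfs is mounted. A minimal reader sketch; it assumes the conventional debugfs mount point /sys/kernel/debug, and the 0400 mode above means it must run as root:

/*
 * Sketch: dump the EFI page tables exposed by this commit.
 * Assumes debugfs is mounted at /sys/kernel/debug (conventional,
 * not guaranteed) and a kernel with CONFIG_EFI and CONFIG_X86_64.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/page_tables/efi";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one mapping range per line, as emitted via seq_file */
	fclose(f);
	return EXIT_SUCCESS;
}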
arch/x86/mm/mem_encrypt.c: +1 −577
@@ -25,17 +25,12 @@
 #include <asm/bootparam.h>
 #include <asm/set_memory.h>
 #include <asm/cacheflush.h>
-#include <asm/sections.h>
 #include <asm/processor-flags.h>
 #include <asm/msr.h>
 #include <asm/cmdline.h>
 
 #include "mm_internal.h"
 
-static char sme_cmdline_arg[] __initdata = "mem_encrypt";
-static char sme_cmdline_on[]  __initdata = "on";
-static char sme_cmdline_off[] __initdata = "off";
-
 /*
  * Since SME related variables are set early in the boot process they must
  * reside in the .data section so as not to be zeroed out when the .bss
@@ -46,7 +41,7 @@ EXPORT_SYMBOL(sme_me_mask);
 DEFINE_STATIC_KEY_FALSE(sev_enable_key);
 EXPORT_SYMBOL_GPL(sev_enable_key);
 
-static bool sev_enabled __section(.data);
+bool sev_enabled __section(.data);
 
 /* Buffer used for early in-place encryption by BSP, no locking needed */
 static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
@@ -463,574 +458,3 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
 	/* Make the SWIOTLB buffer area decrypted */
 	set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
 }
-
-struct sme_populate_pgd_data {
-	void	*pgtable_area;
-	pgd_t	*pgd;
-
-	pmdval_t pmd_flags;
-	pteval_t pte_flags;
-	unsigned long paddr;
-
-	unsigned long vaddr;
-	unsigned long vaddr_end;
-};
-
-static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
-{
-	unsigned long pgd_start, pgd_end, pgd_size;
-	pgd_t *pgd_p;
-
-	pgd_start = ppd->vaddr & PGDIR_MASK;
-	pgd_end = ppd->vaddr_end & PGDIR_MASK;
-
-	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);
-
-	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
-
-	memset(pgd_p, 0, pgd_size);
-}
-
-#define PGD_FLAGS		_KERNPG_TABLE_NOENC
-#define P4D_FLAGS		_KERNPG_TABLE_NOENC
-#define PUD_FLAGS		_KERNPG_TABLE_NOENC
-#define PMD_FLAGS		_KERNPG_TABLE_NOENC
-
-#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
-
-#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
-#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
-				 (_PAGE_PAT | _PAGE_PWT))
-
-#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)
-
-#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
-
-#define PTE_FLAGS_DEC		PTE_FLAGS
-#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
-				 (_PAGE_PAT | _PAGE_PWT))
-
-#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)
-
-static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
-{
-	pgd_t *pgd_p;
-	p4d_t *p4d_p;
-	pud_t *pud_p;
-	pmd_t *pmd_p;
-
-	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
-	if (native_pgd_val(*pgd_p)) {
-		if (IS_ENABLED(CONFIG_X86_5LEVEL))
-			p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
-		else
-			pud_p = (pud_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
-	} else {
-		pgd_t pgd;
-
-		if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-			p4d_p = ppd->pgtable_area;
-			memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
-			ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
-
-			pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
-		} else {
-			pud_p = ppd->pgtable_area;
-			memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
-			ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
-
-			pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
-		}
-		native_set_pgd(pgd_p, pgd);
-	}
-
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-		p4d_p += p4d_index(ppd->vaddr);
-		if (native_p4d_val(*p4d_p)) {
-			pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
-		} else {
-			p4d_t p4d;
-
-			pud_p = ppd->pgtable_area;
-			memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
-			ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
-
-			p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS);
-			native_set_p4d(p4d_p, p4d);
-		}
-	}
-
-	pud_p += pud_index(ppd->vaddr);
-	if (native_pud_val(*pud_p)) {
-		if (native_pud_val(*pud_p) & _PAGE_PSE)
-			return NULL;
-
-		pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
-	} else {
-		pud_t pud;
-
-		pmd_p = ppd->pgtable_area;
-		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
-		ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
-
-		pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS);
-		native_set_pud(pud_p, pud);
-	}
-
-	return pmd_p;
-}
-
-static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
-{
-	pmd_t *pmd_p;
-
-	pmd_p = sme_prepare_pgd(ppd);
-	if (!pmd_p)
-		return;
-
-	pmd_p += pmd_index(ppd->vaddr);
-	if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
-		native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
-}
-
-static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
-{
-	pmd_t *pmd_p;
-	pte_t *pte_p;
-
-	pmd_p = sme_prepare_pgd(ppd);
-	if (!pmd_p)
-		return;
-
-	pmd_p += pmd_index(ppd->vaddr);
-	if (native_pmd_val(*pmd_p)) {
-		if (native_pmd_val(*pmd_p) & _PAGE_PSE)
-			return;
-
-		pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK);
-	} else {
-		pmd_t pmd;
-
-		pte_p = ppd->pgtable_area;
-		memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE);
-		ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE;
-
-		pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS);
-		native_set_pmd(pmd_p, pmd);
-	}
-
-	pte_p += pte_index(ppd->vaddr);
-	if (!native_pte_val(*pte_p))
-		native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags));
-}
-
-static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
-{
-	while (ppd->vaddr < ppd->vaddr_end) {
-		sme_populate_pgd_large(ppd);
-
-		ppd->vaddr += PMD_PAGE_SIZE;
-		ppd->paddr += PMD_PAGE_SIZE;
-	}
-}
-
-static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
-{
-	while (ppd->vaddr < ppd->vaddr_end) {
-		sme_populate_pgd(ppd);
-
-		ppd->vaddr += PAGE_SIZE;
-		ppd->paddr += PAGE_SIZE;
-	}
-}
-
-static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
-				   pmdval_t pmd_flags, pteval_t pte_flags)
-{
-	unsigned long vaddr_end;
-
-	ppd->pmd_flags = pmd_flags;
-	ppd->pte_flags = pte_flags;
-
-	/* Save original end value since we modify the struct value */
-	vaddr_end = ppd->vaddr_end;
-
-	/* If start is not 2MB aligned, create PTE entries */
-	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
-	__sme_map_range_pte(ppd);
-
-	/* Create PMD entries */
-	ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
-	__sme_map_range_pmd(ppd);
-
-	/* If end is not 2MB aligned, create PTE entries */
-	ppd->vaddr_end = vaddr_end;
-	__sme_map_range_pte(ppd);
-}
-
-static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
-{
-	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
-}
-
-static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
-{
-	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
-}
-
-static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
-{
-	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
-}
-
-static unsigned long __init sme_pgtable_calc(unsigned long len)
-{
-	unsigned long p4d_size, pud_size, pmd_size, pte_size;
-	unsigned long total;
-
-	/*
-	 * Perform a relatively simplistic calculation of the pagetable
-	 * entries that are needed. Those mappings will be covered mostly
-	 * by 2MB PMD entries so we can conservatively calculate the required
-	 * number of P4D, PUD and PMD structures needed to perform the
-	 * mappings.  For mappings that are not 2MB aligned, PTE mappings
-	 * would be needed for the start and end portion of the address range
-	 * that fall outside of the 2MB alignment.  This results in, at most,
-	 * two extra pages to hold PTE entries for each range that is mapped.
-	 * Incrementing the count for each covers the case where the addresses
-	 * cross entries.
-	 */
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-		p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
-		p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D;
-		pud_size = (ALIGN(len, P4D_SIZE) / P4D_SIZE) + 1;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	} else {
-		p4d_size = 0;
-		pud_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	}
-	pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
-	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
-	pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE;
-
-	total = p4d_size + pud_size + pmd_size + pte_size;
-
-	/*
-	 * Now calculate the added pagetable structures needed to populate
-	 * the new pagetables.
-	 */
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-		p4d_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE;
-		p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D;
-		pud_size = ALIGN(total, P4D_SIZE) / P4D_SIZE;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	} else {
-		p4d_size = 0;
-		pud_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	}
-	pmd_size = ALIGN(total, PUD_SIZE) / PUD_SIZE;
-	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
-
-	total += p4d_size + pud_size + pmd_size;
-
-	return total;
-}
-
-void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp)
-{
-	unsigned long workarea_start, workarea_end, workarea_len;
-	unsigned long execute_start, execute_end, execute_len;
-	unsigned long kernel_start, kernel_end, kernel_len;
-	unsigned long initrd_start, initrd_end, initrd_len;
-	struct sme_populate_pgd_data ppd;
-	unsigned long pgtable_area_len;
-	unsigned long decrypted_base;
-
-	if (!sme_active())
-		return;
-
-	/*
-	 * Prepare for encrypting the kernel and initrd by building new
-	 * pagetables with the necessary attributes needed to encrypt the
-	 * kernel in place.
-	 *
-	 *   One range of virtual addresses will map the memory occupied
-	 *   by the kernel and initrd as encrypted.
-	 *
-	 *   Another range of virtual addresses will map the memory occupied
-	 *   by the kernel and initrd as decrypted and write-protected.
-	 *
-	 *     The use of write-protect attribute will prevent any of the
-	 *     memory from being cached.
-	 */
-
-	/* Physical addresses gives us the identity mapped virtual addresses */
-	kernel_start = __pa_symbol(_text);
-	kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
-	kernel_len = kernel_end - kernel_start;
-
-	initrd_start = 0;
-	initrd_end = 0;
-	initrd_len = 0;
-#ifdef CONFIG_BLK_DEV_INITRD
-	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
-		     ((unsigned long)bp->ext_ramdisk_size << 32);
-	if (initrd_len) {
-		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
-			       ((unsigned long)bp->ext_ramdisk_image << 32);
-		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
-		initrd_len = initrd_end - initrd_start;
-	}
-#endif
-
-	/* Set the encryption workarea to be immediately after the kernel */
-	workarea_start = kernel_end;
-
-	/*
-	 * Calculate required number of workarea bytes needed:
-	 *   executable encryption area size:
-	 *     stack page (PAGE_SIZE)
-	 *     encryption routine page (PAGE_SIZE)
-	 *     intermediate copy buffer (PMD_PAGE_SIZE)
-	 *   pagetable structures for the encryption of the kernel
-	 *   pagetable structures for workarea (in case not currently mapped)
-	 */
-	execute_start = workarea_start;
-	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
-	execute_len = execute_end - execute_start;
-
-	/*
-	 * One PGD for both encrypted and decrypted mappings and a set of
-	 * PUDs and PMDs for each of the encrypted and decrypted mappings.
-	 */
-	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
-	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
-	if (initrd_len)
-		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;
-
-	/* PUDs and PMDs needed in the current pagetables for the workarea */
-	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);
-
-	/*
-	 * The total workarea includes the executable encryption area and
-	 * the pagetable area. The start of the workarea is already 2MB
-	 * aligned, align the end of the workarea on a 2MB boundary so that
-	 * we don't try to create/allocate PTE entries from the workarea
-	 * before it is mapped.
-	 */
-	workarea_len = execute_len + pgtable_area_len;
-	workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
-
-	/*
-	 * Set the address to the start of where newly created pagetable
-	 * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
-	 * structures are created when the workarea is added to the current
-	 * pagetables and when the new encrypted and decrypted kernel
-	 * mappings are populated.
-	 */
-	ppd.pgtable_area = (void *)execute_end;
-
-	/*
-	 * Make sure the current pagetable structure has entries for
-	 * addressing the workarea.
-	 */
-	ppd.pgd = (pgd_t *)native_read_cr3_pa();
-	ppd.paddr = workarea_start;
-	ppd.vaddr = workarea_start;
-	ppd.vaddr_end = workarea_end;
-	sme_map_range_decrypted(&ppd);
-
-	/* Flush the TLB - no globals so cr3 is enough */
-	native_write_cr3(__native_read_cr3());
-
-	/*
-	 * A new pagetable structure is being built to allow for the kernel
-	 * and initrd to be encrypted. It starts with an empty PGD that will
-	 * then be populated with new PUDs and PMDs as the encrypted and
-	 * decrypted kernel mappings are created.
-	 */
-	ppd.pgd = ppd.pgtable_area;
-	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
-	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;
-
-	/*
-	 * A different PGD index/entry must be used to get different
-	 * pagetable entries for the decrypted mapping. Choose the next
-	 * PGD index and convert it to a virtual address to be used as
-	 * the base of the mapping.
-	 */
-	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
-	if (initrd_len) {
-		unsigned long check_base;
-
-		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
-		decrypted_base = max(decrypted_base, check_base);
-	}
-	decrypted_base <<= PGDIR_SHIFT;
-
-	/* Add encrypted kernel (identity) mappings */
-	ppd.paddr = kernel_start;
-	ppd.vaddr = kernel_start;
-	ppd.vaddr_end = kernel_end;
-	sme_map_range_encrypted(&ppd);
-
-	/* Add decrypted, write-protected kernel (non-identity) mappings */
-	ppd.paddr = kernel_start;
-	ppd.vaddr = kernel_start + decrypted_base;
-	ppd.vaddr_end = kernel_end + decrypted_base;
-	sme_map_range_decrypted_wp(&ppd);
-
-	if (initrd_len) {
-		/* Add encrypted initrd (identity) mappings */
-		ppd.paddr = initrd_start;
-		ppd.vaddr = initrd_start;
-		ppd.vaddr_end = initrd_end;
-		sme_map_range_encrypted(&ppd);
-		/*
-		 * Add decrypted, write-protected initrd (non-identity) mappings
-		 */
-		ppd.paddr = initrd_start;
-		ppd.vaddr = initrd_start + decrypted_base;
-		ppd.vaddr_end = initrd_end + decrypted_base;
-		sme_map_range_decrypted_wp(&ppd);
-	}
-
-	/* Add decrypted workarea mappings to both kernel mappings */
-	ppd.paddr = workarea_start;
-	ppd.vaddr = workarea_start;
-	ppd.vaddr_end = workarea_end;
-	sme_map_range_decrypted(&ppd);
-
-	ppd.paddr = workarea_start;
-	ppd.vaddr = workarea_start + decrypted_base;
-	ppd.vaddr_end = workarea_end + decrypted_base;
-	sme_map_range_decrypted(&ppd);
-
-	/* Perform the encryption */
-	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
-			    kernel_len, workarea_start, (unsigned long)ppd.pgd);
-
-	if (initrd_len)
-		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
-				    initrd_len, workarea_start,
-				    (unsigned long)ppd.pgd);
-
-	/*
-	 * At this point we are running encrypted.  Remove the mappings for
-	 * the decrypted areas - all that is needed for this is to remove
-	 * the PGD entry/entries.
-	 */
-	ppd.vaddr = kernel_start + decrypted_base;
-	ppd.vaddr_end = kernel_end + decrypted_base;
-	sme_clear_pgd(&ppd);
-
-	if (initrd_len) {
-		ppd.vaddr = initrd_start + decrypted_base;
-		ppd.vaddr_end = initrd_end + decrypted_base;
-		sme_clear_pgd(&ppd);
-	}
-
-	ppd.vaddr = workarea_start + decrypted_base;
-	ppd.vaddr_end = workarea_end + decrypted_base;
-	sme_clear_pgd(&ppd);
-
-	/* Flush the TLB - no globals so cr3 is enough */
-	native_write_cr3(__native_read_cr3());
-}
-
-void __init __nostackprotector sme_enable(struct boot_params *bp)
-{
-	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
-	unsigned int eax, ebx, ecx, edx;
-	unsigned long feature_mask;
-	bool active_by_default;
-	unsigned long me_mask;
-	char buffer[16];
-	u64 msr;
-
-	/* Check for the SME/SEV support leaf */
-	eax = 0x80000000;
-	ecx = 0;
-	native_cpuid(&eax, &ebx, &ecx, &edx);
-	if (eax < 0x8000001f)
-		return;
-
-#define AMD_SME_BIT	BIT(0)
-#define AMD_SEV_BIT	BIT(1)
-	/*
-	 * Set the feature mask (SME or SEV) based on whether we are
-	 * running under a hypervisor.
-	 */
-	eax = 1;
-	ecx = 0;
-	native_cpuid(&eax, &ebx, &ecx, &edx);
-	feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;
-
-	/*
-	 * Check for the SME/SEV feature:
-	 *   CPUID Fn8000_001F[EAX]
-	 *   - Bit 0 - Secure Memory Encryption support
-	 *   - Bit 1 - Secure Encrypted Virtualization support
-	 *   CPUID Fn8000_001F[EBX]
-	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
-	 */
-	eax = 0x8000001f;
-	ecx = 0;
-	native_cpuid(&eax, &ebx, &ecx, &edx);
-	if (!(eax & feature_mask))
-		return;
-
-	me_mask = 1UL << (ebx & 0x3f);
-
-	/* Check if memory encryption is enabled */
-	if (feature_mask == AMD_SME_BIT) {
-		/* For SME, check the SYSCFG MSR */
-		msr = __rdmsr(MSR_K8_SYSCFG);
-		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
-			return;
-	} else {
-		/* For SEV, check the SEV MSR */
-		msr = __rdmsr(MSR_AMD64_SEV);
-		if (!(msr & MSR_AMD64_SEV_ENABLED))
-			return;
-
-		/* SEV state cannot be controlled by a command line option */
-		sme_me_mask = me_mask;
-		sev_enabled = true;
-		return;
-	}
-
-	/*
-	 * Fixups have not been applied to phys_base yet and we're running
-	 * identity mapped, so we must obtain the address to the SME command
-	 * line argument data using rip-relative addressing.
-	 */
-	asm ("lea sme_cmdline_arg(%%rip), %0"
-	     : "=r" (cmdline_arg)
-	     : "p" (sme_cmdline_arg));
-	asm ("lea sme_cmdline_on(%%rip), %0"
-	     : "=r" (cmdline_on)
-	     : "p" (sme_cmdline_on));
-	asm ("lea sme_cmdline_off(%%rip), %0"
-	     : "=r" (cmdline_off)
-	     : "p" (sme_cmdline_off));
-
-	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
-		active_by_default = true;
-	else
-		active_by_default = false;
-
-	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
-				     ((u64)bp->ext_cmd_line_ptr << 32));
-
-	cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
-
-	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
-		sme_me_mask = me_mask;
-	else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
-		sme_me_mask = 0;
-	else
-		sme_me_mask = active_by_default ? me_mask : 0;
-}
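Two pieces of the relocated code above reward a closer look. First, the decrypted_base computation in sme_encrypt_kernel(): it takes the PGD slot just past the highest one in use and shifts that index back up into a virtual address, so the decrypted alias cannot collide with the identity mappings. A standalone sketch with the x86-64 4-level paging constants; the workarea address is hypothetical and the initrd max() step is omitted:

/*
 * Sketch: deriving the non-identity "decrypted" alias base from the
 * next PGD slot, as sme_encrypt_kernel() does. Constants are the
 * x86-64 4-level paging values; the example address is hypothetical.
 */
#include <stdio.h>

#define PGDIR_SHIFT	39
#define PTRS_PER_PGD	512UL

static unsigned long pgd_index(unsigned long vaddr)
{
	return (vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

int main(void)
{
	unsigned long workarea_end = 0x40000000UL;	/* 1 GiB, illustrative */
	unsigned long decrypted_base;

	/* Slot after the one covering the workarea... */
	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
	/* ...turned into an address: each PGD slot spans 512 GiB. */
	decrypted_base <<= PGDIR_SHIFT;

	/* Prints: identity slot 0, decrypted alias base 0x8000000000 */
	printf("identity slot %lu, decrypted alias base %#lx\n",
	       pgd_index(workarea_end), decrypted_base);
	return 0;
}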
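Second, the CPUID probe at the top of sme_enable() can be reproduced from userspace. This sketch follows the bit layout documented in the comment above (CPUID Fn8000_001F: EAX bit 0 = SME, bit 1 = SEV, EBX bits 5:0 = C-bit position) and uses GCC/Clang's <cpuid.h>; it only reports support, without the kernel's hypervisor and MSR checks:

/*
 * Sketch: the SME/SEV CPUID probe from sme_enable(), as a userspace
 * program. Bit layout per the comment in the diff above; needs an
 * x86-64 build with GCC or Clang for <cpuid.h>.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Is the 0x8000001f support leaf implemented at all? */
	__cpuid(0x80000000, eax, ebx, ecx, edx);
	if (eax < 0x8000001f) {
		puts("no SME/SEV support leaf");
		return 0;
	}

	__cpuid(0x8000001f, eax, ebx, ecx, edx);
	printf("SME supported:  %s\n", (eax & 1) ? "yes" : "no");
	printf("SEV supported:  %s\n", (eax & 2) ? "yes" : "no");
	printf("C-bit position: %u\n", ebx & 0x3f);
	return 0;
}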
arch/x86/mm/mem_encrypt_identity.c: +564 −0

New file (the mem_encrypt_identity.o source added in the Makefile hunk above). Its diff is collapsed here because the preview exceeds the size limit.