
Commit 1958b5fc authored by Tom Lendacky, committed by Thomas Gleixner

x86/boot: Add early boot support when running with SEV active



Early in the boot process, add checks to determine if the kernel is
running with Secure Encrypted Virtualization (SEV) active.

Checking for SEV requires verifying that the kernel is running under a
hypervisor (CPUID 0x00000001, ECX bit 31), that the SEV feature is available
(CPUID 0x8000001f, EAX bit 1), and then reading a non-interceptable SEV MSR
(0xc0010131, bit 0).
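
For illustration, the same three-step probe can be sketched in C; this is a
minimal sketch, not code from this patch, and rdmsr() is a hypothetical
stand-in for the kernel's MSR accessor:

#include <stdint.h>
#include <cpuid.h>			/* __get_cpuid() from GCC/Clang */

#define MSR_AMD64_SEV		0xc0010131
#define MSR_AMD64_SEV_ENABLED	(1ULL << 0)

extern uint64_t rdmsr(uint32_t msr);	/* hypothetical MSR read helper */

static int sev_active_sketch(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* 1. Running under a hypervisor? CPUID 0x00000001, ECX bit 31. */
	if (!__get_cpuid(0x00000001, &eax, &ebx, &ecx, &edx) ||
	    !(ecx & (1u << 31)))
		return 0;

	/*
	 * 2. SEV feature available? CPUID 0x8000001f, EAX bit 1.
	 * __get_cpuid() also checks the highest supported extended leaf,
	 * which the assembly added below does by hand.
	 */
	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx) ||
	    !(eax & (1u << 1)))
		return 0;

	/* 3. SEV enabled for this guest? MSR 0xc0010131, bit 0. */
	return !!(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
}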

This check is required so that, during early compressed kernel booting, the
pagetables (both the boot pagetables and, if enabled, the KASLR pagetables)
are updated to include the encryption mask, ensuring that when the kernel is
decompressed into encrypted memory, it can boot properly.
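
Concretely, the encryption-bit position reported in CPUID Fn8000_001F[EBX]
bits 5:0 becomes a 64-bit mask that is OR-ed into the page-table entries; a
minimal sketch, with illustrative helper names that are not part of this
patch:

#include <stdint.h>

/* Derive the encryption mask from CPUID Fn8000_001F[EBX] bits 5:0. */
static uint64_t sev_me_mask_from_ebx(uint32_t ebx)
{
	unsigned int c_bit = ebx & 0x3f;	/* encryption bit position */

	return 1ULL << c_bit;
}

/* A mapping whose entries carry the mask is treated as encrypted. */
static uint64_t make_encrypted_entry(uint64_t entry, uint64_t me_mask)
{
	return entry | me_mask;
}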

After the kernel is decompressed and continues booting, the same logic is
used to check whether SEV is active and to set a flag indicating so. This
makes it possible to distinguish between SME and SEV, each of which has
unique differences in how certain things are handled: e.g. DMA (always
bounce-buffered with SEV) or EFI tables (always accessed decrypted with SME).

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Tested-by: Borislav Petkov <bp@suse.de>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: kvm@vger.kernel.org
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Link: https://lkml.kernel.org/r/20171020143059.3291-13-brijesh.singh@amd.com
parent d7b417fa
arch/x86/boot/compressed/Makefile +1 −0
@@ -78,6 +78,7 @@ vmlinux-objs-$(CONFIG_EARLY_PRINTK) += $(obj)/early_serial_console.o
 vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o
 ifdef CONFIG_X86_64
 	vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o
+	vmlinux-objs-y += $(obj)/mem_encrypt.o
 endif
 
 $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
arch/x86/boot/compressed/head_64.S +16 −0
@@ -131,6 +131,19 @@ ENTRY(startup_32)
 /*
  * Build early 4G boot pagetable
  */
+	/*
+	 * If SEV is active then set the encryption mask in the page tables.
+	 * This will insure that when the kernel is copied and decompressed
+	 * it will be done so encrypted.
+	 */
+	call	get_sev_encryption_bit
+	xorl	%edx, %edx
+	testl	%eax, %eax
+	jz	1f
+	subl	$32, %eax	/* Encryption bit is always above bit 31 */
+	bts	%eax, %edx	/* Set encryption mask for page tables */
+1:
+
 	/* Initialize Page tables to 0 */
 	leal	pgtable(%ebx), %edi
 	xorl	%eax, %eax
@@ -141,12 +154,14 @@ ENTRY(startup_32)
 	leal	pgtable + 0(%ebx), %edi
 	leal	0x1007 (%edi), %eax
 	movl	%eax, 0(%edi)
+	addl	%edx, 4(%edi)
 
 	/* Build Level 3 */
 	leal	pgtable + 0x1000(%ebx), %edi
 	leal	0x1007(%edi), %eax
 	movl	$4, %ecx
 1:	movl	%eax, 0x00(%edi)
+	addl	%edx, 0x04(%edi)
 	addl	$0x00001000, %eax
 	addl	$8, %edi
 	decl	%ecx
@@ -157,6 +172,7 @@ ENTRY(startup_32)
 	movl	$0x00000183, %eax
 	movl	$2048, %ecx
 1:	movl	%eax, 0(%edi)
+	addl	%edx, 4(%edi)
 	addl	$0x00200000, %eax
 	addl	$8, %edi
 	decl	%ecx
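
Note how the 32-bit startup code copes with a 64-bit mask: because the
encryption bit is always above bit 31, only the upper dword of the mask can
be non-zero, so %edx carries just that upper half and is added to the high
32 bits of each 8-byte entry (offset 4). The same arithmetic as a C sketch
(not code from this patch):

#include <stdint.h>

/*
 * Upper dword of the encryption mask, as kept in %edx above; c_bit is
 * the position from get_sev_encryption_bit(), 0 when SEV is inactive
 * and otherwise always greater than 31.
 */
static uint32_t enc_mask_high(unsigned int c_bit)
{
	return c_bit ? 1u << (c_bit - 32) : 0;
}

/*
 * Each page-table entry is written as two dwords; adding the mask to
 * the high dword mirrors the "addl %edx, 4(%edi)" lines in the hunks.
 */
static void write_entry(uint32_t entry[2], uint32_t low, uint32_t mask_hi)
{
	entry[0] = low;
	entry[1] += mask_hi;
}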
arch/x86/boot/compressed/mem_encrypt.S (new file) +120 −0
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>

#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/asm-offsets.h>

	.text
	.code32
ENTRY(get_sev_encryption_bit)
	xor	%eax, %eax

#ifdef CONFIG_AMD_MEM_ENCRYPT
	push	%ebx
	push	%ecx
	push	%edx
	push	%edi

	/*
	 * RIP-relative addressing is needed to access the encryption bit
	 * variable. Since we are running in 32-bit mode we need this call/pop
	 * sequence to get the proper relative addressing.
	 */
	call	1f
1:	popl	%edi
	subl	$1b, %edi

	movl	enc_bit(%edi), %eax
	cmpl	$0, %eax
	jge	.Lsev_exit

	/* Check if running under a hypervisor */
	movl	$1, %eax
	cpuid
	bt	$31, %ecx		/* Check the hypervisor bit */
	jnc	.Lno_sev

	movl	$0x80000000, %eax	/* CPUID to check the highest leaf */
	cpuid
	cmpl	$0x8000001f, %eax	/* See if 0x8000001f is available */
	jb	.Lno_sev

	/*
	 * Check for the SEV feature:
	 *   CPUID Fn8000_001F[EAX] - Bit 1
	 *   CPUID Fn8000_001F[EBX] - Bits 5:0
	 *     Pagetable bit position used to indicate encryption
	 */
	movl	$0x8000001f, %eax
	cpuid
	bt	$1, %eax		/* Check if SEV is available */
	jnc	.Lno_sev

	movl	$MSR_AMD64_SEV, %ecx	/* Read the SEV MSR */
	rdmsr
	bt	$MSR_AMD64_SEV_ENABLED_BIT, %eax	/* Check if SEV is active */
	jnc	.Lno_sev

	movl	%ebx, %eax
	andl	$0x3f, %eax		/* Return the encryption bit location */
	movl	%eax, enc_bit(%edi)
	jmp	.Lsev_exit

.Lno_sev:
	xor	%eax, %eax
	movl	%eax, enc_bit(%edi)

.Lsev_exit:
	pop	%edi
	pop	%edx
	pop	%ecx
	pop	%ebx

#endif	/* CONFIG_AMD_MEM_ENCRYPT */

	ret
ENDPROC(get_sev_encryption_bit)

	.code64
ENTRY(get_sev_encryption_mask)
	xor	%rax, %rax

#ifdef CONFIG_AMD_MEM_ENCRYPT
	push	%rbp
	push	%rdx

	movq	%rsp, %rbp		/* Save current stack pointer */

	call	get_sev_encryption_bit	/* Get the encryption bit position */
	testl	%eax, %eax
	jz	.Lno_sev_mask

	xor	%rdx, %rdx
	bts	%rax, %rdx		/* Create the encryption mask */
	mov	%rdx, %rax		/* ... and return it */

.Lno_sev_mask:
	movq	%rbp, %rsp		/* Restore original stack pointer */

	pop	%rdx
	pop	%rbp
#endif

	ret
ENDPROC(get_sev_encryption_mask)

	.data
enc_bit:
	.int	0xffffffff
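
Two details in this file are easy to miss: the call/pop pair computes the
delta between run-time and link-time addresses so the 32-bit code can reach
enc_bit without RIP-relative addressing, and enc_bit (initialized to -1)
caches the probe result so the CPUID/RDMSR sequence runs only once. The
caching and mask construction restated as a C sketch; probe_sev_c_bit() is
a hypothetical name for the CPUID/MSR probe above:

#include <stdint.h>

static int enc_bit = -1;		/* mirrors "enc_bit: .int 0xffffffff" */

extern int probe_sev_c_bit(void);	/* hypothetical CPUID/MSR probe */

/* Probe once, then serve the cached bit position (0 means no SEV). */
static int get_sev_encryption_bit_c(void)
{
	if (enc_bit < 0)
		enc_bit = probe_sev_c_bit();

	return enc_bit;
}

/*
 * 64-bit page-table mask, as get_sev_encryption_mask() builds above
 * with "bts %rax, %rdx".
 */
static uint64_t get_sev_encryption_mask_c(void)
{
	int c_bit = get_sev_encryption_bit_c();

	return c_bit ? 1ULL << c_bit : 0;
}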
arch/x86/boot/compressed/misc.h +2 −0
@@ -109,4 +109,6 @@ static inline void console_init(void)
 { }
 #endif
 
+unsigned long get_sev_encryption_mask(void);
+
 #endif
arch/x86/boot/compressed/pagetable.c +5 −3
@@ -77,16 +77,18 @@ static unsigned long top_level_pgt;
  * Mapping information structure passed to kernel_ident_mapping_init().
  * Due to relocation, pointers must be assigned at run time not build time.
  */
-static struct x86_mapping_info mapping_info = {
-	.page_flag       = __PAGE_KERNEL_LARGE_EXEC,
-};
+static struct x86_mapping_info mapping_info;
 
 /* Locates and clears a region for a new top level page table. */
 void initialize_identity_maps(void)
 {
+	unsigned long sev_me_mask = get_sev_encryption_mask();
+
 	/* Init mapping_info with run-time function/buffer pointers. */
 	mapping_info.alloc_pgt_page = alloc_pgt_page;
 	mapping_info.context = &pgt_data;
+	mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sev_me_mask;
+	mapping_info.kernpg_flag = _KERNPG_TABLE | sev_me_mask;
 
 	/*
 	 * It should be impossible for this not to already be true,