
Commit 28a1f3ac authored by Junaid Shahid, committed by Paolo Bonzini

kvm: x86: Set highest physical address bits in non-present/reserved SPTEs



Always set the 5 uppermost supported physical address bits to 1 for SPTEs
that are marked as non-present or reserved, to make them unusable for
L1TF attacks from the guest. Currently, this just applies to MMIO SPTEs.
(We do not need to mark PTEs that are completely 0, as physical page 0
is already reserved.)

This allows mitigation of L1TF without disabling hyper-threading by using
shadow paging mode instead of EPT.
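To make the bit arithmetic concrete, here is a small user-space sketch (not
part of the patch; the 46-bit width is an assumed example value): with
shadow_nonpresent_or_rsvd_mask_len = 5, a CPU reporting 46 physical address
bits gets bits 41..45 forced to 1, so any non-present/reserved SPTE points
into the top 1/32 of the physical address space, which such a machine is
assumed not to populate.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const int phys_bits = 46; /* assumed example of boot_cpu_data.x86_phys_bits */
	const int len = 5;        /* shadow_nonpresent_or_rsvd_mask_len */

	/* Bits (phys_bits - len) .. (phys_bits - 1), i.e. bits 41..45 here. */
	uint64_t mask = ((1ULL << len) - 1) << (phys_bits - len);

	/* Any physical address with these bits set is >= mask, i.e. in the
	 * uppermost 1/32 of the 46-bit physical address space. */
	printf("mask = 0x%016llx (lowest poisoned address = %llu TiB)\n",
	       (unsigned long long)mask,
	       (unsigned long long)(mask >> 40));
	return 0;
}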

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent fd8ca6da
arch/x86/kvm/mmu.c  +38 −5
@@ -238,6 +238,17 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
						    PT64_EPT_EXECUTABLE_MASK;
static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;

+/*
+ * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
+ * to guard against L1TF attacks.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
+
+/*
+ * The number of high-order 1 bits to use in the mask above.
+ */
+static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
+
static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -327,9 +338,13 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
{
	unsigned int gen = kvm_current_mmio_generation(vcpu);
	u64 mask = generation_mmio_spte_mask(gen);
+	u64 gpa = gfn << PAGE_SHIFT;

	access &= ACC_WRITE_MASK | ACC_USER_MASK;
-	mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
+	mask |= shadow_mmio_value | access;
+	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
+	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
+		<< shadow_nonpresent_or_rsvd_mask_len;

	trace_mark_mmio_spte(sptep, gfn, access, gen);
	mmu_spte_set(sptep, mask);
@@ -342,8 +357,14 @@ static bool is_mmio_spte(u64 spte)

static gfn_t get_mmio_spte_gfn(u64 spte)
{
-	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
-	return (spte & ~mask) >> PAGE_SHIFT;
+	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
+		   shadow_nonpresent_or_rsvd_mask;
+	u64 gpa = spte & ~mask;
+
+	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
+	       & shadow_nonpresent_or_rsvd_mask;
+
+	return gpa >> PAGE_SHIFT;
}

static unsigned get_mmio_spte_access(u64 spte)
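Taken together, the two hunks above split the GPA across the MMIO SPTE: GPA
bits that fall inside the always-set mask region would otherwise be lost, so
mark_mmio_spte() stashes a copy of them shadow_nonpresent_or_rsvd_mask_len
bits higher (bits 46..50 in the 46-bit example), and get_mmio_spte_gfn()
shifts them back down. Below is a minimal user-space round-trip sketch of
that scheme, assuming a 46-bit CPU and ignoring the generation, access, and
shadow_mmio_value bits that the real SPTE also carries:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LEN 5
/* Example mask for a CPU with 46 physical address bits: bits 41..45. */
static const uint64_t NP_MASK = ((1ULL << LEN) - 1) << (46 - LEN);

/* Simplified mark_mmio_spte(): the SPTE holds only the GPA here. */
static uint64_t encode(uint64_t gpa)
{
	uint64_t spte = gpa | NP_MASK;	/* force the high bits to 1 */
	spte |= (gpa & NP_MASK) << LEN;	/* stash the clobbered GPA bits */
	return spte;
}

/* Simplified get_mmio_spte_gfn(): recover the original GPA. */
static uint64_t decode(uint64_t spte)
{
	/* Clear both the always-set region and the stash region ... */
	uint64_t gpa = spte & ~(NP_MASK | (NP_MASK << LEN));
	/* ... then restore the stashed bits to their original position. */
	gpa |= (spte >> LEN) & NP_MASK;
	return gpa;
}

int main(void)
{
	/* GPA with one bit inside the masked region (bit 43) and one below. */
	uint64_t gpa = (1ULL << 43) | 0x1000;

	assert(decode(encode(gpa)) == gpa);
	printf("round trip ok: 0x%llx\n", (unsigned long long)gpa);
	return 0;
}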
@@ -400,7 +421,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

-static void kvm_mmu_clear_all_pte_masks(void)
+static void kvm_mmu_reset_all_pte_masks(void)
{
	shadow_user_mask = 0;
	shadow_accessed_mask = 0;
@@ -410,6 +431,18 @@ static void kvm_mmu_clear_all_pte_masks(void)
	shadow_mmio_mask = 0;
	shadow_present_mask = 0;
	shadow_acc_track_mask = 0;
+
+	/*
+	 * If the CPU has 46 or less physical address bits, then set an
+	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
+	 * assumed that the CPU is not vulnerable to L1TF.
+	 */
+	if (boot_cpu_data.x86_phys_bits <
+	    52 - shadow_nonpresent_or_rsvd_mask_len)
+		shadow_nonpresent_or_rsvd_mask =
+			rsvd_bits(boot_cpu_data.x86_phys_bits -
+				  shadow_nonpresent_or_rsvd_mask_len,
+				  boot_cpu_data.x86_phys_bits - 1);
}

static int is_cpuid_PSE36(void)
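As a hedged illustration of the guard above (rsvd_bits() is reimplemented
here so the sketch builds stand-alone): a host reporting 46 physical address
bits gets a mask over bits 41..45, while one reporting 47 or more leaves the
mask at 0, i.e. the CPU is assumed not vulnerable and the mitigation stays
disabled.

#include <stdint.h>
#include <stdio.h>

/* Reimplementation of KVM's rsvd_bits(s, e): bits s..e inclusive set. */
static uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

static uint64_t nonpresent_or_rsvd_mask(int phys_bits)
{
	const int len = 5;	/* shadow_nonpresent_or_rsvd_mask_len */

	if (phys_bits < 52 - len)
		return rsvd_bits(phys_bits - len, phys_bits - 1);
	return 0;	/* 47+ bits: assumed not vulnerable to L1TF */
}

int main(void)
{
	printf("46-bit host: 0x%016llx\n",
	       (unsigned long long)nonpresent_or_rsvd_mask(46));
	printf("48-bit host: 0x%016llx\n",
	       (unsigned long long)nonpresent_or_rsvd_mask(48));
	return 0;
}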
@@ -5819,7 +5852,7 @@ int kvm_mmu_module_init(void)
{
	int ret = -ENOMEM;

-	kvm_mmu_clear_all_pte_masks();
+	kvm_mmu_reset_all_pte_masks();

	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
					    sizeof(struct pte_list_desc),
arch/x86/kvm/x86.c  +6 −2
@@ -6536,8 +6536,12 @@ static void kvm_set_mmio_spte_mask(void)
	 * Set the reserved bits and the present bit of an paging-structure
	 * entry to generate page fault with PFER.RSV = 1.
	 */
-	 /* Mask the reserved physical address bits. */
-	mask = rsvd_bits(maxphyaddr, 51);
+
+	/*
+	 * Mask the uppermost physical address bit, which would be reserved as
+	 * long as the supported physical address width is less than 52.
+	 */
+	mask = 1ull << 51;

	/* Set the present bit. */
	mask |= 1ull;
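The resulting non-EPT MMIO mask is easy to verify by hand: bit 51 plus the
present bit gives 0x8000000000000001, and bit 51 is reserved in a present
paging-structure entry whenever MAXPHYADDR is below 52, so a guest access
through such an entry faults with PFER.RSV = 1. A trivial stand-alone check
(a sketch, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = 1ULL << 51;	/* reserved while MAXPHYADDR < 52 */

	mask |= 1ULL;			/* present bit */
	printf("mmio spte mask = 0x%016llx\n", (unsigned long long)mask);
	/* Prints: mmio spte mask = 0x8000000000000001 */
	return 0;
}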