
Commit 0cb2501e authored by Marcelo Tosatti, committed by Greg Kroah-Hartman

KVM: x86: handle invalid root_hpa everywhere



commit 37f6a4e237303549c8676dfe1fd1991ceab512eb upstream.

Rom Freiman <rom@stratoscale.com> notes other code paths vulnerable to the
bug fixed by commit 989c6b34f6a9480e397b.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Josh Boyer <jwboyer@fedoraproject.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 9bf49602
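
Every hunk in the diff below applies the same guard: return early (or warn and bail out, in FNAME(invlpg)) when vcpu->arch.mmu.root_hpa is not a valid page, so a lockless shadow-page walk never starts from a torn-down root. The following is a minimal standalone sketch of that pattern, using the KVM-style INVALID_PAGE/VALID_PAGE definitions; the mmu_stub type and shadow_walk_safe() helper are simplified stand-ins for illustration, not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t hpa_t;

/* Mirrors the KVM convention: an all-ones physical address means "no root". */
#define INVALID_PAGE	(~(hpa_t)0)
#define VALID_PAGE(x)	((x) != INVALID_PAGE)

/* Simplified stand-in for vcpu->arch.mmu (illustrative only). */
struct mmu_stub {
	hpa_t root_hpa;
};

/*
 * The pattern added by this patch: bail out before walking shadow page
 * tables if the root has been freed (e.g. by a concurrent MMU teardown).
 */
static bool shadow_walk_safe(const struct mmu_stub *mmu)
{
	if (!VALID_PAGE(mmu->root_hpa))
		return false;	/* nothing to walk; caller falls back or retries */

	/* ... safe to walk from mmu->root_hpa here ... */
	return true;
}

int main(void)
{
	struct mmu_stub live = { .root_hpa = 0x1000 };
	struct mmu_stub torn_down = { .root_hpa = INVALID_PAGE };

	printf("live root walkable:      %d\n", shadow_walk_safe(&live));
	printf("torn-down root walkable: %d\n", shadow_walk_safe(&torn_down));
	return 0;
}
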
arch/x86/kvm/mmu.c  +9 −0
@@ -2751,6 +2751,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	bool ret = false;
 	u64 spte = 0ull;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return false;
+
 	if (!page_fault_can_be_fast(vcpu, error_code))
 		return false;
 
@@ -3142,6 +3145,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
 	struct kvm_shadow_walk_iterator iterator;
 	u64 spte = 0ull;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return spte;
+
 	walk_shadow_page_lockless_begin(vcpu);
 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
 		if (!is_shadow_present_pte(spte))
@@ -4332,6 +4338,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
 	u64 spte;
 	int nr_sptes = 0;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return nr_sptes;
+
 	walk_shadow_page_lockless_begin(vcpu);
 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
 		sptes[iterator.level-1] = spte;
arch/x86/kvm/paging_tmpl.h  +8 −0
@@ -423,6 +423,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (FNAME(gpte_changed)(vcpu, gw, top_level))
 		goto out_gpte_changed;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		goto out_gpte_changed;
+
 	for (shadow_walk_init(&it, vcpu, addr);
 	     shadow_walk_okay(&it) && it.level > gw->level;
 	     shadow_walk_next(&it)) {
@@ -671,6 +674,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	 */
 	mmu_topup_memory_caches(vcpu);
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+		WARN_ON(1);
+		return;
+	}
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 	for_each_shadow_entry(vcpu, gva, iterator) {
 		level = iterator.level;