
Commit 8ab3c471 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Consolidate kvm_mmu_zap_all() and kvm_mmu_zap_mmio_sptes()

...via a new helper, __kvm_mmu_zap_all().  An alternative to passing a
'bool mmio_only' would be to pass a callback function to filter the
shadow page, i.e. to make __kvm_mmu_zap_all() generic and reusable, but
zapping all shadow pages is a last resort, i.e. making the helper less
extensible is a feature of sorts.  And the explicit MMIO parameter makes
it easy to preserve the WARN_ON_ONCE() if a restart is triggered when
zapping MMIO sptes.
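
Below is a minimal, self-contained sketch of the pattern this commit applies: a
single worker that takes a 'bool mmio_only' filter, fronted by thin wrappers.
All names here (struct toy_page, __zap_all(), zap_one(), ...) are hypothetical
stand-ins for illustration, not the kernel's kvm_mmu_page machinery.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_page {
	bool mmio_cached;		/* page caches an MMIO spte */
	struct toy_page *next;
};

static void zap_one(struct toy_page *p)
{
	printf("zapping page (mmio_cached=%d)\n", p->mmio_cached);
}

static void __zap_all(struct toy_page *head, bool mmio_only)
{
	for (struct toy_page *p = head; p; p = p->next) {
		/* On the MMIO-only path, skip pages with no cached MMIO spte. */
		if (mmio_only && !p->mmio_cached)
			continue;
		zap_one(p);
	}
}

/* Thin wrappers, mirroring kvm_mmu_zap_all() and the old kvm_mmu_zap_mmio_sptes(). */
static void zap_all(struct toy_page *head)
{
	__zap_all(head, false);
}

static void zap_mmio_sptes(struct toy_page *head)
{
	__zap_all(head, true);
}

int main(void)
{
	struct toy_page b = { .mmio_cached = false, .next = NULL };
	struct toy_page a = { .mmio_cached = true, .next = &b };

	zap_mmio_sptes(&a);	/* zaps only 'a' */
	zap_all(&a);		/* zaps both */
	return 0;
}

The wrapper pair mirrors how kvm_mmu_zap_all() becomes a one-line call into
__kvm_mmu_zap_all(kvm, false), while the MMIO-generation wraparound path
passes true.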

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 24efe61f
+10 −23
@@ -5840,7 +5840,7 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
 
-void kvm_mmu_zap_all(struct kvm *kvm)
+static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
 {
 	struct kvm_mmu_page *sp, *node;
 	LIST_HEAD(invalid_list);
@@ -5849,30 +5849,12 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	spin_lock(&kvm->mmu_lock);
 restart:
 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
-		if (sp->role.invalid && sp->root_count)
+		if (mmio_only && !sp->mmio_cached)
 			continue;
-		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign) ||
-		    cond_resched_lock(&kvm->mmu_lock))
-			goto restart;
-	}
-
-	kvm_mmu_commit_zap_page(kvm, &invalid_list);
-	spin_unlock(&kvm->mmu_lock);
-}
-
-static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
-{
-	struct kvm_mmu_page *sp, *node;
-	LIST_HEAD(invalid_list);
-	int ign;
-
-	spin_lock(&kvm->mmu_lock);
-restart:
-	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
-		if (!sp->mmio_cached)
+		if (sp->role.invalid && sp->root_count)
 			continue;
 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) {
-			WARN_ON_ONCE(1);
+			WARN_ON_ONCE(mmio_only);
 			goto restart;
 		}
 		if (cond_resched_lock(&kvm->mmu_lock))
@@ -5883,6 +5865,11 @@ static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
 	spin_unlock(&kvm->mmu_lock);
 }
 
+void kvm_mmu_zap_all(struct kvm *kvm)
+{
+	return __kvm_mmu_zap_all(kvm, false);
+}
+
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 {
 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
@@ -5904,7 +5891,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 	 */
 	if (unlikely(gen == 0)) {
 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
-		kvm_mmu_zap_mmio_sptes(kvm);
+		__kvm_mmu_zap_all(kvm, true);
 	}
 }
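
For contrast, here is a hypothetical sketch of the callback-based filtering the
commit message considers and rejects. It is more generic and reusable, which is
exactly the property the author did not want for a last-resort zap-everything
path; a generic filter also leaves no natural home for the MMIO-only
WARN_ON_ONCE(). Again, every name below is an illustrative stand-in.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_page {
	bool mmio_cached;
	struct toy_page *next;
};

typedef bool (*zap_filter_t)(struct toy_page *p);

static bool match_mmio(struct toy_page *p)
{
	return p->mmio_cached;
}

static void zap_one(struct toy_page *p)
{
	printf("zapping page (mmio_cached=%d)\n", p->mmio_cached);
}

/* Generic worker: zaps every page the caller-supplied filter accepts;
 * a NULL filter means "zap everything". */
static void zap_filtered(struct toy_page *head, zap_filter_t filter)
{
	for (struct toy_page *p = head; p; p = p->next)
		if (!filter || filter(p))
			zap_one(p);
}

int main(void)
{
	struct toy_page b = { .mmio_cached = false, .next = NULL };
	struct toy_page a = { .mmio_cached = true, .next = &b };

	zap_filtered(&a, match_mmio);	/* MMIO-only zap */
	zap_filtered(&a, NULL);		/* zap everything */
	return 0;
}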