
Commit ea145aac authored by Sean Christopherson, committed by Paolo Bonzini

Revert "KVM: MMU: fast invalidate all pages"

Remove x86 KVM's fast invalidate mechanism, i.e. revert all patches
from the original series[1], now that all users of the fast invalidate
mechanism are gone.

This reverts commit 5304b8d3.

[1] https://lkml.kernel.org/r/1369960590-14138-1-git-send-email-xiaoguangrong@linux.vnet.ibm.com



Cc: Xiao Guangrong <guangrong.xiao@gmail.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5d6317ca
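
For context, the series being reverted implemented "fast invalidate" by stamping each shadow page with a generation number (sp->mmu_valid_gen) and invalidating all pages at once by bumping the per-VM counter kvm->arch.mmu_valid_gen; pages whose generation no longer matched were treated as obsolete and zapped lazily with lock breaking. The stand-alone C sketch below illustrates only that generation-bump idea; the struct mmu / struct mmu_page types and the is_obsolete() / invalidate_all() helpers are simplified stand-ins, not the real KVM structures or functions.

/*
 * Stand-alone sketch of the generation-number idea (illustration only;
 * these types and helpers are simplified stand-ins, not KVM's).
 */
#include <stdbool.h>
#include <stdio.h>

struct mmu_page {
	unsigned long mmu_valid_gen;	/* generation the page was created in */
};

struct mmu {
	unsigned long mmu_valid_gen;	/* current generation for the whole VM */
};

/* Mirrors the removed is_obsolete_sp(): a page is stale if generations differ. */
static bool is_obsolete(const struct mmu *mmu, const struct mmu_page *sp)
{
	return sp->mmu_valid_gen != mmu->mmu_valid_gen;
}

/*
 * Mirrors the removed kvm_mmu_invalidate_zap_all_pages(): one counter bump
 * marks every existing page obsolete; the real code then zapped obsolete
 * pages lazily under mmu_lock, breaking the lock as needed.
 */
static void invalidate_all(struct mmu *mmu)
{
	mmu->mmu_valid_gen++;
}

int main(void)
{
	struct mmu mmu = { .mmu_valid_gen = 0 };
	struct mmu_page sp = { .mmu_valid_gen = mmu.mmu_valid_gen };

	printf("obsolete before bump: %d\n", is_obsolete(&mmu, &sp));	/* 0 */
	invalidate_all(&mmu);
	printf("obsolete after bump:  %d\n", is_obsolete(&mmu, &sp));	/* 1 */
	return 0;
}

The appeal of the scheme was that invalidating everything cost a single counter increment rather than an eager walk of every shadow page; the price was the extra generation bookkeeping shown in the diffs below, which this commit removes now that nothing uses it.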
+0 −2
@@ -334,7 +334,6 @@ struct kvm_mmu_page {
 	int root_count;          /* Currently serving as active root */
 	unsigned int unsync_children;
 	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
-	unsigned long mmu_valid_gen;
 	DECLARE_BITMAP(unsync_child_bitmap, 512);
 
 #ifdef CONFIG_X86_32
@@ -845,7 +844,6 @@ struct kvm_arch {
 	unsigned int n_requested_mmu_pages;
 	unsigned int n_max_mmu_pages;
 	unsigned int indirect_shadow_pages;
-	unsigned long mmu_valid_gen;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
 	 * Hash table of struct kvm_mmu_page.
+1 −97
@@ -2060,12 +2060,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
 	if (!direct)
 		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
-
-	/*
-	 * The active_mmu_pages list is the FIFO list, do not move the
-	 * page until it is zapped. kvm_zap_obsolete_pages depends on
-	 * this feature. See the comments in kvm_zap_obsolete_pages().
-	 */
 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
 	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
 	return sp;
@@ -2214,7 +2208,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 #define for_each_valid_sp(_kvm, _sp, _gfn)				\
 	hlist_for_each_entry(_sp,					\
 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
-		if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) {    \
+		if ((_sp)->role.invalid) {    \
 		} else
 
 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
@@ -2266,11 +2260,6 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
 static void mmu_audit_disable(void) { }
 #endif
 
-static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-	return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
-}
-
 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			 struct list_head *invalid_list)
 {
@@ -2495,7 +2484,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
 			flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
 	}
-	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
 	clear_page(sp->spt);
 	trace_kvm_mmu_get_page(sp, true);
 
@@ -4206,14 +4194,6 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 			return false;
 
 		if (cached_root_available(vcpu, new_cr3, new_role)) {
-			/*
-			 * It is possible that the cached previous root page is
-			 * obsolete because of a change in the MMU
-			 * generation number. However, that is accompanied by
-			 * KVM_REQ_MMU_RELOAD, which will free the root that we
-			 * have set here and allocate a new one.
-			 */
-
 			kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
 			if (!skip_tlb_flush) {
 				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
@@ -5865,82 +5845,6 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	spin_unlock(&kvm->mmu_lock);
 }
 
-static void kvm_zap_obsolete_pages(struct kvm *kvm)
-{
-	struct kvm_mmu_page *sp, *node;
-	LIST_HEAD(invalid_list);
-
-restart:
-	list_for_each_entry_safe_reverse(sp, node,
-	      &kvm->arch.active_mmu_pages, link) {
-		/*
-		 * No obsolete page exists before new created page since
-		 * active_mmu_pages is the FIFO list.
-		 */
-		if (!is_obsolete_sp(kvm, sp))
-			break;
-
-		/*
-		 * Do not repeatedly zap a root page to avoid unnecessary
-		 * KVM_REQ_MMU_RELOAD, otherwise we may not be able to
-		 * progress:
-		 *    vcpu 0                        vcpu 1
-		 *                         call vcpu_enter_guest():
-		 *                            1): handle KVM_REQ_MMU_RELOAD
-		 *                                and require mmu-lock to
-		 *                                load mmu
-		 * repeat:
-		 *    1): zap root page and
-		 *        send KVM_REQ_MMU_RELOAD
-		 *
-		 *    2): if (cond_resched_lock(mmu-lock))
-		 *
-		 *                            2): hold mmu-lock and load mmu
-		 *
-		 *                            3): see KVM_REQ_MMU_RELOAD bit
-		 *                                on vcpu->requests is set
-		 *                                then return 1 to call
-		 *                                vcpu_enter_guest() again.
-		 *            goto repeat;
-		 *
-		 * Since we are reversely walking the list and the invalid
-		 * list will be moved to the head, skip the invalid page
-		 * can help us to avoid the infinity list walking.
-		 */
-		if (sp->role.invalid)
-			continue;
-
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-			kvm_mmu_commit_zap_page(kvm, &invalid_list);
-			cond_resched_lock(&kvm->mmu_lock);
-			goto restart;
-		}
-
-		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
-			goto restart;
-	}
-
-	kvm_mmu_commit_zap_page(kvm, &invalid_list);
-}
-
-/*
- * Fast invalidate all shadow pages and use lock-break technique
- * to zap obsolete pages.
- *
- * It's required when memslot is being deleted or VM is being
- * destroyed, in these cases, we should ensure that KVM MMU does
- * not use any resource of the being-deleted slot or all slots
- * after calling the function.
- */
-void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
-{
-	spin_lock(&kvm->mmu_lock);
-	kvm->arch.mmu_valid_gen++;
-
-	kvm_zap_obsolete_pages(kvm);
-	spin_unlock(&kvm->mmu_lock);
-}
-
 static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
+0 −1
@@ -203,7 +203,6 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	return -(u32)fault & errcode;
 }
 
-void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
 
 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);