
Commit a4a8e6f7 authored by Xiao Guangrong, committed by Avi Kivity

KVM: MMU: remove 'clear_unsync' parameter

Remove it, since its value can be derived from sp->unsync.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 9bdbba13
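
The redundancy is easy to see: __kvm_sync_page clears sp->unsync (via kvm_unlink_unsync_page) exactly when clear_unsync is set, and it does so before the sync_page callback runs, so inside the callback !clear_unsync and sp->unsync always agree. Below is a minimal stand-alone C sketch of that invariant; struct sp_model and unlink_unsync are toy stand-ins for illustration, not kernel types:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the shadow page and the unlink step; not kernel code. */
struct sp_model {
	bool unsync;
};

/* Models the one effect of kvm_unlink_unsync_page that matters here. */
static void unlink_unsync(struct sp_model *sp)
{
	sp->unsync = false;
}

int main(void)
{
	for (int clear_unsync = 0; clear_unsync <= 1; clear_unsync++) {
		/* sync is only attempted on pages that are currently unsync */
		struct sp_model sp = { .unsync = true };

		/* mirrors __kvm_sync_page: conditional unlink, then the callback */
		if (clear_unsync)
			unlink_unsync(&sp);

		bool old_test = !clear_unsync;	/* what FNAME(sync_page) used to check */
		bool new_test = sp.unsync;	/* what it checks after this patch */
		printf("clear_unsync=%d: old_test=%d new_test=%d\n",
		       clear_unsync, old_test, new_test);
	}
	return 0;
}

Both iterations print matching old/new values, which is what lets the argument be dropped at every call site.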
arch/x86/include/asm/kvm_host.h  +1 −1
@@ -250,7 +250,7 @@ struct kvm_mmu {
 	void (*prefetch_page)(struct kvm_vcpu *vcpu,
 			      struct kvm_mmu_page *page);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
-			 struct kvm_mmu_page *sp, bool clear_unsync);
+			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
 	hpa_t root_hpa;
 	int root_level;
arch/x86/kvm/mmu.c  +4 −4
@@ -1156,7 +1156,7 @@ static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
 }
 
 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
-			       struct kvm_mmu_page *sp, bool clear_unsync)
+			       struct kvm_mmu_page *sp)
 {
 	return 1;
 }
@@ -1286,7 +1286,7 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	if (clear_unsync)
 		kvm_unlink_unsync_page(vcpu->kvm, sp);
 
-	if (vcpu->arch.mmu.sync_page(vcpu, sp, clear_unsync)) {
+	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
 		return 1;
 	}
@@ -1327,12 +1327,12 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 			continue;
 
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+		kvm_unlink_unsync_page(vcpu->kvm, s);
 		if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
-			(vcpu->arch.mmu.sync_page(vcpu, s, true))) {
+			(vcpu->arch.mmu.sync_page(vcpu, s))) {
 			kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
 			continue;
 		}
-		kvm_unlink_unsync_page(vcpu->kvm, s);
 		flush = true;
 	}
 
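The kvm_sync_pages hunk deserves a remark: that call site always passed clear_unsync=true, and kvm_unlink_unsync_page is what clears s->unsync. Because the callback now reads s->unsync instead of receiving the flag, the unlink must be hoisted above the call; left in its old position after the call, the callback would see s->unsync == 1 and take what used to be the clear_unsync=false path.
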
arch/x86/kvm/paging_tmpl.h  +2 −3
@@ -740,8 +740,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
  */
-static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			    bool clear_unsync)
+static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	int i, offset, nr_present;
 	bool host_writable;
@@ -781,7 +780,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			u64 nonpresent;
 
 			if (rsvd_bits_set || is_present_gpte(gpte) ||
-			      !clear_unsync)
+			      sp->unsync)
 				nonpresent = shadow_trap_nonpresent_pte;
 			else
 				nonpresent = shadow_notrap_nonpresent_pte;
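
In this final hunk the flag's only remaining use was choosing the nonpresent encoding for a guest PTE: a page still marked sp->unsync keeps the trapping shadow_trap_nonpresent_pte, exactly the cases that previously arrived with clear_unsync == false, while a page whose unsync flag was already cleared by the caller may install shadow_notrap_nonpresent_pte.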