
Commit faff8758 authored by Junaid Shahid, committed by Paolo Bonzini

kvm: x86: Flush only affected TLB entries in kvm_mmu_invlpg*



This needs a minor bug fix. The updated patch is as follows.

Thanks,
Junaid

------------------------------------------------------------------------------

kvm_mmu_invlpg() and kvm_mmu_invpcid_gva() only need to flush the TLB
entries for the specific guest virtual address, instead of flushing all
TLB entries associated with the VM.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 956bf353
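For intuition about the change the message describes, here is a minimal user-space toy model (not KVM code; the TLB structure and the flush_all()/flush_gva() names are made up for illustration). A full flush discards every cached translation the guest has built up, while a per-GVA flush preserves all but the one affected entry:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_ENTRIES 8

/* One cached translation: virtual page number -> physical frame number. */
struct tlb_entry {
	uint64_t vpn;
	uint64_t pfn;
	bool valid;
};

static struct tlb_entry tlb[TLB_ENTRIES];

/* Old behaviour: KVM_REQ_TLB_FLUSH drops every cached translation. */
static void flush_all(void)
{
	for (int i = 0; i < TLB_ENTRIES; i++)
		tlb[i].valid = false;
}

/* New behaviour: only the entry covering one address is dropped. */
static void flush_gva(uint64_t gva)
{
	uint64_t vpn = gva >> 12;

	for (int i = 0; i < TLB_ENTRIES; i++)
		if (tlb[i].valid && tlb[i].vpn == vpn)
			tlb[i].valid = false;
}

int main(void)
{
	tlb[0] = (struct tlb_entry){ .vpn = 0x1000, .pfn = 0xa, .valid = true };
	tlb[1] = (struct tlb_entry){ .vpn = 0x2000, .pfn = 0xb, .valid = true };

	flush_gva(0x1000ULL << 12);			/* hits entry 0 only */
	printf("entry1 still valid: %d\n", tlb[1].valid);	/* 1 */

	flush_all();					/* the pre-patch cost */
	printf("entry1 still valid: %d\n", tlb[1].valid);	/* 0 */
	return 0;
}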
arch/x86/include/asm/kvm_host.h  +8 −0
@@ -985,6 +985,14 @@ struct kvm_x86_ops {
 
 	void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
 
+	/*
+	 * Flush any TLB entries associated with the given GVA.
+	 * Does not need to flush GPA->HPA mappings.
+	 * Can potentially get non-canonical addresses through INVLPGs, which
+	 * the implementation may choose to ignore if appropriate.
+	 */
+	void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);
+
 	void (*run)(struct kvm_vcpu *vcpu);
 	int (*handle_exit)(struct kvm_vcpu *vcpu);
 	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
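The "non-canonical addresses" caveat in the new comment refers to x86-64's rule that bits 63 down to the implemented virtual-address width must all be copies of the top implemented bit. A minimal sketch of such a check (a stand-in for KVM's is_noncanonical_address(); the helper name and the hardcoded 48-bit width are assumptions for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* An address is canonical iff sign-extending it from the CPU's
 * virtual-address width reproduces the original value. The cast/shift
 * trick assumes two's complement, as kernel code does in practice. */
static bool is_canonical(uint64_t addr, unsigned int vaddr_bits)
{
	int64_t sext = (int64_t)(addr << (64 - vaddr_bits)) >> (64 - vaddr_bits);

	return (uint64_t)sext == addr;
}

int main(void)
{
	printf("%d\n", is_canonical(0x00007fffffffffffULL, 48)); /* 1: top of lower half */
	printf("%d\n", is_canonical(0xffff800000000000ULL, 48)); /* 1: start of upper half */
	printf("%d\n", is_canonical(0x0000800000000000ULL, 48)); /* 0: inside the hole */
	return 0;
}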
arch/x86/kvm/mmu.c  +11 −3
@@ -5226,6 +5226,10 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_mmu *mmu = &vcpu->arch.mmu;
 
+	/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
+	if (is_noncanonical_address(gva, vcpu))
+		return;
+
 	mmu->invlpg(vcpu, gva, mmu->root_hpa);
 
 	/*
@@ -5242,7 +5246,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 	if (VALID_PAGE(mmu->prev_root.hpa))
 		mmu->invlpg(vcpu, gva, mmu->prev_root.hpa);
 
-	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+	kvm_x86_ops->tlb_flush_gva(vcpu, gva);
 	++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
@@ -5250,18 +5254,22 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
 {
 	struct kvm_mmu *mmu = &vcpu->arch.mmu;
	bool tlb_flush = false;
 
 	if (pcid == kvm_get_active_pcid(vcpu)) {
 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		tlb_flush = true;
 	}
 
 	if (VALID_PAGE(mmu->prev_root.hpa) &&
 	    pcid == kvm_get_pcid(vcpu, mmu->prev_root.cr3)) {
 		mmu->invlpg(vcpu, gva, mmu->prev_root.hpa);
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		tlb_flush = true;
 	}
 
+	if (tlb_flush)
+		kvm_x86_ops->tlb_flush_gva(vcpu, gva);
+
 	++vcpu->stat.invlpg;
 
 	/*
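kvm_mmu_invpcid_gva() above has to decide which cached root the guest's INVPCID targets, so it compares the guest-supplied PCID against the PCID of the active root and of the cached previous root. With CR4.PCIDE set, a root's PCID is simply the low 12 bits of its CR3 value. A toy stand-in for that extraction (the helper name pcid_from_cr3 is hypothetical, modeled on what kvm_get_pcid() computes):

#include <stdint.h>
#include <stdio.h>

#define CR3_PCID_MASK 0xfffULL		/* PCID lives in CR3 bits 11:0 */

/* With CR4.PCIDE enabled the PCID is the low 12 bits of CR3;
 * with PCIDE disabled every address space uses PCID 0. */
static uint16_t pcid_from_cr3(uint64_t cr3, int pcide)
{
	return pcide ? (uint16_t)(cr3 & CR3_PCID_MASK) : 0;
}

int main(void)
{
	uint64_t prev_root_cr3 = 0x123456001ULL; /* table base | PCID 1 */

	printf("pcid = %u\n", pcid_from_cr3(prev_root_cr3, 1)); /* prints 1 */
	return 0;
}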
arch/x86/kvm/svm.c  +8 −0
@@ -5434,6 +5434,13 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 		svm->asid_generation--;
 }
 
+static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	invlpga(gva, svm->vmcb->control.asid);
+}
+
 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
 {
 }
@@ -7086,6 +7093,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.set_rflags = svm_set_rflags,
 
 	.tlb_flush = svm_flush_tlb,
+	.tlb_flush_gva = svm_flush_tlb_gva,
 
 	.run = svm_vcpu_run,
 	.handle_exit = handle_exit,
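On the AMD side the per-address flush maps directly onto the INVLPGA instruction, which takes the virtual address in rAX and the ASID in ECX, invalidating only that translation within that ASID and leaving the host's and other guests' entries alone. A hedged sketch of roughly what the invlpga() wrapper boils down to (kernel context only; INVLPGA is privileged, and the raw opcode bytes are emitted the way kernels of that era did):

#include <stdint.h>

/* 0f 01 df is INVLPGA; its operands are implicit: rAX holds the
 * virtual address to flush, ECX holds the target ASID. */
static inline void invlpga_sketch(unsigned long addr, uint32_t asid)
{
	asm volatile(".byte 0x0f, 0x01, 0xdf"
		     : : "a"(addr), "c"(asid) : "memory");
}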
arch/x86/kvm/vmx.c  +28 −0
@@ -1992,6 +1992,19 @@ static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
 			 __loaded_vmcs_clear, loaded_vmcs, 1);
 }
 
+static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr)
+{
+	if (vpid == 0)
+		return true;
+
+	if (cpu_has_vmx_invvpid_individual_addr()) {
+		__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
+		return true;
+	}
+
+	return false;
+}
+
 static inline void vpid_sync_vcpu_single(int vpid)
 {
 	if (vpid == 0)
@@ -4833,6 +4846,20 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
 }
 
+static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
+{
+	int vpid = to_vmx(vcpu)->vpid;
+
+	if (!vpid_sync_vcpu_addr(vpid, addr))
+		vpid_sync_context(vpid);
+
+	/*
+	 * If VPIDs are not supported or enabled, then the above is a no-op.
+	 * But we don't really need a TLB flush in that case anyway, because
+	 * each VM entry/exit includes an implicit flush when VPID is 0.
+	 */
+}
+
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -13603,6 +13630,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.set_rflags = vmx_set_rflags,
 
 	.tlb_flush = vmx_flush_tlb,
+	.tlb_flush_gva = vmx_flush_tlb_gva,
 
 	.run = vmx_vcpu_run,
 	.handle_exit = vmx_handle_exit,
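On the Intel side, vpid_sync_vcpu_addr() uses the individual-address extent of INVVPID when the CPU advertises it, and vmx_flush_tlb_gva() falls back to a single-context flush otherwise. Per the Intel SDM, INVVPID takes an extent type in a register and a 128-bit descriptor in memory. A hedged sketch of that descriptor and its invocation (the struct and function names are made up; the real kernel wraps the instruction and checks the flags for failure):

#include <stdint.h>

/* The 128-bit INVVPID descriptor from the Intel SDM: which VPID to
 * target and, for the individual-address extent (type 0), which
 * linear address to invalidate. */
struct invvpid_desc {
	uint64_t vpid : 16;
	uint64_t rsvd : 48;	/* must be zero */
	uint64_t gva;
};

/* VMX-root kernel context only; INVVPID is a privileged VMX instruction. */
static inline void invvpid_addr_sketch(uint16_t vpid, uint64_t gva)
{
	struct invvpid_desc desc = { .vpid = vpid, .rsvd = 0, .gva = gva };
	uint64_t ext = 0;	/* extent 0 = individual-address */

	asm volatile("invvpid %0, %1"
		     : : "m"(desc), "r"(ext) : "cc", "memory");
}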