
Commit b8b0c871 authored by Jim Mattson, committed by Greg Kroah-Hartman

kvm: svm: Ensure an IBPB on all affected CPUs when freeing a vmcb



commit fd65d3142f734bc4376053c8d75670041903134d upstream.

Previously, we called indirect_branch_prediction_barrier only on the
logical CPU that freed a vmcb. This function should be called on all
logical CPUs that last loaded the vmcb in question.
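
For context, svm_vcpu_load() skips the IBPB when the per-CPU
svm_data->current_vmcb pointer already matches the vmcb being loaded. If a
freed vmcb page is recycled for a new vCPU, a stale current_vmcb on another
logical CPU can still equal the recycled pointer, so the barrier is skipped;
that is the "false negative" the new comment refers to. An abridged sketch of
that check, as introduced by 15d45071 ("KVM/x86: Add IBPB support"),
with surrounding code omitted:

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

	/* ... */
	/*
	 * Flush predictor state only when switching to a different vmcb.
	 * Comparing against a recycled pointer is what let this check
	 * falsely succeed before this fix.
	 */
	if (sd->current_vmcb != svm->vmcb) {
		sd->current_vmcb = svm->vmcb;
		indirect_branch_prediction_barrier();
	}
	/* ... */
}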

Fixes: 15d45071 ("KVM/x86: Add IBPB support")
Reported-by: Neel Natu <neelnatu@google.com>
Signed-off-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 471aca57
arch/x86/kvm/svm.c +15 −5
@@ -2187,21 +2187,31 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	return ERR_PTR(err);
 }
 
+static void svm_clear_current_vmcb(struct vmcb *vmcb)
+{
+	int i;
+
+	for_each_online_cpu(i)
+		cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
+}
+
 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	/*
+	 * The vmcb page can be recycled, causing a false negative in
+	 * svm_vcpu_load(). So, ensure that no logical CPU has this
+	 * vmcb page recorded as its current vmcb.
+	 */
+	svm_clear_current_vmcb(svm->vmcb);
+
 	__free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
 	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
 	__free_page(virt_to_page(svm->nested.hsave));
 	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, svm);
-	/*
-	 * The vmcb page can be recycled, causing a false negative in
-	 * svm_vcpu_load(). So do a full IBPB now.
-	 */
-	indirect_branch_prediction_barrier();
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
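
A note on the cmpxchg() in svm_clear_current_vmcb(): it clears a CPU's cached
current_vmcb only if that slot still points at the vmcb being freed, so a
concurrent load of an unrelated vmcb on another CPU is not clobbered. A
minimal standalone sketch of the same compare-and-swap pattern using C11
atomics (a hypothetical user-space helper, not kernel code):

#include <stdatomic.h>
#include <stddef.h>

/*
 * Hypothetical analogue of cmpxchg(&slot, freed, NULL): atomically
 * clear the slot only if it still refers to the object being freed.
 */
static void clear_if_matches(_Atomic(void *) *slot, void *freed)
{
	void *expected = freed;

	/*
	 * Succeeds (writes NULL) only when *slot == freed; otherwise the
	 * slot keeps whatever another thread stored there.
	 */
	atomic_compare_exchange_strong(slot, &expected, NULL);
}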