
Commit db2336a8 authored by Kyle Huey, committed by Paolo Bonzini

KVM: x86: virtualize cpuid faulting



Hardware support for faulting on the cpuid instruction is not required to
emulate it, because cpuid triggers a VM exit anyway. KVM handles the relevant
MSRs (MSR_PLATFORM_INFO and MSR_MISC_FEATURES_ENABLES) and, upon a
cpuid-induced VM exit, checks the cpuid faulting state and the CPL.
kvm_require_cpl is even kind enough to inject the #GP fault for us.

Signed-off-by: Kyle Huey <khuey@kylehuey.com>
Reviewed-by: David Matlack <dmatlack@google.com>
[Return "1" from kvm_emulate_cpuid, it's not void. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent bd17117b
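
Two of the hunks below add MSR_PLATFORM_INFO and MSR_MISC_FEATURES_ENABLES to
KVM's emulated_msrs list, which makes them reachable from host user space
through the ordinary KVM_GET_MSRS/KVM_SET_MSRS vcpu ioctls. A minimal sketch of
reading them back, assuming a vcpu file descriptor already obtained via
KVM_CREATE_VCPU (the helper name dump_cpuid_fault_msrs and the MSR index
defines are illustrative, not part of this patch):

    /* Illustrative host user-space sketch: read the two newly emulated MSRs
     * from an existing KVM vcpu.  vcpu_fd is assumed to come from
     * KVM_CREATE_VCPU. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    #define MSR_PLATFORM_INFO          0x000000ce
    #define MSR_MISC_FEATURES_ENABLES  0x00000140

    static int dump_cpuid_fault_msrs(int vcpu_fd)
    {
            struct kvm_msrs *msrs;
            int ret;

            /* kvm_msrs has a flexible array of kvm_msr_entry at its tail. */
            msrs = calloc(1, sizeof(*msrs) + 2 * sizeof(struct kvm_msr_entry));
            if (!msrs)
                    return -1;
            msrs->nmsrs = 2;
            msrs->entries[0].index = MSR_PLATFORM_INFO;
            msrs->entries[1].index = MSR_MISC_FEATURES_ENABLES;

            /* KVM_GET_MSRS returns the number of MSRs successfully read. */
            ret = ioctl(vcpu_fd, KVM_GET_MSRS, msrs);
            if (ret == 2) {
                    printf("MSR_PLATFORM_INFO         = 0x%llx\n",
                           (unsigned long long)msrs->entries[0].data);
                    printf("MSR_MISC_FEATURES_ENABLES = 0x%llx\n",
                           (unsigned long long)msrs->entries[1].data);
            }
            free(msrs);
            return ret == 2 ? 0 : -1;
    }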
+2 −0
@@ -611,6 +611,8 @@ struct kvm_vcpu_arch {
 	unsigned long dr7;
 	unsigned long eff_db[KVM_NR_DB_REGS];
 	unsigned long guest_debug_dr7;
+	u64 msr_platform_info;
+	u64 msr_misc_features_enables;
 
 	u64 mcg_cap;
 	u64 mcg_status;
+3 −0
@@ -876,6 +876,9 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 {
 	u32 eax, ebx, ecx, edx;
 
+	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
+		return 1;
+
 	eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
 	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
+11 −0
@@ -205,4 +205,15 @@ static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
 	return x86_stepping(best->eax);
 }
 
+static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
+}
+
+static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.msr_misc_features_enables &
+		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
+}
+
 #endif
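
For reference, the CPUID-fault bits tested by these helpers are architectural:
bit 31 of MSR_PLATFORM_INFO (0xce) advertises CPUID-faulting support, and bit 0
of MSR_MISC_FEATURES_ENABLES (0x140) turns faulting on. The macros themselves
live in msr-index.h rather than in this diff; roughly (a sketch, not the
verbatim header):

    #define MSR_PLATFORM_INFO                          0x000000ce
    #define MSR_PLATFORM_INFO_CPUID_FAULT_BIT          31
    #define MSR_PLATFORM_INFO_CPUID_FAULT              BIT_ULL(MSR_PLATFORM_INFO_CPUID_FAULT_BIT)

    #define MSR_MISC_FEATURES_ENABLES                  0x00000140
    #define MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT  0
    #define MSR_MISC_FEATURES_ENABLES_CPUID_FAULT      BIT_ULL(MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT)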
+7 −0
@@ -3854,6 +3854,13 @@ static int em_sti(struct x86_emulate_ctxt *ctxt)
 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
 {
 	u32 eax, ebx, ecx, edx;
+	u64 msr = 0;
+
+	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
+	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
+	    ctxt->ops->cpl(ctxt)) {
+		return emulate_gp(ctxt, 0);
+	}
 
 	eax = reg_read(ctxt, VCPU_REGS_RAX);
 	ecx = reg_read(ctxt, VCPU_REGS_RCX);
+26 −0
@@ -1007,6 +1007,8 @@ static u32 emulated_msrs[] = {
 	MSR_IA32_MCG_CTL,
 	MSR_IA32_MCG_EXT_CTL,
 	MSR_IA32_SMBASE,
+	MSR_PLATFORM_INFO,
+	MSR_MISC_FEATURES_ENABLES,
 };
 
 static unsigned num_emulated_msrs;
@@ -2314,6 +2316,21 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		vcpu->arch.osvw.status = data;
 		break;
+	case MSR_PLATFORM_INFO:
+		if (!msr_info->host_initiated ||
+		    data & ~MSR_PLATFORM_INFO_CPUID_FAULT ||
+		    (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
+		     cpuid_fault_enabled(vcpu)))
+			return 1;
+		vcpu->arch.msr_platform_info = data;
+		break;
+	case MSR_MISC_FEATURES_ENABLES:
+		if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
+		    (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
+		     !supports_cpuid_fault(vcpu)))
+			return 1;
+		vcpu->arch.msr_misc_features_enables = data;
+		break;
 	default:
 		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
 			return xen_hvm_config(vcpu, data);
@@ -2529,6 +2546,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		msr_info->data = vcpu->arch.osvw.status;
 		break;
+	case MSR_PLATFORM_INFO:
+		msr_info->data = vcpu->arch.msr_platform_info;
+		break;
+	case MSR_MISC_FEATURES_ENABLES:
+		msr_info->data = vcpu->arch.msr_misc_features_enables;
+		break;
 	default:
 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
 			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
@@ -7710,6 +7733,9 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	if (!init_event) {
 		kvm_pmu_reset(vcpu);
 		vcpu->arch.smbase = 0x30000;
+
+		vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
+		vcpu->arch.msr_misc_features_enables = 0;
 	}
 
 	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
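
Finally, a sketch of what the feature looks like from inside the guest: once
the guest kernel sets the CPUID-fault bit in MSR_MISC_FEATURES_ENABLES (for
example on behalf of a process that requested it through the ARCH_SET_CPUID
arch_prctl introduced separately in this series), CPUID executed at CPL 3 takes
the injected #GP(0), which a Linux guest delivers to the process as SIGSEGV.
Hypothetical stand-alone probe, not part of this patch:

    /* Guest user-space probe: run CPUID and report whether it faulted. */
    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>
    #include <cpuid.h>

    static sigjmp_buf env;

    static void on_sigsegv(int sig)
    {
            (void)sig;
            siglongjmp(env, 1);   /* CPUID raised #GP -> SIGSEGV */
    }

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            signal(SIGSEGV, on_sigsegv);
            if (sigsetjmp(env, 1)) {
                    puts("CPUID faulted: cpuid faulting is enabled");
                    return 0;
            }
            __get_cpuid(0, &eax, &ebx, &ecx, &edx);
            printf("CPUID executed normally, max basic leaf = %u\n", eax);
            return 0;
    }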