Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 47f7dc4b authored by Linus Torvalds
Browse files
Pull kvm fixes from Paolo Bonzini:
 "Miscellaneous bugfixes, plus a small patchlet related to Spectre v2"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvmclock: fix TSC calibration for nested guests
  KVM: VMX: Mark VMXArea with revision_id of physical CPU even when eVMCS enabled
  KVM: irqfd: fix race between EPOLLHUP and irq_bypass_register_consumer
  KVM/Eventfd: Avoid crash when assign and deassign specific eventfd in parallel.
  x86/kvmclock: set pvti_cpu0_va after enabling kvmclock
  x86/kvm/Kconfig: Ensure CRYPTO_DEV_CCP_DD state at minimum matches KVM_AMD
  kvm: nVMX: Restore exit qual for VM-entry failure due to MSR loading
  x86/kvm/vmx: don't read current->thread.{fs,gs}base of legacy tasks
  KVM: VMX: support MSR_IA32_ARCH_CAPABILITIES as a feature MSR
parents 3c53776e e10f7805
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -138,6 +138,7 @@ static unsigned long kvm_get_tsc_khz(void)
	src = &hv_clock[cpu].pvti;
	tsc_khz = pvclock_tsc_khz(src);
	put_cpu();
	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
	return tsc_khz;
}

@@ -319,6 +320,8 @@ void __init kvmclock_init(void)
	printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
		msr_kvm_system_time, msr_kvm_wall_clock);

	pvclock_set_pvti_cpu0_va(hv_clock);

	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
		pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);

@@ -366,13 +369,10 @@ int __init kvm_setup_vsyscall_timeinfo(void)
	vcpu_time = &hv_clock[cpu].pvti;
	flags = pvclock_read_flags(vcpu_time);

	if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
	put_cpu();
		return 1;
	}

	pvclock_set_pvti_cpu0_va(hv_clock);
	put_cpu();
	if (!(flags & PVCLOCK_TSC_STABLE_BIT))
		return 1;

	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
#endif
+1 −1
Original line number Diff line number Diff line
@@ -85,7 +85,7 @@ config KVM_AMD_SEV
	def_bool y
	bool "AMD Secure Encrypted Virtualization (SEV) support"
	depends on KVM_AMD && X86_64
	depends on CRYPTO_DEV_CCP && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP
	depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
	---help---
	Provides support for launching Encrypted VMs on AMD processors.

+42 −19
Original line number Diff line number Diff line
@@ -2571,6 +2571,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
	struct vcpu_vmx *vmx = to_vmx(vcpu);
#ifdef CONFIG_X86_64
	int cpu = raw_smp_processor_id();
	unsigned long fs_base, kernel_gs_base;
#endif
	int i;

@@ -2586,12 +2587,20 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;

#ifdef CONFIG_X86_64
	if (likely(is_64bit_mm(current->mm))) {
		save_fsgs_for_kvm();
		vmx->host_state.fs_sel = current->thread.fsindex;
		vmx->host_state.gs_sel = current->thread.gsindex;
#else
		fs_base = current->thread.fsbase;
		kernel_gs_base = current->thread.gsbase;
	} else {
#endif
		savesegment(fs, vmx->host_state.fs_sel);
		savesegment(gs, vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
		fs_base = read_msr(MSR_FS_BASE);
		kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
	}
#endif
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
@@ -2611,10 +2620,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
	savesegment(ds, vmx->host_state.ds_sel);
	savesegment(es, vmx->host_state.es_sel);

	vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
	vmcs_writel(HOST_FS_BASE, fs_base);
	vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu));

	vmx->msr_host_kernel_gs_base = current->thread.gsbase;
	vmx->msr_host_kernel_gs_base = kernel_gs_base;
	if (is_long_mode(&vmx->vcpu))
		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#else
@@ -4322,10 +4331,6 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
	vmcs_conf->order = get_order(vmcs_conf->size);
	vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;

	/* KVM supports Enlightened VMCS v1 only */
	if (static_branch_unlikely(&enable_evmcs))
		vmcs_conf->revision_id = KVM_EVMCS_VERSION;
	else
	vmcs_conf->revision_id = vmx_msr_low;

	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
@@ -4396,7 +4401,13 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */

	/* KVM supports Enlightened VMCS v1 only */
	if (static_branch_unlikely(&enable_evmcs))
		vmcs->revision_id = KVM_EVMCS_VERSION;
	else
		vmcs->revision_id = vmcs_config.revision_id;

	return vmcs;
}

@@ -4564,6 +4575,19 @@ static __init int alloc_kvm_area(void)
			return -ENOMEM;
		}

		/*
		 * When eVMCS is enabled, alloc_vmcs_cpu() sets
		 * vmcs->revision_id to KVM_EVMCS_VERSION instead of
		 * revision_id reported by MSR_IA32_VMX_BASIC.
		 *
	 * However, even though not explicitly documented by
		 * TLFS, VMXArea passed as VMXON argument should
		 * still be marked with revision_id reported by
		 * physical CPU.
		 */
		if (static_branch_unlikely(&enable_evmcs))
			vmcs->revision_id = vmcs_config.revision_id;

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
@@ -11753,7 +11777,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	u32 msr_entry_idx;
	u32 exit_qual;
	int r;

@@ -11775,10 +11798,10 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
	nested_get_vmcs12_pages(vcpu, vmcs12);

	r = EXIT_REASON_MSR_LOAD_FAIL;
	msr_entry_idx = nested_vmx_load_msr(vcpu,
	exit_qual = nested_vmx_load_msr(vcpu,
					vmcs12->vm_entry_msr_load_addr,
					vmcs12->vm_entry_msr_load_count);
	if (msr_entry_idx)
	if (exit_qual)
		goto fail;

	/*
+3 −1
Original line number Diff line number Diff line
@@ -1097,6 +1097,7 @@ static u32 msr_based_features[] = {

	MSR_F10H_DECFG,
	MSR_IA32_UCODE_REV,
	MSR_IA32_ARCH_CAPABILITIES,
};

static unsigned int num_msr_based_features;
@@ -1105,7 +1106,8 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
{
	switch (msr->index) {
	case MSR_IA32_UCODE_REV:
		rdmsrl(msr->index, msr->data);
	case MSR_IA32_ARCH_CAPABILITIES:
		rdmsrl_safe(msr->index, &msr->data);
		break;
	default:
		if (kvm_x86_ops->get_msr_feature(msr))
+11 −6
Original line number Diff line number Diff line
@@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	struct kvm *kvm = irqfd->kvm;
	u64 cnt;

	/* Make sure irqfd has been initialized in assign path. */
	synchronize_srcu(&kvm->irq_srcu);

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
@@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);
	srcu_read_unlock(&kvm->irq_srcu, idx);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

@@ -402,11 +405,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
	if (events & EPOLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the EPOLLHUP
	 */
	fdput(f);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	if (kvm_arch_has_irq_bypass()) {
		irqfd->consumer.token = (void *)irqfd->eventfd;
@@ -421,6 +419,13 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
	}
#endif

	srcu_read_unlock(&kvm->irq_srcu, idx);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the EPOLLHUP
	 */
	fdput(f);
	return 0;

fail: