Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1fc7f56d authored by Linus Torvalds
Browse files
Pull KVM fixes from Paolo Bonzini:
 "Quite a few fixes for x86: nested virtualization save/restore, AMD
  nested virtualization and virtual APIC, 32-bit fixes, an important fix
  to restore operation on older processors, and a bunch of hyper-v
  bugfixes. Several are marked stable.

  There are also fixes for GCC warnings and for a GCC/objtool interaction"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Mark expected switch fall-throughs
  KVM: x86: fix TRACE_INCLUDE_PATH and remove -I. header search paths
  KVM: selftests: check returned evmcs version range
  x86/kvm/hyper-v: nested_enable_evmcs() sets vmcs_version incorrectly
  KVM: VMX: Move vmx_vcpu_run()'s VM-Enter asm blob to a helper function
  kvm: selftests: Fix region overlap check in kvm_util
  kvm: vmx: fix some -Wmissing-prototypes warnings
  KVM: nSVM: clear events pending from svm_complete_interrupts() when exiting to L1
  svm: Fix AVIC incomplete IPI emulation
  svm: Add warning message for AVIC IPI invalid target
  KVM: x86: WARN_ONCE if sending a PV IPI returns a fatal error
  KVM: x86: Fix PV IPIs for 32-bit KVM host
  x86/kvm/hyper-v: recommend using eVMCS only when it is enabled
  x86/kvm/hyper-v: don't recommend doing reset via synthetic MSR
  kvm: x86/vmx: Use kzalloc for cached_vmcs12
  KVM: VMX: Use the correct field var when clearing VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL
  KVM: x86: Fix single-step debugging
  x86/kvm/hyper-v: don't announce GUEST IDLE MSR support
parents c180f1b0 b2869f28
Loading
Loading
Loading
Loading
+5 −2
Original line number Original line Diff line number Diff line
@@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
#else
#else
	u64 ipi_bitmap = 0;
	u64 ipi_bitmap = 0;
#endif
#endif
	long ret;


	if (cpumask_empty(mask))
	if (cpumask_empty(mask))
		return;
		return;
@@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
			max = apic_id < max ? max : apic_id;
		} else {
		} else {
			kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
			min = max = apic_id;
			min = max = apic_id;
			ipi_bitmap = 0;
			ipi_bitmap = 0;
		}
		}
@@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
	}
	}


	if (ipi_bitmap) {
	if (ipi_bitmap) {
		kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
		WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
	}
	}


	local_irq_restore(flags);
	local_irq_restore(flags);
+0 −4
Original line number Original line Diff line number Diff line
@@ -2,10 +2,6 @@


ccflags-y += -Iarch/x86/kvm
ccflags-y += -Iarch/x86/kvm


CFLAGS_x86.o := -I.
CFLAGS_svm.o := -I.
CFLAGS_vmx.o := -I.

KVM := ../../../virt/kvm
KVM := ../../../virt/kvm


kvm-y			+= $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
kvm-y			+= $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
+3 −4
Original line number Original line Diff line number Diff line
@@ -1636,7 +1636,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
		ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
		ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
		if (ret != HV_STATUS_INVALID_PORT_ID)
		if (ret != HV_STATUS_INVALID_PORT_ID)
			break;
			break;
		/* maybe userspace knows this conn_id: fall through */
		/* fall through - maybe userspace knows this conn_id. */
	case HVCALL_POST_MESSAGE:
	case HVCALL_POST_MESSAGE:
		/* don't bother userspace if it has no way to handle it */
		/* don't bother userspace if it has no way to handle it */
		if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
		if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
@@ -1832,7 +1832,6 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
			ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
			ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
			ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
			ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
			ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
			ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
			ent->eax |= HV_X64_MSR_GUEST_IDLE_AVAILABLE;
			ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
			ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
			ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;
			ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;


@@ -1848,10 +1847,10 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		case HYPERV_CPUID_ENLIGHTMENT_INFO:
		case HYPERV_CPUID_ENLIGHTMENT_INFO:
			ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
			ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
			ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
			ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
			ent->eax |= HV_X64_SYSTEM_RESET_RECOMMENDED;
			ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
			ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
			ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
			ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
			ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
			ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
			if (evmcs_ver)
				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;


			/*
			/*
+2 −0
Original line number Original line Diff line number Diff line
@@ -1035,6 +1035,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
	switch (delivery_mode) {
	switch (delivery_mode) {
	case APIC_DM_LOWEST:
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
		vcpu->arch.apic_arb_prio++;
		/* fall through */
	case APIC_DM_FIXED:
	case APIC_DM_FIXED:
		if (unlikely(trig_mode && !level))
		if (unlikely(trig_mode && !level))
			break;
			break;
@@ -1874,6 +1875,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)


	case APIC_LVT0:
	case APIC_LVT0:
		apic_manage_nmi_watchdog(apic, val);
		apic_manage_nmi_watchdog(apic, val);
		/* fall through */
	case APIC_LVTTHMR:
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVTPC:
	case APIC_LVT1:
	case APIC_LVT1:
+1 −0
Original line number Original line Diff line number Diff line
@@ -4371,6 +4371,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
			rsvd_bits(maxphyaddr, 51);
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[1][4] =
		rsvd_check->rsvd_bits_mask[1][4] =
			rsvd_check->rsvd_bits_mask[0][4];
			rsvd_check->rsvd_bits_mask[0][4];
		/* fall through */
	case PT64_ROOT_4LEVEL:
	case PT64_ROOT_4LEVEL:
		rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
		rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
Loading