Commit 4340fa55 authored by Linus Torvalds
Pull KVM fixes from Radim Krčmář:
 "ARM:
   - two fixes for 4.6 vgic [Christoffer] (cc stable)

   - six fixes for 4.7 vgic [Marc]

  x86:
   - six fixes from syzkaller reports [Paolo] (two of them cc stable)

   - allow OS X to boot [Dmitry]

   - don't trust compilers [Nadav]"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: fix OOPS after invalid KVM_SET_DEBUGREGS
  KVM: x86: avoid vmalloc(0) in the KVM_SET_CPUID
  KVM: irqfd: fix NULL pointer dereference in kvm_irq_map_gsi
  KVM: fail KVM_SET_VCPU_EVENTS with invalid exception number
  kvm: x86: avoid warning on repeated KVM_SET_TSS_ADDR
  KVM: Handle MSR_IA32_PERF_CTL
  KVM: x86: avoid write-tearing of TDP
  KVM: arm/arm64: vgic-new: Removel harmful BUG_ON
  arm64: KVM: vgic-v3: Relax synchronization when SRE==1
  arm64: KVM: vgic-v3: Prevent the guest from messing with ICC_SRE_EL1
  arm64: KVM: Make ICC_SRE_EL1 access return the configured SRE value
  KVM: arm/arm64: vgic-v3: Always resample level interrupts
  KVM: arm/arm64: vgic-v2: Always resample level interrupts
  KVM: arm/arm64: vgic-v3: Clear all dirty LRs
  KVM: arm/arm64: vgic-v2: Clear all dirty LRs
parents 719af93a d14bdb55
+21 −15
@@ -169,6 +169,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 	 * Make sure stores to the GIC via the memory mapped interface
 	 * are now visible to the system register interface.
 	 */
-	dsb(st);
+	if (!cpu_if->vgic_sre)
+		dsb(st);
 
 	cpu_if->vgic_vmcr  = read_gicreg(ICH_VMCR_EL2);
@@ -190,12 +191,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 			if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
 				continue;
 
-			if (cpu_if->vgic_elrsr & (1 << i)) {
+			if (cpu_if->vgic_elrsr & (1 << i))
 				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
-				continue;
-			}
+			else
+				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
 
-			cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
 			__gic_v3_set_lr(0, i);
 		}
@@ -236,9 +236,13 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 
 	val = read_gicreg(ICC_SRE_EL2);
 	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
-	isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
-	write_gicreg(1, ICC_SRE_EL1);
+
+	if (!cpu_if->vgic_sre) {
+		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
+		isb();
+		write_gicreg(1, ICC_SRE_EL1);
+	}
 }
 
 void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 {
@@ -256,8 +260,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	 * been actually programmed with the value we want before
 	 * starting to mess with the rest of the GIC.
 	 */
-	write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
-	isb();
+	if (!cpu_if->vgic_sre) {
+		write_gicreg(0, ICC_SRE_EL1);
+		isb();
+	}
 
 	val = read_gicreg(ICH_VTR_EL2);
 	max_lr_idx = vtr_to_max_lr_idx(val);
@@ -306,19 +312,19 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	 * (re)distributors. This ensure the guest will read the
 	 * correct values from the memory-mapped interface.
 	 */
-	isb();
-	dsb(sy);
+	if (!cpu_if->vgic_sre) {
+		isb();
+		dsb(sy);
+	}
 	vcpu->arch.vgic_cpu.live_lrs = live_lrs;
 
 	/*
 	 * Prevent the guest from touching the GIC system registers if
 	 * SRE isn't enabled for GICv3 emulation.
 	 */
-	if (!cpu_if->vgic_sre) {
-		write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
-			     ICC_SRE_EL2);
-	}
+	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
+		     ICC_SRE_EL2);
 }
 
 void __hyp_text __vgic_v3_init_lrs(void)
 {
+12 −1
@@ -134,6 +134,17 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+static bool access_gic_sre(struct kvm_vcpu *vcpu,
+			   struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	if (p->is_write)
+		return ignore_write(vcpu, p);
+
+	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+	return true;
+}
+
 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
@@ -958,7 +969,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  access_gic_sgi },
 	/* ICC_SRE_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
-	  trap_raz_wi },
+	  access_gic_sre },
 
 	/* CONTEXTIDR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
+12 −10
@@ -181,19 +181,22 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 			     struct kvm_cpuid_entry __user *entries)
 {
 	int r, i;
-	struct kvm_cpuid_entry *cpuid_entries;
+	struct kvm_cpuid_entry *cpuid_entries = NULL;
 
 	r = -E2BIG;
 	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
 		goto out;
 	r = -ENOMEM;
-	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
-	if (!cpuid_entries)
-		goto out;
-	r = -EFAULT;
-	if (copy_from_user(cpuid_entries, entries,
-			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
-		goto out_free;
+	if (cpuid->nent) {
+		cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
+					cpuid->nent);
+		if (!cpuid_entries)
+			goto out;
+		r = -EFAULT;
+		if (copy_from_user(cpuid_entries, entries,
+				   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+			goto out;
+	}
 	for (i = 0; i < cpuid->nent; i++) {
 		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
 		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
@@ -212,9 +215,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 	kvm_x86_ops->cpuid_update(vcpu);
 	r = kvm_update_cpuid(vcpu);
 
-out_free:
-	vfree(cpuid_entries);
 out:
+	vfree(cpuid_entries);
 	return r;
 }

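The cpuid.c change above only matters when userspace passes nent == 0, which previously reached vmalloc(0). A minimal sketch of such a caller is shown below; it assumes /dev/kvm is accessible and omits error handling, so treat it as illustrative rather than a test case.

/* Hedged sketch: a userspace caller issuing KVM_SET_CPUID with zero entries,
 * the case now handled without calling vmalloc(0).  Setup and error handling
 * are deliberately minimal. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);		/* KVM device node */
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);		/* new VM fd */
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);	/* vCPU 0 fd */

	struct kvm_cpuid cpuid = { .nent = 0 };		/* no CPUID entries */
	if (ioctl(vcpu, KVM_SET_CPUID, &cpuid) < 0)
		perror("KVM_SET_CPUID");
	return 0;
}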
+4 −4
@@ -336,12 +336,12 @@ static gfn_t pse36_gfn_delta(u32 gpte)
 #ifdef CONFIG_X86_64
 static void __set_spte(u64 *sptep, u64 spte)
 {
-	*sptep = spte;
+	WRITE_ONCE(*sptep, spte);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
 {
-	*sptep = spte;
+	WRITE_ONCE(*sptep, spte);
 }
 
 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
@@ -390,7 +390,7 @@ static void __set_spte(u64 *sptep, u64 spte)
 	 */
 	smp_wmb();
 
-	ssptep->spte_low = sspte.spte_low;
+	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
@@ -400,7 +400,7 @@ static void __update_clear_spte_fast(u64 *sptep, u64 spte)
 	ssptep = (union split_spte *)sptep;
 	sspte = (union split_spte)spte;
 
-	ssptep->spte_low = sspte.spte_low;
+	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 
 	/*
 	 * If we map the spte from present to nonpresent, we should clear
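For background on the mmu.c hunks above: WRITE_ONCE() performs the store through a volatile access, so the compiler cannot tear, merge, or defer the SPTE write that the hardware page-table walker may be reading concurrently. The stand-in below is a simplified userspace illustration of that idea, not the kernel macro itself.

/* Hedged sketch: a simplified stand-in for the kernel's WRITE_ONCE().
 * A plain "*sptep = spte" lets the compiler split (tear) the 64-bit store
 * or defer it; the volatile access forces one full-width store, which is
 * what the SPTE updates above rely on. */
#include <stdint.h>
#include <stdio.h>

#define WRITE_ONCE_U64(p, v)	(*(volatile uint64_t *)(p) = (uint64_t)(v))

static void set_spte_like(uint64_t *sptep, uint64_t spte)
{
	WRITE_ONCE_U64(sptep, spte);	/* single, untorn 64-bit store */
}

int main(void)
{
	uint64_t spte = 0;
	set_spte_like(&spte, 0xdeadbeefcafef00dULL);
	printf("%#llx\n", (unsigned long long)spte);
	return 0;
}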
+11 −1
@@ -2314,6 +2314,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_AMD64_NB_CFG:
 	case MSR_FAM10H_MMIO_CONF_BASE:
 	case MSR_AMD64_BU_CFG2:
+	case MSR_IA32_PERF_CTL:
 		msr_info->data = 0;
 		break;
 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
@@ -2972,6 +2973,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 			      | KVM_VCPUEVENT_VALID_SMM))
 		return -EINVAL;
 
+	if (events->exception.injected &&
+	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
+		return -EINVAL;
+
 	process_nmi(vcpu);
 	vcpu->arch.exception.pending = events->exception.injected;
 	vcpu->arch.exception.nr = events->exception.nr;
@@ -3036,6 +3041,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 	if (dbgregs->flags)
 		return -EINVAL;
 
+	if (dbgregs->dr6 & ~0xffffffffull)
+		return -EINVAL;
+	if (dbgregs->dr7 & ~0xffffffffull)
+		return -EINVAL;
+
 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
 	kvm_update_dr0123(vcpu);
 	vcpu->arch.dr6 = dbgregs->dr6;
@@ -7815,7 +7825,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 
 	slot = id_to_memslot(slots, id);
 	if (size) {
-		if (WARN_ON(slot->npages))
+		if (slot->npages)
 			return -EEXIST;
 
 		/*
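The dr6/dr7 checks in the x86.c hunks above reject KVM_SET_DEBUGREGS calls that set bits above bit 31, which a fuzzer could previously use to trigger an OOPS when the values were later loaded into the debug registers. The sketch below shows such a call from userspace; it assumes /dev/kvm is accessible and leaves out error handling.

/* Hedged sketch: a syzkaller-style KVM_SET_DEBUGREGS call with reserved high
 * bits set in dr6.  With the checks added above, the ioctl now fails with
 * -EINVAL instead of leading to an OOPS.  Minimal setup, no error handling. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

	struct kvm_debugregs dbg;
	memset(&dbg, 0, sizeof(dbg));
	dbg.dr6 = 1ULL << 63;			/* bits above bit 31 */

	if (ioctl(vcpu, KVM_SET_DEBUGREGS, &dbg) < 0)
		perror("KVM_SET_DEBUGREGS");	/* expected: EINVAL */
	return 0;
}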