
Commit 542380a2 authored by Linus Torvalds
Pull KVM fixes from Radim Krčmář:
 "ARM:
   - Fix a problem with GICv3 userspace save/restore
   - Clarify GICv2 userspace save/restore ABI
   - Be more careful in clearing GIC LRs
   - Add missing synchronization primitive to our MMU handling code

  PPC:
   - Check for a NULL return from kzalloc

  s390:
   - Prevent translation exception errors on valid page tables for the
     instruction-execution-protection support

  x86:
   - Fix Page-Modification Logging when running a nested guest"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: PPC: Book3S HV: Check for kmalloc errors in ioctl
  KVM: nVMX: initialize PML fields in vmcs02
  KVM: nVMX: do not leak PML full vmexit to L1
  KVM: arm/arm64: vgic: Fix GICC_PMR uaccess on GICv3 and clarify ABI
  KVM: arm64: Ensure LRs are clear when they should be
  kvm: arm/arm64: Fix locking for kvm_free_stage2_pgd
  KVM: s390: remove change-recording override support
  arm/arm64: KVM: Take mmap_sem in kvm_arch_prepare_memory_region
  arm/arm64: KVM: Take mmap_sem in stage2_unmap_vm
parents 62fedca5 8786fa66
+6 −0
@@ -83,6 +83,12 @@ Groups:
 
     Bits for undefined preemption levels are RAZ/WI.
 
+    For historical reasons and to provide ABI compatibility with userspace we
+    export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
+    field in the lower 5 bits of a word, meaning that userspace must always
+    use the lower 5 bits to communicate with the KVM device and must shift the
+    value left by 3 places to obtain the actual priority mask level.
+
  Limitations:
    - Priorities are not implemented, and registers are RAZ/WI
    - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
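
A minimal userspace sketch of the conversion the paragraph above describes. The helper names are hypothetical; only the shift-by-3 and the 5-bit mask come from the ABI text, nothing here is kernel code:

/* GICC_PMR is exported in GICH_VMCR.VMPriMask format: only the
 * lower 5 bits of the word are meaningful. Illustration only. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pmr_from_kvm(uint32_t reg)
{
	return (reg & 0x1f) << 3;	/* shift left by 3 for the actual mask level */
}

static uint32_t pmr_to_kvm(uint32_t mask)
{
	return (mask >> 3) & 0x1f;	/* only the lower 5 bits go to the device */
}

int main(void)
{
	uint32_t reg = 0x1f;	/* e.g. a value read via KVM_GET_DEVICE_ATTR */
	printf("priority mask level: 0x%02x\n", pmr_from_kvm(reg));	/* 0xf8 */
	return 0;
}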
+3 −0
@@ -1124,6 +1124,9 @@ static void cpu_hyp_reinit(void)
 		if (__hyp_get_vectors() == hyp_default_vectors)
 			cpu_init_hyp_mode(NULL);
 	}
+
+	if (vgic_present)
+		kvm_vgic_init_cpu_hardware();
 }
 
 static void cpu_hyp_reset(void)
+20 −3
@@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	phys_addr_t addr = start, end = start + size;
 	phys_addr_t next;
 
+	assert_spin_locked(&kvm->mmu_lock);
 	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
 		next = stage2_pgd_addr_end(addr, end);
 		if (!stage2_pgd_none(*pgd))
 			unmap_stage2_puds(kvm, pgd, addr, next);
+		/*
+		 * If the range is too large, release the kvm->mmu_lock
+		 * to prevent starvation and lockup detector warnings.
+		 */
+		if (next != end)
+			cond_resched_lock(&kvm->mmu_lock);
 	} while (pgd++, addr = next, addr != end);
 }
 
@@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 	int idx;
 
 	idx = srcu_read_lock(&kvm->srcu);
+	down_read(&current->mm->mmap_sem);
 	spin_lock(&kvm->mmu_lock);
 
 	slots = kvm_memslots(kvm);
@@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 		stage2_unmap_memslot(kvm, memslot);
 
 	spin_unlock(&kvm->mmu_lock);
+	up_read(&current->mm->mmap_sem);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 	if (kvm->arch.pgd == NULL)
 		return;
 
+	spin_lock(&kvm->mmu_lock);
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+	spin_unlock(&kvm->mmu_lock);
+
 	/* Free the HW pgd, one page at a time */
 	free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
 	kvm->arch.pgd = NULL;
@@ -1801,6 +1813,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	    (KVM_PHYS_SIZE >> PAGE_SHIFT))
 		return -EFAULT;
 
+	down_read(&current->mm->mmap_sem);
 	/*
 	 * A memory region could potentially cover multiple VMAs, and any holes
 	 * between them, so iterate over all of them to find out if we can map
@@ -1844,8 +1857,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 			pa += vm_start - vma->vm_start;
 
 			/* IO region dirty page logging not allowed */
-			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
-				return -EINVAL;
+			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+				ret = -EINVAL;
+				goto out;
+			}
 
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
@@ -1857,7 +1872,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	} while (hva < reg_end);
 
 	if (change == KVM_MR_FLAGS_ONLY)
-		return ret;
+		goto out;
 
 	spin_lock(&kvm->mmu_lock);
 	if (ret)
@@ -1865,6 +1880,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	else
 		stage2_flush_memslot(kvm, memslot);
 	spin_unlock(&kvm->mmu_lock);
+out:
+	up_read(&current->mm->mmap_sem);
 	return ret;
 }

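Read together, the hunks above pin down one locking pattern: take mmap_sem (a sleeping rwsem) before the mmu_lock spinlock, release in reverse order, and break up long walks under mmu_lock via cond_resched_lock() so waiters are not starved. A minimal user-space analogue of that pattern using pthreads; every name is illustrative and none of this is kernel code:

#include <pthread.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;	/* sleeping lock, taken first */
static pthread_spinlock_t mmu_lock;				/* spinlock, taken second */

static void unmap_range(long start, long end)
{
	/* caller holds mmu_lock, mirroring the assert_spin_locked() above */
	for (long addr = start; addr < end; addr += 4096) {
		/* ... tear down one chunk of mappings ... */
		if (addr + 4096 < end) {
			/* analogue of cond_resched_lock(): drop and retake
			 * the lock so other waiters can make progress */
			pthread_spin_unlock(&mmu_lock);
			pthread_spin_lock(&mmu_lock);
		}
	}
}

static void unmap_vm(void)
{
	pthread_rwlock_rdlock(&mmap_sem);	/* like down_read(&current->mm->mmap_sem) */
	pthread_spin_lock(&mmu_lock);		/* like spin_lock(&kvm->mmu_lock) */
	unmap_range(0, 1L << 20);
	pthread_spin_unlock(&mmu_lock);
	pthread_rwlock_unlock(&mmap_sem);	/* release in reverse order */
}

int main(void)
{
	pthread_spin_init(&mmu_lock, PTHREAD_PROCESS_PRIVATE);
	unmap_vm();
	pthread_spin_destroy(&mmu_lock);
	return 0;
}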
+4 −0
@@ -1487,6 +1487,10 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
 	/* start new resize */
 
 	resize = kzalloc(sizeof(*resize), GFP_KERNEL);
+	if (!resize) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	resize->order = shift;
 	resize->kvm = kvm;
 	INIT_WORK(&resize->work, resize_hpt_prepare_work);
+2 −5
@@ -168,8 +168,7 @@ union page_table_entry {
 		unsigned long z  : 1; /* Zero Bit */
 		unsigned long i  : 1; /* Page-Invalid Bit */
 		unsigned long p  : 1; /* DAT-Protection Bit */
-		unsigned long co : 1; /* Change-Recording Override */
-		unsigned long	 : 8;
+		unsigned long	 : 9;
 	};
 };
 
@@ -745,8 +744,6 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 		return PGM_PAGE_TRANSLATION;
 	if (pte.z)
 		return PGM_TRANSLATION_SPEC;
-	if (pte.co && !edat1)
-		return PGM_TRANSLATION_SPEC;
 	dat_protection |= pte.p;
 	raddr.pfra = pte.pfra;
 real_address:
@@ -1182,7 +1179,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
 		rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
 	if (!rc && pte.i)
 		rc = PGM_PAGE_TRANSLATION;
-	if (!rc && (pte.z || (pte.co && sg->edat_level < 1)))
+	if (!rc && pte.z)
 		rc = PGM_TRANSLATION_SPEC;
 shadow_page:
 	pte.p |= dat_protection;