
Commit 58f800d5 authored by Paolo Bonzini

Merge branch 'kvm-master' into HEAD

This merge brings in a couple of important SMM fixes, which make it
easier to test the latest KVM with unrestricted_guest=0 and to test
the in-progress work on SMM support in the firmware.

Conflicts:
	arch/x86/kvm/x86.c
Parents: 1330a017 73917739
arch/x86/include/asm/kvm_host.h  +2 −4
@@ -1251,10 +1251,8 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

int kvm_is_in_guest(void);

-int __x86_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem);
-int x86_set_memory_region(struct kvm *kvm,
-			  const struct kvm_userspace_memory_region *mem);
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

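For orientation, here is a minimal before/after sketch of a call site under the new interface (not part of the patch; the slot constant, address and size mirror the vmx_set_tss_addr hunk further down):

	/* Before: the caller filled in a kvm_userspace_memory_region by hand. */
	struct kvm_userspace_memory_region tss_mem = {
		.slot = TSS_PRIVATE_MEMSLOT,
		.guest_phys_addr = addr,
		.memory_size = PAGE_SIZE * 3,
		.flags = 0,
	};
	ret = x86_set_memory_region(kvm, &tss_mem);

	/* After: slot id, guest physical address and size are passed directly. */
	ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, PAGE_SIZE * 3);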
arch/x86/kvm/vmx.c  +6 −20
@@ -4227,17 +4227,13 @@ static void seg_setup(int seg)
static int alloc_apic_access_page(struct kvm *kvm)
{
	struct page *page;
-	struct kvm_userspace_memory_region kvm_userspace_mem;
	int r = 0;

	mutex_lock(&kvm->slots_lock);
	if (kvm->arch.apic_access_page_done)
		goto out;
-	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-	kvm_userspace_mem.flags = 0;
-	kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
-	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+	r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+				    APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
	if (r)
		goto out;

@@ -4262,17 +4258,12 @@ static int alloc_identity_pagetable(struct kvm *kvm)
{
	/* Called with kvm->slots_lock held. */

-	struct kvm_userspace_memory_region kvm_userspace_mem;
	int r = 0;

	BUG_ON(kvm->arch.ept_identity_pagetable_done);

-	kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-	kvm_userspace_mem.flags = 0;
-	kvm_userspace_mem.guest_phys_addr =
-		kvm->arch.ept_identity_map_addr;
-	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+	r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+				    kvm->arch.ept_identity_map_addr, PAGE_SIZE);

	return r;
}
@@ -5089,14 +5080,9 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	int ret;
-	struct kvm_userspace_memory_region tss_mem = {
-		.slot = TSS_PRIVATE_MEMSLOT,
-		.guest_phys_addr = addr,
-		.memory_size = PAGE_SIZE * 3,
-		.flags = 0,
-	};

-	ret = x86_set_memory_region(kvm, &tss_mem);
+	ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
+				    PAGE_SIZE * 3);
	if (ret)
		return ret;
	kvm->arch.tss_addr = addr;
arch/x86/kvm/x86.c  +75 −60
@@ -6531,6 +6531,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
	return 1;
}

+static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+		!vcpu->arch.apf.halted);
+}
+
static int vcpu_run(struct kvm_vcpu *vcpu)
{
	int r;
@@ -6539,8 +6545,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);

	for (;;) {
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-		    !vcpu->arch.apf.halted) {
+		if (kvm_vcpu_running(vcpu)) {
			r = vcpu_enter_guest(vcpu);
		} else {
			r = vcpu_block(kvm, vcpu);
@@ -7556,34 +7561,66 @@ void kvm_arch_sync_events(struct kvm *kvm)
	kvm_free_pit(kvm);
}

-int __x86_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem)
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
{
	int i, r;
+	u64 hva;
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *slot, old;

	/* Called with kvm->slots_lock held.  */
-	BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM);
+	if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
+		return -EINVAL;

+	slot = id_to_memslot(slots, id);
+	if (size) {
+		if (WARN_ON(slot->npages))
+			return -EEXIST;
+
+		/*
+		 * MAP_SHARED to prevent internal slot pages from being moved
+		 * by fork()/COW.
+		 */
+		hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
+			      MAP_SHARED | MAP_ANONYMOUS, 0);
+		if (IS_ERR((void *)hva))
+			return PTR_ERR((void *)hva);
+	} else {
+		if (!slot->npages)
+			return 0;
+
+		hva = 0;
+	}
+
+	old = *slot;
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-		struct kvm_userspace_memory_region m = *mem;
+		struct kvm_userspace_memory_region m;

-		m.slot |= i << 16;
+		m.slot = id | (i << 16);
+		m.flags = 0;
+		m.guest_phys_addr = gpa;
+		m.userspace_addr = hva;
+		m.memory_size = size;
		r = __kvm_set_memory_region(kvm, &m);
		if (r < 0)
			return r;
	}

+	if (!size) {
+		r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
+		WARN_ON(r < 0);
+	}
+
	return 0;
}
EXPORT_SYMBOL_GPL(__x86_set_memory_region);

-int x86_set_memory_region(struct kvm *kvm,
-			  const struct kvm_userspace_memory_region *mem)
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
{
	int r;

	mutex_lock(&kvm->slots_lock);
-	r = __x86_set_memory_region(kvm, mem);
+	r = __x86_set_memory_region(kvm, id, gpa, size);
	mutex_unlock(&kvm->slots_lock);

	return r;
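The rewritten helper now owns the backing memory for internal slots: a non-zero size vm_mmap()s anonymous memory and installs the slot in every address space, while size == 0 deletes the slot and vm_munmap()s whatever was mapped before. A hedged usage sketch, with identifiers taken from the surrounding hunks (callers of the unlocked __ variant must hold kvm->slots_lock):

	/* Create an internal slot; the helper vm_mmap()s anonymous backing memory. */
	r = x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
				  APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);

	/* Remove it again; size == 0 deletes the slot and vm_munmap()s its backing. */
	x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);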
@@ -7598,16 +7635,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
		 * unless the the memory map has changed due to process exit
		 * or fd copying.
		 */
-		struct kvm_userspace_memory_region mem;
-		memset(&mem, 0, sizeof(mem));
-		mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-		x86_set_memory_region(kvm, &mem);
-
-		mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-		x86_set_memory_region(kvm, &mem);
-
-		mem.slot = TSS_PRIVATE_MEMSLOT;
-		x86_set_memory_region(kvm, &mem);
+		x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
+		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
+		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
	}
	kvm_iommu_unmap_guest(kvm);
	kfree(kvm->arch.vpic);
@@ -7710,27 +7740,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change)
{
-	/*
-	 * Only private memory slots need to be mapped here since
-	 * KVM_SET_MEMORY_REGION ioctl is no longer supported.
-	 */
-	if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
-		unsigned long userspace_addr;
-
-		/*
-		 * MAP_SHARED to prevent internal slot pages from being moved
-		 * by fork()/COW.
-		 */
-		userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
-					 PROT_READ | PROT_WRITE,
-					 MAP_SHARED | MAP_ANONYMOUS, 0);
-
-		if (IS_ERR((void *)userspace_addr))
-			return PTR_ERR((void *)userspace_addr);
-
-		memslot->userspace_addr = userspace_addr;
-	}
-
	return 0;
}

@@ -7792,17 +7801,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
{
	int nr_mmu_pages = 0;

-	if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) {
-		int ret;
-
-		ret = vm_munmap(old->userspace_addr,
-				old->npages * PAGE_SIZE);
-		if (ret < 0)
-			printk(KERN_WARNING
-			       "kvm_vm_ioctl_set_memory_region: "
-			       "failed to munmap memory\n");
-	}
-
	if (!kvm->arch.n_requested_mmu_pages)
		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);

@@ -7851,19 +7849,36 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
	kvm_mmu_invalidate_zap_all_pages(kvm);
}

+static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+	if (!list_empty_careful(&vcpu->async_pf.done))
+		return true;
+
+	if (kvm_apic_has_events(vcpu))
+		return true;
+
+	if (vcpu->arch.pv.pv_unhalted)
+		return true;
+
+	if (atomic_read(&vcpu->arch.nmi_queued))
+		return true;
+
+	if (test_bit(KVM_REQ_SMI, &vcpu->requests))
+		return true;
+
+	if (kvm_arch_interrupt_allowed(vcpu) &&
+	    kvm_cpu_has_interrupt(vcpu))
+		return true;
+
+	return false;
+}
+
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
		kvm_x86_ops->check_nested_events(vcpu, false);

-	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-		!vcpu->arch.apf.halted)
-		|| !list_empty_careful(&vcpu->async_pf.done)
-		|| kvm_apic_has_events(vcpu)
-		|| vcpu->arch.pv.pv_unhalted
-		|| atomic_read(&vcpu->arch.nmi_queued) ||
-		(kvm_arch_interrupt_allowed(vcpu) &&
-		 kvm_cpu_has_interrupt(vcpu));
+	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)