
Commit 7bc30c23 authored by Linus Torvalds

Merge branch 'kvm-updates/2.6.39' of git://git.kernel.org/pub/scm/virt/kvm/kvm

* 'kvm-updates/2.6.39' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: move and fix substitute search for missing CPUID entries
  KVM: fix XSAVE bit scanning
  KVM: Enable async page fault processing
  KVM: fix crash on irqfd deassign
parents ccfeef0f bd22f5cf
arch/x86/kvm/x86.c +28 −9
@@ -2395,9 +2395,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
		int i;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-		for (i = 1; *nent < maxnent; ++i) {
-			if (entry[i - 1].eax == 0 && i != 2)
-				break;
+		for (i = 1; *nent < maxnent && i < 64; ++i) {
+			if (entry[i].eax == 0)
+				continue;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
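
The hunk above is the XSAVE bit scanning fix: the scan over CPUID leaf 0xD sub-leaves is now bounded at 64 entries and skips empty sub-leaves instead of stopping at the first hole in the state-component list. The same enumeration pattern can be tried from userspace; the standalone sketch below is illustrative only, assumes GCC or Clang on x86 with <cpuid.h>, and is not part of the patch.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, idx;

	/* Leaf 0xD is only meaningful if the CPU reports it at all. */
	if (__get_cpuid_max(0, NULL) < 0xd) {
		puts("CPUID leaf 0xD not reported by this CPU");
		return 0;
	}

	for (idx = 0; idx < 64; ++idx) {
		__cpuid_count(0xd, idx, eax, ebx, ecx, edx);
		/* Sub-leaves >= 2 describe individual XSAVE state components;
		 * an unsupported component reports eax == 0.  Skip the hole
		 * and keep scanning rather than stopping early. */
		if (idx >= 2 && eax == 0)
			continue;
		printf("CPUID(0xd, %2u): eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
		       idx, eax, ebx, ecx, edx);
	}
	return 0;
}
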
@@ -4958,12 +4958,6 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
			best = e;
			break;
		}
-		/*
-		 * Both basic or both extended?
-		 */
-		if (((e->function ^ function) & 0x80000000) == 0)
-			if (!best || e->function > best->function)
-				best = e;
	}
	return best;
}
@@ -4983,6 +4977,27 @@ int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
	return 36;
}

+/*
+ * If no match is found, check whether we exceed the vCPU's limit
+ * and return the content of the highest valid _standard_ leaf instead.
+ * This is to satisfy the CPUID specification.
+ */
+static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
+                                                  u32 function, u32 index)
+{
+	struct kvm_cpuid_entry2 *maxlevel;
+
+	maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
+	if (!maxlevel || maxlevel->eax >= function)
+		return NULL;
+	if (function & 0x80000000) {
+		maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
+		if (!maxlevel)
+			return NULL;
+	}
+	return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
+}
+
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 function, index;
@@ -4995,6 +5010,10 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
	kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
	best = kvm_find_cpuid_entry(vcpu, function, index);
+
+	if (!best)
+		best = check_cpuid_limit(vcpu, function, index);
+
	if (best) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
		kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
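
Together, check_cpuid_limit() and the new call site in kvm_emulate_cpuid() implement the CPUID specification's out-of-range rule: a request above the highest reported leaf is answered with the contents of the highest valid standard leaf. That rule can be observed on real hardware; the sketch below is illustrative only, assumes GCC or Clang on x86 with <cpuid.h>, and mirrors the spec behaviour rather than KVM's internals.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int max = __get_cpuid_max(0, NULL);	/* highest basic leaf */
	unsigned int a, b, c, d;

	__cpuid_count(max, 0, a, b, c, d);		/* highest valid leaf */
	printf("leaf 0x%08x: %08x %08x %08x %08x\n", max, a, b, c, d);

	__cpuid_count(max + 4, 0, a, b, c, d);		/* past the limit */
	printf("leaf 0x%08x: %08x %08x %08x %08x\n", max + 4, a, b, c, d);

	/* On CPUs that follow the spec the two lines match: the out-of-range
	 * request returns the data of the highest valid standard leaf. */
	return 0;
}
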
virt/kvm/eventfd.c +1 −1
@@ -90,7 +90,7 @@ irqfd_shutdown(struct work_struct *work)
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
-	flush_work(&irqfd->inject);
+	flush_work_sync(&irqfd->inject);

	/*
	 * It is now safe to release the object's resources
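
The one-line change above is the irqfd deassign fix: irqfd_shutdown() must be certain that a previously scheduled injection has finished before the irqfd is freed, and flush_work() did not give that guarantee in every case, while flush_work_sync() does. The following minimal module is a hypothetical sketch of that teardown pattern, assuming a 2.6.39-era tree where flush_work_sync() exists; it is not taken from the patch.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

static void demo_inject(struct work_struct *work)
{
	msleep(100);			/* stand-in for the real injection work */
	pr_info("demo work finished\n");
}

static DECLARE_WORK(demo_work, demo_inject);

static int __init demo_init(void)
{
	schedule_work(&demo_work);	/* queued on the system workqueue */
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * flush_work() does not guarantee in every case that a previously
	 * scheduled instance has completed; flush_work_sync() does, which
	 * is what a teardown path needs before freeing the object the
	 * work item touches.
	 */
	flush_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
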
virt/kvm/kvm_main.c +21 −2
@@ -1037,6 +1037,17 @@ static pfn_t get_fault_pfn(void)
	return fault_pfn;
}

+int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
+	unsigned long start, int write, struct page **page)
+{
+	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
+
+	if (write)
+		flags |= FOLL_WRITE;
+
+	return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
+}
+
static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
@@ -1070,7 +1081,14 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
		if (writable)
			*writable = write_fault;

-		npages = get_user_pages_fast(addr, 1, write_fault, page);
+		if (async) {
+			down_read(&current->mm->mmap_sem);
+			npages = get_user_page_nowait(current, current->mm,
+						     addr, write_fault, page);
+			up_read(&current->mm->mmap_sem);
+		} else
+			npages = get_user_pages_fast(addr, 1, write_fault,
+						     page);

		/* map read fault as writable if possible */
		if (unlikely(!write_fault) && npages == 1) {
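
The two hunks above wire in async page fault processing: get_user_page_nowait() passes FOLL_NOWAIT so the lookup does not sleep on disk I/O, and hva_to_pfn() uses it on the async path so the vCPU thread can schedule an async page fault instead of blocking. A loose userspace parallel of the idea (check whether a page is resident rather than blocking on a major fault) can be sketched with mincore(); the program below is an illustration only, takes a hypothetical file argument, and is not KVM code.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	int fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	long page = sysconf(_SC_PAGESIZE);
	char *map = mmap(NULL, page, PROT_READ, MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* mincore() reports residency without touching the page, so the
	 * check itself never triggers a blocking major fault. */
	unsigned char vec;
	if (mincore(map, page, &vec) == 0 && (vec & 1))
		printf("first page resident: a read would not block\n");
	else
		printf("first page not resident: a read would take a major fault\n");

	munmap(map, page);
	close(fd);
	return 0;
}
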
@@ -1093,7 +1111,8 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
			return get_fault_pfn();

		down_read(&current->mm->mmap_sem);
-		if (check_user_page_hwpoison(addr)) {
+		if (npages == -EHWPOISON ||
+			(!async && check_user_page_hwpoison(addr))) {
			up_read(&current->mm->mmap_sem);
			get_page(hwpoison_page);
			return page_to_pfn(hwpoison_page);