
Commit e9de42d8 authored by Linus Torvalds
Pull KVM fixes from Paolo Bonzini:
 "Reverting a 3.16 patch, fixing two bugs in device assignment (one has
  a CVE), and fixing some problems introduced during the merge window
  (the CMA bug came in via Andrew, the x86 ones via yours truly)"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  virt/kvm/assigned-dev.c: Set 'dev->irq_source_id' to '-1' after free it
  Revert "KVM: x86: Increase the number of fixed MTRR regs to 10"
  KVM: x86: do not check CS.DPL against RPL during task switch
  KVM: x86: Avoid emulating instructions on #UD mistakenly
  PPC, KVM, CMA: Fix regression caused by wrong get_order() use
  kvm: iommu: fix the third parameter of kvm_iommu_put_pages (CVE-2014-3601)
parents be816bc4 30d1e0e8
+3 −3
@@ -101,7 +101,7 @@ struct kvm_rma_info *kvm_alloc_rma()
	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
	if (!ri)
		return NULL;
-	page = cma_alloc(kvm_cma, kvm_rma_pages, get_order(kvm_rma_pages));
+	page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
	if (!page)
		goto err_out;
	atomic_set(&ri->use_count, 1);
@@ -135,12 +135,12 @@ struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	unsigned long align_pages = HPT_ALIGN_PAGES;

-	VM_BUG_ON(get_order(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	/* Old CPUs require HPT aligned on a multiple of its size */
	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		align_pages = nr_pages;
-	return cma_alloc(kvm_cma, nr_pages, get_order(align_pages));
+	return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
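
The two helpers behave quite differently: get_order() turns a size in bytes into an allocation order, while order_base_2() is simply the ceiling of log2 of its argument. kvm_rma_pages and align_pages are page counts (and, as I read cma_alloc(), its third argument is an order over pages), so get_order() under-estimated the order by PAGE_SHIFT. A minimal userspace sketch of the difference, with simplified stand-ins for the kernel macros and an assumed 4 KiB page size:

/* Userspace sketch contrasting get_order() and order_base_2() semantics.
 * The helpers below are simplified stand-ins for the kernel macros,
 * assuming PAGE_SHIFT == 12 (4 KiB pages).
 */
#include <stdio.h>

#define PAGE_SHIFT 12

/* ceil(log2(n)) for n >= 1, like the kernel's order_base_2() */
static unsigned int order_base_2(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

/* Smallest order such that 2^order pages cover 'size' BYTES,
 * like the kernel's get_order() */
static unsigned int get_order(unsigned long size)
{
	return order_base_2((size + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT);
}

int main(void)
{
	unsigned long nr_pages = 1UL << 16;	/* hypothetical region of 64K pages */

	/* Passing a PAGE COUNT to get_order() treats it as a byte count,
	 * so the result comes out PAGE_SHIFT orders too small. */
	printf("get_order(nr_pages)    = %u\n", get_order(nr_pages));     /* 4  */
	printf("order_base_2(nr_pages) = %u\n", order_base_2(nr_pages));  /* 16 */
	return 0;
}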

+1 −1
@@ -95,7 +95,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
-#define KVM_NR_VAR_MTRR 10
+#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

+4 −7
@@ -1491,9 +1491,6 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
			goto exception;
		break;
	case VCPU_SREG_CS:
-		if (in_task_switch && rpl != dpl)
-			goto exception;
-
		if (!(seg_desc.type & 8))
			goto exception;

@@ -4394,8 +4391,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)

	ctxt->execute = opcode.u.execute;

+	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
+		return EMULATION_FAILED;
+
	if (unlikely(ctxt->d &
-		     (NotImpl|EmulateOnUD|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
+		     (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
@@ -4406,9 +4406,6 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

-		if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
-			return EMULATION_FAILED;
-
		if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
			ctxt->op_bytes = 8;
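
The decode change hoists the EmulateOnUD test out of the flags slow path. In the old layout the test only ran when some flag in the wider mask was set, so an opcode with none of those flags could be emulated on a #UD it should never handle. A small standalone C sketch of that control-flow hazard (flag names and helpers are illustrative, not the emulator's real ones):

/* Sketch of the hazard fixed above: a mandatory check that lives inside a
 * branch guarded by a wider flag mask is silently skipped whenever none of
 * the masked flags happen to be set.
 */
#include <stdio.h>
#include <stdbool.h>

#define EMULATE_ON_UD  (1u << 0)
#define NEEDS_CHECK    (1u << 1)	/* stand-in for NotImpl|Stack|... */

static bool decode_buggy(unsigned int flags, bool came_from_ud)
{
	/* BUG: the #UD check only runs when some "rare" flag is present. */
	if (flags & (EMULATE_ON_UD | NEEDS_CHECK)) {
		if (came_from_ud && !(flags & EMULATE_ON_UD))
			return false;	/* refuse to emulate */
	}
	return true;			/* emulate */
}

static bool decode_fixed(unsigned int flags, bool came_from_ud)
{
	/* FIX: perform the #UD check unconditionally, before the slow path. */
	if (came_from_ud && !(flags & EMULATE_ON_UD))
		return false;
	if (flags & NEEDS_CHECK) {
		/* ... slow-path handling ... */
	}
	return true;
}

int main(void)
{
	/* Opcode with no special flags, reached via #UD: must NOT be emulated. */
	printf("buggy: %s\n", decode_buggy(0, true) ? "emulated" : "refused");
	printf("fixed: %s\n", decode_fixed(0, true) ? "emulated" : "refused");
	return 0;
}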

+3 −1
@@ -526,8 +526,10 @@ static int assign_guest_irq(struct kvm *kvm,
		dev->irq_requested_type |= guest_irq_type;
		if (dev->ack_notifier.gsi != -1)
			kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
-	} else
+	} else {
		kvm_free_irq_source_id(kvm, dev->irq_source_id);
+		dev->irq_source_id = -1;
+	}

	return r;
}
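
The extra brace block applies a common hardening pattern: reset the handle to its "unused" sentinel right after releasing it, so a later teardown path that tests the sentinel cannot release the same id twice. A standalone sketch of the pattern, with an illustrative struct and helpers rather than KVM's real API:

/* Sketch: clear a freed handle to its sentinel so later cleanup is a no-op. */
#include <stdio.h>

struct dev_state {
	int irq_source_id;	/* -1 means "no id allocated" */
};

static void free_irq_source_id(int id)
{
	printf("freeing irq source id %d\n", id);
}

static void error_path(struct dev_state *dev)
{
	free_irq_source_id(dev->irq_source_id);
	dev->irq_source_id = -1;	/* the added line: mark it gone */
}

static void teardown(struct dev_state *dev)
{
	/* Without the reset above, this would free the stale id again. */
	if (dev->irq_source_id != -1)
		free_irq_source_id(dev->irq_source_id);
}

int main(void)
{
	struct dev_state dev = { .irq_source_id = 7 };

	error_path(&dev);	/* frees id 7 and clears the field */
	teardown(&dev);		/* now correctly a no-op */
	return 0;
}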
+10 −9
@@ -61,6 +61,14 @@ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
	return pfn;
}

+static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
+{
+	unsigned long i;
+
+	for (i = 0; i < npages; ++i)
+		kvm_release_pfn_clean(pfn + i);
+}
+
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	gfn_t gfn, end_gfn;
@@ -123,6 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_address:"
			       "iommu failed to map pfn=%llx\n", pfn);
+			kvm_unpin_pages(kvm, pfn, page_size);
			goto unmap_pages;
		}

@@ -134,7 +143,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
	return 0;

unmap_pages:
-	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
+	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
	return r;
}

@@ -266,14 +275,6 @@ int kvm_iommu_map_guest(struct kvm *kvm)
	return r;
}

-static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
-{
-	unsigned long i;
-
-	for (i = 0; i < npages; ++i)
-		kvm_release_pfn_clean(pfn + i);
-}
-
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages)
{