Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a4df1ac1 authored by Linus Torvalds
Browse files

Merge branch 'kvm-updates-2.6.26' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm

* 'kvm-updates-2.6.26' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
  KVM: MMU: Fix is_empty_shadow_page() check
  KVM: MMU: Fix printk() format string
  KVM: IOAPIC: only set remote_irr if interrupt was injected
  KVM: MMU: reschedule during shadow teardown
  KVM: VMX: Clear CR4.VMXE in hardware_disable
  KVM: migrate PIT timer
  KVM: ppc: Report bad GFNs
  KVM: ppc: Use a read lock around MMU operations, and release it on error
  KVM: ppc: Remove unmatched kunmap() call
  KVM: ppc: add lwzx/stwz emulation
  KVM: ppc: Remove duplicate function
  KVM: s390: Fix race condition in kvm_s390_handle_wait
  KVM: s390: Send program check on access error
  KVM: s390: fix interrupt delivery
  KVM: s390: handle machine checks when guest is running
  KVM: s390: fix locking order problem in enable_sie
  KVM: s390: use yield instead of schedule to implement diag 0x44
  KVM: x86 emulator: fix hypercall return value on AMD
  KVM: ia64: fix zero extending for mmio ld1/2/4 emulation in KVM
parents 2a212f69 3c915510
Loading
Loading
Loading
Loading
+2 −1
Original line number Original line Diff line number Diff line
@@ -159,7 +159,8 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,


	if (p->u.ioreq.state == STATE_IORESP_READY) {
	if (p->u.ioreq.state == STATE_IORESP_READY) {
		if (dir == IOREQ_READ)
		if (dir == IOREQ_READ)
			*dest = p->u.ioreq.data;
			/* it's necessary to ensure zero extending */
			*dest = p->u.ioreq.data & (~0UL >> (64-(s*8)));
	} else
	} else
		panic_vm(vcpu);
		panic_vm(vcpu);
out:
out:
+4 −5
Original line number Original line Diff line number Diff line
@@ -116,8 +116,6 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
	struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
	struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
	struct page *page = vcpu->arch.shadow_pages[index];
	struct page *page = vcpu->arch.shadow_pages[index];


	kunmap(vcpu->arch.shadow_pages[index]);

	if (get_tlb_v(stlbe)) {
	if (get_tlb_v(stlbe)) {
		if (kvmppc_44x_tlbe_is_writable(stlbe))
		if (kvmppc_44x_tlbe_is_writable(stlbe))
			kvm_release_page_dirty(page);
			kvm_release_page_dirty(page);
@@ -144,18 +142,19 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
	stlbe = &vcpu->arch.shadow_tlb[victim];
	stlbe = &vcpu->arch.shadow_tlb[victim];


	/* Get reference to new page. */
	/* Get reference to new page. */
	down_write(&current->mm->mmap_sem);
	down_read(&current->mm->mmap_sem);
	new_page = gfn_to_page(vcpu->kvm, gfn);
	new_page = gfn_to_page(vcpu->kvm, gfn);
	if (is_error_page(new_page)) {
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page!\n");
		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
		kvm_release_page_clean(new_page);
		kvm_release_page_clean(new_page);
		up_read(&current->mm->mmap_sem);
		return;
		return;
	}
	}
	hpaddr = page_to_phys(new_page);
	hpaddr = page_to_phys(new_page);


	/* Drop reference to old page. */
	/* Drop reference to old page. */
	kvmppc_44x_shadow_release(vcpu, victim);
	kvmppc_44x_shadow_release(vcpu, victim);
	up_write(&current->mm->mmap_sem);
	up_read(&current->mm->mmap_sem);


	vcpu->arch.shadow_pages[victim] = new_page;
	vcpu->arch.shadow_pages[victim] = new_page;


+0 −33
Original line number Original line Diff line number Diff line
@@ -227,39 +227,6 @@ void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu)
	}
	}
}
}


static int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       vcpu->arch.last_inst);
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

/**
/**
 * kvmppc_handle_exit
 * kvmppc_handle_exit
 *
 *
+12 −0
Original line number Original line Diff line number Diff line
@@ -246,6 +246,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
	case 31:
	case 31:
		switch (get_xop(inst)) {
		switch (get_xop(inst)) {


		case 23:                                        /* lwzx */
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case 83:                                        /* mfmsr */
		case 83:                                        /* mfmsr */
			rt = get_rt(inst);
			rt = get_rt(inst);
			vcpu->arch.gpr[rt] = vcpu->arch.msr;
			vcpu->arch.gpr[rt] = vcpu->arch.msr;
@@ -267,6 +272,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
			kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
			kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
			break;
			break;


		case 151:                                       /* stwx */
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               4, 1);
			break;

		case 163:                                       /* wrteei */
		case 163:                                       /* wrteei */
			vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
			vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
			                 | (inst & MSR_EE);
			                 | (inst & MSR_EE);
+1 −1
Original line number Original line Diff line number Diff line
@@ -20,7 +20,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
	vcpu->stat.diagnose_44++;
	vcpu->stat.diagnose_44++;
	vcpu_put(vcpu);
	vcpu_put(vcpu);
	schedule();
	yield();
	vcpu_load(vcpu);
	vcpu_load(vcpu);
	return 0;
	return 0;
}
}
Loading