
Commit 11840e2f authored by Daniel Vetter

Merge tag 'for-kvmgt' of git://git.kernel.org/pub/scm/virt/kvm/kvm into drm-intel-next-queued



Paolo Bonzini writes:

The three KVM patches that KVMGT needs.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
parents 6a5d1db9 871b7ef2
+11 −1
@@ -4,7 +4,17 @@ KVM Lock Overview
 1. Acquisition Orders
 ---------------------
 
-(to be written)
+The acquisition orders for mutexes are as follows:
+
+- kvm->lock is taken outside vcpu->mutex
+
+- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
+
+- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
+  them together is quite rare.
+
+For spinlocks, kvm_lock is taken outside kvm->mmu_lock.  Everything
+else is a leaf: no other lock is taken inside the critical sections.
 
 2: Exception
 ------------
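
For readers scanning the new locking text, the nesting it documents can also be sketched in code. The snippet below is illustrative only -- it is not part of this merge, the helper name is invented, and it assumes kernel context with the usual struct kvm / struct kvm_vcpu lock fields:

/* Hedged sketch of the documented ordering; not taken from the patch. */
#include <linux/kvm_host.h>

static void lock_order_example(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
	mutex_lock(&kvm->lock);			/* outermost mutex */

	mutex_lock(&vcpu->mutex);		/* vcpu->mutex nests inside kvm->lock */
	mutex_unlock(&vcpu->mutex);

	mutex_lock(&kvm->slots_lock);		/* slots_lock also nests inside kvm->lock */
	mutex_lock(&kvm->irq_lock);		/* irq_lock inside slots_lock (rarely held together) */
	mutex_unlock(&kvm->irq_lock);
	mutex_unlock(&kvm->slots_lock);

	mutex_unlock(&kvm->lock);

	/* kvm->mmu_lock is a leaf spinlock; only the global kvm_lock is taken outside it. */
	spin_lock(&kvm->mmu_lock);
	spin_unlock(&kvm->mmu_lock);
}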
+4 −3
@@ -293,7 +293,10 @@ struct kvm_vcpu_arch {
 	/* Host KSEG0 address of the EI/DI offset */
 	void *kseg0_commpage;
 
-	u32 io_gpr;		/* GPR used as IO source/target */
+	/* Resume PC after MMIO completion */
+	unsigned long io_pc;
+	/* GPR used as IO source/target */
+	u32 io_gpr;
 
 	struct hrtimer comparecount_timer;
 	/* Count timer control KVM register */
@@ -315,8 +318,6 @@ struct kvm_vcpu_arch {
 	/* Bitmask of pending exceptions to be cleared */
 	unsigned long pending_exceptions_clr;
 
-	u32 pending_load_cause;
-
 	/* Save/Restore the entryhi register when are are preempted/scheduled back in */
 	unsigned long preempt_entryhi;
 
+19 −13
@@ -790,15 +790,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	enum emulation_result er = EMULATE_DONE;
 
-	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+	if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+		kvm_clear_c0_guest_status(cop0, ST0_ERL);
+		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+	} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
 		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
 			  kvm_read_c0_guest_epc(cop0));
 		kvm_clear_c0_guest_status(cop0, ST0_EXL);
 		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
 
-	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
-		kvm_clear_c0_guest_status(cop0, ST0_ERL);
-		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
 	} else {
 		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
 			vcpu->arch.pc);
@@ -1528,13 +1528,25 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
 					    struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DO_MMIO;
+	unsigned long curr_pc;
 	u32 op, rt;
 	u32 bytes;
 
 	rt = inst.i_format.rt;
 	op = inst.i_format.opcode;
 
-	vcpu->arch.pending_load_cause = cause;
+	/*
+	 * Find the resume PC now while we have safe and easy access to the
+	 * prior branch instruction, and save it for
+	 * kvm_mips_complete_mmio_load() to restore later.
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, cause);
+	if (er == EMULATE_FAIL)
+		return er;
+	vcpu->arch.io_pc = vcpu->arch.pc;
+	vcpu->arch.pc = curr_pc;
+
 	vcpu->arch.io_gpr = rt;
 
 	switch (op) {
@@ -2494,9 +2506,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 		goto done;
 	}
 
-	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
-	if (er == EMULATE_FAIL)
-		return er;
+	/* Restore saved resume PC */
+	vcpu->arch.pc = vcpu->arch.io_pc;
 
 	switch (run->mmio.len) {
 	case 4:
@@ -2518,11 +2529,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 		break;
 	}
 
-	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
-		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
-			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
-			  vcpu->mmio_needed);
-
 done:
 	return er;
 }
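
Taken together, the two kvm_mips_emulate_load / kvm_mips_complete_mmio_load hunks move the PC bookkeeping for MMIO loads from completion time to emulation time: the resume PC is computed while the faulting instruction (and any branch preceding a delay-slot load) is still conveniently readable, stashed in the new io_pc field, and simply restored once the MMIO data comes back. Below is a hedged, condensed sketch of that flow -- signatures simplified, function names invented, MMIO setup and error paths elided; it is not a literal copy of the kernel code:

/* Condensed illustration of the new flow; kernel context assumed. */
enum emulation_result emulate_load_sketch(union mips_instruction inst,
					  u32 cause, struct kvm_vcpu *vcpu)
{
	unsigned long curr_pc = vcpu->arch.pc;
	enum emulation_result er;

	/* Compute where the guest resumes after the load completes... */
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/* ...remember it, then restore the PC for the MMIO exit itself. */
	vcpu->arch.io_pc = vcpu->arch.pc;
	vcpu->arch.pc = curr_pc;
	vcpu->arch.io_gpr = inst.i_format.rt;

	/* ...fill in run->mmio (width, address) as before... */
	return EMULATE_DO_MMIO;
}

enum emulation_result complete_mmio_load_sketch(struct kvm_vcpu *vcpu,
						struct kvm_run *run)
{
	/* No update_pc() here any more: just jump to the saved resume PC. */
	vcpu->arch.pc = vcpu->arch.io_pc;

	/* ...copy run->mmio.data into the GPR recorded in io_gpr... */
	return EMULATE_DONE;
}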
+4 −1
@@ -426,7 +426,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	int cpu = smp_processor_id();
+	int i, cpu = smp_processor_id();
 	unsigned int gasid;
 
 	/*
@@ -442,6 +442,9 @@ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
 						vcpu);
 			vcpu->arch.guest_user_asid[cpu] =
 				vcpu->arch.guest_user_mm.context.asid[cpu];
+			for_each_possible_cpu(i)
+				if (i != cpu)
+					vcpu->arch.guest_user_asid[cpu] = 0;
 			vcpu->arch.last_user_gasid = gasid;
 		}
 	}
+0 −4
@@ -260,13 +260,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
 						asid_version_mask(cpu)) {
-		u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
-				KVM_ENTRYHI_ASID;
-
 		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
 		vcpu->arch.guest_user_asid[cpu] =
 		    vcpu->arch.guest_user_mm.context.asid[cpu];
-		vcpu->arch.last_user_gasid = gasid;
 		newasid++;
 
 		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,