Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 03d25c5b authored by Alexander Graf
Browse files

KVM: PPC: Use same kvmppc_prepare_to_enter code for booke and book3s_pr



We need to do the same things when preparing to enter a guest for booke and
book3s_pr cores. Fold the generic code into a generic function that both call.

Signed-off-by: Alexander Graf <agraf@suse.de>
parent 2d8185d4
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -112,6 +112,7 @@ extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
				     ulong val);
extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
				     ulong *val);
extern void kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);
@@ -150,6 +151,8 @@ extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
+6 −16
Original line number Diff line number Diff line
@@ -88,6 +88,10 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
	kvmppc_giveup_ext(vcpu, MSR_VSX);
}

/*
 * book3s_pr has no core-specific vcpu->requests to handle, so this is a
 * deliberate no-op that satisfies the kvmppc_core_check_requests() hook
 * called from the shared kvmppc_prepare_to_enter() loop.
 */
void kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong smsr = vcpu->arch.shared->msr;
@@ -815,19 +819,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		 * again due to a host external interrupt.
		 */
		__hard_irq_disable();
		if (signal_pending(current)) {
			__hard_irq_enable();
#ifdef EXIT_DEBUG
			printk(KERN_EMERG "KVM: Going back to host\n");
#endif
			vcpu->stat.signal_exits++;
		if (kvmppc_prepare_to_enter(vcpu)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			/* In case an interrupt came in that was triggered
			 * from userspace (like DEC), we need to check what
			 * to inject now! */
			kvmppc_core_prepare_to_enter(vcpu);
		}
	}

@@ -1029,8 +1023,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
		goto out;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
@@ -1038,9 +1030,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
	 * a host external interrupt.
	 */
	__hard_irq_disable();

	/* No need to go into the guest when all we do is going out */
	if (signal_pending(current)) {
	if (kvmppc_prepare_to_enter(vcpu)) {
		__hard_irq_enable();
		kvm_run->exit_reason = KVM_EXIT_INTR;
		ret = -EINTR;
+1 −57
Original line number Diff line number Diff line
@@ -455,10 +455,8 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
	return r;
}

static void kvmppc_check_requests(struct kvm_vcpu *vcpu)
void kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	trace_kvm_check_requests(vcpu);

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
@@ -467,60 +465,6 @@ static void kvmppc_check_requests(struct kvm_vcpu *vcpu)
#endif
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * Returns !0 when the caller should bail out to userspace instead of
 * entering the guest: either a signal is pending for the current task,
 * or the vcpu has been kicked into EXITING_GUEST_MODE.
 */
static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		/* Give the scheduler a chance with irqs on, then retry the
		 * whole set of checks from the top with irqs off again. */
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			r = 1;
			break;
		}

		/* NOTE(review): full barrier orders this requests load against
		 * the checks above; presumably pairs with the request-posting
		 * side (kvm_make_request callers) -- confirm. */
		smp_mb();
		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			kvmppc_check_requests(vcpu);
			local_irq_disable();
			continue;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		if (vcpu->mode == EXITING_GUEST_MODE) {
			r = 1;
			break;
		}

		/* Going into guest context! Yay!  The wmb publishes the mode
		 * change before any subsequent guest-entry stores. */
		vcpu->mode = IN_GUEST_MODE;
		smp_wmb();

		break;
	}

	return r;
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
+57 −0
Original line number Diff line number Diff line
@@ -47,6 +47,63 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
	return 1;
}

#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * Returns !0 when the caller should bail out to userspace instead of
 * entering the guest: either a signal is pending for the current task,
 * or the vcpu has been kicked into EXITING_GUEST_MODE.
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		/* Give the scheduler a chance with irqs on, then retry the
		 * whole set of checks from the top with irqs off again. */
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			r = 1;
			break;
		}

		/* NOTE(review): full barrier orders this requests load against
		 * the checks above; presumably pairs with the request-posting
		 * side (kvm_make_request callers) -- confirm. */
		smp_mb();
		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			/* per-core hook: booke handles timer/TLB requests here,
			 * book3s_pr's implementation is an empty stub */
			kvmppc_core_check_requests(vcpu);
			local_irq_disable();
			continue;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		if (vcpu->mode == EXITING_GUEST_MODE) {
			r = 1;
			break;
		}

		/* Going into guest context! Yay!  The wmb publishes the mode
		 * change before any subsequent guest-entry stores. */
		vcpu->mode = IN_GUEST_MODE;
		smp_wmb();

		break;
	}

	return r;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);