Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6edaa530 authored by Paolo Bonzini
Browse files

KVM: remove kvm_guest_enter/exit wrappers



Use the functions from context_tracking.h directly.

Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ebaac173
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -615,7 +615,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
-		__kvm_guest_enter();
+		guest_enter_irqoff();
		vcpu->mode = IN_GUEST_MODE;

		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
@@ -641,14 +641,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
		local_irq_enable();

		/*
-		 * We do local_irq_enable() before calling kvm_guest_exit() so
+		 * We do local_irq_enable() before calling guest_exit() so
		 * that if a timer interrupt hits while running the guest we
		 * account that tick as being spent in the guest.  We enable
-		 * preemption after calling kvm_guest_exit() so that if we get
+		 * preemption after calling guest_exit() so that if we get
		 * preempted we make sure ticks after that is not counted as
		 * guest time.
		 */
-		kvm_guest_exit();
+		guest_exit();
		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/*
+2 −2
Original line number Diff line number Diff line
@@ -406,7 +406,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

-	__kvm_guest_enter();
+	guest_enter_irqoff();

	/* Disable hardware page table walking while in guest */
	htw_stop();
@@ -418,7 +418,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
	/* Re-enable HTW before enabling interrupts */
	htw_start();

-	__kvm_guest_exit();
+	guest_exit_irqoff();
	local_irq_enable();

	if (vcpu->sigset_active)
+2 −2
Original line number Diff line number Diff line
@@ -2522,7 +2522,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
		list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list)
			spin_unlock(&pvc->lock);

-	kvm_guest_enter();
+	guest_enter();

	srcu_idx = srcu_read_lock(&vc->kvm->srcu);

@@ -2570,7 +2570,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
-	kvm_guest_exit();
+	guest_exit();

	for (sub = 0; sub < core_info.n_subcores; ++sub)
		list_for_each_entry_safe(pvc, vcnext, &core_info.vcs[sub],
+2 −2
Original line number Diff line number Diff line
@@ -914,7 +914,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
-	kvm_guest_exit();
+	guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
@@ -1531,7 +1531,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)

	kvmppc_clear_debug(vcpu);

-	/* No need for kvm_guest_exit. It's done in handle_exit.
+	/* No need for guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Make sure we save the guest FPU/Altivec/VSX state */
+2 −2
Original line number Diff line number Diff line
@@ -776,7 +776,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

-	/* No need for kvm_guest_exit. It's done in handle_exit.
+	/* No need for guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
@@ -1012,7 +1012,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
	}

	trace_kvm_exit(exit_nr, vcpu);
-	__kvm_guest_exit();
+	guest_exit_irqoff();

	local_irq_enable();

Loading