Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 822f312d, authored by Sebastian Andrzej Siewior, committed by Paolo Bonzini
Browse files

kvm: x86: make kvm_{load|put}_guest_fpu() static



The functions
	kvm_load_guest_fpu()
	kvm_put_guest_fpu()

are only used locally, make them static. This requires also that both
functions are moved because they are used before their implementation.
Those functions were exported (via EXPORT_SYMBOL) before commit
e5bb4025 ("KVM: Drop kvm_{load,put}_guest_fpu() exports").

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a1efa9b7
Loading
Loading
Loading
Loading
+23 −23
Original line number Diff line number Diff line
@@ -7835,6 +7835,29 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
	return 0;
}

/* Swap (qemu) user FPU context for the guest FPU context. */
static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
	/* PKRU is separately restored in kvm_x86_ops->run.  */
	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
				~XFEATURE_MASK_PKRU);
	preempt_enable();
	trace_kvm_fpu(1);
}

/* When vcpu_run ends, restore user space FPU context. */
static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
	copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
	preempt_enable();
	++vcpu->stat.fpu_reload;
	trace_kvm_fpu(0);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
@@ -8412,29 +8435,6 @@ static void fx_init(struct kvm_vcpu *vcpu)
	vcpu->arch.cr0 |= X86_CR0_ET;
}

/* Swap (qemu) user FPU context for the guest FPU context. */
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
	/* PKRU is separately restored in kvm_x86_ops->run.  */
	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
				~XFEATURE_MASK_PKRU);
	preempt_enable();
	trace_kvm_fpu(1);
}

/* When vcpu_run ends, restore user space FPU context. */
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
	copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
	preempt_enable();
	++vcpu->stat.fpu_reload;
	trace_kvm_fpu(0);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+0 −2
Original line number Diff line number Diff line
@@ -733,8 +733,6 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);