Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0e75c54f authored by Ingo Molnar
Browse files

x86/fpu: Rename restore_fpu_checking() to copy_fpstate_to_fpregs()



fpu_restore_checking() is a helper function of restore_fpu_checking(),
but this is not apparent from the naming.

Both copy fpstate contents to fpregs, while the fuller variant does
a full copy without leaking information.

So rename them to:

    copy_fpstate_to_fpregs()
  __copy_fpstate_to_fpregs()

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 50338615
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -289,7 +289,7 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)

extern void fpu__save(struct fpu *fpu);

static inline int fpu_restore_checking(struct fpu *fpu)
static inline int __copy_fpstate_to_fpregs(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(&fpu->state.xsave);
@@ -299,7 +299,7 @@ static inline int fpu_restore_checking(struct fpu *fpu)
		return frstor_checking(&fpu->state.fsave);
}

static inline int restore_fpu_checking(struct fpu *fpu)
static inline int copy_fpstate_to_fpregs(struct fpu *fpu)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
@@ -314,7 +314,7 @@ static inline int restore_fpu_checking(struct fpu *fpu)
			: : [addr] "m" (fpu->fpregs_active));
	}

	return fpu_restore_checking(fpu);
	return __copy_fpstate_to_fpregs(fpu);
}

/*
@@ -520,7 +520,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
	if (fpu_switch.preload) {
		if (unlikely(restore_fpu_checking(new_fpu)))
		if (unlikely(copy_fpstate_to_fpregs(new_fpu)))
			fpu__reset(new_fpu);
	}
}
+2 −2
Original line number Diff line number Diff line
@@ -115,7 +115,7 @@ void __kernel_fpu_end(void)
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active) {
		if (WARN_ON(restore_fpu_checking(fpu)))
		if (WARN_ON(copy_fpstate_to_fpregs(fpu)))
			fpu__reset(fpu);
	} else {
		__fpregs_deactivate_hw();
@@ -338,7 +338,7 @@ void fpu__restore(void)
	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	fpregs_activate(fpu);
	if (unlikely(restore_fpu_checking(fpu))) {
	if (unlikely(copy_fpstate_to_fpregs(fpu))) {
		fpu__reset(fpu);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
+1 −1
Original line number Diff line number Diff line
@@ -7030,7 +7030,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
	kvm_put_guest_xcr0(vcpu);
	vcpu->guest_fpu_loaded = 1;
	__kernel_fpu_begin();
	fpu_restore_checking(&vcpu->arch.guest_fpu);
	__copy_fpstate_to_fpregs(&vcpu->arch.guest_fpu);
	trace_kvm_fpu(1);
}