
Commit c2085059 authored by Anton Blanchard, committed by Michael Ellerman

powerpc: create giveup_all()

Create a single function that gives everything up (FP, VMX, VSX, SPE).
Doing this all at once means we only do one MSR write.
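
For illustration only (not part of the patch): the toy program below models the write-counting argument in user space. The msr_check_and_set()/msr_check_and_clear() names mirror the helpers this series uses, where each per-facility giveup_*() call brackets its own work with an MSR enable/disable pair; giveup_all() brackets every facility with a single pair. The bit values and the write counter are made up for the sketch and are not the real MSR layout or kernel code.

/*
 * Illustrative user-space model only: counts how many simulated MSR
 * writes each approach needs.  The MSR_* values and helpers below are
 * stand-ins, not the real kernel definitions.
 */
#include <stdio.h>

#define MSR_FP  0x1UL	/* toy bit values, not the real MSR layout */
#define MSR_VEC 0x2UL
#define MSR_VSX 0x4UL
#define MSR_SPE 0x8UL

static unsigned long fake_msr;
static int msr_writes;

static void msr_check_and_set(unsigned long bits)
{
	if ((fake_msr & bits) != bits) {
		fake_msr |= bits;
		msr_writes++;	/* would be an MSR write in the kernel */
	}
}

static void msr_check_and_clear(unsigned long bits)
{
	if (fake_msr & bits) {
		fake_msr &= ~bits;
		msr_writes++;
	}
}

/* Old pattern: each giveup_*() wraps its own enable/disable pair. */
static void giveup_one(unsigned long bit)
{
	msr_check_and_set(bit);
	/* ... __giveup_fpu()/__giveup_altivec()/... would run here ... */
	msr_check_and_clear(bit);
}

int main(void)
{
	unsigned long all = MSR_FP | MSR_VEC | MSR_VSX | MSR_SPE;

	msr_writes = 0;
	giveup_one(MSR_FP);
	giveup_one(MSR_VEC);
	giveup_one(MSR_VSX);
	giveup_one(MSR_SPE);
	printf("per-facility giveup: %d MSR writes\n", msr_writes);

	msr_writes = 0;
	msr_check_and_set(all);		/* giveup_all(): one pair for everything */
	msr_check_and_clear(all);
	printf("giveup_all:          %d MSR writes\n", msr_writes);
	return 0;
}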

A context switch microbenchmark using yield():

http://ozlabs.org/~anton/junkcode/context_switch2.c

./context_switch2 --test=yield --fp --altivec --vector 0 0

shows an improvement of 3% on POWER8.
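
The linked context_switch2.c is the actual benchmark. Purely as a hedged stand-in, the sketch below shows the general shape of such a yield() test on Linux: two processes pinned to one CPU, each touching the FPU before sched_yield(), without the --fp/--altivec/--vector option handling of the real tool.

/*
 * Minimal stand-in for a yield()-based context switch microbenchmark.
 * Not the real context_switch2.c: it only sketches the idea of two
 * tasks pinned to one CPU, each dirtying FP state and yielding.
 * Runs until interrupted.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static void pin_to_cpu(int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	if (sched_setaffinity(0, sizeof(mask), &mask))
		perror("sched_setaffinity");
}

static void yield_loop(void)
{
	volatile double x = 1.0;
	unsigned long iterations = 0;

	for (;;) {
		x *= 1.000001;	/* keep FP state live across the switch */
		sched_yield();
		if (++iterations % 10000000UL == 0)
			printf("%lu yields\n", iterations);
	}
}

int main(void)
{
	pin_to_cpu(0);		/* both tasks share CPU 0; child inherits affinity */
	if (fork() == 0)
		yield_loop();
	yield_loop();
	return 0;
}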

Signed-off-by: Anton Blanchard <anton@samba.org>
[mpe: giveup_all() needs to be EXPORT_SYMBOL'ed]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 1f2e25b2
arch/powerpc/include/asm/switch_to.h  +1 −0
@@ -26,6 +26,7 @@ extern void __giveup_vsx(struct task_struct *);
 extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void load_up_spe(struct task_struct *);
+extern void giveup_all(struct task_struct *);
 extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
 #ifdef CONFIG_PPC_FPU
arch/powerpc/kernel/process.c  +61 −15
@@ -308,6 +308,65 @@ void flush_spe_to_thread(struct task_struct *tsk)
 }
 #endif /* CONFIG_SPE */
 
+static unsigned long msr_all_available;
+
+static int __init init_msr_all_available(void)
+{
+#ifdef CONFIG_PPC_FPU
+	msr_all_available |= MSR_FP;
+#endif
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		msr_all_available |= MSR_VEC;
+#endif
+#ifdef CONFIG_VSX
+	if (cpu_has_feature(CPU_FTR_VSX))
+		msr_all_available |= MSR_VSX;
+#endif
+#ifdef CONFIG_SPE
+	if (cpu_has_feature(CPU_FTR_SPE))
+		msr_all_available |= MSR_SPE;
+#endif
+
+	return 0;
+}
+early_initcall(init_msr_all_available);
+
+void giveup_all(struct task_struct *tsk)
+{
+	unsigned long usermsr;
+
+	if (!tsk->thread.regs)
+		return;
+
+	usermsr = tsk->thread.regs->msr;
+
+	if ((usermsr & msr_all_available) == 0)
+		return;
+
+	msr_check_and_set(msr_all_available);
+
+#ifdef CONFIG_PPC_FPU
+	if (usermsr & MSR_FP)
+		__giveup_fpu(tsk);
+#endif
+#ifdef CONFIG_ALTIVEC
+	if (usermsr & MSR_VEC)
+		__giveup_altivec(tsk);
+#endif
+#ifdef CONFIG_VSX
+	if (usermsr & MSR_VSX)
+		__giveup_vsx(tsk);
+#endif
+#ifdef CONFIG_SPE
+	if (usermsr & MSR_SPE)
+		__giveup_spe(tsk);
+#endif
+
+	msr_check_and_clear(msr_all_available);
+}
+EXPORT_SYMBOL(giveup_all);
+
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 void do_send_trap(struct pt_regs *regs, unsigned long address,
 		  unsigned long error_code, int signal_code, int breakpt)
@@ -839,21 +898,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 	__switch_to_tm(prev);
 
-	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
-		giveup_fpu(prev);
-#ifdef CONFIG_ALTIVEC
-	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
-		giveup_altivec(prev);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
-		/* VMX and FPU registers are already save here */
-		__giveup_vsx(prev);
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
-	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
-		giveup_spe(prev);
-#endif /* CONFIG_SPE */
+	/* Save FPU, Altivec, VSX and SPE state */
+	giveup_all(prev);
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	switch_booke_debug_regs(&new->thread.debug);
arch/powerpc/kvm/book3s_pr.c  +2 −15
@@ -1490,21 +1490,8 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		goto out;
 	/* interrupts now hard-disabled */
 
-	/* Save FPU state in thread_struct */
-	if (current->thread.regs->msr & MSR_FP)
-		giveup_fpu(current);
-
-#ifdef CONFIG_ALTIVEC
-	/* Save Altivec state in thread_struct */
-	if (current->thread.regs->msr & MSR_VEC)
-		giveup_altivec(current);
-#endif
-
-#ifdef CONFIG_VSX
-	/* Save VSX state in thread_struct */
-	if (current->thread.regs->msr & MSR_VSX)
-		__giveup_vsx(current);
-#endif
+	/* Save FPU, Altivec and VSX state */
+	giveup_all(current);
 
 	/* Preload FPU if it's enabled */
 	if (kvmppc_get_msr(vcpu) & MSR_FP)