Commit a0e72cf1 authored by Anton Blanchard, committed by Michael Ellerman

powerpc: Create msr_check_and_{set,clear}()



Create helper functions to set and clear MSR bits after first
checking if they are already set. Grouping them will make it
easy to avoid the MSR writes in a subsequent optimisation.
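
For illustration, here is a minimal standalone sketch of the check-before-write pattern these helpers implement. It is not kernel code: the MSR is modelled as a plain variable, mfmsr()/mtmsr_isync() are userspace stand-ins, the bit positions and the msr_writes counter are invented for the demo, and the CONFIG_VSX coupling of MSR_FP to MSR_VSX is omitted.

	/*
	 * Standalone model of msr_check_and_{set,clear}(). The "MSR" is an
	 * ordinary variable so the skipped writes can be observed directly.
	 */
	#include <stdio.h>

	#define MSR_FP	(1UL << 13)	/* illustrative bit positions only */
	#define MSR_VEC	(1UL << 25)

	static unsigned long fake_msr;		/* stands in for the real MSR */
	static unsigned long msr_writes;	/* counts simulated mtmsr's */

	static unsigned long mfmsr(void)
	{
		return fake_msr;
	}

	static void mtmsr_isync(unsigned long val)
	{
		fake_msr = val;
		msr_writes++;	/* each write is the cost being avoided */
	}

	static void msr_check_and_set(unsigned long bits)
	{
		unsigned long oldmsr = mfmsr();
		unsigned long newmsr = oldmsr | bits;

		if (oldmsr != newmsr)	/* skip the write if already set */
			mtmsr_isync(newmsr);
	}

	static void msr_check_and_clear(unsigned long bits)
	{
		unsigned long oldmsr = mfmsr();
		unsigned long newmsr = oldmsr & ~bits;

		if (oldmsr != newmsr)	/* skip the write if already clear */
			mtmsr_isync(newmsr);
	}

	int main(void)
	{
		msr_check_and_set(MSR_FP);	/* first enable: one write */
		msr_check_and_set(MSR_FP);	/* already set: no write */
		msr_check_and_clear(MSR_VEC);	/* already clear: no write */
		msr_check_and_clear(MSR_FP);	/* one write to clear */

		printf("MSR writes: %lu of 4 calls\n", msr_writes);
		return 0;
	}

Run, the sketch reports 2 writes for 4 calls: redundant MSR updates are elided, which is the saving the giveup_*() / enable_kernel_*() pairs below get by calling these helpers.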

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 1552cd70
arch/powerpc/kernel/process.c +52 −55
@@ -87,23 +87,46 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
-#ifdef CONFIG_PPC_FPU
-void giveup_fpu(struct task_struct *tsk)
+static void msr_check_and_set(unsigned long bits)
 {
-	u64 oldmsr = mfmsr();
-	u64 newmsr;
+	unsigned long oldmsr = mfmsr();
+	unsigned long newmsr;
 
-	check_if_tm_restore_required(tsk);
+	newmsr = oldmsr | bits;
 
-	newmsr = oldmsr | MSR_FP;
 #ifdef CONFIG_VSX
-	if (cpu_has_feature(CPU_FTR_VSX))
+	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
 		newmsr |= MSR_VSX;
 #endif
 
 	if (oldmsr != newmsr)
 		mtmsr_isync(newmsr);
+}
+
+static void msr_check_and_clear(unsigned long bits)
+{
+	unsigned long oldmsr = mfmsr();
+	unsigned long newmsr;
+
+	newmsr = oldmsr & ~bits;
+
+#ifdef CONFIG_VSX
+	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
+		newmsr &= ~MSR_VSX;
+#endif
+
+	if (oldmsr != newmsr)
+		mtmsr_isync(newmsr);
+}
+
+#ifdef CONFIG_PPC_FPU
+void giveup_fpu(struct task_struct *tsk)
+{
+	check_if_tm_restore_required(tsk);
 
+	msr_check_and_set(MSR_FP);
 	__giveup_fpu(tsk);
+	msr_check_and_clear(MSR_FP);
 }
 EXPORT_SYMBOL(giveup_fpu);

@@ -144,30 +167,21 @@ void enable_kernel_fp(void)
 {
 	WARN_ON(preemptible());
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
-		giveup_fpu(current);
-	} else {
-		u64 oldmsr = mfmsr();
+	msr_check_and_set(MSR_FP);
 
-		if (!(oldmsr & MSR_FP))
-			mtmsr_isync(oldmsr | MSR_FP);
-	}
+	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
+		__giveup_fpu(current);
 }
 EXPORT_SYMBOL(enable_kernel_fp);

 #ifdef CONFIG_ALTIVEC
 void giveup_altivec(struct task_struct *tsk)
 {
-	u64 oldmsr = mfmsr();
-	u64 newmsr;
-
 	check_if_tm_restore_required(tsk);
 
-	newmsr = oldmsr | MSR_VEC;
-	if (oldmsr != newmsr)
-		mtmsr_isync(newmsr);
-
+	msr_check_and_set(MSR_VEC);
 	__giveup_altivec(tsk);
+	msr_check_and_clear(MSR_VEC);
 }
 EXPORT_SYMBOL(giveup_altivec);

@@ -175,14 +189,10 @@ void enable_kernel_altivec(void)
 {
 	WARN_ON(preemptible());
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
-		giveup_altivec(current);
-	} else {
-		u64 oldmsr = mfmsr();
+	msr_check_and_set(MSR_VEC);
 
-		if (!(oldmsr & MSR_VEC))
-			mtmsr_isync(oldmsr | MSR_VEC);
-	}
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
+		__giveup_altivec(current);
 }
 EXPORT_SYMBOL(enable_kernel_altivec);

@@ -207,20 +217,15 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #ifdef CONFIG_VSX
 void giveup_vsx(struct task_struct *tsk)
 {
-	u64 oldmsr = mfmsr();
-	u64 newmsr;
-
 	check_if_tm_restore_required(tsk);
 
-	newmsr = oldmsr | (MSR_FP|MSR_VEC|MSR_VSX);
-	if (oldmsr != newmsr)
-		mtmsr_isync(newmsr);
-
+	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
 	if (tsk->thread.regs->msr & MSR_FP)
 		__giveup_fpu(tsk);
 	if (tsk->thread.regs->msr & MSR_VEC)
 		__giveup_altivec(tsk);
 	__giveup_vsx(tsk);
+	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
 EXPORT_SYMBOL(giveup_vsx);

@@ -228,13 +233,14 @@ void enable_kernel_vsx(void)
 {
 	WARN_ON(preemptible());
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
-		giveup_vsx(current);
-	} else {
-		u64 oldmsr = mfmsr();
+	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
 
-		if (!(oldmsr & MSR_VSX))
-			mtmsr_isync(oldmsr | MSR_VSX);
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+		if (current->thread.regs->msr & MSR_FP)
+			__giveup_fpu(current);
+		if (current->thread.regs->msr & MSR_VEC)
+			__giveup_altivec(current);
+		__giveup_vsx(current);
 	}
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
@@ -256,16 +262,11 @@ EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
 #ifdef CONFIG_SPE
 void giveup_spe(struct task_struct *tsk)
 {
-	u64 oldmsr = mfmsr();
-	u64 newmsr;
-
 	check_if_tm_restore_required(tsk);
 
-	newmsr = oldmsr | MSR_SPE;
-	if (oldmsr != newmsr)
-		mtmsr_isync(newmsr);
-
+	msr_check_and_set(MSR_SPE);
 	__giveup_spe(tsk);
+	msr_check_and_clear(MSR_SPE);
 }
 EXPORT_SYMBOL(giveup_spe);

@@ -273,14 +274,10 @@ void enable_kernel_spe(void)
 {
 	WARN_ON(preemptible());
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
-		giveup_spe(current);
-	} else {
-		u64 oldmsr = mfmsr();
+	msr_check_and_set(MSR_SPE);
 
-		if (!(oldmsr & MSR_SPE))
-			mtmsr_isync(oldmsr | MSR_SPE);
-	}
+	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
+		__giveup_spe(current);
 }
 EXPORT_SYMBOL(enable_kernel_spe);