
Commit 611b0e5c authored by Anton Blanchard, committed by Michael Ellerman

powerpc: Create mtmsrd_isync()

mtmsrd_isync() will do an mtmsrd followed by an isync on older
processors. On newer processors we avoid the isync via a feature fixup.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent b86fd2bd
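
The calling pattern the helper enables, used verbatim in the process.c hunks below, is a read-test-write sequence that skips the MSR write entirely when the facility bit is already set (a minimal sketch; MSR_FP stands in for whichever facility bit a caller needs):

	u64 oldmsr = mfmsr();			/* snapshot the current MSR */

	if (!(oldmsr & MSR_FP))			/* facility already enabled? */
		mtmsr_isync(oldmsr | MSR_FP);	/* set it; the trailing isync is
						   patched to a nop on ISA 2.06+ */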
arch/powerpc/include/asm/reg.h  +8 −0
@@ -1193,12 +1193,20 @@
 #define __mtmsrd(v, l)	asm volatile("mtmsrd %0," __stringify(l) \
 				     : : "r" (v) : "memory")
 #define mtmsr(v)	__mtmsrd((v), 0)
+#define __MTMSR		"mtmsrd"
 #else
 #define mtmsr(v)	asm volatile("mtmsr %0" : \
 				     : "r" ((unsigned long)(v)) \
 				     : "memory")
+#define __MTMSR		"mtmsr"
 #endif
 
+static inline void mtmsr_isync(unsigned long val)
+{
+	asm volatile(__MTMSR " %0; " ASM_FTR_IFCLR("isync", "nop", %1) : :
+			"r" (val), "i" (CPU_FTR_ARCH_206) : "memory");
+}
+
 #define mfspr(rn)	({unsigned long rval; \
 			asm volatile("mfspr %0," __stringify(rn) \
 				: "=r" (rval)); rval;})
arch/powerpc/kernel/process.c  +22 −8
@@ -130,7 +130,10 @@ void enable_kernel_fp(void)
 		check_if_tm_restore_required(current);
 		giveup_fpu(current);
 	} else {
-		giveup_fpu(NULL);	/* just enables FP for kernel */
+		u64 oldmsr = mfmsr();
+
+		if (!(oldmsr & MSR_FP))
+			mtmsr_isync(oldmsr | MSR_FP);
 	}
 }
 EXPORT_SYMBOL(enable_kernel_fp);
@@ -144,7 +147,10 @@ void enable_kernel_altivec(void)
 		check_if_tm_restore_required(current);
 		giveup_altivec(current);
 	} else {
-		giveup_altivec_notask();
+		u64 oldmsr = mfmsr();
+
+		if (!(oldmsr & MSR_VEC))
+			mtmsr_isync(oldmsr | MSR_VEC);
 	}
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
@@ -173,10 +179,14 @@ void enable_kernel_vsx(void)
 {
 	WARN_ON(preemptible());
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
 		giveup_vsx(current);
-	else
-		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
+	} else {
+		u64 oldmsr = mfmsr();
+
+		if (!(oldmsr & MSR_VSX))
+			mtmsr_isync(oldmsr | MSR_VSX);
+	}
 }
 EXPORT_SYMBOL(enable_kernel_vsx);

@@ -209,10 +219,14 @@ void enable_kernel_spe(void)
 {
 	WARN_ON(preemptible());
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
+	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
 		giveup_spe(current);
-	else
-		giveup_spe(NULL);	/* just enable SPE for kernel - force */
+	} else {
+		u64 oldmsr = mfmsr();
+
+		if (!(oldmsr & MSR_SPE))
+			mtmsr_isync(oldmsr | MSR_SPE);
+	}
 }
 EXPORT_SYMBOL(enable_kernel_spe);
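
Callers of these enable_kernel_*() helpers are expected to run with preemption disabled, since the MSR facility bits are per-CPU state (a minimal usage sketch; enable_kernel_fp() is defined above, preempt_disable()/preempt_enable() are the standard kernel primitives):

	preempt_disable();		/* MSR facility bits are per-CPU state */
	enable_kernel_fp();		/* sets MSR_FP via mtmsr_isync() if needed */
	/* ... kernel code using floating point ... */
	preempt_enable();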