Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bb7f20b1 authored by Neil Campbell, committed by Benjamin Herrenschmidt
Browse files

powerpc: Handle VSX alignment faults correctly in little-endian mode



This patch fixes the handling of VSX alignment faults in little-endian
mode (the current code assumes the processor is in big-endian mode).

The patch also makes the handlers clear the top 8 bytes of the register
when handling an 8 byte VSX load.

This is based on 2.6.32.

Signed-off-by: Neil Campbell <neilc@linux.vnet.ibm.com>
Cc: <stable@kernel.org>
Acked-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent f04b10cd
Loading
Loading
Loading
Loading
+46 −17
Original line number Diff line number Diff line
@@ -642,10 +642,14 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
 */
/*
 * Emulate a VSX load or store that took an alignment fault.
 *
 * addr:   faulting user-space address of the memory operand
 * reg:    VSX register number (0-31 map to FPRs, 32-63 to VRs)
 * areg:   RA register to update when the U (update-form) flag is set
 * regs:   faulting context; regs->dar holds the effective address
 * flags:  ST (store), SPLT (splat load), SW (byte-swap elements,
 *         set in little-endian mode), U (update RA)
 * length: total access size in bytes (4, 8 or 16)
 * elsize: element size in bytes (4 or 8); in little-endian mode each
 *         element is byte-reversed independently, so elements must be
 *         copied one at a time rather than with a single bulk copy
 *
 * Returns 1 on success, -EFAULT if any user access fails.
 */
static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
		       unsigned int areg, struct pt_regs *regs,
		       unsigned int flags, unsigned int length,
		       unsigned int elsize)
{
	char *ptr;
	unsigned long *lptr;
	int ret = 0;
	int sw = 0;
	int i, j;

	flush_vsx_to_thread(current);

	if (reg < 32)
		ptr = (char *) &current->thread.TS_FPR(reg);
	else
		ptr = (char *) &current->thread.vr[reg - 32];

	lptr = (unsigned long *) ptr;

	/*
	 * With SW set, index i^sw reverses the bytes within each
	 * element (sw is elsize-1, i.e. all low bits set).
	 */
	if (flags & SW)
		sw = elsize-1;

	/* Copy element by element so each one can be byte-swapped. */
	for (j = 0; j < length; j += elsize) {
		for (i = 0; i < elsize; ++i) {
			if (flags & ST)
				ret |= __put_user(ptr[i^sw], addr + i);
			else
				ret |= __get_user(ptr[i^sw], addr + i);
		}
		ptr  += elsize;
		addr += elsize;
	}

	if (!ret) {
		/* Update form: write the effective address back to RA. */
		if (flags & U)
			regs->gpr[areg] = regs->dar;

		/* Splat load copies the same data to top and bottom 8 bytes */
		if (flags & SPLT)
			lptr[1] = lptr[0];
		/* For 8 byte loads, zero the top 8 bytes */
		else if (!(flags & ST) && (8 == length))
			lptr[1] = 0;
	} else
		return -EFAULT;

	return 1;
}
#endif
@@ -767,16 +787,25 @@ int fix_alignment(struct pt_regs *regs)

#ifdef CONFIG_VSX
	if ((instruction & 0xfc00003e) == 0x7c000018) {
		/* Additional register addressing bit (64 VSX vs 32 FPR/GPR */
		unsigned int elsize;

		/* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
		reg |= (instruction & 0x1) << 5;
		/* Simple inline decoder instead of a table */
		/* VSX has only 8 and 16 byte memory accesses */
		nb = 8;
		if (instruction & 0x200)
			nb = 16;
		else if (instruction & 0x080)
			nb = 8;
		else
			nb = 4;

		/* Vector stores in little-endian mode swap individual
		   elements, so process them separately */
		elsize = 4;
		if (instruction & 0x80)
			elsize = 8;

		flags = 0;
		if (regs->msr & MSR_LE)
			flags |= SW;
		if (instruction & 0x100)
			flags |= ST;
		if (instruction & 0x040)
@@ -787,7 +816,7 @@ int fix_alignment(struct pt_regs *regs)
			nb = 8;
		}
		PPC_WARN_ALIGNMENT(vsx, regs);
		return emulate_vsx(addr, reg, areg, regs, flags, nb);
		return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
	}
#endif
	/* A size of 0 indicates an instruction we don't support, with