
Commit fb510265 authored by Helge Deller, committed by Greg Kroah-Hartman

parisc: Fix alignment of pa_tlb_lock in assembly on 32-bit SMP kernel



commit 88776c0e70be0290f8357019d844aae15edaa967 upstream.

Qemu for PARISC reported strange failures on a 32-bit SMP parisc kernel
about "Not-handled unaligned insn 0x0e8011d6 and 0x0c2011c9."

Those opcodes decode to the ldcw() assembly instruction, which (on
32-bit) requires an alignment of 16 bytes to ensure atomicity.
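
For illustration only, here is a minimal user-space C sketch of the
round-up that the kernel's C-side __ldcw_align() macro performs (the
helper name aligned_lock_word() and the demo in main() are our own,
not kernel code):

#include <assert.h>
#include <stdio.h>

#define __PA_LDCW_ALIGNMENT	16	/* ldcw needs a 16-byte aligned word */

/* Round addr up to the next 16-byte boundary, as __ldcw_align() does:
 * adding 15 carries past any misalignment, and the mask clears the low
 * four bits, so the result is the first aligned address >= addr. */
static unsigned long aligned_lock_word(unsigned long addr)
{
	return (addr + __PA_LDCW_ALIGNMENT - 1)
		& ~(unsigned long)(__PA_LDCW_ALIGNMENT - 1);
}

int main(void)
{
	/* Four words guarantee that one 16-byte aligned lock word exists
	 * somewhere inside, whatever the array's own alignment is. */
	volatile unsigned int lock[4];
	unsigned long w = aligned_lock_word((unsigned long)&lock[0]);

	assert(w % __PA_LDCW_ALIGNMENT == 0);
	printf("lock[] at %p, aligned ldcw word at %#lx\n",
	       (void *)lock, w);
	return 0;
}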

As it turns out, qemu is correct: our assembly code in entry.S and
pacache.S does not respect the required alignment.

This patch fixes the problem by aligning the lock offset in the
assembly code in the same manner as we do in our C code.
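
The assembly fix below computes the same rounding in two instructions:
load32 loads the lock address plus (alignment - 1), and depi deposits a
zero field of __PA_LDCW_ALIGN_ORDER bits ending at bit 31 (PA-RISC
numbers bits from the MSB, so bit 31 is the least significant),
clearing the low four bits. A hedged C model of the new
load_pa_tlb_lock macro (our illustration, not kernel code):

/* Models the load_pa_tlb_lock assembly macro added by this patch. */
#define ALIGN_ORDER	4	/* __PA_LDCW_ALIGN_ORDER on 32-bit */

static unsigned long load_pa_tlb_lock_model(unsigned long pa_tlb_lock)
{
	/* load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg */
	unsigned long reg = pa_tlb_lock + (1UL << ALIGN_ORDER) - 1;
	/* depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg  (clear low 4 bits) */
	reg &= ~((1UL << ALIGN_ORDER) - 1);
	return reg;	/* e.g. 0x1004 -> 0x1013 -> 0x1010 */
}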

Signed-off-by: Helge Deller <deller@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent f5edee88
arch/parisc/include/asm/ldcw.h  +2 −0

@@ -12,6 +12,7 @@
    for the semaphore.  */
 
 #define __PA_LDCW_ALIGNMENT	16
+#define __PA_LDCW_ALIGN_ORDER	4
 #define __ldcw_align(a) ({					\
 	unsigned long __ret = (unsigned long) &(a)->lock[0];	\
 	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
@@ -29,6 +30,7 @@
    ldcd). */
 
 #define __PA_LDCW_ALIGNMENT	4
+#define __PA_LDCW_ALIGN_ORDER	2
 #define __ldcw_align(a) (&(a)->slock)
 #define __LDCW	"ldcw,co"
 

arch/parisc/kernel/entry.S  +11 −2

@@ -35,6 +35,7 @@
 #include <asm/pgtable.h>
 #include <asm/signal.h>
 #include <asm/unistd.h>
+#include <asm/ldcw.h>
 #include <asm/thread_info.h>
 
 #include <linux/linkage.h>
@@ -46,6 +47,14 @@
 #endif
 
 	.import		pa_tlb_lock,data
+	.macro  load_pa_tlb_lock reg
+#if __PA_LDCW_ALIGNMENT > 4
+	load32	PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
+	depi	0,31,__PA_LDCW_ALIGN_ORDER, \reg
+#else
+	load32	PA(pa_tlb_lock), \reg
+#endif
+	.endm
 
 	/* space_to_prot macro creates a prot id from a space id */
 
@@ -457,7 +466,7 @@
 	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
 #ifdef CONFIG_SMP
 	cmpib,COND(=),n	0,\spc,2f
-	load32		PA(pa_tlb_lock),\tmp
+	load_pa_tlb_lock \tmp
 1:	LDCW		0(\tmp),\tmp1
 	cmpib,COND(=)	0,\tmp1,1b
 	nop
@@ -480,7 +489,7 @@
 	/* Release pa_tlb_lock lock. */
 	.macro		tlb_unlock1	spc,tmp
 #ifdef CONFIG_SMP
-	load32		PA(pa_tlb_lock),\tmp
+	load_pa_tlb_lock \tmp
 	tlb_unlock0	\spc,\tmp
 #endif
 	.endm
arch/parisc/kernel/pacache.S  +7 −2

@@ -36,6 +36,7 @@
 #include <asm/assembly.h>
 #include <asm/pgtable.h>
 #include <asm/cache.h>
+#include <asm/ldcw.h>
 #include <linux/linkage.h>
 
 	.text
@@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local)
 
 	.macro	tlb_lock	la,flags,tmp
 #ifdef CONFIG_SMP
-	ldil		L%pa_tlb_lock,%r1
-	ldo		R%pa_tlb_lock(%r1),\la
+#if __PA_LDCW_ALIGNMENT > 4
+	load32		pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
+	depi		0,31,__PA_LDCW_ALIGN_ORDER, \la
+#else
+	load32		pa_tlb_lock, \la
+#endif
 	rsm		PSW_SM_I,\flags
 1:	LDCW		0(\la),\tmp
 	cmpib,<>,n	0,\tmp,3f