Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 896a3756 authored by Grant Grundler's avatar Grant Grundler Committed by Kyle McMartin
Browse files

[PARISC] Make sure use of RFI conforms to PA 2.0 and 1.1 arch docs



2.6.12-rc4-pa3 : first pass at making sure use of RFI conforms to
PA 2.0 arch pages F-4 and F-5, PA 1.1 Arch page 3-19 and 3-20.

The discussion revolves around all the rules for clearing PSW Q-bit.
The hard part is meeting all the rules for "relied upon translation".

The .align directive is used to guarantee that the critical sequence ends more than
8 instructions (32 bytes) before the end of the page.

Signed-off-by: Grant Grundler <grundler@parisc-linux.org>

Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
parent b2c1fe81
Loading
Loading
Loading
Loading
+24 −27
Original line number Diff line number Diff line
@@ -30,9 +30,9 @@
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/psw.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
@@ -67,19 +67,22 @@

	/* Switch to virtual mapping, trashing only %r1 */
	.macro  virt_map
	rsm     PSW_SM_Q,%r0
	tovirt_r1 %r29
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mfsp	%sr7, %r1
	or,=    %r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
	mtsp	%r1, %sr3
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7
	load32	KERNEL_PSW, %r1
	mtctl	%r1, %cr22
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
@@ -888,9 +891,6 @@ _switch_to_ret:
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 * Note that the following code uses a "relied upon translation".
	 * See the parisc ACD for details. The ssm is necessary due to a
	 * PCXT bug.
	 */

	.align 4096
@@ -985,24 +985,19 @@ intr_restore:
	rest_fp         %r1
	rest_general    %r29

	/* Create a "relied upon translation" PA 2.0 Arch. F-5 */
	ssm		0,%r0
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29
	rsm             (PSW_SM_Q|PSW_SM_P|PSW_SM_D|PSW_SM_I),%r0

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29 */
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* Important: Note that rest_stack restores r29
	 * last (we are using it)! It also restores r1 and r30. */
	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
@@ -1153,15 +1148,17 @@ intr_save:

	CMPIB=,n        6,%r26,skip_save_ior

	/* save_specials left ipsw value in r8 for us to test */

	mfctl           %cr20, %r16 /* isr */
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %cr21, %r17 /* ior */


#ifdef __LP64__
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17
@@ -1487,10 +1484,10 @@ nadtlb_emulate:
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl           %cr22,%r8              /* Get ipsw */
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%cr22
	mtctl           %r8,%ipsw

	rfir
	nop
+9 −38
Original line number Diff line number Diff line
@@ -224,8 +224,6 @@ stext_pdc_ret:
	mtctl	%r0,%cr12
	mtctl	%r0,%cr13

	/* Prepare to RFI! Man all the cannons! */

	/* Initialize the global data pointer */
	loadgp

@@ -254,46 +252,16 @@ $is_pa20:
$install_iva:
	mtctl		%r10,%cr14

#ifdef __LP64__
	b		aligned_rfi
	b		aligned_rfi  /* Prepare to RFI! Man all the cannons! */
	nop

	.align          256
	.align 128
aligned_rfi:
	ssm             0,0
	nop             /* 1 */
	nop             /* 2 */
	nop             /* 3 */
	nop             /* 4 */
	nop             /* 5 */
	nop             /* 6 */
	nop             /* 7 */
	nop             /* 8 */
#endif

#ifdef __LP64__ /* move to psw.h? */
#define		PSW_BITS	PSW_Q+PSW_I+PSW_D+PSW_P+PSW_R
#else
#define		PSW_BITS	PSW_SM_Q
#endif

$rfi:	
	/* turn off troublesome PSW bits */
	rsm		PSW_BITS,%r0
	pcxt_ssm_bug

	/* kernel PSW:
	 *  - no interruptions except HPMC and TOC (which are handled by PDC)
	 *  - Q bit set (IODC / PDC interruptions)
	 *  - big-endian
	 *  - virtually mapped
	 */
	load32		KERNEL_PSW,%r10
	mtctl		%r10,%ipsw
	rsm		PSW_SM_QUIET,%r0	/* off troublesome PSW bits */
	/* Don't need NOPs, have 8 compliant insn before rfi */

	/* Set the space pointers for the post-RFI world
	** Clear the two-level IIA Space Queue, effectively setting
	** Kernel space.
	*/
	mtctl		%r0,%cr17	/* Clear IIASQ tail */
	mtctl		%r0,%cr17	/* Clear IIASQ head */

@@ -302,7 +270,10 @@ $rfi:
	ldo		4(%r11),%r11
	mtctl		%r11,%cr18	/* IIAOQ tail */

	/* Jump to hyperspace */
	load32		KERNEL_PSW,%r10
	mtctl		%r10,%ipsw
	
	/* Jump through hyperspace to Virt Mode */
	rfi
	nop

+53 −52
Original line number Diff line number Diff line
@@ -40,8 +40,8 @@
	.level	2.0
#endif

#include <asm/assembly.h>
#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>

@@ -62,32 +62,23 @@ flush_tlb_all_local:
	 * to happen in real mode with all interruptions disabled.
	 */

	/*
	 * Once again, we do the rfi dance ... some day we need examine
	 * all of our uses of this type of code and see what can be
	 * consolidated.
	 */

	rsm		PSW_SM_I, %r19		/* relied upon translation! PA 2.0 Arch. F-5 */
	nop
	nop
	/* pcxt_ssm_bug	- relied upon translation! PA 2.0 Arch. F-4 and F-5 */
	rsm	PSW_SM_I, %r19		/* save I-bit state */
	load32		PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* Turn off Q bit to load iia queue */
	ldil		L%REAL_MODE_PSW, %r1
	ldo		R%REAL_MODE_PSW(%r1), %r1
	mtctl		%r1, %cr22
	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	ldil		L%PA(1f), %r1
	ldo		R%PA(1f)(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
	mtctl           %r1, %ipsw
	rfi
	nop

@@ -178,29 +169,36 @@ fdtonemiddle: /* Loop if LOOP = 1 */
	ADDIB>		-1, %r22, fdtoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */

fdtdone:

	/* Switch back to virtual mode */
fdtdone:
	/*
	 * Switch back to virtual mode
	 */
	/* pcxt_ssm_bug */
	rsm		PSW_SM_I, %r0
	load32		2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* clear Q bit to load iia queue */
	ldil		L%KERNEL_PSW, %r1
	ldo		R%KERNEL_PSW(%r1), %r1
	or		%r1, %r19, %r1		/* Set I bit if set on entry */
	mtctl		%r1, %cr22
	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	ldil		L%(2f), %r1
	ldo		R%(2f)(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1
	or		%r1, %r19, %r1	/* I-bit to state on entry */
	mtctl		%r1, %ipsw	/* restore I-bit (entire PSW) */
	rfi
	nop

2:      bv		%r0(%r2)
	nop
	.exit

	.exit
	.procend

	.export flush_instruction_cache_local,code
@@ -238,7 +236,7 @@ fioneloop: /* Loop if LOOP = 1 */

fisync:
	sync
	mtsm		%r22
	mtsm		%r22			/* restore I-bit */
	bv		%r0(%r2)
	nop
	.exit
@@ -281,7 +279,7 @@ fdoneloop: /* Loop if LOOP = 1 */
fdsync:
	syncdma
	sync
	mtsm		%r22
	mtsm		%r22			/* restore I-bit */
	bv		%r0(%r2)
	nop
	.exit
@@ -988,11 +986,12 @@ flush_kernel_icache_range_asm:
	bv		%r0(%r2)
	nop
	.exit

	.procend

	.align	128

	/* align should cover use of rfi in disable_sr_hashing_asm and
	 * srdis_done.
	 */
	.align	256
	.export disable_sr_hashing_asm,code

disable_sr_hashing_asm:
@@ -1000,28 +999,26 @@ disable_sr_hashing_asm:
	.callinfo NO_CALLS
	.entry

	/* Switch to real mode */

	ssm		0, %r0			/* relied upon translation! */
	nop
	nop
	/*
	 * Switch to real mode
	 */
	/* pcxt_ssm_bug */
	rsm		PSW_SM_I, %r0
	load32		PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm		(PSW_SM_Q|PSW_SM_I), %r0 /* disable Q&I to load the iia queue */
	ldil		L%REAL_MODE_PSW, %r1
	ldo		R%REAL_MODE_PSW(%r1), %r1
	mtctl		%r1, %cr22
	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	ldil		L%PA(1f), %r1
	ldo		R%PA(1f)(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
	mtctl		%r1, %ipsw
	rfi
	nop

@@ -1053,27 +1050,31 @@ srdis_pcxl:

srdis_pa20:

	/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+ */
	/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */

	.word		0x144008bc		/* mfdiag %dr2, %r28 */
	depdi		0, 54,1, %r28		/* clear DIAG_SPHASH_ENAB (bit 54) */
	.word		0x145c1840		/* mtdiag %r28, %dr2 */

srdis_done:

srdis_done:
	/* Switch back to virtual mode */
	rsm		PSW_SM_I, %r0		/* prep to load iia queue */
	load32 	   	2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* clear Q bit to load iia queue */
	ldil		L%KERNEL_PSW, %r1
	ldo		R%KERNEL_PSW(%r1), %r1
	mtctl		%r1, %cr22
	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	ldil 	   	L%(2f), %r1
	ldo     	R%(2f)(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1
	mtctl		%r1, %ipsw
	rfi
	nop

+6 −12
Original line number Diff line number Diff line
@@ -7,8 +7,8 @@
 * Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com)
 *
 */
#include <asm/assembly.h>
#include <asm/psw.h>
#include <asm/assembly.h>

	.section	.bss
	.export real_stack
@@ -147,20 +147,17 @@ restore_control_regs:
	.text
rfi_virt2real:
	/* switch to real mode... */
	ssm		0,0		/* See "relied upon translation" */
	nop				/* PA 2.0 Arch. F-5 */
	nop
	nop
	rsm		PSW_SM_I,%r0
	load32		PA(rfi_v2r_1), %r1
	nop
	nop
	nop
	nop
	nop
	
	rsm             (PSW_SM_Q|PSW_SM_I),%r0  /* disable Q & I bits to load iia queue */
	rsm             PSW_SM_Q,%r0  /* disable Q & I bits to load iia queue */
	mtctl		%r0, %cr17	/* Clear IIASQ tail */
	mtctl		%r0, %cr17	/* Clear IIASQ head */
	load32		PA(rfi_v2r_1), %r1
	mtctl		%r1, %cr18	/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18	/* IIAOQ tail */
@@ -184,10 +181,8 @@ rfi_v2r_1:
	.text
	.align 128
rfi_real2virt:
	ssm		0,0		/* See "relied upon translation" */
	nop				/* PA 2.0 Arch. F-5 */
	nop
	nop
	rsm		PSW_SM_I,%r0
	load32		(rfi_r2v_1), %r1
	nop
	nop
	nop
@@ -197,7 +192,6 @@ rfi_real2virt:
	rsm             PSW_SM_Q,%r0    /* disable Q bit to load iia queue */
	mtctl		%r0, %cr17	/* Clear IIASQ tail */
	mtctl		%r0, %cr17	/* Clear IIASQ head */
	load32		(rfi_r2v_1), %r1
	mtctl		%r1, %cr18	/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18	/* IIAOQ tail */
+25 −0
Original line number Diff line number Diff line
@@ -450,5 +450,30 @@
	REST_CR	(%cr22, PT_PSW	(\regs))
	.endm


	/* First step to create a "relied upon translation"
	 * See PA 2.0 Arch. page F-4 and F-5.
	 *
	 * The ssm was originally necessary due to a "PCxT bug".
	 * But someone decided it needed to be added to the architecture
	 * and this "feature" went into rev3 of PA-RISC 1.1 Arch Manual.
	 * It's been carried forward into PA 2.0 Arch as well. :^(
	 *
	 * "ssm 0,%r0" is a NOP with side effects (prefetch barrier).
	 * rsm/ssm prevents the ifetch unit from speculatively fetching
	 * instructions past this line in the code stream.
	 * A PA 2.0 processor will single-step all instructions in the
	 * same QUAD (4 instructions).
	 */
	.macro	pcxt_ssm_bug	/* see comment above: prologue for a "relied upon translation" */
	rsm	PSW_SM_I,%r0	/* prefetch/speculation barrier (also masks I-bit) */
	nop	/* 1 */		/* rsm + 7 nops = 8 instructions, matching the */
	nop	/* 2 */		/* 8-instruction (32-byte) window required before */
	nop	/* 3 */		/* the rfi sequence — TODO confirm against PA 2.0 */
	nop	/* 4 */		/* Arch. pages F-4/F-5 cited above */
	nop	/* 5 */
	nop	/* 6 */
	nop	/* 7 */
	.endm

#endif /* __ASSEMBLY__ */
#endif
Loading