Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 413059f2 authored by Grant Grundler, committed by Kyle McMartin
Browse files

[PARISC] Replace uses of __LP64__ with CONFIG_64BIT



2.6.12-rc4-pa3 s/__LP64__/CONFIG_64BIT/ and fixup config.h usage

Signed-off-by: Grant Grundler <grundler@parisc-linux.org>

Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
parent 34994952
Loading
Loading
Loading
Loading
+49 −49
Original line number Diff line number Diff line
@@ -37,7 +37,7 @@
#include <asm/unistd.h>
#include <asm/thread_info.h>

#ifdef __LP64__
#ifdef CONFIG_64BIT
#define CMPIB           cmpib,*
#define CMPB            cmpb,*
#define COND(x)		*x
@@ -217,7 +217,7 @@
	va  = r8	/* virtual address for which the trap occured */
	spc = r24	/* space for which the trap occured */

#ifndef __LP64__
#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
@@ -239,7 +239,7 @@

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef __LP64__
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
@@ -249,7 +249,7 @@
	.align		32
	.endm
	
#ifndef __LP64__
#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 *
@@ -286,7 +286,7 @@
	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef __LP64__
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
@@ -299,7 +299,7 @@
	.align		32
	.endm
	
#ifndef __LP64__
#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */
@@ -321,7 +321,7 @@
	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef __LP64__
#ifdef CONFIG_64BIT
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
@@ -331,7 +331,7 @@
	.align		32
	.endm
	
#ifndef __LP64__
#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code
@@ -349,7 +349,7 @@
	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef __LP64__
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
@@ -359,7 +359,7 @@
	.align		32
	.endm
	
#ifndef __LP64__
#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */
@@ -381,7 +381,7 @@
	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef __LP64__
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
@@ -394,7 +394,7 @@
	/* The following are simple 32 vs 64 bit instruction
	 * abstractions for the macros */
	.macro		EXTR	reg1,start,length,reg2
#ifdef __LP64__
#ifdef CONFIG_64BIT
	extrd,u		\reg1,32+\start,\length,\reg2
#else
	extrw,u		\reg1,\start,\length,\reg2
@@ -402,7 +402,7 @@
	.endm

	.macro		DEP	reg1,start,length,reg2
#ifdef __LP64__
#ifdef CONFIG_64BIT
	depd		\reg1,32+\start,\length,\reg2
#else
	depw		\reg1,\start,\length,\reg2
@@ -410,7 +410,7 @@
	.endm

	.macro		DEPI	val,start,length,reg
#ifdef __LP64__
#ifdef CONFIG_64BIT
	depdi		\val,32+\start,\length,\reg
#else
	depwi		\val,\start,\length,\reg
@@ -421,7 +421,7 @@
	 * fault.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro		space_adjust	spc,va,tmp
#ifdef __LP64__
#ifdef CONFIG_64BIT
	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
	depd		%r0,63,SPACEID_SHIFT,\spc
	depd		\tmp,31,SPACEID_SHIFT,\va
@@ -479,7 +479,7 @@
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	copy		\pmd,%r9
#ifdef __LP64__
#ifdef CONFIG_64BIT
	shld		%r9,PxD_VALUE_SHIFT,\pmd
#else
	shlw		%r9,PxD_VALUE_SHIFT,\pmd
@@ -610,7 +610,7 @@
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
#if defined(__LP64__) && (TMPALIAS_MAP_START >= 0x80000000)
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi		0,31,32,\tmp
@@ -624,7 +624,7 @@
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef __LP64__
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
@@ -691,7 +691,7 @@ fault_vector_20:
	def		30
	def		31

#ifndef __LP64__
#ifndef CONFIG_64BIT

	.export fault_vector_11
	
@@ -764,7 +764,7 @@ __kernel_thread:

	copy	%r30, %r1
	ldo	PT_SZ_ALGN(%r30),%r30
#ifdef __LP64__
#ifdef CONFIG_64BIT
	/* Yo, function pointers in wide mode are little structs... -PB */
	ldd	24(%r26), %r2
	STREG	%r2, PT_GR27(%r1)	/* Store childs %dp */
@@ -780,7 +780,7 @@ __kernel_thread:
	or	%r26, %r24, %r26      /* will have kernel mappings.	 */
	ldi	1, %r25			/* stack_start, signals kernel thread */
	stw	%r0, -52(%r30)	     	/* user_tid */
#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	do_fork, %r2
@@ -809,7 +809,7 @@ ret_from_kernel_thread:

	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef __LP64__
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
	LDREG	TASK_PT_GR22(%r1), %r22
#endif
@@ -817,7 +817,7 @@ ret_from_kernel_thread:
	ble	0(%sr7, %r1)
	copy	%r31, %r2

#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
	loadgp				/* Thread could have been in a module */
#endif
@@ -838,7 +838,7 @@ __execve:
	STREG	%r26, PT_GR26(%r16)
	STREG	%r25, PT_GR25(%r16)
	STREG	%r24, PT_GR24(%r16)
#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	sys_execve, %r2
@@ -916,7 +916,7 @@ syscall_exit_rfi:
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef __LP64__
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
@@ -960,7 +960,7 @@ intr_return:
	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
	** irq_stat[] is defined using ____cacheline_aligned.
	*/
#ifdef __LP64__
#ifdef CONFIG_64BIT
	shld	%r1, 6, %r20
#else
	shlw	%r1, 5, %r20
@@ -1018,7 +1018,7 @@ intr_restore:
	.import do_softirq,code
intr_do_softirq:
	BL      do_softirq,%r2
#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
@@ -1036,7 +1036,7 @@ intr_do_resched:
	CMPIB= 0,%r20,intr_restore /* backward */
	nop

#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

@@ -1069,7 +1069,7 @@ intr_do_signal:

	copy	%r0, %r24			/* unsigned long in_syscall */
	copy	%r16, %r25			/* struct pt_regs *regs */
#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

@@ -1093,7 +1093,7 @@ intr_extint:
	mfctl	%cr31,%r1
	copy	%r30,%r17
	/* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
#ifdef __LP64__
#ifdef CONFIG_64BIT
	depdi	0,63,15,%r17
#else
	depi	0,31,15,%r17
@@ -1120,7 +1120,7 @@ intr_extint:

	ldil	L%intr_return, %r2

#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

@@ -1164,7 +1164,7 @@ intr_save:
	mfctl           %cr21, %r17 /* ior */


#ifdef __LP64__
#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
@@ -1199,7 +1199,7 @@ skip_save_ior:
	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */
#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

@@ -1237,7 +1237,7 @@ skip_save_ior:
	spc  = r24	/* space for which the trap occured */
	ptp = r25	/* page directory/page table pointer */

#ifdef __LP64__
#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
@@ -1528,7 +1528,7 @@ nadtlb_probe_check:
	nop


#ifdef __LP64__
#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
@@ -1595,7 +1595,7 @@ itlb_miss_20:

#endif

#ifdef __LP64__
#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
@@ -1804,7 +1804,7 @@ sys_fork_wrapper:

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

@@ -1854,7 +1854,7 @@ sys_clone_wrapper:

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

@@ -1876,7 +1876,7 @@ sys_vfork_wrapper:

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

@@ -1904,7 +1904,7 @@ sys_vfork_wrapper:

	STREG %r2,-RP_OFFSET(%r30)
	ldo FRAME_SIZE(%r30),%r30
#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL \execve,%r2
@@ -1930,7 +1930,7 @@ error_\execve:
sys_execve_wrapper:
	execve_wrapper sys_execve

#ifdef __LP64__
#ifdef CONFIG_64BIT
	.export sys32_execve_wrapper
	.import sys32_execve

@@ -1944,7 +1944,7 @@ sys_rt_sigreturn_wrapper:
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
@@ -1975,7 +1975,7 @@ sys_sigaltstack_wrapper:
	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	b,l	do_sigaltstack,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
@@ -1989,7 +1989,7 @@ sys_sigaltstack_wrapper:
	bv	%r0(%r2)
	nop

#ifdef __LP64__
#ifdef CONFIG_64BIT
	.export sys32_sigaltstack_wrapper
sys32_sigaltstack_wrapper:
	/* Get the user stack pointer */
@@ -2013,7 +2013,7 @@ sys_rt_sigsuspend_wrapper:
	reg_save %r24

	STREG	%r2, -RP_OFFSET(%r30)
#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	b,l	sys_rt_sigsuspend,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
@@ -2086,7 +2086,7 @@ syscall_check_bh:
	ldw     TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */

	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
#ifdef __LP64__
#ifdef CONFIG_64BIT
	shld	%r26, 6, %r20
#else
	shlw	%r26, 5, %r20
@@ -2151,7 +2151,7 @@ syscall_restore:

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef __LP64__
#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
@@ -2247,7 +2247,7 @@ syscall_do_softirq:
	.import schedule,code
syscall_do_resched:
	BL	schedule,%r2
#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
@@ -2267,7 +2267,7 @@ syscall_do_signal:

	ldi	1, %r24				/* unsigned long in_syscall */

#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif
	BL	do_signal,%r2
+13 −13
Original line number Diff line number Diff line
@@ -12,7 +12,7 @@
 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
 */

#include <linux/autoconf.h>	/* for CONFIG_SMP */
#include <linux/config.h>	/* for CONFIG_SMP */

#include <asm/asm-offsets.h>
#include <asm/psw.h>
@@ -36,10 +36,10 @@ boot_args:
	.align	4
	.import init_thread_union,data
	.import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
#ifndef __LP64__
#ifndef CONFIG_64BIT
        .import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
	.import	$global$		/* forward declaration */
#endif /*!LP64*/
#endif /*!CONFIG_64BIT*/
	.export stext
	.export _stext,data		/* Kernel want it this way! */
_stext:
@@ -76,7 +76,7 @@ $bss_loop:
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#ifdef __LP64__
#ifdef CONFIG_64BIT
	/* Set pmd in pgd */
	load32		PA(pmd0),%r5
	shrd            %r5,PxD_VALUE_SHIFT,%r3	
@@ -99,7 +99,7 @@ $bss_loop:
	stw		%r3,0(%r4)
	ldo		(ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
	addib,>		-1,%r1,1b
#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
	ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
@@ -170,7 +170,7 @@ common_stext:
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/

#ifdef __LP64__
#ifdef CONFIG_64BIT
	tophys_r1	%sp

	/* Save the rfi target address */
@@ -233,7 +233,7 @@ stext_pdc_ret:
	 * following short sequence of instructions can determine this
	 * (without being illegal on a PA1.1 machine).
	 */
#ifndef __LP64__
#ifndef CONFIG_64BIT
	ldi		32,%r10
	mtctl		%r10,%cr11
	.level 2.0
@@ -246,7 +246,7 @@ stext_pdc_ret:

$is_pa20:
	.level		LEVEL /* restore 1.1 || 2.0w */
#endif /*!LP64*/
#endif /*!CONFIG_64BIT*/
	load32		PA(fault_vector_20),%r10

$install_iva:
@@ -284,7 +284,7 @@ aligned_rfi:
	.import smp_init_current_idle_task,data
	.import	smp_callin,code

#ifndef __LP64__
#ifndef CONFIG_64BIT
smp_callin_rtn:
        .proc
	.callinfo
@@ -292,7 +292,7 @@ smp_callin_rtn:
	nop
	nop
        .procend
#endif /*!LP64*/
#endif /*!CONFIG_64BIT*/

/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
@@ -327,7 +327,7 @@ smp_slave_stext:
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#ifdef __LP64__
#ifdef CONFIG_64BIT
	/* Setup PDCE_PROC entry */
	copy            %arg0,%r3
#else
@@ -344,7 +344,7 @@ smp_slave_stext:

	.procend
#endif /* CONFIG_SMP */
#ifndef __LP64__
#ifndef CONFIG_64BIT
	.data

	.align	4
@@ -354,4 +354,4 @@ smp_slave_stext:
	.size	$global$,4
$global$:	
	.word 0
#endif /*!LP64*/
#endif /*!CONFIG_64BIT*/
+16 −14
Original line number Diff line number Diff line
@@ -26,7 +26,7 @@
 *       can be used.
 */

#ifdef __LP64__
#ifdef CONFIG_64BIT
#define ADDIB	addib,*
#define CMPB	cmpb,*
#define ANDCM	andcm,*
@@ -40,6 +40,8 @@
	.level	2.0
#endif

#include <linux/config.h>

#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
@@ -294,7 +296,7 @@ copy_user_page_asm:
	.callinfo NO_CALLS
	.entry

#ifdef __LP64__
#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * GCC probably can do this just as well.
@@ -454,7 +456,7 @@ copy_user_page_asm:
	sub		%r25, %r1, %r23		/* move physical addr into non shadowed reg */

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef __LP64__
#ifdef CONFIG_64BIT
	extrd,u		%r26,56,32, %r26		/* convert phys addr to tlb insert format */
	extrd,u		%r23,56,32, %r23		/* convert phys addr to tlb insert format */
	depd		%r24,63,22, %r28		/* Form aliased virtual address 'to' */
@@ -541,7 +543,7 @@ __clear_user_page_asm:
	tophys_r1	%r26

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef __LP64__
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
@@ -558,7 +560,7 @@ __clear_user_page_asm:

	pdtlb		0(%r28)

#ifdef __LP64__
#ifdef CONFIG_64BIT
	ldi		32, %r1			/* PAGE_SIZE/128 == 32 */

	/* PREFETCH (Write) has not (yet) been proven to help here */
@@ -583,7 +585,7 @@ __clear_user_page_asm:
	ADDIB>		-1, %r1, 1b
	ldo		128(%r28), %r28

#else	/* ! __LP64 */
#else	/* ! CONFIG_64BIT */

	ldi		64, %r1			/* PAGE_SIZE/64 == 64 */

@@ -606,7 +608,7 @@ __clear_user_page_asm:
	stw		%r0, 60(%r28)
	ADDIB>		-1, %r1, 1b
	ldo		64(%r28), %r28
#endif	/* __LP64 */
#endif	/* CONFIG_64BIT */

	bv		%r0(%r2)
	nop
@@ -624,7 +626,7 @@ flush_kernel_dcache_page:
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef __LP64__
#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
@@ -668,7 +670,7 @@ flush_user_dcache_page:
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef __LP64__
#ifdef CONFIG_64BIT
	depdi,z		1,63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1,31-PAGE_SHIFT,1, %r25
@@ -712,7 +714,7 @@ flush_user_icache_page:
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef __LP64__
#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
@@ -757,7 +759,7 @@ purge_kernel_dcache_page:
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef __LP64__
#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
@@ -805,7 +807,7 @@ flush_alias_page:
	tophys_r1		%r26

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef __LP64__
#ifdef CONFIG_64BIT
	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,12, %r28		/* Clear any offset bits */
@@ -822,7 +824,7 @@ flush_alias_page:
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef __LP64__
#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r29
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r29
@@ -933,7 +935,7 @@ flush_kernel_icache_page:
	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23

#ifdef __LP64__
#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
+9 −7
Original line number Diff line number Diff line
@@ -7,6 +7,8 @@
 * Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com)
 *
 */
#include <linux/config.h>

#include <asm/psw.h>
#include <asm/assembly.h>

@@ -20,7 +22,7 @@ real32_stack:
real64_stack:
	.block	8192

#ifdef __LP64__
#ifdef CONFIG_64BIT
#  define REG_SZ 8
#else
#  define REG_SZ 4
@@ -50,7 +52,7 @@ save_cr_end:

real32_call_asm:
	STREG	%rp, -RP_OFFSET(%sp)	/* save RP */
#ifdef __LP64__
#ifdef CONFIG_64BIT
	callee_save
	ldo	2*REG_SZ(%sp), %sp	/* room for a couple more saves */
	STREG	%r27, -1*REG_SZ(%sp)
@@ -77,7 +79,7 @@ real32_call_asm:
	b,l	save_control_regs,%r2		/* modifies r1, r2, r28 */
	nop

#ifdef __LP64__
#ifdef CONFIG_64BIT
	rsm	PSW_SM_W, %r0		/* go narrow */
#endif

@@ -85,7 +87,7 @@ real32_call_asm:
	bv	0(%r31)
	nop
ric_ret:
#ifdef __LP64__
#ifdef CONFIG_64BIT
	ssm	PSW_SM_W, %r0		/* go wide */
#endif
	/* restore CRs before going virtual in case we page fault */
@@ -97,7 +99,7 @@ ric_ret:

	tovirt_r1 %sp
	LDREG	-REG_SZ(%sp), %sp	/* restore SP */
#ifdef __LP64__
#ifdef CONFIG_64BIT
	LDREG	-1*REG_SZ(%sp), %r27
	LDREG	-2*REG_SZ(%sp), %r29
	ldo	-2*REG_SZ(%sp), %sp
@@ -212,7 +214,7 @@ rfi_r2v_1:
	bv	0(%r2)
	nop

#ifdef __LP64__
#ifdef CONFIG_64BIT

/************************ 64-bit real-mode calls ***********************/
/* This is only usable in wide kernels right now and will probably stay so */
@@ -290,7 +292,7 @@ pc_in_user_space:
	**	comparing function pointers.
	*/
__canonicalize_funcptr_for_compare:
#ifdef __LP64__
#ifdef CONFIG_64BIT
	bve (%r2)
#else
	bv %r0(%r2)
+1 −1
Original line number Diff line number Diff line
@@ -18,7 +18,7 @@
*/
#undef ENTRY_SYS_CPUS	/* syscall support for iCOD-like functionality */

#include <linux/autoconf.h>
#include <linux/config.h>

#include <linux/types.h>
#include <linux/spinlock.h>
Loading