
Commit 06b2e988 authored by Linus Torvalds

Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6
* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6:
  parisc: Call pagefault_disable/pagefault_enable in kmap_atomic/kunmap_atomic
  parisc: Remove unnecessary macros from entry.S
  parisc: LWS fixes for syscall.S
  parisc: Delete unnecessary nop's in entry.S
  parisc: Avoid interruption in critical region in entry.S
  parisc: invoke oom-killer from page fault
  parisc: clear floating point exception flag on SIGFPE signal
  parisc: Use of align_frame provides stack frame.
parents 35926ff5 210501aa
arch/parisc/include/asm/cacheflush.h  +13 −3
@@ -2,6 +2,7 @@
 #define _PARISC_CACHEFLUSH_H
 
 #include <linux/mm.h>
+#include <linux/uaccess.h>
 
 /* The usual comment is "Caches aren't brain-dead on the <architecture>".
  * Unfortunately, that doesn't apply to PA-RISC. */
@@ -125,11 +126,20 @@ static inline void *kmap(struct page *page)
 
 #define kunmap(page)			kunmap_parisc(page_address(page))
 
-#define kmap_atomic(page, idx)		page_address(page)
+static inline void *kmap_atomic(struct page *page, enum km_type idx)
+{
+	pagefault_disable();
+	return page_address(page);
+}
 
-#define kunmap_atomic(addr, idx)	kunmap_parisc(addr)
+static inline void kunmap_atomic(void *addr, enum km_type idx)
+{
+	kunmap_parisc(addr);
+	pagefault_enable();
+}
 
-#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
+#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)
+#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
 #endif

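The kmap_atomic() change is subtle: parisc has no highmem, so the mapping itself stays page_address(), but generic callers assume kmap_atomic() enters a non-preemptible region in which page faults cannot sleep. A minimal sketch of that calling contract against the 2.6.35-era API, using a hypothetical copy_to_page() helper that is not part of this commit:

	#include <linux/highmem.h>
	#include <linux/string.h>
	#include <linux/types.h>

	static void copy_to_page(struct page *page, const void *src, size_t len)
	{
		/* With this commit, parisc also calls pagefault_disable()
		 * here, so the region below must not sleep or fault. */
		void *dst = kmap_atomic(page, KM_USER0);
		memcpy(dst, src, len);
		kunmap_atomic(dst, KM_USER0);	/* pagefault_enable() again */
	}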
arch/parisc/kernel/asm-offsets.c  +11 −4
@@ -45,8 +45,12 @@
 #else
 #define FRAME_SIZE	64
 #endif
+#define FRAME_ALIGN	64
 
-#define align(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
+/* Add FRAME_SIZE to the size x and align it to y. All definitions
+ * that use align_frame will include space for a frame.
+ */
+#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
 
 int main(void)
 {
@@ -146,7 +150,8 @@ int main(void)
 	DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
 	BLANK();
 	DEFINE(TASK_SZ, sizeof(struct task_struct));
-	DEFINE(TASK_SZ_ALGN, align(sizeof(struct task_struct), 64));
+	/* TASK_SZ_ALGN includes space for a stack frame. */
+	DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN));
 	BLANK();
 	DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
 	DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
@@ -233,7 +238,8 @@ int main(void)
 	DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
 	DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
 	DEFINE(PT_SIZE, sizeof(struct pt_regs));
-	DEFINE(PT_SZ_ALGN, align(sizeof(struct pt_regs), 64));
+	/* PT_SZ_ALGN includes space for a stack frame. */
+	DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
 	BLANK();
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
 	DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
@@ -242,7 +248,8 @@ int main(void)
 	DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
 	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
 	DEFINE(THREAD_SZ, sizeof(struct thread_info));
-	DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64));
+	/* THREAD_SZ_ALGN includes space for a stack frame. */
+	DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN));
 	BLANK();
 	DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
 	DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
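align_frame(x, y) rounds x up to the next multiple of y and then adds FRAME_SIZE (itself a multiple of 64), so every *_SZ_ALGN constant defined with it reserves room for one stack frame. A standalone check of the arithmetic; the size 300 below is an arbitrary example, not a real structure size:

	#include <assert.h>
	#include <stdio.h>

	#define FRAME_SIZE	64	/* 32-bit value; 128 with CONFIG_64BIT */
	#define FRAME_ALIGN	64
	#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))

	int main(void)
	{
		/* 300 rounds up to 320, plus a 64-byte frame = 384 */
		printf("%d\n", align_frame(300, FRAME_ALIGN));
		assert(align_frame(300, FRAME_ALIGN) % FRAME_ALIGN == 0);
		return 0;
	}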
arch/parisc/kernel/entry.S  +10 −42
@@ -364,32 +364,6 @@
 	.align		32
 	.endm
 
-	/* The following are simple 32 vs 64 bit instruction
-	 * abstractions for the macros */
-	.macro		EXTR	reg1,start,length,reg2
-#ifdef CONFIG_64BIT
-	extrd,u		\reg1,32+(\start),\length,\reg2
-#else
-	extrw,u		\reg1,\start,\length,\reg2
-#endif
-	.endm
-
-	.macro		DEP	reg1,start,length,reg2
-#ifdef CONFIG_64BIT
-	depd		\reg1,32+(\start),\length,\reg2
-#else
-	depw		\reg1,\start,\length,\reg2
-#endif
-	.endm
-
-	.macro		DEPI	val,start,length,reg
-#ifdef CONFIG_64BIT
-	depdi		\val,32+(\start),\length,\reg
-#else
-	depwi		\val,\start,\length,\reg
-#endif
-	.endm
-
 	/* In LP64, the space contains part of the upper 32 bits of the
 	 * fault.  We have to extract this and place it in the va,
 	 * zeroing the corresponding bits in the space register */
@@ -442,19 +416,19 @@
 	 */
 	.macro		L2_ptep	pmd,pte,index,va,fault
 #if PT_NLEVELS == 3
-	EXTR		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
 #else
-	EXTR		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 #endif
-	DEP             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
+	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
 	copy		%r0,\pte
 	ldw,s		\index(\pmd),\pmd
 	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
-	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
+	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
 	copy		\pmd,%r9
 	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
-	EXTR		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
-	DEP		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
+	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
 	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
 	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
 	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
@@ -605,7 +579,7 @@
 	depdi		0,31,32,\tmp
 #endif
 	copy		\va,\tmp1
-	DEPI		0,31,23,\tmp1
+	depi		0,31,23,\tmp1
 	cmpb,COND(<>),n	\tmp,\tmp1,\fault
 	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
 	depd,z		\prot,8,7,\prot
@@ -997,13 +971,6 @@ intr_restore:
 
 	rfi
 	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
 
 #ifndef CONFIG_PREEMPT
 # define intr_do_preempt	intr_restore
@@ -2076,9 +2043,10 @@ syscall_restore:
 	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */
 
 	/* NOTE: We use rsm/ssm pair to make this operation atomic */
+	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
 	rsm     PSW_SM_I, %r0
-	LDREG   TASK_PT_GR30(%r1),%r30             /* restore user sp */
-	mfsp	%sr3,%r1			   /* Get users space id */
+	copy    %r1,%r30                           /* Restore user sp */
+	mfsp    %sr3,%r1                           /* Get user space id */
 	mtsp    %r1,%sr7                           /* Restore sr7 */
 	ssm     PSW_SM_I, %r0
arch/parisc/kernel/syscall.S  +9 −23
@@ -47,18 +47,17 @@ ENTRY(linux_gateway_page)
 	KILL_INSN
 	.endr
 
-	/* ADDRESS 0xb0 to 0xb4, lws uses 1 insns for entry */
+	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
 	/* Light-weight-syscall entry must always be located at 0xb0 */
 	/* WARNING: Keep this number updated with table size changes */
#define __NR_lws_entries (2)

lws_entry:
-	/* Unconditional branch to lws_start, located on the 
-	   same gateway page */
-	b,n	lws_start
+	gate	lws_start, %r0		/* increase privilege */
+	depi	3, 31, 2, %r31		/* Ensure we return into user mode. */
 
-	/* Fill from 0xb4 to 0xe0 */
-	.rept 11
+	/* Fill from 0xb8 to 0xe0 */
+	.rept 10
 	KILL_INSN
 	.endr
 
@@ -423,9 +422,6 @@ tracesys_sigexit:
 
 	*********************************************************/
lws_start:
-	/* Gate and ensure we return to userspace */
-	gate	.+8, %r0
-	depi	3, 31, 2, %r31	/* Ensure we return to userspace */
 
#ifdef CONFIG_64BIT
 	/* FIXME: If we are a 64-bit kernel just
@@ -442,7 +438,7 @@ lws_start:
#endif
 
        /* Is the lws entry number valid? */
-	comiclr,>>=	__NR_lws_entries, %r20, %r0
+	comiclr,>>	__NR_lws_entries, %r20, %r0
 	b,n	lws_exit_nosys
 
 	/* WARNING: Trashing sr2 and sr3 */
@@ -473,7 +469,7 @@ lws_exit:
 	/* now reset the lowest bit of sp if it was set */
 	xor	%r30,%r1,%r30
#endif
-	be,n	0(%sr3, %r31)
+	be,n	0(%sr7, %r31)
 
 
 
@@ -529,7 +525,6 @@ lws_compare_and_swap32:
#endif

lws_compare_and_swap:
-#ifdef CONFIG_SMP
 	/* Load start of lock table */
 	ldil	L%lws_lock_start, %r20
 	ldo	R%lws_lock_start(%r20), %r28
@@ -572,8 +567,6 @@ cas_wouldblock:
 	ldo	2(%r0), %r28				/* 2nd case */
 	b	lws_exit				/* Contended... */
 	ldo	-EAGAIN(%r0), %r21			/* Spin in userspace */
-#endif
-/* CONFIG_SMP */
 
 	/*
 		prev = *addr;
@@ -601,13 +594,11 @@ cas_action:
1:	ldw	0(%sr3,%r26), %r28
 	sub,<>	%r28, %r25, %r0
2:	stw	%r24, 0(%sr3,%r26)
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
-# endif
-#endif
+#endif
 	/* Return to userspace, set no error */
 	b	lws_exit
@@ -615,12 +606,10 @@ cas_action:
 
3:
 	/* Error occured on load or store */
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
-# endif
-#endif
+#endif
 	b	lws_exit
 	ldo	-EFAULT(%r0),%r21	/* set errno */
@@ -672,7 +661,6 @@ ENTRY(sys_call_table64)
END(sys_call_table64)
#endif
 
-#ifdef CONFIG_SMP
 	/*
 		All light-weight-syscall atomic operations 
 		will use this set of locks 
@@ -694,8 +682,6 @@ ENTRY(lws_lock_start)
 	.endr
END(lws_lock_start)
 	.previous
-#endif
-/* CONFIG_SMP for lws_lock_start */
 
.end

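cas_action implements, while holding the lws lock, exactly the compare-and-swap contract quoted in the source comment (prev = *addr; ...). A C rendering of the semantics the userspace caller observes; this illustrates the contract only, not the kernel's locked implementation:

	#include <stdint.h>

	/* On success the kernel returns 0 in %r21 and prev in %r28; on a
	 * faulting address it returns -EFAULT in %r21 (label 3 above). */
	static uint32_t lws_cas(uint32_t *addr, uint32_t old, uint32_t new_val)
	{
		uint32_t prev = *addr;		/* 1: ldw  0(%sr3,%r26), %r28 */
		if (prev == old)		/*    sub,<> %r28, %r25, %r0  */
			*addr = new_val;	/* 2: stw  %r24, 0(%sr3,%r26) */
		return prev;
	}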
arch/parisc/math-emu/decode_exc.c  +1 −0
@@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
 		return SIGNALCODE(SIGFPE, FPE_FLTINV);
 	  case DIVISIONBYZEROEXCEPTION:
 		update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+		Clear_excp_register(exception_index);
 	  	return SIGNALCODE(SIGFPE, FPE_FLTDIV);
 	  case INEXACTEXCEPTION:
 		update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
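This one-line change clears the divide-by-zero flag from the FP status register before SIGFPE is delivered; without it the stale exception bit could re-raise the trap after the signal handler returns. A userspace sketch of the delivery path this affects; it assumes glibc's feenableexcept() to unmask the trap (compile with -lm) and is illustrative only:

	#define _GNU_SOURCE
	#include <fenv.h>
	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>

	static void fpe_handler(int sig, siginfo_t *si, void *ctx)
	{
		(void)sig; (void)ctx;
		if (si->si_code == FPE_FLTDIV)
			write(2, "FPE_FLTDIV\n", 11);
		_exit(1);	/* never return into the faulting divide */
	}

	int main(void)
	{
		struct sigaction sa = { 0 };
		sa.sa_sigaction = fpe_handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGFPE, &sa, NULL);
		feenableexcept(FE_DIVBYZERO);	/* unmask the FP trap */
		volatile double zero = 0.0;
		printf("%f\n", 1.0 / zero);	/* traps with FPE_FLTDIV */
		return 0;
	}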