
Commit 3813d402 authored by Linus Torvalds
Pull Itanium fixes from Tony Luck.

* tag 'ia64-3.5-merge' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  [IA64] Liberate the signal layer from IA64 assembler
  [IA64] Add cmpxchg.h to exported userspace headers
  [IA64] Fix fast syscall version of getcpu()
  [IA64] Removed "task_size" element from thread_struct - it is now constant
parents 2335a836 4035c6db
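
The getcpu() item above refers to the arch/ia64/kernel/fsys.S hunks below: the added cmp.ne/(p6)/(p7) predication skips the probe and the store when the corresponding user pointer is NULL, matching getcpu(2), which permits NULL for either the cpu or the node argument. A minimal userspace sketch of the calling patterns the fast path must accept (illustration only, not part of this commit; the file name and build line are hypothetical):

/* getcpu_demo.c -- hypothetical test program, not part of this commit.
 * Build: gcc -O2 getcpu_demo.c -o getcpu_demo
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned int cpu, node;

	/* Both pointers supplied: the kernel stores the current CPU and node. */
	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
		printf("cpu=%u node=%u\n", cpu, node);

	/* getcpu(2) permits NULL for either pointer; the predicated
	 * probe/store in fsys_getcpu keeps this working on the fast path. */
	if (syscall(SYS_getcpu, &cpu, NULL, NULL) == 0)
		printf("cpu=%u (node not requested)\n", cpu);

	return 0;
}
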
arch/ia64/include/asm/Kbuild  +1 −0
include include/asm-generic/Kbuild.asm

header-y += break.h
+header-y += cmpxchg.h
header-y += fpu.h
header-y += gcc_intrin.h
header-y += ia64regs.h
arch/ia64/include/asm/processor.h  +1 −4
@@ -34,8 +34,7 @@
 * each (assuming 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
-#define TASK_SIZE_OF(tsk)	((tsk)->thread.task_size)
-#define TASK_SIZE       	TASK_SIZE_OF(current)
+#define TASK_SIZE       	DEFAULT_TASK_SIZE

/*
 * This decides where the kernel will search for a free chunk of vm
@@ -280,7 +279,6 @@ struct thread_struct {
	__u8 pad[3];
	__u64 ksp;			/* kernel stack pointer */
	__u64 map_base;			/* base address for get_unmapped_area() */
-	__u64 task_size;		/* limit for task size */
	__u64 rbs_bot;			/* the base address for the RBS */
	int last_fph_cpu;		/* CPU that may hold the contents of f32-f127 */

@@ -303,7 +301,6 @@ struct thread_struct {
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
-	.task_size =	DEFAULT_TASK_SIZE,			\
	.last_fph_cpu =  -1,					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
arch/ia64/kernel/fsys.S  +13 −178
@@ -371,175 +371,6 @@ ENTRY(fsys_clock_gettime)
	br.many .gettime
END(fsys_clock_gettime)

/*
 * long fsys_rt_sigprocmask (int how, sigset_t *set, sigset_t *oset, size_t sigsetsize).
 */
#if _NSIG_WORDS != 1
# error Sorry, fsys_rt_sigprocmask() needs to be updated for _NSIG_WORDS != 1.
#endif
ENTRY(fsys_rt_sigprocmask)
	.prologue
	.altrp b6
	.body

	add r2=IA64_TASK_BLOCKED_OFFSET,r16
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
	cmp4.ltu p6,p0=SIG_SETMASK,r32

	cmp.ne p15,p0=r0,r34			// oset != NULL?
	tnat.nz p8,p0=r34
	add r31=IA64_TASK_SIGHAND_OFFSET,r16
	;;
	ld8 r3=[r2]				// read/prefetch current->blocked
	ld4 r9=[r9]
	tnat.nz.or p6,p0=r35

	cmp.ne.or p6,p0=_NSIG_WORDS*8,r35
	tnat.nz.or p6,p0=r32
(p6)	br.spnt.few .fail_einval		// fail with EINVAL
	;;
#ifdef CONFIG_SMP
	ld8 r31=[r31]				// r31 <- current->sighand
#endif
	and r9=TIF_ALLWORK_MASK,r9
	tnat.nz.or p8,p0=r33
	;;
	cmp.ne p7,p0=0,r9
	cmp.eq p6,p0=r0,r33			// set == NULL?
	add r31=IA64_SIGHAND_SIGLOCK_OFFSET,r31	// r31 <- current->sighand->siglock
(p8)	br.spnt.few .fail_efault		// fail with EFAULT
(p7)	br.spnt.many fsys_fallback_syscall	// got pending kernel work...
(p6)	br.dpnt.many .store_mask		// -> short-circuit to just reading the signal mask

	/* Argh, we actually have to do some work and _update_ the signal mask: */

EX(.fail_efault, probe.r.fault r33, 3)		// verify user has read-access to *set
EX(.fail_efault, ld8 r14=[r33])			// r14 <- *set
	mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1))
	;;

	RSM_PSR_I(p0, r18, r19)			// mask interrupt delivery
	andcm r14=r14,r17			// filter out SIGKILL & SIGSTOP
	mov r8=EINVAL			// default to EINVAL

#ifdef CONFIG_SMP
	// __ticket_spin_trylock(r31)
	ld4 r17=[r31]
	;;
	mov.m ar.ccv=r17
	extr.u r9=r17,17,15
	adds r19=1,r17
	extr.u r18=r17,0,15
	;;
	cmp.eq p6,p7=r9,r18
	;;
(p6)	cmpxchg4.acq r9=[r31],r19,ar.ccv
(p6)	dep.z r20=r19,1,15		// next serving ticket for unlock
(p7)	br.cond.spnt.many .lock_contention
	;;
	cmp4.eq p0,p7=r9,r17
	adds r31=2,r31
(p7)	br.cond.spnt.many .lock_contention
	ld8 r3=[r2]			// re-read current->blocked now that we hold the lock
	;;
#else
	ld8 r3=[r2]			// re-read current->blocked now that we hold the lock
#endif
	add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
	add r19=IA64_TASK_SIGNAL_OFFSET,r16
	cmp4.eq p6,p0=SIG_BLOCK,r32
	;;
	ld8 r19=[r19]			// r19 <- current->signal
	cmp4.eq p7,p0=SIG_UNBLOCK,r32
	cmp4.eq p8,p0=SIG_SETMASK,r32
	;;
	ld8 r18=[r18]			// r18 <- current->pending.signal
	.pred.rel.mutex p6,p7,p8
(p6)	or r14=r3,r14			// SIG_BLOCK
(p7)	andcm r14=r3,r14		// SIG_UNBLOCK

(p8)	mov r14=r14			// SIG_SETMASK
(p6)	mov r8=0			// clear error code
	// recalc_sigpending()
	add r17=IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,r19

	add r19=IA64_SIGNAL_SHARED_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r19
	;;
	ld4 r17=[r17]		// r17 <- current->signal->group_stop_count
(p7)	mov r8=0		// clear error code

	ld8 r19=[r19]		// r19 <- current->signal->shared_pending
	;;
	cmp4.gt p6,p7=r17,r0	// p6/p7 <- (current->signal->group_stop_count > 0)?
(p8)	mov r8=0		// clear error code

	or r18=r18,r19		// r18 <- current->pending | current->signal->shared_pending
	;;
	// r18 <- (current->pending | current->signal->shared_pending) & ~current->blocked:
	andcm r18=r18,r14
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
	;;

(p7)	cmp.ne.or.andcm p6,p7=r18,r0		// p6/p7 <- signal pending
	mov r19=0					// i must not leak kernel bits...
(p6)	br.cond.dpnt.many .sig_pending
	;;

1:	ld4 r17=[r9]				// r17 <- current->thread_info->flags
	;;
	mov ar.ccv=r17
	and r18=~_TIF_SIGPENDING,r17		// r18 <- r17 & ~(1 << TIF_SIGPENDING)
	;;

	st8 [r2]=r14				// update current->blocked with new mask
	cmpxchg4.acq r8=[r9],r18,ar.ccv		// current->thread_info->flags <- r18
	;;
	cmp.ne p6,p0=r17,r8			// update failed?
(p6)	br.cond.spnt.few 1b			// yes -> retry

#ifdef CONFIG_SMP
	// __ticket_spin_unlock(r31)
	st2.rel [r31]=r20
	mov r20=0					// i must not leak kernel bits...
#endif
	SSM_PSR_I(p0, p9, r31)
	;;

	srlz.d					// ensure psr.i is set again
	mov r18=0					// i must not leak kernel bits...

.store_mask:
EX(.fail_efault, (p15) probe.w.fault r34, 3)	// verify user has write-access to *oset
EX(.fail_efault, (p15) st8 [r34]=r3)
	mov r2=0					// i must not leak kernel bits...
	mov r3=0					// i must not leak kernel bits...
	mov r8=0				// return 0
	mov r9=0					// i must not leak kernel bits...
	mov r14=0					// i must not leak kernel bits...
	mov r17=0					// i must not leak kernel bits...
	mov r31=0					// i must not leak kernel bits...
	FSYS_RETURN

.sig_pending:
#ifdef CONFIG_SMP
	// __ticket_spin_unlock(r31)
	st2.rel [r31]=r20			// release the lock
#endif
	SSM_PSR_I(p0, p9, r17)
	;;
	srlz.d
	br.sptk.many fsys_fallback_syscall	// with signal pending, do the heavy-weight syscall

#ifdef CONFIG_SMP
.lock_contention:
	/* Rather than spinning here, fall back on doing a heavy-weight syscall.  */
	SSM_PSR_I(p0, p9, r17)
	;;
	srlz.d
	br.sptk.many fsys_fallback_syscall
#endif
END(fsys_rt_sigprocmask)

/*
 * fsys_getcpu doesn't use the third parameter in this implementation. It reads
 * current_thread_info()->cpu and corresponding node in cpu_to_node_map.
@@ -559,11 +390,15 @@ ENTRY(fsys_getcpu)
	;;
	tnat.nz p7,p0 = r33			// I guard against NaT argument
(p7)    br.cond.spnt.few .fail_einval		// B
+	;;
+	cmp.ne p6,p0=r32,r0
+	cmp.ne p7,p0=r33,r0
+	;;
#ifdef CONFIG_NUMA
	movl r17=cpu_to_node_map
	;;
-EX(.fail_efault, probe.w.fault r32, 3)		// M This takes 5 cycles
-EX(.fail_efault, probe.w.fault r33, 3)		// M This takes 5 cycles
+EX(.fail_efault, (p6) probe.w.fault r32, 3)		// M This takes 5 cycles
+EX(.fail_efault, (p7) probe.w.fault r33, 3)		// M This takes 5 cycles
	shladd r18=r3,1,r17
	;;
	ld2 r20=[r18]				// r20 = cpu_to_node_map[cpu]
@@ -573,20 +408,20 @@ EX(.fail_efault, probe.w.fault r33, 3) // M This takes 5 cycles
(p8)	br.spnt.many fsys_fallback_syscall
	;;
	;;
-EX(.fail_efault, st4 [r32] = r3)
-EX(.fail_efault, st2 [r33] = r20)
+EX(.fail_efault, (p6) st4 [r32] = r3)
+EX(.fail_efault, (p7) st2 [r33] = r20)
	mov r8=0
	;;
#else
-EX(.fail_efault, probe.w.fault r32, 3)		// M This takes 5 cycles
-EX(.fail_efault, probe.w.fault r33, 3)		// M This takes 5 cycles
+EX(.fail_efault, (p6) probe.w.fault r32, 3)		// M This takes 5 cycles
+EX(.fail_efault, (p7) probe.w.fault r33, 3)		// M This takes 5 cycles
	and r2 = TIF_ALLWORK_MASK,r2
	;;
	cmp.ne p8,p0=0,r2
(p8)	br.spnt.many fsys_fallback_syscall
	;;
-EX(.fail_efault, st4 [r32] = r3)
-EX(.fail_efault, st2 [r33] = r0)
+EX(.fail_efault, (p6) st4 [r32] = r3)
+EX(.fail_efault, (p7) st2 [r33] = r0)
	mov r8=0
	;;
#endif
@@ -916,7 +751,7 @@ paravirt_fsyscall_table:
	data8 0				// sigaltstack
	data8 0				// rt_sigaction
	data8 0				// rt_sigpending
-	data8 fsys_rt_sigprocmask	// rt_sigprocmask
+	data8 0				// rt_sigprocmask
	data8 0				// rt_sigqueueinfo	// 1180
	data8 0				// rt_sigreturn
	data8 0				// rt_sigsuspend
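
For reference, the deleted fsys_rt_sigprocmask fast path above open-coded the usual sigprocmask mask update; with its table slot reverting to 0, rt_sigprocmask on ia64 now goes through the regular (non-fsys) syscall path and the generic C implementation. A rough C-level paraphrase of the mask update the assembler performed (illustration only, simplified to a single-word signal set; the identifiers are ours and this is not the kernel's code):

#include <errno.h>
#include <signal.h>

/* Rough paraphrase of the deleted fast path's logic. */
static long sigprocmask_logic(int how, const unsigned long *set,
			      unsigned long *oset, unsigned long *blocked)
{
	unsigned long old = *blocked;

	if (set) {
		/* SIGKILL and SIGSTOP can never be blocked. */
		unsigned long new = *set & ~((1UL << (SIGKILL - 1)) |
					     (1UL << (SIGSTOP - 1)));
		switch (how) {
		case SIG_BLOCK:   *blocked = old | new;  break;  /* "or"    */
		case SIG_UNBLOCK: *blocked = old & ~new; break;  /* "andcm" */
		case SIG_SETMASK: *blocked = new;        break;  /* "mov"   */
		default:          return -EINVAL;
		}
		/* The assembler version then re-evaluated TIF_SIGPENDING
		 * under ->sighand->siglock, i.e. recalc_sigpending(). */
	}
	if (oset)
		*oset = old;
	return 0;
}
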