
Commit 35f05976 authored by Chris Metcalf

tilegx: change how we find the kernel stack



Previously, we used a special-purpose register (SPR_SYSTEM_SAVE_K_0)
to hold the CPU number and the top of the current kernel stack
by using the low bits to hold the CPU number, and using the high
bits to hold the address of the page just above where we'd want
the kernel stack to be.  That way we could initialize a new SP
when first entering the kernel by just masking the SPR value and
subtracting a couple of words.

However, it's actually more useful to be able to place an arbitrary
kernel-top value in the SPR.  This allows us to create a new stack
context (e.g. for virtualization) with an arbitrary top-of-stack VA.
To make this work, we now store the CPU number in the high bits,
above the highest legal VA bit (42 bits in the current tilegx
microarchitecture).  The full 42 bits are thus available to store the
top of stack value.  Getting the current cpu (a relatively common
operation) is still fast; it's now a shift rather than a mask.

We make this change only for tilegx, since tilepro has too few SPR
bits to do this, and we don't need this support on tilepro anyway.
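
[Editor's note: to make the two layouts concrete, here is a standalone C sketch -- illustration only, not kernel code -- of packing and unpacking the SPR value under both schemes. The constants mirror the diff below: a 12-bit cpu field in the low bits for the old layout (CPU_LOG_MASK_VALUE) and a CPU_SHIFT of 48 for the new one.]

#include <assert.h>
#include <stdint.h>

#define CPU_SHIFT      48              /* new layout: cpu in bits 48..63 */
#define CPU_MASK_VALUE ((1 << 12) - 1) /* old layout: cpu in bits 0..11  */

/* Old scheme: OR the cpu into the low bits of a stack-top address whose
 * low bits are known to be zero, and mask to take it back apart. */
static uint64_t pack_old(uint64_t ksp0, int cpu) { return ksp0 | (uint64_t)cpu; }
static int      cpu_old(uint64_t spr)  { return (int)(spr & CPU_MASK_VALUE); }
static uint64_t ksp0_old(uint64_t spr) { return spr & ~(uint64_t)CPU_MASK_VALUE; }

/* New scheme: cpu above the highest legal VA bit; the stack top lives in
 * the low 48 bits and is recovered by sign-extending from bit 47, so an
 * arbitrary (even unaligned) kernel VA can be stored. */
static uint64_t pack_new(uint64_t ksp0, int cpu)
{
	return ((uint64_t)cpu << CPU_SHIFT) | (ksp0 & ((1ULL << CPU_SHIFT) - 1));
}
static int      cpu_new(uint64_t spr) { return (int)(spr >> CPU_SHIFT); }
static uint64_t ksp0_new(uint64_t spr)
{
	return (uint64_t)(((int64_t)spr << (64 - CPU_SHIFT)) >> (64 - CPU_SHIFT));
}

int main(void)
{
	uint64_t aligned = 0xfffffff700040000ULL; /* page-aligned stack top */
	uint64_t odd     = 0xfffffff700042fc0ULL; /* arbitrary top-of-stack */

	assert(cpu_old(pack_old(aligned, 37)) == 37);
	assert(ksp0_old(pack_old(aligned, 37)) == aligned);

	/* The old scheme could not have stored 'odd'; the new one can. */
	assert(cpu_new(pack_new(odd, 37)) == 37);
	assert(ksp0_new(pack_new(odd, 37)) == odd);
	return 0;
}

[The round trip shows the payoff: the old scheme relies on the stored address having zero low bits, while the new one round-trips an arbitrary value in the low 48 bits, which is what makes an arbitrary top-of-stack VA possible.]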

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
parent 4036c7d3

arch/tile/include/asm/processor.h  +33 −14

@@ -148,9 +148,10 @@ struct thread_struct {
 
 /*
  * Start with "sp" this many bytes below the top of the kernel stack.
- * This preserves the invariant that a called function may write to *sp.
+ * This allows us to be cache-aware when handling the initial save
+ * of the pt_regs value to the stack.
  */
-#define STACK_TOP_DELTA 8
+#define STACK_TOP_DELTA 64
 
 /*
  * When entering the kernel via a fault, start with the top of the
@@ -234,15 +235,15 @@ extern int do_work_pending(struct pt_regs *regs, u32 flags);
 unsigned long get_wchan(struct task_struct *p);
 
 /* Return initial ksp value for given task. */
-#define task_ksp0(task) ((unsigned long)(task)->stack + THREAD_SIZE)
+#define task_ksp0(task) \
+	((unsigned long)(task)->stack + THREAD_SIZE - STACK_TOP_DELTA)
 
 /* Return some info about the user process TASK. */
-#define KSTK_TOP(task)	(task_ksp0(task) - STACK_TOP_DELTA)
 #define task_pt_regs(task) \
 	((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
 #define current_pt_regs()                                   \
 	((struct pt_regs *)((stack_pointer | (THREAD_SIZE - 1)) - \
-                      (KSTK_PTREGS_GAP - 1)) - 1)
+			    STACK_TOP_DELTA - (KSTK_PTREGS_GAP - 1)) - 1)
 #define task_sp(task)	(task_pt_regs(task)->sp)
 #define task_pc(task)	(task_pt_regs(task)->pc)
 /* Aliases for pc and sp (used in fs/proc/array.c) */
@@ -355,20 +356,38 @@ extern int kdata_huge;
 #define KERNEL_PL CONFIG_KERNEL_PL
 
 /* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
-#define CPU_LOG_MASK_VALUE 12
-#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)
-#if CONFIG_NR_CPUS > CPU_MASK_VALUE
-# error Too many cpus!
+#ifdef __tilegx__
+#define CPU_SHIFT 48
+#if CHIP_VA_WIDTH() > CPU_SHIFT
+# error Too many VA bits!
 #endif
-#define raw_smp_processor_id() \
-	((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & CPU_MASK_VALUE)
-#define get_current_ksp0() \
-	(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~CPU_MASK_VALUE)
+#define MAX_CPU_ID ((1 << (64 - CPU_SHIFT)) - 1)
+#define raw_smp_processor_id() \
+	((int)(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) >> CPU_SHIFT))
+#define get_current_ksp0() \
+	((unsigned long)(((long)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) << \
+			  (64 - CPU_SHIFT)) >> (64 - CPU_SHIFT)))
+#define next_current_ksp0(task) ({ \
+	unsigned long __ksp0 = task_ksp0(task) & ((1UL << CPU_SHIFT) - 1); \
+	unsigned long __cpu = (long)raw_smp_processor_id() << CPU_SHIFT; \
+	__ksp0 | __cpu; \
+})
+#else
+#define LOG2_NR_CPU_IDS 6
+#define MAX_CPU_ID ((1 << LOG2_NR_CPU_IDS) - 1)
+#define raw_smp_processor_id() \
+	((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & MAX_CPU_ID)
+#define get_current_ksp0() \
+	(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~MAX_CPU_ID)
 #define next_current_ksp0(task) ({ \
 	unsigned long __ksp0 = task_ksp0(task); \
 	int __cpu = raw_smp_processor_id(); \
-	BUG_ON(__ksp0 & CPU_MASK_VALUE); \
+	BUG_ON(__ksp0 & MAX_CPU_ID); \
 	__ksp0 | __cpu; \
 })
+#endif
+#if CONFIG_NR_CPUS > (MAX_CPU_ID + 1)
+# error Too many cpus!
+#endif
 
 #endif /* _ASM_TILE_PROCESSOR_H */
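
[Editor's note: a quick sanity check on the macro arithmetic above. task_ksp0() now folds STACK_TOP_DELTA into the stored value, KSTK_TOP() disappears, and current_pt_regs() compensates with an explicit STACK_TOP_DELTA term. The sketch below uses placeholder constants (the real THREAD_SIZE and KSTK_PTREGS_GAP live in the tile headers) and verifies that task_pt_regs() and current_pt_regs() still compute the same address before both back up by one struct pt_regs.]

#include <assert.h>
#include <stdint.h>

/* Placeholder values for illustration; the real THREAD_SIZE and
 * KSTK_PTREGS_GAP come from the tile headers. */
#define THREAD_SIZE     (64 * 1024)
#define STACK_TOP_DELTA 64
#define KSTK_PTREGS_GAP 256

int main(void)
{
	uint64_t stack = 0xfffffff700040000ULL; /* THREAD_SIZE-aligned base */
	uint64_t sp    = stack + 0x1000;        /* any sp within that stack */

	/* task_ksp0(task): initial sp, already STACK_TOP_DELTA below
	 * the end of the stack region. */
	uint64_t ksp0 = stack + THREAD_SIZE - STACK_TOP_DELTA;

	/* task_pt_regs(task), before the final "- 1" struct step. */
	uint64_t a = ksp0 - KSTK_PTREGS_GAP;

	/* current_pt_regs(), before the same "- 1" step, derived from
	 * sp alone via the THREAD_SIZE - 1 mask. */
	uint64_t b = (sp | (THREAD_SIZE - 1))
		     - STACK_TOP_DELTA - (KSTK_PTREGS_GAP - 1);

	assert(a == b);
	return 0;
}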

arch/tile/kernel/head_32.S  +1 −2

@@ -86,7 +86,7 @@ ENTRY(_start)
 	/*
 	 * Load up our per-cpu offset.  When the first (master) tile
 	 * boots, this value is still zero, so we will load boot_pc
-	 * with start_kernel, and boot_sp with init_stack + THREAD_SIZE.
+	 * with start_kernel, and boot_sp at the top of init_stack.
 	 * The master tile initializes the per-cpu offset array, so that
 	 * when subsequent (secondary) tiles boot, they will instead load
 	 * from their per-cpu versions of boot_sp and boot_pc.
@@ -126,7 +126,6 @@ ENTRY(_start)
 	lw sp, r1
 	or r4, sp, r4
 	mtspr SPR_SYSTEM_SAVE_K_0, r4  /* save ksp0 + cpu */
-	addi sp, sp, -STACK_TOP_DELTA
 	{
 	  move lr, zero   /* stop backtraces in the called function */
 	  jr r0

arch/tile/kernel/head_64.S  +3 −3

@@ -158,7 +158,7 @@ ENTRY(_start)
 	/*
 	 * Load up our per-cpu offset.  When the first (master) tile
 	 * boots, this value is still zero, so we will load boot_pc
-	 * with start_kernel, and boot_sp with init_stack + THREAD_SIZE.
+	 * with start_kernel, and boot_sp at the top of init_stack.
 	 * The master tile initializes the per-cpu offset array, so that
 	 * when subsequent (secondary) tiles boot, they will instead load
 	 * from their per-cpu versions of boot_sp and boot_pc.
@@ -202,9 +202,9 @@ ENTRY(_start)
 	}
 	ld r0, r0
 	ld sp, r1
-	or r4, sp, r4
+	shli r4, r4, CPU_SHIFT
+	bfins r4, sp, 0, CPU_SHIFT-1
 	mtspr SPR_SYSTEM_SAVE_K_0, r4  /* save ksp0 + cpu */
-	addi sp, sp, -STACK_TOP_DELTA
 	{
 	  move lr, zero   /* stop backtraces in the called function */
 	  jr r0
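
[Editor's note: the shli/bfins pair replaces the old "or": instead of ORing the cpu into free low bits, boot code shifts it into bits 48..63 and then inserts the stack pointer's low 48 bits beneath it. A compilable sketch of the same computation, with CPU_SHIFT as in the diff and the expected value chosen purely as an example:]

#include <assert.h>
#include <stdint.h>

#define CPU_SHIFT 48

/* What the shli/bfins pair computes: shift the cpu number into bits
 * 48..63 (shli r4, r4, CPU_SHIFT), then insert sp's low 48 bits below
 * it (bfins r4, sp, 0, CPU_SHIFT-1). */
static uint64_t boot_spr(uint64_t sp, uint64_t cpu)
{
	uint64_t r4 = cpu << CPU_SHIFT;
	r4 = (r4 & ~((1ULL << CPU_SHIFT) - 1)) | (sp & ((1ULL << CPU_SHIFT) - 1));
	return r4;
}

int main(void)
{
	assert(boot_spr(0xfffffff700042fc0ULL, 5) == 0x0005fff700042fc0ULL);
	return 0;
}

[Note also that the "addi sp, sp, -STACK_TOP_DELTA" adjustment disappears from both head files: task_ksp0() already returns a value STACK_TOP_DELTA below the top of the stack, so boot_sp needs no further correction.]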

arch/tile/kernel/intvec_32.S  +5 −2

@@ -185,7 +185,7 @@ intvec_\vecname:
 	 * point sp at the top aligned address on the actual stack page.
 	 */
 	mfspr   r0, SPR_SYSTEM_SAVE_K_0
-	mm      r0, r0, zero, LOG2_THREAD_SIZE, 31
+	mm      r0, r0, zero, LOG2_NR_CPU_IDS, 31
 
 0:
 	/*
@@ -203,6 +203,9 @@ intvec_\vecname:
 	 *    cache line 1: r14...r29
 	 *    cache line 0: 2 x frame, r0..r13
 	 */
+#if STACK_TOP_DELTA != 64
+#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
+#endif
 	andi    r0, r0, -64
 
 	/*
@@ -464,7 +467,7 @@ intvec_\vecname:
 	}
 	{
 	 auli   r21, r21, ha16(__per_cpu_offset)
-	 mm     r20, r20, zero, 0, LOG2_THREAD_SIZE-1
+	 mm     r20, r20, zero, 0, LOG2_NR_CPU_IDS-1
 	}
 	s2a     r20, r20, r21
 	lw      tp, r20

arch/tile/kernel/intvec_64.S  +10 −11

@@ -132,13 +132,9 @@ intvec_\vecname:
 	mfspr   r3, SPR_SYSTEM_SAVE_K_0
 
 	/* Get &thread_info->unalign_jit_tmp[0] in r3. */
+	bfexts  r3, r3, 0, CPU_SHIFT-1
 	mm      r3, zero, LOG2_THREAD_SIZE, 63
-#if THREAD_SIZE < 65536
-	addli   r3, r3, -(PAGE_SIZE - THREAD_INFO_UNALIGN_JIT_TMP_OFFSET)
-#else
-	addli   r3, r3, -(PAGE_SIZE/2)
-	addli   r3, r3, -(PAGE_SIZE/2 - THREAD_INFO_UNALIGN_JIT_TMP_OFFSET)
-#endif
+	addli   r3, r3, THREAD_INFO_UNALIGN_JIT_TMP_OFFSET
 
 	/*
 	 * Save r0, r1, r2 into thread_info array r3 points to
@@ -365,13 +361,13 @@ intvec_\vecname:
 
 2:
 	/*
-	 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
-	 * the current stack top in the higher bits.  So we recover
-	 * our stack top by just masking off the low bits, then
+	 * SYSTEM_SAVE_K_0 holds the cpu number in the high bits, and
+	 * the current stack top in the lower bits.  So we recover
+	 * our starting stack value by sign-extending the low bits, then
 	 * point sp at the top aligned address on the actual stack page.
 	 */
 	mfspr   r0, SPR_SYSTEM_SAVE_K_0
-	mm      r0, zero, LOG2_THREAD_SIZE, 63
+	bfexts  r0, r0, 0, CPU_SHIFT-1
 
 0:
 	/*
@@ -393,6 +389,9 @@ intvec_\vecname:
 	 *    cache line 1: r6...r13
 	 *    cache line 0: 2 x frame, r0..r5
 	 */
+#if STACK_TOP_DELTA != 64
+#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
+#endif
 	andi    r0, r0, -64
 
 	/*
@@ -690,7 +689,7 @@ intvec_\vecname:
 	}
 	{
 	 shl16insli r21, r21, hw1(__per_cpu_offset)
-	 bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
+	 bfextu r20, r20, CPU_SHIFT, 63
 	}
 	shl16insli r21, r21, hw0(__per_cpu_offset)
 	shl3add r20, r20, r21
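
[Editor's note: the interrupt path performs the inverse operations. Here is a sketch in C of what the tilegx sequences above compute; per_cpu_offset is a local stand-in for the kernel's __per_cpu_offset table, and the constants mirror the diff.]

#include <stdint.h>

#define CPU_SHIFT       48
#define STACK_TOP_DELTA 64

/* Stand-in for the kernel's __per_cpu_offset[] table. */
static unsigned long per_cpu_offset[64];

/* Recover the working stack from SPR_SYSTEM_SAVE_K_0: sign-extend the
 * low 48 bits (bfexts r0, r0, 0, CPU_SHIFT-1), then round down to the
 * 64-byte cache line the initial sp starts on (andi r0, r0, -64) --
 * which is why STACK_TOP_DELTA must now be exactly 64. */
static uint64_t fault_sp(uint64_t spr)
{
	int64_t top = ((int64_t)spr << (64 - CPU_SHIFT)) >> (64 - CPU_SHIFT);
	return (uint64_t)top & ~(uint64_t)(STACK_TOP_DELTA - 1);
}

/* Find the per-cpu base the same way: the cpu id is now the unsigned
 * field in bits 48..63 (bfextu r20, r20, CPU_SHIFT, 63), scaled by 8
 * into the offset table (shl3add r20, r20, r21). */
static unsigned long percpu_base(uint64_t spr)
{
	return per_cpu_offset[spr >> CPU_SHIFT];
}

int main(void)
{
	uint64_t spr = (5ULL << CPU_SHIFT) | 0xfff700042fc0ULL;
	return fault_sp(spr) == 0xfffffff700042fc0ULL &&
	       percpu_base(spr) == per_cpu_offset[5] ? 0 : 1;
}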