
Commit c8921d72 authored by Helge Deller

parisc: Fix and improve kernel stack unwinding

This patchset fixes and improves kernel stack unwinding considerably:
1. Show backward stack traces with up to 30 callsites.
2. Add callinfo to ENTRY_CFI() so that every assembler function gets an
   entry in the unwind table (see the first sketch below).
3. Use named constants instead of magic numbers in call_on_stack() (see the
   second sketch below).
4. Do not depend on CONFIG_KALLSYMS to generate backtraces.
5. Speed up backtrace generation.
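
(For item 2, a minimal before/after sketch, drawn from the linkage.h and
pacache.S hunks below rather than a complete listing: the unwind directives
that each assembler function used to open-code are now emitted by
ENTRY_CFI()/ENDPROC_CFI(), and any extra .callinfo arguments are forwarded
through __VA_ARGS__.)

	/* before: unwind directives open-coded in every .S function */
	ENTRY_CFI(flush_tlb_all_local)
		.proc
		.callinfo NO_CALLS
		.entry
		/* ... function body ... */
		.exit
		.procend
	ENDPROC_CFI(flush_tlb_all_local)

	/* after: ENTRY_CFI()/ENDPROC_CFI() emit .proc/.callinfo/.entry and
	 * .exit/.procend themselves; callers pass .callinfo arguments as
	 * needed, e.g.: */
	ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
		/* ... function body ... */
	ENDPROC_CFI(return_to_handler)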

Make sure you have this patch for GNU as (the GNU assembler) installed:
https://sourceware.org/ml/binutils/2018-07/msg00474.html
Without that assembler patch, the unwind info in the kernel is often wrong
for various functions.
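
(For item 3, the same idea in call_on_stack(): the frame-marker offsets now
come from the ABI constants instead of magic byte offsets, and the frame
layout is declared to the unwinder through the .callinfo arguments of
ENTRY_CFI(). This is only a sketch of the 64-bit stack switch and
frame-marker stores from the entry.S hunk below, not the full call sequence;
the 32-bit path is analogous with FRAME_SIZE=64, RP_OFFSET=20, REG_SZ=4.)

	ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
		copy	%sp, %r1		/* keep old stack pointer */
		/* switch to the new stack, allocating two frames */
		ldo	2*FRAME_SIZE(%arg2), %sp
		/* save rp and old sp in the frame marker
		 * (previously hard-coded as -144 and -136 on 64-bit) */
		STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
		STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)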

Signed-off-by: Helge Deller <deller@gmx.de>
parent 3b885ac1
arch/parisc/include/asm/assembly.h: +2 −0
@@ -36,6 +36,7 @@
#define RP_OFFSET	16
#define FRAME_SIZE	128
#define CALLEE_REG_FRAME_SIZE	144
#define REG_SZ		8
#define ASM_ULONG_INSN	.dword
#else	/* CONFIG_64BIT */
#define LDREG	ldw
@@ -50,6 +51,7 @@
#define RP_OFFSET	20
#define FRAME_SIZE	64
#define CALLEE_REG_FRAME_SIZE	128
#define REG_SZ		4
#define ASM_ULONG_INSN	.word
#endif

arch/parisc/include/asm/linkage.h: +11 −6
@@ -18,9 +18,9 @@
#ifdef __ASSEMBLY__

#define ENTRY(name) \
	.export name !\
	ALIGN	!\
name:
name:		ASM_NL\
	.export name

#ifdef CONFIG_64BIT
#define ENDPROC(name) \
@@ -31,13 +31,18 @@
	END(name)
#endif

#define ENTRY_CFI(name) \
#define ENTRY_CFI(name, ...) \
	ENTRY(name)	ASM_NL\
	.proc		ASM_NL\
	.callinfo __VA_ARGS__	ASM_NL\
	.entry		ASM_NL\
	CFI_STARTPROC

#define ENDPROC_CFI(name) \
	ENDPROC(name)	ASM_NL\
	CFI_ENDPROC
	CFI_ENDPROC	ASM_NL\
	.exit		ASM_NL\
	.procend	ASM_NL\
	ENDPROC(name)

#endif /* __ASSEMBLY__ */

arch/parisc/include/asm/unwind.h: +3 −0
@@ -4,6 +4,9 @@

#include <linux/list.h>

/* Max number of levels to backtrace */
#define MAX_UNWIND_ENTRIES	30

/* From ABI specifications */
struct unwind_table_entry {
	unsigned int region_start;
arch/parisc/kernel/entry.S: +21 −32
@@ -766,7 +766,6 @@ END(fault_vector_11)
#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE
ENTRY(end_fault_vector)

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code
@@ -778,7 +777,6 @@ ENTRY(end_fault_vector)
	 */

ENTRY_CFI(ret_from_kernel_thread)

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop
@@ -817,8 +815,9 @@ ENTRY_CFI(_switch_to)
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl   %r25,%cr30
ENDPROC_CFI(_switch_to)

_switch_to_ret:
ENTRY_CFI(_switch_to_ret)
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float
@@ -826,7 +825,7 @@ _switch_to_ret:
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)
ENDPROC_CFI(_switch_to_ret)

	/*
	 * Common rfi return path for interruptions, kernel execve, and
@@ -887,12 +886,14 @@ ENTRY_CFI(syscall_exit_rfi)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)
ENDPROC_CFI(syscall_exit_rfi)

intr_return:
ENTRY_CFI(intr_return)
	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
ENDPROC_CFI(intr_return)

	.import do_notify_resume,code
intr_check_sig:
@@ -1048,7 +1049,6 @@ intr_extint:

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
@@ -1999,12 +1999,9 @@ ENDPROC_CFI(syscall_exit)
	.align L1_CACHE_BYTES
	.globl mcount
	.type  mcount, @function
ENTRY(mcount)
ENTRY_CFI(mcount, caller)
_mcount:
	.export _mcount,data
	.proc
	.callinfo caller,frame=0
	.entry
	/*
	 * The 64bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
@@ -2026,18 +2023,13 @@ ftrace_stub:
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
	.exit
	.procend
ENDPROC(mcount)
ENDPROC_CFI(mcount)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
	.globl return_to_handler
	.type  return_to_handler, @function
ENTRY_CFI(return_to_handler)
	.proc
	.callinfo caller,frame=FRAME_SIZE
	.entry
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy %r3,%r1
@@ -2076,8 +2068,6 @@ parisc_return_to_handler:
	bv	%r0(%rp)
#endif
	LDREGM -FRAME_SIZE(%sp),%r3
	.exit
	.procend
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -2087,31 +2077,30 @@ ENDPROC_CFI(return_to_handler)
#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
		      unsigned long new_stack) */
ENTRY_CFI(call_on_stack)
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */

	/* Switch to new stack.  We allocate two frames.  */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Switch to new stack.  We allocate two 128 byte frames.  */
	ldo	256(%arg2), %sp
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -144(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -136(%sp)
	LDREG	-144(%sp), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-136(%sp), %sp
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Switch to new stack.  We allocate two 64 byte frames.  */
	ldo	128(%arg2), %sp
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -68(%sp)
	STREG	%rp, -84(%sp)
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
@@ -2119,9 +2108,9 @@ ENTRY_CFI(call_on_stack)
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-84(%sp), %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-68(%sp), %sp
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */
arch/parisc/kernel/pacache.S: +0 −125
@@ -44,10 +44,6 @@
	.align	16

ENTRY_CFI(flush_tlb_all_local)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * The pitlbe and pdtlbe instructions should only be used to
	 * flush the entire tlb. Also, there needs to be no intervening
@@ -189,18 +185,11 @@ fdtdone:

2:      bv		%r0(%r2)
	nop

	.exit
	.procend
ENDPROC_CFI(flush_tlb_all_local)

	.import cache_info,data

ENTRY_CFI(flush_instruction_cache_local)
	.proc
	.callinfo NO_CALLS
	.entry

	load32		cache_info, %r1

	/* Flush Instruction Cache */
@@ -256,18 +245,11 @@ fisync:
	mtsm		%r22			/* restore I-bit */
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_instruction_cache_local)


	.import cache_info, data
ENTRY_CFI(flush_data_cache_local)
	.proc
	.callinfo NO_CALLS
	.entry

	load32		cache_info, %r1

	/* Flush Data Cache */
@@ -324,9 +306,6 @@ fdsync:
	mtsm		%r22			/* restore I-bit */
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_data_cache_local)

/* Macros to serialize TLB purge operations on SMP.  */
@@ -362,10 +341,6 @@ ENDPROC_CFI(flush_data_cache_local)
/* Clear page using kernel mapping.  */

ENTRY_CFI(clear_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

#ifdef CONFIG_64BIT

	/* Unroll the loop.  */
@@ -424,18 +399,11 @@ ENTRY_CFI(clear_page_asm)
#endif
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(clear_page_asm)

/* Copy page using kernel mapping.  */

ENTRY_CFI(copy_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
@@ -542,9 +510,6 @@ ENTRY_CFI(copy_page_asm)
#endif
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(copy_page_asm)

/*
@@ -598,10 +563,6 @@ ENDPROC_CFI(copy_page_asm)
	 */

ENTRY_CFI(copy_user_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	/* Convert virtual `to' and `from' addresses to physical addresses.
	   Move `from' physical address to non shadowed register.  */
	ldil		L%(__PAGE_OFFSET), %r1
@@ -750,16 +711,9 @@ ENTRY_CFI(copy_user_page_asm)

	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(copy_user_page_asm)

ENTRY_CFI(clear_user_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	tophys_r1	%r26

	ldil		L%(TMPALIAS_MAP_START), %r28
@@ -836,16 +790,9 @@ ENTRY_CFI(clear_user_page_asm)

	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(clear_user_page_asm)

ENTRY_CFI(flush_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
@@ -903,16 +850,9 @@ ENTRY_CFI(flush_dcache_page_asm)
	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_dcache_page_asm)

ENTRY_CFI(flush_icache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
@@ -977,16 +917,9 @@ ENTRY_CFI(flush_icache_page_asm)
	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_icache_page_asm)

ENTRY_CFI(flush_kernel_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

@@ -1020,16 +953,9 @@ ENTRY_CFI(flush_kernel_dcache_page_asm)
	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_kernel_dcache_page_asm)

ENTRY_CFI(purge_kernel_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

@@ -1062,16 +988,9 @@ ENTRY_CFI(purge_kernel_dcache_page_asm)
	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(purge_kernel_dcache_page_asm)

ENTRY_CFI(flush_user_dcache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
@@ -1083,16 +1002,9 @@ ENTRY_CFI(flush_user_dcache_range_asm)
	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_user_dcache_range_asm)

ENTRY_CFI(flush_kernel_dcache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
@@ -1105,16 +1017,9 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
	syncdma
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_kernel_dcache_range_asm)

ENTRY_CFI(purge_kernel_dcache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
@@ -1127,16 +1032,9 @@ ENTRY_CFI(purge_kernel_dcache_range_asm)
	syncdma
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(purge_kernel_dcache_range_asm)

ENTRY_CFI(flush_user_icache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
@@ -1148,16 +1046,9 @@ ENTRY_CFI(flush_user_icache_range_asm)
	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_user_icache_range_asm)

ENTRY_CFI(flush_kernel_icache_page)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23

@@ -1191,16 +1082,9 @@ ENTRY_CFI(flush_kernel_icache_page)
	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_kernel_icache_page)

ENTRY_CFI(flush_kernel_icache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
@@ -1212,8 +1096,6 @@ ENTRY_CFI(flush_kernel_icache_range_asm)
	sync
	bv		%r0(%r2)
	nop
	.exit
	.procend
ENDPROC_CFI(flush_kernel_icache_range_asm)

	__INIT
@@ -1223,10 +1105,6 @@ ENDPROC_CFI(flush_kernel_icache_range_asm)
	 */
	.align	256
ENTRY_CFI(disable_sr_hashing_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * Switch to real mode
	 */
@@ -1308,9 +1186,6 @@ srdis_done:

2:      bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(disable_sr_hashing_asm)

	.end