
Commit 1b00255f authored by Josh Poimboeuf, committed by Ingo Molnar

x86/entry/32, x86/boot/32: Use local labels



Add the local label prefix to all non-function named labels in head_32.S
and entry_32.S.  In addition to decluttering the symbol table, this
also makes stack traces more sensible.  For example, the last
reported function in the idle task stack trace will be startup_32_smp()
instead of is486().
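
For context: the GNU assembler treats any symbol whose name begins with
".L" as assembler-local, so it is never emitted into the object file's
symbol table.  An ordinary named label, by contrast, becomes a local
('t') symbol that survives into vmlinux and kallsyms, and since the
unwinder attributes an address to the nearest preceding symbol, code
after such a label gets misattributed (is486() instead of
startup_32_smp()).  A minimal sketch, with hypothetical file and label
names not taken from this patch:

	/* demo.S */
	.text
	.globl loop_plain
loop_plain:
	xorl	%eax, %eax
plain_label:			/* ordinary label: emitted as a local symbol */
	incl	%eax
	cmpl	$3, %eax
	jne	plain_label
	ret

	.globl loop_local
loop_local:
	xorl	%eax, %eax
.Llocal_label:			/* ".L" prefix: stays out of the symbol table */
	incl	%eax
	cmpl	$3, %eax
	jne	.Llocal_label
	ret

Assembling and listing the symbols shows the difference (output roughly):

	$ as --32 -o demo.o demo.S
	$ nm demo.o
	00000009 T loop_local
	00000000 T loop_plain
	00000002 t plain_label		<- clutters the table; .Llocal_label is absent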

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/14f9f7afd478b23a762f40734da1a57c0c273f6e.1474480779.git.jpoimboe@redhat.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 59df2268
arch/x86/entry/entry_32.S  +22 −21
@@ -307,13 +307,13 @@ END(ret_from_exception)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
-need_resched:
+.Lneed_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all
	call	preempt_schedule_irq
-	jmp	need_resched
+	jmp	.Lneed_resched
END(resume_kernel)
#endif

@@ -334,7 +334,7 @@ GLOBAL(__begin_SYSENTER_singlestep_region)
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
-	jmp	sysenter_past_esp
+	jmp	.Lsysenter_past_esp
#endif

/*
@@ -371,7 +371,7 @@ ENTRY(xen_sysenter_target)
 */
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
-sysenter_past_esp:
+.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
@@ -504,9 +504,9 @@ ENTRY(entry_INT80_32)

restore_all:
	TRACE_IRQS_IRET
-restore_all_notrace:
+.Lrestore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
-	ALTERNATIVE	"jmp restore_nocheck", "", X86_BUG_ESPFIX
+	ALTERNATIVE	"jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
@@ -518,22 +518,23 @@ restore_all_notrace:
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
-	je ldt_ss				# returning to user-space with LDT SS
+	je .Lldt_ss				# returning to user-space with LDT SS
#endif
-restore_nocheck:
+.Lrestore_nocheck:
	RESTORE_REGS 4				# skip orig_eax/error_code
-irq_return:
+.Lirq_return:
	INTERRUPT_RETURN

.section .fixup, "ax"
ENTRY(iret_exc	)
	pushl	$0				# no error code
	pushl	$do_iret_error
	jmp	error_code
.previous
-	_ASM_EXTABLE(irq_return, iret_exc)
+	_ASM_EXTABLE(.Lirq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
-ldt_ss:
+.Lldt_ss:
/*
 * Setup and switch to ESPFIX stack
 *
@@ -562,7 +563,7 @@ ldt_ss:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
-	jmp	restore_nocheck
+	jmp	.Lrestore_nocheck
#endif
ENDPROC(entry_INT80_32)

@@ -882,7 +883,7 @@ ftrace_call:
	popl	%edx
	popl	%ecx
	popl	%eax
-ftrace_ret:
+.Lftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
@@ -952,7 +953,7 @@ GLOBAL(ftrace_regs_call)
	popl	%gs
	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */
-	jmp	ftrace_ret
+	jmp	.Lftrace_ret

	popf
	jmp	ftrace_stub
@@ -963,7 +964,7 @@ ENTRY(mcount)
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
-	jnz	trace
+	jnz	.Ltrace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller
@@ -976,7 +977,7 @@ ftrace_stub:
	ret

	/* taken from glibc */
-trace:
+.Ltrace:
	pushl	%eax
	pushl	%ecx
	pushl	%edx
@@ -1116,7 +1117,7 @@ ENTRY(nmi)
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
-	je	nmi_espfix_stack
+	je	.Lnmi_espfix_stack
#endif

	pushl	%eax				# pt_regs->orig_ax
@@ -1132,7 +1133,7 @@ ENTRY(nmi)

	/* Not on SYSENTER stack. */
	call	do_nmi
-	jmp	restore_all_notrace
+	jmp	.Lrestore_all_notrace

.Lnmi_from_sysenter_stack:
	/*
@@ -1143,10 +1144,10 @@ ENTRY(nmi)
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	do_nmi
	movl	%ebp, %esp
-	jmp	restore_all_notrace
+	jmp	.Lrestore_all_notrace

#ifdef CONFIG_X86_ESPFIX32
-nmi_espfix_stack:
+.Lnmi_espfix_stack:
	/*
	 * create the pointer to lss back
	 */
@@ -1164,7 +1165,7 @@ nmi_espfix_stack:
	call	do_nmi
	RESTORE_REGS
	lss	12+4(%esp), %esp		# back to espfix stack
-	jmp	irq_return
+	jmp	.Lirq_return
#endif
END(nmi)

arch/x86/kernel/head_32.S  +16 −16
@@ -248,19 +248,19 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
#ifdef CONFIG_PARAVIRT
	/* This is can only trip for a broken bootloader... */
	cmpw $0x207, pa(boot_params + BP_version)
-	jb default_entry
+	jb .Ldefault_entry

	/* Paravirt-compatible boot parameters.  Look to see what architecture
		we're booting under. */
	movl pa(boot_params + BP_hardware_subarch), %eax
	cmpl $num_subarch_entries, %eax
-	jae bad_subarch
+	jae .Lbad_subarch

	movl pa(subarch_entries)(,%eax,4), %eax
	subl $__PAGE_OFFSET, %eax
	jmp *%eax

-bad_subarch:
+.Lbad_subarch:
WEAK(lguest_entry)
WEAK(xen_entry)
	/* Unknown implementation; there's really
@@ -270,14 +270,14 @@ WEAK(xen_entry)
	__INITDATA

subarch_entries:
-	.long default_entry		/* normal x86/PC */
+	.long .Ldefault_entry		/* normal x86/PC */
	.long lguest_entry		/* lguest hypervisor */
	.long xen_entry			/* Xen hypervisor */
-	.long default_entry		/* Moorestown MID */
+	.long .Ldefault_entry		/* Moorestown MID */
num_subarch_entries = (. - subarch_entries) / 4
.previous
#else
-	jmp default_entry
+	jmp .Ldefault_entry
#endif /* CONFIG_PARAVIRT */

#ifdef CONFIG_HOTPLUG_CPU
@@ -317,7 +317,7 @@ ENTRY(startup_32_smp)
	call load_ucode_ap
#endif

-default_entry:
+.Ldefault_entry:
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
@@ -347,7 +347,7 @@ default_entry:
	pushfl
	popl %eax			# get EFLAGS
	testl $X86_EFLAGS_ID,%eax	# did EFLAGS.ID remained set?
-	jz enable_paging		# hw disallowed setting of ID bit
+	jz .Lenable_paging		# hw disallowed setting of ID bit
					# which means no CPUID and no CR4

	xorl %eax,%eax
@@ -357,13 +357,13 @@ default_entry:
	movl $1,%eax
	cpuid
	andl $~1,%edx			# Ignore CPUID.FPU
-	jz enable_paging		# No flags or only CPUID.FPU = no CR4
+	jz .Lenable_paging		# No flags or only CPUID.FPU = no CR4

	movl pa(mmu_cr4_features),%eax
	movl %eax,%cr4

	testb $X86_CR4_PAE, %al		# check if PAE is enabled
-	jz enable_paging
+	jz .Lenable_paging

	/* Check if extended functions are implemented */
	movl $0x80000000, %eax
@@ -371,7 +371,7 @@ default_entry:
	/* Value must be in the range 0x80000001 to 0x8000ffff */
	subl $0x80000001, %eax
	cmpl $(0x8000ffff-0x80000001), %eax
-	ja enable_paging
+	ja .Lenable_paging

	/* Clear bogus XD_DISABLE bits */
	call verify_cpu
@@ -380,7 +380,7 @@ default_entry:
	cpuid
	/* Execute Disable bit supported? */
	btl $(X86_FEATURE_NX & 31), %edx
-	jnc enable_paging
+	jnc .Lenable_paging

	/* Setup EFER (Extended Feature Enable Register) */
	movl $MSR_EFER, %ecx
@@ -390,7 +390,7 @@ default_entry:
	/* Make changes effective */
	wrmsr

-enable_paging:
+.Lenable_paging:

/*
 * Enable paging
@@ -419,7 +419,7 @@ enable_paging:
 */
	movb $4,X86			# at least 486
	cmpl $-1,X86_CPUID
-	je is486
+	je .Lis486

	/* get vendor info */
	xorl %eax,%eax			# call CPUID with 0 -> return vendor ID
@@ -430,7 +430,7 @@ enable_paging:
	movl %ecx,X86_VENDOR_ID+8	# last 4 chars

	orl %eax,%eax			# do we have processor info as well?
-	je is486
+	je .Lis486

	movl $1,%eax		# Use the CPUID instruction to get CPU type
	cpuid
@@ -444,7 +444,7 @@ enable_paging:
	movb %cl,X86_MASK
	movl %edx,X86_CAPABILITY

-is486:
+.Lis486:
	movl $0x50022,%ecx	# set AM, WP, NE and MP
	movl %cr0,%eax
	andl $0x80000011,%eax	# Save PG,PE,ET
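
A quick way to confirm the symbol-table effect on a 32-bit build (a
sketch, assuming a vmlinux built with symbols), using the label the
commit message cites:

	$ nm vmlinux | grep -w is486	# prints a local 't' symbol before this
					# patch, and nothing afterwards, since
					# .Lis486 is never emitted at all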