Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1a338ac3 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar
Browse files

sched, x86: Optimize the preempt_schedule() call



Remove the bloat of the C calling convention out of the
preempt_enable() sites by creating an ASM wrapper which allows us to
do an asm("call ___preempt_schedule") instead.

calling.h bits by Andi Kleen

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-tk7xdi1cvvxewixzke8t8le1@git.kernel.org


[ Fixed build error. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c2daa3be
Loading
Loading
Loading
Loading
+50 −0
Original line number Diff line number Diff line
@@ -48,6 +48,8 @@ For 32-bit we have the following conventions - kernel is built with

#include <asm/dwarf2.h>

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
@@ -192,3 +194,51 @@ For 32-bit we have the following conventions - kernel is built with
	.macro icebp
	.byte 0xf1
	.endm

#else /* CONFIG_X86_64 */

/*
 * For 32-bit, only simplified versions of SAVE_ALL/RESTORE_ALL are
 * provided. These differ from the entry_32.S versions in that they do
 * not change the segment registers, so they are only suitable for
 * in-kernel use, not when transitioning from or to user space. The
 * resulting stack frame is not a standard pt_regs frame. The main use
 * case is calling C code from assembler when all the registers need
 * to be preserved.
 */

	.macro SAVE_ALL
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	.endm

	.macro RESTORE_ALL
	popl_cfi %ebx
	CFI_RESTORE ebx
	popl_cfi %ecx
	CFI_RESTORE ecx
	popl_cfi %edx
	CFI_RESTORE edx
	popl_cfi %esi
	CFI_RESTORE esi
	popl_cfi %edi
	CFI_RESTORE edi
	popl_cfi %ebp
	CFI_RESTORE ebp
	popl_cfi %eax
	CFI_RESTORE eax
	.endm

#endif /* CONFIG_X86_64 */
+10 −0
Original line number Diff line number Diff line
@@ -95,4 +95,14 @@ static __always_inline bool should_resched(void)
	return unlikely(!__this_cpu_read_4(__preempt_count));
}

#ifdef CONFIG_PREEMPT
  extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() asm ("call ___preempt_schedule")
  extern asmlinkage void preempt_schedule(void);
# ifdef CONFIG_CONTEXT_TRACKING
    extern asmlinkage void ___preempt_schedule_context(void);
#   define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
# endif
#endif

#endif /* __ASM_PREEMPT_H */
+2 −0
Original line number Diff line number Diff line
@@ -36,6 +36,8 @@ obj-y += tsc.o io_delay.o rtc.o
obj-y			+= pci-iommu_table.o
obj-y			+= resource.o

obj-$(CONFIG_PREEMPT)	+= preempt.o

obj-y				+= process.o
obj-y				+= i387.o xsave.o
obj-y				+= ptrace.o
+7 −0
Original line number Diff line number Diff line
@@ -37,3 +37,10 @@ EXPORT_SYMBOL(strstr);

EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(empty_zero_page);

#ifdef CONFIG_PREEMPT
EXPORT_SYMBOL(___preempt_schedule);
#ifdef CONFIG_CONTEXT_TRACKING
EXPORT_SYMBOL(___preempt_schedule_context);
#endif
#endif
+25 −0
Original line number Diff line number Diff line

#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/asm.h>
#include <asm/calling.h>

ENTRY(___preempt_schedule)
	CFI_STARTPROC
	SAVE_ALL
	call preempt_schedule
	RESTORE_ALL
	ret
	CFI_ENDPROC

#ifdef CONFIG_CONTEXT_TRACKING

ENTRY(___preempt_schedule_context)
	CFI_STARTPROC
	SAVE_ALL
	call preempt_schedule_context
	RESTORE_ALL
	ret
	CFI_ENDPROC

#endif
Loading