Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1871853f authored by Frederic Weisbecker's avatar Frederic Weisbecker
Browse files

x86,64: Simplify save_regs()



The save_args function, which saves the registers on low-level
irq entry, is complicated by the fact that it switches
stacks in the middle, and also because it manipulates
data allocated in the caller's frame: accesses to that data
are calculated directly from the callee's rsp value, with the
return address sitting in the middle of the way.

This complicates the static stack-offset calculations and
requires more dynamic ones. It also needs a save/restore
of the function's return address.

To simplify and optimize this, turn save_regs() into a
macro.

Signed-off-by: default avatarFrederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jan Beulich <JBeulich@novell.com>
parent 47ce11a2
Loading
Loading
Loading
Loading
+17 −27
Original line number Diff line number Diff line
@@ -297,27 +297,22 @@ ENDPROC(native_usergs_sysret64)
	.endm

/* save partial stack frame */
	.pushsection .kprobes.text, "ax"
ENTRY(save_args)
	XCPT_FRAME
	.macro SAVE_ARGS_IRQ
	cld
	/*
	 * start from rbp in pt_regs and jump over
	 * return address.
	 */
	movq_cfi rdi, RDI+8-RBP
	movq_cfi rsi, RSI+8-RBP
	movq_cfi rdx, RDX+8-RBP
	movq_cfi rcx, RCX+8-RBP
	movq_cfi rax, RAX+8-RBP
	movq_cfi  r8,  R8+8-RBP
	movq_cfi  r9,  R9+8-RBP
	movq_cfi r10, R10+8-RBP
	movq_cfi r11, R11+8-RBP

	leaq -RBP+8(%rsp),%rdi	/* arg1 for handler */
	movq_cfi rbp, 8		/* push %rbp */
	leaq 8(%rsp), %rbp		/* mov %rsp, %ebp */
	/* start from rbp in pt_regs and jump over */
	movq_cfi rdi, RDI-RBP
	movq_cfi rsi, RSI-RBP
	movq_cfi rdx, RDX-RBP
	movq_cfi rcx, RCX-RBP
	movq_cfi rax, RAX-RBP
	movq_cfi  r8,  R8-RBP
	movq_cfi  r9,  R9-RBP
	movq_cfi r10, R10-RBP
	movq_cfi r11, R11-RBP

	leaq -RBP(%rsp),%rdi	/* arg1 for handler */
	movq_cfi rbp, 0		/* push %rbp */
	movq %rsp, %rbp
	testl $3, CS(%rdi)
	je 1f
	SWAPGS
@@ -329,19 +324,14 @@ ENTRY(save_args)
	 */
1:	incl PER_CPU_VAR(irq_count)
	jne 2f
	popq_cfi %rax			/* move return address... */
	mov PER_CPU_VAR(irq_stack_ptr),%rsp
	EMPTY_FRAME 0
	pushq_cfi %rbp			/* backlink for unwinder */
	pushq_cfi %rax			/* ... to the new stack */
	/*
	 * We entered an interrupt context - irqs are off:
	 */
2:	TRACE_IRQS_OFF
	ret
	CFI_ENDPROC
END(save_args)
	.popsection
	.endm

ENTRY(save_rest)
	PARTIAL_FRAME 1 REST_SKIP+8
@@ -791,7 +781,7 @@ END(interrupt)
	/* reserve pt_regs for scratch regs and rbp */
	subq $ORIG_RAX-RBP, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
	call save_args
	SAVE_ARGS_IRQ
	PARTIAL_FRAME 0
	call \func
	.endm