
Commit b5be5b7f authored by Ingo Molnar

Merge branch 'x86/asm/urgent' to pick up an entry code fix



Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 527f0a91 512255a2
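For context, here is a minimal userspace sketch of the save/restore pattern that the diff below reinstates around the context switch: whatever RFLAGS value pushf saves is exactly what popf brings back, so a flag such as NT left set by one task cannot stay live across the bracket. This is illustration only, not kernel code and not part of this commit; CF stands in for NT, and the helper name flag_survives_bracket() is invented for the example. Built with gcc on x86-64, it should print "yes".

/*
 * Userspace illustration only -- not kernel code and not part of this
 * commit.  CF stands in for a flag such as NT; the helper name is made up.
 */
#include <stdio.h>

static inline int flag_survives_bracket(void)
{
	unsigned char cf;

	asm volatile(
		"stc\n\t"	/* set CF, standing in for a flag left set by the old task */
		"pushfq\n\t"	/* save RFLAGS, as SAVE_CONTEXT now does with pushf */
		"clc\n\t"	/* code in between disturbs the flag... */
		"popfq\n\t"	/* ...and the RESTORE_CONTEXT-style popf brings it back */
		"setc %0"	/* capture CF after the restore */
		: "=q" (cf)
		:
		: "cc", "memory");

	return cf;
}

int main(void)
{
	printf("flag restored by pushf/popf bracket: %s\n",
	       flag_survives_bracket() ? "yes" : "no");
	return 0;
}

Because the explicit pushf/popf bracket means the asm once again preserves the caller's flags, the diff below can also drop "flags" from the __EXTRA_CLOBBER list.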
+4 −8
@@ -79,12 +79,12 @@ do { \
 #else /* CONFIG_X86_32 */
 
 /* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
+#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
 
 #define __EXTRA_CLOBBER  \
 	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-	  "r12", "r13", "r14", "r15", "flags"
+	  "r12", "r13", "r14", "r15"
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #define __switch_canary							  \
@@ -100,11 +100,7 @@ do { \
 #define __switch_canary_iparam
 #endif	/* CC_STACKPROTECTOR */
 
-/*
- * There is no need to save or restore flags, because flags are always
- * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
- * has no effect.
- */
+/* Save restore flags to clear handle leaking NT */
 #define switch_to(prev, next, last) \
 	asm volatile(SAVE_CONTEXT					  \
 	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \