Makefile +5 −0

--- a/Makefile
+++ b/Makefile
@@ -489,6 +489,11 @@ KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 endif
 
+RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
+RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
+RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
+export RETPOLINE_CFLAGS
+
 ifeq ($(config-targets),1)
 # ===========================================================================
 # *config targets only - make sure prerequisites are updated, and descend
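The nested cc-option calls form a fallback chain: the GCC-style retpoline flags are probed first; if the compiler rejects them, the Clang flag is probed; if neither is accepted, RETPOLINE_CFLAGS ends up empty. The variable is exported so arch/x86/Makefile (below) only consumes the result. A minimal standalone sketch of the same probe-and-fall-back pattern; cc-probe here is a hypothetical stand-in for Kbuild's cc-option helper, not the real macro:

	CC ?= gcc

	# cc-probe: expand to $(1) if the compiler accepts those flags, else empty.
	# Like Kbuild's cc-option, it test-compiles an empty C file.
	cc-probe = $(shell $(CC) -Werror $(1) -c -x c /dev/null -o /dev/null >/dev/null 2>&1 && echo "$(1)")

	RETPOLINE_CFLAGS_GCC   := -mindirect-branch=thunk-extern -mindirect-branch-register
	RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk

	# First probe that succeeds wins; an unsupported flag set yields "".
	RETPOLINE_CFLAGS := $(call cc-probe,$(RETPOLINE_CFLAGS_GCC))
	ifeq ($(RETPOLINE_CFLAGS),)
	RETPOLINE_CFLAGS := $(call cc-probe,$(RETPOLINE_CFLAGS_CLANG))
	endif
	export RETPOLINE_CFLAGS

	# Usage: "make -f probe.mk show" prints whichever flag set was accepted.
	show: ; @echo '$(RETPOLINE_CFLAGS)'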
arch/x86/Kconfig +2 −10

--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -430,6 +430,7 @@ config GOLDFISH
 config RETPOLINE
 	bool "Avoid speculative indirect branches in kernel"
 	default y
+	select STACK_VALIDATION if HAVE_STACK_VALIDATION
 	help
 	  Compile kernel with the retpoline compiler options to guard against
 	  kernel-to-user data leaks by avoiding speculative indirect
@@ -2315,7 +2316,7 @@ choice
 	  it can be used to assist security vulnerability exploitation.
 
 	  This setting can be changed at boot time via the kernel command
-	  line parameter vsyscall=[native|emulate|none].
+	  line parameter vsyscall=[emulate|none].
 
 	  On a system with recent enough glibc (2.14 or newer) and no
 	  static binaries, you can say None without a performance penalty
@@ -2323,15 +2324,6 @@ choice
 
 	  If unsure, select "Emulate".
 
-config LEGACY_VSYSCALL_NATIVE
-	bool "Native"
-	help
-	  Actual executable code is located in the fixed vsyscall
-	  address mapping, implementing time() efficiently. Since
-	  this makes the mapping executable, it can be used during
-	  security vulnerability exploitation (traditionally as
-	  ROP gadgets). This configuration is not recommended.
-
 config LEGACY_VSYSCALL_EMULATE
 	bool "Emulate"
 	help
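With the "Native" option gone, the only runtime overrides left are the two in the updated help text. For example, disabling the legacy vsyscall page entirely means booting with:

	vsyscall=none

appended to the kernel command line (the boot-loader syntax for adding it varies).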
arch/x86/Makefile +3 −4

--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -232,7 +232,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 # Avoid indirect branches in kernel to deal with Spectre
 ifdef CONFIG_RETPOLINE
-    RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
-    ifneq ($(RETPOLINE_CFLAGS),)
-        KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
-    endif
+ifneq ($(RETPOLINE_CFLAGS),)
+  KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+endif
 endif
arch/x86/entry/calling.h +19 −15

--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -97,7 +97,7 @@ For 32-bit we have the following conventions - kernel is built with
 #define SIZEOF_PTREGS	21*8
 
-.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
+.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
 	/*
 	 * Push registers and sanitize registers of values that a
 	 * speculation attack might otherwise want to exploit. The
 	 * lower registers are likely clobbered well before they
@@ -105,32 +105,41 @@ For 32-bit we have the following conventions - kernel is built with
 	 * could be put to use in a speculative execution gadget.
 	 * Interleave XOR with PUSH for better uop scheduling:
 	 */
+	.if \save_ret
+	pushq	%rsi		/* pt_regs->si */
+	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
+	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
+	.else
 	pushq	%rdi		/* pt_regs->di */
 	pushq	%rsi		/* pt_regs->si */
+	.endif
 	pushq	\rdx		/* pt_regs->dx */
 	pushq	%rcx		/* pt_regs->cx */
 	pushq	\rax		/* pt_regs->ax */
 	pushq	%r8		/* pt_regs->r8 */
-	xorq	%r8, %r8	/* nospec   r8 */
+	xorl	%r8d, %r8d	/* nospec   r8 */
 	pushq	%r9		/* pt_regs->r9 */
-	xorq	%r9, %r9	/* nospec   r9 */
+	xorl	%r9d, %r9d	/* nospec   r9 */
 	pushq	%r10		/* pt_regs->r10 */
-	xorq	%r10, %r10	/* nospec   r10 */
+	xorl	%r10d, %r10d	/* nospec   r10 */
 	pushq	%r11		/* pt_regs->r11 */
-	xorq	%r11, %r11	/* nospec   r11*/
+	xorl	%r11d, %r11d	/* nospec   r11*/
 	pushq	%rbx		/* pt_regs->rbx */
 	xorl	%ebx, %ebx	/* nospec    rbx*/
 	pushq	%rbp		/* pt_regs->rbp */
 	xorl	%ebp, %ebp	/* nospec    rbp*/
 	pushq	%r12		/* pt_regs->r12 */
-	xorq	%r12, %r12	/* nospec   r12*/
+	xorl	%r12d, %r12d	/* nospec   r12*/
 	pushq	%r13		/* pt_regs->r13 */
-	xorq	%r13, %r13	/* nospec   r13*/
+	xorl	%r13d, %r13d	/* nospec   r13*/
 	pushq	%r14		/* pt_regs->r14 */
-	xorq	%r14, %r14	/* nospec   r14*/
+	xorl	%r14d, %r14d	/* nospec   r14*/
 	pushq	%r15		/* pt_regs->r15 */
-	xorq	%r15, %r15	/* nospec   r15*/
+	xorl	%r15d, %r15d	/* nospec   r15*/
 	UNWIND_HINT_REGS
+	.if \save_ret
+	pushq	%rsi		/* return address on top of stack */
+	.endif
 .endm
 
 .macro POP_REGS pop_rdi=1 skip_r11rcx=0
@@ -172,12 +181,7 @@ For 32-bit we have the following conventions - kernel is built with
  */
 .macro ENCODE_FRAME_POINTER ptregs_offset=0
 #ifdef CONFIG_FRAME_POINTER
-	.if \ptregs_offset
-		leaq \ptregs_offset(%rsp), %rbp
-	.else
-		mov %rsp, %rbp
-	.endif
-	orq	$0x1, %rbp
+	leaq 1+\ptregs_offset(%rsp), %rbp
 #endif
 .endm
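Three things change here. The new save_ret parameter serves callers that enter the macro with a return address already on the stack: %rsi is pushed as pt_regs->si, the return address (now 8 bytes up) is parked in %rsi, %rdi overwrites the old return-address slot as pt_regs->di, and the saved address is re-pushed on top once the frame is complete, so the caller can still ret. The register clears switch from xorq to xorl because a write to a 32-bit register zero-extends into the full 64-bit register, and 32-bit XOR-of-self is the form CPUs most uniformly recognize as a dependency-breaking zeroing idiom. Finally, ENCODE_FRAME_POINTER folds its mov/leaq plus orq into one leaq: %rsp points at an 8-byte-aligned pt_regs here, so bit 0 of \ptregs_offset(%rsp) is already clear and adding 1 in the displacement is equivalent to OR-ing it in. The last two equivalences as a standalone GNU-assembler sketch (the label and the offset 16 are arbitrary, not kernel code):

		.text
		.globl	demo
	demo:
		/* Zeroing: both leave the full %r12 == 0, since 32-bit
		 * writes zero-extend; the xorl form is the canonical
		 * zeroing idiom. */
		xorq	%r12, %r12		/* old form */
		xorl	%r12d, %r12d		/* new form, same result */

		/* Frame-pointer encoding: with %rsp 8-byte aligned,
		 * 16(%rsp) has bit 0 clear, so "+1 in the displacement"
		 * equals "OR $1 afterwards". */
		leaq	16(%rsp), %rbp
		orq	$0x1, %rbp		/* old: two instructions */
		leaq	1+16(%rsp), %rbp	/* new: one instruction, same %rbp */
		ret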
arch/x86/entry/entry_32.S +1 −2

--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -252,8 +252,7 @@ ENTRY(__switch_to_asm)
 	 * exist, overwrite the RSB with entries which capture
 	 * speculative execution to prevent attack.
 	 */
-	/* Clobbers %ebx */
-	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+	FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 #endif
 
 	/* restore callee-saved registers */
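The scratch-register argument (and its clobber comment) disappears because the macro itself is reworked, in a part of the series not shown here, so call sites no longer need to donate a register. For reference, the idea being invoked: a self-contained 64-bit sketch of RSB stuffing, not the kernel's exact macro; STUFF_RSB is hypothetical and clobbers %ecx, exactly the kind of side effect the old interface had to document:

	/* Each CALL records its return address in the CPU's return stack
	 * buffer (RSB); the instructions after the CALL never execute
	 * architecturally, so a later mispredicted RET speculates into the
	 * PAUSE/LFENCE capture loop instead of attacker-controlled code.
	 * %rsp is repaired at the end. Clobbers %ecx. */
	.macro STUFF_RSB nr:req
	mov	$\nr, %ecx
771:	call	772f			/* adds one RSB entry */
773:	pause				/* speculation trap */
	lfence
	jmp	773b
772:	dec	%ecx
	jnz	771b
	add	$(8*\nr), %rsp		/* discard the pushed return addresses */
	.endm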