arch/x86/include/asm/irq_vectors.h  +1 −1

@@ -128,7 +128,7 @@
 
 #ifndef __ASSEMBLY__
 static inline int invalid_vm86_irq(int irq)
 {
-	return irq < 3 || irq > 15;
+	return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
 }
 #endif
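The point of this hunk is to replace magic numbers with the named bounds defined in the same header. A quick standalone check confirms the rewrite is behavior-preserving; the values of FIRST_VM86_IRQ (3) and LAST_VM86_IRQ (15) below are inferred from the magic numbers being replaced:

	#include <assert.h>

	#define FIRST_VM86_IRQ	 3	/* inferred from the old magic number */
	#define LAST_VM86_IRQ	15	/* inferred from the old magic number */

	static int invalid_vm86_irq(int irq)
	{
		return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
	}

	int main(void)
	{
		int irq;

		/* Same truth table as the old "irq < 3 || irq > 15" check. */
		for (irq = -1; irq < 32; irq++)
			assert(invalid_vm86_irq(irq) == (irq < 3 || irq > 15));
		return 0;
	}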
arch/x86/include/asm/uaccess_64.h  +14 −2

@@ -192,14 +192,26 @@ static inline int __copy_from_user_nocache(void *dst, const void __user *src,
 				unsigned size)
 {
 	might_sleep();
-	return __copy_user_nocache(dst, src, size, 1);
+	/*
+	 * In practice this limit means that large file write()s
+	 * which get chunked to 4K copies get handled via
+	 * non-temporal stores here. Smaller writes get handled
+	 * via regular __copy_from_user():
+	 */
+	if (likely(size >= PAGE_SIZE))
+		return __copy_user_nocache(dst, src, size, 1);
+	else
+		return __copy_from_user(dst, src, size);
 }
 
 static inline int __copy_from_user_inatomic_nocache(void *dst,
 	    const void __user *src, unsigned size)
 {
-	return __copy_user_nocache(dst, src, size, 0);
+	if (likely(size >= PAGE_SIZE))
+		return __copy_user_nocache(dst, src, size, 0);
+	else
+		return __copy_from_user_inatomic(dst, src, size);
 }
 
 unsigned long
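The new comment is the heart of this change: copies of at least a page go through non-temporal stores, which bypass the cache so a large write() does not evict the caller's working set, while smaller copies stay on the cached path where the data remains warm. Below is a rough user-space analogue of that size-based dispatch using SSE2 intrinsics. It illustrates the idea only and is not the kernel's __copy_user_nocache (which additionally handles user-pointer faults via exception tables); the 16-byte-aligned destination is an assumption of this sketch:

	#include <emmintrin.h>		/* SSE2: _mm_stream_si128, _mm_sfence */
	#include <string.h>

	#define CUTOFF 4096		/* user-space stand-in for PAGE_SIZE */

	/* Bulk copy with non-temporal stores; assumes dst is 16-byte aligned. */
	static void copy_nocache(void *dst, const void *src, size_t n)
	{
		__m128i *d = (__m128i *)dst;
		const __m128i *s = (const __m128i *)src;
		size_t i;

		for (i = 0; i + 16 <= n; i += 16)
			_mm_stream_si128(d++, _mm_loadu_si128(s++));
		_mm_sfence();	/* make the NT stores globally visible */
		memcpy((char *)dst + i, (const char *)src + i, n - i);	/* tail */
	}

	/* Size-based dispatch mirroring the kernel's PAGE_SIZE cutoff. */
	static void copy_dispatch(void *dst, const void *src, size_t n)
	{
		if (n >= CUTOFF)
			copy_nocache(dst, src, n);	/* big: don't pollute the cache */
		else
			memcpy(dst, src, n);		/* small: keep the data warm */
	}

copy_dispatch() then behaves like memcpy() for callers, but page-sized-and-larger copies leave the cache hierarchy untouched.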
arch/x86/kernel/efi_stub_32.S  +1 −0

@@ -113,6 +113,7 @@ ENTRY(efi_call_phys)
 	movl	(%edx), %ecx
 	pushl	%ecx
 	ret
+ENDPROC(efi_call_phys)
 .previous
 
 .data
arch/x86/kernel/efi_stub_64.S  +7 −0

@@ -41,6 +41,7 @@ ENTRY(efi_call0)
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call0)
 
 ENTRY(efi_call1)
 	SAVE_XMM
@@ -50,6 +51,7 @@ ENTRY(efi_call1)
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call1)
 
 ENTRY(efi_call2)
 	SAVE_XMM
@@ -59,6 +61,7 @@ ENTRY(efi_call2)
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call2)
 
 ENTRY(efi_call3)
 	SAVE_XMM
@@ -69,6 +72,7 @@ ENTRY(efi_call3)
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call3)
 
 ENTRY(efi_call4)
 	SAVE_XMM
@@ -80,6 +84,7 @@ ENTRY(efi_call4)
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call4)
 
 ENTRY(efi_call5)
 	SAVE_XMM
@@ -92,6 +97,7 @@ ENTRY(efi_call5)
 	addq $48, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call5)
 
 ENTRY(efi_call6)
 	SAVE_XMM
@@ -107,3 +113,4 @@ ENTRY(efi_call6)
 	addq $48, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call6)
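Both EFI stub changes add the same annotation: ENDPROC() marks where an assembly function ends, giving the symbol a proper ELF type and size, which debuggers, kallsyms and tracing tools rely on. For reference, the macros these hunks use expand roughly as follows; this is a paraphrase of include/linux/linkage.h from the same era, not a verbatim quote:

	/* Emit a global, aligned label opening a function. */
	#define ENTRY(name) \
		.globl name; \
		ALIGN; \
		name:

	/* Close the symbol, recording its size as "here minus start". */
	#define END(name) \
		.size name, .-name

	/* As END(), but also mark the symbol as a function. */
	#define ENDPROC(name) \
		.type name, @function; \
		END(name)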
arch/x86/kernel/entry_64.S  +8 −15

@@ -77,20 +77,17 @@ ENTRY(ftrace_caller)
 	movq 8(%rbp), %rsi
 	subq $MCOUNT_INSN_SIZE, %rdi
 
-.globl ftrace_call
-ftrace_call:
+GLOBAL(ftrace_call)
 	call ftrace_stub
 
 	MCOUNT_RESTORE_FRAME
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
+GLOBAL(ftrace_graph_call)
 	jmp ftrace_stub
 #endif
 
-.globl ftrace_stub
-ftrace_stub:
+GLOBAL(ftrace_stub)
 	retq
 END(ftrace_caller)

@@ -110,8 +107,7 @@ ENTRY(mcount)
 	jnz ftrace_graph_caller
 #endif
 
-.globl ftrace_stub
-ftrace_stub:
+GLOBAL(ftrace_stub)
 	retq
 
 trace:

@@ -148,9 +144,7 @@ ENTRY(ftrace_graph_caller)
 	retq
 END(ftrace_graph_caller)
 
-.globl return_to_handler
-return_to_handler:
+GLOBAL(return_to_handler)
 	subq  $80, %rsp
 
 	movq %rax, (%rsp)

@@ -188,6 +182,7 @@ return_to_handler:
 ENTRY(native_usergs_sysret64)
 	swapgs
 	sysretq
+ENDPROC(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */

@@ -633,16 +628,14 @@ tracesys:
 	 * Syscall return path ending with IRET.
 	 * Has correct top of stack, but partial stack frame.
 	 */
-	.globl int_ret_from_sys_call
-	.globl int_with_check
-int_ret_from_sys_call:
+GLOBAL(int_ret_from_sys_call)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	testl $3,CS-ARGOFFSET(%rsp)
 	je retint_restore_args
 	movl $_TIF_ALLWORK_MASK,%edi
 	/* edi:	mask to check */
-int_with_check:
+GLOBAL(int_with_check)
 	LOCKDEP_SYS_EXIT_IRQ
 	GET_THREAD_INFO(%rcx)
 	movl TI_flags(%rcx),%edx
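The entry_64.S hunks collapse each hand-written .globl/label pair into GLOBAL(). A sketch of the macro, paraphrased from arch/x86/include/asm/linkage.h of the same era:

	/* A bare global label: unlike ENTRY() it adds no alignment and no
	 * function-type annotation, which suits labels that are jumped to
	 * or patched in place (ftrace_call, int_with_check, ...) rather
	 * than called as functions. */
	#define GLOBAL(name) \
		.globl name; \
		name:

Using GLOBAL() rather than ENTRY() here matters: an ENTRY()-style ALIGN would insert padding in the middle of fall-through code paths such as the syscall return sequence.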