Loading arch/x86/include/asm/uaccess.h +6 −6 Original line number Diff line number Diff line Loading @@ -34,8 +34,7 @@ #define user_addr_max() (current_thread_info()->addr_limit.seg) #define __addr_ok(addr) \ ((unsigned long __force)(addr) < \ (current_thread_info()->addr_limit.seg)) ((unsigned long __force)(addr) < user_addr_max()) /* * Test whether a block of memory is a valid user space address. Loading @@ -47,14 +46,14 @@ * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... */ #define __range_not_ok(addr, size) \ #define __range_not_ok(addr, size, limit) \ ({ \ unsigned long flag, roksum; \ __chk_user_ptr(addr); \ asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ : "=&r" (flag), "=r" (roksum) \ : "1" (addr), "g" ((long)(size)), \ "rm" (current_thread_info()->addr_limit.seg)); \ "rm" (limit)); \ flag; \ }) Loading @@ -77,7 +76,8 @@ * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. 
*/ #define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) #define access_ok(type, addr, size) \ (likely(__range_not_ok(addr, size, user_addr_max()) == 0)) /* * The exception table consists of pairs of addresses relative to the Loading arch/x86/kernel/cpu/perf_event.c +8 −2 Original line number Diff line number Diff line Loading @@ -1757,6 +1757,12 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); } static inline int valid_user_frame(const void __user *fp, unsigned long size) { return (__range_not_ok(fp, size, TASK_SIZE) == 0); } #ifdef CONFIG_COMPAT #include <asm/compat.h> Loading @@ -1781,7 +1787,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) if (bytes != sizeof(frame)) break; if (fp < compat_ptr(regs->sp)) if (!valid_user_frame(fp, sizeof(frame))) break; perf_callchain_store(entry, frame.return_address); Loading Loading @@ -1827,7 +1833,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) if (bytes != sizeof(frame)) break; if ((unsigned long)fp < regs->sp) if (!valid_user_frame(fp, sizeof(frame))) break; perf_callchain_store(entry, frame.return_address); Loading arch/x86/lib/usercopy.c +4 −0 Original line number Diff line number Diff line Loading @@ -8,6 +8,7 @@ #include <linux/module.h> #include <asm/word-at-a-time.h> #include <linux/sched.h> /* * best effort, GUP based copy_from_user() that is NMI-safe Loading @@ -21,6 +22,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) void *map; int ret; if (__range_not_ok(from, n, TASK_SIZE)) return len; do { ret = __get_user_pages_fast(addr, 1, 0, &page); if (!ret) Loading include/linux/perf_event.h +1 −1 Original line number Diff line number Diff line Loading @@ -555,7 +555,7 @@ enum perf_event_type { PERF_RECORD_MAX, /* non-ABI */ }; #define PERF_MAX_STACK_DEPTH 255 #define PERF_MAX_STACK_DEPTH 127 enum
perf_callchain_context { PERF_CONTEXT_HV = (__u64)-32, Loading Loading
arch/x86/include/asm/uaccess.h +6 −6 Original line number Diff line number Diff line Loading @@ -34,8 +34,7 @@ #define user_addr_max() (current_thread_info()->addr_limit.seg) #define __addr_ok(addr) \ ((unsigned long __force)(addr) < \ (current_thread_info()->addr_limit.seg)) ((unsigned long __force)(addr) < user_addr_max()) /* * Test whether a block of memory is a valid user space address. Loading @@ -47,14 +46,14 @@ * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... */ #define __range_not_ok(addr, size) \ #define __range_not_ok(addr, size, limit) \ ({ \ unsigned long flag, roksum; \ __chk_user_ptr(addr); \ asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ : "=&r" (flag), "=r" (roksum) \ : "1" (addr), "g" ((long)(size)), \ "rm" (current_thread_info()->addr_limit.seg)); \ "rm" (limit)); \ flag; \ }) Loading @@ -77,7 +76,8 @@ * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. */ #define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) #define access_ok(type, addr, size) \ (likely(__range_not_ok(addr, size, user_addr_max()) == 0)) /* * The exception table consists of pairs of addresses relative to the Loading
arch/x86/kernel/cpu/perf_event.c +8 −2 Original line number Diff line number Diff line Loading @@ -1757,6 +1757,12 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); } static inline int valid_user_frame(const void __user *fp, unsigned long size) { return (__range_not_ok(fp, size, TASK_SIZE) == 0); } #ifdef CONFIG_COMPAT #include <asm/compat.h> Loading @@ -1781,7 +1787,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) if (bytes != sizeof(frame)) break; if (fp < compat_ptr(regs->sp)) if (!valid_user_frame(fp, sizeof(frame))) break; perf_callchain_store(entry, frame.return_address); Loading Loading @@ -1827,7 +1833,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) if (bytes != sizeof(frame)) break; if ((unsigned long)fp < regs->sp) if (!valid_user_frame(fp, sizeof(frame))) break; perf_callchain_store(entry, frame.return_address); Loading
arch/x86/lib/usercopy.c +4 −0 Original line number Diff line number Diff line Loading @@ -8,6 +8,7 @@ #include <linux/module.h> #include <asm/word-at-a-time.h> #include <linux/sched.h> /* * best effort, GUP based copy_from_user() that is NMI-safe Loading @@ -21,6 +22,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) void *map; int ret; if (__range_not_ok(from, n, TASK_SIZE)) return len; do { ret = __get_user_pages_fast(addr, 1, 0, &page); if (!ret) Loading
include/linux/perf_event.h +1 −1 Original line number Diff line number Diff line Loading @@ -555,7 +555,7 @@ enum perf_event_type { PERF_RECORD_MAX, /* non-ABI */ }; #define PERF_MAX_STACK_DEPTH 255 #define PERF_MAX_STACK_DEPTH 127 enum perf_callchain_context { PERF_CONTEXT_HV = (__u64)-32, Loading