mm/usercopy.c  +6 −0

@@ -167,6 +167,8 @@ static inline void check_page_span(const void *ptr, unsigned long n,
 	const void *end = ptr + n - 1;
 	struct page *endpage;
 	bool is_reserved, is_cma;
+	const void * const stack = task_stack_page(current);
+	const void * const stackend = stack + THREAD_SIZE;
 
 	/*
 	 * Sometimes the kernel data regions are not marked Reserved (see
@@ -191,6 +193,10 @@ static inline void check_page_span(const void *ptr, unsigned long n,
 	    end <= (const void *)__bss_stop)
 		return;
 
+	/* Allow stack region to span multiple pages */
+	if (ptr >= stack && end <= stackend)
+		return;
+
 	/* Is the object wholly within one base page? */
 	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
 		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
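Why the whitelist matters, as a minimal sketch (assuming CONFIG_HARDENED_USERCOPY is enabled; example_read_status and its argument are hypothetical names, not taken from this change): a handler that copies an on-stack buffer to userspace. Depending on kernel version and architecture, copy_to_user() routes the source object through __check_object_size() and, for non-slab pages, check_page_span(); if the buffer happens to straddle a page boundary inside the task's stack, the stack/stackend check added above is what keeps the copy from being rejected as a page-spanning object.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/*
 * Hypothetical caller, not part of the patch.  "report" lives on the
 * current task's kernel stack and, depending on stack depth, may cross
 * a page boundary there; the new stack/stackend case in
 * check_page_span() permits such a copy.
 */
static long example_read_status(void __user *argp)
{
	char report[256];	/* on-stack source buffer */

	snprintf(report, sizeof(report), "status: ok\n");

	/* copy_to_user() -> check_object_size() -> check_page_span() */
	if (copy_to_user(argp, report, sizeof(report)))
		return -EFAULT;
	return 0;
}

The whitelist mirrors the existing data/bss exceptions immediately above it: a region known to be virtually contiguous (here the current task's stack, task_stack_page(current) through THREAD_SIZE) is allowed to span base pages, while everything else still has to fit within a single page or compound page.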