Loading arch/arm/include/asm/cacheflush.h +1 −2 Original line number Diff line number Diff line Loading @@ -268,8 +268,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr * Harvard caches are synchronised for the user space address range. * This is used for the ARM private sys_cacheflush system call. */ #define flush_cache_user_range(start,end) \ __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end)) #define flush_cache_user_range(s,e) __cpuc_coherent_user_range(s,e) /* * Perform necessary cache operations to ensure that data previously Loading arch/arm/include/asm/thread_info.h +11 −0 Original line number Diff line number Diff line Loading @@ -43,6 +43,16 @@ struct cpu_context_save { __u32 extra[2]; /* Xscale 'acc' register, etc */ }; struct arm_restart_block { union { /* For user cache flushing */ struct { unsigned long start; unsigned long end; } cache; }; }; /* * low level task data that entry.S needs immediate access to. * __switch_to() assumes cpu_context follows immediately after cpu_domain. 
Loading @@ -68,6 +78,7 @@ struct thread_info { unsigned long thumbee_state; /* ThumbEE Handler Base register */ #endif struct restart_block restart_block; struct arm_restart_block arm_restart_block; }; #define INIT_THREAD_INFO(tsk) \ Loading arch/arm/kernel/entry-common.S +2 −2 Original line number Diff line number Diff line Loading @@ -442,10 +442,10 @@ local_restart: ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine add r1, sp, #S_OFF 2: mov why, #0 @ no longer a real syscall cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE) eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back bcs arm_syscall 2: mov why, #0 @ no longer a real syscall b sys_ni_syscall @ not private func #if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI) Loading arch/arm/kernel/traps.c +51 −15 Original line number Diff line number Diff line Loading @@ -497,28 +497,64 @@ static int bad_syscall(int n, struct pt_regs *regs) return regs->ARM_r0; } static long do_cache_op_restart(struct restart_block *); static inline int do_cache_op(unsigned long start, unsigned long end, int flags) __do_cache_op(unsigned long start, unsigned long end) { struct mm_struct *mm = current->active_mm; struct vm_area_struct *vma; int ret; unsigned long chunk = PAGE_SIZE; if (end < start || flags) return -EINVAL; do { if (signal_pending(current)) { struct thread_info *ti = current_thread_info(); down_read(&mm->mmap_sem); vma = find_vma(mm, start); if (vma && vma->vm_start < end) { if (start < vma->vm_start) start = vma->vm_start; if (end > vma->vm_end) end = vma->vm_end; ti->restart_block = (struct restart_block) { .fn = do_cache_op_restart, }; up_read(&mm->mmap_sem); return flush_cache_user_range(start, end); ti->arm_restart_block = (struct arm_restart_block) { { .cache = { .start = start, .end = end, }, }, }; return -ERESTART_RESTARTBLOCK; } up_read(&mm->mmap_sem); ret = flush_cache_user_range(start, start + chunk); if (ret) return ret; cond_resched(); start += chunk; } while (start < end); return 0; } static long 
do_cache_op_restart(struct restart_block *unused) { struct arm_restart_block *restart_block; restart_block = &current_thread_info()->arm_restart_block; return __do_cache_op(restart_block->cache.start, restart_block->cache.end); } static inline int do_cache_op(unsigned long start, unsigned long end, int flags) { if (end < start || flags) return -EINVAL; if (!access_ok(VERIFY_READ, start, end - start)) return -EFAULT; return __do_cache_op(start, end); } /* Loading Loading
arch/arm/include/asm/cacheflush.h +1 −2 Original line number Diff line number Diff line Loading @@ -268,8 +268,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr * Harvard caches are synchronised for the user space address range. * This is used for the ARM private sys_cacheflush system call. */ #define flush_cache_user_range(start,end) \ __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end)) #define flush_cache_user_range(s,e) __cpuc_coherent_user_range(s,e) /* * Perform necessary cache operations to ensure that data previously Loading
arch/arm/include/asm/thread_info.h +11 −0 Original line number Diff line number Diff line Loading @@ -43,6 +43,16 @@ struct cpu_context_save { __u32 extra[2]; /* Xscale 'acc' register, etc */ }; struct arm_restart_block { union { /* For user cache flushing */ struct { unsigned long start; unsigned long end; } cache; }; }; /* * low level task data that entry.S needs immediate access to. * __switch_to() assumes cpu_context follows immediately after cpu_domain. Loading @@ -68,6 +78,7 @@ struct thread_info { unsigned long thumbee_state; /* ThumbEE Handler Base register */ #endif struct restart_block restart_block; struct arm_restart_block arm_restart_block; }; #define INIT_THREAD_INFO(tsk) \ Loading
arch/arm/kernel/entry-common.S +2 −2 Original line number Diff line number Diff line Loading @@ -442,10 +442,10 @@ local_restart: ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine add r1, sp, #S_OFF 2: mov why, #0 @ no longer a real syscall cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE) eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back bcs arm_syscall 2: mov why, #0 @ no longer a real syscall b sys_ni_syscall @ not private func #if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI) Loading
arch/arm/kernel/traps.c +51 −15 Original line number Diff line number Diff line Loading @@ -497,28 +497,64 @@ static int bad_syscall(int n, struct pt_regs *regs) return regs->ARM_r0; } static long do_cache_op_restart(struct restart_block *); static inline int do_cache_op(unsigned long start, unsigned long end, int flags) __do_cache_op(unsigned long start, unsigned long end) { struct mm_struct *mm = current->active_mm; struct vm_area_struct *vma; int ret; unsigned long chunk = PAGE_SIZE; if (end < start || flags) return -EINVAL; do { if (signal_pending(current)) { struct thread_info *ti = current_thread_info(); down_read(&mm->mmap_sem); vma = find_vma(mm, start); if (vma && vma->vm_start < end) { if (start < vma->vm_start) start = vma->vm_start; if (end > vma->vm_end) end = vma->vm_end; ti->restart_block = (struct restart_block) { .fn = do_cache_op_restart, }; up_read(&mm->mmap_sem); return flush_cache_user_range(start, end); ti->arm_restart_block = (struct arm_restart_block) { { .cache = { .start = start, .end = end, }, }, }; return -ERESTART_RESTARTBLOCK; } up_read(&mm->mmap_sem); ret = flush_cache_user_range(start, start + chunk); if (ret) return ret; cond_resched(); start += chunk; } while (start < end); return 0; } static long do_cache_op_restart(struct restart_block *unused) { struct arm_restart_block *restart_block; restart_block = &current_thread_info()->arm_restart_block; return __do_cache_op(restart_block->cache.start, restart_block->cache.end); } static inline int do_cache_op(unsigned long start, unsigned long end, int flags) { if (end < start || flags) return -EINVAL; if (!access_ok(VERIFY_READ, start, end - start)) return -EFAULT; return __do_cache_op(start, end); } /* Loading