arch/arm/include/asm/cacheflush.h  +2 −22

@@ -316,12 +316,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * processes address space.  Really, we want to allow our "user
  * space" model to handle this.
  */
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		memcpy(dst, src, len);				\
-		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
-	} while (0)
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+	unsigned long, void *, const void *, unsigned long);
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
 		memcpy(dst, src, len);				\
@@ -355,17 +351,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	}
 }
 
-static inline void
-vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-			 unsigned long uaddr, void *kaddr,
-			 unsigned long len, int write)
-{
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-		unsigned long addr = (unsigned long)kaddr;
-		__cpuc_coherent_kern_range(addr, addr + len);
-	}
-}
-
 #ifndef CONFIG_CPU_CACHE_VIPT
 #define flush_cache_mm(mm) \
 		vivt_flush_cache_mm(mm)
@@ -373,15 +358,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		vivt_flush_cache_range(vma,start,end)
 #define flush_cache_page(vma,addr,pfn) \
 		vivt_flush_cache_page(vma,addr,pfn)
-#define flush_ptrace_access(vma,page,ua,ka,len,write) \
-		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-				unsigned long uaddr, void *kaddr,
-				unsigned long len, int write);
 #endif
 
 #define flush_cache_dup_mm(mm)	flush_cache_mm(mm)
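Review note: converting copy_to_user_page() from a macro into an out-of-line function is transparent at the call sites; only the cache handling moves into arch/arm/mm/flush.c. For orientation, a minimal sketch of how a ptrace-style caller is expected to use these hooks, modelled on the generic access_process_vm() path in mm/memory.c of this era — the helper name and its exact parameters are mine, not part of this diff:

/*
 * Sketch only (assumption): the caller maps the target page into the
 * kernel, then hands both the kernel alias and the user address to the
 * copy_{to,from}_user_page() hooks, which now perform whatever cache
 * maintenance the architecture needs internally.
 */
static int sketch_access_page(struct vm_area_struct *vma, struct page *page,
			      unsigned long addr, void *buf,
			      unsigned long offset, int bytes, int write)
{
	char *maddr = kmap(page);

	if (write) {
		copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
		set_page_dirty_lock(page);
	} else {
		copy_from_user_page(vma, page, addr, buf, maddr + offset, bytes);
	}
	kunmap(page);
	return bytes;
}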
arch/arm/include/asm/smp_plat.h  +5 −0

@@ -13,4 +13,9 @@ static inline int tlb_ops_need_broadcast(void)
 	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
 }
 
+static inline int cache_ops_need_broadcast(void)
+{
+	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
+}
+
 #endif
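Review note: both helpers decode the same ID_MMFR3 "maintenance broadcast" field, bits [15:12]. My reading of that field (treat it as an assumption, not something the patch states): 0 means CP15 cache/branch-predictor/TLB maintenance only affects the local CPU, 1 means cache and branch-predictor maintenance is broadcast in hardware, 2 adds TLB maintenance. A standalone sketch of the two thresholds, not part of the patch:

#include <stdio.h>

/* bits [15:12] of ID_MMFR3: how far CP15 maintenance ops propagate */
static int maintenance_broadcast(unsigned int mmfr3)
{
	return (mmfr3 >> 12) & 0xf;
}

/* hardware does not broadcast cache ops -> kernel must IPI the flush */
static int cache_ops_need_broadcast(unsigned int mmfr3)
{
	return maintenance_broadcast(mmfr3) < 1;
}

/* hardware does not broadcast TLB ops -> kernel must IPI the invalidate */
static int tlb_ops_need_broadcast(unsigned int mmfr3)
{
	return maintenance_broadcast(mmfr3) < 2;
}

int main(void)
{
	/* hypothetical ID_MMFR3 values covering the three field settings */
	unsigned int samples[] = { 0x0000, 0x1000, 0x2000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("MMFR3=0x%04x: cache bcast needed=%d, tlb bcast needed=%d\n",
		       samples[i], cache_ops_need_broadcast(samples[i]),
		       tlb_ops_need_broadcast(samples[i]));
	return 0;
}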
arch/arm/mm/flush.c  +43 −8

@@ -13,6 +13,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
+#include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
@@ -87,13 +88,26 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
 		__flush_icache_all();
 }
+#else
+#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
+#endif
+
+#ifdef CONFIG_SMP
+static void flush_ptrace_access_other(void *args)
+{
+	__flush_icache_all();
+}
+#endif
 
+static
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-			 unsigned long uaddr, void *kaddr,
-			 unsigned long len, int write)
+			 unsigned long uaddr, void *kaddr, unsigned long len)
 {
 	if (cache_is_vivt()) {
-		vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+			unsigned long addr = (unsigned long)kaddr;
+			__cpuc_coherent_kern_range(addr, addr + len);
+		}
 		return;
 	}
@@ -104,16 +118,37 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	}
 
 	/* VIPT non-aliasing cache */
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
-	    vma->vm_flags & VM_EXEC) {
+	if (vma->vm_flags & VM_EXEC) {
 		unsigned long addr = (unsigned long)kaddr;
 		/* only flushing the kernel mapping on non-aliasing VIPT */
 		__cpuc_coherent_kern_range(addr, addr + len);
+#ifdef CONFIG_SMP
+		if (cache_ops_need_broadcast())
+			smp_call_function(flush_ptrace_access_other,
+					  NULL, 1);
+#endif
 	}
 }
-#else
-#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
-#endif
+
+/*
+ * Copy user data from/to a page which is mapped into a different
+ * processes address space.  Really, we want to allow our "user
+ * space" model to handle this.
+ *
+ * Note that this code needs to run on the current CPU.
+ */
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		       unsigned long uaddr, void *dst, const void *src,
+		       unsigned long len)
+{
+#ifdef CONFIG_SMP
+	preempt_disable();
+#endif
+	memcpy(dst, src, len);
+	flush_ptrace_access(vma, page, uaddr, dst, len);
+#ifdef CONFIG_SMP
+	preempt_enable();
+#endif
+}
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
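Review note on why copy_to_user_page() pins itself to one CPU and only then cross-calls — my reading of the patch, stated as an assumption: memcpy() dirties D-cache lines of the kernel mapping on the current CPU, and __cpuc_coherent_kern_range() operates by virtual address on that same CPU, so the task must not migrate between the two (hence the preempt_disable()/preempt_enable() pair). After the local clean/invalidate, only the other CPUs' I-caches can still hold stale instructions, and when the hardware does not broadcast maintenance they are invalidated explicitly. A minimal sketch of that cross-call step, using only calls that appear in the hunks above; the wrapper name is illustrative, not part of the diff:

/*
 * Illustrative only: the callback runs on every *other* online CPU,
 * and wait == 1 makes the caller block until all of them have
 * finished, so freshly written instructions (e.g. a ptrace-inserted
 * breakpoint) are visible system-wide before copy_to_user_page()
 * returns to the tracer.
 */
static void sketch_sync_remote_icaches(void)
{
	/* the local D-/I-cache range was already handled on this CPU
	 * via __cpuc_coherent_kern_range() */
	if (cache_ops_need_broadcast())
		smp_call_function(flush_ptrace_access_other, NULL, 1);
}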