arch/arm64/include/asm/cacheflush.h +5 −0

@@ -40,6 +40,10 @@
  *	the implementation assumes non-aliasing VIPT D-cache and (aliasing)
  *	VIPT or ASID-tagged VIVT I-cache.
  *
+ *	flush_cache_all()
+ *
+ *		Unconditionally clean and invalidate the entire cache.
+ *
  *	flush_cache_mm(mm)
  *
  *		Clean and invalidate all user space cache entries
@@ -65,6 +69,7 @@
  *	- kaddr  - page address
  *	- size   - region size
  */
+extern void flush_cache_all(void);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
 extern void __inval_dcache_area(void *addr, size_t len);
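The new declaration sits alongside the existing ranged maintenance helpers. For contrast, a minimal caller sketch (publish_code() and its arguments are hypothetical): ranged operations remain the normal tool, while flush_cache_all(), being Set/Way based, is only guaranteed to affect the caches of the CPU executing it, so it belongs in single-CPU teardown paths such as cpu_soft_restart() below.

	#include <asm/cacheflush.h>

	/*
	 * Hypothetical example: make instructions just written into `buf`
	 * visible to the instruction stream. The ranged helper is the
	 * normal tool; flush_cache_all() is reserved for CPU teardown.
	 */
	static void publish_code(void *buf, size_t len)
	{
		flush_icache_range((unsigned long)buf,
				   (unsigned long)buf + len);
	}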
arch/arm64/include/asm/proc-fns.h +4 −0

@@ -28,8 +28,12 @@
 struct mm_struct;
 struct cpu_suspend_ctx;
 
+extern void cpu_cache_off(void);
 extern void cpu_do_idle(void);
 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+void cpu_soft_restart(phys_addr_t cpu_reset,
+		unsigned long addr) __attribute__((noreturn));
 extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
 extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
arch/arm64/mm/cache.S +73 −0

@@ -25,6 +25,79 @@
 #include <asm/alternative.h>
 #include <asm/asm-uaccess.h>
 
+/*
+ *	__flush_dcache_all()
+ *
+ *	Flush the whole D-cache.
+ *
+ *	Corrupted registers: x0-x7, x9-x11
+ */
+__flush_dcache_all:
+	dmb	sy			// ensure ordering with previous memory accesses
+	mrs	x0, clidr_el1		// read clidr
+	and	x3, x0, #0x7000000	// extract loc from clidr
+	lsr	x3, x3, #23		// left align loc bit field
+	cbz	x3, finished		// if loc is 0, then no need to clean
+	mov	x10, #0			// start clean at cache level 0
+loop1:
+	add	x2, x10, x10, lsr #1	// work out 3x current cache level
+	lsr	x1, x0, x2		// extract cache type bits from clidr
+	and	x1, x1, #7		// mask off the bits for current cache only
+	cmp	x1, #2			// see what cache we have at this level
+	b.lt	skip			// skip if no cache, or just i-cache
+	save_and_disable_irqs x9	// make CSSELR and CCSIDR access atomic
+	msr	csselr_el1, x10		// select current cache level in csselr
+	isb				// isb to sync the new csselr & ccsidr
+	mrs	x1, ccsidr_el1		// read the new ccsidr
+	restore_irqs x9
+	and	x2, x1, #7		// extract the length of the cache lines
+	add	x2, x2, #4		// add 4 (line length offset)
+	mov	x4, #0x3ff
+	and	x4, x4, x1, lsr #3	// find maximum number on the way size
+	clz	w5, w4			// find bit position of way size increment
+	mov	x7, #0x7fff
+	and	x7, x7, x1, lsr #13	// extract max number of the index size
+loop2:
+	mov	x9, x4			// create working copy of max way size
+loop3:
+	lsl	x6, x9, x5
+	orr	x11, x10, x6		// factor way and cache number into x11
+	lsl	x6, x7, x2
+	orr	x11, x11, x6		// factor index number into x11
+	dc	cisw, x11		// clean & invalidate by set/way
+	subs	x9, x9, #1		// decrement the way
+	b.ge	loop3
+	subs	x7, x7, #1		// decrement the index
+	b.ge	loop2
+skip:
+	add	x10, x10, #2		// increment cache number
+	cmp	x3, x10
+	b.gt	loop1
+finished:
+	mov	x10, #0			// switch back to cache level 0
+	msr	csselr_el1, x10		// select current cache level in csselr
+	dsb	sy
+	isb
+	ret
+ENDPROC(__flush_dcache_all)
+
+/*
+ *	flush_cache_all()
+ *
+ *	Flush the entire cache system.  The data cache flush is now achieved
+ *	using atomic clean / invalidates working outwards from L1 cache. This
+ *	is done using Set/Way based cache maintenance instructions.  The
+ *	instruction cache can still be invalidated back to the point of
+ *	unification in a single instruction.
+ */
+ENTRY(flush_cache_all)
+	mov	x12, lr
+	bl	__flush_dcache_all
+	mov	x0, #0
+	ic	ialluis			// I+BTB cache invalidate
+	ret	x12
+ENDPROC(flush_cache_all)
+
 /*
  *	flush_icache_range(start,end)
  *
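For readers who prefer C, the Set/Way walk above can be sketched as follows. This is an illustration only: read_clidr(), read_ccsidr(), write_csselr(), dc_cisw(), dsb_sy() and isb() are hypothetical stand-ins for the corresponding mrs/msr/dc/dsb/isb instructions, not kernel APIs, and the IRQ masking around the CSSELR/CCSIDR pair is omitted.

	#include <stdint.h>

	/* Hypothetical accessors standing in for system instructions. */
	extern uint64_t read_clidr(void);
	extern uint32_t read_ccsidr(void);
	extern void write_csselr(uint64_t v);
	extern void dc_cisw(uint64_t v);
	extern void dsb_sy(void);
	extern void isb(void);

	static void flush_dcache_all_sketch(void)
	{
		uint64_t clidr = read_clidr();
		unsigned int loc = (clidr >> 24) & 0x7;	/* Level of Coherence */

		for (unsigned int level = 0; level < loc; level++) {
			unsigned int ctype = (clidr >> (level * 3)) & 0x7;

			if (ctype < 2)			/* no cache, or I-cache only */
				continue;

			write_csselr(level << 1);	/* select D/unified cache */
			isb();
			uint32_t ccsidr = read_ccsidr();

			unsigned int line_shift = (ccsidr & 0x7) + 4;	/* log2(bytes/line) */
			unsigned int max_way = (ccsidr >> 3) & 0x3ff;	/* ways - 1 */
			unsigned int max_set = (ccsidr >> 13) & 0x7fff;	/* sets - 1 */
			/* clz of (ways - 1), as in "clz w5, w4" above. */
			unsigned int way_shift = max_way ? __builtin_clz(max_way) : 32;

			for (int set = max_set; set >= 0; set--)
				for (int way = max_way; way >= 0; way--)
					dc_cisw(((uint64_t)way << way_shift) |
						((uint64_t)set << line_shift) |
						(level << 1));
		}
		write_csselr(0);	/* switch back to cache level 0 */
		dsb_sy();
		isb();
	}

The clz trick is what way_shift reproduces: in the DC CISW operand, ways are packed into the top bits, sets sit above the line-size bits, and the cache level occupies bits [3:1].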
arch/arm64/mm/flush.c +1 −0

@@ -82,6 +82,7 @@ EXPORT_SYMBOL(flush_dcache_page);
 /*
  * Additional functions defined in assembly.
  */
+EXPORT_SYMBOL(flush_cache_all);
 EXPORT_SYMBOL(flush_icache_range);
 
 #ifdef CONFIG_ARCH_HAS_PMEM_API
arch/arm64/mm/proc.S +46 −0

@@ -43,6 +43,52 @@
 
 #define MAIR(attr, mt)	((attr) << ((mt) * 8))
 
+/*
+ *	cpu_cache_off()
+ *
+ *	Turn the CPU D-cache off.
+ */
+ENTRY(cpu_cache_off)
+	mrs	x0, sctlr_el1
+	bic	x0, x0, #1 << 2		// clear SCTLR.C
+	msr	sctlr_el1, x0
+	isb
+	ret
+ENDPROC(cpu_cache_off)
+
+/*
+ *	cpu_reset(loc)
+ *
+ *	Perform a soft reset of the system.  Put the CPU into the same state
+ *	as it would be if it had been reset, and branch to what would be the
+ *	reset vector.  It must be executed with the flat identity mapping.
+ *
+ *	- loc   - location to jump to for soft reset
+ */
+	.align	5
+ENTRY(cpu_reset)
+	mrs	x1, sctlr_el1
+	bic	x1, x1, #1
+	msr	sctlr_el1, x1		// disable the MMU
+	isb
+	ret	x0
+ENDPROC(cpu_reset)
+
+ENTRY(cpu_soft_restart)
+	/* Save address of cpu_reset() and reset address */
+	mov	x19, x0
+	mov	x20, x1
+
+	/* Turn D-cache off */
+	bl	cpu_cache_off
+
+	/* Push out all dirty data, and ensure cache is empty */
+	bl	flush_cache_all
+
+	mov	x0, x20
+	ret	x19
+ENDPROC(cpu_soft_restart)
+
 /*
  *	cpu_do_idle()
  *
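Note the contract implied by the mov x19, x0 / ret x19 pair: the first argument to cpu_soft_restart() must be the physical address of cpu_reset(), so the final jump lands in the identity mapping before cpu_reset() turns the MMU off. Turning the D-cache off before flush_cache_all() leaves no window in which new dirty lines can be allocated. A caller sketch, modeled on the arm64 soft_restart() path (setup_mm_for_reboot() is assumed to install the idmap page tables first):

	#include <linux/bug.h>
	#include <asm/memory.h>
	#include <asm/mmu_context.h>
	#include <asm/proc-fns.h>

	void soft_restart(unsigned long addr)
	{
		/* Assumed helper: switch to the flat identity mapping. */
		setup_mm_for_reboot();

		/* Jump to cpu_reset() via its physical (idmap) address. */
		cpu_soft_restart(virt_to_phys(cpu_reset), addr);

		/* cpu_soft_restart() is noreturn; we never get here. */
		BUG();
	}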