arch/mips/mm/c-r4k.c (+21 −1)

```diff
@@ -618,15 +618,35 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	if (cpu_has_inclusive_pcaches) {
 		if (size >= scache_size)
 			r4k_blast_scache();
-		else
+		else {
+			unsigned long lsize = cpu_scache_line_size();
+			unsigned long almask = ~(lsize - 1);
+
+			/*
+			 * There is no clearly documented alignment requirement
+			 * for the cache instruction on MIPS processors and
+			 * some processors, among them the RM5200 and RM7000
+			 * QED processors will throw an address error for cache
+			 * hit ops with insufficient alignment. Solved by
+			 * aligning the address to cache line size.
+			 */
+			cache_op(Hit_Writeback_Inv_SD, addr & almask);
+			cache_op(Hit_Writeback_Inv_SD,
+				 (addr + size - 1) & almask);
 			blast_inv_scache_range(addr, addr + size);
+		}
 		return;
 	}
 
 	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
+		unsigned long lsize = cpu_dcache_line_size();
+		unsigned long almask = ~(lsize - 1);
+
 		R4600_HIT_CACHEOP_WAR_IMPL;
+		cache_op(Hit_Writeback_Inv_D, addr & almask);
+		cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
 		blast_inv_dcache_range(addr, addr + size);
 	}
 
```
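For context, here is a minimal user-space sketch of the alignment arithmetic the patch adds. The 32-byte `LINE_SIZE` and the `addr`/`size` values are made-up illustrations, not taken from the patch; the kernel obtains the real line size from `cpu_scache_line_size()` or `cpu_dcache_line_size()`.

```c
#include <stdio.h>

#define LINE_SIZE 32UL	/* hypothetical line size; the kernel queries the CPU */

int main(void)
{
	/* Same mask construction as the patch: clear the sub-line bits. */
	unsigned long almask = ~(LINE_SIZE - 1);

	unsigned long addr = 0x1005;	/* deliberately misaligned start */
	unsigned long size = 0x43;	/* range also ends mid-line */

	/*
	 * These are the addresses the two extra cache_op() calls receive:
	 * the line-aligned first and last lines of the range.  Hit ops on
	 * aligned addresses avoid the address error that RM5200/RM7000
	 * parts raise for insufficiently aligned operands.
	 */
	unsigned long first_line = addr & almask;
	unsigned long last_line  = (addr + size - 1) & almask;

	printf("first line: %#lx\n", first_line);	/* prints 0x1000 */
	printf("last line:  %#lx\n", last_line);	/* prints 0x1040 */
	return 0;
}
```

A side benefit of this structure, presumably intended: the head and tail lines get a writeback-invalidate (`Hit_Writeback_Inv_*`) rather than a plain invalidate, so dirty data sharing those partial lines is written back before `blast_inv_*_range()` invalidates the body of the range.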