Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a8ca8b64 authored by Ralf Baechle
Browse files

MIPS: Avoid destructive invalidation on partial cachelines.



See mailing-list discussion, Message-ID: <e9c3a7c20901051031y528d0d31r18d44c5096c59e0@mail.gmail.com>.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 012703e0
Loading
Loading
Loading
Loading
+21 −1
Original line number Original line Diff line number Diff line
@@ -618,15 +618,35 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
	if (cpu_has_inclusive_pcaches) {
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
		if (size >= scache_size)
			r4k_blast_scache();
			r4k_blast_scache();
		else
		else {
			unsigned long lsize = cpu_scache_line_size();
			unsigned long almask = ~(lsize - 1);

			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors and
			 * some processors, among them the RM5200 and RM7000
			 * QED processors will throw an address error for cache
			 * hit ops with insufficient alignment.  Solved by
			 * aligning the address to cache line size.
			 */
			cache_op(Hit_Writeback_Inv_SD, addr & almask);
			cache_op(Hit_Writeback_Inv_SD,
				 (addr + size - 1) & almask);
			blast_inv_scache_range(addr, addr + size);
			blast_inv_scache_range(addr, addr + size);
		}
		return;
		return;
	}
	}


	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
		r4k_blast_dcache();
	} else {
	} else {
		unsigned long lsize = cpu_dcache_line_size();
		unsigned long almask = ~(lsize - 1);

		R4600_HIT_CACHEOP_WAR_IMPL;
		R4600_HIT_CACHEOP_WAR_IMPL;
		cache_op(Hit_Writeback_Inv_D, addr & almask);
		cache_op(Hit_Writeback_Inv_D, (addr + size - 1)  & almask);
		blast_inv_dcache_range(addr, addr + size);
		blast_inv_dcache_range(addr, addr + size);
	}
	}