
Commit 79568726 authored by Paul Mundt

sh64: Wire up the shared __flush_xxx_region() flushers.



Now with all of the prep work out of the way, kill off the SH-5 variants
and use the SH-4 version directly. This also takes advantage of the
unrolling that was previously done for the new version.
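
For context, the shared flusher pattern being wired up here has roughly the following shape. This is a minimal sketch in the style of the shared flush-sh4.c, assuming 8-way unrolling and the usual L1_CACHE_BYTES constant from <asm/cache.h>; it is illustrative, not the verbatim kernel implementation:

/*
 * Sketch of an unrolled write-back flusher in the style of the shared
 * flush-sh4.c (illustrative, not the verbatim kernel code).
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v, end, cnt;

	/* Round start down and end up to L1 cache line boundaries. */
	v = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES - 1)
		& ~(L1_CACHE_BYTES - 1);
	cnt = (end - v) / L1_CACHE_BYTES;

	/* Unrolled main loop: write back eight cache lines per iteration. */
	while (cnt >= 8) {
		__asm__ __volatile__("ocbwb @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		__asm__ __volatile__("ocbwb @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		__asm__ __volatile__("ocbwb @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		__asm__ __volatile__("ocbwb @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		__asm__ __volatile__("ocbwb @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		__asm__ __volatile__("ocbwb @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		__asm__ __volatile__("ocbwb @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		__asm__ __volatile__("ocbwb @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		cnt -= 8;
	}

	/* Mop up any remaining lines one at a time. */
	while (cnt) {
		__asm__ __volatile__("ocbwb @%0" : : "r" (v));
		v += L1_CACHE_BYTES;
		cnt--;
	}
}

Compared with the straight one-line-per-iteration loops being removed below, the unrolled main loop amortizes the loop overhead across eight ocbwb instructions.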

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 43bc61d8
+1 −1
@@ -9,7 +9,7 @@ mmu-$(CONFIG_MMU) := fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \
 			   extable_64.o
 
 ifndef CONFIG_CACHE_OFF
-obj-y			+= cache-sh5.o
+obj-y			+= cache-sh5.o flush-sh4.o
 endif
 
 obj-y			+= $(mmu-y)
+0 −48
@@ -539,54 +539,6 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
 		sh64_dcache_purge_user_pages(mm, start, end);
 	}
 }
-
-/*
- * Purge the range of addresses from the D-cache.
- *
- * The addresses lie in the superpage mapping. There's no harm if we
- * overpurge at either end - just a small performance loss.
- */
-void __flush_purge_region(void *start, int size)
-{
-	unsigned long long ullend, addr, aligned_start;
-
-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-	addr = L1_CACHE_ALIGN(aligned_start);
-	ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-	while (addr <= ullend) {
-		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-void __flush_wback_region(void *start, int size)
-{
-	unsigned long long ullend, addr, aligned_start;
-
-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-	addr = L1_CACHE_ALIGN(aligned_start);
-	ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-	while (addr < ullend) {
-		__asm__ __volatile__ ("ocbwb %0, 0" : : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-void __flush_invalidate_region(void *start, int size)
-{
-	unsigned long long ullend, addr, aligned_start;
-
-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-	addr = L1_CACHE_ALIGN(aligned_start);
-	ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-	while (addr < ullend) {
-		__asm__ __volatile__ ("ocbi %0, 0" : : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
 #endif /* !CONFIG_DCACHE_DISABLED */
 
 /*
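
A note on the cast chain in the removed code: casting the pointer through (signed long) and (signed long long) before (unsigned long long) sign-extends it, so a 32-bit kernel address with the top bit set maps to the correct 64-bit effective address on SH-5. A minimal illustration, using a hypothetical address value and assuming 32-bit longs:

	/* Hypothetical kernel pointer with the top bit set. */
	void *p = (void *)0x80001000UL;

	/* Zero extension would yield the wrong effective address: */
	unsigned long long wrong = (unsigned long long)(unsigned long)p;
	/* wrong == 0x0000000080001000 */

	/* Sign extension, as in the removed flushers, preserves it: */
	unsigned long long right =
		(unsigned long long)(signed long long)(signed long)p;
	/* right == 0xffffffff80001000 */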