
Commit b638d0b9 authored by Richard Curnow, committed by Paul Mundt

sh: Optimized cache handling for SH-4/SH-4A caches.



This reworks some of the SH-4 cache handling code to more easily
accommodate newer-style caches (particularly for the more-than-direct-mapped,
i.e. set-associative, case), as well as optimizing some of the old code.

Signed-off-by: Richard Curnow <richard.curnow@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent fdfc74f9
+15 −1
@@ -4,6 +4,7 @@
 * CPU init code
 *
 * Copyright (C) 2002, 2003  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
@@ -51,7 +52,15 @@ static void __init cache_init(void)
	ccr = ctrl_inl(CCR);

	/*
	 * If the cache is already enabled .. flush it.
	 * At this point we don't know whether the cache is enabled or not - a
	 * bootloader may have enabled it.  There are at least 2 things that
	 * could be dirty in the cache at this point:
	 * 1. kernel command line set up by boot loader
	 * 2. spilled registers from the prolog of this function
	 * => before re-initialising the cache, we must do a purge of the whole
	 * cache out to memory for safety.  As long as nothing is spilled
	 * during the loop to lines that have already been done, this is safe.
	 * - RPC
	 */
	if (ccr & CCR_CACHE_ENABLE) {
		unsigned long ways, waysize, addrstart;
@@ -98,6 +107,8 @@ static void __init cache_init(void)
	/* Force EMODE if possible */
	if (cpu_data->dcache.ways > 1)
		flags |= CCR_CACHE_EMODE;
	else
		flags &= ~CCR_CACHE_EMODE;
#endif

#ifdef CONFIG_SH_WRITETHROUGH
@@ -112,6 +123,9 @@ static void __init cache_init(void)
	/* Turn on OCRAM -- halve the OC */
	flags |= CCR_CACHE_ORA;
	cpu_data->dcache.sets >>= 1;

	cpu_data->dcache.way_size = cpu_data->dcache.sets *
				    cpu_data->dcache.linesz;
#endif

	ctrl_outl(flags, CCR);
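
For illustration, a minimal user-space sketch (not part of the patch) of the
OCRAM arithmetic above: enabling on-chip RAM steals half of the operand
cache, so the set count is halved and way_size must be recomputed from it.
The geometry below (16KB, 1 way, 32-byte lines) is assumed for the example.

#include <stdio.h>

int main(void)
{
	unsigned int sets = 512, linesz = 32;	/* 16KB, 1 way, 32-byte lines */
	unsigned int way_size;

	sets >>= 1;			/* OCRAM: halve the operand cache */
	way_size = sets * linesz;	/* 256 * 32 = 8KB left as cache */

	printf("sets=%u way_size=%u\n", sets, way_size);
	return 0;
}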
+11 −0
@@ -113,6 +113,11 @@ int __init detect_cpu_and_cache_system(void)
		break;
	}

#ifdef CONFIG_SH_DIRECT_MAPPED
	cpu_data->icache.ways = 1;
	cpu_data->dcache.ways = 1;
#endif

	/*
	 * On anything that's not a direct-mapped cache, look to the CVR
	 * for I/D-cache specifics.
@@ -125,6 +130,9 @@ int __init detect_cpu_and_cache_system(void)
			(cpu_data->icache.way_incr - (1 << 5));
	}

	cpu_data->icache.way_size = cpu_data->icache.sets *
				    cpu_data->icache.linesz;

	if (cpu_data->dcache.ways > 1) {
		size = sizes[(cvr >> 16) & 0xf];
		cpu_data->dcache.way_incr	= (size >> 1);
@@ -133,6 +141,9 @@ int __init detect_cpu_and_cache_system(void)
			(cpu_data->dcache.way_incr - (1 << 5));
	}

	cpu_data->dcache.way_size = cpu_data->dcache.sets *
				    cpu_data->dcache.linesz;

	return 0;
}
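
The way_size fields added above are derived from the CVR-probed geometry.
Here is a worked, user-space sketch of that derivation, assuming a 2-way,
32KB D-cache with 32-byte lines (the sizes[] lookup and the CVR read are
elided; all numbers are for the example only):

#include <stdio.h>

int main(void)
{
	unsigned int size = 32768;		/* total D-cache bytes (assumed) */
	unsigned int ways = 2, linesz = 32;
	unsigned int way_incr, entry_mask, sets, way_size;

	way_incr   = size >> 1;			/* 16KB stride between the 2 ways */
	entry_mask = way_incr - (1 << 5);	/* 0x3fe0: the set-index bits */
	sets       = size / (ways * linesz);	/* 512 sets per way */
	way_size   = sets * linesz;		/* 16KB: the new field */

	printf("way_incr=0x%x entry_mask=0x%x sets=%u way_size=%u\n",
	       way_incr, entry_mask, sets, way_size);
	return 0;
}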
+430 −87
@@ -25,28 +25,95 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

extern void __flush_cache_4096(unsigned long addr, unsigned long phys,
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);
extern void __flush_cache_4096_all(unsigned long start);
static void __flush_cache_4096_all_ex(unsigned long start);
extern void __flush_dcache_all(void);
static void __flush_dcache_all_ex(void);

/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out, which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;

static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1;
}

static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		cpu_data->icache.ways,
		cpu_data->icache.sets,
		cpu_data->icache.way_incr);
	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		cpu_data->icache.entry_mask,
		cpu_data->icache.alias_mask,
		cpu_data->icache.n_aliases);
	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		cpu_data->dcache.ways,
		cpu_data->dcache.sets,
		cpu_data->dcache.way_incr);
	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		cpu_data->dcache.entry_mask,
		cpu_data->dcache.alias_mask,
		cpu_data->dcache.n_aliases);

	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}

/*
 * SH-4 has a virtually indexed and physically tagged cache.
 */

struct semaphore p3map_sem[4];
/* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
#define MAX_P3_SEMAPHORES 16

struct semaphore p3map_sem[MAX_P3_SEMAPHORES];

void __init p3_cache_init(void)
{
	int i;

	compute_alias(&cpu_data->icache);
	compute_alias(&cpu_data->dcache);

	switch (cpu_data->dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

	emit_cache_params();

	if (remap_area_pages(P3SEG, 0, PAGE_SIZE * 4, _PAGE_CACHABLE))
		panic("%s failed.", __FUNCTION__);

	sema_init (&p3map_sem[0], 1);
	sema_init (&p3map_sem[1], 1);
	sema_init (&p3map_sem[2], 1);
	sema_init (&p3map_sem[3], 1);
	for (i = 0; i < cpu_data->dcache.n_aliases; i++)
		sema_init(&p3map_sem[i], 1);
}

/*
@@ -91,7 +158,6 @@ void __flush_purge_region(void *start, int size)
	}
}


/*
 * No write back please
 */
@@ -110,46 +176,6 @@ void __flush_invalidate_region(void *start, int size)
	}
}

static void __flush_dcache_all_ex(void)
{
	unsigned long addr, end_addr, entry_offset;

	end_addr = CACHE_OC_ADDRESS_ARRAY +
		(cpu_data->dcache.sets << cpu_data->dcache.entry_shift) *
		 cpu_data->dcache.ways;

	entry_offset = 1 << cpu_data->dcache.entry_shift;
	for (addr = CACHE_OC_ADDRESS_ARRAY;
	     addr < end_addr;
	     addr += entry_offset) {
		ctrl_outl(0, addr);
	}
}

static void __flush_cache_4096_all_ex(unsigned long start)
{
	unsigned long addr, entry_offset;
	int i;

	entry_offset = 1 << cpu_data->dcache.entry_shift;
	for (i = 0; i < cpu_data->dcache.ways;
	     i++, start += cpu_data->dcache.way_incr) {
		for (addr = CACHE_OC_ADDRESS_ARRAY + start;
		     addr < CACHE_OC_ADDRESS_ARRAY + 4096 + start;
		     addr += entry_offset) {
			ctrl_outl(0, addr);
		}
	}
}

void flush_cache_4096_all(unsigned long start)
{
	if (cpu_data->dcache.ways == 1)
		__flush_cache_4096_all(start);
	else
		__flush_cache_4096_all_ex(start);
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
@@ -180,9 +206,11 @@ void flush_cache_sigtramp(unsigned long addr)

	local_irq_save(flags);
	jump_to_P2();

	for (i = 0; i < cpu_data->icache.ways;
	     i++, index += cpu_data->icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */

	back_to_P1();
	wmb();
	local_irq_restore(flags);
@@ -194,8 +222,8 @@ static inline void flush_cache_4096(unsigned long start,
	unsigned long flags;

	/*
	 * SH7751, SH7751R, and ST40 have no restriction to handle cache.
	 * (While SH7750 must do that at P2 area.)
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG)
	   || start < CACHE_OC_ADDRESS_ARRAY) {
@@ -217,12 +245,13 @@ void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop all the D-cache */
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY,          phys);
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x1000, phys);
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x2000, phys);
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x3000, phys);
		n = cpu_data->dcache.n_aliases;
		for (i = 0; i < n; i++, addr += PAGE_SIZE)
			flush_cache_4096(addr, phys);
	}

	wmb();
@@ -246,10 +275,7 @@ static inline void flush_icache_all(void)

void flush_dcache_all(void)
{
	if (cpu_data->dcache.ways == 1)
		__flush_dcache_all();
	else
		__flush_dcache_all_ex();
	(*__flush_dcache_segment_fn)(0UL, cpu_data->dcache.way_size);
	wmb();
}

@@ -261,6 +287,16 @@ void flush_cache_all(void)

void flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * Note : (RPC) since the caches are physically tagged, the only point
	 * of flush_cache_mm for SH-4 is to get rid of aliases from the
	 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
	 * lines can stay resident so long as the virtual address they were
	 * accessed with (hence cache set) is in accord with the physical
	 * address (i.e. tag).  It's no different here.  So I reckon we don't
	 * need to flush the I-cache, since aliases don't matter for that.  We
	 * should try that.
	 */
	flush_cache_all();
}

@@ -273,25 +309,37 @@ void flush_cache_mm(struct mm_struct *mm)
void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	alias_mask = cpu_data->dcache.alias_mask;

	/* We only need to flush D-cache when we have alias */
	if ((address^phys) & CACHE_ALIAS) {
	if ((address^phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & CACHE_ALIAS),
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & CACHE_ALIAS),
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	if (vma->vm_flags & VM_EXEC)
		/* Loop 4K (half) of the I-cache */
	alias_mask = cpu_data->icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & 0x1000),
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}

/*
 * Write back and invalidate D-caches.
@@ -305,14 +353,28 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigne
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	unsigned long p = start & PAGE_MASK;
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = cpu_data->dcache.alias_mask;
	unsigned long n_aliases = cpu_data->dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	unsigned long phys;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	pte_t entry;
	unsigned long phys;
	unsigned long d = 0;
	int i;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (n_aliases == 0)
		return;

	all_aliases_mask = (1 << n_aliases) - 1;

	/*
	 * Don't bother with the lookup and alias check if we have a
@@ -338,37 +400,50 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			p &= ~((1 << PMD_SHIFT) - 1);
			p += (1 << PMD_SHIFT);
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			entry = *pte;

			if ((pte_val(entry) & _PAGE_PRESENT)) {
				phys = pte_val(entry) & PTE_PHYS_MASK;
				if ((p^phys) & CACHE_ALIAS) {
					d |= 1 << ((p & CACHE_ALIAS)>>12);
					d |= 1 << ((phys & CACHE_ALIAS)>>12);
					if (d == 0x0f)

				if ((p ^ phys) & alias_mask) {
					d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
					d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

					if (d == all_aliases_mask)
						goto loop_exit;
				}
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	if (d & 1)
		flush_cache_4096_all(0);
	if (d & 2)
		flush_cache_4096_all(0x1000);
	if (d & 4)
		flush_cache_4096_all(0x2000);
	if (d & 8)
		flush_cache_4096_all(0x3000);
	if (vma->vm_flags & VM_EXEC)
	for (i = 0, select_bit = 0x1, addr_offset = 0x0; i < n_aliases;
	     i++, select_bit <<= 1, addr_offset += PAGE_SIZE)
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}

/*
 * flush_icache_user_range
@@ -384,3 +459,271 @@ void flush_icache_user_range(struct vm_area_struct *vma,
	mb();
}

/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &cpu_data->dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}

/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the function
 * bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache.  -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &cpu_data->dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &cpu_data->dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &cpu_data->dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}
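
To make the flush_cache_range() rework above concrete, here is a compact
user-space sketch of its alias-bitmap walk, with hypothetical virt/phys
pairs standing in for the page-table scan: each aliasing page marks the
cache colour of both its virtual and its physical address, and the scan
stops early once every colour is dirty.

#include <stdio.h>

#define PAGE_SHIFT	12

int main(void)
{
	unsigned long alias_mask = 0x3000;	/* 4 colours (assumed) */
	unsigned long all_aliases_mask = (1 << 4) - 1;
	unsigned long d = 0;

	/* Hypothetical virt/phys pairs standing in for the PTE walk. */
	unsigned long virt[] = { 0x10002000, 0x10003000 };
	unsigned long phys[] = { 0x0c001000, 0x0c003000 };

	for (int i = 0; i < 2 && d != all_aliases_mask; i++) {
		if ((virt[i] ^ phys[i]) & alias_mask) {
			d |= 1 << ((virt[i] & alias_mask) >> PAGE_SHIFT);
			d |= 1 << ((phys[i] & alias_mask) >> PAGE_SHIFT);
		}
	}

	/* First pair aliases with colours 2 and 1; second does not alias. */
	printf("dirty colour bitmap: 0x%02lx\n", d);	/* prints 0x06 */
	return 0;
}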
+1 −98
@@ -193,102 +193,5 @@ ENTRY(__clear_user_page)
	 nop
.L4096:	.word	4096

ENTRY(__flush_cache_4096)
	mov.l	1f,r3
	add	r6,r3
	mov	r4,r0
	mov	#64,r2
	shll	r2
	mov	#64,r6
	jmp	@r3
	 mov	#96,r7
	.align	2
1:	.long	2f
2:
	.rept	32
	mov.l	r5,@r0
	mov.l	r5,@(32,r0)
	mov.l	r5,@(r0,r6)
	mov.l	r5,@(r0,r7)
	add	r2,r5
	add	r2,r0
	.endr
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	rts
	 nop

ENTRY(__flush_dcache_all)
	mov.l	2f,r0
	mov.l	3f,r4
	and	r0,r4		! r4 = (unsigned long)&empty_zero_page[0] & ~0xffffc000
	stc	sr,r1		! save SR
	mov.l	4f,r2
	or	r1,r2
	mov	#32,r3
	shll2	r3
1:
	ldc	r2,sr		! set BL bit
	movca.l	r0,@r4
	ocbi	@r4
	add	#32,r4
	movca.l	r0,@r4
	ocbi	@r4
	add	#32,r4
	movca.l	r0,@r4
	ocbi	@r4
	add	#32,r4
	movca.l	r0,@r4
	ocbi	@r4
	ldc	r1,sr		! restore SR
	dt	r3
	bf/s	1b
	 add	#32,r4

	rts
	 nop
	.align	2
2:	.long	0xffffc000
3:	.long	empty_zero_page
4:	.long	0x10000000	! BL bit

/* __flush_cache_4096_all(unsigned long addr) */
ENTRY(__flush_cache_4096_all)
	mov.l	2f,r0
	mov.l	3f,r2
	and	r0,r2
	or	r2,r4		! r4 = addr | (unsigned long)&empty_zero_page[0] & ~0x3fff
	stc	sr,r1		! save SR
	mov.l	4f,r2
	or	r1,r2
	mov	#32,r3
1:
	ldc	r2,sr		! set BL bit
	movca.l	r0,@r4
	ocbi	@r4
	add	#32,r4
	movca.l	r0,@r4
	ocbi	@r4
	add	#32,r4
	movca.l	r0,@r4
	ocbi	@r4
	add	#32,r4
	movca.l	r0,@r4
	ocbi	@r4
	ldc	r1,sr		! restore SR
	dt	r3
	bf/s	1b
	 add	#32,r4

	rts
	 nop
	.align	2
2:	.long	0xffffc000
3:	.long	empty_zero_page
4:	.long	0x10000000	! BL bit
#endif
+18 −4
@@ -23,15 +23,29 @@
#define L1_CACHE_ALIGN(x)	(((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))

struct cache_info {
	unsigned int ways;
	unsigned int sets;
	unsigned int linesz;
	unsigned int ways;		/* Number of cache ways */
	unsigned int sets;		/* Number of cache sets */
	unsigned int linesz;		/* Cache line size (bytes) */

	unsigned int way_incr;
	unsigned int way_size;		/* sets * line size */

	/*
	 * way_incr is the address offset for accessing the next way
	 * in memory mapped cache array ops.
	 */
	unsigned int way_incr;
	unsigned int entry_shift;
	unsigned int entry_mask;

	/*
	 * Compute a mask which selects the address bits which overlap between
	 * 1. those used to select the cache set during indexing
	 * 2. those in the physical page number.
	 */
	unsigned int alias_mask;

	unsigned int n_aliases;		/* Number of aliases */

	unsigned long flags;
};
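
Finally, how alias_mask and n_aliases fall out of these fields in
compute_alias(): a worked sketch assuming the classic SH7750-style D-cache
(16KB, direct-mapped, 32-byte lines, hence 512 sets and entry_shift 5).
The results, 0x3000 and 4, match the CACHE_ALIAS constant and the four
hard-wired page flushes that this patch retires.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned int sets = 512, entry_shift = 5;	/* assumed geometry */
	unsigned long alias_mask, n_aliases;

	alias_mask = ((sets - 1) << entry_shift) & ~(PAGE_SIZE - 1);
	n_aliases  = (alias_mask >> PAGE_SHIFT) + 1;

	printf("alias_mask=0x%lx n_aliases=%lu\n", alias_mask, n_aliases);
	return 0;
}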