
Commit 6665398a authored by Russell King

Merge branch 'cache' (early part)

parents c0caac93 bf32eb85
arch/arm/common/dmabounce.c  +4 −8
@@ -308,15 +308,11 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 			memcpy(ptr, buf->safe, size);
 
 			/*
-			 * DMA buffers must have the same cache properties
-			 * as if they were really used for DMA - which means
-			 * data must be written back to RAM.  Note that
-			 * we don't use dmac_flush_range() here for the
-			 * bidirectional case because we know the cache
-			 * lines will be coherent with the data written.
+			 * Since we may have written to a page cache page,
+			 * we need to ensure that the data will be coherent
+			 * with user mappings.
 			 */
-			dmac_clean_range(ptr, ptr + size);
-			outer_clean_range(__pa(ptr), __pa(ptr) + size);
+			__cpuc_flush_kernel_dcache_area(ptr, size);
 		}
 		free_safe_buffer(dev->archdata.dmabounce, buf);
 	}
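The new comment's reasoning: memcpy() writes the device's data through the kernel mapping of what may be a page cache page, and on a VIVT or aliasing VIPT cache those dirty lines must be flushed so user mappings of the same page observe the data. For context, a sketch of a driver path that would end up in unmap_single() above; example_rx and its buffer handling are illustrative, not part of this commit:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	/* Hypothetical receive path on a dmabounce platform.  The device
	 * cannot address 'buf' directly, so dma_map_single() substitutes a
	 * DMA-safe bounce buffer; dma_unmap_single() then reaches
	 * unmap_single() above, which copies the data back and flushes the
	 * kernel mapping so user mappings of the page stay coherent. */
	static int example_rx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... program the device with 'handle' and wait for DMA ... */

		dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
		return 0;
	}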
arch/arm/include/asm/cacheflush.h  +5 −12
@@ -211,7 +211,7 @@ struct cpu_cache_fns {
 
 	void (*coherent_kern_range)(unsigned long, unsigned long);
 	void (*coherent_user_range)(unsigned long, unsigned long);
-	void (*flush_kern_dcache_page)(void *);
+	void (*flush_kern_dcache_area)(void *, size_t);
 
 	void (*dma_inv_range)(const void *, const void *);
 	void (*dma_clean_range)(const void *, const void *);
@@ -236,7 +236,7 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
 #define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
-#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
+#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
@@ -255,14 +255,14 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)
+#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
 extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
-extern void __cpuc_flush_dcache_page(void *);
+extern void __cpuc_flush_dcache_area(void *, size_t);
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
@@ -448,7 +448,7 @@ static inline void flush_kernel_dcache_page(struct page *page)
 {
 	/* highmem pages are always flushed upon kunmap already */
 	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 
 #define flush_dcache_mmap_lock(mapping) \
@@ -465,13 +465,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
  */
 #define flush_icache_page(vma,page)	do { } while (0)
 
-static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
-	unsigned offset, size_t size)
-{
-	const void *start = (void __force *)virt + offset;
-	dmac_inv_range(start, start + size);
-}
-
 /*
  * flush_cache_vmap() is used when creating mappings (eg, via vmap,
  * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
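Taken together, these header hunks swap a page-sized primitive for one that takes an explicit length, so a full page becomes just the size == PAGE_SIZE case. A minimal sketch of a caller after the rename; flush_written_region is illustrative, not from this commit:

	#include <asm/cacheflush.h>

	/* Illustrative helper: with the renamed hook a caller can flush
	 * exactly the region it wrote instead of rounding up to whole
	 * pages. */
	static void flush_written_region(void *kaddr, size_t len)
	{
		/* was: __cpuc_flush_dcache_page(kaddr), one page at a time */
		__cpuc_flush_dcache_area(kaddr, len);
	}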
arch/arm/mm/cache-fa.S  +6 −5
@@ -127,15 +127,16 @@ ENTRY(fa_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(kaddr)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
  *
- *	- kaddr   - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- size of region
  */
-ENTRY(fa_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(fa_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -213,7 +214,7 @@ ENTRY(fa_cache_fns)
 	.long	fa_flush_user_cache_range
 	.long	fa_coherent_kern_range
 	.long	fa_coherent_user_range
-	.long	fa_flush_kern_dcache_page
+	.long	fa_flush_kern_dcache_area
 	.long	fa_dma_inv_range
 	.long	fa_dma_clean_range
 	.long	fa_dma_flush_range
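The key change in the assembly: the old entry hard-coded the end address as addr + PAGE_SZ, while the new one computes it from the size now passed in r1. For readers who do not follow ARM assembly, roughly equivalent C; clean_inv_dcache_line() is a hypothetical stand-in for the coprocessor write, and the hunk ends at the cmp, so the branch back and any trailing write-buffer drain are not shown above:

	#define CACHE_DLINESIZE	16	/* FA526 D-cache line size, per cache-fa.S */

	extern void clean_inv_dcache_line(unsigned long line);	/* hypothetical */

	/* Sketch of fa_flush_kern_dcache_area's loop. */
	static void fa_flush_kern_dcache_area_sketch(void *addr, size_t size)
	{
		unsigned long p = (unsigned long)addr;
		unsigned long end = p + size;		/* add	r1, r0, r1 */

		do {
			clean_inv_dcache_line(p);	/* mcr	p15, 0, r0, c7, c14, 1 */
			p += CACHE_DLINESIZE;		/* add	r0, r0, #CACHE_DLINESIZE */
		} while (p < end);			/* cmp	r0, r1 */
	}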
arch/arm/mm/cache-l2x0.c  +72 −21
@@ -28,69 +28,120 @@
 static void __iomem *l2x0_base;
 static DEFINE_SPINLOCK(l2x0_lock);
 
-static inline void sync_writel(unsigned long val, unsigned long reg,
-			       unsigned long complete_mask)
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&l2x0_lock, flags);
-	writel(val, l2x0_base + reg);
 	/* wait for the operation to complete */
-	while (readl(l2x0_base + reg) & complete_mask)
+	while (readl(reg) & mask)
 		;
-	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static inline void cache_sync(void)
 {
-	sync_writel(0, L2X0_CACHE_SYNC, 1);
+	void __iomem *base = l2x0_base;
+	writel(0, base + L2X0_CACHE_SYNC);
+	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
 static inline void l2x0_inv_all(void)
 {
+	unsigned long flags;
+
 	/* invalidate all ways */
-	sync_writel(0xff, L2X0_INV_WAY, 0xff);
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel(0xff, l2x0_base + L2X0_INV_WAY);
+	cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
-		sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
+		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+		writel(start, base + L2X0_CLEAN_INV_LINE_PA);
 		start += CACHE_LINE_SIZE;
 	}
 
 	if (end & (CACHE_LINE_SIZE - 1)) {
 		end &= ~(CACHE_LINE_SIZE - 1);
-		sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
+		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+		writel(end, base + L2X0_CLEAN_INV_LINE_PA);
 	}
 
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_INV_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			cache_wait(base + L2X0_INV_LINE_PA, 1);
+			writel(start, base + L2X0_INV_LINE_PA);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_INV_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_CLEAN_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+			writel(start, base + L2X0_CLEAN_LINE_PA);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_flush_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_CLEAN_INV_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+			writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
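The old sync_writel() took the spinlock, wrote the operation register, and spun until completion for every single cache line. The rewritten range operations invert this: they wait for the *previous* background operation before issuing the next write (overlapping the L2 operation with the loop), hold the lock across at most one 4096-byte block, and briefly release it between blocks so that interrupts, which spin_lock_irqsave() keeps disabled locally, get a chance to be serviced. Condensed into one hypothetical helper (the kernel code above keeps the three functions separate; names come from the file above):

	#include <linux/io.h>
	#include <linux/kernel.h>
	#include <linux/spinlock.h>

	/* Sketch of the pattern shared by l2x0_inv_range, l2x0_clean_range
	 * and l2x0_flush_range after this change; 'reg' would be one of the
	 * L2X0_*_LINE_PA operation registers. */
	static void l2x0_op_range_sketch(void __iomem *reg, unsigned long start,
					 unsigned long end)
	{
		unsigned long flags;

		spin_lock_irqsave(&l2x0_lock, flags);
		while (start < end) {
			unsigned long blk_end = start + min(end - start, 4096UL);

			while (start < blk_end) {
				cache_wait(reg, 1);	/* previous line done? */
				writel(start, reg);	/* start this line */
				start += CACHE_LINE_SIZE;
			}

			if (blk_end < end) {
				/* bound the IRQ-off time between 4 KiB blocks */
				spin_unlock_irqrestore(&l2x0_lock, flags);
				spin_lock_irqsave(&l2x0_lock, flags);
			}
		}
		cache_wait(reg, 1);			/* drain the final line op */
		cache_sync();
		spin_unlock_irqrestore(&l2x0_lock, flags);
	}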
arch/arm/mm/cache-v3.S  +5 −4
@@ -72,14 +72,15 @@ ENTRY(v3_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *page, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v3_flush_kern_dcache_page)
+ENTRY(v3_flush_kern_dcache_area)
 	/* FALLTHROUGH */
 
 /*
@@ -129,7 +130,7 @@ ENTRY(v3_cache_fns)
 	.long	v3_flush_user_cache_range
 	.long	v3_coherent_kern_range
 	.long	v3_coherent_user_range
-	.long	v3_flush_kern_dcache_page
+	.long	v3_flush_kern_dcache_area
 	.long	v3_dma_inv_range
 	.long	v3_dma_clean_range
 	.long	v3_dma_flush_range
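As with fa_cache_fns, this .long table must mirror struct cpu_cache_fns from the cacheflush.h hunk field for field, because a MULTI_CACHE kernel reaches these routines through the struct members. A sketch of how the C side resolves; the demo function is illustrative:

	#include <asm/cacheflush.h>

	extern struct cpu_cache_fns cpu_cache;	/* points at e.g. v3_cache_fns */

	/* Illustrative only: on a MULTI_CACHE build the macro
	 * __cpuc_flush_dcache_area expands to the struct member below, so
	 * the renamed entry must sit at the same offset in every per-CPU
	 * table (fa_cache_fns, v3_cache_fns, ...). */
	static void flush_dcache_area_demo(void *addr, size_t size)
	{
		cpu_cache.flush_kern_dcache_area(addr, size);
	}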