
Commit 2c9b9c84 authored by Russell King

ARM: add size argument to __cpuc_flush_dcache_page



... and rename the function since it no longer operates on just
pages.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent ccaf5f05
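
In short, the change to the call contract (a sketch for orientation, not part of the patch; kaddr stands for any kernel virtual address):

	/* before: flushes exactly one page; start assumed page aligned */
	__cpuc_flush_dcache_page(kaddr);

	/* after: explicit length, so any kernel virtual range works;
	 * the old single-page behaviour becomes the PAGE_SIZE case */
	__cpuc_flush_dcache_area(kaddr, PAGE_SIZE);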
arch/arm/include/asm/cacheflush.h  +5 −5
@@ -211,7 +211,7 @@ struct cpu_cache_fns {
 
 	void (*coherent_kern_range)(unsigned long, unsigned long);
 	void (*coherent_user_range)(unsigned long, unsigned long);
-	void (*flush_kern_dcache_page)(void *);
+	void (*flush_kern_dcache_area)(void *, size_t);
 
 	void (*dma_inv_range)(const void *, const void *);
 	void (*dma_clean_range)(const void *, const void *);
@@ -236,7 +236,7 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
 #define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
-#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
+#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
@@ -255,14 +255,14 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)
+#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
 extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
-extern void __cpuc_flush_dcache_page(void *);
+extern void __cpuc_flush_dcache_area(void *, size_t);
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
@@ -448,7 +448,7 @@ static inline void flush_kernel_dcache_page(struct page *page)
 {
 	/* highmem pages are always flushed upon kunmap already */
 	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 
 #define flush_dcache_mmap_lock(mapping) \
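
In single-cache (non-MULTI_CACHE) builds, the __cpuc_* names resolve by token pasting. A minimal sketch of the expansion, assuming the ____glue/__glue helpers from asm/glue.h:

	#define ____glue(name,fn)	name##fn
	#define __glue(name,fn)		____glue(name,fn)

	/* with _CACHE defined as fa, the renamed macro expands to the
	 * CPU-specific entry point patched in cache-fa.S below:
	 *   __cpuc_flush_dcache_area -> __glue(fa,_flush_kern_dcache_area)
	 *                            -> fa_flush_kern_dcache_area
	 */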
arch/arm/mm/cache-fa.S  +6 −5
@@ -127,15 +127,16 @@ ENTRY(fa_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(kaddr)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
  *
- *	- kaddr   - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- size of region
  */
-ENTRY(fa_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(fa_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -213,7 +214,7 @@ ENTRY(fa_cache_fns)
 	.long	fa_flush_user_cache_range
 	.long	fa_coherent_kern_range
 	.long	fa_coherent_user_range
-	.long	fa_flush_kern_dcache_page
+	.long	fa_flush_kern_dcache_area
 	.long	fa_dma_inv_range
 	.long	fa_dma_clean_range
 	.long	fa_dma_flush_range
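
The only functional change in the fa routine is the loop bound: r1 now carries the caller-supplied size, so the end address is r0 + r1 rather than r0 + PAGE_SZ. A C-level sketch of the same loop, with clean_and_invalidate_dline() as a hypothetical stand-in for the mcr:

	static void fa_flush_kern_dcache_area_sketch(void *addr, size_t size)
	{
		unsigned long p   = (unsigned long)addr;
		unsigned long end = p + size;		/* was p + PAGE_SZ */

		do {
			clean_and_invalidate_dline(p);	/* hypothetical helper:
							 * mcr p15, 0, p, c7, c14, 1 */
			p += CACHE_DLINESIZE;
		} while (p < end);
	}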
arch/arm/mm/cache-v3.S  +5 −4
@@ -72,14 +72,15 @@ ENTRY(v3_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *page, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v3_flush_kern_dcache_page)
+ENTRY(v3_flush_kern_dcache_area)
 	/* FALLTHROUGH */
 
 /*
@@ -129,7 +130,7 @@ ENTRY(v3_cache_fns)
 	.long	v3_flush_user_cache_range
 	.long	v3_coherent_kern_range
 	.long	v3_coherent_user_range
-	.long	v3_flush_kern_dcache_page
+	.long	v3_flush_kern_dcache_area
 	.long	v3_dma_inv_range
 	.long	v3_dma_clean_range
 	.long	v3_dma_flush_range
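
cache-v3.S has no per-area work to do: the entry point is empty and execution falls through to the routine that follows it in the file, so only the symbol and its comment change. Roughly, in C (a sketch; next_routine is a stand-in for whatever follows the FALLTHROUGH):

	void v3_flush_kern_dcache_area(void *addr, size_t size)
	{
		/* no body of its own: control falls through to the
		 * next routine in the file (stand-in call below) */
		next_routine(addr, size);
	}

The same pattern accounts for the cache-v4.S hunk that follows.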
arch/arm/mm/cache-v4.S  +5 −4
@@ -82,14 +82,15 @@ ENTRY(v4_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v4_flush_kern_dcache_page)
+ENTRY(v4_flush_kern_dcache_area)
 	/* FALLTHROUGH */
 
 /*
@@ -141,7 +142,7 @@ ENTRY(v4_cache_fns)
 	.long	v4_flush_user_cache_range
 	.long	v4_coherent_kern_range
 	.long	v4_coherent_user_range
-	.long	v4_flush_kern_dcache_page
+	.long	v4_flush_kern_dcache_area
 	.long	v4_dma_inv_range
 	.long	v4_dma_clean_range
 	.long	v4_dma_flush_range
arch/arm/mm/cache-v4wb.S  +6 −5
@@ -114,15 +114,16 @@ ENTRY(v4wb_flush_user_cache_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v4wb_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(v4wb_flush_kern_dcache_area)
+	add	r1, r0, r1
 	/* fall through */
 
 /*
@@ -224,7 +225,7 @@ ENTRY(v4wb_cache_fns)
 	.long	v4wb_flush_user_cache_range
 	.long	v4wb_coherent_kern_range
 	.long	v4wb_coherent_user_range
-	.long	v4wb_flush_kern_dcache_page
+	.long	v4wb_flush_kern_dcache_area
 	.long	v4wb_dma_inv_range
 	.long	v4wb_dma_clean_range
 	.long	v4wb_dma_flush_range
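
Each *_cache_fns table lists its entries in struct cpu_cache_fns field order, which is why every implementation needs the same one-slot .long update. As a C initializer (a sketch, using only the field names visible in the cacheflush.h hunk above):

	static const struct cpu_cache_fns fa_cache_fns_sketch = {
		/* ... earlier slots elided ... */
		.coherent_kern_range	= fa_coherent_kern_range,
		.coherent_user_range	= fa_coherent_user_range,
		.flush_kern_dcache_area	= fa_flush_kern_dcache_area,	/* renamed slot */
		.dma_inv_range		= fa_dma_inv_range,
		.dma_clean_range	= fa_dma_clean_range,
		/* ... */
	};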