
Commit d46befef authored by Robin Murphy, committed by Catalin Marinas

arm64: Convert __inval_cache_range() to area-based

__inval_cache_range() is already the odd one out among our data cache
maintenance routines as the only remaining range-based one; as we're
going to want an invalidation routine to call from C code for the pmem
API, let's tweak the prototype and name to bring it in line with the
clean operations, and to make its relationship with __dma_inv_area()
neatly mirror that of __clean_dcache_area_poc() and __dma_clean_area().
The loop clearing the early page tables gets mildly massaged in the
process for the sake of consistency.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 09c2a7dc
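
For reference, the conversion is purely a calling-convention change: the range-based routine took a (start, end) pair, while the area-based routines take a base address and a size. A minimal C sketch of the equivalence (the function names below are illustrative stand-ins, not the kernel's assembly entry points):

#include <stddef.h>

/* Old, range-based convention: operate on [start, end). */
void inval_cache_range(unsigned long start, unsigned long end);

/* New, area-based convention: operate on [addr, addr + len),
 * matching __flush_dcache_area() and the clean routines. */
void inval_dcache_area(void *addr, size_t len);

/* One trivially wraps the other: */
static inline void inval_range_via_area(unsigned long start, unsigned long end)
{
	inval_dcache_area((void *)start, end - start);
}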
arch/arm64/include/asm/cacheflush.h +1 −0
@@ -67,6 +67,7 @@
  */
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
+extern void __inval_dcache_area(void *addr, size_t len);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
 extern void __clean_dcache_area_pou(void *addr, size_t len);
 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
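
Exporting the prototype above is what makes the routine callable from C, per the pmem motivation in the commit message. A hypothetical usage sketch (the caller and buffer are invented for illustration):

#include <linux/types.h>
#include <asm/cacheflush.h>

/* Hypothetical: drop stale D-cache lines covering a buffer that an
 * external agent has updated behind the CPU, before reading it. */
static void example_read_prepare(void *buf, size_t len)
{
	__inval_dcache_area(buf, len);
}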
arch/arm64/kernel/head.S +9 −9
@@ -143,8 +143,8 @@ preserve_boot_args:
 	dmb	sy				// needed before dc ivac with
 						// MMU off
 
-	add	x1, x0, #0x20			// 4 x 8 bytes
-	b	__inval_cache_range		// tail call
+	mov	x1, #0x20			// 4 x 8 bytes
+	b	__inval_dcache_area		// tail call
 ENDPROC(preserve_boot_args)
 
 /*
@@ -221,20 +221,20 @@ __create_page_tables:
 	 * dirty cache lines being evicted.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
-	bl	__inval_cache_range
+	ldr	x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+	bl	__inval_dcache_area
 
 	/*
 	 * Clear the idmap and swapper page tables.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x6, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
+	ldr	x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
 1:	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
-	cmp	x0, x6
-	b.lo	1b
+	subs	x1, x1, #64
+	b.ne	1b
 
 	mov	x7, SWAPPER_MM_MMUFLAGS
 
@@ -307,9 +307,9 @@ __create_page_tables:
 	 * tables again to remove any speculatively loaded cache lines.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
+	ldr	x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
 	dmb	sy
-	bl	__inval_cache_range
+	bl	__inval_dcache_area
 
 	ret	x28
 ENDPROC(__create_page_tables)
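
The loop above is the "mild massage" the commit message mentions: rather than computing an end pointer into x6 and comparing against it, the rewritten loop counts the byte total in x1 down by 64 per iteration (four 16-byte stp stores), so the same (base, size) register pair serves both the clearing loop and the __inval_dcache_area calls. A rough C analogue (illustrative only; assumes size is a non-zero multiple of 64, as the page-table directory sizes are):

#include <stdint.h>
#include <stddef.h>

/* Before: end-pointer form, needing an extra register for 'end'. */
static void clear_by_end(uint64_t *p, uint64_t *end)
{
	while (p < end) {			/* cmp x0, x6; b.lo 1b */
		p[0] = p[1] = p[2] = p[3] = 0;	/* 4 x stp xzr, xzr ... */
		p[4] = p[5] = p[6] = p[7] = 0;	/* ... = 64 bytes per pass */
		p += 8;
	}
}

/* After: size form, reusing the (base, size) pair already needed
 * for __inval_dcache_area. */
static void clear_by_size(uint64_t *p, size_t size)
{
	do {
		p[0] = p[1] = p[2] = p[3] = 0;
		p[4] = p[5] = p[6] = p[7] = 0;
		p += 8;
		size -= 64;			/* subs x1, x1, #64 */
	} while (size != 0);			/* b.ne 1b */
}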
arch/arm64/mm/cache.S +14 −9
@@ -109,20 +109,25 @@ ENTRY(__clean_dcache_area_pou)
 ENDPROC(__clean_dcache_area_pou)
 
 /*
- *	__dma_inv_area(start, size)
- *	- start   - virtual start address of region
+ *	__inval_dcache_area(kaddr, size)
+ *
+ * 	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * 	are invalidated. Any partial lines at the ends of the interval are
+ *	also cleaned to PoC to prevent data loss.
+ *
+ *	- kaddr   - kernel address
  *	- size    - size in question
  */
-__dma_inv_area:
-	add	x1, x1, x0
+ENTRY(__inval_dcache_area)
 	/* FALLTHROUGH */
 
 /*
- *	__inval_cache_range(start, end)
- *	- start   - start address of region
- *	- end     - end address of region
+ *	__dma_inv_area(start, size)
+ *	- start   - virtual start address of region
+ *	- size    - size in question
  */
-ENTRY(__inval_cache_range)
+__dma_inv_area:
+	add	x1, x1, x0
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	tst	x1, x3				// end cache line aligned?
@@ -140,7 +145,7 @@ ENTRY(__inval_cache_range)
 	b.lo	2b
 	dsb	sy
 	ret
-ENDPIPROC(__inval_cache_range)
+ENDPIPROC(__inval_dcache_area)
 ENDPROC(__dma_inv_area)
 
 /*
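
The rewritten comment block above captures the subtlety worth noting: cache lines that only partially overlap the buffer may also hold live neighbouring data, so plainly invalidating them would lose it; the edge lines are therefore cleaned and invalidated (dc civac) while fully covered lines are just invalidated (dc ivac). A rough C rendering of __inval_dcache_area falling through into __dma_inv_area, with dcache_line_size(), dc_civac(), dc_ivac() and dsb_sy() as hypothetical stand-ins for the real instructions:

#include <stddef.h>

/* Hypothetical stand-ins for the hardware primitives. */
extern unsigned long dcache_line_size(void);	/* derived from CTR_EL0 */
extern void dc_civac(unsigned long addr);	/* dc civac, Xn */
extern void dc_ivac(unsigned long addr);	/* dc ivac, Xn */
extern void dsb_sy(void);			/* dsb sy */

/* Sketch only; mirrors the structure, not every edge case. */
static void inval_dcache_area_sketch(unsigned long kaddr, size_t size)
{
	unsigned long end = kaddr + size;	/* add x1, x1, x0 */
	unsigned long line = dcache_line_size();
	unsigned long mask = line - 1;

	if (end & mask)				/* partial line at the end: */
		dc_civac(end & ~mask);		/* clean & invalidate it */
	end &= ~mask;

	if (kaddr & mask) {			/* partial line at the start: */
		kaddr &= ~mask;
		dc_civac(kaddr);		/* clean & invalidate it */
		kaddr += line;
	}

	while (kaddr < end) {			/* whole lines in between: */
		dc_ivac(kaddr);			/* plain invalidate */
		kaddr += line;
	}

	dsb_sy();				/* ensure completion before returning */
}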