
Commit 68234df4 authored by Mark Rutland, committed by Catalin Marinas

arm64: kill flush_cache_all()



The documented semantics of flush_cache_all are not possible to provide
for arm64 (short of flushing the entire physical address space by VA),
and there are currently no users; KVM uses VA maintenance exclusively,
cpu_reset is never called, and the only two users outside of arch code
cannot be built for arm64.

While cpu_soft_reset and related functions (which call flush_cache_all)
were thought to be useful for kexec, their current implementations only
serve to mask bugs. For correctness kexec will need to perform
maintenance by VA anyway to account for system caches, line migration,
and other subtleties of the cache architecture. As the extent of this
cache maintenance will be kexec-specific, it should probably live in the
kexec code.

This patch removes flush_cache_all, and related unused components,
preventing further abuse.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: AKASHI Takahiro <takahiro.akashi@linaro.org>
Cc: Geoff Levand <geoff@infradead.org>
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent e8557d1f
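
Note (not part of the commit): the VA-based alternative the commit message refers to is range maintenance such as __flush_dcache_area(), which this patch keeps; it cleans and invalidates the D-cache by VA to the point of coherency. A minimal, hypothetical sketch of a caller operating on just the range it cares about (the function name and buffer are invented for illustration):

#include <linux/types.h>
#include <asm/cacheflush.h>

/* Hypothetical example: hand a buffer to a non-coherent observer. */
static void example_publish_buffer(void *buf, size_t len)
{
	/*
	 * Clean and invalidate only the D-cache lines covering buf..buf+len,
	 * instead of flushing every cache in the system by set/way.
	 */
	__flush_dcache_area(buf, len);
}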
+0 −5
@@ -40,10 +40,6 @@
  *	the implementation assumes non-aliasing VIPT D-cache and (aliasing)
  *	VIPT or ASID-tagged VIVT I-cache.
  *
- *	flush_cache_all()
- *
- *		Unconditionally clean and invalidate the entire cache.
- *
  *	flush_cache_mm(mm)
  *
  *		Clean and invalidate all user space cache entries
@@ -69,7 +65,6 @@
  *		- kaddr  - page address
  *		- size   - region size
  */
-extern void flush_cache_all(void);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
+0 −4
@@ -28,12 +28,8 @@
 struct mm_struct;
 struct cpu_suspend_ctx;

-extern void cpu_cache_off(void);
 extern void cpu_do_idle(void);
 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
-extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
-void cpu_soft_restart(phys_addr_t cpu_reset,
-		unsigned long addr) __attribute__((noreturn));
 extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
 extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);

+0 −1
@@ -41,7 +41,6 @@ struct mm_struct;
 extern void show_pte(struct mm_struct *mm, unsigned long addr);
 extern void __show_regs(struct pt_regs *);

-void soft_restart(unsigned long);
 extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

 #define UDBG_UNDEFINED	(1 << 0)
+1 −11
@@ -58,14 +58,6 @@ unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif

-void soft_restart(unsigned long addr)
-{
-	setup_mm_for_reboot();
-	cpu_soft_restart(virt_to_phys(cpu_reset), addr);
-	/* Should never get here */
-	BUG();
-}
-
 /*
  * Function pointers to optional machine specific functions
  */
@@ -136,9 +128,7 @@ void machine_power_off(void)

 /*
  * Restart requires that the secondary CPUs stop performing any activity
- * while the primary CPU resets the system. Systems with a single CPU can
- * use soft_restart() as their machine descriptor's .restart hook, since that
- * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * while the primary CPU resets the system. Systems with multiple CPUs must
  * provide a HW restart implementation, to ensure that all CPUs reset at once.
  * This is required so that any code running after reset on the primary CPU
  * doesn't have to co-ordinate with other CPUs to ensure they aren't still
+0 −73
@@ -26,79 +26,6 @@

 #include "proc-macros.S"

-/*
- *	__flush_dcache_all()
- *
- *	Flush the whole D-cache.
- *
- *	Corrupted registers: x0-x7, x9-x11
- */
-__flush_dcache_all:
-	dmb	sy				// ensure ordering with previous memory accesses
-	mrs	x0, clidr_el1			// read clidr
-	and	x3, x0, #0x7000000		// extract loc from clidr
-	lsr	x3, x3, #23			// left align loc bit field
-	cbz	x3, finished			// if loc is 0, then no need to clean
-	mov	x10, #0				// start clean at cache level 0
-loop1:
-	add	x2, x10, x10, lsr #1		// work out 3x current cache level
-	lsr	x1, x0, x2			// extract cache type bits from clidr
-	and	x1, x1, #7			// mask of the bits for current cache only
-	cmp	x1, #2				// see what cache we have at this level
-	b.lt	skip				// skip if no cache, or just i-cache
-	save_and_disable_irqs x9		// make CSSELR and CCSIDR access atomic
-	msr	csselr_el1, x10			// select current cache level in csselr
-	isb					// isb to sych the new cssr&csidr
-	mrs	x1, ccsidr_el1			// read the new ccsidr
-	restore_irqs x9
-	and	x2, x1, #7			// extract the length of the cache lines
-	add	x2, x2, #4			// add 4 (line length offset)
-	mov	x4, #0x3ff
-	and	x4, x4, x1, lsr #3		// find maximum number on the way size
-	clz	w5, w4				// find bit position of way size increment
-	mov	x7, #0x7fff
-	and	x7, x7, x1, lsr #13		// extract max number of the index size
-loop2:
-	mov	x9, x4				// create working copy of max way size
-loop3:
-	lsl	x6, x9, x5
-	orr	x11, x10, x6			// factor way and cache number into x11
-	lsl	x6, x7, x2
-	orr	x11, x11, x6			// factor index number into x11
-	dc	cisw, x11			// clean & invalidate by set/way
-	subs	x9, x9, #1			// decrement the way
-	b.ge	loop3
-	subs	x7, x7, #1			// decrement the index
-	b.ge	loop2
-skip:
-	add	x10, x10, #2			// increment cache number
-	cmp	x3, x10
-	b.gt	loop1
-finished:
-	mov	x10, #0				// swith back to cache level 0
-	msr	csselr_el1, x10			// select current cache level in csselr
-	dsb	sy
-	isb
-	ret
-ENDPROC(__flush_dcache_all)
-
-/*
- *	flush_cache_all()
- *
- *	Flush the entire cache system.  The data cache flush is now achieved
- *	using atomic clean / invalidates working outwards from L1 cache. This
- *	is done using Set/Way based cache maintainance instructions.  The
- *	instruction cache can still be invalidated back to the point of
- *	unification in a single instruction.
- */
-ENTRY(flush_cache_all)
-	mov	x12, lr
-	bl	__flush_dcache_all
-	mov	x0, #0
-	ic	ialluis				// I+BTB cache invalidate
-	ret	x12
-ENDPROC(flush_cache_all)
-
 /*
  *	flush_icache_range(start,end)
  *
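
For reference, the deleted __flush_dcache_all walked every architected cache level by set/way. Below is a rough, hypothetical C rendering of that walk (the helper names are invented for this sketch and are not kernel API; it sketches EL1-only code). It illustrates why the commit message calls the approach insufficient: it only reaches caches described by CLIDR_EL1 and cannot account for system caches or for lines migrating between levels while the walk runs.

#include <stdint.h>

static inline uint64_t read_clidr(void)
{
	uint64_t v;
	asm volatile("mrs %0, clidr_el1" : "=r" (v));
	return v;
}

static inline uint64_t read_ccsidr(void)
{
	uint64_t v;
	asm volatile("mrs %0, ccsidr_el1" : "=r" (v));
	return v;
}

static inline void select_dcache_level(uint64_t level)
{
	/* CSSELR_EL1: bits [3:1] = level, bit 0 = 0 selects the D/unified cache. */
	asm volatile("msr csselr_el1, %0\n\tisb" : : "r" (level << 1));
}

/* Illustrative C equivalent of the removed set/way walk. */
static void flush_dcache_all_by_set_way(void)
{
	uint64_t clidr = read_clidr();
	uint64_t loc = (clidr >> 24) & 0x7;		/* Level of Coherency */
	uint64_t level, set, way;

	asm volatile("dmb sy" ::: "memory");		/* order against earlier accesses */

	for (level = 0; level < loc; level++) {
		uint64_t ctype = (clidr >> (level * 3)) & 0x7;
		uint64_t ccsidr, line_shift, max_way, max_set, way_shift;

		if (ctype < 2)				/* no cache, or I-cache only */
			continue;

		/* The original assembly masked IRQs around this register pair. */
		select_dcache_level(level);
		ccsidr = read_ccsidr();

		line_shift = (ccsidr & 0x7) + 4;	/* log2(bytes per line) */
		max_way = (ccsidr >> 3) & 0x3ff;	/* associativity - 1 */
		max_set = (ccsidr >> 13) & 0x7fff;	/* number of sets - 1 */
		/* The way field sits at the top of the word: shift = CLZ(max_way). */
		way_shift = max_way ? __builtin_clz((uint32_t)max_way) : 32;

		for (set = 0; set <= max_set; set++) {
			for (way = 0; way <= max_way; way++) {
				uint64_t sw = (level << 1) | (way << way_shift) |
					      (set << line_shift);
				/* Clean and invalidate this line by set/way. */
				asm volatile("dc cisw, %0" : : "r" (sw));
			}
		}
	}

	select_dcache_level(0);				/* leave CSSELR_EL1 at level 0 */
	asm volatile("dsb sy\n\tisb" ::: "memory");
}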