
Commit 6abdd491 authored by Will Deacon

ARM: mm: use inner-shareable barriers for TLB and user cache operations



System-wide barriers aren't required for situations where we only need
to make visibility and ordering guarantees in the inner-shareable domain
(i.e. we are not dealing with devices or potentially incoherent CPUs).

This patch changes the v7 TLB operations, coherent_user_range and
dcache_clean_area functions to use inner-shareable barriers. For cache
maintenance, only the store access type is required to ensure completion.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 62cbbc42
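For reference, a minimal sketch of the barrier variants involved (illustrative only, not part of the patch). The DSB option names are architectural ARMv7 ones; the comments paraphrase the rationale in the commit message above:

	dsb	sy	@ full-system barrier; equivalent to a plain "dsb", as used before this patch
	dsb	ish	@ waits for reads and writes to complete, but only within the
			@ inner-shareable domain (the CPUs sharing the page tables)
	dsb	ishst	@ as above, but waits for stores only; per the commit message,
			@ the store access type suffices to ensure completion of cache maintenance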
arch/arm/mm/cache-v7.S +2 −2
@@ -282,7 +282,7 @@ ENTRY(v7_coherent_user_range)
 	add	r12, r12, r2
 	cmp	r12, r1
 	blo	1b
-	dsb
+	dsb	ishst
 	icache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r12, r0, r3
@@ -294,7 +294,7 @@ ENTRY(v7_coherent_user_range)
 	mov	r0, #0
 	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
 	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
-	dsb
+	dsb	ishst
 	isb
 	mov	pc, lr


arch/arm/mm/proc-v7.S +1 −1
@@ -83,7 +83,7 @@ ENTRY(cpu_v7_dcache_clean_area)
 	add	r0, r0, r2
 	subs	r1, r1, r2
 	bhi	2b
-	dsb
+	dsb	ishst
 	mov	pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)


arch/arm/mm/tlb-v7.S +4 −4
@@ -35,7 +35,7 @@
 ENTRY(v7wbi_flush_user_tlb_range)
 	vma_vm_mm r3, r2			@ get vma->vm_mm
 	mmid	r3, r3				@ get vm_mm->context.id
-	dsb
+	dsb	ish
 	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
 	mov	r1, r1, lsr #PAGE_SHIFT
 	asid	r3, r3				@ mask ASID
@@ -56,7 +56,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
-	dsb
+	dsb	ish
 	mov	pc, lr
 ENDPROC(v7wbi_flush_user_tlb_range)
 
@@ -69,7 +69,7 @@ ENDPROC(v7wbi_flush_user_tlb_range)
  *	- end   - end address (exclusive, may not be aligned)
  */
 ENTRY(v7wbi_flush_kern_tlb_range)
-	dsb
+	dsb	ish
 	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
 	mov	r1, r1, lsr #PAGE_SHIFT
 	mov	r0, r0, lsl #PAGE_SHIFT
@@ -84,7 +84,7 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
-	dsb
+	dsb	ish
 	isb
 	mov	pc, lr
 ENDPROC(v7wbi_flush_kern_tlb_range)