Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f6502791 authored by Atsushi Nemoto, committed by Ralf Baechle
Browse files

[MIPS] Do not use drop_mmu_context to flush other task's VIPT I-cache.



c-r4k.c and c-sb1.c use drop_mmu_context() to flush virtually tagged
I-caches, but this does not work for flushing other task's icache.  This
is for example triggered by copy_to_user_page() called from ptrace(2).
Use indexed flush for such cases.

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent a94d7020
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -475,7 +475,7 @@ static inline void local_r4k_flush_cache_page(void *args)
		}
	}
	if (exec) {
		if (cpu_has_vtag_icache) {
		if (cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
@@ -599,7 +599,7 @@ static inline void local_r4k_flush_icache_page(void *args)
	 * We're not sure of the virtual address(es) involved here, so
	 * we have to flush the entire I-cache.
	 */
	if (cpu_has_vtag_icache) {
	if (cpu_has_vtag_icache && vma->vm_mm == current->active_mm) {
		int cpu = smp_processor_id();

		if (cpu_context(cpu, vma->vm_mm) != 0)
+31 −25
Original line number Diff line number Diff line
@@ -154,6 +154,26 @@ static inline void __sb1_flush_icache_all(void)
	}
}

/*
 * Invalidate a virtual-address range of the (virtually indexed and
 * tagged) icache.  We may not be running with the right ASID for the
 * owning mm, so the flush is done with index ops rather than hit ops:
 * only the cache-index bits of each address are used.
 */
static inline void __sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	unsigned long mask = icache_line_size - 1;
	unsigned long addr;

	/* Round the range outward to whole icache lines. */
	start &= ~mask;
	end = (end + mask) & ~mask;

	for (addr = start; addr != end; addr += icache_line_size)
		cache_set_op(Index_Invalidate_I, addr & icache_index_mask);

	mispredict();
	sync();
}

/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
@@ -173,8 +193,11 @@ static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long
	/*
	 * Bumping the ASID is probably cheaper than the flush ...
	 */
	if (vma->vm_mm == current->active_mm) {
		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		__sb1_flush_icache_range(addr, addr + PAGE_SIZE);
}

#ifdef CONFIG_SMP
@@ -210,26 +233,6 @@ void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsign
	__attribute__((alias("local_sb1_flush_cache_page")));
#endif

/*
 * Invalidate a range of the icache.  The addresses are virtual, and
 * the cache is virtually indexed and tagged.  However, we don't
 * necessarily have the right ASID context, so use index ops instead
 * of hit ops.
 */
static inline void __sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	/* Round the range outward to whole icache lines. */
	start &= ~(icache_line_size - 1);
	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);

	/*
	 * Index ops (not hit ops), so the current ASID is irrelevant;
	 * only the cache-index bits of each address are used.
	 */
	while (start != end) {
		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
		start += icache_line_size;
	}
	mispredict();	/* NOTE(review): presumably flushes branch-predictor state -- confirm */
	sync();
}


/*
 * Invalidate all caches on this CPU
@@ -326,9 +329,12 @@ static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
	 * If there's a context, bump the ASID (cheaper than a flush,
	 * since we don't know VAs!)
	 */
	if (cpu_context(cpu, vma->vm_mm) != 0) {
	if (vma->vm_mm == current->active_mm) {
		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	}
	} else
		__sb1_flush_icache_range(start, start + PAGE_SIZE);

}

#ifdef CONFIG_SMP