
Commit 2277ab4a authored by Paul Mundt

sh: Migrate from PG_mapped to PG_dcache_dirty.



This inverts the delayed dcache flush a bit to be more in line with other
platforms. At the same time this also gives us the ability to do some
more optimizations and cleanup. Now that the update_mmu_cache() callsite
only tests for the bit, the implementation can gradually be split out and
made generic, rather than relying on special implementations for each of
the peculiar CPU types.
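
As a concrete illustration of the inverted test, here is a minimal sketch of the consumer side, assuming an SH-4 style write-back flush; the helper name maybe_flush_deferred_dcache() is hypothetical and not part of this commit, and the per-CPU implementations differ in how they actually flush:

#include <linux/mm.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical helper (illustrative only): consume the deferred-flush
 * bit at PTE-set time.  flush_dcache_page() set PG_dcache_dirty instead
 * of flushing immediately; here the bit is tested and cleared, and the
 * kernel mapping of the page is written back before the user mapping
 * goes live.
 */
static void maybe_flush_deferred_dcache(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);

		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
			__flush_wback_region(page_address(page), PAGE_SIZE);
	}
}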

SH7705 in 32kB mode and SH-4 still need slightly different handling, but
this is something that can remain isolated in the varying page copy/clear
routines. On top of that, SH-X3 is dcache coherent, so there is no need
to bother with any of these tests in the PTEAEX version of
update_mmu_cache(), so we kill that off too.
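
With a coherent dcache there is nothing to defer, so the PTEAEX flavour of update_mmu_cache() reduces to a plain TLB preload. A rough sketch under that assumption follows; register and helper names (MMU_PTEH, MMU_PTEAEX, MMU_PTEL, get_asid(), ctrl_outl()) follow the usual sh-mmu conventions, and the PTE value handling is simplified, so treat the body as illustrative rather than the exact post-commit code:

#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/io.h>

void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t pte)
{
	unsigned long flags;

	/* ptrace may call this for a non-current mm; nothing to preload */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);
	ctrl_outl(address & MMU_VPN_MASK, MMU_PTEH);	/* VPN */
	ctrl_outl(get_asid(), MMU_PTEAEX);		/* ASID, via PTEAEX */
	ctrl_outl(pte_val(pte), MMU_PTEL);		/* PPN + protection (simplified) */
	asm volatile("ldtlb" : /* no output */ : /* no input */ : "memory");
	local_irq_restore(flags);
}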

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent c0b96cf6
arch/sh/include/asm/page.h +6 −0
@@ -50,6 +50,12 @@ extern unsigned long shm_align_mask;
 extern unsigned long max_low_pfn, min_low_pfn;
 extern unsigned long memory_start, memory_end;
 
+static inline unsigned long
+pages_do_alias(unsigned long addr1, unsigned long addr2)
+{
+	return (addr1 ^ addr2) & shm_align_mask;
+}
+
 extern void clear_page(void *to);
 extern void copy_page(void *to, void *from);
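
The new pages_do_alias() helper returns non-zero when two virtual addresses differ in the bits covered by shm_align_mask, i.e. when they select different cache colours and can therefore alias in the VIPT dcache. A hypothetical caller, purely for illustration (the function name is not from this commit):

#include <linux/mm.h>
#include <asm/page.h>

/*
 * Illustrative only: a page-copy routine needs the alias-safe slow path
 * exactly when the kernel mapping of the page and the user virtual
 * address fall in different cache colours.
 */
static int copy_needs_colour_fixup(struct page *page, unsigned long user_vaddr)
{
	unsigned long kaddr = (unsigned long)page_address(page);

	return pages_do_alias(kaddr, user_vaddr) != 0;
}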

arch/sh/include/asm/pgtable.h +0 −7
@@ -133,13 +133,6 @@ typedef pte_t *pte_addr_t;
  */
 #define pgtable_cache_init()	do { } while (0)
 
-#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
-	defined(CONFIG_SH7705_CACHE_32KB))
-struct mm_struct;
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-#endif
-
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
 			     unsigned long address, pte_t pte);
arch/sh/include/cpu-sh3/cpu/cacheflush.h +1 −4
@@ -15,10 +15,7 @@
  * SH4. Unlike the SH4 this is a unified cache so we need to do some work
  * in mmap when 'exec'ing a new binary
  */
- /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
-#define CACHE_ALIAS 0x00001000
-
-#define PG_mapped	PG_arch_1
+#define PG_dcache_dirty	PG_arch_1
 
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
arch/sh/include/cpu-sh4/cpu/cacheflush.h +1 −1
@@ -38,6 +38,6 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 /* Initialization of P3 area for copy_user_page */
 void p3_cache_init(void);
 
-#define PG_mapped	PG_arch_1
+#define PG_dcache_dirty	PG_arch_1
 
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
arch/sh/mm/cache-sh4.c +9 −1
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/io.h>
 #include <linux/mutex.h>
+#include <linux/fs.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>

@@ -246,7 +247,14 @@ static inline void flush_cache_4096(unsigned long start,
  */
 void flush_dcache_page(struct page *page)
 {
-	if (test_bit(PG_mapped, &page->flags)) {
+	struct address_space *mapping = page_mapping(page);
+
+#ifndef CONFIG_SMP
+	if (mapping && !mapping_mapped(mapping))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else
+#endif
+	{
 		unsigned long phys = PHYSADDR(page_address(page));
 		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
 		int i, n;