Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0dfae7d5 authored by Paul Mundt
Browse files

sh: Use the now generic SH-4 clear/copy page ops for all MMU platforms.



Now that the SH-4 page clear/copy ops are generic, they can be used for
all platforms with CONFIG_MMU=y. SH-5 remains the odd one out, but it too
will gradually be converted over to using this interface.

SH-3 platforms which do not contain aliases will see no impact from this
change, while aliasing SH-3 platforms will get the same interface as
SH-4.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 221c007b
Loading
Loading
Loading
Loading
+0 −15
Original line number Original line Diff line number Diff line
@@ -49,7 +49,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
	flush_dcache_page(page);
	flush_dcache_page(page);
}
}


#if (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)) && !defined(CONFIG_CACHE_OFF)
extern void copy_to_user_page(struct vm_area_struct *vma,
extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);
	unsigned long len);
@@ -57,20 +56,6 @@ extern void copy_to_user_page(struct vm_area_struct *vma,
extern void copy_from_user_page(struct vm_area_struct *vma,
extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);
	unsigned long len);
#else
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
		flush_icache_user_range(vma, page, vaddr, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
	} while (0)
#endif


#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()
+6 −5
Original line number Original line Diff line number Diff line
@@ -63,22 +63,23 @@ extern void copy_page(void *to, void *from);
struct page;
struct page;
struct vm_area_struct;
struct vm_area_struct;


#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
#if defined(CONFIG_CPU_SH5)
	(defined(CONFIG_CPU_SH5) || defined(CONFIG_CPU_SH4) || \
	 defined(CONFIG_SH7705_CACHE_32KB))
extern void clear_user_page(void *to, unsigned long address, struct page *page);
extern void clear_user_page(void *to, unsigned long address, struct page *page);
extern void copy_user_page(void *to, void *from, unsigned long address,
extern void copy_user_page(void *to, void *from, unsigned long address,
			   struct page *page);
			   struct page *page);
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)

#elif defined(CONFIG_MMU)
extern void copy_user_highpage(struct page *to, struct page *from,
extern void copy_user_highpage(struct page *to, struct page *from,
			       unsigned long vaddr, struct vm_area_struct *vma);
			       unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
extern void clear_user_highpage(struct page *page, unsigned long vaddr);
extern void clear_user_highpage(struct page *page, unsigned long vaddr);
#define clear_user_highpage	clear_user_highpage
#define clear_user_highpage	clear_user_highpage
#endif

#else
#else

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#endif
#endif


/*
/*
+1 −2
Original line number Original line Diff line number Diff line
@@ -141,8 +141,7 @@ extern void paging_init(void);
extern void page_table_range_init(unsigned long start, unsigned long end,
extern void page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd);
				  pgd_t *pgd);


#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
#if defined(CONFIG_MMU) && !defined(CONFIG_CPU_SH5)
    defined(CONFIG_SH7705_CACHE_32KB)) && defined(CONFIG_MMU)
extern void kmap_coherent_init(void);
extern void kmap_coherent_init(void);
#else
#else
#define kmap_coherent_init()	do { } while (0)
#define kmap_coherent_init()	do { } while (0)
+1 −5
Original line number Original line Diff line number Diff line
@@ -15,7 +15,7 @@ endif
obj-y			+= $(cache-y)
obj-y			+= $(cache-y)


mmu-y			:= tlb-nommu.o pg-nommu.o
mmu-y			:= tlb-nommu.o pg-nommu.o
mmu-$(CONFIG_MMU)	:= fault_32.o tlbflush_32.o ioremap_32.o
mmu-$(CONFIG_MMU)	:= fault_32.o tlbflush_32.o ioremap_32.o pg-mmu.o


obj-y			+= $(mmu-y)
obj-y			+= $(mmu-y)
obj-$(CONFIG_DEBUG_FS)	+= asids-debugfs.o
obj-$(CONFIG_DEBUG_FS)	+= asids-debugfs.o
@@ -29,10 +29,6 @@ tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o
tlb-$(CONFIG_CPU_SH4)		:= tlb-sh4.o
tlb-$(CONFIG_CPU_SH4)		:= tlb-sh4.o
tlb-$(CONFIG_CPU_HAS_PTEAEX)	:= tlb-pteaex.o
tlb-$(CONFIG_CPU_HAS_PTEAEX)	:= tlb-pteaex.o
obj-y				+= $(tlb-y)
obj-y				+= $(tlb-y)
ifndef CONFIG_CACHE_OFF
obj-$(CONFIG_CPU_SH4)		+= pg-sh4.o
obj-$(CONFIG_SH7705_CACHE_32KB)	+= pg-sh4.o
endif
endif
endif


obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
+17 −0
Original line number Original line Diff line number Diff line
@@ -831,4 +831,21 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
	else
	else
		sh64_clear_user_page_coloured(to, address);
		sh64_clear_user_page_coloured(to, address);
}
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_range(vma, page, vaddr, len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}
#endif
#endif
Loading