Loading arch/mips/mm/cache.c +15 −1 Original line number Diff line number Diff line Loading @@ -3,7 +3,8 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994 - 2003 by Ralf Baechle * Copyright (C) 1994 - 2003, 07 by Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2007 MIPS Technologies, Inc. */ #include <linux/init.h> #include <linux/kernel.h> Loading Loading @@ -88,6 +89,19 @@ void __flush_dcache_page(struct page *page) EXPORT_SYMBOL(__flush_dcache_page); void __flush_anon_page(struct page *page, unsigned long vmaddr) { if (pages_do_alias((unsigned long)page_address(page), vmaddr)) { void *kaddr; kaddr = kmap_coherent(page, vmaddr); flush_data_cache_page((unsigned long)kaddr); kunmap_coherent(kaddr); } } EXPORT_SYMBOL(__flush_anon_page); void __update_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { Loading arch/mips/mm/init.c +2 −2 Original line number Diff line number Diff line Loading @@ -123,7 +123,7 @@ static void __init kmap_coherent_init(void) static inline void kmap_coherent_init(void) {} #endif static inline void *kmap_coherent(struct page *page, unsigned long addr) void *kmap_coherent(struct page *page, unsigned long addr) { enum fixed_addresses idx; unsigned long vaddr, flags, entrylo; Loading Loading @@ -177,7 +177,7 @@ static inline void *kmap_coherent(struct page *page, unsigned long addr) #define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) static inline void kunmap_coherent(struct page *page) void kunmap_coherent(struct page *page) { #ifndef CONFIG_MIPS_MT_SMTC unsigned int wired; Loading include/asm-mips/cacheflush.h +12 −0 Original line number Diff line number Diff line Loading @@ -48,6 +48,15 @@ static inline void flush_dcache_page(struct page *page) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) #define ARCH_HAS_FLUSH_ANON_PAGE extern void __flush_anon_page(struct page *, 
unsigned long); static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) { if (cpu_has_dc_aliases && PageAnon(page)) __flush_anon_page(page, vmaddr); } static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) { Loading Loading @@ -86,4 +95,7 @@ extern void (*flush_data_cache_page)(unsigned long addr); /* Run kernel code uncached, useful for cache probing functions. */ unsigned long __init run_uncached(void *func); extern void *kmap_coherent(struct page *page, unsigned long addr); extern void kunmap_coherent(struct page *page); #endif /* _ASM_CACHEFLUSH_H */ Loading
/*
 * Flush an anonymous page whose kernel mapping may alias the userspace
 * mapping at @vmaddr.  On MIPS CPUs with virtually indexed dcaches the
 * kernel's view and the user's view of the same physical page can live
 * in different cache lines, so the user's colour must be flushed
 * explicitly before the kernel reads the page (e.g. get_user_pages()).
 *
 * @page:   the anonymous page to flush
 * @vmaddr: the userspace virtual address the page is mapped at
 */
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	if (pages_do_alias((unsigned long)page_address(page), vmaddr)) {
		void *kaddr;

		/*
		 * Map the page at a kernel virtual address with the same
		 * cache colour as the user mapping, so flushing that alias
		 * also flushes the user's cached view of the page.
		 */
		kaddr = kmap_coherent(page, vmaddr);
		flush_data_cache_page((unsigned long)kaddr);
		/*
		 * Fix: kunmap_coherent() is declared to take a struct page *,
		 * but the original call passed the void * kernel address.
		 * That converts silently in C yet hands the callee a bogus
		 * page pointer.  Pass the page itself to match the prototype.
		 * NOTE(review): kunmap_coherent() appears not to use its
		 * argument (it only restores wired TLB state), so this is
		 * type hygiene rather than a behaviour change -- confirm
		 * against the full definition in arch/mips/mm/init.c.
		 */
		kunmap_coherent(page);
	}
}

EXPORT_SYMBOL(__flush_anon_page);
arch/mips/mm/init.c +2 −2 Original line number Diff line number Diff line Loading @@ -123,7 +123,7 @@ static void __init kmap_coherent_init(void) static inline void kmap_coherent_init(void) {} #endif static inline void *kmap_coherent(struct page *page, unsigned long addr) void *kmap_coherent(struct page *page, unsigned long addr) { enum fixed_addresses idx; unsigned long vaddr, flags, entrylo; Loading Loading @@ -177,7 +177,7 @@ static inline void *kmap_coherent(struct page *page, unsigned long addr) #define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) static inline void kunmap_coherent(struct page *page) void kunmap_coherent(struct page *page) { #ifndef CONFIG_MIPS_MT_SMTC unsigned int wired; Loading
include/asm-mips/cacheflush.h +12 −0 Original line number Diff line number Diff line Loading @@ -48,6 +48,15 @@ static inline void flush_dcache_page(struct page *page) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) #define ARCH_HAS_FLUSH_ANON_PAGE extern void __flush_anon_page(struct page *, unsigned long); static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) { if (cpu_has_dc_aliases && PageAnon(page)) __flush_anon_page(page, vmaddr); } static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) { Loading Loading @@ -86,4 +95,7 @@ extern void (*flush_data_cache_page)(unsigned long addr); /* Run kernel code uncached, useful for cache probing functions. */ unsigned long __init run_uncached(void *func); extern void *kmap_coherent(struct page *page, unsigned long addr); extern void kunmap_coherent(struct page *page); #endif /* _ASM_CACHEFLUSH_H */