include/asm-ppc/tlbflush.h → include/asm-powerpc/tlbflush.h  (+146 −0)

Resulting file:

#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H
/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__

#include <linux/config.h>

struct mm_struct;

#ifdef CONFIG_PPC64

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

struct ppc64_tlb_batch {
	unsigned long index;
	struct mm_struct *mm;
	pte_t pte[PPC64_TLB_BATCH_NR];
	unsigned long vaddr[PPC64_TLB_BATCH_NR];
	unsigned int large;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

static inline void flush_tlb_pending(void)
{
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}

extern void flush_hash_page(unsigned long va, pte_t pte, int local);
void flush_hash_range(unsigned long number, int local);

#else /* CONFIG_PPC64 */

#include <linux/mm.h>

extern void _tlbie(unsigned long address);
extern void _tlbia(void);

/*
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */
#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
#define flush_tlb_pending()	asm volatile ("tlbia; sync" : : : "memory")
#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
#define flush_tlb_pending()	_tlbia()
#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

#endif /* CONFIG_PPC64 */

#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
	defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	flush_tlb_pending();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
#ifdef CONFIG_PPC64
	flush_tlb_pending();
#else
	_tlbie(vmaddr);
#endif
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
#ifndef CONFIG_PPC64
	_tlbie(vmaddr);
#endif
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	flush_tlb_pending();
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_pending();
}

#else	/* 6xx, 7xx, 7xxx cpus */

extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma,
				  unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#endif

/*
 * This is called in munmap when we have freed up some page-table
 * pages.  We don't need to do anything here, there's nothing special
 * about our page-table pages.  -- paulus
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}

#endif /*__KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */
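The PPC64 side of the merged header defers invalidations into the per-CPU ppc64_tlb_batch and drains it through flush_tlb_pending(). A minimal sketch of the fill side, assuming CONFIG_PPC64; example_queue_flush() is a hypothetical helper (in the real kernel the queueing happens in the hash-PTE update path, which also drains the batch when the mm changes):

/*
 * Illustrative sketch only -- not part of this patch.  Assumes
 * __flush_tlb_pending() resets batch->index after flushing, as the
 * real implementation does.
 */
#include <asm/tlbflush.h>

static void example_queue_flush(struct mm_struct *mm,
				unsigned long vaddr, pte_t pte)
{
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);

	/* Record the stale translation instead of flushing immediately. */
	batch->mm = mm;
	batch->pte[batch->index] = pte;
	batch->vaddr[batch->index] = vaddr;

	/* Drain eagerly once the batch is full. */
	if (++batch->index == PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);

	put_cpu_var(ppc64_tlb_batch);
}

Batching amortizes the cost of tlbie broadcasts: up to PPC64_TLB_BATCH_NR (192) invalidations are issued in one pass rather than one trap-inducing flush per PTE.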
include/asm-ppc64/tlbflush.h  deleted  (100644 → 0, +0 −52)

Deleted file:

#ifndef _PPC64_TLBFLUSH_H
#define _PPC64_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

struct mm_struct;

struct ppc64_tlb_batch {
	unsigned long index;
	struct mm_struct *mm;
	pte_t pte[PPC64_TLB_BATCH_NR];
	unsigned long vaddr[PPC64_TLB_BATCH_NR];
	unsigned int large;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

static inline void flush_tlb_pending(void)
{
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}

#define flush_tlb_mm(mm)			flush_tlb_pending()
#define flush_tlb_page(vma, addr)		flush_tlb_pending()
#define flush_tlb_page_nohash(vma, addr)	do { } while (0)
#define flush_tlb_range(vma, start, end) \
	do { (void)(start); flush_tlb_pending(); } while (0)
#define flush_tlb_kernel_range(start, end)	flush_tlb_pending()
#define flush_tlb_pgtables(mm, start, end)	do { } while (0)

extern void flush_hash_page(unsigned long va, pte_t pte, int local);
void flush_hash_range(unsigned long number, int local);

#endif /* _PPC64_TLBFLUSH_H */
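The merged header keeps flush_tlb_pgtables() as a no-op: on powerpc there is nothing special about page-table pages, so only the translations themselves need flushing. A sketch of how a caller drives the two interfaces; example_unmap() is hypothetical and stands in for the generic mm unmap path:

/*
 * Illustrative sketch only -- not part of this patch.
 */
#include <linux/mm.h>
#include <asm/tlbflush.h>

static void example_unmap(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	/* ... PTEs for [start, end) have already been cleared ... */
	flush_tlb_range(vma, start, end);	/* drop stale translations */

	/* ... page-table pages freed ... */
	flush_tlb_pgtables(vma->vm_mm, start, end);	/* no-op on powerpc */
}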