arch/sh/kernel/cpu/sh5/entry.S  +28 −0

@@ -143,12 +143,22 @@ resvec_save_area:
 trap_jtable:
 	.long	do_exception_error		/* 0x000 */
 	.long	do_exception_error		/* 0x020 */
+#ifdef CONFIG_MMU
 	.long	tlb_miss_load			/* 0x040 */
 	.long	tlb_miss_store			/* 0x060 */
+#else
+	.long	do_exception_error
+	.long	do_exception_error
+#endif
 	! ARTIFICIAL pseudo-EXPEVT setting
 	.long	do_debug_interrupt		/* 0x080 */
+#ifdef CONFIG_MMU
 	.long	tlb_miss_load			/* 0x0A0 */
 	.long	tlb_miss_store			/* 0x0C0 */
+#else
+	.long	do_exception_error
+	.long	do_exception_error
+#endif
 	.long	do_address_error_load		/* 0x0E0 */
 	.long	do_address_error_store		/* 0x100 */
 #ifdef CONFIG_SH_FPU
@@ -185,10 +195,18 @@ trap_jtable:
 	.endr
 	.long	do_IRQ			/* 0xA00 */
 	.long	do_IRQ			/* 0xA20 */
+#ifdef CONFIG_MMU
 	.long	itlb_miss_or_IRQ	/* 0xA40 */
+#else
+	.long	do_IRQ
+#endif
 	.long	do_IRQ			/* 0xA60 */
 	.long	do_IRQ			/* 0xA80 */
+#ifdef CONFIG_MMU
 	.long	itlb_miss_or_IRQ	/* 0xAA0 */
+#else
+	.long	do_IRQ
+#endif
 	.long	do_exception_error	/* 0xAC0 */
 	.long	do_address_error_exec	/* 0xAE0 */
 	.rept 8
@@ -274,6 +292,7 @@ not_a_tlb_miss:
  * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
  * block making sure the final alignment is correct.
  */
+#ifdef CONFIG_MMU
 tlb_miss:
 	synco	/* TAKum03020 (but probably a good idea anyway.) */
 	putcon	SP, KCR1
@@ -377,6 +396,9 @@ fixup_to_invoke_general_handler:
 	getcon	KCR1, SP
 	pta	handle_exception, tr0
 	blink	tr0, ZERO
+#else /* CONFIG_MMU */
+	.balign 256
+#endif

 /* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
    DOES END UP AT VBR+0x600 */
@@ -1103,6 +1125,7 @@ restore_all:
  * fpu_error_or_IRQ? is a helper to deflect to the right cause.
  *
  */
+#ifdef CONFIG_MMU
 tlb_miss_load:
 	or	SP, ZERO, r2
 	or	ZERO, ZERO, r3	/* Read */
@@ -1132,6 +1155,7 @@ call_do_page_fault:
 	movi	do_page_fault, r6
 	ptabs	r6, tr0
 	blink	tr0, ZERO
+#endif /* CONFIG_MMU */

 fpu_error_or_IRQA:
 	pta	its_IRQ, tr0
@@ -1481,6 +1505,7 @@ poke_real_address_q:
 	ptabs	LINK, tr0
 	blink	tr0, r63

+#ifdef CONFIG_MMU
 /*
  * --- User Access Handling Section
  */
@@ -1604,6 +1629,7 @@ ___clear_user_exit:
 	ptabs	LINK, tr0
 	blink	tr0, ZERO

+#endif /* CONFIG_MMU */

 /*
  * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
@@ -2014,9 +2040,11 @@ sa_default_restorer:
 	.global	asm_uaccess_start	/* Just a marker */
 asm_uaccess_start:

+#ifdef CONFIG_MMU
 	.long	___copy_user1, ___copy_user_exit
 	.long	___copy_user2, ___copy_user_exit
 	.long	___clear_user1, ___clear_user_exit
+#endif
 	.long	___strncpy_from_user1, ___strncpy_from_user_exit
 	.long	___strnlen_user1, ___strnlen_user_exit
 	.long	___get_user_asm_b1, ___get_user_asm_b_exit
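The pattern throughout the entry.S change is to keep the trap jump table's size and slot order fixed and only swap the targets: with CONFIG_MMU off, the TLB-miss slots fall back to do_exception_error and the ITLB slots to plain do_IRQ, since a TLB miss cannot legitimately occur without an MMU. Below is a minimal stand-alone C sketch of that table-guarding idiom (hypothetical names, not kernel code); it compiles and runs both with and without -DCONFIG_MMU:

#include <stdio.h>

/* Hypothetical stand-ins for the real SH-5 trap handlers. */
static void do_exception_error(void) { puts("unexpected exception"); }

#ifdef CONFIG_MMU
static void tlb_miss_load(void)  { puts("TLB miss (load)"); }
static void tlb_miss_store(void) { puts("TLB miss (store)"); }
#else
/* nommu: there is no TLB, so a TLB-miss vector can only be an error. */
#define tlb_miss_load	do_exception_error
#define tlb_miss_store	do_exception_error
#endif

typedef void (*trap_handler_t)(void);

/* Same size and slot order in both configs; only the targets differ. */
static const trap_handler_t trap_jtable[] = {
	do_exception_error,	/* 0x000 */
	do_exception_error,	/* 0x020 */
	tlb_miss_load,		/* 0x040 */
	tlb_miss_store,		/* 0x060 */
};

int main(void)
{
	unsigned int expevt = 0x040;	/* pretend hardware latched this */

	trap_jtable[expevt / 0x20]();	/* dispatch as the jump table does */
	return 0;
}

Keeping the table dense this way means the EXPEVT-based indexing never has to change between configurations; only the #else arms differ.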
arch/sh/mm/Makefile_64  +4 −3

@@ -2,10 +2,11 @@
 # Makefile for the Linux SuperH-specific parts of the memory manager.
 #

-obj-y			:= init.o extable_64.o consistent.o
+obj-y			:= init.o consistent.o

-mmu-y			:= tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)	:= fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o
+mmu-y			:= tlb-nommu.o pg-nommu.o extable_32.o
+mmu-$(CONFIG_MMU)	:= fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \
+			   extable_64.o

 ifndef CONFIG_CACHE_OFF
 obj-y			+= cache-sh5.o
arch/sh/mm/cache-sh5.c  +2 −0

@@ -714,6 +714,7 @@ void flush_cache_sigtramp(unsigned long vaddr)
 	sh64_icache_inv_current_user_range(vaddr, end);
 }

+#ifdef CONFIG_MMU
 /*
  * These *MUST* lie in an area of virtual address space that's otherwise
  * unused.
@@ -830,3 +831,4 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 	else
 		sh64_clear_user_page_coloured(to, address);
 }
+#endif
include/asm-sh/io.h  +7 −5

@@ -268,11 +268,6 @@ unsigned long long peek_real_address_q(unsigned long long addr);
 unsigned long long poke_real_address_q(unsigned long long addr,
 				       unsigned long long val);

-/* arch/sh/mm/ioremap_64.c */
-unsigned long onchip_remap(unsigned long addr, unsigned long size,
-			   const char *name);
-extern void onchip_unmap(unsigned long vaddr);
-
 #if !defined(CONFIG_MMU)
 #define virt_to_phys(address)	((unsigned long)(address))
 #define phys_to_virt(address)	((void *)(address))
@@ -302,9 +297,16 @@ extern void onchip_unmap(unsigned long vaddr);
 void __iomem *__ioremap(unsigned long offset, unsigned long size,
 			unsigned long flags);
 void __iounmap(void __iomem *addr);
+
+/* arch/sh/mm/ioremap_64.c */
+unsigned long onchip_remap(unsigned long addr, unsigned long size,
+			   const char *name);
+extern void onchip_unmap(unsigned long vaddr);
 #else
 #define __ioremap(offset, size, flags)	((void __iomem *)(offset))
 #define __iounmap(addr)			do { } while (0)
+#define onchip_remap(addr, size, name)	(addr)
+#define onchip_unmap(addr)		do { } while (0)
 #endif /* CONFIG_MMU */

 static inline void __iomem *
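The io.h change moves the onchip_remap()/onchip_unmap() prototypes under the existing CONFIG_MMU branch and gives nommu builds trivial fallbacks: remapping becomes the identity (physical == virtual) and unmapping a no-op, so callers need no #ifdefs of their own. A small compile-both-ways sketch follows (stand-in implementation and 'demo' values are hypothetical; the real MMU versions live in arch/sh/mm/ioremap_64.c):

#include <stdio.h>

#ifdef CONFIG_MMU
/* Stand-in for the real implementation in arch/sh/mm/ioremap_64.c. */
static unsigned long onchip_remap(unsigned long addr, unsigned long size,
				  const char *name)
{
	printf("remapping %#lx (%lu bytes) as '%s'\n", addr, size, name);
	return addr + 0x1000;	/* pretend a new virtual address was chosen */
}
static void onchip_unmap(unsigned long vaddr) { (void)vaddr; }
#else
/* nommu: physical == virtual, so remap is the identity, unmap a no-op. */
#define onchip_remap(addr, size, name)	(addr)
#define onchip_unmap(addr)		do { } while (0)
#endif

int main(void)
{
	unsigned long virt = onchip_remap(0xfe000000UL, 0x1000UL, "demo");

	printf("virt = %#lx\n", virt);	/* identity on a nommu build */
	onchip_unmap(virt);
	return 0;
}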
include/asm-sh/mmu_context.h  +3 −1

@@ -27,6 +27,7 @@
 /* ASID is 8-bit value, so it can't be 0x100 */
 #define MMU_NO_ASID			0x100

+#ifdef CONFIG_MMU
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
 #define cpu_context(cpu, mm)	((mm)->context.id[cpu])
@@ -38,7 +39,6 @@
  */
 #define MMU_VPN_MASK	0xfffff000

-#ifdef CONFIG_MMU
 #if defined(CONFIG_SUPERH32)
 #include "mmu_context_32.h"
 #else
@@ -129,6 +129,8 @@ static inline void switch_mm(struct mm_struct *prev,
 #define destroy_context(mm)		do { } while (0)
 #define set_asid(asid)			do { } while (0)
 #define get_asid()			(0)
+#define cpu_asid(cpu, mm)		({ (void)cpu; 0; })
+#define switch_and_save_asid(asid)	(0)
 #define set_TTB(pgd)			do { } while (0)
 #define get_TTB()			(0)
 #define activate_context(mm,cpu)	do { } while (0)
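Among the nommu stubs in mmu_context.h, cpu_asid() is defined as the GNU statement expression ({ (void)cpu; 0; }) rather than plain (0), presumably so the cpu argument still counts as used and callers don't trip unused-variable warnings, while the whole expression evaluates to ASID 0. A hypothetical stand-alone demo, assuming GCC (statement expressions are a GNU extension):

#include <stdio.h>

/* nommu stub: consumes 'cpu', ignores 'mm', and yields ASID 0. */
#define cpu_asid(cpu, mm)	({ (void)cpu; 0; })

int main(void)
{
	int cpu = 3;	/* with plain (0), 'cpu' would be flagged unused */
	void *mm = NULL;

	(void)mm;	/* the stub ignores 'mm' entirely */
	printf("asid = %d\n", cpu_asid(cpu, mm));
	return 0;
}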