arch/arm64/kernel/entry.S  +85 −0

@@ -29,6 +29,7 @@
 #include <asm/esr.h>
 #include <asm/irq.h>
 #include <asm/memory.h>
+#include <asm/mmu.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/uaccess.h>
@@ -864,6 +865,90 @@ __ni_sys_trace:
 	.popsection				// .entry.text

+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * Exception vectors trampoline.
+ */
+	.pushsection ".entry.tramp.text", "ax"
+
+	.macro tramp_map_kernel, tmp
+	mrs	\tmp, ttbr1_el1
+	sub	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+	bic	\tmp, \tmp, #USER_ASID_FLAG
+	msr	ttbr1_el1, \tmp
+	.endm
+
+	.macro tramp_unmap_kernel, tmp
+	mrs	\tmp, ttbr1_el1
+	add	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+	orr	\tmp, \tmp, #USER_ASID_FLAG
+	msr	ttbr1_el1, \tmp
+	/*
+	 * We avoid running the post_ttbr_update_workaround here because
+	 * the user and kernel ASIDs don't have conflicting mappings, so
+	 * any "blessing" as described in:
+	 *
+	 *   http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
+	 *
+	 * will not hurt correctness. Whilst this may partially defeat
+	 * the point of using split ASIDs in the first place, it avoids
+	 * the hit of invalidating the entire I-cache on every return to
+	 * userspace.
+	 */
+	.endm
+
+	.macro tramp_ventry, regsize = 64
+	.align	7
+1:
+	.if	\regsize == 64
+	msr	tpidrro_el0, x30	// Restored in kernel_ventry
+	.endif
+	tramp_map_kernel	x30
+	ldr	x30, =vectors
+	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
+	msr	vbar_el1, x30
+	add	x30, x30, #(1b - tramp_vectors)
+	isb
+	br	x30
+	.endm
+
+	.macro tramp_exit, regsize = 64
+	adr	x30, tramp_vectors
+	msr	vbar_el1, x30
+	tramp_unmap_kernel	x30
+	.if	\regsize == 64
+	mrs	x30, far_el1
+	.endif
+	eret
+	.endm
+
+	.align	11
+ENTRY(tramp_vectors)
+	.space	0x400
+
+	tramp_ventry
+	tramp_ventry
+	tramp_ventry
+	tramp_ventry
+
+	tramp_ventry	32
+	tramp_ventry	32
+	tramp_ventry	32
+	tramp_ventry	32
+END(tramp_vectors)
+
+ENTRY(tramp_exit_native)
+	tramp_exit
+END(tramp_exit_native)
+
+ENTRY(tramp_exit_compat)
+	tramp_exit	32
+END(tramp_exit_compat)
+
+	.ltorg
+	.popsection				// .entry.tramp.text
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
 /*
  * Special system call wrappers.
  */
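The two TTBR1_EL1 macros above are the core of the trick: swapper_pg_dir, the reserved TTBR0 page and tramp_pg_dir sit back-to-back in the kernel image (see the linker script change below), so switching page tables is a fixed-offset sub/add on the translation table base, and selecting the kernel vs. user ASID is a single bit flip. A minimal C sketch of that arithmetic, for illustration only: USER_ASID_FLAG as bit 48 (ASID bit 0 with 16-bit ASIDs) follows the arm64 definition, but TRAMP_OFFSET is a hypothetical stand-in for SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE.

#include <stdint.h>

#define USER_ASID_FLAG	(1ULL << 48)	/* user ASIDs are odd: ASID bit 0 set */
#define TRAMP_OFFSET	0x4000ULL	/* hypothetical stand-in for
					   SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE */

/* tramp_map_kernel: tramp_pg_dir/user ASID -> swapper_pg_dir/kernel ASID */
static inline uint64_t tramp_map_kernel(uint64_t ttbr1)
{
	ttbr1 -= TRAMP_OFFSET;		/* sub: base moves back to swapper_pg_dir */
	ttbr1 &= ~USER_ASID_FLAG;	/* bic: even (kernel) ASID */
	return ttbr1;
}

/* tramp_unmap_kernel: the exact inverse, on the return-to-user path */
static inline uint64_t tramp_unmap_kernel(uint64_t ttbr1)
{
	ttbr1 += TRAMP_OFFSET;		/* add: base moves on to tramp_pg_dir */
	ttbr1 |= USER_ASID_FLAG;	/* orr: odd (user) ASID */
	return ttbr1;
}

The vector table shape is also worth noting: an AArch64 vector table has sixteen 128-byte entries, and the first eight (exceptions taken from the current EL) can never be delivered through the trampoline, so tramp_vectors skips them with .space 0x400 and only populates the eight EL0 entries.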
arch/arm64/kernel/vmlinux.lds.S  +17 −0

@@ -56,6 +56,17 @@ jiffies = jiffies_64;
 #define HIBERNATE_TEXT
 #endif

+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define TRAMP_TEXT					\
+	. = ALIGN(PAGE_SIZE);				\
+	VMLINUX_SYMBOL(__entry_tramp_text_start) = .;	\
+	*(.entry.tramp.text)				\
+	. = ALIGN(PAGE_SIZE);				\
+	VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
+#else
+#define TRAMP_TEXT
+#endif
+
 /*
  * The size of the PE/COFF section that covers the kernel image, which
  * runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -128,6 +139,7 @@ SECTIONS
 			HYPERVISOR_TEXT
 			IDMAP_TEXT
 			HIBERNATE_TEXT
+			TRAMP_TEXT
 			*(.fixup)
 			*(.gnu.warning)
 		. = ALIGN(16);
@@ -221,6 +233,11 @@ SECTIONS
 	. += RESERVED_TTBR0_SIZE;
 #endif

+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	tramp_pg_dir = .;
+	. += PAGE_SIZE;
+#endif
+
 	_end = .;

 	STABS_DEBUG
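Together, these hunks guarantee that the trampoline text occupies whole pages of its own and that the one-page tramp_pg_dir lands at a fixed offset past swapper_pg_dir and the reserved TTBR0 page, which is exactly the offset the entry.S macros rely on. A hedged sketch of how the trampoline page can then be wired into both page tables, modelled on map_entry_trampoline() from later in this series; TRAMP_VALIAS, __create_pgd_mapping(), pgd_pgtable_alloc and FIX_ENTRY_TRAMP_TEXT are borrowed from the arm64 tree and may differ in detail from the hunks shown here:

/*
 * Sketch only: map the page-aligned trampoline text into tramp_pg_dir
 * at a fixed virtual alias, and into the kernel page table via the
 * fixmap, so the same code is reachable under either TTBR1 value.
 */
static int __init map_entry_trampoline(void)
{
	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped, so it can be global (not nG) */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the trampoline text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
			     PAGE_SIZE, prot, pgd_pgtable_alloc, 0);

	/* ... and into the kernel page table through a fixmap slot */
	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
	return 0;
}
core_initcall(map_entry_trampoline);

Because __entry_tramp_text_start and __entry_tramp_text_end are both page-aligned by the linker script, a single PAGE_SIZE mapping covers the whole trampoline, and nothing else in the kernel leaks into the user-visible page table.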