arch/arm64/Kconfig.debug  +1 −1

@@ -73,7 +73,7 @@ config DEBUG_RODATA
           If in doubt, say Y

 config DEBUG_ALIGN_RODATA
-	depends on DEBUG_RODATA && ARM64_4K_PAGES
+	depends on DEBUG_RODATA
 	bool "Align linker sections up to SECTION_SIZE"
 	help
 	  If this option is enabled, sections that may potentially be marked as
arch/arm64/include/asm/assembler.h  +20 −0

@@ -233,4 +233,24 @@ lr	.req	x30		// link register
 	.long	\sym\()_hi32
 	.endm

+/*
+ * mov_q - move an immediate constant into a 64-bit register using
+ *         between 2 and 4 movz/movk instructions (depending on the
+ *         magnitude and sign of the operand)
+ */
+	.macro	mov_q, reg, val
+	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
+	movz	\reg, :abs_g1_s:\val
+	.else
+	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
+	movz	\reg, :abs_g2_s:\val
+	.else
+	movz	\reg, :abs_g3:\val
+	movk	\reg, :abs_g2_nc:\val
+	.endif
+	movk	\reg, :abs_g1_nc:\val
+	.endif
+	movk	\reg, :abs_g0_nc:\val
+	.endm
+
 #endif	/* __ASM_ASSEMBLER_H */
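Note: the macro picks the shortest movz/movk sequence by inspecting the constant at assembly time: two instructions when bits [63:31] are all zero or all one (a sign-extended 32-bit value), three when bits [63:47] are uniform, and four otherwise. As a rough C model of that selection logic (the helper name is ours, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical model of the mov_q size logic above: count the
 * movz/movk instructions the macro would emit for a 64-bit constant.
 */
static int mov_q_insn_count(uint64_t val)
{
        /* bits [63:31] uniform: movz :abs_g1_s: + movk :abs_g0_nc: */
        if ((val >> 31) == 0 || (val >> 31) == 0x1ffffffffULL)
                return 2;
        /* bits [63:47] uniform: movz :abs_g2_s: + 2 trailing movk */
        if ((val >> 47) == 0 || (val >> 47) == 0x1ffffULL)
                return 3;
        /* full-width constant: movz :abs_g3: + 3 trailing movk */
        return 4;
}

int main(void)
{
        printf("%d\n", mov_q_insn_count(0xffffffffffff0000ULL)); /* 2 */
        printf("%d\n", mov_q_insn_count(0x000000ff00ffffffULL)); /* 3, MPIDR_HWID_BITMASK */
        printf("%d\n", mov_q_insn_count(0xffff000008080000ULL)); /* 4, a high kernel VA */
        return 0;
}

This is what allows the ldr x1, =MPIDR_HWID_BITMASK literal-pool load in head.S below to be replaced by mov_q: the constant is materialized in at most four inline instructions, so the early boot path no longer depends on a 64-bit literal that would require relocation.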
arch/arm64/kernel/efi-entry.S  +1 −1

@@ -61,7 +61,7 @@ ENTRY(entry)
 	 */
 	mov	x20, x0		// DTB address
 	ldr	x0, [sp, #16]	// relocated _text address
-	movz	x21, #:abs_g0:stext_offset
+	ldr	w21, =stext_offset
 	add	x21, x0, x21

 	/*
arch/arm64/kernel/head.S  +84 −76

@@ -25,6 +25,7 @@
 #include <linux/irqchip/arm-gic-v3.h>

 #include <asm/assembler.h>
+#include <asm/boot.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
@@ -100,8 +101,6 @@ _head:
 #endif

 #ifdef CONFIG_EFI
-	.globl	__efistub_stext_offset
-	.set	__efistub_stext_offset, stext - _head
 	.align 3
 pe_header:
 	.ascii	"PE"
@@ -121,11 +120,11 @@ optional_header:
 	.short	0x20b				// PE32+ format
 	.byte	0x02				// MajorLinkerVersion
 	.byte	0x14				// MinorLinkerVersion
-	.long	_end - stext			// SizeOfCode
+	.long	_end - efi_header_end		// SizeOfCode
 	.long	0				// SizeOfInitializedData
 	.long	0				// SizeOfUninitializedData
 	.long	__efistub_entry - _head		// AddressOfEntryPoint
-	.long	__efistub_stext_offset		// BaseOfCode
+	.long	efi_header_end - _head		// BaseOfCode

 extra_header_fields:
 	.quad	0				// ImageBase
@@ -142,7 +141,7 @@ extra_header_fields:
 	.long	_end - _head			// SizeOfImage

 	// Everything before the kernel image is considered part of the header
-	.long	__efistub_stext_offset		// SizeOfHeaders
+	.long	efi_header_end - _head		// SizeOfHeaders
 	.long	0				// CheckSum
 	.short	0xa				// Subsystem (EFI application)
 	.short	0				// DllCharacteristics
@@ -186,10 +185,10 @@ section_table:
 	.byte	0
 	.byte	0
 	.byte	0			// end of 0 padding of section name
-	.long	_end - stext		// VirtualSize
-	.long	__efistub_stext_offset	// VirtualAddress
-	.long	_edata - stext		// SizeOfRawData
-	.long	__efistub_stext_offset	// PointerToRawData
+	.long	_end - efi_header_end	// VirtualSize
+	.long	efi_header_end - _head	// VirtualAddress
+	.long	_edata - efi_header_end	// SizeOfRawData
+	.long	efi_header_end - _head	// PointerToRawData

 	.long	0		// PointerToRelocations  (0 for executables)
 	.long	0		// PointerToLineNumbers  (0 for executables)
@@ -198,20 +197,23 @@ section_table:
 	.long	0xe0500020	// Characteristics (section flags)

 	/*
-	 * EFI will load stext onwards at the 4k section alignment
+	 * EFI will load .text onwards at the 4k section alignment
 	 * described in the PE/COFF header. To ensure that instruction
 	 * sequences using an adrp and a :lo12: immediate will function
-	 * correctly at this alignment, we must ensure that stext is
+	 * correctly at this alignment, we must ensure that .text is
 	 * placed at a 4k boundary in the Image to begin with.
 	 */
 	.align 12
+efi_header_end:
 #endif

+	__INIT
+
 ENTRY(stext)
 	bl	preserve_boot_args
 	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
-	mov	x23, xzr			// KASLR offset, defaults to 0
+	adrp	x24, __PHYS_OFFSET
+	and	x23, x24, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
 	bl	set_cpu_boot_mode_flag
 	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
 	/*
@@ -220,13 +222,11 @@ ENTRY(stext)
 	 * On return, the CPU will be ready for the MMU to be turned on and
 	 * the TCR will have been set.
 	 */
-	ldr	x27, 0f				// address to jump to after
+	bl	__cpu_setup			// initialise processor
+	adr_l	x27, __primary_switch		// address to jump to after
 						// MMU has been enabled
-	adr_l	lr, __enable_mmu		// return (PIC) address
-	b	__cpu_setup			// initialise processor
+	b	__enable_mmu
 ENDPROC(stext)
-	.align	3
-0:	.quad	__mmap_switched - (_head - TEXT_OFFSET) + KIMAGE_VADDR

 /*
  * Preserve the arguments passed by the bootloader in x0 .. x3
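Note: with this hunk, stext no longer zeroes the KASLR displacement. x23 is seeded with the physical misalignment of the image, i.e. the load address modulo MIN_KIMG_ALIGN, so a kernel placed off a 2 MiB boundary runs at a virtual address with the same sub-2 MiB offset. A minimal C sketch of that computation, assuming MIN_KIMG_ALIGN is 2 MiB as defined in asm/boot.h:

#include <stdint.h>
#include <stdio.h>

#define MIN_KIMG_ALIGN	0x200000ULL	/* assumed 2 MiB, per asm/boot.h */

/*
 * Illustrative model of the new stext prologue: the physical
 * misalignment of the image becomes the initial KASLR offset.
 * (adrp x24, __PHYS_OFFSET ; and x23, x24, MIN_KIMG_ALIGN - 1)
 */
static uint64_t initial_kaslr_offset(uint64_t phys_offset)
{
        return phys_offset & (MIN_KIMG_ALIGN - 1);
}

int main(void)
{
        /* image loaded 512 KiB above a 2 MiB boundary */
        printf("0x%llx\n",
               (unsigned long long)initial_kaslr_offset(0x80080000ULL)); /* 0x80000 */
        return 0;
}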
@@ -336,7 +336,7 @@ __create_page_tables:
 	cmp	x0, x6
 	b.lo	1b

-	ldr	x7, =SWAPPER_MM_MMUFLAGS
+	mov	x7, SWAPPER_MM_MMUFLAGS

 	/*
 	 * Create the identity mapping.
@@ -392,12 +392,13 @@ __create_page_tables:
 	 * Map the kernel image (starting with PHYS_OFFSET).
 	 */
 	mov	x0, x26				// swapper_pg_dir
-	ldr	x5, =KIMAGE_VADDR
+	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
 	add	x5, x5, x23			// add KASLR displacement
 	create_pgd_entry x0, x5, x3, x6
-	ldr	w6, kernel_img_size
-	add	x6, x6, x5
-	mov	x3, x24				// phys offset
+	adrp	x6, _end			// runtime __pa(_end)
+	adrp	x3, _text			// runtime __pa(_text)
+	sub	x6, x6, x3			// _end - _text
+	add	x6, x6, x5			// runtime __va(_end)
 	create_block_map x0, x7, x3, x5, x6

 	/*
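Note: the hunk above also stops relying on the link-time kernel_img_size constant (removed in the next hunk); the extent of the kernel mapping is now derived at run time from the physical placement of _text and _end. A compilable sketch of the arithmetic, with names and sample values that are ours:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the mapping computation: runtime __va(_text) and
 * __va(_end), with the image size taken from physical addresses.
 */
static void map_kernel_range(uint64_t kimage_vaddr, uint64_t text_offset,
                             uint64_t kaslr_offset,
                             uint64_t pa_text, uint64_t pa_end,
                             uint64_t *va_start, uint64_t *va_end)
{
        *va_start = kimage_vaddr + text_offset + kaslr_offset;  /* mov_q + add  */
        *va_end   = *va_start + (pa_end - pa_text);             /* adrp/sub/add */
}

int main(void)
{
        uint64_t start, end;

        /* illustrative values only */
        map_kernel_range(0xffff000000000000ULL, 0x80000, 0,
                         0x80080000ULL, 0x81000000ULL, &start, &end);
        printf("0x%llx .. 0x%llx\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}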
@@ -412,16 +413,13 @@ __create_page_tables:
 	ret	x28
 ENDPROC(__create_page_tables)

-kernel_img_size:
-	.long	_end - (_head - TEXT_OFFSET)
-
 	.ltorg

 /*
  * The following fragment of code is executed with the MMU enabled.
  */
 	.set	initial_sp, init_thread_union + THREAD_START_SP
-__mmap_switched:
+__primary_switched:
 	mov	x28, lr				// preserve LR
 	adr_l	x8, vectors			// load VBAR_EL1 with virtual
 	msr	vbar_el1, x8			// vector table address
@@ -435,44 +433,6 @@ __mmap_switched:
 	bl	__pi_memset
 	dsb	ishst				// Make zero page visible to PTW

-#ifdef CONFIG_RELOCATABLE
-
-	/*
-	 * Iterate over each entry in the relocation table, and apply the
-	 * relocations in place.
-	 */
-	adr_l	x8, __dynsym_start		// start of symbol table
-	adr_l	x9, __reloc_start		// start of reloc table
-	adr_l	x10, __reloc_end		// end of reloc table
-
-0:	cmp	x9, x10
-	b.hs	2f
-	ldp	x11, x12, [x9], #24
-	ldr	x13, [x9, #-8]
-	cmp	w12, #R_AARCH64_RELATIVE
-	b.ne	1f
-	add	x13, x13, x23			// relocate
-	str	x13, [x11, x23]
-	b	0b
-
-1:	cmp	w12, #R_AARCH64_ABS64
-	b.ne	0b
-	add	x12, x12, x12, lsl #1		// symtab offset: 24x top word
-	add	x12, x8, x12, lsr #(32 - 3)	// ... shifted into bottom word
-	ldrsh	w14, [x12, #6]			// Elf64_Sym::st_shndx
-	ldr	x15, [x12, #8]			// Elf64_Sym::st_value
-	cmp	w14, #-0xf			// SHN_ABS (0xfff1) ?
-	add	x14, x15, x23			// relocate
-	csel	x15, x14, x15, ne
-	add	x15, x13, x15
-	str	x15, [x11, x23]
-	b	0b
-
-2:	adr_l	x8, kimage_vaddr		// make relocated kimage_vaddr
-	dc	cvac, x8			// value visible to secondaries
-	dsb	sy				// with MMU off
-#endif
-
 	adr_l	sp, initial_sp, x4
 	mov	x4, sp
 	and	x4, x4, #~(THREAD_SIZE - 1)
@@ -488,17 +448,19 @@ __mmap_switched:
 	bl	kasan_early_init
 #endif
 #ifdef CONFIG_RANDOMIZE_BASE
-	cbnz	x23, 0f				// already running randomized?
+	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
+	b.ne	0f
 	mov	x0, x21				// pass FDT address in x0
+	mov	x1, x23				// pass modulo offset in x1
 	bl	kaslr_early_init		// parse FDT for KASLR options
 	cbz	x0, 0f				// KASLR disabled? just proceed
-	mov	x23, x0				// record KASLR offset
+	orr	x23, x23, x0			// record KASLR offset
 	ret	x28				// we must enable KASLR, return
 						// to __enable_mmu()
 0:
 #endif
 	b	start_kernel
-ENDPROC(__mmap_switched)
+ENDPROC(__primary_switched)

 /*
  * end early head section, begin head code that is also used for
@@ -613,7 +575,7 @@ ENDPROC(el2_setup)
  * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
  * in x20. See arch/arm64/include/asm/virt.h for more info.
  */
-ENTRY(set_cpu_boot_mode_flag)
+set_cpu_boot_mode_flag:
 	adr_l	x1, __boot_cpu_mode
 	cmp	w20, #BOOT_CPU_MODE_EL2
 	b.ne	1f
@@ -646,7 +608,7 @@ ENTRY(secondary_holding_pen)
 	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
 	bl	set_cpu_boot_mode_flag
 	mrs	x0, mpidr_el1
-	ldr	x1, =MPIDR_HWID_BITMASK
+	mov_q	x1, MPIDR_HWID_BITMASK
 	and	x0, x0, x1
 	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
@@ -666,7 +628,7 @@ ENTRY(secondary_entry)
 	b	secondary_startup
ENDPROC(secondary_entry)

-ENTRY(secondary_startup)
+secondary_startup:
 	/*
 	 * Common entry point for secondary CPUs.
 	 */
@@ -674,14 +636,11 @@ ENTRY(secondary_startup)
 	adrp	x26, swapper_pg_dir
 	bl	__cpu_setup			// initialise processor

-	ldr	x8, kimage_vaddr
-	ldr	w9, 0f
-	sub	x27, x8, w9, sxtw		// address to jump to after enabling the MMU
+	adr_l	x27, __secondary_switch		// address to jump to after enabling the MMU
 	b	__enable_mmu
ENDPROC(secondary_startup)
-0:	.long	(_text - TEXT_OFFSET) - __secondary_switched

-ENTRY(__secondary_switched)
+__secondary_switched:
 	adr_l	x5, vectors
 	msr	vbar_el1, x5
 	isb
@@ -743,7 +702,6 @@ __enable_mmu:
 	ic	iallu				// flush instructions fetched
 	dsb	nsh				// via old mapping
 	isb
-	add	x27, x27, x23			// relocated __mmap_switched
 #endif
 	br	x27
ENDPROC(__enable_mmu)
@@ -752,3 +710,53 @@ __no_granule_support:
 	wfe
 	b	__no_granule_support
ENDPROC(__no_granule_support)
+
+__primary_switch:
+#ifdef CONFIG_RELOCATABLE
+	/*
+	 * Iterate over each entry in the relocation table, and apply the
+	 * relocations in place.
+	 */
+	ldr	w8, =__dynsym_offset		// offset to symbol table
+	ldr	w9, =__rela_offset		// offset to reloc table
+	ldr	w10, =__rela_size		// size of reloc table
+
+	mov_q	x11, KIMAGE_VADDR		// default virtual offset
+	add	x11, x11, x23			// actual virtual offset
+	add	x8, x8, x11			// __va(.dynsym)
+	add	x9, x9, x11			// __va(.rela)
+	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)
+
+0:	cmp	x9, x10
+	b.hs	2f
+	ldp	x11, x12, [x9], #24
+	ldr	x13, [x9, #-8]
+	cmp	w12, #R_AARCH64_RELATIVE
+	b.ne	1f
+	add	x13, x13, x23			// relocate
+	str	x13, [x11, x23]
+	b	0b
+
+1:	cmp	w12, #R_AARCH64_ABS64
+	b.ne	0b
+	add	x12, x12, x12, lsl #1		// symtab offset: 24x top word
+	add	x12, x8, x12, lsr #(32 - 3)	// ... shifted into bottom word
+	ldrsh	w14, [x12, #6]			// Elf64_Sym::st_shndx
+	ldr	x15, [x12, #8]			// Elf64_Sym::st_value
+	cmp	w14, #-0xf			// SHN_ABS (0xfff1) ?
+	add	x14, x15, x23			// relocate
+	csel	x15, x14, x15, ne
+	add	x15, x13, x15
+	str	x15, [x11, x23]
+	b	0b
+
+2:
+#endif
+	ldr	x8, =__primary_switched
+	br	x8
+ENDPROC(__primary_switch)
+
+__secondary_switch:
+	ldr	x8, =__secondary_switched
+	br	x8
+ENDPROC(__secondary_switch)
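Note: the relocation loop has moved from __mmap_switched into __primary_switch, so it now runs via the ID map after the MMU is enabled but before the branch to the relocated __primary_switched, and it locates its tables through link-time constants (__rela_offset, __dynsym_offset, __rela_size) plus the runtime virtual offset rather than PC-relative adr_l, which would yield physical addresses at this point. In C terms the loop amounts to the following; the struct layouts follow the ELF64 spec (st_shndx sits at offset 6, hence the ldrsh above) and the function name is ours:

#include <stdint.h>

#define R_AARCH64_ABS64		257
#define R_AARCH64_RELATIVE	1027
#define SHN_ABS			0xfff1	/* matches the cmp w14, #-0xf above */

typedef struct {
        uint64_t r_offset;      /* link-time VA of the place to patch */
        uint64_t r_info;        /* symbol index (top 32 bits) | type */
        int64_t  r_addend;
} Elf64_Rela;                   /* 24 bytes, hence the ldp ..., #24 */

typedef struct {
        uint32_t st_name;
        uint8_t  st_info;
        uint8_t  st_other;
        uint16_t st_shndx;      /* at offset 6, read by the ldrsh */
        uint64_t st_value;
        uint64_t st_size;
} Elf64_Sym;

/*
 * Hypothetical C rendering of the __primary_switch loop: patch each
 * RELA entry in place, displacing everything by the KASLR offset
 * except absolute (SHN_ABS) symbols.
 */
void apply_relocations(const Elf64_Rela *rela, const Elf64_Rela *rela_end,
                       const Elf64_Sym *dynsym, uint64_t kaslr_offset)
{
        for (; rela < rela_end; rela++) {
                /* str xN, [x11, x23]: link-time VA plus KASLR offset */
                uint64_t *place = (uint64_t *)(rela->r_offset + kaslr_offset);
                uint32_t type = (uint32_t)rela->r_info;

                if (type == R_AARCH64_RELATIVE) {
                        *place = rela->r_addend + kaslr_offset;
                } else if (type == R_AARCH64_ABS64) {
                        const Elf64_Sym *sym = &dynsym[rela->r_info >> 32];
                        uint64_t value = sym->st_value;

                        if (sym->st_shndx != SHN_ABS)
                                value += kaslr_offset;  /* the csel path */
                        *place = rela->r_addend + value;
                }
        }
}

R_AARCH64_RELATIVE entries are displaced by the KASLR offset outright, while R_AARCH64_ABS64 entries take the symbol value, displaced only when the symbol is not absolute; that is the cmp w14, #-0xf / csel pair in the assembly.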
arch/arm64/kernel/image.h  +2 −0

@@ -73,6 +73,8 @@
 #ifdef CONFIG_EFI

+__efistub_stext_offset = stext - _text;
+
 /*
  * Prevent the symbol aliases below from being emitted into the kallsyms
  * table, by forcing them to be absolute symbols (which are conveniently