arch/arm/kernel/head-common.S  +159 −146

@@ -15,55 +15,6 @@
 #define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
 #define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2)

-	.align	2
-	.type	__switch_data, %object
-__switch_data:
-	.long	__mmap_switched
-	.long	__data_loc			@ r4
-	.long	_sdata				@ r5
-	.long	__bss_start			@ r6
-	.long	_end				@ r7
-	.long	processor_id			@ r4
-	.long	__machine_arch_type		@ r5
-	.long	__atags_pointer			@ r6
-	.long	cr_alignment			@ r7
-	.long	init_thread_union + THREAD_START_SP @ sp
-
-/*
- * The following fragment of code is executed with the MMU on in MMU mode,
- * and uses absolute addresses; this is not position independent.
- *
- *  r0  = cp#15 control register
- *  r1  = machine ID
- *  r2  = atags pointer
- *  r9  = processor ID
- */
-__mmap_switched:
-	adr	r3, __switch_data + 4
-
-	ldmia	r3!, {r4, r5, r6, r7}
-	cmp	r4, r5				@ Copy data segment if needed
-1:	cmpne	r5, r6
-	ldrne	fp, [r4], #4
-	strne	fp, [r5], #4
-	bne	1b
-
-	mov	fp, #0				@ Clear BSS (and zero fp)
-1:	cmp	r6, r7
-	strcc	fp, [r6],#4
-	bcc	1b
-
- ARM(	ldmia	r3, {r4, r5, r6, r7, sp})
- THUMB(	ldmia	r3, {r4, r5, r6, r7}	)
- THUMB(	ldr	sp, [r3, #16]		)
-	str	r9, [r4]			@ Save processor ID
-	str	r1, [r5]			@ Save machine type
-	str	r2, [r6]			@ Save atags pointer
-	bic	r4, r0, #CR_A			@ Clear 'A' bit
-	stmia	r7, {r0, r4}			@ Save control register values
-	b	start_kernel
-ENDPROC(__mmap_switched)
-
 /*
  * Exception handling.  Something went wrong and we can't proceed.  We
  * ought to tell the user, but since we don't have any guarantee that
@@ -73,21 +24,7 @@ ENDPROC(__mmap_switched)
  * and hope for the best (useful if bootloader fails to pass a proper
  * machine ID for example).
  */
-__error_p:
-#ifdef CONFIG_DEBUG_LL
-	adr	r0, str_p1
-	bl	printascii
-	mov	r0, r9
-	bl	printhex8
-	adr	r0, str_p2
-	bl	printascii
-	b	__error
-str_p1:	.asciz	"\nError: unrecognized/unsupported processor variant (0x"
-str_p2:	.asciz	").\n"
-	.align
-#endif
-ENDPROC(__error_p)
+	__HEAD

 __error_a:
 #ifdef CONFIG_DEBUG_LL
 	mov	r4, r1				@ preserve machine ID
@@ -97,7 +34,7 @@ __error_a:
 	bl	printhex8
 	adr	r0, str_a2
 	bl	printascii
-	adr	r3, 4f
+	adr	r3, __lookup_machine_type_data
 	ldmia	r3, {r4, r5, r6}		@ get machine desc list
 	sub	r4, r3, r4			@ get offset between virt&phys
 	add	r5, r5, r4			@ convert virt addresses to
@@ -125,78 +62,6 @@
 str_a3:	.asciz	"\nPlease check your kernel config and/or bootloader.\n"
 	.align
 #endif
-
-__error:
-#ifdef CONFIG_ARCH_RPC
-/*
- * Turn the screen red on a error - RiscPC only.
- */
-	mov	r0, #0x02000000
-	mov	r3, #0x11
-	orr	r3, r3, r3, lsl #8
-	orr	r3, r3, r3, lsl #16
-	str	r3, [r0], #4
-	str	r3, [r0], #4
-	str	r3, [r0], #4
-	str	r3, [r0], #4
-#endif
-1:	mov	r0, r0
-	b	1b
-ENDPROC(__error)
-
-/*
- * Read processor ID register (CP#15, CR0), and look up in the linker-built
- * supported processor list.  Note that we can't use the absolute addresses
- * for the __proc_info lists since we aren't running with the MMU on
- * (and therefore, we are not in the correct address space).  We have to
- * calculate the offset.
- *
- *	r9 = cpuid
- * Returns:
- *	r3, r4, r6 corrupted
- *	r5 = proc_info pointer in physical address space
- *	r9 = cpuid (preserved)
- */
-__lookup_processor_type:
-	adr	r3, 3f
-	ldmia	r3, {r5 - r7}
-	add	r3, r3, #8
-	sub	r3, r3, r7			@ get offset between virt&phys
-	add	r5, r5, r3			@ convert virt addresses to
-	add	r6, r6, r3			@ physical address space
-1:	ldmia	r5, {r3, r4}			@ value, mask
-	and	r4, r4, r9			@ mask wanted bits
-	teq	r3, r4
-	beq	2f
-	add	r5, r5, #PROC_INFO_SZ		@ sizeof(proc_info_list)
-	cmp	r5, r6
-	blo	1b
-	mov	r5, #0				@ unknown processor
-2:	mov	pc, lr
-ENDPROC(__lookup_processor_type)
-
-/*
- * This provides a C-API version of the above function.
- */
-ENTRY(lookup_processor_type)
-	stmfd	sp!, {r4 - r7, r9, lr}
-	mov	r9, r0
-	bl	__lookup_processor_type
-	mov	r0, r5
-	ldmfd	sp!, {r4 - r7, r9, pc}
-ENDPROC(lookup_processor_type)
-
-/*
- * Look in <asm/procinfo.h> and arch/arm/kernel/arch.[ch] for
- * more information about the __proc_info and __arch_info structures.
- */
-	.align	2
-3:	.long	__proc_info_begin
-	.long	__proc_info_end
-4:	.long	.
-	.long	__arch_info_begin
-	.long	__arch_info_end
-
 /*
  * Lookup machine architecture in the linker-build list of architectures.
  * Note that we can't use the absolute addresses for the __arch_info
@@ -209,7 +74,7 @@
  *  r5 = mach_info pointer in physical address space
  */
 __lookup_machine_type:
-	adr	r3, 4b
+	adr	r3, __lookup_machine_type_data
 	ldmia	r3, {r4, r5, r6}
 	sub	r3, r3, r4			@ get offset between virt&phys
 	add	r5, r5, r3			@ convert virt addresses to
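The relocation trick used by __lookup_machine_type above (and by __lookup_processor_type below) is worth spelling out: a `.long .` record stores its own link-time (virtual) address, so subtracting that stored value from the record's run-time (physical) address gives the offset needed to relocate the table bounds. A minimal C model of the arithmetic follows; it is a sketch, not kernel code, and the struct and helper names are illustrative.

    /* Sketch only: C model of the ".long ." virt->phys fixup. */
    struct table_loc {
            unsigned long here;     /* link-time address of this record (".long .") */
            unsigned long begin;    /* link-time (virtual) address of table start */
            unsigned long end;      /* link-time (virtual) address of table end */
    };

    static unsigned long virt_to_phys_offset(const struct table_loc *loc)
    {
            /* "loc" is where we actually are (physical); "loc->here" is where
             * the linker placed this record (virtual).  Adding the difference
             * to loc->begin/loc->end relocates them into physical space. */
            return (unsigned long)loc - loc->here;
    }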
@@ -225,15 +90,16 @@
 ENDPROC(__lookup_machine_type)

 /*
- * This provides a C-API version of the above function.
+ * Look in arch/arm/kernel/arch.[ch] for information about the
+ * __arch_info structures.
  */
-ENTRY(lookup_machine_type)
-	stmfd	sp!, {r4 - r6, lr}
-	mov	r1, r0
-	bl	__lookup_machine_type
-	mov	r0, r5
-	ldmfd	sp!, {r4 - r6, pc}
-ENDPROC(lookup_machine_type)
+	.align	2
+	.type	__lookup_machine_type_data, %object
+__lookup_machine_type_data:
+	.long	.
+	.long	__arch_info_begin
+	.long	__arch_info_end
+	.size	__lookup_machine_type_data, . - __lookup_machine_type_data

 /* Determine validity of the r2 atags pointer.  The heuristic requires
  * that the pointer be aligned, in the first 16k of physical RAM and
@@ -265,3 +131,150 @@ __vet_atags:
 1:	mov	r2, #0
 	mov	pc, lr
 ENDPROC(__vet_atags)
+
+/*
+ * The following fragment of code is executed with the MMU on in MMU mode,
+ * and uses absolute addresses; this is not position independent.
+ *
+ *  r0  = cp#15 control register
+ *  r1  = machine ID
+ *  r2  = atags pointer
+ *  r9  = processor ID
+ */
+	__INIT
+__mmap_switched:
+	adr	r3, __mmap_switched_data
+
+	ldmia	r3!, {r4, r5, r6, r7}
+	cmp	r4, r5				@ Copy data segment if needed
+1:	cmpne	r5, r6
+	ldrne	fp, [r4], #4
+	strne	fp, [r5], #4
+	bne	1b
+
+	mov	fp, #0				@ Clear BSS (and zero fp)
+1:	cmp	r6, r7
+	strcc	fp, [r6],#4
+	bcc	1b
+
+ ARM(	ldmia	r3, {r4, r5, r6, r7, sp})
+ THUMB(	ldmia	r3, {r4, r5, r6, r7}	)
+ THUMB(	ldr	sp, [r3, #16]		)
+	str	r9, [r4]			@ Save processor ID
+	str	r1, [r5]			@ Save machine type
+	str	r2, [r6]			@ Save atags pointer
+	bic	r4, r0, #CR_A			@ Clear 'A' bit
+	stmia	r7, {r0, r4}			@ Save control register values
+	b	start_kernel
+ENDPROC(__mmap_switched)
+
+	.align	2
+	.type	__mmap_switched_data, %object
+__mmap_switched_data:
+	.long	__data_loc			@ r4
+	.long	_sdata				@ r5
+	.long	__bss_start			@ r6
+	.long	_end				@ r7
+	.long	processor_id			@ r4
+	.long	__machine_arch_type		@ r5
+	.long	__atags_pointer			@ r6
+	.long	cr_alignment			@ r7
+	.long	init_thread_union + THREAD_START_SP @ sp
+	.size	__mmap_switched_data, . - __mmap_switched_data
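For reference, here is a rough C equivalent of the __mmap_switched sequence above. It is a sketch, not kernel code: memcpy()/memset() stand in for the word-copy loops, and the two consecutive words cr_alignment/cr_no_alignment are modeled as a two-element array.

    #include <string.h>

    extern char __data_loc[], _sdata[], __bss_start[], _end[];
    extern unsigned long processor_id, __machine_arch_type, __atags_pointer;
    extern unsigned long cr_alignment[2];   /* [0]=cr_alignment, [1]=cr_no_alignment */

    #define CR_A (1 << 1)                   /* alignment-fault enable bit */

    static void mmap_switched_model(unsigned long ctrl, unsigned long machine,
                                    unsigned long atags, unsigned long cpuid)
    {
            if (_sdata != __data_loc)                       /* XIP kernel */
                    memcpy(_sdata, __data_loc, __bss_start - _sdata);
            memset(__bss_start, 0, _end - __bss_start);     /* clear BSS */

            processor_id = cpuid;                           /* was r9 */
            __machine_arch_type = machine;                  /* was r1 */
            __atags_pointer = atags;                        /* was r2 */
            cr_alignment[0] = ctrl;                         /* was r0 */
            cr_alignment[1] = ctrl & ~CR_A;                 /* 'A' bit cleared */
            /* ...then jump to start_kernel() on init_thread_union's stack */
    }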
+
+/*
+ * This provides a C-API version of __lookup_machine_type
+ */
+ENTRY(lookup_machine_type)
+	stmfd	sp!, {r4 - r6, lr}
+	mov	r1, r0
+	bl	__lookup_machine_type
+	mov	r0, r5
+	ldmfd	sp!, {r4 - r6, pc}
+ENDPROC(lookup_machine_type)
+
+/*
+ * This provides a C-API version of __lookup_processor_type
+ */
+ENTRY(lookup_processor_type)
+	stmfd	sp!, {r4 - r6, r9, lr}
+	mov	r9, r0
+	bl	__lookup_processor_type
+	mov	r0, r5
+	ldmfd	sp!, {r4 - r6, r9, pc}
+ENDPROC(lookup_processor_type)
+
+/*
+ * Read processor ID register (CP#15, CR0), and look up in the linker-built
+ * supported processor list.  Note that we can't use the absolute addresses
+ * for the __proc_info lists since we aren't running with the MMU on
+ * (and therefore, we are not in the correct address space).  We have to
+ * calculate the offset.
+ *
+ *	r9 = cpuid
+ * Returns:
+ *	r3, r4, r6 corrupted
+ *	r5 = proc_info pointer in physical address space
+ *	r9 = cpuid (preserved)
+ */
+	__CPUINIT
+__lookup_processor_type:
+	adr	r3, __lookup_processor_type_data
+	ldmia	r3, {r4 - r6}
+	sub	r3, r3, r4			@ get offset between virt&phys
+	add	r5, r5, r3			@ convert virt addresses to
+	add	r6, r6, r3			@ physical address space
+1:	ldmia	r5, {r3, r4}			@ value, mask
+	and	r4, r4, r9			@ mask wanted bits
+	teq	r3, r4
+	beq	2f
+	add	r5, r5, #PROC_INFO_SZ		@ sizeof(proc_info_list)
+	cmp	r5, r6
+	blo	1b
+	mov	r5, #0				@ unknown processor
+2:	mov	pc, lr
+ENDPROC(__lookup_processor_type)
+
+/*
+ * Look in <asm/procinfo.h> for information about the __proc_info structure.
+ */
+	.align	2
+	.type	__lookup_processor_type_data, %object
+__lookup_processor_type_data:
+	.long	.
+	.long	__proc_info_begin
+	.long	__proc_info_end
+	.size	__lookup_processor_type_data, . - __lookup_processor_type_data
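The value/mask walk above restated in C, as a sketch: cpu_val and cpu_mask are the real first two fields of struct proc_info_list in <asm/procinfo.h> (the rest of the structure is elided here), while the struct and helper names are illustrative.

    /* Sketch: the scan performed by __lookup_processor_type. */
    struct proc_info_model {
            unsigned int cpu_val;
            unsigned int cpu_mask;
            /* ...remainder of struct proc_info_list elided... */
    };
    extern struct proc_info_model __proc_info_begin[], __proc_info_end[];

    static struct proc_info_model *find_proc_info(unsigned int cpuid)
    {
            struct proc_info_model *p;

            for (p = __proc_info_begin; p < __proc_info_end; p++)
                    if (p->cpu_val == (cpuid & p->cpu_mask))
                            return p;       /* r5 = matching entry */
            return 0;                       /* r5 = 0: unknown processor */
    }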
+
+__error_p:
+#ifdef CONFIG_DEBUG_LL
+	adr	r0, str_p1
+	bl	printascii
+	mov	r0, r9
+	bl	printhex8
+	adr	r0, str_p2
+	bl	printascii
+	b	__error
+str_p1:	.asciz	"\nError: unrecognized/unsupported processor variant (0x"
+str_p2:	.asciz	").\n"
+	.align
+#endif
+ENDPROC(__error_p)
+
+__error:
+#ifdef CONFIG_ARCH_RPC
+/*
+ * Turn the screen red on a error - RiscPC only.
+ */
+	mov	r0, #0x02000000
+	mov	r3, #0x11
+	orr	r3, r3, r3, lsl #8
+	orr	r3, r3, r3, lsl #16
+	str	r3, [r0], #4
+	str	r3, [r0], #4
+	str	r3, [r0], #4
+	str	r3, [r0], #4
+#endif
+1:	mov	r0, r0
+	b	1b
+ENDPROC(__error)

arch/arm/kernel/head-nommu.S  +1 −4

@@ -48,8 +48,6 @@ ENTRY(stext)
 	movs	r8, r5				@ invalid machine (r5=0)?
 	beq	__error_a			@ yes, error 'a'
-	ldr	r13, __switch_data		@ address to jump to after
-						@ the initialization is done
 	adr	lr, BSYM(__after_proc_init)	@ return (PIC) address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
@@ -87,8 +85,7 @@ __after_proc_init:
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
 #endif /* CONFIG_CPU_CP15 */
-	mov	r3, r13
-	mov	pc, r3				@ clear the BSS and jump
+	b	__mmap_switched			@ clear the BSS and jump
 						@ to start_kernel
 ENDPROC(__after_proc_init)
 	.ltorg

arch/arm/kernel/head.S  +139 −111

@@ -98,113 +98,15 @@ ENTRY(stext)
 	 * above.  On return, the CPU will be ready for the MMU to be
 	 * turned on, and r0 will hold the CPU control register value.
 	 */
-	ldr	r13, __switch_data		@ address to jump to after
+	ldr	r13, =__mmap_switched		@ address to jump to after
 						@ mmu has been enabled
-	adr	lr, BSYM(__enable_mmu)		@ return (PIC) address
+	adr	lr, BSYM(1f)			@ return (PIC) address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	mov	pc, r12				)
+1:	b	__enable_mmu
 ENDPROC(stext)
-
-#if defined(CONFIG_SMP)
-ENTRY(secondary_startup)
-	/*
-	 * Common entry point for secondary CPUs.
-	 *
-	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
-	 * the processor type - there is no need to check the machine type
-	 * as it has already been validated by the primary processor.
-	 */
-	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
-	mrc	p15, 0, r9, c0, c0		@ get processor id
-	bl	__lookup_processor_type
-	movs	r10, r5				@ invalid processor?
-	moveq	r0, #'p'			@ yes, error 'p'
-	beq	__error
-
-	/*
-	 * Use the page tables supplied from  __cpu_up.
-	 */
-	adr	r4, __secondary_data
-	ldmia	r4, {r5, r7, r12}		@ address to jump to after
-	sub	r4, r4, r5			@ mmu has been enabled
-	ldr	r4, [r7, r4]			@ get secondary_data.pgdir
-	adr	lr, BSYM(__enable_mmu)		@ return address
-	mov	r13, r12			@ __secondary_switched address
- ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
-						  @ (return control reg)
- THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
- THUMB(	mov	pc, r12				)
-ENDPROC(secondary_startup)
-
-	/*
-	 * r6  = &secondary_data
-	 */
-ENTRY(__secondary_switched)
-	ldr	sp, [r7, #4]			@ get secondary_data.stack
-	mov	fp, #0
-	b	secondary_start_kernel
-ENDPROC(__secondary_switched)
-
-	.type	__secondary_data, %object
-__secondary_data:
-	.long	.
-	.long	secondary_data
-	.long	__secondary_switched
-#endif /* defined(CONFIG_SMP) */
-
-/*
- * Setup common bits before finally enabling the MMU.  Essentially
- * this is just loading the page table pointer and domain access
- * registers.
- */
-__enable_mmu:
-#ifdef CONFIG_ALIGNMENT_TRAP
-	orr	r0, r0, #CR_A
-#else
-	bic	r0, r0, #CR_A
-#endif
-#ifdef CONFIG_CPU_DCACHE_DISABLE
-	bic	r0, r0, #CR_C
-#endif
-#ifdef CONFIG_CPU_BPREDICT_DISABLE
-	bic	r0, r0, #CR_Z
-#endif
-#ifdef CONFIG_CPU_ICACHE_DISABLE
-	bic	r0, r0, #CR_I
-#endif
-	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
-	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
-	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
-	b	__turn_mmu_on
-ENDPROC(__enable_mmu)
-
-/*
- * Enable the MMU.  This completely changes the structure of the visible
- * memory space.  You will not be able to trace execution through this.
- * If you have an enquiry about this, *please* check the linux-arm-kernel
- * mailing list archives BEFORE sending another post to the list.
- *
- *  r0  = cp#15 control register
- *  r13 = *virtual* address to jump to upon completion
- *
- * other registers depend on the function called upon completion
- */
-	.align	5
-__turn_mmu_on:
-	mov	r0, r0
-	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
-	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
-	mov	r3, r3
-	mov	r3, r13
-	mov	pc, r3
-ENDPROC(__turn_mmu_on)
-
-	.ltorg

 /*
  * Setup the initial page tables.  We only setup the barest
@@ -216,7 +118,7 @@ ENDPROC(__turn_mmu_on)
  *  r10 = procinfo
  *
  * Returns:
- *  r0, r3, r6, r7 corrupted
+ *  r0, r3, r5-r7 corrupted
  *  r4 = physical page table address
  */
 __create_page_tables:
@@ -238,20 +140,30 @@ __create_page_tables:
 	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

 	/*
-	 * Create identity mapping for first MB of kernel to
-	 * cater for the MMU enable.  This identity mapping
-	 * will be removed by paging_init().  We use our current program
-	 * counter to determine corresponding section base address.
+	 * Create identity mapping to cater for __enable_mmu.
+	 * This identity mapping will be removed by paging_init().
 	 */
-	mov	r6, pc
-	mov	r6, r6, lsr #20			@ start of kernel section
-	orr	r3, r7, r6, lsl #20		@ flags + kernel base
-	str	r3, [r4, r6, lsl #2]		@ identity mapping
+	adr	r0, __enable_mmu_loc
+	ldmia	r0, {r3, r5, r6}
+	sub	r0, r0, r3			@ virt->phys offset
+	add	r5, r5, r0			@ phys __enable_mmu
+	add	r6, r6, r0			@ phys __enable_mmu_end
+	mov	r5, r5, lsr #20
+	mov	r6, r6, lsr #20
+
+1:	orr	r3, r7, r5, lsl #20		@ flags + kernel base
+	str	r3, [r4, r5, lsl #2]		@ identity mapping
+	teq	r5, r6
+	addne	r5, r5, #1			@ next section
+	bne	1b

 	/*
 	 * Now setup the pagetables for our kernel direct
 	 * mapped region.
 	 */
+	mov	r3, pc
+	mov	r3, r3, lsr #20
+	orr	r3, r7, r3, lsl #20
 	add	r0, r4, #(KERNEL_START & 0xff000000) >> 18
 	str	r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]!
 	ldr	r6, =(KERNEL_END - 1)
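In C terms, the new identity-mapping loop does the following; this is a sketch with an illustrative function name, where "flags" is mm_mmuflags from the proc_info and each level-1 entry maps one 1MB section.

    /* Sketch: the identity-mapping loop above, in C. */
    static void identity_map_sections(unsigned long *pgtbl, unsigned long flags,
                                      unsigned long phys_start,
                                      unsigned long phys_end)
    {
            unsigned long sec;

            /* inclusive, matching the teq/addne/bne loop */
            for (sec = phys_start >> 20; sec <= phys_end >> 20; sec++)
                    pgtbl[sec] = flags | (sec << 20);   /* VA == PA here */
    }

Unlike the old code, which mapped only the single section containing the current pc, this covers every section from __enable_mmu to __enable_mmu_end, so the MMU-enable code stays identity-mapped even if it straddles a section boundary.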
@@ -335,6 +247,122 @@ __create_page_tables:
 	mov	pc, lr
 ENDPROC(__create_page_tables)
 	.ltorg
+
+__enable_mmu_loc:
+	.long	.
+	.long	__enable_mmu
+	.long	__enable_mmu_end
+
+#if defined(CONFIG_SMP)
+	__CPUINIT
+ENTRY(secondary_startup)
+	/*
+	 * Common entry point for secondary CPUs.
+	 *
+	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
+	 * the processor type - there is no need to check the machine type
+	 * as it has already been validated by the primary processor.
+	 */
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+	mrc	p15, 0, r9, c0, c0		@ get processor id
+	bl	__lookup_processor_type
+	movs	r10, r5				@ invalid processor?
+	moveq	r0, #'p'			@ yes, error 'p'
+	beq	__error_p
+
+	/*
+	 * Use the page tables supplied from  __cpu_up.
+	 */
+	adr	r4, __secondary_data
+	ldmia	r4, {r5, r7, r12}		@ address to jump to after
+	sub	r4, r4, r5			@ mmu has been enabled
+	ldr	r4, [r7, r4]			@ get secondary_data.pgdir
+	adr	lr, BSYM(__enable_mmu)		@ return address
+	mov	r13, r12			@ __secondary_switched address
+ ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
+						  @ (return control reg)
+ THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
+ THUMB(	mov	pc, r12				)
+ENDPROC(secondary_startup)
+
+	/*
+	 * r6  = &secondary_data
+	 */
+ENTRY(__secondary_switched)
+	ldr	sp, [r7, #4]			@ get secondary_data.stack
+	mov	fp, #0
+	b	secondary_start_kernel
+ENDPROC(__secondary_switched)
+
+	.type	__secondary_data, %object
+__secondary_data:
+	.long	.
+	.long	secondary_data
+	.long	__secondary_switched
+#endif /* defined(CONFIG_SMP) */
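__secondary_data uses the same `.long .` relocation trick as the lookup tables. The two fields it dereferences correspond to struct secondary_data (declared in <asm/smp.h> in this kernel); the layout below is how secondary_startup consumes it.

    /* The block secondary_startup reads through __secondary_data: */
    struct secondary_data {
            unsigned long pgdir;    /* physical pgd; ends up in r4 for __enable_mmu */
            void *stack;            /* loaded from [r7, #4] into sp */
    };

__cpu_up() in arch/arm/kernel/smp.c fills this in before waking the CPU and clears it again once the CPU has come up.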
+
+/*
+ * Setup common bits before finally enabling the MMU.  Essentially
+ * this is just loading the page table pointer and domain access
+ * registers.
+ *
+ *  r0  = cp#15 control register
+ *  r1  = machine ID
+ *  r2  = atags pointer
+ *  r4  = page table pointer
+ *  r9  = processor ID
+ *  r13 = *virtual* address to jump to upon completion
+ */
+__enable_mmu:
+#ifdef CONFIG_ALIGNMENT_TRAP
+	orr	r0, r0, #CR_A
+#else
+	bic	r0, r0, #CR_A
+#endif
+#ifdef CONFIG_CPU_DCACHE_DISABLE
+	bic	r0, r0, #CR_C
+#endif
+#ifdef CONFIG_CPU_BPREDICT_DISABLE
+	bic	r0, r0, #CR_Z
+#endif
+#ifdef CONFIG_CPU_ICACHE_DISABLE
+	bic	r0, r0, #CR_I
+#endif
+	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
+		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
+		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
+	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
+	b	__turn_mmu_on
+ENDPROC(__enable_mmu)
+
+/*
+ * Enable the MMU.  This completely changes the structure of the visible
+ * memory space.  You will not be able to trace execution through this.
+ * If you have an enquiry about this, *please* check the linux-arm-kernel
+ * mailing list archives BEFORE sending another post to the list.
+ *
+ *  r0  = cp#15 control register
+ *  r1  = machine ID
+ *  r2  = atags pointer
+ *  r9  = processor ID
+ *  r13 = *virtual* address to jump to upon completion
+ *
+ * other registers depend on the function called upon completion
+ */
+	.align	5
+__turn_mmu_on:
+	mov	r0, r0
+	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
+	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
+	mov	r3, r3
+	mov	r3, r13
+	mov	pc, r3
+__enable_mmu_end:
+ENDPROC(__turn_mmu_on)

 #ifdef CONFIG_SMP_ON_UP
 __fixup_smp:

arch/arm/kernel/smp.c  +55 −8

@@ -33,6 +33,7 @@
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
+#include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
@@ -67,12 +68,47 @@ enum ipi_msg_type {
 	IPI_CPU_STOP,
 };

+static inline void identity_mapping_add(pgd_t *pgd, unsigned long start,
+	unsigned long end)
+{
+	unsigned long addr, prot;
+	pmd_t *pmd;
+
+	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
+	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
+		prot |= PMD_BIT4;
+
+	for (addr = start & PGDIR_MASK; addr < end;) {
+		pmd = pmd_offset(pgd + pgd_index(addr), addr);
+		pmd[0] = __pmd(addr | prot);
+		addr += SECTION_SIZE;
+		pmd[1] = __pmd(addr | prot);
+		addr += SECTION_SIZE;
+		flush_pmd_entry(pmd);
+		outer_clean_range(__pa(pmd), __pa(pmd + 1));
+	}
+}
+
+static inline void identity_mapping_del(pgd_t *pgd, unsigned long start,
+	unsigned long end)
+{
+	unsigned long addr;
+	pmd_t *pmd;
+
+	for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE) {
+		pmd = pmd_offset(pgd + pgd_index(addr), addr);
+		pmd[0] = __pmd(0);
+		pmd[1] = __pmd(0);
+		clean_pmd_entry(pmd);
+		outer_clean_range(__pa(pmd), __pa(pmd + 1));
+	}
+}
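Each iteration writes two entries because on ARM's classic two-level tables Linux treats one pgd slot as covering 2MB, backed by a pair of 1MB hardware section entries (SECTION_SIZE), hence pmd[0]/pmd[1] and the two SECTION_SIZE increments. A usage model, mirroring what __cpu_up below does (variable name illustrative):

    /* Sketch: bracket secondary bring-up with a temporary 1:1 mapping
     * of the sections the incoming CPU will execute and touch. */
    pgd_t *idmap = pgd_alloc(&init_mm);

    if (idmap) {
            identity_mapping_add(idmap, __pa(_stext), __pa(_etext));
            /* ...point secondary_data.pgdir at the physical address of
             * idmap and boot the CPU... */
            identity_mapping_del(idmap, __pa(_stext), __pa(_etext));
            pgd_free(&init_mm, idmap);
    }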
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
 	struct task_struct *idle = ci->idle;
 	pgd_t *pgd;
-	pmd_t *pmd;
 	int ret;

 	/*
@@ -101,11 +137,16 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	 * a 1:1 mapping for the physical address of the kernel.
 	 */
 	pgd = pgd_alloc(&init_mm);
-	pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
-	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
-		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
-	flush_pmd_entry(pmd);
-	outer_clean_range(__pa(pmd), __pa(pmd + 1));
+	if (!pgd)
+		return -ENOMEM;
+
+	if (PHYS_OFFSET != PAGE_OFFSET) {
+#ifndef CONFIG_HOTPLUG_CPU
+		identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
+#endif
+		identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
+		identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
+	}

 	/*
 	 * We need to tell the secondary core where to find
@@ -143,8 +184,14 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	secondary_data.stack = NULL;
 	secondary_data.pgdir = 0;

-	*pmd = __pmd(0);
-	clean_pmd_entry(pmd);
+	if (PHYS_OFFSET != PAGE_OFFSET) {
+#ifndef CONFIG_HOTPLUG_CPU
+		identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
+#endif
+		identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
+		identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
+	}
+
 	pgd_free(&init_mm, pgd);

 	if (ret) {

arch/arm/kernel/vmlinux.lds.S  +17 −7

@@ -8,6 +8,19 @@
 #include <asm/memory.h>
 #include <asm/page.h>

+#define PROC_INFO							\
+	VMLINUX_SYMBOL(__proc_info_begin) = .;				\
+	*(.proc.info.init)						\
+	VMLINUX_SYMBOL(__proc_info_end) = .;
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define ARM_CPU_DISCARD(x)
+#define ARM_CPU_KEEP(x)		x
+#else
+#define ARM_CPU_DISCARD(x)	x
+#define ARM_CPU_KEEP(x)
+#endif
+
 OUTPUT_ARCH(arm)
 ENTRY(stext)
@@ -31,9 +44,7 @@ SECTIONS
 			HEAD_TEXT
 			INIT_TEXT
 		_einittext = .;
-		__proc_info_begin = .;
-			*(.proc.info.init)
-		__proc_info_end = .;
+		ARM_CPU_DISCARD(PROC_INFO)
 		__arch_info_begin = .;
 			*(.arch.info.init)
 		__arch_info_end = .;
@@ -73,10 +84,8 @@ SECTIONS
 	/DISCARD/ : {
 		*(.ARM.exidx.exit.text)
 		*(.ARM.extab.exit.text)
-#ifndef CONFIG_HOTPLUG_CPU
-		*(.ARM.exidx.cpuexit.text)
-		*(.ARM.extab.cpuexit.text)
-#endif
+		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
+		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
 #ifndef CONFIG_HOTPLUG
 		*(.ARM.exidx.devexit.text)
 		*(.ARM.extab.devexit.text)
@@ -105,6 +114,7 @@ SECTIONS
 		*(.glue_7)
 		*(.glue_7t)
 		*(.got)			/* Global offset table */
+		ARM_CPU_KEEP(PROC_INFO)
 	}

 	RO_DATA(PAGE_SIZE)
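The ARM_CPU_KEEP/ARM_CPU_DISCARD pairing exists because, with CONFIG_HOTPLUG_CPU, a CPU brought online after boot re-runs the processor-type lookup (secondary_startup and __lookup_processor_type are now __CPUINIT), so the proc_info table must stay in resident .text; without hotplug it can live in init text and be freed with the rest of init memory. A sketch of the resulting lifetime; CONFIG_HOTPLUG_CPU is the real kernel option, while the macro and helper here are hypothetical, for illustration only.

    #ifdef CONFIG_HOTPLUG_CPU
    #define PROC_INFO_RESIDENT 1    /* ARM_CPU_KEEP(PROC_INFO): lives in .text */
    #else
    #define PROC_INFO_RESIDENT 0    /* ARM_CPU_DISCARD(PROC_INFO): init-only */
    #endif

    static int proc_info_usable_after_boot(void)
    {
            /* secondary_startup scans proc_info on every CPU bring-up, so
             * hotplugging a CPU after free_initmem() needs it resident. */
            return PROC_INFO_RESIDENT;
    }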