arch/arm/boot/compressed/Makefile  +1 −1

@@ -74,7 +74,7 @@ ZTEXTADDR	:= $(CONFIG_ZBOOT_ROM_TEXT)
 ZBSSADDR	:= $(CONFIG_ZBOOT_ROM_BSS)
 else
 ZTEXTADDR	:= 0
-ZBSSADDR	:= ALIGN(4)
+ZBSSADDR	:= ALIGN(8)
 endif
 
 SEDFLAGS	= s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
arch/arm/boot/compressed/head.S  +24 −11

@@ -179,15 +179,14 @@ not_angel:
 		bl	cache_on
 
 restart:	adr	r0, LC0
-		ldmia	r0, {r1, r2, r3, r5, r6, r9, r11, r12}
-		ldr	sp, [r0, #32]
+		ldmia	r0, {r1, r2, r3, r6, r9, r11, r12}
+		ldr	sp, [r0, #28]
 
 		/*
 		 * We might be running at a different address.  We need
 		 * to fix up various pointers.
 		 */
 		sub	r0, r0, r1		@ calculate the delta offset
-		add	r5, r5, r0		@ _start
 		add	r6, r6, r0		@ _edata
 
 #ifndef CONFIG_ZBOOT_ROM
@@ -206,31 +205,40 @@ restart:	adr	r0, LC0
 /*
  * Check to see if we will overwrite ourselves.
  *   r4  = final kernel address
- *   r5  = start of this image
  *   r9  = size of decompressed image
  *   r10 = end of this image, including bss/stack/malloc space if non XIP
  * We basically want:
- *   r4 >= r10 -> OK
- *   r4 + image length <= r5 -> OK
+ *   r4 - 16k page directory >= r10 -> OK
+ *   r4 + image length <= current position (pc) -> OK
  */
+		add	r10, r10, #16384
 		cmp	r4, r10
 		bhs	wont_overwrite
 		add	r10, r4, r9
-		cmp	r10, r5
+ ARM(		cmp	r10, pc		)
+ THUMB(	mov	lr, pc		)
+ THUMB(	cmp	r10, lr		)
 		bls	wont_overwrite
 
 /*
  * Relocate ourselves past the end of the decompressed kernel.
- *   r5  = start of this image
  *   r6  = _edata
  *   r10 = end of the decompressed kernel
  * Because we always copy ahead, we need to do it from the end and go
  * backward in case the source and destination overlap.
  */
-		/* Round up to next 256-byte boundary. */
-		add	r10, r10, #256
+		/*
+		 * Bump to the next 256-byte boundary with the size of
+		 * the relocation code added. This avoids overwriting
+		 * ourself when the offset is small.
+		 */
+		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
 		bic	r10, r10, #255
 
+		/* Get start of code we want to copy and align it down. */
+		adr	r5, restart
+		bic	r5, r5, #31
+
 		sub	r9, r6, r5		@ size to copy
 		add	r9, r9, #31		@ rounded up to a multiple
 		bic	r9, r9, #31		@ ... of 32 bytes
@@ -245,6 +253,11 @@ restart:	adr	r0, LC0
 		/* Preserve offset to relocated code. */
 		sub	r6, r9, r6
 
+#ifndef CONFIG_ZBOOT_ROM
+		/* cache_clean_flush may use the stack, so relocate it */
+		add	sp, sp, r6
+#endif
+
 		bl	cache_clean_flush
 
 		adr	r0, BSYM(restart)
@@ -333,7 +346,6 @@ not_relocated:	mov	r0, #0
 LC0:		.word	LC0			@ r1
 		.word	__bss_start		@ r2
 		.word	_end			@ r3
-		.word	_start			@ r5
 		.word	_edata			@ r6
 		.word	_image_size		@ r9
 		.word	_got_start		@ r11
@@ -1062,6 +1074,7 @@ memdump:	mov	r12, r0
 #endif
 
 		.ltorg
+reloc_code_end:
 
 		.align
 		.section ".stack", "aw", %nobits
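
The key head.S change is the relocation-destination arithmetic: instead of rounding the end of the decompressed kernel (r10) up by a fixed 256 bytes, the code now also adds the size of the relocation code itself, so a small relocation offset can no longer place the copied image on top of the code doing the copying. Below is a minimal host-side C sketch of that rounding, not part of the patch; the function name and example numbers are invented, and reloc_size stands in for the assemble-time constant reloc_code_end - restart.

    /*
     * Illustrative sketch only.  It mirrors:
     *
     *     add  r10, r10, #((reloc_code_end - restart + 256) & ~255)
     *     bic  r10, r10, #255
     *
     * Pre-rounding the size to a multiple of 256 also keeps the add's
     * immediate encodable as an ARM data-processing constant (an 8-bit
     * value rotated by an even amount), which is presumably why the
     * masking is done inside the immediate expression.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uintptr_t relocation_target(uintptr_t kernel_end, uintptr_t reloc_size)
    {
            uintptr_t bump = (reloc_size + 256) & ~(uintptr_t)255;  /* add ... #((...) & ~255) */

            return (kernel_end + bump) & ~(uintptr_t)255;           /* bic r10, r10, #255 */
    }

    int main(void)
    {
            /* Hypothetical values: 0x5a0 bytes of relocation code, kernel data ending at 0x80123456. */
            uintptr_t dst = relocation_target(0x80123456u, 0x5a0u);

            printf("copy destination: %#lx\n", (unsigned long)dst);  /* prints 0x80123a00 */
            return 0;
    }

In this example the destination lands on a 256-byte boundary 0x5aa bytes past the kernel end, i.e. clear of the 0x5a0 bytes of running relocation code, whereas the old fixed "+256" bump would have left only 0xaa bytes of clearance.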
arch/arm/boot/compressed/vmlinux.lds.in  +1 −0

@@ -54,6 +54,7 @@ SECTIONS
   .bss			: { *(.bss) }
   _end = .;
 
+  . = ALIGN(8);		/* the stack must be 64-bit aligned */
   .stack		: { *(.stack) }
 
   .stab 0		: { *(.stab) }
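
The Makefile and linker-script hunks share the rationale stated in the new comment: the decompressor's stack must stay 64-bit aligned, as the ARM EABI expects an 8-byte aligned stack pointer so that 64-bit types and ldrd/strd accesses behave. A trivial sketch, not from the patch, of the property that ". = ALIGN(8)" and "ZBSSADDR := ALIGN(8)" guarantee, using hypothetical addresses:

    /*
     * Minimal sketch: the only property being enforced is that the .stack
     * section starts on an 8-byte (64-bit) boundary.
     */
    #include <assert.h>
    #include <stdint.h>

    static int stack_is_64bit_aligned(uintptr_t addr)
    {
            return (addr & 7) == 0;                     /* the test ". = ALIGN(8)" satisfies */
    }

    int main(void)
    {
            assert(stack_is_64bit_aligned(0x00200000u));   /* ALIGN(8) placement: OK            */
            assert(!stack_is_64bit_aligned(0x00200004u));  /* the old ALIGN(4) could yield this */
            return 0;
    }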