Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit af8a5af3 authored by Bernd Schmidt, committed by Bryan Wu
Browse files

Blackfin arch: fix bug where the kernel fails to boot with MTD filesystems



Revert this patch:
move the init sections to the end of memory, so that after they
are freed, run-time memory is all contiguous - this should help decrease
memory fragmentation. When doing this, we also pack some of the other
sections a little closer together, to make sure we don't waste memory.
To make this happen, we need to rename the .data.init_task section to
.init_task.data, so it doesn't get picked up by the linker script glob.

The patch is reverted because it causes the kernel to fail to boot with MTD filesystems.

Signed-off-by: Bernd Schmidt <bernd.schmidt@analog.com>
Signed-off-by: Bryan Wu <bryan.wu@analog.com>
parent a961d659
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -57,5 +57,5 @@ EXPORT_SYMBOL(init_task);
 * "init_task" linker map entry.
 */
union thread_union init_thread_union
    __attribute__ ((__section__(".init_task.data"))) = {
    __attribute__ ((__section__(".data.init_task"))) = {
INIT_THREAD_INFO(init_task)};
+4 −4
Original line number Diff line number Diff line
@@ -237,7 +237,7 @@ void __init setup_arch(char **cmdline_p)
	/* by now the stack is part of the init task */
	memory_end = _ramend - DMA_UNCACHED_REGION;

	_ramstart = (unsigned long)_end;
	_ramstart = (unsigned long)__bss_stop;
	memory_start = PAGE_ALIGN(_ramstart);

#if defined(CONFIG_MTD_UCLINUX)
@@ -286,7 +286,7 @@ void __init setup_arch(char **cmdline_p)
	}

	/* Relocate MTD image to the top of memory after the uncached memory area */
	dma_memcpy((char *)memory_end, _end, mtd_size);
	dma_memcpy((char *)memory_end, __bss_stop, mtd_size);

	memory_mtd_start = memory_end;
	_ebss = memory_mtd_start;	/* define _ebss for compatible */
@@ -358,10 +358,10 @@ void __init setup_arch(char **cmdline_p)
	printk(KERN_INFO "Memory map:\n"
	       KERN_INFO "  text      = 0x%p-0x%p\n"
	       KERN_INFO "  rodata    = 0x%p-0x%p\n"
	       KERN_INFO "  bss       = 0x%p-0x%p\n"
	       KERN_INFO "  data      = 0x%p-0x%p\n"
	       KERN_INFO "    stack   = 0x%p-0x%p\n"
	       KERN_INFO "  init      = 0x%p-0x%p\n"
	       KERN_INFO "  bss       = 0x%p-0x%p\n"
	       KERN_INFO "  available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX
	       KERN_INFO "  rootfs    = 0x%p-0x%p\n"
@@ -371,10 +371,10 @@ void __init setup_arch(char **cmdline_p)
#endif
	       , _stext, _etext,
	       __start_rodata, __end_rodata,
	       __bss_start, __bss_stop,
	       _sdata, _edata,
	       (void *)&init_thread_union, (void *)((int)(&init_thread_union) + 0x2000),
	       __init_begin, __init_end,
	       __bss_start, __bss_stop,
	       (void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX
	       , (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
+19 −28
Original line number Diff line number Diff line
@@ -41,9 +41,6 @@ _jiffies = _jiffies_64;
SECTIONS
{
	. = CONFIG_BOOT_LOAD;
	/* Neither the text, ro_data or bss section need to be aligned
	 * So pack them back to back
	 */
	.text :
	{
		__text = .;
@@ -61,25 +58,22 @@ SECTIONS
		*(__ex_table)
		___stop___ex_table = .;

		. = ALIGN(4);
		__etext = .;
	}

	/* Just in case the first read only is a 32-bit access */
	RO_DATA(4)

	.bss :
	{
		. = ALIGN(4);
		___bss_start = .;
		*(.bss .bss.*)
		*(COMMON)
		___bss_stop = .;
	}
	RO_DATA(PAGE_SIZE)

	.data :
	{
		/* make sure the init_task is aligned to the
		 * kernel thread size so we can locate the kernel
		 * stack properly and quickly.
		 */
		__sdata = .;
		/* This gets done first, so the glob doesn't suck it in */
		. = ALIGN(THREAD_SIZE);
		*(.data.init_task)

		. = ALIGN(32);
		*(.data.cacheline_aligned)

@@ -87,22 +81,10 @@ SECTIONS
		*(.data.*)
		CONSTRUCTORS

		/* make sure the init_task is aligned to the
		 * kernel thread size so we can locate the kernel
		 * stack properly and quickly.
		 */
		. = ALIGN(THREAD_SIZE);
		*(.init_task.data)

		__edata = .;
	}

	/* The init section should be last, so when we free it, it goes into
	 * the general memory pool, and (hopefully) will decrease fragmentation
	 * a tiny bit. The init section has a _requirement_ that it be
	 * PAGE_SIZE aligned
	 */
	. = ALIGN(PAGE_SIZE);
	___init_begin = .;

	.init.text :
@@ -197,7 +179,16 @@ SECTIONS
	. = ALIGN(PAGE_SIZE);
	___init_end = .;

	.bss :
	{
		. = ALIGN(4);
		___bss_start = .;
		*(.bss .bss.*)
		*(COMMON)
		. = ALIGN(4);
		___bss_stop = .;
		__end = .;
	}

	STABS_DEBUG