arch/powerpc/kernel/cpu_setup_44x.S  +6 −1

@@ -34,7 +34,12 @@ _GLOBAL(__setup_cpu_440grx)
 	blr
 _GLOBAL(__setup_cpu_460ex)
 _GLOBAL(__setup_cpu_460gt)
-	b	__init_fpu_44x
+	mflr	r4
+	bl	__init_fpu_44x
+	bl	__fixup_440A_mcheck
+	mtlr	r4
+	blr
+
 _GLOBAL(__setup_cpu_440gx)
 _GLOBAL(__setup_cpu_440spe)
 	b	__fixup_440A_mcheck
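For context on the hunk above: on PowerPC, bl overwrites the link register with its own return address, so a setup routine that previously tail-branched (b) to a single helper must save and restore LR once it calls two helpers and then returns itself. That is what the mflr r4 / mtlr r4 pair does. A rough C analogue of the control-flow change, purely illustrative and not kernel code (the function names only mirror the assembly labels):

    /*
     * Illustrative C analogue of the cpu_setup_44x.S change; not kernel code.
     * The old routine tail-branched ("b") to a single helper, which returned
     * straight to the original caller.  Calling two helpers with "bl" clobbers
     * the link register, so the new code saves it first (mflr r4), restores it
     * afterwards (mtlr r4) and returns with blr -- equivalent to the explicit
     * calls below.
     */
    static void init_fpu_44x(void)      { /* FPU setup for 440/460 cores   */ }
    static void fixup_440A_mcheck(void) { /* 440A machine-check fixup      */ }

    static void setup_cpu_460ex(void)
    {
        /* was: a tail call to init_fpu_44x() only */
        init_fpu_44x();
        fixup_440A_mcheck();
        /* return address preserved across both calls */
    }

    int main(void)
    {
        setup_cpu_460ex();
        return 0;
    }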
arch/powerpc/mm/40x_mmu.c  +14 −2

@@ -93,7 +93,7 @@ void __init MMU_init_hw(void)

 unsigned long __init mmu_mapin_ram(void)
 {
-	unsigned long v, s;
+	unsigned long v, s, mapped;
 	phys_addr_t p;

 	v = KERNELBASE;
@@ -130,5 +130,17 @@ unsigned long __init mmu_mapin_ram(void)
 		s -= LARGE_PAGE_SIZE_4M;
 	}

-	return total_lowmem - s;
+	mapped = total_lowmem - s;
+
+	/* If the size of RAM is not an exact power of two, we may not
+	 * have covered RAM in its entirety with 16 and 4 MiB
+	 * pages. Consequently, restrict the top end of RAM currently
+	 * allocable so that calls to the LMB to allocate PTEs for "tail"
+	 * coverage with normal-sized pages (or other reasons) do not
+	 * attempt to allocate outside the allowed range.
+	 */
+
+	__initial_memory_limit_addr = memstart_addr + mapped;
+
+	return mapped;
 }
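To see what the new clamp protects against, here is a standalone sketch of the arithmetic; it is not kernel code. The 16 MiB/4 MiB mapping loops are an assumption based on the visible hunk and the in-tree comment, and only the identifiers total_lowmem, memstart_addr, mapped and the LARGE_PAGE_SIZE_* macros mirror names from the real file. With 126 MiB of RAM, for example, the large pages cover only 124 MiB, leaving a 2 MiB tail that has to be mapped with normal-sized pages later:

    /* Standalone sketch (not kernel code) of the 40x_mmu.c change above. */
    #include <stdio.h>

    #define LARGE_PAGE_SIZE_16M    (16UL << 20)
    #define LARGE_PAGE_SIZE_4M     (4UL << 20)

    int main(void)
    {
        unsigned long total_lowmem = 126UL << 20;    /* not a power of two */
        unsigned long memstart_addr = 0x00000000UL;  /* hypothetical RAM base */
        unsigned long s = total_lowmem;
        unsigned long mapped, initial_memory_limit;

        while (s >= LARGE_PAGE_SIZE_16M)             /* 16 MiB pinned mappings */
            s -= LARGE_PAGE_SIZE_16M;
        while (s >= LARGE_PAGE_SIZE_4M)              /* then 4 MiB mappings */
            s -= LARGE_PAGE_SIZE_4M;

        mapped = total_lowmem - s;                   /* what is actually covered */
        initial_memory_limit = memstart_addr + mapped;   /* the new clamp */

        printf("lowmem %lu MiB, mapped %lu MiB, tail %lu MiB\n",
               total_lowmem >> 20, mapped >> 20, s >> 20);
        printf("early allocations limited to below 0x%08lx\n",
               initial_memory_limit);
        return 0;
    }

The point of the clamp is exactly what the added comment says: by limiting the early allocator to memstart_addr + mapped, allocations made before the tail is covered (for example PTE pages for that tail) cannot land above the region the large pages actually map.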