Loading arch/arm/Kconfig +23 −0 Original line number Diff line number Diff line Loading @@ -1732,6 +1732,29 @@ config ARCH_WANT_GENERAL_HUGETLB source "mm/Kconfig" choice prompt "Virtual Memory Reclaim" default NO_VM_RECLAIM help Select the method of reclaiming virtual memory config ENABLE_VMALLOC_SAVING bool "Reclaim memory for each subsystem" help Enable this config to reclaim the virtual space belonging to any subsystem which is expected to have a lifetime of the entire system. This feature allows lowmem to be non- contiguous. config NO_VM_RECLAIM bool "Do not reclaim memory" help Do not reclaim any memory. This might result in less lowmem and wasting virtual memory space which could otherwise be reclaimed by using any of the other two config options. endchoice config FORCE_MAX_ZONEORDER int "Maximum zone order" if ARCH_SHMOBILE_LEGACY range 11 64 if ARCH_SHMOBILE_LEGACY Loading arch/arm/mm/init.c +73 −24 Original line number Diff line number Diff line Loading @@ -513,6 +513,54 @@ static void __init free_highpages(void) #endif } #define MLK(b, t) (b), (t), (((t) - (b)) >> 10) #define MLM(b, t) (b), (t), (((t) - (b)) >> 20) #define MLK_ROUNDUP(b, t) (b), (t), (DIV_ROUND_UP(((t) - (b)), SZ_1K)) #ifdef CONFIG_ENABLE_VMALLOC_SAVING static void print_vmalloc_lowmem_info(void) { struct memblock_region *reg, *prev_reg = NULL; pr_notice( " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n", MLM((unsigned long)high_memory, VMALLOC_END)); for_each_memblock_rev(memory, reg) { phys_addr_t start_phys = reg->base; phys_addr_t end_phys = reg->base + reg->size; if (start_phys > arm_lowmem_limit) continue; if (end_phys > arm_lowmem_limit) end_phys = arm_lowmem_limit; if (prev_reg == NULL) { prev_reg = reg; pr_notice( " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n", MLM((unsigned long)__va(start_phys), (unsigned long)__va(end_phys))); continue; } pr_notice( " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n", MLM((unsigned long)__va(end_phys), (unsigned long)__va(prev_reg->base))); pr_notice( " lowmem : 
0x%08lx - 0x%08lx (%4ld MB)\n", MLM((unsigned long)__va(start_phys), (unsigned long)__va(end_phys))); } } #endif /* * mem_init() marks the free areas in the mem_map and tells us how much * memory is free. This is done after various parts of the system have Loading Loading @@ -541,39 +589,40 @@ void __init mem_init(void) mem_init_print_info(NULL); #define MLK(b, t) b, t, ((t) - (b)) >> 10 #define MLM(b, t) b, t, ((t) - (b)) >> 20 #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) printk(KERN_NOTICE "Virtual kernel memory layout:\n" " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HAVE_TCM " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n" " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n" #endif " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif #ifdef CONFIG_MODULES " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif " .text : 0x%p" " - 0x%p" " (%4td kB)\n" " .init : 0x%p" " - 0x%p" " (%4td kB)\n" " .data : 0x%p" " - 0x%p" " (%4td kB)\n" " .bss : 0x%p" " - 0x%p" " (%4td kB)\n", " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n", MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + (PAGE_SIZE)), #ifdef CONFIG_HAVE_TCM MLK(DTCM_OFFSET, (unsigned long) dtcm_end), MLK(ITCM_OFFSET, (unsigned long) itcm_end), #endif MLK(FIXADDR_START, FIXADDR_END), MLK(FIXADDR_START, FIXADDR_END)); #ifdef CONFIG_ENABLE_VMALLOC_SAVING print_vmalloc_lowmem_info(); #else printk(KERN_NOTICE " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n", MLM(VMALLOC_START, VMALLOC_END), MLM(PAGE_OFFSET, (unsigned long)high_memory), MLM(PAGE_OFFSET, (unsigned long)high_memory)); #endif printk(KERN_NOTICE #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif #ifdef CONFIG_MODULES " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif " .text : 0x%p" " - 0x%p" " (%4d kB)\n" " .init : 0x%p" " - 0x%p" " (%4d kB)\n" " .data : 0x%p" 
" - 0x%p" " (%4d kB)\n" " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", #ifdef CONFIG_HIGHMEM MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * (PAGE_SIZE)), Loading @@ -587,10 +636,6 @@ void __init mem_init(void) MLK_ROUNDUP(_sdata, _edata), MLK_ROUNDUP(__bss_start, __bss_stop)); #undef MLK #undef MLM #undef MLK_ROUNDUP /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. Loading @@ -616,6 +661,10 @@ void __init mem_init(void) } } #undef MLK #undef MLM #undef MLK_ROUNDUP #ifdef CONFIG_ARM_KERNMEM_PERMS struct section_perm { unsigned long start; Loading arch/arm/mm/mmu.c +13 −0 Original line number Diff line number Diff line Loading @@ -1104,6 +1104,19 @@ void __init sanity_check_meminfo(void) phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; struct memblock_region *reg; #ifdef CONFIG_ENABLE_VMALLOC_SAVING struct memblock_region *prev_reg = NULL; for_each_memblock(memory, reg) { if (prev_reg == NULL) { prev_reg = reg; continue; } vmalloc_limit += reg->base - (prev_reg->base + prev_reg->size); prev_reg = reg; } #endif for_each_memblock(memory, reg) { phys_addr_t block_start = reg->base; phys_addr_t block_end = reg->base + reg->size; Loading include/linux/memblock.h +5 −0 Original line number Diff line number Diff line Loading @@ -357,6 +357,11 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ region++) #define for_each_memblock_rev(memblock_type, region) \ for (region = memblock.memblock_type.regions + \ memblock.memblock_type.cnt - 1; \ region >= memblock.memblock_type.regions; \ region--) #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK #define __init_memblock __meminit Loading mm/vmalloc.c +11 −0 Original line number Diff line number Diff line Loading @@ -1366,16 +1366,27 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, */ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) { 
#ifdef CONFIG_ENABLE_VMALLOC_SAVING return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END, NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0)); #else return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0)); #endif } struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, const void *caller) { #ifdef CONFIG_ENABLE_VMALLOC_SAVING return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END, NUMA_NO_NODE, GFP_KERNEL, caller); #else return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, NUMA_NO_NODE, GFP_KERNEL, caller); #endif } /** Loading Loading
source "mm/Kconfig"

choice
	prompt "Virtual Memory Reclaim"
	default NO_VM_RECLAIM
	help
	  Select the method of reclaiming virtual memory

config ENABLE_VMALLOC_SAVING
	bool "Reclaim memory for each subsystem"
	help
	  Enable this config to reclaim the virtual space belonging
	  to any subsystem which is expected to have a lifetime of
	  the entire system. This feature allows lowmem to be
	  non-contiguous.

config NO_VM_RECLAIM
	bool "Do not reclaim memory"
	help
	  Do not reclaim any memory. This might result in less lowmem
	  and wasting virtual memory space which could otherwise be
	  reclaimed by using the other config option.

endchoice

config FORCE_MAX_ZONEORDER
	int "Maximum zone order" if ARCH_SHMOBILE_LEGACY
	range 11 64 if ARCH_SHMOBILE_LEGACY
arch/arm/mm/init.c +73 −24 Original line number Diff line number Diff line Loading @@ -513,6 +513,54 @@ static void __init free_highpages(void) #endif } #define MLK(b, t) (b), (t), (((t) - (b)) >> 10) #define MLM(b, t) (b), (t), (((t) - (b)) >> 20) #define MLK_ROUNDUP(b, t) (b), (t), (DIV_ROUND_UP(((t) - (b)), SZ_1K)) #ifdef CONFIG_ENABLE_VMALLOC_SAVING static void print_vmalloc_lowmem_info(void) { struct memblock_region *reg, *prev_reg = NULL; pr_notice( " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n", MLM((unsigned long)high_memory, VMALLOC_END)); for_each_memblock_rev(memory, reg) { phys_addr_t start_phys = reg->base; phys_addr_t end_phys = reg->base + reg->size; if (start_phys > arm_lowmem_limit) continue; if (end_phys > arm_lowmem_limit) end_phys = arm_lowmem_limit; if (prev_reg == NULL) { prev_reg = reg; pr_notice( " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n", MLM((unsigned long)__va(start_phys), (unsigned long)__va(end_phys))); continue; } pr_notice( " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n", MLM((unsigned long)__va(end_phys), (unsigned long)__va(prev_reg->base))); pr_notice( " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n", MLM((unsigned long)__va(start_phys), (unsigned long)__va(end_phys))); } } #endif /* * mem_init() marks the free areas in the mem_map and tells us how much * memory is free. 
This is done after various parts of the system have Loading Loading @@ -541,39 +589,40 @@ void __init mem_init(void) mem_init_print_info(NULL); #define MLK(b, t) b, t, ((t) - (b)) >> 10 #define MLM(b, t) b, t, ((t) - (b)) >> 20 #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) printk(KERN_NOTICE "Virtual kernel memory layout:\n" " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HAVE_TCM " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n" " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n" #endif " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif #ifdef CONFIG_MODULES " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif " .text : 0x%p" " - 0x%p" " (%4td kB)\n" " .init : 0x%p" " - 0x%p" " (%4td kB)\n" " .data : 0x%p" " - 0x%p" " (%4td kB)\n" " .bss : 0x%p" " - 0x%p" " (%4td kB)\n", " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n", MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + (PAGE_SIZE)), #ifdef CONFIG_HAVE_TCM MLK(DTCM_OFFSET, (unsigned long) dtcm_end), MLK(ITCM_OFFSET, (unsigned long) itcm_end), #endif MLK(FIXADDR_START, FIXADDR_END), MLK(FIXADDR_START, FIXADDR_END)); #ifdef CONFIG_ENABLE_VMALLOC_SAVING print_vmalloc_lowmem_info(); #else printk(KERN_NOTICE " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n", MLM(VMALLOC_START, VMALLOC_END), MLM(PAGE_OFFSET, (unsigned long)high_memory), MLM(PAGE_OFFSET, (unsigned long)high_memory)); #endif printk(KERN_NOTICE #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif #ifdef CONFIG_MODULES " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif " .text : 0x%p" " - 0x%p" " (%4d kB)\n" " .init : 0x%p" " - 0x%p" " (%4d kB)\n" " .data : 0x%p" " - 0x%p" " (%4d kB)\n" " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", #ifdef CONFIG_HIGHMEM MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * (PAGE_SIZE)), Loading @@ -587,10 +636,6 @@ void __init mem_init(void) 
MLK_ROUNDUP(_sdata, _edata), MLK_ROUNDUP(__bss_start, __bss_stop)); #undef MLK #undef MLM #undef MLK_ROUNDUP /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. Loading @@ -616,6 +661,10 @@ void __init mem_init(void) } } #undef MLK #undef MLM #undef MLK_ROUNDUP #ifdef CONFIG_ARM_KERNMEM_PERMS struct section_perm { unsigned long start; Loading
/*
 * NOTE(review): garbled diff-viewer dump of an incomplete mid-function
 * hunk of sanity_check_meminfo(). The added CONFIG_ENABLE_VMALLOC_SAVING
 * loop grows vmalloc_limit by the size of each hole between consecutive
 * memblock.memory regions (reg->base minus the previous region's end),
 * so reclaimed inter-bank holes still count toward the lowmem budget.
 * It relies on memblock.memory regions being sorted by ascending base
 * (memblock maintains this invariant). Unlike the reverse-walk printer
 * in init.c, this loop correctly advances prev_reg each iteration.
 */
arch/arm/mm/mmu.c +13 −0 Original line number Diff line number Diff line Loading @@ -1104,6 +1104,19 @@ void __init sanity_check_meminfo(void) phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; struct memblock_region *reg; #ifdef CONFIG_ENABLE_VMALLOC_SAVING struct memblock_region *prev_reg = NULL; for_each_memblock(memory, reg) { if (prev_reg == NULL) { prev_reg = reg; continue; } vmalloc_limit += reg->base - (prev_reg->base + prev_reg->size); prev_reg = reg; } #endif for_each_memblock(memory, reg) { phys_addr_t block_start = reg->base; phys_addr_t block_end = reg->base + reg->size; Loading
/*
 * NOTE(review): garbled diff-viewer dump of a memblock.h hunk. It adds
 * for_each_memblock_rev(): iterates memblock.<type>.regions from the
 * last populated entry (cnt - 1) down to index 0, i.e. the mirror image
 * of for_each_memblock(). Used by the ENABLE_VMALLOC_SAVING layout
 * printer to walk memory banks highest-address-first. With cnt == 0 the
 * initial pointer is regions - 1 and the loop body never runs (the
 * one-before-begin pointer is technically UB in ISO C but is accepted
 * kernel practice — TODO confirm against upstream style if promoted).
 */
include/linux/memblock.h +5 −0 Original line number Diff line number Diff line Loading @@ -357,6 +357,11 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ region++) #define for_each_memblock_rev(memblock_type, region) \ for (region = memblock.memblock_type.regions + \ memblock.memblock_type.cnt - 1; \ region >= memblock.memblock_type.regions; \ region--) #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK #define __init_memblock __meminit Loading
mm/vmalloc.c +11 −0 Original line number Diff line number Diff line Loading @@ -1366,16 +1366,27 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, */ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) { #ifdef CONFIG_ENABLE_VMALLOC_SAVING return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END, NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0)); #else return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0)); #endif } struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, const void *caller) { #ifdef CONFIG_ENABLE_VMALLOC_SAVING return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END, NUMA_NO_NODE, GFP_KERNEL, caller); #else return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, NUMA_NO_NODE, GFP_KERNEL, caller); #endif } /** Loading