Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ed91345b authored by qctecmdr, committed by Gerrit (the friendly Code Review server)
Browse files

Merge "ARM: enable vmalloc saving"

parents a7b7543f f55eee10
Loading
Loading
Loading
Loading
+23 −0
Original line number Diff line number Diff line
@@ -1758,6 +1758,29 @@ config ARM_MODULE_PLTS

source "mm/Kconfig"

choice
	prompt "Virtual Memory Reclaim"
	default ENABLE_VMALLOC_SAVING
	help
	  Select the method of reclaiming virtual memory

config ENABLE_VMALLOC_SAVING
	bool "Reclaim memory for each subsystem"
	help
	  Enable this config to reclaim the virtual space belonging
	  to any subsystem which is expected to have a lifetime of
	  the entire system. This feature allows lowmem to be non-
	  contiguous.

config NO_VM_RECLAIM
	bool "Do not reclaim memory"
	help
	  Do not reclaim any memory. This might result in less lowmem
	  and wasting virtual memory space which could otherwise be
	  reclaimed by using the other config option.

endchoice

config FORCE_MAX_ZONEORDER
	int "Maximum zone order"
	default "12" if SOC_AM33XX
+78 −30
Original line number Diff line number Diff line
@@ -495,6 +495,54 @@ static void __init free_highpages(void)
#endif
}

/*
 * Pretty-printing helpers for the "Virtual kernel memory layout" banner:
 * each expands to a (base, top, size) argument triple for a printk format,
 * with the size reported in kB (MLK), MB (MLM), or kB rounded up to the
 * next whole kB (MLK_ROUNDUP).
 */
#define MLK(b, t) (b), (t), (((t) - (b)) >> 10)
#define MLM(b, t) (b), (t), (((t) - (b)) >> 20)
#define MLK_ROUNDUP(b, t) (b), (t), (DIV_ROUND_UP(((t) - (b)), SZ_1K))

#ifdef CONFIG_ENABLE_VMALLOC_SAVING
/*
 * Print the interleaved lowmem/vmalloc layout that results from
 * CONFIG_ENABLE_VMALLOC_SAVING: the virtual space between lowmem
 * memblock regions is handed back to vmalloc, so lowmem is no longer
 * one contiguous span.  Walk the "memory" memblocks from highest to
 * lowest physical address and emit one "lowmem" line per region plus a
 * "vmalloc" line for each gap between adjacent regions (and one for
 * the area above the highest lowmem page).
 */
static void print_vmalloc_lowmem_info(void)
{
	struct memblock_region *reg, *prev_reg = NULL;

	/* Topmost vmalloc area: everything above the end of lowmem. */
	pr_notice(
		"	   vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n",
		MLM((unsigned long)high_memory, VMALLOC_END));

	for_each_memblock_rev(memory, reg) {
		phys_addr_t start_phys = reg->base;
		phys_addr_t end_phys = reg->base + reg->size;

		/* Entirely highmem: not part of the lowmem mapping. */
		if (start_phys > arm_lowmem_limit)
			continue;

		/* Region straddles the limit: clip it to lowmem. */
		if (end_phys > arm_lowmem_limit)
			end_phys = arm_lowmem_limit;

		if (prev_reg == NULL) {
			/* Highest lowmem region: no gap above it to print. */
			prev_reg = reg;

			pr_notice(
			"	   lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n",
			MLM((unsigned long)__va(start_phys),
			(unsigned long)__va(end_phys)));

			continue;
		}

		/* Gap between this region and the one directly above it. */
		pr_notice(
		"	   vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n",
		MLM((unsigned long)__va(end_phys),
		(unsigned long)__va(prev_reg->base)));

		pr_notice(
		"	   lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n",
		MLM((unsigned long)__va(start_phys),
		(unsigned long)__va(end_phys)));

		/*
		 * Advance the cursor: the next gap must be measured against
		 * THIS region.  Without this, a third (or later) lowmem
		 * region would report its vmalloc gap against the topmost
		 * region instead of its immediate upper neighbour.
		 */
		prev_reg = reg;
	}
}
#endif

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
@@ -523,9 +571,6 @@ void __init mem_init(void)

	mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	pr_notice("Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
@@ -533,45 +578,44 @@ void __init mem_init(void)
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"      .text : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4td kB)\n",

			MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n",
			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_END),
			MLK(FIXADDR_START, FIXADDR_END));
#ifdef CONFIG_ENABLE_VMALLOC_SAVING
	print_vmalloc_lowmem_info();
#else
	pr_notice(
		   "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		   "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n",
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PAGE_OFFSET, (unsigned long)high_memory));
#endif
#if IS_ENABLED(CONFIG_HIGHMEM)
	pr_notice(
		   "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n",
		   MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
							(PAGE_SIZE)));
#endif
#ifdef CONFIG_MODULES
			MLM(MODULES_VADDR, MODULES_END),
#if IS_ENABLED(CONFIG_MODULES)
	pr_notice(
		   "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n",
		   MLM(MODULES_VADDR, MODULES_END));
#endif

	pr_notice(
		   "      .text : 0x%pK - 0x%pK    (%4d kB)\n"
		   "      .init : 0x%pK - 0x%pK    (%4d kB)\n"
		   "      .data : 0x%pK - 0x%pK    (%4d kB)\n"
		   "       .bss : 0x%pK - 0x%pK    (%4d kB)\n",
			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
@@ -598,6 +642,10 @@ void __init mem_init(void)
}

#ifdef CONFIG_STRICT_KERNEL_RWX
#undef MLK
#undef MLM
#undef MLK_ROUNDUP

struct section_perm {
	const char *name;
	unsigned long start;
+2 −1
Original line number Diff line number Diff line
@@ -92,6 +92,7 @@ void __init add_static_vm_early(struct static_vm *svm)
	void *vaddr;

	vm = &svm->vm;
	if (!vm_area_check_early(vm))
		vm_area_add_early(vm);
	vaddr = vm->addr;

+39 −2
Original line number Diff line number Diff line
@@ -1447,12 +1447,21 @@ static void __init map_lowmem(void)
	struct memblock_region *reg;
	phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
	struct static_vm *svm;
	phys_addr_t start;
	phys_addr_t end;
	unsigned long vaddr;
	unsigned long pfn;
	unsigned long length;
	unsigned int type;
	int nr = 0;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;
		start = reg->base;
		end = start + reg->size;
		nr++;

		if (memblock_is_nomap(reg))
			continue;
@@ -1504,6 +1513,34 @@ static void __init map_lowmem(void)
			}
		}
	}
	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));

	for_each_memblock(memory, reg) {
		struct vm_struct *vm;

		start = reg->base;
		end = start + reg->size;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			break;

		vm = &svm->vm;
		pfn = __phys_to_pfn(start);
		vaddr = __phys_to_virt(start);
		length = end - start;
		type = MT_MEMORY_RW;

		vm->addr = (void *)(vaddr & PAGE_MASK);
		vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(pfn);
		vm->flags = VM_LOWMEM;
		vm->flags |= VM_ARM_MTYPE(type);
		vm->caller = map_lowmem;
		add_static_vm_early(svm++);
		mark_vmalloc_reserved_area(vm->addr, vm->size);
	}
}

#ifdef CONFIG_ARM_PV_FIXUP
+5 −0
Original line number Diff line number Diff line
@@ -398,6 +398,11 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
	for (idx = 0, rgn = &memblock_type->regions[0];			\
	     idx < memblock_type->cnt;					\
	     idx++, rgn = &memblock_type->regions[idx])
/*
 * for_each_memblock_rev - iterate a memblock type's regions from the
 * highest-indexed (highest physical address) region down to index 0.
 * NOTE(review): the initializer points at &regions[cnt - 1], so this
 * presumes the type holds at least one region -- confirm that every
 * caller only uses it on a populated memblock type.
 */
#define for_each_memblock_rev(memblock_type, region)	\
	for (region = memblock.memblock_type.regions + \
			memblock.memblock_type.cnt - 1;	\
	     region >= memblock.memblock_type.regions;	\
	     region--)

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
Loading