Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 553427bb authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: Increase the kernel virtual area to include lowmem"

parents d874198c 71a14bc3
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1719,7 +1719,7 @@ config ARM_MODULE_PLTS

choice
	prompt "Virtual Memory Reclaim"
	default NO_VM_RECLAIM
	default ENABLE_VMALLOC_SAVING
	help
	  Select the method of reclaiming virtual memory. Two values
	  are allowed to choose, one is NO_VM_RECLAIM, the other is
+75 −23
Original line number Diff line number Diff line
@@ -496,6 +496,56 @@ static void __init free_highpages(void)
#endif
}

/*
 * Helpers that expand to the three printf arguments used by the memory
 * layout banner: start address, end address, and the size of the span
 * in KiB (MLK), MiB (MLM), or KiB rounded up (MLK_ROUNDUP).
 * Each argument is fully parenthesized so callers can pass expressions.
 */
#define MLK(b, t) (b), (t), (((t) - (b)) >> 10)
#define MLM(b, t) (b), (t), (((t) - (b)) >> 20)
#define MLK_ROUNDUP(b, t) (b), (t), (DIV_ROUND_UP(((t) - (b)), SZ_1K))

#ifdef CONFIG_ENABLE_VMALLOC_SAVING
/*
 * Print the interleaved vmalloc/lowmem layout when lowmem regions are
 * carved out of the vmalloc area (CONFIG_ENABLE_VMALLOC_SAVING).
 *
 * Walks the memory memblocks from highest to lowest address
 * (for_each_memblock_rev). For each lowmem region it prints a
 * "lowmem" line, and between adjacent regions it prints the hole as a
 * "vmalloc" line. The topmost vmalloc span (high_memory..VMALLOC_END)
 * is printed first, before the walk.
 */
static void print_vmalloc_lowmem_info(void)
{
	/* prev_reg remembers the previously printed (higher) region. */
	struct memblock_region *reg, *prev_reg = NULL;

	pr_notice(
		"	   vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n",
		MLM((unsigned long)high_memory, VMALLOC_END));

	for_each_memblock_rev(memory, reg) {
		phys_addr_t start_phys = reg->base;
		phys_addr_t end_phys = reg->base + reg->size;

		/* Regions entirely above the lowmem limit are highmem: skip. */
		if (start_phys > arm_lowmem_limit)
			continue;

		/* Clamp a region straddling the limit to its lowmem part. */
		if (end_phys > arm_lowmem_limit)
			end_phys = arm_lowmem_limit;

		if (prev_reg == NULL) {
			/* Highest lowmem region: no hole above it to report. */
			prev_reg = reg;

			pr_notice(
			"	   lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n",
			MLM((unsigned long)__va(start_phys),
			(unsigned long)__va(end_phys)));

			continue;
		}

		/*
		 * Hole between this region's end and the previous (higher)
		 * region's base is vmalloc space.
		 */
		pr_notice(
		"	   vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n",
		MLM((unsigned long)__va(end_phys),
		(unsigned long)__va(prev_reg->base)));


		pr_notice(
		"	   lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n",
		MLM((unsigned long)__va(start_phys),
		(unsigned long)__va(end_phys)));

		prev_reg = reg;
	}
}
#endif

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
@@ -524,9 +574,6 @@ void __init mem_init(void)

	mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	pr_notice("Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
@@ -534,28 +581,33 @@ void __init mem_init(void)
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n",
			MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_END));
#ifdef CONFIG_ENABLE_VMALLOC_SAVING
	print_vmalloc_lowmem_info();
#else
	pr_notice(
		   "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		   "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n",
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory));
#endif
	pr_notice(
#ifdef CONFIG_HIGHMEM
		   "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
		   "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"      .text : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4td kB)\n",

			MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_END),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
		   "      .text : 0x%pK - 0x%pK   (%4d kB)\n"
		   "      .init : 0x%pK - 0x%pK   (%4d kB)\n"
		   "      .data : 0x%pK - 0x%pK   (%4d kB)\n"
		   "       .bss : 0x%pK - 0x%pK   (%4d kB)\n",
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
@@ -569,10 +621,6 @@ void __init mem_init(void)
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
@@ -588,6 +636,10 @@ void __init mem_init(void)
#endif
}

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
+2 −1
Original line number Diff line number Diff line
@@ -92,6 +92,7 @@ void __init add_static_vm_early(struct static_vm *svm)
	void *vaddr;

	vm = &svm->vm;
	if (!vm_area_check_early(vm))
		vm_area_add_early(vm);
	vaddr = vm->addr;

+39 −2
Original line number Diff line number Diff line
@@ -1447,12 +1447,21 @@ static void __init map_lowmem(void)
	struct memblock_region *reg;
	phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
	struct static_vm *svm;
	phys_addr_t start;
	phys_addr_t end;
	unsigned long vaddr;
	unsigned long pfn;
	unsigned long length;
	unsigned int type;
	int nr = 0;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;
		start = reg->base;
		end = start + reg->size;
		nr++;

		if (memblock_is_nomap(reg))
			continue;
@@ -1504,6 +1513,34 @@ static void __init map_lowmem(void)
			}
		}
	}
	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));

	for_each_memblock(memory, reg) {
		struct vm_struct *vm;

		start = reg->base;
		end = start + reg->size;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			break;

		vm = &svm->vm;
		pfn = __phys_to_pfn(start);
		vaddr = __phys_to_virt(start);
		length = end - start;
		type = MT_MEMORY_RW;

		vm->addr = (void *)(vaddr & PAGE_MASK);
		vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(pfn);
		vm->flags = VM_LOWMEM;
		vm->flags |= VM_ARM_MTYPE(type);
		vm->caller = map_lowmem;
		add_static_vm_early(svm++);
		mark_vmalloc_reserved_area(vm->addr, vm->size);
	}
}

#ifdef CONFIG_ARM_PV_FIXUP
+5 −0
Original line number Diff line number Diff line
@@ -449,6 +449,11 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])
/*
 * Iterate over the regions of a memblock type (e.g. memory, reserved)
 * from the last (highest-address) region down to the first. Counterpart
 * of for_each_memblock, but in descending order.
 * NOTE(review): with cnt == 0 the start pointer would be regions - 1;
 * memblock types appear to always hold at least one region — confirm.
 */
#define for_each_memblock_rev(memblock_type, region)	\
	for (region = memblock.memblock_type.regions + \
			memblock.memblock_type.cnt - 1;	\
	     region >= memblock.memblock_type.regions;	\
	     region--)

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
Loading