Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bba65054 authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm: Kconfig: Add support for config size of purging vmap_area"

parents 5cc19bc1 b85b3f46
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1633,7 +1633,7 @@ config ARM_MODULE_PLTS

choice
	prompt "Virtual Memory Reclaim"
	default NO_VM_RECLAIM
	default ENABLE_VMALLOC_SAVING
	help
	  Select the method of reclaiming virtual memory. Two values
	  are allowed to choose, one is NO_VM_RECLAIM, the other is
+2 −1
Original line number Diff line number Diff line
@@ -93,6 +93,7 @@ void __init add_static_vm_early(struct static_vm *svm)
	void *vaddr;

	vm = &svm->vm;
	if (!vm_area_check_early(vm))
		vm_area_add_early(vm);
	vaddr = vm->addr;

+39 −2
Original line number Diff line number Diff line
@@ -1452,12 +1452,21 @@ static void __init map_lowmem(void)
	struct memblock_region *reg;
	phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
	struct static_vm *svm;
	phys_addr_t start;
	phys_addr_t end;
	unsigned long vaddr;
	unsigned long pfn;
	unsigned long length;
	unsigned int type;
	int nr = 0;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;
		start = reg->base;
		end = start + reg->size;
		nr++;

		if (memblock_is_nomap(reg))
			continue;
@@ -1509,6 +1518,34 @@ static void __init map_lowmem(void)
			}
		}
	}
	svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm));

	for_each_memblock(memory, reg) {
		struct vm_struct *vm;

		start = reg->base;
		end = start + reg->size;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			break;

		vm = &svm->vm;
		pfn = __phys_to_pfn(start);
		vaddr = __phys_to_virt(start);
		length = end - start;
		type = MT_MEMORY_RW;

		vm->addr = (void *)(vaddr & PAGE_MASK);
		vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(pfn);
		vm->flags = VM_LOWMEM;
		vm->flags |= VM_ARM_MTYPE(type);
		vm->caller = map_lowmem;
		add_static_vm_early(svm++);
		mark_vmalloc_reserved_area(vm->addr, vm->size);
	}
}

#ifdef CONFIG_ARM_PV_FIXUP
+5 −0
Original line number Diff line number Diff line
@@ -675,6 +675,10 @@ unsigned long vmalloc_to_pfn(const void *addr);
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */

#ifdef CONFIG_ENABLE_VMALLOC_SAVING
extern bool is_vmalloc_addr(const void *x);
#else
static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
@@ -685,6 +689,7 @@ static inline bool is_vmalloc_addr(const void *x)
	return false;
#endif
}
#endif //CONFIG_ENABLE_VMALLOC_SAVING

#ifndef is_ioremap_addr
#define is_ioremap_addr(x) is_vmalloc_addr(x)
+13 −0
Original line number Diff line number Diff line
@@ -27,6 +27,7 @@ struct notifier_block; /* in notifier.h */
 * vfree_atomic().
 */
#define VM_FLUSH_RESET_PERMS	0x00000100      /* Reset direct map and flush TLB on unmap */
#define VM_LOWMEM	0x00000200      /* Tracking of direct mapped lowmem */

/* bits [20..32] reserved for arch specific ioremap internals */

@@ -203,6 +204,13 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
extern __init int vm_area_check_early(struct vm_struct *vm);
#ifdef CONFIG_ENABLE_VMALLOC_SAVING
extern void mark_vmalloc_reserved_area(void *addr, unsigned long size);
#else
static inline void mark_vmalloc_reserved_area(void *addr, unsigned long size)
{ };
#endif

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
@@ -228,7 +236,12 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
#endif

#ifdef CONFIG_MMU
#ifdef CONFIG_ENABLE_VMALLOC_SAVING
extern unsigned long total_vmalloc_size;
#define VMALLOC_TOTAL total_vmalloc_size
#else
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#endif
#else
#define VMALLOC_TOTAL 0UL
#endif
Loading