Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 244d63ee authored by David Rientjes, committed by Linus Torvalds
Browse files

mm, vmalloc: remove VM_VPAGES



VM_VPAGES is unnecessary, it's easier to check is_vmalloc_addr() when
reading /proc/vmallocinfo.

[akpm@linux-foundation.org: remove VM_VPAGES reference via kvfree()]
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 14669347
Loading
Loading
Loading
Loading
+0 −1
Original line number Original line Diff line number Diff line
@@ -14,7 +14,6 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
 #define VM_ALLOC		0x00000002	/* vmalloc() */
 #define VM_MAP			0x00000004	/* vmap()ed pages */
 #define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
-#define VM_VPAGES		0x00000010	/* buffer for pages was vmalloc'ed */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040      /* don't add guard page */
 #define VM_KASAN		0x00000080      /* has allocated kasan shadow memory */
+2 −6
Original line number Original line Diff line number Diff line
@@ -1479,10 +1479,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
 			__free_kmem_pages(page, 0);
 		}
 
-		if (area->flags & VM_VPAGES)
-			vfree(area->pages);
-		else
-			kfree(area->pages);
+		kvfree(area->pages);
 	}
 
 	kfree(area);
@@ -1592,7 +1589,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
 				PAGE_KERNEL, node, area->caller);
-		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size, nested_gfp, node);
 	}
@@ -2650,7 +2646,7 @@ static int s_show(struct seq_file *m, void *p)
 	if (v->flags & VM_USERMAP)
 		seq_puts(m, " user");
 
-	if (v->flags & VM_VPAGES)
+	if (is_vmalloc_addr(v->pages))
 		seq_puts(m, " vpages");
 
 	show_numa_info(m, v);