
Commit 40b44137 authored by Joonsoo Kim, committed by Linus Torvalds

mm/slab: clean up DEBUG_PAGEALLOC processing code



Currently, the open-coded check for a DEBUG_PAGEALLOC cache is repeated at
several sites.  This makes the code hard to read and hard to change.

This patch cleans that code up by moving the check into helpers.  The
following patch will change the criteria for a DEBUG_PAGEALLOC cache, so
this clean-up will help it, too.
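
For illustration, the shape of the clean-up is: the repeated open-coded test
(debug_pagealloc enabled, off-slab cache, object size a multiple of PAGE_SIZE)
moves into a single predicate, is_debug_pagealloc_cache(), and the map/unmap
call that callers previously open-coded moves into one wrapper,
slab_kernel_map().  The sketch below mirrors that structure as a standalone,
compilable C program; struct kmem_cache, PAGE_SIZE, OFF_SLAB() and
kernel_map_pages() are simplified stand-ins here, not the kernel definitions,
and the stack-trace recording that the real helper does via store_stackinfo()
is omitted.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types and macros used by the helpers. */
#define PAGE_SIZE 4096UL

struct kmem_cache {
	unsigned long size;   /* object size including debug padding */
	bool off_slab;        /* freelist kept outside the slab page */
};

static bool debug_pagealloc_enabled(void) { return true; }   /* stub */
#define OFF_SLAB(c) ((c)->off_slab)

/* Stub: the real kernel_map_pages() changes page-table permissions. */
static void kernel_map_pages(void *obj, unsigned long nr_pages, int map)
{
	printf("%smap %lu page(s) at %p\n", map ? "" : "un", nr_pages, obj);
}

/* One place that decides whether a cache gets the page map/unmap treatment. */
static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
{
	return debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
	       (cachep->size % PAGE_SIZE) == 0;
}

/* Callers just say "map" or "unmap"; the eligibility check lives here. */
static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
{
	if (!is_debug_pagealloc_cache(cachep))
		return;

	kernel_map_pages(objp, cachep->size / PAGE_SIZE, map);
}

int main(void)
{
	struct kmem_cache cache = { .size = 2 * PAGE_SIZE, .off_slab = true };
	char obj[2 * PAGE_SIZE];

	slab_kernel_map(&cache, obj, 0);   /* e.g. on free: unmap the object */
	slab_kernel_map(&cache, obj, 1);   /* e.g. on alloc: map it back     */
	return 0;
}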

[akpm@linux-foundation.org: fix build with CONFIG_DEBUG_PAGEALLOC=n]
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 40323278
+8 −4
@@ -2196,13 +2196,17 @@ kernel_map_pages(struct page *page, int numpages, int enable)
 #ifdef CONFIG_HIBERNATION
 extern bool kernel_page_present(struct page *page);
 #endif	/* CONFIG_HIBERNATION */
-#else
+#else	/* CONFIG_DEBUG_PAGEALLOC */
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable) {}
 #ifdef CONFIG_HIBERNATION
 static inline bool kernel_page_present(struct page *page) { return true; }
 #endif	/* CONFIG_HIBERNATION */
-#endif
+static inline bool debug_pagealloc_enabled(void)
+{
+	return false;
+}
+#endif	/* CONFIG_DEBUG_PAGEALLOC */
 
 #ifdef __HAVE_ARCH_GATE_AREA
 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
+49 −48
@@ -1661,6 +1661,14 @@ static void kmem_rcu_free(struct rcu_head *head)
 }
 
 #if DEBUG
+static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
+{
+	if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
+		(cachep->size % PAGE_SIZE) == 0)
+		return true;
+
+	return false;
+}
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
@@ -1694,6 +1702,23 @@ static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
 	}
 	*addr++ = 0x87654321;
 }
+
+static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
+				int map, unsigned long caller)
+{
+	if (!is_debug_pagealloc_cache(cachep))
+		return;
+
+	if (caller)
+		store_stackinfo(cachep, objp, caller);
+
+	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
+}
+
+#else
+static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
+				int map, unsigned long caller) {}
+
 #endif
 
 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
@@ -1772,6 +1797,9 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 	int size, i;
 	int lines = 0;
 
+	if (is_debug_pagealloc_cache(cachep))
+		return;
+
 	realobj = (char *)objp + obj_offset(cachep);
 	size = cachep->object_size;
 
@@ -1837,17 +1865,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
 		void *objp = index_to_obj(cachep, page, i);
 
 		if (cachep->flags & SLAB_POISON) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
-			if (debug_pagealloc_enabled() &&
-				cachep->size % PAGE_SIZE == 0 &&
-					OFF_SLAB(cachep))
-				kernel_map_pages(virt_to_page(objp),
-					cachep->size / PAGE_SIZE, 1);
-			else
-				check_poison_obj(cachep, objp);
-#else
+			slab_kernel_map(cachep, objp, 1, 0);
 			check_poison_obj(cachep, objp);
-#endif
 		}
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
@@ -2226,16 +2245,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (flags & CFLGS_OFF_SLAB) {
 		/* really off slab. No need for manual alignment */
 		freelist_size = calculate_freelist_size(cachep->num, 0);
-
-#ifdef CONFIG_PAGE_POISONING
-		/* If we're going to use the generic kernel_map_pages()
-		 * poisoning, then it's going to smash the contents of
-		 * the redzone and userword anyhow, so switch them off.
-		 */
-		if (debug_pagealloc_enabled() &&
-			size % PAGE_SIZE == 0 && flags & SLAB_POISON)
-			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
-#endif
 	}
 
 	cachep->colour_off = cache_line_size();
@@ -2251,7 +2260,19 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	cachep->size = size;
 	cachep->reciprocal_buffer_size = reciprocal_value(size);
 
-	if (flags & CFLGS_OFF_SLAB) {
+#if DEBUG
+	/*
+	 * If we're going to use the generic kernel_map_pages()
+	 * poisoning, then it's going to smash the contents of
+	 * the redzone and userword anyhow, so switch them off.
+	 */
+	if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
+		(cachep->flags & SLAB_POISON) &&
+		is_debug_pagealloc_cache(cachep))
+		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
+#endif
+
+	if (OFF_SLAB(cachep)) {
 		cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
 		/*
 		 * This is a possibility for one of the kmalloc_{dma,}_caches.
@@ -2475,9 +2496,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
 	for (i = 0; i < cachep->num; i++) {
 		void *objp = index_to_obj(cachep, page, i);
 #if DEBUG
-		/* need to poison the objs? */
-		if (cachep->flags & SLAB_POISON)
-			poison_obj(cachep, objp, POISON_FREE);
 		if (cachep->flags & SLAB_STORE_USER)
 			*dbg_userword(cachep, objp) = NULL;
 
@@ -2501,10 +2519,11 @@ static void cache_init_objs(struct kmem_cache *cachep,
 				slab_error(cachep, "constructor overwrote the"
 					   " start of an object");
 		}
-		if ((cachep->size % PAGE_SIZE) == 0 &&
-			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
-			kernel_map_pages(virt_to_page(objp),
-					 cachep->size / PAGE_SIZE, 0);
+		/* need to poison the objs? */
+		if (cachep->flags & SLAB_POISON) {
+			poison_obj(cachep, objp, POISON_FREE);
+			slab_kernel_map(cachep, objp, 0, 0);
+		}
 #else
 		if (cachep->ctor)
 			cachep->ctor(objp);
@@ -2716,18 +2735,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 
 	set_obj_status(page, objnr, OBJECT_FREE);
 	if (cachep->flags & SLAB_POISON) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
-		if (debug_pagealloc_enabled() &&
-			(cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
-			store_stackinfo(cachep, objp, caller);
-			kernel_map_pages(virt_to_page(objp),
-					 cachep->size / PAGE_SIZE, 0);
-		} else {
-			poison_obj(cachep, objp, POISON_FREE);
-		}
-#else
 		poison_obj(cachep, objp, POISON_FREE);
-#endif
+		slab_kernel_map(cachep, objp, 0, caller);
 	}
 	return objp;
 }
@@ -2862,16 +2871,8 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 	if (!objp)
 		return objp;
 	if (cachep->flags & SLAB_POISON) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
-		if (debug_pagealloc_enabled() &&
-			(cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
-			kernel_map_pages(virt_to_page(objp),
-					 cachep->size / PAGE_SIZE, 1);
-		else
-			check_poison_obj(cachep, objp);
-#else
 		check_poison_obj(cachep, objp);
-#endif
+		slab_kernel_map(cachep, objp, 1, 0);
 		poison_obj(cachep, objp, POISON_INUSE);
 	}
 	if (cachep->flags & SLAB_STORE_USER)