
Commit a57a4988 authored by Joonsoo Kim, committed by Pekka Enberg

slab: use __GFP_COMP flag for allocating slab pages



If we use the 'struct page' of the first page as the 'struct slab', there is
no advantage to not using __GFP_COMP. So use the __GFP_COMP flag in all cases.

Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
parent 56f295ef
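
Editorial note, as background to the diff below (not part of the commit itself): __GFP_COMP turns a high-order allocation into a compound page, where every tail page points back to the head page. A minimal sketch of that property, using a hypothetical demo_alloc() helper (assumed here, not from the patch) and assuming order >= 1:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper, for illustration only; not code from this patch. */
static struct page *demo_alloc(unsigned int order)
{
	/* __GFP_COMP marks the whole 2^order block as one compound page. */
	struct page *head = alloc_pages(GFP_KERNEL | __GFP_COMP, order);

	if (!head)
		return NULL;

	/* Any tail page resolves back to the head (valid since order >= 1)... */
	BUG_ON(compound_head(head + 1) != head);

	/* ...so slab state set once on the head covers the whole block. */
	__SetPageSlab(head);
	return head;
}

This is the property that lets the per-page loops in the hunks below collapse into single head-page operations.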
mm/slab.c +9 −34
@@ -1718,15 +1718,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 {
 	struct page *page;
 	int nr_pages;
-	int i;
-
-#ifndef CONFIG_MMU
-	/*
-	 * Nommu uses slab's for process anonymous memory allocations, and thus
-	 * requires __GFP_COMP to properly refcount higher order allocations
-	 */
-	flags |= __GFP_COMP;
-#endif
 
 	flags |= cachep->allocflags;
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
@@ -1750,12 +1741,9 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 	else
 		add_zone_page_state(page_zone(page),
			NR_SLAB_UNRECLAIMABLE, nr_pages);
-	for (i = 0; i < nr_pages; i++) {
-		__SetPageSlab(page + i);
-
-		if (page->pfmemalloc)
-			SetPageSlabPfmemalloc(page + i);
-	}
+	__SetPageSlab(page);
+	if (page->pfmemalloc)
+		SetPageSlabPfmemalloc(page);
 	memcg_bind_pages(cachep, cachep->gfporder);
 
 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
@@ -1775,8 +1763,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
  */
 static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 {
-	unsigned long i = (1 << cachep->gfporder);
-	const unsigned long nr_freed = i;
+	const unsigned long nr_freed = (1 << cachep->gfporder);
 
 	kmemcheck_free_shadow(page, cachep->gfporder);
 
@@ -1787,12 +1774,9 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 		sub_zone_page_state(page_zone(page),
				NR_SLAB_UNRECLAIMABLE, nr_freed);
 
-	__ClearPageSlabPfmemalloc(page);
-	while (i--) {
-		BUG_ON(!PageSlab(page));
-		__ClearPageSlab(page);
-		page++;
-	}
+	BUG_ON(!PageSlab(page));
+	__ClearPageSlabPfmemalloc(page);
+	__ClearPageSlab(page);
 
 	memcg_release_pages(cachep, cachep->gfporder);
 	if (current->reclaim_state)
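
Editorial note on the free path (a hedged sketch, not code from the patch): a compound page is returned to the buddy allocator in one call on the head page, so no per-page walk is needed when freeing either. A hypothetical demo_free() helper mirroring the new kmem_freepages() shape:

/* Hypothetical helper, for illustration only. */
static void demo_free(struct page *head, unsigned int order)
{
	BUG_ON(!PageSlab(head));
	__ClearPageSlabPfmemalloc(head);
	__ClearPageSlab(head);
	__free_pages(head, order);	/* releases all 2^order pages at once */
}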
@@ -2362,7 +2346,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	cachep->colour = left_over / cachep->colour_off;
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;
-	cachep->allocflags = 0;
+	cachep->allocflags = __GFP_COMP;
 	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
 		cachep->allocflags |= GFP_DMA;
 	cachep->size = size;
@@ -2729,17 +2713,8 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
			   struct page *page)
 {
-	int nr_pages;
-
-	nr_pages = 1;
-	if (likely(!PageCompound(page)))
-		nr_pages <<= cache->gfporder;
-
-	do {
-		page->slab_cache = cache;
-		page->slab_page = slab;
-		page++;
-	} while (--nr_pages);
+	page->slab_cache = cache;
+	page->slab_page = slab;
 }
 
 /*
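
Editorial note on the lookup side (a sketch under the assumption that object-to-cache lookups of this era go through the head page via virt_to_head_page(); the demo_obj_to_cache() helper is hypothetical): storing slab_cache and slab_page only on the head page is safe because any object address still resolves to the head:

/* Hypothetical lookup helper, for illustration only. */
static struct kmem_cache *demo_obj_to_cache(const void *obj)
{
	/* virt_to_head_page() relies on the slab page being compound... */
	struct page *page = virt_to_head_page(obj);

	/* ...so the back-pointer only needs to live on the head page. */
	return page->slab_cache;
}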