
Commit c67a8a68 authored by Vladimir Davydov, committed by Linus Torvalds

memcg, slab: merge memcg_{bind,release}_pages to memcg_{un}charge_slab



Currently we have two pairs of kmemcg-related functions that are called on
slab alloc/free.  The first is memcg_{bind,release}_pages, which tracks the
total number of pages allocated to a kmem cache.  The second is
memcg_{un}charge_slab, which {un}charges slab pages against the kmemcg
resource counter.  Let's just merge them to keep the code clean.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1e32e77f
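
For reference, the net effect of the merge: __memcg_charge_slab() charges the
kmemcg counter and bumps the cache's nr_pages only when the charge succeeds,
and __memcg_uncharge_slab() mirrors it, so the slab allocators no longer pair
a charge call with a separate bind/release call.  Below is a standalone C
sketch that models this behaviour in userspace; the types and helpers are
simplified stand-ins for the kernel's res_counter machinery, not code from
the commit:

#include <stdio.h>
#include <stdatomic.h>

#define PAGE_SIZE 4096UL

struct memcg_params {
	unsigned long limit;	/* kmem limit, in bytes */
	unsigned long usage;	/* currently charged bytes */
	atomic_int nr_pages;	/* pages held by this cache */
};

/* Stand-in for memcg_charge_kmem(): 0 on success, -1 if over limit. */
static int charge_kmem(struct memcg_params *p, unsigned long size)
{
	if (p->usage + size > p->limit)
		return -1;
	p->usage += size;
	return 0;
}

/* Model of __memcg_charge_slab(): account pages only if the charge took. */
static int charge_slab(struct memcg_params *p, int order)
{
	int res = charge_kmem(p, PAGE_SIZE << order);

	if (!res)
		atomic_fetch_add(&p->nr_pages, 1 << order);
	return res;
}

/* Model of __memcg_uncharge_slab(): the mirror operation. */
static void uncharge_slab(struct memcg_params *p, int order)
{
	p->usage -= PAGE_SIZE << order;
	atomic_fetch_sub(&p->nr_pages, 1 << order);
}

int main(void)
{
	struct memcg_params p = { .limit = 8 * PAGE_SIZE };

	if (!charge_slab(&p, 1))	/* order-1 slab: two pages */
		printf("charged, nr_pages=%d\n", atomic_load(&p.nr_pages));
	uncharge_slab(&p, 1);
	printf("freed, nr_pages=%d\n", atomic_load(&p.nr_pages));
	return 0;
}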
include/linux/memcontrol.h  +2 −2
@@ -506,8 +506,8 @@ void memcg_update_array_size(int num_groups);
 struct kmem_cache *
 __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

-int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size);
-void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size);
+int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
+void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);

 int __kmem_cache_destroy_memcg_children(struct kmem_cache *s);

mm/memcontrol.c  +20 −2
@@ -2954,7 +2954,7 @@ static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
 }
 #endif

-int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
+static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 {
 	struct res_counter *fail_res;
 	int ret = 0;
@@ -2992,7 +2992,7 @@ int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 	return ret;
 }

-void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
+static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
 {
 	res_counter_uncharge(&memcg->res, size);
 	if (do_swap_account)
@@ -3390,6 +3390,24 @@ static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
 	__memcg_create_cache_enqueue(memcg, cachep);
 	memcg_resume_kmem_account();
 }
+
+int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
+{
+	int res;
+
+	res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp,
+				PAGE_SIZE << order);
+	if (!res)
+		atomic_add(1 << order, &cachep->memcg_params->nr_pages);
+	return res;
+}
+
+void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
+{
+	memcg_uncharge_kmem(cachep->memcg_params->memcg, PAGE_SIZE << order);
+	atomic_sub(1 << order, &cachep->memcg_params->nr_pages);
+}
+
 /*
  * Return the kmem_cache we're supposed to use for a slab allocation.
  * We try to use the current memcg's version of the cache.
mm/slab.c  +0 −2
@@ -1712,7 +1712,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 	__SetPageSlab(page);
 	if (page->pfmemalloc)
 		SetPageSlabPfmemalloc(page);
-	memcg_bind_pages(cachep, cachep->gfporder);

 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
 		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
@@ -1748,7 +1747,6 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 	page_mapcount_reset(page);
 	page->mapping = NULL;

-	memcg_release_pages(cachep, cachep->gfporder);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
 	__free_pages(page, cachep->gfporder);
mm/slab.h  +2 −23
@@ -121,18 +121,6 @@ static inline bool is_root_cache(struct kmem_cache *s)
 	return !s->memcg_params || s->memcg_params->is_root_cache;
 }

-static inline void memcg_bind_pages(struct kmem_cache *s, int order)
-{
-	if (!is_root_cache(s))
-		atomic_add(1 << order, &s->memcg_params->nr_pages);
-}
-
-static inline void memcg_release_pages(struct kmem_cache *s, int order)
-{
-	if (!is_root_cache(s))
-		atomic_sub(1 << order, &s->memcg_params->nr_pages);
-}
-
 static inline bool slab_equal_or_root(struct kmem_cache *s,
 					struct kmem_cache *p)
 {
@@ -198,8 +186,7 @@ static __always_inline int memcg_charge_slab(struct kmem_cache *s,
 		return 0;
 	if (is_root_cache(s))
 		return 0;
-	return memcg_charge_kmem(s->memcg_params->memcg, gfp,
-				 PAGE_SIZE << order);
+	return __memcg_charge_slab(s, gfp, order);
 }

 static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
@@ -208,7 +195,7 @@ static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
 		return;
 	if (is_root_cache(s))
 		return;
-	memcg_uncharge_kmem(s->memcg_params->memcg, PAGE_SIZE << order);
+	__memcg_uncharge_slab(s, order);
 }
 #else
 static inline bool is_root_cache(struct kmem_cache *s)
@@ -216,14 +203,6 @@ static inline bool is_root_cache(struct kmem_cache *s)
 	return true;
 }

-static inline void memcg_bind_pages(struct kmem_cache *s, int order)
-{
-}
-
-static inline void memcg_release_pages(struct kmem_cache *s, int order)
-{
-}
-
 static inline bool slab_equal_or_root(struct kmem_cache *s,
 				      struct kmem_cache *p)
 {
mm/slub.c  +0 −2
@@ -1422,7 +1422,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)

 	order = compound_order(page);
 	inc_slabs_node(s, page_to_nid(page), page->objects);
-	memcg_bind_pages(s, order);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page->pfmemalloc)
@@ -1473,7 +1472,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__ClearPageSlabPfmemalloc(page);
 	__ClearPageSlab(page);

-	memcg_release_pages(s, order);
 	page_mapcount_reset(page);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;