
Commit c8dad2bb authored by Jan Blunck, committed by Linus Torvalds

memcg: reduce size of mem_cgroup by using nr_cpu_ids

As Jan Blunck <jblunck@suse.de> pointed out, sizing memcg's per-cpu
statistics array by NR_CPUS is wasteful: NR_CPUS is a compile-time
upper bound that, especially on distribution kernels, can be far larger
than the number of CPU ids the running system will ever use.

This patch changes mem_cgroup's cpustat allocation to be based on
nr_cpu_ids rather than NR_CPUS. Because the size of struct mem_cgroup
is then only known at runtime, the statically allocated init_mem_cgroup
is dropped and the root cgroup's mem_cgroup is allocated with
mem_cgroup_alloc() like any other.
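
The core trick is a trailing zero-length array (a GNU C extension;
later code would use a C99 flexible array member, cpustat[]) combined
with an allocation sized at runtime. Below is a minimal userspace
sketch of that pattern, not kernel code: stat_cpu, cgroup_stats,
stats_alloc and the hard-coded nr_cpu_ids value are illustrative
stand-ins that only mirror the shapes in the patch.

/* Build with: gcc -Wall sketch.c -o sketch */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct stat_cpu {
	long count[4];			/* per-cpu counters */
};

struct cgroup_stats {
	long flags;			/* ordinary members first... */
	struct stat_cpu cpustat[0];	/* ...zero-length array last */
};

/* Allocate the struct plus nr_cpu_ids trailing entries, zeroed. */
static struct cgroup_stats *stats_alloc(int nr_cpu_ids)
{
	size_t size = sizeof(struct cgroup_stats) +
		      nr_cpu_ids * sizeof(struct stat_cpu);
	struct cgroup_stats *s = malloc(size);

	if (s)
		memset(s, 0, size);
	return s;
}

int main(void)
{
	int nr_cpu_ids = 4;	/* stand-in for the kernel's nr_cpu_ids */
	struct cgroup_stats *s = stats_alloc(nr_cpu_ids);

	if (!s)
		return 1;
	s->cpustat[3].count[0]++;	/* indexes like a normal array */
	printf("cpu 3, counter 0: %ld\n", s->cpustat[3].count[0]);
	free(s);
	return 0;
}

Because the array must stay at the end of the struct for the trailing
storage to line up, the patch also adds the "This must be placed at the
end of memcg" comment in the diff below.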

Reviewed-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f817ed48
+18 −17
@@ -60,7 +60,7 @@ struct mem_cgroup_stat_cpu {
 } ____cacheline_aligned_in_smp;
 
 struct mem_cgroup_stat {
-	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
+	struct mem_cgroup_stat_cpu cpustat[0];
 };
 
 /*
@@ -129,11 +129,10 @@ struct mem_cgroup {
 
 	int	prev_priority;	/* for recording reclaim priority */
 	/*
-	 * statistics.
+	 * statistics. This must be placed at the end of memcg.
 	 */
 	struct mem_cgroup_stat stat;
 };
-static struct mem_cgroup init_mem_cgroup;
 
 enum charge_type {
 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -1293,23 +1292,30 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 	kfree(mem->info.nodeinfo[node]);
 }
 
+static int mem_cgroup_size(void)
+{
+	int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
+	return sizeof(struct mem_cgroup) + cpustat_size;
+}
+
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
 	struct mem_cgroup *mem;
+	int size = mem_cgroup_size();
 
-	if (sizeof(*mem) < PAGE_SIZE)
-		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
+	if (size < PAGE_SIZE)
+		mem = kmalloc(size, GFP_KERNEL);
 	else
-		mem = vmalloc(sizeof(*mem));
+		mem = vmalloc(size);
 
 	if (mem)
-		memset(mem, 0, sizeof(*mem));
+		memset(mem, 0, size);
 	return mem;
 }
 
 static void mem_cgroup_free(struct mem_cgroup *mem)
 {
-	if (sizeof(*mem) < PAGE_SIZE)
+	if (mem_cgroup_size() < PAGE_SIZE)
 		kfree(mem);
 	else
 		vfree(mem);
@@ -1322,13 +1328,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 	struct mem_cgroup *mem;
 	int node;
 
-	if (unlikely((cont->parent) == NULL)) {
-		mem = &init_mem_cgroup;
-	} else {
-		mem = mem_cgroup_alloc();
-		if (!mem)
-			return ERR_PTR(-ENOMEM);
-	}
+	mem = mem_cgroup_alloc();
+	if (!mem)
+		return ERR_PTR(-ENOMEM);
 
 	res_counter_init(&mem->res);
 
@@ -1340,7 +1342,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 free_out:
	for_each_node_state(node, N_POSSIBLE)
 		free_mem_cgroup_per_zone_info(mem, node);
-	if (cont->parent != NULL)
-		mem_cgroup_free(mem);
+	mem_cgroup_free(mem);
 	return ERR_PTR(-ENOMEM);
 }
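
To put a number on the saving (illustrative figures, not taken from the
patch): struct mem_cgroup_stat_cpu is padded out to a cacheline by
____cacheline_aligned_in_smp, so assume 128 bytes per entry. A kernel
built with NR_CPUS=4096 then reserved 4096 * 128 bytes = 512 KiB of
cpustat in every mem_cgroup, whereas on a 4-CPU machine
(nr_cpu_ids == 4) the runtime-sized array needs only
4 * 128 bytes = 512 bytes.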