
Commit 2a389610 authored by David Rientjes, committed by Linus Torvalds

mm, mempolicy: rename slab_node for clarity



slab_node() is actually a mempolicy function, so rename it to
mempolicy_slab_node() to make it clearer that it is used for processes with
mempolicies.

At the same time, clean up its code by saving numa_mem_id() in a local
variable (since we require a node with memory, not just any node) and
remove an obsolete comment that assumes the mempolicy is actually passed
into the function.
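
For reference, a rough sketch of how mempolicy_slab_node() reads with this
patch applied, assembled from the mm/mempolicy.c hunks below.  The switch
cases the patch does not touch are elided, and the MPOL_BIND label wrapping
the second hunk is my assumption (the diff context does not show it), so
treat this as an illustration rather than the full upstream source:

unsigned int mempolicy_slab_node(void)
{
	struct mempolicy *policy;
	int node = numa_mem_id();	/* nearest node with memory, cached once */

	if (in_interrupt())
		return node;

	policy = current->mempolicy;
	if (!policy || policy->flags & MPOL_F_LOCAL)
		return node;

	switch (policy->mode) {
	/* ... cases not touched by this patch are elided ... */

	case MPOL_BIND: {	/* assumed label; see note above */
		struct zonelist *zonelist;
		struct zone *zone;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);

		/* walk the local node's zonelist, constrained by the policy's nodemask */
		zonelist = &NODE_DATA(node)->node_zonelists[0];
		(void)first_zones_zonelist(zonelist, highest_zoneidx,
					   &policy->v.nodes, &zone);
		return zone ? zone->node : node;
	}

	/* ... */
	}
}

Caching numa_mem_id() up front means every fallback path now returns the
nearest node that actually has memory, rather than numa_node_id(), which can
name a memoryless node.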

Signed-off-by: David Rientjes <rientjes@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Tim Hockin <thockin@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 514ddb44
+1 −1
@@ -151,7 +151,7 @@ extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
 extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
 extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
 				const nodemask_t *mask);
-extern unsigned slab_node(void);
+extern unsigned int mempolicy_slab_node(void);

 extern enum zone_type policy_zone;

+6 −9
@@ -1782,21 +1782,18 @@ static unsigned interleave_nodes(struct mempolicy *policy)
 /*
  * Depending on the memory policy provide a node from which to allocate the
  * next slab entry.
- * @policy must be protected by freeing by the caller.  If @policy is
- * the current task's mempolicy, this protection is implicit, as only the
- * task can change it's policy.  The system default policy requires no
- * such protection.
  */
-unsigned slab_node(void)
+unsigned int mempolicy_slab_node(void)
 {
 	struct mempolicy *policy;
+	int node = numa_mem_id();

 	if (in_interrupt())
-		return numa_node_id();
+		return node;

 	policy = current->mempolicy;
 	if (!policy || policy->flags & MPOL_F_LOCAL)
-		return numa_node_id();
+		return node;

 	switch (policy->mode) {
 	case MPOL_PREFERRED:
@@ -1816,11 +1813,11 @@ unsigned slab_node(void)
 		struct zonelist *zonelist;
 		struct zone *zone;
 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
-		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
+		zonelist = &NODE_DATA(node)->node_zonelists[0];
 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->v.nodes,
							&zone);
-		return zone ? zone->node : numa_node_id();
+		return zone ? zone->node : node;
 	}

 	default:
+2 −2
@@ -3042,7 +3042,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
 		nid_alloc = cpuset_slab_spread_node();
 	else if (current->mempolicy)
-		nid_alloc = slab_node();
+		nid_alloc = mempolicy_slab_node();
 	if (nid_alloc != nid_here)
 		return ____cache_alloc_node(cachep, flags, nid_alloc);
 	return NULL;
@@ -3074,7 +3074,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)

 retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
-	zonelist = node_zonelist(slab_node(), flags);
+	zonelist = node_zonelist(mempolicy_slab_node(), flags);

 retry:
 	/*
+1 −1
@@ -1685,7 +1685,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,

 	do {
 		cpuset_mems_cookie = read_mems_allowed_begin();
-		zonelist = node_zonelist(slab_node(), flags);
+		zonelist = node_zonelist(mempolicy_slab_node(), flags);
 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 			struct kmem_cache_node *n;