
Commit fc36b8d3 authored by Lee Schermerhorn, committed by Linus Torvalds

mempolicy: use MPOL_F_LOCAL to Indicate Preferred Local Policy



Now that we're using "preferred local" policy for the system default, we need
to make this path as fast as possible.  Because of the variable size of the
mempolicy structure [based on the size of nodemasks], the preferred_node may be
in a different cacheline from the mode.  This can result in accessing an extra
cacheline in the normal case of system default policy.  I suspect this is the
cause of an observed 2-3% slowdown in page fault testing relative to a kernel
without this patch series.
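To make the layout concern concrete, here is a small, self-contained C sketch.
The struct below is hypothetical, not the kernel's actual struct mempolicy, and
the offsets assume a 64-bit machine with 64-byte cachelines; it only illustrates
how a member placed after a nodemask-sized field can land beyond the first
cacheline once the maximum node count grows:

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical layout for illustration only -- not the kernel's
     * struct mempolicy.  A nodemask is a bitmap sized by the maximum
     * node count, so members placed after it shift as that count grows. */
    #define DEMO_MAX_NODES 1024
    typedef struct {
    	unsigned long bits[DEMO_MAX_NODES / (8 * sizeof(unsigned long))];
    } demo_nodemask_t;

    struct demo_policy {
    	unsigned short mode;
    	unsigned short flags;		/* adjacent to mode */
    	demo_nodemask_t nodes;		/* 128 bytes with 1024 nodes */
    	short preferred_node;		/* pushed past the first cacheline */
    };

    int main(void)
    {
    	/* With 64-byte cachelines: mode and flags share line 0, while
    	 * preferred_node lands on line 2 (offset 136). */
    	printf("mode@%zu flags@%zu preferred_node@%zu\n",
    	       offsetof(struct demo_policy, mode),
    	       offsetof(struct demo_policy, flags),
    	       offsetof(struct demo_policy, preferred_node));
    	return 0;
    }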

To alleviate this, use an internal mode flag, MPOL_F_LOCAL, in the mempolicy
flags member, which is guaranteed [?] to be in the same cacheline as the mode
itself.
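A condensed sketch of the fast path this buys (not literal kernel code; pol and
nd stand in for the locals used in mm/mempolicy.c):

    /* Before this patch: detecting "local" means reading
     * pol->v.preferred_node, potentially a second cacheline. */
    if (pol->mode == MPOL_PREFERRED && pol->v.preferred_node == -1)
    	nd = numa_node_id();	/* local allocation */

    /* After: mode and flags are adjacent members, so the test
     * touches only the cacheline that holds the mode itself. */
    if (pol->mode == MPOL_PREFERRED && (pol->flags & MPOL_F_LOCAL))
    	nd = numa_node_id();	/* local allocation */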

Verified that the reworked mempolicy now performs slightly better on 25-rc8-mm1
for both anon and shmem segments with system default and vma [preferred local]
policy.

Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 53f2556b
Documentation/vm/numa_memory_policy.txt  +5 −6
@@ -176,12 +176,11 @@ Components of Memory Policies
 	containing the cpu where the allocation takes place.
 
 	    Internally, the Preferred policy uses a single node--the
-	    preferred_node member of struct mempolicy.  A "distinguished
-	    value of this preferred_node, currently '-1', is interpreted
-	    as "the node containing the cpu where the allocation takes
-	    place"--local allocation.  "Local" allocation policy can be
-	    viewed as a Preferred policy that starts at the node containing
-	    the cpu where the allocation takes place.
+	    preferred_node member of struct mempolicy.  When the internal
+	    mode flag MPOL_F_LOCAL is set, the preferred_node is ignored and
+	    the policy is interpreted as local allocation.  "Local" allocation
+	    policy can be viewed as a Preferred policy that starts at the node
+	    containing the cpu where the allocation takes place.
 
 	    It is possible for the user to specify that local allocation is
 	    always preferred by passing an empty nodemask with this mode.
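For reference, a minimal userspace sketch of that last point, using the libnuma
set_mempolicy() wrapper from <numaif.h> (link with -lnuma): passing an empty
nodemask with MPOL_PREFERRED requests local allocation, as documented above.

    #include <stdio.h>
    #include <numaif.h>

    int main(void)
    {
    	/* An empty nodemask (NULL pointer, maxnode 0) with MPOL_PREFERRED
    	 * asks for "local" allocation per the documentation above. */
    	if (set_mempolicy(MPOL_PREFERRED, NULL, 0) != 0)
    		perror("set_mempolicy");
    	return 0;
    }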
include/linux/mempolicy.h  +1 −0
@@ -50,6 +50,7 @@ enum {
  * are never OR'ed into the mode in mempolicy API arguments.
  */
 #define MPOL_F_SHARED  (1 << 0)	/* identify shared policies */
+#define MPOL_F_LOCAL   (1 << 1)	/* preferred local allocation */
 
 #ifdef __KERNEL__
mm/mempolicy.c  +22 −25
@@ -110,7 +110,7 @@ enum zone_type policy_zone = 0;
 struct mempolicy default_policy = {
 	.refcnt = ATOMIC_INIT(1), /* never free it */
 	.mode = MPOL_PREFERRED,
-	.v =  { .preferred_node =  -1 },
+	.flags = MPOL_F_LOCAL,
 };
 
 static const struct mempolicy_operations {
@@ -163,7 +163,7 @@ static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
 {
 	if (!nodes)
-		pol->v.preferred_node = -1;	/* local allocation */
+		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
 	else if (nodes_empty(*nodes))
 		return -EINVAL;			/*  no allowed nodes */
 	else
@@ -290,14 +290,15 @@ static void mpol_rebind_preferred(struct mempolicy *pol,
 	if (pol->flags & MPOL_F_STATIC_NODES) {
 		int node = first_node(pol->w.user_nodemask);
 
-		if (node_isset(node, *nodes))
+		if (node_isset(node, *nodes)) {
 			pol->v.preferred_node = node;
-		else
-			pol->v.preferred_node = -1;
+			pol->flags &= ~MPOL_F_LOCAL;
+		} else
+			pol->flags |= MPOL_F_LOCAL;
 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 		pol->v.preferred_node = first_node(tmp);
-	} else if (pol->v.preferred_node != -1) {
+	} else if (!(pol->flags & MPOL_F_LOCAL)) {
 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
 						   pol->w.cpuset_mems_allowed,
 						   *nodes);
@@ -645,7 +646,7 @@ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 		*nodes = p->v.nodes;
 		break;
 	case MPOL_PREFERRED:
-		if (p->v.preferred_node >= 0)
+		if (!(p->flags & MPOL_F_LOCAL))
 			node_set(p->v.preferred_node, *nodes);
 		/* else return empty node mask for local allocation */
 		break;
@@ -1324,13 +1325,12 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
 /* Return a zonelist indicated by gfp for node representing a mempolicy */
 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
 {
-	int nd;
+	int nd = numa_node_id();
 
 	switch (policy->mode) {
 	case MPOL_PREFERRED:
+		if (!(policy->flags & MPOL_F_LOCAL))
 			nd = policy->v.preferred_node;
-		if (nd < 0)
-			nd = numa_node_id();
 		break;
 	case MPOL_BIND:
 		/*
@@ -1339,16 +1339,13 @@ static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
 		 * current node is part of the mask, we use the zonelist for
 		 * the first node in the mask instead.
 		 */
-		nd = numa_node_id();
 		if (unlikely(gfp & __GFP_THISNODE) &&
 				unlikely(!node_isset(nd, policy->v.nodes)))
 			nd = first_node(policy->v.nodes);
 		break;
 	case MPOL_INTERLEAVE: /* should not happen */
-		nd = numa_node_id();
 		break;
 	default:
-		nd = 0;
 		BUG();
 	}
 	return node_zonelist(nd, gfp);
@@ -1379,14 +1376,15 @@ static unsigned interleave_nodes(struct mempolicy *policy)
  */
 unsigned slab_node(struct mempolicy *policy)
 {
-	if (!policy)
+	if (!policy || policy->flags & MPOL_F_LOCAL)
 		return numa_node_id();
 
 	switch (policy->mode) {
 	case MPOL_PREFERRED:
-		if (unlikely(policy->v.preferred_node >= 0))
+		/*
+		 * handled MPOL_F_LOCAL above
+		 */
 		return policy->v.preferred_node;
-		return numa_node_id();
 
 	case MPOL_INTERLEAVE:
 		return interleave_nodes(policy);
@@ -1666,7 +1664,8 @@ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
 	case MPOL_INTERLEAVE:
 		return nodes_equal(a->v.nodes, b->v.nodes);
 	case MPOL_PREFERRED:
-		return a->v.preferred_node == b->v.preferred_node;
+		return a->v.preferred_node == b->v.preferred_node &&
+			a->flags == b->flags;
 	default:
 		BUG();
 		return 0;
@@ -1946,7 +1945,7 @@ void numa_default_policy(void)
 }
 
 /*
- * "local" is pseudo-policy:  MPOL_PREFERRED with preferred_node == -1
+ * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
  * Used only for mpol_to_str()
  */
 #define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
@@ -1962,7 +1961,6 @@ static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 {
 	char *p = buffer;
 	int l;
-	int nid;
 	nodemask_t nodes;
 	unsigned short mode;
 	unsigned short flags = pol ? pol->flags : 0;
@@ -1979,11 +1977,10 @@ static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 
 	case MPOL_PREFERRED:
 		nodes_clear(nodes);
-		nid = pol->v.preferred_node;
-		if (nid < 0)
+		if (flags & MPOL_F_LOCAL)
 			mode = MPOL_LOCAL;	/* pseudo-policy */
 		else
-			node_set(nid, nodes);
+			node_set(pol->v.preferred_node, nodes);
 		break;
 
 	case MPOL_BIND:
@@ -2004,7 +2001,7 @@ static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 	strcpy(p, policy_types[mode]);
 	p += l;
 
-	if (flags) {
+	if (flags & MPOL_MODE_FLAGS) {
 		int need_bar = 0;
 
 		if (buffer + maxlen < p + 2)