
Commit 2f5f9486 authored by Andi Kleen, committed by Linus Torvalds

mm: change alloc_pages_vma to pass down the policy node for local policy



Currently alloc_pages_vma() always uses the local node as the policy node for
the LOCAL policy.  Pass this node down as an argument instead.

No behaviour change from this patch, but it will be needed for follow-on patches.
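For illustration only (not part of this patch): a hedged sketch of a caller under the new interface, assuming the post-patch prototype alloc_pages_vma(gfp, order, vma, addr, node). The helper name example_alloc is hypothetical; existing callers simply pass numa_node_id() to keep the current local-node behaviour, while later patches can pass a different node.

/* Hypothetical caller sketch, assuming the post-patch interface. */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/topology.h>

static struct page *example_alloc(struct vm_area_struct *vma,
				  unsigned long addr)
{
	/* Preserve the old behaviour explicitly: allocate on the local node. */
	return alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
			       numa_node_id());
}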

Acked-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b8bc1dd3
include/linux/gfp.h +5 −4
@@ -332,16 +332,17 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
 	return alloc_pages_current(gfp_mask, order);
 }
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
-			struct vm_area_struct *vma, unsigned long addr);
+			struct vm_area_struct *vma, unsigned long addr,
+			int node);
 #else
 #define alloc_pages(gfp_mask, order) \
 		alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr)	\
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node)	\
 	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 #define alloc_page_vma(gfp_mask, vma, addr)			\
-	alloc_pages_vma(gfp_mask, 0, vma, addr)
+	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())

 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
mm/huge_memory.c +1 −1
@@ -653,7 +653,7 @@ static inline struct page *alloc_hugepage_vma(int defrag,
 					      unsigned long haddr)
 {
 	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
-			       HPAGE_PMD_ORDER, vma, haddr);
+			       HPAGE_PMD_ORDER, vma, haddr, numa_node_id());
 }

 #ifndef CONFIG_NUMA
mm/mempolicy.c +5 −6
@@ -1524,10 +1524,9 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
 }

 /* Return a zonelist indicated by gfp for node representing a mempolicy */
-static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
+static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
+	int nd)
 {
-	int nd = numa_node_id();
-
 	switch (policy->mode) {
 	case MPOL_PREFERRED:
 		if (!(policy->flags & MPOL_F_LOCAL))
@@ -1679,7 +1678,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
 				huge_page_shift(hstate_vma(vma))), gfp_flags);
 	} else {
-		zl = policy_zonelist(gfp_flags, *mpol);
+		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
 		if ((*mpol)->mode == MPOL_BIND)
 			*nodemask = &(*mpol)->v.nodes;
 	}
@@ -1820,7 +1819,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
  */
 struct page *
 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
-		unsigned long addr)
+		unsigned long addr, int node)
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 	struct zonelist *zl;
@@ -1836,7 +1835,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 		put_mems_allowed();
 		return page;
 	}
-	zl = policy_zonelist(gfp, pol);
+	zl = policy_zonelist(gfp, pol, node);
 	if (unlikely(mpol_needs_cond_ref(pol))) {
 		/*
 		 * slow path: ref counted shared policy