
Commit c8633798 authored by Naoya Horiguchi, committed by Linus Torvalds

mm: mempolicy: mbind and migrate_pages support thp migration



This patch enables thp migration for mbind(2) and migrate_pages(2).
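
For context, here is a minimal userspace sketch of the two syscalls this patch affects. It is illustrative only and not part of the patch: the mapping size, the MADV_HUGEPAGE hint, and the node numbers (0 and 1) are assumptions about the test setup, and the mbind()/migrate_pages() wrappers come from libnuma's <numaif.h> (build with -lnuma).

/* Illustrative only: bind a (hopefully) THP-backed range to node 1, then
 * move this task's pages from node 0 to node 1.  With this patch, a
 * PMD-mapped huge page that violates the policy can be migrated as a whole
 * instead of first being split into base pages.
 */
#include <numaif.h>	/* mbind(), migrate_pages(), MPOL_BIND, MPOL_MF_MOVE */
#include <sys/mman.h>	/* mmap(), madvise(), MADV_HUGEPAGE */
#include <stdio.h>

int main(void)
{
	size_t len = 2UL << 20;		/* one 2MB huge page worth of memory */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	madvise(p, len, MADV_HUGEPAGE);	/* ask for THP backing */
	((char *)p)[0] = 1;		/* fault the range in */

	/* mbind(2): bind the range to node 1 and move misplaced pages now */
	unsigned long nodemask = 1UL << 1;
	if (mbind(p, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
		  MPOL_MF_MOVE) != 0)
		perror("mbind");

	/* migrate_pages(2): move this task's pages from node 0 to node 1 */
	unsigned long from = 1UL << 0, to = 1UL << 1;
	if (migrate_pages(0, 8 * sizeof(from), &from, &to) < 0)
		perror("migrate_pages");

	return 0;
}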

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Zi Yan <zi.yan@cs.rutgers.edu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ab6e3d09
mm/mempolicy.c +79 −29
@@ -97,6 +97,7 @@
 #include <linux/mm_inline.h>
 #include <linux/mmu_notifier.h>
 #include <linux/printk.h>
+#include <linux/swapops.h>
 
 #include <asm/tlbflush.h>
 #include <linux/uaccess.h>
@@ -426,41 +427,69 @@ static inline bool queue_pages_required(struct page *page,
 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
 }
 
+static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
+				unsigned long end, struct mm_walk *walk)
+{
+	int ret = 0;
+	struct page *page;
+	struct queue_pages *qp = walk->private;
+	unsigned long flags;
+
+	if (unlikely(is_pmd_migration_entry(*pmd))) {
+		ret = 1;
+		goto unlock;
+	}
+	page = pmd_page(*pmd);
+	if (is_huge_zero_page(page)) {
+		spin_unlock(ptl);
+		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
+		goto out;
+	}
+	if (!thp_migration_supported()) {
+		get_page(page);
+		spin_unlock(ptl);
+		lock_page(page);
+		ret = split_huge_page(page);
+		unlock_page(page);
+		put_page(page);
+		goto out;
+	}
+	if (!queue_pages_required(page, qp)) {
+		ret = 1;
+		goto unlock;
+	}
+
+	ret = 1;
+	flags = qp->flags;
+	/* go to thp migration */
+	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+		migrate_page_add(page, qp->pagelist, flags);
+unlock:
+	spin_unlock(ptl);
+out:
+	return ret;
+}
+
 /*
  * Scan through pages checking if pages follow certain conditions,
  * and move them to the pagelist if they do.
  */
 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 			unsigned long end, struct mm_walk *walk)
 {
 	struct vm_area_struct *vma = walk->vma;
 	struct page *page;
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
-	int nid, ret;
+	int ret;
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge(*pmd)) {
-		ptl = pmd_lock(walk->mm, pmd);
-		if (pmd_trans_huge(*pmd)) {
-			page = pmd_page(*pmd);
-			if (is_huge_zero_page(page)) {
-				spin_unlock(ptl);
-				__split_huge_pmd(vma, pmd, addr, false, NULL);
-			} else {
-				get_page(page);
-				spin_unlock(ptl);
-				lock_page(page);
-				ret = split_huge_page(page);
-				unlock_page(page);
-				put_page(page);
-				if (ret)
-					return 0;
-			}
-		} else {
-			spin_unlock(ptl);
-		}
-	}
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
+		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
+		if (ret)
+			return 0;
+	}
 
 	if (pmd_trans_unstable(pmd))
@@ -481,7 +510,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 			continue;
 		if (!queue_pages_required(page, qp))
 			continue;
-		if (PageTransCompound(page)) {
+		if (PageTransCompound(page) && !thp_migration_supported()) {
 			get_page(page);
 			pte_unmap_unlock(pte, ptl);
 			lock_page(page);
@@ -893,19 +922,21 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 
 #ifdef CONFIG_MIGRATION
 /*
- * page migration
+ * page migration, thp tail pages can be passed.
  */
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags)
 {
+	struct page *head = compound_head(page);
 	/*
 	 * Avoid migrating a page that is shared with others.
 	 */
-	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
-		if (!isolate_lru_page(page)) {
-			list_add_tail(&page->lru, pagelist);
-			inc_node_page_state(page, NR_ISOLATED_ANON +
-					    page_is_file_cache(page));
+	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
+		if (!isolate_lru_page(head)) {
+			list_add_tail(&head->lru, pagelist);
+			mod_node_page_state(page_pgdat(head),
+				NR_ISOLATED_ANON + page_is_file_cache(head),
+				hpage_nr_pages(head));
 		}
 	}
 }
@@ -915,7 +946,17 @@ static struct page *new_node_page(struct page *page, unsigned long node, int **x
 	if (PageHuge(page))
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
 					node);
-	else
+	else if (thp_migration_supported() && PageTransHuge(page)) {
+		struct page *thp;
+
+		thp = alloc_pages_node(node,
+			(GFP_TRANSHUGE | __GFP_THISNODE),
+			HPAGE_PMD_ORDER);
+		if (!thp)
+			return NULL;
+		prep_transhuge_page(thp);
+		return thp;
+	} else
 		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
 						    __GFP_THISNODE, 0);
 }
@@ -1081,6 +1122,15 @@ static struct page *new_page(struct page *page, unsigned long start, int **x)
 	if (PageHuge(page)) {
 		BUG_ON(!vma);
 		return alloc_huge_page_noerr(vma, address, 1);
+	} else if (thp_migration_supported() && PageTransHuge(page)) {
+		struct page *thp;
+
+		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
+					 HPAGE_PMD_ORDER);
+		if (!thp)
+			return NULL;
+		prep_transhuge_page(thp);
+		return thp;
 	}
 	/*
 	 * if !vma, alloc_page_vma() will use task or system default policy