Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 189ebff2 authored by Aneesh Kumar K.V, committed by Linus Torvalds
Browse files

hugetlb: simplify migrate_huge_page()



Since we migrate only one hugepage, don't use a linked list for passing the
page around.  Directly pass the page that needs to be migrated as an argument.
This also removes the usage of page->lru in the migrate path.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hillf Danton <dhillf@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 24669e58
Loading
Loading
Loading
Loading
+2 −2
Original line number Original line Diff line number Diff line
@@ -15,7 +15,7 @@ extern int migrate_page(struct address_space *,
extern int migrate_pages(struct list_head *l, new_page_t x,
extern int migrate_pages(struct list_head *l, new_page_t x,
			unsigned long private, bool offlining,
			unsigned long private, bool offlining,
			enum migrate_mode mode);
			enum migrate_mode mode);
extern int migrate_huge_pages(struct list_head *l, new_page_t x,
extern int migrate_huge_page(struct page *, new_page_t x,
			unsigned long private, bool offlining,
			unsigned long private, bool offlining,
			enum migrate_mode mode);
			enum migrate_mode mode);


@@ -36,7 +36,7 @@ static inline void putback_lru_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
static inline int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, bool offlining,
		unsigned long private, bool offlining,
		enum migrate_mode mode) { return -ENOSYS; }
		enum migrate_mode mode) { return -ENOSYS; }
static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
static inline int migrate_huge_page(struct page *page, new_page_t x,
		unsigned long private, bool offlining,
		unsigned long private, bool offlining,
		enum migrate_mode mode) { return -ENOSYS; }
		enum migrate_mode mode) { return -ENOSYS; }


+4 −11
Original line number Original line Diff line number Diff line
@@ -1416,7 +1416,6 @@ static int soft_offline_huge_page(struct page *page, int flags)
	int ret;
	int ret;
	unsigned long pfn = page_to_pfn(page);
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);
	struct page *hpage = compound_head(page);
	LIST_HEAD(pagelist);


	ret = get_any_page(page, pfn, flags);
	ret = get_any_page(page, pfn, flags);
	if (ret < 0)
	if (ret < 0)
@@ -1431,24 +1430,18 @@ static int soft_offline_huge_page(struct page *page, int flags)
	}
	}


	/* Keep page count to indicate a given hugepage is isolated. */
	/* Keep page count to indicate a given hugepage is isolated. */

	ret = migrate_huge_page(hpage, new_page, MPOL_MF_MOVE_ALL, false,
	list_add(&hpage->lru, &pagelist);
	ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, false,
				MIGRATE_SYNC);
				MIGRATE_SYNC);
	put_page(hpage);
	if (ret) {
	if (ret) {
		struct page *page1, *page2;
		list_for_each_entry_safe(page1, page2, &pagelist, lru)
			put_page(page1);

		pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
		pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
			pfn, ret, page->flags);
			pfn, ret, page->flags);
		if (ret > 0)
			ret = -EIO;
		return ret;
		return ret;
	}
	}
done:
done:
	if (!PageHWPoison(hpage))
	if (!PageHWPoison(hpage))
		atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
		atomic_long_add(1 << compound_trans_order(hpage),
				&mce_bad_pages);
	set_page_hwpoison_huge_page(hpage);
	set_page_hwpoison_huge_page(hpage);
	dequeue_hwpoisoned_huge_page(hpage);
	dequeue_hwpoisoned_huge_page(hpage);
	/* keep elevated page count for bad page */
	/* keep elevated page count for bad page */
+21 −44
Original line number Original line Diff line number Diff line
@@ -932,15 +932,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
	if (anon_vma)
	if (anon_vma)
		put_anon_vma(anon_vma);
		put_anon_vma(anon_vma);
	unlock_page(hpage);
	unlock_page(hpage);

out:
out:
	if (rc != -EAGAIN) {
		list_del(&hpage->lru);
		put_page(hpage);
	}

	put_page(new_hpage);
	put_page(new_hpage);

	if (result) {
	if (result) {
		if (rc)
		if (rc)
			*result = rc;
			*result = rc;
@@ -1016,48 +1009,32 @@ int migrate_pages(struct list_head *from,
	return nr_failed + retry;
	return nr_failed + retry;
}
}


int migrate_huge_pages(struct list_head *from,
int migrate_huge_page(struct page *hpage, new_page_t get_new_page,
		new_page_t get_new_page, unsigned long private, bool offlining,
		      unsigned long private, bool offlining,
		      enum migrate_mode mode)
		      enum migrate_mode mode)
{
{
	int retry = 1;
	int pass, rc;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int rc;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();


	for (pass = 0; pass < 10; pass++) {
		rc = unmap_and_move_huge_page(get_new_page,
		rc = unmap_and_move_huge_page(get_new_page,
					private, page, pass > 2, offlining,
					      private, hpage, pass > 2, offlining,
					      mode);
					      mode);

		switch (rc) {
		switch (rc) {
		case -ENOMEM:
		case -ENOMEM:
			goto out;
			goto out;
		case -EAGAIN:
		case -EAGAIN:
				retry++;
			/* try again */
			cond_resched();
			break;
			break;
		case 0:
		case 0:
				break;
			goto out;
		default:
		default:
				/* Permanent failure */
			rc = -EIO;
				nr_failed++;
			goto out;
				break;
			}
		}
		}
	}
	}
	rc = 0;
out:
out:
	if (rc)
	return rc;
	return rc;

	return nr_failed + retry;
}
}


#ifdef CONFIG_NUMA
#ifdef CONFIG_NUMA