
Commit e24f0b8f authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] page migration: simplify migrate_pages()



Currently migrate_pages() is a mess with lots of gotos.  Extract two functions
from migrate_pages() and get rid of the gotos.

Plus we can just unconditionally set the locked bit on the new page since we
are the only one holding a reference.  Locking is to stop others from
accessing the page once we establish references to the new page.

Remove the list_del from move_to_lru in order to have finer control over list
processing.
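
To make the control-flow change concrete, here is a minimal, self-contained
userspace sketch of the same pattern (illustrative only, not the kernel code
in the diff below; names such as process_one, items and state are
hypothetical): the per-item work is extracted into a helper that reports
retryable failures as -EAGAIN, and the driver becomes a bounded loop over
passes instead of a "goto redo".

#include <errno.h>
#include <stdio.h>

enum { TODO, MOVED, FAILED };

/* Hypothetical per-item worker standing in for unmap_and_move():
 * -EAGAIN means "retryable", any other nonzero value is a permanent
 * failure, 0 is success. "force" corresponds to pass > 2. */
static int process_one(int item, int force)
{
	if (item < 0)
		return -EPERM;		/* permanent failure */
	if (item % 3 == 0 && !force)
		return -EAGAIN;		/* busy now; may succeed on a later pass */
	return 0;
}

int main(void)
{
	int items[] = { 1, 3, -2, 6, 7 };
	int state[] = { TODO, TODO, TODO, TODO, TODO };
	int nr_failed = 0;
	int pass, retry = 1;

	/* Bounded retry loop replacing "redo:"/"goto redo": at most ten
	 * passes, stopping early once nothing is retryable. */
	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;
		for (int i = 0; i < 5; i++) {
			if (state[i] != TODO)
				continue;	/* already "moved" or "failed" */
			switch (process_one(items[i], pass > 2)) {
			case -EAGAIN:
				retry++;
				break;
			case 0:
				state[i] = MOVED;
				break;
			default:
				state[i] = FAILED;
				nr_failed++;
				break;
			}
		}
	}
	printf("not migrated: %d\n", nr_failed);
	return 0;
}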

[akpm@osdl.org: add debug check]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Jes Sorensen <jes@trained-monkey.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 8f9de51a
+115 −103
@@ -84,7 +84,6 @@ int migrate_prep(void)
 
 static inline void move_to_lru(struct page *page)
 {
-	list_del(&page->lru);
 	if (PageActive(page)) {
 		/*
 		 * lru_cache_add_active checks that
@@ -110,6 +109,7 @@ int putback_lru_pages(struct list_head *l)
 	int count = 0;
 
 	list_for_each_entry_safe(page, page2, l, lru) {
+		list_del(&page->lru);
 		move_to_lru(page);
 		count++;
 	}
@@ -534,99 +534,32 @@ static int fallback_migrate_page(struct address_space *mapping,
 }
 
 /*
- * migrate_pages
+ * Move a page to a newly allocated page
+ * The page is locked and all ptes have been successfully removed.
  *
- * Two lists are passed to this function. The first list
- * contains the pages isolated from the LRU to be migrated.
- * The second list contains new pages that the pages isolated
- * can be moved to.
- *
- * The function returns after 10 attempts or if no pages
- * are movable anymore because to has become empty
- * or no retryable pages exist anymore.
- *
- * Return: Number of pages not migrated when "to" ran empty.
+ * The new page will have replaced the old page if this function
+ * is successful.
  */
-int migrate_pages(struct list_head *from, struct list_head *to,
-		  struct list_head *moved, struct list_head *failed)
+static int move_to_new_page(struct page *newpage, struct page *page)
 {
-	int retry;
-	int nr_failed = 0;
-	int pass = 0;
-	struct page *page;
-	struct page *page2;
-	int swapwrite = current->flags & PF_SWAPWRITE;
-	int rc;
-
-	if (!swapwrite)
-		current->flags |= PF_SWAPWRITE;
-
-redo:
-	retry = 0;
-
-	list_for_each_entry_safe(page, page2, from, lru) {
-		struct page *newpage = NULL;
 	struct address_space *mapping;
-
-		cond_resched();
-
-		rc = 0;
-		if (page_count(page) == 1)
-			/* page was freed from under us. So we are done. */
-			goto next;
-
-		if (to && list_empty(to))
-			break;
-
-		/*
-		 * Skip locked pages during the first two passes to give the
-		 * functions holding the lock time to release the page. Later we
-		 * use lock_page() to have a higher chance of acquiring the
-		 * lock.
-		 */
-		rc = -EAGAIN;
-		if (pass > 2)
-			lock_page(page);
-		else
-			if (TestSetPageLocked(page))
-				goto next;
-
-		/*
-		 * Only wait on writeback if we have already done a pass where
-		 * we we may have triggered writeouts for lots of pages.
-		 */
-		if (pass > 0)
-			wait_on_page_writeback(page);
-		else
-			if (PageWriteback(page))
-				goto unlock_page;
+	int rc;
 
 	/*
-		 * Establish migration ptes or remove ptes
+	 * Block others from accessing the page when we get around to
+	 * establishing additional references. We are the only one
+	 * holding a reference to the new page at this point.
 	 */
-		rc = -EPERM;
-		if (try_to_unmap(page, 1) == SWAP_FAIL)
-			/* A vma has VM_LOCKED set -> permanent failure */
-			goto unlock_page;
-
-		rc = -EAGAIN;
-		if (page_mapped(page))
-			goto unlock_page;
+	if (TestSetPageLocked(newpage))
+		BUG();
 
-		newpage = lru_to_page(to);
-		lock_page(newpage);
 	/* Prepare mapping for the new page.*/
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
 
-		/*
-		 * Pages are properly locked and writeback is complete.
-		 * Try to migrate the page.
-		 */
 	mapping = page_mapping(page);
 	if (!mapping)
 		rc = migrate_page(mapping, newpage, page);
-
 	else if (mapping->a_ops->migratepage)
 		/*
 		 * Most pages have a mapping and most filesystems
@@ -642,37 +575,116 @@ redo:
 
 	if (!rc)
 		remove_migration_ptes(page, newpage);
+	else
+		newpage->mapping = NULL;
 
 	unlock_page(newpage);
 
-unlock_page:
+	return rc;
+}
+
+/*
+ * Obtain the lock on page, remove all ptes and migrate the page
+ * to the newly allocated page in newpage.
+ */
+static int unmap_and_move(struct page *newpage, struct page *page, int force)
+{
+	int rc = 0;
+
+	if (page_count(page) == 1)
+		/* page was freed from under us. So we are done. */
+		goto ret;
+
+	rc = -EAGAIN;
+	if (TestSetPageLocked(page)) {
+		if (!force)
+			goto ret;
+		lock_page(page);
+	}
+
+	if (PageWriteback(page)) {
+		if (!force)
+			goto unlock;
+		wait_on_page_writeback(page);
+	}
+
+	/*
+	 * Establish migration ptes or remove ptes
+	 */
+	if (try_to_unmap(page, 1) != SWAP_FAIL) {
+		if (!page_mapped(page))
+			rc = move_to_new_page(newpage, page);
+	} else
+		/* A vma has VM_LOCKED set -> permanent failure */
+		rc = -EPERM;
+
 	if (rc)
 		remove_migration_ptes(page, page);
-
+unlock:
 	unlock_page(page);
+ret:
+	if (rc != -EAGAIN) {
+		list_del(&newpage->lru);
+		move_to_lru(newpage);
+	}
+	return rc;
+}
 
-next:
-		if (rc) {
-			if (newpage)
-				newpage->mapping = NULL;
+/*
+ * migrate_pages
+ *
+ * Two lists are passed to this function. The first list
+ * contains the pages isolated from the LRU to be migrated.
+ * The second list contains new pages that the isolated pages
+ * can be moved to.
+ *
+ * The function returns after 10 attempts or if no pages
+ * are movable anymore because to has become empty
+ * or no retryable pages exist anymore.
+ *
+ * Return: Number of pages not migrated when "to" ran empty.
+ */
+int migrate_pages(struct list_head *from, struct list_head *to,
+		  struct list_head *moved, struct list_head *failed)
+{
+	int retry = 1;
+	int nr_failed = 0;
+	int pass = 0;
+	struct page *page;
+	struct page *page2;
+	int swapwrite = current->flags & PF_SWAPWRITE;
+	int rc;
+
+	if (!swapwrite)
+		current->flags |= PF_SWAPWRITE;
+
+	for(pass = 0; pass < 10 && retry; pass++) {
+		retry = 0;
 
-			if (rc == -EAGAIN)
+		list_for_each_entry_safe(page, page2, from, lru) {
+
+			if (list_empty(to))
+				break;
+
+			cond_resched();
+
+			rc = unmap_and_move(lru_to_page(to), page, pass > 2);
+
+			switch(rc) {
+			case -EAGAIN:
 				retry++;
-			else {
+				break;
+			case 0:
+				list_move(&page->lru, moved);
+				break;
+			default:
 				/* Permanent failure */
 				list_move(&page->lru, failed);
 				nr_failed++;
+				break;
 			}
-		} else {
-			if (newpage) {
-				/* Successful migration. Return page to LRU */
-				move_to_lru(newpage);
-			}
-			list_move(&page->lru, moved);
 		}
 	}
-	if (retry && pass++ < 10)
-		goto redo;
 
 	if (!swapwrite)
 		current->flags &= ~PF_SWAPWRITE;
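
The unconditional TestSetPageLocked(newpage) followed by BUG() in
move_to_new_page() above encodes the changelog's locking argument: the caller
holds the only reference to the newly allocated page, so a failed trylock
cannot mean contention and must be a bug. A minimal userspace analogue of
that invariant, using C11 atomics rather than the kernel's page-flag API
(struct obj and trylock are hypothetical names):

#include <assert.h>
#include <stdatomic.h>

/* A freshly created, privately owned object: no other thread can
 * hold a reference to it, so no other thread can have locked it. */
struct obj {
	atomic_flag locked;
};

/* Returns nonzero if the lock was already held (like TestSetPageLocked). */
static int trylock(struct obj *o)
{
	return atomic_flag_test_and_set(&o->locked);
}

int main(void)
{
	struct obj fresh = { ATOMIC_FLAG_INIT };

	/* Mirrors: if (TestSetPageLocked(newpage)) BUG();
	 * Sole ownership guarantees this trylock succeeds. */
	assert(!trylock(&fresh));
	return 0;
}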