
Commit 30dad309 authored by Naoya Horiguchi, committed by Linus Torvalds

mm: migration: add migration_entry_wait_huge()



When a page fault hits an address backed by a hugepage under migration,
the kernel cannot wait for the migration correctly and instead
busy-loops on the hugepage fault until the migration finishes.  As a
result, users who kick off hugepage migration (via soft offlining, for
example) occasionally experience long delays or soft lockups.
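
The scenario can be kicked off from userspace along these lines (a
hypothetical reproducer sketch, not part of this patch; it assumes 2MB
hugepages, a populated hugetlb pool, CONFIG_MEMORY_FAILURE and
CAP_SYS_ADMIN):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_SOFT_OFFLINE
#define MADV_SOFT_OFFLINE 101	/* soft offline page (since 2.6.33) */
#endif

#define HPAGE_SIZE (2UL << 20)	/* assumes 2MB hugepages */

int main(void)
{
	char *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0x55, HPAGE_SIZE);	/* fault the hugepage in */

	/* Kick migration of the hugepage's contents.  A fault on the
	 * address while the migration entry is installed is where the
	 * busy loop described above shows up. */
	if (madvise(p, HPAGE_SIZE, MADV_SOFT_OFFLINE))
		perror("madvise(MADV_SOFT_OFFLINE)");

	p[0] = 1;	/* retried fault; should sleep, not spin */
	return 0;
}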

This is because pte_offset_map_lock() cannot return the correct
migration entry or the correct page table lock for a hugepage: a
hugetlb "pte" actually lives at pmd (or pud) level, so treating it as a
pmd and descending one more page table level yields a bogus pte and a
lock that does not protect the entry, the is_swap_pte() check fails,
and the fault returns only to be retried at once.  This patch
introduces migration_entry_wait_huge() to wait on the entry under the
lock that actually covers hugetlb page tables.
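
Condensed from the diff below, the shape of the fix is two thin entry
points over a common __migration_entry_wait() helper; the comments here
are editorial, not from the patch:

/* Normal pages: the pte sits one level below the pmd, so the pte-level
 * page table lock is the right one to take. */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	__migration_entry_wait(mm, pte_offset_map(pmd, address),
			       pte_lockptr(mm, pmd));
}

/* Hugetlb: the "pte" is itself a pmd/pud-level entry with no lower
 * level to descend into; it is protected by mm->page_table_lock in
 * this kernel, so wait under that lock instead. */
void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
{
	__migration_entry_wait(mm, pte, &mm->page_table_lock);
}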

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: <stable@vger.kernel.org>	[2.6.35+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 27749f2f
include/linux/swapops.h +3 −0
@@ -137,6 +137,7 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
 
 extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					unsigned long address);
+extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte);
 #else
 
 #define make_migration_entry(page, write) swp_entry(0, 0)
@@ -148,6 +149,8 @@ static inline int is_migration_entry(swp_entry_t swp)
 static inline void make_migration_entry_read(swp_entry_t *entryp) { }
 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					 unsigned long address) { }
+static inline void migration_entry_wait_huge(struct mm_struct *mm,
+					pte_t *pte) { }
 static inline int is_write_migration_entry(swp_entry_t entry)
 {
 	return 0;
mm/hugetlb.c +1 −1
@@ -2839,7 +2839,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (ptep) {
 		entry = huge_ptep_get(ptep);
 		if (unlikely(is_hugetlb_entry_migration(entry))) {
-			migration_entry_wait(mm, (pmd_t *)ptep, address);
+			migration_entry_wait_huge(mm, ptep);
 			return 0;
 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
 			return VM_FAULT_HWPOISON_LARGE |
mm/migrate.c +18 −5
@@ -200,15 +200,14 @@ static void remove_migration_ptes(struct page *old, struct page *new)
  * get to the page and wait until migration is finished.
  * When we return from this function the fault will be retried.
  */
-void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
-				unsigned long address)
+static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+				spinlock_t *ptl)
 {
-	pte_t *ptep, pte;
-	spinlock_t *ptl;
+	pte_t pte;
 	swp_entry_t entry;
 	struct page *page;
 
-	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+	spin_lock(ptl);
 	pte = *ptep;
 	if (!is_swap_pte(pte))
 		goto out;
@@ -236,6 +235,20 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 	pte_unmap_unlock(ptep, ptl);
 }
 
+void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+				unsigned long address)
+{
+	spinlock_t *ptl = pte_lockptr(mm, pmd);
+	pte_t *ptep = pte_offset_map(pmd, address);
+	__migration_entry_wait(mm, ptep, ptl);
+}
+
+void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
+{
+	spinlock_t *ptl = &(mm)->page_table_lock;
+	__migration_entry_wait(mm, pte, ptl);
+}
+
 #ifdef CONFIG_BLOCK
 /* Returns true if all buffers are successfully locked */
 static bool buffer_migrate_lock_buffers(struct buffer_head *head,