
Commit 07443a85 authored by Joonsoo Kim, committed by Linus Torvalds

mm, hugetlb: return a reserved page to a reserved pool if failed

If we fail with a reserved page, just calling put_page() is not
sufficient, because put_page() invokes free_huge_page() as its final
step, and free_huge_page() cannot tell whether the page came from a
reserved pool.  It therefore does nothing to the reserve count, which
is now lower than it should be, since the count was already decremented
in dequeue_huge_page_vma().  Fix this by tagging a page taken from the
reserved pool with SetPagePrivate(), letting free_huge_page() restore
the reservation, and clearing the tag once the page is successfully
mapped.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Davidlohr Bueso <davidlohr@hp.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8312034f
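
To make the accounting concrete before the diff, here is a minimal userspace sketch of the pattern this patch introduces. The struct fields and helper names below are simplified stand-ins for the kernel's page-private flag and hstate counters, not the actual hugetlb API:

#include <stdbool.h>
#include <stdio.h>

struct page { bool from_reserve; };	/* stands in for PagePrivate(page) */
struct hstate_counts { long resv_huge_pages; };

static struct hstate_counts h = { .resv_huge_pages = 1 };

/* Dequeue path: consume a reservation and tag the page so the free
 * path can tell where it came from (~ SetPagePrivate + resv--). */
static void dequeue_reserved(struct page *page)
{
	page->from_reserve = true;
	h.resv_huge_pages--;
}

/* Free path: without the tag, freeing a page that was never mapped
 * would silently leak the reservation (~ free_huge_page). */
static void free_back(struct page *page)
{
	bool restore_reserve = page->from_reserve;

	page->from_reserve = false;
	if (restore_reserve)
		h.resv_huge_pages++;
}

int main(void)
{
	struct page page = { 0 };

	dequeue_reserved(&page);
	/* ... suppose the fault handler fails before mapping the page ... */
	free_back(&page);
	printf("resv_huge_pages = %ld\n", h.resv_huge_pages);	/* 1 again */
	return 0;
}

With the pre-patch behaviour (no tag, no restore in the free path), the final count would read 0 even though the reservation was never consumed by a mapping.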
mm/hugetlb.c +12 −1
@@ -572,6 +572,7 @@ retry_cpuset:
 				if (!vma_has_reserves(vma, chg))
 					break;
 
+				SetPagePrivate(page);
 				h->resv_huge_pages--;
 				break;
 			}
@@ -629,15 +630,20 @@ static void free_huge_page(struct page *page)
 	int nid = page_to_nid(page);
 	struct hugepage_subpool *spool =
 		(struct hugepage_subpool *)page_private(page);
+	bool restore_reserve;
 
 	set_page_private(page, 0);
 	page->mapping = NULL;
 	BUG_ON(page_count(page));
 	BUG_ON(page_mapcount(page));
+	restore_reserve = PagePrivate(page);
 
 	spin_lock(&hugetlb_lock);
 	hugetlb_cgroup_uncharge_page(hstate_index(h),
 				     pages_per_huge_page(h), page);
+	if (restore_reserve)
+		h->resv_huge_pages++;
+
 	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
 		/* remove the page from active list */
 		list_del(&page->lru);
@@ -2636,6 +2642,8 @@ retry_avoidcopy:
 	spin_lock(&mm->page_table_lock);
 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
 	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
+		ClearPagePrivate(new_page);
+
 		/* Break COW */
 		huge_ptep_clear_flush(vma, address, ptep);
 		set_huge_pte_at(mm, address, ptep,
@@ -2747,6 +2755,7 @@ retry:
 					goto retry;
 				goto out;
 			}
+			ClearPagePrivate(page);
 
 			spin_lock(&inode->i_lock);
 			inode->i_blocks += blocks_per_huge_page(h);
@@ -2793,8 +2802,10 @@ retry:
 	if (!huge_pte_none(huge_ptep_get(ptep)))
 		goto backout;
 
-	if (anon_rmap)
+	if (anon_rmap) {
+		ClearPagePrivate(page);
 		hugepage_add_new_anon_rmap(page, vma, address);
+	}
 	else
 		page_dup_rmap(page);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
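
The ClearPagePrivate() calls in the last three hunks are the success side of the same scheme: once the page is actually mapped, the reservation has been consumed for good, so the tag must be cleared, otherwise a later free_huge_page() would return a reservation that was already spent and inflate resv_huge_pages. In terms of the hypothetical sketch above, the success path would be:

/* Success path: the mapping stuck, so the reservation must not be
 * returned when the page is eventually freed (~ ClearPagePrivate). */
static void map_succeeded(struct page *page)
{
	page->from_reserve = false;
}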