
Commit 8df651c7 authored by Kirill A. Shutemov, committed by Linus Torvalds

thp: cleanup split_huge_page()



After one of the bugfixes to freeze_page(), no frozen pages remain in the
rmap, so the mapcount of every subpage of a frozen THP is zero. And we
have an assert for that.

Let's drop the code which dealt with a non-zero mapcount of subpages.
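
For readers tracing the new assertion: _mapcount is stored biased by -1,
so "no mappings" reads as a raw -1 and the logical mapcount is the raw
value plus one (this is what the kernel's page_mapcount() computes). A
minimal illustrative sketch of that encoding in plain C11 follows;
sketch_page_mapcount is a made-up stand-in, not the kernel helper:

#include <assert.h>
#include <stdatomic.h>

/* Illustrative only: _mapcount is biased by -1, so a raw value of -1
 * means "no mappings" and the logical mapcount is raw + 1.  After
 * freeze_page(), every THP subpage reads -1, which is exactly what the
 * VM_BUG_ON_PAGE added by this patch asserts. */
static int sketch_page_mapcount(atomic_int *_mapcount)
{
	return atomic_load(_mapcount) + 1;
}

int main(void)
{
	atomic_int _mapcount = -1;			/* a frozen THP subpage */
	assert(sketch_page_mapcount(&_mapcount) == 0);	/* mapcount is zero */
	return 0;
}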

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 88193f7c
mm/huge_memory.c +7 −13
@@ -3220,28 +3220,26 @@ static void unfreeze_page(struct anon_vma *anon_vma, struct page *page)
 	}
 }
 
-static int __split_huge_page_tail(struct page *head, int tail,
+static void __split_huge_page_tail(struct page *head, int tail,
 		struct lruvec *lruvec, struct list_head *list)
 {
-	int mapcount;
 	struct page *page_tail = head + tail;
 
-	mapcount = atomic_read(&page_tail->_mapcount) + 1;
+	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
 	VM_BUG_ON_PAGE(atomic_read(&page_tail->_count) != 0, page_tail);
 
 	/*
 	 * tail_page->_count is zero and not changing from under us. But
 	 * get_page_unless_zero() may be running from under us on the
-	 * tail_page. If we used atomic_set() below instead of atomic_add(), we
+	 * tail_page. If we used atomic_set() below instead of atomic_inc(), we
 	 * would then run atomic_set() concurrently with
 	 * get_page_unless_zero(), and atomic_set() is implemented in C not
 	 * using locked ops. spin_unlock on x86 sometime uses locked ops
 	 * because of PPro errata 66, 92, so unless somebody can guarantee
 	 * atomic_set() here would be safe on all archs (and not only on x86),
-	 * it's safer to use atomic_add().
+	 * it's safer to use atomic_inc().
 	 */
-	atomic_add(mapcount + 1, &page_tail->_count);
-
+	atomic_inc(&page_tail->_count);
 
 	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	page_tail->flags |= (head->flags &
@@ -3275,8 +3273,6 @@ static int __split_huge_page_tail(struct page *head, int tail,
 	page_tail->index = head->index + tail;
 	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
 	lru_add_page_tail(head, page_tail, lruvec, list);
-
-	return mapcount;
 }
 
 static void __split_huge_page(struct page *page, struct list_head *list)
@@ -3284,7 +3280,7 @@ static void __split_huge_page(struct page *page, struct list_head *list)
 	struct page *head = compound_head(page);
 	struct zone *zone = page_zone(head);
 	struct lruvec *lruvec;
-	int i, tail_mapcount;
+	int i;
 
 	/* prevent PageLRU to go away from under us, and freeze lru stats */
 	spin_lock_irq(&zone->lru_lock);
@@ -3293,10 +3289,8 @@ static void __split_huge_page(struct page *page, struct list_head *list)
 	/* complete memcg works before add pages to LRU */
 	mem_cgroup_split_huge_fixup(head);
 
-	tail_mapcount = 0;
 	for (i = HPAGE_PMD_NR - 1; i >= 1; i--)
-		tail_mapcount += __split_huge_page_tail(head, i, lruvec, list);
-	atomic_sub(tail_mapcount, &head->_count);
+		__split_huge_page_tail(head, i, lruvec, list);
 
 	ClearPageCompound(head);
 	spin_unlock_irq(&zone->lru_lock);
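
The retained comment about atomic_set() versus atomic_inc() deserves a
gloss: get_page_unless_zero() takes a reference only when the refcount is
already non-zero, using an atomic read-modify-write. A rough userspace
sketch of why the splitter's atomic_inc() composes with that while a
blind store would not (C11 atomics; both function names below are
illustrative stand-ins, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>

/* Sketch of get_page_unless_zero() semantics: take a reference only if
 * the refcount is already non-zero, via a compare-and-swap loop. */
static bool sketch_get_page_unless_zero(atomic_int *_count)
{
	int old = atomic_load(_count);

	do {
		if (old == 0)
			return false;	/* refcount is zero: refuse the ref */
	} while (!atomic_compare_exchange_weak(_count, &old, old + 1));

	return true;			/* reference taken */
}

/* The splitter raises a tail page's refcount from 0 to 1.  An atomic
 * read-modify-write preserves any increment that a racing
 * sketch_get_page_unless_zero() lands in between; a blind store of a
 * precomputed value could overwrite such an increment and lose the
 * racing reference. */
static void sketch_split_raises_refcount(atomic_int *_count)
{
	atomic_fetch_add(_count, 1);	/* safe: composes with the CAS loop */
	/* atomic_store(_count, 1);	   unsafe: may clobber a racing ref */
}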