Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5ecc4d85 authored by Jan Kara's avatar Jan Kara Committed by Linus Torvalds
Browse files

mm: factor out checks and accounting from __delete_from_page_cache()

Move checks and accounting updates from __delete_from_page_cache() into
a separate function.  We will reuse it when batching page cache
truncation operations.

Link: http://lkml.kernel.org/r/20171010151937.26984-7-jack@suse.cz


Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2300638b
Loading
Loading
Loading
Loading
+41 −31
Original line number Diff line number Diff line
@@ -181,17 +181,11 @@ static void page_cache_tree_delete(struct address_space *mapping,
	mapping->nrpages -= nr;
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
static void unaccount_page_cache_page(struct address_space *mapping,
				      struct page *page)
{
	struct address_space *mapping = page->mapping;
	int nr = hpage_nr_pages(page);
	int nr;

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
@@ -228,7 +222,11 @@ void __delete_from_page_cache(struct page *page, void *shadow)
	}

	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(page)) {
	if (PageHuge(page))
		return;

	nr = hpage_nr_pages(page);

	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	if (PageSwapBacked(page)) {
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
@@ -249,9 +247,21 @@ void __delete_from_page_cache(struct page *page, void *shadow)
	 * buddy allocator.
	 */
	if (WARN_ON_ONCE(PageDirty(page)))
			account_page_cleaned(page, mapping,
					     inode_to_wb(mapping->host));
		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 *
 * @page:   the page to remove from the page cache
 * @shadow: opaque value handed through to page_cache_tree_delete();
 *          presumably a shadow entry left in place of the page — verify
 *          against page_cache_tree_delete()'s contract.
 *
 * Per the commit message, the checks and accounting updates that used to
 * live inline here were factored out into unaccount_page_cache_page() so
 * they can be reused by batched truncation; the statement order below
 * (trace, unaccount, then radix-tree removal) preserves the original
 * inline sequence.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
	/* Read page->mapping once; it is used for both calls below. */
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);

	/* Undo statistics/accounting for this page before removal. */
	unaccount_page_cache_page(mapping, page);
	/* Actually unlink the page from the mapping's radix tree. */
	page_cache_tree_delete(mapping, page, shadow);
}