
Commit b065b432 authored by Hugh Dickins, committed by Linus Torvalds

shmem: cleanup shmem_add_to_page_cache



shmem_add_to_page_cache() has three callsites, but only one of them wants
the radix_tree_preload() (an exceptional entry guarantees that the radix
tree node is present in the other cases), and only that site can actually
need mem_cgroup_uncharge_cache_page() (PageSwapCache makes it a no-op in
the other cases).  We did it this way originally to reflect
add_to_page_cache_locked(); but it's confusing now, so move the radix_tree
preloading and mem_cgroup uncharging to that one caller.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d1899228
mm/shmem.c +28 −30
@@ -288,25 +288,21 @@ static int shmem_add_to_page_cache(struct page *page,
 				   struct address_space *mapping,
 				   pgoff_t index, gfp_t gfp, void *expected)
 {
-	int error = 0;
+	int error;
 
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(!PageSwapBacked(page));
 
-	if (!expected)
-		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
-	if (!error) {
 	page_cache_get(page);
 	page->mapping = mapping;
 	page->index = index;
 
 	spin_lock_irq(&mapping->tree_lock);
 	if (!expected)
-			error = radix_tree_insert(&mapping->page_tree,
-							index, page);
+		error = radix_tree_insert(&mapping->page_tree, index, page);
 	else
-			error = shmem_radix_tree_replace(mapping, index,
-							expected, page);
+		error = shmem_radix_tree_replace(mapping, index, expected,
+								 page);
 	if (!error) {
 		mapping->nrpages++;
 		__inc_zone_page_state(page, NR_FILE_PAGES);
@@ -317,11 +313,6 @@ static int shmem_add_to_page_cache(struct page *page,
 		spin_unlock_irq(&mapping->tree_lock);
 		page_cache_release(page);
 	}
-		if (!expected)
-			radix_tree_preload_end();
-	}
-	if (error)
-		mem_cgroup_uncharge_cache_page(page);
 	return error;
 }
 
@@ -1202,11 +1193,18 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		__set_page_locked(page);
 		error = mem_cgroup_cache_charge(page, current->mm,
 						gfp & GFP_RECLAIM_MASK);
-		if (!error)
+		if (error)
+			goto decused;
+		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+		if (!error) {
 			error = shmem_add_to_page_cache(page, mapping, index,
 							gfp, NULL);
-		if (error)
+			radix_tree_preload_end();
+		}
+		if (error) {
+			mem_cgroup_uncharge_cache_page(page);
 			goto decused;
+		}
 		lru_cache_add_anon(page);
 
 		spin_lock(&info->lock);
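
Pieced together from the added and context lines of the last hunk (the view above appears to hide whitespace-only re-indentation), the one call site that still wants the preload, in shmem_getpage_gfp(), ends up looking roughly like this. This is a sketch of the post-patch flow rather than a complete excerpt: every identifier comes from the diff above, and the page allocation and the decused error path that surround it are omitted.

	error = mem_cgroup_cache_charge(page, current->mm,
					gfp & GFP_RECLAIM_MASK);
	if (error)
		goto decused;
	/* reserve a radix tree node before shmem_add_to_page_cache() takes tree_lock */
	error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (!error) {
		error = shmem_add_to_page_cache(page, mapping, index,
						gfp, NULL);
		radix_tree_preload_end();
	}
	if (error) {
		/* undo the memcg charge if the page never made it into the page cache */
		mem_cgroup_uncharge_cache_page(page);
		goto decused;
	}
	lru_cache_add_anon(page);

The other two callers pass a non-NULL 'expected' entry, so per the commit message the radix tree node is already guaranteed to be present there and the uncharge would be a no-op; that is why shmem_add_to_page_cache() itself no longer preloads or uncharges.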