
Commit 62cccb8c authored by Johannes Weiner, committed by Linus Torvalds

mm: simplify lock_page_memcg()



Now that migration doesn't clear page->mem_cgroup of live pages anymore,
it's safe to make lock_page_memcg() and the memcg stat functions take
pages, and spare the callers from memcg objects.
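
In practice the simplification is confined to the call sites: the memcg handle disappears from the lock/unlock pair and from the stat helpers. A minimal before/after sketch of the pattern documented in the memcontrol.h hunk below (MEM_CGROUP_STAT_DIRTY is used only as an illustrative stat index; it is not part of this diff):

	/* Before: callers had to capture the memcg and thread it through. */
	memcg = lock_page_memcg(page);
	if (TestClearPageDirty(page))
		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
	unlock_page_memcg(memcg);

	/*
	 * After: everything keys off the page. The stat helper reads
	 * page->mem_cgroup itself, which lock_page_memcg() keeps stable
	 * against concurrent migration.
	 */
	lock_page_memcg(page);
	if (TestClearPageDirty(page))
		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
	unlock_page_memcg(page);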

[akpm@linux-foundation.org: fix warnings]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6a93ca8f
fs/buffer.c +8 −10
@@ -624,14 +624,14 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * The caller must hold lock_page_memcg().
  */
 static void __set_page_dirty(struct page *page, struct address_space *mapping,
-			     struct mem_cgroup *memcg, int warn)
+			     int warn)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&mapping->tree_lock, flags);
 	if (page->mapping) {	/* Race with truncate? */
 		WARN_ON_ONCE(warn && !PageUptodate(page));
-		account_page_dirtied(page, mapping, memcg);
+		account_page_dirtied(page, mapping);
 		radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 	}
@@ -666,7 +666,6 @@ static void __set_page_dirty(struct page *page, struct address_space *mapping,
 int __set_page_dirty_buffers(struct page *page)
 {
 	int newly_dirty;
-	struct mem_cgroup *memcg;
 	struct address_space *mapping = page_mapping(page);
 
 	if (unlikely(!mapping))
@@ -686,14 +685,14 @@ int __set_page_dirty_buffers(struct page *page)
 	 * Lock out page->mem_cgroup migration to keep PageDirty
 	 * synchronized with per-memcg dirty page counters.
 	 */
-	memcg = lock_page_memcg(page);
+	lock_page_memcg(page);
 	newly_dirty = !TestSetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
 	if (newly_dirty)
-		__set_page_dirty(page, mapping, memcg, 1);
+		__set_page_dirty(page, mapping, 1);
 
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -1167,15 +1166,14 @@ void mark_buffer_dirty(struct buffer_head *bh)
 	if (!test_set_buffer_dirty(bh)) {
 		struct page *page = bh->b_page;
 		struct address_space *mapping = NULL;
-		struct mem_cgroup *memcg;
 
-		memcg = lock_page_memcg(page);
+		lock_page_memcg(page);
 		if (!TestSetPageDirty(page)) {
 			mapping = page_mapping(page);
 			if (mapping)
-				__set_page_dirty(page, mapping, memcg, 0);
+				__set_page_dirty(page, mapping, 0);
 		}
-		unlock_page_memcg(memcg);
+		unlock_page_memcg(page);
 		if (mapping)
 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	}
fs/xfs/xfs_aops.c +3 −4
@@ -1957,7 +1957,6 @@ xfs_vm_set_page_dirty(
 	loff_t			end_offset;
 	loff_t			offset;
 	int			newly_dirty;
-	struct mem_cgroup	*memcg;
 
 	if (unlikely(!mapping))
 		return !TestSetPageDirty(page);
@@ -1981,7 +1980,7 @@ xfs_vm_set_page_dirty(
 	 * Lock out page->mem_cgroup migration to keep PageDirty
 	 * synchronized with per-memcg dirty page counters.
 	 */
-	memcg = lock_page_memcg(page);
+	lock_page_memcg(page);
 	newly_dirty = !TestSetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
@@ -1992,13 +1991,13 @@ xfs_vm_set_page_dirty(
 		spin_lock_irqsave(&mapping->tree_lock, flags);
 		if (page->mapping) {	/* Race with truncate? */
 			WARN_ON_ONCE(!PageUptodate(page));
-			account_page_dirtied(page, mapping, memcg);
+			account_page_dirtied(page, mapping);
 			radix_tree_tag_set(&mapping->page_tree,
 					page_index(page), PAGECACHE_TAG_DIRTY);
 		}
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	}
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	return newly_dirty;
include/linux/memcontrol.h +17 −18
@@ -455,42 +455,42 @@ bool mem_cgroup_oom_synchronize(bool wait);
 extern int do_swap_account;
 #endif
 
-struct mem_cgroup *lock_page_memcg(struct page *page);
-void unlock_page_memcg(struct mem_cgroup *memcg);
+void lock_page_memcg(struct page *page);
+void unlock_page_memcg(struct page *page);
 
 /**
  * mem_cgroup_update_page_stat - update page state statistics
- * @memcg: memcg to account against
+ * @page: the page
  * @idx: page state item to account
  * @val: number of pages (positive or negative)
  *
  * Callers must use lock_page_memcg() to prevent double accounting
  * when the page is concurrently being moved to another memcg:
  *
- *   memcg = lock_page_memcg(page);
+ *   lock_page_memcg(page);
  *   if (TestClearPageState(page))
- *     mem_cgroup_update_page_stat(memcg, state, -1);
- *   unlock_page_memcg(memcg);
+ *     mem_cgroup_update_page_stat(page, state, -1);
+ *   unlock_page_memcg(page);
  */
-static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_update_page_stat(struct page *page,
 				 enum mem_cgroup_stat_index idx, int val)
 {
 	VM_BUG_ON(!rcu_read_lock_held());
 
-	if (memcg)
-		this_cpu_add(memcg->stat->count[idx], val);
+	if (page->mem_cgroup)
+		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
 }
 
-static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_inc_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
-	mem_cgroup_update_page_stat(memcg, idx, 1);
+	mem_cgroup_update_page_stat(page, idx, 1);
 }
 
-static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_dec_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
-	mem_cgroup_update_page_stat(memcg, idx, -1);
+	mem_cgroup_update_page_stat(page, idx, -1);
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -661,12 +661,11 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline struct mem_cgroup *lock_page_memcg(struct page *page)
+static inline void lock_page_memcg(struct page *page)
 {
-	return NULL;
 }
 
-static inline void unlock_page_memcg(struct mem_cgroup *memcg)
+static inline void unlock_page_memcg(struct page *page)
 {
 }
 
@@ -692,12 +691,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
 	return false;
 }
 
-static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_inc_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
 }
 
-static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_dec_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
 }
include/linux/mm.h +2 −3
@@ -1291,10 +1291,9 @@ int __set_page_dirty_nobuffers(struct page *page);
 int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
 				struct page *page);
-void account_page_dirtied(struct page *page, struct address_space *mapping,
-			  struct mem_cgroup *memcg);
+void account_page_dirtied(struct page *page, struct address_space *mapping);
 void account_page_cleaned(struct page *page, struct address_space *mapping,
-			  struct mem_cgroup *memcg, struct bdi_writeback *wb);
+			  struct bdi_writeback *wb);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 void cancel_dirty_page(struct page *page);
include/linux/pagemap.h +1 −2
@@ -663,8 +663,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 				pgoff_t index, gfp_t gfp_mask);
 extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow,
-				     struct mem_cgroup *memcg);
+extern void __delete_from_page_cache(struct page *page, void *shadow);
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 
 /*