Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2467fe92 authored by Linux Build Service Account; committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm: memcontrol: fix NULL pointer crash in test_clear_page_writeback()"

parents b86b6fe3 d4a74286
Loading
Loading
Loading
Loading
+28 −5
Original line number Diff line number Diff line
@@ -490,9 +490,21 @@ bool mem_cgroup_oom_synchronize(bool wait);
extern int do_swap_account;
#endif

/*
 * Pin page->mem_cgroup against concurrent cgroup moves. Returns the
 * pinned memcg (or NULL); release with unlock_page_memcg(page), or with
 * __unlock_page_memcg(memcg) when the page may be freed inside the
 * locked section.
 */
struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

/*
 * Adjust the per-cpu statistic @idx of @memcg by @val on behalf of @page.
 * The caller must hold the RCU read lock or have @page locked so the
 * memcg binding is stable; a NULL or stat-less memcg is a no-op.
 */
static inline void __mem_cgroup_update_page_stat(struct page *page,
						 struct mem_cgroup *memcg,
						 enum mem_cgroup_stat_index idx,
						 int val)
{
	VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));

	if (!memcg || !memcg->stat)
		return;

	this_cpu_add(memcg->stat->count[idx], val);
}

/**
 * mem_cgroup_update_page_stat - update page state statistics
 * @page: the page
@@ -508,13 +520,12 @@ void unlock_page_memcg(struct page *page);
 *     mem_cgroup_update_page_stat(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 */

static inline void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_stat_index idx, int val)
{
	/*
	 * Delegate entirely to the helper: it performs the locking
	 * assertion and the per-cpu update, so doing either here as
	 * well would double-account the statistic.
	 */
	__mem_cgroup_update_page_stat(page, page->mem_cgroup, idx, val);
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
@@ -709,7 +720,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void lock_page_memcg(struct page *page)
static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

/*
 * No-op stub counterpart of __unlock_page_memcg(): with memcg support
 * compiled out there is no move_lock or RCU pin to release.
 */
static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

@@ -745,6 +761,13 @@ static inline void mem_cgroup_update_page_stat(struct page *page,
{
}

/*
 * No-op stub matching __mem_cgroup_update_page_stat(): page statistics
 * are not accounted when memcg support is configured out.
 */
static inline void __mem_cgroup_update_page_stat(struct page *page,
						 struct mem_cgroup *memcg,
						 enum mem_cgroup_stat_index idx,
						 int nr)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
+31 −12
Original line number Diff line number Diff line
@@ -1619,9 +1619,13 @@ bool mem_cgroup_oom_synchronize(bool handle)
 * @page: the page
 *
 * This function protects unlocked LRU pages from being moved to
 * another cgroup.
 *
 * It ensures lifetime of the returned memcg. Caller is responsible
 * for the lifetime of the page; __unlock_page_memcg() is available
 * when @page might get freed inside the locked section.
 */
void lock_page_memcg(struct page *page)
struct mem_cgroup *lock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;
@@ -1630,18 +1634,24 @@ void lock_page_memcg(struct page *page)
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 *
	 * The RCU lock also protects the memcg from being freed when
	 * the page state that is going to change is the only thing
	 * preventing the page itself from being freed. E.g. writeback
	 * doesn't hold a page reference and relies on PG_writeback to
	 * keep off truncation, migration and so forth.
         */
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return;
		return NULL;
again:
	memcg = page->mem_cgroup;
	if (unlikely(!memcg))
		return;
		return NULL;

	if (atomic_read(&memcg->moving_account) <= 0)
		return;
		return memcg;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != page->mem_cgroup) {
@@ -1657,18 +1667,18 @@ void lock_page_memcg(struct page *page)
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;

	return;
	return memcg;
}
EXPORT_SYMBOL(lock_page_memcg);

/**
 * __unlock_page_memcg - unlock and unpin a memcg
 * @memcg: the memcg
 *
 * Unlock and unpin a memcg returned by lock_page_memcg().
 */
void unlock_page_memcg(struct page *page)
void __unlock_page_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

@@ -1680,6 +1690,15 @@ void unlock_page_memcg(struct page *page)

	rcu_read_unlock();
}

/**
 * unlock_page_memcg - unlock a page->mem_cgroup binding
 * @page: the page
 */
void unlock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	__unlock_page_memcg(memcg);
}
EXPORT_SYMBOL(unlock_page_memcg);

/*
+11 −3
Original line number Diff line number Diff line
@@ -2713,9 +2713,10 @@ EXPORT_SYMBOL(clear_page_dirty_for_io);
int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct mem_cgroup *memcg;
	int ret;

	lock_page_memcg(page);
	memcg = lock_page_memcg(page);
	if (mapping && mapping_use_writeback_tags(mapping)) {
		struct inode *inode = mapping->host;
		struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -2743,13 +2744,20 @@ int test_clear_page_writeback(struct page *page)
	} else {
		ret = TestClearPageWriteback(page);
	}
	/*
	 * NOTE: Page might be free now! Writeback doesn't hold a page
	 * reference on its own, it relies on truncation to wait for
	 * the clearing of PG_writeback. The below can only access
	 * page state that is static across allocation cycles.
	 */
	if (ret) {
		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
		__mem_cgroup_update_page_stat(page, memcg,
					      MEM_CGROUP_STAT_WRITEBACK, -1);
		dec_node_page_state(page, NR_WRITEBACK);
		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
		inc_node_page_state(page, NR_WRITTEN);
	}
	unlock_page_memcg(page);
	__unlock_page_memcg(memcg);
	return ret;
}