Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5c95b531 authored by jianzhou, committed by Gerrit (the friendly Code Review server)
Browse files

Revert "mm: memcontrol: fix NULL pointer crash in test_clear_page_writeback()"



This reverts commit d4a74286.
LTS android-4.9.183 (a80a7ab5) conflicts with the reverted code.
Both changes accomplish the same thing, so keep the LTS solution.

Change-Id: If64be3448dc5d4fdc66cb119aa46b52b341da8fd
Signed-off-by: jianzhou <jianzhou@codeaurora.org>
parent 7acc8a09
Loading
Loading
Loading
Loading
+5 −28
Original line number Diff line number Diff line
@@ -490,21 +490,9 @@ bool mem_cgroup_oom_synchronize(bool wait);
extern int do_swap_account;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

/*
 * __mem_cgroup_update_page_stat - add @val to a per-memcg page statistic
 * @page:  page the statistic relates to (used only for the lock assertion)
 * @memcg: memcg to charge, as returned by lock_page_memcg(); may be NULL
 * @idx:   which counter in memcg->stat->count[] to adjust
 * @val:   signed delta to add
 *
 * Takes @memcg explicitly instead of reading page->mem_cgroup, so it can
 * be called after a locked section in which @page may already have been
 * freed (see the writeback note in test_clear_page_writeback()).
 */
static inline void __mem_cgroup_update_page_stat(struct page *page,
						 struct mem_cgroup *memcg,
						 enum mem_cgroup_stat_index idx,
						 int val)
{
	/* Caller must hold rcu_read_lock() or have @page locked. */
	VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));

	/*
	 * NULL @memcg means the page has no memcg binding; nothing to count.
	 * NOTE(review): the ->stat NULL check is not explained in this view —
	 * presumably it guards a not-fully-initialized memcg; confirm against
	 * the full tree.
	 */
	if (memcg && memcg->stat)
		this_cpu_add(memcg->stat->count[idx], val);
}

/**
 * mem_cgroup_update_page_stat - update page state statistics
 * @page: the page
@@ -520,12 +508,13 @@ static inline void __mem_cgroup_update_page_stat(struct page *page,
 *     mem_cgroup_update_page_stat(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 */

static inline void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_stat_index idx, int val)
{
	VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));

	__mem_cgroup_update_page_stat(page, page->mem_cgroup, idx, val);
	if (page->mem_cgroup)
		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
@@ -720,12 +709,7 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

/*
 * Stub used when the memory controller is compiled out (presumably the
 * !CONFIG_MEMCG branch — the guarding #ifdef is outside this view): there
 * is no memcg to pin, so always returns NULL.
 */
static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
static inline void lock_page_memcg(struct page *page)
{
}

@@ -761,13 +745,6 @@ static inline void mem_cgroup_update_page_stat(struct page *page,
{
}

/*
 * No-op counterpart of __mem_cgroup_update_page_stat() for builds without
 * the memory controller (the guarding #ifdef is outside this view): all
 * per-memcg page-stat updates vanish.
 */
static inline void __mem_cgroup_update_page_stat(struct page *page,
						 struct mem_cgroup *memcg,
						 enum mem_cgroup_stat_index idx,
						 int nr)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
+12 −31
Original line number Diff line number Diff line
@@ -1619,13 +1619,9 @@ bool mem_cgroup_oom_synchronize(bool handle)
 * @page: the page
 *
 * This function protects unlocked LRU pages from being moved to
 * another cgroup.
 *
 * It ensures lifetime of the returned memcg. Caller is responsible
 * for the lifetime of the page; __unlock_page_memcg() is available
 * when @page might get freed inside the locked section.
 * another cgroup and stabilizes their page->mem_cgroup binding.
 */
struct mem_cgroup *lock_page_memcg(struct page *page)
void lock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;
@@ -1634,24 +1630,18 @@ struct mem_cgroup *lock_page_memcg(struct page *page)
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 *
	 * The RCU lock also protects the memcg from being freed when
	 * the page state that is going to change is the only thing
	 * preventing the page itself from being freed. E.g. writeback
	 * doesn't hold a page reference and relies on PG_writeback to
	 * keep off truncation, migration and so forth.
	 */
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return NULL;
		return;
again:
	memcg = page->mem_cgroup;
	if (unlikely(!memcg))
		return NULL;
		return;

	if (atomic_read(&memcg->moving_account) <= 0)
		return memcg;
		return;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != page->mem_cgroup) {
@@ -1667,18 +1657,18 @@ struct mem_cgroup *lock_page_memcg(struct page *page)
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;

	return memcg;
	return;
}
EXPORT_SYMBOL(lock_page_memcg);

/**
 * __unlock_page_memcg - unlock and unpin a memcg
 * @memcg: the memcg
 *
 * Unlock and unpin a memcg returned by lock_page_memcg().
 * unlock_page_memcg - unlock a page->mem_cgroup binding
 * @page: the page
 */
void __unlock_page_memcg(struct mem_cgroup *memcg)
void unlock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

@@ -1690,15 +1680,6 @@ void __unlock_page_memcg(struct mem_cgroup *memcg)

	rcu_read_unlock();
}

/**
 * unlock_page_memcg - unlock a page->mem_cgroup binding
 * @page: the page
 *
 * Counterpart to lock_page_memcg(). Delegates to __unlock_page_memcg(),
 * which drops the memcg move_lock (if this task took it) and exits the
 * RCU read-side section. Dereferences page->mem_cgroup, so @page must
 * still be alive here; callers for which the page may already be freed
 * use __unlock_page_memcg() directly.
 */
void unlock_page_memcg(struct page *page)
{
	__unlock_page_memcg(page->mem_cgroup);
}
EXPORT_SYMBOL(unlock_page_memcg);

/*
+3 −11
Original line number Diff line number Diff line
@@ -2704,10 +2704,9 @@ EXPORT_SYMBOL(clear_page_dirty_for_io);
int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct mem_cgroup *memcg;
	int ret;

	memcg = lock_page_memcg(page);
	lock_page_memcg(page);
	if (mapping && mapping_use_writeback_tags(mapping)) {
		struct inode *inode = mapping->host;
		struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -2735,20 +2734,13 @@ int test_clear_page_writeback(struct page *page)
	} else {
		ret = TestClearPageWriteback(page);
	}
	/*
	 * NOTE: Page might be free now! Writeback doesn't hold a page
	 * reference on its own, it relies on truncation to wait for
	 * the clearing of PG_writeback. The below can only access
	 * page state that is static across allocation cycles.
	 */
	if (ret) {
		__mem_cgroup_update_page_stat(page, memcg,
					      MEM_CGROUP_STAT_WRITEBACK, -1);
		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
		dec_node_page_state(page, NR_WRITEBACK);
		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
		inc_node_page_state(page, NR_WRITTEN);
	}
	__unlock_page_memcg(memcg);
	unlock_page_memcg(page);
	return ret;
}