Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6bbf85fe authored by Shiraz Hashim's avatar Shiraz Hashim Committed by Gerrit - the friendly Code Review server
Browse files

mm: zcache: fix locking sequence



Deadlock is observed in zcache reclaim paths due to
different locking sequence.

Core#0:				    Core#1:
 |spin_bug()                         |do_raw_write_lock()
 |do_raw_spin_lock()                 |_raw_write_lock_irqsave()
 |_raw_spin_lock_irqsave()           |zcache_rbnode_isolate()
 |zcache_flush_inode()               |zcache_load_delete_zaddr()
 |__cleancache_invalidate_inode()    |zcache_evict_zpage()
 |truncate_inode_pages_range()       |zbud_reclaim_page()
 |truncate_inode_pages()             |zcache_scan()
 |truncate_inode_pages_final()       |shrink_slab_node()
 |ext4_evict_inode()                 |shrink_slab()
 |evict()                            |try_to_free_pages()
 |dispose_list()                     |__alloc_pages_nodemask()
 |prune_icache_sb()                  |alloc_kmem_pages_node()
 |super_cache_scan()                 |copy_process.part.52()
 |shrink_slab_node()                 |do_fork()
 |shrink_slab()                      |sys_clone()
 |kswapd_shrink_zone.constprop       |el0_svc()
 |balance_pgdat()
 |kswapd()
 |kthread()
 |ret_from_fork()

The deadlock happens because alternate sequences are
followed while taking
 zpool->rb_lock  (protects zpool rb tree), and
 rbnode->ra_lock (protects radix tree maintained by rbtree node)

Fix the sequence of locks being taken to avoid deadlock.

Change-Id: I32db23268f63eb8eb5aee30e4462c190e2e02f48
Signed-off-by: Shiraz Hashim <shashim@codeaurora.org>
parent dd0af8d2
Loading
Loading
Loading
Loading
+18 −5
Original line number Diff line number Diff line
@@ -569,10 +569,17 @@ static int zcache_store_zaddr(struct zcache_pool *zpool,
	/* Insert zcache_ra_handle to ratree */
	ret = radix_tree_insert(&rbnode->ratree, ra_index,
				(void *)zaddr);
	if (unlikely(ret))
		if (zcache_rbnode_empty(rbnode))
			zcache_rbnode_isolate(zpool, rbnode, 0);
	spin_unlock_irqrestore(&rbnode->ra_lock, flags);
	if (unlikely(ret)) {
		write_lock_irqsave(&zpool->rb_lock, flags);
		spin_lock(&rbnode->ra_lock);

		if (zcache_rbnode_empty(rbnode))
			zcache_rbnode_isolate(zpool, rbnode, 1);

		spin_unlock(&rbnode->ra_lock);
		write_unlock_irqrestore(&zpool->rb_lock, flags);
	}

	kref_put(&rbnode->refcount, zcache_rbnode_release);
	return ret;
@@ -598,10 +605,16 @@ static void *zcache_load_delete_zaddr(struct zcache_pool *zpool,

	spin_lock_irqsave(&rbnode->ra_lock, flags);
	zaddr = radix_tree_delete(&rbnode->ratree, ra_index);
	if (zcache_rbnode_empty(rbnode))
		zcache_rbnode_isolate(zpool, rbnode, 0);
	spin_unlock_irqrestore(&rbnode->ra_lock, flags);

	/* rb_lock and ra_lock must be taken again in the given sequence */
	write_lock_irqsave(&zpool->rb_lock, flags);
	spin_lock(&rbnode->ra_lock);
	if (zcache_rbnode_empty(rbnode))
		zcache_rbnode_isolate(zpool, rbnode, 1);
	spin_unlock(&rbnode->ra_lock);
	write_unlock_irqrestore(&zpool->rb_lock, flags);

	kref_put(&rbnode->refcount, zcache_rbnode_release);
out:
	return zaddr;