Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5cd57b2c authored by Chris Mason
Browse files

Btrfs: Add a skip_locking parameter to struct path, and make various funcs honor it



Allocations may need to read in block groups from the extent allocation tree,
which will require a tree search and take locks on the extent allocation
tree.  But, those locks might already be held in other places, leading
to deadlocks.

Since the alloc_mutex serializes everything right now, it is safe to
skip the btree locking while caching block groups.  A better fix will be
to either create a recursive lock or find a way to back off existing
locks while caching block groups.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 168fd7d2
Loading
Loading
Loading
Loading
+18 −14
Original line number Diff line number Diff line
@@ -64,6 +64,7 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
{
	int i;
	int keep = p->keep_locks;
	int skip = p->skip_locking;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i])
@@ -76,6 +77,7 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
	}
	memset(p, 0, sizeof(*p));
	p->keep_locks = keep;
	p->skip_locking = skip;
}

struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
@@ -1262,6 +1264,9 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
	if (ins_len < 0)
		lowest_unlock = 2;
again:
	if (p->skip_locking)
		b = btrfs_root_node(root);
	else
		b = btrfs_lock_root_node(root);

	while (b) {
@@ -1282,6 +1287,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
			WARN_ON(1);
		level = btrfs_header_level(b);
		p->nodes[level] = b;
		if (!p->skip_locking)
			p->locks[level] = 1;
		ret = check_block(root, p, level);
		if (ret)
@@ -1349,6 +1355,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
					b = read_node_slot(root, b, slot);
				}
			}
			if (!p->skip_locking)
				btrfs_tree_lock(b);
			unlock_up(p, level, lowest_unlock);
		} else {
@@ -1392,13 +1399,6 @@ static int fixup_low_keys(struct btrfs_trans_handle *trans,
			break;
		t = path->nodes[i];
		btrfs_set_node_key(t, key, tslot);
		if (!btrfs_tree_locked(path->nodes[i])) {
			int ii;
printk("fixup without lock on level %d\n", btrfs_header_level(path->nodes[i]));
			for (ii = 0; ii < BTRFS_MAX_LEVEL; ii++) {
printk("level %d slot %d\n", ii, path->slots[ii]);
			}
		}
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
@@ -3033,8 +3033,10 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
			reada_for_search(root, path, level, slot, 0);

		next = read_node_slot(root, c, slot);
		if (!path->skip_locking) {
			WARN_ON(!btrfs_tree_locked(c));
			btrfs_tree_lock(next);
		}
		break;
	}
	path->slots[level] = slot;
@@ -3052,9 +3054,11 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
		if (level == 1 && path->locks[1] && path->reada)
			reada_for_search(root, path, level, slot, 0);
		next = read_node_slot(root, next, 0);
		if (!path->skip_locking) {
			WARN_ON(!btrfs_tree_locked(path->nodes[level]));
			btrfs_tree_lock(next);
		}
	}
done:
	unlock_up(path, 0, 1);
	return 0;
+1 −0
Original line number Diff line number Diff line
@@ -335,6 +335,7 @@ struct btrfs_path {
	int reada;
	/* keep some upper locks as we walk down */
	int keep_locks;
	int skip_locking;
	int lowest_level;
};

+6 −0
Original line number Diff line number Diff line
@@ -88,6 +88,12 @@ static int cache_block_group(struct btrfs_root *root,
		return -ENOMEM;

	path->reada = 2;
	/*
	 * we get into deadlocks with paths held by callers of this function.
	 * since the alloc_mutex is protecting things right now, just
	 * skip the locking here
	 */
	path->skip_locking = 1;
	first_free = block_group->key.objectid;
	key.objectid = block_group->key.objectid;
	key.offset = 0;