Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 48c0d9ec authored by Linus Torvalds
Browse files
* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: hold trans_mutex when using btrfs_record_root_in_trans
  Btrfs: make a lockdep class for the extent buffer locks
  Btrfs: fs/btrfs/volumes.c: remove useless kzalloc
  Btrfs: remove unused code in split_state()
  Btrfs: remove btrfs_init_path
  Btrfs: balance_level checks !child after access
  Btrfs: Avoid using __GFP_HIGHMEM with slab allocator
  Btrfs: don't clean old snapshots on sync(1)
  Btrfs: use larger metadata clusters in ssd mode
  Btrfs: process mount options on mount -o remount,
  Btrfs: make sure all pending extent operations are complete
parents 0637810f 24562425
Loading
Loading
Loading
Loading
+37 −21
Original line number Diff line number Diff line
@@ -38,19 +38,12 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot);

/* Zero every field of the path. Removed by this commit (see "Btrfs: remove
 * btrfs_init_path" above): callers now get a zeroed path directly from
 * kmem_cache_zalloc in btrfs_alloc_path. */
inline void btrfs_init_path(struct btrfs_path *p)
{
	memset(p, 0, sizeof(*p));
}

struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_alloc(btrfs_path_cachep, GFP_NOFS);
	if (path) {
		btrfs_init_path(path);
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	if (path)
		path->reada = 1;
	}
	return path;
}

@@ -69,14 +62,38 @@ noinline void btrfs_set_path_blocking(struct btrfs_path *p)

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p)
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held)
		btrfs_set_lock_blocking(held);
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i])
			btrfs_clear_lock_blocking(p->nodes[i]);
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking(held);
#endif
}

/* this also releases the path */
@@ -286,7 +303,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
						  trans->transid, level, &ins);
		BUG_ON(ret);
		cow = btrfs_init_new_buffer(trans, root, prealloc_dest,
					    buf->len);
					    buf->len, level);
	} else {
		cow = btrfs_alloc_free_block(trans, root, buf->len,
					     parent_start,
@@ -917,9 +934,9 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		BUG_ON(!child);
		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		BUG_ON(!child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 0);
		BUG_ON(ret);

@@ -1566,7 +1583,7 @@ cow_done:
		if (!p->skip_locking)
			p->locks[level] = 1;

		btrfs_clear_path_blocking(p);
		btrfs_clear_path_blocking(p, NULL);

		/*
		 * we have a lock on b and as long as we aren't changing
@@ -1605,7 +1622,7 @@ cow_done:

				btrfs_set_path_blocking(p);
				sret = split_node(trans, root, p, level);
				btrfs_clear_path_blocking(p);
				btrfs_clear_path_blocking(p, NULL);

				BUG_ON(sret > 0);
				if (sret) {
@@ -1625,7 +1642,7 @@ cow_done:

				btrfs_set_path_blocking(p);
				sret = balance_level(trans, root, p, level);
				btrfs_clear_path_blocking(p);
				btrfs_clear_path_blocking(p, NULL);

				if (sret) {
					ret = sret;
@@ -1688,13 +1705,13 @@ cow_done:
			if (!p->skip_locking) {
				int lret;

				btrfs_clear_path_blocking(p);
				btrfs_clear_path_blocking(p, NULL);
				lret = btrfs_try_spin_lock(b);

				if (!lret) {
					btrfs_set_path_blocking(p);
					btrfs_tree_lock(b);
					btrfs_clear_path_blocking(p);
					btrfs_clear_path_blocking(p, b);
				}
			}
		} else {
@@ -1706,7 +1723,7 @@ cow_done:
				btrfs_set_path_blocking(p);
				sret = split_leaf(trans, root, key,
						      p, ins_len, ret == 0);
				btrfs_clear_path_blocking(p);
				btrfs_clear_path_blocking(p, NULL);

				BUG_ON(sret > 0);
				if (sret) {
@@ -3926,7 +3943,6 @@ find_next_key:
				btrfs_release_path(root, path);
				goto again;
			} else {
				btrfs_clear_path_blocking(path);
				goto out;
			}
		}
@@ -3946,7 +3962,7 @@ find_next_key:
		path->locks[level - 1] = 1;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1);
		btrfs_clear_path_blocking(path);
		btrfs_clear_path_blocking(path, NULL);
	}
out:
	if (ret == 0)
+3 −8
Original line number Diff line number Diff line
@@ -43,11 +43,7 @@ struct btrfs_ordered_sum;

#define BTRFS_ACL_NOT_CACHED    ((void *)-1)

#ifdef CONFIG_LOCKDEP
# define BTRFS_MAX_LEVEL 7
#else
#define BTRFS_MAX_LEVEL 8
#endif

/* holds pointers to all of the tree roots */
#define BTRFS_ROOT_TREE_OBJECTID 1ULL
@@ -1715,7 +1711,8 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					     u64 empty_size);
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 bytenr, u32 blocksize);
					    u64 bytenr, u32 blocksize,
					    int level);
int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       u64 num_bytes, u64 parent, u64 min_bytes,
@@ -1834,9 +1831,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
void btrfs_init_path(struct btrfs_path *p);
void btrfs_set_path_blocking(struct btrfs_path *p);
void btrfs_clear_path_blocking(struct btrfs_path *p);
void btrfs_unlock_up_safe(struct btrfs_path *p, int level);

int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+45 −1
Original line number Diff line number Diff line
@@ -75,6 +75,40 @@ struct async_submit_bio {
	struct btrfs_work work;
};

/* These are used to set the lockdep class on the extent buffer locks.
 * The class is set by the readpage_end_io_hook after the buffer has
 * passed csum validation but before the pages are unlocked.
 *
 * The lockdep class is also set by btrfs_init_new_buffer on freshly
 * allocated blocks.
 *
 * The class is based on the level in the tree block, which allows lockdep
 * to know that lower nodes nest inside the locks of higher nodes.
 *
 * We also add a check to make sure the highest level of the tree is
 * the same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this
 * code needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif
static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
	/* leaf */
	"btrfs-extent-00",
	"btrfs-extent-01",
	"btrfs-extent-02",
	"btrfs-extent-03",
	"btrfs-extent-04",
	"btrfs-extent-05",
	"btrfs-extent-06",
	"btrfs-extent-07",
	/* highest possible level */
	"btrfs-extent-08",
};
#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
@@ -347,6 +381,15 @@ static int check_tree_block_fsid(struct btrfs_root *root,
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Assign the per-tree-level lockdep class and name (from the
 * btrfs_eb_class / btrfs_eb_name tables defined above) to this extent
 * buffer's lock, so lockdep can distinguish locks taken at different
 * levels of the btree rather than lumping them into one class.
 */
void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
{
	lockdep_set_class_and_name(&eb->lock,
			   &btrfs_eb_class[level],
			   btrfs_eb_name[level]);
}
#endif

static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
@@ -392,6 +435,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
	}
	found_level = btrfs_header_level(eb);

	btrfs_set_buffer_lockdep_class(eb, found_level);

	ret = csum_tree_block(root, eb, 1);
	if (ret)
		ret = -EIO;
@@ -1777,7 +1822,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_DEV_TREE_OBJECTID, dev_root);
	dev_root->track_dirty = 1;

	if (ret)
		goto fail_extent_root;

+10 −0
Original line number Diff line number Diff line
@@ -101,4 +101,14 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root);
int btree_lock_page_hook(struct page *page);


#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level);
#else
/* With lockdep disabled this is a no-op stub, so call sites need no #ifdef
 * and the call compiles away entirely. */
static inline void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb,
						 int level)
{
}
#endif
#endif
+51 −32
Original line number Diff line number Diff line
@@ -1323,8 +1323,25 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
/*
 * Drain all pending extent operations for the extent root (see "Btrfs:
 * make sure all pending extent operations are complete" in the commit
 * message above).
 *
 * Each pass runs finish_current_insert() and del_pending_extents(), then
 * re-checks both the pending_del and extent_ins trees for any remaining
 * EXTENT_WRITEBACK range.  find_first_extent_bit() returns 0 when it
 * finds one, which sends us around the loop again; we break out only
 * once both searches come up empty.
 *
 * Always returns 0.
 */
int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root)
{
	u64 start;
	u64 end;
	int ret;

	while(1) {
		finish_current_insert(trans, root->fs_info->extent_root, 1);
		del_pending_extents(trans, root->fs_info->extent_root, 1);

		/* is there more work to do? */
		ret = find_first_extent_bit(&root->fs_info->pending_del,
					    0, &start, &end, EXTENT_WRITEBACK);
		if (!ret)
			continue;
		ret = find_first_extent_bit(&root->fs_info->extent_ins,
					    0, &start, &end, EXTENT_WRITEBACK);
		if (!ret)
			continue;
		break;
	}
	return 0;
}

@@ -2211,13 +2228,12 @@ static int finish_current_insert(struct btrfs_trans_handle *trans,
	u64 end;
	u64 priv;
	u64 search = 0;
	u64 skipped = 0;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_path *path;
	struct pending_extent_op *extent_op, *tmp;
	struct list_head insert_list, update_list;
	int ret;
	int num_inserts = 0, max_inserts;
	int num_inserts = 0, max_inserts, restart = 0;

	path = btrfs_alloc_path();
	INIT_LIST_HEAD(&insert_list);
@@ -2233,19 +2249,19 @@ again:
		ret = find_first_extent_bit(&info->extent_ins, search, &start,
					    &end, EXTENT_WRITEBACK);
		if (ret) {
			if (skipped && all && !num_inserts &&
			if (restart && !num_inserts &&
			    list_empty(&update_list)) {
				skipped = 0;
				restart = 0;
				search = 0;
				continue;
			}
			mutex_unlock(&info->extent_ins_mutex);
			break;
		}

		ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS);
		if (!ret) {
			skipped = 1;
			if (all)
				restart = 1;
			search = end + 1;
			if (need_resched()) {
				mutex_unlock(&info->extent_ins_mutex);
@@ -2264,7 +2280,7 @@ again:
			list_add_tail(&extent_op->list, &insert_list);
			search = end + 1;
			if (num_inserts == max_inserts) {
				mutex_unlock(&info->extent_ins_mutex);
				restart = 1;
				break;
			}
		} else if (extent_op->type == PENDING_BACKREF_UPDATE) {
@@ -2280,7 +2296,6 @@ again:
	 * somebody marked this thing for deletion then just unlock it and be
	 * done, the free_extents will handle it
	 */
	mutex_lock(&info->extent_ins_mutex);
	list_for_each_entry_safe(extent_op, tmp, &update_list, list) {
		clear_extent_bits(&info->extent_ins, extent_op->bytenr,
				  extent_op->bytenr + extent_op->num_bytes - 1,
@@ -2302,6 +2317,10 @@ again:
	if (!list_empty(&update_list)) {
		ret = update_backrefs(trans, extent_root, path, &update_list);
		BUG_ON(ret);

		/* we may have COW'ed new blocks, so lets start over */
		if (all)
			restart = 1;
	}

	/*
@@ -2309,9 +2328,9 @@ again:
	 * need to make sure everything is cleaned then reset everything and
	 * go back to the beginning
	 */
	if (!num_inserts && all && skipped) {
	if (!num_inserts && restart) {
		search = 0;
		skipped = 0;
		restart = 0;
		INIT_LIST_HEAD(&update_list);
		INIT_LIST_HEAD(&insert_list);
		goto again;
@@ -2368,27 +2387,19 @@ again:
	BUG_ON(ret);

	/*
	 * if we broke out of the loop in order to insert stuff because we hit
	 * the maximum number of inserts at a time we can handle, then loop
	 * back and pick up where we left off
	 */
	if (num_inserts == max_inserts) {
		INIT_LIST_HEAD(&insert_list);
		INIT_LIST_HEAD(&update_list);
		num_inserts = 0;
		goto again;
	}

	/*
	 * again, if we need to make absolutely sure there are no more pending
	 * extent operations left and we know that we skipped some, go back to
	 * the beginning and do it all again
	 * if restart is set for whatever reason we need to go back and start
	 * searching through the pending list again.
	 *
	 * We just inserted some extents, which could have resulted in new
	 * blocks being allocated, which would result in new blocks needing
	 * updates, so if all is set we _must_ restart to get the updated
	 * blocks.
	 */
	if (all && skipped) {
	if (restart || all) {
		INIT_LIST_HEAD(&insert_list);
		INIT_LIST_HEAD(&update_list);
		search = 0;
		skipped = 0;
		restart = 0;
		num_inserts = 0;
		goto again;
	}
@@ -2709,6 +2720,8 @@ again:
		goto again;
	}

	if (!err)
		finish_current_insert(trans, extent_root, 0);
	return err;
}

@@ -2859,6 +2872,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,

	if (data & BTRFS_BLOCK_GROUP_METADATA) {
		last_ptr = &root->fs_info->last_alloc;
		if (!btrfs_test_opt(root, SSD))
			empty_cluster = 64 * 1024;
	}

@@ -3402,7 +3416,8 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans,

struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 bytenr, u32 blocksize)
					    u64 bytenr, u32 blocksize,
					    int level)
{
	struct extent_buffer *buf;

@@ -3410,6 +3425,7 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
	if (!buf)
		return ERR_PTR(-ENOMEM);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);

@@ -3453,7 +3469,8 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
		return ERR_PTR(ret);
	}

	buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
				    blocksize, level);
	return buf;
}

@@ -5641,7 +5658,9 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
			prev_block = block_start;
		}

		mutex_lock(&extent_root->fs_info->trans_mutex);
		btrfs_record_root_in_trans(found_root);
		mutex_unlock(&extent_root->fs_info->trans_mutex);
		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			/*
			 * try to update data extent references while
Loading