
Commit d54b5c13 authored by Linus Torvalds

Merge tag 'for-4.17-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
 "This contains a few fixups to the qgroup patches that were merged this
  dev cycle, unaligned access fix, blockgroup removal corner case fix
  and a small debugging output tweak"

* tag 'for-4.17-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: print-tree: debugging output enhancement
  btrfs: Fix race condition between delayed refs and blockgroup removal
  btrfs: fix unaligned access in readdir
  btrfs: Fix wrong btrfs_delalloc_release_extents parameter
  btrfs: delayed-inode: Remove wrong qgroup meta reservation calls
  btrfs: qgroup: Use independent and accurate per inode qgroup rsv
  btrfs: qgroup: Commit transaction in advance to reduce early EDQUOT
parents 37a535ed c0872323
+25 −0
@@ -459,6 +459,25 @@ struct btrfs_block_rsv {
 	unsigned short full;
 	unsigned short type;
 	unsigned short failfast;
+
+	/*
+	 * Qgroup equivalent for @size @reserved
+	 *
+	 * Unlike normal @size/@reserved for inode rsv, qgroup doesn't care
+	 * about things like csum size nor how many tree blocks it will need to
+	 * reserve.
+	 *
+	 * Qgroup cares more about net change of the extent usage.
+	 *
+	 * So for one newly inserted file extent, in worst case it will cause
+	 * leaf split and level increase, nodesize for each file extent is
+	 * already too much.
+	 *
+	 * In short, qgroup_size/reserved is the upper limit of possible needed
+	 * qgroup metadata reservation.
+	 */
+	u64 qgroup_rsv_size;
+	u64 qgroup_rsv_reserved;
 };
 
 /*
@@ -714,6 +733,12 @@ struct btrfs_delayed_root;
  */
 #define BTRFS_FS_EXCL_OP			16
 
+/*
+ * To info transaction_kthread we need an immediate commit so it doesn't
+ * need to wait for commit_interval
+ */
+#define BTRFS_FS_NEED_ASYNC_COMMIT		17
+
 struct btrfs_fs_info {
 	u8 fsid[BTRFS_FSID_SIZE];
 	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
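The comment added to struct btrfs_block_rsv above treats the qgroup reservation as a worst-case upper bound: each outstanding file extent is charged one nodesize, since in the worst case inserting it splits a leaf and grows the tree by a level. Below is a minimal, self-contained sketch of that bookkeeping; the struct and helper names are illustrative stand-ins, not btrfs code.

#include <inttypes.h>
#include <stdio.h>

struct qgroup_rsv_model {
	uint64_t nodesize;            /* tree block size, e.g. 16384 on a default mkfs */
	uint64_t qgroup_rsv_size;     /* worst-case bytes we may still need */
	uint64_t qgroup_rsv_reserved; /* worst-case bytes currently reserved */
};

/* Charge one tree block per newly outstanding file extent (worst case:
 * the insertion splits a leaf and raises the tree level). */
static void model_reserve_extent(struct qgroup_rsv_model *m)
{
	m->qgroup_rsv_size += m->nodesize;
	m->qgroup_rsv_reserved += m->nodesize;
}

/* Drop the worst-case charge once the extent has actually been accounted. */
static void model_release_extent(struct qgroup_rsv_model *m)
{
	m->qgroup_rsv_size -= m->nodesize;
	m->qgroup_rsv_reserved -= m->nodesize;
}

int main(void)
{
	struct qgroup_rsv_model m = { .nodesize = 16384 };

	model_reserve_extent(&m);
	model_reserve_extent(&m);
	model_release_extent(&m);
	printf("upper bound still reserved: %" PRIu64 " bytes\n", m.qgroup_rsv_reserved);
	return 0;
}

As the in-tree comment says, qgroups only care about the net change in extent usage, so tracking an over-estimate of one node per extent is acceptable; the byte-accurate accounting stays in the normal @size/@reserved fields.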
+16 −4
@@ -556,6 +556,12 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 	dst_rsv = &fs_info->delayed_block_rsv;
 
 	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
+
+	/*
+	 * Here we migrate space rsv from transaction rsv, since have already
+	 * reserved space when starting a transaction.  So no need to reserve
+	 * qgroup space here.
+	 */
 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
 	if (!ret) {
 		trace_btrfs_space_reservation(fs_info, "delayed_item",
@@ -577,7 +583,10 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 		return;
 
 	rsv = &fs_info->delayed_block_rsv;
-	btrfs_qgroup_convert_reserved_meta(root, item->bytes_reserved);
+	/*
+	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
+	 * to release/reserve qgroup space.
+	 */
 	trace_btrfs_space_reservation(fs_info, "delayed_item",
 				      item->key.objectid, item->bytes_reserved,
 				      0);
@@ -602,9 +611,6 @@ static int btrfs_delayed_inode_reserve_metadata(
 
 	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
 
-	ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
-	if (ret < 0)
-		return ret;
 	/*
 	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
 	 * which doesn't reserve space for speed.  This is a problem since we
@@ -616,6 +622,10 @@ static int btrfs_delayed_inode_reserve_metadata(
 	 */
 	if (!src_rsv || (!trans->bytes_reserved &&
 			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
+		ret = btrfs_qgroup_reserve_meta_prealloc(root,
+				fs_info->nodesize, true);
+		if (ret < 0)
+			return ret;
 		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
 					  BTRFS_RESERVE_NO_FLUSH);
 		/*
@@ -634,6 +644,8 @@ static int btrfs_delayed_inode_reserve_metadata(
 						      "delayed_inode",
 						      btrfs_ino(inode),
 						      num_bytes, 1);
+		} else {
+			btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
+		}
 		return ret;
 	}
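The delayed-inode hunks above stop taking qgroup metadata reservations in the paths that do not need them: migrating bytes from the transaction rsv requires no extra qgroup reservation (the transaction already holds it), while the path that adds fresh space now preallocates one nodesize of qgroup metadata and gives it back if btrfs_block_rsv_add() fails. Below is a small self-contained sketch of that reserve-then-undo-on-error pattern; the demo_-prefixed names are placeholders, not the btrfs APIs.

#include <errno.h>
#include <stdio.h>

static unsigned long demo_qgroup_reserved; /* fake global qgroup counter */

static int demo_qgroup_reserve(unsigned long bytes)
{
	demo_qgroup_reserved += bytes;
	return 0;
}

static void demo_qgroup_free(unsigned long bytes)
{
	demo_qgroup_reserved -= bytes;
}

/* Pretend the block reservation fails so the error path is exercised. */
static int demo_block_rsv_add(unsigned long bytes)
{
	(void)bytes;
	return -ENOSPC;
}

static int demo_reserve_delayed_inode(unsigned long nodesize, unsigned long num_bytes)
{
	int ret = demo_qgroup_reserve(nodesize); /* worst case: one tree block */
	if (ret < 0)
		return ret;

	ret = demo_block_rsv_add(num_bytes);
	if (ret)
		demo_qgroup_free(nodesize); /* undo the prealloc so nothing leaks */
	return ret;
}

int main(void)
{
	demo_reserve_delayed_inode(16384, 16384);
	printf("qgroup bytes still reserved: %lu\n", demo_qgroup_reserved);
	return 0;
}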
+14 −5
@@ -540,8 +540,10 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 		     struct btrfs_delayed_ref_head *head_ref,
 		     struct btrfs_qgroup_extent_record *qrecord,
 		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
-		     int action, int is_data, int *qrecord_inserted_ret,
+		     int action, int is_data, int is_system,
+		     int *qrecord_inserted_ret,
 		     int *old_ref_mod, int *new_ref_mod)
+
 {
 	struct btrfs_delayed_ref_head *existing;
 	struct btrfs_delayed_ref_root *delayed_refs;
@@ -585,6 +587,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 	head_ref->ref_mod = count_mod;
 	head_ref->must_insert_reserved = must_insert_reserved;
 	head_ref->is_data = is_data;
+	head_ref->is_system = is_system;
 	head_ref->ref_tree = RB_ROOT;
 	INIT_LIST_HEAD(&head_ref->ref_add_list);
 	RB_CLEAR_NODE(&head_ref->href_node);
@@ -772,6 +775,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 	struct btrfs_delayed_ref_root *delayed_refs;
 	struct btrfs_qgroup_extent_record *record = NULL;
 	int qrecord_inserted;
+	int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
 
 	BUG_ON(extent_op && extent_op->is_data);
 	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
@@ -800,8 +804,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 	 */
 	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
 					bytenr, num_bytes, 0, 0, action, 0,
-					&qrecord_inserted, old_ref_mod,
-					new_ref_mod);
+					is_system, &qrecord_inserted,
+					old_ref_mod, new_ref_mod);
 
 	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
 			     num_bytes, parent, ref_root, level, action);
@@ -868,7 +872,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 	 */
 	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
 					bytenr, num_bytes, ref_root, reserved,
-					action, 1, &qrecord_inserted,
+					action, 1, 0, &qrecord_inserted,
 					old_ref_mod, new_ref_mod);
 
 	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
@@ -898,9 +902,14 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
 	delayed_refs = &trans->transaction->delayed_refs;
 	spin_lock(&delayed_refs->lock);
 
+	/*
+	 * extent_ops just modify the flags of an extent and they don't result
+	 * in ref count changes, hence it's safe to pass false/0 for is_system
+	 * argument
+	 */
 	add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
 			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
-			     extent_op->is_data, NULL, NULL, NULL);
+			     extent_op->is_data, 0, NULL, NULL, NULL);
 
 	spin_unlock(&delayed_refs->lock);
 	return 0;
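These delayed-ref hunks only thread the new is_system flag (derived from ref_root == BTRFS_CHUNK_TREE_OBJECTID) into the ref head; the code that acts on it is not part of this excerpt. Presumably it lets later ref-head processing attribute a freed chunk-tree block to the SYSTEM space class rather than METADATA, which is what the blockgroup-removal race fix needs. A hedged sketch of that kind of consumer follows, using illustrative constants and types rather than the btrfs definitions.

#include <stdio.h>

#define DEMO_BLOCK_GROUP_DATA     (1u << 0)
#define DEMO_BLOCK_GROUP_SYSTEM   (1u << 1)
#define DEMO_BLOCK_GROUP_METADATA (1u << 2)

struct demo_ref_head {
	unsigned int is_data:1;
	unsigned int is_system:1;
};

/* Pick the space class a ref head should be accounted against. */
static unsigned int demo_head_space_flags(const struct demo_ref_head *head)
{
	if (head->is_data)
		return DEMO_BLOCK_GROUP_DATA;
	if (head->is_system)
		return DEMO_BLOCK_GROUP_SYSTEM;
	return DEMO_BLOCK_GROUP_METADATA;
}

int main(void)
{
	struct demo_ref_head chunk_head = { .is_data = 0, .is_system = 1 };

	printf("space flags: %u\n", demo_head_space_flags(&chunk_head));
	return 0;
}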
+1 −0
@@ -127,6 +127,7 @@ struct btrfs_delayed_ref_head {
 	 */
 	unsigned int must_insert_reserved:1;
 	unsigned int is_data:1;
+	unsigned int is_system:1;
 	unsigned int processing:1;
 };
 
+1 −0
@@ -1824,6 +1824,7 @@ static int transaction_kthread(void *arg)
 
 		now = get_seconds();
 		if (cur->state < TRANS_STATE_BLOCKED &&
+		    !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
 		    (now < cur->start_time ||
 		     now - cur->start_time < fs_info->commit_interval)) {
 			spin_unlock(&fs_info->trans_lock);
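With this hunk, transaction_kthread() also commits when BTRFS_FS_NEED_ASYNC_COMMIT is set instead of waiting out commit_interval; presumably the "qgroup: Commit transaction in advance to reduce early EDQUOT" patch in this pull is the setter, though that side is not shown here. Below is a self-contained model of the decision (ignoring the transaction-state part of the check); the demo_-prefixed names stand in for the kernel ones.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_FS_NEED_ASYNC_COMMIT (1UL << 17)

static unsigned long demo_fs_flags;

/* What a caller that wants an early commit would do: flag the fs. */
static void demo_request_async_commit(void)
{
	demo_fs_flags |= DEMO_FS_NEED_ASYNC_COMMIT;
}

/* Inverse of the "skip the commit" test in the kthread: commit when the
 * flag is set, or when commit_interval has elapsed since the transaction
 * started. */
static bool demo_should_commit(unsigned long now, unsigned long start_time,
			       unsigned long commit_interval)
{
	if (demo_fs_flags & DEMO_FS_NEED_ASYNC_COMMIT)
		return true;
	return now >= start_time && now - start_time >= commit_interval;
}

int main(void)
{
	printf("before flag: %d\n", demo_should_commit(10, 5, 30)); /* 0: interval not up */
	demo_request_async_commit();
	printf("after flag:  %d\n", demo_should_commit(10, 5, 30)); /* 1: commit now */
	return 0;
}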