Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 212a17ab authored by Linus Torvalds
Browse files

Merge branch 'for-linus-unmerged' of...

Merge branch 'for-linus-unmerged' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* 'for-linus-unmerged' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable: (45 commits)
  Btrfs: fix __btrfs_map_block on 32 bit machines
  btrfs: fix possible deadlock by clearing __GFP_FS flag
  btrfs: check link counter overflow in link(2)
  btrfs: don't mess with i_nlink of unlocked inode in rename()
  Btrfs: check return value of btrfs_alloc_path()
  Btrfs: fix OOPS of empty filesystem after balance
  Btrfs: fix memory leak of empty filesystem after balance
  Btrfs: fix return value of setflags ioctl
  Btrfs: fix unchecked memory allocations
  btrfs: make inode ref log recovery faster
  Btrfs: add btrfs_trim_fs() to handle FITRIM
  Btrfs: adjust btrfs_discard_extent() return errors and trimmed bytes
  Btrfs: make btrfs_map_block() return entire free extent for each device of RAID0/1/10/DUP
  Btrfs: make update_reserved_bytes() public
  btrfs: return EXDEV when linking from different subvolumes
  Btrfs: Per file/directory controls for COW and compression
  Btrfs: add datacow flag in inode flag
  btrfs: use GFP_NOFS instead of GFP_KERNEL
  Btrfs: check return value of read_tree_block()
  btrfs: properly access unaligned checksum buffer
  ...

Fix up trivial conflicts in fs/btrfs/volumes.c due to plug removal in
the block layer.
parents baaca1a6 d9d04879
Loading
Loading
Loading
Loading
+1 −2
Original line number Original line Diff line number Diff line
@@ -136,9 +136,8 @@ struct btrfs_inode {
	 * items we think we'll end up using, and reserved_extents is the number
	 * items we think we'll end up using, and reserved_extents is the number
	 * of extent items we've reserved metadata for.
	 * of extent items we've reserved metadata for.
	 */
	 */
	spinlock_t accounting_lock;
	atomic_t outstanding_extents;
	atomic_t outstanding_extents;
	int reserved_extents;
	atomic_t reserved_extents;


	/*
	/*
	 * ordered_data_close is set by truncate when a file that used
	 * ordered_data_close is set by truncate when a file that used
+13 −4
Original line number Original line Diff line number Diff line
@@ -340,6 +340,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,


	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		return -ENOMEM;
	atomic_set(&cb->pending_bios, 0);
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->errors = 0;
	cb->inode = inode;
	cb->inode = inode;
@@ -354,6 +356,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;


	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	if(!bio) {
		kfree(cb);
		return -ENOMEM;
	}
	bio->bi_private = cb;
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);
	atomic_inc(&cb->pending_bios);
@@ -657,8 +663,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
			atomic_inc(&cb->pending_bios);
			atomic_inc(&cb->pending_bios);


			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				btrfs_lookup_bio_sums(root, inode, comp_bio,
				ret = btrfs_lookup_bio_sums(root, inode,
						      sums);
							comp_bio, sums);
				BUG_ON(ret);
			}
			}
			sums += (comp_bio->bi_size + root->sectorsize - 1) /
			sums += (comp_bio->bi_size + root->sectorsize - 1) /
				root->sectorsize;
				root->sectorsize;
@@ -683,8 +690,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
	BUG_ON(ret);
	BUG_ON(ret);


	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
		ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
		BUG_ON(ret);
	}


	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
	BUG_ON(ret);
	BUG_ON(ret);
+16 −143
Original line number Original line Diff line number Diff line
@@ -147,10 +147,11 @@ noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
{
	struct extent_buffer *eb;
	struct extent_buffer *eb;
	spin_lock(&root->node_lock);

	eb = root->node;
	rcu_read_lock();
	eb = rcu_dereference(root->node);
	extent_buffer_get(eb);
	extent_buffer_get(eb);
	spin_unlock(&root->node_lock);
	rcu_read_unlock();
	return eb;
	return eb;
}
}


@@ -165,14 +166,8 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
	while (1) {
	while (1) {
		eb = btrfs_root_node(root);
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		btrfs_tree_lock(eb);

		if (eb == root->node)
		spin_lock(&root->node_lock);
		if (eb == root->node) {
			spin_unlock(&root->node_lock);
			break;
			break;
		}
		spin_unlock(&root->node_lock);

		btrfs_tree_unlock(eb);
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		free_extent_buffer(eb);
	}
	}
@@ -458,10 +453,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
		else
		else
			parent_start = 0;
			parent_start = 0;


		spin_lock(&root->node_lock);
		root->node = cow;
		extent_buffer_get(cow);
		extent_buffer_get(cow);
		spin_unlock(&root->node_lock);
		rcu_assign_pointer(root->node, cow);


		btrfs_free_tree_block(trans, root, buf, parent_start,
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
				      last_ref);
@@ -542,6 +535,9 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,


	ret = __btrfs_cow_block(trans, root, buf, parent,
	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
	return ret;
}
}


@@ -686,6 +682,8 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
			if (!cur) {
			if (!cur) {
				cur = read_tree_block(root, blocknr,
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
							 blocksize, gen);
				if (!cur)
					return -EIO;
			} else if (!uptodate) {
			} else if (!uptodate) {
				btrfs_read_buffer(cur, gen);
				btrfs_read_buffer(cur, gen);
			}
			}
@@ -732,122 +730,6 @@ static inline unsigned int leaf_data_end(struct btrfs_root *root,
	return btrfs_item_offset_nr(leaf, nr - 1);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
}


/*
 * extra debugging checks to make sure all the items in a key are
 * well formed and in the proper order
 */
static int check_node(struct btrfs_root *root, struct btrfs_path *path,
		      int level)
{
	struct extent_buffer *parent = NULL;
	struct extent_buffer *node = path->nodes[level];
	struct btrfs_disk_key parent_key;
	struct btrfs_disk_key node_key;
	int parent_slot;
	int slot;
	struct btrfs_key cpukey;
	u32 nritems = btrfs_header_nritems(node);

	if (path->nodes[level + 1])
		parent = path->nodes[level + 1];

	slot = path->slots[level];
	BUG_ON(nritems == 0);
	if (parent) {
		parent_slot = path->slots[level + 1];
		btrfs_node_key(parent, &parent_key, parent_slot);
		btrfs_node_key(node, &node_key, 0);
		BUG_ON(memcmp(&parent_key, &node_key,
			      sizeof(struct btrfs_disk_key)));
		BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
		       btrfs_header_bytenr(node));
	}
	BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != 0) {
		btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
		btrfs_node_key(node, &node_key, slot);
		BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
	}
	if (slot < nritems - 1) {
		btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
		btrfs_node_key(node, &node_key, slot);
		BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
	}
	return 0;
}

/*
 * extra checking to make sure all the items in a leaf are
 * well formed and in the proper order
 */
static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
		      int level)
{
	struct extent_buffer *leaf = path->nodes[level];
	struct extent_buffer *parent = NULL;
	int parent_slot;
	struct btrfs_key cpukey;
	struct btrfs_disk_key parent_key;
	struct btrfs_disk_key leaf_key;
	int slot = path->slots[0];

	u32 nritems = btrfs_header_nritems(leaf);

	if (path->nodes[level + 1])
		parent = path->nodes[level + 1];

	if (nritems == 0)
		return 0;

	if (parent) {
		parent_slot = path->slots[level + 1];
		btrfs_node_key(parent, &parent_key, parent_slot);
		btrfs_item_key(leaf, &leaf_key, 0);

		BUG_ON(memcmp(&parent_key, &leaf_key,
		       sizeof(struct btrfs_disk_key)));
		BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
		       btrfs_header_bytenr(leaf));
	}
	if (slot != 0 && slot < nritems - 1) {
		btrfs_item_key(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
		if (comp_keys(&leaf_key, &cpukey) <= 0) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d offset bad key\n", slot);
			BUG_ON(1);
		}
		if (btrfs_item_offset_nr(leaf, slot - 1) !=
		       btrfs_item_end_nr(leaf, slot)) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d offset bad\n", slot);
			BUG_ON(1);
		}
	}
	if (slot < nritems - 1) {
		btrfs_item_key(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
		BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d offset bad\n", slot);
			BUG_ON(1);
		}
	}
	BUG_ON(btrfs_item_offset_nr(leaf, 0) +
	       btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
	return 0;
}

static noinline int check_block(struct btrfs_root *root,
				struct btrfs_path *path, int level)
{
	return 0;
	if (level == 0)
		return check_leaf(root, path, level);
	return check_node(root, path, level);
}


/*
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * search for key in the extent_buffer.  The items start at offset p,
@@ -1046,9 +928,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
			goto enospc;
			goto enospc;
		}
		}


		spin_lock(&root->node_lock);
		rcu_assign_pointer(root->node, child);
		root->node = child;
		spin_unlock(&root->node_lock);


		add_root_to_dirty_list(root);
		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);
		btrfs_tree_unlock(child);
@@ -1188,7 +1068,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
		}
		}
	}
	}
	/* double check we haven't messed things up */
	/* double check we haven't messed things up */
	check_block(root, path, level);
	if (orig_ptr !=
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
		BUG();
@@ -1798,12 +1677,6 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		if (!cow)
		if (!cow)
			btrfs_unlock_up_safe(p, level + 1);
			btrfs_unlock_up_safe(p, level + 1);


		ret = check_block(root, p, level);
		if (ret) {
			ret = -1;
			goto done;
		}

		ret = bin_search(b, key, level, &slot);
		ret = bin_search(b, key, level, &slot);


		if (level != 0) {
		if (level != 0) {
@@ -2130,10 +2003,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,


	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(c);


	spin_lock(&root->node_lock);
	old = root->node;
	old = root->node;
	root->node = c;
	rcu_assign_pointer(root->node, c);
	spin_unlock(&root->node_lock);


	/* the super has an extra ref to root->node */
	/* the super has an extra ref to root->node */
	free_extent_buffer(old);
	free_extent_buffer(old);
@@ -3840,7 +3711,8 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
	unsigned long ptr;
	unsigned long ptr;


	path = btrfs_alloc_path();
	path = btrfs_alloc_path();
	BUG_ON(!path);
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
	if (!ret) {
		leaf = path->nodes[0];
		leaf = path->nodes[0];
@@ -4217,6 +4089,7 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
		}
		}
		btrfs_set_path_blocking(path);
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);
		cur = read_node_slot(root, cur, slot);
		BUG_ON(!cur);


		btrfs_tree_lock(cur);
		btrfs_tree_lock(cur);


+13 −6
Original line number Original line Diff line number Diff line
@@ -28,6 +28,7 @@
#include <linux/wait.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/kobject.h>
#include <trace/events/btrfs.h>
#include <asm/kmap_types.h>
#include <asm/kmap_types.h>
#include "extent_io.h"
#include "extent_io.h"
#include "extent_map.h"
#include "extent_map.h"
@@ -40,6 +41,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;
extern struct kmem_cache *btrfs_bit_radix_cachep;
extern struct kmem_cache *btrfs_bit_radix_cachep;
extern struct kmem_cache *btrfs_path_cachep;
extern struct kmem_cache *btrfs_path_cachep;
extern struct kmem_cache *btrfs_free_space_cachep;
struct btrfs_ordered_sum;
struct btrfs_ordered_sum;


#define BTRFS_MAGIC "_BHRfS_M"
#define BTRFS_MAGIC "_BHRfS_M"
@@ -782,9 +784,6 @@ struct btrfs_free_cluster {
	/* first extent starting offset */
	/* first extent starting offset */
	u64 window_start;
	u64 window_start;


	/* if this cluster simply points at a bitmap in the block group */
	bool points_to_bitmap;

	struct btrfs_block_group_cache *block_group;
	struct btrfs_block_group_cache *block_group;
	/*
	/*
	 * when a cluster is allocated from a block group, we put the
	 * when a cluster is allocated from a block group, we put the
@@ -1283,6 +1282,7 @@ struct btrfs_root {
#define BTRFS_INODE_NODUMP		(1 << 8)
#define BTRFS_INODE_NODUMP		(1 << 8)
#define BTRFS_INODE_NOATIME		(1 << 9)
#define BTRFS_INODE_NOATIME		(1 << 9)
#define BTRFS_INODE_DIRSYNC		(1 << 10)
#define BTRFS_INODE_DIRSYNC		(1 << 10)
#define BTRFS_INODE_COMPRESS		(1 << 11)


/* some macros to generate set/get funcs for the struct fields.  This
/* some macros to generate set/get funcs for the struct fields.  This
 * assumes there is a lefoo_to_cpu for every type, so lets make a simple
 * assumes there is a lefoo_to_cpu for every type, so lets make a simple
@@ -2157,6 +2157,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      u64 root_objectid, u64 owner, u64 offset);
		      u64 root_objectid, u64 owner, u64 offset);


int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				u64 num_bytes, int reserve, int sinfo);
int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				struct btrfs_root *root);
				struct btrfs_root *root);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
@@ -2227,10 +2229,12 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
int btrfs_error_unpin_extent_range(struct btrfs_root *root,
int btrfs_error_unpin_extent_range(struct btrfs_root *root,
				   u64 start, u64 end);
				   u64 start, u64 end);
int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
			       u64 num_bytes);
			       u64 num_bytes, u64 *actual_bytes);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 type);
			    struct btrfs_root *root, u64 type);
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);


int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
/* ctree.c */
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot);
		     int level, int *slot);
@@ -2392,6 +2396,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path, u64 dir,
					  struct btrfs_path *path, u64 dir,
					  const char *name, u16 name_len,
					  const char *name, u16 name_len,
					  int mod);
					  int mod);
int verify_dir_item(struct btrfs_root *root,
		    struct extent_buffer *leaf,
		    struct btrfs_dir_item *dir_item);


/* orphan.c */
/* orphan.c */
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
@@ -2528,7 +2535,7 @@ int btrfs_update_inode(struct btrfs_trans_handle *trans,
			      struct inode *inode);
			      struct inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
void btrfs_orphan_cleanup(struct btrfs_root *root);
int btrfs_orphan_cleanup(struct btrfs_root *root);
void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending,
				struct btrfs_pending_snapshot *pending,
				u64 *bytes_to_reserve);
				u64 *bytes_to_reserve);
@@ -2536,7 +2543,7 @@ void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending);
				struct btrfs_pending_snapshot *pending);
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root);
			      struct btrfs_root *root);
int btrfs_cont_expand(struct inode *inode, loff_t size);
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
int btrfs_invalidate_inodes(struct btrfs_root *root);
int btrfs_invalidate_inodes(struct btrfs_root *root);
void btrfs_add_delayed_iput(struct inode *inode);
void btrfs_add_delayed_iput(struct inode *inode);
void btrfs_run_delayed_iputs(struct btrfs_root *root);
void btrfs_run_delayed_iputs(struct btrfs_root *root);
+6 −0
Original line number Original line Diff line number Diff line
@@ -483,6 +483,8 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
	INIT_LIST_HEAD(&head_ref->cluster);
	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);
	mutex_init(&head_ref->mutex);


	trace_btrfs_delayed_ref_head(ref, head_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);
	existing = tree_insert(&delayed_refs->root, &ref->rb_node);


	if (existing) {
	if (existing) {
@@ -537,6 +539,8 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
	}
	}
	full_ref->level = level;
	full_ref->level = level;


	trace_btrfs_delayed_tree_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);
	existing = tree_insert(&delayed_refs->root, &ref->rb_node);


	if (existing) {
	if (existing) {
@@ -591,6 +595,8 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
	full_ref->objectid = owner;
	full_ref->objectid = owner;
	full_ref->offset = offset;
	full_ref->offset = offset;


	trace_btrfs_delayed_data_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);
	existing = tree_insert(&delayed_refs->root, &ref->rb_node);


	if (existing) {
	if (existing) {
Loading