
Commit c226fd65 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: try to free metadata pages when we free btree blocks
  Btrfs: add extra flushing for renames and truncates
  Btrfs: make sure btrfs_update_delayed_ref doesn't increase ref_mod
  Btrfs: optimize fsyncs on old files
  Btrfs: tree logging unlink/rename fixes
  Btrfs: Make sure i_nlink doesn't hit zero too soon during log replay
  Btrfs: limit balancing work while flushing delayed refs
  Btrfs: readahead checksums during btrfs_finish_ordered_io
  Btrfs: leave btree locks spinning more often
  Btrfs: Only let very young transactions grow during commit
  Btrfs: Check for a blocking lock before taking the spin
  Btrfs: reduce stack in cow_file_range
  Btrfs: reduce stalls during transaction commit
  Btrfs: process the delayed reference queue in clusters
  Btrfs: try to cleanup delayed refs while freeing extents
  Btrfs: reduce stack usage in some crucial tree balancing functions
  Btrfs: do extent allocation and reference count updates in the background
  Btrfs: don't preallocate metadata blocks during btrfs_search_slot
parents c09bca78 d57e62b8
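
The centerpiece of this pull is the delayed reference machinery (fs/btrfs/delayed-ref.c below): reference count changes are queued in an rb tree keyed by (bytenr, parent) and merged there, instead of being applied to the extent allocation tree in the middle of btrfs_search_slot. As a rough, hedged illustration of the merging rule only — toy types, not the kernel structures — consider this minimal standalone sketch:

#include <assert.h>
#include <stdio.h>

/* Toy stand-in for a queued reference modification: the real code
 * keeps struct btrfs_delayed_ref_node entries in an rb tree keyed
 * by (bytenr, parent) and folds new mods into existing entries. */
struct toy_ref {
	unsigned long long bytenr;
	unsigned long long parent;
	int ref_mod;	/* running sum of queued +1 adds / -1 drops */
};

/* Fold another modification into an existing entry.  Returns 1 when
 * the mods cancel to zero, meaning the entry can simply be dropped
 * without ever touching the extent allocation tree (roughly what
 * update_existing_ref() below does for opposing actions). */
static int toy_merge(struct toy_ref *ref, int mod)
{
	ref->ref_mod += mod;
	return ref->ref_mod == 0;
}

int main(void)
{
	struct toy_ref ref = { 4096, 4096, +1 };	/* queued add */

	assert(toy_merge(&ref, -1));	/* a later drop cancels it out */
	printf("add + drop cancelled in the rb tree\n");
	return 0;
}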
fs/btrfs/Makefile +1 −1
@@ -8,7 +8,7 @@ btrfs-y := super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
	   extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
	   extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
	   ref-cache.o export.o tree-log.o acl.o free-space-cache.o zlib.o \
-	   compression.o
+	   compression.o delayed-ref.o
else

# Normal Makefile
fs/btrfs/btrfs_inode.h +25 −6
@@ -66,6 +66,12 @@ struct btrfs_inode {
	 */
	struct list_head delalloc_inodes;

+	/*
+	 * list for tracking inodes that must be sent to disk before a
+	 * rename or truncate commit
+	 */
+	struct list_head ordered_operations;
+
	/* the space_info for where this inode's data allocations are done */
	struct btrfs_space_info *space_info;

@@ -86,12 +92,6 @@ struct btrfs_inode {
	 */
	u64 logged_trans;

-	/*
-	 * trans that last made a change that should be fully fsync'd.  This
-	 * gets reset to zero each time the inode is logged
-	 */
-	u64 log_dirty_trans;
-
	/* total number of bytes pending delalloc, used by stat to calc the
	 * real block usage of the file
	 */
@@ -121,6 +121,25 @@ struct btrfs_inode {
	/* the start of block group preferred for allocations. */
	u64 block_group;

+	/* the fsync log has some corner cases that mean we have to check
+	 * directories to see if any unlinks have been done before
+	 * the directory was logged.  See tree-log.c for all the
+	 * details
+	 */
+	u64 last_unlink_trans;
+
+	/*
+	 * ordered_data_close is set by truncate when a file that used
+	 * to have good data has been truncated to zero.  When it is set
+	 * the btrfs file release call will add this inode to the
+	 * ordered operations list so that we make sure to flush out any
+	 * new data the application may have written before commit.
+	 *
+	 * yes, it's silly to have a single bitflag, but we might grow more
+	 * of these.
+	 */
+	unsigned ordered_data_close:1;
+
	struct inode vfs_inode;
};
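
The comment on ordered_data_close describes the intended flow: truncate sets the bit, and file release then queues the inode for pre-commit flushing. A hedged sketch of that release-time check, using only the fields added in this diff — the locking here is an assumption, and the real logic lives in the file release and ordered-data paths not shown in this view:

/* Sketch only: if truncate flagged this inode, move it onto the
 * per-fs ordered_operations list so the next transaction commit
 * flushes any new data the application wrote after the truncate. */
static void sketch_queue_ordered_op(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *inode)
{
	if (!inode->ordered_data_close)
		return;
	inode->ordered_data_close = 0;
	spin_lock(&fs_info->ordered_extent_lock);	/* assumed lock */
	if (list_empty(&inode->ordered_operations))
		list_add_tail(&inode->ordered_operations,
			      &fs_info->ordered_operations);
	spin_unlock(&fs_info->ordered_extent_lock);
}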

One additional file changed (+316 −272); preview size limit exceeded, diff collapsed.

fs/btrfs/ctree.h +55 −14
@@ -45,6 +45,13 @@ struct btrfs_ordered_sum;

#define BTRFS_MAX_LEVEL 8

+/*
+ * files bigger than this get some pre-flushing when they are added
+ * to the ordered operations list.  That way we limit the total
+ * work done by the commit
+ */
+#define BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT (8 * 1024 * 1024)
+
/* holds pointers to all of the tree roots */
#define BTRFS_ROOT_TREE_OBJECTID 1ULL

@@ -401,15 +408,16 @@ struct btrfs_path {
	int locks[BTRFS_MAX_LEVEL];
	int reada;
	/* keep some upper locks as we walk down */
-	int keep_locks;
-	int skip_locking;
	int lowest_level;

	/*
	 * set by btrfs_split_item, tells search_slot to keep all locks
	 * and to force calls to keep space in the nodes
	 */
-	int search_for_split;
+	unsigned int search_for_split:1;
+	unsigned int keep_locks:1;
+	unsigned int skip_locking:1;
+	unsigned int leave_spinning:1;
};
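
The flags in struct btrfs_path are collapsed from full ints into single-bit bitfields, with leave_spinning added for the new spinning-lock behaviour. A quick standalone illustration of the space this saves, under the usual assumption of 4-byte ints:

#include <stdio.h>

struct flags_as_ints {		/* the old layout */
	int search_for_split;
	int keep_locks;
	int skip_locking;
	int leave_spinning;
};

struct flags_as_bits {		/* the new layout */
	unsigned int search_for_split:1;
	unsigned int keep_locks:1;
	unsigned int skip_locking:1;
	unsigned int leave_spinning:1;
};

int main(void)
{
	/* typically 16 bytes vs 4: the four flags share one word */
	printf("ints: %zu bytes, bitfields: %zu bytes\n",
	       sizeof(struct flags_as_ints),
	       sizeof(struct flags_as_bits));
	return 0;
}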

/*
@@ -688,15 +696,18 @@ struct btrfs_fs_info {
	struct rb_root block_group_cache_tree;

	struct extent_io_tree pinned_extents;
-	struct extent_io_tree pending_del;
-	struct extent_io_tree extent_ins;

	/* logical->physical extent mapping */
	struct btrfs_mapping_tree mapping_tree;

	u64 generation;
	u64 last_trans_committed;
-	u64 last_trans_new_blockgroup;
+
+	/*
+	 * this is updated to the current trans every time a full commit
+	 * is required instead of the faster short fsync log commits
+	 */
+	u64 last_trans_log_full_commit;
	u64 open_ioctl_trans;
	unsigned long mount_opt;
	u64 max_extent;
@@ -717,12 +728,21 @@ struct btrfs_fs_info {
	struct mutex tree_log_mutex;
	struct mutex transaction_kthread_mutex;
	struct mutex cleaner_mutex;
-	struct mutex extent_ins_mutex;
	struct mutex pinned_mutex;
	struct mutex chunk_mutex;
	struct mutex drop_mutex;
	struct mutex volume_mutex;
	struct mutex tree_reloc_mutex;
+
+	/*
+	 * this protects the ordered operations list only while we are
+	 * processing all of the entries on it.  This way we make
+	 * sure the commit code doesn't find the list temporarily empty
+	 * because another function happens to be doing non-waiting preflush
+	 * before jumping into the main commit.
+	 */
+	struct mutex ordered_operations_mutex;
+
	struct list_head trans_list;
	struct list_head hashers;
	struct list_head dead_roots;
@@ -737,9 +757,28 @@ struct btrfs_fs_info {
	 * ordered extents
	 */
	spinlock_t ordered_extent_lock;
+
+	/*
+	 * all of the data=ordered extents pending writeback
+	 * these can span multiple transactions and basically include
+	 * every dirty data page that isn't from nodatacow
+	 */
	struct list_head ordered_extents;
+
+	/*
+	 * all of the inodes that have delalloc bytes.  It is possible for
+	 * this list to be empty even when there are still dirty data=ordered
+	 * extents waiting to finish IO.
+	 */
	struct list_head delalloc_inodes;
+
+	/*
+	 * special rename and truncate targets that must be on disk before
+	 * we're allowed to commit.  This is basically the ext3 style
+	 * data=ordered list.
+	 */
+	struct list_head ordered_operations;

	/*
	 * there is a pool of worker threads for checksumming during writes
	 * and a pool for checksumming after reads.  This is because readers
@@ -781,6 +820,11 @@ struct btrfs_fs_info {
	atomic_t throttle_gen;

	u64 total_pinned;
+
+	/* protected by the delalloc lock, used to keep from writing
+	 * metadata until there is a nice batch
+	 */
+	u64 dirty_metadata_bytes;
	struct list_head dirty_cowonly_roots;

	struct btrfs_fs_devices *fs_devices;
@@ -1704,18 +1748,15 @@ static inline struct dentry *fdentry(struct file *file)
}

/* extent-tree.c */
+int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
+			   struct btrfs_root *root, unsigned long count);
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len);
-int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root, u64 bytenr,
-			    u64 num_bytes, u32 *refs);
int btrfs_update_pinned_extents(struct btrfs_root *root,
				u64 bytenr, u64 num, int pin);
int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
			struct btrfs_root *root, struct extent_buffer *leaf);
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, u64 objectid, u64 bytenr);
-int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
-			 struct btrfs_root *root);
int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy);
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
@@ -1777,7 +1818,7 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 u64 root_objectid, u64 ref_generation,
			 u64 owner_objectid);
int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root, u64 bytenr,
+			    struct btrfs_root *root, u64 bytenr, u64 num_bytes,
			    u64 orig_parent, u64 parent,
			    u64 root_objectid, u64 ref_generation,
			    u64 owner_objectid);
@@ -1838,7 +1879,7 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
-		    struct extent_buffer **cow_ret, u64 prealloc_dest);
+		    struct extent_buffer **cow_ret);
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,

fs/btrfs/delayed-ref.c (new file, mode 100644) +669 −0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/sort.h>
#include <linux/ftrace.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.   This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 *
 * Right now this code is only used for reference counted trees, but
 * the long term goal is to get rid of the similar code for delayed
 * extent tree modifications.
 */

/*
 * entries in the rb tree are ordered by the byte number of the extent
 * and by the byte number of the parent block.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref,
		      u64 bytenr, u64 parent)
{
	if (bytenr < ref->bytenr)
		return -1;
	if (bytenr > ref->bytenr)
		return 1;
	if (parent < ref->parent)
		return -1;
	if (parent > ref->parent)
		return 1;
	return 0;
}

/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  u64 bytenr, u64 parent,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	int cmp;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, bytenr, parent);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	entry = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find an entry based on (bytenr,parent).  This returns the delayed
 * ref if it was able to find one, or NULL if nothing was in that spot
 */
static struct btrfs_delayed_ref_node *tree_search(struct rb_root *root,
				  u64 bytenr, u64 parent,
				  struct btrfs_delayed_ref_node **last)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int cmp;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		cmp = comp_entry(entry, bytenr, parent);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
		tree_search(&delayed_refs->root, start, (u64)-1, &ref);
		if (ref) {
			struct btrfs_delayed_ref_node *tmp;

			node = rb_prev(&ref->rb_node);
			while (node) {
				tmp = rb_entry(node,
					       struct btrfs_delayed_ref_node,
					       rb_node);
				if (tmp->bytenr < start)
					break;
				ref = tmp;
				node = rb_prev(&ref->rb_node);
			}
			node = &ref->rb_node;
		} else
			node = rb_first(&delayed_refs->root);
	}
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;

				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find extents
				 * that are likely to end up in the same extent
				 * leaf on disk.  So, we don't want them spread
				 * all over the tree.  Stop now if we've hit
				 * a head that was already in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}

/*
 * This checks to see if there are any delayed refs in the
 * btree for a given bytenr.  It returns one if it finds any
 * and zero otherwise.
 *
 * If it only finds a head node, it returns 0.
 *
 * The idea is to use this when deciding if you can safely delete an
 * extent from the extent allocation tree.  There may be a pending
 * ref in the rbtree that adds or removes references, so as long as this
 * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
 * allocation tree.
 */
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *prev_node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	ref = tree_search(&delayed_refs->root, bytenr, (u64)-1, NULL);
	if (ref) {
		prev_node = rb_prev(&ref->rb_node);
		if (!prev_node)
			goto out;
		ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr == bytenr)
			ret = 1;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}

/*
 * helper function to lookup reference count
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  This way you
 * can check to see what the reference count would be if all of the
 * delayed refs are processed.
 */
int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 bytenr,
			    u64 num_bytes, u32 *refs)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 num_refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	delayed_refs = &trans->transaction->delayed_refs;
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (ret == 0) {
		leaf = path->nodes[0];
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_extent_item);
		num_refs = btrfs_extent_refs(leaf, ei);
	} else {
		num_refs = 0;
		ret = 0;
	}

	spin_lock(&delayed_refs->lock);
	ref = tree_search(&delayed_refs->root, bytenr, (u64)-1, NULL);
	if (ref) {
		head = btrfs_delayed_node_to_head(ref);
		if (mutex_trylock(&head->mutex)) {
			num_refs += ref->ref_mod;
			mutex_unlock(&head->mutex);
			*refs = num_refs;
			goto out;
		}

		atomic_inc(&ref->refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(root->fs_info->extent_root, path);

		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(ref);
		goto again;
	} else {
		*refs = num_refs;
	}
out:
	spin_unlock(&delayed_refs->lock);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref *existing_ref;
	struct btrfs_delayed_ref *ref;

	existing_ref = btrfs_delayed_node_to_ref(existing);
	ref = btrfs_delayed_node_to_ref(update);

	if (ref->pin)
		existing_ref->pin = 1;

	if (ref->action != existing_ref->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0) {
			rb_erase(&existing->rb_node,
				 &delayed_refs->root);
			existing->in_tree = 0;
			btrfs_put_delayed_ref(existing);
			delayed_refs->num_entries--;
			if (trans->delayed_ref_updates)
				trans->delayed_ref_updates--;
		}
	} else {
		if (existing_ref->action == BTRFS_ADD_DELAYED_REF) {
			/* if we're adding refs, make sure all the
			 * details match up.  The extent could
			 * have been totally freed and reallocated
			 * by a different owner before the delayed
			 * ref entries were removed.
			 */
			existing_ref->owner_objectid = ref->owner_objectid;
			existing_ref->generation = ref->generation;
			existing_ref->root = ref->root;
			existing->num_bytes = update->num_bytes;
		}
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}

/*
 * helper function to actually insert a delayed ref into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count in the head node and properly dealing
 * with updating existing nodes as new modifications are queued.
 */
static noinline int __btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,
			  struct btrfs_delayed_ref_node *ref,
			  u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root,
			  u64 ref_generation, u64 owner_objectid, int action,
			  int pin)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref *full_ref;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (parent == (u64)-1) {
		if (action == BTRFS_DROP_DELAYED_REF)
			count_mod = -1;
		else if (action == BTRFS_UPDATE_DELAYED_HEAD)
			count_mod = 0;
	}

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT) {
		must_insert_reserved = 1;
		action = BTRFS_ADD_DELAYED_REF;
	} else {
		must_insert_reserved = 0;
	}

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->parent = parent;
	ref->ref_mod = count_mod;
	ref->in_tree = 1;
	ref->num_bytes = num_bytes;

	if (btrfs_delayed_ref_is_head(ref)) {
		head_ref = btrfs_delayed_node_to_head(ref);
		head_ref->must_insert_reserved = must_insert_reserved;
		INIT_LIST_HEAD(&head_ref->cluster);
		mutex_init(&head_ref->mutex);
	} else {
		full_ref = btrfs_delayed_node_to_ref(ref);
		full_ref->root = ref_root;
		full_ref->generation = ref_generation;
		full_ref->owner_objectid = owner_objectid;
		full_ref->pin = pin;
		full_ref->action = action;
	}

	existing = tree_insert(&delayed_refs->root, bytenr,
			       parent, &ref->rb_node);

	if (existing) {
		if (btrfs_delayed_ref_is_head(ref))
			update_existing_head_ref(existing, ref);
		else
			update_existing_ref(trans, delayed_refs, existing, ref);

		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		if (btrfs_delayed_ref_is_head(ref)) {
			delayed_refs->num_heads++;
			delayed_refs->num_heads_ready++;
		}
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}

/*
 * add a delayed ref to the tree.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,
			  u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root,
			  u64 ref_generation, u64 owner_objectid, int action,
			  int pin)
{
	struct btrfs_delayed_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	/*
	 * the parent = 0 case comes from cases where we don't actually
	 * know the parent yet.  It will get updated later via an add/drop
	 * pair.
	 */
	if (parent == 0)
		parent = bytenr;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
				      (u64)-1, 0, 0, 0, action, pin);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
				      parent, ref_root, ref_generation,
				      owner_objectid, action, pin);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = tree_search(&delayed_refs->root, bytenr, (u64)-1, NULL);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}

/*
 * add a delayed ref to the tree.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 *
 * The main point of this call is to add and remove a backreference in a single
 * shot, taking the lock only once, and only searching for the head node once.
 *
 * It is the same as doing a ref add and delete in two separate calls.
 */
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
			  u64 bytenr, u64 num_bytes, u64 orig_parent,
			  u64 parent, u64 orig_ref_root, u64 ref_root,
			  u64 orig_ref_generation, u64 ref_generation,
			  u64 owner_objectid, int pin)
{
	struct btrfs_delayed_ref *ref;
	struct btrfs_delayed_ref *old_ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
	if (!old_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	/*
	 * the parent = 0 case comes from cases where we don't actually
	 * know the parent yet.  It will get updated later via an add/drop
	 * pair.
	 */
	if (parent == 0)
		parent = bytenr;
	if (orig_parent == 0)
		orig_parent = bytenr;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		kfree(old_ref);
		return -ENOMEM;
	}
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
				      (u64)-1, 0, 0, 0,
				      BTRFS_UPDATE_DELAYED_HEAD, 0);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
				      parent, ref_root, ref_generation,
				      owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
				      orig_parent, orig_ref_root,
				      orig_ref_generation, owner_objectid,
				      BTRFS_DROP_DELAYED_REF, pin);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
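
One subtlety worth restating: __btrfs_add_delayed_ref() treats parent == (u64)-1 as the marker for a head node, and only heads carry the net modification count for an extent. A compact, illustrative restatement of that mapping — assuming the action constants from delayed-ref.h, which is not shown in this view:

/* Illustrative only: reproduces the count_mod logic at the top of
 * __btrfs_add_delayed_ref().  Heads are keyed by parent == (u64)-1
 * and their ref_mod accumulates the net effect of all queued mods. */
static int sketch_count_mod(u64 parent, int action)
{
	if (parent != (u64)-1)
		return 1;			/* not a head node */
	if (action == BTRFS_DROP_DELAYED_REF)
		return -1;			/* a drop lowers the sum */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		return 0;			/* head-only update */
	return 1;				/* an add raises the sum */
}

This is why btrfs_add_delayed_ref() and btrfs_update_delayed_ref() always insert the head node with parent (u64)-1 before the real refs, holding the delayed_refs spinlock across both inserts.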