Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0414efae authored by Li Zefan
Browse files

Btrfs: Make the code for reading/writing free space cache generic



Extract out block group specific code from lookup_free_space_inode(),
create_free_space_inode(), load_free_space_cache() and
btrfs_write_out_cache(), so the code can be used to read/write
free ino cache.

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
parent 581bb050
Loading
Loading
Loading
Loading
+204 −154
Original line number Original line Diff line number Diff line
@@ -33,9 +33,9 @@
static int link_free_space(struct btrfs_free_space_ctl *ctl,
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
			   struct btrfs_free_space *info);


struct inode *lookup_free_space_inode(struct btrfs_root *root,
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
					       struct btrfs_path *path,
				      *block_group, struct btrfs_path *path)
					       u64 offset)
{
{
	struct btrfs_key key;
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_key location;
@@ -45,15 +45,8 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
	struct inode *inode = NULL;
	struct inode *inode = NULL;
	int ret;
	int ret;


	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.offset = offset;
	key.type = 0;
	key.type = 0;


	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -83,6 +76,27 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,


	inode->i_mapping->flags &= ~__GFP_FS;
	inode->i_mapping->flags &= ~__GFP_FS;


	return inode;
}

struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	spin_lock(&block_group->lock);
	if (!root->fs_info->closing) {
	if (!root->fs_info->closing) {
		block_group->inode = igrab(inode);
		block_group->inode = igrab(inode);
@@ -93,24 +107,18 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
	return inode;
	return inode;
}
}


int create_free_space_inode(struct btrfs_root *root,
int __create_free_space_inode(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			      struct btrfs_path *path, u64 ino, u64 offset)
			    struct btrfs_path *path)
{
{
	struct btrfs_key key;
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	struct extent_buffer *leaf;
	u64 objectid;
	int ret;
	int ret;


	ret = btrfs_find_free_objectid(root, &objectid);
	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret < 0)
		return ret;

	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
	if (ret)
	if (ret)
		return ret;
		return ret;


@@ -130,13 +138,12 @@ int create_free_space_inode(struct btrfs_root *root,
			      BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
			      BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item,
	btrfs_set_inode_block_group(leaf, inode_item, offset);
				    block_group->key.objectid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);
	btrfs_release_path(root, path);


	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.offset = offset;
	key.type = 0;
	key.type = 0;


	ret = btrfs_insert_empty_item(trans, root, path, &key,
	ret = btrfs_insert_empty_item(trans, root, path, &key,
@@ -156,6 +163,22 @@ int create_free_space_inode(struct btrfs_root *root,
	return 0;
	return 0;
}
}


int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}

int btrfs_truncate_free_space_cache(struct btrfs_root *root,
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct btrfs_path *path,
@@ -208,16 +231,13 @@ static int readahead_cache(struct inode *inode)
	return 0;
	return 0;
}
}


int load_free_space_cache(struct btrfs_fs_info *fs_info,
int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
			  struct btrfs_block_group_cache *block_group)
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_path *path, u64 offset)
{
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_free_space_header *header;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct extent_buffer *leaf;
	struct page *page;
	struct page *page;
	struct btrfs_path *path;
	u32 *checksums = NULL, *crc;
	u32 *checksums = NULL, *crc;
	char *disk_crcs = NULL;
	char *disk_crcs = NULL;
	struct btrfs_key key;
	struct btrfs_key key;
@@ -225,76 +245,47 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
	u64 num_entries;
	u64 num_entries;
	u64 num_bitmaps;
	u64 num_bitmaps;
	u64 generation;
	u64 generation;
	u64 used = btrfs_block_group_used(&block_group->item);
	u32 cur_crc = ~(u32)0;
	u32 cur_crc = ~(u32)0;
	pgoff_t index = 0;
	pgoff_t index = 0;
	unsigned long first_page_offset;
	unsigned long first_page_offset;
	int num_checksums;
	int num_checksums;
	int ret = 0;
	int ret = 0, ret2;

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	smp_mb();
	if (fs_info->closing)
		return 0;

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);


	INIT_LIST_HEAD(&bitmaps);
	INIT_LIST_HEAD(&bitmaps);


	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* Nothing in the space cache, goodbye */
	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode)) {
	if (!i_size_read(inode))
		btrfs_free_path(path);
		goto out;
		goto out;
	}


	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.offset = offset;
	key.type = 0;
	key.type = 0;


	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
	if (ret < 0)
		btrfs_free_path(path);
		goto out;
	else if (ret > 0) {
		btrfs_release_path(root, path);
		ret = 0;
		goto out;
		goto out;
	}
	}


	ret = -1;

	leaf = path->nodes[0];
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_free_path(path);
	btrfs_release_path(root, path);


	if (BTRFS_I(inode)->generation != generation) {
	if (BTRFS_I(inode)->generation != generation) {
		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
		       " not match free space cache generation (%llu) for "
		       " not match free space cache generation (%llu)\n",
		       "block group %llu\n",
		       (unsigned long long)BTRFS_I(inode)->generation,
		       (unsigned long long)BTRFS_I(inode)->generation,
		       (unsigned long long)generation,
		       (unsigned long long)generation);
		       (unsigned long long)block_group->key.objectid);
		goto out;
		goto free_cache;
	}
	}


	if (!num_entries)
	if (!num_entries)
@@ -311,10 +302,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
		goto out;
		goto out;


	ret = readahead_cache(inode);
	ret = readahead_cache(inode);
	if (ret) {
	if (ret)
		ret = 0;
		goto out;
		goto out;
	}


	while (1) {
	while (1) {
		struct btrfs_free_space_entry *entry;
		struct btrfs_free_space_entry *entry;
@@ -333,10 +322,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
		}
		}


		page = grab_cache_page(inode->i_mapping, index);
		page = grab_cache_page(inode->i_mapping, index);
		if (!page) {
		if (!page)
			ret = 0;
			goto free_cache;
			goto free_cache;
		}


		if (!PageUptodate(page)) {
		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			btrfs_readpage(NULL, page);
@@ -345,9 +332,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
				unlock_page(page);
				unlock_page(page);
				page_cache_release(page);
				page_cache_release(page);
				printk(KERN_ERR "btrfs: error reading free "
				printk(KERN_ERR "btrfs: error reading free "
				       "space cache: %llu\n",
				       "space cache\n");
				       (unsigned long long)
				       block_group->key.objectid);
				goto free_cache;
				goto free_cache;
			}
			}
		}
		}
@@ -360,13 +345,10 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
			gen = addr + (sizeof(u32) * num_checksums);
			gen = addr + (sizeof(u32) * num_checksums);
			if (*gen != BTRFS_I(inode)->generation) {
			if (*gen != BTRFS_I(inode)->generation) {
				printk(KERN_ERR "btrfs: space cache generation"
				printk(KERN_ERR "btrfs: space cache generation"
				       " (%llu) does not match inode (%llu) "
				       " (%llu) does not match inode (%llu)\n",
				       "for block group %llu\n",
				       (unsigned long long)*gen,
				       (unsigned long long)*gen,
				       (unsigned long long)
				       (unsigned long long)
				       BTRFS_I(inode)->generation,
				       BTRFS_I(inode)->generation);
				       (unsigned long long)
				       block_group->key.objectid);
				kunmap(page);
				kunmap(page);
				unlock_page(page);
				unlock_page(page);
				page_cache_release(page);
				page_cache_release(page);
@@ -382,9 +364,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
					  PAGE_CACHE_SIZE - start_offset);
					  PAGE_CACHE_SIZE - start_offset);
		btrfs_csum_final(cur_crc, (char *)&cur_crc);
		btrfs_csum_final(cur_crc, (char *)&cur_crc);
		if (cur_crc != *crc) {
		if (cur_crc != *crc) {
			printk(KERN_ERR "btrfs: crc mismatch for page %lu in "
			printk(KERN_ERR "btrfs: crc mismatch for page %lu\n",
			       "block group %llu\n", index,
			       index);
			       (unsigned long long)block_group->key.objectid);
			kunmap(page);
			kunmap(page);
			unlock_page(page);
			unlock_page(page);
			page_cache_release(page);
			page_cache_release(page);
@@ -432,7 +413,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
					goto free_cache;
					goto free_cache;
				}
				}
				spin_lock(&ctl->tree_lock);
				spin_lock(&ctl->tree_lock);
				ret = link_free_space(ctl, e);
				ret2 = link_free_space(ctl, e);
				ctl->total_bitmaps++;
				ctl->total_bitmaps++;
				ctl->op->recalc_thresholds(ctl);
				ctl->op->recalc_thresholds(ctl);
				spin_unlock(&ctl->tree_lock);
				spin_unlock(&ctl->tree_lock);
@@ -471,42 +452,96 @@ next:
		index++;
		index++;
	}
	}


	spin_lock(&ctl->tree_lock);
	if (ctl->free_space != (block_group->key.offset - used -
				block_group->bytes_super)) {
		spin_unlock(&ctl->tree_lock);
		printk(KERN_ERR "block group %llu has an wrong amount of free "
		       "space\n", block_group->key.objectid);
		ret = 0;
		goto free_cache;
	}
	spin_unlock(&ctl->tree_lock);

	ret = 1;
	ret = 1;
out:
out:
	kfree(checksums);
	kfree(checksums);
	kfree(disk_crcs);
	kfree(disk_crcs);
	iput(inode);
	return ret;
	return ret;

free_cache:
free_cache:
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	smp_mb();
	if (fs_info->closing)
		return 0;

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		printk(KERN_ERR "block group %llu has an wrong amount of free "
		       "space\n", block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->lock);
	btrfs_remove_free_space_cache(block_group);

	goto out;
		printk(KERN_ERR "btrfs: failed to load free space cache "
		       "for block group %llu\n", block_group->key.objectid);
	}
	}


int btrfs_write_out_cache(struct btrfs_root *root,
	iput(inode);
			  struct btrfs_trans_handle *trans,
	return ret;
}

int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path, u64 offset)
{
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space_header *header;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct extent_buffer *leaf;
	struct inode *inode;
	struct rb_node *node;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct list_head *pos, *n;
	struct page **pages;
	struct page **pages;
@@ -523,35 +558,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
	int index = 0, num_pages = 0;
	int index = 0, num_pages = 0;
	int entries = 0;
	int entries = 0;
	int bitmaps = 0;
	int bitmaps = 0;
	int ret = 0;
	int ret = -1;
	bool next_page = false;
	bool next_page = false;
	bool out_of_space = false;
	bool out_of_space = false;


	root = root->fs_info->tree_root;

	INIT_LIST_HEAD(&bitmap_list);
	INIT_LIST_HEAD(&bitmap_list);


	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	if (!i_size_read(inode)) {
		iput(inode);
		return 0;
	}

	node = rb_first(&ctl->free_space_offset);
	node = rb_first(&ctl->free_space_offset);
	if (!node) {
	if (!node)
		iput(inode);
		return 0;
		return 0;
	}

	if (!i_size_read(inode))
		return -1;


	num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
	num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;
		PAGE_CACHE_SHIFT;
@@ -561,16 +579,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,


	/* We need a checksum per page. */
	/* We need a checksum per page. */
	crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
	crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
	if (!crc) {
	if (!crc)
		iput(inode);
		return -1;
		return 0;
	}


	pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
	pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
	if (!pages) {
	if (!pages) {
		kfree(crc);
		kfree(crc);
		iput(inode);
		return -1;
		return 0;
	}
	}


	/* Since the first page has all of our checksums and our generation we
	/* Since the first page has all of our checksums and our generation we
@@ -580,7 +595,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
	first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
	first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);


	/* Get the cluster for this block_group if it exists */
	/* Get the cluster for this block_group if it exists */
	if (!list_empty(&block_group->cluster_list))
	if (block_group && !list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     struct btrfs_free_cluster,
				     block_group_list);
				     block_group_list);
@@ -622,6 +637,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
	 * When searching for pinned extents, we need to start at our start
	 * When searching for pinned extents, we need to start at our start
	 * offset.
	 * offset.
	 */
	 */
	if (block_group)
		start = block_group->key.objectid;
		start = block_group->key.objectid;


	/* Write out the extent entries */
	/* Write out the extent entries */
@@ -680,7 +696,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
		 * We want to add any pinned extents to our free space cache
		 * We want to add any pinned extents to our free space cache
		 * so we don't leak the space
		 * so we don't leak the space
		 */
		 */
		while (!next_page && (start < block_group->key.objectid +
		while (block_group && !next_page &&
		       (start < block_group->key.objectid +
			block_group->key.offset)) {
			block_group->key.offset)) {
			ret = find_first_extent_bit(unpin, start, &start, &end,
			ret = find_first_extent_bit(unpin, start, &start, &end,
						    EXTENT_DIRTY);
						    EXTENT_DIRTY);
@@ -799,12 +816,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
	filemap_write_and_wait(inode->i_mapping);
	filemap_write_and_wait(inode->i_mapping);


	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.offset = offset;
	key.type = 0;
	key.type = 0;


	ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
	ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
	if (ret < 0) {
	if (ret < 0) {
		ret = 0;
		ret = -1;
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
				 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
@@ -817,8 +834,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
		path->slots[0]--;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != block_group->key.objectid) {
		    found_key.offset != offset) {
			ret = 0;
			ret = -1;
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC |
					 EXTENT_DIRTY | EXTENT_DELALLOC |
					 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
					 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
@@ -838,16 +855,49 @@ int btrfs_write_out_cache(struct btrfs_root *root,
	ret = 1;
	ret = 1;


out_free:
out_free:
	if (ret == 0) {
	if (ret != 1) {
		invalidate_inode_pages2_range(inode->i_mapping, 0, index);
		invalidate_inode_pages2_range(inode->i_mapping, 0, index);
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);
		BTRFS_I(inode)->generation = 0;
		BTRFS_I(inode)->generation = 0;
	}
	}
	kfree(checksums);
	kfree(checksums);
	kfree(pages);
	kfree(pages);
	btrfs_update_inode(trans, root, inode);
	btrfs_update_inode(trans, root, inode);
	return ret;
}

int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
				      path, block_group->key.objectid);
	if (ret < 0) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		printk(KERN_ERR "btrfs: failed to write free space cace "
		       "for block group %llu\n", block_group->key.objectid);
	}

	iput(inode);
	iput(inode);
	return ret;
	return ret;
}
}