
Commit aa021baa authored by Linus Torvalds
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: fix panic when trying to destroy a newly allocated
  Btrfs: allow more metadata chunk preallocation
  Btrfs: fallback on uncompressed io if compressed io fails
  Btrfs: find ideal block group for caching
  Btrfs: avoid null deref in unpin_extent_cache()
  Btrfs: skip btrfs_release_path in btrfs_update_root and btrfs_del_root
  Btrfs: fix some metadata enospc issues
  Btrfs: fix how we set max_size for free space clusters
  Btrfs: cleanup transaction starting and fix journal_info usage
  Btrfs: fix data allocation hint start
parents 404291ac a6dbd429
fs/btrfs/extent-tree.c +88 −25
@@ -2977,10 +2977,10 @@ static int maybe_allocate_chunk(struct btrfs_root *root,

	free_space = btrfs_super_total_bytes(disk_super);
	/*
	 * we allow the metadata to grow to a max of either 5gb or 5% of the
	 * we allow the metadata to grow to a max of either 10gb or 5% of the
	 * space in the volume.
	 */
	min_metadata = min((u64)5 * 1024 * 1024 * 1024,
	min_metadata = min((u64)10 * 1024 * 1024 * 1024,
			     div64_u64(free_space * 5, 100));
	if (info->total_bytes >= min_metadata) {
		spin_unlock(&info->lock);
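
The hunk above raises the metadata preallocation ceiling from 5 GiB to the smaller of 10 GiB and 5% of the volume. A minimal userspace sketch of the same arithmetic (plain 64-bit division standing in for div64_u64(); not kernel code) shows which bound wins at different volume sizes:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Smaller of 10 GiB and 5% of the volume, mirroring min_metadata above. */
static uint64_t min_metadata_cap(uint64_t free_space)
{
	uint64_t ten_gib = (uint64_t)10 * 1024 * 1024 * 1024;
	uint64_t five_percent = free_space * 5 / 100;

	return ten_gib < five_percent ? ten_gib : five_percent;
}

int main(void)
{
	uint64_t volumes[] = { (uint64_t)100 << 30, (uint64_t)1 << 40 };

	for (int i = 0; i < 2; i++)
		printf("volume %" PRIu64 " GiB -> metadata cap %" PRIu64 " GiB\n",
		       volumes[i] >> 30, min_metadata_cap(volumes[i]) >> 30);
	return 0;
}

Up to a 200 GiB volume the 5% bound is still the limit; beyond that the new 10 GiB ceiling takes over, where the old 5 GiB ceiling already applied from 100 GiB.
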
@@ -4102,7 +4102,7 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
}

enum btrfs_loop_type {
	LOOP_CACHED_ONLY = 0,
	LOOP_FIND_IDEAL = 0,
	LOOP_CACHING_NOWAIT = 1,
	LOOP_CACHING_WAIT = 2,
	LOOP_ALLOC_CHUNK = 3,
@@ -4131,12 +4131,15 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
	struct btrfs_block_group_cache *block_group = NULL;
	int empty_cluster = 2 * 1024 * 1024;
	int allowed_chunk_alloc = 0;
	int done_chunk_alloc = 0;
	struct btrfs_space_info *space_info;
	int last_ptr_loop = 0;
	int loop = 0;
	bool found_uncached_bg = false;
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	u64 ideal_cache_percent = 0;
	u64 ideal_cache_offset = 0;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -4172,14 +4175,19 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
		empty_cluster = 0;

	if (search_start == hint_byte) {
ideal_cache:
		block_group = btrfs_lookup_block_group(root->fs_info,
						       search_start);
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if its not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, data) &&
		    block_group_cache_done(block_group)) {
		    (block_group->cached != BTRFS_CACHE_NO ||
		     search_start == ideal_cache_offset)) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
@@ -4191,13 +4199,13 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else
			} else {
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}

search:
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups, list) {
@@ -4209,28 +4217,45 @@ search:

have_block_group:
		if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
			u64 free_percent;

			free_percent = btrfs_block_group_used(&block_group->item);
			free_percent *= 100;
			free_percent = div64_u64(free_percent,
						 block_group->key.offset);
			free_percent = 100 - free_percent;
			if (free_percent > ideal_cache_percent &&
			    likely(!block_group->ro)) {
				ideal_cache_offset = block_group->key.objectid;
				ideal_cache_percent = free_percent;
			}

			/*
			 * we want to start caching kthreads, but not too many
			 * right off the bat so we don't overwhelm the system,
			 * so only start them if there are less than 2 and we're
			 * in the initial allocation phase.
			 * We only want to start kthread caching if we are at
			 * the point where we will wait for caching to make
			 * progress, or if our ideal search is over and we've
			 * found somebody to start caching.
			 */
			if (loop > LOOP_CACHING_NOWAIT ||
			    atomic_read(&space_info->caching_threads) < 2) {
			    (loop > LOOP_FIND_IDEAL &&
			     atomic_read(&space_info->caching_threads) < 2)) {
				ret = cache_block_group(block_group);
				BUG_ON(ret);
			}
		}

		cached = block_group_cache_done(block_group);
		if (unlikely(!cached)) {
			found_uncached_bg = true;

			/* if we only want cached bgs, loop */
			if (loop == LOOP_CACHED_ONLY)
			/*
			 * If loop is set for cached only, try the next block
			 * group.
			 */
			if (loop == LOOP_FIND_IDEAL)
				goto loop;
		}

		cached = block_group_cache_done(block_group);
		if (unlikely(!cached))
			found_uncached_bg = true;

		if (unlikely(block_group->ro))
			goto loop;

@@ -4410,9 +4435,11 @@ loop:
	}
	up_read(&space_info->groups_sem);

	/* LOOP_CACHED_ONLY, only search fully cached block groups
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, but
	 *			dont wait foR them to finish caching
	/* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for
	 *			for them to make caching progress.  Also
	 *			determine the best possible bg to cache
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
@@ -4421,12 +4448,47 @@ loop:
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
	    (found_uncached_bg || empty_size || empty_cluster ||
	     allowed_chunk_alloc)) {
		if (found_uncached_bg) {
		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
			found_uncached_bg = false;
			if (loop < LOOP_CACHING_WAIT) {
			loop++;
			if (!ideal_cache_percent &&
			    atomic_read(&space_info->caching_threads))
				goto search;

			/*
			 * 1 of the following 2 things have happened so far
			 *
			 * 1) We found an ideal block group for caching that
			 * is mostly full and will cache quickly, so we might
			 * as well wait for it.
			 *
			 * 2) We searched for cached only and we didn't find
			 * anything, and we didn't start any caching kthreads
			 * either, so chances are we will loop through and
			 * start a couple caching kthreads, and then come back
			 * around and just wait for them.  This will be slower
			 * because we will have 2 caching kthreads reading at
			 * the same time when we could have just started one
			 * and waited for it to get far enough to give us an
			 * allocation, so go ahead and go to the wait caching
			 * loop.
			 */
			loop = LOOP_CACHING_WAIT;
			search_start = ideal_cache_offset;
			ideal_cache_percent = 0;
			goto ideal_cache;
		} else if (loop == LOOP_FIND_IDEAL) {
			/*
			 * Didn't find a uncached bg, wait on anything we find
			 * next.
			 */
			loop = LOOP_CACHING_WAIT;
			goto search;
		}

		if (loop < LOOP_CACHING_WAIT) {
			loop++;
			goto search;
		}

		if (loop == LOOP_ALLOC_CHUNK) {
@@ -4438,7 +4500,8 @@ loop:
			ret = do_chunk_alloc(trans, root, num_bytes +
					     2 * 1024 * 1024, data, 1);
			allowed_chunk_alloc = 0;
		} else {
			done_chunk_alloc = 1;
		} else if (!done_chunk_alloc) {
			space_info->force_alloc = 1;
		}
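
Taken together, the find_free_extent() hunks replace LOOP_CACHED_ONLY with a LOOP_FIND_IDEAL pass: scan only caching or cached block groups, remember the uncached group with the highest free-space percentage as the "ideal" candidate, and then restart the search at that group under LOOP_CACHING_WAIT rather than kicking off extra caching kthreads. A rough userspace sketch of that escalation (stub search function, illustrative only, not the full kernel flow):

#include <stdbool.h>
#include <stdio.h>

enum loop_type {		/* mirrors the enum changed above */
	LOOP_FIND_IDEAL = 0,
	LOOP_CACHING_NOWAIT,
	LOOP_CACHING_WAIT,
	LOOP_ALLOC_CHUNK,
	LOOP_NO_EMPTY_SIZE,
};

/* Pretend nothing fits until we are willing to wait on a caching group. */
static bool try_find_extent(enum loop_type loop)
{
	return loop >= LOOP_CACHING_WAIT;
}

int main(void)
{
	bool found_uncached_bg = true;	/* the ideal pass saw an uncached group */
	enum loop_type loop = LOOP_FIND_IDEAL;

	while (!try_find_extent(loop)) {
		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
			/* jump straight to waiting on the ideal group instead
			 * of starting a second caching kthread */
			loop = LOOP_CACHING_WAIT;
			found_uncached_bg = false;
			continue;
		}
		loop++;
	}
	printf("allocation satisfied at loop stage %d\n", loop);
	return 0;
}
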

fs/btrfs/extent_map.c +1 −1
@@ -208,7 +208,7 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(em->start != start || !em);
	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;
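
The unpin_extent_cache() change only swaps the WARN_ON() operands, but that is the whole fix: || evaluates left to right, so the old order read em->start before the NULL test could short-circuit. A tiny standalone illustration (hypothetical stub struct, not the kernel's extent_map):

#include <stdio.h>

struct em_stub { unsigned long start; };

static int old_check(struct em_stub *em, unsigned long start)
{
	return em->start != start || !em;	/* dereferences em first */
}

static int new_check(struct em_stub *em, unsigned long start)
{
	return !em || em->start != start;	/* NULL test short-circuits */
}

int main(void)
{
	struct em_stub *em = NULL;

	(void)old_check;			/* kept only to show the broken order */
	printf("%d\n", new_check(em, 0));	/* safe, prints 1 */
	/* old_check(em, 0) would fault here, like the kernel oops */
	return 0;
}
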
fs/btrfs/free-space-cache.c +1 −1
@@ -1296,7 +1296,7 @@ again:
			window_start = entry->offset;
			window_free = entry->bytes;
			last = entry;
			max_extent = 0;
			max_extent = entry->bytes;
		} else {
			last = next;
			window_free += next->bytes;
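
In the cluster-building loop above, max_extent tracks the largest single free extent inside the candidate window and ultimately becomes the cluster's max_size, so seeding it from entry->bytes instead of 0 means a window that never grows past its first entry still reports that entry's size. A small sketch of the accounting with illustrative names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct window {
	uint64_t free;		/* total free bytes in the window      */
	uint64_t max_extent;	/* largest single extent in the window */
};

static struct window build_window(const uint64_t *bytes, size_t n)
{
	/* seed both counters from the first entry, as the fixed code does */
	struct window w = { .free = bytes[0], .max_extent = bytes[0] };

	for (size_t i = 1; i < n; i++) {
		w.free += bytes[i];
		if (bytes[i] > w.max_extent)
			w.max_extent = bytes[i];
	}
	return w;
}

int main(void)
{
	uint64_t extents[] = { 8 << 20, 2 << 20, 4 << 20 };
	struct window w = build_window(extents, 3);

	/* with the old "max_extent = 0" seeding, the first entry's size
	 * never counted toward the reported maximum */
	printf("free=%llu max_extent=%llu\n",
	       (unsigned long long)w.free, (unsigned long long)w.max_extent);
	return 0;
}
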
fs/btrfs/inode.c +80 −15
@@ -538,7 +538,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;
@@ -552,6 +552,7 @@ static noinline int submit_compressed_extents(struct inode *inode,

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
@@ -562,7 +563,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
				    async_extent->ram_size - 1, GFP_NOFS);

			/* allocate blocks */
			cow_file_range(inode, async_cow->locked_page,
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
@@ -574,7 +575,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started)
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
@@ -602,7 +603,21 @@ static noinline int submit_compressed_extents(struct inode *inode,
					   async_extent->compressed_size,
					   0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);
		if (ret) {
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1, GFP_NOFS);
			goto retry;
		}

		em = alloc_extent_map(GFP_NOFS);
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
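
The retry path added above is what implements the uncompressed-IO fallback: when btrfs_reserve_extent() fails for the compressed copy, the compressed pages are released, async_extent->pages is cleared, and the goto retry sends the range through the uncompressed cow_file_range() branch instead of hitting the old BUG_ON(). A stripped-down userspace sketch of that shape (stub helpers, not the real API):

#include <stdio.h>
#include <stdlib.h>

struct async_extent {
	void *pages;	/* compressed pages; NULL means "write uncompressed" */
	int nr_pages;
};

static int reserve_compressed_extent(void)	{ return -1; /* pretend ENOSPC */ }
static int write_uncompressed(void)		{ puts("uncompressed IO"); return 0; }

static int submit_one(struct async_extent *ae)
{
retry:
	if (!ae->pages)			/* compression was given up on */
		return write_uncompressed();

	if (reserve_compressed_extent()) {
		/* drop the compressed pages and fall back */
		free(ae->pages);
		ae->pages = NULL;
		ae->nr_pages = 0;
		goto retry;
	}
	puts("compressed IO");
	return 0;
}

int main(void)
{
	struct async_extent ae = { .pages = malloc(16), .nr_pages = 1 };

	return submit_one(&ae);
}
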
@@ -743,9 +758,23 @@ static noinline int cow_file_range(struct inode *inode,
	em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
				   start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&BTRFS_I(inode)->extent_tree.lock);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

@@ -2474,7 +2503,19 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)

	root = BTRFS_I(dir)->root;

	/*
	 * 5 items for unlink inode
	 * 1 for orphan
	 */
	ret = btrfs_reserve_metadata_space(root, 6);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		btrfs_unreserve_metadata_space(root, 6);
		return PTR_ERR(trans);
	}

	btrfs_set_trans_block_group(trans, dir);

@@ -2489,6 +2530,7 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
	nr = trans->blocks_used;

	btrfs_end_transaction_throttle(trans, root);
	btrfs_unreserve_metadata_space(root, 6);
	btrfs_btree_balance_dirty(root, nr);
	return ret;
}
@@ -2569,7 +2611,16 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
	    inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
		return -ENOTEMPTY;

	ret = btrfs_reserve_metadata_space(root, 5);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		btrfs_unreserve_metadata_space(root, 5);
		return PTR_ERR(trans);
	}

	btrfs_set_trans_block_group(trans, dir);

	if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
@@ -2592,6 +2643,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
out:
	nr = trans->blocks_used;
	ret = btrfs_end_transaction_throttle(trans, root);
	btrfs_unreserve_metadata_space(root, 5);
	btrfs_btree_balance_dirty(root, nr);

	if (ret && !err)
@@ -5128,6 +5180,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
	ei->logged_trans = 0;
	ei->outstanding_extents = 0;
	ei->reserved_extents = 0;
	ei->root = NULL;
	spin_lock_init(&ei->accounting_lock);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->i_orphan);
@@ -5143,6 +5196,14 @@ void btrfs_destroy_inode(struct inode *inode)
	WARN_ON(!list_empty(&inode->i_dentry));
	WARN_ON(inode->i_data.nrpages);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		goto free;

	/*
	 * Make sure we're properly removed from the ordered operation
	 * lists.
@@ -5178,6 +5239,7 @@ void btrfs_destroy_inode(struct inode *inode)
	}
	inode_tree_del(inode);
	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}

@@ -5283,11 +5345,14 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
		return -ENOTEMPTY;

	/*
	 * 2 items for dir items
	 * 1 item for orphan entry
	 * 1 item for ref
	 * We want to reserve the absolute worst case amount of items.  So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume their normal
	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
	 * should cover the worst case number of items we'll modify.
	 */
	ret = btrfs_reserve_metadata_space(root, 4);
	ret = btrfs_reserve_metadata_space(root, 11);
	if (ret)
		return ret;

@@ -5403,7 +5468,7 @@ out_fail:
	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&root->fs_info->subvol_sem);

	btrfs_unreserve_metadata_space(root, 4);
	btrfs_unreserve_metadata_space(root, 11);
	return ret;
}
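
The btrfs_unlink(), btrfs_rmdir() and btrfs_rename() hunks all follow one pattern: reserve a worst-case number of metadata items before starting the transaction, and give the reservation back on every exit path, including a failed btrfs_start_transaction(). A schematic userspace sketch of that shape (all helpers are stubs, not the btrfs API):

#include <errno.h>
#include <stdio.h>

/* Stubs standing in for the btrfs reservation and transaction helpers. */
static int reserve_metadata_space(int items)	{ printf("reserve %d items\n", items); return 0; }
static void unreserve_metadata_space(int items)	{ printf("unreserve %d items\n", items); }
static int start_transaction(void)		{ return 0; /* or -ENOMEM */ }
static void end_transaction(void)		{ }

/* unlink reserves 6 items: 5 for the unlink itself plus 1 for the
 * possible orphan item, matching the comment added in btrfs_unlink(). */
static int do_unlink(void)
{
	int ret = reserve_metadata_space(6);
	if (ret)
		return ret;

	if (start_transaction()) {
		/* the reservation must be dropped on every failure path */
		unreserve_metadata_space(6);
		return -ENOMEM;
	}

	/* ... the actual unlink work would go here ... */

	end_transaction();
	unreserve_metadata_space(6);
	return 0;
}

int main(void)
{
	return do_unlink();
}

The rename hunk applies the same pattern with 11 items, the worst case worked out in the comment above.
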

fs/btrfs/root-tree.c +0 −2
@@ -159,7 +159,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
	write_extent_buffer(l, item, ptr, sizeof(*item));
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	return ret;
}
@@ -332,7 +331,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
	BUG_ON(refs != 0);
	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	return ret;
}