Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit da370f1d authored by Linus Torvalds
Browse files
Pull btrfs fixes from David Sterba:
 "We have a few assorted fixes, some of them show up during fstests so I
  gave them more testing"

* tag 'for-4.16-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: Fix use-after-free when cleaning up fs_devs with a single stale device
  Btrfs: fix null pointer dereference when replacing missing device
  btrfs: remove spurious WARN_ON(ref->count < 0) in find_parent_nodes
  btrfs: Ignore errors from btrfs_qgroup_trace_extent_post
  Btrfs: fix unexpected -EEXIST when creating new inode
  Btrfs: fix use-after-free on root->orphan_block_rsv
  Btrfs: fix btrfs_evict_inode to handle abnormal inodes correctly
  Btrfs: fix extent state leak from tree log
  Btrfs: fix crash due to not cleaning up tree log block's dirty bits
  Btrfs: fix deadlock in run_delalloc_nocow
parents c85b0b14 fd649f10
Loading
Loading
Loading
Loading
+10 −1
Original line number Diff line number Diff line
@@ -1264,7 +1264,16 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		WARN_ON(ref->count < 0);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically.  Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
+2 −1
Original line number Diff line number Diff line
@@ -821,7 +821,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
		btrfs_qgroup_trace_extent_post(fs_info, record);

	return 0;

free_head_ref:
+4 −0
Original line number Diff line number Diff line
@@ -2147,6 +2147,10 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			u64 bytes;
			struct request_queue *req_q;

			if (!stripe->dev->bdev) {
				ASSERT(btrfs_test_opt(fs_info, DEGRADED));
				continue;
			}
			req_q = bdev_get_queue(stripe->dev->bdev);
			if (!blk_queue_discard(req_q))
				continue;
+26 −15
Original line number Diff line number Diff line
@@ -1335,8 +1335,11 @@ static noinline int run_delalloc_nocow(struct inode *inode,
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
			if (ret < 0) {
				if (cow_start != (u64)-1)
					cur_offset = cow_start;
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
@@ -3385,6 +3388,11 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans,
		ret = btrfs_orphan_reserve_metadata(trans, inode);
		ASSERT(!ret);
		if (ret) {
			/*
			 * dec doesn't need spin_lock as ->orphan_block_rsv
			 * would be released only if ->orphan_inodes is
			 * zero.
			 */
			atomic_dec(&root->orphan_inodes);
			clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
				  &inode->runtime_flags);
@@ -3399,12 +3407,17 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans,
	if (insert >= 1) {
		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
		if (ret) {
			atomic_dec(&root->orphan_inodes);
			if (reserve) {
				clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
					  &inode->runtime_flags);
				btrfs_orphan_release_metadata(inode);
			}
			/*
			 * btrfs_orphan_commit_root may race with us and set
			 * ->orphan_block_rsv to zero, in order to avoid that,
			 * decrease ->orphan_inodes after everything is done.
			 */
			atomic_dec(&root->orphan_inodes);
			if (ret != -EEXIST) {
				clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
					  &inode->runtime_flags);
@@ -3436,28 +3449,26 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
{
	struct btrfs_root *root = inode->root;
	int delete_item = 0;
	int release_rsv = 0;
	int ret = 0;

	spin_lock(&root->orphan_lock);
	if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
			       &inode->runtime_flags))
		delete_item = 1;

	if (delete_item && trans)
		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));

	if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
			       &inode->runtime_flags))
		release_rsv = 1;
	spin_unlock(&root->orphan_lock);
		btrfs_orphan_release_metadata(inode);

	if (delete_item) {
	/*
	 * btrfs_orphan_commit_root may race with us and set ->orphan_block_rsv
	 * to zero, in order to avoid that, decrease ->orphan_inodes after
	 * everything is done.
	 */
	if (delete_item)
		atomic_dec(&root->orphan_inodes);
		if (trans)
			ret = btrfs_del_orphan_item(trans, root,
						    btrfs_ino(inode));
	}

	if (release_rsv)
		btrfs_orphan_release_metadata(inode);

	return ret;
}
@@ -5281,7 +5292,7 @@ void btrfs_evict_inode(struct inode *inode)
	trace_btrfs_inode_evict(inode);

	if (!root) {
		kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
		clear_inode(inode);
		return;
	}

+7 −2
Original line number Diff line number Diff line
@@ -1442,8 +1442,13 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
	int ret;

	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
	if (ret < 0)
		return ret;
	if (ret < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_warn(fs_info,
"error accounting new delayed refs extent (err code: %d), quota inconsistent",
			ret);
		return 0;
	}

	/*
	 * Here we don't need to get the lock of
Loading