Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 33c0022f authored by Linus Torvalds
Browse files
Pull btrfs fixes from Chris Mason.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: limit the path size in send to PATH_MAX
  Btrfs: correctly set profile flags on seqlock retry
  Btrfs: use correct key when repeating search for extent item
  Btrfs: fix inode caching vs tree log
  Btrfs: fix possible memory leaks in open_ctree()
  Btrfs: avoid triggering bug_on() when we fail to start inode caching task
  Btrfs: move btrfs_{set,clear}_and_info() to ctree.h
  btrfs: replace error code from btrfs_drop_extents
  btrfs: Change the hole range to a more accurate value.
  btrfs: fix use-after-free in mount_subvol()
parents 2b9d1c05 cfd4a535
Loading
Loading
Loading
Loading
+14 −0
Original line number Diff line number Diff line
@@ -2058,6 +2058,20 @@ struct btrfs_ioctl_defrag_range_args {
/* Test a BTRFS_MOUNT_* flag against a raw mount_opt bitmask. */
#define btrfs_raw_test_opt(o, opt)	((o) & BTRFS_MOUNT_##opt)
/* Test a BTRFS_MOUNT_* flag on the filesystem the given root belongs to. */
#define btrfs_test_opt(root, opt)	((root)->fs_info->mount_opt & \
					 BTRFS_MOUNT_##opt)
/*
 * Set a BTRFS_MOUNT_* option and log an informational message, but only
 * when the option was not already set (avoids duplicate log lines on
 * remount).  Wrapped in do { } while (0) so the macro behaves as a single
 * statement inside an unbraced if/else at call sites.
 */
#define btrfs_set_and_info(root, opt, fmt, args...)			\
do {									\
	if (!btrfs_test_opt(root, opt))					\
		btrfs_info(root->fs_info, fmt, ##args);			\
	btrfs_set_opt(root->fs_info->mount_opt, opt);			\
} while (0)

/*
 * Clear a BTRFS_MOUNT_* option and log an informational message, but only
 * when the option was actually set (avoids duplicate log lines).  Wrapped
 * in do { } while (0) so the macro behaves as a single statement inside an
 * unbraced if/else at call sites.
 */
#define btrfs_clear_and_info(root, opt, fmt, args...)			\
do {									\
	if (btrfs_test_opt(root, opt))					\
		btrfs_info(root->fs_info, fmt, ##args);			\
	btrfs_clear_opt(root->fs_info->mount_opt, opt);			\
} while (0)

/*
 * Inode flags
 */
+5 −5
Original line number Diff line number Diff line
@@ -2861,7 +2861,7 @@ int open_ctree(struct super_block *sb,
			printk(KERN_ERR "BTRFS: failed to read log tree\n");
			free_extent_buffer(log_tree_root->node);
			kfree(log_tree_root);
			goto fail_trans_kthread;
			goto fail_qgroup;
		}
		/* returns with log_tree_root freed on success */
		ret = btrfs_recover_log_trees(log_tree_root);
@@ -2870,24 +2870,24 @@ int open_ctree(struct super_block *sb,
				    "Failed to recover log tree");
			free_extent_buffer(log_tree_root->node);
			kfree(log_tree_root);
			goto fail_trans_kthread;
			goto fail_qgroup;
		}

		if (sb->s_flags & MS_RDONLY) {
			ret = btrfs_commit_super(tree_root);
			if (ret)
				goto fail_trans_kthread;
				goto fail_qgroup;
		}
	}

	ret = btrfs_find_orphan_roots(tree_root);
	if (ret)
		goto fail_trans_kthread;
		goto fail_qgroup;

	if (!(sb->s_flags & MS_RDONLY)) {
		ret = btrfs_cleanup_fs_roots(fs_info);
		if (ret)
			goto fail_trans_kthread;
			goto fail_qgroup;

		ret = btrfs_recover_relocation(tree_root);
		if (ret < 0) {
+5 −1
Original line number Diff line number Diff line
@@ -1542,6 +1542,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
@@ -3542,11 +3543,13 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
	return extended_to_chunk(flags | tmp);
}

static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&root->fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
@@ -5719,6 +5722,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,

			if (ret > 0 && skinny_metadata) {
				skinny_metadata = false;
				key.objectid = bytenr;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				key.offset = num_bytes;
				btrfs_release_path(path);
+4 −4
Original line number Diff line number Diff line
@@ -800,7 +800,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EINVAL;
				ret = -EOPNOTSUPP;
				break;
			}

@@ -846,7 +846,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
		 */
		if (start <= key.offset && end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EINVAL;
				ret = -EOPNOTSUPP;
				break;
			}

@@ -872,7 +872,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EINVAL;
				ret = -EOPNOTSUPP;
				break;
			}

@@ -1777,7 +1777,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		/* Expand hole size to cover write data, preventing empty gap */
		end_pos = round_up(pos + iov->iov_len, root->sectorsize);
		end_pos = round_up(pos + count, root->sectorsize);
		err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
+7 −17
Original line number Diff line number Diff line
@@ -176,7 +176,11 @@ static void start_caching(struct btrfs_root *root)

	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
			  root->root_key.objectid);
	BUG_ON(IS_ERR(tsk)); /* -ENOMEM */
	if (IS_ERR(tsk)) {
		btrfs_warn(root->fs_info, "failed to start inode caching task");
		btrfs_clear_and_info(root, CHANGE_INODE_CACHE,
				"disabling inode map caching");
	}
}

int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
@@ -205,24 +209,14 @@ int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)

void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return;

again:
	if (root->cached == BTRFS_CACHE_FINISHED) {
		__btrfs_add_free_space(ctl, objectid, 1);
		__btrfs_add_free_space(pinned, objectid, 1);
	} else {
		/*
		 * If we are in the process of caching free ino chunks,
		 * to avoid adding the same inode number to the free_ino
		 * tree twice due to cross transaction, we'll leave it
		 * in the pinned tree until a transaction is committed
		 * or the caching work is done.
		 */

		down_write(&root->fs_info->commit_root_sem);
		spin_lock(&root->cache_lock);
		if (root->cached == BTRFS_CACHE_FINISHED) {
@@ -234,10 +228,6 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid)

		start_caching(root);

		if (objectid <= root->cache_progress ||
		    objectid >= root->highest_objectid)
			__btrfs_add_free_space(ctl, objectid, 1);
		else
		__btrfs_add_free_space(pinned, objectid, 1);

		up_write(&root->fs_info->commit_root_sem);
Loading