
Commit cb7ab021 authored by Wang Shilong, committed by Chris Mason

Btrfs: wrap repeated code into scrub_blocked_if_needed()



Just wrap the repeated code into one function, scrub_blocked_if_needed().

This makes one change in ordering: we now wait for @workers_pending
to reach 0 before we wake up the committing transaction
(atomic_inc(@scrubs_paused)), so we must be careful not to deadlock
here:

Thread 1			Thread 2
				|->btrfs_commit_transaction()
					|->set trans type(COMMIT_DOING)
					|->btrfs_scrub_pause() (blocked)
|->join_transaction(blocked)

Move btrfs_scrub_pause() before setting the trans type, which means we
can still join a transaction while the committing transaction is
blocked.

Signed-off-by: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
Suggested-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
parent 3cb0929a
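The handshake that the new scrub_blocked_if_needed() centralizes can be
modelled outside the kernel. The sketch below is a minimal userspace
analogue, not kernel code: pthread condition variables stand in for the
kernel's wait queues and atomics, and every name in it (blocked_if_needed,
scrub_pause, nr_scrubbers and so on) is invented for illustration. It shows
the ordering the commit message relies on: a worker increments the paused
count and wakes the committer before it blocks, so the committer never
sleeps waiting on a scrubber that is itself waiting to be woken.

#include <pthread.h>
#include <stdio.h>

/* Userspace model of the scrub pause handshake; illustrative only. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pause_wait = PTHREAD_COND_INITIALIZER;
static int pause_req;		/* committer wants scrubbers parked */
static int scrubs_paused;	/* workers that have acknowledged */
static int done;		/* tells the worker to exit */

/* Worker side, in the spirit of scrub_blocked_if_needed(): acknowledge
 * the pause and wake the committer BEFORE blocking. */
static void blocked_if_needed(void)
{
	pthread_mutex_lock(&lock);
	scrubs_paused++;
	pthread_cond_broadcast(&pause_wait);	/* wake the committer first */
	while (pause_req)
		pthread_cond_wait(&pause_wait, &lock);
	scrubs_paused--;
	pthread_cond_broadcast(&pause_wait);
	pthread_mutex_unlock(&lock);
}

/* Committer side, in the spirit of btrfs_scrub_pause(): request the
 * pause, then wait until every worker has parked. */
static void scrub_pause(int nr_scrubbers)
{
	pthread_mutex_lock(&lock);
	pause_req = 1;
	while (scrubs_paused < nr_scrubbers)
		pthread_cond_wait(&pause_wait, &lock);
	pthread_mutex_unlock(&lock);
}

static void scrub_continue(void)
{
	pthread_mutex_lock(&lock);
	pause_req = 0;
	pthread_cond_broadcast(&pause_wait);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		int stop = done;
		pthread_mutex_unlock(&lock);
		if (stop)
			return NULL;
		/* ... scrub one stripe ... */
		blocked_if_needed();	/* safe pause point between stripes */
	}
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	scrub_pause(1);		/* returns only once the worker is parked */
	puts("worker parked; a commit would be safe here");
	pthread_mutex_lock(&lock);
	done = 1;		/* ask the worker to exit once resumed */
	pthread_mutex_unlock(&lock);
	scrub_continue();
	pthread_join(t, NULL);
	return 0;
}

Build with cc -pthread. Swapping the increment-and-wake with the blocking
wait inside blocked_if_needed() recreates the kind of circular wait shown
in the thread diagram above.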
fs/btrfs/scrub.c +17 −26

@@ -256,6 +256,7 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 			    int mirror_num, u64 physical_for_dev_replace);
 static void copy_nocow_pages_worker(struct btrfs_work *work);
+static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 
 
@@ -270,7 +271,7 @@ static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
 	wake_up(&sctx->list_wait);
 }
 
-static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 {
 	while (atomic_read(&fs_info->scrub_pause_req)) {
 		mutex_unlock(&fs_info->scrub_lock);
@@ -280,6 +281,19 @@ static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 	}
 }
 
+static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+{
+	atomic_inc(&fs_info->scrubs_paused);
+	wake_up(&fs_info->scrub_pause_wait);
+
+	mutex_lock(&fs_info->scrub_lock);
+	__scrub_blocked_if_needed(fs_info);
+	atomic_dec(&fs_info->scrubs_paused);
+	mutex_unlock(&fs_info->scrub_lock);
+
+	wake_up(&fs_info->scrub_pause_wait);
+}
+
 /*
  * used for workers that require transaction commits (i.e., for the
  * NOCOW case)
@@ -2295,8 +2309,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 
 	wait_event(sctx->list_wait,
 		   atomic_read(&sctx->bios_in_flight) == 0);
-	atomic_inc(&fs_info->scrubs_paused);
-	wake_up(&fs_info->scrub_pause_wait);
+	scrub_blocked_if_needed(fs_info);
 
 	/* FIXME it might be better to start readahead at commit root */
 	key_start.objectid = logical;
@@ -2320,12 +2333,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	if (!IS_ERR(reada2))
 		btrfs_reada_wait(reada2);
 
-	mutex_lock(&fs_info->scrub_lock);
-	scrub_blocked_if_needed(fs_info);
-	atomic_dec(&fs_info->scrubs_paused);
-	mutex_unlock(&fs_info->scrub_lock);
-
-	wake_up(&fs_info->scrub_pause_wait);
 
 	/*
 	 * collect all data csums for the stripe to avoid seeking during
@@ -2362,15 +2369,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 			wait_event(sctx->list_wait,
 				   atomic_read(&sctx->bios_in_flight) == 0);
 			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
-			atomic_inc(&fs_info->scrubs_paused);
-			wake_up(&fs_info->scrub_pause_wait);
-
-			mutex_lock(&fs_info->scrub_lock);
-			scrub_blocked_if_needed(fs_info);
-			atomic_dec(&fs_info->scrubs_paused);
-			mutex_unlock(&fs_info->scrub_lock);
-
-			wake_up(&fs_info->scrub_pause_wait);
+			scrub_blocked_if_needed(fs_info);
 		}
 
 		key.objectid = logical;
@@ -2685,17 +2684,9 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->bios_in_flight) == 0);
 		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
-		atomic_inc(&fs_info->scrubs_paused);
-		wake_up(&fs_info->scrub_pause_wait);
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->workers_pending) == 0);
-
-		mutex_lock(&fs_info->scrub_lock);
-		scrub_blocked_if_needed(fs_info);
-		atomic_dec(&fs_info->scrubs_paused);
-		mutex_unlock(&fs_info->scrub_lock);
-
-		wake_up(&fs_info->scrub_pause_wait);
+		scrub_blocked_if_needed(fs_info);
 
 		btrfs_put_block_group(cache);
 		if (ret)
@@ -2912,7 +2903,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	 * checking @scrub_pause_req here, we can avoid
 	 * race between committing transaction and scrubbing.
 	 */
-	scrub_blocked_if_needed(fs_info);
+	__scrub_blocked_if_needed(fs_info);
 	atomic_inc(&fs_info->scrubs_running);
 	mutex_unlock(&fs_info->scrub_lock);

fs/btrfs/transaction.c +2 −1

@@ -1748,6 +1748,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 		goto cleanup_transaction;
 
 	btrfs_wait_delalloc_flush(root->fs_info);
+
+	btrfs_scrub_pause(root);
 	/*
 	 * Ok now we need to make sure to block out any other joins while we
 	 * commit the transaction.  We could have started a join before setting
@@ -1812,7 +1814,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
 	WARN_ON(cur_trans != trans->transaction);
 
-	btrfs_scrub_pause(root);
 	/* btrfs_commit_tree_roots is responsible for getting the
 	 * various roots consistent with each other.  Every pointer
 	 * in the tree of tree roots has to point to the most up to date