Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 093258e6 authored by David Sterba
Browse files

btrfs: replace waitqueue_active with cond_wake_up



Use the wrappers and reduce the amount of low-level details about the
waitqueue management.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 3d3a2e61
Loading
Loading
Loading
Loading
+1 −6
Original line number Diff line number Diff line
@@ -990,12 +990,7 @@ static void __free_workspace(int type, struct list_head *workspace,
		btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
	cond_wake_up(ws_wait);
}

static void free_workspace(int type, struct list_head *ws)
+3 −6
Original line number Diff line number Diff line
@@ -460,13 +460,10 @@ static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
+4 −6
Original line number Diff line number Diff line
@@ -1009,9 +1009,9 @@ void btrfs_dev_replace_clear_lock_blocking(
	ASSERT(atomic_read(&dev_replace->read_locks) > 0);
	ASSERT(atomic_read(&dev_replace->blocking_readers) > 0);
	read_lock(&dev_replace->lock);
	if (atomic_dec_and_test(&dev_replace->blocking_readers) &&
	    waitqueue_active(&dev_replace->read_lock_wq))
		wake_up(&dev_replace->read_lock_wq);
	/* Barrier implied by atomic_dec_and_test */
	if (atomic_dec_and_test(&dev_replace->blocking_readers))
		cond_wake_up_nomb(&dev_replace->read_lock_wq);
}

void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info)
@@ -1022,9 +1022,7 @@ void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info)
void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
{
	percpu_counter_sub(&fs_info->bio_counter, amount);

	if (waitqueue_active(&fs_info->replace_wait))
		wake_up(&fs_info->replace_wait);
	cond_wake_up_nomb(&fs_info->replace_wait);
}

void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info)
+1 −6
Original line number Diff line number Diff line
@@ -11081,12 +11081,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
void btrfs_end_write_no_snapshotting(struct btrfs_root *root)
{
	percpu_counter_dec(&root->subv_writers->counter);
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(&root->subv_writers->wait))
		wake_up(&root->subv_writers->wait);
	cond_wake_up(&root->subv_writers->wait);
}

int btrfs_start_write_no_snapshotting(struct btrfs_root *root)
+3 −6
Original line number Diff line number Diff line
@@ -1156,13 +1156,10 @@ static noinline void async_cow_submit(struct btrfs_work *work)
	nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	/*
	 * atomic_sub_return implies a barrier for waitqueue_active
	 */
	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
Loading