
Commit 031f24da authored by Qu Wenruo, committed by David Sterba

btrfs: Use btrfs_mark_bg_unused to replace open code



Introduce a small helper, btrfs_mark_bg_unused(), to acquire the lock and
add a block group to the unused_bgs list.

No functional changes; only three callers are involved.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 2556fbb0
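
For orientation only (not part of the commit): the three call sites each
open-coded the same lock/check/add sequence. A condensed sketch of that
pattern, assembled from the hunks below (local variable names vary slightly
per call site), next to the new single call:

	/* before: open-coded at each call site */
	spin_lock(&fs_info->unused_bgs_lock);
	if (list_empty(&cache->bg_list)) {
		btrfs_get_block_group(cache);
		trace_btrfs_add_unused_block_group(cache);
		list_add_tail(&cache->bg_list, &fs_info->unused_bgs);
	}
	spin_unlock(&fs_info->unused_bgs_lock);

	/* after: one helper call with the same locking and refcounting */
	btrfs_mark_bg_unused(cache);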
fs/btrfs/ctree.h +1 −0
@@ -2801,6 +2801,7 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
 void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
 u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
 		       u64 start, u64 end);
+void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg);
 
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
fs/btrfs/extent-tree.c +17 −19
@@ -6166,16 +6166,8 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 		 * dirty list to avoid races between cleaner kthread and space
 		 * cache writeout.
 		 */
-		if (!alloc && old_val == 0) {
-			spin_lock(&info->unused_bgs_lock);
-			if (list_empty(&cache->bg_list)) {
-				btrfs_get_block_group(cache);
-				trace_btrfs_add_unused_block_group(cache);
-				list_add_tail(&cache->bg_list,
-					      &info->unused_bgs);
-			}
-			spin_unlock(&info->unused_bgs_lock);
-		}
+		if (!alloc && old_val == 0)
+			btrfs_mark_bg_unused(cache);
 
 		btrfs_put_block_group(cache);
 		total -= num_bytes;
@@ -9987,15 +9979,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 		if (btrfs_chunk_readonly(info, cache->key.objectid)) {
 			inc_block_group_ro(cache, 1);
 		} else if (btrfs_block_group_used(&cache->item) == 0) {
-			spin_lock(&info->unused_bgs_lock);
-			/* Should always be true but just in case. */
-			if (list_empty(&cache->bg_list)) {
-				btrfs_get_block_group(cache);
-				trace_btrfs_add_unused_block_group(cache);
-				list_add_tail(&cache->bg_list,
-					      &info->unused_bgs);
-			}
-			spin_unlock(&info->unused_bgs_lock);
+			ASSERT(list_empty(&cache->bg_list));
+			btrfs_mark_bg_unused(cache);
 		}
 	}
 
@@ -10914,3 +10899,16 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
 			       !atomic_read(&root->will_be_snapshotted));
 	}
 }
+
+void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
+{
+	struct btrfs_fs_info *fs_info = bg->fs_info;
+
+	spin_lock(&fs_info->unused_bgs_lock);
+	if (list_empty(&bg->bg_list)) {
+		btrfs_get_block_group(bg);
+		trace_btrfs_add_unused_block_group(bg);
+		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
+	}
+	spin_unlock(&fs_info->unused_bgs_lock);
+}
fs/btrfs/scrub.c +1 −8
@@ -3951,14 +3951,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
 		    btrfs_block_group_used(&cache->item) == 0) {
 			spin_unlock(&cache->lock);
-			spin_lock(&fs_info->unused_bgs_lock);
-			if (list_empty(&cache->bg_list)) {
-				btrfs_get_block_group(cache);
-				trace_btrfs_add_unused_block_group(cache);
-				list_add_tail(&cache->bg_list,
-					      &fs_info->unused_bgs);
-			}
-			spin_unlock(&fs_info->unused_bgs_lock);
+			btrfs_mark_bg_unused(cache);
 		} else {
 			spin_unlock(&cache->lock);
 		}