Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fceb0d85 authored by Damien Le Moal's avatar Damien Le Moal Committed by Greg Kroah-Hartman
Browse files

dm zoned: fix metadata block ref counting



commit 33c2865f8d011a2ca9f67124ddab9dc89382e9f1 upstream.

Since the ref field of struct dmz_mblock is always used with the
spinlock of struct dmz_metadata locked, there is no need to use an
atomic_t type. Change the type of the ref field to an unsigned
integer.

Fixes: 3b1a94c8 ("dm zoned: drive-managed zoned block device target")
Cc: stable@vger.kernel.org
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b4212807
Loading
Loading
Loading
Loading
+11 −9
Original line number Diff line number Diff line
@@ -99,7 +99,7 @@ struct dmz_mblock {
	struct rb_node		node;
	struct list_head	link;
	sector_t		no;
	atomic_t		ref;
	unsigned int		ref;
	unsigned long		state;
	struct page		*page;
	void			*data;
@@ -296,7 +296,7 @@ static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,

	RB_CLEAR_NODE(&mblk->node);
	INIT_LIST_HEAD(&mblk->link);
	atomic_set(&mblk->ref, 0);
	mblk->ref = 0;
	mblk->state = 0;
	mblk->no = mblk_no;
	mblk->data = page_address(mblk->page);
@@ -397,7 +397,7 @@ static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
		return NULL;

	spin_lock(&zmd->mblk_lock);
	atomic_inc(&mblk->ref);
	mblk->ref++;
	set_bit(DMZ_META_READING, &mblk->state);
	dmz_insert_mblock(zmd, mblk);
	spin_unlock(&zmd->mblk_lock);
@@ -484,7 +484,8 @@ static void dmz_release_mblock(struct dmz_metadata *zmd,

	spin_lock(&zmd->mblk_lock);

	if (atomic_dec_and_test(&mblk->ref)) {
	mblk->ref--;
	if (mblk->ref == 0) {
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			dmz_free_mblock(zmd, mblk);
@@ -511,7 +512,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
	mblk = dmz_lookup_mblock(zmd, mblk_no);
	if (mblk) {
		/* Cache hit: remove block from LRU list */
		if (atomic_inc_return(&mblk->ref) == 1 &&
		mblk->ref++;
		if (mblk->ref == 1 &&
		    !test_bit(DMZ_META_DIRTY, &mblk->state))
			list_del_init(&mblk->link);
	}
@@ -753,7 +755,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)

		spin_lock(&zmd->mblk_lock);
		clear_bit(DMZ_META_DIRTY, &mblk->state);
		if (atomic_read(&mblk->ref) == 0)
		if (mblk->ref == 0)
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
		spin_unlock(&zmd->mblk_lock);
	}
@@ -2308,7 +2310,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
		mblk = list_first_entry(&zmd->mblk_dirty_list,
					struct dmz_mblock, link);
		dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
			     (u64)mblk->no, atomic_read(&mblk->ref));
			     (u64)mblk->no, mblk->ref);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
@@ -2326,8 +2328,8 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
	root = &zmd->mblk_rbtree;
	rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
		dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
			     (u64)mblk->no, atomic_read(&mblk->ref));
		atomic_set(&mblk->ref, 0);
			     (u64)mblk->no, mblk->ref);
		mblk->ref = 0;
		dmz_free_mblock(zmd, mblk);
	}