Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f0d07b7f authored by Jan Kara, committed by Fengguang Wu
Browse files

writeback: Remove wb->list_lock from writeback_single_inode()



writeback_single_inode() doesn't need wb->list_lock for anything on entry now.
So remove the requirement. This makes locking of writeback_single_inode()
temporarily awkward (entering with i_lock, returning with i_lock and
wb->list_lock) but it will be sanitized in the next patch.

Also, inode_wait_for_writeback() doesn't need wb->list_lock for anything. It was
just taking it to make usage convenient for callers, but with
writeback_single_inode() changing, it's not very convenient anymore. So remove
the lock from that function.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
parent ccb26b5a
Loading
Loading
Loading
Loading
+7 −10
Original line number Diff line number Diff line
@@ -328,8 +328,7 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc)
/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode,
				     struct bdi_writeback *wb)
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;
@@ -337,9 +336,7 @@ static void inode_wait_for_writeback(struct inode *inode,
	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
	}
}
@@ -418,7 +415,6 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
	unsigned dirty;
	int ret;

	assert_spin_locked(&wb->list_lock);
	assert_spin_locked(&inode->i_lock);

	if (!atomic_read(&inode->i_count))
@@ -432,7 +428,7 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode, wb);
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);
@@ -440,7 +436,6 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);

	ret = do_writepages(mapping, wbc);

@@ -587,6 +582,8 @@ static long writeback_sb_inodes(struct super_block *sb,
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		__iget(inode);
		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
@@ -803,8 +800,10 @@ static long wb_writeback(struct bdi_writeback *wb,
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			inode_wait_for_writeback(inode, wb);
			spin_unlock(&wb->list_lock);
			inode_wait_for_writeback(inode);
			spin_unlock(&inode->i_lock);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);
@@ -1350,7 +1349,6 @@ int write_inode_now(struct inode *inode, int sync)
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, &wbc);
	spin_unlock(&inode->i_lock);
@@ -1375,7 +1373,6 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc)
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	int ret;

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, wbc);
	spin_unlock(&inode->i_lock);