Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7bc511a8 authored by Jan Kara, committed by Pradeep P V K
Browse files

writeback: Protect inode->i_io_list with inode->i_lock



Currently, operations on inode->i_io_list are protected by
wb->list_lock. In the following patches we'll need to maintain
consistency between inode->i_state and inode->i_io_list so change the
code so that inode->i_lock protects also all inode's i_io_list handling.

Change-Id: Idad8e2028465703d5e48fd22b8a568eb39530864
Reviewed-by: Martijn Coenen <maco@android.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
CC: stable@vger.kernel.org # Prerequisite for "writeback: Avoid skipping inode writeback"
Signed-off-by: Jan Kara <jack@suse.cz>
Git-commit: 64cf8ca4
Git-repo: https://android.googlesource.com/kernel/common/


Signed-off-by: Pradeep P V K <ppvk@codeaurora.org>
parent 81d62da0
Loading
Loading
Loading
Loading
+17 −5
Original line number Diff line number Diff line
@@ -160,6 +160,7 @@ static void inode_io_list_del_locked(struct inode *inode,
				     struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	assert_spin_locked(&inode->i_lock);

	list_del_init(&inode->i_io_list);
	wb_io_lists_depopulated(wb);
@@ -1041,7 +1042,9 @@ void inode_io_list_del(struct inode *inode)
	struct bdi_writeback *wb;

	wb = inode_to_wb_and_lock_list(inode);
	spin_lock(&inode->i_lock);
	inode_io_list_del_locked(inode, wb);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
}

@@ -1090,8 +1093,10 @@ void sb_clear_inode_writeback(struct inode *inode)
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&inode->i_lock);

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

@@ -1102,6 +1107,13 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
	inode_io_list_move_locked(inode, wb, &wb->b_dirty);
}

/*
 * Locking wrapper around redirty_tail_locked(): acquires inode->i_lock
 * for callers that do not already hold it, then requeues the inode to
 * the tail of wb->b_dirty.
 * NOTE(review): presumably called with wb->list_lock held, matching the
 * pre-patch redirty_tail() contract — confirm against callers.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	spin_lock(&inode->i_lock);
	redirty_tail_locked(inode, wb);
	spin_unlock(&inode->i_lock);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
@@ -1312,7 +1324,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		redirty_tail_locked(inode, wb);
		return;
	}

@@ -1332,7 +1344,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
			redirty_tail_locked(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
@@ -1340,7 +1352,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
		redirty_tail_locked(inode, wb);
	} else if (inode->i_state & I_DIRTY_TIME) {
		inode->dirtied_when = jiffies;
		inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
@@ -1587,8 +1599,8 @@ static long writeback_sb_inodes(struct super_block *sb,
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			redirty_tail_locked(inode, wb);
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {