Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 489f8145 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "writeback: Drop I_DIRTY_TIME_EXPIRE"

parents 17b53e15 7c0cd19b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -5280,7 +5280,7 @@ static int other_inode_match(struct inode * inode, unsigned long ino,
	    (inode->i_state & I_DIRTY_TIME)) {
		struct ext4_inode_info	*ei = EXT4_I(inode);

		inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
		inode->i_state &= ~I_DIRTY_TIME;
		spin_unlock(&inode->i_lock);

		spin_lock(&ei->i_raw_lock);
+53 −50
Original line number Diff line number Diff line
@@ -45,7 +45,6 @@ struct wb_completion {
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
@@ -160,7 +159,9 @@ static void inode_io_list_del_locked(struct inode *inode,
				     struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	assert_spin_locked(&inode->i_lock);

	inode->i_state &= ~I_SYNC_QUEUED;
	list_del_init(&inode->i_io_list);
	wb_io_lists_depopulated(wb);
}
@@ -1041,7 +1042,9 @@ void inode_io_list_del(struct inode *inode)
	struct bdi_writeback *wb;

	wb = inode_to_wb_and_lock_list(inode);
	spin_lock(&inode->i_lock);
	inode_io_list_del_locked(inode, wb);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
}

@@ -1090,8 +1093,10 @@ void sb_clear_inode_writeback(struct inode *inode)
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&inode->i_lock);

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

@@ -1100,6 +1105,14 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
			inode->dirtied_when = jiffies;
	}
	inode_io_list_move_locked(inode, wb, &wb->b_dirty);
	inode->i_state &= ~I_SYNC_QUEUED;
}

/*
 * Requeue @inode to the tail of @wb's dirty list, taking inode->i_lock
 * around the move. Plain lock-taking wrapper for callers that do not
 * already hold i_lock; redirty_tail_locked() asserts i_lock is held
 * (see the hunk above), so this wrapper acquires it for the call.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	spin_lock(&inode->i_lock);
	redirty_tail_locked(inode, wb);
	spin_unlock(&inode->i_lock);
}

/*
@@ -1138,16 +1151,13 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
#define EXPIRE_DIRTY_ATIME 0x0001

/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * Move expired (dirtied before dirtied_before) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       int flags,
			       struct wb_writeback_work *work)
			       unsigned long dirtied_before)
{
	unsigned long *older_than_this = NULL;
	unsigned long expire_time;
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
@@ -1155,21 +1165,15 @@ static int move_expired_inodes(struct list_head *delaying_queue,
	int do_sb_sort = 0;
	int moved = 0;

	if ((flags & EXPIRE_DIRTY_ATIME) == 0)
		older_than_this = work->older_than_this;
	else if (!work->for_sync) {
		expire_time = jiffies - (dirtytime_expire_interval * HZ);
		older_than_this = &expire_time;
	}
	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
		if (inode_dirtied_after(inode, dirtied_before))
			break;
		list_move(&inode->i_io_list, &tmp);
		moved++;
		if (flags & EXPIRE_DIRTY_ATIME)
			set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
		spin_lock(&inode->i_lock);
		inode->i_state |= I_SYNC_QUEUED;
		spin_unlock(&inode->i_lock);
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
@@ -1207,18 +1211,22 @@ static int move_expired_inodes(struct list_head *delaying_queue,
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
		     unsigned long dirtied_before)
{
	int moved;
	unsigned long time_expire_jif = dirtied_before;

	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
	if (!work->for_sync)
		time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
				     EXPIRE_DIRTY_ATIME, work);
				     time_expire_jif);
	if (moved)
		wb_io_lists_populated(wb);
	trace_writeback_queue_io(wb, work, moved);
	trace_writeback_queue_io(wb, work, dirtied_before, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -1312,7 +1320,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		redirty_tail_locked(inode, wb);
		return;
	}

@@ -1332,7 +1340,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
			redirty_tail_locked(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
@@ -1340,10 +1348,11 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
		redirty_tail_locked(inode, wb);
	} else if (inode->i_state & I_DIRTY_TIME) {
		inode->dirtied_when = jiffies;
		inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
		inode->i_state &= ~I_SYNC_QUEUED;
	} else {
		/* The inode is clean. Remove from writeback lists. */
		inode_io_list_del_locked(inode, wb);
@@ -1390,18 +1399,14 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
	spin_lock(&inode->i_lock);

	dirty = inode->i_state & I_DIRTY;
	if (inode->i_state & I_DIRTY_TIME) {
		if ((dirty & I_DIRTY_INODE) ||
		    wbc->sync_mode == WB_SYNC_ALL ||
		    unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
		    unlikely(time_after(jiffies,
					(inode->dirtied_time_when +
					 dirtytime_expire_interval * HZ)))) {
			dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
	if ((inode->i_state & I_DIRTY_TIME) &&
	    ((dirty & I_DIRTY_INODE) ||
	     wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
	     time_after(jiffies, inode->dirtied_time_when +
			dirtytime_expire_interval * HZ))) {
		dirty |= I_DIRTY_TIME;
		trace_writeback_lazytime(inode);
	}
	} else
		inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
	inode->i_state &= ~dirty;

	/*
@@ -1587,8 +1592,8 @@ static long writeback_sb_inodes(struct super_block *sb,
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			redirty_tail_locked(inode, wb);
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
@@ -1729,7 +1734,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
	blk_start_plug(&plug);
	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
		queue_io(wb, &work, jiffies);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);
	blk_finish_plug(&plug);
@@ -1749,7 +1754,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * dirtied_before takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
@@ -1757,14 +1762,11 @@ static long wb_writeback(struct bdi_writeback *wb,
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	unsigned long dirtied_before = jiffies;
	struct inode *inode;
	long progress;
	struct blk_plug plug;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	blk_start_plug(&plug);
	spin_lock(&wb->list_lock);
	for (;;) {
@@ -1798,14 +1800,14 @@ static long wb_writeback(struct bdi_writeback *wb,
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
			dirtied_before = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;
			dirtied_before = jiffies;

		trace_writeback_start(wb, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
			queue_io(wb, work, dirtied_before);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
@@ -2207,11 +2209,12 @@ void __mark_inode_dirty(struct inode *inode, int flags)
		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 * If the inode is queued for writeback by flush worker, just
		 * update its dirty state. Once the flush worker is done with
		 * the inode it will place it on the appropriate superblock
		 * list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
		if (inode->i_state & I_SYNC_QUEUED)
			goto out_unlock_inode;

		/*
+5 −2
Original line number Diff line number Diff line
@@ -2092,6 +2092,10 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
 *
 * I_CREATING		New object's inode in the middle of setting up.
 *
 * I_SYNC_QUEUED	Inode is queued in b_io or b_more_io writeback lists.
 *			Used to detect that mark_inode_dirty() should not move
 *			inode between dirty lists.
 *
 * Q: What is the difference between I_WILL_FREE and I_FREEING?
 */
#define I_DIRTY_SYNC		(1 << 0)
@@ -2109,11 +2113,10 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
#define I_DIO_WAKEUP		(1 << __I_DIO_WAKEUP)
#define I_LINKABLE		(1 << 10)
#define I_DIRTY_TIME		(1 << 11)
#define __I_DIRTY_TIME_EXPIRED	12
#define I_DIRTY_TIME_EXPIRED	(1 << __I_DIRTY_TIME_EXPIRED)
#define I_WB_SWITCH		(1 << 13)
#define I_OVL_INUSE		(1 << 14)
#define I_CREATING		(1 << 15)
#define I_SYNC_QUEUED		(1 << 17)

#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
+6 −8
Original line number Diff line number Diff line
@@ -20,7 +20,6 @@
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
		{I_REFERENCED,		"I_REFERENCED"}		\
	)

@@ -360,8 +359,9 @@ DEFINE_WBC_EVENT(wbc_writepage);
TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 unsigned long dirtied_before,
		 int moved),
	TP_ARGS(wb, work, moved),
	TP_ARGS(wb, work, dirtied_before, moved),
	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(unsigned long,	older)
@@ -371,19 +371,17 @@ TRACE_EVENT(writeback_queue_io,
		__field(unsigned int,	cgroup_ino)
	),
	TP_fast_assign(
		unsigned long *older_than_this = work->older_than_this;
		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
		__entry->older	= older_than_this ?  *older_than_this : 0;
		__entry->age	= older_than_this ?
				  (jiffies - *older_than_this) * 1000 / HZ : -1;
		__entry->older	= dirtied_before;
		__entry->age	= (jiffies - dirtied_before) * 1000 / HZ;
		__entry->moved	= moved;
		__entry->reason	= work->reason;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
		__entry->name,
		__entry->older,	/* older_than_this in jiffies */
		__entry->age,	/* older_than_this in relative milliseconds */
		__entry->older,	/* dirtied_before in jiffies */
		__entry->age,	/* dirtied_before in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		__entry->cgroup_ino