
Commit e8e8a0c6 authored by Jens Axboe

writeback: move nr_pages == 0 logic to one location



Now that we have no external callers of wb_start_writeback(), we
can shuffle the passing in of 'nr_pages'. Everybody passes in 0
at this point, so just kill the argument and move the dirty
count retrieval to that function.

Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Tested-by: Chris Mason <clm@fb.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9dfb176f
+17 −24
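For orientation, the sketch below is a small, self-contained userspace model of the call shape before and after this change. The struct, the constant page count, and the printf output are stand-ins invented for illustration; only the function names (get_nr_dirty_pages, wb_split_bdi_pages, wb_start_writeback) and the direction in which the dirty-page count flows are taken from the diff that follows. The wb_reason argument is omitted for brevity.

#include <stdio.h>

/* Stand-in for struct bdi_writeback; only an id is modelled. */
struct bdi_writeback { long id; };

/* Stand-in for the real sum (NR_FILE_DIRTY + NR_UNSTABLE_NFS + dirty inodes). */
static long get_nr_dirty_pages(void)
{
	return 1024;
}

/* Stand-in for wb_split_bdi_pages(): hand each wb its share of the total. */
static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
{
	(void)wb;
	return nr_pages;	/* single-writeback model, no real splitting */
}

/* Before this commit: every caller computed and split the count itself. */
static void wb_start_writeback_old(struct bdi_writeback *wb, long nr_pages)
{
	printf("old: wb %ld asked to write %ld pages (count supplied by caller)\n",
	       wb->id, nr_pages);
}

/* After this commit: the count is retrieved and split in one place. */
static void wb_start_writeback_new(struct bdi_writeback *wb)
{
	long nr_pages = wb_split_bdi_pages(wb, get_nr_dirty_pages());

	printf("new: wb %ld asked to write %ld pages (count retrieved here)\n",
	       wb->id, nr_pages);
}

int main(void)
{
	struct bdi_writeback wb = { .id = 0 };

	/* Old shape: the wakeup paths repeated the retrieval/split dance. */
	wb_start_writeback_old(&wb, wb_split_bdi_pages(&wb, get_nr_dirty_pages()));

	/* New shape: callers only say that writeback should start, not how much. */
	wb_start_writeback_new(&wb);
	return 0;
}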
@@ -933,8 +933,18 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 
 #endif	/* CONFIG_CGROUP_WRITEBACK */
 
-static void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
-				enum wb_reason reason)
+/*
+ * Add in the number of potentially dirty inodes, because each inode
+ * write can dirty pagecache in the underlying blockdev.
+ */
+static unsigned long get_nr_dirty_pages(void)
+{
+	return global_node_page_state(NR_FILE_DIRTY) +
+		global_node_page_state(NR_UNSTABLE_NFS) +
+		get_nr_dirty_inodes();
+}
+
+static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
 {
 	struct wb_writeback_work *work;
 
@@ -954,7 +964,7 @@ static void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
 	}
 
 	work->sync_mode	= WB_SYNC_NONE;
-	work->nr_pages	= nr_pages;
+	work->nr_pages	= wb_split_bdi_pages(wb, get_nr_dirty_pages());
 	work->range_cyclic = 1;
 	work->reason	= reason;
 	work->auto_free	= 1;
@@ -1814,17 +1824,6 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
 	return work;
 }
 
-/*
- * Add in the number of potentially dirty inodes, because each inode
- * write can dirty pagecache in the underlying blockdev.
- */
-static unsigned long get_nr_dirty_pages(void)
-{
-	return global_node_page_state(NR_FILE_DIRTY) +
-		global_node_page_state(NR_UNSTABLE_NFS) +
-		get_nr_dirty_inodes();
-}
-
 static long wb_check_background_flush(struct bdi_writeback *wb)
 {
 	if (wb_over_bg_thresh(wb)) {
@@ -1951,7 +1950,7 @@ void wb_workfn(struct work_struct *work)
  * write back the whole world.
  */
 static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
-					 long nr_pages, enum wb_reason reason)
+					 enum wb_reason reason)
 {
 	struct bdi_writeback *wb;
 
@@ -1959,17 +1958,14 @@ static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
 		return;
 
 	list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
-		wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages),
-					reason);
+		wb_start_writeback(wb, reason);
 }
 
 void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
 				enum wb_reason reason)
 {
-	long nr_pages = get_nr_dirty_pages();
-
 	rcu_read_lock();
-	__wakeup_flusher_threads_bdi(bdi, nr_pages, reason);
+	__wakeup_flusher_threads_bdi(bdi, reason);
 	rcu_read_unlock();
 }
 
@@ -1979,7 +1975,6 @@ void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
 void wakeup_flusher_threads(enum wb_reason reason)
 {
 	struct backing_dev_info *bdi;
-	long nr_pages;
 
 	/*
 	 * If we are expecting writeback progress we must submit plugged IO.
@@ -1987,11 +1982,9 @@ void wakeup_flusher_threads(enum wb_reason reason)
 	if (blk_needs_flush_plug(current))
 		blk_schedule_flush_plug(current);
 
-	nr_pages = get_nr_dirty_pages();
-
 	rcu_read_lock();
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
-		__wakeup_flusher_threads_bdi(bdi, nr_pages, reason);
+		__wakeup_flusher_threads_bdi(bdi, reason);
 	rcu_read_unlock();
 }