Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1b430bee authored by Wu Fengguang's avatar Wu Fengguang Committed by Linus Torvalds
Browse files

writeback: remove nonblocking/encountered_congestion references



This removes more dead code that was somehow missed by commit 0d99519e
(writeback: remove unused nonblocking and congestion checks).  There are
no behavior changes except for the removal of two entries from the
ext4 tracing interface.

The nonblocking checks in ->writepages are no longer used because the
flusher now prefers to block on get_request_wait() rather than skip inodes on
IO congestion.  The latter will lead to more seeky IO.

The nonblocking checks in ->writepage are no longer used because it's
redundant with the WB_SYNC_NONE check.

We no longer set ->nonblocking in VM page out and page migration, because
a) it's effectively redundant with WB_SYNC_NONE in current code
b) its old semantics of "Don't get stuck on request queues" are a misbehavior:
   that would skip some dirty inodes on congestion and page out others, which
   is unfair in terms of LRU age.

Inspired by Christoph Hellwig. Thanks!

Signed-off-by: default avatarWu Fengguang <fengguang.wu@intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: David Howells <dhowells@redhat.com>
Cc: Sage Weil <sage@newdream.net>
Cc: Steve French <sfrench@samba.org>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent d19d5476
Loading
Loading
Loading
Loading
+1 −18
Original line number Original line Diff line number Diff line
@@ -438,7 +438,6 @@ static int afs_write_back_from_locked_page(struct afs_writeback *wb,
 */
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
{
	struct backing_dev_info *bdi = page->mapping->backing_dev_info;
	struct afs_writeback *wb;
	struct afs_writeback *wb;
	int ret;
	int ret;


@@ -455,8 +454,6 @@ int afs_writepage(struct page *page, struct writeback_control *wbc)
	}
	}


	wbc->nr_to_write -= ret;
	wbc->nr_to_write -= ret;
	if (wbc->nonblocking && bdi_write_congested(bdi))
		wbc->encountered_congestion = 1;


	_leave(" = 0");
	_leave(" = 0");
	return 0;
	return 0;
@@ -469,7 +466,6 @@ static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct afs_writeback *wb;
	struct afs_writeback *wb;
	struct page *page;
	struct page *page;
	int ret, n;
	int ret, n;
@@ -529,11 +525,6 @@ static int afs_writepages_region(struct address_space *mapping,


		wbc->nr_to_write -= ret;
		wbc->nr_to_write -= ret;


		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			break;
		}

		cond_resched();
		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);
	} while (index < end && wbc->nr_to_write > 0);


@@ -548,24 +539,16 @@ static int afs_writepages_region(struct address_space *mapping,
int afs_writepages(struct address_space *mapping,
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
		   struct writeback_control *wbc)
{
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	pgoff_t start, end, next;
	pgoff_t start, end, next;
	int ret;
	int ret;


	_enter("");
	_enter("");


	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		_leave(" = 0 [congest]");
		return 0;
	}

	if (wbc->range_cyclic) {
	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		start = mapping->writeback_index;
		end = -1;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0 &&
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
		    !(wbc->nonblocking && wbc->encountered_congestion))
			ret = afs_writepages_region(mapping, wbc, 0, start,
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
						    &next);
		mapping->writeback_index = next;
		mapping->writeback_index = next;
+1 −1
Original line number Original line Diff line number Diff line
@@ -1706,7 +1706,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
		 * and kswapd activity, but those code paths have their own
		 * and kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 * higher-level throttling.
		 */
		 */
		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			redirty_page_for_writepage(wbc, page);
+0 −9
Original line number Original line Diff line number Diff line
@@ -591,7 +591,6 @@ static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
				 struct writeback_control *wbc)
{
{
	struct inode *inode = mapping->host;
	struct inode *inode = mapping->host;
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc;
	struct ceph_fs_client *fsc;
	pgoff_t index, start, end;
	pgoff_t index, start, end;
@@ -633,13 +632,6 @@ static int ceph_writepages_start(struct address_space *mapping,


	pagevec_init(&pvec, 0);
	pagevec_init(&pvec, 0);


	/* ?? */
	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		dout(" writepages congested\n");
		wbc->encountered_congestion = 1;
		goto out_final;
	}

	/* where to start/end? */
	/* where to start/end? */
	if (wbc->range_cyclic) {
	if (wbc->range_cyclic) {
		start = mapping->writeback_index; /* Start from prev offset */
		start = mapping->writeback_index; /* Start from prev offset */
@@ -885,7 +877,6 @@ static int ceph_writepages_start(struct address_space *mapping,
		rc = 0;  /* vfs expects us to return 0 */
		rc = 0;  /* vfs expects us to return 0 */
	ceph_put_snap_context(snapc);
	ceph_put_snap_context(snapc);
	dout("writepages done, rc = %d\n", rc);
	dout("writepages done, rc = %d\n", rc);
out_final:
	return rc;
	return rc;
}
}


+0 −10
Original line number Original line Diff line number Diff line
@@ -1303,7 +1303,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
static int cifs_writepages(struct address_space *mapping,
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
			   struct writeback_control *wbc)
{
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	unsigned int bytes_to_write;
	unsigned int bytes_to_write;
	unsigned int bytes_written;
	unsigned int bytes_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_sb_info *cifs_sb;
@@ -1326,15 +1325,6 @@ static int cifs_writepages(struct address_space *mapping,
	int scanned = 0;
	int scanned = 0;
	int xid, long_op;
	int xid, long_op;


	/*
	 * BB: Is this meaningful for a non-block-device file system?
	 * If it is, we should test it again after we do I/O
	 */
	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	cifs_sb = CIFS_SB(mapping->host->i_sb);
	cifs_sb = CIFS_SB(mapping->host->i_sb);


	/*
	/*
+1 −1
Original line number Original line Diff line number Diff line
@@ -55,7 +55,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
		 * activity, but those code paths have their own higher-level
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 * throttling.
		 */
		 */
		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			redirty_page_for_writepage(wbc, page);
Loading