
Commit d6c843b9 authored by Peng Tao, committed by Trond Myklebust

nfs: only remove page from mapping if launder_page fails



Instead of dropping pages whenever a write fails, only do so when
we get a fatal failure during launder_page writeback.

Signed-off-by: Peng Tao <tao.peng@primarydata.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
parent 0bcbf039
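
For orientation, here is the call chain that this patch threads the new
"launder" flag through, reconstructed from the hunks below (a reading aid,
not part of the commit itself):

/*
 * nfs_launder_page()                      ->launder_page entry point
 *   -> nfs_wb_launder_page()              new inline wrapper, launder == true
 *     -> nfs_wb_single_page()             renamed from nfs_wb_page()
 *       -> nfs_writepage_locked()         gains a bool launder parameter
 *         -> nfs_do_writepage()           gains a bool launder parameter
 *           -> nfs_page_async_flush()     removes the page only if launder
 *
 * Ordinary writeback (nfs_writepage(), nfs_writepages_callback()) passes
 * launder == false, so a fatal error now redirties the page instead of
 * removing it from the mapping.
 */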
fs/nfs/file.c  +1 −1

@@ -545,7 +545,7 @@ static int nfs_launder_page(struct page *page)
 		inode->i_ino, (long long)page_offset(page));
 
 	nfs_fscache_wait_on_page_write(nfsi, page);
-	return nfs_wb_page(inode, page);
+	return nfs_wb_launder_page(inode, page);
 }
 
 static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
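
The hunk above switches nfs_launder_page() to the laundering variant of the
single-page flush. For context, this function is wired up as the
->launder_page address_space operation, which the VM calls on a dirty,
locked page that is about to be removed from the page cache (e.g. from
invalidate_inode_pages2_range()), so dropping the page on a fatal write
error is safe only on this path. A sketch of the hookup, recalled from
fs/nfs/file.c of this era rather than shown in this diff:

/* Excerpt-style sketch, not part of this commit. */
const struct address_space_operations nfs_file_aops = {
	/* ... */
	.launder_page	= nfs_launder_page,	/* flush before page removal */
	/* ... */
};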
fs/nfs/write.c  +23 −16

@@ -559,7 +559,8 @@ static void nfs_write_error_remove_page(struct nfs_page *req)
  * May return an error if the user signalled nfs_wait_on_request().
  */
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
-				struct page *page, bool nonblock)
+				struct page *page, bool nonblock,
+				bool launder)
 {
 	struct nfs_page *req;
 	int ret = 0;
@@ -578,17 +579,19 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 	if (!nfs_pageio_add_request(pgio, req)) {
 		ret = pgio->pg_error;
 		/*
-		 * Remove the problematic req upon fatal errors,
-		 * while other dirty pages can still be around
-		 * until they get flushed.
+		 * Remove the problematic req upon fatal errors
+		 * in launder case, while other dirty pages can
+		 * still be around until they get flushed.
 		 */
 		if (nfs_error_is_fatal(ret)) {
 			nfs_context_set_write_error(req->wb_context, ret);
-			nfs_write_error_remove_page(req);
-		} else {
-			nfs_redirty_request(req);
-			ret = -EAGAIN;
+			if (launder) {
+				nfs_write_error_remove_page(req);
+				goto out;
+			}
 		}
+		nfs_redirty_request(req);
+		ret = -EAGAIN;
 	} else
 		nfs_add_stats(page_file_mapping(page)->host,
 				NFSIOS_WRITEPAGES, 1);
@@ -596,12 +599,14 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 	return ret;
 }
 
-static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
+static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
+			    struct nfs_pageio_descriptor *pgio, bool launder)
 {
 	int ret;
 
 	nfs_pageio_cond_complete(pgio, page_file_index(page));
-	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
+	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE,
+				   launder);
 	if (ret == -EAGAIN) {
 		redirty_page_for_writepage(wbc, page);
 		ret = 0;
@@ -612,7 +617,9 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
 /*
  * Write an mmapped page to the server.
  */
-static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
+static int nfs_writepage_locked(struct page *page,
+				struct writeback_control *wbc,
+				bool launder)
 {
 	struct nfs_pageio_descriptor pgio;
 	struct inode *inode = page_file_mapping(page)->host;
@@ -621,7 +628,7 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
 	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
 				false, &nfs_async_write_completion_ops);
-	err = nfs_do_writepage(page, wbc, &pgio);
+	err = nfs_do_writepage(page, wbc, &pgio, launder);
 	nfs_pageio_complete(&pgio);
 	if (err < 0)
 		return err;
@@ -634,7 +641,7 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
 {
 	int ret;
 
-	ret = nfs_writepage_locked(page, wbc);
+	ret = nfs_writepage_locked(page, wbc, false);
 	unlock_page(page);
 	return ret;
 }
@@ -643,7 +650,7 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
 {
 	int ret;
 
-	ret = nfs_do_writepage(page, wbc, data);
+	ret = nfs_do_writepage(page, wbc, data, false);
 	unlock_page(page);
 	return ret;
 }
@@ -1931,7 +1938,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 /*
  * Write back all requests on one page - we do this before reading it.
  */
-int nfs_wb_page(struct inode *inode, struct page *page)
+int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder)
 {
 	loff_t range_start = page_file_offset(page);
 	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
@@ -1948,7 +1955,7 @@ int nfs_wb_page(struct inode *inode, struct page *page)
 	for (;;) {
 		wait_on_page_writeback(page);
 		if (clear_page_dirty_for_io(page)) {
-			ret = nfs_writepage_locked(page, &wbc);
+			ret = nfs_writepage_locked(page, &wbc, launder);
 			if (ret < 0)
 				goto out_error;
 			continue;
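
Note that the retry loop above runs under the function's local
writeback_control, which (recalled from the unchanged lines just above this
hunk, not shown in the diff) is set up roughly as follows; because
sync_mode is WB_SYNC_ALL, nfs_do_writepage() passes nonblock == false down
to nfs_page_async_flush(), so laundering waits on the request rather than
backing off:

/* Sketch of the unchanged wbc, an assumption from the surrounding source. */
struct writeback_control wbc = {
	.sync_mode	= WB_SYNC_ALL,
	.nr_to_write	= 0,
	.range_start	= range_start,
	.range_end	= range_end,
};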
include/linux/nfs_fs.h  +13 −1

@@ -517,12 +517,24 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
  */
 extern int nfs_sync_inode(struct inode *inode);
 extern int nfs_wb_all(struct inode *inode);
-extern int nfs_wb_page(struct inode *inode, struct page* page);
+extern int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder);
 extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
 extern int  nfs_commit_inode(struct inode *, int);
 extern struct nfs_commit_data *nfs_commitdata_alloc(void);
 extern void nfs_commit_free(struct nfs_commit_data *data);
 
+static inline int
+nfs_wb_launder_page(struct inode *inode, struct page *page)
+{
+	return nfs_wb_single_page(inode, page, true);
+}
+
+static inline int
+nfs_wb_page(struct inode *inode, struct page *page)
+{
+	return nfs_wb_single_page(inode, page, false);
+}
+
 static inline int
 nfs_have_writebacks(struct inode *inode)
 {
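
Because nfs_wb_page() survives as an inline wrapper with launder == false,
the many existing callers keep their old, non-destructive semantics without
any churn; only the launder path opts in to removing the page. A minimal
caller sketch (example_flush_before_read is an illustrative name, not from
the tree):

/* Flush all requests on a page before reading it, old behaviour intact. */
static int example_flush_before_read(struct inode *inode, struct page *page)
{
	return nfs_wb_page(inode, page);  /* nfs_wb_single_page(..., false) */
}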