Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6d17d653 authored by Trond Myklebust
Browse files

NFS: Simplify page writeback



We don't expect the page header lock to ever be held across I/O, so
it should always be safe to wait for it, even if we're doing nonblocking
writebacks.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
parent ef954844
Loading
Loading
Loading
Loading
+10 −20
Original line number Diff line number Diff line
@@ -366,7 +366,6 @@ nfs_page_group_clear_bits(struct nfs_page *req)
 * @inode - inode associated with request page group, must be holding inode lock
 * @head  - head request of page group, must be holding head lock
 * @req   - request that couldn't lock and needs to wait on the req bit lock
 * @nonblock - if true, don't actually wait
 *
 * NOTE: this must be called holding page_group bit lock and inode spin lock
 *       and BOTH will be released before returning.
@@ -375,7 +374,7 @@ nfs_page_group_clear_bits(struct nfs_page *req)
 */
static int
nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
			  struct nfs_page *req, bool nonblock)
			  struct nfs_page *req)
	__releases(&inode->i_lock)
{
	struct nfs_page *tmp;
@@ -396,10 +395,7 @@ nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
	/* release ref from nfs_page_find_head_request_locked */
	nfs_release_request(head);

	if (!nonblock)
	ret = nfs_wait_on_request(req);
	else
		ret = -EAGAIN;
	nfs_release_request(req);

	return ret;
@@ -464,7 +460,6 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
 *                              operations for this page.
 *
 * @page - the page used to lookup the "page group" of nfs_page structures
 * @nonblock - if true, don't block waiting for request locks
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
@@ -478,7 +473,7 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
 * error was encountered.
 */
static struct nfs_page *
nfs_lock_and_join_requests(struct page *page, bool nonblock)
nfs_lock_and_join_requests(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *head, *subreq;
@@ -511,16 +506,11 @@ nfs_lock_and_join_requests(struct page *page, bool nonblock)
	if (ret < 0) {
		spin_unlock(&inode->i_lock);

		if (!nonblock && ret == -EAGAIN) {
		nfs_page_group_lock_wait(head);
		nfs_release_request(head);
		goto try_again;
	}

		nfs_release_request(head);
		return ERR_PTR(ret);
	}

	/* lock each request in the page group */
	subreq = head;
	do {
@@ -543,7 +533,7 @@ nfs_lock_and_join_requests(struct page *page, bool nonblock)
			/* releases page group bit lock and
			 * inode spin lock and all references */
			ret = nfs_unroll_locks_and_wait(inode, head,
				subreq, nonblock);
				subreq);

			if (ret == 0)
				goto try_again;
@@ -624,12 +614,12 @@ nfs_error_is_fatal_on_server(int err)
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page, bool nonblock)
				struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	req = nfs_lock_and_join_requests(page, nonblock);
	req = nfs_lock_and_join_requests(page);
	if (!req)
		goto out;
	ret = PTR_ERR(req);
@@ -672,7 +662,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
	int ret;

	nfs_pageio_cond_complete(pgio, page_index(page));
	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
	ret = nfs_page_async_flush(pgio, page);
	if (ret == -EAGAIN) {
		redirty_page_for_writepage(wbc, page);
		ret = 0;
@@ -2015,7 +2005,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)

	/* blocking call to cancel all requests and join to a single (head)
	 * request */
	req = nfs_lock_and_join_requests(page, false);
	req = nfs_lock_and_join_requests(page);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);