
Commit e7d39069 authored by Trond Myklebust

NFS: Clean up nfs_update_request()



Simplify the loop in nfs_update_request by moving into a separate function
the code that attempts to update an existing cached NFS write.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 396cee97
+103 −98
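For orientation before reading the diff: after this patch, nfs_writepage_setup() calls a new helper, nfs_setup_write_request(), which either merges the dirty region into a write request already cached for the page or creates and inserts a fresh one. A condensed sketch (copied from the hunks below and annotated; error handling and locking live in the called helpers, so this is not standalone code):

	static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
			struct page *page, unsigned int offset, unsigned int bytes)
	{
		struct inode *inode = page->mapping->host;
		struct nfs_page	*req;
		int error;

		/* First try to extend a request already cached for this page;
		 * returns NULL when there is nothing to merge, or an ERR_PTR()
		 * if the caller must give up. */
		req = nfs_try_to_update_request(inode, page, offset, bytes);
		if (req != NULL)
			goto out;
		/* Nothing to merge: create a new request and insert it into
		 * the inode's radix tree. */
		req = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(req))
			goto out;
		error = nfs_inode_add_request(inode, req);
		if (error != 0) {
			nfs_release_request(req);
			req = ERR_PTR(error);
		}
	out:
		return req;
	}

With this split, the old -EBUSY/nfs_wb_page() retry loop in nfs_writepage_setup() goes away: flushing a non-contiguous request is handled inside nfs_try_to_update_request() (the out_flushme path), and nfs_inode_add_request() now does its own radix-tree preload and i_lock handling, returning an error instead of being void.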
@@ -34,9 +34,6 @@
 /*
  * Local function declarations
  */
-static struct nfs_page * nfs_update_request(struct nfs_open_context*,
-					    struct page *,
-					    unsigned int, unsigned int);
 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
 				  struct inode *inode, int ioflags);
 static void nfs_redirty_request(struct nfs_page *req);
@@ -169,30 +166,6 @@ static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int
 	SetPageUptodate(page);
 }
 
-static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
-		unsigned int offset, unsigned int count)
-{
-	struct nfs_page	*req;
-	int ret;
-
-	for (;;) {
-		req = nfs_update_request(ctx, page, offset, count);
-		if (!IS_ERR(req))
-			break;
-		ret = PTR_ERR(req);
-		if (ret != -EBUSY)
-			return ret;
-		ret = nfs_wb_page(page->mapping->host, page);
-		if (ret != 0)
-			return ret;
-	}
-	/* Update file length */
-	nfs_grow_file(page, offset, count);
-	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
-	nfs_clear_page_tag_locked(req);
-	return 0;
-}
-
 static int wb_priority(struct writeback_control *wbc)
 {
 	if (wbc->for_reclaim)
@@ -356,11 +329,19 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 /*
  * Insert a write request into an inode
  */
-static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
+static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
 	int error;
 
+	error = radix_tree_preload(GFP_NOFS);
+	if (error != 0)
+		goto out;
+
+	/* Lock the request! */
+	nfs_lock_request_dontget(req);
+
+	spin_lock(&inode->i_lock);
 	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
 	BUG_ON(error);
 	if (!nfsi->npages) {
@@ -374,6 +355,10 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 	kref_get(&req->wb_kref);
 	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
 				NFS_PAGE_TAG_LOCKED);
+	spin_unlock(&inode->i_lock);
+	radix_tree_preload_end();
+out:
+	return error;
 }
 
 /*
@@ -565,101 +550,121 @@ static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pg
 #endif
 
 /*
- * Try to update any existing write request, or create one if there is none.
- * In order to match, the request's credentials must match those of
- * the calling process.
+ * Search for an existing write request, and attempt to update
+ * it to reflect a new dirty region on a given page.
  *
- * Note: Should always be called with the Page Lock held!
+ * If the attempt fails, then the existing request is flushed out
+ * to disk.
  */
-static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
-		struct page *page, unsigned int offset, unsigned int bytes)
+static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
+		struct page *page,
+		unsigned int offset,
+		unsigned int bytes)
 {
-	struct address_space *mapping = page->mapping;
-	struct inode *inode = mapping->host;
-	struct nfs_page		*req, *new = NULL;
-	pgoff_t		rqend, end;
+	struct nfs_page *req;
+	unsigned int rqend;
+	unsigned int end;
+	int error;
+
+	if (!PagePrivate(page))
+		return NULL;
 
 	end = offset + bytes;
+	spin_lock(&inode->i_lock);
 
 	for (;;) {
-		/* Loop over all inode entries and see if we find
-		 * A request for the page we wish to update
-		 */
-		spin_lock(&inode->i_lock);
 		req = nfs_page_find_request_locked(page);
-		if (req) {
-			if (!nfs_set_page_tag_locked(req)) {
-				int error;
+		if (req == NULL)
+			goto out_unlock;
 
-				spin_unlock(&inode->i_lock);
-				error = nfs_wait_on_request(req);
-				nfs_release_request(req);
-				if (error < 0) {
-					if (new) {
-						radix_tree_preload_end();
-						nfs_release_request(new);
-					}
-					return ERR_PTR(error);
-				}
-				continue;
-			}
-			spin_unlock(&inode->i_lock);
-			if (new) {
-				radix_tree_preload_end();
-				nfs_release_request(new);
-			}
+		rqend = req->wb_offset + req->wb_bytes;
+		/*
+		 * Tell the caller to flush out the request if
+		 * the offsets are non-contiguous.
+		 * Note: nfs_flush_incompatible() will already
+		 * have flushed out requests having wrong owners.
+		 */
+		if (!nfs_dirty_request(req)
+		    || offset > rqend
+		    || end < req->wb_offset)
+			goto out_flushme;
+
+		if (nfs_set_page_tag_locked(req))
 			break;
-		}
 
-		if (new) {
-			nfs_lock_request_dontget(new);
-			nfs_inode_add_request(inode, new);
-			spin_unlock(&inode->i_lock);
-			radix_tree_preload_end();
-			req = new;
-			goto out;
-		}
+		/* The request is locked, so wait and then retry */
 		spin_unlock(&inode->i_lock);
-
-		new = nfs_create_request(ctx, inode, page, offset, bytes);
-		if (IS_ERR(new))
-			return new;
-		if (radix_tree_preload(GFP_NOFS)) {
-			nfs_release_request(new);
-			return ERR_PTR(-ENOMEM);
-		}
-	}
-
-	/* We have a request for our page.
-	 * If the creds don't match, or the
-	 * page addresses don't match,
-	 * tell the caller to wait on the conflicting
-	 * request.
-	 */
-	rqend = req->wb_offset + req->wb_bytes;
-	if (req->wb_context != ctx
-	    || req->wb_page != page
-	    || !nfs_dirty_request(req)
-	    || offset > rqend || end < req->wb_offset) {
-		nfs_clear_page_tag_locked(req);
-		return ERR_PTR(-EBUSY);
+		error = nfs_wait_on_request(req);
+		nfs_release_request(req);
+		if (error != 0)
+			goto out_err;
+		spin_lock(&inode->i_lock);
 	}
 
 	/* Okay, the request matches. Update the region */
 	if (offset < req->wb_offset) {
 		req->wb_offset = offset;
 		req->wb_pgbase = offset;
-		req->wb_bytes = max(end, rqend) - req->wb_offset;
-		goto out;
 	}
-
 	if (end > rqend)
 		req->wb_bytes = end - req->wb_offset;
+	else
+		req->wb_bytes = rqend - req->wb_offset;
+out_unlock:
+	spin_unlock(&inode->i_lock);
+	return req;
+out_flushme:
+	spin_unlock(&inode->i_lock);
+	nfs_release_request(req);
+	error = nfs_wb_page(inode, page);
+out_err:
+	return ERR_PTR(error);
+}
 
+/*
+ * Try to update an existing write request, or create one if there is none.
+ *
+ * Note: Should always be called with the Page Lock held to prevent races
+ * if we have to add a new request. Also assumes that the caller has
+ * already called nfs_flush_incompatible() if necessary.
+ */
+static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
+		struct page *page, unsigned int offset, unsigned int bytes)
+{
+	struct inode *inode = page->mapping->host;
+	struct nfs_page	*req;
+	int error;
+
+	req = nfs_try_to_update_request(inode, page, offset, bytes);
+	if (req != NULL)
+		goto out;
+	req = nfs_create_request(ctx, inode, page, offset, bytes);
+	if (IS_ERR(req))
+		goto out;
+	error = nfs_inode_add_request(inode, req);
+	if (error != 0) {
+		nfs_release_request(req);
+		req = ERR_PTR(error);
+	}
 out:
 	return req;
 }
 
+static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
+		unsigned int offset, unsigned int count)
+{
+	struct nfs_page	*req;
+
+	req = nfs_setup_write_request(ctx, page, offset, count);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+	/* Update file length */
+	nfs_grow_file(page, offset, count);
+	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
+	nfs_clear_page_tag_locked(req);
+	return 0;
+}
+
 int nfs_flush_incompatible(struct file *file, struct page *page)
 {
 	struct nfs_open_context *ctx = nfs_file_open_context(file);