Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit acee478a authored by Trond Myklebust
Browse files

NFS: Clean up the write request locking.



Ensure that we set/clear NFS_PAGE_TAG_LOCKED when the nfs_page is hashed.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 8b1f9ee5
Loading
Loading
Loading
Loading
+8 −5
Original line number Original line Diff line number Diff line
@@ -111,12 +111,13 @@ void nfs_unlock_request(struct nfs_page *req)
 * nfs_set_page_tag_locked - Tag a request as locked
 * nfs_set_page_tag_locked - Tag a request as locked
 * @req:
 * @req:
 */
 */
static int nfs_set_page_tag_locked(struct nfs_page *req)
int nfs_set_page_tag_locked(struct nfs_page *req)
{
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
	struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);


	if (!nfs_lock_request(req))
	if (!nfs_lock_request_dontget(req))
		return 0;
		return 0;
	if (req->wb_page != NULL)
		radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
		radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
	return 1;
	return 1;
}
}
@@ -132,8 +133,9 @@ void nfs_clear_page_tag_locked(struct nfs_page *req)
	if (req->wb_page != NULL) {
	if (req->wb_page != NULL) {
		spin_lock(&inode->i_lock);
		spin_lock(&inode->i_lock);
		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
		nfs_unlock_request(req);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode->i_lock);
	}
	} else
		nfs_unlock_request(req);
		nfs_unlock_request(req);
}
}


@@ -421,6 +423,7 @@ int nfs_scan_list(struct nfs_inode *nfsi,
				goto out;
				goto out;
			idx_start = req->wb_index + 1;
			idx_start = req->wb_index + 1;
			if (nfs_set_page_tag_locked(req)) {
			if (nfs_set_page_tag_locked(req)) {
				kref_get(&req->wb_kref);
				nfs_list_remove_request(req);
				nfs_list_remove_request(req);
				radix_tree_tag_clear(&nfsi->nfs_page_tree,
				radix_tree_tag_clear(&nfsi->nfs_page_tree,
						req->wb_index, tag);
						req->wb_index, tag);
+7 −9
Original line number Original line Diff line number Diff line
@@ -196,7 +196,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
	}
	}
	/* Update file length */
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_grow_file(page, offset, count);
	nfs_unlock_request(req);
	nfs_clear_page_tag_locked(req);
	return 0;
	return 0;
}
}


@@ -252,7 +252,6 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page)
				struct page *page)
{
{
	struct inode *inode = page->mapping->host;
	struct inode *inode = page->mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	struct nfs_page *req;
	int ret;
	int ret;


@@ -263,10 +262,10 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode->i_lock);
			return 0;
			return 0;
		}
		}
		if (nfs_lock_request_dontget(req))
		if (nfs_set_page_tag_locked(req))
			break;
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 *	 then the call to nfs_lock_request_dontget() will always
		 *	 then the call to nfs_set_page_tag_locked() will always
		 *	 succeed provided that someone hasn't already marked the
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 *	 request as dirty (in which case we don't care).
		 */
		 */
@@ -280,7 +279,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		/* This request is marked for commit */
		/* This request is marked for commit */
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode->i_lock);
		nfs_unlock_request(req);
		nfs_clear_page_tag_locked(req);
		nfs_pageio_complete(pgio);
		nfs_pageio_complete(pgio);
		return 0;
		return 0;
	}
	}
@@ -288,8 +287,6 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode->i_lock);
		BUG();
		BUG();
	}
	}
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
			NFS_PAGE_TAG_LOCKED);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode->i_lock);
	nfs_pageio_add_request(pgio, req);
	nfs_pageio_add_request(pgio, req);
	return 0;
	return 0;
@@ -381,6 +378,7 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
	set_page_private(req->wb_page, (unsigned long)req);
	set_page_private(req->wb_page, (unsigned long)req);
	nfsi->npages++;
	nfsi->npages++;
	kref_get(&req->wb_kref);
	kref_get(&req->wb_kref);
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
	return 0;
	return 0;
}
}


@@ -596,7 +594,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		spin_lock(&inode->i_lock);
		spin_lock(&inode->i_lock);
		req = nfs_page_find_request_locked(page);
		req = nfs_page_find_request_locked(page);
		if (req) {
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
			if (!nfs_set_page_tag_locked(req)) {
				int error;
				int error;


				spin_unlock(&inode->i_lock);
				spin_unlock(&inode->i_lock);
@@ -646,7 +644,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
	    || req->wb_page != page
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		nfs_clear_page_tag_locked(req);
		return ERR_PTR(-EBUSY);
		return ERR_PTR(-EBUSY);
	}
	}


+1 −12
Original line number Original line Diff line number Diff line
@@ -83,6 +83,7 @@ extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc);
extern	void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t);
extern	void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t);
extern  int nfs_wait_on_request(struct nfs_page *);
extern  int nfs_wait_on_request(struct nfs_page *);
extern	void nfs_unlock_request(struct nfs_page *req);
extern	void nfs_unlock_request(struct nfs_page *req);
extern	int nfs_set_page_tag_locked(struct nfs_page *req);
extern  void nfs_clear_page_tag_locked(struct nfs_page *req);
extern  void nfs_clear_page_tag_locked(struct nfs_page *req);




@@ -95,18 +96,6 @@ nfs_lock_request_dontget(struct nfs_page *req)
	return !test_and_set_bit(PG_BUSY, &req->wb_flags);
	return !test_and_set_bit(PG_BUSY, &req->wb_flags);
}
}


/*
 * Lock the page of an asynchronous request and take a reference
 */
static inline int
nfs_lock_request(struct nfs_page *req)
{
	if (test_and_set_bit(PG_BUSY, &req->wb_flags))
		return 0;
	kref_get(&req->wb_kref);
	return 1;
}

/**
/**
 * nfs_list_add_request - Insert a request into a list
 * nfs_list_add_request - Insert a request into a list
 * @req: request
 * @req: request