Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e14bebf6 authored by Trond Myklebust
Browse files

NFS: Don't check request offset and size without holding a lock



Request offsets and sizes are not guaranteed to be stable unless you
are holding the request locked.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
parent a0e265bc
Loading
Loading
Loading
Loading
+12 −12
Original line number | Diff line number | Diff line
@@ -523,6 +523,17 @@ nfs_lock_and_join_requests(struct page *page)
	total_bytes = head->wb_bytes;
	for (subreq = head->wb_this_page; subreq != head;
			subreq = subreq->wb_this_page) {
		if (!nfs_lock_request(subreq)) {
			/* releases page group bit lock and
			 * inode spin lock and all references */
			ret = nfs_unroll_locks_and_wait(inode, head,
				subreq);

			if (ret == 0)
				goto try_again;

			return ERR_PTR(ret);
		}
		/*
		 * Subrequests are always contiguous, non overlapping
		 * and in order - but may be repeated (mirrored writes).
@@ -533,21 +544,10 @@ nfs_lock_and_join_requests(struct page *page)
		} else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
			    ((subreq->wb_offset + subreq->wb_bytes) >
			     (head->wb_offset + total_bytes)))) {
			nfs_unlock_request(subreq);
			nfs_unroll_locks_and_wait(inode, head, subreq);
			return ERR_PTR(-EIO);
		}

		if (!nfs_lock_request(subreq)) {
			/* releases page group bit lock and
			 * inode spin lock and all references */
			ret = nfs_unroll_locks_and_wait(inode, head,
				subreq);

			if (ret == 0)
				goto try_again;

			return ERR_PTR(ret);
		}
	}

	/* Now that all requests are locked, make sure they aren't on any list.