
Commit a0544c94 authored by Chuck Lever, committed by J. Bruce Fields

svcrdma: Hook up the logic to return ERR_CHUNK



RFC 5666 Section 4.2 states:

> When the peer detects an RPC-over-RDMA header version that it does
> not support (currently this document defines only version 1), it
> replies with an error code of ERR_VERS, and provides the low and
> high inclusive version numbers it does, in fact, support.

And:

> When other decoding errors are detected in the header or chunks,
> either an RPC decode error MAY be returned or the RPC/RDMA error
> code ERR_CHUNK MUST be returned.

The Linux NFS server does throw ERR_VERS when a client sends it
a request whose rdma_version is not "one." But it does not return
ERR_CHUNK when a header decoding error occurs. It just drops the
request.
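For reference, an RPC-over-RDMA error reply is itself an ordinary version 1 header whose rdma_proc is RDMA_ERROR, followed by the error code and, for ERR_VERS only, the supported version range. The sketch below shows that wire layout in plain C; the helper name and the hard-coded 1..1 version range are illustrative and are not taken from the kernel sources.

#include <arpa/inet.h>	/* htonl() */
#include <stddef.h>
#include <stdint.h>

enum { RDMA_ERROR = 4 };		/* rdma_proc value for an error reply */
enum { ERR_VERS = 1, ERR_CHUNK = 2 };	/* rpc_rdma_errcode values */

/* Illustrative only: lay out an RPC-over-RDMA version 1 error header.
 * Returns the number of 32-bit words written into buf.
 */
static size_t encode_rdma_error(uint32_t *buf, uint32_t xid, uint32_t errcode)
{
	size_t n = 0;

	buf[n++] = xid;			/* rdma_xid: copied from the bad request */
	buf[n++] = htonl(1);		/* rdma_vers: this reply is version 1 */
	buf[n++] = htonl(0);		/* rdma_credit */
	buf[n++] = htonl(RDMA_ERROR);	/* rdma_proc */
	buf[n++] = htonl(errcode);	/* rpc_rdma_errcode */
	if (errcode == ERR_VERS) {
		buf[n++] = htonl(1);	/* rdma_vers_low: lowest supported version */
		buf[n++] = htonl(1);	/* rdma_vers_high: highest supported version */
	}
	return n;			/* ERR_CHUNK carries no further data */
}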

To improve protocol extensibility, it should reject invalid values
in the rdma_proc field instead of treating them all like RDMA_MSG.
Otherwise clients can't detect when the server doesn't support
new rdma_proc values.
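RFC 5666 defines five rdma_proc values for version 1, and the patch below sorts them into three outcomes: decode, drop, or reject. The following sketch restates that dispatch outside the kernel; the enum values are the ones assigned by the RFC, while the classification helper is only an illustration of the policy, not kernel code.

#include <stdint.h>

/* rdma_proc values defined for RPC-over-RDMA version 1 (RFC 5666) */
enum rdma_proc {
	RDMA_MSG	= 0,	/* RPC message follows the header inline */
	RDMA_NOMSG	= 1,	/* RPC message is carried entirely in chunks */
	RDMA_MSGP	= 2,	/* padded inline message */
	RDMA_DONE	= 3,	/* historic */
	RDMA_ERROR	= 4,	/* error report such as ERR_VERS or ERR_CHUNK */
};

enum proc_disposition { PROC_DECODE, PROC_DROP, PROC_REJECT };

/* Illustrative helper mirroring the switch added below: known procedures
 * are either decoded or silently dropped, and any value this receiver does
 * not recognize is rejected so the sender can learn it is unsupported.
 */
static enum proc_disposition classify_rdma_proc(uint32_t proc)
{
	switch (proc) {
	case RDMA_MSG:
	case RDMA_NOMSG:
	case RDMA_MSGP:
		return PROC_DECODE;	/* parse chunk lists as usual */
	case RDMA_DONE:
	case RDMA_ERROR:
		return PROC_DROP;	/* ignore; nothing to decode */
	default:
		return PROC_REJECT;	/* unknown: report an error */
	}
}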

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Devesh Sharma <devesh.sharma@broadcom.com>
Tested-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent f3ea53fb
+42 −13
@@ -148,22 +148,41 @@ static __be32 *decode_reply_array(__be32 *va, __be32 *vaend)
 int svc_rdma_xdr_decode_req(struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp)
 {
 	__be32 *va, *vaend;
+	unsigned int len;
 	u32 hdr_len;
 
 	/* Verify that there's enough bytes for header + something */
-	if (rqstp->rq_arg.len <= RPCRDMA_HDRLEN_MIN) {
+	if (rqstp->rq_arg.len <= RPCRDMA_HDRLEN_ERR) {
 		dprintk("svcrdma: header too short = %d\n",
 			rqstp->rq_arg.len);
 		return -EINVAL;
 	}
 
-	if (rmsgp->rm_vers != rpcrdma_version)
+	if (rmsgp->rm_vers != rpcrdma_version) {
+		dprintk("%s: bad version %u\n", __func__,
+			be32_to_cpu(rmsgp->rm_vers));
 		return -EPROTONOSUPPORT;
+	}
+
+	switch (be32_to_cpu(rmsgp->rm_type)) {
+	case RDMA_MSG:
+	case RDMA_NOMSG:
+		break;
 
-	/* Pull in the extra for the padded case and bump our pointer */
-	if (rmsgp->rm_type == rdma_msgp) {
-		int hdrlen;
+	case RDMA_DONE:
+		/* Just drop it */
+		dprintk("svcrdma: dropping RDMA_DONE message\n");
+		return 0;
+
+	case RDMA_ERROR:
+		/* Possible if this is a backchannel reply.
+		 * XXX: We should cancel this XID, though.
+		 */
+		dprintk("svcrdma: dropping RDMA_ERROR message\n");
+		return 0;
 
+	case RDMA_MSGP:
+		/* Pull in the extra for the padded case, bump our pointer */
 		rmsgp->rm_body.rm_padded.rm_align =
 			be32_to_cpu(rmsgp->rm_body.rm_padded.rm_align);
 		rmsgp->rm_body.rm_padded.rm_thresh =
@@ -171,11 +190,15 @@ int svc_rdma_xdr_decode_req(struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp)
 
 		va = &rmsgp->rm_body.rm_padded.rm_pempty[4];
 		rqstp->rq_arg.head[0].iov_base = va;
-		hdrlen = (u32)((unsigned long)va - (unsigned long)rmsgp);
-		rqstp->rq_arg.head[0].iov_len -= hdrlen;
-		if (hdrlen > rqstp->rq_arg.len)
+		len = (u32)((unsigned long)va - (unsigned long)rmsgp);
+		rqstp->rq_arg.head[0].iov_len -= len;
+		if (len > rqstp->rq_arg.len)
 			return -EINVAL;
-		return hdrlen;
+		return len;
+	default:
+		dprintk("svcrdma: bad rdma procedure (%u)\n",
+			be32_to_cpu(rmsgp->rm_type));
+		return -EINVAL;
 	}
 
 	/* The chunk list may contain either a read chunk list or a write
@@ -184,14 +207,20 @@ int svc_rdma_xdr_decode_req(struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp)
 	va = &rmsgp->rm_body.rm_chunks[0];
 	vaend = (__be32 *)((unsigned long)rmsgp + rqstp->rq_arg.len);
 	va = decode_read_list(va, vaend);
-	if (!va)
+	if (!va) {
+		dprintk("svcrdma: failed to decode read list\n");
 		return -EINVAL;
+	}
 	va = decode_write_list(va, vaend);
-	if (!va)
+	if (!va) {
+		dprintk("svcrdma: failed to decode write list\n");
 		return -EINVAL;
+	}
 	va = decode_reply_array(va, vaend);
-	if (!va)
+	if (!va) {
+		dprintk("svcrdma: failed to decode reply chunk\n");
 		return -EINVAL;
+	}
 
 	rqstp->rq_arg.head[0].iov_base = va;
 	hdr_len = (unsigned long)va - (unsigned long)rmsgp;
+4 −0
@@ -657,6 +657,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 	ret = svc_rdma_xdr_decode_req(rmsgp, rqstp);
 	if (ret < 0)
 		goto out_err;
+	if (ret == 0)
+		goto out_drop;
 	rqstp->rq_xprt_hlen = ret;
 
 	if (svc_rdma_is_backchannel_reply(xprt, rmsgp)) {
@@ -710,6 +712,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 defer:
 	return 0;
 
+out_drop:
+	svc_rdma_put_context(ctxt, 1);
 repost:
 	return svc_rdma_repost_recv(rdma_xprt, GFP_KERNEL);
 }