Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ed3aa742 authored by Chuck Lever's avatar Chuck Lever Committed by Anna Schumaker
Browse files

xprtrdma: Move common wait_for_buffer_space call to parent function



Clean up: The logic to wait for write space is common to a bunch of
the encoding helper functions. Lift it out and put it in the tail
of rpcrdma_marshal_req().

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent a8f688ec
Loading
Loading
Loading
Loading
+12 −19
Original line number Diff line number Diff line
@@ -366,7 +366,7 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   false, &mr);
		if (IS_ERR(seg))
-			goto out_maperr;
+			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_read_segment(xdr, mr, pos) < 0)
@@ -378,11 +378,6 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
	} while (nsegs);

	return 0;
-
-out_maperr:
-	if (PTR_ERR(seg) == -EAGAIN)
-		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
-	return PTR_ERR(seg);
}

/* Register and XDR encode the Write list. Supports encoding a list
@@ -429,7 +424,7 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   true, &mr);
		if (IS_ERR(seg))
-			goto out_maperr;
+			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
@@ -446,11 +441,6 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
	*segcount = cpu_to_be32(nchunks);

	return 0;
-
-out_maperr:
-	if (PTR_ERR(seg) == -EAGAIN)
-		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
-	return PTR_ERR(seg);
}

/* Register and XDR encode the Reply chunk. Supports encoding an array
@@ -492,7 +482,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   true, &mr);
		if (IS_ERR(seg))
-			goto out_maperr;
+			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
@@ -509,11 +499,6 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
	*segcount = cpu_to_be32(nchunks);

	return 0;
-
-out_maperr:
-	if (PTR_ERR(seg) == -EAGAIN)
-		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
-	return PTR_ERR(seg);
}

/**
@@ -884,7 +869,15 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
	return 0;

out_err:
-	r_xprt->rx_stats.failed_marshal_count++;
+	switch (ret) {
+	case -EAGAIN:
+		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
+		break;
+	case -ENOBUFS:
+		break;
+	default:
+		r_xprt->rx_stats.failed_marshal_count++;
+	}
	return ret;
}