Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5a6d1db4 authored by Chuck Lever, committed by Anna Schumaker
Browse files

SUNRPC: Add a transport-specific private field in rpc_rqst



Currently there's a hidden and indirect mechanism for finding the
rpcrdma_req that goes with an rpc_rqst. It depends on getting from
the rq_buffer pointer in struct rpc_rqst to the struct
rpcrdma_regbuf that controls that buffer, and then to the struct
rpcrdma_req it goes with.

This was done back in the day to avoid the need to add a per-rqst
pointer or to alter the buf_free API when support for RPC-over-RDMA
was introduced.

I'm about to change the way regbufs work to support larger inline
thresholds. Now is a good time to replace this indirect mechanism
with something that is more straightforward. This should be
considered a cleanup.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 68778945
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -83,6 +83,7 @@ struct rpc_rqst {
	void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
	void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
	struct list_head	rq_list;
	struct list_head	rq_list;


	void			*rq_xprtdata;	/* Per-xprt private data */
	void			*rq_buffer;	/* Call XDR encode buffer */
	void			*rq_buffer;	/* Call XDR encode buffer */
	size_t			rq_callsize;
	size_t			rq_callsize;
	void			*rq_rbuffer;	/* Reply XDR decode buffer */
	void			*rq_rbuffer;	/* Reply XDR decode buffer */
+2 −4
Original line number Original line Diff line number Diff line
@@ -55,11 +55,9 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
	if (IS_ERR(rb))
	if (IS_ERR(rb))
		goto out_fail;
		goto out_fail;
	rb->rg_owner = req;
	req->rl_sendbuf = rb;
	req->rl_sendbuf = rb;
	/* so that rpcr_to_rdmar works when receiving a request */
	xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base, size);
	rqst->rq_buffer = (void *)req->rl_sendbuf->rg_base;
	rpcrdma_set_xprtdata(rqst, req);
	xdr_buf_init(&rqst->rq_snd_buf, rqst->rq_buffer, size);
	return 0;
	return 0;


out_fail:
out_fail:
+1 −1
Original line number Original line Diff line number Diff line
@@ -523,6 +523,7 @@ xprt_rdma_allocate(struct rpc_task *task)
out:
out:
	dprintk("RPC:       %s: size %zd, request 0x%p\n", __func__, size, req);
	dprintk("RPC:       %s: size %zd, request 0x%p\n", __func__, size, req);
	req->rl_connect_cookie = 0;	/* our reserved value */
	req->rl_connect_cookie = 0;	/* our reserved value */
	rpcrdma_set_xprtdata(rqst, req);
	rqst->rq_buffer = req->rl_sendbuf->rg_base;
	rqst->rq_buffer = req->rl_sendbuf->rg_base;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_rcvsize;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_rcvsize;
	return 0;
	return 0;
@@ -559,7 +560,6 @@ xprt_rdma_allocate(struct rpc_task *task)
	rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, size, flags);
	rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, size, flags);
	if (IS_ERR(rb))
	if (IS_ERR(rb))
		goto out_fail;
		goto out_fail;
	rb->rg_owner = req;


	r_xprt->rx_stats.hardway_register_count += size;
	r_xprt->rx_stats.hardway_register_count += size;
	rpcrdma_free_regbuf(&r_xprt->rx_ia, req->rl_sendbuf);
	rpcrdma_free_regbuf(&r_xprt->rx_ia, req->rl_sendbuf);
+0 −1
Original line number Original line Diff line number Diff line
@@ -1210,7 +1210,6 @@ rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
	iov->length = size;
	iov->length = size;
	iov->lkey = ia->ri_pd->local_dma_lkey;
	iov->lkey = ia->ri_pd->local_dma_lkey;
	rb->rg_size = size;
	rb->rg_size = size;
	rb->rg_owner = NULL;
	return rb;
	return rb;


out_free:
out_free:
+7 −6
Original line number Original line Diff line number Diff line
@@ -113,7 +113,6 @@ struct rpcrdma_ep {


struct rpcrdma_regbuf {
struct rpcrdma_regbuf {
	size_t			rg_size;
	size_t			rg_size;
	struct rpcrdma_req	*rg_owner;
	struct ib_sge		rg_iov;
	struct ib_sge		rg_iov;
	__be32			rg_base[0] __attribute__ ((aligned(256)));
	__be32			rg_base[0] __attribute__ ((aligned(256)));
};
};
@@ -297,14 +296,16 @@ struct rpcrdma_req {
	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
};
};


static inline void
rpcrdma_set_xprtdata(struct rpc_rqst *rqst, struct rpcrdma_req *req)
{
	rqst->rq_xprtdata = req;
}

static inline struct rpcrdma_req *
static inline struct rpcrdma_req *
rpcr_to_rdmar(struct rpc_rqst *rqst)
rpcr_to_rdmar(struct rpc_rqst *rqst)
{
{
	void *buffer = rqst->rq_buffer;
	return rqst->rq_xprtdata;
	struct rpcrdma_regbuf *rb;

	rb = container_of(buffer, struct rpcrdma_regbuf, rg_base);
	return rb->rg_owner;
}
}


/*
/*