Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit edb41e61 authored by Chuck Lever, committed by Anna Schumaker
Browse files

xprtrdma: Make rpc_rqst part of rpcrdma_req



This simplifies allocation of the generic RPC slot and xprtrdma
specific per-RPC resources.

It also makes xprtrdma more like the socket-based transports:
->buf_alloc and ->buf_free are now responsible only for send and
receive buffers.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 48be539d
Loading
Loading
Loading
Loading
+0 −1
Original line number Original line Diff line number Diff line
@@ -84,7 +84,6 @@ struct rpc_rqst {
	void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
	void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
	struct list_head	rq_list;
	struct list_head	rq_list;


	void			*rq_xprtdata;	/* Per-xprt private data */
	void			*rq_buffer;	/* Call XDR encode buffer */
	void			*rq_buffer;	/* Call XDR encode buffer */
	size_t			rq_callsize;
	size_t			rq_callsize;
	void			*rq_rbuffer;	/* Reply XDR decode buffer */
	void			*rq_rbuffer;	/* Reply XDR decode buffer */
+35 −42
Original line number Original line Diff line number Diff line
@@ -29,13 +29,16 @@ static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
	spin_unlock(&buf->rb_reqslock);
	spin_unlock(&buf->rb_reqslock);


	rpcrdma_destroy_req(req);
	rpcrdma_destroy_req(req);

	kfree(rqst);
}
}


static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
				 unsigned int count)
{
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst;
	unsigned int i;

	for (i = 0; i < (count << 1); i++) {
		struct rpcrdma_regbuf *rb;
		struct rpcrdma_regbuf *rb;
		struct rpcrdma_req *req;
		struct rpcrdma_req *req;
		size_t size;
		size_t size;
@@ -43,6 +46,15 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
		req = rpcrdma_create_req(r_xprt);
		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req))
		if (IS_ERR(req))
			return PTR_ERR(req);
			return PTR_ERR(req);
		rqst = &req->rl_slot;

		rqst->rq_xprt = xprt;
		INIT_LIST_HEAD(&rqst->rq_list);
		INIT_LIST_HEAD(&rqst->rq_bc_list);
		__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
		spin_lock_bh(&xprt->bc_pa_lock);
		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);


		size = r_xprt->rx_data.inline_rsize;
		size = r_xprt->rx_data.inline_rsize;
		rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
		rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
@@ -51,7 +63,7 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
		req->rl_sendbuf = rb;
		req->rl_sendbuf = rb;
		xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
		xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
			     min_t(size_t, size, PAGE_SIZE));
			     min_t(size_t, size, PAGE_SIZE));
	rpcrdma_set_xprtdata(rqst, req);
	}
	return 0;
	return 0;


out_fail:
out_fail:
@@ -86,9 +98,6 @@ static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpc_rqst *rqst;
	unsigned int i;
	int rc;
	int rc;


	/* The backchannel reply path returns each rpc_rqst to the
	/* The backchannel reply path returns each rpc_rqst to the
@@ -103,26 +112,10 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
	if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
	if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
		goto out_err;
		goto out_err;


	for (i = 0; i < (reqs << 1); i++) {
	rc = rpcrdma_bc_setup_reqs(r_xprt, reqs);
		rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
	if (rc)
		if (!rqst)
			goto out_free;

		dprintk("RPC:       %s: new rqst %p\n", __func__, rqst);

		rqst->rq_xprt = &r_xprt->rx_xprt;
		INIT_LIST_HEAD(&rqst->rq_list);
		INIT_LIST_HEAD(&rqst->rq_bc_list);
		__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);

		if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
		goto out_free;
		goto out_free;


		spin_lock_bh(&xprt->bc_pa_lock);
		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);
	}

	rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
	rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
	if (rc)
	if (rc)
		goto out_free;
		goto out_free;
@@ -131,7 +124,7 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
	if (rc)
	if (rc)
		goto out_free;
		goto out_free;


	buffer->rb_bc_srv_max_requests = reqs;
	r_xprt->rx_buf.rb_bc_srv_max_requests = reqs;
	request_module("svcrdma");
	request_module("svcrdma");
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	return 0;
	return 0;
+9 −26
Original line number Original line Diff line number Diff line
@@ -331,9 +331,7 @@ xprt_setup_rdma(struct xprt_create *args)
		return ERR_PTR(-EBADF);
		return ERR_PTR(-EBADF);
	}
	}


	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0, 0);
			xprt_rdma_slot_table_entries,
			xprt_rdma_slot_table_entries);
	if (xprt == NULL) {
	if (xprt == NULL) {
		dprintk("RPC:       %s: couldn't allocate rpcrdma_xprt\n",
		dprintk("RPC:       %s: couldn't allocate rpcrdma_xprt\n",
			__func__);
			__func__);
@@ -365,7 +363,7 @@ xprt_setup_rdma(struct xprt_create *args)
		xprt_set_bound(xprt);
		xprt_set_bound(xprt);
	xprt_rdma_format_addresses(xprt, sap);
	xprt_rdma_format_addresses(xprt, sap);


	cdata.max_requests = xprt->max_reqs;
	cdata.max_requests = xprt_rdma_slot_table_entries;


	cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
	cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
	cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */
	cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */
@@ -550,22 +548,18 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
static void
static void
xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
{
	struct rpc_rqst *rqst;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req;


	spin_lock(&xprt->reserve_lock);
	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
	if (list_empty(&xprt->free))
	if (!req)
		goto out_sleep;
		goto out_sleep;
	rqst = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
	task->tk_rqstp = &req->rl_slot;
	list_del(&rqst->rq_list);
	spin_unlock(&xprt->reserve_lock);

	task->tk_rqstp = rqst;
	task->tk_status = 0;
	task->tk_status = 0;
	return;
	return;


out_sleep:
out_sleep:
	rpc_sleep_on(&xprt->backlog, task, NULL);
	rpc_sleep_on(&xprt->backlog, task, NULL);
	spin_unlock(&xprt->reserve_lock);
	task->tk_status = -EAGAIN;
	task->tk_status = -EAGAIN;
}
}


@@ -579,11 +573,8 @@ static void
xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
{
{
	memset(rqst, 0, sizeof(*rqst));
	memset(rqst, 0, sizeof(*rqst));

	rpcrdma_buffer_put(rpcr_to_rdmar(rqst));
	spin_lock(&xprt->reserve_lock);
	list_add(&rqst->rq_list, &xprt->free);
	rpc_wake_up_next(&xprt->backlog);
	rpc_wake_up_next(&xprt->backlog);
	spin_unlock(&xprt->reserve_lock);
}
}


static bool
static bool
@@ -656,13 +647,9 @@ xprt_rdma_allocate(struct rpc_task *task)
{
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	gfp_t flags;
	gfp_t flags;


	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
	if (req == NULL)
		goto out_get;

	flags = RPCRDMA_DEF_GFP;
	flags = RPCRDMA_DEF_GFP;
	if (RPC_IS_SWAPPER(task))
	if (RPC_IS_SWAPPER(task))
		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
@@ -672,15 +659,12 @@ xprt_rdma_allocate(struct rpc_task *task)
	if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags))
	if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags))
		goto out_fail;
		goto out_fail;


	rpcrdma_set_xprtdata(rqst, req);
	rqst->rq_buffer = req->rl_sendbuf->rg_base;
	rqst->rq_buffer = req->rl_sendbuf->rg_base;
	rqst->rq_rbuffer = req->rl_recvbuf->rg_base;
	rqst->rq_rbuffer = req->rl_recvbuf->rg_base;
	trace_xprtrdma_allocate(task, req);
	trace_xprtrdma_allocate(task, req);
	return 0;
	return 0;


out_fail:
out_fail:
	rpcrdma_buffer_put(req);
out_get:
	trace_xprtrdma_allocate(task, NULL);
	trace_xprtrdma_allocate(task, NULL);
	return -ENOMEM;
	return -ENOMEM;
}
}
@@ -701,7 +685,6 @@ xprt_rdma_free(struct rpc_task *task)
	if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags))
	if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags))
		rpcrdma_release_rqst(r_xprt, req);
		rpcrdma_release_rqst(r_xprt, req);
	trace_xprtrdma_rpc_done(task, req);
	trace_xprtrdma_rpc_done(task, req);
	rpcrdma_buffer_put(req);
}
}


/**
/**
+2 −7
Original line number Original line Diff line number Diff line
@@ -335,6 +335,7 @@ enum {
struct rpcrdma_buffer;
struct rpcrdma_buffer;
struct rpcrdma_req {
struct rpcrdma_req {
	struct list_head	rl_list;
	struct list_head	rl_list;
	struct rpc_rqst		rl_slot;
	struct rpcrdma_buffer	*rl_buffer;
	struct rpcrdma_buffer	*rl_buffer;
	struct rpcrdma_rep	*rl_reply;
	struct rpcrdma_rep	*rl_reply;
	struct xdr_stream	rl_stream;
	struct xdr_stream	rl_stream;
@@ -357,16 +358,10 @@ enum {
	RPCRDMA_REQ_F_TX_RESOURCES,
	RPCRDMA_REQ_F_TX_RESOURCES,
};
};


static inline void
rpcrdma_set_xprtdata(struct rpc_rqst *rqst, struct rpcrdma_req *req)
{
	rqst->rq_xprtdata = req;
}

static inline struct rpcrdma_req *
static inline struct rpcrdma_req *
rpcr_to_rdmar(const struct rpc_rqst *rqst)
rpcr_to_rdmar(const struct rpc_rqst *rqst)
{
{
	return rqst->rq_xprtdata;
	return container_of(rqst, struct rpcrdma_req, rl_slot);
}
}


static inline void
static inline void