
Commit 9450ca8e authored by Chuck Lever, committed by J. Bruce Fields

svcrdma: Clean up after converting svc_rdma_recvfrom to rdma_rw API



Clean up: Registration mode details are now handled by the rdma_rw
API, and thus can be removed from svcrdma.
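For context: the rdma_rw core (drivers/infiniband/core/rw.c) now owns the choice
between fast-registration MRs and plain SGE work requests, so a transport only
drives the generic context calls. Below is a minimal, hedged sketch of that
calling pattern. It is not code from this patch; the function and variable
names (post_read_sketch, done_cqe) are illustrative, while the rdma_rw_ctx_*
calls are the real kernel API.

    #include <linux/scatterlist.h>
    #include <rdma/rw.h>

    /* Illustrative sketch: pull data from the peer with an RDMA Read.
     * rdma_rw_ctx_init() decides internally whether a registered MR is
     * needed (e.g. on iWARP, whose read sinks require remote write
     * access), so the caller no longer tracks SVCRDMA_DEVCAP_* flags.
     */
    static int post_read_sketch(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
    			    u8 port_num, struct scatterlist *sgl,
    			    u32 sg_cnt, u64 remote_addr, u32 rkey,
    			    struct ib_cqe *done_cqe)
    {
    	int ret;

    	/* Map, and if the core decides it is required, register
    	 * the local sink buffer. Returns the number of WQEs needed,
    	 * or a negative errno.
    	 */
    	ret = rdma_rw_ctx_init(ctx, qp, port_num, sgl, sg_cnt, 0,
    			       remote_addr, rkey, DMA_FROM_DEVICE);
    	if (ret < 0)
    		return ret;

    	/* Post the Read WR chain; done_cqe fires on completion. */
    	return rdma_rw_ctx_post(ctx, qp, port_num, done_cqe, NULL);
    }

On completion the caller would invoke rdma_rw_ctx_destroy() with the same
qp/port/sgl arguments to unmap the buffer and release any MR that was used.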

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 0d956e69
include/linux/sunrpc/svc_rdma.h  +0 −4

@@ -90,9 +90,6 @@ struct svc_rdma_op_ctxt {
 	struct page *pages[RPCSVC_MAXPAGES];
 };
 
-#define	SVCRDMA_DEVCAP_FAST_REG		1	/* fast mr registration */
-#define	SVCRDMA_DEVCAP_READ_W_INV	2	/* read w/ invalidate */
-
 struct svcxprt_rdma {
 	struct svc_xprt      sc_xprt;		/* SVC transport structure */
 	struct rdma_cm_id    *sc_cm_id;		/* RDMA connection id */
@@ -123,7 +120,6 @@ struct svcxprt_rdma {
 	struct ib_qp         *sc_qp;
 	struct ib_cq         *sc_rq_cq;
 	struct ib_cq         *sc_sq_cq;
-	u32		     sc_dev_caps;	/* distilled device caps */
 
 	spinlock_t	     sc_lock;		/* transport lock */
 
net/sunrpc/xprtrdma/svc_rdma_transport.c  +4 −35

@@ -783,7 +783,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	memset(&qp_attr, 0, sizeof qp_attr);
 	qp_attr.event_handler = qp_event_handler;
 	qp_attr.qp_context = &newxprt->sc_xprt;
-	qp_attr.port_num = newxprt->sc_cm_id->port_num;
+	qp_attr.port_num = newxprt->sc_port_num;
 	qp_attr.cap.max_rdma_ctxs = newxprt->sc_max_requests;
 	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
 	qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth;
@@ -807,43 +807,12 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	}
 	newxprt->sc_qp = newxprt->sc_cm_id->qp;
 
-	/*
-	 * Use the most secure set of MR resources based on the
-	 * transport type and available memory management features in
-	 * the device. Here's the table implemented below:
-	 *
-	 *		Fast	Global	DMA	Remote WR
-	 *		Reg	LKEY	MR	Access
-	 *		Sup'd	Sup'd	Needed	Needed
-	 *
-	 * IWARP	N	N	Y	Y
-	 *		N	Y	Y	Y
-	 *		Y	N	Y	N
-	 *		Y	Y	N	-
-	 *
-	 * IB		N	N	Y	N
-	 *		N	Y	N	-
-	 *		Y	N	Y	N
-	 *		Y	Y	N	-
-	 *
-	 * NB:	iWARP requires remote write access for the data sink
-	 *	of an RDMA_READ. IB does not.
-	 */
-	if (dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
-		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
-	} else
+	if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
 		newxprt->sc_snd_w_inv = false;
-
-	/*
-	 * Determine if a DMA MR is required and if so, what privs are required
-	 */
-	if (!rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num) &&
-	    !rdma_ib_or_roce(dev, newxprt->sc_cm_id->port_num))
+	if (!rdma_protocol_iwarp(dev, newxprt->sc_port_num) &&
+	    !rdma_ib_or_roce(dev, newxprt->sc_port_num))
 		goto errout;
 
-	if (rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num))
-		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
-
 	/* Post receive buffers */
 	for (i = 0; i < newxprt->sc_max_requests; i++) {
 		ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
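The MR-selection table deleted above did not disappear; an equivalent decision
now lives in the rdma_rw core. A loose paraphrase of that check, assuming the
reader wants the shape rather than the exact upstream code (the helper name
mirrors the core's rdma_rw_io_needs_mr, but this body is simplified):

    /* Paraphrase, not a copy of drivers/infiniband/core/rw.c: an MR is
     * registered on the caller's behalf when the transport is iWARP and
     * the local buffer is the sink of an RDMA Read (DMA_FROM_DEVICE),
     * because iWARP requires remote write access on read sinks -- the
     * same rule the deleted comment block recorded for svcrdma.
     */
    static bool io_needs_mr_sketch(struct ib_device *dev, u8 port_num,
    			       enum dma_data_direction dir)
    {
    	return rdma_protocol_iwarp(dev, port_num) &&
    	       dir == DMA_FROM_DEVICE;
    }

Because that decision, and the registration it implies, happen inside
rdma_rw_ctx_init(), svc_rdma_accept() above shrinks to the two checks that
still belong to the transport: disabling Send With Invalidate when the device
lacks IB_DEVICE_MEM_MGT_EXTENSIONS, and rejecting devices that are neither
iWARP nor IB/RoCE.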