Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1711386c authored by Tom Tucker
Browse files

svcrdma: Move the QP and cm_id destruction to svc_rdma_free



Move the destruction of the QP and CM_ID to the free path so that the
QP cleanup code doesn't race with the dto_tasklet handling flushed WR.
The QP reference is not needed because we now have a reference for
every WR.

Also add a guard in the SQ and RQ completion handlers to ignore
calls generated by some providers when the QP is destroyed.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
parent 0905c0f0
Loading
Loading
Loading
Loading
+20 −20
Original line number Diff line number Diff line
@@ -252,6 +252,10 @@ static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
@@ -393,6 +397,10 @@ static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an RQ
@@ -852,7 +860,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
		newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
		newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
	}
	svc_xprt_get(&newxprt->sc_xprt);
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	/* Register all of physical memory */
@@ -926,10 +933,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp)) {
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
		svc_xprt_put(&newxprt->sc_xprt);
	}
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
@@ -941,10 +946,7 @@ static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
}

/*
 * When connected, an svc_xprt has at least three references:
 *
 * - A reference held by the QP. We still hold that here because this
 *   code deletes the QP and puts the reference.
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
@@ -953,7 +955,7 @@ static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum two references should still be held.
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
@@ -963,15 +965,6 @@ static void svc_rdma_detach(struct svc_xprt *xprt)

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) {
		ib_destroy_qp(rdma->sc_qp);
		svc_xprt_put(xprt);
	}

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);
}

static void __svc_rdma_free(struct work_struct *work)
@@ -983,6 +976,13 @@ static void __svc_rdma_free(struct work_struct *work)
	/* We should only be called from kref_put */
	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);