
Commit df158561 authored by Ram Amrani, committed by Doug Ledford

RDMA/qedr: restructure functions that create/destroy QPs



Simplify function and sub-function flow of QP creation and destruction.
This also serves as a preparation for SRQ and iWARP support.

Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Reviewed-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent bb75f33c
+236 −313
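In outline, the patch collapses the old mesh of qedr_init_qp_* helpers into two self-contained paths, qedr_create_user_qp() and qedr_create_kernel_qp(), which share qedr_init_common_qp_in_params() and each own their rollback (qedr_cleanup_user() / qedr_cleanup_kernel()). The standalone C sketch below models only that dispatch-and-rollback shape; every type and helper in it is an illustrative stand-in, not the driver's API.

/*
 * Standalone model of the restructured create flow -- not driver code.
 * struct qp, create_user_qp() and create_kernel_qp() are invented
 * stand-ins for the qedr equivalents in verbs.c.
 */
#include <stdio.h>
#include <stdlib.h>

struct qp { int id; };

/* Each path does its own setup and, on failure, its own rollback. */
static int create_user_qp(struct qp *qp)   { qp->id = 1; return 0; }
static int create_kernel_qp(struct qp *qp) { qp->id = 2; return 0; }

static struct qp *create_qp(int is_user)
{
	struct qp *qp = calloc(1, sizeof(*qp));
	int rc;

	if (!qp)
		return NULL;

	/* One dispatch point; no user/kernel branching further down. */
	rc = is_user ? create_user_qp(qp) : create_kernel_qp(qp);
	if (rc) {
		/* The failing path has already undone its own work,
		 * mirroring qedr_cleanup_user()/qedr_cleanup_kernel(). */
		free(qp);
		return NULL;
	}
	return qp;
}

int main(void)
{
	struct qp *qp = create_qp(0);

	if (!qp)
		return 1;
	printf("created qp id=%d\n", qp->id);
	free(qp);
	return 0;
}

Keeping setup and rollback local to each path is presumably what the commit message means by preparing for SRQ and iWARP: further variants can be slotted into the dispatch without touching qedr_create_qp() itself.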
@@ -1076,30 +1076,6 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
	return 0;
}

static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
	ib_umem_release(qp->usq.umem);
}

static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
	ib_umem_release(qp->urq.umem);
}

static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
{
	dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
	kfree(qp->wqe_wr_id);
}

static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
{
	dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
	kfree(qp->rqe_wr_id);
}

static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
			       struct ib_qp_init_attr *attrs)
{
@@ -1188,15 +1164,13 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
	return rc;
}

static void qedr_set_qp_init_params(struct qedr_dev *dev,
static void qedr_set_common_qp_params(struct qedr_dev *dev,
				      struct qedr_qp *qp,
				      struct qedr_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;

	spin_lock_init(&qp->q_lock);

	qp->pd = pd;
	qp->qp_type = attrs->qp_type;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
@@ -1205,7 +1179,11 @@ static void qedr_set_qp_init_params(struct qedr_dev *dev,
	qp->sq_cq = get_qedr_cq(attrs->send_cq);
	qp->rq_cq = get_qedr_cq(attrs->recv_cq);
	qp->dev = dev;
	qp->rq.max_sges = attrs->cap.max_recv_sge;

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
		 qp->rq.max_sges, qp->rq_cq->icid);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
		 pd->pd_id, qp->qp_type, qp->max_inline_data,
@@ -1213,95 +1191,149 @@ static void qedr_set_qp_init_params(struct qedr_dev *dev,
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
		 qp->sq.max_sges, qp->sq_cq->icid);
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
		 qp->rq.max_sges, qp->rq_cq->icid);
}

static inline void
qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
			 struct qedr_create_qp_ureq *ureq)
{
	/* QP handle to be written in CQE */
	params->qp_handle_lo = ureq->qp_handle_lo;
	params->qp_handle_hi = ureq->qp_handle_hi;
}

static inline void
qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qp->sq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	qp->sq.db_data.data.icid = qp->icid + 1;
	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;
}

static inline void
qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
qedr_init_common_qp_in_params(struct qedr_dev *dev,
			      struct qedr_pd *pd,
			      struct qedr_qp *qp,
			      struct ib_qp_init_attr *attrs,
			      bool fmr_and_reserved_lkey,
			      struct qed_rdma_create_qp_in_params *params)
{
	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;
	/* QP handle to be written in an async event */
	params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
	params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);

	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
	params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
	params->pd = pd->pd_id;
	params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
	params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
	params->stats_queue = 0;
	params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
	params->srq_id = 0;
	params->use_srq = false;
}

static inline int
qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
			      struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
{
	/* Allocate driver internal RQ array */
	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);
	if (!qp->rqe_wr_id)
		return -ENOMEM;
	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
		 "qp=%p. "
		 "sq_addr=0x%llx, "
		 "sq_len=%zd, "
		 "rq_addr=0x%llx, "
		 "rq_len=%zd"
		 "\n",
		 qp,
		 qp->usq.buf_addr,
		 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
}

	DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);
static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
{
	if (qp->usq.umem)
		ib_umem_release(qp->usq.umem);
	qp->usq.umem = NULL;

	return 0;
	if (qp->urq.umem)
		ib_umem_release(qp->urq.umem);
	qp->urq.umem = NULL;
}

static inline int
qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
static int qedr_create_user_qp(struct qedr_dev *dev,
			       struct qedr_qp *qp,
			      struct ib_qp_init_attr *attrs,
			      struct qed_rdma_create_qp_in_params *params)
			       struct ib_pd *ibpd,
			       struct ib_udata *udata,
			       struct ib_qp_init_attr *attrs)
{
	u32 temp_max_wr;
	struct qed_rdma_create_qp_in_params in_params;
	struct qed_rdma_create_qp_out_params out_params;
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct ib_ucontext *ib_ctx = NULL;
	struct qedr_ucontext *ctx = NULL;
	struct qedr_create_qp_ureq ureq;
	int rc = -EINVAL;

	/* Allocate driver internal SQ array */
	temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
	temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);
	ib_ctx = ibpd->uobject->context;
	ctx = get_qedr_ucontext(ib_ctx);

	/* temp_max_wr < attr->max_sqe < u16 so the casting is safe */
	qp->sq.max_wr = (u16)temp_max_wr;
	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);
	if (!qp->wqe_wr_id)
		return -ENOMEM;
	memset(&ureq, 0, sizeof(ureq));
	rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
	if (rc) {
		DP_ERR(dev, "Problem copying data from user space\n");
		return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);
	/* SQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
				  ureq.sq_len, 0, 0);
	if (rc)
		return rc;

	/* QP handle to be written in CQE */
	params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
	params->qp_handle_hi = upper_32_bits((uintptr_t)qp);
	/* RQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
				  ureq.rq_len, 0, 0);

	if (rc)
		return rc;

	memset(&in_params, 0, sizeof(in_params));
	qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
	in_params.qp_handle_lo = ureq.qp_handle_lo;
	in_params.qp_handle_hi = ureq.qp_handle_hi;
	in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
	in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
	in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
	in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      &in_params, &out_params);

	if (!qp->qed_qp) {
		rc = -ENOMEM;
		goto err1;
	}

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	rc = qedr_copy_qp_uresp(dev, qp, udata);
	if (rc)
		goto err;

	qedr_qp_user_print(dev, qp);

	return 0;
err:
	rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
	if (rc)
		DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);

err1:
	qedr_cleanup_user(dev, qp);
	return rc;
}

static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
static int
qedr_roce_create_kernel_qp(struct qedr_dev *dev,
			   struct qedr_qp *qp,
					 struct ib_qp_init_attr *attrs)
			   struct qed_rdma_create_qp_in_params *in_params,
			   u32 n_sq_elems, u32 n_rq_elems)
{
	u32 n_sq_elems, n_sq_entries;
	struct qed_rdma_create_qp_out_params out_params;
	int rc;

	/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
	 * user requested none, due to allocation issues.
	 */
	n_sq_entries = attrs->cap.max_send_wr;
	n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
	n_sq_entries = max_t(u32, n_sq_entries, 1);
	n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_PRODUCE,
					   QED_CHAIN_MODE_PBL,
@@ -1309,31 +1341,13 @@ static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
					   n_sq_elems,
					   QEDR_SQE_ELEMENT_SIZE,
					   &qp->sq.pbl);
	if (rc) {
		DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
		return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_SQ,
		 "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
		 qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
		 n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);
	return 0;
}
	if (rc)
		return rc;

static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
					 struct qedr_qp *qp,
					 struct ib_qp_init_attr *attrs)
{
	u32 n_rq_elems, n_rq_entries;
	int rc;
	in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
	in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);

	/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
	 * user requested none, due to allocation issues.
	 */
	n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
	n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					   QED_CHAIN_MODE_PBL,
@@ -1341,136 +1355,102 @@ static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
					   n_rq_elems,
					   QEDR_RQE_ELEMENT_SIZE,
					   &qp->rq.pbl);
	if (rc)
		return rc;

	if (rc) {
		DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
		return -ENOMEM;
	}
	in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
	in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);

	DP_DEBUG(dev, QEDR_MSG_RQ,
		 "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
		 qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
		 n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);
	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      in_params, &out_params);

	/* n_rq_entries < u16 so the casting is safe */
	qp->rq.max_wr = (u16)n_rq_entries;
	if (!qp->qed_qp)
		return -EINVAL;

	return 0;
}
	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

static inline void
qedr_init_qp_in_params_sq(struct qedr_dev *dev,
			  struct qedr_pd *pd,
			  struct qedr_qp *qp,
			  struct ib_qp_init_attr *attrs,
			  struct ib_udata *udata,
			  struct qed_rdma_create_qp_in_params *params)
{
	/* QP handle to be written in an async event */
	params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
	params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
	qedr_set_roce_db_info(dev, qp);

	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
	params->fmr_and_reserved_lkey = !udata;
	params->pd = pd->pd_id;
	params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
	params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
	params->max_sq_sges = 0;
	params->stats_queue = 0;

	if (udata) {
		params->sq_num_pages = qp->usq.pbl_info.num_pbes;
		params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
	} else {
		params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
		params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
	}
	return 0;
}

static inline void
qedr_init_qp_in_params_rq(struct qedr_qp *qp,
			  struct ib_qp_init_attr *attrs,
			  struct ib_udata *udata,
			  struct qed_rdma_create_qp_in_params *params)
static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
{
	params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
	params->srq_id = 0;
	params->use_srq = false;

	if (udata) {
		params->rq_num_pages = qp->urq.pbl_info.num_pbes;
		params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
	} else {
		params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
		params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
	}
}
	dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
	kfree(qp->wqe_wr_id);

static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
{
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
		 qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
		 qp->urq.buf_len);
	dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
	kfree(qp->rqe_wr_id);
}

static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
				    struct qedr_dev *dev,
static int qedr_create_kernel_qp(struct qedr_dev *dev,
				 struct qedr_qp *qp,
				    struct qedr_create_qp_ureq *ureq)
				 struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attrs)
{
	int rc;
	struct qed_rdma_create_qp_in_params in_params;
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	int rc = -EINVAL;
	u32 n_rq_elems;
	u32 n_sq_elems;
	u32 n_sq_entries;

	/* SQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
				  ureq->sq_len, 0, 0);
	if (rc)
		return rc;
	memset(&in_params, 0, sizeof(in_params));

	/* RQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
				  ureq->rq_len, 0, 0);
	/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
	 * user requested none, due to allocation issues.
	 * We should add an extra WR since the prod and cons indices of
	 * wqe_wr_id are managed in such a way that the WQ is considered full
	 * when (prod+1)%max_wr==cons. We currently don't do that because we
	 * double the number of entries due to an iSER issue that pushes far more
	 * WRs than indicated. If we decline its ib_post_send() then we get
	 * error prints in the dmesg we'd like to avoid.
	 */
	qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
			      dev->attr.max_sqe);

	if (rc)
		qedr_cleanup_user_sq(dev, qp);
	return rc;
	qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);
	if (!qp->wqe_wr_id) {
		DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
		return -ENOMEM;
	}

static inline int
qedr_init_kernel_qp(struct qedr_dev *dev,
		    struct qedr_qp *qp,
		    struct ib_qp_init_attr *attrs,
		    struct qed_rdma_create_qp_in_params *params)
{
	int rc;
	/* QP handle to be written in CQE */
	in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
	in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);

	rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
	if (rc) {
		DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
		return rc;
	}
	/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
	 * user requested none, due to allocation issues.
	 */
	qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);

	rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
	if (rc) {
		dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
		DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
		return rc;
	/* Allocate driver internal RQ array */
	qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);
	if (!qp->rqe_wr_id) {
		DP_ERR(dev,
		       "create qp: failed RQ shadow memory allocation\n");
		kfree(qp->wqe_wr_id);
		return -ENOMEM;
	}

	rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
	if (rc) {
		qedr_cleanup_kernel_sq(dev, qp);
		DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
		return rc;
	}
	qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);

	rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
	if (rc) {
		DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
		qedr_cleanup_kernel_sq(dev, qp);
		dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
		return rc;
	}
	n_sq_entries = attrs->cap.max_send_wr;
	n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
	n_sq_entries = max_t(u32, n_sq_entries, 1);
	n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;

	n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;

	rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
					n_sq_elems, n_rq_elems);
	if (rc)
		qedr_cleanup_kernel(dev, qp);

	return rc;
}
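The ring-sizing comments above boil down to a clamp followed by a per-WR element multiplication. A minimal worked sketch of that arithmetic, assuming invented values for the element multipliers and dev->attr.max_sqe (the real QEDR_MAX_SQE_ELEMENTS_PER_SQE and friends live in the qedr headers):

/* Hedged sketch of the SQ/RQ ring-sizing arithmetic; the numeric
 * values below are invented for illustration, only the
 * clamp-then-multiply structure mirrors the patch. */
#include <stdio.h>

#define MAX_SQE_ELEMENTS_PER_SQE 4 /* stand-in for QEDR_MAX_SQE_ELEMENTS_PER_SQE */
#define MAX_RQE_ELEMENTS_PER_RQE 1 /* stand-in for QEDR_MAX_RQE_ELEMENTS_PER_RQE */

static unsigned int clamp_u32(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int max_send_wr = 0;    /* caller asked for no SQ WRs */
	unsigned int max_recv_wr = 128;
	unsigned int dev_max_sqe = 8192; /* stand-in for dev->attr.max_sqe */

	/* Clamp to [1, dev_max_sqe]: at least one WR even if none requested. */
	unsigned int n_sq_entries = clamp_u32(max_send_wr, 1, dev_max_sqe);
	unsigned int n_sq_elems = n_sq_entries * MAX_SQE_ELEMENTS_PER_SQE;

	unsigned int n_rq_entries = max_recv_wr ? max_recv_wr : 1;
	unsigned int n_rq_elems = n_rq_entries * MAX_RQE_ELEMENTS_PER_RQE;

	printf("SQ: %u entries -> %u ring elements\n", n_sq_entries, n_sq_elems);
	printf("RQ: %u entries -> %u ring elements\n", n_rq_entries, n_rq_elems);
	return 0;
}

Running it with max_send_wr = 0 shows why the max_t(..., 1) clamp matters: the ring still gets one WR's worth of elements, matching the "at least a single WR, even if the user requested none" comments in the patch.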
@@ -1480,12 +1460,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
			     struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qed_rdma_create_qp_out_params out_params;
	struct qed_rdma_create_qp_in_params in_params;
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct ib_ucontext *ib_ctx = NULL;
	struct qedr_ucontext *ctx = NULL;
	struct qedr_create_qp_ureq ureq;
	struct qedr_qp *qp;
	struct ib_qp *ibqp;
	int rc = 0;
@@ -1500,101 +1475,42 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
	if (attrs->srq)
		return ERR_PTR(-EINVAL);

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
		 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
		 udata ? "user library" : "kernel", attrs->event_handler, pd,
		 get_qedr_cq(attrs->send_cq),
		 get_qedr_cq(attrs->send_cq)->icid,
		 get_qedr_cq(attrs->recv_cq),
		 get_qedr_cq(attrs->recv_cq)->icid);

	qedr_set_qp_init_params(dev, qp, pd, attrs);
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		DP_ERR(dev, "create qp: failed allocating memory\n");
		return ERR_PTR(-ENOMEM);
	}

	qedr_set_common_qp_params(dev, qp, pd, attrs);

	if (attrs->qp_type == IB_QPT_GSI) {
		if (udata) {
			DP_ERR(dev,
			       "create qp: unexpected udata when creating GSI QP\n");
			goto err0;
		}
		ibqp = qedr_create_gsi_qp(dev, attrs, qp);
		if (IS_ERR(ibqp))
			kfree(qp);
		return ibqp;
	}

	memset(&in_params, 0, sizeof(in_params));

	if (udata) {
		if (!(udata && ibpd->uobject && ibpd->uobject->context))
			goto err0;

		ib_ctx = ibpd->uobject->context;
		ctx = get_qedr_ucontext(ib_ctx);

		memset(&ureq, 0, sizeof(ureq));
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
			DP_ERR(dev,
			       "create qp: problem copying data from user space\n");
			goto err0;
		}

		rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);
		if (rc)
			goto err0;
	if (udata)
		rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
	else
		rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);

		qedr_init_qp_user_params(&in_params, &ureq);
	} else {
		rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
	if (rc)
			goto err0;
	}

	qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
	qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      &in_params, &out_params);

	if (!qp->qed_qp)
		goto err1;
		goto err;

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;
	qp->ibqp.qp_num = qp->qp_id;

	if (udata) {
		rc = qedr_copy_qp_uresp(dev, qp, udata);
		if (rc)
			goto err2;

		qedr_qp_user_print(dev, qp);
	} else {
		qedr_init_qp_kernel_doorbell_sq(dev, qp);
		qedr_init_qp_kernel_doorbell_rq(dev, qp);
	}

	DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
		 udata ? "user" : "kernel", qp);

	return &qp->ibqp;

err2:
	rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
	if (rc)
		DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
err1:
	if (udata) {
		qedr_cleanup_user_sq(dev, qp);
		qedr_cleanup_user_rq(dev, qp);
	} else {
		qedr_cleanup_kernel_sq(dev, qp);
		qedr_cleanup_kernel_rq(dev, qp);
	}

err0:
err:
	kfree(qp);

	return ERR_PTR(-EFAULT);
@@ -2067,6 +1983,24 @@ int qedr_query_qp(struct ib_qp *ibqp,
	return rc;
}

int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
{
	int rc = 0;

	if (qp->qp_type != IB_QPT_GSI) {
		rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
		if (rc)
			return rc;
	}

	if (qp->ibqp.uobject && qp->ibqp.uobject->context)
		qedr_cleanup_user(dev, qp);
	else
		qedr_cleanup_kernel(dev, qp);

	return 0;
}

int qedr_destroy_qp(struct ib_qp *ibqp)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);
@@ -2089,21 +2023,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
		qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
	}

	if (qp->qp_type != IB_QPT_GSI) {
		rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
		if (rc)
			return rc;
	} else {
	if (qp->qp_type == IB_QPT_GSI)
		qedr_destroy_gsi_qp(dev);
	}

	if (ibqp->uobject && ibqp->uobject->context) {
		qedr_cleanup_user_sq(dev, qp);
		qedr_cleanup_user_rq(dev, qp);
	} else {
		qedr_cleanup_kernel_sq(dev, qp);
		qedr_cleanup_kernel_rq(dev, qp);
	}
	qedr_free_qp_resources(dev, qp);

	kfree(qp);
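
With qedr_free_qp_resources() factored out, destroy now mirrors create: one firmware teardown step, then a single user-vs-kernel cleanup decision. The standalone model below captures just that selection; is_gsi and has_uobject are illustrative flags standing in for the qp->qp_type == IB_QPT_GSI and qp->ibqp.uobject checks, and the printf calls stand in for the real teardown work.

/* Standalone model of the unified teardown -- not driver code. */
#include <stdbool.h>
#include <stdio.h>

struct qp { bool is_gsi; bool has_uobject; };

static void cleanup_user(struct qp *qp)
{
	(void)qp;
	printf("release SQ/RQ umems\n"); /* cf. qedr_cleanup_user() */
}

static void cleanup_kernel(struct qp *qp)
{
	(void)qp;
	printf("free chains and shadow arrays\n"); /* cf. qedr_cleanup_kernel() */
}

static int free_qp_resources(struct qp *qp)
{
	/* GSI QPs have no firmware object to destroy. */
	if (!qp->is_gsi)
		printf("rdma_destroy_qp\n");

	/* One cleanup decision, shared by every destroy path. */
	if (qp->has_uobject)
		cleanup_user(qp);
	else
		cleanup_kernel(qp);
	return 0;
}

int main(void)
{
	struct qp kernel_qp = { .is_gsi = false, .has_uobject = false };

	return free_qp_resources(&kernel_qp);
}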