Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 04b5d028 authored by Steve Wise, committed by Roland Dreier
Browse files

RDMA/cxgb3: Handle EEH events



- wrap calls into cxgb3 and fail them if we're in the middle
  of a PCI EEH event.

- correctly unwind and release endpoint and other resources when
  we are in an EEH event.

- dispatch IB_EVENT_DEVICE_FATAL event when cxgb3 notifies iw_cxgb3 of
  a fatal error.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 5d80f8e5
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -152,7 +152,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
	sge_cmd = qpid << 8 | 3;
	wqe->sge_cmd = cpu_to_be64(sge_cmd);
	skb->priority = CPL_PRIORITY_CONTROL;
	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
	return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
}

int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
@@ -571,7 +571,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
	     (unsigned long long) rdev_p->ctrl_qp.dma_addr,
	     rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2);
	skb->priority = CPL_PRIORITY_CONTROL;
	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
	return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
err:
	kfree_skb(skb);
	return err;
@@ -701,7 +701,7 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
	u32 stag_idx;
	u32 wptr;

	if (rdev_p->flags)
	if (cxio_fatal_error(rdev_p))
		return -EIO;

	stag_state = stag_state > 0;
@@ -858,7 +858,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
	wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
	wqe->irs = cpu_to_be32(attr->irs);
	skb->priority = 0;	/* 0=>ToeQ; 1=>CtrlQ */
	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
	return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
}

void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
@@ -1041,9 +1041,9 @@ void cxio_rdev_close(struct cxio_rdev *rdev_p)
		cxio_hal_pblpool_destroy(rdev_p);
		cxio_hal_rqtpool_destroy(rdev_p);
		list_del(&rdev_p->entry);
		rdev_p->t3cdev_p->ulp = NULL;
		cxio_hal_destroy_ctrl_qp(rdev_p);
		cxio_hal_destroy_resource(rdev_p->rscp);
		rdev_p->t3cdev_p->ulp = NULL;
	}
}

+6 −0
Original line number Diff line number Diff line
@@ -115,6 +115,11 @@ struct cxio_rdev {
#define	CXIO_ERROR_FATAL	1
};

/*
 * Nonzero when a fatal error (e.g. a PCI EEH event) has been latched
 * on this rnic device via the CXIO_ERROR_FATAL flag.
 */
static inline int cxio_fatal_error(struct cxio_rdev *rdev_p)
{
	return (rdev_p->flags & CXIO_ERROR_FATAL) ? 1 : 0;
}

/*
 * Number of STAGs usable on this device: the TPT region size shifted
 * right by 5 (presumably 32-byte TPT entries — confirm against hw docs),
 * capped at T3_MAX_NUM_STAG.
 */
static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
{
	int tpt_entries = (int)((rdev_p->rnic_info.tpt_top -
				 rdev_p->rnic_info.tpt_base) >> 5);

	return min((int)T3_MAX_NUM_STAG, tpt_entries);
}
@@ -188,6 +193,7 @@ void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
void cxio_flush_hw_cq(struct t3_cq *cq);
int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
		     u8 *cqe_flushed, u64 *cookie, u32 *credit);
int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb);

#define MOD "iw_cxgb3: "
#define PDBG(fmt, args...) pr_debug(MOD fmt, ## args)
+9 −2
Original line number Diff line number Diff line
@@ -165,12 +165,19 @@ static void close_rnic_dev(struct t3cdev *tdev)
/*
 * Error callback registered with the cxgb3 low-level driver.
 *
 * On OFFLOAD_STATUS_DOWN (reported e.g. during a PCI EEH event) this
 * latches CXIO_ERROR_FATAL on the rdev so subsequent send wrappers
 * fail fast, then dispatches IB_EVENT_DEVICE_FATAL so RDMA consumers
 * learn the device is gone.
 *
 * NOTE: the scraped diff rendering had interleaved the pre-patch lines
 * (a duplicate un-braced "if" and a stray early "return;") with the
 * patched body; this is the coherent post-patch function.
 */
static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error)
{
	struct cxio_rdev *rdev = tdev->ulp;
	struct iwch_dev *rnicp = rdev_to_iwch_dev(rdev);
	struct ib_event event;

	if (status == OFFLOAD_STATUS_DOWN) {
		rdev->flags = CXIO_ERROR_FATAL;

		event.device = &rnicp->ibdev;
		event.event  = IB_EVENT_DEVICE_FATAL;
		event.element.port_num = 0;
		ib_dispatch_event(&event);
	}
}

static int __init iwch_init_module(void)
+5 −0
Original line number Diff line number Diff line
@@ -117,6 +117,11 @@ static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
	return container_of(ibdev, struct iwch_dev, ibdev);
}

/* Map an embedded cxio_rdev back to its containing iwch_dev. */
static inline struct iwch_dev *rdev_to_iwch_dev(struct cxio_rdev *rdev)
{
	return container_of(rdev, struct iwch_dev, rdev);
}

static inline int t3b_device(const struct iwch_dev *rhp)
{
	return rhp->rdev.t3cdev_p->type == T3B;
+65 −25
Original line number Diff line number Diff line
@@ -139,6 +139,38 @@ static void stop_ep_timer(struct iwch_ep *ep)
	put_ep(&ep->com);
}

/*
 * Wrapper around l2t_send() that fails fast with -EIO when the device
 * has latched a fatal (EEH) error.  Consumes @skb on every failure
 * path, so callers never need to free it themselves.
 */
int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
{
	struct cxio_rdev *rdev = tdev->ulp;
	int err;

	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}

	err = l2t_send(tdev, skb, l2e);
	if (err)
		kfree_skb(skb);
	return err;
}

/*
 * Wrapper around cxgb3_ofld_send() that fails fast with -EIO when the
 * device has latched a fatal (EEH) error.  Consumes @skb on every
 * failure path, so callers never need to free it themselves.
 */
int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct cxio_rdev *rdev = tdev->ulp;
	int err;

	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}

	err = cxgb3_ofld_send(tdev, skb);
	if (err)
		kfree_skb(skb);
	return err;
}

static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;
@@ -150,7 +182,7 @@ static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	cxgb3_ofld_send(tdev, skb);
	iwch_cxgb3_ofld_send(tdev, skb);
	return;
}

@@ -172,8 +204,7 @@ int iwch_quiesce_tid(struct iwch_ep *ep)
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	cxgb3_ofld_send(ep->com.tdev, skb);
	return 0;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

int iwch_resume_tid(struct iwch_ep *ep)
@@ -194,8 +225,7 @@ int iwch_resume_tid(struct iwch_ep *ep)
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	cxgb3_ofld_send(ep->com.tdev, skb);
	return 0;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

static void set_emss(struct iwch_ep *ep, u16 opt)
@@ -382,7 +412,7 @@ static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)

	PDBG("%s t3cdev %p\n", __func__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(dev, skb);
	iwch_cxgb3_ofld_send(dev, skb);
}

static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
@@ -402,8 +432,7 @@ static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
@@ -424,8 +453,7 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_connect(struct iwch_ep *ep)
@@ -469,8 +497,7 @@ static int send_connect(struct iwch_ep *ep)
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
@@ -527,7 +554,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
@@ -578,8 +605,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
@@ -630,8 +656,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
@@ -795,7 +820,7 @@ static int update_rx_credits(struct iwch_ep *ep, u32 credits)
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	cxgb3_ofld_send(ep->com.tdev, skb);
	iwch_cxgb3_ofld_send(ep->com.tdev, skb);
	return credits;
}

@@ -1203,8 +1228,7 @@ static int listen_start(struct iwch_listen_ep *ep)
	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

	skb->priority = 1;
	cxgb3_ofld_send(ep->com.tdev, skb);
	return 0;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
@@ -1237,8 +1261,7 @@ static int listen_stop(struct iwch_listen_ep *ep)
	req->cpu_idx = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = 1;
	cxgb3_ofld_send(ep->com.tdev, skb);
	return 0;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
@@ -1286,7 +1309,7 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);

	return;
}
@@ -1315,7 +1338,7 @@ static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		cxgb3_ofld_send(tdev, skb);
		iwch_cxgb3_ofld_send(tdev, skb);
	}
}

@@ -1613,7 +1636,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(ep->com.tdev, rpl_skb);
	iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
@@ -2017,8 +2040,11 @@ int iwch_destroy_listen(struct iw_cm_id *cm_id)
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	if (err)
		goto done;
	wait_event(ep->com.waitq, ep->com.rpl_done);
	cxgb3_free_stid(ep->com.tdev, ep->stid);
done:
	err = ep->com.rpl_err;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
@@ -2030,12 +2056,22 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
	int ret=0;
	unsigned long flags;
	int close = 0;
	int fatal = 0;
	struct t3cdev *tdev;
	struct cxio_rdev *rdev;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	tdev = (struct t3cdev *)ep->com.tdev;
	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
@@ -2075,7 +2111,11 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
			ret = send_abort(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	if (fatal)
		release_ep_resources(ep);
	return ret;
}

Loading