Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit de3d3530 authored by Steve Wise, committed by Roland Dreier
Browse files

RDMA/cxgb3: Streaming -> RDMA mode transition fixes



Due to a HW issue, our current scheme to transition the connection from
streaming to rdma mode is broken on the passive side.  The firmware
and driver now support a new transition scheme for the passive side:

 - driver posts rdma_init_wr (now including the initial receive seqno)
 - driver posts last streaming message via TX_DATA message (MPA start
   response)
 - uP atomically sends the last streaming message and transitions the
   tcb to rdma mode.
 - driver waits for wr_ack indicating the last streaming message was ACKed.

NOTE: This change also bumps the required firmware version to 4.3.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 149983af
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -833,7 +833,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
	wqe->ird = cpu_to_be32(attr->ird);
	wqe->ird = cpu_to_be32(attr->ird);
	wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
	wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
	wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
	wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
	wqe->rsvd = 0;
	wqe->irs = cpu_to_be32(attr->irs);
	skb->priority = 0;	/* 0=>ToeQ; 1=>CtrlQ */
	skb->priority = 0;	/* 0=>ToeQ; 1=>CtrlQ */
	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
}
}
+2 −1
Original line number Original line Diff line number Diff line
@@ -294,6 +294,7 @@ struct t3_rdma_init_attr {
	u64 qp_dma_addr;
	u64 qp_dma_addr;
	u32 qp_dma_size;
	u32 qp_dma_size;
	u32 flags;
	u32 flags;
	u32 irs;
};
};


struct t3_rdma_init_wr {
struct t3_rdma_init_wr {
@@ -314,7 +315,7 @@ struct t3_rdma_init_wr {
	__be32 ird;
	__be32 ird;
	__be64 qp_dma_addr;	/* 7 */
	__be64 qp_dma_addr;	/* 7 */
	__be32 qp_dma_size;	/* 8 */
	__be32 qp_dma_size;	/* 8 */
	u32 rsvd;
	u32 irs;
};
};


struct t3_genbit {
struct t3_genbit {
+32 −50
Original line number Original line Diff line number Diff line
@@ -515,7 +515,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
	req->len = htonl(len);
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->mpa_skb = skb;
@@ -566,7 +566,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
	req->len = htonl(mpalen);
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->mpa_skb = skb;
@@ -618,7 +618,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
	req->len = htonl(len);
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_MORE | F_TX_IMM_ACK | F_TX_INIT);
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	state_set(&ep->com, MPA_REP_SENT);
@@ -641,6 +641,7 @@ static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);


	ep->snd_seq = ntohl(req->snd_isn);
	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);


	set_emss(ep, ntohs(req->tcp_opt));
	set_emss(ep, ntohs(req->tcp_opt));


@@ -1023,6 +1024,9 @@ static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
	skb_pull(skb, sizeof(*hdr));
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);
	skb_trim(skb, dlen);


	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	switch (state_read(&ep->com)) {
	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		process_mpa_reply(ep, skb);
@@ -1060,7 +1064,6 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
	struct iwch_ep *ep = ctx;
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);
	unsigned int credits = ntohs(hdr->credits);
	enum iwch_qp_attr_mask  mask;


	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);


@@ -1072,30 +1075,6 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
	ep->mpa_skb = NULL;
	ep->mpa_skb = NULL;
	dst_confirm(ep->dst);
	dst_confirm(ep->dst);
	if (state_read(&ep->com) == MPA_REP_SENT) {
	if (state_read(&ep->com) == MPA_REP_SENT) {
		struct iwch_qp_attributes attrs;

		/* bind QP to EP and move to RTS */
		attrs.mpa_attr = ep->mpa_attr;
		attrs.max_ird = ep->ord;
		attrs.max_ord = ep->ord;
		attrs.llp_stream_handle = ep;
		attrs.next_state = IWCH_QP_STATE_RTS;

		/* bind QP and TID with INIT_WR */
		mask = IWCH_QP_ATTR_NEXT_STATE |
				     IWCH_QP_ATTR_LLP_STREAM_HANDLE |
				     IWCH_QP_ATTR_MPA_ATTR |
				     IWCH_QP_ATTR_MAX_IRD |
				     IWCH_QP_ATTR_MAX_ORD;

		ep->com.rpl_err = iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, mask, &attrs, 1);

		if (!ep->com.rpl_err) {
			state_set(&ep->com, FPDU_MODE);
			established_upcall(ep);
		}

		ep->com.rpl_done = 1;
		ep->com.rpl_done = 1;
		PDBG("waking up ep %p\n", ep);
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		wake_up(&ep->com.waitq);
@@ -1378,6 +1357,7 @@ static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)


	PDBG("%s ep %p\n", __FUNCTION__, ep);
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->snd_seq = ntohl(req->snd_isn);
	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);


	set_emss(ep, ntohs(req->tcp_opt));
	set_emss(ep, ntohs(req->tcp_opt));


@@ -1732,10 +1712,8 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);


	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
	if (state_read(&ep->com) == DEAD)
		put_ep(&ep->com);
		return -ECONNRESET;
		return -ECONNRESET;
	}


	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);
	BUG_ON(!qp);
@@ -1755,17 +1733,8 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	ep->ird = conn_param->ird;
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	ep->ord = conn_param->ord;
	PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord);
	PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord);

	get_ep(&ep->com);
	get_ep(&ep->com);
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err) {
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		cm_id->rem_ref(cm_id);
		abort_connection(ep, NULL, GFP_KERNEL);
		put_ep(&ep->com);
		return err;
	}


	/* bind QP to EP and move to RTS */
	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.mpa_attr = ep->mpa_attr;
@@ -1783,16 +1752,29 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)


	err = iwch_modify_qp(ep->com.qp->rhp,
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err;


	if (err) {
	/* wait for wr_ack */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (err)
		goto err;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	put_ep(&ep->com);
	return 0;
err:
	ep->com.cm_id = NULL;
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
	cm_id->rem_ref(cm_id);
	abort_connection(ep, NULL, GFP_KERNEL);
	abort_connection(ep, NULL, GFP_KERNEL);
	} else {
		state_set(&ep->com, FPDU_MODE);
		established_upcall(ep);
	}
	put_ep(&ep->com);
	put_ep(&ep->com);
	return err;
	return err;
}
}
+1 −0
Original line number Original line Diff line number Diff line
@@ -175,6 +175,7 @@ struct iwch_ep {
	unsigned int atid;
	unsigned int atid;
	u32 hwtid;
	u32 hwtid;
	u32 snd_seq;
	u32 snd_seq;
	u32 rcv_seq;
	struct l2t_entry *l2t;
	struct l2t_entry *l2t;
	struct dst_entry *dst;
	struct dst_entry *dst;
	struct sk_buff *mpa_skb;
	struct sk_buff *mpa_skb;
+1 −0
Original line number Original line Diff line number Diff line
@@ -732,6 +732,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
	init_attr.qp_dma_addr = qhp->wq.dma_addr;
	init_attr.qp_dma_addr = qhp->wq.dma_addr;
	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
	init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0;
	init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0;
	init_attr.irs = qhp->ep->rcv_seq;
	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
	     "flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
	     "flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
	     init_attr.rq_addr, init_attr.rq_size,
	     init_attr.rq_addr, init_attr.rq_size,
Loading