Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 54d10c1e authored by Dennis Dalessandro's avatar Dennis Dalessandro Committed by Doug Ledford
Browse files

staging/rdma/hfi1: Use rdmavt send flags and recv flags



Use the definitions of s_flags and r_flags, which are now provided by rdmavt.

Reviewed-by: default avatarIra Weiny <ira.weiny@intel.com>
Reviewed-by: default avatarHarish Chegondi <harish.chegondi@intel.com>
Signed-off-by: default avatarDennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: default avatarDoug Ledford <dledford@redhat.com>
parent 90963ad7
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -781,14 +781,14 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet)
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & HFI1_R_RSP_DEFERED_ACK) {
			qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK;
		if (qp->r_flags & RVT_R_RSP_NAK) {
			qp->r_flags &= ~RVT_R_RSP_NAK;
			hfi1_send_rc_ack(rcd, qp, 0);
		}
		if (qp->r_flags & HFI1_R_RSP_SEND) {
		if (qp->r_flags & RVT_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~HFI1_R_RSP_SEND;
			qp->r_flags &= ~RVT_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_hfi1_state_ops[qp->state] &
					HFI1_PROCESS_OR_FLUSH_SEND)
+1 −1
Original line number Diff line number Diff line
@@ -1564,7 +1564,7 @@ static void sc_piobufavail(struct send_context *sc)
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);

	for (i = 0; i < n; i++)
		hfi1_qp_wakeup(qps[i], HFI1_S_WAIT_PIO);
		hfi1_qp_wakeup(qps[i], RVT_S_WAIT_PIO);
}

/* translate a send credit update to a bit code of reasons */
+29 −29
Original line number Diff line number Diff line
@@ -360,7 +360,7 @@ static void reset_qp(struct rvt_qp *qp, enum ib_qp_type type)
		hfi1_do_send,
		iowait_sleep,
		iowait_wakeup);
	qp->s_flags &= HFI1_S_SIGNAL_REQ_WR;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
@@ -407,7 +407,7 @@ static void clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		hfi1_put_ss(&qp->s_rdma_read_sge);

	hfi1_put_ss(&qp->r_sge);
@@ -471,24 +471,24 @@ int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err)

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
		qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & HFI1_S_ANY_WAIT_SEND)
		qp->s_flags &= ~HFI1_S_ANY_WAIT_SEND;
	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	write_seqlock(&dev->iowait_lock);
	if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) {
		qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
	if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock(&dev->iowait_lock);

	if (!(qp->s_flags & HFI1_S_BUSY)) {
	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
@@ -507,7 +507,7 @@ int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) {
	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
@@ -742,7 +742,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			flush_iowait(qp);
			qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
			qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
@@ -762,7 +762,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~HFI1_R_COMM_EST;
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

@@ -828,7 +828,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
			qp->s_flags |= HFI1_S_AHG_CLEAR;
			qp->s_flags |= RVT_S_AHG_CLEAR;
			priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
			priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		}
@@ -954,7 +954,7 @@ int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & HFI1_S_SIGNAL_REQ_WR)
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
@@ -1154,7 +1154,7 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = HFI1_S_SIGNAL_REQ_WR;
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qp_dev->qpn_table, init_attr->qp_type,
@@ -1292,7 +1292,7 @@ int hfi1_destroy_qp(struct ib_qp *ibqp)
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		flush_iowait(qp);
		qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock_irq(&qp->r_lock);
		cancel_work_sync(&priv->s_iowait.iowork);
@@ -1398,20 +1398,20 @@ void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
	 * honor the credit field.
	 */
	if (credit == HFI1_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= HFI1_S_UNLIMITED_CREDIT;
			if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT;
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) {
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
		if (cmp_msn(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
@@ -1469,13 +1469,13 @@ static int iowait_sleep(
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= HFI1_S_WAIT_DMA_DESC;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			list_add_tail(&priv->s_iowait.list, &sde->dmawait);
			trace_hfi1_qpsleep(qp, HFI1_S_WAIT_DMA_DESC);
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			atomic_inc(&qp->refcount);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~HFI1_S_BUSY;
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
@@ -1495,7 +1495,7 @@ static void iowait_wakeup(struct iowait *wait, int reason)
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, HFI1_S_WAIT_DMA_DESC);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

int hfi1_qp_init(struct hfi1_ibdev *dev)
@@ -1712,7 +1712,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)

void qp_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= HFI1_R_COMM_EST;
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

@@ -1736,7 +1736,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp)
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= HFI1_S_AHG_CLEAR;
	qp->s_flags |= RVT_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);

+1 −1
Original line number Diff line number Diff line
@@ -125,7 +125,7 @@ static inline void clear_ahg(struct rvt_qp *qp)
	struct hfi1_qp_priv *priv = qp->priv;

	priv->s_hdr->ahgcount = 0;
	qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR);
	qp->s_flags &= ~(RVT_S_AHG_VALID | RVT_S_AHG_CLEAR);
	if (priv->s_sde && qp->s_ahgidx >= 0)
		sdma_ahg_free(priv->s_sde, qp->s_ahgidx);
	qp->s_ahgidx = -1;
+76 −76
Original line number Diff line number Diff line
@@ -76,7 +76,7 @@ static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,

static void start_timer(struct rvt_qp *qp)
{
	qp->s_flags |= HFI1_S_TIMER;
	qp->s_flags |= RVT_S_TIMER;
	qp->s_timer.function = rc_timeout;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies;
@@ -133,7 +133,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & HFI1_S_ACK_PENDING)
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}
@@ -218,7 +218,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~HFI1_S_ACK_PENDING;
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
@@ -242,12 +242,12 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
	qp->s_ack_state = OP(ACKNOWLEDGE);
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * HFI1_S_RESP_PENDING
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qp->s_flags &= ~(HFI1_S_RESP_PENDING
				| HFI1_S_ACK_PENDING
				| HFI1_S_AHG_VALID);
	qp->s_flags &= ~(RVT_S_RESP_PENDING
				| RVT_S_ACK_PENDING
				| RVT_S_AHG_VALID);
	return 0;
}

@@ -287,7 +287,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
	spin_lock_irqsave(&qp->s_lock, flags);

	/* Sending responses has higher priority over sending requests. */
	if ((qp->s_flags & HFI1_S_RESP_PENDING) &&
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    make_rc_ack(dev, qp, ohdr, pmtu))
		goto done;

@@ -299,7 +299,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_iowait.sdma_busy)) {
			qp->s_flags |= HFI1_S_WAIT_DMA;
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
@@ -310,12 +310,12 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
		goto done;
	}

	if (qp->s_flags & (HFI1_S_WAIT_RNR | HFI1_S_WAIT_ACK))
	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= HFI1_S_WAIT_PSN;
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
@@ -348,7 +348,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= HFI1_S_WAIT_FENCE;
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			wqe->psn = qp->s_next_psn;
@@ -366,9 +366,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT) &&
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= HFI1_S_WAIT_SSN_CREDIT;
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			wqe->lpsn = wqe->psn;
@@ -394,14 +394,14 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & HFI1_S_UNLIMITED_CREDIT))
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT) &&
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= HFI1_S_WAIT_SSN_CREDIT;
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			ohdr->u.rc.reth.vaddr =
@@ -441,11 +441,11 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= HFI1_S_WAIT_RDMAR;
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT))
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
				/*
				 * Adjust s_next_psn to count the
@@ -478,11 +478,11 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= HFI1_S_WAIT_RDMAR;
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT))
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
				wqe->lpsn = wqe->psn;
			}
@@ -649,9 +649,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
	delta = delta_psn(bth2, wqe->psn);
	if (delta && delta % HFI1_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & HFI1_S_SEND_ONE) {
		qp->s_flags &= ~HFI1_S_SEND_ONE;
		qp->s_flags |= HFI1_S_WAIT_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
@@ -669,7 +669,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
	goto unlock;

bail:
	qp->s_flags &= ~HFI1_S_BUSY;
	qp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
@@ -701,7 +701,7 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
	unsigned long flags;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if (qp->s_flags & HFI1_S_RESP_PENDING)
	if (qp->s_flags & RVT_S_RESP_PENDING)
		goto queue_ack;

	/* Ensure s_rdma_ack_cnt changes are committed */
@@ -774,11 +774,11 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
queue_ack:
	this_cpu_inc(*ibp->rvp.rc_qacks);
	spin_lock_irqsave(&qp->s_lock, flags);
	qp->s_flags |= HFI1_S_ACK_PENDING | HFI1_S_RESP_PENDING;
	qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	if (is_fecn)
		qp->s_flags |= HFI1_S_ECN;
		qp->s_flags |= RVT_S_ECN;

	/* Schedule the send tasklet. */
	hfi1_schedule_send(qp);
@@ -866,14 +866,14 @@ static void reset_psn(struct rvt_qp *qp, u32 psn)
done:
	qp->s_psn = psn;
	/*
	 * Set HFI1_S_WAIT_PSN as rc_complete() may start the timer
	 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
	 * asynchronously before the send tasklet can get scheduled.
	 * Doing it in hfi1_make_rc_req() is too late.
	 */
	if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= HFI1_S_WAIT_PSN;
	qp->s_flags &= ~HFI1_S_AHG_VALID;
		qp->s_flags |= RVT_S_WAIT_PSN;
	qp->s_flags &= ~RVT_S_AHG_VALID;
}

/*
@@ -904,11 +904,11 @@ static void restart_rc(struct rvt_qp *qp, u32 psn, int wait)
	else
		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

	qp->s_flags &= ~(HFI1_S_WAIT_FENCE | HFI1_S_WAIT_RDMAR |
			 HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_PSN |
			 HFI1_S_WAIT_ACK);
	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= HFI1_S_SEND_ONE;
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}

@@ -923,10 +923,10 @@ static void rc_timeout(unsigned long arg)

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & HFI1_S_TIMER) {
	if (qp->s_flags & RVT_S_TIMER) {
		ibp = to_iport(qp->ibqp.device, qp->port_num);
		ibp->rvp.n_rc_timeouts++;
		qp->s_flags &= ~HFI1_S_TIMER;
		qp->s_flags &= ~RVT_S_TIMER;
		del_timer(&qp->s_timer);
		trace_hfi1_rc_timeout(qp, qp->s_last_psn + 1);
		restart_rc(qp, qp->s_last_psn + 1, 1);
@@ -945,8 +945,8 @@ void hfi1_rc_rnr_retry(unsigned long arg)
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & HFI1_S_WAIT_RNR) {
		qp->s_flags &= ~HFI1_S_WAIT_RNR;
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		del_timer(&qp->s_timer);
		hfi1_schedule_send(qp);
	}
@@ -1017,7 +1017,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr)
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags &
		(HFI1_S_TIMER | HFI1_S_WAIT_RNR | HFI1_S_WAIT_PSN)) &&
		(RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
		(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK))
		start_timer(qp);

@@ -1032,7 +1032,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr)
			rvt_put_mr(sge->mr);
		}
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) ||
		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			memset(&wc, 0, sizeof(wc));
			wc.wr_id = wqe->wr.wr_id;
@@ -1050,9 +1050,9 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr)
	 * and they are now complete, restart sending.
	 */
	trace_hfi1_rc_sendcomplete(qp, psn);
	if (qp->s_flags & HFI1_S_WAIT_PSN &&
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~HFI1_S_WAIT_PSN;
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		hfi1_schedule_send(qp);
@@ -1089,7 +1089,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
			rvt_put_mr(sge->mr);
		}
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) ||
		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			memset(&wc, 0, sizeof(wc));
			wc.wr_id = wqe->wr.wr_id;
@@ -1169,8 +1169,8 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
	int diff;

	/* Remove QP from retry timer */
	if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
		qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

@@ -1218,11 +1218,11 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & HFI1_R_RDMAR_SEQ)) {
				qp->r_flags |= HFI1_R_RDMAR_SEQ;
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= HFI1_R_RSP_SEND;
					qp->r_flags |= RVT_R_RSP_SEND;
					atomic_inc(&qp->refcount);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
@@ -1245,14 +1245,14 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & HFI1_S_WAIT_FENCE) &&
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(HFI1_S_WAIT_FENCE |
						 HFI1_S_WAIT_ACK);
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			} else if (qp->s_flags & HFI1_S_WAIT_RDMAR) {
				qp->s_flags &= ~(HFI1_S_WAIT_RDMAR |
						 HFI1_S_WAIT_ACK);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			}
		}
@@ -1280,8 +1280,8 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = psn + 1;
		}
		if (qp->s_flags & HFI1_S_WAIT_ACK) {
			qp->s_flags &= ~HFI1_S_WAIT_ACK;
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}
		hfi1_get_credit(qp, aeth);
@@ -1295,7 +1295,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail;
		if (qp->s_flags & HFI1_S_WAIT_RNR)
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
@@ -1311,8 +1311,8 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,

		reset_psn(qp, psn);

		qp->s_flags &= ~(HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_ACK);
		qp->s_flags |= HFI1_S_WAIT_RNR;
		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		qp->s_flags |= RVT_S_WAIT_RNR;
		qp->s_timer.function = hfi1_rc_rnr_retry;
		qp->s_timer.expires = jiffies + usecs_to_jiffies(
			ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
@@ -1387,8 +1387,8 @@ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
	struct rvt_swqe *wqe;

	/* Remove QP from retry timer */
	if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
		qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

@@ -1403,10 +1403,10 @@ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= HFI1_R_RDMAR_SEQ;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= HFI1_R_RSP_SEND;
		qp->r_flags |= RVT_R_RSP_SEND;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
@@ -1466,10 +1466,10 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & HFI1_R_RDMAR_SEQ) {
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~HFI1_R_RDMAR_SEQ;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
@@ -1520,10 +1520,10 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		qp->s_flags |= HFI1_S_TIMER;
		qp->s_flags |= RVT_S_TIMER;
		mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
		if (qp->s_flags & HFI1_S_WAIT_ACK) {
			qp->s_flags &= ~HFI1_S_WAIT_ACK;
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}

@@ -1613,7 +1613,7 @@ static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct rvt_qp *qp)
{
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= HFI1_R_RSP_DEFERED_ACK;
		qp->r_flags |= RVT_R_RSP_NAK;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
@@ -1627,7 +1627,7 @@ static inline void rc_cancel_ack(struct rvt_qp *qp)
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
	qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK;
	qp->r_flags &= ~RVT_R_RSP_NAK;
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
@@ -1813,7 +1813,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= HFI1_S_RESP_PENDING;
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	hfi1_schedule_send(qp);

@@ -2057,7 +2057,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
		break;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & HFI1_R_COMM_EST))
	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		qp_comm_est(qp);

	/* OK, process the packet. */
@@ -2127,7 +2127,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
		hfi1_copy_sge(&qp->r_sge, data, tlen, 1);
		hfi1_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
		if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
@@ -2264,7 +2264,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= HFI1_S_RESP_PENDING;
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2331,7 +2331,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= HFI1_S_RESP_PENDING;
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
Loading