Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ffc26907 authored by Dennis Dalessandro's avatar Dennis Dalessandro Committed by Doug Ledford
Browse files

IB/qib: Remove driver specific members from qib qp type



In preparation for moving the queue pair data structure to rdmavt, the
members of the driver-specific queue pairs which are not common need to be
pushed off to a private driver structure. This structure will be available
in the queue pair, once moved to rdmavt, as a void pointer. This patch,
while not adding a lot of value in and of itself, is a prerequisite to
moving the queue pair out of the drivers and into rdmavt.

The driver-specific, private queue pair data structure should condense as
more of the send-side code moves to rdmavt.

Reviewed-by: default avatarIra Weiny <ira.weiny@intel.com>
Reviewed-by: default avatarMike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: default avatarDennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: default avatarDoug Ledford <dledford@redhat.com>
parent 869a2a96
Loading
Loading
Loading
Loading
+46 −30
Original line number Original line Diff line number Diff line
@@ -371,10 +371,11 @@ struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
 */
 */
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
{
{
	struct qib_qp_priv *priv = qp->priv;
	qp->remote_qpn = 0;
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->qp_access_flags = 0;
	atomic_set(&qp->s_dma_busy, 0);
	atomic_set(&priv->s_dma_busy, 0);
	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_wqe = NULL;
@@ -474,6 +475,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
 */
 */
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
{
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	struct ib_wc wc;
	int ret = 0;
	int ret = 0;
@@ -492,9 +494,9 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;


	spin_lock(&dev->pending_lock);
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
	if (!list_empty(&priv->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
		list_del_init(&qp->iowait);
		list_del_init(&priv->iowait);
	}
	}
	spin_unlock(&dev->pending_lock);
	spin_unlock(&dev->pending_lock);


@@ -504,9 +506,9 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
			qib_put_mr(qp->s_rdma_mr);
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
			qp->s_rdma_mr = NULL;
		}
		}
		if (qp->s_tx) {
		if (priv->s_tx) {
			qib_put_txreq(qp->s_tx);
			qib_put_txreq(priv->s_tx);
			qp->s_tx = NULL;
			priv->s_tx = NULL;
		}
		}
	}
	}


@@ -572,6 +574,7 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
{
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_qp_priv *priv = qp->priv;
	enum ib_qp_state cur_state, new_state;
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	struct ib_event ev;
	int lastwqe = 0;
	int lastwqe = 0;
@@ -699,19 +702,20 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		if (qp->state != IB_QPS_RESET) {
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->iowait))
			if (!list_empty(&priv->iowait))
				list_del_init(&qp->iowait);
				list_del_init(&priv->iowait);
			spin_unlock(&dev->pending_lock);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&qp->s_work);
			cancel_work_sync(&priv->s_work);
			del_timer_sync(&qp->s_timer);
			del_timer_sync(&qp->s_timer);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			wait_event(priv->wait_dma,
			if (qp->s_tx) {
				   !atomic_read(&priv->s_dma_busy));
				qib_put_txreq(qp->s_tx);
			if (priv->s_tx) {
				qp->s_tx = NULL;
				qib_put_txreq(priv->s_tx);
				priv->s_tx = NULL;
			}
			}
			remove_qp(dev, qp);
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			wait_event(qp->wait, !atomic_read(&qp->refcount));
@@ -987,7 +991,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
	size_t sg_list_sz;
	size_t sg_list_sz;
	struct ib_qp *ret;
	struct ib_qp *ret;
	gfp_t gfp;
	gfp_t gfp;

	struct qib_qp_priv *priv;


	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
@@ -1055,11 +1059,18 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			goto bail_swq;
			goto bail_swq;
		}
		}
		RCU_INIT_POINTER(qp->next, NULL);
		RCU_INIT_POINTER(qp->next, NULL);
		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
		priv = kzalloc(sizeof(*priv), gfp);
		if (!qp->s_hdr) {
		if (!priv) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp_hdr;
		}
		priv->owner = qp;
		priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
		if (!priv->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
			goto bail_qp;
		}
		}
		qp->priv = priv;
		qp->timeout_jiffies =
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
				1000UL);
@@ -1095,11 +1106,11 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
		spin_lock_init(&qp->r_rq.lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		init_waitqueue_head(&priv->wait_dma);
		init_timer(&qp->s_timer);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		qp->s_timer.data = (unsigned long)qp;
		INIT_WORK(&qp->s_work, qib_do_send);
		INIT_WORK(&priv->s_work, qib_do_send);
		INIT_LIST_HEAD(&qp->iowait);
		INIT_LIST_HEAD(&priv->iowait);
		INIT_LIST_HEAD(&qp->rspwait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_wq = swq;
@@ -1189,7 +1200,9 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
		vfree(qp->r_rq.wq);
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
bail_qp:
	kfree(qp->s_hdr);
	kfree(priv->s_hdr);
	kfree(priv);
bail_qp_hdr:
	kfree(qp);
	kfree(qp);
bail_swq:
bail_swq:
	vfree(swq);
	vfree(swq);
@@ -1210,23 +1223,24 @@ int qib_destroy_qp(struct ib_qp *ibqp)
{
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp_priv *priv = qp->priv;


	/* Make sure HW and driver activity is stopped. */
	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->iowait))
		if (!list_empty(&priv->iowait))
			list_del_init(&qp->iowait);
			list_del_init(&priv->iowait);
		spin_unlock(&dev->pending_lock);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&qp->s_work);
		cancel_work_sync(&priv->s_work);
		del_timer_sync(&qp->s_timer);
		del_timer_sync(&qp->s_timer);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
		wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
		if (qp->s_tx) {
		if (priv->s_tx) {
			qib_put_txreq(qp->s_tx);
			qib_put_txreq(priv->s_tx);
			qp->s_tx = NULL;
			priv->s_tx = NULL;
		}
		}
		remove_qp(dev, qp);
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		wait_event(qp->wait, !atomic_read(&qp->refcount));
@@ -1245,7 +1259,8 @@ int qib_destroy_qp(struct ib_qp *ibqp)
	else
	else
		vfree(qp->r_rq.wq);
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	vfree(qp->s_wq);
	kfree(qp->s_hdr);
	kfree(priv->s_hdr);
	kfree(priv);
	kfree(qp);
	kfree(qp);
	return 0;
	return 0;
}
}
@@ -1368,6 +1383,7 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
{
	struct qib_swqe *wqe;
	struct qib_swqe *wqe;
	struct qib_qp *qp = iter->qp;
	struct qib_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;


	wqe = get_swqe_ptr(qp, qp->s_last);
	wqe = get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
	seq_printf(s,
@@ -1379,8 +1395,8 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
		   wqe->wr.opcode,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_hdrwords,
		   qp->s_flags,
		   qp->s_flags,
		   atomic_read(&qp->s_dma_busy),
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&qp->iowait),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   qp->timeout,
		   wqe->ssn,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_lsn,
+4 −3
Original line number Original line Diff line number Diff line
@@ -230,6 +230,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
 */
 */
int qib_make_rc_req(struct qib_qp *qp)
int qib_make_rc_req(struct qib_qp *qp)
{
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_other_headers *ohdr;
	struct qib_other_headers *ohdr;
	struct qib_sge_state *ss;
	struct qib_sge_state *ss;
@@ -244,9 +245,9 @@ int qib_make_rc_req(struct qib_qp *qp)
	int ret = 0;
	int ret = 0;
	int delta;
	int delta;


	ohdr = &qp->s_hdr->u.oth;
	ohdr = &priv->s_hdr->u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr->u.l.oth;
		ohdr = &priv->s_hdr->u.l.oth;


	/*
	/*
	 * The lock is needed to synchronize between the sending tasklet,
	 * The lock is needed to synchronize between the sending tasklet,
@@ -266,7 +267,7 @@ int qib_make_rc_req(struct qib_qp *qp)
		if (qp->s_last == qp->s_head)
		if (qp->s_last == qp->s_head)
			goto bail;
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&qp->s_dma_busy)) {
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= QIB_S_WAIT_DMA;
			qp->s_flags |= QIB_S_WAIT_DMA;
			goto bail;
			goto bail;
		}
		}
+11 −7
Original line number Original line Diff line number Diff line
@@ -675,6 +675,7 @@ u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
			 u32 bth0, u32 bth2)
{
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0;
	u16 lrh0;
	u32 nwords;
	u32 nwords;
@@ -685,17 +686,18 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = QIB_LRH_BTH;
	lrh0 = QIB_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
		qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
					       &qp->remote_ah_attr.grh,
					       &qp->remote_ah_attr.grh,
					       qp->s_hdrwords, nwords);
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
		lrh0 = QIB_LRH_GRH;
	}
	}
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
		qp->remote_ah_attr.sl << 4;
	qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	priv->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	priv->s_hdr->lrh[2] =
	qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
			cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	priv->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
				       qp->remote_ah_attr.src_path_bits);
				       qp->remote_ah_attr.src_path_bits);
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	bth0 |= extra_bytes << 20;
@@ -717,7 +719,9 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
 */
 */
void qib_do_send(struct work_struct *work)
void qib_do_send(struct work_struct *work)
{
{
	struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
	struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv,
						s_work);
	struct qib_qp *qp = priv->owner;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct qib_qp *qp);
	int (*make_req)(struct qib_qp *qp);
@@ -756,7 +760,7 @@ void qib_do_send(struct work_struct *work)
			 * If the packet cannot be sent now, return and
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 * the send tasklet will be woken up later.
			 */
			 */
			if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords,
			if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
					   qp->s_cur_sge, qp->s_cur_size))
				break;
				break;
			/* Record that s_hdr is empty. */
			/* Record that s_hdr is empty. */
+11 −6
Original line number Original line Diff line number Diff line
@@ -513,7 +513,9 @@ int qib_sdma_running(struct qib_pportdata *ppd)
static void complete_sdma_err_req(struct qib_pportdata *ppd,
static void complete_sdma_err_req(struct qib_pportdata *ppd,
				  struct qib_verbs_txreq *tx)
				  struct qib_verbs_txreq *tx)
{
{
	atomic_inc(&tx->qp->s_dma_busy);
	struct qib_qp_priv *priv = tx->qp->priv;

	atomic_inc(&priv->s_dma_busy);
	/* no sdma descriptors, so no unmap_desc */
	/* no sdma descriptors, so no unmap_desc */
	tx->txreq.start_idx = 0;
	tx->txreq.start_idx = 0;
	tx->txreq.next_descq_idx = 0;
	tx->txreq.next_descq_idx = 0;
@@ -543,6 +545,7 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
	u64 sdmadesc[2];
	u64 sdmadesc[2];
	u32 dwoffset;
	u32 dwoffset;
	dma_addr_t addr;
	dma_addr_t addr;
	struct qib_qp_priv *priv;


	spin_lock_irqsave(&ppd->sdma_lock, flags);
	spin_lock_irqsave(&ppd->sdma_lock, flags);


@@ -644,8 +647,8 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
		descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
		descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
		descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
		descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);

	priv = tx->qp->priv;
	atomic_inc(&tx->qp->s_dma_busy);
	atomic_inc(&priv->s_dma_busy);
	tx->txreq.next_descq_idx = tail;
	tx->txreq.next_descq_idx = tail;
	ppd->dd->f_sdma_update_tail(ppd, tail);
	ppd->dd->f_sdma_update_tail(ppd, tail);
	ppd->sdma_descq_added += tx->txreq.sg_count;
	ppd->sdma_descq_added += tx->txreq.sg_count;
@@ -663,6 +666,7 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
		unmap_desc(ppd, tail);
		unmap_desc(ppd, tail);
	}
	}
	qp = tx->qp;
	qp = tx->qp;
	priv = qp->priv;
	qib_put_txreq(tx);
	qib_put_txreq(tx);
	spin_lock(&qp->r_lock);
	spin_lock(&qp->r_lock);
	spin_lock(&qp->s_lock);
	spin_lock(&qp->s_lock);
@@ -679,6 +683,7 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,


busy:
busy:
	qp = tx->qp;
	qp = tx->qp;
	priv = qp->priv;
	spin_lock(&qp->s_lock);
	spin_lock(&qp->s_lock);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		struct qib_ibdev *dev;
		struct qib_ibdev *dev;
@@ -690,16 +695,16 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
		 */
		 */
		tx->ss = ss;
		tx->ss = ss;
		tx->dwords = dwords;
		tx->dwords = dwords;
		qp->s_tx = tx;
		priv->s_tx = tx;
		dev = &ppd->dd->verbs_dev;
		dev = &ppd->dd->verbs_dev;
		spin_lock(&dev->pending_lock);
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->iowait)) {
		if (list_empty(&priv->iowait)) {
			struct qib_ibport *ibp;
			struct qib_ibport *ibp;


			ibp = &ppd->ibport_data;
			ibp = &ppd->ibport_data;
			ibp->n_dmawait++;
			ibp->n_dmawait++;
			qp->s_flags |= QIB_S_WAIT_DMA_DESC;
			qp->s_flags |= QIB_S_WAIT_DMA_DESC;
			list_add_tail(&qp->iowait, &dev->dmawait);
			list_add_tail(&priv->iowait, &dev->dmawait);
		}
		}
		spin_unlock(&dev->pending_lock);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		qp->s_flags &= ~QIB_S_BUSY;
+4 −3
Original line number Original line Diff line number Diff line
@@ -45,6 +45,7 @@
 */
 */
int qib_make_uc_req(struct qib_qp *qp)
int qib_make_uc_req(struct qib_qp *qp)
{
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_other_headers *ohdr;
	struct qib_other_headers *ohdr;
	struct qib_swqe *wqe;
	struct qib_swqe *wqe;
	unsigned long flags;
	unsigned long flags;
@@ -63,7 +64,7 @@ int qib_make_uc_req(struct qib_qp *qp)
		if (qp->s_last == qp->s_head)
		if (qp->s_last == qp->s_head)
			goto bail;
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&qp->s_dma_busy)) {
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= QIB_S_WAIT_DMA;
			qp->s_flags |= QIB_S_WAIT_DMA;
			goto bail;
			goto bail;
		}
		}
@@ -72,9 +73,9 @@ int qib_make_uc_req(struct qib_qp *qp)
		goto done;
		goto done;
	}
	}


	ohdr = &qp->s_hdr->u.oth;
	ohdr = &priv->s_hdr->u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr->u.l.oth;
		ohdr = &priv->s_hdr->u.l.oth;


	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	hwords = 5;
Loading