Commit 48947109 authored by Mike Marciniszyn, committed by Roland Dreier

IB/qib: Optimize locking for get_txreq()



The current code locks the QP s_lock, followed by the pending_lock, I
guess to protect against the allocation failing.

This patch only locks the pending_lock, assuming that the empty case
is an exception, in which case the pending_lock is dropped and the
original code is executed.  This saves taking the s_lock in the
normal case.

The observation is that the sdma descriptors will deplete at twice the
rate of txreqs, so an empty free list should be rare.
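To make the shape of the change easier to see outside the driver, here is a
minimal, self-contained userspace sketch of the same fast-path/slow-path
split, with pthread mutexes standing in for the spinlocks and a bare
singly-linked free list standing in for the kernel list.  All names here
(get_txreq_slow, struct txreq, and so on) are illustrative, not the
driver's:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct txreq {
	struct txreq *next;
};

static pthread_mutex_t s_lock = PTHREAD_MUTEX_INITIALIZER;       /* stands in for the QP s_lock */
static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER; /* guards the free list */
static struct txreq *txreq_free;                                  /* free-list head */

/* Slow path, kept out of line as in the patch: take both locks in the
 * original order and re-check, since a txreq may have been freed after
 * the fast path dropped pending_lock.  (The real code also queues the
 * QP on a wait list when still empty; that part is elided.) */
__attribute__((noinline))
static struct txreq *get_txreq_slow(void)
{
	struct txreq *tx = NULL;

	pthread_mutex_lock(&s_lock);
	pthread_mutex_lock(&pending_lock);
	if (txreq_free) {
		tx = txreq_free;
		txreq_free = tx->next;
	}
	pthread_mutex_unlock(&pending_lock);
	pthread_mutex_unlock(&s_lock);
	return tx;
}

/* Fast path: one lock, one list op; fall back only when the list is empty. */
static struct txreq *get_txreq(void)
{
	struct txreq *tx;

	pthread_mutex_lock(&pending_lock);
	if (txreq_free) {
		tx = txreq_free;
		txreq_free = tx->next;
		pthread_mutex_unlock(&pending_lock);
		return tx;
	}
	pthread_mutex_unlock(&pending_lock);
	return get_txreq_slow();
}

int main(void)
{
	struct txreq reqs[2] = { { &reqs[1] }, { NULL } };
	struct txreq *a, *b, *c;

	txreq_free = &reqs[0];
	a = get_txreq();	/* fast path */
	b = get_txreq();	/* fast path */
	c = get_txreq();	/* list empty: slow path, returns NULL here */
	printf("%p %p %p (last is NULL: free list empty)\n",
	       (void *)a, (void *)b, (void *)c);
	return 0;
}

The design point is the same as the patch's: the common case pays for one
lock instead of two, and the rare empty case is pushed into a noinline
helper so it stays off the hot instruction path.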

Signed-off-by: Mike Marciniszyn <mike.marciniszyn@qlogic.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent eddfb675
drivers/infiniband/hw/qib/qib_verbs.c +33 −10
@@ -913,8 +913,8 @@ static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
 		__raw_writel(last, piobuf);
 }
 
-static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
-					 struct qib_qp *qp, int *retp)
+static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
+					   struct qib_qp *qp)
 {
 	struct qib_verbs_txreq *tx;
 	unsigned long flags;
@@ -926,8 +926,9 @@ static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
 		struct list_head *l = dev->txreq_free.next;
 
 		list_del(l);
+		spin_unlock(&dev->pending_lock);
+		spin_unlock_irqrestore(&qp->s_lock, flags);
 		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
-		*retp = 0;
 	} else {
 		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
 		    list_empty(&qp->iowait)) {
@@ -935,14 +936,33 @@ static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
 			qp->s_flags |= QIB_S_WAIT_TX;
 			list_add_tail(&qp->iowait, &dev->txwait);
 		}
-		tx = NULL;
 		qp->s_flags &= ~QIB_S_BUSY;
-		*retp = -EBUSY;
-	}
-
-	spin_unlock(&dev->pending_lock);
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-
+		spin_unlock(&dev->pending_lock);
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+		tx = ERR_PTR(-EBUSY);
+	}
 	return tx;
 }
 
+static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
+					 struct qib_qp *qp)
+{
+	struct qib_verbs_txreq *tx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->pending_lock, flags);
+	/* assume the list non empty */
+	if (likely(!list_empty(&dev->txreq_free))) {
+		struct list_head *l = dev->txreq_free.next;
+
+		list_del(l);
+		spin_unlock_irqrestore(&dev->pending_lock, flags);
+		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+	} else {
+		/* call slow path to get the extra lock */
+		spin_unlock_irqrestore(&dev->pending_lock, flags);
+		tx = __get_txreq(dev, qp);
+	}
+	return tx;
+}
+
@@ -1122,9 +1142,9 @@ static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
 		goto bail;
 	}
 
-	tx = get_txreq(dev, qp, &ret);
-	if (!tx)
-		goto bail;
+	tx = get_txreq(dev, qp);
+	if (IS_ERR(tx))
+		goto bail_tx;
 
 	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
 				       be16_to_cpu(hdr->lrh[0]) >> 12);
@@ -1195,6 +1215,9 @@ unaligned:
 	ibp->n_unaligned++;
 bail:
 	return ret;
+bail_tx:
+	ret = PTR_ERR(tx);
+	goto bail;
 }
 
 /*
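A side note on the error-handling change visible in the last three hunks:
get_txreq() previously reported failure through an int *retp out-parameter
plus a NULL return, and the patch switches it to the kernel's <linux/err.h>
convention, where a small negative errno is encoded in the returned pointer
itself and recovered with IS_ERR()/PTR_ERR(), as qib_verbs_send_dma() now
does at the bail_tx label.  A standalone sketch of that idiom (the helpers
are simplified copies of the kernel's, compiled here as plain userspace C):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095	/* errno values fit in the top 4095 addresses */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* error pointers live in the last page of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* toy allocator: returns a valid pointer or an encoded -EBUSY */
static void *get_thing(int fail)
{
	static int thing;

	return fail ? ERR_PTR(-EBUSY) : (void *)&thing;
}

int main(void)
{
	void *p = get_thing(1);

	if (IS_ERR(p))	/* one branch replaces the NULL check plus *retp */
		printf("failed: errno %ld\n", -PTR_ERR(p));
	return 0;
}

Folding the status into the pointer lets the fast path return a single
value and keeps the caller down to one test, which fits the intent of the
patch: make the common case as cheap as possible.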