Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 967bcfc0 authored by Mike Marciniszyn's avatar Mike Marciniszyn Committed by Doug Ledford
Browse files

IB/qib: Improve ipoib UD performance



Based on profiling, UD performance drops when multiple processes run
in a single client, due to excess context switches when
the progress workqueue is scheduled.

This is solved by modifying the heuristic to select direct
progress instead of scheduled progress via the workqueue
whenever UD-like situations are detected.

Reviewed-by: Vinit Agnihotri <vinit.abhay.agnihotri@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 4ed088e6
Loading
Loading
Loading
Loading
+8 −3
Original line number Diff line number Diff line
@@ -346,6 +346,7 @@ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
	unsigned long flags;
	struct qib_lkey_table *rkt;
	struct qib_pd *pd;
	int avoid_schedule = 0;

	spin_lock_irqsave(&qp->s_lock, flags);

@@ -438,11 +439,15 @@ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
	    qp->ibqp.qp_type == IB_QPT_RC) {
		if (wqe->length > 0x80000000U)
			goto bail_inval_free;
		if (wqe->length <= qp->pmtu)
			avoid_schedule = 1;
	} else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
				  qp->port_num - 1)->ibmtu)
				  qp->port_num - 1)->ibmtu) {
		goto bail_inval_free;
	else
	} else {
		atomic_inc(&to_iah(ud_wr(wr)->ah)->refcount);
		avoid_schedule = 1;
	}
	wqe->ssn = qp->s_ssn++;
	qp->s_head = next;

@@ -458,7 +463,7 @@ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
bail_inval:
	ret = -EINVAL;
bail:
	if (!ret && !wr->next &&
	if (!ret && !wr->next && !avoid_schedule &&
	 !qib_sdma_empty(
	   dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
		qib_schedule_send(qp);