Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2fd36865 authored by Mike Marciniszyn's avatar Mike Marciniszyn Committed by Greg Kroah-Hartman
Browse files

staging/rdma/hfi1: add common routine for queuing acks



This patch is a preliminary patch required to
coalesce acks.

The routine to "schedule" a QP for sending a NAK is
now centralized in rc_defer_ack().  The flag is changed
for clarity since all acks will potentially use
the deferral mechanism.

Reviewed-by: default avatarDennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: default avatarMike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 46b010d3
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -714,8 +714,8 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet)
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & HFI1_R_RSP_NAK) {
			qp->r_flags &= ~HFI1_R_RSP_NAK;
		if (qp->r_flags & HFI1_R_RSP_DEFERED_ACK) {
			qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK;
			hfi1_send_rc_ack(rcd, qp, 0);
		}
		if (qp->r_flags & HFI1_R_RSP_SEND) {
+15 −27
Original line number Diff line number Diff line
@@ -1608,6 +1608,16 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
	return;
}

/*
 * rc_defered_ack - queue a QP so its ack is sent later
 * @rcd: receive context whose wait list the QP joins
 * @qp:  QP whose ack should be deferred
 *
 * Marks the QP as having a deferred ack pending, takes a reference
 * on it, and appends it to the context's qp_wait_list.  A QP already
 * on a wait list (rspwait non-empty) is left untouched so it is never
 * queued, flagged, or referenced twice.
 */
static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct hfi1_qp *qp)
{
	/* Already waiting somewhere — nothing to do. */
	if (!list_empty(&qp->rspwait))
		return;

	qp->r_flags |= HFI1_R_RSP_DEFERED_ACK;
	atomic_inc(&qp->refcount);	/* dropped when the ack is sent */
	list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}

/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
@@ -1650,11 +1660,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			if (list_empty(&qp->rspwait)) {
				qp->r_flags |= HFI1_R_RSP_NAK;
				atomic_inc(&qp->refcount);
				list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
			}
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}
@@ -2337,11 +2343,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= HFI1_R_RSP_NAK;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	rc_defered_ack(rcd, qp);
	return;

nack_op_err:
@@ -2349,11 +2351,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= HFI1_R_RSP_NAK;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	rc_defered_ack(rcd, qp);
	return;

nack_inv_unlck:
@@ -2363,11 +2361,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= HFI1_R_RSP_NAK;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	rc_defered_ack(rcd, qp);
	return;

nack_acc_unlck:
@@ -2421,13 +2415,7 @@ void hfi1_rc_hdrerr(
			 * Otherwise, we end up
			 * propagating congestion.
			 */
			if (list_empty(&qp->rspwait)) {
				qp->r_flags |= HFI1_R_RSP_NAK;
				atomic_inc(&qp->refcount);
				list_add_tail(
					&qp->rspwait,
					&rcd->qp_wait_list);
				}
			rc_defered_ack(rcd, qp);
		} /* Out of sequence NAK */
	} /* QP Request NAKs */
}
+7 −5
Original line number Diff line number Diff line
@@ -555,7 +555,9 @@ struct hfi1_qp {
 */
#define HFI1_R_REUSE_SGE       0x01
#define HFI1_R_RDMAR_SEQ       0x02
#define HFI1_R_RSP_NAK   0x04
/* defer ack until end of interrupt session */
#define HFI1_R_RSP_DEFERED_ACK 0x04
/* relay ack to send engine */
#define HFI1_R_RSP_SEND        0x08
#define HFI1_R_COMM_EST        0x10