Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4c6829c5 authored by Dennis Dalessandro, committed by Doug Ledford
Browse files

staging/rdma/hfi1: Remove driver specific members from hfi1 qp type



In preparation for moving the queue pair data structure to rdmavt, the
members of the driver-specific queue pairs which are not common need to be
pushed off to a private driver structure. This structure will be available
in the queue pair, once moved to rdmavt, as a void pointer. This patch,
while not adding a lot of value in and of itself, is a prerequisite to
moving the queue pair out of the drivers and into rdmavt.

The driver specific, private queue pair data structure should condense as
more of the send side code moves to rdmavt.

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent cd4ceee3
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -1619,7 +1619,8 @@ int snoop_send_dma_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
			   u64 pbc)
{
	struct ahg_ib_header *ahdr = qp->s_hdr;
	struct hfi1_qp_priv *priv = qp->priv;
	struct ahg_ib_header *ahdr = priv->s_hdr;
	u32 hdrwords = qp->s_hdrwords;
	struct hfi1_sge_state *ss = qp->s_cur_sge;
	u32 len = qp->s_cur_size;
+4 −2
Original line number Diff line number Diff line
@@ -1528,6 +1528,7 @@ static void sc_piobufavail(struct send_context *sc)
	struct list_head *list;
	struct hfi1_qp *qps[PIO_WAIT_BATCH_SIZE];
	struct hfi1_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	unsigned i, n = 0;

@@ -1547,8 +1548,9 @@ static void sc_piobufavail(struct send_context *sc)
		if (n == ARRAY_SIZE(qps))
			goto full;
		wait = list_first_entry(list, struct iowait, list);
		qp = container_of(wait, struct hfi1_qp, s_iowait);
		list_del_init(&qp->s_iowait.list);
		qp = iowait_to_qp(wait);
		priv = qp->priv;
		list_del_init(&priv->s_iowait.list);
		/* refcount held until actual wake up */
		qps[n++] = qp;
	}
+50 −28
Original line number Diff line number Diff line
@@ -349,11 +349,12 @@ static unsigned free_all_qps(struct hfi1_devdata *dd)
 */
static void reset_qp(struct hfi1_qp *qp, enum ib_qp_type type)
{
	struct hfi1_qp_priv *priv = qp->priv;
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	iowait_init(
		&qp->s_iowait,
		&priv->s_iowait,
		1,
		hfi1_do_send,
		iowait_sleep,
@@ -378,7 +379,7 @@ static void reset_qp(struct hfi1_qp *qp, enum ib_qp_type type)
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_adefered = 0;
	priv->r_adefered = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
@@ -460,6 +461,7 @@ static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends)
int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_wc wc;
	int ret = 0;

@@ -477,9 +479,9 @@ int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err)
		qp->s_flags &= ~HFI1_S_ANY_WAIT_SEND;

	write_seqlock(&dev->iowait_lock);
	if (!list_empty(&qp->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) {
	if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) {
		qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
		list_del_init(&qp->s_iowait.list);
		list_del_init(&priv->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
@@ -544,11 +546,13 @@ int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err)

static void flush_tx_list(struct hfi1_qp *qp)
{
	while (!list_empty(&qp->s_iowait.tx_head)) {
	struct hfi1_qp_priv *priv = qp->priv;

	while (!list_empty(&priv->s_iowait.tx_head)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			&qp->s_iowait.tx_head,
			&priv->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
@@ -559,12 +563,13 @@ static void flush_tx_list(struct hfi1_qp *qp)

static void flush_iowait(struct hfi1_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(&qp->s_iowait.list)) {
		list_del_init(&qp->s_iowait.list);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
@@ -612,6 +617,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
{
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_qp *qp = to_iqp(ibqp);
	struct hfi1_qp_priv *priv = qp->priv;
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
@@ -738,9 +744,9 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&qp->s_iowait.iowork);
			cancel_work_sync(&priv->s_iowait.iowork);
			del_timer_sync(&qp->s_timer);
			iowait_sdma_drain(&qp->s_iowait);
			iowait_sdma_drain(&priv->s_iowait);
			flush_tx_list(qp);
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
@@ -805,8 +811,8 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
		qp->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
@@ -821,8 +827,8 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
			qp->s_flags |= HFI1_S_AHG_CLEAR;
			qp->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
			qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
			priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
			priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		}
	}

@@ -1031,6 +1037,7 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
			     struct ib_udata *udata)
{
	struct hfi1_qp *qp;
	struct hfi1_qp_priv *priv;
	int err;
	struct hfi1_swqe *swq = NULL;
	struct hfi1_ibdev *dev;
@@ -1098,11 +1105,18 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
		if (!qp->s_hdr) {
		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp_priv;
		}
		priv->owner = qp;
		priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
		if (!priv->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
@@ -1245,7 +1259,9 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(qp->s_hdr);
	kfree(priv->s_hdr);
	kfree(priv);
bail_qp_priv:
	kfree(qp);
bail_swq:
	vfree(swq);
@@ -1266,6 +1282,7 @@ int hfi1_destroy_qp(struct ib_qp *ibqp)
{
	struct hfi1_qp *qp = to_iqp(ibqp);
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_qp_priv *priv = qp->priv;

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->r_lock);
@@ -1276,9 +1293,9 @@ int hfi1_destroy_qp(struct ib_qp *ibqp)
		qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock_irq(&qp->r_lock);
		cancel_work_sync(&qp->s_iowait.iowork);
		cancel_work_sync(&priv->s_iowait.iowork);
		del_timer_sync(&qp->s_timer);
		iowait_sdma_drain(&qp->s_iowait);
		iowait_sdma_drain(&priv->s_iowait);
		flush_tx_list(qp);
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
@@ -1301,7 +1318,8 @@ int hfi1_destroy_qp(struct ib_qp *ibqp)
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp->s_hdr);
	kfree(priv->s_hdr);
	kfree(priv);
	kfree(qp);
	return 0;
}
@@ -1422,11 +1440,13 @@ static int iowait_sleep(
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct hfi1_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;
	struct hfi1_ibdev *dev;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) {
@@ -1442,13 +1462,13 @@ static int iowait_sleep(
		write_seqlock(&dev->iowait_lock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&qp->s_iowait.list)) {
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->n_dmawait++;
			qp->s_flags |= HFI1_S_WAIT_DMA_DESC;
			list_add_tail(&qp->s_iowait.list, &sde->dmawait);
			list_add_tail(&priv->s_iowait.list, &sde->dmawait);
			trace_hfi1_qpsleep(qp, HFI1_S_WAIT_DMA_DESC);
			atomic_inc(&qp->refcount);
		}
@@ -1470,7 +1490,7 @@ static int iowait_sleep(

static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct hfi1_qp *qp = container_of(wait, struct hfi1_qp, s_iowait);
	struct hfi1_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, HFI1_S_WAIT_DMA_DESC);
@@ -1651,9 +1671,10 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
	struct hfi1_swqe *wqe;
	struct hfi1_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;

	sde = qp_to_sdma_engine(qp, qp->s_sc);
	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %d %u %u %u SDE %p,%u\n",
@@ -1666,8 +1687,8 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&qp->s_iowait.sdma_busy),
		   !list_empty(&qp->s_iowait.list),
		   atomic_read(&priv->s_iowait.sdma_busy),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
@@ -1706,6 +1727,7 @@ void qp_comm_est(struct hfi1_qp *qp)
 */
void hfi1_migrate_qp(struct hfi1_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
@@ -1713,8 +1735,8 @@ void hfi1_migrate_qp(struct hfi1_qp *qp)
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= HFI1_S_AHG_CLEAR;
	qp->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
+9 −6
Original line number Diff line number Diff line
@@ -123,10 +123,12 @@ static inline struct hfi1_qp *hfi1_lookup_qpn(struct hfi1_ibport *ibp,
 */
static inline void clear_ahg(struct hfi1_qp *qp)
{
	qp->s_hdr->ahgcount = 0;
	struct hfi1_qp_priv *priv = qp->priv;

	priv->s_hdr->ahgcount = 0;
	qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR);
	if (qp->s_sde && qp->s_ahgidx >= 0)
		sdma_ahg_free(qp->s_sde, qp->s_ahgidx);
	if (priv->s_sde && qp->s_ahgidx >= 0)
		sdma_ahg_free(priv->s_sde, qp->s_ahgidx);
	qp->s_ahgidx = -1;
}

@@ -257,14 +259,15 @@ void qp_comm_est(struct hfi1_qp *qp);
 */
static inline void _hfi1_schedule_send(struct hfi1_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	iowait_schedule(&qp->s_iowait, ppd->hfi1_wq,
			qp->s_sde ?
			qp->s_sde->cpu :
	iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
			priv->s_sde ?
			priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(dd->assigned_node_id)));
}

+11 −6
Original line number Diff line number Diff line
@@ -259,6 +259,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct hfi1_qp *qp,
 */
int hfi1_make_rc_req(struct hfi1_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_other_headers *ohdr;
	struct hfi1_sge_state *ss;
@@ -275,9 +276,9 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
	int middle = 0;
	int delta;

	ohdr = &qp->s_hdr->ibh.u.oth;
	ohdr = &priv->s_hdr->ibh.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr->ibh.u.l.oth;
		ohdr = &priv->s_hdr->ibh.u.l.oth;

	/*
	 * The lock is needed to synchronize between the sending tasklet,
@@ -297,7 +298,7 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&qp->s_iowait.sdma_busy)) {
		if (atomic_read(&priv->s_iowait.sdma_busy)) {
			qp->s_flags |= HFI1_S_WAIT_DMA;
			goto bail;
		}
@@ -1620,7 +1621,9 @@ static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,

static inline void rc_cancel_ack(struct hfi1_qp *qp)
{
	qp->r_adefered = 0;
	struct hfi1_qp_priv *priv = qp->priv;

	priv->r_adefered = 0;
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
@@ -2347,11 +2350,13 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & IB_BTH_REQ_ACK) {
		struct hfi1_qp_priv *priv = qp->priv;

		if (packet->numpkt == 0) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (qp->r_adefered >= HFI1_PSN_CREDIT) {
		if (priv->r_adefered >= HFI1_PSN_CREDIT) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
@@ -2359,7 +2364,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
			rc_cancel_ack(qp);
			goto send_ack;
		}
		qp->r_adefered++;
		priv->r_adefered++;
		rc_defered_ack(rcd, qp);
	}
	return;
Loading