
Commit 7c2e11fe authored by Dennis Dalessandro, committed by Doug Ledford

IB/qib: Remove qp and mr functionality from qib



Remove qp and mr support from qib and use rdmavt. These two changes
cannot reasonably be split apart into separate patches because they
depend on each other in multiple places. This paves the way to remove
even more functions in subsequent patches.

Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 6a9df403
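
The substitution performed by the diffs below is largely mechanical: each qib-private verbs type gives way to its rdmavt counterpart. A minimal, purely illustrative sketch of the mapping follows; the struct is a stand-in for the retyped descriptors, not the actual qib header, and the rvt_* names are those introduced by the rdmavt headers.

/*
 * Illustrative only -- summary of the renames seen in the hunks below:
 *
 *   struct qib_qp          -> struct rvt_qp
 *   struct qib_swqe        -> struct rvt_swqe
 *   struct qib_mregion     -> struct rvt_mregion
 *   struct qib_sge_state   -> struct rvt_sge_state
 *   struct qib_lkey_table  -> struct rvt_lkey_table
 *   struct qib_mmap_info   -> struct rvt_mmap_info
 *   QIB_SEGSZ              -> RVT_SEGSZ
 *   ib_qib_lkey_table_size -> ib_rvt_lkey_table_size
 */
struct example_txreq {                  /* stand-in for struct qib_verbs_txreq */
	struct rvt_qp        *qp;       /* was: struct qib_qp * */
	struct rvt_swqe      *wqe;      /* was: struct qib_swqe * */
	struct rvt_mregion   *mr;       /* was: struct qib_mregion * */
	struct rvt_sge_state *ss;       /* was: struct qib_sge_state * */
};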
+2 −2
 obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o
 
 ib_qib-y := qib_cq.o qib_diag.o qib_driver.o qib_eeprom.o \
-	qib_file_ops.o qib_fs.o qib_init.o qib_intr.o qib_keys.o \
-	qib_mad.o qib_mmap.o qib_mr.o qib_pcie.o qib_pio_copy.o \
+	qib_file_ops.o qib_fs.o qib_init.o qib_intr.o \
+	qib_mad.o qib_mmap.o qib_pcie.o qib_pio_copy.o \
 	qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \
 	qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \
 	qib_user_pages.o qib_user_sdma.o qib_verbs_mcast.o qib_iba7220.o \
+7 −7
@@ -231,7 +231,7 @@ struct qib_ctxtdata {
 	/* ctxt rcvhdrq head offset */
 	u32 head;
 	/* lookaside fields */
-	struct qib_qp *lookaside_qp;
+	struct rvt_qp *lookaside_qp;
 	u32 lookaside_qpn;
 	/* QPs waiting for context processing */
 	struct list_head qp_wait_list;
@@ -241,7 +241,7 @@ struct qib_ctxtdata {
 #endif
 };
 
-struct qib_sge_state;
+struct rvt_sge_state;
 
 struct qib_sdma_txreq {
 	int                 flags;
@@ -259,14 +259,14 @@ struct qib_sdma_desc {
 
 struct qib_verbs_txreq {
 	struct qib_sdma_txreq   txreq;
-	struct qib_qp           *qp;
-	struct qib_swqe         *wqe;
+	struct rvt_qp           *qp;
+	struct rvt_swqe         *wqe;
 	u32                     dwords;
 	u16                     hdr_dwords;
 	u16                     hdr_inx;
 	struct qib_pio_header	*align_buf;
-	struct qib_mregion	*mr;
-	struct qib_sge_state    *ss;
+	struct rvt_mregion	*mr;
+	struct rvt_sge_state    *ss;
 };
 
 #define QIB_SDMA_TXREQ_F_USELARGEBUF  0x1
@@ -1324,7 +1324,7 @@ void __qib_sdma_intr(struct qib_pportdata *);
 void qib_sdma_intr(struct qib_pportdata *);
 void qib_user_sdma_send_desc(struct qib_pportdata *dd,
 			struct list_head *pktlist);
-int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
+int qib_sdma_verbs_send(struct qib_pportdata *, struct rvt_sge_state *,
 			u32, struct qib_verbs_txreq *);
 /* ppd->sdma_lock should be locked before calling this. */
 int qib_sdma_make_progress(struct qib_pportdata *dd);
+1 −1
@@ -466,7 +466,7 @@ int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 
 	if (cq->ip) {
 		struct qib_ibdev *dev = to_idev(ibcq->device);
-		struct qib_mmap_info *ip = cq->ip;
+		struct rvt_mmap_info *ip = cq->ip;
 
 		qib_update_mmap_info(dev, ip, sz, wc);
 
+2 −2
@@ -322,7 +322,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
 		struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
 		struct qib_other_headers *ohdr = NULL;
 		struct qib_ibport *ibp = &ppd->ibport_data;
-		struct qib_qp *qp = NULL;
+		struct rvt_qp *qp = NULL;
 		u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
 		u16 lid  = be16_to_cpu(hdr->lrh[1]);
 		int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
@@ -472,7 +472,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 	u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
 	int last;
 	u64 lval;
-	struct qib_qp *qp, *nqp;
+	struct rvt_qp *qp, *nqp;
 
 	l = rcd->head;
 	rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
+15 −169
@@ -46,20 +46,20 @@
  *
  */
 
-int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
+int qib_alloc_lkey(struct rvt_mregion *mr, int dma_region)
 {
 	unsigned long flags;
 	u32 r;
 	u32 n;
 	int ret = 0;
 	struct qib_ibdev *dev = to_idev(mr->pd->device);
-	struct qib_lkey_table *rkt = &dev->lk_table;
+	struct rvt_lkey_table *rkt = &dev->lk_table;
 
 	spin_lock_irqsave(&rkt->lock, flags);
 
 	/* special case for dma_mr lkey == 0 */
 	if (dma_region) {
-		struct qib_mregion *tmr;
+		struct rvt_mregion *tmr;
 
 		tmr = rcu_access_pointer(dev->dma_mr);
 		if (!tmr) {
@@ -90,8 +90,8 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
 	 * bits are capped in qib_verbs.c to insure enough bits
 	 * for generation number
 	 */
-	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
-		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
+	mr->lkey = (r << (32 - ib_rvt_lkey_table_size)) |
+		((((1 << (24 - ib_rvt_lkey_table_size)) - 1) & rkt->gen)
 		 << 8);
 	if (mr->lkey == 0) {
 		mr->lkey |= 1 << 8;
@@ -114,13 +114,13 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
  * qib_free_lkey - free an lkey
  * @mr: mr to free from tables
  */
-void qib_free_lkey(struct qib_mregion *mr)
+void qib_free_lkey(struct rvt_mregion *mr)
 {
 	unsigned long flags;
 	u32 lkey = mr->lkey;
 	u32 r;
 	struct qib_ibdev *dev = to_idev(mr->pd->device);
-	struct qib_lkey_table *rkt = &dev->lk_table;
+	struct rvt_lkey_table *rkt = &dev->lk_table;
 
 	spin_lock_irqsave(&rkt->lock, flags);
 	if (!mr->lkey_published)
@@ -128,7 +128,7 @@ void qib_free_lkey(struct qib_mregion *mr)
 	if (lkey == 0)
 		RCU_INIT_POINTER(dev->dma_mr, NULL);
 	else {
-		r = lkey >> (32 - ib_qib_lkey_table_size);
+		r = lkey >> (32 - ib_rvt_lkey_table_size);
 		RCU_INIT_POINTER(rkt->table[r], NULL);
 	}
 	qib_put_mr(mr);
@@ -137,105 +137,6 @@ void qib_free_lkey(struct qib_mregion *mr)
 	spin_unlock_irqrestore(&rkt->lock, flags);
 }
 
-/**
- * qib_lkey_ok - check IB SGE for validity and initialize
- * @rkt: table containing lkey to check SGE against
- * @pd: protection domain
- * @isge: outgoing internal SGE
- * @sge: SGE to check
- * @acc: access flags
- *
- * Return 1 if valid and successful, otherwise returns 0.
- *
- * increments the reference count upon success
- *
- * Check the IB SGE for validity and initialize our internal version
- * of it.
- */
-int qib_lkey_ok(struct qib_lkey_table *rkt, struct rvt_pd *pd,
-		struct qib_sge *isge, struct ib_sge *sge, int acc)
-{
-	struct qib_mregion *mr;
-	unsigned n, m;
-	size_t off;
-
-	/*
-	 * We use LKEY == zero for kernel virtual addresses
-	 * (see qib_get_dma_mr and qib_dma.c).
-	 */
-	rcu_read_lock();
-	if (sge->lkey == 0) {
-		struct qib_ibdev *dev = to_idev(pd->ibpd.device);
-
-		if (pd->user)
-			goto bail;
-		mr = rcu_dereference(dev->dma_mr);
-		if (!mr)
-			goto bail;
-		if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
-			goto bail;
-		rcu_read_unlock();
-
-		isge->mr = mr;
-		isge->vaddr = (void *) sge->addr;
-		isge->length = sge->length;
-		isge->sge_length = sge->length;
-		isge->m = 0;
-		isge->n = 0;
-		goto ok;
-	}
-	mr = rcu_dereference(
-		rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]);
-	if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
-		goto bail;
-
-	off = sge->addr - mr->user_base;
-	if (unlikely(sge->addr < mr->user_base ||
-		     off + sge->length > mr->length ||
-		     (mr->access_flags & acc) != acc))
-		goto bail;
-	if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
-		goto bail;
-	rcu_read_unlock();
-
-	off += mr->offset;
-	if (mr->page_shift) {
-		/*
-		page sizes are uniform power of 2 so no loop is necessary
-		entries_spanned_by_off is the number of times the loop below
-		would have executed.
-		*/
-		size_t entries_spanned_by_off;
-
-		entries_spanned_by_off = off >> mr->page_shift;
-		off -= (entries_spanned_by_off << mr->page_shift);
-		m = entries_spanned_by_off/QIB_SEGSZ;
-		n = entries_spanned_by_off%QIB_SEGSZ;
-	} else {
-		m = 0;
-		n = 0;
-		while (off >= mr->map[m]->segs[n].length) {
-			off -= mr->map[m]->segs[n].length;
-			n++;
-			if (n >= QIB_SEGSZ) {
-				m++;
-				n = 0;
-			}
-		}
-	}
-	isge->mr = mr;
-	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
-	isge->length = mr->map[m]->segs[n].length - off;
-	isge->sge_length = sge->length;
-	isge->m = m;
-	isge->n = n;
-ok:
-	return 1;
-bail:
-	rcu_read_unlock();
-	return 0;
-}
-
 /**
  * qib_rkey_ok - check the IB virtual address, length, and RKEY
  * @qp: qp for validation
@@ -249,11 +150,11 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct rvt_pd *pd,
  *
  * increments the reference count upon success
  */
-int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+int qib_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
 		u32 len, u64 vaddr, u32 rkey, int acc)
 {
-	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
-	struct qib_mregion *mr;
+	struct rvt_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+	struct rvt_mregion *mr;
 	unsigned n, m;
 	size_t off;
 
@@ -285,7 +186,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	}
 
 	mr = rcu_dereference(
-		rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]);
+		rkt->table[(rkey >> (32 - ib_rvt_lkey_table_size))]);
 	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
 		goto bail;
 
@@ -308,15 +209,15 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 
 		entries_spanned_by_off = off >> mr->page_shift;
 		off -= (entries_spanned_by_off << mr->page_shift);
-		m = entries_spanned_by_off/QIB_SEGSZ;
-		n = entries_spanned_by_off%QIB_SEGSZ;
+		m = entries_spanned_by_off / RVT_SEGSZ;
+		n = entries_spanned_by_off % RVT_SEGSZ;
 	} else {
 		m = 0;
 		n = 0;
 		while (off >= mr->map[m]->segs[n].length) {
 			off -= mr->map[m]->segs[n].length;
 			n++;
-			if (n >= QIB_SEGSZ) {
+			if (n >= RVT_SEGSZ) {
 				m++;
 				n = 0;
 			}
@@ -335,58 +236,3 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	return 0;
 }
-/*
- * Initialize the memory region specified by the work request.
- */
-int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr)
-{
-	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
-	struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
-	struct qib_mr *mr = to_imr(wr->mr);
-	struct qib_mregion *mrg;
-	u32 key = wr->key;
-	unsigned i, n, m;
-	int ret = -EINVAL;
-	unsigned long flags;
-	u64 *page_list;
-	size_t ps;
-
-	spin_lock_irqsave(&rkt->lock, flags);
-	if (pd->user || key == 0)
-		goto bail;
-
-	mrg = rcu_dereference_protected(
-		rkt->table[(key >> (32 - ib_qib_lkey_table_size))],
-		lockdep_is_held(&rkt->lock));
-	if (unlikely(mrg == NULL || qp->ibqp.pd != mrg->pd))
-		goto bail;
-
-	if (mr->npages > mrg->max_segs)
-		goto bail;
-
-	ps = mr->ibmr.page_size;
-	if (mr->ibmr.length > ps * mr->npages)
-		goto bail;
-
-	mrg->user_base = mr->ibmr.iova;
-	mrg->iova = mr->ibmr.iova;
-	mrg->lkey = key;
-	mrg->length = mr->ibmr.length;
-	mrg->access_flags = wr->access;
-	page_list = mr->pages;
-	m = 0;
-	n = 0;
-	for (i = 0; i < mr->npages; i++) {
-		mrg->map[m]->segs[n].vaddr = (void *) page_list[i];
-		mrg->map[m]->segs[n].length = ps;
-		if (++n == QIB_SEGSZ) {
-			m++;
-			n = 0;
-		}
-	}
-
-	ret = 0;
-bail:
-	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
-}
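
Two pieces of arithmetic in this file survive the rename and are worth unpacking: qib_alloc_lkey() packs the table slot into the high bits of the lkey with a capped generation counter starting at bit 8, and the page-shift fast path in qib_rkey_ok() turns a byte offset into (map, segment) indices with a divide and a modulo by RVT_SEGSZ. Below is a small, self-contained sketch of both computations; the table size, segment count, and sample values are assumptions chosen for illustration, not the driver's actual configuration.

#include <stdio.h>
#include <stdint.h>

/* Assumed example values -- stand-ins for ib_rvt_lkey_table_size and RVT_SEGSZ. */
#define LKEY_TABLE_SIZE 16
#define SEGSZ           64

int main(void)
{
	uint32_t r = 5;          /* table slot handed out for this MR */
	uint32_t gen = 3;        /* per-table generation counter */

	/* Same packing as qib_alloc_lkey(): slot in the top bits, a capped
	 * generation number starting at bit 8. */
	uint32_t lkey = (r << (32 - LKEY_TABLE_SIZE)) |
			((((1u << (24 - LKEY_TABLE_SIZE)) - 1) & gen) << 8);

	/* qib_rkey_ok() recovers the slot by shifting the high bits back down. */
	uint32_t slot = lkey >> (32 - LKEY_TABLE_SIZE);

	/* Page-shift fast path: a byte offset becomes (map, segment) indices. */
	unsigned page_shift = 12;               /* 4 KiB pages */
	size_t off = 5 * 4096 + 100;            /* offset into the region */
	size_t entries = off >> page_shift;     /* whole pages spanned by off */
	off -= entries << page_shift;           /* remainder within the page */
	unsigned m = entries / SEGSZ;           /* which map chunk */
	unsigned n = entries % SEGSZ;           /* which segment in that chunk */

	printf("lkey=0x%08x slot=%u m=%u n=%u off=%zu\n",
	       (unsigned)lkey, (unsigned)slot, m, n, off);
	return 0;
}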