
Commit 184e2516 authored by Linus Torvalds
Pull more infiniband changes from Roland Dreier:
 "Second batch of InfiniBand/RDMA changes for 3.8:
   - cxgb4 changes to fix lookup engine hash collisions
   - mlx4 changes to make flow steering usable
   - fix to IPoIB to avoid pinning dst reference for too long"

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/cxgb4: Fix bug for active and passive LE hash collision path
  RDMA/cxgb4: Fix LE hash collision bug for passive open connection
  RDMA/cxgb4: Fix LE hash collision bug for active open connection
  mlx4_core: Allow choosing flow steering mode
  mlx4_core: Adjustments to Flow Steering activation logic for SR-IOV
  mlx4_core: Fix error flow in the flow steering wrapper
  mlx4_core: Add QPN enforcement for flow steering rules set by VFs
  cxgb4: Add LE hash collision bug fix path in LLD driver
  cxgb4: Add T4 filter support
  IPoIB: Call skb_dst_drop() once skb is enqueued for sending
parents 0264405b d72623b6
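
The IPoIB item in the summary above ("avoid pinning dst reference for too long") comes down to releasing the skb's socket and routing-cache (dst) references as soon as the packet has been handed to the send queue, rather than holding them until the TX completion fires. Below is a minimal sketch of that pattern; skb_orphan() and skb_dst_drop() are the real kernel helpers involved, while the ring-posting function and transmit routine names are hypothetical stand-ins for a driver's send path (the actual IPoIB hunks appear at the end of this diff).

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper standing in for "post the skb to the NIC ring". */
extern void post_to_hw_ring(struct net_device *dev, struct sk_buff *skb);

/*
 * Sketch only: once the hardware owns the skb, drop the references that
 * would otherwise stay pinned until the TX completion is processed.
 */
static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
	post_to_hw_ring(dev, skb);

	skb_orphan(skb);	/* detach the skb from its owning socket */
	skb_dst_drop(skb);	/* release the routing (dst) entry reference */

	return NETDEV_TX_OK;
}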
+682 −109 (diff collapsed: preview size limit exceeded)

+204 −6
@@ -279,6 +279,11 @@ static int stats_show(struct seq_file *seq, void *v)
	seq_printf(seq, " DB State: %s Transitions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	return 0;
}

@@ -309,6 +314,9 @@ static ssize_t stats_clear(struct file *file, const char __user *buf,
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}
@@ -322,6 +330,113 @@ static const struct file_operations stats_debugfs_fops = {
	.write   = stats_clear,
};

static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(epd->buf + epd->pos, space,
			"ep %p cm_id %p qp %p state %d flags 0x%lx history 0x%lx "
			"hwtid %d atid %d %pI4:%d <-> %pI4:%d\n",
			ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state,
			ep->com.flags, ep->com.history, ep->hwtid, ep->atid,
			&ep->com.local_addr.sin_addr.s_addr,
			ntohs(ep->com.local_addr.sin_port),
			&ep->com.remote_addr.sin_addr.s_addr,
			ntohs(ep->com.remote_addr.sin_port));
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(epd->buf + epd->pos, space,
			"ep %p cm_id %p state %d flags 0x%lx stid %d backlog %d "
			"%pI4:%d\n", ep, ep->com.cm_id, (int)ep->com.state,
			ep->com.flags, ep->stid, ep->backlog,
			&ep->com.local_addr.sin_addr.s_addr,
			ntohs(ep->com.local_addr.sin_port));
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;
	if (!epd) {
		pr_info("%s null qpd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 160;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}

static const struct file_operations ep_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = ep_open,
	.release = ep_release,
	.read    = debugfs_read,
};

static int setup_debugfs(struct c4iw_dev *devp)
{
	struct dentry *de;
@@ -344,6 +459,11 @@ static int setup_debugfs(struct c4iw_dev *devp)
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
			(void *)devp, &ep_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	return 0;
}

@@ -475,6 +595,9 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
@@ -532,6 +655,9 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
@@ -577,14 +703,76 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
	return ctx;
}

static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	 __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		   sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *      rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			   const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl , rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
		       opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}

static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	const struct cpl_act_establish *rpl;
	u8 opcode;
	unsigned int opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
@@ -600,6 +788,18 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, " \
		       "RSS %#llx, FL %#llx, len %u\n",
		       pci_name(ctx->lldi.pdev), gl->va,
		       (unsigned long long)be64_to_cpu(*rsp),
		       (unsigned long long)be64_to_cpu(*(u64 *)gl->va),
		       gl->tot_len);

		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
@@ -607,13 +807,11 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			goto nomem;
	}

	rpl = cplhdr(skb);
	opcode = *(u8 *)rsp;
	opcode = rpl->ot.opcode;

	if (c4iw_handlers[opcode])
		c4iw_handlers[opcode](dev, skb);
	else
		printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
		pr_info("%s no handler opcode 0x%x...\n", __func__,
		       opcode);

	return 0;
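
For reference, the new "eps" debugfs entry registered above exposes one text line per active, passive, and listening endpoint, as formatted by dump_ep() and dump_listen_ep(). A small userspace sketch of reading such an entry follows; the debugfs mount point and the per-device directory name below are assumptions, not paths taken from the driver, since the entry actually lives under the device's debugfs_root.

#include <stdio.h>

int main(void)
{
	char line[256];
	/* Hypothetical path: assumes debugfs is mounted at /sys/kernel/debug
	 * and a per-device directory named after the PCI function. */
	FILE *f = fopen("/sys/kernel/debug/iw_cxgb4/0000:03:00.4/eps", "r");

	if (!f) {
		perror("open eps");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one endpoint per line */
	fclose(f);
	return 0;
}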
+33 −0
@@ -130,6 +130,9 @@ struct c4iw_stats {
	u64  db_empty;
	u64  db_drop;
	u64  db_state_transitions;
	u64  tcam_full;
	u64  act_ofld_conn_fails;
	u64  pas_ofld_conn_fails;
};

struct c4iw_rdev {
@@ -223,6 +226,9 @@ struct c4iw_dev {
	struct dentry *debugfs_root;
	enum db_state db_state;
	int qpcnt;
	struct idr hwtid_idr;
	struct idr atid_idr;
	struct idr stid_idr;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -712,6 +718,31 @@ enum c4iw_ep_flags {
	CLOSE_SENT		= 3,
};

enum c4iw_ep_history {
	ACT_OPEN_REQ            = 0,
	ACT_OFLD_CONN           = 1,
	ACT_OPEN_RPL            = 2,
	ACT_ESTAB               = 3,
	PASS_ACCEPT_REQ         = 4,
	PASS_ESTAB              = 5,
	ABORT_UPCALL            = 6,
	ESTAB_UPCALL            = 7,
	CLOSE_UPCALL            = 8,
	ULP_ACCEPT              = 9,
	ULP_REJECT              = 10,
	TIMEDOUT                = 11,
	PEER_ABORT              = 12,
	PEER_CLOSE              = 13,
	CONNREQ_UPCALL          = 14,
	ABORT_CONN              = 15,
	DISCONN_UPCALL          = 16,
	EP_DISC_CLOSE           = 17,
	EP_DISC_ABORT           = 18,
	CONN_RPL_UPCALL         = 19,
	ACT_RETRY_NOMEM         = 20,
	ACT_RETRY_INUSE         = 21
};

struct c4iw_ep_common {
	struct iw_cm_id *cm_id;
	struct c4iw_qp *qp;
@@ -723,6 +754,7 @@ struct c4iw_ep_common {
	struct sockaddr_in remote_addr;
	struct c4iw_wr_wait wr_wait;
	unsigned long flags;
	unsigned long history;
};

struct c4iw_listen_ep {
@@ -760,6 +792,7 @@ struct c4iw_ep {
	u8 tos;
	u8 retry_with_mpa_v1;
	u8 tried_with_mpa_v1;
	unsigned int retry_count;
};

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
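
The new history member added to c4iw_ep_common above is an unsigned long used as a bitmask indexed by the c4iw_ep_history values, giving each endpoint a compact trace of the connection-setup events it has passed through. The short sketch below only illustrates that bitmask idea; the enum values are copied from the header, but ep_record() and the printout are illustrative helpers, not driver code.

#include <stdio.h>

/* A few of the c4iw_ep_history bit positions from the header above. */
enum ep_history { ACT_OPEN_REQ = 0, ACT_OPEN_RPL = 2, ACT_ESTAB = 3 };

/* Record an event by setting its bit in the endpoint's history mask. */
static void ep_record(unsigned long *history, enum ep_history ev)
{
	*history |= 1UL << ev;
}

int main(void)
{
	unsigned long history = 0;

	ep_record(&history, ACT_OPEN_REQ);
	ep_record(&history, ACT_OPEN_RPL);
	ep_record(&history, ACT_ESTAB);

	/* 0x1 | 0x4 | 0x8 == 0xd */
	printf("ep history 0x%lx\n", history);
	return 0;
}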
+3 −0
@@ -752,6 +752,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
		dev->trans_start = jiffies;
		++tx->tx_head;

		skb_orphan(skb);
		skb_dst_drop(skb);

		if (++priv->tx_outstanding == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
+2 −1
@@ -615,8 +615,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,

		address->last_send = priv->tx_head;
		++priv->tx_head;
		skb_orphan(skb);

		skb_orphan(skb);
		skb_dst_drop(skb);
	}

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))