
Commit 846b9996 authored by Linus Torvalds
Pull infiniband/rdma fixes from Roland Dreier:
 "Grab bag of InfiniBand/RDMA fixes:
   - IPoIB fixes for regressions introduced by path database conversion
   - mlx4 fixes for bugs with large memory systems and regressions from
     SR-IOV patches
   - RDMA CM fix for passing bad event up to userspace
   - Other minor fixes"

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/mlx4: Check iboe netdev pointer before dereferencing it
  mlx4_core: Clean up buddy bitmap allocation
  mlx4_core: Fix integer overflow issues around MTT table
  mlx4_core: Allow large mlx4_buddy bitmaps
  IB/srp: Fix a race condition
  IB/qib: Fix error return code in qib_init_7322_variables()
  IB: Fix typos in infiniband drivers
  IB/ipoib: Fix RCU pointer dereference of wrong object
  IB/ipoib: Add missing locking when CM object is deleted
  RDMA/ucma.c: Fix for events with wrong context on iWARP
  RDMA/ocrdma: Don't call vlan_dev_real_dev() for non-VLAN netdevs
  IB/mlx4: Fix possible deadlock on sm_lock spinlock
parents 225a389b c0369b29
+1 −1
@@ -267,6 +267,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 	if (!uevent)
 		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
 
+	mutex_lock(&ctx->file->mut);
 	uevent->cm_id = cm_id;
 	ucma_set_event_context(ctx, event, uevent);
 	uevent->resp.event = event->event;
@@ -277,7 +278,6 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 		ucma_copy_conn_event(&uevent->resp.param.conn,
 				     &event->param.conn);
 
-	mutex_lock(&ctx->file->mut);
 	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 		if (!ctx->backlog) {
 			ret = -ENOMEM;
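
The hunks above are the "RDMA/ucma.c: Fix for events with wrong context on iWARP" change from the shortlog: ctx->file->mut is now taken before uevent->cm_id and the event context are filled in, rather than only before the event is queued, so the whole user event is initialized under the same mutex that protects the file's event list. As a rough illustration of that pattern (fill in every field of a shared object under the readers' lock before it becomes visible), here is a minimal kernel-style sketch; the demo_* names and types are hypothetical, not part of this commit, and the snippet is meant to be read rather than built standalone.

/* Hypothetical illustration, not from this commit: publish a complete event under the lock. */
struct demo_event {
	struct list_head list;
	void *context;	/* must be set before the event becomes reachable */
	int type;
};

static void demo_queue_event(struct mutex *mut, struct list_head *event_list,
			     struct demo_event *ev, void *ctx, int type)
{
	mutex_lock(mut);			/* take the readers' lock first ...        */
	ev->context = ctx;			/* ... then fill in every field ...        */
	ev->type = type;
	list_add_tail(&ev->list, event_list);	/* ... and only then make the event visible */
	mutex_unlock(mut);			/* readers never observe a half-built event */
}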
+1 −1
@@ -439,7 +439,7 @@ static int c2_rnic_close(struct c2_dev *c2dev)
 
 /*
  * Called by c2_probe to initialize the RNIC. This principally
- * involves initalizing the various limits and resouce pools that
+ * involves initializing the various limits and resource pools that
  * comprise the RNIC instance.
  */
 int __devinit c2_rnic_init(struct c2_dev *c2dev)
+1 −1
@@ -1680,7 +1680,7 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
  * T3A does 3 things when a TERM is received:
  * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
  * 2) generate an async event on the QP with the TERMINATE opcode
- * 3) post a TERMINATE opcde cqe into the associated CQ.
+ * 3) post a TERMINATE opcode cqe into the associated CQ.
  *
  * For (1), we save the message in the qp for later consumer consumption.
  * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
+10 −6
@@ -125,6 +125,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
 {
 	struct ib_ah *new_ah;
 	struct ib_ah_attr ah_attr;
+	unsigned long flags;
 
 	if (!dev->send_agent[port_num - 1][0])
 		return;
@@ -139,11 +140,11 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
 	if (IS_ERR(new_ah))
 		return;
 
-	spin_lock(&dev->sm_lock);
+	spin_lock_irqsave(&dev->sm_lock, flags);
 	if (dev->sm_ah[port_num - 1])
 		ib_destroy_ah(dev->sm_ah[port_num - 1]);
 	dev->sm_ah[port_num - 1] = new_ah;
-	spin_unlock(&dev->sm_lock);
+	spin_unlock_irqrestore(&dev->sm_lock, flags);
 }
 
 /*
@@ -197,13 +198,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
 static void node_desc_override(struct ib_device *dev,
 			       struct ib_mad *mad)
 {
+	unsigned long flags;
+
 	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
 	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
 	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
 	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
-		spin_lock(&to_mdev(dev)->sm_lock);
+		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
 		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
-		spin_unlock(&to_mdev(dev)->sm_lock);
+		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
 	}
 }
 
@@ -213,6 +216,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
 	struct ib_mad_send_buf *send_buf;
 	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
 	int ret;
+	unsigned long flags;
 
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
@@ -225,13 +229,13 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
 		 * wrong following the IB spec strictly, but we know
 		 * it's OK for our devices).
 		 */
-		spin_lock(&dev->sm_lock);
+		spin_lock_irqsave(&dev->sm_lock, flags);
 		memcpy(send_buf->mad, mad, sizeof *mad);
 		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
 			ret = ib_post_send_mad(send_buf, NULL);
 		else
 			ret = -EINVAL;
-		spin_unlock(&dev->sm_lock);
+		spin_unlock_irqrestore(&dev->sm_lock, flags);
 
 		if (ret)
 			ib_free_send_mad(send_buf);
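
These hunks implement the "IB/mlx4: Fix possible deadlock on sm_lock spinlock" item from the shortlog: every plain spin_lock()/spin_unlock() on sm_lock becomes spin_lock_irqsave()/spin_unlock_irqrestore(). The usual reason for such a conversion, and presumably the one here, is that the lock can also be contended from an interrupt-driven path; if an interrupt arrives on the same CPU while the lock is held with interrupts enabled and the handler tries to take it again, the CPU spins forever. Below is a minimal sketch of the pattern, with a hypothetical demo_lock standing in for sm_lock; it is not part of this commit.

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(demo_lock);	/* assumed to also be taken from an IRQ path */
static u64 demo_value;

/* Process-context writer: must save and disable local interrupts while holding the lock. */
static void demo_set_value(u64 v)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* saves IRQ state, disables local IRQs */
	demo_value = v;				/* critical section */
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* Interrupt-context reader: assuming the handler already runs with local IRQs off,
 * a plain lock is sufficient here. */
static u64 demo_get_value_from_irq(void)
{
	u64 v;

	spin_lock(&demo_lock);
	v = demo_value;
	spin_unlock(&demo_lock);
	return v;
}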
+3 −2
@@ -423,6 +423,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
 				 struct ib_device_modify *props)
 {
 	struct mlx4_cmd_mailbox *mailbox;
+	unsigned long flags;
 
 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
 		return -EOPNOTSUPP;
@@ -430,9 +431,9 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
 	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
 		return 0;
 
-	spin_lock(&to_mdev(ibdev)->sm_lock);
+	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
 	memcpy(ibdev->node_desc, props->node_desc, 64);
-	spin_unlock(&to_mdev(ibdev)->sm_lock);
+	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
 
 	/*
 	 * If possible, pass node desc to FW, so it can generate
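
The two hunks above complete the same sm_lock fix: mlx4_ib_modify_device(), the process-context path that copies a new node description into ibdev->node_desc, also gains a flags variable and switches to the irqsave/irqrestore variants, so every holder of sm_lock follows the convention sketched after the mad.c hunks.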