Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bb5204c2, authored by Linus Torvalds
Browse files
Pull IB regression fixes from Roland Dreier:

 - Fix mlx4 VFs not working on old guests because of 64B CQE changes

 - Fix ill-considered sparse fix for qib

 - Fix IPoIB crash due to skb double destruct introduced in 3.8-rc1

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/qib: Fix for broken sparse warning fix
  mlx4_core: Fix advertisement of wrong PF context behaviour
  IPoIB: Fix crash due to skb double destruct
parents 8d19514f cbdba97a
Loading
Loading
Loading
Loading
+3 −8
@@ -263,20 +263,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 		struct qib_qp __rcu **qpp;
 
 		qpp = &dev->qp_table[n];
-		q = rcu_dereference_protected(*qpp,
-			lockdep_is_held(&dev->qpt_lock));
-		for (; q; qpp = &q->next) {
+		for (; (q = rcu_dereference_protected(*qpp,
+				lockdep_is_held(&dev->qpt_lock))) != NULL;
+				qpp = &q->next)
 			if (q == qp) {
 				atomic_dec(&qp->refcount);
 				*qpp = qp->next;
 				rcu_assign_pointer(qp->next, NULL);
-				q = rcu_dereference_protected(*qpp,
-					lockdep_is_held(&dev->qpt_lock));
 				break;
 			}
-			q = rcu_dereference_protected(*qpp,
-				lockdep_is_held(&dev->qpt_lock));
-		}
 	}
 
 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+3 −3
@@ -741,6 +741,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 
 	tx_req->mapping = addr;
 
+	skb_orphan(skb);
+	skb_dst_drop(skb);
+
 	rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
 		       addr, skb->len);
 	if (unlikely(rc)) {
@@ -752,9 +755,6 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 		dev->trans_start = jiffies;
 		++tx->tx_head;
 
-		skb_orphan(skb);
-		skb_dst_drop(skb);
-
 		if (++priv->tx_outstanding == ipoib_sendq_size) {
 			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
 				  tx->qp->qp_num);
+3 −3
@@ -600,6 +600,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		netif_stop_queue(dev);
 	}
 
+	skb_orphan(skb);
+	skb_dst_drop(skb);
+
 	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
 		       address->ah, qpn, tx_req, phead, hlen);
 	if (unlikely(rc)) {
@@ -615,9 +618,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 
 		address->last_send = priv->tx_head;
 		++priv->tx_head;
-
-		skb_orphan(skb);
-		skb_dst_drop(skb);
 	}
 
 	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
+1 −1
@@ -380,7 +380,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		}
 	}
 
-	if ((dev_cap->flags &
+	if ((dev->caps.flags &
 	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
 	    mlx4_is_master(dev))
 		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;