
Commit 943c246e authored by Roland Dreier

IPoIB: Use netif_tx_lock() and get rid of private tx_lock, LLTX



Currently, IPoIB is an LLTX driver that uses its own IRQ-disabling
tx_lock.  Not only do we want to get rid of LLTX, this actually causes
problems because of the skb_orphan() done with this tx_lock held: some
skb destructors expect to be run with interrupts enabled.
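
For illustration, the bad pattern looks roughly like this (a simplified sketch, not the literal driver code; sock_wfree() is just the canonical example of an skb destructor that may wake up writers):

	spin_lock_irqsave(&priv->tx_lock, flags);	/* IRQs now disabled */

	/*
	 * skb_orphan() invokes skb->destructor synchronously (for a
	 * socket-owned skb that is sock_wfree()), so the destructor
	 * runs with interrupts off here.
	 */
	skb_orphan(skb);

	/* ... post the send work request ... */

	spin_unlock_irqrestore(&priv->tx_lock, flags);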

The simplest fix for this is to get rid of the driver-private tx_lock
and stop using LLTX.  We kill off priv->tx_lock and use
netif_tx_lock[_bh]() instead; the patch to do this is a tiny bit
tricky because we need to update places that take priv->lock inside
the tx_lock to disable IRQs, rather than relying on tx_lock having
already disabled IRQs.
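
Schematically, each such section changes like this (a sketch of the pattern; the hunks below are the real instances):

	/* Before: the outer lock itself disabled IRQs */
	spin_lock_irqsave(&priv->tx_lock, flags);
	spin_lock(&priv->lock);		/* IRQs already off */
	/* ... */
	spin_unlock(&priv->lock);
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	/* After: netif_tx_lock_bh() only disables BHs, so the inner
	 * lock must save/restore the IRQ flags itself */
	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);
	/* ... */
	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);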

Also, there are a couple of places where we need to disable BHs to
make sure we have a consistent context to call netif_tx_lock() (since
we no longer can use _irqsave() variants), and we also have to change
ipoib_send_comp_handler() to call drain_tx_cq() through a timer rather
than directly, because ipoib_send_comp_handler() runs in interrupt
context and drain_tx_cq() must run in BH context so it can call
netif_tx_lock().
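
The deferral pattern, in sketch form (the timer callback name here is a stand-in; per the ipoib_ib.c hunk below, the driver's existing poll_timer calls drain_tx_cq() when it fires):

	/* Timer callbacks run in softirq (BH) context, where taking
	 * netif_tx_lock() is legal. */
	static void tx_poll_timer_func(unsigned long ctx)
	{
		drain_tx_cq((struct net_device *) ctx);
	}

	/* The send CQ event handler runs in hard-IRQ context, so it
	 * only arms the timer to fire as soon as possible. */
	void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
	{
		struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);

		mod_timer(&priv->poll_timer, jiffies);
	}

	/* wiring, using the 2.6.27-era timer API: */
	setup_timer(&priv->poll_timer, tx_poll_timer_func, (unsigned long) dev);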

Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent c9da4bad
drivers/infiniband/ulp/ipoib/ipoib.h  +3 −5
@@ -268,10 +268,9 @@ struct ipoib_lro {
 };
 
 /*
- * Device private locking: tx_lock protects members used in TX fast
- * path (and we use LLTX so upper layers don't do extra locking).
- * lock protects everything else.  lock nests inside of tx_lock (ie
- * tx_lock must be acquired first if needed).
+ * Device private locking: network stack tx_lock protects members used
+ * in TX fast path, lock protects everything else.  lock nests inside
+ * of tx_lock (ie tx_lock must be acquired first if needed).
  */
 struct ipoib_dev_priv {
 	spinlock_t lock;
@@ -320,7 +319,6 @@ struct ipoib_dev_priv {
 
 	struct ipoib_rx_buf *rx_ring;
 
-	spinlock_t	     tx_lock;
 	struct ipoib_tx_buf *tx_ring;
 	unsigned	     tx_head;
 	unsigned	     tx_tail;
drivers/infiniband/ulp/ipoib/ipoib_cm.c  +52 −36
@@ -786,7 +786,8 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 	dev_kfree_skb_any(tx_req->skb);
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
+	netif_tx_lock(dev);
+
 	++tx->tx_tail;
 	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
 	    netif_queue_stopped(dev) &&
@@ -801,7 +802,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 			   "(status=%d, wrid=%d vend_err %x)\n",
 			   wc->status, wr_id, wc->vendor_err);
 
-		spin_lock(&priv->lock);
+		spin_lock_irqsave(&priv->lock, flags);
 		neigh = tx->neigh;
 
 		if (neigh) {
@@ -821,10 +822,10 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
 
-		spin_unlock(&priv->lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
+	netif_tx_unlock(dev);
 }
 
 int ipoib_cm_dev_open(struct net_device *dev)
@@ -1149,7 +1150,6 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
 	struct ipoib_cm_tx_buf *tx_req;
-	unsigned long flags;
 	unsigned long begin;
 
 	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
@@ -1180,12 +1180,12 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 				    DMA_TO_DEVICE);
 		dev_kfree_skb_any(tx_req->skb);
 		++p->tx_tail;
-		spin_lock_irqsave(&priv->tx_lock, flags);
+		netif_tx_lock_bh(p->dev);
 		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
 		    netif_queue_stopped(p->dev) &&
 		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 			netif_wake_queue(p->dev);
-		spin_unlock_irqrestore(&priv->tx_lock, flags);
+		netif_tx_unlock_bh(p->dev);
 	}
 
 	if (p->qp)
@@ -1202,6 +1202,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
 	struct net_device *dev = priv->dev;
 	struct ipoib_neigh *neigh;
+	unsigned long flags;
 	int ret;
 
 	switch (event->event) {
@@ -1220,8 +1221,8 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 	case IB_CM_REJ_RECEIVED:
 	case IB_CM_TIMEWAIT_EXIT:
 		ipoib_dbg(priv, "CM error %d.\n", event->event);
-		spin_lock_irq(&priv->tx_lock);
-		spin_lock(&priv->lock);
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
 		neigh = tx->neigh;
 
 		if (neigh) {
@@ -1239,8 +1240,8 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 			queue_work(ipoib_workqueue, &priv->cm.reap_task);
 		}
 
-		spin_unlock(&priv->lock);
-		spin_unlock_irq(&priv->tx_lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
 		break;
 	default:
 		break;
@@ -1294,19 +1295,24 @@ static void ipoib_cm_tx_start(struct work_struct *work)
 	struct ib_sa_path_rec pathrec;
 	u32 qpn;
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
-	spin_lock(&priv->lock);
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
+
 	while (!list_empty(&priv->cm.start_list)) {
 		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
 		list_del_init(&p->list);
 		neigh = p->neigh;
 		qpn = IPOIB_QPN(neigh->neighbour->ha);
 		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
-		spin_unlock(&priv->lock);
-		spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
+
 		ret = ipoib_cm_tx_init(p, qpn, &pathrec);
-		spin_lock_irqsave(&priv->tx_lock, flags);
-		spin_lock(&priv->lock);
+
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
+
 		if (ret) {
 			neigh = p->neigh;
 			if (neigh) {
@@ -1320,44 +1326,52 @@ static void ipoib_cm_tx_start(struct work_struct *work)
 			kfree(p);
 		}
 	}
-	spin_unlock(&priv->lock);
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
 }
 
 static void ipoib_cm_tx_reap(struct work_struct *work)
 {
 	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
 						   cm.reap_task);
+	struct net_device *dev = priv->dev;
 	struct ipoib_cm_tx *p;
+	unsigned long flags;
+
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
 
-	spin_lock_irq(&priv->tx_lock);
-	spin_lock(&priv->lock);
 	while (!list_empty(&priv->cm.reap_list)) {
 		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
 		list_del(&p->list);
-		spin_unlock(&priv->lock);
-		spin_unlock_irq(&priv->tx_lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
 		ipoib_cm_tx_destroy(p);
-		spin_lock_irq(&priv->tx_lock);
-		spin_lock(&priv->lock);
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
 	}
-	spin_unlock(&priv->lock);
-	spin_unlock_irq(&priv->tx_lock);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
 }
 
 static void ipoib_cm_skb_reap(struct work_struct *work)
 {
 	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
 						   cm.skb_task);
+	struct net_device *dev = priv->dev;
 	struct sk_buff *skb;
-
+	unsigned long flags;
 	unsigned mtu = priv->mcast_mtu;
 
-	spin_lock_irq(&priv->tx_lock);
-	spin_lock(&priv->lock);
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
+
 	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
-		spin_unlock(&priv->lock);
-		spin_unlock_irq(&priv->tx_lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
+
 		if (skb->protocol == htons(ETH_P_IP))
 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -1365,11 +1379,13 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
 #endif
 		dev_kfree_skb_any(skb);
-		spin_lock_irq(&priv->tx_lock);
-		spin_lock(&priv->lock);
+
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
 	}
-	spin_unlock(&priv->lock);
-	spin_unlock_irq(&priv->tx_lock);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
 }
 
 void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
drivers/infiniband/ulp/ipoib/ipoib_ib.c  +22 −8
@@ -468,21 +468,22 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
 static void drain_tx_cq(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	unsigned long flags;
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
+	netif_tx_lock(dev);
 	while (poll_tx(priv))
 		; /* nothing */
 
 	if (netif_queue_stopped(dev))
 		mod_timer(&priv->poll_timer, jiffies + 1);
 
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
+	netif_tx_unlock(dev);
 }
 
 void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
 {
-	drain_tx_cq((struct net_device *)dev_ptr);
+	struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);
+
+	mod_timer(&priv->poll_timer, jiffies);
 }
 
 static inline int post_send(struct ipoib_dev_priv *priv,
@@ -614,17 +615,20 @@ static void __ipoib_reap_ah(struct net_device *dev)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_ah *ah, *tah;
 	LIST_HEAD(remove_list);
+	unsigned long flags;
+
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
 
-	spin_lock_irq(&priv->tx_lock);
-	spin_lock(&priv->lock);
 	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
 		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
 			list_del(&ah->list);
 			ib_destroy_ah(ah->ah);
 			kfree(ah);
 		}
-	spin_unlock(&priv->lock);
-	spin_unlock_irq(&priv->tx_lock);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
 }
 
 void ipoib_reap_ah(struct work_struct *work)
@@ -761,6 +765,14 @@ void ipoib_drain_cq(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int i, n;
+
+	/*
+	 * We call completion handling routines that expect to be
+	 * called from the BH-disabled NAPI poll context, so disable
+	 * BHs here too.
+	 */
+	local_bh_disable();
+
 	do {
 		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
 		for (i = 0; i < n; ++i) {
@@ -784,6 +796,8 @@ void ipoib_drain_cq(struct net_device *dev)
 
 	while (poll_tx(priv))
 		; /* nothing */
+
+	local_bh_enable();
 }
 
 int ipoib_ib_dev_stop(struct net_device *dev, int flush)
drivers/infiniband/ulp/ipoib/ipoib_main.c  +28 −40
@@ -373,9 +373,10 @@ void ipoib_flush_paths(struct net_device *dev)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_path *path, *tp;
 	LIST_HEAD(remove_list);
+	unsigned long flags;
 
-	spin_lock_irq(&priv->tx_lock);
-	spin_lock(&priv->lock);
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	list_splice_init(&priv->path_list, &remove_list);
 
@@ -385,15 +386,16 @@ void ipoib_flush_paths(struct net_device *dev)
 	list_for_each_entry_safe(path, tp, &remove_list, list) {
 		if (path->query)
 			ib_sa_cancel_query(path->query_id, path->query);
-		spin_unlock(&priv->lock);
-		spin_unlock_irq(&priv->tx_lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
 		wait_for_completion(&path->done);
 		path_free(dev, path);
-		spin_lock_irq(&priv->tx_lock);
-		spin_lock(&priv->lock);
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
 	}
-	spin_unlock(&priv->lock);
-	spin_unlock_irq(&priv->tx_lock);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
 }
 
 static void path_rec_completion(int status,
@@ -555,6 +557,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_path *path;
 	struct ipoib_neigh *neigh;
+	unsigned long flags;
 
 	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
 	if (!neigh) {
@@ -563,11 +566,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 		return;
 	}
 
-	/*
-	 * We can only be called from ipoib_start_xmit, so we're
-	 * inside tx_lock -- no need to save/restore flags.
-	 */
-	spin_lock(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	path = __path_find(dev, skb->dst->neighbour->ha + 4);
 	if (!path) {
@@ -614,7 +613,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 		__skb_queue_tail(&neigh->queue, skb);
 	}
 
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 	return;
 
 err_list:
@@ -626,7 +625,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 	++dev->stats.tx_dropped;
 	dev_kfree_skb_any(skb);
 
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
@@ -650,12 +649,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_path *path;
+	unsigned long flags;
 
-	/*
-	 * We can only be called from ipoib_start_xmit, so we're
-	 * inside tx_lock -- no need to save/restore flags.
-	 */
-	spin_lock(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	path = __path_find(dev, phdr->hwaddr + 4);
 	if (!path || !path->valid) {
@@ -667,7 +663,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 			__skb_queue_tail(&path->queue, skb);
 
 			if (path_rec_start(dev, path)) {
-				spin_unlock(&priv->lock);
+				spin_unlock_irqrestore(&priv->lock, flags);
 				path_free(dev, path);
 				return;
 			} else
@@ -677,7 +673,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 			dev_kfree_skb_any(skb);
 		}
 
-		spin_unlock(&priv->lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
 		return;
 	}
 
@@ -696,7 +692,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 		dev_kfree_skb_any(skb);
 	}
 
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -705,13 +701,10 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ipoib_neigh *neigh;
 	unsigned long flags;
 
-	if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
-		return NETDEV_TX_LOCKED;
-
 	if (likely(skb->dst && skb->dst->neighbour)) {
 		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
 			ipoib_path_lookup(skb, dev);
-			goto out;
+			return NETDEV_TX_OK;
 		}
 
 		neigh = *to_ipoib_neigh(skb->dst->neighbour);
@@ -721,7 +714,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 					    skb->dst->neighbour->ha + 4,
 					    sizeof(union ib_gid))) ||
 					 (neigh->dev != dev))) {
-				spin_lock(&priv->lock);
+				spin_lock_irqsave(&priv->lock, flags);
 				/*
 				 * It's safe to call ipoib_put_ah() inside
 				 * priv->lock here, because we know that
@@ -732,25 +725,25 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				ipoib_put_ah(neigh->ah);
 				list_del(&neigh->list);
 				ipoib_neigh_free(dev, neigh);
-				spin_unlock(&priv->lock);
+				spin_unlock_irqrestore(&priv->lock, flags);
 				ipoib_path_lookup(skb, dev);
-				goto out;
+				return NETDEV_TX_OK;
 			}
 
 		if (ipoib_cm_get(neigh)) {
 			if (ipoib_cm_up(neigh)) {
 				ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
-				goto out;
+				return NETDEV_TX_OK;
 			}
 		} else if (neigh->ah) {
 			ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
-			goto out;
+			return NETDEV_TX_OK;
 		}
 
 		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
-			spin_lock(&priv->lock);
+			spin_lock_irqsave(&priv->lock, flags);
 			__skb_queue_tail(&neigh->queue, skb);
-			spin_unlock(&priv->lock);
+			spin_unlock_irqrestore(&priv->lock, flags);
 		} else {
 			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
@@ -779,16 +772,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
 				dev_kfree_skb_any(skb);
 				++dev->stats.tx_dropped;
-				goto out;
+				return NETDEV_TX_OK;
 			}
 
 			unicast_arp_send(skb, dev, phdr);
 		}
 	}
 
-out:
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
-
 	return NETDEV_TX_OK;
 }
 
@@ -1052,7 +1042,6 @@ static void ipoib_setup(struct net_device *dev)
 	dev->type		 = ARPHRD_INFINIBAND;
 	dev->tx_queue_len	 = ipoib_sendq_size * 2;
 	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
-				    NETIF_F_LLTX		|
 				    NETIF_F_HIGHDMA);
 
 	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
@@ -1064,7 +1053,6 @@ static void ipoib_setup(struct net_device *dev)
 	ipoib_lro_setup(priv);
 
 	spin_lock_init(&priv->lock);
-	spin_lock_init(&priv->tx_lock);
 
 	mutex_init(&priv->vlan_mutex);
 
drivers/infiniband/ulp/ipoib/ipoib_multicast.c  +13 −18
@@ -69,14 +69,13 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 	struct net_device *dev = mcast->dev;
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_neigh *neigh, *tmp;
-	unsigned long flags;
 	int tx_dropped = 0;
 
 	ipoib_dbg_mcast(netdev_priv(dev),
 			"deleting multicast group " IPOIB_GID_FMT "\n",
 			IPOIB_GID_ARG(mcast->mcmember.mgid));
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irq(&priv->lock);
 
 	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
 		/*
@@ -90,7 +89,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 		ipoib_neigh_free(dev, neigh);
 	}
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irq(&priv->lock);
 
 	if (mcast->ah)
 		ipoib_put_ah(mcast->ah);
@@ -100,9 +99,9 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
 	}
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
+	netif_tx_lock_bh(dev);
 	dev->stats.tx_dropped += tx_dropped;
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
+	netif_tx_unlock_bh(dev);
 
 	kfree(mcast);
 }
@@ -259,10 +258,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 	}
 
 	/* actually send any queued packets */
-	spin_lock_irq(&priv->tx_lock);
+	netif_tx_lock_bh(dev);
 	while (!skb_queue_empty(&mcast->pkt_queue)) {
 		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
-		spin_unlock_irq(&priv->tx_lock);
+		netif_tx_unlock_bh(dev);
 
 		skb->dev = dev;
 
@@ -273,9 +272,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 
 		if (dev_queue_xmit(skb))
 			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
-		spin_lock_irq(&priv->tx_lock);
+		netif_tx_lock_bh(dev);
 	}
-	spin_unlock_irq(&priv->tx_lock);
+	netif_tx_unlock_bh(dev);
 
 	return 0;
 }
@@ -286,7 +285,6 @@ ipoib_mcast_sendonly_join_complete(int status,
 {
 	struct ipoib_mcast *mcast = multicast->context;
 	struct net_device *dev = mcast->dev;
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
 	/* We trap for port events ourselves. */
 	if (status == -ENETRESET)
@@ -302,12 +300,12 @@ ipoib_mcast_sendonly_join_complete(int status,
 					IPOIB_GID_ARG(mcast->mcmember.mgid), status);
 
 		/* Flush out any queued packets */
-		spin_lock_irq(&priv->tx_lock);
+		netif_tx_lock_bh(dev);
 		while (!skb_queue_empty(&mcast->pkt_queue)) {
 			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
 		}
-		spin_unlock_irq(&priv->tx_lock);
+		netif_tx_unlock_bh(dev);
 
 		/* Clear the busy flag so we try again */
 		status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
@@ -662,12 +660,9 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_mcast *mcast;
+	unsigned long flags;
 
-	/*
-	 * We can only be called from ipoib_start_xmit, so we're
-	 * inside tx_lock -- no need to save/restore flags.
-	 */
-	spin_lock(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
 	    !priv->broadcast					||
@@ -738,7 +733,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 	}
 
 unlock:
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 void ipoib_mcast_dev_flush(struct net_device *dev)