Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dc2b4847 authored by David S. Miller
Browse files

netdev: Move queue_lock into struct netdev_queue.



The lock is now an attribute of the device queue.

One thing to notice is that "suspicious" places
emerge which will need specific training about
multiple queue handling.  They are so marked with
explicit "netdev->rx_queue" and "netdev->tx_queue"
references.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5ce2d488
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -229,12 +229,12 @@ module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");

/*
 * dev_ifb->queue_lock is usually taken after dev->ingress_lock,
 * dev_ifb->tx_queue.lock is usually taken after dev->ingress_lock,
 * reversely to e.g. qdisc_lock_tree(). It should be safe until
 * ifb doesn't take dev->queue_lock with dev_ifb->ingress_lock.
 * ifb doesn't take dev->tx_queue.lock with dev_ifb->ingress_lock.
 * But lockdep should know that ifb has different locks from dev.
 */
static struct lock_class_key ifb_queue_lock_key;
static struct lock_class_key ifb_tx_queue_lock_key;
static struct lock_class_key ifb_ingress_lock_key;


@@ -258,7 +258,7 @@ static int __init ifb_init_one(int index)
	if (err < 0)
		goto err;

	lockdep_set_class(&dev_ifb->queue_lock, &ifb_queue_lock_key);
	lockdep_set_class(&dev_ifb->tx_queue.lock, &ifb_tx_queue_lock_key);
	lockdep_set_class(&dev_ifb->ingress_lock, &ifb_ingress_lock_key);

	return 0;
+2 −2
Original line number Diff line number Diff line
@@ -449,6 +449,7 @@ static inline void napi_synchronize(const struct napi_struct *n)
#endif

struct netdev_queue {
	spinlock_t		lock;
	struct net_device	*dev;
};

@@ -629,7 +630,7 @@ struct net_device
	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/

	struct netdev_queue	rx_queue;
	struct netdev_queue	tx_queue;
	struct netdev_queue	tx_queue ____cacheline_aligned_in_smp;

	/* ingress path synchronizer */
	spinlock_t		ingress_lock;
@@ -639,7 +640,6 @@ struct net_device
 * Cache line mostly used on queue transmit path (qdisc)
 */
	/* device queue lock */
	spinlock_t		queue_lock ____cacheline_aligned_in_smp;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
	struct list_head	qdisc_list;
+22 −11
Original line number Diff line number Diff line
@@ -1667,6 +1667,7 @@ out_kfree_skb:
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

@@ -1699,14 +1700,15 @@ int dev_queue_xmit(struct sk_buff *skb)
	}

gso:
	spin_lock_prefetch(&dev->queue_lock);
	txq = &dev->tx_queue;
	spin_lock_prefetch(&txq->lock);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	/* Updates of qdisc are serialized by queue_lock.
	/* Updates of qdisc are serialized by queue->lock.
	 * The struct Qdisc which is pointed to by qdisc is now a
	 * rcu structure - it may be accessed without acquiring
	 * a lock (but the structure may be stale.) The freeing of the
@@ -1714,7 +1716,7 @@ gso:
	 * more references to it.
	 *
	 * If the qdisc has an enqueue function, we still need to
	 * hold the queue_lock before calling it, since queue_lock
	 * hold the queue->lock before calling it, since queue->lock
	 * also serializes access to the device queue.
	 */

@@ -1724,19 +1726,19 @@ gso:
#endif
	if (q->enqueue) {
		/* Grab device queue */
		spin_lock(&dev->queue_lock);
		spin_lock(&txq->lock);
		q = dev->qdisc;
		if (q->enqueue) {
			/* reset queue_mapping to zero */
			skb_set_queue_mapping(skb, 0);
			rc = q->enqueue(skb, q);
			qdisc_run(dev);
			spin_unlock(&dev->queue_lock);
			spin_unlock(&txq->lock);

			rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
			goto out;
		}
		spin_unlock(&dev->queue_lock);
		spin_unlock(&txq->lock);
	}

	/* The device has no queue. Common case for software devices:
@@ -1919,14 +1921,17 @@ static void net_tx_action(struct softirq_action *h)

		while (head) {
			struct net_device *dev = head;
			struct netdev_queue *txq;
			head = head->next_sched;

			txq = &dev->tx_queue;

			smp_mb__before_clear_bit();
			clear_bit(__LINK_STATE_SCHED, &dev->state);

			if (spin_trylock(&dev->queue_lock)) {
			if (spin_trylock(&txq->lock)) {
				qdisc_run(dev);
				spin_unlock(&dev->queue_lock);
				spin_unlock(&txq->lock);
			} else {
				netif_schedule(dev);
			}
@@ -3787,7 +3792,6 @@ int register_netdevice(struct net_device *dev)
	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	spin_lock_init(&dev->queue_lock);
	spin_lock_init(&dev->_xmit_lock);
	netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
	dev->xmit_lock_owner = -1;
@@ -4072,10 +4076,17 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
	return &dev->stats;
}

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue)
{
	spin_lock_init(&queue->lock);
	queue->dev = dev;
}

static void netdev_init_queues(struct net_device *dev)
{
	dev->rx_queue.dev = dev;
	dev->tx_queue.dev = dev;
	netdev_init_one_queue(dev, &dev->rx_queue);
	netdev_init_one_queue(dev, &dev->tx_queue);
}

/**
+5 −5
Original line number Diff line number Diff line
@@ -636,7 +636,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)

	/* ensure that TX flow won't interrupt us
	 * until the end of the call to requeue function */
	spin_lock_bh(&local->mdev->queue_lock);
	spin_lock_bh(&local->mdev->tx_queue.lock);

	/* create a new queue for this aggregation */
	ret = ieee80211_ht_agg_queue_add(local, sta, tid);
@@ -675,7 +675,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)

	/* Will put all the packets in the new SW queue */
	ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
	spin_unlock_bh(&local->mdev->queue_lock);
	spin_unlock_bh(&local->mdev->tx_queue.lock);
	spin_unlock_bh(&sta->lock);

	/* send an addBA request */
@@ -701,7 +701,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
err_unlock_queue:
	kfree(sta->ampdu_mlme.tid_tx[tid]);
	sta->ampdu_mlme.tid_tx[tid] = NULL;
	spin_unlock_bh(&local->mdev->queue_lock);
	spin_unlock_bh(&local->mdev->tx_queue.lock);
	ret = -EBUSY;
err_unlock_sta:
	spin_unlock_bh(&sta->lock);
@@ -875,10 +875,10 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)

	/* avoid ordering issues: we are the only one that can modify
	 * the content of the qdiscs */
	spin_lock_bh(&local->mdev->queue_lock);
	spin_lock_bh(&local->mdev->tx_queue.lock);
	/* remove the queue for this aggregation */
	ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
	spin_unlock_bh(&local->mdev->queue_lock);
	spin_unlock_bh(&local->mdev->tx_queue.lock);

	/* we just requeued the all the frames that were in the removed
	 * queue, and since we might miss a softirq we do netif_schedule.
+1 −1
Original line number Diff line number Diff line
@@ -648,7 +648,7 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
}

/**
 * the caller needs to hold local->mdev->queue_lock
 * the caller needs to hold local->mdev->tx_queue.lock
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
Loading