Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 555353cf authored by David S. Miller's avatar David S. Miller
Browse files

netdev: The ingress_lock member is no longer needed.



Every qdisc is associated with a queue, and in the case of ingress
qdiscs that will now be netdev->rx_queue so using that queue's lock is
the thing to do.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent dc2b4847
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -229,13 +229,13 @@ module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");

 /*
- * dev_ifb->tx_queue.lock is usually taken after dev->ingress_lock,
+ * dev_ifb->tx_queue.lock is usually taken after dev->rx_queue.lock,
  * reversely to e.g. qdisc_lock_tree(). It should be safe until
- * ifb doesn't take dev->tx_queue.lock with dev_ifb->ingress_lock.
+ * ifb doesn't take dev->tx_queue.lock with dev_ifb->rx_queue.lock.
  * But lockdep should know that ifb has different locks from dev.
  */
 static struct lock_class_key ifb_tx_queue_lock_key;
-static struct lock_class_key ifb_ingress_lock_key;
+static struct lock_class_key ifb_rx_queue_lock_key;


static int __init ifb_init_one(int index)
@@ -259,7 +259,7 @@ static int __init ifb_init_one(int index)
		goto err;

 	lockdep_set_class(&dev_ifb->tx_queue.lock, &ifb_tx_queue_lock_key);
-	lockdep_set_class(&dev_ifb->ingress_lock, &ifb_ingress_lock_key);
+	lockdep_set_class(&dev_ifb->rx_queue.lock, &ifb_rx_queue_lock_key);

	return 0;

+0 −2
Original line number Diff line number Diff line
@@ -632,8 +632,6 @@ struct net_device
	struct netdev_queue	rx_queue;
	struct netdev_queue	tx_queue ____cacheline_aligned_in_smp;

-	/* ingress path synchronizer */
-	spinlock_t		ingress_lock;
 	struct Qdisc		*qdisc_ingress;

/*
+7 −5
Original line number Diff line number Diff line
@@ -2014,10 +2014,11 @@ static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
 */
 static int ing_filter(struct sk_buff *skb)
 {
-	struct Qdisc *q;
 	struct net_device *dev = skb->dev;
-	int result = TC_ACT_OK;
 	u32 ttl = G_TC_RTTL(skb->tc_verd);
+	struct netdev_queue *rxq;
+	int result = TC_ACT_OK;
+	struct Qdisc *q;

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
@@ -2029,10 +2030,12 @@ static int ing_filter(struct sk_buff *skb)
	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

-	spin_lock(&dev->ingress_lock);
+	rxq = &dev->rx_queue;
+
+	spin_lock(&rxq->lock);
 	if ((q = dev->qdisc_ingress) != NULL)
 		result = q->enqueue(skb, q);
-	spin_unlock(&dev->ingress_lock);
+	spin_unlock(&rxq->lock);

	return result;
}
@@ -3795,7 +3798,6 @@ int register_netdevice(struct net_device *dev)
	spin_lock_init(&dev->_xmit_lock);
	netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
	dev->xmit_lock_owner = -1;
-	spin_lock_init(&dev->ingress_lock);

	dev->iflink = -1;

+1 −2
Original line number Diff line number Diff line
@@ -601,12 +601,11 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,

	sch->parent = parent;

+	sch->stats_lock = &dev_queue->lock;
 	if (handle == TC_H_INGRESS) {
 		sch->flags |= TCQ_F_INGRESS;
-		sch->stats_lock = &dev->ingress_lock;
 		handle = TC_H_MAKE(TC_H_INGRESS, 0);
 	} else {
-		sch->stats_lock = &dev_queue->lock;
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
+5 −5
Original line number Diff line number Diff line
@@ -35,24 +35,24 @@
 * - enqueue, dequeue are serialized via top level device
 *   spinlock queue->lock.
  * - ingress filtering is serialized via top level device
- *   spinlock dev->ingress_lock.
+ *   spinlock dev->rx_queue.lock.
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

 void qdisc_lock_tree(struct net_device *dev)
 	__acquires(dev->tx_queue.lock)
-	__acquires(dev->ingress_lock)
+	__acquires(dev->rx_queue.lock)
 {
 	spin_lock_bh(&dev->tx_queue.lock);
-	spin_lock(&dev->ingress_lock);
+	spin_lock(&dev->rx_queue.lock);
 }
EXPORT_SYMBOL(qdisc_lock_tree);

 void qdisc_unlock_tree(struct net_device *dev)
-	__releases(dev->ingress_lock)
+	__releases(dev->rx_queue.lock)
 	__releases(dev->tx_queue.lock)
 {
-	spin_unlock(&dev->ingress_lock);
+	spin_unlock(&dev->rx_queue.lock);
 	spin_unlock_bh(&dev->tx_queue.lock);
 }
EXPORT_SYMBOL(qdisc_unlock_tree);