Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 10c51b56 authored by Daniel Borkmann, committed by David S. Miller
Browse files

net: add skb_get_tx_queue() helper



Replace occurrences of skb_get_queue_mapping() and follow-up
netdev_get_tx_queue() with an actual helper function.

Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a3bf5c42
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -1747,6 +1747,12 @@ struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
	return &dev->_tx[index];
}

static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
						    const struct sk_buff *skb)
{
	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
+1 −1
Original line number Diff line number Diff line
@@ -115,7 +115,7 @@ static void queue_process(struct work_struct *work)
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
		txq = skb_get_tx_queue(dev, skb);

		local_irq_save(flags);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
+1 −3
Original line number Diff line number Diff line
@@ -3286,7 +3286,6 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
	struct net_device *odev = pkt_dev->odev;
	struct netdev_queue *txq;
	u16 queue_map;
	int ret;

	/* If device is offline, then don't send */
@@ -3324,8 +3323,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
	if (pkt_dev->delay && pkt_dev->last_ok)
		spin(pkt_dev, pkt_dev->next_tx);

	queue_map = skb_get_queue_mapping(pkt_dev->skb);
	txq = netdev_get_tx_queue(odev, queue_map);
	txq = skb_get_tx_queue(odev, pkt_dev->skb);

	local_bh_disable();

+1 −3
Original line number Diff line number Diff line
@@ -243,7 +243,6 @@ static int packet_direct_xmit(struct sk_buff *skb)
	netdev_features_t features;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;
	u16 queue_map;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
@@ -254,8 +253,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
	    __skb_linearize(skb))
		goto drop;

	queue_map = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue_map);
	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();

+4 −2
Original line number Diff line number Diff line
@@ -63,7 +63,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)

	if (unlikely(skb)) {
		/* check the reason of requeuing without tx lock first */
		txq = netdev_get_tx_queue(txq->dev, skb_get_queue_mapping(skb));
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			q->gso_skb = NULL;
			q->q.qlen--;
@@ -183,10 +183,12 @@ static inline int qdisc_restart(struct Qdisc *q)
	skb = dequeue_skb(q);
	if (unlikely(!skb))
		return 0;

	WARN_ON_ONCE(skb_dst_is_noref(skb));

	root_lock = qdisc_lock(q);
	dev = qdisc_dev(q);
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	txq = skb_get_tx_queue(dev, skb);

	return sch_direct_xmit(skb, q, dev, txq, root_lock);
}