Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c773e847 authored by David S. Miller
Browse files

netdev: Move _xmit_lock and xmit_lock_owner into netdev_queue.



Accesses are mostly structured such that when there are multiple TX
queues the code transformations will be a little bit simpler.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent eb6aafe3
Loading
Loading
Loading
Loading
+12 −1
Original line number Diff line number Diff line
@@ -5019,6 +5019,17 @@ static int bond_check_params(struct bond_params *params)

static struct lock_class_key bonding_netdev_xmit_lock_key;

/* Put one TX queue's _xmit_lock into the bonding-private lockdep class,
 * presumably so lockdep can distinguish the bond's TX lock from the TX
 * locks of its slave devices — confirm against the key's declaration. */
static void bond_set_lockdep_class_one(struct netdev_queue *txq)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &bonding_netdev_xmit_lock_key);
}

/* Apply the bonding lockdep class to the device's TX queue.
 * NOTE(review): only the single dev->tx_queue is covered here; at this
 * point in the tree there is one TX queue per device (see netdev_queue). */
static void bond_set_lockdep_class(struct net_device *dev)
{
	bond_set_lockdep_class_one(&dev->tx_queue);
}

/* Create a new bond based on the specified name and bonding parameters.
 * If name is NULL, obtain a suitable "bond%d" name for us.
 * Caller must NOT hold rtnl_lock; we need to release it here before we
@@ -5076,7 +5087,7 @@ int bond_create(char *name, struct bond_params *params)
		goto out_bond;
	}

-	lockdep_set_class(&bond_dev->_xmit_lock, &bonding_netdev_xmit_lock_key);
+	bond_set_lockdep_class(bond_dev);

	netif_carrier_off(bond_dev);

+11 −1
Original line number Diff line number Diff line
@@ -124,6 +124,16 @@ static LIST_HEAD(bpq_devices);
 */
static struct lock_class_key bpq_netdev_xmit_lock_key;

/* Put one TX queue's _xmit_lock into the bpq-private lockdep class. */
static void bpq_set_lockdep_class_one(struct netdev_queue *txq)
{
	lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
}

/* Apply the bpq lockdep class to the device's (single) TX queue. */
static void bpq_set_lockdep_class(struct net_device *dev)
{
	bpq_set_lockdep_class_one(&dev->tx_queue);
}

/* ------------------------------------------------------------------------ */


@@ -523,7 +533,7 @@ static int bpq_new_device(struct net_device *edev)
	err = register_netdevice(ndev);
	if (err)
		goto error;
-	lockdep_set_class(&ndev->_xmit_lock, &bpq_netdev_xmit_lock_key);
+	bpq_set_lockdep_class(ndev);

	/* List protected by RTNL */
	list_add_rcu(&bpq->bpq_list, &bpq_devices);
+13 −1
Original line number Diff line number Diff line
@@ -277,6 +277,17 @@ static struct lock_class_key macvlan_netdev_xmit_lock_key;
#define MACVLAN_STATE_MASK \
	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))

/* Put one TX queue's _xmit_lock into the macvlan-private lockdep class,
 * keeping it distinct from the lower device's TX lock class. */
static void macvlan_set_lockdep_class_one(struct netdev_queue *txq)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &macvlan_netdev_xmit_lock_key);
}

/* Apply the macvlan lockdep class to the device's (single) TX queue. */
static void macvlan_set_lockdep_class(struct net_device *dev)
{
	macvlan_set_lockdep_class_one(&dev->tx_queue);
}

static int macvlan_init(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
@@ -287,7 +298,8 @@ static int macvlan_init(struct net_device *dev)
	dev->features 		= lowerdev->features & MACVLAN_FEATURES;
	dev->iflink		= lowerdev->ifindex;

-	lockdep_set_class(&dev->_xmit_lock, &macvlan_netdev_xmit_lock_key);
+	macvlan_set_lockdep_class(dev);

	return 0;
}

+11 −1
Original line number Diff line number Diff line
@@ -3102,6 +3102,16 @@ static void prism2_clear_set_tim_queue(local_info_t *local)
 */
static struct lock_class_key hostap_netdev_xmit_lock_key;

/* Put one TX queue's _xmit_lock into the hostap-private lockdep class. */
static void prism2_set_lockdep_class_one(struct netdev_queue *txq)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &hostap_netdev_xmit_lock_key);
}

/* Apply the hostap lockdep class to the device's (single) TX queue. */
static void prism2_set_lockdep_class(struct net_device *dev)
{
	prism2_set_lockdep_class_one(&dev->tx_queue);
}

static struct net_device *
prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
@@ -3268,7 +3278,7 @@ while (0)
	if (ret >= 0)
		ret = register_netdevice(dev);

-	lockdep_set_class(&dev->_xmit_lock, &hostap_netdev_xmit_lock_key);
+	prism2_set_lockdep_class(dev);
	rtnl_unlock();
	if (ret < 0) {
		printk(KERN_WARNING "%s: register netdevice failed!\n",
+39 −23
Original line number Diff line number Diff line
@@ -453,6 +453,8 @@ struct netdev_queue {
 	struct net_device	*dev;
 	struct Qdisc		*qdisc;
 	struct sk_buff		*gso_skb;
+	spinlock_t		_xmit_lock;
+	int			xmit_lock_owner;
 	struct Qdisc		*qdisc_sleeping;
 	struct list_head	qdisc_list;
 	struct netdev_queue	*next_sched;
@@ -639,12 +641,6 @@ struct net_device
 /*
  * One part is mostly used on xmit path (device)
  */
-	/* hard_start_xmit synchronizer */
-	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
-	/* cpu id of processor entered to hard_start_xmit or -1,
-	   if nobody entered there.
-	 */
-	int			xmit_lock_owner;
 	void			*priv;	/* pointer to private data	*/
 	int			(*hard_start_xmit) (struct sk_buff *skb,
 						    struct net_device *dev);
@@ -1402,52 +1398,72 @@ static inline void netif_rx_complete(struct net_device *dev,
 *
 * Get network device transmit lock
 */
-static inline void __netif_tx_lock(struct net_device *dev, int cpu)
+static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
-	spin_lock(&dev->_xmit_lock);
-	dev->xmit_lock_owner = cpu;
+	spin_lock(&txq->_xmit_lock);
+	txq->xmit_lock_owner = cpu;
 }
 
 static inline void netif_tx_lock(struct net_device *dev)
 {
-	__netif_tx_lock(dev, smp_processor_id());
+	__netif_tx_lock(&dev->tx_queue, smp_processor_id());
 }

/* Take one queue's TX lock with bottom halves disabled and record the
 * owning CPU in xmit_lock_owner (-1 means unowned, per the field's
 * original comment in struct net_device). */
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

 static inline void netif_tx_lock_bh(struct net_device *dev)
 {
-	spin_lock_bh(&dev->_xmit_lock);
-	dev->xmit_lock_owner = smp_processor_id();
+	__netif_tx_lock_bh(&dev->tx_queue);
 }
 
-static inline int netif_tx_trylock(struct net_device *dev)
+static inline int __netif_tx_trylock(struct netdev_queue *txq)
 {
-	int ok = spin_trylock(&dev->_xmit_lock);
+	int ok = spin_trylock(&txq->_xmit_lock);
 	if (likely(ok))
-		dev->xmit_lock_owner = smp_processor_id();
+		txq->xmit_lock_owner = smp_processor_id();
 	return ok;
 }

/* Device-level trylock wrapper: attempts the (single) TX queue's lock;
 * returns nonzero on success, per __netif_tx_trylock. */
static inline int netif_tx_trylock(struct net_device *dev)
{
	return __netif_tx_trylock(&dev->tx_queue);
}

/* Release one queue's TX lock, clearing the owner marker first
 * (-1 means no CPU holds the lock). */
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

 static inline void netif_tx_unlock(struct net_device *dev)
 {
-	dev->xmit_lock_owner = -1;
-	spin_unlock(&dev->_xmit_lock);
+	__netif_tx_unlock(&dev->tx_queue);
 }

/* BH-enabled counterpart of __netif_tx_unlock: clear the owner marker,
 * then release the queue's TX lock and re-enable bottom halves. */
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

 static inline void netif_tx_unlock_bh(struct net_device *dev)
 {
-	dev->xmit_lock_owner = -1;
-	spin_unlock_bh(&dev->_xmit_lock);
+	__netif_tx_unlock_bh(&dev->tx_queue);
 }

-#define HARD_TX_LOCK(dev, cpu) {			\
+#define HARD_TX_LOCK(dev, txq, cpu) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		__netif_tx_lock(dev, cpu);			\
+		__netif_tx_lock(txq, cpu);		\
 	}						\
 }
 
-#define HARD_TX_UNLOCK(dev) {				\
+#define HARD_TX_UNLOCK(dev, txq) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		netif_tx_unlock(dev);			\
+		__netif_tx_unlock(txq);			\
 	}						\
 }

Loading