
Commit 8fdd95ec authored by Herbert Xu, committed by David S. Miller

netpoll: Allow netpoll_setup/cleanup recursion



This patch adds the functions __netpoll_setup/__netpoll_cleanup, which
are designed to be called recursively through ndo_netpoll_setup.

They must be called with RTNL held, and the caller must initialise
np->dev and ensure that it has a valid reference count.
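
As a purely illustrative sketch (not part of this patch), a stacked driver
could now recurse into these helpers from its own ndo_netpoll_setup and
ndo_netpoll_cleanup callbacks.  The foo_* names, foo_priv and lower_dev
below are hypothetical and only show the calling convention: RTNL is
already held when the callbacks run, and the caller of __netpoll_setup
initialises np->dev and keeps a reference on it.

#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical driver-private data for a device stacked on lower_dev. */
struct foo_priv {
	struct net_device *lower_dev;	/* device we forward netpoll to */
	struct netpoll *np;		/* netpoll attached to lower_dev */
};

static int foo_netpoll_setup(struct net_device *dev,
			     struct netpoll_info *npinfo)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct netpoll *np;
	int err;

	/* Called from __netpoll_setup() on "dev", so RTNL is held. */
	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	/* Caller contract: initialise np->dev and hold a reference on it
	 * before recursing into __netpoll_setup(). */
	np->dev = priv->lower_dev;
	dev_hold(priv->lower_dev);
	strlcpy(np->dev_name, priv->lower_dev->name, IFNAMSIZ);

	err = __netpoll_setup(np);
	if (err) {
		dev_put(priv->lower_dev);
		kfree(np);
		return err;
	}

	priv->np = np;
	return 0;
}

static void foo_netpoll_cleanup(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct netpoll *np = priv->np;

	if (!np)
		return;
	priv->np = NULL;

	/* Also runs under RTNL.  __netpoll_cleanup() drops the npinfo
	 * reference but, unlike netpoll_cleanup(), leaves locking and the
	 * device reference to the caller. */
	__netpoll_cleanup(np);
	dev_put(np->dev);
	kfree(np);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_netpoll_setup	= foo_netpoll_setup,
	.ndo_netpoll_cleanup	= foo_netpoll_cleanup,
	/* ... other callbacks ... */
};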

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4247e161
include/linux/netpoll.h  +2 −0
@@ -46,9 +46,11 @@ void netpoll_poll(struct netpoll *np);
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
 void netpoll_print_options(struct netpoll *np);
 int netpoll_parse_options(struct netpoll *np, char *opt);
+int __netpoll_setup(struct netpoll *np);
 int netpoll_setup(struct netpoll *np);
 int netpoll_trap(void);
 void netpoll_set_trap(int trap);
+void __netpoll_cleanup(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
 int __netpoll_rx(struct sk_buff *skb);
 void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
net/core/netpoll.c  +97 −79
@@ -693,15 +693,78 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 	return -1;
 }
 
-int netpoll_setup(struct netpoll *np)
+int __netpoll_setup(struct netpoll *np)
 {
-	struct net_device *ndev = NULL;
-	struct in_device *in_dev;
+	struct net_device *ndev = np->dev;
 	struct netpoll_info *npinfo;
 	const struct net_device_ops *ops;
 	unsigned long flags;
 	int err;
 
+	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
+	    !ndev->netdev_ops->ndo_poll_controller) {
+		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
+		       np->name, np->dev_name);
+		err = -ENOTSUPP;
+		goto out;
+	}
+
+	if (!ndev->npinfo) {
+		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+		if (!npinfo) {
+			err = -ENOMEM;
+			goto out;
+		}
+
+		npinfo->rx_flags = 0;
+		INIT_LIST_HEAD(&npinfo->rx_np);
+
+		spin_lock_init(&npinfo->rx_lock);
+		skb_queue_head_init(&npinfo->arp_tx);
+		skb_queue_head_init(&npinfo->txq);
+		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
+
+		atomic_set(&npinfo->refcnt, 1);
+
+		ops = np->dev->netdev_ops;
+		if (ops->ndo_netpoll_setup) {
+			err = ops->ndo_netpoll_setup(ndev, npinfo);
+			if (err)
+				goto free_npinfo;
+		}
+	} else {
+		npinfo = ndev->npinfo;
+		atomic_inc(&npinfo->refcnt);
+	}
+
+	npinfo->netpoll = np;
+
+	if (np->rx_hook) {
+		spin_lock_irqsave(&npinfo->rx_lock, flags);
+		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
+		list_add_tail(&np->rx, &npinfo->rx_np);
+		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+	}
+
+	/* last thing to do is link it to the net device structure */
+	rcu_assign_pointer(ndev->npinfo, npinfo);
+
+	return 0;
+
+free_npinfo:
+	kfree(npinfo);
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(__netpoll_setup);
+
+int netpoll_setup(struct netpoll *np)
+{
+	struct net_device *ndev = NULL;
+	struct in_device *in_dev;
+	int err;
+
 	if (np->dev_name)
 		ndev = dev_get_by_name(&init_net, np->dev_name);
 	if (!ndev) {
@@ -774,61 +837,14 @@ int netpoll_setup(struct netpoll *np)
 	refill_skbs();
 
 	rtnl_lock();
-	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
-	    !ndev->netdev_ops->ndo_poll_controller) {
-		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
-		       np->name, np->dev_name);
-		err = -ENOTSUPP;
-		goto unlock;
-	}
-
-	if (!ndev->npinfo) {
-		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
-		if (!npinfo) {
-			err = -ENOMEM;
-			goto unlock;
-		}
-
-		npinfo->rx_flags = 0;
-		INIT_LIST_HEAD(&npinfo->rx_np);
-
-		spin_lock_init(&npinfo->rx_lock);
-		skb_queue_head_init(&npinfo->arp_tx);
-		skb_queue_head_init(&npinfo->txq);
-		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
-
-		atomic_set(&npinfo->refcnt, 1);
+	err = __netpoll_setup(np);
+	rtnl_unlock();
 
-		ops = np->dev->netdev_ops;
-		if (ops->ndo_netpoll_setup) {
-			err = ops->ndo_netpoll_setup(ndev, npinfo);
 	if (err)
-				goto free_npinfo;
-		}
-	} else {
-		npinfo = ndev->npinfo;
-		atomic_inc(&npinfo->refcnt);
-	}
-
-	npinfo->netpoll = np;
-
-	if (np->rx_hook) {
-		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
-		list_add_tail(&np->rx, &npinfo->rx_np);
-		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-	}
-
-	/* last thing to do is link it to the net device structure */
-	rcu_assign_pointer(ndev->npinfo, npinfo);
-	rtnl_unlock();
+		goto put;
 
 	return 0;
 
-free_npinfo:
-	kfree(npinfo);
-unlock:
-	rtnl_unlock();
 put:
 	dev_put(ndev);
 	return err;
@@ -841,18 +857,15 @@ static int __init netpoll_init(void)
 }
 core_initcall(netpoll_init);
 
-void netpoll_cleanup(struct netpoll *np)
+void __netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;
 	unsigned long flags;
-	int free = 0;
 
-	if (!np->dev)
+	npinfo = np->dev->npinfo;
+	if (!npinfo)
 		return;
 
-	rtnl_lock();
-	npinfo = np->dev->npinfo;
-	if (npinfo) {
 	if (!list_empty(&npinfo->rx_np)) {
 		spin_lock_irqsave(&npinfo->rx_lock, flags);
 		list_del(&np->rx);
@@ -861,8 +874,7 @@ void netpoll_cleanup(struct netpoll *np)
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
 
-		free = atomic_dec_and_test(&npinfo->refcnt);
-		if (free) {
+	if (atomic_dec_and_test(&npinfo->refcnt)) {
 		const struct net_device_ops *ops;
 
 		ops = np->dev->netdev_ops;
@@ -870,11 +882,7 @@ void netpoll_cleanup(struct netpoll *np)
 			ops->ndo_netpoll_cleanup(np->dev);
 
 		rcu_assign_pointer(np->dev->npinfo, NULL);
-		}
-	}
-	rtnl_unlock();
 
-	if (free) {
 		/* avoid racing with NAPI reading npinfo */
 		synchronize_rcu_bh();
 
@@ -886,9 +894,19 @@ void netpoll_cleanup(struct netpoll *np)
 		__skb_queue_purge(&npinfo->txq);
 		kfree(npinfo);
 	}
+}
+EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
-	dev_put(np->dev);
+void netpoll_cleanup(struct netpoll *np)
+{
+	if (!np->dev)
+		return;
+
+	rtnl_lock();
+	__netpoll_cleanup(np);
+	rtnl_unlock();
+
+	dev_put(np->dev);
 	np->dev = NULL;
 }