Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7f082319 authored by David S. Miller
Browse files

Merge branch 'xen-netfront'



David Vrabel says:

====================
xen-netfront: fix resume regressions in 3.16-rc1

The introduction of multi-queue support to xen-netfront in 3.16-rc1
broke resume/migration.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents a705a906 ce58725f
Loading
Loading
Loading
Loading
+75 −34
Original line number Diff line number Diff line
@@ -1437,10 +1437,11 @@ static void xennet_end_access(int ref, void *page)
static void xennet_disconnect_backend(struct netfront_info *info)
{
	unsigned int i = 0;
	struct netfront_queue *queue = NULL;
	unsigned int num_queues = info->netdev->real_num_tx_queues;

	for (i = 0; i < num_queues; ++i) {
		struct netfront_queue *queue = &info->queues[i];

		/* Stop old i/f to prevent errors whilst we rebuild the state. */
		spin_lock_bh(&queue->rx_lock);
		spin_lock_irq(&queue->tx_lock);
@@ -1698,8 +1699,6 @@ static int xennet_init_queue(struct netfront_queue *queue)
		goto exit_free_tx;
	}

	netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);

	return 0;

 exit_free_tx:
@@ -1790,6 +1789,70 @@ static int write_queue_xenstore_keys(struct netfront_queue *queue,
	return err;
}

/*
 * xennet_destroy_queues - tear down all per-device queues and free the array.
 * @info: netfront device state owning the queue array.
 *
 * Unregisters the NAPI context of every queue, then frees info->queues.
 * napi_disable() is called only while the interface is running, mirroring
 * the netif_running() guard under which napi_enable() was called in
 * xennet_create_queues().  The loop runs under rtnl_lock, which serializes
 * the read of real_num_tx_queues against concurrent changes.
 */
static void xennet_destroy_queues(struct netfront_info *info)
{
	unsigned int i;

	rtnl_lock();

	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		/* Disable before deleting: only if NAPI was ever enabled. */
		if (netif_running(info->netdev))
			napi_disable(&queue->napi);
		netif_napi_del(&queue->napi);
	}

	rtnl_unlock();

	kfree(info->queues);
	/* NULL lets callers (e.g. talk_to_netback) test whether queues exist. */
	info->queues = NULL;
}

/*
 * xennet_create_queues - allocate and initialise the per-device queues.
 * @info:       netfront device state to attach the queue array to.
 * @num_queues: number of queues requested.
 *
 * Allocates info->queues, initialises each queue and registers (and, if
 * the interface is running, enables) its NAPI context.  On a partial
 * failure the queues initialised so far are kept and the effective queue
 * count is reduced accordingly.  netif_set_real_num_tx_queues() must be
 * called under rtnl_lock, hence the lock scope around the loop.
 *
 * Return: 0 on success, -ENOMEM if the queue array cannot be allocated,
 * -EINVAL if not a single queue could be initialised.
 */
static int xennet_create_queues(struct netfront_info *info,
				unsigned int num_queues)
{
	unsigned int i;
	int ret;

	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
			       GFP_KERNEL);
	if (!info->queues)
		return -ENOMEM;

	rtnl_lock();

	for (i = 0; i < num_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		queue->id = i;
		queue->info = info;

		ret = xennet_init_queue(queue);
		if (ret < 0) {
			/* Report the number of queues actually created (i),
			 * not the requested total, and fall through with the
			 * reduced count.
			 */
			dev_warn(&info->netdev->dev, "only created %d queues\n",
				 i);
			num_queues = i;
			break;
		}

		netif_napi_add(queue->info->netdev, &queue->napi,
			       xennet_poll, 64);
		if (netif_running(info->netdev))
			napi_enable(&queue->napi);
	}

	/* Publish the effective queue count while still holding rtnl. */
	netif_set_real_num_tx_queues(info->netdev, num_queues);

	rtnl_unlock();

	if (num_queues == 0) {
		dev_err(&info->netdev->dev, "no queues\n");
		return -EINVAL;
	}
	return 0;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
@@ -1826,42 +1889,20 @@ static int talk_to_netback(struct xenbus_device *dev,
		goto out;
	}

	/* Allocate array of queues */
	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL);
	if (!info->queues) {
		err = -ENOMEM;
		goto out;
	}
	rtnl_lock();
	netif_set_real_num_tx_queues(info->netdev, num_queues);
	rtnl_unlock();
	if (info->queues)
		xennet_destroy_queues(info);

	err = xennet_create_queues(info, num_queues);
	if (err < 0)
		goto destroy_ring;

	/* Create shared ring, alloc event channel -- for each queue */
	for (i = 0; i < num_queues; ++i) {
		queue = &info->queues[i];
		queue->id = i;
		queue->info = info;
		err = xennet_init_queue(queue);
		if (err) {
			/* xennet_init_queue() cleans up after itself on failure,
			 * but we still have to clean up any previously initialised
			 * queues. If i > 0, set num_queues to i, then goto
			 * destroy_ring, which calls xennet_disconnect_backend()
			 * to tidy up.
			 */
			if (i > 0) {
				rtnl_lock();
				netif_set_real_num_tx_queues(info->netdev, i);
				rtnl_unlock();
				goto destroy_ring;
			} else {
				goto out;
			}
		}
		err = setup_netfront(dev, queue, feature_split_evtchn);
		if (err) {
			/* As for xennet_init_queue(), setup_netfront() will tidy
			 * up the current queue on error, but we need to clean up
			/* setup_netfront() will tidy up the current
			 * queue on error, but we need to clean up
			 * those already allocated.
			 */
			if (i > 0) {