
Commit 46b4f7f5 authored by Stephen Hemminger, committed by David S. Miller

netvsc: eliminate per-device outstanding send counter



Since we now track outstanding sends per queue, we can avoid one
atomic update by removing the no-longer-needed per-device atomic counter.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2289f0aa
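
To make the counting scheme concrete: the change drops the device-wide num_outstanding_sends atomic, so the send and completion paths touch only their own queue's queue_sends counter, and teardown decides "drained" by scanning every queue. Below is a minimal user-space sketch of that pattern, using C11 atomics and pthreads in place of the kernel's atomic_t and wait_event(); the names (device_idle, send_start, send_complete, halt_device) are illustrative stand-ins for netvsc_device_idle() and the driver's send, completion, and halt paths, not the driver code itself.

/*
 * User-space analogue of per-queue outstanding-send counting.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES 4

struct channel {
	atomic_int queue_sends;		/* outstanding sends on this queue only */
};

struct device {
	struct channel chan_table[NUM_QUEUES];
	atomic_bool destroy;
	pthread_mutex_t lock;
	pthread_cond_t wait_drain;
};

/* Analogue of netvsc_device_idle(): the device has drained only when
 * every per-queue counter reads zero. */
static bool device_idle(struct device *dev)
{
	for (int i = 0; i < NUM_QUEUES; i++)
		if (atomic_load(&dev->chan_table[i].queue_sends) > 0)
			return false;
	return true;
}

/* Send path: a single atomic increment, touching only the owning queue. */
static void send_start(struct device *dev, int q_idx)
{
	atomic_fetch_add(&dev->chan_table[q_idx].queue_sends, 1);
}

/* Completion path: decrement the same queue; if teardown has begun and
 * this queue just drained, wake the waiter, which re-checks all queues. */
static void send_complete(struct device *dev, int q_idx)
{
	int left = atomic_fetch_sub(&dev->chan_table[q_idx].queue_sends, 1) - 1;

	if (atomic_load(&dev->destroy) && left == 0) {
		pthread_mutex_lock(&dev->lock);
		pthread_cond_broadcast(&dev->wait_drain);
		pthread_mutex_unlock(&dev->lock);
	}
}

/* Teardown path: analogue of the wait_event() in rndis_filter_halt_device. */
static void halt_device(struct device *dev)
{
	atomic_store(&dev->destroy, true);
	pthread_mutex_lock(&dev->lock);
	while (!device_idle(dev))
		pthread_cond_wait(&dev->wait_drain, &dev->lock);
	pthread_mutex_unlock(&dev->lock);
}

int main(void)
{
	struct device dev = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait_drain = PTHREAD_COND_INITIALIZER,
	};

	send_start(&dev, 0);
	send_complete(&dev, 0);
	halt_device(&dev);	/* returns at once: every queue is idle */
	printf("drained\n");
	return 0;
}

The trade-off the commit makes is visible here: the hot send and completion paths pay one atomic operation on a per-queue counter instead of two (one of them shared device-wide), while the cold teardown path absorbs the extra cost of scanning all queues.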
drivers/net/hyperv/hyperv_net.h  +0 −1
@@ -729,7 +729,6 @@ struct netvsc_channel {
 struct netvsc_device {
 	u32 nvsp_version;
 
-	atomic_t num_outstanding_sends;
 	wait_queue_head_t wait_drain;
 	bool destroy;
 
drivers/net/hyperv/netvsc.c  +16 −28
@@ -90,29 +90,23 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
 	kfree(nvdev);
 }
 
-static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
-{
-	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
-
-	if (net_device && net_device->destroy)
-		net_device = NULL;
+static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
+				       u16 q_idx)
+{
+	const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
 
-	return net_device;
+	return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
+		atomic_read(&nvchan->queue_sends) == 0;
 }
 
-static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
+static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
 {
 	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
 
-	if (!net_device)
-		goto get_in_err;
-
-	if (net_device->destroy &&
-	    atomic_read(&net_device->num_outstanding_sends) == 0 &&
-	    atomic_read(&net_device->num_outstanding_recvs) == 0)
+	if (net_device && net_device->destroy)
 		net_device = NULL;
 
-get_in_err:
 	return net_device;
 }
 
@@ -612,7 +606,6 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
 	struct net_device *ndev = hv_get_drvdata(device);
 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 	struct vmbus_channel *channel = device->channel;
-	int num_outstanding_sends;
 	u16 q_idx = 0;
 	int queue_sends;
 
@@ -630,13 +623,10 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
 		dev_consume_skb_any(skb);
 	}
 
-	num_outstanding_sends =
-		atomic_dec_return(&net_device->num_outstanding_sends);
-
 	queue_sends =
 		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
 
-	if (net_device->destroy && num_outstanding_sends == 0)
+	if (net_device->destroy && queue_sends == 0)
 		wake_up(&net_device->wait_drain);
 
 	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
@@ -823,15 +813,10 @@ static inline int netvsc_send_pkt(
 	}
 
 	if (ret == 0) {
-		atomic_inc(&net_device->num_outstanding_sends);
 		atomic_inc_return(&nvchan->queue_sends);
 
-		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
+		if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
 			netif_tx_stop_queue(txq);
-
-			if (atomic_read(&nvchan->queue_sends) < 1)
-				netif_tx_wake_queue(txq);
-		}
 	} else if (ret == -EAGAIN) {
 		netif_tx_stop_queue(txq);
 		if (atomic_read(&nvchan->queue_sends) < 1) {
@@ -1259,11 +1244,14 @@ void netvsc_channel_cb(void *context)
 	else
 		device = channel->device_obj;
 
-	net_device = get_inbound_net_device(device);
-	if (!net_device)
+	ndev = hv_get_drvdata(device);
+	if (unlikely(!ndev))
 		return;
 
-	ndev = hv_get_drvdata(device);
+	net_device = net_device_to_netvsc_device(ndev);
+	if (unlikely(net_device->destroy) &&
+	    netvsc_channel_idle(net_device, q_idx))
+		return;
 
 	while ((desc = get_next_pkt_raw(channel)) != NULL) {
 		netvsc_process_raw_pkt(device, channel, net_device,
drivers/net/hyperv/rndis_filter.c  +18 −3
@@ -903,6 +903,23 @@ static int rndis_filter_init_device(struct rndis_device *dev)
 	return ret;
 }
 
+static bool netvsc_device_idle(const struct netvsc_device *nvdev)
+{
+	int i;
+
+	if (atomic_read(&nvdev->num_outstanding_recvs) > 0)
+		return false;
+
+	for (i = 0; i < nvdev->num_chn; i++) {
+		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
+
+		if (atomic_read(&nvchan->queue_sends) > 0)
+			return false;
+	}
+
+	return true;
+}
+
 static void rndis_filter_halt_device(struct rndis_device *dev)
 {
 	struct rndis_request *request;
@@ -933,9 +950,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
 	spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
 
 	/* Wait for all send completions */
-	wait_event(nvdev->wait_drain,
-		   atomic_read(&nvdev->num_outstanding_sends) == 0 &&
-		   atomic_read(&nvdev->num_outstanding_recvs) == 0);
+	wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
 
 	if (request)
 		put_rndis_request(dev, request);