
Commit 15a863bf authored by Stephen Hemminger, committed by David S. Miller

netvsc: implement NAPI



Use NAPI (softirq) to handle receive packets and send completions.
Previously this was handled by a tasklet.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
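
For context, the conversion follows the standard Linux NAPI flow: the interrupt path (here the VMBus channel callback) masks further host notifications and schedules NAPI, and a poll routine later drains the ring in net softirq context under a budget. Below is a minimal sketch of that flow using the 4.x-era NAPI API that this diff also uses; struct mydev, mydev_irq() and mydev_rx_one() are hypothetical stand-ins, not netvsc code.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct mydev {
	struct net_device *ndev;
	struct napi_struct napi;
};

/* Hypothetical stand-in: a real driver would pop one packet from its
 * rx ring and hand it to the stack (e.g. via napi_gro_receive()).
 */
static bool mydev_rx_one(struct mydev *md)
{
	return false;	/* nothing pending in this skeleton */
}

/* Interrupt (or channel callback): mask the source, defer work to NAPI. */
static irqreturn_t mydev_irq(int irq, void *data)
{
	struct mydev *md = data;

	/* device-specific: disable further rx notifications here */
	napi_schedule(&md->napi);
	return IRQ_HANDLED;
}

/* Poll: runs in net softirq, consumes at most @budget packets. */
static int mydev_poll(struct napi_struct *napi, int budget)
{
	struct mydev *md = container_of(napi, struct mydev, napi);
	int work_done = 0;

	while (work_done < budget && mydev_rx_one(md))
		work_done++;

	/* Re-enable notifications only once the ring is drained. */
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* device-specific: re-enable rx notifications here */
	}

	return work_done;
}

static void mydev_napi_init(struct mydev *md)
{
	netif_napi_add(md->ndev, &md->napi, mydev_poll, NAPI_POLL_WEIGHT);
	napi_enable(&md->napi);
}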
parent f3dd3f47
drivers/net/hyperv/hyperv_net.h +2 −0
@@ -196,6 +196,7 @@ int netvsc_recv_callback(struct net_device *net,
			 const struct ndis_tcp_ip_checksum_info *csum_info,
			 const struct ndis_pkt_8021q_info *vlan);
void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
int rndis_filter_open(struct netvsc_device *nvdev);
int rndis_filter_close(struct netvsc_device *nvdev);
int rndis_filter_device_add(struct hv_device *dev,
@@ -720,6 +721,7 @@ struct net_device_context {
/* Per channel data */
struct netvsc_channel {
	struct vmbus_channel *channel;
	struct napi_struct napi;
	struct multi_send_data msd;
	struct multi_recv_comp mrc;
	atomic_t queue_sends;
drivers/net/hyperv/netvsc.c +98 −42
@@ -556,6 +556,7 @@ void netvsc_device_remove(struct hv_device *device)
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device = net_device_ctx->nvdev;
	int i;

	netvsc_disconnect_vsp(device);

@@ -570,6 +571,9 @@ void netvsc_device_remove(struct hv_device *device)
	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		napi_disable(&net_device->chan_table[0].napi);

	/* Release all resources */
	free_netvsc_device(net_device);
}
@@ -1063,7 +1067,7 @@ static inline struct recv_comp_data *get_recv_comp_slot(
	return rcd;
}

static void netvsc_receive(struct net_device *ndev,
static int netvsc_receive(struct net_device *ndev,
		   struct netvsc_device *net_device,
		   struct net_device_context *net_device_ctx,
		   struct hv_device *device,
@@ -1073,20 +1077,19 @@ static void netvsc_receive(struct net_device *ndev,
{
	const struct vmtransfer_page_packet_header *vmxferpage_packet
		= container_of(desc, const struct vmtransfer_page_packet_header, d);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	char *recv_buf = net_device->recv_buf;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;
	int ret;
	struct recv_comp_data *rcd;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;

	/* Make sure this is a valid nvsp packet */
	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Unknown nvsp packet type received %u\n",
			  nvsp->hdr.msg_type);
		return;
		return 0;
	}

	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
@@ -1094,7 +1097,7 @@ static void netvsc_receive(struct net_device *ndev,
			  "Invalid xfer page set id - expecting %x got %x\n",
			  NETVSC_RECEIVE_BUFFER_ID,
			  vmxferpage_packet->xfer_pageset_id);
		return;
		return 0;
	}

	count = vmxferpage_packet->range_cnt;
@@ -1110,26 +1113,26 @@ static void netvsc_receive(struct net_device *ndev,
					      channel, data, buflen);
	}

	if (!net_device->chan_table[q_idx].mrc.buf) {
	if (net_device->chan_table[q_idx].mrc.buf) {
		struct recv_comp_data *rcd;

		rcd = get_recv_comp_slot(net_device, channel, q_idx);
		if (rcd) {
			rcd->tid = vmxferpage_packet->d.trans_id;
			rcd->status = status;
		} else {
			netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
				   q_idx, vmxferpage_packet->d.trans_id);
		}
	} else {
		ret = netvsc_send_recv_completion(channel,
						  vmxferpage_packet->d.trans_id,
						  status);
		if (ret)
			netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n",
				   q_idx, vmxferpage_packet->d.trans_id, ret);
		return;
	}

	rcd = get_recv_comp_slot(net_device, channel, q_idx);

	if (!rcd) {
		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
			   q_idx, vmxferpage_packet->d.trans_id);
		return;
	}

	rcd->tid = vmxferpage_packet->d.trans_id;
	rcd->status = status;
	return count;
}

static void netvsc_send_table(struct hv_device *hdev,
@@ -1179,7 +1182,7 @@ static inline void netvsc_receive_inband(struct hv_device *hdev,
	}
}

static void netvsc_process_raw_pkt(struct hv_device *device,
static int netvsc_process_raw_pkt(struct hv_device *device,
				  struct vmbus_channel *channel,
				  struct netvsc_device *net_device,
				  struct net_device *ndev,
@@ -1195,7 +1198,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
		break;

	case VM_PKT_DATA_USING_XFER_PAGES:
		netvsc_receive(ndev, net_device, net_device_ctx,
		return netvsc_receive(ndev, net_device, net_device_ctx,
				      device, channel, desc, nvmsg);
		break;

@@ -1208,22 +1211,64 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
			   desc->type, request_id);
		break;
	}

	return 0;
}

static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
{
	struct vmbus_channel *primary = channel->primary_channel;

	return primary ? primary->device_obj : channel->device_obj;
}

int netvsc_poll(struct napi_struct *napi, int budget)
{
	struct netvsc_channel *nvchan
		= container_of(napi, struct netvsc_channel, napi);
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_device *device = netvsc_channel_to_device(channel);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct net_device *ndev = hv_get_drvdata(device);
	struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
	const struct vmpacket_descriptor *desc;
	int work_done = 0;

	desc = hv_pkt_iter_first(channel);
	while (desc) {
		int count;

		count = netvsc_process_raw_pkt(device, channel, net_device,
					       ndev, desc->trans_id, desc);
		work_done += count;
		desc = __hv_pkt_iter_next(channel, desc);

		/* If receive packet budget is exhausted, reschedule */
		if (work_done >= budget) {
			work_done = budget;
			break;
		}
	}
	hv_pkt_iter_close(channel);

	/* If ring is empty and NAPI is not doing polling */
	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    hv_end_read(&channel->inbound) != 0)
		napi_reschedule(napi);

	netvsc_chk_recv_comp(net_device, channel, q_idx);
	return work_done;
}

void netvsc_channel_cb(void *context)
{
	struct vmbus_channel *channel = context;
	struct hv_device *device = netvsc_channel_to_device(channel);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct hv_device *device;
	struct netvsc_device *net_device;
	struct vmpacket_descriptor *desc;
	struct net_device *ndev;

	if (channel->primary_channel != NULL)
		device = channel->primary_channel->device_obj;
	else
		device = channel->device_obj;

	ndev = hv_get_drvdata(device);
	if (unlikely(!ndev))
		return;
@@ -1233,13 +1278,9 @@ void netvsc_channel_cb(void *context)
	    netvsc_channel_idle(net_device, q_idx))
		return;

	foreach_vmbus_pkt(desc, channel) {
		netvsc_process_raw_pkt(device, channel, net_device,
				       ndev, desc->trans_id, desc);

	}

	netvsc_chk_recv_comp(net_device, channel, q_idx);
	/* disable interrupts from host */
	hv_begin_read(&channel->inbound);
	napi_schedule(&net_device->chan_table[q_idx].napi);
}

/*
@@ -1261,6 +1302,11 @@ int netvsc_device_add(struct hv_device *device,

	net_device->ring_size = ring_size;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via Net softirq, not the channel handling
	 */
	set_channel_read_mode(device->channel, HV_CALL_ISR);

	/* Open the channel */
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
@@ -1278,8 +1324,16 @@ int netvsc_device_add(struct hv_device *device,
	 * chn_table with the default channel to use it before subchannels are
	 * opened.
	 */
	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		net_device->chan_table[i].channel = device->channel;
	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->channel = device->channel;
		netif_napi_add(ndev, &nvchan->napi,
			       netvsc_poll, NAPI_POLL_WEIGHT);
	}

	/* Enable NAPI handler for init callbacks */
	napi_enable(&net_device->chan_table[0].napi);

	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
	 * populated.
@@ -1299,6 +1353,8 @@ int netvsc_device_add(struct hv_device *device,
	return ret;

close:
	napi_disable(&net_device->chan_table[0].napi);

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);
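
The completion logic in netvsc_poll() above follows the usual NAPI idiom: if the budget is exhausted, the handler stays in polled mode and returns budget; otherwise it calls napi_complete_done(), re-arms host interrupts (hv_end_read() in this diff), and reschedules if data raced in meanwhile. A rough sketch of that idiom, where ring_has_data() is a hypothetical stand-in for the driver-specific "more data?" check:

#include <linux/netdevice.h>

/* Hypothetical stand-in for the driver-specific check that re-arms
 * interrupts and reports whether data arrived in the meantime.
 */
static bool ring_has_data(void *ring)
{
	return false;
}

static int finish_poll(struct napi_struct *napi, void *ring,
		       int work_done, int budget)
{
	/* Budget exhausted: stay in polled mode; the softirq polls again. */
	if (work_done >= budget)
		return budget;

	/* Under budget: leave polled mode, then close the race against
	 * data that slipped in while notifications were still masked.
	 */
	if (napi_complete_done(napi, work_done) && ring_has_data(ring))
		napi_reschedule(napi);

	return work_done;
}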

drivers/net/hyperv/netvsc_drv.c +0 −5
@@ -690,11 +690,6 @@ int netvsc_recv_callback(struct net_device *net,
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_receive_skb(skb);
	rcu_read_unlock();

drivers/net/hyperv/rndis_filter.c +2 −0
@@ -1012,6 +1012,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
	if (ret == 0)
		nvscdev->chan_table[chn_index].channel = new_sc;

	napi_enable(&nvscdev->chan_table[chn_index].napi);

	spin_lock_irqsave(&nvscdev->sc_lock, flags);
	nvscdev->num_sc_offered--;
	spin_unlock_irqrestore(&nvscdev->sc_lock, flags);