Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2b3a9afc authored by David S. Miller
Browse files

Merge branch 'netvsc-small-cleanups'



Stephen Hemminger says:

====================
netvsc: small cleanups

These are all small optimizations found during development of later features.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents e073782a 592b4fe8
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -763,8 +763,7 @@ struct netvsc_device {

	refcount_t sc_offered;

	/* Holds rndis device info */
	void *extension;
	struct rndis_device *extension;

	int ring_size;

+15 −34
Original line number Diff line number Diff line
@@ -97,16 +97,6 @@ static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
	call_rcu(&nvdev->rcu, free_netvsc_device);
}

/* Look up the netvsc_device attached to this hv_device, returning NULL
 * when none is attached or when it is flagged for teardown (->destroy).
 * NOTE(review): this commit deletes the helper; callers are converted to
 * receive the netvsc_device pointer explicitly instead of re-resolving it.
 */
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);

	/* A device in the middle of destruction must not be handed out. */
	if (net_device && net_device->destroy)
		net_device = NULL;

	return net_device;
}

static void netvsc_destroy_buf(struct hv_device *device)
{
	struct nvsp_message *revoke_packet;
@@ -243,18 +233,15 @@ static void netvsc_destroy_buf(struct hv_device *device)
	kfree(net_device->send_section_map);
}

static int netvsc_init_buf(struct hv_device *device)
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device)
{
	int ret = 0;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	struct net_device *ndev;
	size_t map_words;
	int node;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = hv_get_drvdata(device);

	node = cpu_to_node(device->channel->target_cpu);
@@ -285,9 +272,7 @@ static int netvsc_init_buf(struct hv_device *device)

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
@@ -486,20 +471,15 @@ static int negotiate_nvsp_ver(struct hv_device *device,
	return ret;
}

static int netvsc_connect_vsp(struct hv_device *device)
static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;
	const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
	int i;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

@@ -549,7 +529,7 @@ static int netvsc_connect_vsp(struct hv_device *device)
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	ret = netvsc_init_buf(device);
	ret = netvsc_init_buf(device, net_device);

cleanup:
	return ret;
@@ -843,7 +823,7 @@ int netvsc_send(struct hv_device *device,
		struct hv_page_buffer **pb,
		struct sk_buff *skb)
{
	struct netvsc_device *net_device;
	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
@@ -854,15 +834,15 @@ int netvsc_send(struct hv_device *device,
	bool try_batch;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	net_device = get_outbound_net_device(device);
	if (!net_device)
	/* If device is rescinded, return error and packet will get dropped. */
	if (unlikely(net_device->destroy))
		return -ENODEV;

	/* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
	 * here before the negotiation with the host is finished and
	 * send_section_map may not be allocated yet.
	 */
	if (!net_device->send_section_map)
	if (unlikely(!net_device->send_section_map))
		return -EAGAIN;

	nvchan = &net_device->chan_table[packet->q_idx];
@@ -1349,7 +1329,7 @@ int netvsc_device_add(struct hv_device *device,
	rcu_assign_pointer(net_device_ctx->nvdev, net_device);

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	ret = netvsc_connect_vsp(device, net_device);
	if (ret != 0) {
		netdev_err(ndev,
			"unable to connect to NetVSP - %d\n", ret);
@@ -1368,4 +1348,5 @@ int netvsc_device_add(struct hv_device *device,
	free_netvsc_device(&net_device->rcu);

	return ret;

}
+13 −40
Original line number Diff line number Diff line
@@ -120,7 +120,7 @@ static int netvsc_close(struct net_device *net)
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	u32 aread, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);
@@ -141,15 +141,11 @@ static int netvsc_close(struct net_device *net)
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}
@@ -345,34 +341,14 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
	return slots_used;
}

/* Count the number of single-page buffer slots needed to describe all
 * paged fragments of @skb.  A fragment that starts at a nonzero offset
 * within its first page, or spans a page boundary, needs one slot per
 * page touched (PFN_UP of offset + size).
 * NOTE(review): this commit removes the exact count in favor of a
 * conservative upper bound (nr_frags per fragment) in netvsc_get_slots().
 */
static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
/* Estimate number of page buffers needed to transmit
 * Need at most 2 for RNDIS header plus skb body and fragments.
 */
static unsigned int netvsc_get_slots(const struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
	return PFN_UP(offset_in_page(skb->data) + skb_headlen(skb))
		+ skb_shinfo(skb)->nr_frags
		+ 2;
}

static u32 net_checksum_info(struct sk_buff *skb)
@@ -410,21 +386,18 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct hv_page_buffer *pb = page_buf;

	/* We will atmost need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	/* We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	num_data_pgs = netvsc_get_slots(skb);
	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		num_data_pgs = netvsc_get_slots(skb);
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;