Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 264e6777 authored by David S. Miller
Browse files

Merge branch 'netvsc-enhancements'



Stephen Hemminger says:

====================
netvsc driver enhancements for net-next

Lots of little things in here: support for more minor ethtool controls,
negotiation of offload parameters with the host (based on FreeBSD), and
several cleanups.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents f2ceab0b 1130383c
Loading
Loading
Loading
Loading
+178 −38
Original line number Diff line number Diff line
@@ -34,6 +34,7 @@

#define NDIS_OBJECT_TYPE_RSS_CAPABILITIES 0x88
#define NDIS_OBJECT_TYPE_RSS_PARAMETERS 0x89
#define NDIS_OBJECT_TYPE_OFFLOAD	0xa7

#define NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2 2
#define NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2 2
@@ -118,6 +119,7 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */

/* Fwd declaration */
struct ndis_tcp_ip_checksum_info;
struct ndis_pkt_8021q_info;

/*
 * Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
@@ -135,8 +137,10 @@ struct hv_netvsc_packet {
	u8 page_buf_cnt;

	u16 q_idx;
	u32 send_buf_index;
	u16 total_packets;

	u32 total_bytes;
	u32 send_buf_index;
	u32 total_data_buflen;
};

@@ -155,6 +159,8 @@ enum rndis_device_state {
	RNDIS_DEV_DATAINITIALIZED,
};

#define NETVSC_HASH_KEYLEN 40

struct rndis_device {
	struct net_device *ndev;

@@ -165,14 +171,17 @@ struct rndis_device {
	spinlock_t request_lock;
	struct list_head req_list;

	unsigned char hw_mac_adr[ETH_ALEN];
	u8 hw_mac_adr[ETH_ALEN];
	u8 rss_key[NETVSC_HASH_KEYLEN];
	u16 ind_table[ITAB_NUM];
};


/* Interface */
struct rndis_message;
struct netvsc_device;
int netvsc_device_add(struct hv_device *device, void *additional_info);
int netvsc_device_add(struct hv_device *device,
		      const struct netvsc_device_info *info);
void netvsc_device_remove(struct hv_device *device);
int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet,
@@ -181,22 +190,25 @@ int netvsc_send(struct hv_device *device,
		struct sk_buff *skb);
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp);
int netvsc_recv_callback(struct hv_device *device_obj,
			struct hv_netvsc_packet *packet,
			void **data,
			struct ndis_tcp_ip_checksum_info *csum_info,
int netvsc_recv_callback(struct net_device *net,
			 struct vmbus_channel *channel,
			u16 vlan_tci);
			 void  *data, u32 len,
			 const struct ndis_tcp_ip_checksum_info *csum_info,
			 const struct ndis_pkt_8021q_info *vlan);
void netvsc_channel_cb(void *context);
int rndis_filter_open(struct netvsc_device *nvdev);
int rndis_filter_close(struct netvsc_device *nvdev);
int rndis_filter_device_add(struct hv_device *dev,
			void *additional_info);
void rndis_filter_device_remove(struct hv_device *dev);
int rndis_filter_receive(struct hv_device *dev,
			struct hv_netvsc_packet *pkt,
			void **data,
			struct vmbus_channel *channel);
			    struct netvsc_device_info *info);
void rndis_filter_device_remove(struct hv_device *dev,
				struct netvsc_device *nvdev);
int rndis_filter_set_rss_param(struct rndis_device *rdev,
			       const u8 *key, int num_queue);
int rndis_filter_receive(struct net_device *ndev,
			 struct netvsc_device *net_dev,
			 struct hv_device *dev,
			 struct vmbus_channel *channel,
			 void *data, u32 buflen);

int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
int rndis_filter_set_device_mac(struct net_device *ndev, char *mac);
@@ -622,6 +634,7 @@ struct nvsp_message {

#define VRSS_SEND_TAB_SIZE 16
#define VRSS_CHANNEL_MAX 64
#define VRSS_CHANNEL_DEFAULT 8

#define RNDIS_MAX_PKT_DEFAULT 8
#define RNDIS_PKT_ALIGN_DEFAULT 8
@@ -685,8 +698,7 @@ struct net_device_context {
	struct work_struct work;
	u32 msg_enable; /* debug level */

	struct netvsc_stats __percpu *tx_stats;
	struct netvsc_stats __percpu *rx_stats;
	u32 tx_checksum_mask;

	/* Ethtool settings */
	u8 duplex;
@@ -705,11 +717,21 @@ struct net_device_context {
	u32 vf_serial;
};

/* Per channel data */
struct netvsc_channel {
	struct vmbus_channel *channel;
	struct multi_send_data msd;
	struct multi_recv_comp mrc;
	atomic_t queue_sends;

	struct netvsc_stats tx_stats;
	struct netvsc_stats rx_stats;
};

/* Per netvsc device */
struct netvsc_device {
	u32 nvsp_version;

	atomic_t num_outstanding_sends;
	wait_queue_head_t wait_drain;
	bool destroy;

@@ -735,32 +757,25 @@ struct netvsc_device {

	struct nvsp_message revoke_packet;

	struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX];
	u32 send_table[VRSS_SEND_TAB_SIZE];
	u32 max_chn;
	u32 num_chn;
	spinlock_t sc_lock; /* Protects num_sc_offered variable */
	u32 num_sc_offered;
	atomic_t queue_sends[VRSS_CHANNEL_MAX];

	/* Holds rndis device info */
	void *extension;

	int ring_size;

	/* The primary channel callback buffer */
	unsigned char *cb_buffer;
	/* The sub channel callback buffer */
	unsigned char *sub_cb_buf;

	struct multi_send_data msd[VRSS_CHANNEL_MAX];
	u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
	u32 pkt_align; /* alignment bytes, e.g. 8 */

	struct multi_recv_comp mrc[VRSS_CHANNEL_MAX];
	atomic_t num_outstanding_recvs;

	atomic_t open_cnt;

	struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
};

static inline struct netvsc_device *
@@ -939,7 +954,7 @@ struct ndis_pkt_8021q_info {
	};
};

struct ndis_oject_header {
struct ndis_object_header {
	u8 type;
	u8 revision;
	u16 size;
@@ -947,6 +962,9 @@ struct ndis_oject_header {

#define NDIS_OBJECT_TYPE_DEFAULT	0x80
#define NDIS_OFFLOAD_PARAMETERS_REVISION_3 3
#define NDIS_OFFLOAD_PARAMETERS_REVISION_2 2
#define NDIS_OFFLOAD_PARAMETERS_REVISION_1 1

#define NDIS_OFFLOAD_PARAMETERS_NO_CHANGE 0
#define NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED 1
#define NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED  2
@@ -973,8 +991,135 @@ struct ndis_oject_header {
#define OID_TCP_CONNECTION_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020F /* query */
#define OID_OFFLOAD_ENCAPSULATION 0x0101010A /* set/query */

/*
 * OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES
 * ndis_type: NDIS_OBJTYPE_OFFLOAD
 */

#define	NDIS_OFFLOAD_ENCAP_NONE		0x0000
#define	NDIS_OFFLOAD_ENCAP_NULL		0x0001
#define	NDIS_OFFLOAD_ENCAP_8023		0x0002
#define	NDIS_OFFLOAD_ENCAP_8023PQ	0x0004
#define	NDIS_OFFLOAD_ENCAP_8023PQ_OOB	0x0008
#define	NDIS_OFFLOAD_ENCAP_RFC1483	0x0010

/*
 * Checksum offload hardware capabilities, reported by the host in the
 * OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES query (NDIS_OBJECT_TYPE_OFFLOAD).
 * One encapsulation/flag-word pair per direction and IP version.
 * Layout follows the FreeBSD-derived definitions — see commit message;
 * NOTE(review): field semantics beyond the names below should be confirmed
 * against the Microsoft NDIS documentation.
 */
struct ndis_csum_offload {
	u32	ip4_txenc;	/* supported encapsulation for IPv4 TX csum */
	u32	ip4_txcsum;	/* IPv4 TX checksum capability flags, below */
#define	NDIS_TXCSUM_CAP_IP4OPT		0x001
#define	NDIS_TXCSUM_CAP_TCP4OPT		0x004
#define	NDIS_TXCSUM_CAP_TCP4		0x010
#define	NDIS_TXCSUM_CAP_UDP4		0x040
#define	NDIS_TXCSUM_CAP_IP4		0x100

/* TCP over IPv4 with and without TCP options */
#define NDIS_TXCSUM_ALL_TCP4	(NDIS_TXCSUM_CAP_TCP4 | NDIS_TXCSUM_CAP_TCP4OPT)

	u32	ip4_rxenc;	/* supported encapsulation for IPv4 RX csum */
	u32	ip4_rxcsum;	/* IPv4 RX checksum capability flags, below */
#define	NDIS_RXCSUM_CAP_IP4OPT		0x001
#define	NDIS_RXCSUM_CAP_TCP4OPT		0x004
#define	NDIS_RXCSUM_CAP_TCP4		0x010
#define	NDIS_RXCSUM_CAP_UDP4		0x040
#define	NDIS_RXCSUM_CAP_IP4		0x100
	u32	ip6_txenc;	/* supported encapsulation for IPv6 TX csum */
	u32	ip6_txcsum;	/* IPv6 TX checksum capability flags, below */
#define	NDIS_TXCSUM_CAP_IP6EXT		0x001
#define	NDIS_TXCSUM_CAP_TCP6OPT		0x004
#define	NDIS_TXCSUM_CAP_TCP6		0x010
#define	NDIS_TXCSUM_CAP_UDP6		0x040
	u32	ip6_rxenc;	/* supported encapsulation for IPv6 RX csum */
	u32	ip6_rxcsum;	/* IPv6 RX checksum capability flags, below */
#define	NDIS_RXCSUM_CAP_IP6EXT		0x001
#define	NDIS_RXCSUM_CAP_TCP6OPT		0x004
#define	NDIS_RXCSUM_CAP_TCP6		0x010
#define	NDIS_RXCSUM_CAP_UDP6		0x040

/* TCP over IPv6, including TCP options and IPv6 extension headers */
#define NDIS_TXCSUM_ALL_TCP6	(NDIS_TXCSUM_CAP_TCP6 |		\
				 NDIS_TXCSUM_CAP_TCP6OPT |	\
				 NDIS_TXCSUM_CAP_IP6EXT)
};

/*
 * Large-send offload v1 (LSOv1) capabilities, part of the host's
 * NDIS offload capability report.
 */
struct ndis_lsov1_offload {
	u32	encap;		/* supported encapsulation */
	u32	maxsize;	/* maximum offloaded segment size */
	u32	minsegs;	/* minimum number of segments */
	u32	opts;		/* option flags; semantics not shown here */
};

/*
 * IPsec v1 offload capabilities, part of the host's NDIS offload
 * capability report. Present only for layout/size purposes here; this
 * driver does not appear to consume these fields in the visible code.
 */
struct ndis_ipsecv1_offload {
	u32	encap;
	u32	ah_esp;
	u32	xport_tun;
	u32	ip4_opts;
	u32	flags;
	u32	ip4_ah;
	u32	ip4_esp;
};

/*
 * Large-send offload v2 (LSOv2) capabilities, per IP version, part of
 * the host's NDIS offload capability report.
 */
struct ndis_lsov2_offload {
	u32	ip4_encap;	/* IPv4: supported encapsulation */
	u32	ip4_maxsz;	/* IPv4: maximum offloaded size */
	u32	ip4_minsg;	/* IPv4: minimum segment count */
	u32	ip6_encap;	/* IPv6: supported encapsulation */
	u32	ip6_maxsz;	/* IPv6: maximum offloaded size */
	u32	ip6_minsg;	/* IPv6: minimum segment count */
	u32	ip6_opts;	/* IPv6 option capability flags, below */
#define	NDIS_LSOV2_CAP_IP6EXT		0x001
#define	NDIS_LSOV2_CAP_TCP6OPT		0x004

/* full IPv6 LSOv2 support: extension headers and TCP options */
#define NDIS_LSOV2_CAP_IP6		(NDIS_LSOV2_CAP_IP6EXT | \
					 NDIS_LSOV2_CAP_TCP6OPT)
};

/*
 * IPsec v2 offload capabilities (NDIS >= 6.1), part of the host's NDIS
 * offload capability report. Kept for struct layout; not otherwise used
 * in the visible code.
 */
struct ndis_ipsecv2_offload {
	u32	encap;
	u16	ip6;
	u16	ip4opt;
	u16	ip6ext;
	u16	ah;
	u16	esp;
	u16	ah_esp;
	u16	xport;
	u16	tun;
	u16	xport_tun;
	u16	lso;
	u16	extseq;
	u32	udp_esp;
	u32	auth;
	u32	crypto;
	u32	sa_caps;
};

/*
 * Receive segment coalescing (RSC) capabilities per IP version
 * (NDIS >= 6.30).
 */
struct ndis_rsc_offload {
	u16	ip4;
	u16	ip6;
};

/* Encapsulated-packet task offload capabilities (e.g. GRE). */
struct ndis_encap_offload {
	u32	flags;		/* capability flags */
	u32	maxhdr;		/* maximum supported header size */
};

/*
 * NDIS_OFFLOAD: aggregate hardware offload capability report returned by
 * the host for OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES. The structure grew
 * across NDIS revisions, so the valid length depends on the negotiated
 * NDIS version — see the size macros below.
 */
struct ndis_offload {
	struct ndis_object_header	header;
	struct ndis_csum_offload	csum;
	struct ndis_lsov1_offload	lsov1;
	struct ndis_ipsecv1_offload	ipsecv1;
	struct ndis_lsov2_offload	lsov2;
	u32				flags;
	/* NDIS >= 6.1 */
	struct ndis_ipsecv2_offload	ipsecv2;
	/* NDIS >= 6.30 */
	struct ndis_rsc_offload		rsc;
	struct ndis_encap_offload	encap_gre;
};

/* full size, and truncated sizes valid for older NDIS revisions */
#define	NDIS_OFFLOAD_SIZE		sizeof(struct ndis_offload)
#define	NDIS_OFFLOAD_SIZE_6_0		offsetof(struct ndis_offload, ipsecv2)
#define	NDIS_OFFLOAD_SIZE_6_1		offsetof(struct ndis_offload, rsc)

struct ndis_offload_params {
	struct ndis_oject_header header;
	struct ndis_object_header header;
	u8 ip_v4_csum;
	u8 tcp_ip_v4_csum;
	u8 udp_ip_v4_csum;
@@ -1301,15 +1446,10 @@ struct rndis_message {
#define NDIS_PACKET_TYPE_FUNCTIONAL	0x00000400
#define NDIS_PACKET_TYPE_MAC_FRAME	0x00000800

#define INFO_IPV4       2
#define INFO_IPV6       4
#define INFO_TCP        2
#define INFO_UDP        4

#define TRANSPORT_INFO_NOT_IP   0
#define TRANSPORT_INFO_IPV4_TCP ((INFO_IPV4 << 16) | INFO_TCP)
#define TRANSPORT_INFO_IPV4_UDP ((INFO_IPV4 << 16) | INFO_UDP)
#define TRANSPORT_INFO_IPV6_TCP ((INFO_IPV6 << 16) | INFO_TCP)
#define TRANSPORT_INFO_IPV6_UDP ((INFO_IPV6 << 16) | INFO_UDP)
#define TRANSPORT_INFO_IPV4_TCP 0x01
#define TRANSPORT_INFO_IPV4_UDP 0x02
#define TRANSPORT_INFO_IPV6_TCP 0x10
#define TRANSPORT_INFO_IPV6_UDP 0x20

#endif /* _HYPERV_NET_H */
+116 −205
Original line number Diff line number Diff line
@@ -67,14 +67,8 @@ static struct netvsc_device *alloc_net_device(void)
	if (!net_device)
		return NULL;

	net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
	if (!net_device->cb_buffer) {
		kfree(net_device);
		return NULL;
	}

	net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX *
					 sizeof(struct recv_comp_data));
	net_device->chan_table[0].mrc.buf
		= vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
@@ -91,35 +85,28 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
	int i;

	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		vfree(nvdev->mrc[i].buf);
		vfree(nvdev->chan_table[i].mrc.buf);

	kfree(nvdev->cb_buffer);
	kfree(nvdev);
}

static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);

	if (net_device && net_device->destroy)
		net_device = NULL;
static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
				       u16 q_idx)
{
	const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];

	return net_device;
	return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
		atomic_read(&nvchan->queue_sends) == 0;
}

static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);

	if (!net_device)
		goto get_in_err;

	if (net_device->destroy &&
	    atomic_read(&net_device->num_outstanding_sends) == 0 &&
	    atomic_read(&net_device->num_outstanding_recvs) == 0)
	if (net_device && net_device->destroy)
		net_device = NULL;

get_in_err:
	return net_device;
}

@@ -584,7 +571,6 @@ void netvsc_device_remove(struct hv_device *device)
	vmbus_close(device->channel);

	/* Release all resources */
	vfree(net_device->sub_cb_buf);
	free_netvsc_device(net_device);
}

@@ -620,29 +606,35 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct vmbus_channel *channel = device->channel;
	int num_outstanding_sends;
	u16 q_idx = 0;
	int queue_sends;

	/* Notify the layer above us */
	if (likely(skb)) {
		struct hv_netvsc_packet *nvsc_packet
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = nvsc_packet->send_buf_index;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = nvsc_packet->q_idx;
		q_idx = packet->q_idx;
		channel = incoming_channel;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		dev_consume_skb_any(skb);
	}

	num_outstanding_sends =
		atomic_dec_return(&net_device->num_outstanding_sends);
	queue_sends = atomic_dec_return(&net_device->queue_sends[q_idx]);
	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (net_device->destroy && num_outstanding_sends == 0)
	if (net_device->destroy && queue_sends == 0)
		wake_up(&net_device->wait_drain);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
@@ -688,27 +680,15 @@ static void netvsc_send_completion(struct netvsc_device *net_device,

static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long index;
	u32 max_words = net_device->map_words;
	unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
	u32 section_cnt = net_device->send_section_cnt;
	int ret_val = NETVSC_INVALID_INDEX;
	int i;
	int prev_val;

	for (i = 0; i < max_words; i++) {
		if (!~(map_addr[i]))
			continue;
		index = ffz(map_addr[i]);
		prev_val = sync_test_and_set_bit(index, &map_addr[i]);
		if (prev_val)
			continue;
		if ((index + (i * BITS_PER_LONG)) >= section_cnt)
			break;
		ret_val = (index + (i * BITS_PER_LONG));
		break;
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->map_words) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}
	return ret_val;

	return NETVSC_INVALID_INDEX;
}

static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
@@ -765,9 +745,11 @@ static inline int netvsc_send_pkt(
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	u16 q_idx = packet->q_idx;
	struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
	struct netvsc_channel *nvchan
		= &net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	struct hv_page_buffer *pgbuf;
@@ -827,23 +809,14 @@ static inline int netvsc_send_pkt(
	}

	if (ret == 0) {
		atomic_inc(&net_device->num_outstanding_sends);
		atomic_inc(&net_device->queue_sends[q_idx]);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
		atomic_inc_return(&nvchan->queue_sends);

			if (atomic_read(&net_device->
				queue_sends[q_idx]) < 1)
				netif_tx_wake_queue(netdev_get_tx_queue(
						    ndev, q_idx));
		}
		if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
			netif_tx_stop_queue(txq);
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(netdev_get_tx_queue(
				    ndev, q_idx));
		if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
			netif_tx_wake_queue(netdev_get_tx_queue(
					    ndev, q_idx));
		netif_tx_stop_queue(txq);
		if (atomic_read(&nvchan->queue_sends) < 1) {
			netif_tx_wake_queue(txq);
			ret = -ENOSPC;
		}
	} else {
@@ -874,8 +847,7 @@ int netvsc_send(struct hv_device *device,
{
	struct netvsc_device *net_device;
	int ret = 0;
	struct vmbus_channel *out_channel;
	u16 q_idx = packet->q_idx;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
@@ -895,8 +867,7 @@ int netvsc_send(struct hv_device *device,
	if (!net_device->send_section_map)
		return -EAGAIN;

	out_channel = net_device->chn_table[q_idx];

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

@@ -908,9 +879,8 @@ int netvsc_send(struct hv_device *device,
		goto send_now;
	}

	msdp = &net_device->msd[q_idx];

	/* batch packets in send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

@@ -950,6 +920,11 @@ int netvsc_send(struct hv_device *device,
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

@@ -1011,8 +986,9 @@ static int netvsc_send_recv_completion(struct vmbus_channel *channel,
static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
					u32 *filled, u32 *avail)
{
	u32 first = nvdev->mrc[q_idx].first;
	u32 next = nvdev->mrc[q_idx].next;
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	u32 first = mrc->first;
	u32 next = mrc->next;

	*filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
		  next - first;
@@ -1024,26 +1000,26 @@ static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
							 *nvdev, u16 q_idx)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	u32 filled, avail;

	if (!nvdev->mrc[q_idx].buf)
	if (unlikely(!mrc->buf))
		return NULL;

	count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
	if (!filled)
		return NULL;

	return nvdev->mrc[q_idx].buf + nvdev->mrc[q_idx].first *
	       sizeof(struct recv_comp_data);
	return mrc->buf + mrc->first * sizeof(struct recv_comp_data);
}

/* Put the first filled slot back to available pool */
static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	int num_recv;

	nvdev->mrc[q_idx].first = (nvdev->mrc[q_idx].first + 1) %
				  NETVSC_RECVSLOT_MAX;
	mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX;

	num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);

@@ -1078,13 +1054,14 @@ static void netvsc_chk_recv_comp(struct netvsc_device *nvdev,
static inline struct recv_comp_data *get_recv_comp_slot(
	struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	u32 filled, avail, next;
	struct recv_comp_data *rcd;

	if (!nvdev->recv_section)
	if (unlikely(!nvdev->recv_section))
		return NULL;

	if (!nvdev->mrc[q_idx].buf)
	if (unlikely(!mrc->buf))
		return NULL;

	if (atomic_read(&nvdev->num_outstanding_recvs) >
@@ -1095,59 +1072,43 @@ static inline struct recv_comp_data *get_recv_comp_slot(
	if (!avail)
		return NULL;

	next = nvdev->mrc[q_idx].next;
	rcd = nvdev->mrc[q_idx].buf + next * sizeof(struct recv_comp_data);
	nvdev->mrc[q_idx].next = (next + 1) % NETVSC_RECVSLOT_MAX;
	next = mrc->next;
	rcd = mrc->buf + next * sizeof(struct recv_comp_data);
	mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX;

	atomic_inc(&nvdev->num_outstanding_recvs);

	return rcd;
}

static void netvsc_receive(struct netvsc_device *net_device,
			struct vmbus_channel *channel,
static void netvsc_receive(struct net_device *ndev,
		   struct netvsc_device *net_device,
		   struct net_device_context *net_device_ctx,
		   struct hv_device *device,
			struct vmpacket_descriptor *packet)
		   struct vmbus_channel *channel,
		   struct vmtransfer_page_packet_header *vmxferpage_packet,
		   struct nvsp_message *nvsp)
{
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet nv_pkt;
	struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
	char *recv_buf = net_device->recv_buf;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;
	struct net_device *ndev = hv_get_drvdata(device);
	void *data;
	int ret;
	struct recv_comp_data *rcd;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;

	/*
	 * All inbound packets other than send completion should be xfer page
	 * packet
	 */
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
		netdev_err(ndev, "Unknown packet type received - %d\n",
			   packet->type);
		return;
	}

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
			(packet->offset8 << 3));

	/* Make sure this is a valid nvsp packet */
	if (nvsp_packet->hdr.msg_type !=
	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
		netdev_err(ndev, "Unknown nvsp packet type received-"
			" %d\n", nvsp_packet->hdr.msg_type);
	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Unknown nvsp packet type received %u\n",
			  nvsp->hdr.msg_type);
		return;
	}

	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
		netdev_err(ndev, "Invalid xfer page set id - "
			   "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page set id - expecting %x got %x\n",
			  NETVSC_RECEIVE_BUFFER_ID,
			  vmxferpage_packet->xfer_pageset_id);
		return;
	}
@@ -1156,18 +1117,16 @@ static void netvsc_receive(struct netvsc_device *net_device,

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		/* Initialize the netvsc packet */
		data = (void *)((unsigned long)net_device->
			recv_buf + vmxferpage_packet->ranges[i].byte_offset);
		netvsc_packet->total_data_buflen =
					vmxferpage_packet->ranges[i].byte_count;
		void *data = recv_buf
			+ vmxferpage_packet->ranges[i].byte_offset;
		u32 buflen = vmxferpage_packet->ranges[i].byte_count;

		/* Pass it to the upper layer */
		status = rndis_filter_receive(device, netvsc_packet, &data,
					      channel);
		status = rndis_filter_receive(ndev, net_device, device,
					      channel, data, buflen);
	}

	if (!net_device->mrc[q_idx].buf) {
	if (!net_device->chan_table[q_idx].mrc.buf) {
		ret = netvsc_send_recv_completion(channel,
						  vmxferpage_packet->d.trans_id,
						  status);
@@ -1243,11 +1202,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
				   u64 request_id,
				   struct vmpacket_descriptor *desc)
{
	struct nvsp_message *nvmsg;
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	nvmsg = (struct nvsp_message *)((unsigned long)
		desc + (desc->offset8 << 3));
	struct nvsp_message *nvmsg
		= (struct nvsp_message *)((unsigned long)desc
					  + (desc->offset8 << 3));

	switch (desc->type) {
	case VM_PKT_COMP:
@@ -1255,7 +1213,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
		break;

	case VM_PKT_DATA_USING_XFER_PAGES:
		netvsc_receive(net_device, channel, device, desc);
		netvsc_receive(ndev, net_device, net_device_ctx,
			       device, channel,
			       (struct vmtransfer_page_packet_header *)desc,
			       nvmsg);
		break;

	case VM_PKT_DATA_INBAND:
@@ -1271,16 +1232,11 @@ static void netvsc_process_raw_pkt(struct hv_device *device,

void netvsc_channel_cb(void *context)
{
	int ret;
	struct vmbus_channel *channel = (struct vmbus_channel *)context;
	struct vmbus_channel *channel = context;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct hv_device *device;
	struct netvsc_device *net_device;
	u32 bytes_recvd;
	u64 request_id;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = NETVSC_PACKET_SIZE;
	struct net_device *ndev;
	bool need_to_commit = false;

@@ -1289,68 +1245,25 @@ void netvsc_channel_cb(void *context)
	else
		device = channel->device_obj;

	net_device = get_inbound_net_device(device);
	if (!net_device)
		return;
	ndev = hv_get_drvdata(device);
	buffer = get_per_channel_state(channel);

	do {
		desc = get_next_pkt_raw(channel);
		if (desc != NULL) {
			netvsc_process_raw_pkt(device,
					       channel,
					       net_device,
					       ndev,
					       desc->trans_id,
					       desc);

			put_pkt_raw(channel, desc);
			need_to_commit = true;
			continue;
		}
		if (need_to_commit) {
			need_to_commit = false;
			commit_rd_index(channel);
		}
	if (unlikely(!ndev))
		return;

		ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
					   &bytes_recvd, &request_id);
		if (ret == 0) {
			if (bytes_recvd > 0) {
				desc = (struct vmpacket_descriptor *)buffer;
				netvsc_process_raw_pkt(device,
						       channel,
						       net_device,
						       ndev,
						       request_id,
						       desc);
			} else {
				/*
				 * We are done for this pass.
				 */
				break;
			}
	net_device = net_device_to_netvsc_device(ndev);
	if (unlikely(net_device->destroy) &&
	    netvsc_channel_idle(net_device, q_idx))
		return;

		} else if (ret == -ENOBUFS) {
			if (bufferlen > NETVSC_PACKET_SIZE)
				kfree(buffer);
			/* Handle large packet */
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (buffer == NULL) {
				/* Try again next time around */
				netdev_err(ndev,
					   "unable to allocate buffer of size "
					   "(%d)!!\n", bytes_recvd);
				break;
			}
	while ((desc = get_next_pkt_raw(channel)) != NULL) {
		netvsc_process_raw_pkt(device, channel, net_device,
				       ndev, desc->trans_id, desc);

			bufferlen = bytes_recvd;
		put_pkt_raw(channel, desc);
		need_to_commit = true;
	}
	} while (1);

	if (bufferlen > NETVSC_PACKET_SIZE)
		kfree(buffer);
	if (need_to_commit)
		commit_rd_index(channel);

	netvsc_chk_recv_comp(net_device, channel, q_idx);
}
@@ -1359,11 +1272,11 @@ void netvsc_channel_cb(void *context)
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
int netvsc_device_add(struct hv_device *device, void *additional_info)
int netvsc_device_add(struct hv_device *device,
		      const struct netvsc_device_info *device_info)
{
	int i, ret = 0;
	int ring_size =
	((struct netvsc_device_info *)additional_info)->ring_size;
	int ring_size = device_info->ring_size;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -1374,8 +1287,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)

	net_device->ring_size = ring_size;

	set_per_channel_state(device->channel, net_device->cb_buffer);

	/* Open the channel */
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
@@ -1394,7 +1305,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
	 * opened.
	 */
	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		net_device->chn_table[i] = device->channel;
		net_device->chan_table[i].channel = device->channel;

	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
	 * populated.
+339 −230

File changed.

Preview size limit exceeded, changes collapsed.

+217 −121

File changed.

Preview size limit exceeded, changes collapsed.