Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cad5c197 authored by Stephen Hemminger, committed by David S. Miller
Browse files

netvsc: keep track of some non-fatal overload conditions



Add ethtool statistics for the case where the send chimney buffer is
exhausted and the driver has to fall back to doing scatter/gather
send. Also, add a statistic for the case where the ring buffer is full and
receive completions are delayed.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8b532797
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -680,6 +680,8 @@ struct netvsc_ethtool_stats {
	unsigned long tx_no_space;
	unsigned long tx_too_big;
	unsigned long tx_busy;
	unsigned long tx_send_full;
	unsigned long rx_comp_busy;
};

struct netvsc_vf_pcpu_stats {
+13 −6
Original line number Diff line number Diff line
@@ -883,7 +883,9 @@ int netvsc_send(struct net_device_context *ndev_ctx,
	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
			++ndev_ctx->eth_stats.tx_send_full;
		} else {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
@@ -949,9 +951,10 @@ int netvsc_send(struct net_device_context *ndev_ctx,
}

/* Send pending recv completions */
static int send_recv_completions(struct netvsc_channel *nvchan)
static int send_recv_completions(struct net_device *ndev,
				 struct netvsc_device *nvdev,
				 struct netvsc_channel *nvchan)
{
	struct netvsc_device *nvdev = nvchan->net_device;
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_msg {
		struct nvsp_message_header hdr;
@@ -969,8 +972,12 @@ static int send_recv_completions(struct netvsc_channel *nvchan)
		msg.status = rcd->status;
		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
				       rcd->tid, VM_PKT_COMP, 0);
		if (unlikely(ret))
		if (unlikely(ret)) {
			struct net_device_context *ndev_ctx = netdev_priv(ndev);

			++ndev_ctx->eth_stats.rx_comp_busy;
			return ret;
		}

		if (++mrc->first == nvdev->recv_completion_cnt)
			mrc->first = 0;
@@ -1011,7 +1018,7 @@ static void enq_receive_complete(struct net_device *ndev,
	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);

	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
		send_recv_completions(nvchan);
		send_recv_completions(ndev, nvdev, nvchan);
		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
	}

@@ -1194,7 +1201,7 @@ int netvsc_poll(struct napi_struct *napi, int budget)
	 * then re-enable host interrupts
	 *     and reschedule if ring is not empty.
	 */
	if (send_recv_completions(nvchan) == 0 &&
	if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
	    work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    hv_end_read(&channel->inbound)) {
+2 −0
Original line number Diff line number Diff line
@@ -1112,6 +1112,8 @@ static const struct {
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },