Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 34458f2b authored by qctecmdr, committed by Gerrit (the friendly Code Review server)
Browse files

Merge "usb: rndis: Add ul aggregation stats info to debug files"

parents 80d7a8cc 3e1e0d09
Loading
Loading
Loading
Loading
+23 −5
Original line number Diff line number Diff line
@@ -70,10 +70,10 @@
 *   - MS-Windows drivers sometimes emit undocumented requests.
 */

/* Module parameters tuning RNDIS downlink (device-to-host) aggregation. */

/* When set, disables multi-packet (aggregated) downlink transfers entirely. */
static bool rndis_multipacket_dl_disable;
module_param(rndis_multipacket_dl_disable, bool, 0644);
MODULE_PARM_DESC(rndis_multipacket_dl_disable,
	"Disable RNDIS Multi-packet support in DownLink");
/* Upper bound on packets packed into a single downlink USB transfer. */
static unsigned int rndis_dl_max_pkt_per_xfer = 3;
module_param(rndis_dl_max_pkt_per_xfer, uint, 0644);
MODULE_PARM_DESC(rndis_dl_max_pkt_per_xfer,
	"Maximum packets per transfer for DL aggregation");

static unsigned int rndis_ul_max_pkt_per_xfer = 3;
module_param(rndis_ul_max_pkt_per_xfer, uint, 0644);
@@ -116,7 +116,7 @@ static unsigned int bitrate(struct usb_gadget *g)

#define RNDIS_STATUS_INTERVAL_MS	32
#define STATUS_BYTECOUNT		8	/* 8 bytes data */

#define MAX_TRANSFER_SIZE		0x800

/* interface descriptor: */

@@ -462,7 +462,9 @@ static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req)
static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rndis			*rndis = req->context;
	struct usb_composite_dev	*cdev = rndis->port.func.config->cdev;
	int				status;
	rndis_init_msg_type		*buf;

	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
//	spin_lock(&dev->lock);
@@ -470,6 +472,21 @@ static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
	if (status < 0)
		pr_err("RNDIS command error %d, %d/%d\n",
			status, req->actual, req->length);

	buf = (rndis_init_msg_type *)req->buf;

	if (buf->MessageType == cpu_to_le32(RNDIS_MSG_INIT)) {
		if (le32_to_cpu(buf->MaxTransferSize) > MAX_TRANSFER_SIZE)
			rndis->port.multi_pkt_xfer = 1;
		else
			rndis->port.multi_pkt_xfer = 0;
		DBG(cdev, "%s: MaxTransferSize: %d : Multi_pkt_txr: %s\n",
				__func__, buf->MaxTransferSize,
				rndis->port.multi_pkt_xfer ? "enabled" :
							    "disabled");
		if (rndis_dl_max_pkt_per_xfer <= 1)
			rndis->port.multi_pkt_xfer = 0;
	}
//	spin_unlock(&dev->lock);
}

@@ -1057,6 +1074,7 @@ static struct usb_function *rndis_alloc(struct usb_function_instance *fi)
	rndis->port.wrap = rndis_add_header;
	rndis->port.unwrap = rndis_rm_hdr;
	rndis->port.ul_max_pkts_per_xfer = rndis_ul_max_pkt_per_xfer;
	rndis->port.dl_max_pkts_per_xfer = rndis_dl_max_pkt_per_xfer;

	rndis->port.func.name = "rndis";
	/* descriptors are per-instance copies */
+17 −6
Original line number Diff line number Diff line
@@ -1135,7 +1135,7 @@ int rndis_rm_hdr(struct gether *port,
			struct sk_buff *skb,
			struct sk_buff_head *list)
{
	int num_pkts = 1;
	int num_pkts = 0;

	if (skb->len > rndis_ul_max_xfer_size_rcvd)
		rndis_ul_max_xfer_size_rcvd = skb->len;
@@ -1147,12 +1147,15 @@ int rndis_rm_hdr(struct gether *port,

		/* some rndis hosts send extra byte to avoid zlp, ignore it */
		if (skb->len == 1) {
			if (num_pkts > rndis_ul_max_pkt_per_xfer_rcvd)
				rndis_ul_max_pkt_per_xfer_rcvd = num_pkts;

			dev_kfree_skb_any(skb);
			return 0;
		}

		if (skb->len < sizeof(*hdr)) {
			pr_err("invalid rndis pkt: skblen:%u hdr_len:%lu",
			pr_err("invalid rndis pkt: skblen:%u hdr_len:%zu",
					skb->len, sizeof(*hdr));
			dev_kfree_skb_any(skb);
			return -EINVAL;
@@ -1179,6 +1182,8 @@ int rndis_rm_hdr(struct gether *port,
			return -EINVAL;
		}

		num_pkts++;

		skb_pull(skb, data_offset + 8);

		if (msg_len == skb->len) {
@@ -1196,8 +1201,6 @@ int rndis_rm_hdr(struct gether *port,
		skb_pull(skb, msg_len - sizeof(*hdr));
		skb_trim(skb2, data_len);
		skb_queue_tail(list, skb2);

		num_pkts++;
	}

	if (num_pkts > rndis_ul_max_pkt_per_xfer_rcvd)
@@ -1235,7 +1238,9 @@ static int rndis_proc_show(struct seq_file *m, void *v)
			 "speed     : %d\n"
			 "cable     : %s\n"
			 "vendor ID : 0x%08X\n"
			 "vendor    : %s\n",
			 "vendor    : %s\n"
			 "ul-max-xfer-size:%zu max-xfer-size-rcvd: %d\n"
			 "ul-max-pkts-per-xfer:%d max-pkts-per-xfer-rcvd:%d\n",
			 param->confignr, (param->used) ? "y" : "n",
			 ({ char *s = "?";
			 switch (param->state) {
@@ -1249,7 +1254,13 @@ static int rndis_proc_show(struct seq_file *m, void *v)
			 param->medium,
			 (param->media_state) ? 0 : param->speed*100,
			 (param->media_state) ? "disconnected" : "connected",
			 param->vendorID, param->vendorDescr);
			 param->vendorID, param->vendorDescr,
			 param->dev ? param->max_pkt_per_xfer *
				 (param->dev->mtu + sizeof(struct ethhdr) +
				 sizeof(struct rndis_packet_msg_type) + 22) : 0,
			 rndis_ul_max_xfer_size_rcvd,
			 param->max_pkt_per_xfer,
			 rndis_ul_max_pkt_per_xfer_rcvd);
	return 0;
}

+300 −72
Original line number Diff line number Diff line
@@ -68,7 +68,12 @@ struct eth_dev {

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;
	unsigned int		tx_qlen;
/* Minimum number of TX USB request queued to UDC */
#define TX_REQ_THRESHOLD	5
	int			no_tx_req_used;
	int			tx_skb_hold_count;
	u32			tx_req_bufsize;

	struct sk_buff_head	rx_frames;

@@ -76,6 +81,7 @@ struct eth_dev {

	unsigned		header_len;
	unsigned int		ul_max_pkts_per_xfer;
	unsigned int		dl_max_pkts_per_xfer;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
@@ -260,7 +266,7 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
	spin_unlock_irqrestore(&dev->lock, flags);

	DBG(dev, "%s: size: %d\n", __func__, size);
	DBG(dev, "%s: size: %zd\n", __func__, size);
	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
@@ -312,6 +318,10 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
				if (status == -EINVAL)
					dev->net->stats.rx_errors++;
				else if (status == -EOVERFLOW)
					dev->net->stats.rx_over_errors++;
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
@@ -401,15 +411,20 @@ static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;
	int	status = 0;

	spin_lock(&dev->req_lock);
	if (link->in_ep) {
		status = prealloc(&dev->tx_reqs, link->in_ep, n);
			if (status < 0)
				goto fail;
	}

	if (link->out_ep) {
		status = prealloc(&dev->rx_reqs, link->out_ep, n);
		if (status < 0)
			goto fail;
	}
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
@@ -449,6 +464,31 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static __be16 ether_ip_type_trans(struct sk_buff *skb,
	struct net_device *dev)
{
	__be16	protocol = 0;

	skb->dev = dev;

	switch (skb->data[0] & 0xf0) {
	case 0x40:
		protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		protocol = htons(ETH_P_IPV6);
		break;
	default:
		if ((skb->data[0] & 0x40) == 0x00)
			protocol = htons(ETH_P_MAP);
		else
			pr_debug_ratelimited("[%s] L3 protocol decode error: 0x%02x",
					dev->name, skb->data[0] & 0xf0);
	}

	return protocol;
}

static void process_rx_w(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, rx_work);
@@ -468,7 +508,12 @@ static void process_rx_w(struct work_struct *work)
			dev_kfree_skb_any(skb);
			continue;
		}

		if (test_bit(RMNET_MODE_LLP_IP, &dev->flags))
			skb->protocol = ether_ip_type_trans(skb, dev->net);
		else
			skb->protocol = eth_type_trans(skb, dev->net);

		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;

@@ -496,6 +541,16 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	struct net_device *net = dev->net;
	struct usb_request *new_req;
	struct usb_ep *in;
	int length;
	int retval;

	if (!dev->port_usb) {
		usb_ep_free_request(ep, req);
		return;
	}

	switch (req->status) {
	default:
@@ -504,19 +559,92 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		dev_kfree_skb_any(skb);

		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);
		if (!req->zero)
			dev->net->stats.tx_bytes += req->length-1;
		else
			dev->net->stats.tx_bytes += req->length;


	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	list_add_tail(&req->list, &dev->tx_reqs);

	if (dev->port_usb->multi_pkt_xfer) {
		dev->no_tx_req_used--;
		req->length = 0;
		in = dev->port_usb->in_ep;

		if (!list_empty(&dev->tx_reqs)) {
			new_req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
			list_del(&new_req->list);
			spin_unlock(&dev->req_lock);
			if (new_req->length > 0) {
				length = new_req->length;

				/* NCM requires no zlp if transfer is
				 * dwNtbInMaxSize
				 */
				if (dev->port_usb->is_fixed &&
					length == dev->port_usb->fixed_in_len &&
					(length % in->maxpacket) == 0)
					new_req->zero = 0;
				else
					new_req->zero = 1;

				/* use zlp framing on tx for strict CDC-Ether
				 * conformance, though any robust network rx
				 * path ignores extra padding. and some hardware
				 * doesn't like to write zlps.
				 */
				if (new_req->zero && !dev->zlp &&
						(length % in->maxpacket) == 0) {
					new_req->zero = 0;
					length++;
				}

				new_req->length = length;
				retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
				switch (retval) {
				default:
					DBG(dev, "tx queue err %d\n", retval);
					new_req->length = 0;
					spin_lock(&dev->req_lock);
					list_add_tail(&new_req->list,
							&dev->tx_reqs);
					spin_unlock(&dev->req_lock);
					break;
				case 0:
					spin_lock(&dev->req_lock);
					dev->no_tx_req_used++;
					spin_unlock(&dev->req_lock);
					//net->trans_start = jiffies;
					netif_trans_update(net);
				}
			} else {
				spin_lock(&dev->req_lock);
				/*
				 * Put the idle request at the back of the
				 * queue. The xmit function will put the
				 * unfinished request at the beginning of the
				 * queue.
				 */
				list_add_tail(&new_req->list, &dev->tx_reqs);
				spin_unlock(&dev->req_lock);
			}
		} else {
			spin_unlock(&dev->req_lock);
		}
	} else {
		spin_unlock(&dev->req_lock);
		dev_kfree_skb_any(skb);
	}

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}
@@ -526,6 +654,26 @@ static inline int is_promisc(u16 cdc_filter)
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

/*
 * alloc_tx_buffer - (re)allocate aggregation buffers for all queued TX reqs
 * @dev: gadget ethernet device; dev->tx_req_bufsize is computed here
 *
 * Sizes each request buffer to hold dl_max_pkts_per_xfer packets of
 * (MTU + Ethernet header + 44-byte rndis_packet_msg_type header + 22 bytes
 * of slack), then attaches a buffer to every request on dev->tx_reqs that
 * does not already own one.
 *
 * Fix over the original: a failed kmalloc() was silently ignored while
 * tx_req_bufsize stayed non-zero, so the caller's
 * "if (!dev->tx_req_bufsize) alloc_tx_buffer(dev);" guard never retried and
 * the TX path could later memcpy() into a NULL req->buf.  On any failure we
 * now reset tx_req_bufsize to 0 so allocation is retried on the next xmit.
 *
 * NOTE(review): requests whose buffers did allocate keep them; only the
 * retry trigger is reset.  The current transmit can still see a NULL
 * req->buf -- TODO plumb an error return through eth_start_xmit().
 * NOTE(review): dev->tx_reqs is walked without dev->req_lock, matching the
 * original -- confirm the caller serializes against tx_complete().
 */
static void alloc_tx_buffer(struct eth_dev *dev)
{
	struct list_head	*act;
	struct usb_request	*req;
	int			alloc_failed = 0;

	dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer *
				(dev->net->mtu
				+ sizeof(struct ethhdr)
				/* size of rndis_packet_msg_type */
				+ 44
				+ 22));

	list_for_each(act, &dev->tx_reqs) {
		req = container_of(act, struct usb_request, list);
		if (!req->buf) {
			req->buf = kmalloc(dev->tx_req_bufsize,
						GFP_ATOMIC);
			if (!req->buf)
				alloc_failed = 1;
		}
	}

	/* Force a retry on the next xmit if any buffer is still missing. */
	if (alloc_failed)
		dev->tx_req_bufsize = 0;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
@@ -536,11 +684,13 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;
	bool			multi_pkt_xfer = false;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
		multi_pkt_xfer = dev->port_usb->multi_pkt_xfer;
	} else {
		in = NULL;
		cdc_filter = 0;
@@ -552,6 +702,10 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
		return NETDEV_TX_OK;
	}

	/* Allocate memory for tx_reqs to support multi packet transfer */
	if (multi_pkt_xfer && !dev->tx_req_bufsize)
		alloc_tx_buffer(dev);

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;
@@ -615,9 +769,37 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
		}
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	dev->tx_skb_hold_count++;
	spin_unlock_irqrestore(&dev->req_lock, flags);

	if (multi_pkt_xfer) {
		memcpy(req->buf + req->length, skb->data, skb->len);
		req->length = req->length + skb->len;
		length = req->length;
		dev_kfree_skb_any(skb);

		spin_lock_irqsave(&dev->req_lock, flags);
		if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) {
			if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
				list_add(&req->list, &dev->tx_reqs);
				spin_unlock_irqrestore(&dev->req_lock, flags);
				goto success;
			}
		}

		dev->no_tx_req_used++;
		spin_unlock_irqrestore(&dev->req_lock, flags);

		spin_lock_irqsave(&dev->lock, flags);
		dev->tx_skb_hold_count = 0;
		spin_unlock_irqrestore(&dev->lock, flags);
	} else {
		length = skb->len;
		req->buf = skb->data;
		req->context = skb;
	}

	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
@@ -633,11 +815,27 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
		req->zero = 0;
		length++;
	}

	req->length = length;

	/* throttle highspeed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget) &&
			 (dev->gadget->speed == USB_SPEED_HIGH)) {
		dev->tx_qlen++;
		if (dev->tx_qlen == dev->qmult/2) {
			req->no_interrupt = 0;
			dev->tx_qlen = 0;
		} else {
			req->no_interrupt = 1;
		}
	} else {
		req->no_interrupt = 0;
	}

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
@@ -645,11 +843,13 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
		break;
	case 0:
		netif_trans_update(net);
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		if (!multi_pkt_xfer)
			dev_kfree_skb_any(skb);
		else
			req->length = 0;
drop:
		dev->net->stats.tx_dropped++;
multiframe:
@@ -659,6 +859,7 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
success:
	return NETDEV_TX_OK;
}

@@ -672,7 +873,7 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	dev->tx_qlen = 0;
	netif_wake_queue(dev->net);
}

@@ -726,18 +927,26 @@ static int eth_stop(struct net_device *net)
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		if (link->in_ep) {
			in = link->in_ep->desc;
		out = link->out_ep->desc;
			usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
			if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
				DBG(dev, "host still using in endpoints\n");
				link->in_ep->desc = in;
			link->out_ep->desc = out;
				usb_ep_enable(link->in_ep);
			}
		}

		if (link->out_ep) {
			out = link->out_ep->desc;
			usb_ep_disable(link->out_ep);
			if (netif_carrier_ok(net)) {
				DBG(dev, "host still using out endpoints\n");
				link->out_ep->desc = out;
				usb_ep_enable(link->out_ep);
			}
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
@@ -1046,6 +1255,7 @@ struct net_device *gether_setup_name_default(const char *netname)
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_WORK(&dev->rx_work, process_rx_w);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

@@ -1269,6 +1479,7 @@ struct net_device *gether_connect(struct gether *link)
	if (!dev)
		return ERR_PTR(-EINVAL);

	if (link->in_ep) {
		link->in_ep->driver_data = dev;
		result = usb_ep_enable(link->in_ep);
		if (result != 0) {
@@ -1276,7 +1487,9 @@ struct net_device *gether_connect(struct gether *link)
				link->in_ep->name, result);
			goto fail0;
		}
	}

	if (link->out_ep) {
		link->out_ep->driver_data = dev;
		result = usb_ep_enable(link->out_ep);
		if (result != 0) {
@@ -1284,6 +1497,7 @@ struct net_device *gether_connect(struct gether *link)
				link->out_ep->name, result);
			goto fail1;
		}
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget,
@@ -1298,8 +1512,12 @@ struct net_device *gether_connect(struct gether *link)
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;
		dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
		dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer;

		spin_lock(&dev->lock);
		dev->tx_skb_hold_count = 0;
		dev->no_tx_req_used = 0;
		dev->tx_req_bufsize = 0;
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
@@ -1316,8 +1534,10 @@ struct net_device *gether_connect(struct gether *link)

	/* on error, disable any endpoints  */
	} else {
		if (link->out_ep)
			(void) usb_ep_disable(link->out_ep);
fail1:
		if (link->in_ep)
			(void) usb_ep_disable(link->in_ep);
	}
fail0:
@@ -1359,23 +1579,30 @@ void gether_disconnect(struct gether *link)
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	if (link->in_ep) {
		usb_ep_disable(link->in_ep);
		spin_lock(&dev->req_lock);
		while (!list_empty(&dev->tx_reqs)) {
		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
			req = list_first_entry(&dev->tx_reqs,
						struct usb_request, list);
			list_del(&req->list);

			spin_unlock(&dev->req_lock);
			if (link->multi_pkt_xfer)
				kfree(req->buf);
			usb_ep_free_request(link->in_ep, req);
			spin_lock(&dev->req_lock);
		}
		spin_unlock(&dev->req_lock);
		link->in_ep->desc = NULL;
	}

	if (link->out_ep) {
		usb_ep_disable(link->out_ep);
		spin_lock(&dev->req_lock);
		while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
			req = list_first_entry(&dev->rx_reqs,
						struct usb_request, list);
			list_del(&req->list);

			spin_unlock(&dev->req_lock);
@@ -1390,6 +1617,7 @@ void gether_disconnect(struct gether *link)
		spin_unlock(&dev->rx_frames.lock);

		link->out_ep->desc = NULL;
	}

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
+1 −2
Original line number Diff line number Diff line
@@ -74,8 +74,7 @@ struct gether {
	u32				fixed_out_len;
	u32				fixed_in_len;
	unsigned int			ul_max_pkts_per_xfer;
/* Max number of SKB packets to be used to create Multi Packet RNDIS */
#define TX_SKB_HOLD_THRESHOLD		3
	unsigned int			dl_max_pkts_per_xfer;
	bool				multi_pkt_xfer;
	bool				supports_multi_frame;
	struct sk_buff			*(*wrap)(struct gether *port,