drivers/usb/gadget/function/u_ether.c | 14 insertions(+), 1 deletion(-)

--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -573,6 +573,11 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
 		switch (retval) {
 		default:
 			DBG(dev, "tx queue err %d\n", retval);
+			new_req->length = 0;
+			spin_lock(&dev->req_lock);
+			list_add_tail(&new_req->list, &dev->tx_reqs);
+			spin_unlock(&dev->req_lock);
 			break;
 		case 0:
 			spin_lock(&dev->req_lock);
@@ -582,7 +587,13 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
 			}
 		} else {
 			spin_lock(&dev->req_lock);
-			list_add(&new_req->list, &dev->tx_reqs);
+			/*
+			 * Put the idle request at the back of the
+			 * queue. The xmit function will put the
+			 * unfinished request at the beginning of the
+			 * queue.
+			 */
+			list_add_tail(&new_req->list, &dev->tx_reqs);
 			spin_unlock(&dev->req_lock);
 		}
 	} else {
@@ -796,6 +807,8 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
 	if (retval) {
 		if (!multi_pkt_xfer)
 			dev_kfree_skb_any(skb);
+		else
+			req->length = 0;
 drop:
 		dev->net->stats.tx_dropped++;
 multiframe: