Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dae2e9f4 authored by Pradeep A. Dalvi, committed by David S. Miller
Browse files

netdev: ethernet dev_alloc_skb to netdev_alloc_skb



Replaced deprecated dev_alloc_skb with netdev_alloc_skb in drivers/net/ethernet
  - Removed extra skb->dev = dev after netdev_alloc_skb

Signed-off-by: Pradeep A Dalvi <netdev@pradeepdalvi.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c4062dfc
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -150,7 +150,7 @@ static void netx_eth_receive(struct net_device *ndev)
	seg = (val & FIFO_PTR_SEGMENT_MASK) >> FIFO_PTR_SEGMENT_SHIFT;
	len = (val & FIFO_PTR_FRAMELEN_MASK) >> FIFO_PTR_FRAMELEN_SHIFT;

-	skb = dev_alloc_skb(len);
+	skb = netdev_alloc_skb(ndev, len);
	if (unlikely(skb == NULL)) {
		printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
			ndev->name);
+1 −1
Original line number Diff line number Diff line
@@ -735,7 +735,7 @@ static void netdev_rx(struct net_device *dev)

		if (status & RXDS_RXGD) {
			data = ether->rdesc->recv_buf[ether->cur_rx];
-			skb = dev_alloc_skb(length+2);
+			skb = netdev_alloc_skb(dev, length + 2);
			if (!skb) {
				dev_err(&pdev->dev, "get skb buffer error\n");
				ether->stats.rx_dropped++;
+4 −4
Original line number Diff line number Diff line
@@ -1815,7 +1815,7 @@ static int nv_alloc_rx(struct net_device *dev)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
@@ -1850,7 +1850,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
@@ -4993,9 +4993,9 @@ static int nv_loopback_test(struct net_device *dev)

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
-	tx_skb = dev_alloc_skb(pkt_len);
+	tx_skb = netdev_alloc_skb(dev, pkt_len);
	if (!tx_skb) {
-		netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
+		netdev_err(dev, "netdev_alloc_skb() failed during loopback test\n");
		ret = 0;
		goto out;
	}
+3 −5
Original line number Diff line number Diff line
@@ -1188,11 +1188,10 @@ static void hamachi_init_ring(struct net_device *dev)
	}
	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
+		struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
		hmp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
-		skb->dev = dev;         /* Mark as being used by this device. */
		skb_reserve(skb, 2); /* 16 byte align the IP header. */
                hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
			skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -1488,7 +1487,7 @@ static int hamachi_rx(struct net_device *dev)
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
-			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
#ifdef RX_CHECKSUM
				printk(KERN_ERR "%s: rx_copybreak non-zero "
				  "not good with RX_CHECKSUM\n", dev->name);
@@ -1591,12 +1590,11 @@ static int hamachi_rx(struct net_device *dev)
		entry = hmp->dirty_rx % RX_RING_SIZE;
		desc = &(hmp->rx_ring[entry]);
		if (hmp->rx_skbuff[entry] == NULL) {
-			struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
+			struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);

			hmp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
-			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
                	desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
				skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+3 −5
Original line number Diff line number Diff line
@@ -743,11 +743,10 @@ static int yellowfin_init_ring(struct net_device *dev)
	}

	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
+		struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
		yp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
-		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -1133,7 +1132,7 @@ static int yellowfin_rx(struct net_device *dev)
					PCI_DMA_FROMDEVICE);
				yp->rx_skbuff[entry] = NULL;
			} else {
-				skb = dev_alloc_skb(pkt_len + 2);
+				skb = netdev_alloc_skb(dev, pkt_len + 2);
				if (skb == NULL)
					break;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
@@ -1156,11 +1155,10 @@ static int yellowfin_rx(struct net_device *dev)
	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
		entry = yp->dirty_rx % RX_RING_SIZE;
		if (yp->rx_skbuff[entry] == NULL) {
-			struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
+			struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
			if (skb == NULL)
				break;				/* Better luck next round. */
			yp->rx_skbuff[entry] = skb;
-			skb->dev = dev;	/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
				skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
Loading