Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7598b349 authored by Zhu Yanjun, committed by David S. Miller
Browse files

forcedeth: replace pci_map_single with dma_map_single functions



The pci_map_single family of functions is obsolete, so replace
them with the equivalent dma_map_single functions.

Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5416953f
Loading
Loading
Loading
Loading
+36 −34
Original line number Diff line number Diff line
@@ -1813,11 +1813,11 @@ static int nv_alloc_rx(struct net_device *dev)
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(np->pci_dev,
							     DMA_FROM_DEVICE);
			if (dma_mapping_error(&np->pci_dev->dev,
					      np->put_rx_ctx->dma)) {
				kfree_skb(skb);
				goto packet_dropped;
@@ -1854,11 +1854,11 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(np->pci_dev,
							     DMA_FROM_DEVICE);
			if (dma_mapping_error(&np->pci_dev->dev,
					      np->put_rx_ctx->dma)) {
				kfree_skb(skb);
				goto packet_dropped;
@@ -1977,9 +1977,9 @@ static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
	if (tx_skb->dma) {
		if (tx_skb->dma_single)
			pci_unmap_single(np->pci_dev, tx_skb->dma,
			dma_unmap_single(&np->pci_dev->dev, tx_skb->dma,
					 tx_skb->dma_len,
					 PCI_DMA_TODEVICE);
					 DMA_TO_DEVICE);
		else
			pci_unmap_page(np->pci_dev, tx_skb->dma,
				       tx_skb->dma_len,
@@ -2047,10 +2047,10 @@ static void nv_drain_rx(struct net_device *dev)
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
			dma_unmap_single(&np->pci_dev->dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					 np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
					 DMA_FROM_DEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
@@ -2224,9 +2224,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(np->pci_dev,
		np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
						     skb->data + offset, bcnt,
						     DMA_TO_DEVICE);
		if (dma_mapping_error(&np->pci_dev->dev,
				      np->put_tx_ctx->dma)) {
			/* on DMA mapping error - drop the packet */
			dev_kfree_skb_any(skb);
@@ -2373,9 +2374,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(np->pci_dev,
		np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
						     skb->data + offset, bcnt,
						     DMA_TO_DEVICE);
		if (dma_mapping_error(&np->pci_dev->dev,
				      np->put_tx_ctx->dma)) {
			/* on DMA mapping error - drop the packet */
			dev_kfree_skb_any(skb);
@@ -2810,9 +2812,9 @@ static int nv_rx_process(struct net_device *dev, int limit)
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
		dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				PCI_DMA_FROMDEVICE);
				 DMA_FROM_DEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

@@ -2916,9 +2918,9 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
		dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				PCI_DMA_FROMDEVICE);
				 DMA_FROM_DEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

@@ -5070,10 +5072,10 @@ static int nv_loopback_test(struct net_device *dev)
		ret = 0;
		goto out;
	}
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
	test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data,
				       skb_tailroom(tx_skb),
				       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(np->pci_dev,
				       DMA_FROM_DEVICE);
	if (dma_mapping_error(&np->pci_dev->dev,
			      test_dma_addr)) {
		dev_kfree_skb_any(tx_skb);
		goto out;
@@ -5129,9 +5131,9 @@ static int nv_loopback_test(struct net_device *dev)
		}
	}

	pci_unmap_single(np->pci_dev, test_dma_addr,
	dma_unmap_single(&np->pci_dev->dev, test_dma_addr,
			 (skb_end_pointer(tx_skb) - tx_skb->data),
		       PCI_DMA_TODEVICE);
			 DMA_TO_DEVICE);
	dev_kfree_skb_any(tx_skb);
 out:
	/* stop engines */