Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 56e3b9df authored by FUJITA Tomonori's avatar FUJITA Tomonori Committed by David S. Miller
Browse files

cxgb3: use the DMA state API instead of the pci equivalents

This replaces the PCI DMA state API (include/linux/pci-dma.h) with the
DMA equivalents, since the PCI DMA state API will be obsolete.

No functional change.

For further information about the background:

http://marc.info/?l=linux-netdev&m=127037540020276&w=2



Signed-off-by: default avatarFUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Divy Le Ray <divy@chelsio.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 094f92a6
Loading
Loading
Loading
Loading
+10 −10
Original line number Original line Diff line number Diff line
@@ -118,7 +118,7 @@ struct rx_sw_desc { /* SW state per Rx descriptor */
		struct sk_buff *skb;
		struct sk_buff *skb;
		struct fl_pg_chunk pg_chunk;
		struct fl_pg_chunk pg_chunk;
	};
	};
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
};
};


struct rsp_desc {		/* response queue descriptor */
struct rsp_desc {		/* response queue descriptor */
@@ -208,7 +208,7 @@ static inline int need_skb_unmap(void)
	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
	 */
	 */
	struct dummy {
	struct dummy {
		DECLARE_PCI_UNMAP_ADDR(addr);
		DEFINE_DMA_UNMAP_ADDR(addr);
	};
	};


	return sizeof(struct dummy) != 0;
	return sizeof(struct dummy) != 0;
@@ -363,7 +363,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
		put_page(d->pg_chunk.page);
		put_page(d->pg_chunk.page);
		d->pg_chunk.page = NULL;
		d->pg_chunk.page = NULL;
	} else {
	} else {
		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
		pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);
				 q->buf_size, PCI_DMA_FROMDEVICE);
		kfree_skb(d->skb);
		kfree_skb(d->skb);
		d->skb = NULL;
		d->skb = NULL;
@@ -419,7 +419,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
		return -ENOMEM;
		return -ENOMEM;


	pci_unmap_addr_set(sd, dma_addr, mapping);
	dma_unmap_addr_set(sd, dma_addr, mapping);


	d->addr_lo = cpu_to_be32(mapping);
	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
@@ -515,7 +515,7 @@ nomem: q->alloc_failed++;
				break;
				break;
			}
			}
			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
			pci_unmap_addr_set(sd, dma_addr, mapping);
			dma_unmap_addr_set(sd, dma_addr, mapping);


			add_one_rx_chunk(mapping, d, q->gen);
			add_one_rx_chunk(mapping, d, q->gen);
			pci_dma_sync_single_for_device(adap->pdev, mapping,
			pci_dma_sync_single_for_device(adap->pdev, mapping,
@@ -791,11 +791,11 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
		if (likely(skb != NULL)) {
		if (likely(skb != NULL)) {
			__skb_put(skb, len);
			__skb_put(skb, len);
			pci_dma_sync_single_for_cpu(adap->pdev,
			pci_dma_sync_single_for_cpu(adap->pdev,
					    pci_unmap_addr(sd, dma_addr), len,
					    dma_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
					    PCI_DMA_FROMDEVICE);
			memcpy(skb->data, sd->skb->data, len);
			memcpy(skb->data, sd->skb->data, len);
			pci_dma_sync_single_for_device(adap->pdev,
			pci_dma_sync_single_for_device(adap->pdev,
					    pci_unmap_addr(sd, dma_addr), len,
					    dma_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
		} else if (!drop_thres)
			goto use_orig_buf;
			goto use_orig_buf;
@@ -810,7 +810,7 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
		goto recycle;
		goto recycle;


use_orig_buf:
use_orig_buf:
	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
	pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	skb = sd->skb;
	skb = sd->skb;
	skb_put(skb, len);
	skb_put(skb, len);
@@ -843,7 +843,7 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
	struct sk_buff *newskb, *skb;
	struct sk_buff *newskb, *skb;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];


	dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
	dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);


	newskb = skb = q->pg_skb;
	newskb = skb = q->pg_skb;
	if (!skb && (len <= SGE_RX_COPY_THRES)) {
	if (!skb && (len <= SGE_RX_COPY_THRES)) {
@@ -2097,7 +2097,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
	fl->credits--;
	fl->credits--;


	pci_dma_sync_single_for_cpu(adap->pdev,
	pci_dma_sync_single_for_cpu(adap->pdev,
				    pci_unmap_addr(sd, dma_addr),
				    dma_unmap_addr(sd, dma_addr),
				    fl->buf_size - SGE_PG_RSVD,
				    fl->buf_size - SGE_PG_RSVD,
				    PCI_DMA_FROMDEVICE);
				    PCI_DMA_FROMDEVICE);