Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ede58ef2 authored by chas williams - CONTRACTOR, committed by David S. Miller
Browse files

atm: remove deprecated use of pci api

parent abee1cef
Loading
Loading
Loading
Loading
+19 −14
Original line number Diff line number Diff line
@@ -354,9 +354,9 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
	eni_vcc = ENI_VCC(vcc);
	paddr = 0; /* GCC, shut up */
	if (skb) {
		paddr = pci_map_single(eni_dev->pci_dev,skb->data,skb->len,
		    PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(eni_dev->pci_dev, paddr))
		paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(&eni_dev->pci_dev->dev, paddr))
			goto dma_map_error;
		ENI_PRV_PADDR(skb) = paddr;
		if (paddr & 3)
@@ -481,8 +481,8 @@ rx_enqueued++;

trouble:
	if (paddr)
		pci_unmap_single(eni_dev->pci_dev,paddr,skb->len,
		    PCI_DMA_FROMDEVICE);
		dma_unmap_single(&eni_dev->pci_dev->dev,paddr,skb->len,
				 DMA_FROM_DEVICE);
dma_map_error:
	if (skb) dev_kfree_skb_irq(skb);
	return -1;
@@ -758,8 +758,8 @@ rx_dequeued++;
		}
		eni_vcc->rxing--;
		eni_vcc->rx_pos = ENI_PRV_POS(skb) & (eni_vcc->words-1);
		pci_unmap_single(eni_dev->pci_dev,ENI_PRV_PADDR(skb),skb->len,
		    PCI_DMA_TODEVICE);
		dma_unmap_single(&eni_dev->pci_dev->dev,ENI_PRV_PADDR(skb),skb->len,
			         DMA_TO_DEVICE);
		if (!skb->len) dev_kfree_skb_irq(skb);
		else {
			EVENT("pushing (len=%ld)\n",skb->len,0);
@@ -1112,8 +1112,8 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags);
		    vcc->dev->number);
		return enq_jam;
	}
	paddr = pci_map_single(eni_dev->pci_dev,skb->data,skb->len,
	    PCI_DMA_TODEVICE);
	paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
			       DMA_TO_DEVICE);
	ENI_PRV_PADDR(skb) = paddr;
	/* prepare DMA queue entries */
	j = 0;
@@ -1226,8 +1226,8 @@ static void dequeue_tx(struct atm_dev *dev)
			break;
		}
		ENI_VCC(vcc)->txing -= ENI_PRV_SIZE(skb);
		pci_unmap_single(eni_dev->pci_dev,ENI_PRV_PADDR(skb),skb->len,
		    PCI_DMA_TODEVICE);
		dma_unmap_single(&eni_dev->pci_dev->dev,ENI_PRV_PADDR(skb),skb->len,
				 DMA_TO_DEVICE);
		if (vcc->pop) vcc->pop(vcc,skb);
		else dev_kfree_skb_irq(skb);
		atomic_inc(&vcc->stats->tx);
@@ -2240,13 +2240,18 @@ static int eni_init_one(struct pci_dev *pci_dev,
	if (rc < 0)
		goto out;

	rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
	if (rc < 0)
		goto out;

	rc = -ENOMEM;
	eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL);
	if (!eni_dev)
		goto err_disable;

	zero = &eni_dev->zero;
	zero->addr = pci_alloc_consistent(pci_dev, ENI_ZEROES_SIZE, &zero->dma);
	zero->addr = dma_alloc_coherent(&pci_dev->dev,
					ENI_ZEROES_SIZE, &zero->dma, GFP_KERNEL);
	if (!zero->addr)
		goto err_kfree;

@@ -2277,7 +2282,7 @@ static int eni_init_one(struct pci_dev *pci_dev,
err_unregister:
	atm_dev_deregister(dev);
err_free_consistent:
	pci_free_consistent(pci_dev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
	dma_free_coherent(&pci_dev->dev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
err_kfree:
	kfree(eni_dev);
err_disable:
@@ -2302,7 +2307,7 @@ static void eni_remove_one(struct pci_dev *pdev)

	eni_do_release(dev);
	atm_dev_deregister(dev);
	pci_free_consistent(pdev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
	dma_free_coherent(&pdev->dev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
	kfree(ed);
	pci_disable_device(pdev);
}
+14 −8
Original line number Diff line number Diff line
@@ -425,7 +425,7 @@ static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
static u32
fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
{
    u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
    u32 dma_addr = dma_map_single(&((struct pci_dev *) fore200e->bus_dev)->dev, virt_addr, size, direction);

    DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d,  --> dma_addr = 0x%08x\n",
	    virt_addr, size, direction, dma_addr);
@@ -440,7 +440,7 @@ fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int di
    DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
	    dma_addr, size, direction);

    pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
    dma_unmap_single(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
}


@@ -449,7 +449,7 @@ fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size,
{
    DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
    dma_sync_single_for_cpu(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
}

static void
@@ -457,7 +457,7 @@ fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int si
{
    DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
    dma_sync_single_for_device(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
}


@@ -470,9 +470,10 @@ fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
{
    /* returned chunks are page-aligned */
    chunk->alloc_size = size * nbr;
    chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
    chunk->alloc_addr = dma_alloc_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev,
					   chunk->alloc_size,
					     &chunk->dma_addr);
					   &chunk->dma_addr,
					   GFP_KERNEL);
    
    if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
	return -ENOMEM;
@@ -488,7 +489,7 @@ fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
static void
fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
    dma_free_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev,
			chunk->alloc_size,
			chunk->alloc_addr,
			chunk->dma_addr);
@@ -2708,6 +2709,11 @@ static int fore200e_pca_detect(struct pci_dev *pci_dev,
	goto out;
    }

    if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
	err = -EINVAL;
	goto out;
    }
    
    fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
    if (fore200e == NULL) {
	err = -ENOMEM;
+64 −61
Original line number Diff line number Diff line
@@ -359,7 +359,7 @@ static int he_init_one(struct pci_dev *pci_dev,

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
	if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
@@ -533,9 +533,9 @@ static void he_init_tx_lbfp(struct he_dev *he_dev)

static int he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_zalloc_consistent(he_dev->pci_dev,
	he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
						 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
						   &he_dev->tpdrq_phys);
						 &he_dev->tpdrq_phys, GFP_KERNEL);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
@@ -796,16 +796,16 @@ static int he_init_group(struct he_dev *he_dev, int group)
	}

	/* large buffer pool */
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
	he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	he_dev->rbpl_base = pci_zalloc_consistent(he_dev->pci_dev,
	he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
						CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
						  &he_dev->rbpl_phys);
						&he_dev->rbpl_phys, GFP_KERNEL);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
@@ -815,7 +815,7 @@ static int he_init_group(struct he_dev *he_dev, int group)

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
@@ -842,9 +842,9 @@ static int he_init_group(struct he_dev *he_dev, int group)

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
	he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
						CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
						  &he_dev->rbrq_phys);
						&he_dev->rbrq_phys, GFP_KERNEL);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
@@ -866,9 +866,9 @@ static int he_init_group(struct he_dev *he_dev, int group)

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
	he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
						CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
						  &he_dev->tbrq_phys);
						&he_dev->tbrq_phys, GFP_KERNEL);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
@@ -884,18 +884,18 @@ static int he_init_group(struct he_dev *he_dev, int group)
	return 0;

out_free_rbpq_base:
	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
			  sizeof(struct he_rbrq), he_dev->rbrq_base,
			  he_dev->rbrq_phys);
out_free_rbpl:
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
		dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
			  sizeof(struct he_rbp), he_dev->rbpl_base,
			  he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	pci_pool_destroy(he_dev->rbpl_pool);
	dma_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
@@ -911,8 +911,11 @@ static int he_init_irq(struct he_dev *he_dev)
	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
					       (CONFIG_IRQ_SIZE + 1)
					       * sizeof(struct he_irq),
					       &he_dev->irq_phys,
					       GFP_KERNEL);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
@@ -1419,10 +1422,10 @@ static int he_start(struct atm_dev *dev)

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
	he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
					   sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		hprintk("unable to create tpd dma_pool\n");
		return -ENOMEM;         
	}

@@ -1459,9 +1462,9 @@ static int he_start(struct atm_dev *dev)

	/* host status page */

	he_dev->hsp = pci_zalloc_consistent(he_dev->pci_dev,
	he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
					  sizeof(struct he_hsp),
					    &he_dev->hsp_phys);
					  &he_dev->hsp_phys, GFP_KERNEL);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
@@ -1558,18 +1561,18 @@ he_stop(struct he_dev *he_dev)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
		dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
				  * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
		dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
				  he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
				  * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

@@ -1577,22 +1580,22 @@ he_stop(struct he_dev *he_dev)
	kfree(he_dev->rbpl_table);

	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);
		dma_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
				  he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
				  he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
				  he_dev->tpdrq_base, he_dev->tpdrq_phys);

	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);
		dma_pool_destroy(he_dev->tpd_pool);

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
@@ -1610,7 +1613,7 @@ __alloc_tpd(struct he_dev *he_dev)
	struct he_tpd *tpd;
	dma_addr_t mapping;

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
	tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
	if (tpd == NULL)
		return NULL;
			
@@ -1681,7 +1684,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
				clear_bit(i, he_dev->rbpl_table);
				list_del(&heb->entry);
				pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
				dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
			}
					
			goto next_rbrq_entry;
@@ -1774,7 +1777,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
		++pdus_assembled;

		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
		INIT_LIST_HEAD(&he_vcc->buffers);
		he_vcc->pdu_len = 0;

@@ -1843,10 +1846,10 @@ he_service_tbrq(struct he_dev *he_dev, int group)

		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
			if (tpd->iovec[slot].addr)
				pci_unmap_single(he_dev->pci_dev,
				dma_unmap_single(&he_dev->pci_dev->dev,
					tpd->iovec[slot].addr,
					tpd->iovec[slot].len & TPD_LEN_MASK,
							PCI_DMA_TODEVICE);
							DMA_TO_DEVICE);
			if (tpd->iovec[slot].len & TPD_LST)
				break;
				
@@ -1861,7 +1864,7 @@ he_service_tbrq(struct he_dev *he_dev, int group)

next_tbrq_entry:
		if (tpd)
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
		he_dev->tbrq_head = (struct he_tbrq *)
				((unsigned long) he_dev->tbrq_base |
					TBRQ_MASK(he_dev->tbrq_head + 1));
@@ -1905,7 +1908,7 @@ he_service_rbpl(struct he_dev *he_dev, int group)
		}
		he_dev->rbpl_hint = i + 1;

		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
		if (!heb)
			break;
		heb->mapping = mapping;
@@ -2084,10 +2087,10 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
			 */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					pci_unmap_single(he_dev->pci_dev,
					dma_unmap_single(&he_dev->pci_dev->dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
								PCI_DMA_TODEVICE);
								DMA_TO_DEVICE);
			}
			if (tpd->skb) {
				if (tpd->vcc->pop)
@@ -2096,7 +2099,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			return;
		}
	}
@@ -2550,8 +2553,8 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
	}

#ifdef USE_SCATTERGATHER
	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
				skb_headlen(skb), PCI_DMA_TODEVICE);
	tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
				skb_headlen(skb), DMA_TO_DEVICE);
	tpd->iovec[slot].len = skb_headlen(skb);
	++slot;

@@ -2579,9 +2582,9 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
			slot = 0;
		}

		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
		tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev,
			(void *) page_address(frag->page) + frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
				frag->size, DMA_TO_DEVICE);
		tpd->iovec[slot].len = frag->size;
		++slot;

@@ -2589,7 +2592,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)

	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;
+2 −2
Original line number Diff line number Diff line
@@ -281,7 +281,7 @@ struct he_dev {
	int irq_peak;

	struct tasklet_struct tasklet;
	struct pci_pool *tpd_pool;
	struct dma_pool *tpd_pool;
	struct list_head outstanding_tpds;

	dma_addr_t tpdrq_phys;
@@ -296,7 +296,7 @@ struct he_dev {
	struct he_buff **rbpl_virt;
	unsigned long *rbpl_table;
	unsigned long rbpl_hint;
	struct pci_pool *rbpl_pool;
	struct dma_pool *rbpl_pool;
	dma_addr_t rbpl_phys;
	struct he_rbp *rbpl_base, *rbpl_tail;
	struct list_head rbpl_outstanding;
+57 −50
Original line number Diff line number Diff line
@@ -641,7 +641,8 @@ alloc_scq(struct idt77252_dev *card, int class)
	scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
	if (!scq)
		return NULL;
	scq->base = pci_zalloc_consistent(card->pcidev, SCQ_SIZE, &scq->paddr);
	scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE,
					&scq->paddr, GFP_KERNEL);
	if (scq->base == NULL) {
		kfree(scq);
		return NULL;
@@ -669,12 +670,12 @@ free_scq(struct idt77252_dev *card, struct scq_info *scq)
	struct sk_buff *skb;
	struct atm_vcc *vcc;

	pci_free_consistent(card->pcidev, SCQ_SIZE,
	dma_free_coherent(&card->pcidev->dev, SCQ_SIZE,
			  scq->base, scq->paddr);

	while ((skb = skb_dequeue(&scq->transmit))) {
		pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
				 skb->len, PCI_DMA_TODEVICE);
		dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
				 skb->len, DMA_TO_DEVICE);

		vcc = ATM_SKB(skb)->vcc;
		if (vcc->pop)
@@ -684,8 +685,8 @@ free_scq(struct idt77252_dev *card, struct scq_info *scq)
	}

	while ((skb = skb_dequeue(&scq->pending))) {
		pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
				 skb->len, PCI_DMA_TODEVICE);
		dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
				 skb->len, DMA_TO_DEVICE);

		vcc = ATM_SKB(skb)->vcc;
		if (vcc->pop)
@@ -800,8 +801,8 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
	if (skb) {
		TXPRINTK("%s: freeing skb at %p.\n", card->name, skb);

		pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
				 skb->len, PCI_DMA_TODEVICE);
		dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
				 skb->len, DMA_TO_DEVICE);

		vcc = ATM_SKB(skb)->vcc;

@@ -846,8 +847,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
	tbd = &IDT77252_PRV_TBD(skb);
	vcc = ATM_SKB(skb)->vcc;

	IDT77252_PRV_PADDR(skb) = pci_map_single(card->pcidev, skb->data,
						 skb->len, PCI_DMA_TODEVICE);
	IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data,
						 skb->len, DMA_TO_DEVICE);

	error = -EINVAL;

@@ -924,8 +925,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
	return 0;

errout:
	pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
			 skb->len, PCI_DMA_TODEVICE);
	dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
			 skb->len, DMA_TO_DEVICE);
	return error;
}

@@ -970,8 +971,8 @@ init_rsq(struct idt77252_dev *card)
{
	struct rsq_entry *rsqe;

	card->rsq.base = pci_zalloc_consistent(card->pcidev, RSQSIZE,
					       &card->rsq.paddr);
	card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE,
					     &card->rsq.paddr, GFP_KERNEL);
	if (card->rsq.base == NULL) {
		printk("%s: can't allocate RSQ.\n", card->name);
		return -1;
@@ -1001,7 +1002,7 @@ init_rsq(struct idt77252_dev *card)
static void
deinit_rsq(struct idt77252_dev *card)
{
	pci_free_consistent(card->pcidev, RSQSIZE,
	dma_free_coherent(&card->pcidev->dev, RSQSIZE,
			  card->rsq.base, card->rsq.paddr);
}

@@ -1057,9 +1058,9 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)

	vcc = vc->rx_vcc;

	pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(skb),
	dma_sync_single_for_cpu(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
				skb_end_pointer(skb) - skb->data,
				    PCI_DMA_FROMDEVICE);
				DMA_FROM_DEVICE);

	if ((vcc->qos.aal == ATM_AAL0) ||
	    (vcc->qos.aal == ATM_AAL34)) {
@@ -1180,9 +1181,9 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
			return;
		}

		pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
		dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
				 skb_end_pointer(skb) - skb->data,
				 PCI_DMA_FROMDEVICE);
				 DMA_FROM_DEVICE);
		sb_pool_remove(card, skb);

		skb_trim(skb, len);
@@ -1254,9 +1255,9 @@ idt77252_rx_raw(struct idt77252_dev *card)
	head = IDT77252_PRV_PADDR(queue) + (queue->data - queue->head - 16);
	tail = readl(SAR_REG_RAWCT);

	pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
	dma_sync_single_for_cpu(&card->pcidev->dev, IDT77252_PRV_PADDR(queue),
				skb_end_offset(queue) - 16,
				    PCI_DMA_FROMDEVICE);
				DMA_FROM_DEVICE);

	while (head != tail) {
		unsigned int vpi, vci;
@@ -1348,11 +1349,11 @@ idt77252_rx_raw(struct idt77252_dev *card)
			if (next) {
				card->raw_cell_head = next;
				queue = card->raw_cell_head;
				pci_dma_sync_single_for_cpu(card->pcidev,
				dma_sync_single_for_cpu(&card->pcidev->dev,
							IDT77252_PRV_PADDR(queue),
							(skb_end_pointer(queue) -
							 queue->data),
							    PCI_DMA_FROMDEVICE);
							DMA_FROM_DEVICE);
			} else {
				card->raw_cell_head = NULL;
				printk("%s: raw cell queue overrun\n",
@@ -1375,8 +1376,8 @@ init_tsq(struct idt77252_dev *card)
{
	struct tsq_entry *tsqe;

	card->tsq.base = pci_alloc_consistent(card->pcidev, RSQSIZE,
					      &card->tsq.paddr);
	card->tsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE,
					    &card->tsq.paddr, GFP_KERNEL);
	if (card->tsq.base == NULL) {
		printk("%s: can't allocate TSQ.\n", card->name);
		return -1;
@@ -1398,7 +1399,7 @@ init_tsq(struct idt77252_dev *card)
static void
deinit_tsq(struct idt77252_dev *card)
{
	pci_free_consistent(card->pcidev, TSQSIZE,
	dma_free_coherent(&card->pcidev->dev, TSQSIZE,
			  card->tsq.base, card->tsq.paddr);
}

@@ -1861,9 +1862,9 @@ add_rx_skb(struct idt77252_dev *card, int queue,
			goto outfree;
		}

		paddr = pci_map_single(card->pcidev, skb->data,
		paddr = dma_map_single(&card->pcidev->dev, skb->data,
				       skb_end_pointer(skb) - skb->data,
				       PCI_DMA_FROMDEVICE);
				       DMA_FROM_DEVICE);
		IDT77252_PRV_PADDR(skb) = paddr;

		if (push_rx_skb(card, skb, queue)) {
@@ -1875,8 +1876,8 @@ add_rx_skb(struct idt77252_dev *card, int queue,
	return;

outunmap:
	pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
			 skb_end_pointer(skb) - skb->data, PCI_DMA_FROMDEVICE);
	dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
			 skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE);

	handle = IDT77252_PRV_POOL(skb);
	card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
@@ -1892,15 +1893,15 @@ recycle_rx_skb(struct idt77252_dev *card, struct sk_buff *skb)
	u32 handle = IDT77252_PRV_POOL(skb);
	int err;

	pci_dma_sync_single_for_device(card->pcidev, IDT77252_PRV_PADDR(skb),
	dma_sync_single_for_device(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
				   skb_end_pointer(skb) - skb->data,
				       PCI_DMA_FROMDEVICE);
				   DMA_FROM_DEVICE);

	err = push_rx_skb(card, skb, POOL_QUEUE(handle));
	if (err) {
		pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
		dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
				 skb_end_pointer(skb) - skb->data,
				 PCI_DMA_FROMDEVICE);
				 DMA_FROM_DEVICE);
		sb_pool_remove(card, skb);
		dev_kfree_skb(skb);
	}
@@ -3058,11 +3059,11 @@ deinit_card(struct idt77252_dev *card)
		for (j = 0; j < FBQ_SIZE; j++) {
			skb = card->sbpool[i].skb[j];
			if (skb) {
				pci_unmap_single(card->pcidev,
				dma_unmap_single(&card->pcidev->dev,
						 IDT77252_PRV_PADDR(skb),
						 (skb_end_pointer(skb) -
						  skb->data),
						 PCI_DMA_FROMDEVICE);
						 DMA_FROM_DEVICE);
				card->sbpool[i].skb[j] = NULL;
				dev_kfree_skb(skb);
			}
@@ -3076,7 +3077,7 @@ deinit_card(struct idt77252_dev *card)
	vfree(card->vcs);

	if (card->raw_cell_hnd) {
		pci_free_consistent(card->pcidev, 2 * sizeof(u32),
		dma_free_coherent(&card->pcidev->dev, 2 * sizeof(u32),
				  card->raw_cell_hnd, card->raw_cell_paddr);
	}

@@ -3397,9 +3398,10 @@ static int init_card(struct atm_dev *dev)
	writel(0, SAR_REG_GP);

	/* Initialize RAW Cell Handle Register  */
	card->raw_cell_hnd = pci_zalloc_consistent(card->pcidev,
	card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev,
						 2 * sizeof(u32),
						   &card->raw_cell_paddr);
						 &card->raw_cell_paddr,
						 GFP_KERNEL);
	if (!card->raw_cell_hnd) {
		printk("%s: memory allocation failure.\n", card->name);
		deinit_card(card);
@@ -3611,6 +3613,11 @@ static int idt77252_init_one(struct pci_dev *pcidev,
		return err;
	}

	if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
		printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
		return err;
	}

	card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
	if (!card) {
		printk("idt77252-%d: can't allocate private data\n", index);
Loading