Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2bbd57ca authored by Bartlomiej Zolnierkiewicz
Browse files

ide: switch to DMA-mapping API part #2



Follow-up to commit 5c05ff68
("ide: switch to DMA-mapping API"):

* pci_{alloc,free}_consistent() -> dma_{alloc,free}_coherent()
  in ide_{allocate,release}_dma_engine().

* Add ->prd_max_nents and ->prd_ent_size fields to ide_hwif_t
  (+ set default values in ide_allocate_dma_engine()).

* Make ide_{allocate,release}_dma_engine() available also
  for CONFIG_BLK_DEV_IDEDMA_SFF=n.  Then convert au1xxx-ide.c,
  scc_pata.c and sgiioc4.c to use them.

* Add missing ->init_dma method to scc_pata.

This patch also fixes:
- ->dmatable_cpu leak for au1xxx-ide
- too early release of ->dmatable_cpu for scc_pata
- wrong amount of ->dmatable_cpu memory being freed for sgiioc4

While at it:
- remove superfluous ->dma_base check from ide_unregister()
- return -ENOMEM on error in ide_release_dma_engine()
- beautify error message in ide_release_dma_engine()

Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
parent ffa15a69
Loading
Loading
Loading
Loading
+19 −12
Original line number Diff line number Diff line
@@ -844,36 +844,43 @@ void ide_dma_timeout(ide_drive_t *drive)
}
EXPORT_SYMBOL_GPL(ide_dma_timeout);

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/*
 * ide_release_dma_engine - free the PRD table of @hwif.
 *
 * Releases the DMA-coherent PRD table allocated by
 * ide_allocate_dma_engine(); the size freed is derived from the
 * per-hwif ->prd_max_nents / ->prd_ent_size geometry rather than the
 * fixed PRD_ENTRIES * PRD_BYTES, so hosts with non-default PRD layouts
 * (e.g. sgiioc4) free the correct amount.  Safe to call when no table
 * was ever allocated (->dmatable_cpu == NULL).
 */
void ide_release_dma_engine(ide_hwif_t *hwif)
{
	if (hwif->dmatable_cpu) {
		int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

		dma_free_coherent(hwif->dev, prd_size,
				  hwif->dmatable_cpu, hwif->dmatable_dma);
		/* clear the pointer so a repeated release is a no-op */
		hwif->dmatable_cpu = NULL;
	}
}
EXPORT_SYMBOL_GPL(ide_release_dma_engine);

/*
 * ide_allocate_dma_engine - allocate the PRD table for @hwif.
 *
 * Host drivers may preset ->prd_max_nents / ->prd_ent_size before
 * calling this (e.g. sgiioc4 uses IOC4-specific values); otherwise the
 * SFF defaults PRD_ENTRIES / PRD_BYTES are used.  The table is
 * allocated with dma_alloc_coherent() against hwif->dev, replacing the
 * PCI-only pci_alloc_consistent() so non-PCI hosts can share this
 * helper.
 *
 * Returns 0 on success or -ENOMEM if the allocation fails.
 */
int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
	int prd_size;

	/* honour a driver-supplied PRD geometry, else fall back to SFF defaults */
	if (hwif->prd_max_nents == 0)
		hwif->prd_max_nents = PRD_ENTRIES;
	if (hwif->prd_ent_size == 0)
		hwif->prd_ent_size = PRD_BYTES;

	prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

	/* GFP_ATOMIC: may be called from contexts that cannot sleep */
	hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
						&hwif->dmatable_dma,
						GFP_ATOMIC);
	if (hwif->dmatable_cpu == NULL) {
		printk(KERN_ERR "%s: unable to allocate PRD table\n",
			hwif->name);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
const struct ide_dma_ops sff_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= ide_dma_setup,
+1 −2
Original line number Diff line number Diff line
@@ -227,7 +227,6 @@ void ide_unregister(ide_hwif_t *hwif)
	kfree(hwif->sg_table);
	unregister_blkdev(hwif->major, hwif->name);

	if (hwif->dma_base)
	ide_release_dma_engine(hwif);

	mutex_unlock(&ide_cfg_mtx);
+3 −4
Original line number Diff line number Diff line
@@ -428,9 +428,8 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
	auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
							     NUM_DESCRIPTORS);

	hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev,
						PRD_ENTRIES * PRD_BYTES,        /* 1 Page */
						&hwif->dmatable_dma, GFP_KERNEL);
	/* FIXME: check return value */
	(void)ide_allocate_dma_engine(hwif);
	
	au1xxx_dbdma_start( auide->tx_chan );
	au1xxx_dbdma_start( auide->rx_chan );
+7 −7
Original line number Diff line number Diff line
@@ -821,6 +821,12 @@ static void __devinit init_iops_scc(ide_hwif_t *hwif)
	init_mmio_iops_scc(hwif);
}

/*
 * scc_init_dma - ->init_dma hook for the SCC PATA controller.
 *
 * Defers entirely to the generic ide_allocate_dma_engine() helper to
 * set up the PRD table; @d is unused but required by the method
 * signature.  Returns 0 on success or a negative errno on failure.
 */
static int __devinit scc_init_dma(ide_hwif_t *hwif,
				  const struct ide_port_info *d)
{
	return ide_allocate_dma_engine(hwif);
}

static u8 scc_cable_detect(ide_hwif_t *hwif)
{
	return ATA_CBL_PATA80;
@@ -885,6 +891,7 @@ static const struct ide_dma_ops scc_dma_ops = {
  {							\
      .name		= name_str,			\
      .init_iops	= init_iops_scc,		\
      .init_dma		= scc_init_dma,			\
      .init_hwif	= init_hwif_scc,		\
      .tp_ops		= &scc_tp_ops,		\
      .port_ops		= &scc_port_ops,		\
@@ -922,13 +929,6 @@ static void __devexit scc_remove(struct pci_dev *dev)
{
	struct scc_ports *ports = pci_get_drvdata(dev);
	struct ide_host *host = ports->host;
	ide_hwif_t *hwif = host->ports[0];

	if (hwif->dmatable_cpu) {
		pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
				    hwif->dmatable_cpu, hwif->dmatable_dma);
		hwif->dmatable_cpu = NULL;
	}

	ide_host_remove(host);

+7 −8
Original line number Diff line number Diff line
@@ -357,14 +357,13 @@ ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
	}
	hwif->dma_base = (unsigned long) virt_dma_base;

	hwif->dmatable_cpu = pci_alloc_consistent(dev,
					  IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
					  &hwif->dmatable_dma);
	hwif->sg_max_nents = IOC4_PRD_ENTRIES;

	if (!hwif->dmatable_cpu)
		goto dma_pci_alloc_failure;
	hwif->prd_max_nents = IOC4_PRD_ENTRIES;
	hwif->prd_ent_size = IOC4_PRD_BYTES;

	hwif->sg_max_nents = IOC4_PRD_ENTRIES;
	if (ide_allocate_dma_engine(hwif))
		goto dma_pci_alloc_failure;

	pad = pci_alloc_consistent(dev, IOC4_IDE_CACHELINE_SIZE,
				   (dma_addr_t *)&hwif->extra_base);
@@ -373,8 +372,8 @@ ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
		return 0;
	}

	pci_free_consistent(dev, IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
			    hwif->dmatable_cpu, hwif->dmatable_dma);
	ide_release_dma_engine(hwif);

	printk(KERN_ERR "%s(%s) -- ERROR: Unable to allocate DMA maps\n",
	       __func__, hwif->name);
	printk(KERN_INFO "%s: changing from DMA to PIO mode", hwif->name);
Loading