Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d73341bf authored by Anton Blanchard, committed by Christoph Hellwig
Browse files

ipr: convert to generic DMA API



Even though the ipr driver is only used on PCI, convert it
to use the generic DMA API.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 6932fc67
Loading
Loading
Loading
Loading
+52 −49
Original line number Diff line number Diff line
@@ -3942,8 +3942,9 @@ static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
		return -EIO;
	}

	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);
	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
					sglist->scatterlist, sglist->num_sg,
					DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -5571,7 +5572,7 @@ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		if (printk_ratelimit())
			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
			dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
		return -1;
	}

@@ -5622,7 +5623,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
		return -1;
	}

@@ -8392,7 +8393,7 @@ static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
	dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
@@ -8832,7 +8833,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
			dma_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

@@ -8840,7 +8841,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
		dma_pool_destroy(ioa_cfg->ipr_cmd_pool);

	kfree(ioa_cfg->ipr_cmnd_list);
	kfree(ioa_cfg->ipr_cmnd_list_dma);
@@ -8861,22 +8862,21 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
	dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++)
		pci_free_consistent(ioa_cfg->pdev,
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(u32) * ioa_cfg->hrrq[i].size,
				  ioa_cfg->hrrq[i].host_rrq,
				  ioa_cfg->hrrq[i].host_rrq_dma);

	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);
	dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(struct ipr_hostrcb),
				  ioa_cfg->hostrcb[i],
				  ioa_cfg->hostrcb_dma[i]);
@@ -8940,7 +8940,7 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
	dma_addr_t dma_addr;
	int i, entries_each_hrrq, hrrq_id = 0;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
	ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
						sizeof(struct ipr_cmnd), 512, 0);

	if (!ioa_cfg->ipr_cmd_pool)
@@ -8990,7 +8990,7 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
	}

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
		ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
@@ -9061,9 +9061,10 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
	ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
					      sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);
					      &ioa_cfg->vpd_cbs_dma,
					      GFP_KERNEL);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;
@@ -9072,13 +9073,14 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
		goto out_free_vpd_cbs;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
		ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					&ioa_cfg->hrrq[i].host_rrq_dma);
					&ioa_cfg->hrrq[i].host_rrq_dma,
					GFP_KERNEL);

		if (!ioa_cfg->hrrq[i].host_rrq)  {
			while (--i > 0)
				pci_free_consistent(pdev,
				dma_free_coherent(&pdev->dev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					ioa_cfg->hrrq[i].host_rrq,
					ioa_cfg->hrrq[i].host_rrq_dma);
@@ -9087,17 +9089,19 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
	ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
						  ioa_cfg->cfg_table_size,
						    &ioa_cfg->cfg_table_dma);
						  &ioa_cfg->cfg_table_dma,
						  GFP_KERNEL);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
		ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
							 sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);
							 &ioa_cfg->hostrcb_dma[i],
							 GFP_KERNEL);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;
@@ -9121,16 +9125,15 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)

out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
		dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
				  ioa_cfg->hostrcb[i],
				  ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);
	dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		pci_free_consistent(pdev,
		dma_free_coherent(&pdev->dev,
				  sizeof(u32) * ioa_cfg->hrrq[i].size,
				  ioa_cfg->hrrq[i].host_rrq,
				  ioa_cfg->hrrq[i].host_rrq_dma);
@@ -9138,7 +9141,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
	dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
@@ -9579,13 +9582,13 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
	ipr_init_regs(ioa_cfg);

	if (ioa_cfg->sis64) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
			rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		}
	} else
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
+1 −1
Original line number Diff line number Diff line
@@ -1549,7 +1549,7 @@ struct ipr_ioa_cfg {
	struct ipr_misc_cbs *vpd_cbs;
	dma_addr_t vpd_cbs_dma;

	struct pci_pool *ipr_cmd_pool;
	struct dma_pool *ipr_cmd_pool;

	struct ipr_cmnd *reset_cmd;
	int (*reset) (struct ipr_cmnd *);