Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 13812621 authored by Christoph Hellwig, committed by Jens Axboe
Browse files

skd: switch to the generic DMA API



The PCI DMA API is deprecated, switch to the generic DMA API instead.
Also make use of the dma_set_mask_and_coherent helper to easily set
the streaming and coherent DMA masks together.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ecb0a83e
Loading
Loading
Loading
Loading
+25 −38
Original line number Diff line number Diff line
@@ -632,7 +632,7 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
	n_sg = dma_map_sg(&skdev->pdev->dev, sgl, n_sg, skreq->data_dir);
	if (n_sg <= 0)
		return false;

@@ -682,7 +682,8 @@ static void skd_postop_sg_list(struct skd_device *skdev,
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
	dma_unmap_sg(&skdev->pdev->dev, &skreq->sg[0], skreq->n_sg,
		     skreq->data_dir);
}

/*
@@ -2632,8 +2633,8 @@ static int skd_cons_skcomp(struct skd_device *skdev)
		"comp pci_alloc, total bytes %zd entries %d\n",
		SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);

	skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
				       &skdev->cq_dma_address);
	skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
				     &skdev->cq_dma_address, GFP_KERNEL);

	if (skcomp == NULL) {
		rc = -ENOMEM;
@@ -2674,10 +2675,10 @@ static int skd_cons_skmsg(struct skd_device *skdev)

		skmsg->id = i + SKD_ID_FIT_MSG;

		skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
		skmsg->msg_buf = dma_alloc_coherent(&skdev->pdev->dev,
						    SKD_N_FITMSG_BYTES,
						      &skmsg->mb_dma_address);

						    &skmsg->mb_dma_address,
						    GFP_KERNEL);
		if (skmsg->msg_buf == NULL) {
			rc = -ENOMEM;
			goto err_out;
@@ -2971,7 +2972,7 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
static void skd_free_skcomp(struct skd_device *skdev)
{
	if (skdev->skcomp_table)
		pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
		dma_free_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
				  skdev->skcomp_table, skdev->cq_dma_address);

	skdev->skcomp_table = NULL;
@@ -2991,7 +2992,7 @@ static void skd_free_skmsg(struct skd_device *skdev)
		skmsg = &skdev->skmsg_table[i];

		if (skmsg->msg_buf != NULL) {
			pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
			dma_free_coherent(&skdev->pdev->dev, SKD_N_FITMSG_BYTES,
					  skmsg->msg_buf,
					    skmsg->mb_dma_address);
		}
@@ -3172,19 +3173,13 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			dev_err(&pdev->dev, "consistent DMA mask error %d\n",
				rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(&pdev->dev, "DMA mask error %d\n", rc);
		goto err_out_regions;
	}
	}

	if (!skd_major) {
		rc = register_blkdev(0, DRV_NAME);
@@ -3367,21 +3362,13 @@ static int skd_pci_resume(struct pci_dev *pdev)
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {

			dev_err(&pdev->dev, "consistent DMA mask error %d\n",
				rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {

		dev_err(&pdev->dev, "DMA mask error %d\n", rc);
		goto err_out_regions;
	}
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);