
Commit 8de94ce1 authored by Steve Wise, committed by Roland Dreier

IB/amso1100: Use dma_alloc_coherent() instead of kmalloc/dma_map_single



The Ammasso driver needs to use dma_alloc_coherent() for
allocating memory that will be used by the HW for DMA.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 04d03bc5
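
For context on the two APIs involved: kmalloc()/__get_free_pages() plus dma_map_single() sets up a *streaming* DMA mapping, which assumes the CPU leaves the buffer alone until it is synced or unmapped; queues that the adapter and the CPU both touch for the lifetime of the device want a *coherent* mapping instead. A minimal sketch of the replacement pattern follows (the helper names are hypothetical, not taken from this commit):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical helpers illustrating the coherent-DMA pattern used below. */
static void *alloc_hw_queue(struct pci_dev *pdev, size_t size,
			    dma_addr_t *dma_handle)
{
	/*
	 * dma_alloc_coherent() returns memory the device may DMA into at
	 * any time; no dma_sync_*() or dma_unmap_single() calls are needed
	 * between CPU and device accesses.  *dma_handle receives the bus
	 * address to program into the hardware.
	 */
	return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_KERNEL);
}

static void free_hw_queue(struct pci_dev *pdev, size_t size,
			  void *cpu_addr, dma_addr_t dma_handle)
{
	/* Size, CPU address, and bus address must match the allocation. */
	dma_free_coherent(&pdev->dev, size, cpu_addr, dma_handle);
}

The diffs below apply this substitution to the MQ shared pointer pool, the CQ message pool, and the verbs reply / asynchronous event queues.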
+6 −7
@@ -42,13 +42,14 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
 {
 	int i;
 	struct sp_chunk *new_head;
+	dma_addr_t dma_addr;
 
-	new_head = (struct sp_chunk *) __get_free_page(gfp_mask);
+	new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
+				      &dma_addr, gfp_mask);
 	if (new_head == NULL)
 		return -ENOMEM;
 
-	new_head->dma_addr = dma_map_single(c2dev->ibdev.dma_device, new_head,
-					    PAGE_SIZE, DMA_FROM_DEVICE);
+	new_head->dma_addr = dma_addr;
 	pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
 
 	new_head->next = NULL;
@@ -80,10 +81,8 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
 
 	while (root) {
 		next = root->next;
-		dma_unmap_single(c2dev->ibdev.dma_device,
-				 pci_unmap_addr(root, mapping), PAGE_SIZE,
-			         DMA_FROM_DEVICE);
-		__free_page((struct page *) root);
+		dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
+				  pci_unmap_addr(root, mapping));
		root = next;
	}
 }
+6 −12
@@ -246,20 +246,17 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 
 static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
 {
-
-	dma_unmap_single(c2dev->ibdev.dma_device, pci_unmap_addr(mq, mapping),
-			 mq->q_size * mq->msg_size, DMA_FROM_DEVICE);
-	free_pages((unsigned long) mq->msg_pool.host,
-		   get_order(mq->q_size * mq->msg_size));
+	dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
+			  mq->msg_pool.host, pci_unmap_addr(mq, mapping));
 }
 
 static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
 			   int msg_size)
 {
-	unsigned long pool_start;
+	u8 *pool_start;
 
-	pool_start = __get_free_pages(GFP_KERNEL,
-				      get_order(q_size * msg_size));
+	pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
+					&mq->host_dma, GFP_KERNEL);
 	if (!pool_start)
 		return -ENOMEM;
 
@@ -267,13 +264,10 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
 		       0,		/* index (currently unknown) */
 		       q_size,
 		       msg_size,
-		       (u8 *) pool_start,
+		       pool_start,
 		       NULL,	/* peer (currently unknown) */
 		       C2_MQ_HOST_TARGET);
 
-	mq->host_dma = dma_map_single(c2dev->ibdev.dma_device,
-				      (void *)pool_start,
-				      q_size * msg_size, DMA_FROM_DEVICE);
 	pci_unmap_addr_set(mq, mapping, mq->host_dma);
 
 	return 0;
+21 −31
@@ -517,14 +517,12 @@ int c2_rnic_init(struct c2_dev *c2dev)
 	/* Initialize the Verbs Reply Queue */
 	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
 	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
-	q1_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+	q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
+				      &c2dev->rep_vq.host_dma, GFP_KERNEL);
 	if (!q1_pages) {
 		err = -ENOMEM;
 		goto bail1;
 	}
-	c2dev->rep_vq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
-					        (void *)q1_pages, qsize * msgsize,
-				      		DMA_FROM_DEVICE);
 	pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
 	pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
 		 (unsigned long long) c2dev->rep_vq.host_dma);
@@ -540,14 +538,12 @@ int c2_rnic_init(struct c2_dev *c2dev)
 	/* Initialize the Asynchronus Event Queue */
 	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
 	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
-	q2_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+	q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
+				      &c2dev->aeq.host_dma, GFP_KERNEL);
 	if (!q2_pages) {
 		err = -ENOMEM;
 		goto bail2;
 	}
-	c2dev->aeq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
-					        (void *)q2_pages, qsize * msgsize,
-				      		DMA_FROM_DEVICE);
 	pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
 	pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q1_pages,
 		 (unsigned long long) c2dev->rep_vq.host_dma);
@@ -597,17 +593,13 @@ int c2_rnic_init(struct c2_dev *c2dev)
       bail4:
 	vq_term(c2dev);
       bail3:
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->aeq, mapping),
-			 c2dev->aeq.q_size * c2dev->aeq.msg_size,
-		  	 DMA_FROM_DEVICE);
-	kfree(q2_pages);
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
+			  q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
       bail2:
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->rep_vq, mapping),
-			 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-		  	 DMA_FROM_DEVICE);
-	kfree(q1_pages);
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+			  q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
       bail1:
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
       bail0:
@@ -640,19 +632,17 @@ void c2_rnic_term(struct c2_dev *c2dev)
 	/* Free the verbs request allocator */
 	vq_term(c2dev);
 
-	/* Unmap and free the asynchronus event queue */
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->aeq, mapping),
-			 c2dev->aeq.q_size * c2dev->aeq.msg_size,
-		  	 DMA_FROM_DEVICE);
-	kfree(c2dev->aeq.msg_pool.host);
+	/* Free the asynchronus event queue */
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
+			  c2dev->aeq.msg_pool.host,
+			  pci_unmap_addr(&c2dev->aeq, mapping));
 
-	/* Unmap and free the verbs reply queue */
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->rep_vq, mapping),
-			 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-		  	 DMA_FROM_DEVICE);
-	kfree(c2dev->rep_vq.msg_pool.host);
+	/* Free the verbs reply queue */
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+			  c2dev->rep_vq.msg_pool.host,
+			  pci_unmap_addr(&c2dev->rep_vq, mapping));
 
 	/* Free the MQ shared pointer pool */
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);