Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 64ffaa21 authored by Amir Vadai's avatar Amir Vadai Committed by David S. Miller
Browse files

net/mlx5_core,mlx5_ib: Do not use vmap() on coherent memory

As David Daney pointed out in the mlx4_core driver [1], mlx5_core is also
misusing the DMA-API.

This patch is removing the code that vmap() memory allocated by
dma_alloc_coherent().

After this patch, users of this driver might fail allocating resources
on memory-fragmented systems.  This will be fixed later on.

[1] - https://patchwork.ozlabs.org/patch/458531/



CC: David Daney <david.daney@cavium.com>
Signed-off-by: default avatarAmir Vadai <amirv@mellanox.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 8ed9b5e1
Loading
Loading
Loading
Loading
+1 −2
Original line number Original line Diff line number Diff line
@@ -590,8 +590,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
{
{
	int err;
	int err;


	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf);
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
	if (err)
		return err;
		return err;


+1 −1
Original line number Original line Diff line number Diff line
@@ -768,7 +768,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);


	err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
	err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf);
	if (err) {
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
		goto err_uuar;
+1 −1
Original line number Original line Diff line number Diff line
@@ -165,7 +165,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
		return err;
		return err;
	}
	}


	if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
	if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) {
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		err = -ENOMEM;
		err = -ENOMEM;
		goto err_db;
		goto err_db;
+17 −79
Original line number Original line Diff line number Diff line
@@ -42,19 +42,14 @@
#include "mlx5_core.h"
#include "mlx5_core.h"


/* Handling for queue buffers -- we allocate a bunch of memory and
/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * register it in a memory region at HCA virtual address 0.
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */
 */


int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
		   struct mlx5_buf *buf)
{
{
	dma_addr_t t;
	dma_addr_t t;


	buf->size = size;
	buf->size = size;
	if (size <= max_direct) {
		buf->nbufs        = 1;
	buf->npages       = 1;
	buf->npages       = 1;
	buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
	buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
	buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
	buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
@@ -68,69 +63,15 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
		--buf->page_shift;
		--buf->page_shift;
		buf->npages *= 2;
		buf->npages *= 2;
	}
	}
	} else {
		int i;

		buf->direct.buf  = NULL;
		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages      = buf->nbufs;
		buf->page_shift  = PAGE_SHIFT;
		buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					   GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; i++) {
			buf->page_list[i].buf =
				dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						    &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;
		}

		if (BITS_PER_LONG == 64) {
			struct page **pages;
			pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; i++)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}


	return 0;
	return 0;

err_free:
	mlx5_buf_free(dev, buf);

	return -ENOMEM;
}
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);


void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{
{
	int i;

	if (buf->nbufs == 1)
	dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
	dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
			  buf->direct.map);
			  buf->direct.map);
	else {
		if (BITS_PER_LONG == 64)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; i++)
			if (buf->page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
EXPORT_SYMBOL_GPL(mlx5_buf_free);


@@ -230,10 +171,7 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
	int i;
	int i;


	for (i = 0; i < buf->npages; i++) {
	for (i = 0; i < buf->npages; i++) {
		if (buf->nbufs == 1)
		addr = buf->direct.map + (i << buf->page_shift);
		addr = buf->direct.map + (i << buf->page_shift);
		else
			addr = buf->page_list[i].map;


		pas[i] = cpu_to_be64(addr);
		pas[i] = cpu_to_be64(addr);
	}
	}
+1 −2
Original line number Original line Diff line number Diff line
@@ -346,8 +346,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
	int inlen;
	int inlen;


	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
			     &eq->buf);
	if (err)
	if (err)
		return err;
		return err;


Loading