
Commit cbc9355a authored by Chuck Lever, committed by Doug Ledford

IB/mlx4: Prevent cross page boundary allocation



Prevent the MR page list from crossing a page boundary by allocating
a full page for it; this is required to meet ConnectX-3 HW alignment
requirements.

Not doing so might cause an "RDMA read local protection" error.

Fixes: 1b2cd0fc ('IB/mlx4: Support the new memory registration API')
Suggested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 5b420d9c
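
Editor's note: to illustrate the failure mode the commit message describes, here is a minimal userspace sketch, not part of the patch. It assumes PAGE_SIZE = 4096 and MLX4_MAX_FAST_REG_PAGES = 511 (the cap used by the mlx4 driver): a 64-byte-aligned page list of 511 * 8 = 4088 bytes can straddle two pages, while a page-aligned one cannot.

/*
 * Editor's sketch (not part of this commit): why a kmalloc'ed page list
 * could cross a page boundary. MLX4_MAX_FAST_REG_PAGES (511) and
 * PAGE_SIZE (4096) are assumed values from the mlx4 driver / x86-64.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define MLX4_MAX_FAST_REG_PAGES	511

/* True if [addr, addr + len) straddles a PAGE_SIZE boundary. */
static int crosses_page(uintptr_t addr, size_t len)
{
	return addr / PAGE_SIZE != (addr + len - 1) / PAGE_SIZE;
}

int main(void)
{
	size_t len = MLX4_MAX_FAST_REG_PAGES * sizeof(uint64_t);	/* 4088 */

	/* kzalloc() + PTR_ALIGN() only guaranteed 64-byte alignment, so the
	 * list could start mid-page and spill into the next one: */
	printf("64-byte aligned start: crosses=%d\n",
	       crosses_page(0x10080, len));				/* 1 */

	/* get_zeroed_page() returns a page-aligned page; 4088 <= 4096, so
	 * the list can never cross a boundary: */
	printf("page-aligned start:    crosses=%d\n",
	       crosses_page(0x10000, len));				/* 0 */
	return 0;
}

The straddling case is the one the HCA rejects, consistent with the "RDMA read local protection" error described above.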
drivers/infiniband/hw/mlx4/mlx4_ib.h +1 −1
@@ -139,7 +139,7 @@ struct mlx4_ib_mr {
 	u32			max_pages;
 	struct mlx4_mr		mmr;
 	struct ib_umem	       *umem;
-	void			*pages_alloc;
+	size_t			page_map_size;
 };
 
 struct mlx4_ib_mw {
drivers/infiniband/hw/mlx4/mr.c +17 −17
@@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device,
 		      struct mlx4_ib_mr *mr,
 		      int max_pages)
 {
-	int size = max_pages * sizeof(u64);
-	int add_size;
 	int ret;
 
-	add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+	/* Ensure that size is aligned to DMA cacheline
+	 * requirements.
+	 * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+	 * so page_map_size will never cross PAGE_SIZE.
+	 */
+	mr->page_map_size = roundup(max_pages * sizeof(u64),
+				    MLX4_MR_PAGES_ALIGN);
 
-	mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
-	if (!mr->pages_alloc)
+	/* Prevent cross page boundary allocation. */
+	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+	if (!mr->pages)
 		return -ENOMEM;
 
-	mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
 	mr->page_map = dma_map_single(device->dma_device, mr->pages,
-				      size, DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	if (dma_mapping_error(device->dma_device, mr->page_map)) {
 		ret = -ENOMEM;
@@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device,
 	}
 
 	return 0;
-err:
-	kfree(mr->pages_alloc);
 
+err:
+	free_page((unsigned long)mr->pages);
 	return ret;
 }
 
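Editor's note: the new comment in the hunk above relies on max_pages being capped at MLX4_MAX_FAST_REG_PAGES, so the rounded-up page_map_size never exceeds PAGE_SIZE. Below is a standalone check of that bound; ROUNDUP is a simplified stand-in for the kernel's roundup() macro, and the constants are assumptions taken from the mlx4 headers.

#include <stdio.h>
#include <stdint.h>

#define MLX4_MR_PAGES_ALIGN	0x40	/* assumed, from mlx4_ib.h */
#define ROUNDUP(x, y)		((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	/* page_map_size for a few MR sizes, up to the 511-entry maximum */
	int sizes[] = { 1, 256, 511 };

	for (int i = 0; i < 3; i++)
		printf("max_pages=%3d -> page_map_size=%4zu\n", sizes[i],
		       (size_t)ROUNDUP(sizes[i] * sizeof(uint64_t),
				       MLX4_MR_PAGES_ALIGN));
	/* prints 64, 2048, 4096 -- never more than PAGE_SIZE (4096) */
	return 0;
}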

@@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
 {
 	if (mr->pages) {
 		struct ib_device *device = mr->ibmr.device;
-		int size = mr->max_pages * sizeof(u64);
 
 		dma_unmap_single(device->dma_device, mr->page_map,
-				 size, DMA_TO_DEVICE);
-		kfree(mr->pages_alloc);
+				 mr->page_map_size, DMA_TO_DEVICE);
+		free_page((unsigned long)mr->pages);
 		mr->pages = NULL;
 	}
 }
@@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	mr->npages = 0;
 
 	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
-				   sizeof(u64) * mr->max_pages,
-				   DMA_TO_DEVICE);
+				   mr->page_map_size, DMA_TO_DEVICE);
 
 	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
 
 	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
-				      sizeof(u64) * mr->max_pages,
-				      DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	return rc;
 }
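
Editor's note: both syncs now pass the same mr->page_map_size that dma_map_single() used when mapping the page list, instead of recomputing sizeof(u64) * mr->max_pages. Here is a compilable sketch of that CPU/device ownership handoff; sync_for_cpu()/sync_for_device() are local stand-ins for the ib_dma_sync_single_* calls, and the buffer size is illustrative.

/*
 * Editor's sketch of the ownership handoff mlx4_ib_map_mr_sg() performs
 * around a streaming DMA mapping: the point is the ordering and the
 * single consistent length used for map and both syncs.
 */
#include <stdio.h>
#include <string.h>

static void sync_for_cpu(void *buf, size_t len)    { printf("CPU owns %zu bytes\n", len); }
static void sync_for_device(void *buf, size_t len) { printf("device owns %zu bytes\n", len); }

int main(void)
{
	unsigned char page_list[4096];		/* mapped once, as page_map_size */
	size_t page_map_size = sizeof(page_list);

	sync_for_cpu(page_list, page_map_size);	   /* claim buffer before writing */
	memset(page_list, 0, page_map_size);	   /* fill page list (ib_sg_to_pages) */
	sync_for_device(page_list, page_map_size); /* hand it back to the HCA */
	return 0;
}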