
Commit 2b6b7d4b authored by Jack Morgenstein, committed by Roland Dreier

IB/mlx4: Don't overwrite fast registration page list when posting work request

At post-send time, the low-level mlx4 driver converted the page-list
addresses of fast registration work requests to big-endian in place and
set a "present" bit.  This caused problems later when the consumer
attempted to unmap the pages using the page list, because the list
addresses were assumed to still be in CPU byte order.  Fix the mlx4
driver to allocate two buffers and use a private buffer for the
hardware-format bus addresses.
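
As an illustration only (the names and types below are invented for the
sketch, and the byte swap assumes a little-endian host standing in for
cpu_to_be64), the two-buffer idea looks roughly like this in plain C:

/*
 * The consumer-visible page_list stays in CPU byte order; the driver
 * keeps a private, DMA-mapped copy in hardware format (big-endian with
 * the "present" bit set).
 */
#include <stdint.h>

#define EXAMPLE_MTT_FLAG_PRESENT 1ULL	/* stand-in for MLX4_MTT_FLAG_PRESENT */

struct example_frpl {
	uint64_t *page_list;		/* consumer's buffer, CPU byte order */
	uint64_t *mapped_page_list;	/* driver-private, hardware-format buffer */
	int	  len;
};

/* Convert into the private buffer at post-send time, never in place. */
static void example_build_hw_page_list(struct example_frpl *frpl)
{
	int i;

	for (i = 0; i < frpl->len; ++i)
		frpl->mapped_page_list[i] =
			__builtin_bswap64(frpl->page_list[i] |
					  EXAMPLE_MTT_FLAG_PRESENT);
}

Because the consumer's page_list is never modified, it can still be used
afterwards to unmap the pages with the original CPU-order addresses.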

This patch fixes <https://bugs.openfabrics.org/show_bug.cgi?id=1571>,
an NFS/RDMA server crash.  The cause of the crash was found by Vu Pham
of Mellanox.  The fix is along the lines suggested by Steve Wise in
comment #21 in bug 1571.

Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 56a50add
drivers/infiniband/hw/mlx4/mlx4_ib.h: +1 −0
@@ -86,6 +86,7 @@ struct mlx4_ib_mr {
 
 struct mlx4_ib_fast_reg_page_list {
 	struct ib_fast_reg_page_list	ibfrpl;
+	__be64			       *mapped_page_list;
 	dma_addr_t			map;
 };
 
drivers/infiniband/hw/mlx4/mr.c: +8 −2
@@ -231,7 +231,11 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
 	if (!mfrpl)
 		return ERR_PTR(-ENOMEM);
 
-	mfrpl->ibfrpl.page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
+	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
+	if (!mfrpl->ibfrpl.page_list)
+		goto err_free;
+
+	mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
 						     size, &mfrpl->map,
 						     GFP_KERNEL);
 	if (!mfrpl->ibfrpl.page_list)
@@ -242,6 +246,7 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
 	return &mfrpl->ibfrpl;
 
 err_free:
+	kfree(mfrpl->ibfrpl.page_list);
 	kfree(mfrpl);
 	return ERR_PTR(-ENOMEM);
 }
@@ -252,8 +257,9 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
 	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
 	int size = page_list->max_page_list_len * sizeof (u64);
 
-	dma_free_coherent(&dev->dev->pdev->dev, size, page_list->page_list,
+	dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
 			  mfrpl->map);
+	kfree(mfrpl->ibfrpl.page_list);
 	kfree(mfrpl);
 }
 
drivers/infiniband/hw/mlx4/qp.c: +1 −1
@@ -1365,7 +1365,7 @@ static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
 	int i;
 
 	for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
-		wr->wr.fast_reg.page_list->page_list[i] =
+		mfrpl->mapped_page_list[i] =
 			cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
 				    MLX4_MTT_FLAG_PRESENT);