Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c789174b authored by Greg Kroah-Hartman's avatar Greg Kroah-Hartman
Browse files
Doug writes:
  "RDMA fixes:

   Final for-rc pull request for 4.19

   We only have one bug to submit this time around.  It fixes a DMA
   unmap issue where we unmapped the DMA address from the IOMMU before
   we did from the card, resulting in a DMAR error with IOMMU enabled,
   or a possible crash without it."

* tag 'for-gkh' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  IB/mlx5: Unmap DMA addr from HCA before IOMMU
parents eb81bfb2 dd9a4034
Loading
Loading
Loading
Loading
+8 −4
Original line number Original line Diff line number Diff line
@@ -544,6 +544,9 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
	int shrink = 0;
	int shrink = 0;
	int c;
	int c;


	if (!mr->allocated_from_cache)
		return;

	c = order2idx(dev, mr->order);
	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
@@ -1647,18 +1650,19 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
		umem = NULL;
		umem = NULL;
	}
	}
#endif
#endif

	clean_mr(dev, mr);
	clean_mr(dev, mr);


	/*
	 * We should unregister the DMA address from the HCA before
	 * remove the DMA mapping.
	 */
	mlx5_mr_cache_free(dev, mr);
	if (umem) {
	if (umem) {
		ib_umem_release(umem);
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}
	}

	if (!mr->allocated_from_cache)
	if (!mr->allocated_from_cache)
		kfree(mr);
		kfree(mr);
	else
		mlx5_mr_cache_free(dev, mr);
}
}


int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)