
Commit c282da41 authored by Jason Gunthorpe, committed by Doug Ledford

RDMA/mlx4: Use rdma_user_mmap_io



Rely on the new core code helper to map BAR memory from the driver.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 5f9794dc
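
For reference, the helper relied on here comes from the parent commit 5f9794dc ("RDMA/ucontext: Add a core API for mmaping driver IO memory"), which declares it in include/rdma/ib_verbs.h:

	int rdma_user_mmap_io(struct ib_ucontext *ucontext,
			      struct vm_area_struct *vma, unsigned long pfn,
			      unsigned long size, pgprot_t prot);

A minimal sketch of the driver-side pattern is below; example_mmap is an illustrative name, not part of this commit:

	static int example_mmap(struct ib_ucontext *context,
				struct vm_area_struct *vma)
	{
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		/* The core helper validates the range, sets the page
		 * protection, performs io_remap_pfn_range(), and tracks the
		 * VMA so the core can zap it on disassociate. This replaces
		 * the per-driver vm_ops and hw_bar_info bookkeeping deleted
		 * in the diff below.
		 */
		return rdma_user_mmap_io(context, vma,
					 to_mucontext(context)->uar.pfn,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot));
	}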
drivers/infiniband/hw/mlx4/main.c +24 −118
@@ -1138,144 +1138,50 @@ static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	return 0;
 }
 
-static void  mlx4_ib_vma_open(struct vm_area_struct *area)
-{
-	/* vma_open is called when a new VMA is created on top of our VMA.
-	 * This is done through either mremap flow or split_vma (usually due
-	 * to mlock, madvise, munmap, etc.). We do not support a clone of the
-	 * vma, as this VMA is strongly hardware related. Therefore we set the
-	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
-	 * calling us again and trying to do incorrect actions. We assume that
-	 * the original vma size is exactly a single page that there will be no
-	 * "splitting" operations on.
-	 */
-	area->vm_ops = NULL;
-}
-
-static void  mlx4_ib_vma_close(struct vm_area_struct *area)
-{
-	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;
-
-	/* It's guaranteed that all VMAs opened on a FD are closed before the
-	 * file itself is closed, therefore no sync is needed with the regular
-	 * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
-	 * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
-	 * The close operation is usually called under mm->mmap_sem except when
-	 * process is exiting.  The exiting case is handled explicitly as part
-	 * of mlx4_ib_disassociate_ucontext.
-	 */
-	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
-				area->vm_private_data;
-
-	/* set the vma context pointer to null in the mlx4_ib driver's private
-	 * data to protect against a race condition in mlx4_ib_dissassociate_ucontext().
-	 */
-	mlx4_ib_vma_priv_data->vma = NULL;
-}
-
-static const struct vm_operations_struct mlx4_ib_vm_ops = {
-	.open = mlx4_ib_vma_open,
-	.close = mlx4_ib_vma_close
-};
-
 static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 {
-	int i;
-	struct vm_area_struct *vma;
-	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
-
-	/* need to protect from a race on closing the vma as part of
-	 * mlx4_ib_vma_close().
-	 */
-	for (i = 0; i < HW_BAR_COUNT; i++) {
-		vma = context->hw_bar_info[i].vma;
-		if (!vma)
-			continue;
-
-		zap_vma_ptes(context->hw_bar_info[i].vma,
-			     context->hw_bar_info[i].vma->vm_start, PAGE_SIZE);
-
-		context->hw_bar_info[i].vma->vm_flags &=
-			~(VM_SHARED | VM_MAYSHARE);
-		/* context going to be destroyed, should not access ops any more */
-		context->hw_bar_info[i].vma->vm_ops = NULL;
-	}
-}
-
-static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
-				 struct mlx4_ib_vma_private_data *vma_private_data)
-{
-	vma_private_data->vma = vma;
-	vma->vm_private_data = vma_private_data;
-	vma->vm_ops =  &mlx4_ib_vm_ops;
 }
 
 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
 	struct mlx4_ib_dev *dev = to_mdev(context->device);
-	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);
 
 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 		return -EINVAL;
 
-	if (vma->vm_pgoff == 0) {
-		/* We prevent double mmaping on same context */
-		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
-			return -EINVAL;
-
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       to_mucontext(context)->uar.pfn,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
-
-		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);
+	switch (vma->vm_pgoff) {
+	case 0:
+		return rdma_user_mmap_io(context, vma,
+					 to_mucontext(context)->uar.pfn,
+					 PAGE_SIZE,
+					 pgprot_noncached(vma->vm_page_prot));
 
-	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
-		/* We prevent double mmaping on same context */
-		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
+	case 1:
+		if (dev->dev->caps.bf_reg_size == 0)
 			return -EINVAL;
-
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       to_mucontext(context)->uar.pfn +
-				       dev->dev->caps.num_uars,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
-
-		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);
+		return rdma_user_mmap_io(
+			context, vma,
+			to_mucontext(context)->uar.pfn +
+				dev->dev->caps.num_uars,
+			PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot));
 
-	} else if (vma->vm_pgoff == 3) {
+	case 3: {
 		struct mlx4_clock_params params;
 		int ret;
 
-		/* We prevent double mmaping on same context */
-		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
-			return -EINVAL;
-
 		ret = mlx4_get_internal_clock_params(dev->dev, &params);
-
 		if (ret)
 			return ret;
 
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       (pci_resource_start(dev->dev->persist->pdev,
-							   params.bar) +
-					params.offset)
-				       >> PAGE_SHIFT,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
+		return rdma_user_mmap_io(
+			context, vma,
+			(pci_resource_start(dev->dev->persist->pdev,
+					    params.bar) +
+			 params.offset) >>
+				PAGE_SHIFT,
+			PAGE_SIZE, pgprot_noncached(vma->vm_page_prot));
+	}
 
-		mlx4_ib_set_vma_data(vma,
-				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
-	} else {
+	default:
 		return -EINVAL;
 	}
-
-	return 0;
 }
 
 static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
drivers/infiniband/hw/mlx4/mlx4_ib.h +0 −5
@@ -80,16 +80,11 @@ enum hw_bar_type {
 	HW_BAR_COUNT
 };
 
-struct mlx4_ib_vma_private_data {
-	struct vm_area_struct *vma;
-};
-
 struct mlx4_ib_ucontext {
 	struct ib_ucontext	ibucontext;
 	struct mlx4_uar		uar;
 	struct list_head	db_page_list;
 	struct mutex		db_page_mutex;
-	struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT];
 	struct list_head	wqn_ranges_list;
 	struct mutex		wqn_ranges_mutex; /* protect wqn_ranges_list */
 };