
Commit f993de88 authored by Jason Gunthorpe

RDMA/odp: Iterate over the whole rbtree directly

Instead of intersecting a full interval, just iterate over every element
directly. This is faster and clearer.
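
For context, the change swaps an interval-tree intersection query over the full range for a plain walk of the underlying rbtree. A minimal sketch of the two traversal patterns, using the generic <linux/rbtree.h> and <linux/interval_tree.h> helpers rather than the ODP-specific wrappers (the demo_entry type and callback below are illustrative only, not part of this patch):

#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/interval_tree.h>

/* Illustrative container: an interval_tree_node embedded in a larger
 * object, mirroring how ib_umem_odp embeds its interval_tree member. */
struct demo_entry {
	struct interval_tree_node node;	/* node.rb is the embedded rb_node */
	int payload;
};

/* Old pattern: intersect the interval tree with the full range; every
 * node is still visited, but each step does the range bookkeeping. */
static void visit_by_intersection(struct rb_root_cached *root,
				  void (*cb)(struct demo_entry *))
{
	struct interval_tree_node *it;

	for (it = interval_tree_iter_first(root, 0, ULONG_MAX); it;
	     it = interval_tree_iter_next(it, 0, ULONG_MAX))
		cb(container_of(it, struct demo_entry, node));
}

/* New pattern: when every element is wanted anyway, walk the rbtree
 * directly with rb_first_cached()/rb_next(). */
static void visit_all(struct rb_root_cached *root,
		      void (*cb)(struct demo_entry *))
{
	struct rb_node *rb;

	for (rb = rb_first_cached(root); rb; rb = rb_next(rb))
		cb(rb_entry(rb, struct demo_entry, node.rb));
}

Visiting every node directly skips the per-node start/last comparisons the intersection iterator performs, which is the overhead the commit message refers to when it calls the new form faster and clearer.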

Link: https://lore.kernel.org/r/20190819111710.18440-3-leon@kernel.org


Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 7cc2e18f
drivers/infiniband/core/umem_odp.c +21 −19

@@ -72,31 +72,34 @@ static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
 	mutex_unlock(&umem_odp->umem_mutex);
 }
 
-static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
-					       u64 start, u64 end, void *cookie)
-{
-	/*
-	 * Increase the number of notifiers running, to
-	 * prevent any further fault handling on this MR.
-	 */
-	ib_umem_notifier_start_account(umem_odp);
-	complete_all(&umem_odp->notifier_completion);
-	umem_odp->umem.context->invalidate_range(
-		umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));
-	return 0;
-}
-
 static void ib_umem_notifier_release(struct mmu_notifier *mn,
 				     struct mm_struct *mm)
 {
 	struct ib_ucontext_per_mm *per_mm =
 		container_of(mn, struct ib_ucontext_per_mm, mn);
+	struct rb_node *node;
 
 	down_read(&per_mm->umem_rwsem);
-	if (per_mm->active)
-		rbt_ib_umem_for_each_in_range(
-			&per_mm->umem_tree, 0, ULLONG_MAX,
-			ib_umem_notifier_release_trampoline, true, NULL);
+	if (!per_mm->active)
+		goto out;
+
+	for (node = rb_first_cached(&per_mm->umem_tree); node;
+	     node = rb_next(node)) {
+		struct ib_umem_odp *umem_odp =
+			rb_entry(node, struct ib_umem_odp, interval_tree.rb);
+
+		/*
+		 * Increase the number of notifiers running, to prevent any
+		 * further fault handling on this MR.
+		 */
+		ib_umem_notifier_start_account(umem_odp);
+		complete_all(&umem_odp->notifier_completion);
+		umem_odp->umem.context->invalidate_range(
+			umem_odp, ib_umem_start(umem_odp),
+			ib_umem_end(umem_odp));
+	}
+
+out:
 	up_read(&per_mm->umem_rwsem);
 }
 
@@ -756,4 +759,3 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
 
 	return ret_val;
 }
-EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);
drivers/infiniband/hw/mlx5/odp.c +19 −22

@@ -539,34 +539,31 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
 	return imr;
 }
 
-static int mr_leaf_free(struct ib_umem_odp *umem_odp, u64 start, u64 end,
-			void *cookie)
+void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 {
-	struct mlx5_ib_mr *mr = umem_odp->private, *imr = cookie;
+	struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
+	struct rb_node *node;
 
-	if (mr->parent != imr)
-		return 0;
+	down_read(&per_mm->umem_rwsem);
+	for (node = rb_first_cached(&per_mm->umem_tree); node;
+	     node = rb_next(node)) {
+		struct ib_umem_odp *umem_odp =
+			rb_entry(node, struct ib_umem_odp, interval_tree.rb);
+		struct mlx5_ib_mr *mr = umem_odp->private;
 
-	ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
-				    ib_umem_end(umem_odp));
+		if (mr->parent != imr)
+			continue;
 
-	if (umem_odp->dying)
-		return 0;
+		ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+					    ib_umem_end(umem_odp));
 
-	WRITE_ONCE(umem_odp->dying, 1);
-	atomic_inc(&imr->num_leaf_free);
-	schedule_work(&umem_odp->work);
+		if (umem_odp->dying)
+			continue;
 
-	return 0;
-}
-
-void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
-{
-	struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
-
-	down_read(&per_mm->umem_rwsem);
-	rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, 0, ULLONG_MAX,
-				      mr_leaf_free, true, imr);
+		WRITE_ONCE(umem_odp->dying, 1);
+		atomic_inc(&imr->num_leaf_free);
+		schedule_work(&umem_odp->work);
+	}
 	up_read(&per_mm->umem_rwsem);
 
 	wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));