
Commit ae20f12d authored by Christian König, committed by Alex Deucher

drm/amdgpu: add invalidate_page callback for userptrs



Otherwise we can run into problems with the writeback code.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 0d2b42b0
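
Some background on the one-line rationale above (this note is editorial, not part of the commit message): on kernels of this vintage, single-page paths such as the writeback code invalidate a page table entry and fire the mmu_notifier invalidate_page callback on its own, without a surrounding invalidate_range_start/end pair. A userptr driver that only implements invalidate_range_start never hears about those single-page invalidations, so the GPU can keep accessing a page that writeback has unmapped. A minimal sketch of the callback pair a driver registers, assuming the 4.x-era mmu_notifier API (the example_* names are hypothetical):

	#include <linux/mmu_notifier.h>

	/* Called for a single-page invalidation, e.g. from the writeback
	 * code; no invalidate_range_start/end pair surrounds this call. */
	static void example_invalidate_page(struct mmu_notifier *mn,
					    struct mm_struct *mm,
					    unsigned long address)
	{
		/* stop all device access to 'address' before returning */
	}

	/* Called with an exclusive [start, end) range before a batch of
	 * page table updates (munmap, migration, ...). */
	static void example_invalidate_range_start(struct mmu_notifier *mn,
						   struct mm_struct *mm,
						   unsigned long start,
						   unsigned long end)
	{
		/* stop all device access to the whole range */
	}

	static const struct mmu_notifier_ops example_ops = {
		.invalidate_page	= example_invalidate_page,
		.invalidate_range_start	= example_invalidate_range_start,
	};

The patch below adds the invalidate_page half of this pair for amdgpu's userptr buffers.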
Showing 1 changed file with 72 additions and 26 deletions
@@ -106,42 +106,23 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
 }
 
 /**
- * amdgpu_mn_invalidate_range_start - callback to notify about mm change
+ * amdgpu_mn_invalidate_node - unmap all BOs of a node
  *
- * @mn: our notifier
- * @mn: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
+ * @node: the node with the BOs to unmap
  *
- * We block for all BOs between start and end to be idle and
- * unmap them by move them into system domain again.
+ * We block for all BOs and unmap them by move them
+ * into system domain again.
  */
-static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
-					     struct mm_struct *mm,
+static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
 				      unsigned long start,
 				      unsigned long end)
 {
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
-	struct interval_tree_node *it;
-
-	/* notification is exclusive, but interval is inclusive */
-	end -= 1;
-
-	mutex_lock(&rmn->lock);
-
-	it = interval_tree_iter_first(&rmn->objects, start, end);
-	while (it) {
-		struct amdgpu_mn_node *node;
 	struct amdgpu_bo *bo;
 	long r;
 
-		node = container_of(it, struct amdgpu_mn_node, it);
-		it = interval_tree_iter_next(it, start, end);
-
 	list_for_each_entry(bo, &node->bos, mn_list) {
 
-			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
-							  end))
+		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
 			continue;
 
 		r = amdgpu_bo_reserve(bo, true);
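
The hunk above is pure refactoring: the per-node loop that idles and unmaps the affected BOs moves out of amdgpu_mn_invalidate_range_start() into the new helper amdgpu_mn_invalidate_node(), so the second hunk can reuse it from the new single-page callback. Both callers drive it from an interval tree walk; a sketch of that pattern, assuming the rb_root-based interval_tree API of this era (example_walk is a hypothetical name):

	#include <linux/interval_tree.h>

	/* Visit every node overlapping the *inclusive* range [start, last];
	 * container_of() then recovers the structure embedding 'it'. */
	static void example_walk(struct rb_root *objects,
				 unsigned long start, unsigned long last)
	{
		struct interval_tree_node *it;

		for (it = interval_tree_iter_first(objects, start, last);
		     it;
		     it = interval_tree_iter_next(it, start, last)) {
			/* e.g. container_of(it, struct amdgpu_mn_node, it) */
		}
	}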
@@ -164,11 +145,76 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	}
 }
 
+/**
+ * amdgpu_mn_invalidate_page - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mn: the mm this callback is about
+ * @address: address of invalidate page
+ *
+ * Invalidation of a single page. Blocks for all BOs mapping it
+ * and unmap them by move them into system domain again.
+ */
+static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
+				      struct mm_struct *mm,
+				      unsigned long address)
+{
+	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct interval_tree_node *it;
+
+	mutex_lock(&rmn->lock);
+
+	it = interval_tree_iter_first(&rmn->objects, address, address);
+	if (it) {
+		struct amdgpu_mn_node *node;
+
+		node = container_of(it, struct amdgpu_mn_node, it);
+		amdgpu_mn_invalidate_node(node, address, address);
+	}
 
 	mutex_unlock(&rmn->lock);
 }
 
+/**
+ * amdgpu_mn_invalidate_range_start - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mn: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+ * We block for all BOs between start and end to be idle and
+ * unmap them by move them into system domain again.
+ */
+static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
+					     struct mm_struct *mm,
+					     unsigned long start,
+					     unsigned long end)
+{
+	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct interval_tree_node *it;
+
+	/* notification is exclusive, but interval is inclusive */
+	end -= 1;
+
+	mutex_lock(&rmn->lock);
+
+	it = interval_tree_iter_first(&rmn->objects, start, end);
+	while (it) {
+		struct amdgpu_mn_node *node;
+
+		node = container_of(it, struct amdgpu_mn_node, it);
+		it = interval_tree_iter_next(it, start, end);
+
+		amdgpu_mn_invalidate_node(node, start, end);
+	}
+
+	mutex_unlock(&rmn->lock);
+}
+
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
 	.release = amdgpu_mn_release,
+	.invalidate_page = amdgpu_mn_invalidate_page,
 	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
 };
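
One detail worth calling out in the two callbacks above: the mm notifier hands invalidate_range_start() an exclusive [start, end) range, while the kernel's interval trees use inclusive bounds. That is what the "end -= 1" adjustment is for, and why amdgpu_mn_invalidate_page() can simply pass (address, address) for a single page. A standalone userspace sketch of why the conversion matters (the addresses are made up):

	#include <stdio.h>
	#include <stdbool.h>

	/* overlap test for inclusive intervals, the same convention the
	 * kernel's interval trees use */
	static bool overlaps(unsigned long s1, unsigned long l1,
			     unsigned long s2, unsigned long l2)
	{
		return s1 <= l2 && s2 <= l1;
	}

	int main(void)
	{
		/* userptr BO covering one page, inclusive [0x2000, 0x2fff] */
		unsigned long bo_start = 0x2000, bo_last = 0x2fff;
		/* the mm invalidates [0x1000, 0x2000): the BO is untouched */
		unsigned long start = 0x1000, end = 0x2000;

		/* exclusive end used directly: false positive, prints 1 */
		printf("%d\n", overlaps(bo_start, bo_last, start, end));
		/* converted to an inclusive bound: correct, prints 0 */
		printf("%d\n", overlaps(bo_start, bo_last, start, end - 1));
		return 0;
	}

Without the adjustment, an invalidation ending exactly at a BO's first byte would needlessly idle and unmap that BO.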