
Commit c41d271d authored by Christian König, committed by Alex Deucher

drm/amdgpu: remove the userptr rmn->lock



Avoid a lock inversion problem by just using the mmap_sem to
protect the entries of the interval tree.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
parent e7813d0c
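
A lock inversion (ABBA deadlock) arises when two code paths take the same two locks in opposite orders. The sketch below is a hypothetical userspace illustration of that pattern, not code from this driver: pthread mutexes stand in for mm->mmap_sem and the removed rmn->lock, and path_a/path_b are made-up names. The patch resolves the inversion by dropping the second lock entirely, so only one lock order remains.

/*
 * Hypothetical userspace sketch of an ABBA lock inversion; pthread
 * mutexes stand in for mm->mmap_sem and the removed rmn->lock.
 * With unlucky timing this program deadlocks, which is the point.
 * Build: cc abba.c -o abba -lpthread
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmap_sem = PTHREAD_MUTEX_INITIALIZER; /* stand-in for mm->mmap_sem */
static pthread_mutex_t rmn_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for rmn->lock */

/* Path A: takes mmap_sem first, then the driver lock (notifier-like order). */
static void *path_a(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&mmap_sem);
	pthread_mutex_lock(&rmn_lock); /* waits if path B already holds rmn_lock */
	pthread_mutex_unlock(&rmn_lock);
	pthread_mutex_unlock(&mmap_sem);
	return NULL;
}

/* Path B: takes the driver lock first, then mmap_sem (inverted order). */
static void *path_b(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&rmn_lock);
	pthread_mutex_lock(&mmap_sem); /* waits if path A already holds mmap_sem: deadlock */
	pthread_mutex_unlock(&mmap_sem);
	pthread_mutex_unlock(&rmn_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, path_a, NULL);
	pthread_create(&b, NULL, path_b, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("finished without deadlocking this run");
	return 0;
}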
+12 −20
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -48,8 +48,7 @@ struct amdgpu_mn {
 	/* protected by adev->mn_lock */
 	struct hlist_node	node;
 
-	/* objects protected by lock */
-	struct mutex		lock;
+	/* objects protected by mm->mmap_sem */
 	struct rb_root		objects;
 };
 
@@ -72,8 +71,8 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 	struct amdgpu_mn_node *node, *next_node;
 	struct amdgpu_bo *bo, *next_bo;
 
+	down_write(&rmn->mm->mmap_sem);
 	mutex_lock(&adev->mn_lock);
-	mutex_lock(&rmn->lock);
 	hash_del(&rmn->node);
 	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
 					     it.rb) {
@@ -85,8 +84,8 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 		}
 		kfree(node);
 	}
-	mutex_unlock(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
+	up_write(&rmn->mm->mmap_sem);
 	mmu_notifier_unregister(&rmn->mn, rmn->mm);
 	kfree(rmn);
 }
@@ -129,8 +128,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	mutex_lock(&rmn->lock);
-
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
@@ -165,8 +162,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 			amdgpu_bo_unreserve(bo);
 		}
 	}
-
-	mutex_unlock(&rmn->lock);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
@@ -203,7 +198,6 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	rmn->adev = adev;
 	rmn->mm = mm;
 	rmn->mn.ops = &amdgpu_mn_ops;
-	mutex_init(&rmn->lock);
 	rmn->objects = RB_ROOT;
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
@@ -250,7 +244,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	INIT_LIST_HEAD(&bos);
 
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->mm->mmap_sem);
 
 	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
 		kfree(node);
@@ -264,7 +258,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	if (!node) {
 		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
 		if (!node) {
-			mutex_unlock(&rmn->lock);
+			up_write(&rmn->mm->mmap_sem);
 			return -ENOMEM;
 		}
 	}
@@ -279,7 +273,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	interval_tree_insert(&node->it, &rmn->objects);
 
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->mm->mmap_sem);
 
 	return 0;
 }
@@ -294,17 +288,15 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = bo->adev;
-	struct amdgpu_mn *rmn;
+	struct amdgpu_mn *rmn = bo->mn;
 	struct list_head *head;
 
-	mutex_lock(&adev->mn_lock);
-	rmn = bo->mn;
-	if (rmn == NULL) {
-		mutex_unlock(&adev->mn_lock);
+	if (rmn == NULL)
 		return;
-	}
 
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->mm->mmap_sem);
+	mutex_lock(&adev->mn_lock);
+
 	/* save the next list entry for later */
 	head = bo->mn_list.next;
 
@@ -318,6 +310,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		kfree(node);
 	}
 
-	mutex_unlock(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
+	up_write(&rmn->mm->mmap_sem);
 }
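
After the patch, register and unregister take mm->mmap_sem for writing before touching rmn->objects, while the invalidate callback walks the tree with no lock of its own; the premise implied by the hunks above is that the notifier is only invoked while the caller already holds mmap_sem. Below is a rough userspace model of that single-lock scheme, under that assumption and with made-up names throughout; it is an analogy, not the kernel's API.

/*
 * Rough userspace model of the locking scheme after this patch; all
 * names are illustrative stand-ins, not the kernel's. A single
 * reader/writer lock (playing the role of mm->mmap_sem) now protects
 * the interval tree.
 */
#include <pthread.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
static int objects; /* stand-in for rmn->objects */

/* amdgpu_mn_register()/amdgpu_mn_unregister() analogue: take the lock
 * exclusively, like down_write(&rmn->mm->mmap_sem). */
static void register_bo(void)
{
	pthread_rwlock_wrlock(&mmap_sem);
	objects++; /* plays the role of interval_tree_insert() */
	pthread_rwlock_unlock(&mmap_sem);
}

/* amdgpu_mn_invalidate_range_start() analogue: no locking of its own,
 * because the caller already holds mmap_sem when the notifier fires. */
static void invalidate_range_start(void)
{
	/* walk 'objects' here; protected by the caller's hold on mmap_sem */
}

/* MM-core analogue: acquires mmap_sem, then invokes the notifier. */
static void mm_core_invalidate(void)
{
	pthread_rwlock_rdlock(&mmap_sem);
	invalidate_range_start();
	pthread_rwlock_unlock(&mmap_sem);
}

int main(void)
{
	register_bo();
	mm_core_invalidate();
	return 0;
}

Because every path now reaches the tree through mmap_sem alone, there is only one lock order left and the inversion described in the commit message cannot occur.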