
Commit c25867df authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: add lock for interval tree in vm



Change-Id: I62b892a22af37b32e6b4aefca80a25cf45426ed2
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
parent 1c16c0a7
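
The change is mechanical: struct amdgpu_vm gains a spinlock, it_lock, which is initialized in amdgpu_vm_init() and taken around every interval_tree_iter_first(), interval_tree_insert() and interval_tree_remove() call on vm->va, so concurrent BO map/unmap paths no longer race on the interval tree. Below is a minimal sketch of that pattern in isolation, not driver code: example_vm and its helpers are illustrative names, and the rb_root-based interval tree API is the one the kernel had at the time of this commit.

/* Minimal sketch of the locking pattern this patch applies; "example_vm"
 * stands in for struct amdgpu_vm and is not part of the patch. */
#include <linux/interval_tree.h>
#include <linux/spinlock.h>

struct example_vm {
	struct rb_root	va;		/* interval tree of GPU VA mappings */
	spinlock_t	it_lock;	/* protects va */
};

static void example_vm_init(struct example_vm *vm)
{
	vm->va = RB_ROOT;
	spin_lock_init(&vm->it_lock);
}

/* Insert a mapping; the lock serializes against lookups and removals. */
static void example_vm_map(struct example_vm *vm,
			   struct interval_tree_node *node)
{
	spin_lock(&vm->it_lock);
	interval_tree_insert(node, &vm->va);
	spin_unlock(&vm->it_lock);
}

/* Find any mapping overlapping [start, last]; as in the patch itself,
 * the returned pointer is only a hint once the lock is dropped. */
static struct interval_tree_node *
example_vm_find(struct example_vm *vm, unsigned long start,
		unsigned long last)
{
	struct interval_tree_node *it;

	spin_lock(&vm->it_lock);
	it = interval_tree_iter_first(&vm->va, start, last);
	spin_unlock(&vm->it_lock);
	return it;
}

A spinlock rather than a mutex fits here because each critical section is a single short, non-sleeping O(log n) tree operation.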
drivers/gpu/drm/amd/amdgpu/amdgpu.h  +2 −0
@@ -954,6 +954,8 @@ struct amdgpu_vm {

 	/* for id and flush management per ring */
 	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
+	/* for interval tree */
+	spinlock_t		it_lock;
 };
 
 struct amdgpu_vm_manager {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  +13 −2
@@ -1028,7 +1028,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
+	spin_lock(&vm->it_lock);
 	it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
+	spin_unlock(&vm->it_lock);
 	if (it) {
 		struct amdgpu_bo_va_mapping *tmp;
 		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1055,7 +1057,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	mapping->flags = flags;
 
 	list_add(&mapping->list, &bo_va->invalids);
+	spin_lock(&vm->it_lock);
 	interval_tree_insert(&mapping->it, &vm->va);
+	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
 
 	/* Make sure the page tables are allocated */
@@ -1101,7 +1105,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,

 error_free:
 	list_del(&mapping->list);
+	spin_lock(&vm->it_lock);
 	interval_tree_remove(&mapping->it, &vm->va);
+	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 	kfree(mapping);
 
@@ -1151,7 +1157,9 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	}
 
 	list_del(&mapping->list);
+	spin_lock(&vm->it_lock);
 	interval_tree_remove(&mapping->it, &vm->va);
+	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
 	if (valid)
@@ -1187,13 +1195,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,

 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
 		list_del(&mapping->list);
+		spin_lock(&vm->it_lock);
 		interval_tree_remove(&mapping->it, &vm->va);
+		spin_unlock(&vm->it_lock);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 		list_add(&mapping->list, &vm->freed);
 	}
 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
 		list_del(&mapping->list);
+		spin_lock(&vm->it_lock);
 		interval_tree_remove(&mapping->it, &vm->va);
+		spin_unlock(&vm->it_lock);
 		kfree(mapping);
 	}
 
@@ -1248,7 +1260,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	INIT_LIST_HEAD(&vm->invalidated);
 	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
-
+	spin_lock_init(&vm->it_lock);
 	pd_size = amdgpu_vm_directory_size(adev);
 	pd_entries = amdgpu_vm_num_pdes(adev);
 
@@ -1312,7 +1324,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)

 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
-
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		unsigned id = vm->ids[i].id;