Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4f618e73 authored by Christian König, committed by Alex Deucher
Browse files

drm/amdgpu: drop VMID per ring tracking



David suggested this a long time ago, instead of checking
each ring just walk over all the VMIDs in reverse LRU order.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Andres Rodriguez <andresx7@gmail.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 0eeb68b3
Loading
Loading
Loading
Loading
+3 −14
Original line number Original line Diff line number Diff line
@@ -463,17 +463,10 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,


	job->vm_needs_flush = true;
	job->vm_needs_flush = true;
	/* Check if we can use a VMID already assigned to this VM */
	/* Check if we can use a VMID already assigned to this VM */
	i = ring->idx;
	list_for_each_entry_reverse(id, &adev->vm_manager.ids_lru, list) {
	do {
		struct dma_fence *flushed;
		struct dma_fence *flushed;


		id = vm->ids[i++];
		if (i == AMDGPU_MAX_RINGS)
			i = 0;

		/* Check all the prerequisites to using this VMID */
		/* Check all the prerequisites to using this VMID */
		if (!id)
			continue;
		if (amdgpu_vm_had_gpu_reset(adev, id))
		if (amdgpu_vm_had_gpu_reset(adev, id))
			continue;
			continue;


@@ -503,7 +496,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
			goto error;
			goto error;


		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
		vm->ids[ring->idx] = id;


		job->vm_id = id - adev->vm_manager.ids;
		job->vm_id = id - adev->vm_manager.ids;
		job->vm_needs_flush = false;
		job->vm_needs_flush = false;
@@ -512,7 +504,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		mutex_unlock(&adev->vm_manager.lock);
		mutex_unlock(&adev->vm_manager.lock);
		return 0;
		return 0;


	} while (i != ring->idx);
	};


	/* Still no ID to use? Then use the idle one found earlier */
	/* Still no ID to use? Then use the idle one found earlier */
	id = idle;
	id = idle;
@@ -532,7 +524,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
	atomic64_set(&id->owner, vm->client_id);
	atomic64_set(&id->owner, vm->client_id);
	vm->ids[ring->idx] = id;


	job->vm_id = id - adev->vm_manager.ids;
	job->vm_id = id - adev->vm_manager.ids;
	trace_amdgpu_vm_grab_id(vm, ring->idx, job);
	trace_amdgpu_vm_grab_id(vm, ring->idx, job);
@@ -2117,10 +2108,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
	unsigned ring_instance;
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	struct amd_sched_rq *rq;
	int i, r;
	int r;


	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		vm->ids[i] = NULL;
	vm->va = RB_ROOT;
	vm->va = RB_ROOT;
	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
	spin_lock_init(&vm->status_lock);
	spin_lock_init(&vm->status_lock);
+0 −3
Original line number Original line Diff line number Diff line
@@ -114,9 +114,6 @@ struct amdgpu_vm {
	struct dma_fence	*last_dir_update;
	struct dma_fence	*last_dir_update;
	uint64_t		last_eviction_counter;
	uint64_t		last_eviction_counter;


	/* for id and flush management per ring */
	struct amdgpu_vm_id	*ids[AMDGPU_MAX_RINGS];

	/* protecting freed */
	/* protecting freed */
	spinlock_t		freed_lock;
	spinlock_t		freed_lock;