Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8d76001e authored by Christian König's avatar Christian König Committed by Alex Deucher
Browse files

drm/amdgpu: reuse VMIDs assigned to a VM only if there is also a free one



This fixes a fairness problem with the GPU scheduler. A VM with a lot of
jobs could previously starve VMs with fewer jobs.

Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 36fd7c5c
Loading
Loading
Loading
Loading
+59 −54
Original line number Diff line number Diff line
@@ -179,12 +179,25 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
	struct amdgpu_device *adev = ring->adev;
	struct fence *updates = sync->last_vm_update;
	struct amdgpu_vm_id *id;
	struct amdgpu_vm_id *id, *idle;
	unsigned i = ring->idx;
	int r;

	mutex_lock(&adev->vm_manager.lock);

	/* Check if we have an idle VMID */
	list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
		if (amdgpu_sync_is_idle(&idle->active, ring))
			break;

	}

	/* If we can't find a idle VMID to use, just wait for the oldest */
	if (&idle->list == &adev->vm_manager.ids_lru) {
		id = list_first_entry(&adev->vm_manager.ids_lru,
				      struct amdgpu_vm_id,
				      list);
	} else {
		/* Check if we can use a VMID already assigned to this VM */
		do {
			struct fence *flushed;
@@ -203,12 +216,13 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
			if (pd_addr != id->pd_gpu_addr)
				continue;

		if (id->last_user != ring &&
		    (!id->last_flush || !fence_is_signaled(id->last_flush)))
			if (id->last_user != ring && (!id->last_flush ||
			    !fence_is_signaled(id->last_flush)))
				continue;

			flushed  = id->flushed_updates;
		if (updates && (!flushed || fence_is_later(updates, flushed)))
			if (updates && (!flushed ||
			    fence_is_later(updates, flushed)))
				continue;

			/* Good we can use this VMID */
@@ -229,25 +243,16 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,

			*vm_id = id - adev->vm_manager.ids;
			*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
		trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
			trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
						*vm_pd_addr);

			mutex_unlock(&adev->vm_manager.lock);
			return 0;

		} while (i != ring->idx);

	/* Check if we have an idle VMID */
	list_for_each_entry(id, &adev->vm_manager.ids_lru, list) {
		if (amdgpu_sync_is_idle(&id->active, ring))
			break;

	}

	/* If we can't find a idle VMID to use, just wait for the oldest */
	if (&id->list == &adev->vm_manager.ids_lru) {
		id = list_first_entry(&adev->vm_manager.ids_lru,
				      struct amdgpu_vm_id,
				      list);
		/* Still no ID to use? Then use the idle one found earlier */
		id = idle;
	}

	r = amdgpu_sync_cycle_fences(sync, &id->active, fence);