Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8d0a7cea authored by Christian König's avatar Christian König Committed by Alex Deucher
Browse files

drm/amdgpu: grab VMID before submitting job v5



This allows the scheduler to handle the dependencies on ID contention as well.

v2: grab id only once
v3: use a separate lock for the VMIDs
v4: cleanup after semaphore removal
v5: minor coding style change

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 165e4e07
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -777,6 +777,7 @@ struct amdgpu_ib {
	struct amdgpu_ring		*ring;
	struct amdgpu_fence		*fence;
	struct amdgpu_user_fence        *user;
	bool				grabbed_vmid;
	struct amdgpu_vm		*vm;
	struct amdgpu_ctx		*ctx;
	struct amdgpu_sync		sync;
@@ -925,6 +926,9 @@ struct amdgpu_vm {
};

struct amdgpu_vm_manager {
	/* protecting IDs */
	struct mutex				lock;

	struct {
		struct fence	*active;
		atomic_long_t	owner;
+1 −0
Original line number Diff line number Diff line
@@ -1456,6 +1456,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&adev->ring_lock);
	mutex_init(&adev->vm_manager.lock);
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->gem.mutex);
	mutex_init(&adev->pm.mutex);
+5 −12
Original line number Diff line number Diff line
@@ -142,20 +142,16 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
		return -EINVAL;
	}

	r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	if (vm && !ibs->grabbed_vmid) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}

	if (vm) {
		/* grab a vm id if necessary */
		r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
	r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
	if (r) {
			amdgpu_ring_unlock_undo(ring);
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}
	}

	r = amdgpu_sync_wait(&ibs->sync);
	if (r) {
@@ -207,9 +203,6 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
				       AMDGPU_FENCE_FLAG_64BIT);
	}

	if (ib->vm)
		amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);

	amdgpu_ring_unlock_commit(ring);
	return 0;
}
+25 −1
Original line number Diff line number Diff line
@@ -31,7 +31,31 @@
/*
 * Return the next fence this job must wait on before it can run.
 *
 * First drains the job's sync object; when that is empty and the job is a
 * VM job that has not yet been assigned a VM ID, grab one here (under the
 * VM manager lock) so that ID contention is expressed as a scheduler
 * dependency rather than blocking at submit time.
 *
 * Returns a fence to wait on, or NULL when the job has no remaining
 * dependencies and may be scheduled.
 */
static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
{
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_sync *sync = &job->ibs->sync;
	struct amdgpu_vm *vm = job->ibs->vm;

	struct fence *fence = amdgpu_sync_get_fence(sync);

	if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
		struct amdgpu_ring *ring = job->ibs->ring;
		struct amdgpu_device *adev = ring->adev;
		int r;

		/* vm_manager.lock serializes VM ID allocation and fencing */
		mutex_lock(&adev->vm_manager.lock);
		r = amdgpu_vm_grab_id(vm, ring, sync);
		if (r) {
			DRM_ERROR("Error getting VM ID (%d)\n", r);
		} else {
			/* Fence the ID with this job's scheduler fence so the
			 * ID stays owned until the job completes. */
			fence = &job->base.s_fence->base;
			amdgpu_vm_fence(ring->adev, vm, fence);
			job->ibs->grabbed_vmid = true;
		}
		mutex_unlock(&adev->vm_manager.lock);

		/* grab_id may have added new dependencies to the sync
		 * object (e.g. waiting for the previous ID owner); pick
		 * the next one up here. */
		fence = amdgpu_sync_get_fence(sync);
	}

	return fence;
}

static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)