Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7484667c authored by Chunming Zhou, committed by Alex Deucher
Browse files

drm/amdgpu: move sched job process from isr to fence callback



This avoids lost interrupts and ensures scheduler jobs are processed exactly once.

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
parent 27f6642d
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -404,7 +404,7 @@ struct amdgpu_fence_driver {

struct amdgpu_fence {
	struct fence base;

	struct fence_cb cb;
	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
	uint64_t			seq;
+1 −18
Original line number Diff line number Diff line
@@ -350,25 +350,8 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (wake) {
		if (amdgpu_enable_scheduler) {
			uint64_t handled_seq =
				amd_sched_get_handled_seq(ring->scheduler);
			uint64_t latest_seq =
				atomic64_read(&ring->fence_drv.last_seq);
			if (handled_seq == latest_seq) {
				DRM_ERROR("ring %d, EOP without seq update (lastest_seq=%llu)\n",
					  ring->idx, latest_seq);
				goto exit;
			}
			do {
				amd_sched_isr(ring->scheduler);
			} while (amd_sched_get_handled_seq(ring->scheduler) < latest_seq);
		}

	if (wake)
		wake_up_all(&ring->fence_drv.fence_queue);
	}
exit:
	spin_unlock_irqrestore(&ring->fence_lock, irqflags);
}

+13 −0
Original line number Diff line number Diff line
@@ -43,12 +43,20 @@ static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
	return r;
}

/*
 * amdgpu_fence_sched_cb - fence-signal callback that notifies the GPU scheduler.
 *
 * @f:  the signalled fence (unused; the enclosing amdgpu_fence is
 *      recovered through @cb instead)
 * @cb: the fence_cb embedded in struct amdgpu_fence, used with
 *      container_of() to get back to the owning fence
 *
 * Installed via fence_add_callback() in amdgpu_sched_run_job(), so the
 * scheduler is driven from the fence-signal path rather than directly
 * from the interrupt handler — per the commit message, this avoids
 * losing scheduler work when interrupts are missed.
 */
static void amdgpu_fence_sched_cb(struct fence *f, struct fence_cb *cb)
{
	struct amdgpu_fence *fence =
		container_of(cb, struct amdgpu_fence, cb);
	amd_sched_isr(fence->ring->scheduler);
}

static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
				 struct amd_context_entity *c_entity,
				 void *job)
{
	int r = 0;
	struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;
	struct amdgpu_fence *fence;

	mutex_lock(&sched_job->job_lock);
	r = amdgpu_ib_schedule(sched_job->adev,
@@ -57,6 +65,11 @@ static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
			       sched_job->filp);
	if (r)
		goto err;
	fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
	if (fence_add_callback(&fence->base,
			       &fence->cb, amdgpu_fence_sched_cb))
		goto err;

	if (sched_job->run_job) {
		r = sched_job->run_job(sched_job);
		if (r)