Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3320b8d2 authored by Christian König, committed by Alex Deucher
Browse files

drm/amdgpu: remove job->ring



We can easily get the ring from the scheduler (via container_of on drm_sched_job::sched / drm_sched_entity::sched), so there is no need to store a separate ring pointer in the job.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 0e28b10f
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -1027,6 +1027,7 @@ struct amdgpu_cs_parser {

	/* scheduler job object */
	struct amdgpu_job	*job;
	struct amdgpu_ring	*ring;

	/* buffer objects */
	struct ww_acquire_ctx		ticket;
+9 −9
Original line number Diff line number Diff line
@@ -912,11 +912,11 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring = p->job->ring;
	struct amdgpu_ring *ring = p->ring;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (p->job->ring->funcs->parse_cs) {
	if (p->ring->funcs->parse_cs) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
@@ -1030,10 +1030,10 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			}
		}

		if (parser->job->ring && parser->job->ring != ring)
		if (parser->ring && parser->ring != ring)
			return -EINVAL;

		parser->job->ring = ring;
		parser->ring = ring;

		r =  amdgpu_ib_get(adev, vm,
					ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
@@ -1052,11 +1052,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,

	/* UVD & VCE fw doesn't support user fences */
	if (parser->job->uf_addr && (
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
	    parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
	    parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
		return -EINVAL;

	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
}

static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1207,7 +1207,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_ring *ring = p->job->ring;
	struct amdgpu_ring *ring = p->ring;
	struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
	struct amdgpu_job *job;
	unsigned i;
@@ -1256,7 +1256,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);
	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
	amdgpu_ring_priority_get(p->ring, job->base.s_priority);

	trace_amdgpu_cs_ioctl(job);
	drm_sched_entity_push_job(&job->base, entity);
+2 −2
Original line number Diff line number Diff line
@@ -3253,7 +3253,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,

		kthread_park(ring->sched.thread);

		if (job && job->ring->idx != i)
		if (job && job->base.sched == &ring->sched)
			continue;

		drm_sched_hw_job_reset(&ring->sched, &job->base);
@@ -3277,7 +3277,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
		 * or all rings (in the case @job is NULL)
		 * after above amdgpu_reset accomplished
		 */
		if ((!job || job->ring->idx == i) && !r)
		if ((!job || job->base.sched == &ring->sched) && !r)
			drm_sched_job_recovery(&ring->sched);

		kthread_unpark(ring->sched.thread);
+12 −11
Original line number Diff line number Diff line
@@ -30,12 +30,12 @@

static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
		  job->base.sched->name,
		  atomic_read(&job->ring->fence_drv.last_seq),
		  job->ring->fence_drv.sync_seq);
		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);

	amdgpu_device_gpu_recover(job->adev, job, false);
}
@@ -98,9 +98,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)

static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	amdgpu_ring_priority_put(job->ring, s_job->s_priority);
	amdgpu_ring_priority_put(ring, s_job->s_priority);
	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);
@@ -120,6 +121,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(entity->sched);
	int r;

	if (!f)
@@ -130,10 +132,9 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		return r;

	job->owner = owner;
	job->ring = to_amdgpu_ring(entity->sched);
	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
	amdgpu_ring_priority_get(ring, job->base.s_priority);
	drm_sched_entity_push_job(&job->base, entity);

	return 0;
@@ -142,6 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	bool explicit = false;
@@ -157,8 +159,6 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
	}

	while (fence == NULL && vm && !job->vmid) {
		struct amdgpu_ring *ring = job->ring;

		r = amdgpu_vmid_grab(vm, ring, &job->sync,
				     &job->base.s_fence->finished,
				     job);
@@ -173,6 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,

static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_device *adev;
	struct amdgpu_job *job;
@@ -196,7 +197,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
+0 −1
Original line number Diff line number Diff line
@@ -37,7 +37,6 @@ struct amdgpu_job {
	struct drm_sched_job    base;
	struct amdgpu_device	*adev;
	struct amdgpu_vm	*vm;
	struct amdgpu_ring	*ring;
	struct amdgpu_sync	sync;
	struct amdgpu_sync	sched_sync;
	struct amdgpu_ib	*ibs;
Loading