Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6fc13675 authored by Christian König, committed by Alex Deucher
Browse files

drm/amdgpu: generalize the scheduler fence



Make it two events, one for the job being scheduled and one when it is finished.

Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 0e9d239b
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -85,7 +85,7 @@ static void amdgpu_job_free_resources(struct amdgpu_job *job)
	unsigned i;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->base : job->fence;
	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(job->adev, &job->ibs[i], f);
@@ -143,7 +143,7 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
		int r;

		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
				      &job->base.s_fence->base,
				      &job->base.s_fence->finished,
				      &job->vm_id, &job->vm_pd_addr);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);
+2 −2
Original line number Diff line number Diff line
@@ -102,7 +102,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
			   __entry->adev = job->adev;
			   __entry->sched_job = &job->base;
			   __entry->ib = job->ibs;
			   __entry->fence = &job->base.s_fence->base;
			   __entry->fence = &job->base.s_fence->finished;
			   __entry->ring_name = job->ring->name;
			   __entry->num_ibs = job->num_ibs;
			   ),
@@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
			   __entry->adev = job->adev;
			   __entry->sched_job = &job->base;
			   __entry->ib = job->ibs;
			   __entry->fence = &job->base.s_fence->base;
			   __entry->fence = &job->base.s_fence->finished;
			   __entry->ring_name = job->ring->name;
			   __entry->num_ibs = job->num_ibs;
			   ),
+2 −2
Original line number Diff line number Diff line
@@ -26,7 +26,7 @@ TRACE_EVENT(amd_sched_job,
	    TP_fast_assign(
			   __entry->entity = sched_job->s_entity;
			   __entry->sched_job = sched_job;
			   __entry->fence = &sched_job->s_fence->base;
			   __entry->fence = &sched_job->s_fence->finished;
			   __entry->name = sched_job->sched->name;
			   __entry->job_count = kfifo_len(
				   &sched_job->s_entity->job_queue) / sizeof(sched_job);
@@ -46,7 +46,7 @@ TRACE_EVENT(amd_sched_process_job,
		    ),

	    TP_fast_assign(
		    __entry->fence = &fence->base;
		    __entry->fence = &fence->finished;
		    ),
	    TP_printk("fence=%p signaled", __entry->fence)
);
+19 −15
Original line number Diff line number Diff line
@@ -140,7 +140,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);
	entity->fence_context = fence_context_alloc(2);

	return 0;
}
@@ -251,17 +251,21 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/* Fence is from the same scheduler */
		if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
			/* Ignore it when it is already scheduled */
			fence_put(entity->dependency);
			return false;
		}

		/* Wait for fence to be scheduled */
		entity->cb.func = amd_sched_entity_clear_dep;
		list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = fence_get(&s_fence->scheduled);
		fence_put(entity->dependency);
		entity->dependency = fence;
		if (!fence_add_callback(fence, &entity->cb,
					amd_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		fence_put(fence);
		return false;
	}

	if (!fence_add_callback(entity->dependency, &entity->cb,
@@ -389,7 +393,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	fence_add_callback(&sched_job->s_fence->base, &sched_job->finish_cb,
	fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
			   amd_sched_job_finish_cb);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
@@ -412,7 +416,7 @@ int amd_sched_job_init(struct amd_sched_job *job,
	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

	if (fence)
		*fence = &job->s_fence->base;
		*fence = &job->s_fence->finished;
	return 0;
}

@@ -463,10 +467,10 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
	struct amd_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	amd_sched_fence_finished(s_fence);

	trace_amd_sched_process_job(s_fence);
	fence_put(&s_fence->base);
	fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

+9 −10
Original line number Diff line number Diff line
@@ -27,8 +27,6 @@
#include <linux/kfifo.h>
#include <linux/fence.h>

#define AMD_SCHED_FENCE_SCHEDULED_BIT	FENCE_FLAG_USER_BITS

struct amd_gpu_scheduler;
struct amd_sched_rq;

@@ -68,9 +66,9 @@ struct amd_sched_rq {
};

struct amd_sched_fence {
	struct fence                    base;
	struct fence                    scheduled;
	struct fence                    finished;
	struct fence_cb                 cb;
	struct list_head		scheduled_cb;
	struct amd_gpu_scheduler	*sched;
	spinlock_t			lock;
	void                            *owner;
@@ -86,14 +84,15 @@ struct amd_sched_job {
	struct delayed_work		work_tdr;
};

extern const struct fence_ops amd_sched_fence_ops;
extern const struct fence_ops amd_sched_fence_ops_scheduled;
extern const struct fence_ops amd_sched_fence_ops_finished;
static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
{
	struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence,
						   base);
	if (f->ops == &amd_sched_fence_ops_scheduled)
		return container_of(f, struct amd_sched_fence, scheduled);

	if (__f->base.ops == &amd_sched_fence_ops)
		return __f;
	if (f->ops == &amd_sched_fence_ops_finished)
		return container_of(f, struct amd_sched_fence, finished);

	return NULL;
}
@@ -148,7 +147,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
struct amd_sched_fence *amd_sched_fence_create(
	struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
void amd_sched_fence_signal(struct amd_sched_fence *fence);
void amd_sched_fence_finished(struct amd_sched_fence *fence);
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
Loading