Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e61235db authored by Christian König's avatar Christian König Committed by Alex Deucher
Browse files

drm/amdgpu: add scheduler dependency callback v2



This way the scheduler doesn't wait in its work thread any more.

v2: fix race conditions

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
parent 69bd5bf1
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -717,6 +717,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
		     void *owner);
int amdgpu_sync_rings(struct amdgpu_sync *sync,
		      struct amdgpu_ring *ring);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *fence);
+7 −0
Original line number Diff line number Diff line
@@ -27,6 +27,12 @@
#include <drm/drmP.h>
#include "amdgpu.h"

/*
 * Scheduler dependency callback: hand back the next fence the job's IBs
 * still need to wait for, or NULL when there is nothing left to wait on.
 */
static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
{
	struct amdgpu_job *ajob = (struct amdgpu_job *)job;

	return amdgpu_sync_get_fence(&ajob->ibs->sync);
}

static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
{
	struct amdgpu_job *sched_job;
@@ -75,6 +81,7 @@ static void amdgpu_sched_process_job(struct amd_sched_job *job)
}

/* amdgpu's implementation of the GPU scheduler backend callbacks. */
struct amd_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_sched_dependency,
	.run_job = amdgpu_sched_run_job,
	.process_job = amdgpu_sched_process_job
};
+22 −0
Original line number Diff line number Diff line
@@ -202,6 +202,28 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
	return r;
}

/*
 * Pop the next fence that still needs waiting for out of the sync object.
 *
 * Already-signaled fences are dropped along the way.  Returns the first
 * unsignaled fence -- the reference previously held by the removed entry
 * now belongs to the caller -- or NULL once the sync object is drained.
 */
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *entry;
	struct hlist_node *next;
	struct fence *fence;
	int bucket;

	hash_for_each_safe(sync->fences, bucket, next, entry, node) {
		fence = entry->fence;

		/* The entry is consumed either way. */
		hash_del(&entry->node);
		kfree(entry);

		/* Hand the entry's reference over to the caller. */
		if (!fence_is_signaled(fence))
			return fence;

		/* Signaled already: release the reference and keep going. */
		fence_put(fence);
	}

	return NULL;
}

int amdgpu_sync_wait(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
+22 −0
Original line number Diff line number Diff line
@@ -192,14 +192,36 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
	kfifo_free(&entity->job_queue);
}

/*
 * Fence callback invoked when the dependency fence an entity was blocked
 * on signals: clear the dependency, drop the fence reference that was
 * taken when the callback was armed, and wake the scheduler so it
 * re-checks this entity for runnable jobs.
 */
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	/* Must be cleared before the wakeup so amd_sched_entity_pop_job()
	 * sees the entity as unblocked. */
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->scheduler);
}

/*
 * Peek at the next runnable job of an entity.
 *
 * Returns the job at the head of the entity's queue without removing it
 * from the kfifo, or NULL when the queue is empty or the entity is still
 * blocked on an unsignaled dependency fence.
 */
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->scheduler;
	struct amd_sched_job *job;

	/* A wakeup callback is still pending on a previous dependency;
	 * ACCESS_ONCE forces a fresh read since the callback clears this
	 * field from another context. */
	if (ACCESS_ONCE(entity->dependency))
		return NULL;

	if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(job))) {

		/* fence_add_callback() fails when the fence has already
		 * signaled: drop our reference and ask for the next
		 * dependency.  On success the armed callback
		 * (amd_sched_entity_wakeup) owns the reference and will
		 * clear entity->dependency when the fence signals. */
		if (fence_add_callback(entity->dependency, &entity->cb,
				       amd_sched_entity_wakeup))
			fence_put(entity->dependency);
		else
			return NULL;
	}

	return job;
}

+3 −0
Original line number Diff line number Diff line
@@ -45,6 +45,8 @@ struct amd_sched_entity {
	spinlock_t			queue_lock;
	struct amd_gpu_scheduler	*scheduler;
	uint64_t                        fence_context;
	struct fence			*dependency;
	struct fence_cb			cb;
};

/**
@@ -89,6 +91,7 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 * these functions should be implemented in driver side
*/
struct amd_sched_backend_ops {
	/* Return the next unsignaled fence the job must wait on, or NULL
	 * when the job is ready to run; the returned reference is consumed
	 * by the scheduler. */
	struct fence *(*dependency)(struct amd_sched_job *job);
	/* Submit the job to the hardware and return its completion fence. */
	struct fence *(*run_job)(struct amd_sched_job *job);
	/* Driver-side processing once the job has finished. */
	void (*process_job)(struct amd_sched_job *job);
};