Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d033a6de authored by Chunming Zhou, committed by Alex Deucher
Browse files

drm/amd: abstract kernel rq and normal rq to priority of run queue



Allows us to set priorities in the scheduler.

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
parent ccba7691
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1044,7 +1044,7 @@ struct amdgpu_ctx_mgr {
	struct idr		ctx_handles;
};

int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
		    struct amdgpu_ctx *ctx);
void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);

+5 −6
Original line number Diff line number Diff line
@@ -25,7 +25,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"

int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
		    struct amdgpu_ctx *ctx)
{
	unsigned i, j;
@@ -42,10 +42,9 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
		/* create context entity for each ring */
		for (i = 0; i < adev->num_rings; i++) {
			struct amd_sched_rq *rq;
			if (kernel)
				rq = &adev->rings[i]->sched.kernel_rq;
			else
				rq = &adev->rings[i]->sched.sched_rq;
			if (pri >= AMD_SCHED_MAX_PRIORITY)
				return -EINVAL;
			rq = &adev->rings[i]->sched.sched_rq[pri];
			r = amd_sched_entity_init(&adev->rings[i]->sched,
						  &ctx->rings[i].entity,
						  rq, amdgpu_sched_jobs);
@@ -103,7 +102,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
		return r;
	}
	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, false, ctx);
	r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_NORMAL, ctx);
	mutex_unlock(&mgr->lock);

	return r;
+1 −1
Original line number Diff line number Diff line
@@ -1528,7 +1528,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
		return r;
	}

	r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx);
	r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_KERNEL, &adev->kernel_ctx);
	if (r) {
		dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
		return r;
+9 −5
Original line number Diff line number Diff line
@@ -348,14 +348,17 @@ static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue*/
	entity = amd_sched_rq_select_entity(&sched->kernel_rq);
	if (entity == NULL)
		entity = amd_sched_rq_select_entity(&sched->sched_rq);
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}
@@ -477,12 +480,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
		   struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	int i;
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
+7 −2
Original line number Diff line number Diff line
@@ -104,6 +104,12 @@ struct amd_sched_backend_ops {
	struct fence *(*run_job)(struct amd_sched_job *sched_job);
};

/*
 * Run-queue priority levels for the GPU scheduler.
 *
 * A scheduler instance keeps one amd_sched_rq per level
 * (sched_rq[AMD_SCHED_MAX_PRIORITY]), and entity selection scans the
 * array from index 0 upward, taking the first run queue with a ready
 * entity — so a lower numeric value means higher priority.
 */
enum amd_sched_priority {
	AMD_SCHED_PRIORITY_KERNEL = 0,	/* kernel context; serviced first */
	AMD_SCHED_PRIORITY_NORMAL,	/* default for userspace contexts */
	AMD_SCHED_MAX_PRIORITY		/* number of levels, not a valid priority
					 * (ctx init rejects pri >= this with -EINVAL) */
};

/**
 * One scheduler is implemented for each hardware ring
*/
@@ -112,8 +118,7 @@ struct amd_gpu_scheduler {
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct amd_sched_rq		sched_rq;
	struct amd_sched_rq		kernel_rq;
	struct amd_sched_rq		sched_rq[AMD_SCHED_MAX_PRIORITY];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;