
Commit 068c3304 authored by Nayan Deshmukh, committed by Alex Deucher

drm/scheduler: remove sched field from the entity



The scheduler of an entity is determined by the run queue on which
it is queued. This patch saves us the effort of keeping the rq and
sched fields in sync once we start shifting entities among different
run queues.

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent cdc50176
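
For context, here is a minimal sketch of the relationship this patch relies on: each run queue holds a pointer back to the scheduler that owns it, so an entity can always derive its scheduler from whatever rq it is currently queued on. The struct layouts below are abbreviated from include/drm/gpu_scheduler.h, and entity_to_sched() is a hypothetical helper added for illustration only; it is not part of the patch.

/* Abbreviated sketch; the real structs live in include/drm/gpu_scheduler.h. */
struct drm_gpu_scheduler;

struct drm_sched_rq {
	struct drm_gpu_scheduler	*sched;	/* scheduler owning this run queue */
	/* ... */
};

struct drm_sched_entity {
	struct drm_sched_rq	*rq;	/* current run queue; may change at runtime */
	/* no cached 'sched' pointer anymore: derive it via rq instead */
	/* ... */
};

/* Hypothetical helper showing the new lookup: one extra dereference,
 * but it can never go stale when the entity moves to another rq.
 */
static inline struct drm_gpu_scheduler *
entity_to_sched(struct drm_sched_entity *entity)
{
	return entity->rq->sched;
}

Every entity->sched read below becomes this entity->rq->sched chase, which is why the change fans out across so many call sites.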
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +1 −1
@@ -1262,7 +1262,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);

-	ring = to_amdgpu_ring(entity->sched);
+	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +2 −2
@@ -143,7 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);

-	ring = to_amdgpu_ring(entity->sched);
+	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);

	return 0;
@@ -167,7 +167,7 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
{
-	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
+	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +3 −3
@@ -387,7 +387,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
		ats_entries = 0;
	}

-	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
@@ -1113,7 +1113,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
		struct amdgpu_ring *ring;
		struct dma_fence *fence;

-		ring = container_of(vm->entity.sched, struct amdgpu_ring,
+		ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
				    sched);

		amdgpu_ring_pad_ib(ring, params.ib);
@@ -1403,7 +1403,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
					   addr, flags);
	}

-	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);

	nptes = last - start + 1;

drivers/gpu/drm/scheduler/gpu_scheduler.c +9 −10
@@ -185,7 +185,6 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq_list[0];
-	entity->sched = rq_list[0]->sched;
	entity->guilty = guilty;
	entity->last_scheduled = NULL;

@@ -210,8 +209,8 @@ EXPORT_SYMBOL(drm_sched_entity_init);
static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
					    struct drm_sched_entity *entity)
{
-	return entity->sched == sched &&
-		entity->rq != NULL;
+	return entity->rq != NULL &&
+		entity->rq->sched == sched;
}

/**
@@ -278,7 +277,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
	struct drm_gpu_scheduler *sched;
	long ret = timeout;

-	sched = entity->sched;
+	sched = entity->rq->sched;
	if (!drm_sched_entity_is_initialized(sched, entity))
		return ret;
	/**
@@ -317,7 +316,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched;

-	sched = entity->sched;
+	sched = entity->rq->sched;
	drm_sched_entity_set_rq(entity, NULL);

	/* Consumption of existing IBs wasn't completed. Forcefully
@@ -388,7 +387,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb
		container_of(cb, struct drm_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
-	drm_sched_wakeup(entity->sched);
+	drm_sched_wakeup(entity->rq->sched);
}

static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
@@ -438,7 +437,7 @@ EXPORT_SYMBOL(drm_sched_entity_set_rq);
bool drm_sched_dependency_optimized(struct dma_fence* fence,
				    struct drm_sched_entity *entity)
{
-	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
@@ -455,7 +454,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);

static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
-	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence * fence = entity->dependency;
	struct drm_sched_fence *s_fence;

@@ -500,7 +499,7 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
static struct drm_sched_job *
drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
-	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job = to_drm_sched_job(
						spsc_queue_peek(&entity->job_queue));

@@ -744,7 +743,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
-	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->rq->sched;

	job->sched = sched;
	job->entity = entity;
drivers/gpu/drm/scheduler/sched_fence.c +1 −1
@@ -161,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
		return NULL;

	fence->owner = owner;
-	fence->sched = entity->sched;
+	fence->sched = entity->rq->sched;
	spin_lock_init(&fence->lock);

	seq = atomic_inc_return(&entity->fence_seq);