Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit aef4852e authored by Christian König, committed by Alex Deucher
Browse files

drm/amdgpu: fix entity wakeup race condition



That actually didn't work at all.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
parent f85a6dd9
Loading
Loading
Loading
Loading
+21 −22
Original line number Diff line number Diff line
@@ -108,7 +108,6 @@ static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
static struct amd_sched_entity *
amd_sched_select_context(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *wake_entity = NULL;
	struct amd_sched_entity *tmp;

	if (!amd_sched_ready(sched))
@@ -119,11 +118,6 @@ amd_sched_select_context(struct amd_gpu_scheduler *sched)
	if (tmp == NULL)
		tmp = amd_sched_rq_select_entity(&sched->sched_rq);

	if (sched->current_entity && (sched->current_entity != tmp))
		wake_entity = sched->current_entity;
	sched->current_entity = tmp;
	if (wake_entity && wake_entity->need_wakeup)
		wake_up(&wake_entity->wait_queue);
	return tmp;
}

@@ -184,16 +178,17 @@ static bool is_context_entity_initialized(struct amd_gpu_scheduler *sched,
		entity->belongto_rq != NULL;
}

static bool is_context_entity_idle(struct amd_gpu_scheduler *sched,
				   struct amd_sched_entity *entity)
{
/**
	 * Idle means no pending IBs, and the entity is not
	 * currently being used.
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity don't has any unscheduled jobs.
 */
	barrier();
	if ((sched->current_entity != entity) &&
	    kfifo_is_empty(&entity->job_queue))
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
@@ -210,8 +205,8 @@ static bool is_context_entity_idle(struct amd_gpu_scheduler *sched,
int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			    struct amd_sched_entity *entity)
{
	int r = 0;
	struct amd_sched_rq *rq = entity->belongto_rq;
	long r;

	if (!is_context_entity_initialized(sched, entity))
		return 0;
@@ -220,13 +215,11 @@ int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs
	*/
	r = wait_event_timeout(
		entity->wait_queue,
		is_context_entity_idle(sched, entity),
		msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS)
		) ?  0 : -1;
	r = wait_event_timeout(entity->wait_queue,
		amd_sched_entity_is_idle(entity),
		msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS));

	if (r)
	if (r <= 0)
		DRM_INFO("Entity %p is in waiting state during fini\n",
			 entity);

@@ -325,6 +318,12 @@ static int amd_sched_main(void *param)
			fence_put(fence);
		}
		mutex_unlock(&sched->sched_lock);

		if (c_entity->need_wakeup) {
			c_entity->need_wakeup = false;
			wake_up(&c_entity->wait_queue);
		}

	}
	return 0;
}
+0 −1
Original line number Diff line number Diff line
@@ -117,7 +117,6 @@ struct amd_gpu_scheduler {
	uint32_t			granularity; /* in ms unit */
	uint32_t			preemption;
	wait_queue_head_t		wait_queue;
	struct amd_sched_entity	*current_entity;
	struct mutex			sched_lock;
	uint32_t                        hw_submission_limit;
};