
Commit 6c859274 authored by Christian König, committed by Alex Deucher

drm/amdgpu: fix and cleanup amd_sched_entity_push_job



Calling schedule() is probably the worst thing we can do.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
parent 87e0a87d
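For context, the change replaces a busy-wait: the old amd_sched_push_job() looped on kfifo_in_spinlocked() and called schedule() whenever the per-entity job queue was full, while the new amd_sched_entity_push_job() sleeps on the entity's wait queue via wait_event_interruptible() until amd_sched_entity_in() manages to queue the job. The sketch below is a simplified userspace analogue of that before/after pattern, not driver code; the queue, mutex and condition-variable names are made up for illustration.

/*
 * Simplified userspace analogue of the before/after pattern in this
 * commit. Not driver code: QUEUE_DEPTH, try_push(), push_busy_wait(),
 * push_wait_event() and pop() are invented names.
 * Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_DEPTH 4

static int queue[QUEUE_DEPTH];
static int count;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t not_full = PTHREAD_COND_INITIALIZER;

/* Try to queue one job; reports success, much like amd_sched_entity_in(). */
static bool try_push(int job)
{
        bool added = false;

        pthread_mutex_lock(&lock);
        if (count < QUEUE_DEPTH) {
                queue[count++] = job;
                added = true;
        }
        pthread_mutex_unlock(&lock);
        return added;
}

/* Old pattern: poll and yield the CPU until the push succeeds. */
static void push_busy_wait(int job)
{
        while (!try_push(job))
                sched_yield();  /* burns CPU, like calling schedule() in a loop */
}

/* New pattern: sleep until there is room, then queue the job. */
static void push_wait_event(int job)
{
        pthread_mutex_lock(&lock);
        while (count == QUEUE_DEPTH)
                pthread_cond_wait(&not_full, &lock);
        queue[count++] = job;
        pthread_mutex_unlock(&lock);
}

/* Consumer: take one job and wake a waiting producer. */
static bool pop(int *job)
{
        bool ok = false;

        pthread_mutex_lock(&lock);
        if (count > 0) {
                *job = queue[--count];
                ok = true;
        }
        pthread_mutex_unlock(&lock);
        pthread_cond_signal(&not_full);
        return ok;
}

int main(void)
{
        int job;

        push_busy_wait(1);
        push_wait_event(2);
        while (pop(&job))
                printf("popped %d\n", job);
        return 0;
}

Unlike this sketch, wait_event_interruptible() can also return -ERESTARTSYS when a signal arrives, which is why amd_sched_entity_push_job() now propagates a return code that both callers already check.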
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  +1 −1
@@ -857,7 +857,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 		job->free_job = amdgpu_cs_free_job;
 		mutex_lock(&job->job_lock);
-		r = amd_sched_push_job((struct amd_sched_job *)job);
+		r = amd_sched_entity_push_job((struct amd_sched_job *)job);
 		if (r) {
 			mutex_unlock(&job->job_lock);
 			amdgpu_cs_free_job(job);
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c  +1 −1
@@ -105,7 +105,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 		mutex_init(&job->job_lock);
 		job->free_job = free_job;
 		mutex_lock(&job->job_lock);
-		r = amd_sched_push_job((struct amd_sched_job *)job);
+		r = amd_sched_entity_push_job((struct amd_sched_job *)job);
 		if (r) {
 			mutex_unlock(&job->job_lock);
 			kfree(job);
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c  +45 −33
@@ -121,7 +121,6 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 	entity->fence_context = fence_context_alloc(1);
 	snprintf(name, sizeof(name), "c_entity[%llu]", entity->fence_context);
 	memcpy(entity->name, name, 20);
-	entity->need_wakeup = false;
 	if(kfifo_alloc(&entity->job_queue,
 		       jobs * sizeof(void *),
 		       GFP_KERNEL))
@@ -182,7 +181,7 @@ int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 
 	if (!amd_sched_entity_is_initialized(sched, entity))
 		return 0;
-	entity->need_wakeup = true;
+
 	/**
 	 * The client will not queue more IBs during this fini, consume existing
 	 * queued IBs
@@ -201,38 +200,55 @@ int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 }
 
 /**
- * Submit a normal job to the job queue
+ * Helper to submit a job to the job queue
  *
- * @sched	The pointer to the scheduler
- * @c_entity    The pointer to amd_sched_entity
  * @job		The pointer to job required to submit
- * return 0 if succeed. -1 if failed.
- *        -2 indicate queue is full for this client, client should wait untill
- *	     scheduler consum some queued command.
- *	  -1 other fail.
+ *
+ * Returns true if we could submit the job.
+ */
+static bool amd_sched_entity_in(struct amd_sched_job *job)
+{
+	struct amd_sched_entity *entity = job->s_entity;
+	bool added, first = false;
+
+	spin_lock(&entity->queue_lock);
+	added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);
+
+	if (added && kfifo_len(&entity->job_queue) == sizeof(job))
+		first = true;
+
+	spin_unlock(&entity->queue_lock);
+
+	/* first job wakes up scheduler */
+	if (first)
+		wake_up_interruptible(&job->sched->wait_queue);
+
+	return added;
+}
+
+/**
+ * Submit a job to the job queue
+ *
+ * @job		The pointer to job required to submit
+ *
+ * Returns 0 for success, negative error code otherwise.
  */
-int amd_sched_push_job(struct amd_sched_job *sched_job)
+int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
-	struct amd_sched_fence 	*fence =
-		amd_sched_fence_create(sched_job->s_entity);
+	struct amd_sched_entity *entity = sched_job->s_entity;
+	struct amd_sched_fence *fence = amd_sched_fence_create(entity);
+	int r;
+
 	if (!fence)
-		return -EINVAL;
+		return -ENOMEM;
+
 	fence_get(&fence->base);
 	sched_job->s_fence = fence;
-	while (kfifo_in_spinlocked(&sched_job->s_entity->job_queue,
-				   &sched_job, sizeof(void *),
-				   &sched_job->s_entity->queue_lock) !=
-	       sizeof(void *)) {
-		/**
-		 * Current context used up all its IB slots
-		 * wait here, or need to check whether GPU is hung
-		*/
-		schedule();
-	}
-	/* first job wake up scheduler */
-	if ((kfifo_len(&sched_job->s_entity->job_queue) / sizeof(void *)) == 1)
-		wake_up_interruptible(&sched_job->sched->wait_queue);
-	return 0;
+
+	r = wait_event_interruptible(entity->wait_queue,
+				     amd_sched_entity_in(sched_job));
+
+	return r;
 }
 
 /**
@@ -313,12 +329,8 @@ static int amd_sched_main(void *param)
 			fence_put(fence);
 		}
 
-		if (c_entity->need_wakeup) {
-			c_entity->need_wakeup = false;
-			wake_up(&c_entity->wait_queue);
-		}
-
+		wake_up(&c_entity->wait_queue);
 	}
 	return 0;
 }
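One detail worth calling out in amd_sched_entity_in() above: besides reporting whether the job was queued, it checks whether that job was the first entry (kfifo_len() equal to one job's worth of bytes right after the insert) and only then wakes the scheduler, so a queue that is already being drained does not trigger a wakeup per submission. Below is a minimal, self-contained sketch of that shape in illustrative userspace C; the struct and function names are invented, and the locking the driver does with queue_lock is omitted.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ENTITY_QUEUE_DEPTH 8

struct entity_queue {
        void *jobs[ENTITY_QUEUE_DEPTH];
        size_t len;
};

/*
 * Mirrors the shape of amd_sched_entity_in(): report both whether the job
 * was queued and whether it was the first one, so the caller signals the
 * consumer only on the empty -> non-empty transition.
 */
static bool queue_push(struct entity_queue *q, void *job, bool *first)
{
        if (q->len == ENTITY_QUEUE_DEPTH)
                return false;           /* no room: caller keeps waiting and retries */
        q->jobs[q->len++] = job;
        *first = (q->len == 1);         /* queue was empty before this push */
        return true;
}

int main(void)
{
        struct entity_queue q = { .len = 0 };
        bool first = false;
        int job = 42;

        if (queue_push(&q, &job, &first) && first)
                printf("first job queued, would wake the scheduler here\n");
        return 0;
}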


drivers/gpu/drm/amd/scheduler/gpu_scheduler.h  +1 −3
@@ -49,7 +49,6 @@ struct amd_sched_entity {
 	wait_queue_head_t		wait_queue;
 	uint64_t                        fence_context;
 	char                            name[20];
-	bool                            need_wakeup;
 };
 
 /**
@@ -119,14 +118,13 @@ amd_sched_create(struct amd_sched_backend_ops *ops,
 		 uint32_t ring, uint32_t hw_submission);
 int amd_sched_destroy(struct amd_gpu_scheduler *sched);
 
-int amd_sched_push_job(struct amd_sched_job *sched_job);
-
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 			  struct amd_sched_entity *entity,
 			  struct amd_sched_rq *rq,
 			  uint32_t jobs);
 int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			  struct amd_sched_entity *entity);
+int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
 
 struct amd_sched_fence *amd_sched_fence_create(
 	struct amd_sched_entity *s_entity);