
Commit bb977d37 authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: abstract amdgpu_job for scheduler

parent 6055f37a
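
In short, this patch stops handing the whole amdgpu_cs_parser to the GPU scheduler and introduces a dedicated struct amdgpu_job that carries only what the scheduler needs: the amd_sched_job base, the device, a context reference, the IBs, the owner, the user fence and a free_job callback. Submission paths now allocate and fill such a job and push it with amd_sched_push_job(); the scheduler's process_job callback frees it after it has run. Below is a condensed sketch of the new submission flow, pieced together from the hunks that follow; the locals (adev, ring, ctx, ibs, num_ibs, filp, cs) stand for values amdgpu_cs_ioctl() already has, and error paths are trimmed, so this is not the literal patched code.

	struct amdgpu_job *job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	job->base.sched    = ring->scheduler;                /* embedded scheduler job */
	job->base.s_entity = &ctx->rings[ring->idx].entity;
	job->adev     = adev;
	job->ctx      = amdgpu_ctx_get_ref(ctx);             /* helper added by this commit */
	job->ibs      = ibs;
	job->num_ibs  = num_ibs;
	job->owner    = filp;
	job->free_job = amdgpu_cs_free_job;                  /* drops ctx ref, IBs, user fence BO */
	mutex_init(&job->job_lock);

	mutex_lock(&job->job_lock);
	r = amd_sched_push_job((struct amd_sched_job *)job); /* queue on the ring's scheduler */
	if (r) {
		mutex_unlock(&job->job_lock);
		amdgpu_cs_free_job(job);
		kfree(job);
		return r;
	}
	cs->out.handle = job->base.s_fence->v_seq;           /* fence seq handed back to userspace */
	mutex_unlock(&job->job_lock);
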
+15 −1
@@ -183,6 +183,7 @@ struct amdgpu_vm;
struct amdgpu_ring;
struct amdgpu_semaphore;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;

@@ -871,7 +872,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring,
					 struct amdgpu_ib *ibs,
					 unsigned num_ibs,
					 int (*free_job)(struct amdgpu_cs_parser *),
					 int (*free_job)(struct amdgpu_job *),
					 void *owner,
					 struct fence **fence);

@@ -1040,6 +1041,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
struct amdgpu_ctx *amdgpu_ctx_get_ref(struct amdgpu_ctx *ctx);

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence, uint64_t queued_seq);
@@ -1265,6 +1267,18 @@ struct amdgpu_cs_parser {
	struct amd_sched_fence *s_fence;
};

struct amdgpu_job {
	struct amd_sched_job    base;
	struct amdgpu_device	*adev;
	struct amdgpu_ctx	*ctx;
	struct drm_file		*owner;
	struct amdgpu_ib	*ibs;
	uint32_t		num_ibs;
	struct mutex            job_lock;
	struct amdgpu_user_fence uf;
	int (*free_job)(struct amdgpu_job *sched_job);
};

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
{
	return p->ibs[ib_idx].ptr[idx];
+55 −46
@@ -126,19 +126,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
	return 0;
}

static void amdgpu_job_work_func(struct work_struct *work)
{
	struct amdgpu_cs_parser *sched_job =
		container_of(work, struct amdgpu_cs_parser,
			     job_work);
	mutex_lock(&sched_job->job_lock);
	if (sched_job->free_job)
		sched_job->free_job(sched_job);
	mutex_unlock(&sched_job->job_lock);
	/* after processing job, free memory */
	fence_put(&sched_job->s_fence->base);
	kfree(sched_job);
}
struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
                                               struct drm_file *filp,
                                               struct amdgpu_ctx *ctx,
@@ -157,10 +144,6 @@ struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
	parser->ctx = ctx;
	parser->ibs = ibs;
	parser->num_ibs = num_ibs;
	if (amdgpu_enable_scheduler) {
		mutex_init(&parser->job_lock);
		INIT_WORK(&parser->job_work, amdgpu_job_work_func);
	}
	for (i = 0; i < num_ibs; i++)
		ibs[i].ctx = ctx;

@@ -508,14 +491,16 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (!amdgpu_enable_scheduler)
	{
		if (parser->ibs)
			for (i = 0; i < parser->num_ibs; i++)
				amdgpu_ib_free(parser->adev, &parser->ibs[i]);
		kfree(parser->ibs);
		if (parser->uf.bo)
			drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
	}

	if (!amdgpu_enable_scheduler)
	kfree(parser);
}

@@ -533,12 +518,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
       amdgpu_cs_parser_fini_late(parser);
}

static int amdgpu_cs_parser_free_job(struct amdgpu_cs_parser *sched_job)
{
       amdgpu_cs_parser_fini_late(sched_job);
       return 0;
}

static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
				   struct amdgpu_vm *vm)
{
@@ -874,6 +853,19 @@ static struct amdgpu_ring *amdgpu_cs_parser_get_ring(
	return ring;
}

static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
{
	int i;
	amdgpu_ctx_put(sched_job->ctx);
	if (sched_job->ibs)
		for (i = 0; i < sched_job->num_ibs; i++)
			amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
	kfree(sched_job->ibs);
	if (sched_job->uf.bo)
		drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
	return 0;
}

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
@@ -900,33 +892,50 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
	}

	if (amdgpu_enable_scheduler && parser->num_ibs) {
		struct amdgpu_job *job;
		struct amdgpu_ring * ring =
			amdgpu_cs_parser_get_ring(adev, parser);
		r = amdgpu_cs_parser_prepare_job(parser);
		if (r)
			goto out;
		parser->ring = ring;
		parser->free_job = amdgpu_cs_parser_free_job;
		mutex_lock(&parser->job_lock);
		r = amd_sched_push_job(ring->scheduler,
				       &parser->ctx->rings[ring->idx].entity,
				       parser,
				       &parser->s_fence);
		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
		if (!job)
			return -ENOMEM;
		job->base.sched = ring->scheduler;
		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
		job->adev = parser->adev;
		job->ibs = parser->ibs;
		job->num_ibs = parser->num_ibs;
		job->owner = parser->filp;
		job->ctx = amdgpu_ctx_get_ref(parser->ctx);
		mutex_init(&job->job_lock);
		if (job->ibs[job->num_ibs - 1].user) {
			memcpy(&job->uf,  &parser->uf,
			       sizeof(struct amdgpu_user_fence));
			job->ibs[job->num_ibs - 1].user = &job->uf;
		}

		job->free_job = amdgpu_cs_free_job;
		mutex_lock(&job->job_lock);
		r = amd_sched_push_job((struct amd_sched_job *)job);
		if (r) {
			mutex_unlock(&parser->job_lock);
			mutex_unlock(&job->job_lock);
			amdgpu_cs_free_job(job);
			kfree(job);
			goto out;
		}
		parser->ibs[parser->num_ibs - 1].sequence =
			amdgpu_ctx_add_fence(parser->ctx, ring,
					     &parser->s_fence->base,
					     parser->s_fence->v_seq);
		cs->out.handle = parser->s_fence->v_seq;
		job->ibs[parser->num_ibs - 1].sequence =
			amdgpu_ctx_add_fence(job->ctx, ring,
					     &job->base.s_fence->base,
					     job->base.s_fence->v_seq);
		cs->out.handle = job->base.s_fence->v_seq;
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
		ttm_eu_fence_buffer_objects(&parser->ticket,
				&parser->validated,
				&parser->s_fence->base);
				&job->base.s_fence->base);

		mutex_unlock(&parser->job_lock);
		mutex_unlock(&job->job_lock);
		amdgpu_cs_parser_fini_late(parser);
		up_read(&adev->exclusive_lock);
		return 0;
	}
+7 −0
@@ -219,6 +219,13 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
	return ctx;
}

struct amdgpu_ctx *amdgpu_ctx_get_ref(struct amdgpu_ctx *ctx)
{
	if (ctx)
		kref_get(&ctx->refcount);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
+34 −54
@@ -27,81 +27,58 @@
#include <drm/drmP.h>
#include "amdgpu.h"

static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
				    struct amd_sched_entity *entity,
				    struct amd_sched_job *job)
{
	int r = 0;
	struct amdgpu_cs_parser *sched_job;
	if (!job || !job->data) {
		DRM_ERROR("job is null\n");
		return -EINVAL;
	}

	sched_job = (struct amdgpu_cs_parser *)job->data;
	if (sched_job->prepare_job) {
		r = sched_job->prepare_job(sched_job);
		if (r) {
			DRM_ERROR("Prepare job error\n");
			schedule_work(&sched_job->job_work);
		}
	}
	return r;
}

static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
					  struct amd_sched_entity *entity,
					  struct amd_sched_job *job)
{
	int r = 0;
	struct amdgpu_cs_parser *sched_job;
	struct amdgpu_job *sched_job;
	struct amdgpu_fence *fence;

	if (!job || !job->data) {
	if (!job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	sched_job = (struct amdgpu_cs_parser *)job->data;
	sched_job = (struct amdgpu_job *)job;
	mutex_lock(&sched_job->job_lock);
	r = amdgpu_ib_schedule(sched_job->adev,
			       sched_job->num_ibs,
			       sched_job->ibs,
			       sched_job->filp);
			       sched_job->owner);
	if (r)
		goto err;
	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);

	if (sched_job->run_job) {
		r = sched_job->run_job(sched_job);
		if (r)
			goto err;
	}

	mutex_unlock(&sched_job->job_lock);
	return &fence->base;

err:
	DRM_ERROR("Run job error\n");
	mutex_unlock(&sched_job->job_lock);
	schedule_work(&sched_job->job_work);
	sched->ops->process_job(sched, (struct amd_sched_job *)sched_job);
	return NULL;
}

static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched,
				     struct amd_sched_job *job)
{
	struct amdgpu_cs_parser *sched_job;
	struct amdgpu_job *sched_job;

	if (!job || !job->data) {
	if (!job) {
		DRM_ERROR("job is null\n");
		return;
	}
	sched_job = (struct amdgpu_cs_parser *)job->data;
	schedule_work(&sched_job->job_work);
	sched_job = (struct amdgpu_job *)job;
	mutex_lock(&sched_job->job_lock);
	if (sched_job->free_job)
		sched_job->free_job(sched_job);
	mutex_unlock(&sched_job->job_lock);
	/* after processing job, free memory */
	fence_put(&sched_job->base.s_fence->base);
	kfree(sched_job);
}

struct amd_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_sched_prepare_job,
	.run_job = amdgpu_sched_run_job,
	.process_job = amdgpu_sched_process_job
};
@@ -110,31 +87,34 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring,
					 struct amdgpu_ib *ibs,
					 unsigned num_ibs,
					 int (*free_job)(struct amdgpu_cs_parser *),
					 int (*free_job)(struct amdgpu_job *),
					 void *owner,
					 struct fence **f)
{
	int r = 0;
	if (amdgpu_enable_scheduler) {
		struct amdgpu_cs_parser *sched_job =
			amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
						ibs, num_ibs);
		if(!sched_job) {
		struct amdgpu_job *job =
			kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
		if (!job)
			return -ENOMEM;
		}
		sched_job->free_job = free_job;
		mutex_lock(&sched_job->job_lock);
		r = amd_sched_push_job(ring->scheduler,
				       &adev->kernel_ctx.rings[ring->idx].entity,
				       sched_job, &sched_job->s_fence);
		job->base.sched = ring->scheduler;
		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
		job->adev = adev;
		job->ibs = ibs;
		job->num_ibs = num_ibs;
		job->owner = owner;
		mutex_init(&job->job_lock);
		job->free_job = free_job;
		mutex_lock(&job->job_lock);
		r = amd_sched_push_job((struct amd_sched_job *)job);
		if (r) {
			mutex_unlock(&sched_job->job_lock);
			kfree(sched_job);
			mutex_unlock(&job->job_lock);
			kfree(job);
			return r;
		}
		ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq;
		*f = fence_get(&sched_job->s_fence->base);
		mutex_unlock(&sched_job->job_lock);
		ibs[num_ibs - 1].sequence = job->base.s_fence->v_seq;
		*f = fence_get(&job->base.s_fence->base);
		mutex_unlock(&job->job_lock);
	} else {
		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
		if (r)
+1 −1
@@ -807,7 +807,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
}

static int amdgpu_uvd_free_job(
	struct amdgpu_cs_parser *sched_job)
	struct amdgpu_job *sched_job)
{
	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
	kfree(sched_job->ibs);