
Commit 37cd0ca2 authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: unify AMDGPU_CTX_MAX_CS_PENDING and amdgpu_sched_jobs

parent c648ed7c
drivers/gpu/drm/amd/amdgpu/amdgpu.h +2 −3
@@ -1023,11 +1023,9 @@ int amdgpu_vm_free_job(struct amdgpu_job *job);
  * context related structures
  */
 
-#define AMDGPU_CTX_MAX_CS_PENDING	16
-
 struct amdgpu_ctx_ring {
 	uint64_t		sequence;
-	struct fence		*fences[AMDGPU_CTX_MAX_CS_PENDING];
+	struct fence		**fences;
 	struct amd_sched_entity	entity;
 };
 
@@ -1036,6 +1034,7 @@ struct amdgpu_ctx {
 	struct amdgpu_device    *adev;
 	unsigned		reset_counter;
 	spinlock_t		ring_lock;
+	struct fence            **fences;
 	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
 };
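
Note: the two hunks above replace the fixed AMDGPU_CTX_MAX_CS_PENDING-sized per-ring fence array with a bare pointer and give the context one flat backing array sized by the amdgpu_sched_jobs module parameter. A minimal sketch of the resulting layout, with simplified stand-in types and an illustrative ring count (not part of the patch):

struct fence;	/* opaque in this sketch */

/*
 * ctx->fences holds amdgpu_sched_jobs * AMDGPU_MAX_RINGS slots:
 *
 *   |<- ring 0: amdgpu_sched_jobs slots ->|<- ring 1 ->| ... |<- ring N-1 ->|
 *
 * and rings[i].fences points at the start of ring i's window, so the
 * per-ring indexing code is unchanged.
 */
struct ctx_layout_sketch {
	struct fence **fences;		/* single kzalloc'd backing array */
	struct {
		struct fence **fences;	/* window into the backing array */
	} rings[16];			/* 16 stands in for AMDGPU_MAX_RINGS */
};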

drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +18 −7
@@ -35,15 +35,24 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
 	ctx->adev = adev;
 	kref_init(&ctx->refcount);
 	spin_lock_init(&ctx->ring_lock);
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		ctx->rings[i].sequence = 1;
+	ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs *
+			 AMDGPU_MAX_RINGS, GFP_KERNEL);
+	if (!ctx->fences)
+		return -ENOMEM;
 
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		ctx->rings[i].sequence = 1;
+		ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
+			amdgpu_sched_jobs * i;
+	}
 	if (amdgpu_enable_scheduler) {
 		/* create context entity for each ring */
 		for (i = 0; i < adev->num_rings; i++) {
 			struct amd_sched_rq *rq;
-			if (pri >= AMD_SCHED_MAX_PRIORITY)
+			if (pri >= AMD_SCHED_MAX_PRIORITY) {
+				kfree(ctx->fences);
 				return -EINVAL;
+			}
 			rq = &adev->rings[i]->sched.sched_rq[pri];
 			r = amd_sched_entity_init(&adev->rings[i]->sched,
 						  &ctx->rings[i].entity,
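
Note: the window setup in the hunk above uses void-pointer arithmetic; an equivalent form written as ordinary pointer arithmetic on struct fence ** (a sketch of the same computation, not what the patch commits) would be:

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		/* ring i's window starts amdgpu_sched_jobs * i slots into the flat array */
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

Because the backing array comes from kzalloc(), every slot starts out NULL, which the fence_put() loop in amdgpu_ctx_fini() below relies on for slots that never held a fence (fence_put() ignores NULL).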
@@ -56,6 +65,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
 			for (j = 0; j < i; j++)
 				amd_sched_entity_fini(&adev->rings[j]->sched,
 						      &ctx->rings[j].entity);
+			kfree(ctx->fences);
 			return r;
 		}
 	}
@@ -71,8 +81,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 		return;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+		for (j = 0; j < amdgpu_sched_jobs; ++j)
 			fence_put(ctx->rings[i].fences[j]);
+	kfree(ctx->fences);
 
 	if (amdgpu_enable_scheduler) {
 		for (i = 0; i < adev->num_rings; i++)
@@ -241,7 +252,7 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 	unsigned idx = 0;
 	struct fence *other = NULL;
 
-	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+	idx = seq % amdgpu_sched_jobs;
 	other = cring->fences[idx];
 	if (other) {
 		signed long r;
@@ -276,12 +287,12 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 	}
 
 
-	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
+	if (seq + amdgpu_sched_jobs < cring->sequence) {
 		spin_unlock(&ctx->ring_lock);
 		return NULL;
 	}
 
-	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
+	fence = fence_get(cring->fences[seq % amdgpu_sched_jobs]);
 	spin_unlock(&ctx->ring_lock);
 
 	return fence;
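
Note: the last two hunks keep the existing ring-buffer semantics and only change the ring size: the fence for submission seq lives in slot seq % amdgpu_sched_jobs, and once amdgpu_sched_jobs newer submissions have been made the slot has been recycled, so the fence can no longer be looked up. A standalone sketch of that indexing, with locking, reference counting and error codes omitted (not part of the patch):

/*
 * sketch: look up the fence recorded for submission 'seq' in a ring of
 * 'ring_size' (i.e. amdgpu_sched_jobs) slots; 'next_seq' is the next
 * sequence number that will be handed out for this ring
 */
static struct fence *ctx_fence_lookup_sketch(struct fence **fences,
					     uint64_t next_seq, uint64_t seq,
					     unsigned ring_size)
{
	if (seq >= next_seq)			/* not submitted yet */
		return NULL;
	if (seq + ring_size < next_seq)		/* slot already overwritten */
		return NULL;
	return fences[seq % ring_size];		/* fence stored by amdgpu_ctx_add_fence() */
}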