
Commit 0e89d0c1 authored by Christian König, committed by Alex Deucher

drm/amdgpu: stop leaking the ctx id into the scheduler v2

Ids are for the IOCTL ABI only.

v2: remove tgid as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
parent efd4ccb5
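
The rationale in one line: a numeric context id is userspace ABI, so it should be minted and consumed at the IOCTL boundary and never threaded into core code. A minimal sketch of that pattern (hypothetical names, not this driver's code): idr_alloc() produces the id in the ioctl path, idr_find() translates it back, and the object handed to the core carries no id at all, being identified by pointer alone.

#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Core-side object: no numeric identity, its pointer is its name. */
struct core_entity {
	int state;
};

static DEFINE_IDR(ctx_idr);

/* ioctl boundary: mint an id for userspace, keep it out of the core. */
static int ctx_alloc(uint32_t *out_id)
{
	struct core_entity *e = kzalloc(sizeof(*e), GFP_KERNEL);
	int r;

	if (!e)
		return -ENOMEM;

	r = idr_alloc(&ctx_idr, e, 1, 0, GFP_KERNEL);
	if (r < 0) {
		kfree(e);
		return r;
	}
	*out_id = (uint32_t)r;	/* the id is ABI, nothing more */
	return 0;
}

/* ioctl boundary: translate an ABI id back into the core's pointer. */
static struct core_entity *ctx_lookup(uint32_t id)
{
	return idr_find(&ctx_idr, id);
}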
+4 −7
@@ -50,8 +50,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 
 static void amdgpu_ctx_init(struct amdgpu_device *adev,
 			    struct amdgpu_fpriv *fpriv,
-			    struct amdgpu_ctx *ctx,
-			    uint32_t id)
+			    struct amdgpu_ctx *ctx)
 {
 	int i;
 	memset(ctx, 0, sizeof(*ctx));
@@ -81,7 +80,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 			return r;
 		}
 		*id = (uint32_t)r;
-		amdgpu_ctx_init(adev, fpriv, ctx, *id);
+		amdgpu_ctx_init(adev, fpriv, ctx);
 		mutex_unlock(&mgr->lock);
 	} else {
 		if (adev->kernel_ctx) {
@@ -89,8 +88,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 			kfree(ctx);
 			return 0;
 		}
-		*id = AMD_KERNEL_CONTEXT_ID;
-		amdgpu_ctx_init(adev, fpriv, ctx, *id);
+		amdgpu_ctx_init(adev, fpriv, ctx);
 
 		adev->kernel_ctx = ctx;
 	}
@@ -105,8 +103,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 				rq = &adev->rings[i]->scheduler->kernel_rq;
 			r = amd_context_entity_init(adev->rings[i]->scheduler,
 						    &ctx->rings[i].c_entity,
-						    NULL, rq, *id,
-						    amdgpu_sched_jobs);
+						    NULL, rq, amdgpu_sched_jobs);
 			if (r)
 				break;
 		}
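
Note that nothing changes for userspace here: the second hunk keeps *id = (uint32_t)r;, so the id minted by the idr still reaches the caller; it just stops being forwarded into amdgpu_ctx_init() and amd_context_entity_init(). The kernel-context path drops its *id write entirely, since AMD_KERNEL_CONTEXT_ID disappears from the header (third file below). Roughly, the consuming side looks like this (a hedged sketch: the ioctl handler is not part of this diff, and the reply field names follow the amdgpu uapi):

#include <drm/amdgpu_drm.h>

/* Hypothetical ioctl-handler fragment: the id minted by
 * amdgpu_ctx_alloc() goes into the ioctl reply and nowhere else. */
static int ctx_alloc_ioctl_path(struct amdgpu_device *adev,
				struct amdgpu_fpriv *fpriv,
				union drm_amdgpu_ctx *args)
{
	uint32_t id;
	int r = amdgpu_ctx_alloc(adev, fpriv, &id);

	if (r)
		return r;

	args->out.alloc.ctx_id = id;	/* ABI reply: the id's only consumer */
	return 0;
}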
+3 −7
@@ -172,7 +172,7 @@ static struct amd_context_entity *select_context(
  * @entity	The pointer to a valid amd_context_entity
  * @parent	The parent entity of this amd_context_entity
  * @rq		The run queue this entity belongs
- * @context_id	The context id for this entity
+ * @kernel	If this is an entity for the kernel
  * @jobs	The max number of jobs in the job queue
  *
  * return 0 if succeed. negative error code on failure
@@ -181,7 +181,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
 			    struct amd_context_entity *entity,
 			    struct amd_sched_entity *parent,
 			    struct amd_run_queue *rq,
-			    uint32_t context_id,
 			    uint32_t jobs)
 {
 	uint64_t seq_ring = 0;
@@ -203,9 +202,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
 		return -EINVAL;
 
 	spin_lock_init(&entity->queue_lock);
-	entity->tgid = (context_id == AMD_KERNEL_CONTEXT_ID) ?
-		AMD_KERNEL_PROCESS_ID : current->tgid;
-	entity->context_id = context_id;
 	atomic64_set(&entity->last_emitted_v_seq, seq_ring);
 	atomic64_set(&entity->last_queued_v_seq, seq_ring);
 
@@ -275,9 +271,9 @@ int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
 
 	if (r) {
 		if (entity->is_pending)
-			DRM_INFO("Entity %u is in waiting state during fini,\
+			DRM_INFO("Entity %p is in waiting state during fini,\
 				all pending ibs will be canceled.\n",
-				 entity->context_id);
+				 entity);
	}
 
 	mutex_lock(&rq->lock);
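
With context_id and tgid gone from amd_context_entity, the pointer printed by the adjusted DRM_INFO (%p instead of %u) is the entity's only name inside the scheduler. Callers that embed the entity, like ctx->rings[i].c_entity in the first file, can still recover their surrounding state from that pointer with the usual container_of() idiom — a sketch, with a hypothetical wrapper name:

#include <linux/kernel.h>	/* container_of() */

/* Hypothetical wrapper mirroring the ctx->rings[i] element above. */
struct ctx_ring {
	struct amd_context_entity c_entity;
	/* ... per-ring state ... */
};

static struct ctx_ring *to_ctx_ring(struct amd_context_entity *entity)
{
	return container_of(entity, struct ctx_ring, c_entity);
}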
+0 −6
@@ -26,9 +26,6 @@
 
 #include <linux/kfifo.h>
 
-#define AMD_KERNEL_CONTEXT_ID			0
-#define AMD_KERNEL_PROCESS_ID			0
-
 #define AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
 
 struct amd_gpu_scheduler;
@@ -74,8 +71,6 @@ struct amd_context_entity {
 	/* the virtual_seq is unique per context per ring */
 	atomic64_t			last_queued_v_seq;
 	atomic64_t			last_emitted_v_seq;
-	pid_t				tgid;
-	uint32_t			context_id;
 	/* the job_queue maintains the jobs submitted by clients */
 	struct kfifo                    job_queue;
 	spinlock_t			queue_lock;
@@ -148,7 +143,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
 			    struct amd_context_entity *entity,
 			    struct amd_sched_entity *parent,
 			    struct amd_run_queue *rq,
-			    uint32_t context_id,
 			    uint32_t jobs);
 
 void amd_sched_emit(struct amd_context_entity *c_entity, uint64_t seq);