Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9b398fa5 authored by Christian König's avatar Christian König Committed by Alex Deucher
Browse files

drm/amdgpu: rename fence->scheduler to sched v2



Just to be consistent with the other members.

v2: rename the ring member as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com> (v1)
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
parent 0f75aee7
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -891,7 +891,7 @@ struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;
	struct amd_gpu_scheduler 	*scheduler;
	struct amd_gpu_scheduler 	*sched;

	spinlock_t              fence_lock;
	struct mutex		*ring_lock;
+1 −1
Original line number Diff line number Diff line
@@ -848,7 +848,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
		if (!job)
			return -ENOMEM;
		job->base.sched = ring->scheduler;
		job->base.sched = ring->sched;
		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
		job->adev = parser->adev;
		job->ibs = parser->ibs;
+5 −5
Original line number Diff line number Diff line
@@ -43,10 +43,10 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
		for (i = 0; i < adev->num_rings; i++) {
			struct amd_sched_rq *rq;
			if (kernel)
				rq = &adev->rings[i]->scheduler->kernel_rq;
				rq = &adev->rings[i]->sched->kernel_rq;
			else
				rq = &adev->rings[i]->scheduler->sched_rq;
			r = amd_sched_entity_init(adev->rings[i]->scheduler,
				rq = &adev->rings[i]->sched->sched_rq;
			r = amd_sched_entity_init(adev->rings[i]->sched,
						  &ctx->rings[i].entity,
						  rq, amdgpu_sched_jobs);
			if (r)
@@ -55,7 +55,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,

		if (i < adev->num_rings) {
			for (j = 0; j < i; j++)
				amd_sched_entity_fini(adev->rings[j]->scheduler,
				amd_sched_entity_fini(adev->rings[j]->sched,
						      &ctx->rings[j].entity);
			kfree(ctx);
			return r;
@@ -75,7 +75,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)

	if (amdgpu_enable_scheduler) {
		for (i = 0; i < adev->num_rings; i++)
			amd_sched_entity_fini(adev->rings[i]->scheduler,
			amd_sched_entity_fini(adev->rings[i]->sched,
					      &ctx->rings[i].entity);
	}
}
+7 −7
Original line number Diff line number Diff line
@@ -626,11 +626,11 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
	ring->fence_drv.ring = ring;

	if (amdgpu_enable_scheduler) {
		ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
		ring->sched = amd_sched_create(&amdgpu_sched_ops,
					       ring->idx,
					       amdgpu_sched_hw_submission,
					       (void *)ring->adev);
		if (!ring->scheduler)
		if (!ring->sched)
			DRM_ERROR("Failed to create scheduler on ring %d.\n",
				  ring->idx);
	}
@@ -681,8 +681,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
		wake_up_all(&ring->fence_drv.fence_queue);
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		if (ring->scheduler)
			amd_sched_destroy(ring->scheduler);
		if (ring->sched)
			amd_sched_destroy(ring->sched);
		ring->fence_drv.initialized = false;
	}
	mutex_unlock(&adev->ring_lock);
+2 −2
Original line number Diff line number Diff line
@@ -146,7 +146,7 @@ static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)

	s_fence = to_amd_sched_fence(f);
	if (s_fence)
		return s_fence->scheduler->ring_id;
		return s_fence->sched->ring_id;
	a_fence = to_amdgpu_fence(f);
	if (a_fence)
		return a_fence->ring->idx;
@@ -437,7 +437,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
			if (s_fence)
				seq_printf(m, " protected by 0x%016x on ring %d",
					   s_fence->base.seqno,
					   s_fence->scheduler->ring_id);
					   s_fence->sched->ring_id);

		}
		seq_printf(m, "\n");
Loading