Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 180fc134 authored by Andrey Grodzovsky's avatar Andrey Grodzovsky Committed by Alex Deucher
Browse files

drm/scheduler: Rename cleanup functions v2.



Everything in the flush code path (i.e. waiting for the SW queue
to become empty) is named *_flush(),
and everything in the release code path is named *_fini().

This patch also affects the amdgpu and etnaviv drivers, which
use those functions.

v2:
Also apply the change to vd3.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Acked-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent f3efec54
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -104,7 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,

failed:
	for (j = 0; j < i; j++)
		drm_sched_entity_fini(&adev->rings[j]->sched,
		drm_sched_entity_destroy(&adev->rings[j]->sched,
				      &ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
@@ -178,7 +178,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
			continue;

		drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
		drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
			&ctx->rings[i].entity);
	}

@@ -466,7 +466,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
				continue;

			max_wait = drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
			max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
					  &ctx->rings[i].entity, max_wait);
		}
	}
@@ -492,7 +492,7 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
				continue;

			if (kref_read(&ctx->refcount) == 1)
				drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
				drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
					&ctx->rings[i].entity);
			else
				DRM_ERROR("ctx %p is still alive\n", ctx);
+1 −1
Original line number Diff line number Diff line
@@ -162,7 +162,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
	if (adev->mman.mem_global_referenced) {
		drm_sched_entity_fini(adev->mman.entity.sched,
		drm_sched_entity_destroy(adev->mman.entity.sched,
				      &adev->mman.entity);
		mutex_destroy(&adev->mman.gtt_window_lock);
		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
+1 −1
Original line number Diff line number Diff line
@@ -309,7 +309,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		kfree(adev->uvd.inst[j].saved_bo);

		drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
		drm_sched_entity_destroy(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);

		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
				      &adev->uvd.inst[j].gpu_addr,
+1 −1
Original line number Diff line number Diff line
@@ -222,7 +222,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
	drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
		(void **)&adev->vce.cpu_addr);
+2 −2
Original line number Diff line number Diff line
@@ -2643,7 +2643,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
	vm->root.base.bo = NULL;

error_free_sched_entity:
	drm_sched_entity_fini(&ring->sched, &vm->entity);
	drm_sched_entity_destroy(&ring->sched, &vm->entity);

	return r;
}
@@ -2780,7 +2780,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}

	drm_sched_entity_fini(vm->entity.sched, &vm->entity);
	drm_sched_entity_destroy(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(adev->dev, "still active bo inside vm\n");
Loading