
Commit 4af9f07c authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: use kernel submit helper in vm

parent 953e8fd4
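
Editorial note for orientation: every VM page-table update below is rerouted through amdgpu_sched_ib_submit_kernel_helper(). Pieced together from the amdgpu_sched.c hunks in this commit and the open-coded paths it deletes, the helper's shape is roughly the following sketch; it is a reconstruction, not the verbatim kernel source (in particular, the sequence bookkeeping shown for the last IB is an assumption generalized from the single-IB paths removed below).

/* Sketch reconstructed from this commit's hunks; not verbatim source. */
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring,
					 struct amdgpu_ib *ibs,
					 unsigned num_ibs,
					 int (*free_job)(struct amdgpu_cs_parser *),
					 void *owner,
					 struct fence **f)
{
	int r = 0;

	if (amdgpu_enable_scheduler) {
		uint64_t v_seq;
		struct amdgpu_cs_parser *sched_job =
			amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
						ibs, num_ibs);
		if (!sched_job)
			return -ENOMEM;
		sched_job->free_job = free_job;
		/* queue the job on the ring's scheduler entity ... */
		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].entity.last_queued_v_seq);
		ibs[num_ibs - 1].sequence = v_seq;	/* assumed; deleted paths set ib->sequence */
		amd_sched_push_job(ring->scheduler,
				   &adev->kernel_ctx.rings[ring->idx].entity,
				   sched_job);
		/* ... and wait for it to be emitted to the ring */
		r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].entity,
					v_seq, false, -1);
		if (r)
			WARN(true, "emit timeout\n");
	} else
		/* scheduler disabled: submit the IBs directly */
		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
	if (r)
		return r;
	/* the fence of the last IB covers the whole submission */
	*f = &ibs[num_ibs - 1].fence->base;
	return 0;
}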
drivers/gpu/drm/amd/amdgpu/amdgpu.h +0 −14
@@ -1235,19 +1235,6 @@ struct amdgpu_cs_chunk {
 	void __user		*user_ptr;
 };
 
-union amdgpu_sched_job_param {
-	struct {
-		struct amdgpu_vm *vm;
-		uint64_t start;
-		uint64_t last;
-		struct fence **fence;
-
-	} vm_mapping;
-	struct {
-		struct amdgpu_bo *bo;
-	} vm;
-};
-
 struct amdgpu_cs_parser {
 	struct amdgpu_device	*adev;
 	struct drm_file		*filp;
@@ -1272,7 +1259,6 @@ struct amdgpu_cs_parser {
 	struct mutex job_lock;
 	struct work_struct job_work;
 	int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
-	union amdgpu_sched_job_param job_param;
 	int (*run_job)(struct amdgpu_cs_parser *sched_job);
 	int (*free_job)(struct amdgpu_cs_parser *sched_job);
 };
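
The union removed above existed only to smuggle per-job state (a BO to fence, or a mapping range plus an out-fence) into the run_job callbacks. With the helper returning the submission fence directly, that state stays on the caller's stack, as the amdgpu_vm.c hunks below show. Schematically (names taken from this diff, simplified):

struct fence *fence = NULL;
int r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
					     &amdgpu_vm_free_job,
					     AMDGPU_FENCE_OWNER_VM,
					     &fence);
if (!r)
	amdgpu_bo_fence(bo, fence, true);	/* was done in amdgpu_vm_run_job() */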
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +2 −2
@@ -121,7 +121,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 		uint64_t v_seq;
 		struct amdgpu_cs_parser *sched_job =
 			amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
-						ibs, 1);
+						ibs, num_ibs);
 		if(!sched_job) {
 			return -ENOMEM;
 		}
@@ -139,7 +139,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 		if (r)
 			WARN(true, "emit timeout\n");
 	} else
-		r = amdgpu_ib_schedule(adev, 1, ibs, owner);
+		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
 	if (r)
 		return r;
 	*f = &ibs[num_ibs - 1].fence->base;
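
Besides rerouting callers, the two hunks above fix a latent inconsistency in the helper itself: it accepted num_ibs but hard-coded 1 both when wrapping the IBs in a parser job and in the direct-submission fallback, while still returning the fence of ibs[num_ibs - 1]. A hypothetical multi-IB caller makes the mismatch visible (all callers in this commit still pass num_ibs == 1):

/* Illustrative only: with the old hard-coded 1, the scheduler job would
 * wrap just ibs[0], yet the fence returned through *f belonged to ibs[1],
 * which was never submitted. */
struct amdgpu_ib ibs[2];
struct fence *f = NULL;
int r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ibs, 2,
					     &amdgpu_vm_free_job,
					     AMDGPU_FENCE_OWNER_VM,
					     &f);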
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +31 −128
@@ -316,14 +316,6 @@ static int amdgpu_vm_free_job(
 	return 0;
 }
 
-static int amdgpu_vm_run_job(
-	struct amdgpu_cs_parser *sched_job)
-{
-	amdgpu_bo_fence(sched_job->job_param.vm.bo,
-			&sched_job->ibs[sched_job->num_ibs -1].fence->base, true);
-	return 0;
-}
-
 /**
  * amdgpu_vm_clear_bo - initially clear the page dir/table
  *
@@ -334,7 +326,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 			      struct amdgpu_bo *bo)
 {
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
-	struct amdgpu_cs_parser *sched_job = NULL;
+	struct fence *fence = NULL;
 	struct amdgpu_ib *ib;
 	unsigned entries;
 	uint64_t addr;
@@ -368,38 +360,16 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
 	amdgpu_vm_pad_ib(adev, ib);
 	WARN_ON(ib->length_dw > 64);
-
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vm_free_job,
+						 AMDGPU_FENCE_OWNER_VM,
+						 &fence);
+	if (!r)
+		amdgpu_bo_fence(bo, fence, true);
 	if (amdgpu_enable_scheduler) {
-		int r;
-		uint64_t v_seq;
-		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
-						    &adev->kernel_ctx, ib, 1);
-		if(!sched_job)
-			goto error_free;
-		sched_job->job_param.vm.bo = bo;
-		sched_job->run_job = amdgpu_vm_run_job;
-		sched_job->free_job = amdgpu_vm_free_job;
-		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].entity.last_queued_v_seq);
-		ib->sequence = v_seq;
-		amd_sched_push_job(ring->scheduler,
-				   &adev->kernel_ctx.rings[ring->idx].entity,
-				   sched_job);
-		r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].entity,
-					v_seq,
-					false,
-					-1);
-		if (r)
-			DRM_ERROR("emit timeout\n");
-
 		amdgpu_bo_unreserve(bo);
 		return 0;
-	} else {
-		r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
-		if (r)
-			goto error_free;
-		amdgpu_bo_fence(bo, &ib->fence->base, true);
 	}
-
 error_free:
 	amdgpu_ib_free(adev, ib);
 	kfree(ib);
@@ -456,7 +426,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
 	struct amdgpu_ib *ib;
-	struct amdgpu_cs_parser *sched_job = NULL;
+	struct fence *fence = NULL;
 
 	int r;
 
@@ -518,37 +488,13 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		amdgpu_vm_pad_ib(adev, ib);
 		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
 		WARN_ON(ib->length_dw > ndw);
-
-		if (amdgpu_enable_scheduler) {
-			int r;
-			uint64_t v_seq;
-			sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
-							    &adev->kernel_ctx,
-							    ib, 1);
-			if(!sched_job)
-				goto error_free;
-			sched_job->job_param.vm.bo = pd;
-			sched_job->run_job = amdgpu_vm_run_job;
-			sched_job->free_job = amdgpu_vm_free_job;
-			v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].entity.last_queued_v_seq);
-			ib->sequence = v_seq;
-			amd_sched_push_job(ring->scheduler,
-					   &adev->kernel_ctx.rings[ring->idx].entity,
-					   sched_job);
-			r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].entity,
-						v_seq,
-						false,
-						-1);
+		r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+							 &amdgpu_vm_free_job,
+							 AMDGPU_FENCE_OWNER_VM,
+							 &fence);
 		if (r)
-				DRM_ERROR("emit timeout\n");
-		} else {
-			r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
-			if (r) {
-				amdgpu_ib_free(adev, ib);
-				return r;
-			}
-			amdgpu_bo_fence(pd, &ib->fence->base, true);
-		}
+			goto error_free;
+		amdgpu_bo_fence(pd, fence, true);
 	}
 
 	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
@@ -559,11 +505,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	return 0;
 
 error_free:
-	if (sched_job)
-		kfree(sched_job);
 	amdgpu_ib_free(adev, ib);
 	kfree(ib);
-	return -ENOMEM;
+	return r;
 }
 
 /**
@@ -748,20 +692,6 @@ static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
 		amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
 }
 
-static int amdgpu_vm_bo_update_mapping_run_job(
-	struct amdgpu_cs_parser *sched_job)
-{
-	struct fence **fence = sched_job->job_param.vm_mapping.fence;
-	amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm,
-			    sched_job->job_param.vm_mapping.start,
-			    sched_job->job_param.vm_mapping.last + 1,
-			    &sched_job->ibs[sched_job->num_ibs -1].fence->base);
-	if (fence) {
-		fence_put(*fence);
-		*fence = fence_get(&sched_job->ibs[sched_job->num_ibs -1].fence->base);
-	}
-	return 0;
-}
 /**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
@@ -787,7 +717,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	unsigned nptes, ncmds, ndw;
 	uint32_t flags = gtt_flags;
 	struct amdgpu_ib *ib;
-	struct amdgpu_cs_parser *sched_job = NULL;
+	struct fence *f = NULL;
 	int r;
 
 	/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@@ -869,56 +799,29 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,

 	amdgpu_vm_pad_ib(adev, ib);
 	WARN_ON(ib->length_dw > ndw);
-
-	if (amdgpu_enable_scheduler) {
-		int r;
-		uint64_t v_seq;
-		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
-						    &adev->kernel_ctx, ib, 1);
-		if(!sched_job)
-			goto error_free;
-		sched_job->job_param.vm_mapping.vm = vm;
-		sched_job->job_param.vm_mapping.start = mapping->it.start;
-		sched_job->job_param.vm_mapping.last = mapping->it.last;
-		sched_job->job_param.vm_mapping.fence = fence;
-		sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
-		sched_job->free_job = amdgpu_vm_free_job;
-		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].entity.last_queued_v_seq);
-		ib->sequence = v_seq;
-		amd_sched_push_job(ring->scheduler,
-				   &adev->kernel_ctx.rings[ring->idx].entity,
-				   sched_job);
-		r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].entity,
-					v_seq,
-					false,
-					-1);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vm_free_job,
+						 AMDGPU_FENCE_OWNER_VM,
+						 &f);
 	if (r)
-			DRM_ERROR("emit timeout\n");
-	} else {
-		r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
-		if (r) {
-			amdgpu_ib_free(adev, ib);
-			return r;
-		}
+		goto error_free;
 
 	amdgpu_vm_fence_pts(vm, mapping->it.start,
-				    mapping->it.last + 1, &ib->fence->base);
+			    mapping->it.last + 1, f);
 	if (fence) {
 		fence_put(*fence);
-			*fence = fence_get(&ib->fence->base);
+		*fence = fence_get(f);
 	}
 
 	if (!amdgpu_enable_scheduler) {
 		amdgpu_ib_free(adev, ib);
 		kfree(ib);
 	}
 	return 0;
 
 error_free:
-	if (sched_job)
-		kfree(sched_job);
 	amdgpu_ib_free(adev, ib);
 	kfree(ib);
-	return -ENOMEM;
+	return r;
 }

/**
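
Net effect: amdgpu_vm_clear_bo(), amdgpu_vm_update_page_directory() and amdgpu_vm_bo_update_mapping() now share one submission tail, condensed below (a simplified sketch assembled from the hunks above, not verbatim). Note the error path propagates r from the helper instead of the old unconditional -ENOMEM, and the IB is freed explicitly only on the non-scheduler path, since free_job owns it once the job is queued.

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vm_free_job,
						 AMDGPU_FENCE_OWNER_VM,
						 &f);
	if (r)
		goto error_free;
	amdgpu_bo_fence(bo, f, true);	/* fence the touched BO(s) inline */
	return 0;

error_free:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;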