Commit 0cce2823 authored by fred gao, committed by Zhenyu Wang

drm/i915/gvt: Refine error handling for prepare_execlist_workload

Refine the error handling for prepare_execlist_workload so that the
original state is restored once an error occurs.
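
For illustration, the shape of the fix is the kernel's usual goto-based
unwind idiom: each step that acquires a resource gets a matching release
label, so a failure at any step rolls back everything taken before it, in
reverse order. A minimal standalone sketch of that pattern follows; the
step_*/undo_* names are hypothetical stand-ins, not the GVT functions:

  #include <stdio.h>

  /* Hypothetical stand-ins for the real pin/sync/prepare steps. */
  static int step_a(void) { return 0; }
  static int step_b(void) { return 0; }
  static int step_c(void) { return -1; }  /* simulate a failure */
  static void undo_a(void) { puts("undo step a"); }
  static void undo_b(void) { puts("undo step b"); }

  static int prepare(void)
  {
  	int ret;

  	ret = step_a();
  	if (ret)
  		goto out;		/* nothing acquired yet */

  	ret = step_b();
  	if (ret)
  		goto err_undo_a;	/* roll back step_a only */

  	ret = step_c();
  	if (ret)
  		goto err_undo_b;	/* roll back step_b, then step_a */

  	return 0;			/* success: keep everything */

  err_undo_b:
  	undo_b();
  err_undo_a:
  	undo_a();
  out:
  	return ret;
  }

  int main(void)
  {
  	return prepare() ? 1 : 0;
  }

The labels fall through, so a failure at step_c releases b, then a, then
returns the original error code unchanged.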

Only release the shadowed batch buffer and wa ctx when the workload
completes successfully.
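
At completion time the release therefore becomes conditional: a workload
that failed has already had its shadow resources unwound on the prepare
error path, so releasing them again would be a double free. A hedged
sketch of that guard, with hypothetical names rather than the actual GVT
code:

  #include <stdio.h>

  /* Hypothetical stand-ins for the real shadow-resource releases. */
  static void release_batch(void)  { puts("release shadow batch buffer"); }
  static void release_wa_ctx(void) { puts("release shadow wa ctx"); }

  /* status == 0 means the workload completed successfully; on failure
   * the prepare path has already unwound these resources, so releasing
   * them here again would be a double free. */
  static void complete(int status)
  {
  	if (!status) {
  		release_batch();
  		release_wa_ctx();
  	}
  }

  int main(void)
  {
  	complete(0);	/* success: resources released here */
  	complete(-5);	/* failure: nothing left to release */
  	return 0;
  }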

v2:
- split the several mixed error paths for better review. (Zhenyu)

v3:
- handle prepare batch buffer/wa ctx pin errors and
  emulate_schedule_in null issue. (Zhenyu)

v4:
- no need to handle emulate_schedule_in null issue. (Zhenyu)

v5:
- release the shadowed batch buffer and wa ctx only for the
  successful workload. (Zhenyu)

v6:
- polish the return style. (Zhenyu)

Signed-off-by: fred gao <fred.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent a3cfdca9
+69 −27
@@ -368,7 +368,7 @@ static void free_workload(struct intel_vgpu_workload *workload)
 #define get_desc_from_elsp_dwords(ed, i) \
 	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
 
-static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
 	const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
 	struct intel_shadow_bb_entry *entry_obj;
@@ -379,7 +379,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
 		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
 		if (IS_ERR(vma)) {
-			return;
+			return PTR_ERR(vma);
 		}
 
 		/* FIXME: we are not tracking our pinned VMA leaving it
@@ -392,6 +392,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 		if (gmadr_bytes == 8)
 			entry_obj->bb_start_cmd_va[2] = 0;
 	}
+	return 0;
 }
 
 static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -420,7 +421,7 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	return 0;
 }
 
-static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	struct i915_vma *vma;
 	unsigned char *per_ctx_va =
@@ -428,12 +429,12 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 		wa_ctx->indirect_ctx.size;
 
 	if (wa_ctx->indirect_ctx.size == 0)
-		return;
+		return 0;
 
 	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
 				       0, CACHELINE_BYTES, 0);
 	if (IS_ERR(vma)) {
-		return;
+		return PTR_ERR(vma);
 	}
 
 	/* FIXME: we are not tracking our pinned VMA leaving it
@@ -447,26 +448,7 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	memset(per_ctx_va, 0, CACHELINE_BYTES);
 
 	update_wa_ctx_2_shadow_ctx(wa_ctx);
-}
-
-static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
-{
-	struct intel_vgpu *vgpu = workload->vgpu;
-	struct execlist_ctx_descriptor_format ctx[2];
-	int ring_id = workload->ring_id;
-
-	intel_vgpu_pin_mm(workload->shadow_mm);
-	intel_vgpu_sync_oos_pages(workload->vgpu);
-	intel_vgpu_flush_post_shadow(workload->vgpu);
-	prepare_shadow_batch_buffer(workload);
-	prepare_shadow_wa_ctx(&workload->wa_ctx);
-	if (!workload->emulate_schedule_in)
-		return 0;
-
-	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
-	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
-
-	return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+	return 0;
 }
 
 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
@@ -489,6 +471,64 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 	}
 }
 
+static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct execlist_ctx_descriptor_format ctx[2];
+	int ring_id = workload->ring_id;
+	int ret;
+
+	ret = intel_vgpu_pin_mm(workload->shadow_mm);
+	if (ret) {
+		gvt_vgpu_err("fail to vgpu pin mm\n");
+		goto out;
+	}
+
+	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
+	if (ret) {
+		gvt_vgpu_err("fail to vgpu sync oos pages\n");
+		goto err_unpin_mm;
+	}
+
+	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
+	if (ret) {
+		gvt_vgpu_err("fail to flush post shadow\n");
+		goto err_unpin_mm;
+	}
+
+	ret = prepare_shadow_batch_buffer(workload);
+	if (ret) {
+		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
+		goto err_unpin_mm;
+	}
+
+	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
+	if (ret) {
+		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
+		goto err_shadow_batch;
+	}
+
+	if (!workload->emulate_schedule_in)
+		return 0;
+
+	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
+	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+
+	ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+	if (!ret)
+		goto out;
+	else
+		gvt_vgpu_err("fail to emulate execlist schedule in\n");
+
+	release_shadow_wa_ctx(&workload->wa_ctx);
+err_shadow_batch:
+	release_shadow_batch_buffer(workload);
+err_unpin_mm:
+	intel_vgpu_unpin_mm(workload->shadow_mm);
+out:
+	return ret;
+}
+
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
@@ -502,8 +542,10 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	gvt_dbg_el("complete workload %p status %d\n", workload,
 			workload->status);
 
-	release_shadow_batch_buffer(workload);
-	release_shadow_wa_ctx(&workload->wa_ctx);
+	if (!workload->status) {
+		release_shadow_batch_buffer(workload);
+		release_shadow_wa_ctx(&workload->wa_ctx);
+	}
 
 	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
 		/* if workload->status is not successful means HW GPU