
Commit e2c43c01 authored by Zhi Wang, committed by Zhenyu Wang

drm/i915/gvt: Move clean_workloads() into scheduler.c

Move clean_workloads() into scheduler.c since it's not specific to
execlist.

v2:

- Remove clean_workloads in intel_vgpu_select_submission_ops. (Zhenyu)

Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
parent 06bb372f
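
For context, GVT-g dispatches vGPU submission through a small per-backend ops table, selected by intel_vgpu_select_submission_ops() (the function the v2 note refers to). clean_workloads() only walks the generic per-engine workload queues, which is why it can live next to the scheduler rather than behind the execlist backend. A minimal sketch of that indirection follows; the exact field layout is an assumption based on contemporary GVT-g sources, not part of this commit:

	/* Sketch only: per-backend submission hooks called by the generic
	 * scheduler code; layout assumed, not quoted from this commit. */
	struct intel_vgpu_submission_ops {
		const char *name;
		int (*init)(struct intel_vgpu *vgpu);    /* backend setup */
		void (*clean)(struct intel_vgpu *vgpu);  /* backend teardown */
		void (*reset)(struct intel_vgpu *vgpu,   /* backend reset */
			      unsigned long engine_mask);
	};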
drivers/gpu/drm/i915/gvt/execlist.c (+1 −40)
@@ -46,8 +46,6 @@
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
-
 static int context_switch_events[] = {
 	[RCS] = RCS_AS_CONTEXT_SWITCH,
 	[BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -397,23 +395,8 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	gvt_dbg_el("complete workload %p status %d\n", workload,
 			workload->status);
 
-	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
-		/* If workload->status is not successful, the HW GPU hit
-		 * a GPU hang or something went wrong with i915/GVT, and
-		 * GVT won't inject a context switch interrupt into the
-		 * guest. So this error is effectively a vGPU hang to the
-		 * guest, and we should emulate a vGPU hang. If there are
-		 * pending workloads already submitted by the guest, we
-		 * should clean them up like the HW GPU does.
-		 *
-		 * If we are in the middle of an engine reset, the pending
-		 * workloads won't be submitted to the HW GPU and will be
-		 * cleaned up later during the reset, so doing the workload
-		 * cleanup here has no impact.
-		 */
-		clean_workloads(vgpu, ENGINE_MASK(ring_id));
+	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
 		goto out;
-	}
 
 	if (!list_empty(workload_q_head(vgpu, ring_id))) {
 		struct execlist_ctx_descriptor_format *this_desc, *next_desc;
@@ -529,32 +512,11 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
-{
-	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct intel_engine_cs *engine;
-	struct intel_vgpu_workload *pos, *n;
-	unsigned int tmp;
-
-	/* Free the unsubmitted workloads in the queues. */
-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
-		list_for_each_entry_safe(pos, n,
-			&s->workload_q_head[engine->id], list) {
-			list_del_init(&pos->list);
-			intel_vgpu_destroy_workload(pos);
-		}
-		clear_bit(engine->id, s->shadow_ctx_desc_updated);
-	}
-}
-
 void clean_execlist(struct intel_vgpu *vgpu)
 {
 	enum intel_engine_id i;
 	struct intel_engine_cs *engine;
 
-	clean_workloads(vgpu, ALL_ENGINES);
-
 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
 		struct intel_vgpu_submission *s = &vgpu->submission;
 
@@ -571,7 +533,6 @@ void reset_execlist(struct intel_vgpu *vgpu,
 	struct intel_engine_cs *engine;
 	unsigned int tmp;
 
-	clean_workloads(vgpu, engine_mask);
 	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
 		init_vgpu_execlist(vgpu, engine->id);
 }
drivers/gpu/drm/i915/gvt/scheduler.c (+37 −0)
@@ -644,6 +644,25 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	kunmap(page);
 }
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+{
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine;
+	struct intel_vgpu_workload *pos, *n;
+	unsigned int tmp;
+
+	/* Free the unsubmitted workloads in the queues. */
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+		list_for_each_entry_safe(pos, n,
+			&s->workload_q_head[engine->id], list) {
+			list_del_init(&pos->list);
+			intel_vgpu_destroy_workload(pos);
+		}
+		clear_bit(engine->id, s->shadow_ctx_desc_updated);
+	}
+}
+
 static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
@@ -707,6 +726,23 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 		release_shadow_wa_ctx(&workload->wa_ctx);
 	}
 
+	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+		/* If workload->status is not successful, the HW GPU hit
+		 * a GPU hang or something went wrong with i915/GVT, and
+		 * GVT won't inject a context switch interrupt into the
+		 * guest. So this error is effectively a vGPU hang to the
+		 * guest, and we should emulate a vGPU hang. If there are
+		 * pending workloads already submitted by the guest, we
+		 * should clean them up like the HW GPU does.
+		 *
+		 * If we are in the middle of an engine reset, the pending
+		 * workloads won't be submitted to the HW GPU and will be
+		 * cleaned up later during the reset, so doing the workload
+		 * cleanup here has no impact.
+		 */
+		clean_workloads(vgpu, ENGINE_MASK(ring_id));
+	}
+
 	workload->complete(workload);
 
 	atomic_dec(&s->running_workload_num);
@@ -906,6 +942,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
 	if (!s->active)
 		return;
 
+	clean_workloads(vgpu, engine_mask);
 	s->ops->reset(vgpu, engine_mask);
 }
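
After the final hunk, the generic reset entry point owns the queue cleanup and the backend hook only has to reinitialize its own state. Reassembled from the hunk above (the local declaration of s is inferred, since the hunk only shows its uses), the function now reads:

	void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
					 unsigned long engine_mask)
	{
		struct intel_vgpu_submission *s = &vgpu->submission;

		if (!s->active)
			return;

		clean_workloads(vgpu, engine_mask);  /* common: drop unsubmitted work */
		s->ops->reset(vgpu, engine_mask);    /* backend-specific reset */
	}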