
Commit 1406a14b authored by Zhi Wang, committed by Zhenyu Wang

drm/i915/gvt: Introduce intel_vgpu_submission



Introduce struct intel_vgpu_submission to hold all the submission-related
members that previously sat directly in struct intel_vgpu.

Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
parent 9a9829e9
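
In outline, the change is a plain struct extraction: the per-vGPU submission state moves behind one named aggregate, and call sites take a single local pointer instead of reaching into struct intel_vgpu field by field. Below is a minimal standalone sketch of that pattern, not the driver's real code: demo_submission, demo_vgpu and DEMO_NUM_ENGINES are simplified placeholder types standing in for intel_vgpu_submission, intel_vgpu and I915_NUM_ENGINES.

#include <stdio.h>

#define DEMO_NUM_ENGINES 2	/* stand-in for I915_NUM_ENGINES */

/* Before this commit's shape, the fields below would sit directly in
 * the vGPU struct; after it, they are grouped in one aggregate. */
struct demo_submission {
	int execlist[DEMO_NUM_ENGINES];		/* stand-in for intel_vgpu_execlist */
	unsigned long running_workload_num;	/* stand-in for the atomic_t counter */
};

struct demo_vgpu {
	int id;
	struct demo_submission submission;	/* single embedded aggregate */
};

/* Call sites now grab one local pointer, mirroring the diff's
 * "struct intel_vgpu_submission *s = &vgpu->submission;" idiom. */
static void demo_submit(struct demo_vgpu *vgpu, int ring_id)
{
	struct demo_submission *s = &vgpu->submission;

	s->execlist[ring_id]++;
	s->running_workload_num++;
	printf("vgpu %d ring %d: %lu workload(s) in flight\n",
	       vgpu->id, ring_id, s->running_workload_num);
}

int main(void)
{
	struct demo_vgpu vgpu = { .id = 1 };

	demo_submit(&vgpu, 0);
	demo_submit(&vgpu, 1);
	return 0;
}

The edit itself is mechanical, but grouping the state this way lets later code treat a vGPU's submission interface as one unit (reset it, swap it, or pass it around) rather than touching half a dozen scattered fields.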
drivers/gpu/drm/i915/gvt/execlist.c  +18 −12
@@ -362,7 +362,7 @@ static void free_workload(struct intel_vgpu_workload *workload)
 {
 	intel_vgpu_unpin_mm(workload->shadow_mm);
 	intel_gvt_mm_unreference(workload->shadow_mm);
-	kmem_cache_free(workload->vgpu->workloads, workload);
+	kmem_cache_free(workload->vgpu->submission.workloads, workload);
 }
 
 #define get_desc_from_elsp_dwords(ed, i) \
@@ -401,7 +401,8 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 					struct intel_vgpu_workload,
 					wa_ctx);
 	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+	struct intel_vgpu_submission *s = &workload->vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
 		shadow_ctx->engine[ring_id].state->obj;
 	struct execlist_ring_context *shadow_ring_context;
@@ -474,6 +475,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct execlist_ctx_descriptor_format ctx[2];
 	int ring_id = workload->ring_id;
 	int ret;
@@ -514,7 +516,7 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
 	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
 
-	ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+	ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
 	if (!ret)
 		goto out;
 	else
@@ -533,7 +535,8 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	int ring_id = workload->ring_id;
-	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
 	struct intel_vgpu_workload *next_workload;
 	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
 	bool lite_restore = false;
@@ -652,6 +655,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 		struct execlist_ctx_descriptor_format *desc,
 		bool emulate_schedule_in)
 {
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct list_head *q = workload_q_head(vgpu, ring_id);
 	struct intel_vgpu_workload *last_workload = get_last_workload(q);
 	struct intel_vgpu_workload *workload = NULL;
@@ -689,7 +693,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 
 	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
 
-	workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
+	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
 	if (!workload)
 		return -ENOMEM;
 
@@ -738,7 +742,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 	}
 
 	if (emulate_schedule_in)
-		workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;
+		workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
 
 	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
 			workload, ring_id, head, tail, start, ctl);
@@ -748,7 +752,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 
 	ret = prepare_mm(workload);
 	if (ret) {
-		kmem_cache_free(vgpu->workloads, workload);
+		kmem_cache_free(s->workloads, workload);
 		return ret;
 	}
 
@@ -769,7 +773,8 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 
 int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 {
-	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
 	struct execlist_ctx_descriptor_format *desc[2];
 	int i, ret;
@@ -811,7 +816,8 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 
 static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 {
-	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
 	struct execlist_context_status_pointer_format ctx_status_ptr;
 	u32 ctx_status_ptr_reg;
@@ -833,6 +839,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 
 static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
 {
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
 	struct intel_vgpu_workload *pos, *n;
@@ -841,12 +848,11 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
 	/* free the unsubmited workloads in the queues. */
 	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
 		list_for_each_entry_safe(pos, n,
-			&vgpu->workload_q_head[engine->id], list) {
+			&s->workload_q_head[engine->id], list) {
 			list_del_init(&pos->list);
 			free_workload(pos);
 		}
 
-		clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
+		clear_bit(engine->id, s->shadow_ctx_desc_updated);
 	}
 }

drivers/gpu/drm/i915/gvt/gvt.h  +11 −6
@@ -142,6 +142,15 @@ struct vgpu_sched_ctl {
 	int weight;
 };
 
+struct intel_vgpu_submission {
+	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
+	struct list_head workload_q_head[I915_NUM_ENGINES];
+	struct kmem_cache *workloads;
+	atomic_t running_workload_num;
+	struct i915_gem_context *shadow_ctx;
+	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
+};
+
 struct intel_vgpu {
 	struct intel_gvt *gvt;
 	int id;
@@ -161,16 +170,12 @@ struct intel_vgpu {
 	struct intel_vgpu_gtt gtt;
 	struct intel_vgpu_opregion opregion;
 	struct intel_vgpu_display display;
-	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
-	struct list_head workload_q_head[I915_NUM_ENGINES];
-	struct kmem_cache *workloads;
-	atomic_t running_workload_num;
+	struct intel_vgpu_submission submission;
 	/* 1/2K for each reserve ring buffer */
 	void *reserve_ring_buffer_va[I915_NUM_ENGINES];
 	int reserve_ring_buffer_size[I915_NUM_ENGINES];
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
-	struct i915_gem_context *shadow_ctx;
-	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
+
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
 	struct {
drivers/gpu/drm/i915/gvt/handlers.c  +1 −1
@@ -1451,7 +1451,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
 		return -EINVAL;
 
-	execlist = &vgpu->execlist[ring_id];
+	execlist = &vgpu->submission.execlist[ring_id];
 
 	execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
 	if (execlist->elsp_dwords.index == 3) {
drivers/gpu/drm/i915/gvt/kvmgt.c  +1 −1
@@ -1188,7 +1188,7 @@ hw_id_show(struct device *dev, struct device_attribute *attr,
 		struct intel_vgpu *vgpu = (struct intel_vgpu *)
 			mdev_get_drvdata(mdev);
 		return sprintf(buf, "%u\n",
-			       vgpu->shadow_ctx->hw_id);
+			       vgpu->submission.shadow_ctx->hw_id);
 	}
 	return sprintf(buf, "\n");
 }
drivers/gpu/drm/i915/gvt/render.c  +5 −4
@@ -261,14 +261,15 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct render_mmio *mmio;
-	u32 v;
-	int i, array_size;
-	u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	u32 *reg_state = s->shadow_ctx->engine[ring_id].lrc_reg_state;
 	u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
 	u32 inhibit_mask =
 		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 	i915_reg_t last_reg = _MMIO(0);
+	struct render_mmio *mmio;
+	u32 v;
+	int i, array_size;
 
 	if (IS_SKYLAKE(vgpu->gvt->dev_priv)
 		|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {