
Commit 9a9829e9 authored by Zhi Wang, committed by Zhenyu Wang

drm/i915/gvt: Move workload cache init/clean into intel_vgpu_{setup, clean}_submission()



Move vGPU workload cache initialization/de-initialization into
intel_vgpu_{setup, clean}_submission() since they are not specific to
execlist stuff.

Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
parent 874b6a91
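
The workload cache being moved here is a dedicated Linux slab cache. As a minimal standalone sketch of the create/destroy lifecycle the patch relies on, using only the kmem_cache_* API (the struct and function names below are placeholders for illustration, not taken from the patch):

#include <linux/errno.h>
#include <linux/slab.h>

/* Placeholder object type standing in for struct intel_vgpu_workload. */
struct example_workload {
	unsigned long id;
};

static struct kmem_cache *example_cache;

static int example_setup(void)
{
	/* SLAB_HWCACHE_ALIGN aligns objects to CPU cache lines, the same
	 * flag the patch passes for the gvt-g_vgpu_workload cache. */
	example_cache = kmem_cache_create("example_workload",
			sizeof(struct example_workload), 0,
			SLAB_HWCACHE_ALIGN, NULL);
	return example_cache ? 0 : -ENOMEM;
}

static void example_clean(void)
{
	/* Every object handed out by kmem_cache_alloc() must be returned
	 * via kmem_cache_free() before the cache itself is destroyed. */
	kmem_cache_destroy(example_cache);
}

Because the cache is created in setup and destroyed in teardown of the same subsystem, moving both calls together (as this patch does) keeps the lifecycle symmetric.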
drivers/gpu/drm/i915/gvt/execlist.c  +1 −14
@@ -856,14 +856,12 @@ void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 	struct intel_engine_cs *engine;
 
 	clean_workloads(vgpu, ALL_ENGINES);
-	kmem_cache_destroy(vgpu->workloads);
 
 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
 		kfree(vgpu->reserve_ring_buffer_va[i]);
 		vgpu->reserve_ring_buffer_va[i] = NULL;
 		vgpu->reserve_ring_buffer_size[i] = 0;
 	}
-
 }
 
 #define RESERVE_RING_BUFFER_SIZE		((1 * PAGE_SIZE)/8)
@@ -872,19 +870,8 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
 	enum intel_engine_id i;
 	struct intel_engine_cs *engine;
 
 	/* each ring has a virtual execlist engine */
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+	for_each_engine(engine, vgpu->gvt->dev_priv, i)
 		init_vgpu_execlist(vgpu, i);
-		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
-	}
-
-	vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
-			sizeof(struct intel_vgpu_workload), 0,
-			SLAB_HWCACHE_ALIGN,
-			NULL);
-
-	if (!vgpu->workloads)
-		return -ENOMEM;
-
 	/* each ring has a shadow ring buffer until vgpu destroyed */
 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
drivers/gpu/drm/i915/gvt/scheduler.c  +23 −1
@@ -719,6 +719,7 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 {
 	i915_gem_context_put(vgpu->shadow_ctx);
+	kmem_cache_destroy(vgpu->workloads);
 }
 
 /**
@@ -733,7 +734,9 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
  */
 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 {
-	atomic_set(&vgpu->running_workload_num, 0);
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
+	int ret;
 
 	vgpu->shadow_ctx = i915_gem_context_create_gvt(
 			&vgpu->gvt->dev_priv->drm);
@@ -742,5 +745,24 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 
 	bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
+	vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
+			sizeof(struct intel_vgpu_workload), 0,
+			SLAB_HWCACHE_ALIGN,
+			NULL);
+
+	if (!vgpu->workloads) {
+		ret = -ENOMEM;
+		goto out_shadow_ctx;
+	}
+
+	for_each_engine(engine, vgpu->gvt->dev_priv, i)
+		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+
+	atomic_set(&vgpu->running_workload_num, 0);
+
 	return 0;
+
+out_shadow_ctx:
+	i915_gem_context_put(vgpu->shadow_ctx);
+	return ret;
 }
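
The error path added to intel_vgpu_setup_submission() follows the standard kernel goto-unwind idiom: a failure after the shadow context has been created jumps to a label that releases it before returning, so resources are torn down in reverse order of acquisition. A generic sketch of the idiom (acquire_a/release_a and friends are hypothetical helpers, not functions from this driver):

#include <linux/errno.h>

/* Hypothetical helpers, for illustration only. */
void *acquire_a(void);
void *acquire_b(void);
void release_a(void *a);

int example_setup(void)
{
	void *a, *b;
	int ret;

	a = acquire_a();		/* first resource */
	if (!a)
		return -ENOMEM;		/* nothing to unwind yet */

	b = acquire_b();		/* second resource */
	if (!b) {
		ret = -ENOMEM;
		goto out_release_a;	/* unwind only what succeeded */
	}

	return 0;

out_release_a:
	release_a(a);			/* reverse order of acquisition */
	return ret;
}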