Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 12ea39f8 authored by Jani Nikula's avatar Jani Nikula
Browse files

Merge tag 'gvt-fixes-2017-05-25' of https://github.com/01org/gvt-linux into drm-intel-fixes



gvt-fixes-2017-05-25

- workload cleanup fix for vGPU destroy (Changbin)
- disable compression workaround to fix vGPU hang (Chuanxiao)

Signed-off-by: default avatarJani Nikula <jani.nikula@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170525083802.ae4uwx2qks2ho35b@zhen-hp.sh.intel.com
parents 2e0bb5b3 e274086e
Loading
Loading
Loading
Loading
+20 −10
Original line number Original line Diff line number Diff line
@@ -779,8 +779,26 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
}


/*
 * clean_workloads - drop every not-yet-submitted workload queued on a vGPU
 * @vgpu: the virtual GPU whose per-engine workload queues are purged
 * @engine_mask: bitmask selecting which engines to purge (e.g. ALL_ENGINES)
 *
 * Walks each engine selected by @engine_mask and frees all workloads still
 * sitting in that engine's queue (vgpu->workload_q_head[engine->id]).
 * Uses the _safe list iterator because each entry is unlinked and freed
 * while iterating. Workloads already submitted to hardware are not
 * touched here — only the queued (unsubmitted) ones.
 */
static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine;
	struct intel_vgpu_workload *pos, *n;
	unsigned int tmp;

	/* free the unsubmitted workloads in the queues. */
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
		list_for_each_entry_safe(pos, n,
			&vgpu->workload_q_head[engine->id], list) {
			/* list_del_init (not list_del) leaves pos->list in a
			 * valid empty state before the entry is freed. */
			list_del_init(&pos->list);
			free_workload(pos);
		}
	}
}

void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
{
{
	clean_workloads(vgpu, ALL_ENGINES);
	kmem_cache_destroy(vgpu->workloads);
	kmem_cache_destroy(vgpu->workloads);
}
}


@@ -811,17 +829,9 @@ void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
{
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine;
	struct intel_engine_cs *engine;
	struct intel_vgpu_workload *pos, *n;
	unsigned int tmp;
	unsigned int tmp;


	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
	clean_workloads(vgpu, engine_mask);
		/* free the unsubmited workload in the queue */
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		list_for_each_entry_safe(pos, n,
			&vgpu->workload_q_head[engine->id], list) {
			list_del_init(&pos->list);
			free_workload(pos);
		}

		init_vgpu_execlist(vgpu, engine->id);
		init_vgpu_execlist(vgpu, engine->id);
}
}
}
+21 −9
Original line number Original line Diff line number Diff line
@@ -1366,18 +1366,28 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
		void *p_data, unsigned int bytes)
{
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	i915_reg_t reg = {.reg = offset};
	u32 v = *(u32 *)p_data;

	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
		return intel_vgpu_default_mmio_write(vgpu,
				offset, p_data, bytes);


	switch (offset) {
	switch (offset) {
	case 0x4ddc:
	case 0x4ddc:
		vgpu_vreg(vgpu, offset) = 0x8000003c;
		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
		vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
		I915_WRITE(reg, vgpu_vreg(vgpu, offset));
		break;
		break;
	case 0x42080:
	case 0x42080:
		vgpu_vreg(vgpu, offset) = 0x8000;
		/* bypass WaCompressedResourceDisplayNewHashMode */
		/* WaCompressedResourceDisplayNewHashMode:skl */
		vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
		I915_WRITE(reg, vgpu_vreg(vgpu, offset));
		break;
	case 0xe194:
		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
		vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
		break;
	case 0x7014:
		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
		vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
		break;
		break;
	default:
	default:
		return -EINVAL;
		return -EINVAL;
@@ -1634,7 +1644,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
		NULL, NULL);
	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
		 skl_misc_ctl_write);
	MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2568,7 +2579,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
	MMIO_D(0x6e570, D_BDW_PLUS);
	MMIO_D(0x6e570, D_BDW_PLUS);
	MMIO_D(0x65f10, D_BDW_PLUS);
	MMIO_D(0x65f10, D_BDW_PLUS);


	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
		 skl_misc_ctl_write);
	MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);