
Commit 05dfe9f2 authored by Joonas Lahtinen

Merge tag 'gvt-fixes-2017-11-28' of https://github.com/intel/gvt-linux into drm-intel-fixes



gvt-fixes-2017-11-28

- regression fix for sane request alloc (Fred)
- locking fix (Changbin)
- fix invalid addr mask (Xiong)
- compression regression fix (Weinan)
- fix default pipe enable for virtual display (Xiaolin)

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parents ac29fc66 b721b65a
+2 −0
@@ -282,6 +282,7 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
 				    int type, unsigned int resolution)
 {
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
 
 	if (WARN_ON(resolution >= GVT_EDID_NUM))
@@ -307,6 +308,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
 	port->type = type;
 
 	emulate_monitor_status_change(vgpu);
+	vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
 	return 0;
 }
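
The one-line addition above presets the enable bit of the emulated PIPECONF register for pipe A whenever a virtual DP monitor is set up, so the guest sees an already-enabled pipe (the "fix default pipe enable for virtual display" item in the tag). As a stand-alone illustration of the same read-modify-write on an emulated register file (the toy_* names are invented for this sketch and are not GVT-g APIs):

#include <stdint.h>
#include <stdio.h>

#define PIPECONF_ENABLE (1u << 31)   /* enable bit of PIPECONF, as in i915_reg.h */

/* toy stand-in for a vGPU's emulated (virtual) register block */
struct toy_vgpu { uint32_t vreg[1]; };
#define TOY_PIPECONF_A 0             /* slot standing in for PIPECONF(PIPE_A) */

static void toy_setup_virtual_monitor(struct toy_vgpu *vgpu)
{
	/* same idea as vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE */
	vgpu->vreg[TOY_PIPECONF_A] |= PIPECONF_ENABLE;
}

int main(void)
{
	struct toy_vgpu vgpu = { { 0 } };

	toy_setup_virtual_monitor(&vgpu);
	printf("virtual PIPECONF(PIPE_A) = %#x\n", vgpu.vreg[TOY_PIPECONF_A]);
	return 0;
}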


+6 −0
@@ -496,6 +496,12 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 		goto err_unpin_mm;
 	}
 
+	ret = intel_gvt_generate_request(workload);
+	if (ret) {
+		gvt_vgpu_err("fail to generate request\n");
+		goto err_unpin_mm;
+	}
+
 	ret = prepare_shadow_batch_buffer(workload);
 	if (ret) {
 		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
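
The intel_gvt_generate_request() call added here pairs with the new helper of the same name, split out of intel_gvt_scan_and_shadow_workload() in the scheduler change further down: the i915 request is now allocated during workload preparation instead of inside the scan-and-shadow path (the "regression fix for sane request alloc" item in the tag).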
+3 −3
@@ -311,9 +311,9 @@ static inline int gtt_set_entry64(void *pt,
 
 #define GTT_HAW 46
 
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
 
 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
 {
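
Quick sanity check of the arithmetic: with GTT_HAW = 46, a 1G-aligned address mask should cover bits 45:30, i.e. 46 - 30 = 16 bits; the old "+ 1" variants reached one bit past the hardware address width. A small hypothetical user-space check (not part of the patch):

#include <assert.h>
#include <stdio.h>

#define GTT_HAW 46

/* fixed mask: (GTT_HAW - 30) bits, the topmost being bit 45 */
#define ADDR_1G_MASK     (((1UL << (GTT_HAW - 30)) - 1) << 30)
/* old variant, kept only to show the off-by-one: it also sets bit 46 */
#define ADDR_1G_MASK_OLD (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)

int main(void)
{
	assert(ADDR_1G_MASK     == 0x0000ffffc0000000UL);  /* bits 45:30 */
	assert(ADDR_1G_MASK_OLD == 0x0001ffffc0000000UL);  /* bits 46:30, too wide */
	printf("fixed: %#lx  old: %#lx\n", ADDR_1G_MASK, ADDR_1G_MASK_OLD);
	return 0;
}
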
+5 −40
@@ -1381,40 +1381,6 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
 	return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
 }
 
-static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
-		void *p_data, unsigned int bytes)
-{
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	u32 v = *(u32 *)p_data;
-
-	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
-		return intel_vgpu_default_mmio_write(vgpu,
-				offset, p_data, bytes);
-
-	switch (offset) {
-	case 0x4ddc:
-		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-		vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
-		break;
-	case 0x42080:
-		/* bypass WaCompressedResourceDisplayNewHashMode */
-		vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
-		break;
-	case 0xe194:
-		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-		vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
-		break;
-	case 0x7014:
-		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-		vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
@@ -1671,8 +1637,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
 		NULL, NULL);
-	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
-		 skl_misc_ctl_write);
+	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+		 NULL, NULL);
 	MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2564,8 +2530,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x6e570, D_BDW_PLUS);
 	MMIO_D(0x65f10, D_BDW_PLUS);
 
-	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
-		 skl_misc_ctl_write);
+	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2615,8 +2580,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
-	MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+	MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
 	MMIO_D(0x45504, D_SKL_PLUS);
 	MMIO_D(0x45520, D_SKL_PLUS);
 	MMIO_D(0x46000, D_SKL_PLUS);
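
With skl_misc_ctl_write() removed, writes to 0x4ddc, 0x42080, 0xe194 and COMMON_SLICE_CHICKEN2 fall through to the default MMIO handlers, so the workaround bits the old handler cleared are now stored unmodified in the vGPU's virtual registers (the "compression regression fix" item in the tag).
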
+25 −8
@@ -140,9 +140,10 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	enum intel_engine_id ring_id = req->engine->id;
 	struct intel_vgpu_workload *workload;
+	unsigned long flags;
 
 	if (!is_gvt_request(req)) {
-		spin_lock_bh(&scheduler->mmio_context_lock);
+		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
 		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
 		    scheduler->engine_owner[ring_id]) {
 			/* Switch ring from vGPU to host. */
@@ -150,7 +151,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 					      NULL, ring_id);
 			scheduler->engine_owner[ring_id] = NULL;
 		}
-		spin_unlock_bh(&scheduler->mmio_context_lock);
+		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
 
 		return NOTIFY_OK;
 	}
@@ -161,7 +162,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 
 	switch (action) {
 	case INTEL_CONTEXT_SCHEDULE_IN:
-		spin_lock_bh(&scheduler->mmio_context_lock);
+		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
 		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
 			/* Switch ring from host to vGPU or vGPU to vGPU. */
 			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
@@ -170,7 +171,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 		} else
 			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
 				      ring_id, workload->vgpu->id);
-		spin_unlock_bh(&scheduler->mmio_context_lock);
+		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
 		atomic_set(&workload->shadow_ctx_active, 1);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
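
Why irqsave instead of _bh: spin_lock_bh() only disables softirqs, so if this notifier can also run in hard-interrupt context (as the switch away from _bh suggests), the same lock could be taken from an IRQ while it is already held on that CPU. A minimal stand-alone sketch of the pattern used above (example_lock and update_owner are invented names for this sketch):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* stand-in for scheduler->mmio_context_lock */

static void update_owner(void)
{
	unsigned long flags;

	/*
	 * spin_lock_irqsave() disables local interrupts while the lock is
	 * held, so an interrupt handler on this CPU cannot re-enter and
	 * deadlock on the same lock; spin_lock_bh() would only have kept
	 * softirqs out.
	 */
	spin_lock_irqsave(&example_lock, flags);
	/* ... update engine_owner[] bookkeeping here ... */
	spin_unlock_irqrestore(&example_lock, flags);
}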
@@ -253,7 +254,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	struct drm_i915_gem_request *rq;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_ring *ring;
 	int ret;
@@ -299,6 +299,26 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = populate_shadow_context(workload);
 	if (ret)
 		goto err_unpin;
+	workload->shadowed = true;
+	return 0;
+
+err_unpin:
+	engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+	release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+	return ret;
+}
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+	int ring_id = workload->ring_id;
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+	struct drm_i915_gem_request *rq;
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+	int ret;
 
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
@@ -313,14 +333,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = copy_workload_to_ring_buffer(workload);
 	if (ret)
 		goto err_unpin;
-	workload->shadowed = true;
 	return 0;
 
 err_unpin:
 	engine->context_unpin(engine, shadow_ctx);
-err_shadow:
-	release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
 	return ret;
 }
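
Taken together with the execlist hunk above, shadowing and request allocation are now two separate steps. A rough sketch of the resulting call order from a hypothetical caller (the real call sites are in the GVT-g execlist/scheduler code; example_prepare is not a function in this patch):

/* illustrative only: shadow first, allocate the i915 request afterwards */
static int example_prepare(struct intel_vgpu_workload *workload)
{
	int ret;

	/* builds the shadow context/ring copy; no request is allocated here */
	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		return ret;

	/* only now is the drm_i915_gem_request allocated and filled */
	return intel_gvt_generate_request(workload);
}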

