
Commit 73ba2d5c authored by Dave Airlie

Merge tag 'drm-intel-next-fixes-2017-04-27' of git://anongit.freedesktop.org/git/drm-intel into drm-next

drm/i915 and gvt fixes for drm-next/v4.12

* tag 'drm-intel-next-fixes-2017-04-27' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: Confirm the request is still active before adding it to the await
  drm/i915: Avoid busy-spinning on VLV_GLTC_PW_STATUS mmio
  drm/i915/selftests: Allocate inode/file dynamically
  drm/i915: Fix system hang with EI UP masked on Haswell
  drm/i915: checking for NULL instead of IS_ERR() in mock selftests
  drm/i915: Perform link quality check unconditionally during long pulse
  drm/i915: Fix use after free in lpe_audio_platdev_destroy()
  drm/i915: Use the right mapping_gfp_mask for final shmem allocation
  drm/i915: Make legacy cursor updates more unsynced
  drm/i915: Apply a cond_resched() to the saturated signaler
  drm/i915: Park the signaler before sleeping
  drm/i915/gvt: fix a bounds check in ring_id_to_context_switch_event()
  drm/i915/gvt: Fix PTE write flush for taking runtime pm properly
  drm/i915/gvt: remove some debug messages in scheduler timer handler
  drm/i915/gvt: add mmio init for virtual display
  drm/i915/gvt: use directly assignment for structure copying
  drm/i915/gvt: remove redundant ring id check which cause significant CPU misprediction
  drm/i915/gvt: remove redundant platform check for mocs load/restore
  drm/i915/gvt: Align render mmio list to cacheline
  drm/i915/gvt: cleanup some too chatty scheduler message
parents 53cecf1b 88326ef0
drivers/gpu/drm/i915/gvt/cmd_parser.c +1 −7

@@ -616,9 +616,6 @@ static inline u32 get_opcode(u32 cmd, int ring_id)
 {
 	struct decode_info *d_info;
 
-	if (ring_id >= I915_NUM_ENGINES)
-		return INVALID_OP;
-
 	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
 	if (d_info == NULL)
 		return INVALID_OP;
@@ -661,9 +658,6 @@ static inline void print_opcode(u32 cmd, int ring_id)
 	struct decode_info *d_info;
 	int i;
 
-	if (ring_id >= I915_NUM_ENGINES)
-		return;
-
 	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
 	if (d_info == NULL)
 		return;
@@ -2483,7 +2477,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 
 	t1 = get_cycles();
 
-	memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state));
+	s_before_advance_custom = *s;
 
 	if (info->handler) {
 		ret = info->handler(s);
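
The final hunk replaces a memcpy() of a whole structure with plain assignment. A minimal stand-alone illustration of why the assignment form is preferable; the type and function names here are invented for the sketch:

#include <string.h>

struct parser_state {		/* stand-in for struct parser_exec_state */
	int ring_id;
	unsigned long ip;
};

void save_state(struct parser_state *dst, const struct parser_state *src)
{
	/* Error-prone: the size must be spelled out by hand and silently
	 * breaks if the wrong type is named in sizeof. */
	memcpy(dst, src, sizeof(struct parser_state));

	/* Equivalent copy, shorter, and fully type-checked. */
	*dst = *src;
}
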
drivers/gpu/drm/i915/gvt/display.c +28 −1

@@ -189,17 +189,44 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 	}
 
 	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
-		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
 		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
+			TRANS_DDI_PORT_MASK);
+		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+			(PORT_B << TRANS_DDI_PORT_SHIFT) |
+			TRANS_DDI_FUNC_ENABLE);
+		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
+		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
+		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
 	}
 
 	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
 		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
+			TRANS_DDI_PORT_MASK);
+		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+			(PORT_C << TRANS_DDI_PORT_SHIFT) |
+			TRANS_DDI_FUNC_ENABLE);
+		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
+		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
 		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
 	}
 
 	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
 		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
+			TRANS_DDI_PORT_MASK);
+		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+			(PORT_D << TRANS_DDI_PORT_SHIFT) |
+			TRANS_DDI_FUNC_ENABLE);
+		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
+		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
 		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
 	}
 
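
Each port block above uses the same read-modify-write idiom: clear a multi-bit register field with its mask, then OR in the new field value. A self-contained sketch of the pattern, with a hypothetical register field (the mask and shift are invented):

#include <stdint.h>

#define FIELD_SHIFT	4
#define FIELD_MASK	(0x7u << FIELD_SHIFT)	/* hypothetical 3-bit field */

static inline uint32_t set_field(uint32_t reg, uint32_t val)
{
	reg &= ~FIELD_MASK;			  /* clear the old contents */
	reg |= (val << FIELD_SHIFT) & FIELD_MASK; /* install the new value */
	return reg;
}

Clearing first matters: OR-ing alone could leave stale bits set inside the field from its previous value.
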
drivers/gpu/drm/i915/gvt/execlist.c +3 −5

@@ -56,8 +56,8 @@ static int context_switch_events[] = {
 
 static int ring_id_to_context_switch_event(int ring_id)
 {
-	if (WARN_ON(ring_id < RCS && ring_id >
-				ARRAY_SIZE(context_switch_events)))
+	if (WARN_ON(ring_id < RCS ||
+		    ring_id >= ARRAY_SIZE(context_switch_events)))
 		return -EINVAL;
 
 	return context_switch_events[ring_id];
@@ -687,9 +687,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 	}
 
 	if (emulate_schedule_in)
-		memcpy(&workload->elsp_dwords,
-				&vgpu->execlist[ring_id].elsp_dwords,
-				sizeof(workload->elsp_dwords));
+		workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;
 
 	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
 			workload, ring_id, head, tail, start, ctl);
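
The first hunk is the bounds-check fix called out in the shortlog. No ring_id can be below RCS and simultaneously above the array size, so the old &&-joined condition could never be true and every ring_id passed; it also used > where >= is needed. A hypothetical stand-alone version of the corrected check:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int events[] = { 10, 11, 12 };

static int id_to_event(int id)
{
	/* Reject negative ids and ids at or beyond the array size. */
	if (id < 0 || (size_t)id >= ARRAY_SIZE(events))
		return -1;
	return events[id];
}

int main(void)
{
	printf("%d %d %d\n", id_to_event(-1), id_to_event(1), id_to_event(3));
	return 0;	/* prints: -1 11 -1 */
}
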
drivers/gpu/drm/i915/gvt/gtt.c +5 −0

@@ -2294,12 +2294,15 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	u32 index;
 	u32 offset;
 	u32 num_entries;
 	struct intel_gvt_gtt_entry e;
 
+	intel_runtime_pm_get(dev_priv);
+
 	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
 	e.type = GTT_TYPE_GGTT_PTE;
 	ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
@@ -2314,6 +2317,8 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
 	for (offset = 0; offset < num_entries; offset++)
 		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
 
+	intel_runtime_pm_put(dev_priv);
 }
 
 /**
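
The fix brackets the GGTT entry writes with a runtime-PM reference so the device cannot autosuspend while the PTE writes and their flush are in flight. The shape of the pattern, reduced to a runnable toy; pm_get()/pm_put() are stand-ins for intel_runtime_pm_get()/intel_runtime_pm_put():

#include <stdio.h>

static int pm_refcount;		/* toy wakeref count */

static void pm_get(void) { pm_refcount++; }	/* device may power up */
static void pm_put(void) { pm_refcount--; }	/* may suspend at zero */

static void write_entry(int i)
{
	printf("write entry %d (ref=%d)\n", i, pm_refcount);
}

static void reset_entries(int n)
{
	pm_get();			/* hold the device awake... */
	for (int i = 0; i < n; i++)
		write_entry(i);
	pm_put();			/* ...until every write has landed */
}

int main(void)
{
	reset_entries(3);
	return 0;
}
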
drivers/gpu/drm/i915/gvt/render.c +2 −8

@@ -44,7 +44,7 @@ struct render_mmio {
 	u32 value;
 };
 
-static struct render_mmio gen8_render_mmio_list[] = {
+static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
 	{RCS, _MMIO(0x229c), 0xffff, false},
 	{RCS, _MMIO(0x2248), 0x0, false},
 	{RCS, _MMIO(0x2098), 0x0, false},
@@ -75,7 +75,7 @@ static struct render_mmio gen8_render_mmio_list[] = {
 	{BCS, _MMIO(0x22028), 0x0, false},
 };
 
-static struct render_mmio gen9_render_mmio_list[] = {
+static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = {
 	{RCS, _MMIO(0x229c), 0xffff, false},
 	{RCS, _MMIO(0x2248), 0x0, false},
 	{RCS, _MMIO(0x2098), 0x0, false},
@@ -204,9 +204,6 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
 
-	if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
-		return;
-
 	offset.reg = regs[ring_id];
 	for (i = 0; i < 64; i++) {
 		gen9_render_mocs[ring_id][i] = I915_READ(offset);
@@ -242,9 +239,6 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
 
-	if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
-		return;
-
 	offset.reg = regs[ring_id];
 	for (i = 0; i < 64; i++) {
 		vgpu_vreg(vgpu, offset) = I915_READ(offset);
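
The alignment change pins the start of each MMIO list to a cache-line boundary so the hot save/restore loops walk fewer partially-filled lines. A stand-alone approximation (the kernel's __cacheline_aligned also adds a section placement; the struct here is a reduced stand-in):

#include <stdint.h>
#include <stdio.h>

#define CACHELINE 64
#define __cacheline_aligned __attribute__((__aligned__(CACHELINE)))

struct render_mmio_entry {	/* reduced stand-in for struct render_mmio */
	int ring_id;
	uint32_t reg;
	uint32_t mask;
	int in_context;
};

static struct render_mmio_entry mmio_list[] __cacheline_aligned = {
	{ 0, 0x229c, 0xffff, 0 },
	{ 0, 0x2248, 0x0000, 0 },
};

int main(void)
{
	printf("list at %p, cache-line aligned: %d\n", (void *)mmio_list,
	       (uintptr_t)mmio_list % CACHELINE == 0);
	return 0;
}
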