Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7d588f90 authored by Dave Airlie
Browse files

Merge tag 'drm-intel-fixes-2018-11-08' of...

Merge tag 'drm-intel-fixes-2018-11-08' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

Bugzilla #108282 fixed: Avoid graphics corruption on 32-bit systems for Mesa 18.2.x
Avoid OOPS on LPE audio deinit. Remove two unused W/As.
Fix to correct HDMI 2.0 audio clock modes to spec.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181108134508.GA28466@jlahtine-desk.ger.corp.intel.com
parents d08f44b1 214782da
Loading
Loading
Loading
Loading
+58 −57
Original line number Diff line number Diff line
@@ -1905,7 +1905,6 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
		vgpu_free_mm(mm);
		return ERR_PTR(-ENOMEM);
	}
	mm->ggtt_mm.last_partial_off = -1UL;

	return mm;
}
@@ -1930,7 +1929,6 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
		invalidate_ppgtt_mm(mm);
	} else {
		vfree(mm->ggtt_mm.virtual_ggtt);
		mm->ggtt_mm.last_partial_off = -1UL;
	}

	vgpu_free_mm(mm);
@@ -2168,6 +2166,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	struct intel_gvt_gtt_entry e, m;
	dma_addr_t dma_addr;
	int ret;
	struct intel_gvt_partial_pte *partial_pte, *pos, *n;
	bool partial_update = false;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;
@@ -2178,68 +2178,57 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	if (!vgpu_gmadr_is_valid(vgpu, gma))
		return 0;

	ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);

	e.type = GTT_TYPE_GGTT_PTE;
	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
			bytes);

	/* If ggtt entry size is 8 bytes, and it's split into two 4 bytes
	 * write, we assume the two 4 bytes writes are consecutive.
	 * Otherwise, we abort and report error
	 * write, save the first 4 bytes in a list and update virtual
	 * PTE. Only update shadow PTE when the second 4 bytes comes.
	 */
	if (bytes < info->gtt_entry_size) {
		if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
			/* the first partial part*/
			ggtt_mm->ggtt_mm.last_partial_off = off;
			ggtt_mm->ggtt_mm.last_partial_data = e.val64;
			return 0;
		} else if ((g_gtt_index ==
				(ggtt_mm->ggtt_mm.last_partial_off >>
				info->gtt_entry_size_shift)) &&
			(off !=	ggtt_mm->ggtt_mm.last_partial_off)) {
			/* the second partial part */
		bool found = false;

			int last_off = ggtt_mm->ggtt_mm.last_partial_off &
		list_for_each_entry_safe(pos, n,
				&ggtt_mm->ggtt_mm.partial_pte_list, list) {
			if (g_gtt_index == pos->offset >>
					info->gtt_entry_size_shift) {
				if (off != pos->offset) {
					/* the second partial part*/
					int last_off = pos->offset &
						(info->gtt_entry_size - 1);

					memcpy((void *)&e.val64 + last_off,
				(void *)&ggtt_mm->ggtt_mm.last_partial_data +
				last_off, bytes);

			ggtt_mm->ggtt_mm.last_partial_off = -1UL;
		} else {
			int last_offset;

			gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
					ggtt_mm->ggtt_mm.last_partial_off, off,
					bytes, info->gtt_entry_size);

			/* set host ggtt entry to scratch page and clear
			 * virtual ggtt entry as not present for last
			 * partially write offset
			 */
			last_offset = ggtt_mm->ggtt_mm.last_partial_off &
					(~(info->gtt_entry_size - 1));

			ggtt_get_host_entry(ggtt_mm, &m, last_offset);
			ggtt_invalidate_pte(vgpu, &m);
			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
			ops->clear_present(&m);
			ggtt_set_host_entry(ggtt_mm, &m, last_offset);
			ggtt_invalidate(gvt->dev_priv);

			ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
			ops->clear_present(&e);
			ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
						(void *)&pos->data + last_off,
						bytes);

			ggtt_mm->ggtt_mm.last_partial_off = off;
			ggtt_mm->ggtt_mm.last_partial_data = e.val64;
					list_del(&pos->list);
					kfree(pos);
					found = true;
					break;
				}

				/* update of the first partial part */
				pos->data = e.val64;
				ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
				return 0;
			}
		}

	if (ops->test_present(&e)) {
		if (!found) {
			/* the first partial part */
			partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
			if (!partial_pte)
				return -ENOMEM;
			partial_pte->offset = off;
			partial_pte->data = e.val64;
			list_add_tail(&partial_pte->list,
				&ggtt_mm->ggtt_mm.partial_pte_list);
			partial_update = true;
		}
	}

	if (!partial_update && (ops->test_present(&e))) {
		gfn = ops->get_pfn(&e);
		m = e;

@@ -2263,16 +2252,18 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
		} else
			ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
	} else {
		ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
		ggtt_invalidate_pte(vgpu, &m);
		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
		ops->clear_present(&m);
	}

out:
	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);

	ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
	ggtt_invalidate_pte(vgpu, &e);

	ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
	ggtt_invalidate(gvt->dev_priv);
	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
	return 0;
}

@@ -2430,6 +2421,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)

	intel_vgpu_reset_ggtt(vgpu, false);

	INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);

	return create_scratch_page_tree(vgpu);
}

@@ -2454,6 +2447,14 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)

static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
	struct intel_gvt_partial_pte *pos;

	list_for_each_entry(pos,
			&vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) {
		gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
			pos->offset, pos->data);
		kfree(pos);
	}
	intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
	vgpu->gtt.ggtt_mm = NULL;
}
+7 −3
Original line number Diff line number Diff line
@@ -35,7 +35,6 @@
#define _GVT_GTT_H_

#define I915_GTT_PAGE_SHIFT         12
#define I915_GTT_PAGE_MASK		(~(I915_GTT_PAGE_SIZE - 1))

struct intel_vgpu_mm;

@@ -133,6 +132,12 @@ enum intel_gvt_mm_type {

#define GVT_RING_CTX_NR_PDPS	GEN8_3LVL_PDPES

struct intel_gvt_partial_pte {
	unsigned long offset;
	u64 data;
	struct list_head list;
};

struct intel_vgpu_mm {
	enum intel_gvt_mm_type type;
	struct intel_vgpu *vgpu;
@@ -157,8 +162,7 @@ struct intel_vgpu_mm {
		} ppgtt_mm;
		struct {
			void *virtual_ggtt;
			unsigned long last_partial_off;
			u64 last_partial_data;
			struct list_head partial_pte_list;
		} ggtt_mm;
	};
};
+4 −4
Original line number Diff line number Diff line
@@ -1609,7 +1609,7 @@ static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
	return 0;
}

static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	vgpu_vreg(vgpu, offset) = 0;
@@ -2607,6 +2607,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
	MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

	MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
	MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
	return 0;
}

@@ -3205,9 +3208,6 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);

	MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
	MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);

	MMIO_D(RC6_CTX_BASE, D_BXT);

	MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
+1 −1
Original line number Diff line number Diff line
@@ -131,7 +131,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
	{RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */

	{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
	{RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */
	{RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */

	{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
	{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
+8 −7
Original line number Diff line number Diff line
@@ -1175,8 +1175,6 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
		return -EINVAL;
	}

	dram_info->valid_dimm = true;

	/*
	 * If any of the channel is single rank channel, worst case output
	 * will be same as if single rank memory, so consider single rank
@@ -1193,8 +1191,7 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
		return -EINVAL;
	}

	if (ch0.is_16gb_dimm || ch1.is_16gb_dimm)
		dram_info->is_16gb_dimm = true;
	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;

	dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
								       val_ch1,
@@ -1314,7 +1311,6 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv)
		return -EINVAL;
	}

	dram_info->valid_dimm = true;
	dram_info->valid = true;
	return 0;
}
@@ -1327,12 +1323,17 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
	int ret;

	dram_info->valid = false;
	dram_info->valid_dimm = false;
	dram_info->is_16gb_dimm = false;
	dram_info->rank = I915_DRAM_RANK_INVALID;
	dram_info->bandwidth_kbps = 0;
	dram_info->num_channels = 0;

	/*
	 * Assume 16Gb DIMMs are present until proven otherwise.
	 * This is only used for the level 0 watermark latency
	 * w/a which does not apply to bxt/glk.
	 */
	dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
		return;

Loading