Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0581a5cb authored by Dave Airlie
Browse files

Merge tag 'drm-intel-fixes-2018-07-05' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

A couple of GVT fixes, and a GGTT mmapping fix.

Signed-off-by: Dave Airlie <airlied@redhat.com>

Link: https://patchwork.freedesktop.org/patch/msgid/8736wxq35t.fsf@intel.com
parents b7716735 3030deda
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -196,7 +196,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
			TRANS_DDI_PORT_MASK);
		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
			(PORT_B << TRANS_DDI_PORT_SHIFT) |
			TRANS_DDI_FUNC_ENABLE);
		if (IS_BROADWELL(dev_priv)) {
@@ -216,7 +216,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
			TRANS_DDI_PORT_MASK);
		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
			(PORT_C << TRANS_DDI_PORT_SHIFT) |
			TRANS_DDI_FUNC_ENABLE);
		if (IS_BROADWELL(dev_priv)) {
@@ -236,7 +236,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
			TRANS_DDI_PORT_MASK);
		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
			(PORT_D << TRANS_DDI_PORT_SHIFT) |
			TRANS_DDI_FUNC_ENABLE);
		if (IS_BROADWELL(dev_priv)) {
+58 −0
Original line number Diff line number Diff line
@@ -1592,6 +1592,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
		vgpu_free_mm(mm);
		return ERR_PTR(-ENOMEM);
	}
	mm->ggtt_mm.last_partial_off = -1UL;

	return mm;
}
@@ -1616,6 +1617,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
		invalidate_ppgtt_mm(mm);
	} else {
		vfree(mm->ggtt_mm.virtual_ggtt);
		mm->ggtt_mm.last_partial_off = -1UL;
	}

	vgpu_free_mm(mm);
@@ -1868,6 +1870,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
			bytes);

	/* If ggtt entry size is 8 bytes, and it's split into two 4 bytes
	 * write, we assume the two 4 bytes writes are consecutive.
	 * Otherwise, we abort and report error
	 */
	if (bytes < info->gtt_entry_size) {
		if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
			/* the first partial part*/
			ggtt_mm->ggtt_mm.last_partial_off = off;
			ggtt_mm->ggtt_mm.last_partial_data = e.val64;
			return 0;
		} else if ((g_gtt_index ==
				(ggtt_mm->ggtt_mm.last_partial_off >>
				info->gtt_entry_size_shift)) &&
			(off !=	ggtt_mm->ggtt_mm.last_partial_off)) {
			/* the second partial part */

			int last_off = ggtt_mm->ggtt_mm.last_partial_off &
				(info->gtt_entry_size - 1);

			memcpy((void *)&e.val64 + last_off,
				(void *)&ggtt_mm->ggtt_mm.last_partial_data +
				last_off, bytes);

			ggtt_mm->ggtt_mm.last_partial_off = -1UL;
		} else {
			int last_offset;

			gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
					ggtt_mm->ggtt_mm.last_partial_off, off,
					bytes, info->gtt_entry_size);

			/* set host ggtt entry to scratch page and clear
			 * virtual ggtt entry as not present for last
			 * partially write offset
			 */
			last_offset = ggtt_mm->ggtt_mm.last_partial_off &
					(~(info->gtt_entry_size - 1));

			ggtt_get_host_entry(ggtt_mm, &m, last_offset);
			ggtt_invalidate_pte(vgpu, &m);
			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
			ops->clear_present(&m);
			ggtt_set_host_entry(ggtt_mm, &m, last_offset);
			ggtt_invalidate(gvt->dev_priv);

			ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
			ops->clear_present(&e);
			ggtt_set_guest_entry(ggtt_mm, &e, last_offset);

			ggtt_mm->ggtt_mm.last_partial_off = off;
			ggtt_mm->ggtt_mm.last_partial_data = e.val64;

			return 0;
		}
	}

	if (ops->test_present(&e)) {
		gfn = ops->get_pfn(&e);
		m = e;
+2 −0
Original line number Diff line number Diff line
@@ -150,6 +150,8 @@ struct intel_vgpu_mm {
		} ppgtt_mm;
		struct {
			void *virtual_ggtt;
			unsigned long last_partial_off;
			u64 last_partial_data;
		} ggtt_mm;
	};
};
+17 −11
Original line number Diff line number Diff line
@@ -2002,7 +2002,6 @@ int i915_gem_fault(struct vm_fault *vmf)
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned int flags;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
@@ -2038,27 +2037,34 @@ int i915_gem_fault(struct vm_fault *vmf)
		goto err_unlock;
	}

	/* If the object is smaller than a couple of partial vma, it is
	 * not worth only creating a single partial vma - we may as well
	 * clear enough space for the full object.
	 */
	flags = PIN_MAPPABLE;
	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
		flags |= PIN_NONBLOCK | PIN_NONFAULT;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONBLOCK |
				       PIN_NONFAULT);
	if (IS_ERR(vma)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		/* Userspace is now writing through an untracked VMA, abandon
		flags = PIN_MAPPABLE;
		if (view.type == I915_GGTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */
		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
		if (IS_ERR(vma) && !view.type) {
			flags = PIN_MAPPABLE;
			view.type = I915_GGTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
		}
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
+1 −1
Original line number Diff line number Diff line
@@ -109,7 +109,7 @@ vma_create(struct drm_i915_gem_object *obj,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size >= obj->base.size);
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;