Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7325e4bd authored by Dave Airlie
Browse files

Merge tag 'drm-intel-fixes-2019-01-24' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

drm/i915 fixes for v5.0-rc4:
- fix priority boost
- gvt: fix destroy of shadow batch and indirect ctx

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87k1iu1a2e.fsf@intel.com
parents edaf6901 b42606b0
Loading
Loading
Loading
Loading
+6 −5
Original line number Diff line number Diff line
@@ -332,6 +332,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)

	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
	i915_gem_object_put(wa_ctx->indirect_ctx.obj);

	wa_ctx->indirect_ctx.obj = NULL;
	wa_ctx->indirect_ctx.shadow_va = NULL;
}

static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
@@ -911,11 +914,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)

	list_del_init(&workload->list);

	if (!workload->status) {
		release_shadow_batch_buffer(workload);
		release_shadow_wa_ctx(&workload->wa_ctx);
	}

	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
		/* if workload->status is not successful means HW GPU
		 * has occurred GPU hang or something wrong with i915/GVT,
@@ -1283,6 +1281,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_submission *s = &workload->vgpu->submission;

	release_shadow_batch_buffer(workload);
	release_shadow_wa_ctx(&workload->wa_ctx);

	if (workload->shadow_mm)
		intel_vgpu_mm_put(workload->shadow_mm);

+4 −0
Original line number Diff line number Diff line
@@ -303,6 +303,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
	 */
	if (!(prio & I915_PRIORITY_NEWCLIENT)) {
		prio |= I915_PRIORITY_NEWCLIENT;
		active->sched.attr.priority = prio;
		list_move_tail(&active->sched.link,
			       i915_sched_lookup_priolist(engine, prio));
	}
@@ -645,6 +646,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			GEM_BUG_ON(last &&
				   need_preempt(engine, last, rq_prio(rq)));

			/*
			 * Can we combine this request with the current port?
			 * It has to be the same context/ringbuffer and not