Commit 9c117313 authored by Dave Airlie

Merge tag 'drm-intel-next-fixes-2017-11-10' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

drm/i915 fixes for v4.15

* tag 'drm-intel-next-fixes-2017-11-10' of git://anongit.freedesktop.org/drm/drm-intel:
  drm/i915: Reorder context-close to avoid calling i915_vma_close() under RCU
  drm/i915: Move init_clock_gating() back to where it was
  drm/i915: Prune the reservation shared fence array
  drm/i915: Idle the GPU before shrinking everything
  drm/i915: Lock llist_del_first() vs llist_del_all()
  drm/i915: Calculate ironlake intermediate watermarks correctly, v2.
  drm/i915: Disable lazy PPGTT page table optimization for vGPU
  drm/i915/execlists: Remove the priority "optimisation"
  drm/i915: Filter out spurious execlists context-switch interrupts
parents fee25cb9 e8c49fa9
drivers/gpu/drm/i915/i915_gem.c +7 −1
@@ -4603,11 +4603,17 @@ static void __i915_gem_free_work(struct work_struct *work)
 	 * unbound now.
 	 */
 
+	spin_lock(&i915->mm.free_lock);
 	while ((freed = llist_del_all(&i915->mm.free_list))) {
+		spin_unlock(&i915->mm.free_lock);
+
 		__i915_gem_free_objects(i915, freed);
 		if (need_resched())
-			break;
+			return;
+
+		spin_lock(&i915->mm.free_lock);
 	}
+	spin_unlock(&i915->mm.free_lock);
 }
 
 static void __i915_gem_free_object_rcu(struct rcu_head *head)
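
The rule being enforced comes from <linux/llist.h>: llist_add() is safe against everything, but a consumer that pops single nodes with llist_del_first() must be serialized by a lock against any consumer using llist_del_all(). Below is a minimal userspace model of the pattern this hunk adopts; the pthread/stdatomic scaffolding and all names are illustrative, not the driver's. The worker holds the lock only across the del_all handoff, frees outside it, and must return rather than break once the lock has been dropped, since break would fall through to the second unlock.

/* Minimal userspace model of the locking adopted in __i915_gem_free_work().
 * All names and the pthread/stdatomic scaffolding are illustrative. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

static _Atomic(struct node *) free_list;
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

/* llist_add() analogue: lock-free push, safe against all other operations. */
static void list_add(struct node *n)
{
	struct node *first = atomic_load(&free_list);
	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&free_list, &first, n));
}

/* llist_del_all() analogue: detach the whole chain in one exchange. Per
 * <linux/llist.h>, this must be serialized by a lock against any other
 * consumer that pops single nodes with llist_del_first(). */
static struct node *list_del_all(void)
{
	return atomic_exchange(&free_list, NULL);
}

static bool need_resched(void) { return false; } /* stub */

static void free_work(void) /* mirrors the fixed __i915_gem_free_work() */
{
	struct node *freed;

	pthread_mutex_lock(&free_lock);
	while ((freed = list_del_all())) {
		pthread_mutex_unlock(&free_lock);

		while (freed) { /* free the batch outside the lock */
			struct node *next = freed->next;
			free(freed);
			freed = next;
		}

		if (need_resched())
			return; /* lock already dropped here: a 'break'
				 * would fall through to a second unlock */

		pthread_mutex_lock(&free_lock);
	}
	pthread_mutex_unlock(&free_lock);
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));
		if (n)
			list_add(n);
	}
	free_work();
	printf("drained: %p\n", (void *)atomic_load(&free_list));
	return 0;
}
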
drivers/gpu/drm/i915/i915_gem_context.c +6 −6
@@ -106,14 +106,9 @@ static void lut_close(struct i915_gem_context *ctx)
 
 	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
 		struct i915_vma *vma = rcu_dereference_raw(*slot);
-		struct drm_i915_gem_object *obj = vma->obj;
 
 		radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
-
-		if (!i915_vma_is_ggtt(vma))
-			i915_vma_close(vma);
-
-		__i915_gem_object_release_unless_active(obj);
+		__i915_gem_object_release_unless_active(vma->obj);
 	}
 }
 
@@ -198,6 +193,11 @@ static void context_close(struct i915_gem_context *ctx)
 {
 	i915_gem_context_set_closed(ctx);
 
+	/*
+	 * The LUT uses the VMA as a backpointer to unref the object,
+	 * so we need to clear the LUT before we close all the VMA (inside
+	 * the ppgtt).
+	 */
 	lut_close(ctx);
 	if (ctx->ppgtt)
 		i915_ppgtt_close(&ctx->ppgtt->base);
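
A toy model of the ordering the new comment documents; every type and name below is invented for illustration, with only lut_close()/context_close() echoed from the diff. The LUT reaches each object only through a vma backpointer, so it has to be drained while the vmas are still alive, that is, before the ppgtt teardown frees them.

/* Toy model of the context_close() ordering: the handle LUT reaches each
 * object only through a vma backpointer, so it must be drained while the
 * vmas still exist. All types and names here are invented. */
#include <stdlib.h>

struct obj { int refs; };
struct vma { struct obj *obj; };

struct ctx {
	struct vma *lut[16];  /* handle -> vma lookup table */
	unsigned int count;
	struct vma *vmas[16]; /* owned by the "ppgtt" side */
	unsigned int nvma;
};

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		free(o);
}

static void lut_close(struct ctx *ctx)
{
	/* Drop the LUT's object references via the vma backpointers. */
	while (ctx->count)
		obj_put(ctx->lut[--ctx->count]->obj);
}

static void ppgtt_close(struct ctx *ctx)
{
	/* Tears down the vmas themselves. */
	while (ctx->nvma)
		free(ctx->vmas[--ctx->nvma]);
}

static void context_close(struct ctx *ctx)
{
	lut_close(ctx);   /* must precede ppgtt_close(): afterwards the
			   * lut entries would point at freed vmas */
	ppgtt_close(ctx);
}

int main(void)
{
	struct ctx ctx = {0};
	struct obj *o = calloc(1, sizeof(*o));
	struct vma *v = calloc(1, sizeof(*v));

	if (!o || !v)
		return 1;
	o->refs = 1;
	v->obj = o;
	ctx.lut[ctx.count++] = v;
	ctx.vmas[ctx.nvma++] = v;

	context_close(&ctx);
	return 0;
}
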
drivers/gpu/drm/i915/i915_gem_gtt.c +1 −1
@@ -1341,7 +1341,7 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
 			if (IS_ERR(pt))
 				goto unwind;
 
-			if (count < GEN8_PTES)
+			if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
 				gen8_initialize_pt(vm, pt);
 
 			gen8_ppgtt_set_pde(vm, pd, pt, pde);
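
The optimization disabled here skips pre-filling a new page table with scratch entries when the caller is about to overwrite all GEN8_PTES of them anyway; under vGPU the host shadows the table as soon as it is linked into the directory, so a table left uninitialized can be observed before the caller's writes land. A rough standalone model, with invented sizes and names:

/* Rough model of the lazy page-table init being turned off under vGPU:
 * the scratch fill is skippable only when every entry will be overwritten
 * before anyone can look, and a shadowing host can look as soon as the
 * table is hooked up. Sizes and names are invented. */
#include <stdbool.h>
#include <stdint.h>

#define N_PTES 512
#define SCRATCH 0x1ull /* "safe" placeholder entry */

static bool vgpu_active; /* stand-in for intel_vgpu_active() */

static void alloc_and_hook_pt(uint64_t pt[N_PTES], unsigned int count)
{
	/* Skip the scratch fill only when all N_PTES entries are about
	 * to be written AND no host is shadowing the table. */
	if (count < N_PTES || vgpu_active)
		for (unsigned int i = 0; i < N_PTES; i++)
			pt[i] = SCRATCH;

	/* ...link pt into the page directory (the set_pde step), after
	 * which a shadowing host may read it at any time... */
}

int main(void)
{
	static uint64_t pt[N_PTES];

	vgpu_active = true;
	alloc_and_hook_pt(pt, N_PTES); /* fully covered, still scrubbed */
	return 0;
}
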
drivers/gpu/drm/i915/i915_gem_shrinker.c +12 −0
@@ -162,6 +162,18 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	if (!shrinker_lock(dev_priv, &unlock))
 		return 0;
 
+	/*
+	 * When shrinking the active list, also consider active contexts.
+	 * Active contexts are pinned until they are retired, and so can
+	 * not be simply unbound to retire and unpin their pages. To shrink
+	 * the contexts, we must wait until the gpu is idle.
+	 *
+	 * We don't care about errors here; if we cannot wait upon the GPU,
+	 * we will free as much as we can and hope to get a second chance.
+	 */
+	if (flags & I915_SHRINK_ACTIVE)
+		i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
+
 	trace_i915_gem_shrink(dev_priv, target, flags);
 	i915_gem_retire_requests(dev_priv);
 
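In miniature, the change is a quiesce-then-reclaim step: active objects stay pinned until the GPU retires their requests, so a shrink that includes the active list idles the GPU first and, as the comment says, shrugs off failure. A sketch with invented types and names follows; nothing in it is the driver's actual API.

/* Sketch of the idle-before-shrink step, with invented types: objects
 * pinned by in-flight work cannot be unbound, so an ACTIVE shrink waits
 * (best effort) for the GPU to idle and retire them first. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SHRINK_ACTIVE (1u << 0)

struct object { struct object *next; bool pinned; size_t pages; };

static int wait_for_idle(void) { return 0; } /* stub; may fail */

static void retire_requests(struct object *list)
{
	for (struct object *o = list; o; o = o->next)
		o->pinned = false; /* idle GPU => nothing stays pinned */
}

static size_t shrink(struct object *list, unsigned int flags)
{
	size_t freed = 0;

	/* Failure is shrugged off: if the GPU will not idle, we still
	 * free whatever is already unpinned below. */
	if ((flags & SHRINK_ACTIVE) && wait_for_idle() == 0)
		retire_requests(list);

	for (struct object *o = list; o; o = o->next) {
		if (o->pinned)
			continue;  /* still busy: cannot unbind */
		freed += o->pages; /* ...unbind and release pages... */
	}
	return freed;
}

int main(void)
{
	struct object b = { NULL, true, 8 }, a = { &b, false, 4 };

	printf("freed %zu pages\n", shrink(&a, SHRINK_ACTIVE));
	return 0;
}
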
drivers/gpu/drm/i915/i915_guc_submission.c +3 −0
@@ -610,6 +610,7 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
 	execlists->first = rb;
 	if (submit) {
 		port_assign(port, last);
+		execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
 		i915_guc_submit(engine);
 	}
 	spin_unlock_irq(&engine->timeline->lock);
@@ -633,6 +634,8 @@ static void i915_guc_irq_handler(unsigned long data)
 
 		rq = port_request(&port[0]);
 	}
+	if (!rq)
+		execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
 
 	if (!port_isset(last_port))
 		i915_guc_dequeue(engine);
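
These two hooks are the GuC half of "Filter out spurious execlists context-switch interrupts" from the list above: submission raises EXECLISTS_ACTIVE_USER before any user work is in flight, the interrupt handler clears it once the last port drains (rq == NULL), and an event seen while the bit is clear is treated as stale and ignored. Below is a small userspace model of that filter; the atomics and every name are illustrative.

/* Userspace model of the EXECLISTS_ACTIVE_USER filter: an event that
 * arrives while no user work is marked in flight is spurious and is
 * dropped rather than popping an empty port. Names are illustrative. */
#include <stdatomic.h>
#include <stdio.h>

#define ACTIVE_USER (1u << 0)

static atomic_uint active;
static int in_flight; /* requests currently in the ports */

static void dequeue_and_submit(int n)
{
	if (n) {
		in_flight = n;
		/* set BEFORE the hardware can raise an interrupt */
		atomic_fetch_or(&active, ACTIVE_USER);
		/* ...write ports, ring the doorbell... */
	}
}

static void irq_context_switch(void)
{
	if (!(atomic_load(&active) & ACTIVE_USER)) {
		printf("spurious event: ignored\n");
		return;
	}
	if (--in_flight == 0) /* last port drained */
		atomic_fetch_and(&active, ~ACTIVE_USER);
}

int main(void)
{
	irq_context_switch(); /* stale event from a previous cycle */
	dequeue_and_submit(2);
	irq_context_switch(); /* completes the first request */
	irq_context_switch(); /* completes the second, clears ACTIVE_USER */
	irq_context_switch(); /* late duplicate: filtered */
	return 0;
}
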