Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3d574a6b authored by Chris Wilson
Browse files

drm/i915: Remove walk over obj->vma_list for the shrinker



In the next patch, we want to reduce the lock coverage within the
shrinker, and one of the dangerous walks we have is over obj->vma_list.
We are only walking the obj->vma_list in order to check whether it has
been permanently pinned by HW access, typically via use on the scanout.
But we have a couple of other long term pins, the context objects for
which we currently have to check the individual vma pin_count. If we
instead mark these using obj->pin_display, we can forgo the dangerous
and sometimes slow list iteration.

v2: Rearrange code to try and avoid confusion from false associations
due to arrangement of whitespace along with rebasing on obj->pin_global.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171013202621.7276-4-chris@chris-wilson.co.uk
parent f46250e4
Loading
Loading
Loading
Loading
+7 −20
Original line number Diff line number Diff line
@@ -71,25 +71,6 @@ static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
	mutex_unlock(&dev_priv->drm.struct_mutex);
}

/* Return true if any GGTT vma bound to @obj is pinned.
 *
 * Walks obj->vma_list, which keeps GGTT vma at the head; the walk
 * stops at the first ppGTT vma since (per the comment below) only
 * GGTT vma may be permanently pinned.  Used by the shrinker to skip
 * objects whose pages cannot be reclaimed.
 */
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		/* Only GGTT vma may be permanently pinned, and are always
		 * at the start of the list. We can stop hunting as soon
		 * as we see a ppGTT vma.
		 */
		if (!i915_vma_is_ggtt(vma))
			break;

		if (i915_vma_is_pinned(vma))
			return true;
	}

	return false;
}

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
@@ -115,7 +96,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
	if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
		return false;

	if (any_vma_pinned(obj))
	/* If any vma are "permanently" pinned, it will prevent us from
	 * reclaiming the obj->mm.pages. We only allow scanout objects to claim
	 * a permanent pin, along with a few others like the context objects.
	 * To simplify the scan, and to avoid walking the list of vma under the
	 * object, we just check the count of its permanently pinned.
	 */
	if (obj->pin_global)
		return false;

	/* We can only return physical pages to the system if we can either
+2 −0
Original line number Diff line number Diff line
@@ -1093,6 +1093,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
		i915_ggtt_offset(ce->ring->vma);

	ce->state->obj->mm.dirty = true;
	ce->state->obj->pin_global++;

	i915_gem_context_get(ctx);
out:
@@ -1120,6 +1121,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,

	intel_ring_unpin(ce->ring);

	ce->state->obj->pin_global--;
	i915_gem_object_unpin_map(ce->state->obj);
	i915_vma_unpin(ce->state);

+7 −1
Original line number Diff line number Diff line
@@ -1244,6 +1244,8 @@ int intel_ring_pin(struct intel_ring *ring,
	if (IS_ERR(addr))
		goto err;

	vma->obj->pin_global++;

	ring->vaddr = addr;
	return 0;

@@ -1275,6 +1277,7 @@ void intel_ring_unpin(struct intel_ring *ring)
		i915_gem_object_unpin_map(ring->vma->obj);
	ring->vaddr = NULL;

	ring->vma->obj->pin_global--;
	i915_vma_unpin(ring->vma);
}

@@ -1439,6 +1442,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
			goto err;

		ce->state->obj->mm.dirty = true;
		ce->state->obj->pin_global++;
	}

	/* The kernel context is only used as a placeholder for flushing the
@@ -1473,8 +1477,10 @@ static void intel_ring_context_unpin(struct intel_engine_cs *engine,
	if (--ce->pin_count)
		return;

	if (ce->state)
	if (ce->state) {
		ce->state->obj->pin_global--;
		i915_vma_unpin(ce->state);
	}

	i915_gem_context_put(ctx);
}