
Commit aa653a68 authored by Chris Wilson

drm/i915: Be more careful when unbinding vma



When we call i915_vma_unbind(), we will wait upon outstanding rendering.
This will also trigger a retirement phase, which may update the object
lists. If we extend request tracking to the VMA itself (rather than
keeping it at the encompassing object), then there is the potential for
obj->vma_list to be modified for other elements upon i915_vma_unbind(). As
a result, if we walk over the object list and call i915_vma_unbind(), we
need to be prepared for that list to change.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470293567-10811-8-git-send-email-chris@chris-wilson.co.uk
parent 15717de2
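
The new helper at the heart of this change, i915_gem_object_unbind(), walks obj->vma_list without assuming the list stays stable: each vma is moved onto a private still_in_list before it is unbound, and the survivors are spliced back afterwards, so retirement triggered inside i915_vma_unbind() can reorder or delete other entries without corrupting the walk. Below is a minimal, self-contained userspace C sketch of that pattern; struct node, unbind_one() and unbind_all() are illustrative stand-ins (not driver API), and the list primitives mimic just enough of <linux/list.h> to run.

#include <stdio.h>

/* Minimal circular doubly-linked list, modelled on <linux/list.h>. */
struct node {
	struct node *prev, *next;
	int id;
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static int list_empty(const struct node *head)
{
	return head->next == head;
}

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Stand-in for i915_vma_unbind(): as a side effect it may delete or
 * move *other* nodes, which is what defeats list_for_each_entry_safe(). */
static int unbind_one(struct node *n)
{
	printf("unbinding %d\n", n->id);
	return 0;
}

static int unbind_all(struct node *list)
{
	struct node still_in_list;
	int ret = 0;

	list_init(&still_in_list);
	while (!list_empty(list)) {
		struct node *n = list->next;	/* re-read the head each pass */

		/* Park the node on a private list before operating on it:
		 * even if unbind_one() mutates *list, the next iteration
		 * simply takes whichever node is first now. */
		list_del(n);
		list_add_tail(n, &still_in_list);
		ret = unbind_one(n);
		if (ret)
			break;
	}
	/* Put the processed nodes back so the caller's list stays intact. */
	while (!list_empty(&still_in_list)) {
		struct node *n = still_in_list.next;

		list_del(n);
		list_add_tail(n, list);
	}
	return ret;
}

int main(void)
{
	struct node head, a = { .id = 1 }, b = { .id = 2 };

	list_init(&head);
	list_add_tail(&a, &head);
	list_add_tail(&b, &head);
	return unbind_all(&head);
}

A _safe iterator only tolerates deletion of the element currently being visited; by never holding a pointer into the shared list across the unbind call, this scheme tolerates arbitrary mutation. Note the sketch initialises ret to 0 so an empty list reports success.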
+2 −0
@@ -3052,6 +3052,8 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
  * _guarantee_ VMA in question is _not in use_ anywhere.
  */
 int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
+42 −15
@@ -283,17 +283,37 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
 	.release = i915_gem_object_release_phys,
 };
 
+int
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+	LIST_HEAD(still_in_list);
+	int ret;
+
+	/* The vma will only be freed if it is marked as closed, and if we wait
+	 * upon rendering to the vma, we may unbind anything in the list.
+	 */
+	while ((vma = list_first_entry_or_null(&obj->vma_list,
+					       struct i915_vma,
+					       obj_link))) {
+		list_move_tail(&vma->obj_link, &still_in_list);
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			break;
+	}
+	list_splice(&still_in_list, &obj->vma_list);
+
+	return ret;
+}
+
 static int
 drop_pages(struct drm_i915_gem_object *obj)
 {
-	struct i915_vma *vma, *next;
 	int ret;
 
 	i915_gem_object_get(obj);
-	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
-		if (i915_vma_unbind(vma))
-			break;
-
-	ret = i915_gem_object_put_pages(obj);
+	ret = i915_gem_object_unbind(obj);
+	if (ret == 0)
+		ret = i915_gem_object_put_pages(obj);
 	i915_gem_object_put(obj);
 
@@ -3450,8 +3470,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct i915_vma *vma, *next;
+	struct i915_vma *vma;
 	int ret = 0;
 
 	if (obj->cache_level == cache_level)
@@ -3462,7 +3481,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	 * catch the issue of the CS prefetch crossing page boundaries and
 	 * reading an invalid PTE on older architectures.
 	 */
-	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
+restart:
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
@@ -3471,11 +3491,18 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			return -EBUSY;
 		}
 
-		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
-			ret = i915_vma_unbind(vma);
-			if (ret)
-				return ret;
-		}
+		if (i915_gem_valid_gtt_space(vma, cache_level))
+			continue;
+
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			return ret;
+
+		/* As unbinding may affect other elements in the
+		 * obj->vma_list (due to side-effects from retiring
+		 * an active vma), play safe and restart the iterator.
+		 */
+		goto restart;
 	}
 
 	/* We can reuse the existing drm_mm nodes but need to change the
@@ -3494,7 +3521,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		if (ret)
 			return ret;
 
-		if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
+		if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
 			/* Access to snoopable pages through the GTT is
 			 * incoherent and on some machines causes a hard
 			 * lockup. Relinquish the CPU mmaping to force
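
i915_gem_object_set_cache_level() above handles the same hazard with a different idiom: it must examine every vma in place (deciding per-vma whether unbinding is needed at all), so instead of parking entries on a private list it restarts the entire scan after any call that may mutate the list. Vmas that are already compatible, or whose node is no longer allocated after a previous pass, hit the continue fast path, so each restart makes forward progress. Here is a minimal sketch of that restart pattern, reusing struct node, the list helpers and unbind_one() from the sketch above; fits() and make_coherent() are hypothetical names, not driver API.

/* Hypothetical predicate standing in for i915_gem_valid_gtt_space():
 * nonzero if the node is already compatible with the requested level. */
static int fits(const struct node *n, int level)
{
	return n->id >= level;	/* arbitrary toy criterion */
}

static int make_coherent(struct node *list, int level)
{
	struct node *n;
	int ret;

restart:
	for (n = list->next; n != list; n = n->next) {
		if (fits(n, level))
			continue;	/* compatible: safe to keep walking */

		/* Unlink before unbinding; in the driver the vma instead
		 * stays listed but is skipped on the next pass once its
		 * GTT node is no longer allocated. */
		list_del(n);
		ret = unbind_one(n);
		if (ret)
			return ret;

		/* unbind_one() may have mutated the list (retirement can
		 * delete neighbours), so n->next is not to be trusted:
		 * abandon the iterator and rescan from the start. Each
		 * restart sees strictly fewer non-fitting nodes, so the
		 * loop terminates. */
		goto restart;
	}
	return 0;
}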
+1 −7
@@ -172,8 +172,6 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 		       (obj = list_first_entry_or_null(phase->list,
 						       typeof(*obj),
 						       global_list))) {
-			struct i915_vma *vma, *v;
-
 			list_move_tail(&obj->global_list, &still_in_list);
 
 			if (flags & I915_SHRINK_PURGEABLE &&
@@ -193,11 +191,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			i915_gem_object_get(obj);
 
 			/* For the unbound phase, this should be a no-op! */
-			list_for_each_entry_safe(vma, v,
-						 &obj->vma_list, obj_link)
-				if (i915_vma_unbind(vma))
-					break;
-
+			i915_gem_object_unbind(obj);
 			if (i915_gem_object_put_pages(obj) == 0)
 				count += obj->base.size >> PAGE_SHIFT;
 
+1 −3
@@ -104,7 +104,6 @@ static void cancel_userptr(struct work_struct *work)
 
 	if (obj->pages != NULL) {
 		struct drm_i915_private *dev_priv = to_i915(dev);
-		struct i915_vma *vma, *tmp;
 		bool was_interruptible;
 
 		wait_rendering(obj);
@@ -112,8 +111,7 @@ static void cancel_userptr(struct work_struct *work)
 		was_interruptible = dev_priv->mm.interruptible;
 		dev_priv->mm.interruptible = false;
 
-		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
-			WARN_ON(i915_vma_unbind(vma));
+		WARN_ON(i915_gem_object_unbind(obj));
 		WARN_ON(i915_gem_object_put_pages(obj));
 
 		dev_priv->mm.interruptible = was_interruptible;