Commit a65adaf8 authored by Chris Wilson

drm/i915: Track user GTT faulting per-vma



We don't wish to refault the entire object (i.e. its other vmas) when
unbinding one partial vma. To do this, track which vmas have been faulted
into the user's address space.

v2: Use a local vma_offset to tidy up a multiline unmap_mapping_range().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171009084401.29090-3-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent 3bd40735
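
Note: the hunks below rely on per-vma helpers (i915_vma_set_userfault(), i915_vma_unset_userfault(), i915_vma_has_userfault()) and the new obj->userfault_count field, but the i915_vma.h side of the patch is not included in this excerpt. A minimal sketch of what such helpers could look like, assuming the state lives in a spare bit of vma->flags (the bit name and the asserts are assumptions, not the actual patch):

/* Sketch only: helper names come from the hunks below; the flag bit and its
 * placement in vma->flags are assumed rather than taken from i915_vma.h.
 */
static inline bool i915_vma_set_userfault(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	/* return the previous state so callers can spot the 0 -> 1 transition */
	return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}

static inline void i915_vma_unset_userfault(struct i915_vma *vma)
{
	__clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}

static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}

With helpers along these lines, i915_gem_fault() below only bumps obj->userfault_count (and links the object onto mm.userfault_list) when a vma makes the unset-to-set transition, so the count tracks how many of the object's GGTT vmas are currently faulted into userspace.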
+1 −1
@@ -98,7 +98,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)

static char get_global_flag(struct drm_i915_gem_object *obj)
{
-	return !list_empty(&obj->userfault_link) ? 'g' : ' ';
+	return obj->userfault_count ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
+34 −16
@@ -1914,18 +1914,22 @@ int i915_gem_fault(struct vm_fault *vmf)
	if (ret)
		goto err_unpin;

-	/* Mark as being mmapped into userspace for later revocation */
-	assert_rpm_wakelock_held(dev_priv);
-	if (list_empty(&obj->userfault_link))
-		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->mappable);
	if (ret)
		goto err_fence;

+	/* Mark as being mmapped into userspace for later revocation */
+	assert_rpm_wakelock_held(dev_priv);
+	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
+		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+	GEM_BUG_ON(!obj->userfault_count);

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
@@ -1978,6 +1982,25 @@ int i915_gem_fault(struct vm_fault *vmf)
	return ret;
}

+static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+
+	GEM_BUG_ON(!obj->userfault_count);
+
+	obj->userfault_count = 0;
+	list_del(&obj->userfault_link);
+	drm_vma_node_unmap(&obj->base.vma_node,
+			   obj->base.dev->anon_inode->i_mapping);
+
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (!i915_vma_is_ggtt(vma))
+			break;
+
+		i915_vma_unset_userfault(vma);
+	}
+}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
@@ -2008,12 +2031,10 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
	lockdep_assert_held(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

-	if (list_empty(&obj->userfault_link))
+	if (!obj->userfault_count)
		goto out;

-	list_del_init(&obj->userfault_link);
-	drm_vma_node_unmap(&obj->base.vma_node,
-			   obj->base.dev->anon_inode->i_mapping);
+	__i915_gem_object_release_mmap(obj);

	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
@@ -2041,11 +2062,8 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
	 */

	list_for_each_entry_safe(obj, on,
-				 &dev_priv->mm.userfault_list, userfault_link) {
-		list_del_init(&obj->userfault_link);
-		drm_vma_node_unmap(&obj->base.vma_node,
-				   obj->base.dev->anon_inode->i_mapping);
-	}
+				 &dev_priv->mm.userfault_list, userfault_link)
+		__i915_gem_object_release_mmap(obj);

	/* The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
@@ -2068,7 +2086,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
		if (!reg->vma)
			continue;

-		GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}
@@ -4276,7 +4294,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
	mutex_init(&obj->mm.lock);

	INIT_LIST_HEAD(&obj->global_link);
-	INIT_LIST_HEAD(&obj->userfault_link);
	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->lut_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4457,6 +4474,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,

	llist_for_each_entry_safe(obj, on, freed, freed) {
		GEM_BUG_ON(obj->bind_count);
+		GEM_BUG_ON(obj->userfault_count);
		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
		GEM_BUG_ON(!list_empty(&obj->lut_list));

+1 −1
@@ -82,7 +82,7 @@ mark_free(struct drm_mm_scan *scan,
	if (i915_vma_is_pinned(vma))
		return false;

-	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
+	if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
		return false;

	list_add(&vma->evict_link, unwind);
+4 −3
@@ -240,7 +240,8 @@ static int fence_update(struct drm_i915_fence_reg *fence,
		/* Ensure that all userspace CPU access is completed before
		 * stealing the fence.
		 */
-		i915_gem_release_mmap(fence->vma->obj);
+		GEM_BUG_ON(fence->vma->fence != fence);
+		i915_vma_revoke_mmap(fence->vma);

		fence->vma->fence = NULL;
		fence->vma = NULL;
@@ -451,7 +452,7 @@ void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);

		if (fence->vma)
-			i915_gem_release_mmap(fence->vma->obj);
+			i915_vma_revoke_mmap(fence->vma);
	}
}

@@ -479,7 +480,7 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
		 */
		if (vma && !i915_gem_object_is_tiled(vma->obj)) {
			GEM_BUG_ON(!reg->dirty);
-			GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
+			GEM_BUG_ON(i915_vma_has_userfault(vma));

			list_move(&reg->link, &dev_priv->mm.fence_list);
			vma->fence = NULL;
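
The fence code above now revokes mappings per vma via i915_vma_revoke_mmap() instead of calling i915_gem_release_mmap() on the whole object; the i915_vma.c implementation (including the local vma_offset mentioned in the v2 note) is not part of this excerpt. A rough sketch of the intended behaviour, reusing only field paths visible in the hunks above and therefore not the actual patch:

/* Sketch only: zap just this vma's range of the GGTT mmap rather than every
 * mapping of the object, then drop the per-object bookkeeping if this was
 * the last faulted vma.
 */
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	/* A partial vma covers only a subrange of the object's mmap space. */
	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(obj->base.dev->anon_inode->i_mapping,
			    drm_vma_node_offset_addr(&obj->base.vma_node) + vma_offset,
			    vma->size, 1);

	i915_vma_unset_userfault(vma);
	if (!--obj->userfault_count)
		list_del(&obj->userfault_link);
}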
+1 −0
@@ -123,6 +123,7 @@ struct drm_i915_gem_object {
	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
+	unsigned int userfault_count;
	struct list_head userfault_link;

	struct list_head batch_pool_link;