
Commit 1ec9e26d authored by Daniel Vetter

drm/i915: Consolidate binding parameters into flags



Anything more than just one bool parameter is a pain to read;
symbolic constants are much better.
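For illustration only (not part of the patch), a minimal stand-alone sketch of the pattern: the hypothetical pin() below stands in for the driver's pin functions, and the PIN_* values are the same ones this patch adds to i915_drv.h.

#include <stdio.h>

#define PIN_MAPPABLE 0x1	/* same values the patch introduces below */
#define PIN_NONBLOCK 0x2

/* Hypothetical stand-in for i915_gem_object_pin(): a single flags word
 * replaces two positional bools that are opaque at the call site. */
static int pin(unsigned alignment, unsigned flags)
{
	printf("align=%u mappable=%d nonblock=%d\n", alignment,
	       !!(flags & PIN_MAPPABLE), !!(flags & PIN_NONBLOCK));
	return 0;
}

int main(void)
{
	/* pin(4096, true, true) forces the reader to count arguments;
	 * the flags spelling documents intent at a glance. */
	return pin(4096, PIN_MAPPABLE | PIN_NONBLOCK);
}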

Split out from Chris' vma-binding rework patch.

v2: Undo the behaviour change in object_pin that Chris spotted.

v3: Split out a misplaced hunk to handle set_cache_level errors,
spotted by Jani.

v4: Keep the current over-zealous binding logic in the execbuffer code
working with a quick hack while the overall binding code gets shuffled
around.

v5: Reorder the PIN_ flags for a more natural patch split-up.

v6: Pull out the PIN_GLOBAL split-up again.
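(A note on v4 and v6, for readers of the diff below: with the PIN_GLOBAL flag pulled back out of this patch, the new PIN_* request flags are translated into the low-level GLOBAL_BIND bind flag at the bind_vma() call sites. The i915_gem.c hunk does it inline as

	vma->bind_vma(vma, obj->cache_level,
		      flags & PIN_MAPPABLE ? GLOBAL_BIND : 0);

and the execbuffer "temporary hack" recomputes the same bind flag from EXEC_OBJECT_NEEDS_GTT.)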

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ben Widawsky <benjamin.widawsky@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 931c1c26
drivers/gpu/drm/i915/i915_drv.h +6 −8
@@ -2076,11 +2076,12 @@ void i915_init_vm(struct drm_i915_private *dev_priv,
 void i915_gem_free_object(struct drm_gem_object *obj);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
+#define PIN_MAPPABLE 0x1
+#define PIN_NONBLOCK 0x2
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm,
 				     uint32_t alignment,
-				     bool map_and_fenceable,
-				     bool nonblocking);
+				     unsigned flags);
 void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
@@ -2283,11 +2284,9 @@ i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
 static inline int __must_check
 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 		      uint32_t alignment,
-		      bool map_and_fenceable,
-		      bool nonblocking)
+		      unsigned flags)
 {
-	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
-				   map_and_fenceable, nonblocking);
+	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags);
 }
 
 /* i915_gem_context.c */
@@ -2331,8 +2330,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
 					  int min_size,
 					  unsigned alignment,
 					  unsigned cache_level,
-					  bool mappable,
-					  bool nonblock);
+					  unsigned flags);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
 
drivers/gpu/drm/i915/i915_gem.c +25 −37
@@ -43,12 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool readonly);
-static __must_check int
-i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
-			   struct i915_address_space *vm,
-			   unsigned alignment,
-			   bool map_and_fenceable,
-			   bool nonblocking);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
 				struct drm_i915_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
@@ -605,7 +599,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	char __user *user_data;
 	int page_offset, page_length, ret;
 
-	ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
+	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
 	if (ret)
 		goto out;
 
@@ -1411,7 +1405,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/* Now bind it into the GTT if needed */
-	ret = i915_gem_obj_ggtt_pin(obj,  0, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
 	if (ret)
 		goto unlock;
 
@@ -2721,7 +2715,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	if (!drm_mm_node_allocated(&vma->node)) {
 		i915_gem_vma_destroy(vma);
-
 		return 0;
 	}
 
@@ -3219,14 +3212,13 @@ static int
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   unsigned alignment,
-			   bool map_and_fenceable,
-			   bool nonblocking)
+			   unsigned flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	size_t gtt_max =
-		map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
+		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
 
@@ -3242,14 +3234,14 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 					   obj->tiling_mode, false);
 
 	if (alignment == 0)
-		alignment = map_and_fenceable ? fence_alignment :
+		alignment = flags & PIN_MAPPABLE ? fence_alignment :
 						unfenced_alignment;
-	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
+	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
 		DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
 		return -EINVAL;
 	}
 
-	size = map_and_fenceable ? fence_size : obj->base.size;
+	size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
 
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
@@ -3257,7 +3249,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	if (obj->base.size > gtt_max) {
 		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
 			  obj->base.size,
-			  map_and_fenceable ? "mappable" : "total",
+			  flags & PIN_MAPPABLE ? "mappable" : "total",
 			  gtt_max);
 		return -E2BIG;
 	}
@@ -3281,9 +3273,7 @@ search_free:
 						  DRM_MM_SEARCH_DEFAULT);
 	if (ret) {
 		ret = i915_gem_evict_something(dev, vm, size, alignment,
-					       obj->cache_level,
-					       map_and_fenceable,
-					       nonblocking);
+					       obj->cache_level, flags);
 		if (ret == 0)
 			goto search_free;
 
@@ -3314,9 +3304,9 @@ search_free:
 		obj->map_and_fenceable = mappable && fenceable;
 	}
 
-	WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
+	WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
 
-	trace_i915_vma_bind(vma, map_and_fenceable);
+	trace_i915_vma_bind(vma, flags);
 	i915_gem_verify_gtt(dev);
 	return 0;
 
@@ -3687,7 +3677,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
 	 * always use map_and_fenceable for all scanout buffers.
 	 */
-	ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
 	if (ret)
 		goto err_unpin_display;
 
@@ -3843,30 +3833,28 @@ int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
 		    uint32_t alignment,
-		    bool map_and_fenceable,
-		    bool nonblocking)
+		    unsigned flags)
 {
-	const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
 	struct i915_vma *vma;
 	int ret;
 
-	WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
+	if (WARN_ON(flags & PIN_MAPPABLE && !i915_is_ggtt(vm)))
+		return -EINVAL;
 
 	vma = i915_gem_obj_to_vma(obj, vm);
 
 	if (vma) {
 		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 			return -EBUSY;
 
 		if ((alignment &&
 		     vma->node.start & (alignment - 1)) ||
-		    (map_and_fenceable && !obj->map_and_fenceable)) {
+		    (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
 			WARN(vma->pin_count,
 			     "bo is already pinned with incorrect alignment:"
 			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
 			     i915_gem_obj_offset(obj, vm), alignment,
-			     map_and_fenceable,
+			     flags & PIN_MAPPABLE,
 			     obj->map_and_fenceable);
 			ret = i915_vma_unbind(vma);
 			if (ret)
@@ -3875,9 +3863,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	}
 
 	if (!i915_gem_obj_bound(obj, vm)) {
-		ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
-						 map_and_fenceable,
-						 nonblocking);
+		ret = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
 		if (ret)
 			return ret;
 
@@ -3885,10 +3871,12 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 
 	vma = i915_gem_obj_to_vma(obj, vm);
 
-	vma->bind_vma(vma, obj->cache_level, flags);
+	vma->bind_vma(vma, obj->cache_level,
+		      flags & PIN_MAPPABLE ? GLOBAL_BIND : 0);
 
 	i915_gem_obj_to_vma(obj, vm)->pin_count++;
-	obj->pin_mappable |= map_and_fenceable;
+	if (flags & PIN_MAPPABLE)
+		obj->pin_mappable |= true;
 
 	return 0;
 }
@@ -3946,7 +3934,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	}
 
 	if (obj->user_pin_count == 0) {
-		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
+		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
 		if (ret)
 			goto out;
 	}
drivers/gpu/drm/i915/i915_gem_context.c +3 −6
@@ -258,8 +258,7 @@ i915_gem_create_context(struct drm_device *dev,
 		 * context.
 		 */
 		ret = i915_gem_obj_ggtt_pin(ctx->obj,
-					    get_context_alignment(dev),
-					    false, false);
+					    get_context_alignment(dev), 0);
 		if (ret) {
 			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
 			goto err_destroy;
@@ -335,8 +334,7 @@ void i915_gem_context_reset(struct drm_device *dev)
 
 		if (i == RCS) {
 			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
-						      get_context_alignment(dev),
-						      false, false));
+						      get_context_alignment(dev), 0));
 			/* Fake a finish/inactive */
 			dctx->obj->base.write_domain = 0;
 			dctx->obj->active = 0;
@@ -612,8 +610,7 @@ static int do_switch(struct intel_ring_buffer *ring,
 	/* Trying to pin first makes error handling easier. */
 	if (ring == &dev_priv->ring[RCS]) {
 		ret = i915_gem_obj_ggtt_pin(to->obj,
-					    get_context_alignment(ring->dev),
-					    false, false);
+					    get_context_alignment(ring->dev), 0);
 		if (ret)
 			return ret;
 	}
drivers/gpu/drm/i915/i915_gem_evict.c +5 −5
@@ -68,7 +68,7 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 int
 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 			 int min_size, unsigned alignment, unsigned cache_level,
-			 bool mappable, bool nonblocking)
+			 unsigned flags)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
@@ -76,7 +76,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	int ret = 0;
 	int pass = 0;
 
-	trace_i915_gem_evict(dev, min_size, alignment, mappable);
+	trace_i915_gem_evict(dev, min_size, alignment, flags);
 
 	/*
 	 * The goal is to evict objects and amalgamate space in LRU order.
@@ -102,7 +102,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	 */
 
 	INIT_LIST_HEAD(&unwind_list);
-	if (mappable) {
+	if (flags & PIN_MAPPABLE) {
 		BUG_ON(!i915_is_ggtt(vm));
 		drm_mm_init_scan_with_range(&vm->mm, min_size,
 					    alignment, cache_level, 0,
@@ -117,7 +117,7 @@ search_again:
 			goto found;
 	}
 
-	if (nonblocking)
+	if (flags & PIN_NONBLOCK)
 		goto none;
 
 	/* Now merge in the soon-to-be-expired objects... */
@@ -141,7 +141,7 @@ none:
 	/* Can we unpin some objects such as idle hw contents,
 	 * or pending flips?
 	 */
-	if (nonblocking)
+	if (flags & PIN_NONBLOCK)
 		return -ENOSPC;
 
 	/* Only idle the GPU and repeat the search once */
drivers/gpu/drm/i915/i915_gem_execbuffer.c +13 −6
@@ -544,19 +544,23 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	struct drm_i915_gem_object *obj = vma->obj;
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
-	bool need_fence, need_mappable;
-	u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
-		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
+	bool need_fence;
+	unsigned flags;
 	int ret;
 
+	flags = 0;
+
 	need_fence =
 		has_fenced_gpu_access &&
 		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 		obj->tiling_mode != I915_TILING_NONE;
-	need_mappable = need_fence || need_reloc_mappable(vma);
+	if (need_fence || need_reloc_mappable(vma))
+		flags |= PIN_MAPPABLE;
 
-	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
-				  false);
+	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+		flags |= PIN_MAPPABLE;
+
+	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
 	if (ret)
 		return ret;
 
@@ -585,6 +589,9 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
 	}
 
+	/* Temporary hack while we rework the binding logic. */
+	flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
+		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
 	vma->bind_vma(vma, obj->cache_level, flags);
 
 	return 0;