
Commit de4e783a authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Tidy batch pool logic



Move the madvise logic out of the execbuffer main path into the
relatively rare allocation path, making the execbuffer manipulation less
fragile.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent ed9ddd25
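
The change is easiest to read as an ownership rule: the batch pool pins the object's pages (and sets its madvise state) when it hands a buffer out, so the execbuffer path no longer juggles that state and only has to unpin when it is done. Below is a minimal, self-contained sketch of that pattern; it is not kernel code, and the names buf_pool, pool_get and buf_put are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct buf {
	size_t size;
	int pin_count;		/* stand-in for obj->pages_pin_count */
};

struct buf_pool {
	struct buf slot;	/* a single-slot pool keeps the sketch short */
	int in_use;
};

/* Allocation path: sizing, "madvise" and pinning all happen here. */
static struct buf *pool_get(struct buf_pool *pool, size_t size)
{
	if (pool->in_use)
		return NULL;
	pool->slot.size = size;
	pool->slot.pin_count = 1;	/* pages are pinned before return */
	pool->in_use = 1;
	return &pool->slot;
}

/* Fast path: the caller never touches that state except to drop the pin. */
static void buf_put(struct buf_pool *pool, struct buf *b)
{
	b->pin_count--;
	pool->in_use = 0;
}

int main(void)
{
	struct buf_pool pool = { .in_use = 0 };
	struct buf *b = pool_get(&pool, 4096);

	if (b == NULL)
		return EXIT_FAILURE;
	printf("got %zu-byte buffer, pin_count=%d\n", b->size, b->pin_count);
	buf_put(&pool, b);	/* the caller's single obligation */
	return EXIT_SUCCESS;
}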
+3 −9
@@ -869,6 +869,9 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
 	    batch_len + batch_start_offset > src_obj->base.size)
 		return ERR_PTR(-E2BIG);
 
+	if (WARN_ON(dest_obj->pages_pin_count == 0))
+		return ERR_PTR(-ENODEV);
+
 	ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
 	if (ret) {
 		DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
@@ -882,13 +885,6 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
 		goto unpin_src;
 	}
 
-	ret = i915_gem_object_get_pages(dest_obj);
-	if (ret) {
-		DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n");
-		goto unmap_src;
-	}
-	i915_gem_object_pin_pages(dest_obj);
-
 	ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
 	if (ret) {
 		DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
@@ -898,7 +894,6 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
 	dst = vmap_batch(dest_obj, 0, batch_len);
 	if (!dst) {
 		DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
-		i915_gem_object_unpin_pages(dest_obj);
 		ret = -ENOMEM;
 		goto unmap_src;
 	}
@@ -1129,7 +1124,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 	}
 
 	vunmap(batch_base);
-	i915_gem_object_unpin_pages(shadow_batch_obj);
 
 	return ret;
 }
+19 −20
@@ -67,25 +67,23 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
 					 struct drm_i915_gem_object,
 					 batch_pool_list);
 
-		WARN_ON(obj->active);
-
-		list_del_init(&obj->batch_pool_list);
+		list_del(&obj->batch_pool_list);
 		drm_gem_object_unreference(&obj->base);
 	}
 }
 
 /**
- * i915_gem_batch_pool_get() - select a buffer from the pool
+ * i915_gem_batch_pool_get() - allocate a buffer from the pool
  * @pool: the batch buffer pool
  * @size: the minimum desired size of the returned buffer
  *
- * Finds or allocates a batch buffer in the pool with at least the requested
- * size. The caller is responsible for any domain, active/inactive, or
- * purgeability management for the returned buffer.
+ * Returns an inactive buffer from @pool with at least @size bytes,
+ * with the pages pinned. The caller must i915_gem_object_unpin_pages()
+ * on the returned object.
  *
  * Note: Callers must hold the struct_mutex
  *
- * Return: the selected batch buffer object
+ * Return: the buffer object or an error pointer
  */
 struct drm_i915_gem_object *
 i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
@@ -98,7 +96,6 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 
 	list_for_each_entry_safe(tmp, next,
 				 &pool->cache_list, batch_pool_list) {
-
 		if (tmp->active)
 			continue;
 
@@ -114,25 +111,27 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 		 * but not 'too much' bigger. A better way to do this
 		 * might be to bucket the pool objects based on size.
 		 */
-		if (tmp->base.size >= size &&
-		    tmp->base.size <= (2 * size)) {
+		if (tmp->base.size >= size && tmp->base.size <= 2 * size) {
 			obj = tmp;
 			break;
 		}
 	}
 
-	if (!obj) {
+	if (obj == NULL) {
+		int ret;
+
 		obj = i915_gem_alloc_object(pool->dev, size);
-		if (!obj)
+		if (obj == NULL)
 			return ERR_PTR(-ENOMEM);
 
-		list_add_tail(&obj->batch_pool_list, &pool->cache_list);
-	}
-	else
-		/* Keep list in LRU order */
-		list_move_tail(&obj->batch_pool_list, &pool->cache_list);
-
-	obj->madv = I915_MADV_WILLNEED;
+		ret = i915_gem_object_get_pages(obj);
+		if (ret)
+			return ERR_PTR(ret);
+
+		obj->madv = I915_MADV_DONTNEED;
+	}
 
+	list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+	i915_gem_object_pin_pages(obj);
 	return obj;
 }
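
Per the updated kernel-doc above, i915_gem_batch_pool_get() now returns the object with its pages already pinned and its madvise state handled at allocation, so a caller's only remaining duty is to unpin. A rough, illustrative caller follows; it is not part of the commit, use_pooled_batch is a made-up name, and the fragment assumes driver context with struct_mutex held:

/* Illustrative only: the obligations left to users of the pool. */
static int use_pooled_batch(struct i915_gem_batch_pool *pool, size_t len)
{
	struct drm_i915_gem_object *obj;

	/* Returned with pages pinned and madvise handled by the pool. */
	obj = i915_gem_batch_pool_get(pool, len);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* ... fill and submit the shadow batch ... */

	/* The caller's only duty: drop the pin taken by the pool. */
	i915_gem_object_unpin_pages(obj);
	return 0;
}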
+5 −8
@@ -37,7 +37,6 @@
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 #define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
 #define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
-#define  __EXEC_OBJECT_PURGEABLE (1<<27)
 
 #define BATCH_OFFSET_BIAS (256*1024)
 
@@ -224,12 +223,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
 		vma->pin_count--;
 
-	if (entry->flags & __EXEC_OBJECT_PURGEABLE)
-		obj->madv = I915_MADV_DONTNEED;
-
-	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
-			  __EXEC_OBJECT_HAS_PIN |
-			  __EXEC_OBJECT_PURGEABLE);
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
 
 static void eb_destroy(struct eb_vmas *eb)
@@ -1165,11 +1159,13 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 	if (ret)
 		goto err;
 
+	i915_gem_object_unpin_pages(shadow_batch_obj);
+
 	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
 
 	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
 	vma->exec_entry = shadow_exec_entry;
-	vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
+	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
 	drm_gem_object_reference(&shadow_batch_obj->base);
 	list_add_tail(&vma->exec_list, &eb->vmas);
 
@@ -1178,6 +1174,7 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 	return shadow_batch_obj;
 
 err:
+	i915_gem_object_unpin_pages(shadow_batch_obj);
 	if (ret == -EACCES) /* unhandled chained batch */
 		return batch_obj;
 	else