
Commit 69c2fc89 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Remove the per-ring write list

This is now handled by a global flag to ensure we emit a flush before
the next serialisation point (if we failed to queue one previously).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 65ce3027
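For context, the "global flag" scheme the commit message refers to can be sketched roughly as follows. This is a minimal, self-contained illustration in plain C, not the driver's actual code: the names ring_state, gpu_caches_dirty, mark_gpu_write() and flush_if_dirty() are placeholders invented for the example. The point is that instead of queuing every written object on a per-ring gpu_write_list and walking that list at flush time, the ring simply remembers that a GPU write happened, and a single flush is emitted before the next serialisation point if one was not queued earlier.

#include <stdbool.h>

/* Hypothetical per-ring state; the real structure and flush emission
 * live in the i915 ring-buffer code, not here. */
struct ring_state {
	bool gpu_caches_dirty;	/* a GPU write has not been flushed yet */
};

/* Placeholder for writing a flush command into the ring. */
static int emit_flush(struct ring_state *ring)
{
	(void)ring;
	return 0;
}

/* Instead of moving each written object onto a per-ring gpu_write_list,
 * simply note that the ring's caches are dirty. */
static void mark_gpu_write(struct ring_state *ring)
{
	ring->gpu_caches_dirty = true;
}

/* At the next serialisation point (e.g. when a request is added), emit
 * the flush now if one was not queued earlier. */
static int flush_if_dirty(struct ring_state *ring)
{
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = emit_flush(ring);
	if (ret)
		return ret;

	ring->gpu_caches_dirty = false;
	return 0;
}

int main(void)
{
	struct ring_state ring = { .gpu_caches_dirty = false };

	mark_gpu_write(&ring);		/* a batch wrote through the GPU */
	return flush_if_dirty(&ring);	/* flushed before the next request */
}

A single dirty bit per ring is sufficient because a flush covers every pending GPU write on that ring, so there is no longer any need to remember which individual objects are still awaiting one.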
+0 −2
@@ -865,8 +865,6 @@ struct drm_i915_gem_object {
	/** This object's place on the active/inactive lists */
	struct list_head ring_list;
	struct list_head mm_list;
-	/** This object's place on GPU write list */
-	struct list_head gpu_write_list;
	/** This object's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

+1 −52
@@ -1465,7 +1465,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)

	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

-	BUG_ON(!list_empty(&obj->gpu_write_list));
	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
	BUG_ON(!obj->active);

@@ -1511,30 +1510,6 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
	return obj->madv == I915_MADV_DONTNEED;
}

-static void
-i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
-			       uint32_t flush_domains)
-{
-	struct drm_i915_gem_object *obj, *next;
-
-	list_for_each_entry_safe(obj, next,
-				 &ring->gpu_write_list,
-				 gpu_write_list) {
-		if (obj->base.write_domain & flush_domains) {
-			uint32_t old_write_domain = obj->base.write_domain;
-
-			obj->base.write_domain = 0;
-			list_del_init(&obj->gpu_write_list);
-			i915_gem_object_move_to_active(obj, ring,
-						       i915_gem_next_request_seqno(ring));
-
-			trace_i915_gem_object_change_domain(obj,
-							    obj->base.read_domains,
-							    old_write_domain);
-		}
-	}
-}
-
static u32
i915_gem_get_seqno(struct drm_device *dev)
{
@@ -1637,8 +1612,6 @@ i915_add_request(struct intel_ring_buffer *ring,
					   &dev_priv->mm.retire_work, HZ);
	}

-	WARN_ON(!list_empty(&ring->gpu_write_list));
-
	return 0;
}

@@ -1680,7 +1653,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
				       struct drm_i915_gem_object,
				       ring_list);

-		list_del_init(&obj->gpu_write_list);
		i915_gem_object_move_to_inactive(obj);
	}
}
@@ -2011,11 +1983,6 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
	u32 seqno;
	int ret;

-	/* This function only exists to support waiting for existing rendering,
-	 * not for emitting required flushes.
-	 */
-	BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
-
	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
@@ -2308,26 +2275,14 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
	if (ret)
		return ret;

-	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		i915_gem_process_flushing_list(ring, flush_domains);
-
	return 0;
}

static int i915_ring_idle(struct intel_ring_buffer *ring)
{
-	int ret;
-
-	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
+	if (list_empty(&ring->active_list))
		return 0;

-	if (!list_empty(&ring->gpu_write_list)) {
-		ret = i915_gem_flush_ring(ring,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-		if (ret)
-			return ret;
-	}
-
	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}

@@ -2343,10 +2298,6 @@ int i915_gpu_idle(struct drm_device *dev)
		if (ret)
			return ret;

-		/* Is the device fubar? */
-		if (WARN_ON(!list_empty(&ring->gpu_write_list)))
-			return -EBUSY;
-
		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
		if (ret)
			return ret;
@@ -3491,7 +3442,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
	INIT_LIST_HEAD(&obj->gtt_list);
	INIT_LIST_HEAD(&obj->ring_list);
	INIT_LIST_HEAD(&obj->exec_list);
-	INIT_LIST_HEAD(&obj->gpu_write_list);
	obj->madv = I915_MADV_WILLNEED;
	/* Avoid an unnecessary call to unbind on the first bind. */
	obj->map_and_fenceable = true;
@@ -3912,7 +3862,6 @@ init_ring_lists(struct intel_ring_buffer *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
}

void
+2 −5
@@ -946,7 +946,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

-
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->base.write_domain = obj->base.pending_write_domain;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
@@ -955,8 +954,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = seqno;
-			list_move_tail(&obj->gpu_write_list,
-				       &ring->gpu_write_list);
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_busy(ring->dev, obj);
		}
+0 −2
@@ -1002,7 +1002,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
	ring->size = 32 * PAGE_SIZE;

	init_waitqueue_head(&ring->irq_queue);
@@ -1473,7 +1472,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);

	ring->size = size;
	ring->effective_size = ring->size;
+0 −9
@@ -100,15 +100,6 @@ struct intel_ring_buffer {
	 */
	struct list_head request_list;

-	/**
-	 * List of objects currently pending a GPU write flush.
-	 *
-	 * All elements on this list will belong to either the
-	 * active_list or flushing_list, last_rendering_seqno can
-	 * be used to differentiate between the two elements.
-	 */
-	struct list_head gpu_write_list;
-
	/**
	 * Do we have some not yet emitted requests outstanding?
	 */