
Commit 82b6b6d7 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Remove fenced_gpu_access and pending_fenced_gpu_access



This migrates the fence tracking onto the existing seqno
infrastructure so that the later conversion to tracking via requests is
simplified.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent e6a84468
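
Net effect of the patch: the question "is the GPU using a fence to access this buffer?" is now answered from obj->last_fenced_seqno, which is non-zero exactly while a fenced GPU access is outstanding, instead of from a pair of flag bits that had to be set and cleared in lockstep with it. Below is a minimal standalone sketch of that substitution; it is illustrative only, not code from this commit, and fake_gem_object and fenced_gpu_access() are simplified stand-ins for the i915 types:

/* sketch.c - illustrative only; build with: cc -Wall sketch.c */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_gem_object {
	/* Seqno of the last fenced GPU access, 0 once it has retired.
	 * This single field stands in for the old fenced_gpu_access and
	 * pending_fenced_gpu_access bits. */
	uint32_t last_fenced_seqno;
};

/* The old "obj->fenced_gpu_access" flag becomes a derived predicate. */
static bool fenced_gpu_access(const struct fake_gem_object *obj)
{
	return obj->last_fenced_seqno != 0;
}

int main(void)
{
	struct fake_gem_object obj = { .last_fenced_seqno = 42 };

	printf("fenced: %d\n", fenced_gpu_access(&obj));	/* 1: access outstanding */
	obj.last_fenced_seqno = 0;				/* access retired */
	printf("fenced: %d\n", fenced_gpu_access(&obj));	/* 0 */
	return 0;
}

This is the substitution the i915_gem_tiling.c hunk at the end relies on: a non-zero last_fenced_seqno is tested where the old fenced_gpu_access bit used to be.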
drivers/gpu/drm/i915/i915_drv.h +0 −7
@@ -1777,13 +1777,6 @@ struct drm_i915_gem_object {
 	 * Only honoured if hardware has relevant pte bit
 	 */
 	unsigned long gt_ro:1;
-
-	/*
-	 * Is the GPU currently using a fence to access this buffer,
-	 */
-	unsigned int pending_fenced_gpu_access:1;
-	unsigned int fenced_gpu_access:1;
-
 	unsigned int cache_level:3;
 
 	unsigned int has_aliasing_ppgtt_mapping:1;
drivers/gpu/drm/i915/i915_gem.c +0 −17
@@ -2161,8 +2161,6 @@ static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 			       struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 seqno = intel_ring_get_seqno(ring);
 
 	BUG_ON(ring == NULL);
@@ -2181,19 +2179,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	list_move_tail(&obj->ring_list, &ring->active_list);
 
 	obj->last_read_seqno = seqno;
-
-	if (obj->fenced_gpu_access) {
-		obj->last_fenced_seqno = seqno;
-
-		/* Bump MRU to take account of the delayed flush */
-		if (obj->fence_reg != I915_FENCE_REG_NONE) {
-			struct drm_i915_fence_reg *reg;
-
-			reg = &dev_priv->fence_regs[obj->fence_reg];
-			list_move_tail(&reg->lru_list,
-				       &dev_priv->mm.fence_list);
-		}
-	}
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
@@ -2229,7 +2214,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 	obj->base.write_domain = 0;
 
 	obj->last_fenced_seqno = 0;
-	obj->fenced_gpu_access = false;
 
 	obj->active = 0;
 	drm_gem_object_unreference(&obj->base);
@@ -3174,7 +3158,6 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 		obj->last_fenced_seqno = 0;
 	}
 
-	obj->fenced_gpu_access = false;
 	return 0;
 }
 
drivers/gpu/drm/i915/i915_gem_execbuffer.c +19 −15
@@ -542,7 +542,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 {
 	struct drm_i915_gem_object *obj = vma->obj;
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	uint64_t flags;
 	int ret;
 
@@ -560,7 +559,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
 	entry->flags |= __EXEC_OBJECT_HAS_PIN;
 
-	if (has_fenced_gpu_access) {
 	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 		ret = i915_gem_object_get_fence(obj);
 		if (ret)
@@ -568,9 +566,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
 		if (i915_gem_object_pin_fence(obj))
 			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
-
-			obj->pending_fenced_gpu_access = true;
-		}
 	}
 
 	if (entry->offset != vma->node.start) {
@@ -658,8 +653,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 		obj = vma->obj;
 		entry = vma->exec_entry;
 
+		if (!has_fenced_gpu_access)
+			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
 		need_fence =
-			has_fenced_gpu_access &&
 			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 			obj->tiling_mode != I915_TILING_NONE;
 		need_mappable = need_fence || need_reloc_mappable(vma);
@@ -672,7 +668,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 
 		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
 		obj->base.pending_write_domain = 0;
-		obj->pending_fenced_gpu_access = false;
 	}
 	list_splice(&ordered_vmas, vmas);
 
@@ -959,9 +954,11 @@ static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct intel_engine_cs *ring)
 {
+	u32 seqno = intel_ring_get_seqno(ring);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
+		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 		struct drm_i915_gem_object *obj = vma->obj;
 		u32 old_read = obj->base.read_domains;
 		u32 old_write = obj->base.write_domain;
@@ -970,18 +967,25 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (obj->base.write_domain == 0)
 			obj->base.pending_read_domains |= obj->base.read_domains;
 		obj->base.read_domains = obj->base.pending_read_domains;
-		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
 		i915_vma_move_to_active(vma, ring);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
-			obj->last_write_seqno = intel_ring_get_seqno(ring);
+			obj->last_write_seqno = seqno;
 
 			intel_fb_obj_invalidate(obj, ring);
 
 			/* update for the implicit flush after a batch */
 			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
 		}
+		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+			obj->last_fenced_seqno = seqno;
+			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+				struct drm_i915_private *dev_priv = to_i915(ring->dev);
+				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
+					       &dev_priv->mm.fence_list);
+			}
+		}
 
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
drivers/gpu/drm/i915/i915_gem_tiling.c +1 −1
@@ -376,7 +376,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 
 		if (ret == 0) {
 			obj->fence_dirty =
-				obj->fenced_gpu_access ||
+				obj->last_fenced_seqno ||
 				obj->fence_reg != I915_FENCE_REG_NONE;
 
 			obj->tiling_mode = args->tiling_mode;
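
For context on the execbuffer side: the fence-LRU bump that previously lived in i915_gem_object_move_to_active(), gated on obj->fenced_gpu_access, now runs in i915_gem_execbuffer_move_to_active(), gated on the per-execbuffer entry flags. A schematic standalone rendering of that control flow (simplified stand-in types, not the driver code):

/* flow.c - schematic only; build with: cc -Wall flow.c */
#include <stdint.h>
#include <stdio.h>

#define EXEC_OBJECT_NEEDS_FENCE		(1u << 0)
#define __EXEC_OBJECT_HAS_FENCE		(1u << 1)

struct fake_obj {
	uint32_t last_fenced_seqno;
};

/* Objects whose execbuffer entry requested a fence get their fenced
 * seqno stamped; only those actually holding a fence register bump
 * the fence-register LRU. */
static void move_to_active(struct fake_obj *obj, uint32_t entry_flags,
			   uint32_t seqno)
{
	if (entry_flags & EXEC_OBJECT_NEEDS_FENCE) {
		obj->last_fenced_seqno = seqno;
		if (entry_flags & __EXEC_OBJECT_HAS_FENCE)
			printf("bump fence reg to MRU\n");
	}
}

int main(void)
{
	struct fake_obj obj = { 0 };

	move_to_active(&obj, EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_HAS_FENCE, 7);
	printf("last_fenced_seqno = %u\n", obj.last_fenced_seqno);	/* 7 */
	return 0;
}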