
Commit 535fbe82 authored by John Harrison, committed by Daniel Vetter

drm/i915: Update move_to_gpu() to take a request structure



The plan is to pass requests around as the basic submission tracking structure
rather than rings and contexts. This patch updates the move_to_gpu() code paths.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 95c24161
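
The conversion is mechanical: functions that used to take an engine (ring), or a ringbuffer plus context, now take the request and derive those objects from it. Below is a minimal standalone sketch of that pattern, using simplified stand-in types rather than the real driver structures (struct engine and struct request here are illustrative only):

#include <stdio.h>

/* Stand-in types; the real driver structures are far richer. */
struct engine {
	const char *name;
};

struct request {
	struct engine *ring;	/* engine this request will execute on */
	unsigned int seqno;	/* illustrative per-request id */
};

/* Before: callers pass the engine explicitly. */
static int move_to_gpu_old(struct engine *ring)
{
	printf("flush + invalidate for engine %s\n", ring->name);
	return 0;
}

/* After: the request is the single tracking structure and the
 * engine is looked up through it, mirroring req->ring in the patch. */
static int move_to_gpu_new(struct request *req)
{
	printf("flush + invalidate for engine %s (request %u)\n",
	       req->ring->name, req->seqno);
	return 0;
}

int main(void)
{
	struct engine rcs = { .name = "render" };
	struct request req = { .ring = &rcs, .seqno = 1 };

	move_to_gpu_old(&rcs);	/* old-style call site */
	move_to_gpu_new(&req);	/* new-style call site */
	return 0;
}

Once every internal call site carries the same tracking object, later patches can hang additional submission state off the request without having to change these signatures again.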
drivers/gpu/drm/i915/i915_gem_execbuffer.c +6 −6
@@ -891,10 +891,10 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 }
 
 static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 				struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_ring_flag(ring);
+	const unsigned other_rings = ~intel_ring_flag(req->ring);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -904,7 +904,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, ring);
+			ret = i915_gem_object_sync(obj, req->ring);
 			if (ret)
 				return ret;
 		}
@@ -916,7 +916,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 	}
 
 	if (flush_chipset)
-		i915_gem_chipset_flush(ring->dev);
+		i915_gem_chipset_flush(req->ring->dev);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -924,7 +924,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return intel_ring_invalidate_all_caches(ring);
+	return intel_ring_invalidate_all_caches(req->ring);
 }
 
 static bool
@@ -1246,7 +1246,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 		}
 	}
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
 	if (ret)
 		goto error;

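One detail worth unpacking before the execlists half of the patch: intel_ring_flag() yields a one-bit-per-engine mask, so ~intel_ring_flag(req->ring) selects every engine except the request's own, and obj->active & other_rings is non-zero exactly when the object is still busy on a different engine and must be synced before this request can proceed. A toy illustration of that mask logic, with made-up engine numbering:

#include <stdio.h>

/* Made-up engine ids; the real flags come from the driver's engine list. */
enum { RCS, BCS, VCS };

static unsigned ring_flag(int engine_id)
{
	return 1u << engine_id;	/* one bit per engine */
}

int main(void)
{
	/* Object still busy on the render and video engines. */
	unsigned active = ring_flag(RCS) | ring_flag(VCS);
	/* Request targets RCS, so mask out its own bit. */
	unsigned other_rings = ~ring_flag(RCS);

	/* Non-zero iff the object is active on some *other* engine. */
	printf("needs sync: %s\n", (active & other_rings) ? "yes" : "no");
	return 0;
}
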
drivers/gpu/drm/i915/intel_lrc.c +5 −7
@@ -624,12 +624,10 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
 	return 0;
 }
 
-static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
-				 struct intel_context *ctx,
+static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 				 struct list_head *vmas)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	const unsigned other_rings = ~intel_ring_flag(ring);
+	const unsigned other_rings = ~intel_ring_flag(req->ring);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -639,7 +637,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, ring);
+			ret = i915_gem_object_sync(obj, req->ring);
 			if (ret)
 				return ret;
 		}
@@ -656,7 +654,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return logical_ring_invalidate_all_caches(ringbuf, ctx);
+	return logical_ring_invalidate_all_caches(req->ringbuf, req->ctx);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -918,7 +916,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 		return -EINVAL;
 	}
 
-	ret = execlists_move_to_gpu(ringbuf, params->ctx, vmas);
+	ret = execlists_move_to_gpu(params->request, vmas);
 	if (ret)
 		return ret;
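
Both paths now lean on the request carrying pointers to everything that was previously passed alongside it. Judging purely from the fields this patch dereferences (req->ring, req->ringbuf, req->ctx), the request structure of this era looks roughly like the sketch below; anything beyond those three fields is illustrative:

/* Rough shape implied by the dereferences in this patch; the real
 * struct drm_i915_gem_request carries many more fields (reference
 * count, seqno, fence bookkeeping, ...) than shown here. */
struct drm_i915_gem_request {
	struct intel_engine_cs *ring;		/* engine, used as req->ring */
	struct intel_ringbuffer *ringbuf;	/* command stream, req->ringbuf */
	struct intel_context *ctx;		/* hw context, req->ctx */
	/* ... */
};

That one structure is what lets both the legacy and execlists submission paths shrink move_to_gpu() down to a request plus the vma list.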