Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5b4a60c2 authored by John Harrison, committed by Daniel Vetter
Browse files

drm/i915: Add flag to i915_add_request() to skip the cache flush



In order to explicitly track all GPU work (and completely remove the outstanding
lazy request), it is necessary to add extra i915_add_request() calls to various
places. Some of these do not need the implicit cache flush done as part of the
standard batch buffer submission process.

This patch adds a flag to _add_request() to specify whether the flush is
required or not.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 8a8edb59
Loading
Loading
Loading
Loading
+5 −2
Original line number Diff line number Diff line
@@ -2890,9 +2890,12 @@ int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
void __i915_add_request(struct intel_engine_cs *ring,
			struct drm_file *file,
			struct drm_i915_gem_object *batch_obj);
			struct drm_i915_gem_object *batch_obj,
			bool flush_caches);
#define i915_add_request(ring) \
	__i915_add_request(ring, NULL, NULL)
	__i915_add_request(ring, NULL, NULL, true)
#define i915_add_request_no_flush(ring) \
	__i915_add_request(ring, NULL, NULL, false)
int __i915_wait_request(struct drm_i915_gem_request *req,
			unsigned reset_counter,
			bool interruptible,
+10 −7
Original line number Diff line number Diff line
@@ -2470,7 +2470,8 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 */
void __i915_add_request(struct intel_engine_cs *ring,
			struct drm_file *file,
			struct drm_i915_gem_object *obj)
			struct drm_i915_gem_object *obj,
			bool flush_caches)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_request *request;
@@ -2502,12 +2503,14 @@ void __i915_add_request(struct intel_engine_cs *ring,
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		if (i915.enable_execlists)
			ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
		else
			ret = intel_ring_flush_all_caches(ring);
		/* Not allowed to fail! */
		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
	}

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
+1 −1
Original line number Diff line number Diff line
@@ -1066,7 +1066,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
	params->ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	__i915_add_request(params->ring, params->file, params->batch_obj);
	__i915_add_request(params->ring, params->file, params->batch_obj, true);
}

static int
+1 −1
Original line number Diff line number Diff line
@@ -173,7 +173,7 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)

	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);

	__i915_add_request(ring, NULL, so.obj);
	__i915_add_request(ring, NULL, so.obj, true);
	/* __i915_add_request moves object to inactive if it fails */
out:
	i915_gem_render_state_fini(&so);
+1 −1
Original line number Diff line number Diff line
@@ -1599,7 +1599,7 @@ static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,

	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);

	__i915_add_request(ring, file, so.obj);
	__i915_add_request(ring, file, so.obj, true);
	/* intel_logical_ring_add_request moves object to inactive if it
	 * fails */
out: