Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit da99fe5f authored by Chris Wilson
Browse files

drm/i915: Refactor export_fence() after i915_vma_move_to_active()



Currently all callers are responsible for adding the vma to the active
timeline and then exporting its fence. Combine the two operations into
i915_vma_move_to_active() to move all the extra handling from the
callers to the single site.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180706103947.15919-1-chris@chris-wilson.co.uk
parent 8fdbfd86
Loading
Loading
Loading
Loading
+21 −26
Original line number Diff line number Diff line
@@ -1166,15 +1166,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,

	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
	i915_vma_move_to_active(batch, rq, 0);
	reservation_object_lock(batch->resv, NULL);
	reservation_object_add_excl_fence(batch->resv, &rq->fence);
	reservation_object_unlock(batch->resv);
	i915_vma_unpin(batch);

	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	reservation_object_lock(vma->resv, NULL);
	reservation_object_add_excl_fence(vma->resv, &rq->fence);
	reservation_object_unlock(vma->resv);

	rq->batch = batch;

@@ -1771,25 +1765,6 @@ static int eb_relocate(struct i915_execbuffer *eb)
	return eb_relocate_slow(eb);
}

/*
 * Publish the request's fence on the vma's reservation object so that
 * other waiters (e.g. other rings, dma-buf importers) can synchronise
 * against this access.  An EXEC_OBJECT_WRITE access installs an
 * exclusive fence; otherwise a shared fence slot is reserved and the
 * fence added as a shared (read) fence.
 */
static void eb_export_fence(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct reservation_object *resv = vma->resv;

	/*
	 * Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &rq->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &rq->fence);
	reservation_object_unlock(resv);
}

static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
@@ -1844,7 +1819,6 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
		struct i915_vma *vma = eb->vma[i];

		i915_vma_move_to_active(vma, eb->request, flags);
		eb_export_fence(vma, eb->request, flags);

		__eb_unreserve_vma(vma, flags);
		vma->exec_flags = NULL;
@@ -1884,6 +1858,25 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
	return true;
}

/*
 * Attach the request's fence to the vma's reservation object: an
 * exclusive fence for an EXEC_OBJECT_WRITE access, otherwise a shared
 * (read) fence once a shared slot has been reserved.  Called from
 * i915_vma_move_to_active() so every caller gets the fence exported
 * automatically.
 */
static void export_fence(struct i915_vma *vma,
			 struct i915_request *rq,
			 unsigned int flags)
{
	struct reservation_object *resv = vma->resv;

	/*
	 * Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &rq->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &rq->fence);
	reservation_object_unlock(resv);
}

void i915_vma_move_to_active(struct i915_vma *vma,
			     struct i915_request *rq,
			     unsigned int flags)
@@ -1921,6 +1914,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, rq);

	export_fence(vma, rq, flags);
}

static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
+0 −4
Original line number Diff line number Diff line
@@ -998,10 +998,6 @@ static int gpu_write(struct i915_vma *vma,

	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

	reservation_object_lock(vma->resv, NULL);
	reservation_object_add_excl_fence(vma->resv, &rq->fence);
	reservation_object_unlock(vma->resv);

err_request:
	i915_request_add(rq);

+0 −4
Original line number Diff line number Diff line
@@ -225,10 +225,6 @@ static int gpu_set(struct drm_i915_gem_object *obj,
	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unpin(vma);

	reservation_object_lock(obj->resv, NULL);
	reservation_object_add_excl_fence(obj->resv, &rq->fence);
	reservation_object_unlock(obj->resv);

	i915_request_add(rq);

	return 0;
+0 −4
Original line number Diff line number Diff line
@@ -178,10 +178,6 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unpin(vma);

	reservation_object_lock(obj->resv, NULL);
	reservation_object_add_excl_fence(obj->resv, &rq->fence);
	reservation_object_unlock(obj->resv);

	i915_request_add(rq);

	return 0;
+0 −4
Original line number Diff line number Diff line
@@ -466,10 +466,6 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)

	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

	reservation_object_lock(vma->resv, NULL);
	reservation_object_add_excl_fence(vma->resv, &rq->fence);
	reservation_object_unlock(vma->resv);

	i915_request_add(rq);

	i915_gem_object_set_active_reference(obj);
Loading