
Commit 4e0d64db authored by Chris Wilson

drm/i915: Move request->ctx aside



In the next patch, we want to store the intel_context pointer inside
i915_request, as it is frequently accessed via a convoluted dance when
submitting the request to hw. Having two context pointers inside
i915_request leads to confusion, so first rename the existing
i915_gem_context pointer to i915_request.gem_context.

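For orientation, a minimal sketch of what the rename amounts to in struct
i915_request (assumed surrounding members; the real definition in
i915_request.h carries many more fields):

struct i915_request {
	struct drm_i915_private *i915;
	struct intel_engine_cs *engine;

	/* Before this patch the GEM context back-pointer was named "ctx": */
	/* struct i915_gem_context *ctx; */

	/* After this patch it is renamed, leaving the short name free for
	 * the intel_context pointer added by the follow-up patch: */
	struct i915_gem_context *gem_context;

	/* ...other fields elided... */
};
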
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180517212633.24934-1-chris@chris-wilson.co.uk
parent c8af5274
+2 −2
@@ -205,7 +205,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)

static inline bool is_gvt_request(struct i915_request *req)
{
-	return i915_gem_context_force_single_submission(req->ctx);
+	return i915_gem_context_force_single_submission(req->gem_context);
}

static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -305,7 +305,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
	struct i915_request *req = workload->req;

	if (IS_KABYLAKE(req->i915) &&
-	    is_inhibit_context(req->ctx, req->engine->id))
+	    is_inhibit_context(req->gem_context, req->engine->id))
		intel_vgpu_restore_inhibit_context(vgpu, req);

	/* allocate shadow ring buffer */
+2 −2
@@ -542,8 +542,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
						   struct i915_request,
						   client_link);
		rcu_read_lock();
-		task = pid_task(request && request->ctx->pid ?
-				request->ctx->pid : file->pid,
+		task = pid_task(request && request->gem_context->pid ?
+				request->gem_context->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
+5 −5
@@ -3067,7 +3067,7 @@ static void skip_request(struct i915_request *request)
static void engine_skip_context(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
-	struct i915_gem_context *hung_ctx = request->ctx;
+	struct i915_gem_context *hung_ctx = request->gem_context;
	struct i915_timeline *timeline = request->timeline;
	unsigned long flags;

@@ -3077,7 +3077,7 @@ static void engine_skip_context(struct i915_request *request)
	spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);

	list_for_each_entry_continue(request, &engine->timeline.requests, link)
-		if (request->ctx == hung_ctx)
+		if (request->gem_context == hung_ctx)
			skip_request(request);

	list_for_each_entry(request, &timeline->requests, link)
@@ -3123,11 +3123,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
	}

	if (stalled) {
-		i915_gem_context_mark_guilty(request->ctx);
+		i915_gem_context_mark_guilty(request->gem_context);
		skip_request(request);

		/* If this context is now banned, skip all pending requests. */
-		if (i915_gem_context_is_banned(request->ctx))
+		if (i915_gem_context_is_banned(request->gem_context))
			engine_skip_context(request);
	} else {
		/*
@@ -3137,7 +3137,7 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
		 */
		request = i915_gem_find_active_request(engine);
		if (request) {
-			i915_gem_context_mark_innocent(request->ctx);
+			i915_gem_context_mark_innocent(request->gem_context);
			dma_fence_set_error(&request->fence, -EAGAIN);

			/* Rewind the engine to replay the incomplete rq */
+10 −8
@@ -1287,9 +1287,11 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
static void record_request(struct i915_request *request,
			   struct drm_i915_error_request *erq)
{
-	erq->context = request->ctx->hw_id;
+	struct i915_gem_context *ctx = request->gem_context;
+
+	erq->context = ctx->hw_id;
	erq->sched_attr = request->sched.attr;
-	erq->ban_score = atomic_read(&request->ctx->ban_score);
+	erq->ban_score = atomic_read(&ctx->ban_score);
	erq->seqno = request->global_seqno;
	erq->jiffies = request->emitted_jiffies;
	erq->start = i915_ggtt_offset(request->ring->vma);
@@ -1297,7 +1299,7 @@ static void record_request(struct i915_request *request,
	erq->tail = request->tail;

	rcu_read_lock();
-	erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+	erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
	rcu_read_unlock();
}

@@ -1461,12 +1463,12 @@ static void gem_record_rings(struct i915_gpu_state *error)

		request = i915_gem_find_active_request(engine);
		if (request) {
+			struct i915_gem_context *ctx = request->gem_context;
			struct intel_ring *ring;

-			ee->vm = request->ctx->ppgtt ?
-				&request->ctx->ppgtt->base : &ggtt->base;
+			ee->vm = ctx->ppgtt ? &ctx->ppgtt->base : &ggtt->base;

-			record_context(&ee->context, request->ctx);
+			record_context(&ee->context, ctx);

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
@@ -1483,11 +1485,11 @@ static void gem_record_rings(struct i915_gpu_state *error)

			ee->ctx =
				i915_error_object_create(i915,
-							 to_intel_context(request->ctx,
+							 to_intel_context(ctx,
									  engine)->state);

			error->simulated |=
-				i915_gem_context_no_error_capture(request->ctx);
+				i915_gem_context_no_error_capture(ctx);

			ee->rq_head = request->head;
			ee->rq_post = request->postfix;
+6 −6
@@ -384,7 +384,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
	 */
	if (engine->last_retired_context)
		intel_context_unpin(engine->last_retired_context, engine);
-	engine->last_retired_context = rq->ctx;
+	engine->last_retired_context = rq->gem_context;
}

static void __retire_engine_upto(struct intel_engine_cs *engine,
@@ -455,8 +455,8 @@ static void i915_request_retire(struct i915_request *request)
	i915_request_remove_from_client(request);

	/* Retirement decays the ban score as it is a sign of ctx progress */
-	atomic_dec_if_positive(&request->ctx->ban_score);
-	intel_context_unpin(request->ctx, request->engine);
+	atomic_dec_if_positive(&request->gem_context->ban_score);
+	intel_context_unpin(request->gem_context, request->engine);

	__retire_engine_upto(request->engine, request);

@@ -760,7 +760,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
	INIT_LIST_HEAD(&rq->active_list);
	rq->i915 = i915;
	rq->engine = engine;
-	rq->ctx = ctx;
+	rq->gem_context = ctx;
	rq->ring = ring;
	rq->timeline = ring->timeline;
	GEM_BUG_ON(rq->timeline == &engine->timeline);
@@ -814,7 +814,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
		goto err_unwind;

	/* Keep a second pin for the dual retirement along engine and ring */
-	__intel_context_pin(rq->ctx, engine);
+	__intel_context_pin(rq->gem_context, engine);

	/* Check that we didn't interrupt ourselves with a new request */
	GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
@@ -1113,7 +1113,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
	local_bh_disable();
	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	if (engine->schedule)
-		engine->schedule(request, &request->ctx->sched);
+		engine->schedule(request, &request->gem_context->sched);
	rcu_read_unlock();
	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */