
Commit 1fc44d9b authored by Chris Wilson

drm/i915: Store a pointer to intel_context in i915_request



To ease the frequent and ugly pointer dance of
&request->gem_context->engine[request->engine->id] during request
submission, store that pointer as request->hw_context. One major
advantage that we will exploit later is that this decouples the logical
context state from the engine itself.

v2: Set mock_context->ops so we don't crash and burn in selftests.
    Cleanups from Tvrtko.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Acked-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180517212633.24934-3-chris@chris-wilson.co.uk
parent 01278cb1
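
The "pointer dance" named in the commit message can be illustrated with a minimal sketch. This is not code from the commit: the struct layouts below are simplified stand-ins for the real i915 definitions, kept only to show how the lookup changes once the request caches its intel_context.

```c
/*
 * Illustrative sketch only (not from the commit): simplified stand-ins
 * for the real i915 structures.
 */
struct intel_context {
	unsigned int *lrc_reg_state;	/* logical ring context register state */
};

struct i915_gem_context {
	struct intel_context __engine[8];	/* one slot per engine; size arbitrary here */
};

struct intel_engine_cs {
	int id;
};

struct i915_request {
	struct i915_gem_context *gem_context;
	struct intel_engine_cs *engine;
	struct intel_context *hw_context;	/* added by this commit, set when the request is created */
};

/* Before: each submission path re-derived the per-engine context slot. */
static struct intel_context *lookup_context_old(struct i915_request *rq)
{
	return &rq->gem_context->__engine[rq->engine->id];
}

/* After: the request carries the pointer directly, so the logical
 * context state no longer has to be reached through the engine. */
static struct intel_context *lookup_context_new(struct i915_request *rq)
{
	return rq->hw_context;
}
```

With the cached pointer, GVT call sites in the diff below, such as is_inhibit_context() and update_guest_context(), can take an intel_context (or reach it via req->hw_context) instead of an i915_gem_context plus a ring id.
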
+3 −3
@@ -446,9 +446,9 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,

#define CTX_CONTEXT_CONTROL_VAL	0x03

bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
bool is_inhibit_context(struct intel_context *ce)
{
	u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
	const u32 *reg_state = ce->lrc_reg_state;
	u32 inhibit_mask =
		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

@@ -501,7 +501,7 @@ static void switch_mmio(struct intel_vgpu *pre,
			 * itself.
			 */
			if (mmio->in_context &&
			    !is_inhibit_context(s->shadow_ctx, ring_id))
			    !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
				continue;

			if (mmio->mask)
+1 −1
@@ -49,7 +49,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,

void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);

bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
bool is_inhibit_context(struct intel_context *ce);

int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
				       struct i915_request *req);
+52 −89
@@ -54,11 +54,8 @@ static void set_context_pdp_root_pointer(

static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->__engine[ring_id].state->obj;
		workload->req->hw_context->state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;

@@ -128,9 +125,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->__engine[ring_id].state->obj;
		workload->req->hw_context->state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
@@ -280,10 +276,8 @@ static int shadow_context_status_change(struct notifier_block *nb,
	return NOTIFY_OK;
}

static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
		struct intel_engine_cs *engine)
static void shadow_context_descriptor_update(struct intel_context *ce)
{
	struct intel_context *ce = to_intel_context(ctx, engine);
	u64 desc = 0;

	desc = ce->lrc_desc;
@@ -292,7 +286,7 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
	 * like GEN8_CTX_* cached in desc_template
	 */
	desc &= U64_MAX << 12;
	desc |= ctx->desc_template & ((1ULL << 12) - 1);
	desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);

	ce->lrc_desc = desc;
}
@@ -300,12 +294,11 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct i915_request *req = workload->req;
	void *shadow_ring_buffer_va;
	u32 *cs;
	struct i915_request *req = workload->req;

	if (IS_KABYLAKE(req->i915) &&
	    is_inhibit_context(req->gem_context, req->engine->id))
	if (IS_KABYLAKE(req->i915) && is_inhibit_context(req->hw_context))
		intel_vgpu_restore_inhibit_context(vgpu, req);

	/* allocate shadow ring buffer */
@@ -353,60 +346,56 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int ring_id = workload->ring_id;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct intel_ring *ring;
	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
	struct intel_context *ce;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (workload->shadowed)
	if (workload->req)
		return 0;

	/* pin shadow context by gvt even the shadow context will be pinned
	 * when i915 alloc request. That is because gvt will update the guest
	 * context from shadow context when workload is completed, and at that
	 * moment, i915 may already unpined the shadow context to make the
	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
	 * the guest context, gvt can unpin the shadow_ctx safely.
	 */
	ce = intel_context_pin(shadow_ctx, engine);
	if (IS_ERR(ce)) {
		gvt_vgpu_err("fail to pin shadow context\n");
		return PTR_ERR(ce);
	}

	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
		shadow_context_descriptor_update(shadow_ctx,
					dev_priv->engine[ring_id]);
	if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
		shadow_context_descriptor_update(ce);

	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
	if (ret)
		goto err_scan;
		goto err_unpin;

	if ((workload->ring_id == RCS) &&
	    (workload->wa_ctx.indirect_ctx.size != 0)) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto err_scan;
	}

	/* pin shadow context by gvt even the shadow context will be pinned
	 * when i915 alloc request. That is because gvt will update the guest
	 * context from shadow context when workload is completed, and at that
	 * moment, i915 may already unpined the shadow context to make the
	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
	 * the guest context, gvt can unpin the shadow_ctx safely.
	 */
	ring = intel_context_pin(shadow_ctx, engine);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		gvt_vgpu_err("fail to pin shadow context\n");
		goto err_shadow;
	}

	ret = populate_shadow_context(workload);
	if (ret)
		goto err_unpin;
	workload->shadowed = true;
		goto err_shadow;

	return 0;

err_unpin:
	intel_context_unpin(shadow_ctx, engine);
err_shadow:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_scan:
err_unpin:
	intel_context_unpin(ce);
	return ret;
}

@@ -414,7 +403,6 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
{
	int ring_id = workload->ring_id;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct i915_request *rq;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
@@ -437,7 +425,6 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
	return 0;

err_unpin:
	intel_context_unpin(shadow_ctx, engine);
	release_shadow_wa_ctx(&workload->wa_ctx);
	return ret;
}
@@ -517,21 +504,13 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
	return ret;
}

static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct intel_vgpu_workload *workload = container_of(wa_ctx,
					struct intel_vgpu_workload,
					wa_ctx);
	int ring_id = workload->ring_id;
	struct intel_vgpu_submission *s = &workload->vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->__engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap_atomic(page);
	struct intel_vgpu_workload *workload =
		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
	struct i915_request *rq = workload->req;
	struct execlist_ring_context *shadow_ring_context =
		(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;

	shadow_ring_context->bb_per_ctx_ptr.val =
		(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -539,9 +518,6 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
	shadow_ring_context->rcs_indirect_ctx.val =
		(shadow_ring_context->rcs_indirect_ctx.val &
		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;

	kunmap_atomic(shadow_ring_context);
	return 0;
}

static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -670,12 +646,9 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int ring_id = workload->ring_id;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	int ret = 0;
	int ret;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);
@@ -687,10 +660,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
		goto out;

	ret = prepare_workload(workload);
	if (ret) {
		intel_context_unpin(shadow_ctx, engine);
		goto out;
	}

out:
	if (ret)
@@ -765,27 +734,23 @@ static struct intel_vgpu_workload *pick_next_workload(

static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct i915_request *rq = workload->req;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	int ring_id = workload->ring_id;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->__engine[ring_id].state->obj;
	struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
		      workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = rq->engine->context_size;
	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
	if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
		context_page_num = 19;

	i = 2;
@@ -858,6 +823,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
		scheduler->current_workload[ring_id];
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_request *rq;
	int event;

	mutex_lock(&gvt->lock);
@@ -866,11 +832,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
	 * switch to make sure request is completed.
	 * For the workload w/o request, directly complete the workload.
	 */
	if (workload->req) {
		struct drm_i915_private *dev_priv =
			workload->vgpu->gvt->dev_priv;
		struct intel_engine_cs *engine =
			dev_priv->engine[workload->ring_id];
	rq = fetch_and_zero(&workload->req);
	if (rq) {
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

@@ -886,8 +849,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
				workload->status = 0;
		}

		i915_request_put(fetch_and_zero(&workload->req));

		if (!workload->status && !(vgpu->resetting_eng &
					   ENGINE_MASK(ring_id))) {
			update_guest_context(workload);
@@ -896,10 +857,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}
		mutex_lock(&dev_priv->drm.struct_mutex);

		/* unpin shadow ctx as the shadow_ctx update is done */
		intel_context_unpin(s->shadow_ctx, engine);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		mutex_lock(&rq->i915->drm.struct_mutex);
		intel_context_unpin(rq->hw_context);
		mutex_unlock(&rq->i915->drm.struct_mutex);

		i915_request_put(rq);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -1270,7 +1234,6 @@ alloc_workload(struct intel_vgpu *vgpu)
	atomic_set(&workload->shadow_ctx_active, 0);

	workload->status = -EINPROGRESS;
	workload->shadowed = false;
	workload->vgpu = vgpu;

	return workload;
+0 −1
@@ -83,7 +83,6 @@ struct intel_vgpu_workload {
	struct i915_request *req;
	/* if this workload has been dispatched to i915? */
	bool dispatched;
	bool shadowed;
	int status;

	struct intel_vgpu_mm *shadow_mm;
+1 −0
@@ -1950,6 +1950,7 @@ struct drm_i915_private {
			 */
			struct i915_perf_stream *exclusive_stream;

			struct intel_context *pinned_ctx;
			u32 specific_ctx_id;

			struct hrtimer poll_check_timer;