Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ab82a063 authored by Chris Wilson
Browse files

drm/i915: Wrap engine->context_pin() and engine->context_unpin()



Make life easier in upcoming patches by moving the context_pin and
context_unpin vfuncs into inline helpers.

v2: Fixup mock_engine to mark the context as pinned on use.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180430131503.5375-2-chris@chris-wilson.co.uk
parent 52d7f16e
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -448,7 +448,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,

bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
{
	u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
	u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
	u32 inhibit_mask =
		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

+10 −10
Original line number Diff line number Diff line
@@ -58,7 +58,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
		shadow_ctx->__engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;

@@ -130,7 +130,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
		shadow_ctx->__engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
@@ -283,7 +283,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
		struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	struct intel_context *ce = to_intel_context(ctx, engine);
	u64 desc = 0;

	desc = ce->lrc_desc;
@@ -389,7 +389,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
	 * the guest context, gvt can unpin the shadow_ctx safely.
	 */
	ring = engine->context_pin(engine, shadow_ctx);
	ring = intel_context_pin(shadow_ctx, engine);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		gvt_vgpu_err("fail to pin shadow context\n");
@@ -403,7 +403,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
	return 0;

err_unpin:
	engine->context_unpin(engine, shadow_ctx);
	intel_context_unpin(shadow_ctx, engine);
err_shadow:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_scan:
@@ -437,7 +437,7 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
	return 0;

err_unpin:
	engine->context_unpin(engine, shadow_ctx);
	intel_context_unpin(shadow_ctx, engine);
	release_shadow_wa_ctx(&workload->wa_ctx);
	return ret;
}
@@ -526,7 +526,7 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
	struct intel_vgpu_submission *s = &workload->vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
		shadow_ctx->__engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;

@@ -688,7 +688,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)

	ret = prepare_workload(workload);
	if (ret) {
		engine->context_unpin(engine, shadow_ctx);
		intel_context_unpin(shadow_ctx, engine);
		goto out;
	}

@@ -771,7 +771,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	int ring_id = workload->ring_id;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
		shadow_ctx->__engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
@@ -898,7 +898,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
		}
		mutex_lock(&dev_priv->drm.struct_mutex);
		/* unpin shadow ctx as the shadow_ctx update is done */
		engine->context_unpin(engine, s->shadow_ctx);
		intel_context_unpin(s->shadow_ctx, engine);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

+12 −8
Original line number Diff line number Diff line
@@ -377,16 +377,19 @@ static void print_batch_pool_stats(struct seq_file *m,
	print_file_stats(m, "[k]batch pool", stats);
}

static int per_file_ctx_stats(int id, void *ptr, void *data)
static int per_file_ctx_stats(int idx, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	int n;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, ctx->i915, id) {
		struct intel_context *ce = to_intel_context(ctx, engine);

	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
		if (ctx->engine[n].state)
			per_file_stats(0, ctx->engine[n].state->obj, data);
		if (ctx->engine[n].ring)
			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
		if (ce->state)
			per_file_stats(0, ce->state->obj, data);
		if (ce->ring)
			per_file_stats(0, ce->ring->vma->obj, data);
	}

	return 0;
@@ -1959,7 +1962,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce = &ctx->engine[engine->id];
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
+2 −2
Original line number Diff line number Diff line
@@ -3234,7 +3234,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
				      stalled_mask & ENGINE_MASK(id));
		ctx = fetch_and_zero(&engine->last_retired_context);
		if (ctx)
			engine->context_unpin(engine, ctx);
			intel_context_unpin(ctx, engine);

		/*
		 * Ostensibily, we always want a context loaded for powersaving,
@@ -5291,7 +5291,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
	for_each_engine(engine, i915, id) {
		struct i915_vma *state;

		state = ctx->engine[id].state;
		state = to_intel_context(ctx, engine)->state;
		if (!state)
			continue;

+4 −4
Original line number Diff line number Diff line
@@ -117,15 +117,15 @@ static void lut_close(struct i915_gem_context *ctx)

static void i915_gem_context_free(struct i915_gem_context *ctx)
{
	int i;
	unsigned int n;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	i915_ppgtt_put(ctx->ppgtt);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_context *ce = &ctx->engine[i];
	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
		struct intel_context *ce = &ctx->__engine[n];

		if (!ce->state)
			continue;
@@ -521,7 +521,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
		if (!engine->last_retired_context)
			continue;

		engine->context_unpin(engine, engine->last_retired_context);
		intel_context_unpin(engine->last_retired_context, engine);
		engine->last_retired_context = NULL;
	}
}
Loading