
Commit e84fe803 authored by Nick Hoath, committed by Daniel Vetter

drm/i915: Split alloc from init for lrc



Extend the init/init_hw split to context init:
   - Move context initialisation into i915_gem_init_hw
   - Move one-off initialisation for the render ring to
        i915_gem_validate_context
   - Move default context initialisation to logical_ring_init

Rename intel_lr_context_deferred_create to
intel_lr_context_deferred_alloc to reflect the reduced functionality and
the alloc/init split.

This patch splits resource allocation out from initialisation so that
the code can be reused more easily on resume and GPU reset.
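
The shape of that split, reduced to a minimal standalone sketch (struct
lrc, lrc_alloc and lrc_init_hw are illustrative stand-ins, not the
driver's real entry points): allocation runs exactly once, while the
hardware programming step is idempotent and can be repeated on resume
or reset.

#include <stdlib.h>

struct lrc {
	void *state;		/* backing storage: allocated exactly once */
	unsigned int hw_word;	/* hardware state: rewritten on every init */
};

static struct lrc *lrc_alloc(size_t size)
{
	struct lrc *l = calloc(1, sizeof(*l));

	if (l == NULL)
		return NULL;
	l->state = malloc(size);
	if (l->state == NULL) {
		free(l);
		return NULL;
	}
	return l;
}

static void lrc_init_hw(struct lrc *l)
{
	/* safe to call repeatedly: resume and GPU reset re-enter here
	 * without touching the allocation in l->state */
	l->hw_word = 0x1;
}

int main(void)
{
	struct lrc *l = lrc_alloc(4096);

	if (l == NULL)
		return 1;
	lrc_init_hw(l);		/* driver load */
	lrc_init_hw(l);		/* resume / reset: no re-allocation */
	free(l->state);
	free(l);
	return 0;
}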

v2: Removed function ptr wrapping of do_switch_context (Daniel Vetter)
    Left ->init_context in intel_lr_context_deferred_alloc
    (Daniel Vetter)
    Remove unnecessary init flag & ring type test. (Daniel Vetter)
    Improve commit message (Daniel Vetter)
v3: On init/reinit, set the hw next sequence number to the sw next
    sequence number. This is set to 1 at driver load time. This prevents
    the seqno being reset on reinit (Chris Wilson)
v4: Set seqno back to ~0 - 0x1000 at start-of-day, and increment by 0x100
    on reset.
    This makes it obvious which batch buffers are which after a reset
    (David Gordon & John Harrison). See the arithmetic sketch after these
    version notes.
    Rebase.
v5: Rebase. Fixed rebase breakage. Put context pinning in a separate
    function. Removed code churn. (Thomas Daniel)
v6: Cleaned up issues introduced in v2 & v5 (Thomas Daniel)
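
The seqno choreography in v3/v4 is easy to sanity-check with plain
arithmetic. A small userspace sketch (the constants come from the
i915_gem.c hunks below; everything else here is illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* i915_gem_load() seeds the software seqno:
	 * (u32)~0 - 0x1100 == 0xffffeeff */
	uint32_t next_seqno = (uint32_t)~0 - 0x1100;

	/* every i915_gem_init_hw() pass bumps the hardware seqno by
	 * 0x100, leaving a visible break after each reset */
	for (int init = 0; init < 3; init++) {
		next_seqno += 0x100;
		printf("init %d: seqno base 0x%08x\n", init, next_seqno);
	}

	/* the first pass therefore lands at (u32)~0 - 0x1000, so the
	 * 32-bit wraparound still happens after ~0x1000 requests and
	 * gets exercised early in the driver's life */
	return 0;
}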

Issue: VIZ-4798
Signed-off-by: Nick Hoath <nicholas.hoath@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: John Harrison <john.c.harrison@intel.com>
Cc: David Gordon <david.s.gordon@intel.com>
Cc: Thomas Daniel <thomas.daniel@intel.com>
Reviewed-by: Thomas Daniel <thomas.daniel@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 87bcdd2e

drivers/gpu/drm/i915/i915_drv.h  +0 −1
@@ -890,7 +890,6 @@ struct intel_context {
 	} legacy_hw_ctx;
 
 	/* Execlists */
-	bool rcs_initialized;
 	struct {
 		struct drm_i915_gem_object *state;
 		struct intel_ringbuffer *ringbuf;

drivers/gpu/drm/i915/i915_gem.c  +16 −6
@@ -4609,14 +4609,8 @@ int i915_gem_init_rings(struct drm_device *dev)
 			goto cleanup_vebox_ring;
 	}
 
-	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
-	if (ret)
-		goto cleanup_bsd2_ring;
-
 	return 0;
 
-cleanup_bsd2_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
 	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
@@ -4705,6 +4699,14 @@ i915_gem_init_hw(struct drm_device *dev)
 		}
 	}
 
+	/*
+	 * Increment the next seqno by 0x100 so we have a visible break
+	 * on re-initialisation
+	 */
+	ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
+	if (ret)
+		goto out;
+
 	/* Now it is safe to go back round and do everything else: */
 	for_each_ring(ring, dev_priv, i) {
 		struct drm_i915_gem_request *req;
@@ -4906,6 +4908,14 @@ i915_gem_load(struct drm_device *dev)
 		dev_priv->num_fence_regs =
 				I915_READ(vgtif_reg(avail_rs.fence_num));
 
+	/*
+	 * Set initial sequence number for requests.
+	 * Using this number allows the wraparound to happen early,
+	 * catching any obvious problems.
+	 */
+	dev_priv->next_seqno = ((u32)~0 - 0x1100);
+	dev_priv->last_seqno = ((u32)~0 - 0x1101);
+
 	/* Initialize fence registers to zero */
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	i915_gem_restore_fences(dev);

drivers/gpu/drm/i915/i915_gem_execbuffer.c  +1 −1
@@ -1009,7 +1009,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 	}
 
 	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
-		int ret = intel_lr_context_deferred_create(ctx, ring);
+		int ret = intel_lr_context_deferred_alloc(ctx, ring);
 		if (ret) {
 			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
 			return ERR_PTR(ret);

drivers/gpu/drm/i915/intel_lrc.c  +80 −84
@@ -221,6 +221,9 @@ enum {
 #define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT  0x17
 
 static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
+		struct drm_i915_gem_object *default_ctx_obj);
+
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -1020,20 +1023,19 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
+static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
+		struct drm_i915_gem_object *ctx_obj,
+		struct intel_ringbuffer *ringbuf)
 {
-	struct drm_i915_private *dev_priv = rq->i915;
-	struct intel_engine_cs *ring = rq->ring;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = rq->ringbuf;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
 
 	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
 	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
 			PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
 	if (ret)
-		goto reset_pin_count;
+		return ret;
 
 	ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
 	if (ret)
@@ -1044,15 +1046,31 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 	/* Invalidate GuC TLB. */
 	if (i915.enable_guc_submission)
 		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
-	}
 
 	return ret;
 
unpin_ctx_obj:
 	i915_gem_object_ggtt_unpin(ctx_obj);
+
+	return ret;
+}
+
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
+{
+	int ret = 0;
+	struct intel_engine_cs *ring = rq->ring;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = rq->ringbuf;
+
+	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
+		ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
+		if (ret)
+			goto reset_pin_count;
+	}
+	return ret;
+
reset_pin_count:
 	rq->ctx->engine[ring->id].pin_count = 0;
-
 	return ret;
 }
@@ -1462,6 +1480,9 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	lrc_setup_hardware_status_page(ring,
+				ring->default_context->engine[ring->id].state);
+
 	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
 	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1901,7 +1922,21 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 	if (ret)
 		return ret;
 
-	ret = intel_lr_context_deferred_create(ring->default_context, ring);
+	ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
+	if (ret)
+		return ret;
+
+	/* As this is the default context, always pin it */
+	ret = intel_lr_context_do_pin(
+			ring,
+			ring->default_context->engine[ring->id].state,
+			ring->default_context->engine[ring->id].ringbuf);
+	if (ret) {
+		DRM_ERROR(
+			"Failed to pin and map ringbuffer %s: %d\n",
+			ring->name, ret);
+		return ret;
+	}
 
 	return ret;
 }
@@ -2124,14 +2159,8 @@ int intel_logical_rings_init(struct drm_device *dev)
 			goto cleanup_vebox_ring;
 	}
 
-	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
-	if (ret)
-		goto cleanup_bsd2_ring;
-
 	return 0;
 
-cleanup_bsd2_ring:
-	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
 	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring:
@@ -2401,7 +2430,7 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
 }
 
 /**
- * intel_lr_context_deferred_create() - create the LRC specific bits of a context
+ * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
  * @ctx: LR context to create.
  * @ring: engine to be used with the context.
  *
@@ -2413,12 +2442,11 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
  *
  * Return: non-zero on error.
  */
-int intel_lr_context_deferred_create(struct intel_context *ctx,
+
+int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 				     struct intel_engine_cs *ring)
 {
-	const bool is_global_default_ctx = (ctx == ring->default_context);
 	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *ctx_obj;
 	uint32_t context_size;
 	struct intel_ringbuffer *ringbuf;
@@ -2438,82 +2466,50 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 		return -ENOMEM;
 	}
 
-	if (is_global_default_ctx) {
-		ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
-				PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
-		if (ret) {
-			DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
-					ret);
-			drm_gem_object_unreference(&ctx_obj->base);
-			return ret;
-		}
-
-		/* Invalidate GuC TLB. */
-		if (i915.enable_guc_submission)
-			I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
-	}
-
 	ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
 	if (IS_ERR(ringbuf)) {
 		ret = PTR_ERR(ringbuf);
-		goto error_unpin_ctx;
+		goto error_deref_obj;
 	}
 
-	if (is_global_default_ctx) {
-		ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
-		if (ret) {
-			DRM_ERROR(
-				  "Failed to pin and map ringbuffer %s: %d\n",
-				  ring->name, ret);
-			goto error_ringbuf;
-		}
-	}
-
 	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
-		goto error;
+		goto error_ringbuf;
 	}
 
 	ctx->engine[ring->id].ringbuf = ringbuf;
 	ctx->engine[ring->id].state = ctx_obj;
 
-	if (ctx == ring->default_context)
-		lrc_setup_hardware_status_page(ring, ctx_obj);
-	else if (ring->id == RCS && !ctx->rcs_initialized) {
-		if (ring->init_context) {
+	if (ctx != ring->default_context && ring->init_context) {
 		struct drm_i915_gem_request *req;
 
-		ret = i915_gem_request_alloc(ring, ctx, &req);
-		if (ret)
-			return ret;
+		ret = i915_gem_request_alloc(ring,
+			ctx, &req);
+		if (ret) {
+			DRM_ERROR("ring create req: %d\n",
+				ret);
+			i915_gem_request_cancel(req);
+			goto error_ringbuf;
+		}
 
 		ret = ring->init_context(req);
 		if (ret) {
-			DRM_ERROR("ring init context: %d\n", ret);
+			DRM_ERROR("ring init context: %d\n",
+				ret);
 			i915_gem_request_cancel(req);
-			ctx->engine[ring->id].ringbuf = NULL;
-			ctx->engine[ring->id].state = NULL;
-			goto error;
+			goto error_ringbuf;
 		}
 
 		i915_add_request_no_flush(req);
-		}
-
-		ctx->rcs_initialized = true;
 	}
 
 	return 0;
 
-error:
-	if (is_global_default_ctx)
-		intel_unpin_ringbuffer_obj(ringbuf);
error_ringbuf:
 	intel_ringbuffer_free(ringbuf);
-error_unpin_ctx:
-	if (is_global_default_ctx)
-		i915_gem_object_ggtt_unpin(ctx_obj);
+error_deref_obj:
 	drm_gem_object_unreference(&ctx_obj->base);
+	ctx->engine[ring->id].ringbuf = NULL;
+	ctx->engine[ring->id].state = NULL;
 	return ret;
 }

drivers/gpu/drm/i915/intel_lrc.h  +2 −2
@@ -75,7 +75,7 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
 #define LRC_STATE_PN	(LRC_PPHWSP_PN + 1)
 
 void intel_lr_context_free(struct intel_context *ctx);
-int intel_lr_context_deferred_create(struct intel_context *ctx,
+int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 				    struct intel_engine_cs *ring);
 void intel_lr_context_unpin(struct drm_i915_gem_request *req);
 void intel_lr_context_reset(struct drm_device *dev,
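
One pattern worth calling out from the intel_lrc.c changes above: pinning
is now split into a guarded wrapper with refcount semantics and an
unguarded worker, so logical_ring_init() can pin the default context
unconditionally while request paths keep the 0 -> 1 transition check. A
minimal sketch of that shape, with hypothetical names (pinnable,
do_pin_hw, pin_guarded) standing in for the driver's
intel_lr_context_do_pin()/intel_lr_context_pin():

struct pinnable {
	int pin_count;
};

/* worker: always performs the pin, no refcount logic */
static int do_pin_hw(struct pinnable *p)
{
	(void)p;	/* GGTT pin + ringbuffer map would go here */
	return 0;
}

/* wrapper: only the 0 -> 1 transition reaches the hardware */
static int pin_guarded(struct pinnable *p)
{
	int ret = 0;

	if (p->pin_count++ == 0) {
		ret = do_pin_hw(p);
		if (ret)
			p->pin_count = 0;	/* roll back on failure */
	}
	return ret;
}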