Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0bc40be8 authored by Tvrtko Ursulin
Browse files

drm/i915: Rename intel_engine_cs function parameters



@@
identifier func;
@@
func(..., struct intel_engine_cs *
- ring
+ engine
, ...)
{
<...
- ring
+ engine
...>
}
@@
identifier func;
type T;
@@
T func(..., struct intel_engine_cs *
- ring
+ engine
, ...);

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
parent e2f80391
Loading
Loading
Loading
Loading
+61 −61
Original line number Diff line number Diff line
@@ -555,7 +555,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
	return 0;
}

static bool validate_cmds_sorted(struct intel_engine_cs *ring,
static bool validate_cmds_sorted(struct intel_engine_cs *engine,
				 const struct drm_i915_cmd_table *cmd_tables,
				 int cmd_table_count)
{
@@ -577,7 +577,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring,

			if (curr < previous) {
				DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
					  ring->id, i, j, curr, previous);
					  engine->id, i, j, curr, previous);
				ret = false;
			}

@@ -611,11 +611,11 @@ static bool check_sorted(int ring_id,
	return ret;
}

static bool validate_regs_sorted(struct intel_engine_cs *ring)
static bool validate_regs_sorted(struct intel_engine_cs *engine)
{
	return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
		check_sorted(ring->id, ring->master_reg_table,
			     ring->master_reg_count);
	return check_sorted(engine->id, engine->reg_table, engine->reg_count) &&
		check_sorted(engine->id, engine->master_reg_table,
			     engine->master_reg_count);
}

struct cmd_node {
@@ -639,13 +639,13 @@ struct cmd_node {
 */
#define CMD_HASH_MASK STD_MI_OPCODE_MASK

static int init_hash_table(struct intel_engine_cs *ring,
static int init_hash_table(struct intel_engine_cs *engine,
			   const struct drm_i915_cmd_table *cmd_tables,
			   int cmd_table_count)
{
	int i, j;

	hash_init(ring->cmd_hash);
	hash_init(engine->cmd_hash);

	for (i = 0; i < cmd_table_count; i++) {
		const struct drm_i915_cmd_table *table = &cmd_tables[i];
@@ -660,7 +660,7 @@ static int init_hash_table(struct intel_engine_cs *ring,
				return -ENOMEM;

			desc_node->desc = desc;
			hash_add(ring->cmd_hash, &desc_node->node,
			hash_add(engine->cmd_hash, &desc_node->node,
				 desc->cmd.value & CMD_HASH_MASK);
		}
	}
@@ -668,13 +668,13 @@ static int init_hash_table(struct intel_engine_cs *ring,
	return 0;
}

static void fini_hash_table(struct intel_engine_cs *ring)
static void fini_hash_table(struct intel_engine_cs *engine)
{
	struct hlist_node *tmp;
	struct cmd_node *desc_node;
	int i;

	hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
	hash_for_each_safe(engine->cmd_hash, i, tmp, desc_node, node) {
		hash_del(&desc_node->node);
		kfree(desc_node);
	}
@@ -690,18 +690,18 @@ static void fini_hash_table(struct intel_engine_cs *ring)
 *
 * Return: non-zero if initialization fails
 */
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
{
	const struct drm_i915_cmd_table *cmd_tables;
	int cmd_table_count;
	int ret;

	if (!IS_GEN7(ring->dev))
	if (!IS_GEN7(engine->dev))
		return 0;

	switch (ring->id) {
	switch (engine->id) {
	case RCS:
		if (IS_HASWELL(ring->dev)) {
		if (IS_HASWELL(engine->dev)) {
			cmd_tables = hsw_render_ring_cmds;
			cmd_table_count =
				ARRAY_SIZE(hsw_render_ring_cmds);
@@ -710,26 +710,26 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
			cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
		}

		ring->reg_table = gen7_render_regs;
		ring->reg_count = ARRAY_SIZE(gen7_render_regs);
		engine->reg_table = gen7_render_regs;
		engine->reg_count = ARRAY_SIZE(gen7_render_regs);

		if (IS_HASWELL(ring->dev)) {
			ring->master_reg_table = hsw_master_regs;
			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
		if (IS_HASWELL(engine->dev)) {
			engine->master_reg_table = hsw_master_regs;
			engine->master_reg_count = ARRAY_SIZE(hsw_master_regs);
		} else {
			ring->master_reg_table = ivb_master_regs;
			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
			engine->master_reg_table = ivb_master_regs;
			engine->master_reg_count = ARRAY_SIZE(ivb_master_regs);
		}

		ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
		engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
		break;
	case VCS:
		cmd_tables = gen7_video_cmds;
		cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		break;
	case BCS:
		if (IS_HASWELL(ring->dev)) {
		if (IS_HASWELL(engine->dev)) {
			cmd_tables = hsw_blt_ring_cmds;
			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
		} else {
@@ -737,44 +737,44 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
			cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
		}

		ring->reg_table = gen7_blt_regs;
		ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
		engine->reg_table = gen7_blt_regs;
		engine->reg_count = ARRAY_SIZE(gen7_blt_regs);

		if (IS_HASWELL(ring->dev)) {
			ring->master_reg_table = hsw_master_regs;
			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
		if (IS_HASWELL(engine->dev)) {
			engine->master_reg_table = hsw_master_regs;
			engine->master_reg_count = ARRAY_SIZE(hsw_master_regs);
		} else {
			ring->master_reg_table = ivb_master_regs;
			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
			engine->master_reg_table = ivb_master_regs;
			engine->master_reg_count = ARRAY_SIZE(ivb_master_regs);
		}

		ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
		engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
		break;
	case VECS:
		cmd_tables = hsw_vebox_cmds;
		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
		/* VECS can use the same length_mask function as VCS */
		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		break;
	default:
		DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
			  ring->id);
			  engine->id);
		BUG();
	}

	BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
	BUG_ON(!validate_regs_sorted(ring));
	BUG_ON(!validate_cmds_sorted(engine, cmd_tables, cmd_table_count));
	BUG_ON(!validate_regs_sorted(engine));

	WARN_ON(!hash_empty(ring->cmd_hash));
	WARN_ON(!hash_empty(engine->cmd_hash));

	ret = init_hash_table(ring, cmd_tables, cmd_table_count);
	ret = init_hash_table(engine, cmd_tables, cmd_table_count);
	if (ret) {
		DRM_ERROR("CMD: cmd_parser_init failed!\n");
		fini_hash_table(ring);
		fini_hash_table(engine);
		return ret;
	}

	ring->needs_cmd_parser = true;
	engine->needs_cmd_parser = true;

	return 0;
}
@@ -786,21 +786,21 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
 * Releases any resources related to command parsing that may have been
 * initialized for the specified ring.
 */
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine)
{
	if (!ring->needs_cmd_parser)
	if (!engine->needs_cmd_parser)
		return;

	fini_hash_table(ring);
	fini_hash_table(engine);
}

static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(struct intel_engine_cs *ring,
find_cmd_in_table(struct intel_engine_cs *engine,
		  u32 cmd_header)
{
	struct cmd_node *desc_node;

	hash_for_each_possible(ring->cmd_hash, desc_node, node,
	hash_for_each_possible(engine->cmd_hash, desc_node, node,
			       cmd_header & CMD_HASH_MASK) {
		const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
		u32 masked_cmd = desc->cmd.mask & cmd_header;
@@ -822,18 +822,18 @@ find_cmd_in_table(struct intel_engine_cs *ring,
 * ring's default length encoding and returns default_desc.
 */
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_engine_cs *ring,
find_cmd(struct intel_engine_cs *engine,
	 u32 cmd_header,
	 struct drm_i915_cmd_descriptor *default_desc)
{
	const struct drm_i915_cmd_descriptor *desc;
	u32 mask;

	desc = find_cmd_in_table(ring, cmd_header);
	desc = find_cmd_in_table(engine, cmd_header);
	if (desc)
		return desc;

	mask = ring->get_cmd_length_mask(cmd_header);
	mask = engine->get_cmd_length_mask(cmd_header);
	if (!mask)
		return NULL;

@@ -963,18 +963,18 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
 *
 * Return: true if the ring requires software command parsing
 */
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
{
	if (!ring->needs_cmd_parser)
	if (!engine->needs_cmd_parser)
		return false;

	if (!USES_PPGTT(ring->dev))
	if (!USES_PPGTT(engine->dev))
		return false;

	return (i915.enable_cmd_parser == 1);
}

static bool check_cmd(const struct intel_engine_cs *ring,
static bool check_cmd(const struct intel_engine_cs *engine,
		      const struct drm_i915_cmd_descriptor *desc,
		      const u32 *cmd, u32 length,
		      const bool is_master,
@@ -1004,17 +1004,17 @@ static bool check_cmd(const struct intel_engine_cs *ring,
		     offset += step) {
			const u32 reg_addr = cmd[offset] & desc->reg.mask;
			const struct drm_i915_reg_descriptor *reg =
				find_reg(ring->reg_table, ring->reg_count,
				find_reg(engine->reg_table, engine->reg_count,
					 reg_addr);

			if (!reg && is_master)
				reg = find_reg(ring->master_reg_table,
					       ring->master_reg_count,
				reg = find_reg(engine->master_reg_table,
					       engine->master_reg_count,
					       reg_addr);

			if (!reg) {
				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
						 reg_addr, *cmd, ring->id);
						 reg_addr, *cmd, engine->id);
				return false;
			}

@@ -1087,7 +1087,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
						 *cmd,
						 desc->bits[i].mask,
						 desc->bits[i].expected,
						 dword, ring->id);
						 dword, engine->id);
				return false;
			}
		}
@@ -1113,7 +1113,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
 * if the batch appears legal but should use hardware parsing
 */
int i915_parse_cmds(struct intel_engine_cs *ring,
int i915_parse_cmds(struct intel_engine_cs *engine,
		    struct drm_i915_gem_object *batch_obj,
		    struct drm_i915_gem_object *shadow_batch_obj,
		    u32 batch_start_offset,
@@ -1147,7 +1147,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
		if (*cmd == MI_BATCH_BUFFER_END)
			break;

		desc = find_cmd(ring, *cmd, &default_desc);
		desc = find_cmd(engine, *cmd, &default_desc);
		if (!desc) {
			DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
					 *cmd);
@@ -1179,7 +1179,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
			break;
		}

		if (!check_cmd(ring, desc, cmd, length, is_master,
		if (!check_cmd(engine, desc, cmd, length, is_master,
			       &oacontrol_set)) {
			ret = -EINVAL;
			break;
+8 −8
Original line number Diff line number Diff line
@@ -725,11 +725,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *ring)
				 struct intel_engine_cs *engine)
{
	if (ring->get_seqno) {
	if (engine->get_seqno) {
		seq_printf(m, "Current sequence (%s): %x\n",
			   ring->name, ring->get_seqno(ring, false));
			   engine->name, engine->get_seqno(engine, false));
	}
}

@@ -1992,22 +1992,22 @@ static int i915_context_status(struct seq_file *m, void *unused)

static void i915_dump_lrc_obj(struct seq_file *m,
			      struct intel_context *ctx,
			      struct intel_engine_cs *ring)
			      struct intel_engine_cs *engine)
{
	struct page *page;
	uint32_t *reg_state;
	int j;
	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
	unsigned long ggtt_offset = 0;

	if (ctx_obj == NULL) {
		seq_printf(m, "Context on %s with no gem object\n",
			   ring->name);
			   engine->name);
		return;
	}

	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
		   intel_execlists_ctx_id(ctx, ring));
	seq_printf(m, "CONTEXT: %s %u\n", engine->name,
		   intel_execlists_ctx_id(ctx, engine));

	if (!i915_gem_obj_ggtt_bound(ctx_obj))
		seq_puts(m, "\tNot bound in GGTT\n");
+9 −9
Original line number Diff line number Diff line
@@ -2964,10 +2964,10 @@ int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring);
i915_gem_find_active_request(struct intel_engine_cs *engine);

bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);

@@ -3297,10 +3297,10 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
int i915_parse_cmds(struct intel_engine_cs *ring,
int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
int i915_parse_cmds(struct intel_engine_cs *engine,
		    struct drm_i915_gem_object *batch_obj,
		    struct drm_i915_gem_object *shadow_batch_obj,
		    u32 batch_start_offset,
@@ -3571,11 +3571,11 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
	}
}

static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
				      struct drm_i915_gem_request *req)
{
	if (ring->trace_irq_req == NULL && ring->irq_get(ring))
		i915_gem_request_assign(&ring->trace_irq_req, req);
	if (engine->trace_irq_req == NULL && engine->irq_get(engine))
		i915_gem_request_assign(&engine->trace_irq_req, req);
}

#endif
+43 −43
Original line number Diff line number Diff line
@@ -1141,9 +1141,9 @@ static void fake_irq(unsigned long data)
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_engine_cs *ring)
		       struct intel_engine_cs *engine)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
	return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
}

static unsigned long local_clock_us(unsigned *cpu)
@@ -2689,11 +2689,11 @@ void i915_gem_request_free(struct kref *req_ref)
}

static inline int
__i915_gem_request_alloc(struct intel_engine_cs *ring,
__i915_gem_request_alloc(struct intel_engine_cs *engine,
			 struct intel_context *ctx,
			 struct drm_i915_gem_request **req_out)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);
	struct drm_i915_private *dev_priv = to_i915(engine->dev);
	struct drm_i915_gem_request *req;
	int ret;

@@ -2706,13 +2706,13 @@ __i915_gem_request_alloc(struct intel_engine_cs *ring,
	if (req == NULL)
		return -ENOMEM;

	ret = i915_gem_get_seqno(ring->dev, &req->seqno);
	ret = i915_gem_get_seqno(engine->dev, &req->seqno);
	if (ret)
		goto err;

	kref_init(&req->ref);
	req->i915 = dev_priv;
	req->ring = ring;
	req->ring = engine;
	req->ctx  = ctx;
	i915_gem_context_reference(req->ctx);

@@ -2787,11 +2787,11 @@ void i915_gem_request_cancel(struct drm_i915_gem_request *req)
}

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring)
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;

	list_for_each_entry(request, &ring->request_list, list) {
	list_for_each_entry(request, &engine->request_list, list) {
		if (i915_gem_request_completed(request, false))
			continue;

@@ -2802,37 +2802,37 @@ i915_gem_find_active_request(struct intel_engine_cs *ring)
}

static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
				       struct intel_engine_cs *ring)
				       struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;
	bool ring_hung;

	request = i915_gem_find_active_request(ring);
	request = i915_gem_find_active_request(engine);

	if (request == NULL)
		return;

	ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;

	i915_set_reset_status(dev_priv, request->ctx, ring_hung);

	list_for_each_entry_continue(request, &ring->request_list, list)
	list_for_each_entry_continue(request, &engine->request_list, list)
		i915_set_reset_status(dev_priv, request->ctx, false);
}

static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *ring)
					struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *buffer;

	while (!list_empty(&ring->active_list)) {
	while (!list_empty(&engine->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
		obj = list_first_entry(&engine->active_list,
				       struct drm_i915_gem_object,
				       ring_list[ring->id]);
				       ring_list[engine->id]);

		i915_gem_object_retire__read(obj, ring->id);
		i915_gem_object_retire__read(obj, engine->id);
	}

	/*
@@ -2842,14 +2842,14 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
	 */

	if (i915.enable_execlists) {
		spin_lock_irq(&ring->execlist_lock);
		spin_lock_irq(&engine->execlist_lock);

		/* list_splice_tail_init checks for empty lists */
		list_splice_tail_init(&ring->execlist_queue,
				      &ring->execlist_retired_req_list);
		list_splice_tail_init(&engine->execlist_queue,
				      &engine->execlist_retired_req_list);

		spin_unlock_irq(&ring->execlist_lock);
		intel_execlists_retire_requests(ring);
		spin_unlock_irq(&engine->execlist_lock);
		intel_execlists_retire_requests(engine);
	}

	/*
@@ -2859,10 +2859,10 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
	 * implicit references on things like e.g. ppgtt address spaces through
	 * the request.
	 */
	while (!list_empty(&ring->request_list)) {
	while (!list_empty(&engine->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
		request = list_first_entry(&engine->request_list,
					   struct drm_i915_gem_request,
					   list);

@@ -2876,7 +2876,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
	 * upon reset is less than when we start. Do one more pass over
	 * all the ringbuffers to reset last_retired_head.
	 */
	list_for_each_entry(buffer, &ring->buffers, link) {
	list_for_each_entry(buffer, &engine->buffers, link) {
		buffer->last_retired_head = buffer->tail;
		intel_ring_update_space(buffer);
	}
@@ -2910,19 +2910,19 @@ void i915_gem_reset(struct drm_device *dev)
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
{
	WARN_ON(i915_verify_lists(ring->dev));
	WARN_ON(i915_verify_lists(engine->dev));

	/* Retire requests first as we use it above for the early return.
	 * If we retire requests last, we may use a later seqno and so clear
	 * the requests lists without clearing the active list, leading to
	 * confusion.
	 */
	while (!list_empty(&ring->request_list)) {
	while (!list_empty(&engine->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
		request = list_first_entry(&engine->request_list,
					   struct drm_i915_gem_request,
					   list);

@@ -2936,26 +2936,26 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
	 * by the ringbuffer to the flushing/inactive lists as appropriate,
	 * before we free the context associated with the requests.
	 */
	while (!list_empty(&ring->active_list)) {
	while (!list_empty(&engine->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
		obj = list_first_entry(&engine->active_list,
				       struct drm_i915_gem_object,
				      ring_list[ring->id]);
				       ring_list[engine->id]);

		if (!list_empty(&obj->last_read_req[ring->id]->list))
		if (!list_empty(&obj->last_read_req[engine->id]->list))
			break;

		i915_gem_object_retire__read(obj, ring->id);
		i915_gem_object_retire__read(obj, engine->id);
	}

	if (unlikely(ring->trace_irq_req &&
		     i915_gem_request_completed(ring->trace_irq_req, true))) {
		ring->irq_put(ring);
		i915_gem_request_assign(&ring->trace_irq_req, NULL);
	if (unlikely(engine->trace_irq_req &&
		     i915_gem_request_completed(engine->trace_irq_req, true))) {
		engine->irq_put(engine);
		i915_gem_request_assign(&engine->trace_irq_req, NULL);
	}

	WARN_ON(i915_verify_lists(ring->dev));
	WARN_ON(i915_verify_lists(engine->dev));
}

bool
@@ -5022,10 +5022,10 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
}

static void
init_ring_lists(struct intel_engine_cs *ring)
init_ring_lists(struct intel_engine_cs *engine)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&engine->active_list);
	INIT_LIST_HEAD(&engine->request_list);
}

void
+11 −11
Original line number Diff line number Diff line
@@ -600,7 +600,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
	return ret;
}

static inline bool should_skip_switch(struct intel_engine_cs *ring,
static inline bool should_skip_switch(struct intel_engine_cs *engine,
				      struct intel_context *from,
				      struct intel_context *to)
{
@@ -608,42 +608,42 @@ static inline bool should_skip_switch(struct intel_engine_cs *ring,
		return false;

	if (to->ppgtt && from == to &&
	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
	    !(intel_ring_flag(engine) & to->ppgtt->pd_dirty_rings))
		return true;

	return false;
}

static bool
needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_private *dev_priv = engine->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (INTEL_INFO(ring->dev)->gen < 8)
	if (INTEL_INFO(engine->dev)->gen < 8)
		return true;

	if (ring != &dev_priv->ring[RCS])
	if (engine != &dev_priv->ring[RCS])
		return true;

	return false;
}

static bool
needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
		   u32 hw_flags)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_private *dev_priv = engine->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (!IS_GEN8(ring->dev))
	if (!IS_GEN8(engine->dev))
		return false;

	if (ring != &dev_priv->ring[RCS])
	if (engine != &dev_priv->ring[RCS])
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
Loading