Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 666796da authored by Tvrtko Ursulin
Browse files

drm/i915: More intel_engine_cs renaming



Some trivial ones, first pass done with Coccinelle:

@@
@@
(
- I915_NUM_RINGS
+ I915_NUM_ENGINES
|
- intel_ring_flag
+ intel_engine_flag
|
- for_each_ring
+ for_each_engine
|
- i915_gem_request_get_ring
+ i915_gem_request_get_engine
|
- intel_ring_idle
+ intel_engine_idle
|
- i915_gem_reset_ring_status
+ i915_gem_reset_engine_status
|
- i915_gem_reset_ring_cleanup
+ i915_gem_reset_engine_cleanup
|
- init_ring_lists
+ init_engine_lists
)

But that didn't fully work so I cleaned it up with:

for f in *.[hc]; do sed -i -e s/I915_NUM_RINGS/I915_NUM_ENGINES/ $f; done
for f in *.[hc]; do sed -i -e s/i915_gem_request_get_ring/i915_gem_request_get_engine/ $f; done
for f in *.[hc]; do sed -i -e s/intel_ring_flag/intel_engine_flag/ $f; done
for f in *.[hc]; do sed -i -e s/intel_ring_idle/intel_engine_idle/ $f; done
for f in *.[hc]; do sed -i -e s/init_ring_lists/init_engine_lists/ $f; done
for f in *.[hc]; do sed -i -e s/i915_gem_reset_ring_cleanup/i915_gem_reset_engine_cleanup/ $f; done
for f in *.[hc]; do sed -i -e s/i915_gem_reset_ring_status/i915_gem_reset_engine_status/ $f; done

v2: Rebase.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 4a570db5
Loading
Loading
Loading
Loading
+26 −26
Original line number Diff line number Diff line
@@ -143,7 +143,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain);
	for_each_ring(engine, dev_priv, i)
	for_each_engine(engine, dev_priv, i)
		seq_printf(m, "%x ",
				i915_gem_request_get_seqno(obj->last_read_req[i]));
	seq_printf(m, "] %x %x%s%s%s",
@@ -184,7 +184,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
	}
	if (obj->last_write_req != NULL)
		seq_printf(m, " (%s)",
			   i915_gem_request_get_ring(obj->last_write_req)->name);
			   i915_gem_request_get_engine(obj->last_write_req)->name);
	if (obj->frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
@@ -402,7 +402,7 @@ static void print_batch_pool_stats(struct seq_file *m,

	memset(&stats, 0, sizeof(stats));

	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
@@ -591,7 +591,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
					   pipe, plane);
			}
			if (work->flip_queued_req) {
				struct intel_engine_cs *engine = i915_gem_request_get_ring(work->flip_queued_req);
				struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);

				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
					   engine->name,
@@ -644,7 +644,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
	if (ret)
		return ret;

	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

@@ -689,7 +689,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
		return ret;

	any = 0;
	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		int count;

		count = 0;
@@ -746,7 +746,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_ring(engine, dev_priv, i)
	for_each_engine(engine, dev_priv, i)
		i915_ring_seqno_info(m, engine);

	intel_runtime_pm_put(dev_priv);
@@ -933,7 +933,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
@@ -1331,8 +1331,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_RINGS];
	u32 seqno[I915_NUM_RINGS];
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	u32 instdone[I915_NUM_INSTDONE_REG];
	int i, j;

@@ -1343,7 +1343,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)

	intel_runtime_pm_get(dev_priv);

	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		seqno[i] = engine->get_seqno(engine, false);
		acthd[i] = intel_ring_get_active_head(engine);
	}
@@ -1359,7 +1359,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
	} else
		seq_printf(m, "Hangcheck inactive\n");

	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x]\n",
			   engine->hangcheck.seqno, seqno[i]);
@@ -1965,7 +1965,7 @@ static int i915_context_status(struct seq_file *m, void *unused)

		if (i915.enable_execlists) {
			seq_putc(m, '\n');
			for_each_ring(engine, dev_priv, i) {
			for_each_engine(engine, dev_priv, i) {
				struct drm_i915_gem_object *ctx_obj =
					ctx->engine[i].state;
				struct intel_ringbuffer *ringbuf =
@@ -2055,7 +2055,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)

	list_for_each_entry(ctx, &dev_priv->context_list, link)
		if (ctx != dev_priv->kernel_context)
			for_each_ring(engine, dev_priv, i)
			for_each_engine(engine, dev_priv, i)
				i915_dump_lrc_obj(m, ctx, engine);

	mutex_unlock(&dev->struct_mutex);
@@ -2089,7 +2089,7 @@ static int i915_execlists(struct seq_file *m, void *data)

	intel_runtime_pm_get(dev_priv);

	for_each_ring(engine, dev_priv, ring_id) {
	for_each_engine(engine, dev_priv, ring_id) {
		struct drm_i915_gem_request *head_req = NULL;
		int count = 0;
		unsigned long flags;
@@ -2253,7 +2253,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
	if (!ppgtt)
		return;

	for_each_ring(engine, dev_priv, unused) {
	for_each_engine(engine, dev_priv, unused) {
		seq_printf(m, "%s\n", engine->name);
		for (i = 0; i < 4; i++) {
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -2273,7 +2273,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		seq_printf(m, "%s\n", engine->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n",
@@ -2342,7 +2342,7 @@ static int count_irq_waiters(struct drm_i915_private *i915)
	int count = 0;
	int i;

	for_each_ring(engine, i915, i)
	for_each_engine(engine, i915, i)
		count += engine->irq_refcount;

	return count;
@@ -2466,7 +2466,7 @@ static void i915_guc_client_info(struct seq_file *m,
	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
	seq_printf(m, "\tLast submission result: %d\n", client->retcode);

	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		seq_printf(m, "\tSubmissions: %llu %s\n",
				client->submissions[engine->guc_id],
				engine->name);
@@ -2506,7 +2506,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);

	seq_printf(m, "\nGuC submissions:\n");
	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
			engine->name, guc.submissions[engine->guc_id],
			guc.last_seqno[engine->guc_id]);
@@ -3153,14 +3153,14 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);

		seqno = (uint64_t *)kmap_atomic(page);
		for_each_ring(engine, dev_priv, i) {
		for_each_engine(engine, dev_priv, i) {
			uint64_t offset;

			seq_printf(m, "%s\n", engine->name);

			seq_puts(m, "  Last signal:");
			for (j = 0; j < num_rings; j++) {
				offset = i * I915_NUM_RINGS + j;
				offset = i * I915_NUM_ENGINES + j;
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
@@ -3168,7 +3168,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)

			seq_puts(m, "  Last wait:  ");
			for (j = 0; j < num_rings; j++) {
				offset = i + (j * I915_NUM_RINGS);
				offset = i + (j * I915_NUM_ENGINES);
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
@@ -3178,7 +3178,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
		kunmap_atomic(seqno);
	} else {
		seq_puts(m, "  Last signal:");
		for_each_ring(engine, dev_priv, i)
		for_each_engine(engine, dev_priv, i)
			for (j = 0; j < num_rings; j++)
				seq_printf(m, "0x%08x\n",
					   I915_READ(engine->semaphore.mbox.signal[j]));
@@ -3186,7 +3186,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
	}

	seq_puts(m, "\nSync seqno:\n");
	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		for (j = 0; j < num_rings; j++) {
			seq_printf(m, "  0x%08x ",
				   engine->semaphore.sync_seqno[j]);
@@ -3244,7 +3244,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
	for_each_ring(engine, dev_priv, i)
	for_each_engine(engine, dev_priv, i)
		seq_printf(m, "HW whitelist count for %s: %d\n",
			   engine->name, workarounds->hw_whitelist_count[i]);
	for (i = 0; i < workarounds->count; ++i) {
+13 −13
Original line number Diff line number Diff line
@@ -459,7 +459,7 @@ struct drm_i915_error_state {
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 semaphore_seqno[I915_NUM_RINGS - 1];
		u32 semaphore_seqno[I915_NUM_ENGINES - 1];

		/* Register state */
		u32 start;
@@ -479,7 +479,7 @@ struct drm_i915_error_state {
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_RINGS - 1];
		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];

		struct drm_i915_error_object {
			int page_count;
@@ -505,12 +505,12 @@ struct drm_i915_error_state {

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} ring[I915_NUM_RINGS];
	} ring[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_RINGS], wseqno;
		u32 rseqno[I915_NUM_ENGINES], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
@@ -824,7 +824,7 @@ struct intel_context {
		struct i915_vma *lrc_vma;
		u64 lrc_desc;
		uint32_t *lrc_reg_state;
	} engine[I915_NUM_RINGS];
	} engine[I915_NUM_ENGINES];

	struct list_head link;
};
@@ -1639,7 +1639,7 @@ struct i915_wa_reg {
struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
	u32 hw_whitelist_count[I915_NUM_RINGS];
	u32 hw_whitelist_count[I915_NUM_ENGINES];
};

struct i915_virtual_gpu {
@@ -1704,7 +1704,7 @@ struct drm_i915_private {
	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_engine_cs engine[I915_NUM_RINGS];
	struct intel_engine_cs engine[I915_NUM_ENGINES];
	struct drm_i915_gem_object *semaphore_obj;
	uint32_t last_seqno, next_seqno;

@@ -1967,8 +1967,8 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
}

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
#define for_each_engine(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_ENGINES; (i__)++) \
		for_each_if ((((ring__) = &(dev_priv__)->engine[(i__)]), intel_ring_initialized((ring__))))

enum hdmi_force_audio {
@@ -2039,7 +2039,7 @@ struct drm_i915_gem_object {
	struct drm_mm_node *stolen;
	struct list_head global_list;

	struct list_head ring_list[I915_NUM_RINGS];
	struct list_head ring_list[I915_NUM_ENGINES];
	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;

@@ -2050,7 +2050,7 @@ struct drm_i915_gem_object {
	 * rendering and so a non-zero seqno), and is not set if it i s on
	 * inactive (ready to be unbound) list.
	 */
	unsigned int active:I915_NUM_RINGS;
	unsigned int active:I915_NUM_ENGINES;

	/**
	 * This is set if the object has been written to since last bound
@@ -2129,7 +2129,7 @@ struct drm_i915_gem_object {
	 * read request. This allows for the CPU to read from an active
	 * buffer by only waiting for the write to complete.
	 * */
	struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS];
	struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES];
	struct drm_i915_gem_request *last_write_req;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	struct drm_i915_gem_request *last_fenced_req;
@@ -2277,7 +2277,7 @@ i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
}

static inline struct intel_engine_cs *
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
i915_gem_request_get_engine(struct drm_i915_gem_request *req)
{
	return req ? req->engine : NULL;
}
+37 −37
Original line number Diff line number Diff line
@@ -1243,11 +1243,11 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
			s64 *timeout,
			struct intel_rps_client *rps)
{
	struct intel_engine_cs *engine = i915_gem_request_get_ring(req);
	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(engine);
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
@@ -1512,7 +1512,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
				i915_gem_object_retire__write(obj);
		}
	} else {
		for (i = 0; i < I915_NUM_RINGS; i++) {
		for (i = 0; i < I915_NUM_ENGINES; i++) {
			if (obj->last_read_req[i] == NULL)
				continue;

@@ -1552,7 +1552,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *requests[I915_NUM_RINGS];
	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
	unsigned reset_counter;
	int ret, i, n = 0;

@@ -1577,7 +1577,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,

		requests[n++] = i915_gem_request_reference(req);
	} else {
		for (i = 0; i < I915_NUM_RINGS; i++) {
		for (i = 0; i < I915_NUM_ENGINES; i++) {
			struct drm_i915_gem_request *req;

			req = obj->last_read_req[i];
@@ -2406,12 +2406,12 @@ void i915_vma_move_to_active(struct i915_vma *vma,
	struct drm_i915_gem_object *obj = vma->obj;
	struct intel_engine_cs *engine;

	engine = i915_gem_request_get_ring(req);
	engine = i915_gem_request_get_engine(req);

	/* Add a reference if we're newly entering the active list. */
	if (obj->active == 0)
		drm_gem_object_reference(&obj->base);
	obj->active |= intel_ring_flag(engine);
	obj->active |= intel_engine_flag(engine);

	list_move_tail(&obj->ring_list[engine->id], &engine->active_list);
	i915_gem_request_assign(&obj->last_read_req[engine->id], req);
@@ -2423,7 +2423,7 @@ static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
{
	RQ_BUG_ON(obj->last_write_req == NULL);
	RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->engine)));
	RQ_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));

	i915_gem_request_assign(&obj->last_write_req, NULL);
	intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2471,15 +2471,15 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
	int ret, i, j;

	/* Carefully retire all requests without writing to the rings */
	for_each_ring(engine, dev_priv, i) {
		ret = intel_ring_idle(engine);
	for_each_engine(engine, dev_priv, i) {
		ret = intel_engine_idle(engine);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev);

	/* Finally reset hw state */
	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		intel_ring_init_seqno(engine, seqno);

		for (j = 0; j < ARRAY_SIZE(engine->semaphore.sync_seqno); j++)
@@ -2801,7 +2801,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
	return NULL;
}

static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
				       struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;
@@ -2820,7 +2820,7 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
		i915_set_reset_status(dev_priv, request->ctx, false);
}

static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *buffer;
@@ -2893,11 +2893,11 @@ void i915_gem_reset(struct drm_device *dev)
	 * them for finding the guilty party. As the requests only borrow
	 * their reference to the objects, the inspection must be done first.
	 */
	for_each_ring(engine, dev_priv, i)
		i915_gem_reset_ring_status(dev_priv, engine);
	for_each_engine(engine, dev_priv, i)
		i915_gem_reset_engine_status(dev_priv, engine);

	for_each_ring(engine, dev_priv, i)
		i915_gem_reset_ring_cleanup(dev_priv, engine);
	for_each_engine(engine, dev_priv, i)
		i915_gem_reset_engine_cleanup(dev_priv, engine);

	i915_gem_context_reset(dev);

@@ -2966,7 +2966,7 @@ i915_gem_retire_requests(struct drm_device *dev)
	bool idle = true;
	int i;

	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		i915_gem_retire_requests_ring(engine);
		idle &= list_empty(&engine->request_list);
		if (i915.enable_execlists) {
@@ -3014,7 +3014,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
	for_each_engine(ring, dev_priv, i)
		if (!list_empty(&ring->request_list))
			return;

@@ -3028,7 +3028,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
		struct intel_engine_cs *engine;
		int i;

		for_each_ring(engine, dev_priv, i)
		for_each_engine(engine, dev_priv, i)
			i915_gem_batch_pool_fini(&engine->batch_pool);

		mutex_unlock(&dev->struct_mutex);
@@ -3048,7 +3048,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
	if (!obj->active)
		return 0;

	for (i = 0; i < I915_NUM_RINGS; i++) {
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct drm_i915_gem_request *req;

		req = obj->last_read_req[i];
@@ -3096,7 +3096,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	struct drm_i915_gem_request *req[I915_NUM_RINGS];
	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
	unsigned reset_counter;
	int i, n = 0;
	int ret;
@@ -3133,7 +3133,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
	drm_gem_object_unreference(&obj->base);
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	for (i = 0; i < I915_NUM_RINGS; i++) {
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (obj->last_read_req[i] == NULL)
			continue;

@@ -3166,7 +3166,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
	struct intel_engine_cs *from;
	int ret;

	from = i915_gem_request_get_ring(from_req);
	from = i915_gem_request_get_engine(from_req);
	if (to == from)
		return 0;

@@ -3260,7 +3260,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_request **to_req)
{
	const bool readonly = obj->base.pending_write_domain == 0;
	struct drm_i915_gem_request *req[I915_NUM_RINGS];
	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
	int ret, i, n;

	if (!obj->active)
@@ -3274,7 +3274,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
		if (obj->last_write_req)
			req[n++] = obj->last_write_req;
	} else {
		for (i = 0; i < I915_NUM_RINGS; i++)
		for (i = 0; i < I915_NUM_ENGINES; i++)
			if (obj->last_read_req[i])
				req[n++] = obj->last_read_req[i];
	}
@@ -3395,7 +3395,7 @@ int i915_gpu_idle(struct drm_device *dev)
	int ret, i;

	/* Flush everything onto the inactive list. */
	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		if (!i915.enable_execlists) {
			struct drm_i915_gem_request *req;

@@ -3412,7 +3412,7 @@ int i915_gpu_idle(struct drm_device *dev)
			i915_add_request_no_flush(req);
		}

		ret = intel_ring_idle(engine);
		ret = intel_engine_idle(engine);
		if (ret)
			return ret;
	}
@@ -4359,7 +4359,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
	if (obj->active) {
		int i;

		for (i = 0; i < I915_NUM_RINGS; i++) {
		for (i = 0; i < I915_NUM_ENGINES; i++) {
			struct drm_i915_gem_request *req;

			req = obj->last_read_req[i];
@@ -4447,7 +4447,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
	int i;

	INIT_LIST_HEAD(&obj->global_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&obj->ring_list[i]);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
@@ -4659,7 +4659,7 @@ i915_gem_stop_ringbuffers(struct drm_device *dev)
	struct intel_engine_cs *engine;
	int i;

	for_each_ring(engine, dev_priv, i)
	for_each_engine(engine, dev_priv, i)
		dev_priv->gt.stop_ring(engine);
}

@@ -4876,7 +4876,7 @@ i915_gem_init_hw(struct drm_device *dev)
	}

	/* Need to do basic initialisation of all rings first: */
	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		ret = engine->init_hw(engine);
		if (ret)
			goto out;
@@ -4901,7 +4901,7 @@ i915_gem_init_hw(struct drm_device *dev)
		goto out;

	/* Now it is safe to go back round and do everything else: */
	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		struct drm_i915_gem_request *req;

		req = i915_gem_request_alloc(engine, NULL);
@@ -5009,7 +5009,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
	struct intel_engine_cs *engine;
	int i;

	for_each_ring(engine, dev_priv, i)
	for_each_engine(engine, dev_priv, i)
		dev_priv->gt.cleanup_ring(engine);

    if (i915.enable_execlists)
@@ -5022,7 +5022,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
}

static void
init_ring_lists(struct intel_engine_cs *engine)
init_engine_lists(struct intel_engine_cs *engine)
{
	INIT_LIST_HEAD(&engine->active_list);
	INIT_LIST_HEAD(&engine->request_list);
@@ -5055,8 +5055,8 @@ i915_gem_load_init(struct drm_device *dev)
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->engine[i]);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_engine_lists(&dev_priv->engine[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+8 −8
Original line number Diff line number Diff line
@@ -345,7 +345,7 @@ void i915_gem_context_reset(struct drm_device *dev)
			intel_lr_context_reset(dev, ctx);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_engine_cs *engine = &dev_priv->engine[i];

		if (engine->last_context) {
@@ -426,7 +426,7 @@ void i915_gem_context_fini(struct drm_device *dev)
		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	for (i = I915_NUM_RINGS; --i >= 0;) {
	for (i = I915_NUM_ENGINES; --i >= 0;) {
		struct intel_engine_cs *engine = &dev_priv->engine[i];

		if (engine->last_context) {
@@ -553,7 +553,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)

			intel_ring_emit(engine,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(engine->dev), i) {
			for_each_engine(signaller, to_i915(engine->dev), i) {
				if (signaller == engine)
					continue;

@@ -582,7 +582,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)

			intel_ring_emit(engine,
					MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(engine->dev), i) {
			for_each_engine(signaller, to_i915(engine->dev), i) {
				if (signaller == engine)
					continue;

@@ -608,7 +608,7 @@ static inline bool should_skip_switch(struct intel_engine_cs *engine,
		return false;

	if (to->ppgtt && from == to &&
	    !(intel_ring_flag(engine) & to->ppgtt->pd_dirty_rings))
	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
		return true;

	return false;
@@ -697,7 +697,7 @@ static int do_switch(struct drm_i915_gem_request *req)
			goto unpin_out;

		/* Doing a PD load always reloads the page dirs */
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
		to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
	}

	if (engine != &dev_priv->engine[RCS]) {
@@ -725,9 +725,9 @@ static int do_switch(struct drm_i915_gem_request *req)
		 * space. This means we must enforce that a page table load
		 * occur when this occurs. */
	} else if (to->ppgtt &&
		   (intel_ring_flag(engine) & to->ppgtt->pd_dirty_rings)) {
		   (intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) {
		hw_flags |= MI_FORCE_RESTORE;
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
		to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
	}

	/* We should never emit switch_mm more than once */
+1 −1
Original line number Diff line number Diff line
@@ -43,7 +43,7 @@ i915_verify_lists(struct drm_device *dev)
	if (warned)
		return 0;

	for_each_ring(engine, dev_priv, i) {
	for_each_engine(engine, dev_priv, i) {
		list_for_each_entry(obj, &engine->active_list,
				    ring_list[engine->id]) {
			if (obj->base.dev != dev ||
Loading