Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a4872ba6 authored by Oscar Mateo, committed by Daniel Vetter
Browse files

drm/i915: s/intel_ring_buffer/intel_engine_cs



In the upcoming patches we plan to break the correlation between
engine command streamers (a.k.a. rings) and ringbuffers, so it
makes sense to refactor the code and make the change obvious.

No functional changes.

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent fe5b1886
Loading
Loading
Loading
Loading
+12 −12
Original line number Diff line number Diff line
@@ -498,7 +498,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
	return 0;
}

static bool validate_cmds_sorted(struct intel_ring_buffer *ring,
static bool validate_cmds_sorted(struct intel_engine_cs *ring,
				 const struct drm_i915_cmd_table *cmd_tables,
				 int cmd_table_count)
{
@@ -552,7 +552,7 @@ static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
	return ret;
}

static bool validate_regs_sorted(struct intel_ring_buffer *ring)
static bool validate_regs_sorted(struct intel_engine_cs *ring)
{
	return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
		check_sorted(ring->id, ring->master_reg_table,
@@ -580,7 +580,7 @@ struct cmd_node {
 */
#define CMD_HASH_MASK STD_MI_OPCODE_MASK

static int init_hash_table(struct intel_ring_buffer *ring,
static int init_hash_table(struct intel_engine_cs *ring,
			   const struct drm_i915_cmd_table *cmd_tables,
			   int cmd_table_count)
{
@@ -609,7 +609,7 @@ static int init_hash_table(struct intel_ring_buffer *ring,
	return 0;
}

static void fini_hash_table(struct intel_ring_buffer *ring)
static void fini_hash_table(struct intel_engine_cs *ring)
{
	struct hlist_node *tmp;
	struct cmd_node *desc_node;
@@ -626,12 +626,12 @@ static void fini_hash_table(struct intel_ring_buffer *ring)
 * @ring: the ringbuffer to initialize
 *
 * Optionally initializes fields related to batch buffer command parsing in the
 * struct intel_ring_buffer based on whether the platform requires software
 * struct intel_engine_cs based on whether the platform requires software
 * command parsing.
 *
 * Return: non-zero if initialization fails
 */
int i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
{
	const struct drm_i915_cmd_table *cmd_tables;
	int cmd_table_count;
@@ -725,7 +725,7 @@ int i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
 * Releases any resources related to command parsing that may have been
 * initialized for the specified ring.
 */
void i915_cmd_parser_fini_ring(struct intel_ring_buffer *ring)
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
{
	if (!ring->needs_cmd_parser)
		return;
@@ -734,7 +734,7 @@ void i915_cmd_parser_fini_ring(struct intel_ring_buffer *ring)
}

static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(struct intel_ring_buffer *ring,
find_cmd_in_table(struct intel_engine_cs *ring,
		  u32 cmd_header)
{
	struct cmd_node *desc_node;
@@ -761,7 +761,7 @@ find_cmd_in_table(struct intel_ring_buffer *ring,
 * ring's default length encoding and returns default_desc.
 */
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_ring_buffer *ring,
find_cmd(struct intel_engine_cs *ring,
	 u32 cmd_header,
	 struct drm_i915_cmd_descriptor *default_desc)
{
@@ -837,7 +837,7 @@ static u32 *vmap_batch(struct drm_i915_gem_object *obj)
 *
 * Return: true if the ring requires software command parsing
 */
bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

@@ -855,7 +855,7 @@ bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
	return (i915.enable_cmd_parser == 1);
}

static bool check_cmd(const struct intel_ring_buffer *ring,
static bool check_cmd(const struct intel_engine_cs *ring,
		      const struct drm_i915_cmd_descriptor *desc,
		      const u32 *cmd,
		      const bool is_master,
@@ -957,7 +957,7 @@ static bool check_cmd(const struct intel_ring_buffer *ring,
 *
 * Return: non-zero if the parser finds violations or otherwise fails
 */
int i915_parse_cmds(struct intel_ring_buffer *ring,
int i915_parse_cmds(struct intel_engine_cs *ring,
		    struct drm_i915_gem_object *batch_obj,
		    u32 batch_start_offset,
		    bool is_master)
+8 −8
Original line number Diff line number Diff line
@@ -562,7 +562,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

@@ -594,7 +594,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
				 struct intel_engine_cs *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
@@ -607,7 +607,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -630,7 +630,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -840,7 +840,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;
	const u32 *hws;
	int i;

@@ -1717,7 +1717,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;
	struct i915_hw_context *ctx;
	int ret, i;

@@ -1866,7 +1866,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int unused, i;

@@ -1890,7 +1890,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;
	struct drm_file *file;
	int i;

+5 −5
Original line number Diff line number Diff line
@@ -120,7 +120,7 @@ static void i915_write_hws_pga(struct drm_device *dev)
static void i915_free_hws(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);
	struct intel_engine_cs *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
@@ -140,7 +140,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);
	struct intel_engine_cs *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
@@ -235,7 +235,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
static int i915_dma_resume(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);
	struct intel_engine_cs *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

@@ -783,7 +783,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);
	struct intel_engine_cs *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));
@@ -1074,7 +1074,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;
+20 −20
Original line number Diff line number Diff line
@@ -470,7 +470,7 @@ struct drm_i915_display_funcs {
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  struct intel_ring_buffer *ring,
			  struct intel_engine_cs *ring,
			  uint32_t flags);
	void (*update_primary_plane)(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
@@ -605,7 +605,7 @@ struct i915_hw_context {
	bool is_initialized;
	uint8_t remap_slice;
	struct drm_i915_file_private *file_priv;
	struct intel_ring_buffer *last_ring;
	struct intel_engine_cs *last_ring;
	struct drm_i915_gem_object *obj;
	struct i915_ctx_hang_stats hang_stats;
	struct i915_address_space *vm;
@@ -1372,7 +1372,7 @@ struct drm_i915_private {
	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_ring_buffer ring[I915_NUM_RINGS];
	struct intel_engine_cs ring[I915_NUM_RINGS];
	uint32_t last_seqno, next_seqno;

	drm_dma_handle_t *status_page_dmah;
@@ -1690,7 +1690,7 @@ struct drm_i915_gem_object {
	void *dma_buf_vmapping;
	int vmapping_count;

	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_read_seqno;
@@ -1741,7 +1741,7 @@ struct drm_i915_gem_object {
 */
struct drm_i915_gem_request {
	/** On Which ring this request was generated */
	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;
@@ -1782,7 +1782,7 @@ struct drm_i915_file_private {

	struct i915_hw_context *private_default_ctx;
	atomic_t rps_wait_boost;
	struct  intel_ring_buffer *bsd_ring;
	struct  intel_engine_cs *bsd_ring;
};

/*
@@ -2209,9 +2209,9 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)

int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
			 struct intel_engine_cs *to);
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_ring_buffer *ring);
			     struct intel_engine_cs *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
@@ -2235,10 +2235,10 @@ bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_ring_buffer *ring);
i915_gem_find_active_request(struct intel_engine_cs *ring);

bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
@@ -2274,18 +2274,18 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_ring_buffer *ring,
int __i915_add_request(struct intel_engine_cs *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *batch_obj,
		       u32 *seqno);
#define i915_add_request(ring, seqno) \
	__i915_add_request(ring, NULL, NULL, seqno)
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
				 uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
@@ -2296,7 +2296,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined);
				     struct intel_engine_cs *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
@@ -2398,7 +2398,7 @@ void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
int i915_gem_context_enable(struct drm_i915_private *dev_priv);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
int i915_switch_context(struct intel_engine_cs *ring,
			struct i915_hw_context *to);
struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
@@ -2424,7 +2424,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);

/* i915_gem_render_state.c */
int i915_gem_render_state_init(struct intel_ring_buffer *ring);
int i915_gem_render_state_init(struct intel_engine_cs *ring);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
					  struct i915_address_space *vm,
@@ -2509,10 +2509,10 @@ const char *i915_cache_level_str(int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
int i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
void i915_cmd_parser_fini_ring(struct intel_ring_buffer *ring);
bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
int i915_parse_cmds(struct intel_ring_buffer *ring,
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
int i915_parse_cmds(struct intel_engine_cs *ring,
		    struct drm_i915_gem_object *batch_obj,
		    u32 batch_start_offset,
		    bool is_master);
+28 −28
Original line number Diff line number Diff line
@@ -979,7 +979,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
	int ret;

@@ -998,7 +998,7 @@ static void fake_irq(unsigned long data)
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_ring_buffer *ring)
		       struct intel_engine_cs *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
@@ -1029,7 +1029,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
 * Returns 0 if the seqno was found within the alloted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			struct timespec *timeout,
@@ -1136,7 +1136,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1161,7 +1161,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *ring)
				     struct intel_engine_cs *ring)
{
	if (!obj->active)
		return 0;
@@ -1186,7 +1186,7 @@ static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	struct intel_engine_cs *ring = obj->ring;
	u32 seqno;
	int ret;

@@ -1211,7 +1211,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = obj->ring;
	struct intel_engine_cs *ring = obj->ring;
	unsigned reset_counter;
	u32 seqno;
	int ret;
@@ -2040,7 +2040,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)

static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring)
			       struct intel_engine_cs *ring)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2078,7 +2078,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}

void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_ring_buffer *ring)
			     struct intel_engine_cs *ring)
{
	list_move_tail(&vma->mm_list, &vma->vm->active_list);
	return i915_gem_object_move_to_active(vma->obj, ring);
@@ -2119,7 +2119,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj)
{
	struct intel_ring_buffer *ring = obj->ring;
	struct intel_engine_cs *ring = obj->ring;

	if (ring == NULL)
		return;
@@ -2133,7 +2133,7 @@ static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;
	int ret, i, j;

	/* Carefully retire all requests without writing to the rings */
@@ -2199,7 +2199,7 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
	return 0;
}

int __i915_add_request(struct intel_ring_buffer *ring,
int __i915_add_request(struct intel_engine_cs *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *obj,
		       u32 *out_seqno)
@@ -2359,7 +2359,7 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
}

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_ring_buffer *ring)
i915_gem_find_active_request(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_request *request;
	u32 completed_seqno;
@@ -2377,7 +2377,7 @@ i915_gem_find_active_request(struct intel_ring_buffer *ring)
}

static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
				       struct intel_ring_buffer *ring)
				       struct intel_engine_cs *ring)
{
	struct drm_i915_gem_request *request;
	bool ring_hung;
@@ -2396,7 +2396,7 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
}

static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
					struct intel_ring_buffer *ring)
					struct intel_engine_cs *ring)
{
	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;
@@ -2455,7 +2455,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;
	int i;

	/*
@@ -2478,7 +2478,7 @@ void i915_gem_reset(struct drm_device *dev)
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
	uint32_t seqno;

@@ -2541,7 +2541,7 @@ bool
i915_gem_retire_requests(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;
	bool idle = true;
	int i;

@@ -2635,7 +2635,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	struct intel_ring_buffer *ring = NULL;
	struct intel_engine_cs *ring = NULL;
	struct timespec timeout_stack, *timeout = NULL;
	unsigned reset_counter;
	u32 seqno = 0;
@@ -2706,9 +2706,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 */
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
		     struct intel_ring_buffer *to)
		     struct intel_engine_cs *to)
{
	struct intel_ring_buffer *from = obj->ring;
	struct intel_engine_cs *from = obj->ring;
	u32 seqno;
	int ret, idx;

@@ -2831,7 +2831,7 @@ int i915_vma_unbind(struct i915_vma *vma)
int i915_gpu_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;
	int ret, i;

	/* Flush everything onto the inactive list. */
@@ -3702,7 +3702,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined)
				     struct intel_engine_cs *pipelined)
{
	u32 old_read_domains, old_write_domain;
	bool was_pin_display;
@@ -3858,7 +3858,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
	struct drm_i915_gem_request *request;
	struct intel_ring_buffer *ring = NULL;
	struct intel_engine_cs *ring = NULL;
	unsigned reset_counter;
	u32 seqno = 0;
	int ret;
@@ -4359,7 +4359,7 @@ static void
i915_gem_stop_ringbuffers(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
@@ -4408,7 +4408,7 @@ i915_gem_suspend(struct drm_device *dev)
	return ret;
}

int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4632,7 +4632,7 @@ void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
@@ -4708,7 +4708,7 @@ i915_gem_lastclose(struct drm_device *dev)
}

static void
init_ring_lists(struct intel_ring_buffer *ring)
init_ring_lists(struct intel_engine_cs *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
Loading