Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8187a2b7 authored by Zou Nan hai, committed by Eric Anholt
Browse files

drm/i915: introduce intel_ring_buffer structure (V2)



Introduces a more complete intel_ring_buffer structure with callbacks
for setup and management of a particular ringbuffer, and converts the
render ring buffer consumers to use it.

Signed-off-by: Zou Nan hai <nanhai.zou@intel.com>
Signed-off-by: Xiang Hai hao <haihao.xiang@intel.com>
[anholt: Fixed up whitespace fail and rebased against prep patches]
Signed-off-by: Eric Anholt <eric@anholt.net>
parent d3301d86
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -317,14 +317,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
	u8 *virt;
	uint32_t *ptr, off;

	if (!dev_priv->render_ring.ring_obj) {
	if (!dev_priv->render_ring.gem_object) {
		seq_printf(m, "No ringbuffer setup\n");
		return 0;
	}

	virt = dev_priv->render_ring.virtual_start;

	for (off = 0; off < dev_priv->render_ring.Size; off += 4) {
	for (off = 0; off < dev_priv->render_ring.size; off += 4) {
		ptr = (uint32_t *)(virt + off);
		seq_printf(m, "%08x :  %08x\n", off, *ptr);
	}
@@ -344,7 +344,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)

	seq_printf(m, "RingHead :  %08x\n", head);
	seq_printf(m, "RingTail :  %08x\n", tail);
	seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.Size);
	seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
	seq_printf(m, "Acthd :     %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));

	return 0;
+25 −33
Original line number Diff line number Diff line
@@ -40,7 +40,6 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>


/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
@@ -56,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev)
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->render_ring.status_page.page_addr
		= dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);

	if (IS_I965G(dev))
		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -95,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	drm_i915_ring_buffer_t *ring = &(dev_priv->render_ring);
	struct intel_ring_buffer *ring = &dev_priv->render_ring;

	/*
	 * We should never lose context on the ring with modesetting
@@ -108,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;
		ring->space += ring->size;

	if (!dev->primary->master)
		return;
@@ -128,12 +128,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev_priv->render_ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->render_ring.map, dev);
		dev_priv->render_ring.virtual_start = NULL;
		dev_priv->render_ring.map.handle = NULL;
		dev_priv->render_ring.map.size = 0;
	}
	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
@@ -156,14 +151,14 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
	}

	if (init->ring_size != 0) {
		if (dev_priv->render_ring.ring_obj != NULL) {
		if (dev_priv->render_ring.gem_object != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		dev_priv->render_ring.Size = init->ring_size;
		dev_priv->render_ring.size = init->ring_size;

		dev_priv->render_ring.map.offset = init->ring_start;
		dev_priv->render_ring.map.size = init->ring_size;
@@ -201,26 +196,29 @@ static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	struct intel_ring_buffer *ring;
	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (dev_priv->render_ring.map.handle == NULL) {
	ring = &dev_priv->render_ring;

	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
				dev_priv->hw_status_page);

	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
				ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		ring->setup_status_page(dev, ring);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
@@ -330,9 +328,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.Size - 8)
	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
		return -EINVAL;

	BEGIN_LP_RING((dwords+1)&~1);
@@ -365,9 +362,7 @@ i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *boxes,
	      int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box = boxes[i];
	RING_LOCALS;

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
@@ -404,7 +399,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	RING_LOCALS;

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -458,10 +452,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
@@ -510,7 +502,6 @@ static int i915_dispatch_flip(struct drm_device * dev)
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	RING_LOCALS;

	if (!master_priv->sarea_priv)
		return -EINVAL;
@@ -563,7 +554,8 @@ static int i915_quiescent(struct drm_device * dev)
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->render_ring.Size - 8, __func__);
	return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
				      dev_priv->render_ring.size - 8);
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -805,6 +797,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = &dev_priv->render_ring;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;
@@ -821,7 +814,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
@@ -837,10 +830,10 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
				" G33 hw status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;
	ring->status_page.page_addr = dev_priv->hws_map.handle;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
				dev_priv->status_gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
@@ -1639,7 +1632,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

	spin_lock_init(&dev_priv->user_irq_lock);
	spin_lock_init(&dev_priv->error_lock);
	dev_priv->user_irq_refcount = 0;
	dev_priv->trace_irq_seqno = 0;

	ret = drm_vblank_init(dev, I915_NUM_PIPE);
+3 −26
Original line number Diff line number Diff line
@@ -389,32 +389,9 @@ int i965_reset(struct drm_device *dev, u8 flags)
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->mm.suspended) {
		drm_i915_ring_buffer_t *ring = &dev_priv->render_ring;
		struct drm_gem_object *obj = ring->ring_obj;
		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
		struct intel_ring_buffer *ring = &dev_priv->render_ring;
		dev_priv->mm.suspended = 0;

		/* Stop the ring if it's running. */
		I915_WRITE(PRB0_CTL, 0);
		I915_WRITE(PRB0_TAIL, 0);
		I915_WRITE(PRB0_HEAD, 0);

		/* Initialize the ring. */
		I915_WRITE(PRB0_START, obj_priv->gtt_offset);
		I915_WRITE(PRB0_CTL,
			   ((obj->size - 4096) & RING_NR_PAGES) |
			   RING_NO_REPORT |
			   RING_VALID);
		if (!drm_core_check_feature(dev, DRIVER_MODESET))
			i915_kernel_lost_context(dev);
		else {
			ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
			ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
			ring->space = ring->head - (ring->tail + 8);
			if (ring->space < 0)
				ring->space += ring->Size;
		}

		ring->init(dev, ring);
		mutex_unlock(&dev->struct_mutex);
		drm_irq_uninstall(dev);
		drm_irq_install(dev);
+25 −55
Original line number Diff line number Diff line
@@ -31,8 +31,8 @@
#define _I915_DRV_H_

#include "i915_reg.h"
#include "i915_drm.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>

/* General customization:
@@ -92,16 +92,6 @@ struct drm_i915_gem_phys_object {
	struct drm_gem_object *cur_obj;
};

typedef struct _drm_i915_ring_buffer {
	unsigned long Size;
	u8 *virtual_start;
	int head;
	int tail;
	int space;
	drm_local_map_t map;
	struct drm_gem_object *ring_obj;
} drm_i915_ring_buffer_t;

struct mem_block {
	struct mem_block *next;
	struct mem_block *prev;
@@ -244,7 +234,7 @@ typedef struct drm_i915_private {
	void __iomem *regs;

	struct pci_dev *bridge_dev;
	drm_i915_ring_buffer_t render_ring;
	struct intel_ring_buffer render_ring;

	drm_dma_handle_t *status_page_dmah;
	void *hw_status_page;
@@ -270,8 +260,6 @@ typedef struct drm_i915_private {
	atomic_t irq_received;
	/** Protects user_irq_refcount and irq_mask_reg */
	spinlock_t user_irq_lock;
	/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
	int user_irq_refcount;
	u32 trace_irq_seqno;
	/** Cached value of IMR to avoid reads in updating the bitfield */
	u32 irq_mask_reg;
@@ -832,9 +820,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
void i915_user_irq_get(struct drm_device *dev);
void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
void i915_user_irq_put(struct drm_device *dev);
extern void i915_enable_interrupt (struct drm_device *dev);

extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
@@ -853,8 +839,10 @@ extern int i915_vblank_swap(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask);
void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask);
extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
		u32 mask);
extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
		u32 mask);

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -962,8 +950,6 @@ void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);

void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
int i915_gem_init_pipe_control(struct drm_device *dev);
void i915_gem_cleanup_pipe_control(struct drm_device *dev);

/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -1014,16 +1000,6 @@ static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return;
static inline void opregion_enable_asle(struct drm_device *dev) { return; }
#endif

/* intel_ringbuffer.c */
extern void i915_gem_flush(struct drm_device *dev,
			   uint32_t invalidate_domains,
			   uint32_t flush_domains);
extern int i915_dispatch_gem_execbuffer(struct drm_device *dev,
					struct drm_i915_gem_execbuffer2 *exec,
					struct drm_clip_rect *cliprects,
					uint64_t exec_offset);
extern uint32_t i915_ring_add_request(struct drm_device *dev);

/* modesetting */
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1044,7 +1020,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {			\
	if (((drm_i915_private_t *)dev->dev_private)->render_ring.ring_obj == NULL) \
	if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
			== NULL)					\
		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
} while (0)

@@ -1060,32 +1037,27 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);

#define I915_VERBOSE 0

#define RING_LOCALS	volatile unsigned int *ring_virt__;

#define BEGIN_LP_RING(n)  do { \
	int bytes__ = 4*(n);						\
	if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));	\
	/* a wrap must occur between instructions so pad beforehand */	\
	if (unlikely (dev_priv->render_ring.tail + bytes__ > dev_priv->render_ring.Size)) \
		i915_wrap_ring(dev);					\
	if (unlikely (dev_priv->render_ring.space < bytes__))			\
		i915_wait_ring(dev, bytes__, __func__);			\
	ring_virt__ = (unsigned int *)					\
	        (dev_priv->render_ring.virtual_start + dev_priv->render_ring.tail);	\
	dev_priv->render_ring.tail += bytes__;					\
	dev_priv->render_ring.tail &= dev_priv->render_ring.Size - 1;			\
	dev_priv->render_ring.space -= bytes__;				\
	drm_i915_private_t *dev_priv = dev->dev_private;                \
	if (I915_VERBOSE)						\
		DRM_DEBUG("   BEGIN_LP_RING %x\n", (int)(n));		\
	intel_ring_begin(dev, &dev_priv->render_ring, 4*(n));		\
} while (0)

#define OUT_RING(n) do {						\
	if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
	*ring_virt__++ = (n);						\

#define OUT_RING(x) do {						\
	drm_i915_private_t *dev_priv = dev->dev_private;		\
	if (I915_VERBOSE)						\
		DRM_DEBUG("   OUT_RING %x\n", (int)(x));		\
	intel_ring_emit(dev, &dev_priv->render_ring, x);		\
} while (0)

#define ADVANCE_LP_RING() do {						\
	drm_i915_private_t *dev_priv = dev->dev_private;                \
	if (I915_VERBOSE)						\
		DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->render_ring.tail);	\
	I915_WRITE(PRB0_TAIL, dev_priv->render_ring.tail);			\
		DRM_DEBUG("ADVANCE_LP_RING %x\n",			\
				dev_priv->render_ring.tail);		\
	intel_ring_advance(dev, &dev_priv->render_ring);		\
} while(0)

/**
@@ -1103,14 +1075,12 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
#define READ_HWSP(dev_priv, reg)  (((volatile u32 *)\
			(dev_priv->render_ring.status_page.page_addr))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX		0x20
#define I915_BREADCRUMB_INDEX		0x21

extern int i915_wrap_ring(struct drm_device * dev);
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);

#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)

#define IS_I830(dev)		((dev)->pci_device == 0x3577)
+65 −11
Original line number Diff line number Diff line
@@ -1590,6 +1590,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
		}
	}
}

uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
		 uint32_t flush_domains)
@@ -1607,7 +1608,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
	if (request == NULL)
		return 0;

	seqno = i915_ring_add_request(dev);
	seqno = dev_priv->render_ring.add_request(dev, &dev_priv->render_ring,
						  file_priv, flush_domains);

	DRM_DEBUG_DRIVER("%d\n", seqno);

@@ -1645,10 +1647,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
@@ -1746,7 +1746,9 @@ i915_gem_retire_requests(struct drm_device *dev)
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
	struct intel_ring_buffer *ring = &(dev_priv->render_ring);
	if (!ring->status_page.page_addr
			|| list_empty(&dev_priv->mm.request_list))
		return;

	seqno = i915_get_gem_seqno(dev);
@@ -1773,7 +1775,8 @@ i915_gem_retire_requests(struct drm_device *dev)

	if (unlikely (dev_priv->trace_irq_seqno &&
		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
		i915_user_irq_put(dev);

		ring->user_irq_put(dev, ring);
		dev_priv->trace_irq_seqno = 0;
	}
}
@@ -1803,6 +1806,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
	u32 ier;
	int ret = 0;

	struct intel_ring_buffer *ring = &dev_priv->render_ring;
	BUG_ON(seqno == 0);

	if (atomic_read(&dev_priv->mm.wedged))
@@ -1823,7 +1827,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
		trace_i915_gem_request_wait_begin(dev, seqno);

		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		ring->user_irq_get(dev, ring);
		if (interruptible)
			ret = wait_event_interruptible(dev_priv->irq_queue,
				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
@@ -1833,7 +1837,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
				atomic_read(&dev_priv->mm.wedged));

		i915_user_irq_put(dev);
		ring->user_irq_put(dev, ring);
		dev_priv->mm.waiting_gem_seqno = 0;

		trace_i915_gem_request_wait_end(dev, seqno);
@@ -1867,6 +1871,19 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
}


/*
 * Flush the GPU write domains requested by @flush_domains and invalidate
 * the read domains in @invalidate_domains.
 *
 * The CPU domain is handled directly here via an AGP chipset flush; all
 * GPU-side domains are delegated to the render ring's flush() callback
 * (installed by the intel_ring_buffer code — not visible in this hunk).
 */
static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* CPU-domain writes are flushed through the chipset, not the ring. */
	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);
	/* GPU domains: defer to the per-ring flush implementation. */
	dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
			invalidate_domains,
			flush_domains);
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
@@ -3820,7 +3837,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
#endif

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
	ret = dev_priv->render_ring.dispatch_gem_execbuffer(dev,
							    &dev_priv->render_ring,
							    args,
							    cliprects,
							    exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
@@ -4378,7 +4399,8 @@ i915_gem_idle(struct drm_device *dev)

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended || dev_priv->render_ring.ring_obj == NULL) {
	if (dev_priv->mm.suspended ||
			dev_priv->render_ring.gem_object == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}
@@ -4420,7 +4442,7 @@ i915_gem_idle(struct drm_device *dev)
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
int
static int
i915_gem_init_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4459,7 +4481,8 @@ i915_gem_init_pipe_control(struct drm_device *dev)
	return ret;
}

void

static void
i915_gem_cleanup_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4476,6 +4499,37 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev)
	dev_priv->seqno_page = NULL;
}

/*
 * Initialize the render ring buffer for GEM.
 *
 * Copies the render_ring template descriptor (defined elsewhere,
 * presumably in intel_ringbuffer.c — not visible in this hunk) into
 * dev_priv, wires up the physical hardware status page when the device
 * does not use a GFX-address HWS, sets up PIPE_CONTROL state on hardware
 * that has it, and finally performs the common ring-buffer init.
 *
 * Returns 0 on success or a negative errno from the init helpers.
 */
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	dev_priv->render_ring = render_ring;
	if (!I915_NEED_GFX_HWS(dev)) {
		/* Physical HWS: point the ring's status page at the DMA
		 * buffer allocated earlier and clear it. */
		dev_priv->render_ring.status_page.page_addr
			= dev_priv->status_page_dmah->vaddr;
		memset(dev_priv->render_ring.status_page.page_addr,
				0, PAGE_SIZE);
	}
	if (HAS_PIPE_CONTROL(dev)) {
		ret = i915_gem_init_pipe_control(dev);
		if (ret)
			return ret;
	}
	ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
	return ret;
}

/*
 * Tear down the render ring buffer and, on hardware that has it, the
 * PIPE_CONTROL state set up by i915_gem_init_ringbuffer().
 * Cleanup is performed in reverse order of initialization.
 */
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
	if (HAS_PIPE_CONTROL(dev))
		i915_gem_cleanup_pipe_control(dev);
}

int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
Loading