
Commit 0d1aacac authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Embed the ring->private within the struct intel_ring_buffer



We now have more devices using ring->private than not, and they all want
the same structure. Worse, I would like to use a scratch page from
outside of intel_ringbuffer.c and so for convenience would like to reuse
ring->private. Embed the object into struct intel_ring_buffer so that
we can keep the code clean.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent a52690e4
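
For readers skimming the diff, the structural change in brief, abridged from the hunks below (only the affected members are shown; the rest of struct intel_ring_buffer is elided):

	/* Before: each engine hung its own allocation off an opaque pointer,
	 * e.g. a kmalloc'ed struct pipe_control on the render ring. */
	struct intel_ring_buffer {
		/* ... */
		void *private;
	};

	/* After: the scratch object, its GGTT offset and its kmap'ed CPU page
	 * are embedded in the ring itself, so every engine shares one layout
	 * and the scratch page is reachable from outside intel_ringbuffer.c. */
	struct intel_ring_buffer {
		/* ... */
		struct {
			struct drm_i915_gem_object *obj;
			u32 gtt_offset;
			volatile u32 *cpu_page;
		} scratch;
	};

Call sites that previously went through the private pointer, for example

	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;

now read the embedded fields directly:

	u32 scratch_addr = ring->scratch.gtt_offset + 128;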
+1 −1
@@ -641,7 +641,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
		if (WARN_ON(ring->id != RCS))
			return NULL;

-		obj = ring->private;
+		obj = ring->scratch.obj;
		if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
		    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
+29 −70
@@ -33,16 +33,6 @@
#include "i915_trace.h"
#include "intel_drv.h"

-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
-	struct drm_i915_gem_object *obj;
-	volatile u32 *cpu_page;
-	u32 gtt_offset;
-};
-
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
@@ -175,8 +165,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;


@@ -213,8 +202,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
                         u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -306,8 +294,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	/*
@@ -481,68 +468,43 @@ static int init_ring_common(struct intel_ring_buffer *ring)
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
-	struct pipe_control *pc;
-	struct drm_i915_gem_object *obj;
	int ret;

-	if (ring->private)
+	if (ring->scratch.obj)
		return 0;

-	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
-	if (!pc)
-		return -ENOMEM;
-
-	obj = i915_gem_alloc_object(ring->dev, 4096);
-	if (obj == NULL) {
+	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
+	if (ring->scratch.obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

-	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);

-	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
	if (ret)
		goto err_unref;

-	pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
-	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
-	if (pc->cpu_page == NULL) {
+	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
+	if (ring->scratch.cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-			 ring->name, pc->gtt_offset);
-
-	pc->obj = obj;
-	ring->private = pc;
+			 ring->name, ring->scratch.gtt_offset);
	return 0;

err_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_unpin(ring->scratch.obj);
err_unref:
-	drm_gem_object_unreference(&obj->base);
+	drm_gem_object_unreference(&ring->scratch.obj->base);
err:
-	kfree(pc);
	return ret;
}

-static void
-cleanup_pipe_control(struct intel_ring_buffer *ring)
-{
-	struct pipe_control *pc = ring->private;
-	struct drm_i915_gem_object *obj;
-
-	obj = pc->obj;
-
-	kunmap(sg_page(obj->pages->sgl));
-	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(&obj->base);
-
-	kfree(pc);
-}
-
static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
@@ -607,16 +569,16 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

-	if (!ring->private)
+	if (ring->scratch.obj == NULL)
		return;

-	if (HAS_BROKEN_CS_TLB(dev))
-		drm_gem_object_unreference(to_gem_object(ring->private));
-
-	if (INTEL_INFO(dev)->gen >= 5)
-		cleanup_pipe_control(ring);
+	if (INTEL_INFO(dev)->gen >= 5) {
+		kunmap(sg_page(ring->scratch.obj->pages->sgl));
+		i915_gem_object_unpin(ring->scratch.obj);
+	}

-	ring->private = NULL;
+	drm_gem_object_unreference(&ring->scratch.obj->base);
+	ring->scratch.obj = NULL;
}

static void
@@ -742,8 +704,7 @@ do { \
static int
pc_render_add_request(struct intel_ring_buffer *ring)
{
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -761,7 +722,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_request);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
@@ -780,7 +741,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
-	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_request);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);
@@ -814,15 +775,13 @@ ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
-	struct pipe_control *pc = ring->private;
-	return pc->cpu_page[0];
+	return ring->scratch.cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
-	struct pipe_control *pc = ring->private;
-	pc->cpu_page[0] = seqno;
+	ring->scratch.cpu_page[0] = seqno;
}

static bool
@@ -1141,8 +1100,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	} else {
-		struct drm_i915_gem_object *obj = ring->private;
-		u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
+		u32 cs_offset = ring->scratch.gtt_offset;

		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;
@@ -1835,7 +1793,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
			return ret;
		}

-		ring->private = obj;
+		ring->scratch.obj = obj;
+		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
	}

	return intel_init_ring_buffer(dev, ring);
+5 −1
@@ -155,7 +155,11 @@ struct intel_ring_buffer {

	struct intel_ring_hangcheck hangcheck;

-	void *private;
+	struct {
+		struct drm_i915_gem_object *obj;
+		u32 gtt_offset;
+		volatile u32 *cpu_page;
+	} scratch;
};

static inline bool