Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 78501eac authored by Chris Wilson
Browse files

drm/i915/ringbuffer: Drop the redundant dev from the vfunc interface



The ringbuffer keeps a pointer to the parent device, so we can use that
instead of passing around the pointer on the stack.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent dd2b379f
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -296,7 +296,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)

	if (dev_priv->render_ring.status_page.page_addr != NULL) {
		seq_printf(m, "Current sequence: %d\n",
			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
			   dev_priv->render_ring.get_seqno(&dev_priv->render_ring));
	} else {
		seq_printf(m, "Current sequence: hws uninitialized\n");
	}
@@ -356,7 +356,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
		   atomic_read(&dev_priv->irq_received));
	if (dev_priv->render_ring.status_page.page_addr != NULL) {
		seq_printf(m, "Current sequence:    %d\n",
			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
			   dev_priv->render_ring.get_seqno(&dev_priv->render_ring));
	} else {
		seq_printf(m, "Current sequence:    hws uninitialized\n");
	}
+5 −5
Original line number Diff line number Diff line
@@ -131,9 +131,9 @@ static int i915_dma_cleanup(struct drm_device * dev)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
	intel_cleanup_ring_buffer(&dev_priv->render_ring);
	intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
	intel_cleanup_ring_buffer(&dev_priv->blt_ring);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
@@ -221,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev)
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
				ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(dev, ring);
		intel_ring_setup_status_page(ring);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);

@@ -567,7 +567,7 @@ static int i915_quiescent(struct drm_device * dev)
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
	return intel_wait_ring_buffer(&dev_priv->render_ring,
				      dev_priv->render_ring.size - 8);
}

+1 −1
Original line number Diff line number Diff line
@@ -473,7 +473,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
			!dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring = &dev_priv->render_ring;
		dev_priv->mm.suspended = 0;
		ring->init(dev, ring);
		ring->init(ring);
		mutex_unlock(&dev->struct_mutex);
		drm_irq_uninstall(dev);
		drm_irq_install(dev);
+4 −4
Original line number Diff line number Diff line
@@ -1222,7 +1222,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
	drm_i915_private_t *dev_priv__ = dev->dev_private;		\
	if (I915_VERBOSE)						\
		DRM_DEBUG("   BEGIN_LP_RING %x\n", (int)(n));		\
	intel_ring_begin(dev, &dev_priv__->render_ring, (n));		\
	intel_ring_begin(&dev_priv__->render_ring, (n));		\
} while (0)


@@ -1230,7 +1230,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
	drm_i915_private_t *dev_priv__ = dev->dev_private;		\
	if (I915_VERBOSE)						\
		DRM_DEBUG("   OUT_RING %x\n", (int)(x));		\
	intel_ring_emit(dev, &dev_priv__->render_ring, x);		\
	intel_ring_emit(&dev_priv__->render_ring, x);			\
} while (0)

#define ADVANCE_LP_RING() do {						\
@@ -1238,7 +1238,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
	if (I915_VERBOSE)						\
		DRM_DEBUG("ADVANCE_LP_RING %x\n",			\
				dev_priv__->render_ring.tail);		\
	intel_ring_advance(dev, &dev_priv__->render_ring);		\
	intel_ring_advance(&dev_priv__->render_ring);			\
} while(0)

/**
+25 −30
Original line number Diff line number Diff line
@@ -1703,7 +1703,7 @@ i915_add_request(struct drm_device *dev,
			return 0;
	}

	seqno = ring->add_request(dev, ring, 0);
	seqno = ring->add_request(ring, 0);
	ring->outstanding_lazy_request = false;

	request->seqno = seqno;
@@ -1745,8 +1745,7 @@ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
	if (INTEL_INFO(dev)->gen >= 4)
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;

	ring->flush(dev, ring,
			I915_GEM_DOMAIN_COMMAND, flush_domains);
	ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
}

static inline void
@@ -1853,7 +1852,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,

	WARN_ON(i915_verify_lists(dev));

	seqno = ring->get_seqno(dev, ring);
	seqno = ring->get_seqno(ring);
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

@@ -1894,7 +1893,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,

	if (unlikely (dev_priv->trace_irq_seqno &&
		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
		ring->user_irq_put(dev, ring);
		ring->user_irq_put(ring);
		dev_priv->trace_irq_seqno = 0;
	}

@@ -1971,7 +1970,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
	}
	BUG_ON(seqno == dev_priv->next_seqno);

	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
		if (HAS_PCH_SPLIT(dev))
			ier = I915_READ(DEIER) | I915_READ(GTIER);
		else
@@ -1986,19 +1985,17 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
		trace_i915_gem_request_wait_begin(dev, seqno);

		ring->waiting_gem_seqno = seqno;
		ring->user_irq_get(dev, ring);
		ring->user_irq_get(ring);
		if (interruptible)
			ret = wait_event_interruptible(ring->irq_queue,
				i915_seqno_passed(
					ring->get_seqno(dev, ring), seqno)
				i915_seqno_passed(ring->get_seqno(ring), seqno)
				|| atomic_read(&dev_priv->mm.wedged));
		else
			wait_event(ring->irq_queue,
				i915_seqno_passed(
					ring->get_seqno(dev, ring), seqno)
				i915_seqno_passed(ring->get_seqno(ring), seqno)
				|| atomic_read(&dev_priv->mm.wedged));

		ring->user_irq_put(dev, ring);
		ring->user_irq_put(ring);
		ring->waiting_gem_seqno = 0;

		trace_i915_gem_request_wait_end(dev, seqno);
@@ -2008,7 +2005,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
			  __func__, ret, seqno, ring->get_seqno(dev, ring),
			  __func__, ret, seqno, ring->get_seqno(ring),
			  dev_priv->next_seqno);

	/* Directly dispatch request retiring.  While we have the work queue
@@ -2040,7 +2037,7 @@ i915_gem_flush_ring(struct drm_device *dev,
		    uint32_t invalidate_domains,
		    uint32_t flush_domains)
{
	ring->flush(dev, ring, invalidate_domains, flush_domains);
	ring->flush(ring, invalidate_domains, flush_domains);
	i915_gem_process_flushing_list(dev, flush_domains, ring);
}

@@ -3532,17 +3529,17 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
		return 0;

	ret = 0;
	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
		/* And wait for the seqno passing without holding any locks and
		 * causing extra latency for others. This is safe as the irq
		 * generation is designed to be run atomically and so is
		 * lockless.
		 */
		ring->user_irq_get(dev, ring);
		ring->user_irq_get(ring);
		ret = wait_event_interruptible(ring->irq_queue,
					       i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
					       i915_seqno_passed(ring->get_seqno(ring), seqno)
					       || atomic_read(&dev_priv->mm.wedged));
		ring->user_irq_put(dev, ring);
		ring->user_irq_put(ring);

		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
			ret = -EIO;
@@ -3829,17 +3826,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
			else
				flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

			intel_ring_begin(dev, ring, 2);
			intel_ring_emit(dev, ring,
					MI_WAIT_FOR_EVENT | flip_mask);
			intel_ring_emit(dev, ring, MI_NOOP);
			intel_ring_advance(dev, ring);
			intel_ring_begin(ring, 2);
			intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
			intel_ring_emit(ring, MI_NOOP);
			intel_ring_advance(ring);
		}
	}

	/* Exec the batchbuffer */
	ret = ring->dispatch_gem_execbuffer(dev, ring, args,
					    cliprects, exec_offset);
	ret = ring->dispatch_execbuffer(ring, args, cliprects, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
@@ -4520,9 +4515,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
	return 0;

cleanup_bsd_ring:
	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
	intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
cleanup_render_ring:
	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
	intel_cleanup_ring_buffer(&dev_priv->render_ring);
cleanup_pipe_control:
	if (HAS_PIPE_CONTROL(dev))
		i915_gem_cleanup_pipe_control(dev);
@@ -4534,9 +4529,9 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
	intel_cleanup_ring_buffer(&dev_priv->render_ring);
	intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
	intel_cleanup_ring_buffer(&dev_priv->blt_ring);
	if (HAS_PIPE_CONTROL(dev))
		i915_gem_cleanup_pipe_control(dev);
}
Loading