Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b13c2b96 authored by Chris Wilson
Browse files

drm/i915/ringbuffer: Make IRQ refcnting atomic



In order to enforce the correct memory barriers for irq get/put, we need
to perform the actual counting using atomic operations.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 8d5203ca
Loading
Loading
Loading
Loading
+20 −17
Original line number Diff line number Diff line
@@ -2000,7 +2000,8 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
		trace_i915_gem_request_wait_begin(dev, seqno);

		ring->waiting_seqno = seqno;
		ring->irq_get(ring);
		ret = -ENODEV;
		if (ring->irq_get(ring)) {
			if (interruptible)
				ret = wait_event_interruptible(ring->irq_queue,
							       i915_seqno_passed(ring->get_seqno(ring), seqno)
@@ -2011,6 +2012,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
					   || atomic_read(&dev_priv->mm.wedged));

			ring->irq_put(ring);
		}
		ring->waiting_seqno = 0;

		trace_i915_gem_request_wait_end(dev, seqno);
@@ -3157,7 +3159,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
		 * generation is designed to be run atomically and so is
		 * lockless.
		 */
		ring->irq_get(ring);
		if (ring->irq_get(ring)) {
			ret = wait_event_interruptible(ring->irq_queue,
						       i915_seqno_passed(ring->get_seqno(ring), seqno)
						       || atomic_read(&dev_priv->mm.wedged));
@@ -3166,6 +3168,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
			if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
				ret = -EIO;
		}
	}

	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
+9 −8
Original line number Diff line number Diff line
@@ -1186,9 +1186,8 @@ void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->trace_irq_seqno == 0)
		ring->irq_get(ring);

	if (dev_priv->trace_irq_seqno == 0 &&
	    ring->irq_get(ring))
		dev_priv->trace_irq_seqno = seqno;
}

@@ -1211,10 +1210,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	ring->irq_get(ring);
	ret = -ENODEV;
	if (ring->irq_get(ring)) {
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
		ring->irq_put(ring);
	}

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
+25 −19
Original line number Diff line number Diff line
@@ -327,25 +327,28 @@ ring_get_seqno(struct intel_ring_buffer *ring)
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

/*
 * Enable the render ring's user interrupt, taking a reference on it.
 *
 * Returns false (without touching the refcount) when the device has no
 * IRQ support, so callers must check the result before sleeping on
 * ring->irq_queue.  The first reference (atomic_inc_return() == 1)
 * actually unmasks the interrupt under dev_priv->irq_lock; later
 * references only bump the atomic counter.
 */
static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	if (!dev->irq_enabled)
		return false;

	/* 0 -> 1 transition: unmask the interrupt in hardware. */
	if (atomic_inc_return(&ring->irq_refcount) == 1) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (HAS_PCH_SPLIT(dev))
			/* Ironlake+ routes user interrupts through the GT block. */
			ironlake_enable_graphics_irq(dev_priv,
						     GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return true;
}

static void
@@ -353,8 +356,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	BUG_ON(dev->irq_enabled && ring->irq_refcount == 0);
	if (dev->irq_enabled && --ring->irq_refcount == 0) {
	if (atomic_dec_and_test(&ring->irq_refcount)) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

@@ -417,12 +419,15 @@ ring_add_request(struct intel_ring_buffer *ring,
	return 0;
}

static void
static bool
ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
{
	struct drm_device *dev = ring->dev;

	if (dev->irq_enabled && ++ring->irq_refcount == 1) {
	if (!dev->irq_enabled)
	       return false;

	if (atomic_inc_return(&ring->irq_refcount) == 1) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

@@ -430,6 +435,8 @@ ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
		ironlake_enable_graphics_irq(dev_priv, flag);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return true;
}

static void
@@ -437,7 +444,7 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
{
	struct drm_device *dev = ring->dev;

	if (dev->irq_enabled && --ring->irq_refcount == 0) {
	if (atomic_dec_and_test(&ring->irq_refcount)) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

@@ -447,11 +454,10 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
	}
}


/*
 * Acquire an IRQ reference for the BSD (video) ring.
 * Thin wrapper that forwards to ring_get_irq() with the BSD user
 * interrupt bit; propagates its success/failure to the caller.
 */
static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
}
static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
@@ -846,10 +852,10 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
       return 0;
}

/*
 * Acquire an IRQ reference for the Gen6+ BSD (video) ring, which uses
 * a different interrupt bit than older generations.  Forwards to
 * ring_get_irq() and propagates its success/failure to the caller.
 */
static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
}

static void
@@ -876,10 +882,10 @@ static const struct intel_ring_buffer gen6_bsd_ring = {

/* Blitter support (SandyBridge+) */

/*
 * Acquire an IRQ reference for the blitter ring (SandyBridge+).
 * Thin wrapper that forwards to ring_get_irq() with the BLT user
 * interrupt bit; propagates its success/failure to the caller.
 */
static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
}

static void
+2 −2
Original line number Diff line number Diff line
@@ -54,8 +54,8 @@ struct intel_ring_buffer {
	u32		irq_seqno;		/* last seq seem at irq time */
	u32		waiting_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
	u32		irq_refcount;
	void		(*irq_get)(struct intel_ring_buffer *ring);
	atomic_t	irq_refcount;
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);