Commit 0d72c6fc authored by Linus Torvalds
Merge branch 'drm-intel-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux-2.6

* 'drm-intel-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux-2.6:
  drm/i915: Use chipset-specific irq installers
  drm/i915: forcewake fix after reset
  drm/i915: add Ivy Bridge page flip support
  drm/i915: split page flip queueing into per-chipset functions
parents c89b857c f01c22fd
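
A note on the shape of the series: all four patches move the driver away from run-time switch (INTEL_INFO(dev)->gen) branches toward function pointers chosen once at init. A minimal, self-contained sketch of that dispatch-table pattern follows; the names are made up for illustration and are not the driver's actual types:

/* Illustrative only: the dispatch-table pattern this merge adopts,
 * with hypothetical names standing in for the i915 types. */
#include <stdio.h>

struct display_funcs {
	int (*queue_flip)(int plane);	/* chosen once, called per flip */
};

static int gen2_queue_flip(int plane) { return 0; }
static int gen6_queue_flip(int plane) { return 0; }
static int default_queue_flip(int plane) { return -19; /* -ENODEV */ }

static void init_display(struct display_funcs *f, int gen)
{
	f->queue_flip = default_queue_flip;	/* unsupported by default */
	switch (gen) {
	case 2: f->queue_flip = gen2_queue_flip; break;
	case 6: f->queue_flip = gen6_queue_flip; break;
	}
}

int main(void)
{
	struct display_funcs f;
	init_display(&f, 6);			/* one switch, at init time */
	printf("%d\n", f.queue_flip(0));	/* no switch on the hot path */
	return 0;
}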

drivers/gpu/drm/i915/i915_drv.c (+3 −0)
@@ -579,6 +579,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
 	} else switch (INTEL_INFO(dev)->gen) {
 	case 6:
 		ret = gen6_do_reset(dev, flags);
+		/* If reset with a user forcewake, try to restore */
+		if (atomic_read(&dev_priv->forcewake_count))
+			__gen6_gt_force_wake_get(dev_priv);
 		break;
 	case 5:
 		ret = ironlake_do_reset(dev, flags);
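
The three added lines close a small window: a GPU reset clears the hardware's forcewake state, but dev_priv->forcewake_count still records references userspace holds, so the driver re-asserts the wake rather than leaving the counter and the hardware out of sync. A simplified sketch of the reference-counted pattern involved; the helpers are stand-ins, not the exact i915 code or locking:

/* Simplified model of reference-counted forcewake; hw_assert/hw_release
 * stand in for the real register writes. */
#include <stdatomic.h>

struct gt { atomic_int forcewake_count; };

static void hw_assert(struct gt *gt)  { (void)gt; /* write FORCEWAKE = 1 */ }
static void hw_release(struct gt *gt) { (void)gt; /* write FORCEWAKE = 0 */ }

static void force_wake_get(struct gt *gt)
{
	if (atomic_fetch_add(&gt->forcewake_count, 1) == 0)
		hw_assert(gt);		/* first reference wakes the GT */
}

static void force_wake_put(struct gt *gt)
{
	if (atomic_fetch_sub(&gt->forcewake_count, 1) == 1)
		hw_release(gt);		/* last reference lets it sleep */
}

/* What the fix does: reset wiped the register, but the count survived,
 * so re-assert the wake if anyone still holds a reference. */
static void after_reset(struct gt *gt)
{
	if (atomic_load(&gt->forcewake_count))
		hw_assert(gt);
}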

drivers/gpu/drm/i915/i915_drv.h (+3 −0)
@@ -211,6 +211,9 @@ struct drm_i915_display_funcs {
 	void (*fdi_link_train)(struct drm_crtc *crtc);
 	void (*init_clock_gating)(struct drm_device *dev);
 	void (*init_pch_clock_gating)(struct drm_device *dev);
+	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+			  struct drm_framebuffer *fb,
+			  struct drm_i915_gem_object *obj);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */

drivers/gpu/drm/i915/i915_gem.c (+2 −2)
@@ -2072,8 +2072,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
 		if (!ier) {
 			DRM_ERROR("something (likely vbetool) disabled "
 				  "interrupts, re-enabling\n");
-			i915_driver_irq_preinstall(ring->dev);
-			i915_driver_irq_postinstall(ring->dev);
+			ring->dev->driver->irq_preinstall(ring->dev);
+			ring->dev->driver->irq_postinstall(ring->dev);
 		}
 
 		trace_i915_gem_request_wait_begin(ring, seqno);
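
This hunk is the visible effect of the "chipset-specific irq installers" patch: re-enabling interrupts after something like vbetool clobbers IER now goes through the hooks registered in struct drm_driver instead of hard-wiring the legacy i915 installers, so chipsets with their own install paths get them back. A schematic sketch of the indirection, with the structs trimmed to just the two hooks involved (a model, not the real DRM definitions):

/* Trimmed-down model of the DRM driver vtable. */
struct drm_device;

struct drm_driver {
	void (*irq_preinstall)(struct drm_device *dev);
	void (*irq_postinstall)(struct drm_device *dev);
};

struct drm_device {
	const struct drm_driver *driver;
};

static void chipset_irq_preinstall(struct drm_device *dev)  { (void)dev; }
static void chipset_irq_postinstall(struct drm_device *dev) { (void)dev; }

static const struct drm_driver chipset_driver = {
	.irq_preinstall  = chipset_irq_preinstall,
	.irq_postinstall = chipset_irq_postinstall,
};

/* Calling through dev->driver picks up whatever this chipset registered;
 * the old direct calls always ran the legacy installers. */
static void reenable_irqs(struct drm_device *dev)
{
	dev->driver->irq_preinstall(dev);
	dev->driver->irq_postinstall(dev);
}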

drivers/gpu/drm/i915/intel_display.c (+222 −83)
@@ -6261,115 +6261,104 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-static int intel_crtc_page_flip(struct drm_crtc *crtc,
-				struct drm_framebuffer *fb,
-				struct drm_pending_vblank_event *event)
+static int intel_gen2_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_unpin_work *work;
-	unsigned long flags, offset;
-	int pipe = intel_crtc->pipe;
-	u32 pf, pipesrc;
+	unsigned long offset;
+	u32 flip_mask;
 	int ret;
 
-	work = kzalloc(sizeof *work, GFP_KERNEL);
-	if (work == NULL)
-		return -ENOMEM;
-
-	work->event = event;
-	work->dev = crtc->dev;
-	intel_fb = to_intel_framebuffer(crtc->fb);
-	work->old_fb_obj = intel_fb->obj;
-	INIT_WORK(&work->work, intel_unpin_work_fn);
-
-	/* We borrow the event spin lock for protecting unpin_work */
-	spin_lock_irqsave(&dev->event_lock, flags);
-	if (intel_crtc->unpin_work) {
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-		kfree(work);
-
-		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
-		return -EBUSY;
-	}
-	intel_crtc->unpin_work = work;
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
-
-	mutex_lock(&dev->struct_mutex);
 	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
 	if (ret)
-		goto cleanup_work;
+		goto out;
 
-	/* Reference the objects for the scheduled work. */
-	drm_gem_object_reference(&work->old_fb_obj->base);
-	drm_gem_object_reference(&obj->base);
-
-	crtc->fb = fb;
+	/* Offset into the new buffer for cases of shared fbs between CRTCs */
+	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
 
-	ret = drm_vblank_get(dev, intel_crtc->pipe);
+	ret = BEGIN_LP_RING(6);
 	if (ret)
-		goto cleanup_objs;
-
-	if (IS_GEN3(dev) || IS_GEN2(dev)) {
-		u32 flip_mask;
+		goto out;
 
-		/* Can't queue multiple flips, so wait for the previous
-		 * one to finish before executing the next.
-		 */
-		ret = BEGIN_LP_RING(2);
-		if (ret)
-			goto cleanup_objs;
-
-		if (intel_crtc->plane)
-			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-		else
-			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-		OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
-		OUT_RING(MI_NOOP);
-		ADVANCE_LP_RING();
-	}
+	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
+	if (intel_crtc->plane)
+		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+	else
+		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_DISPLAY_FLIP |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch);
+	OUT_RING(obj->gtt_offset + offset);
+	OUT_RING(MI_NOOP);
+	ADVANCE_LP_RING();
+out:
+	return ret;
+}
 
-	work->pending_flip_obj = obj;
+static int intel_gen3_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	unsigned long offset;
+	u32 flip_mask;
+	int ret;
 
-	work->enable_stall_check = true;
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	if (ret)
+		goto out;
 
 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
 	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
 
-	ret = BEGIN_LP_RING(4);
+	ret = BEGIN_LP_RING(6);
 	if (ret)
-		goto cleanup_objs;
+		goto out;
 
-	/* Block clients from rendering to the new back buffer until
-	 * the flip occurs and the object is no longer visible.
-	 */
-	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-
-	switch (INTEL_INFO(dev)->gen) {
-	case 2:
-		OUT_RING(MI_DISPLAY_FLIP |
-			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-		OUT_RING(fb->pitch);
-		OUT_RING(obj->gtt_offset + offset);
-		OUT_RING(MI_NOOP);
-		break;
-
-	case 3:
-		OUT_RING(MI_DISPLAY_FLIP_I915 |
-			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-		OUT_RING(fb->pitch);
-		OUT_RING(obj->gtt_offset + offset);
-		OUT_RING(MI_NOOP);
-		break;
+	if (intel_crtc->plane)
+		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+	else
+		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+	OUT_RING(MI_DISPLAY_FLIP_I915 |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch);
+	OUT_RING(obj->gtt_offset + offset);
+	OUT_RING(MI_NOOP);
 
-	case 4:
-	case 5:
+	ADVANCE_LP_RING();
+out:
+	return ret;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t pf, pipesrc;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	if (ret)
+		goto out;
+
+	ret = BEGIN_LP_RING(4);
+	if (ret)
+		goto out;
+
 	/* i965+ uses the linear or tiled offsets from the
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
@@ -6384,23 +6373,147 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
 	 */
 	pf = 0;
-		pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
+	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
 	OUT_RING(pf | pipesrc);
-		break;
+	ADVANCE_LP_RING();
+out:
+	return ret;
+}
+
+static int intel_gen6_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t pf, pipesrc;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	if (ret)
+		goto out;
+
+	ret = BEGIN_LP_RING(4);
+	if (ret)
+		goto out;
 
-	case 6:
-	case 7:
-		OUT_RING(MI_DISPLAY_FLIP |
-			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-		OUT_RING(fb->pitch | obj->tiling_mode);
-		OUT_RING(obj->gtt_offset);
+	OUT_RING(MI_DISPLAY_FLIP |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch | obj->tiling_mode);
+	OUT_RING(obj->gtt_offset);
 
-		pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
-		pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
-		OUT_RING(pf | pipesrc);
-		break;
-	}
+	pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+	OUT_RING(pf | pipesrc);
 	ADVANCE_LP_RING();
+out:
+	return ret;
+}
+
+/*
+ * On gen7 we currently use the blit ring because (in early silicon at least)
+ * the render ring doesn't give us interrpts for page flip completion, which
+ * means clients will hang after the first flip is queued.  Fortunately the
+ * blit ring generates interrupts properly, so use it instead.
+ */
+static int intel_gen7_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+	if (ret)
+		goto out;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		goto out;
+
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+	intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+	intel_ring_emit(ring, (obj->gtt_offset));
+	intel_ring_emit(ring, (MI_NOOP));
+	intel_ring_advance(ring);
+out:
+	return ret;
+}
+
+static int intel_default_queue_flip(struct drm_device *dev,
+				    struct drm_crtc *crtc,
+				    struct drm_framebuffer *fb,
+				    struct drm_i915_gem_object *obj)
+{
+	return -ENODEV;
+}
+
+static int intel_crtc_page_flip(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_framebuffer *intel_fb;
+	struct drm_i915_gem_object *obj;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_unpin_work *work;
+	unsigned long flags;
+	int ret;
+
+	work = kzalloc(sizeof *work, GFP_KERNEL);
+	if (work == NULL)
+		return -ENOMEM;
+
+	work->event = event;
+	work->dev = crtc->dev;
+	intel_fb = to_intel_framebuffer(crtc->fb);
+	work->old_fb_obj = intel_fb->obj;
+	INIT_WORK(&work->work, intel_unpin_work_fn);
+
+	/* We borrow the event spin lock for protecting unpin_work */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (intel_crtc->unpin_work) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(work);
+
+		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+		return -EBUSY;
+	}
+	intel_crtc->unpin_work = work;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	intel_fb = to_intel_framebuffer(fb);
+	obj = intel_fb->obj;
+
+	mutex_lock(&dev->struct_mutex);
+
+	/* Reference the objects for the scheduled work. */
+	drm_gem_object_reference(&work->old_fb_obj->base);
+	drm_gem_object_reference(&obj->base);
+
+	crtc->fb = fb;
+
+	ret = drm_vblank_get(dev, intel_crtc->pipe);
+	if (ret)
+		goto cleanup_objs;
+
+	work->pending_flip_obj = obj;
+
+	work->enable_stall_check = true;
+
+	/* Block clients from rendering to the new back buffer until
+	 * the flip occurs and the object is no longer visible.
+	 */
+	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+
+	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+	if (ret)
+		goto cleanup_pending;
 
 	mutex_unlock(&dev->struct_mutex);
 
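
The comment above intel_gen7_queue_flip carries the reasoning: early Ivy Bridge silicon only raises flip-completion interrupts reliably from the blit ring, and because the legacy BEGIN_LP_RING/OUT_RING macros are bound to the render ring, the gen7 path has to name its ring explicitly via intel_ring_begin/intel_ring_emit/intel_ring_advance on dev_priv->ring[BCS]. A stand-alone sketch of that begin/emit/advance shape, using simplified stand-ins rather than the i915 structures:

/* Simplified stand-ins for the explicit-ring API used by the gen7 path. */
#include <stdint.h>
#include <stddef.h>

struct ring {
	uint32_t cmds[64];
	size_t tail;
};

static int ring_begin(struct ring *r, size_t n)
{
	/* the real code waits until the hardware has freed enough space */
	return (r->tail + n <= 64) ? 0 : -1;
}

static void ring_emit(struct ring *r, uint32_t dw)
{
	r->cmds[r->tail++] = dw;	/* stage one command dword */
}

static void ring_advance(struct ring *r)
{
	(void)r;	/* the real code writes the tail register to start execution */
}

static int emit_flip(struct ring *r, uint32_t flip, uint32_t pitch,
		     uint32_t base, uint32_t noop)
{
	if (ring_begin(r, 4))	/* reserve all 4 dwords before emitting */
		return -1;
	ring_emit(r, flip);
	ring_emit(r, pitch);
	ring_emit(r, base);
	ring_emit(r, noop);
	ring_advance(r);
	return 0;
}
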
@@ -6408,10 +6521,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	return 0;
 
+cleanup_pending:
+	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 cleanup_objs:
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
-cleanup_work:
 	mutex_unlock(&dev->struct_mutex);
 
 	spin_lock_irqsave(&dev->event_lock, flags);
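
The new cleanup_pending label keeps the unwind symmetric with the setup: the pending_flip bit is set before the per-chipset hook runs, so a failing queue_flip must clear it again before falling through to the existing cleanup. A compact sketch of that goto-unwind idiom, with placeholder step names standing in for the real operations:

/* Goto-unwind idiom: each label undoes exactly what succeeded before it. */
#include <stdatomic.h>

static atomic_int pending_flip;

static int take_refs(void)  { return 0; }
static void drop_refs(void) { }
static int queue_flip(void) { return -19; /* pretend the hook failed */ }

static int page_flip(void)
{
	int ret;

	ret = take_refs();
	if (ret)
		return ret;

	atomic_fetch_add(&pending_flip, 1);	/* block rendering to the fb */

	ret = queue_flip();
	if (ret)
		goto cleanup_pending;

	return 0;

cleanup_pending:
	atomic_fetch_sub(&pending_flip, 1);	/* undo in reverse order */
	drop_refs();				/* then the earlier steps */
	return ret;
}
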
@@ -7656,6 +7770,31 @@ static void intel_init_display(struct drm_device *dev)
 		else
 			dev_priv->display.get_fifo_size = i830_get_fifo_size;
 	}
+
+	/* Default just returns -ENODEV to indicate unsupported */
+	dev_priv->display.queue_flip = intel_default_queue_flip;
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 2:
+		dev_priv->display.queue_flip = intel_gen2_queue_flip;
+		break;
+
+	case 3:
+		dev_priv->display.queue_flip = intel_gen3_queue_flip;
+		break;
+
+	case 4:
+	case 5:
+		dev_priv->display.queue_flip = intel_gen4_queue_flip;
+		break;
+
+	case 6:
+		dev_priv->display.queue_flip = intel_gen6_queue_flip;
+		break;
+	case 7:
+		dev_priv->display.queue_flip = intel_gen7_queue_flip;
+		break;
+	}
 }
 
 /*