
Commit f6510ec5 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel:
  drm/i915: Fix leak of relocs along do_execbuffer error path
  drm/i915: slow acpi_lid_open() causes flickering - V2
  drm/i915: Disable SR when more than one pipe is enabled
  drm/i915: page flip support for Ironlake
  drm/i915: Fix the incorrect DMI string for Samsung SX20S laptop
  drm/i915: Add support for SDVO composite TV
  drm/i915: don't trigger ironlake vblank interrupt at irq install
  drm/i915: handle non-flip pending case when unpinning the scanout buffer
  drm/i915: Fix the device info of Pineview
  drm/i915: enable vblank interrupt on ironlake
  drm/i915: Prevent use of uninitialized pointers along error path.
  drm/i915: disable hotplug detect before Ironlake CRT detect
parents 6f5a55f1 93533c29
+1 −1
@@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = {

const static struct intel_device_info intel_pineview_info = {
	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
	.has_pipe_cxsr = 1,
	.need_gfx_hws = 1,
	.has_hotplug = 1,
};

+9 −2
@@ -3564,6 +3564,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
	uint32_t reloc_count = 0, i;
	int ret = 0;

	if (relocs == NULL)
	    return 0;

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		int unwritten;
@@ -3653,7 +3656,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
	struct drm_gem_object *batch_obj;
	struct drm_i915_gem_object *obj_priv;
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_gem_relocation_entry *relocs;
	struct drm_i915_gem_relocation_entry *relocs = NULL;
	int ret = 0, ret2, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains, reloc_index;
@@ -3722,6 +3725,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				   exec_list[i].handle, i);
			/* prevent error path from reading uninitialized data */
			args->buffer_count = i + 1;
			ret = -EBADF;
			goto err;
		}
@@ -3730,6 +3735,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		if (obj_priv->in_execbuffer) {
			DRM_ERROR("Object %p appears more than once in object list\n",
				   object_list[i]);
			/* prevent error path from reading uninitialized data */
			args->buffer_count = i + 1;
			ret = -EBADF;
			goto err;
		}
@@ -3926,6 +3933,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* Copy the updated relocations out regardless of current error
	 * state.  Failure to update the relocs would mean that the next
	 * time userland calls execbuf, it would do so with presumed offset
@@ -3940,7 +3948,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
			ret = ret2;
	}

pre_mutex_err:
	drm_free_large(object_list);
	kfree(cliprects);

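The hunk above leans on a common kernel error-handling idiom: pointers that a shared cleanup label will release are initialized to NULL so the label is safe to reach from any failure point. A self-contained sketch of the pattern, with invented names, assuming nothing beyond libc:

#include <stdlib.h>
#include <string.h>

static int do_request(size_t count)
{
	int ret = 0;
	char *relocs = NULL;	/* NULL so the unwind below may always free it */
	char *cliprects = NULL;

	relocs = malloc(count);
	if (!relocs) {
		ret = -1;
		goto err;	/* cliprects is still NULL: free() is a no-op */
	}

	cliprects = malloc(count);
	if (!cliprects) {
		ret = -1;
		goto err;
	}

	memset(relocs, 0, count);
	memset(cliprects, 0, count);

err:
	/* Single unwind path: freeing NULL is defined and does nothing. */
	free(relocs);
	free(cliprects);
	return ret;
}

int main(void)
{
	return do_request(64) ? 1 : 0;
}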
+30 −12
@@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
	if (de_iir & DE_GSE)
		ironlake_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE)
		intel_prepare_page_flip(dev, 0);

	if (de_iir & DE_PLANEB_FLIP_DONE)
		intel_prepare_page_flip(dev, 1);

	if (de_iir & DE_PIPEA_VBLANK) {
		drm_handle_vblank(dev, 0);
		intel_finish_page_flip(dev, 0);
	}

	if (de_iir & DE_PIPEB_VBLANK) {
		drm_handle_vblank(dev, 1);
		intel_finish_page_flip(dev, 1);
	}

	/* check event from PCH */
	if ((de_iir & DE_PCH_EVENT) &&
	    (pch_iir & SDE_HOTPLUG_MASK)) {
@@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
	if (!(pipeconf & PIPEACONF_ENABLE))
		return -EINVAL;

	if (IS_IRONLAKE(dev))
		return 0;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (IS_I965G(dev))
	if (IS_IRONLAKE(dev))
		ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
					    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
	else if (IS_I965G(dev))
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
@@ -866,10 +882,11 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (IS_IRONLAKE(dev))
		return;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (IS_IRONLAKE(dev))
		ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
					     DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
	else
		i915_disable_pipestat(dev_priv, pipe,
				      PIPE_VBLANK_INTERRUPT_ENABLE |
				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
@@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_mask = GT_USER_INTERRUPT;
	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;

	dev_priv->irq_mask_reg = ~display_mask;
	dev_priv->de_irq_enable_reg = display_mask;
	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;

	/* should always can generate irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
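A rough sketch of the split the interrupt-setup hunk above makes between "unmasked at install time" and "merely allowed, to be unmasked later on demand by i915_enable_vblank()". The bit values below are invented for illustration; only the relationship between the mask set and the enable set is the point:

#include <stdint.h>
#include <stdio.h>

#define DE_MASTER_IRQ_CONTROL	(1u << 31)
#define DE_GSE			(1u << 0)
#define DE_PCH_EVENT		(1u << 1)
#define DE_PLANEA_FLIP_DONE	(1u << 2)
#define DE_PLANEB_FLIP_DONE	(1u << 3)
#define DE_PIPEA_VBLANK		(1u << 4)
#define DE_PIPEB_VBLANK		(1u << 5)

int main(void)
{
	uint32_t display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;

	/* vblank bits stay masked at install but sit in the enable set,
	 * so they can be switched on per pipe when userspace asks. */
	uint32_t irq_mask_reg = ~display_mask;
	uint32_t de_irq_enable_reg = display_mask |
				     DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;

	printf("PIPEA_VBLANK masked at install: %s\n",
	       (irq_mask_reg & DE_PIPEA_VBLANK) ? "yes" : "no");
	printf("PIPEA_VBLANK in enable set:     %s\n",
	       (de_irq_enable_reg & DE_PIPEA_VBLANK) ? "yes" : "no");
	return 0;
}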
+3 −0
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
	adpa = I915_READ(PCH_ADPA);

	adpa &= ~ADPA_CRT_HOTPLUG_MASK;
	/* disable HPD first */
	I915_WRITE(PCH_ADPA, adpa);
	(void)I915_READ(PCH_ADPA);

	adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
			ADPA_CRT_HOTPLUG_WARMUP_10MS |
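The two added lines above use the usual MMIO idiom of writing a register and immediately reading it back so the write is posted to the hardware before the next step of the detect sequence is programmed. A stand-alone sketch with a fake register file (register names and bit masks here are hypothetical stand-ins):

#include <stdint.h>

static volatile uint32_t fake_mmio[16];	/* pretend register file */

#define REG_ADPA		0
#define ADPA_HOTPLUG_BITS	0x03000000u

static inline void mmio_write(unsigned int reg, uint32_t val)
{
	fake_mmio[reg] = val;
}

static inline uint32_t mmio_read(unsigned int reg)
{
	return fake_mmio[reg];
}

int main(void)
{
	uint32_t adpa = mmio_read(REG_ADPA);

	/* Clear the hotplug-detect bits first ... */
	adpa &= ~ADPA_HOTPLUG_BITS;
	mmio_write(REG_ADPA, adpa);
	/* ... then read the register back so the write is flushed to the
	 * device before the detection cycle is kicked off. */
	(void)mmio_read(REG_ADPA);

	return 0;
}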
+31 −2
@@ -1638,6 +1638,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
	case DRM_MODE_DPMS_OFF:
		DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);

		drm_vblank_off(dev, pipe);
		/* Disable display plane */
		temp = I915_READ(dspcntr_reg);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2519,6 +2520,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
		sr_entries = roundup(sr_entries / cacheline_size, 1);
		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
					& ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2562,6 +2567,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
			srwm = 1;
		srwm &= 0x3f;
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
					& ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2630,6 +2639,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
		if (srwm < 0)
			srwm = 1;
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
	} else {
		/* Turn off self refresh if both pipes are enabled */
		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
					& ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -3984,6 +3997,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	if (work == NULL || !work->pending) {
		if (work && !work->pending) {
			obj_priv = work->obj->driver_private;
			DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
					 obj_priv,
					 atomic_read(&obj_priv->pending_flip));
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}
@@ -4005,7 +4024,10 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
	spin_unlock_irqrestore(&dev->event_lock, flags);

	obj_priv = work->obj->driver_private;
	if (atomic_dec_and_test(&obj_priv->pending_flip))

	/* Initial scanout buffer will have a 0 pending flip count */
	if ((atomic_read(&obj_priv->pending_flip) == 0) ||
	    atomic_dec_and_test(&obj_priv->pending_flip))
		DRM_WAKEUP(&dev_priv->pending_flip_queue);
	schedule_work(&work->work);
}
@@ -4018,8 +4040,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work)
	if (intel_crtc->unpin_work) {
		intel_crtc->unpin_work->pending = 1;
	} else {
		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

@@ -4053,6 +4078,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&dev->event_lock, flags);
		kfree(work);
		mutex_unlock(&dev->struct_mutex);
@@ -4066,7 +4092,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,

	ret = intel_pin_and_fence_fb_obj(dev, obj);
	if (ret != 0) {
		DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
			  obj->driver_private);
		kfree(work);
		intel_crtc->unpin_work = NULL;
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
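The page-flip hunks above keep a single outstanding unpin_work item per CRTC, installed under the event lock, with any second flip request rejected while one is pending. A simplified user-space sketch of that discipline (types, names, and the pthread mutex are stand-ins for the driver's structures and spinlock):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct unpin_work {
	int pending;		/* set by the flip-prepare interrupt path */
};

struct crtc {
	pthread_mutex_t event_lock;
	struct unpin_work *unpin_work;
};

static int queue_page_flip(struct crtc *crtc)
{
	struct unpin_work *work = calloc(1, sizeof(*work));
	if (!work)
		return -1;

	pthread_mutex_lock(&crtc->event_lock);
	if (crtc->unpin_work) {
		/* Already one flip in flight: drop the new request
		 * (a busy error in the driver). */
		pthread_mutex_unlock(&crtc->event_lock);
		free(work);
		return -1;
	}
	crtc->unpin_work = work;
	pthread_mutex_unlock(&crtc->event_lock);
	return 0;
}

static void prepare_page_flip(struct crtc *crtc)
{
	pthread_mutex_lock(&crtc->event_lock);
	if (crtc->unpin_work)
		crtc->unpin_work->pending = 1;
	else
		printf("preparing flip with no unpin work?\n");
	pthread_mutex_unlock(&crtc->event_lock);
}

int main(void)
{
	struct crtc crtc = { .event_lock = PTHREAD_MUTEX_INITIALIZER };

	if (queue_page_flip(&crtc) == 0)
		prepare_page_flip(&crtc);
	printf("second flip while busy: %d\n", queue_page_flip(&crtc));
	free(crtc.unpin_work);
	return 0;
}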