
Commit f0aa848f authored by Dave Airlie

Merge branch 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
As promised a stash of (mostly) fixes. Two pieces of non-fixes included:
- A notch more gtt refactoring from Ben, beaten to death with igt in our
  nightly testing.
- Support for display-less server chips (again from Ben). New hw support
  which is only likely to break itself ;-)

Otherwise just tons of fixes:
- hpd irq storm mitigation from Egbert Eich. Your -next tree already has
  the infrastructure; this just supplies the logic (a rough sketch of the
  detection idea follows this list).
- sdvo hw state check fix from Egbert Eich
- fb cb tune settings for the pch pll clocks on cpt/ppt
- "Bring a bigger gun" coherence workaround for multi-threade, mulit-core
  & thrashing tiled gtt cpu access from Chris.
- Update haswell mPHY code.
- l3$ caching for context objects on ivb/hsw (Chris).
- dp aux refclock fix for haswell (Jani)
- moar overclocking fixes for snb/ivb (Ben)
- ecobits ppgtt pte caching control fixes from Ville
- fence stride check fixes and limit improvements (Ville)
- fix up crtc force restoring, potentially resulting in tons of hw state
  check WARNs
- Oops fix for NULL dereferencing of fb pointers when force-restoring a crtc
  while other crtcs are disabled and the force-restored crtc is _not_ the
  first one.
- Fix pfit disabling on gen2/3.
- Haswell ring freq scaling fixes (Chris).
- backlight init/teardown fix (failed eDP init killed the lvds backlight)
  from Jani
- cpt/ppt fdi polarity fixes from Paulo (should help a lot of the FDI link
  train failures).
- And a bunch of smaller things all over.
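
As a rough sketch of the HPD storm-detection idea mentioned above: count hotplug events per pin inside a short time window and mask the pin once a threshold is exceeded, leaving re-enabling to a timer. Everything below is an illustrative userspace model with made-up names and constants, not the driver's code; the real logic lands in the i915 hotplug interrupt path via the HPD storm commits in the shortlog.

/*
 * Hypothetical model of per-pin HPD storm detection: too many hotplug
 * events on one pin within a short window mark that pin as disabled.
 */
#include <stdbool.h>
#include <stdio.h>

#define HPD_STORM_THRESHOLD 5      /* more events than this ...            */
#define HPD_STORM_WINDOW_MS 1000   /* ... inside this window count as storm */

struct hpd_pin_stats {
	unsigned long last_event_ms;   /* start of the current window */
	int count;                     /* events seen in the window   */
	bool disabled;                 /* masked until a timer re-enables it */
};

/* Returns true when this event tips the pin into the "storm" state. */
static bool hpd_event(struct hpd_pin_stats *pin, unsigned long now_ms)
{
	if (pin->disabled)
		return false;

	if (now_ms - pin->last_event_ms > HPD_STORM_WINDOW_MS) {
		/* Quiet for long enough: restart the window. */
		pin->last_event_ms = now_ms;
		pin->count = 0;
	}

	if (++pin->count > HPD_STORM_THRESHOLD) {
		pin->disabled = true;  /* the real driver masks the HPD irq bit here */
		return true;
	}
	return false;
}

int main(void)
{
	struct hpd_pin_stats pin = { 0 };
	unsigned long t;

	/* A burst of events 10 ms apart trips the storm path. */
	for (t = 0; t < 100; t += 10)
		if (hpd_event(&pin, t))
			printf("storm detected at t=%lums, pin masked\n", t);
	return 0;
}
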
* 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel: (56 commits)
  drm/i915: fix bpc vs. bpp confusion in intel_crtc_compute_config
  drm/i915: move cpu_transcoder to the pipe configuration
  drm/i915: preserve the PBC bits of TRANS_CHICKEN2
  drm/i915: set CPT FDI RX polarity bits based on VBT
  drm/i915: Add Reenable Timer to turn Hotplug Detection back on (v4)
  drm/i915: Disable HPD interrupt on pin when irq storm is detected (v3)
  drm/i915: Mask out the HPD irq bits before setting them individually.
  drm/i915: (re)init HPD interrupt storm statistics
  drm/i915: Add HPD IRQ storm detection (v5)
  drm/i915: WARN when LPT-LP is not paired with ULT CPU
  drm/i915: don't intel_crt_init on any ULT machines
  drm/i915: remove comment about IVB link training from intel_pm.c
  drm/i915: VLV doesn't have LLC
  drm/i915: Scale ring, rather than ia, frequency on Haswell
  drm/i915: shorten debugfs output simple attributes
  drm/i915: Fixup pfit disabling for gen2/3
  drm/i915: Fixup Oops in the pipe config computation
  drm/i915: ensure single initialization and cleanup of backlight device
  drm/i915: don't touch the PF regs if the power well is down
  drm/i915: add intel_using_power_well
  ...
parents e1adc78c bd080ee5
+12 −6
@@ -901,7 +901,7 @@ i915_next_seqno_set(void *data, u64 val)

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"next_seqno :  0x%llx\n");
			"0x%llx\n");

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
@@ -1006,6 +1006,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
	} else {
		seq_printf(m, "no P-state info available\n");
	}
@@ -1354,7 +1357,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
	if (ret)
		return ret;

	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
@@ -1363,7 +1366,10 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -1687,7 +1693,7 @@ i915_wedged_set(void *data, u64 val)

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"wedged :  %llu\n");
			"%llu\n");

static int
i915_ring_stop_get(void *data, u64 *val)
@@ -1841,7 +1847,7 @@ i915_max_freq_set(void *data, u64 val)

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"max freq: %llu\n");
			"%llu\n");

static int
i915_min_freq_get(void *data, u64 *val)
@@ -1892,7 +1898,7 @@ i915_min_freq_set(void *data, u64 val)

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"min freq: %llu\n");
			"%llu\n");

static int
i915_cache_sharing_get(void *data, u64 *val)
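
An aside on the ring frequency table hunk above: the GEN6_PCODE_READ_MIN_FREQ_TABLE response is now printed as an effective CPU frequency (bits 7:0) and an effective ring frequency (bits 15:8), each in units of 100 MHz. A standalone decode sketch, assuming only the byte layout visible in the new seq_printf (the helper name and sample value are made up):

/* Decode a pcode MIN_FREQ_TABLE entry the way the updated debugfs output
 * does: IA frequency in bits 7:0, ring frequency in bits 15:8, x100 MHz. */
#include <stdint.h>
#include <stdio.h>

static void decode_freq_table_entry(uint32_t ia_freq_raw)
{
	unsigned int cpu_mhz  = ((ia_freq_raw >> 0) & 0xff) * 100;
	unsigned int ring_mhz = ((ia_freq_raw >> 8) & 0xff) * 100;

	printf("effective CPU %u MHz, effective ring %u MHz\n", cpu_mhz, ring_mhz);
}

int main(void)
{
	/* bits 15:8 = 0x0d -> 1300 MHz ring, bits 7:0 = 0x10 -> 1600 MHz CPU */
	decode_freq_table_entry(0x0d10);
	return 0;
}
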
+37 −30
@@ -1322,6 +1322,10 @@ static int i915_load_modeset_init(struct drm_device *dev)
	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->mm.suspended = 0;
		return 0;
	}

	ret = intel_fbdev_init(dev);
	if (ret)
@@ -1514,6 +1518,28 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	intel_early_sanitize_regs(dev);

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto put_bridge;
@@ -1538,28 +1564,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_gmch;
	}

	intel_early_sanitize_regs(dev);

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
@@ -1634,9 +1638,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	if (IS_VALLEYVIEW(dev))
		dev_priv->num_plane = 2;

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	/* Start out suspended */
	dev_priv->mm.suspended = 1;
@@ -1651,9 +1657,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);
@@ -1678,10 +1686,9 @@ out_mtrrfree:
		dev_priv->mm.gtt_mtrr = -1;
	}
	io_mapping_free(dev_priv->gtt.mappable);
	dev_priv->gtt.gtt_remove(dev);
out_rmmap:
	pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
	dev_priv->gtt.gtt_remove(dev);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
+35 −1
@@ -140,6 +140,16 @@ extern int intel_agp_enabled;
	.subdevice = PCI_ANY_ID,		\
	.driver_data = (unsigned long) info }

#define INTEL_QUANTA_VGA_DEVICE(info) {		\
	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
	.class_mask = 0xff0000,			\
	.vendor = 0x8086,			\
	.device = 0x16a,			\
	.subvendor = 0x152d,			\
	.subdevice = 0x8990,			\
	.driver_data = (unsigned long) info }


static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
@@ -272,12 +282,19 @@ static const struct intel_device_info intel_ivybridge_m_info = {
	.is_mobile = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_d_info = {
@@ -285,6 +302,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_haswell_d_info = {
@@ -342,6 +360,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
	INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
@@ -397,6 +416,15 @@ void intel_detect_pch(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		dev_priv->num_pch_pll = 0;
		return;
	}

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
@@ -431,11 +459,13 @@ void intel_detect_pch(struct drm_device *dev)
				dev_priv->num_pch_pll = 0;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(IS_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				dev_priv->num_pch_pll = 0;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_ULT(dev));
			}
			BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
		}
@@ -901,7 +931,11 @@ int i915_reset(struct drm_device *dev)
			ring->init(ring);

		i915_gem_context_init(dev);
		i915_gem_init_ppgtt(dev);
		if (dev_priv->mm.aliasing_ppgtt) {
			ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
			if (ret)
				i915_gem_cleanup_aliasing_ppgtt(dev);
		}

		/*
		 * It would make sense to re-init all the other hw state, at
+18 −4
@@ -195,9 +195,9 @@ struct drm_i915_master_private {
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 16
/* 16 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 5
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
@@ -449,6 +449,7 @@ struct i915_hw_ppgtt {
			       struct sg_table *st,
			       unsigned int pg_start,
			       enum i915_cache_level cache_level);
	int (*enable)(struct drm_device *dev);
	void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
};

@@ -479,6 +480,7 @@ enum intel_pch {
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
@@ -666,6 +668,7 @@ struct intel_gen6_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 hw_max;

	struct delayed_work delayed_resume_work;

@@ -929,6 +932,16 @@ typedef struct drm_i915_private {

	struct work_struct hotplug_work;
	bool enable_hotplug_processing;
	struct {
		unsigned long hpd_last_jiffies;
		int hpd_cnt;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} hpd_mark;
	} hpd_stats[HPD_NUM_PINS];
	struct timer_list hotplug_reenable_timer;

	int num_pch_pll;
	int num_plane;
@@ -963,6 +976,7 @@ typedef struct drm_i915_private {
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
	struct {
@@ -1373,6 +1387,7 @@ struct drm_i915_file_private {
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)

#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
@@ -1640,7 +1655,6 @@ int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_l3_remap(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
+39 −7
@@ -2683,17 +2683,35 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
	return fence - dev_priv->fence_regs;
}

static void i915_gem_write_fence__ipi(void *data)
{
	wbinvd();
}

static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int reg = fence_number(dev_priv, fence);

	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fence_reg = fence_number(dev_priv, fence);

	/* In order to fully serialize access to the fenced region and
	 * the update to the fence register we need to take extreme
	 * measures on SNB+. In theory, the write to the fence register
	 * flushes all memory transactions before, and coupled with the
	 * mb() placed around the register write we serialise all memory
	 * operations with respect to the changes in the tiler. Yet, on
	 * SNB+ we need to take a step further and emit an explicit wbinvd()
	 * on each processor in order to manually flush all memory
	 * transactions before updating the fence register.
	 */
	if (HAS_LLC(obj->base.dev))
		on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
	i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);

	if (enable) {
		obj->fence_reg = reg;
		obj->fence_reg = fence_reg;
		fence->obj = obj;
		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
	} else {
@@ -3992,6 +4010,12 @@ i915_gem_init_hw(struct drm_device *dev)
	if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
		I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);

	if (HAS_PCH_NOP(dev)) {
		u32 temp = I915_READ(GEN7_MSG_CTL);
		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
		I915_WRITE(GEN7_MSG_CTL, temp);
	}

	i915_gem_l3_remap(dev);

	i915_gem_init_swizzling(dev);
@@ -4005,7 +4029,13 @@ i915_gem_init_hw(struct drm_device *dev)
	 * contexts before PPGTT.
	 */
	i915_gem_context_init(dev);
	i915_gem_init_ppgtt(dev);
	if (dev_priv->mm.aliasing_ppgtt) {
		ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
		if (ret) {
			i915_gem_cleanup_aliasing_ppgtt(dev);
			DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
		}
	}

	return 0;
}
@@ -4160,7 +4190,9 @@ i915_gem_load(struct drm_device *dev)
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;