
Commit a8cbd459 authored by Daniel Vetter

Merge branch 'drm-intel-next-fixes' into drm-intel-next



So I've sent the first pull request to Dave and I expect his request
for a merge tree any second now ;-)

More seriously I have some pending patches for 3.19 that depend upon
both trees, hence backmerge. Conflicts are all trivial.

Conflicts:
	drivers/gpu/drm/i915/i915_irq.c
	drivers/gpu/drm/i915/intel_display.c

v2: Of course I've forgotten the fixup script for the silent conflict.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
parents b7277357 07c338ce
drivers/gpu/drm/i915/i915_drv.h  +0 −22
@@ -968,23 +968,6 @@ struct intel_rps_ei {
 	u32 media_c0;
 };
 
-struct intel_rps_bdw_cal {
-	u32 it_threshold_pct; /* interrupt, in percentage */
-	u32 eval_interval; /* evaluation interval, in us */
-	u32 last_ts;
-	u32 last_c0;
-	bool is_up;
-};
-
-struct intel_rps_bdw_turbo {
-	struct intel_rps_bdw_cal up;
-	struct intel_rps_bdw_cal down;
-	struct timer_list flip_timer;
-	u32 timeout;
-	atomic_t flip_received;
-	struct work_struct work_max_freq;
-};
-
 struct intel_gen6_power_mgmt {
 	/* work and pm_iir are protected by dev_priv->irq_lock */
 	struct work_struct work;
@@ -1018,9 +1001,6 @@ struct intel_gen6_power_mgmt {
 	bool enabled;
 	struct delayed_work delayed_resume_work;
 
-	bool is_bdw_sw_turbo;	/* Switch of BDW software turbo */
-	struct intel_rps_bdw_turbo sw_turbo; /* Calculate RP interrupt timing */
-
 	/* manual wa residency calculations */
 	struct intel_rps_ei up_ei, down_ei;
 
@@ -2857,8 +2837,6 @@ extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void bdw_software_turbo(struct drm_device *dev);
-extern void gen8_flip_interrupt(struct drm_device *dev);
 extern void valleyview_set_rps(struct drm_device *dev, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
drivers/gpu/drm/i915/i915_gem_userptr.c  +19 −12
@@ -293,15 +293,23 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 static struct i915_mmu_notifier *
 i915_mmu_notifier_find(struct i915_mm_struct *mm)
 {
-	if (mm->mn == NULL) {
+	struct i915_mmu_notifier *mn = mm->mn;
+
+	mn = mm->mn;
+	if (mn)
+		return mn;
+
 	down_write(&mm->mm->mmap_sem);
 	mutex_lock(&to_i915(mm->dev)->mm_lock);
-		if (mm->mn == NULL)
-			mm->mn = i915_mmu_notifier_create(mm->mm);
+	if ((mn = mm->mn) == NULL) {
+		mn = i915_mmu_notifier_create(mm->mm);
+		if (!IS_ERR(mn))
+			mm->mn = mn;
+	}
 	mutex_unlock(&to_i915(mm->dev)->mm_lock);
 	up_write(&mm->mm->mmap_sem);
-	}
-	return mm->mn;
+
+	return mn;
 }
 
 static int
@@ -681,16 +689,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 static void
 i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 {
-	struct scatterlist *sg;
-	int i;
+	struct sg_page_iter sg_iter;
 
 	BUG_ON(obj->userptr.work != NULL);
 
 	if (obj->madv != I915_MADV_WILLNEED)
 		obj->dirty = 0;
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page = sg_page(sg);
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
 
 		if (obj->dirty)
 			set_page_dirty(page);
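
Aside: the i915_mmu_notifier_find() hunk above switches to a double-checked creation pattern that only publishes the notifier pointer when creation succeeded, so a failed attempt is retried later instead of an error pointer being cached forever. Below is a minimal userspace sketch of that pattern, not the driver's actual code: struct widget, widget_get() and the pthread mutex are illustrative stand-ins, and a production version would use atomic loads/stores (READ_ONCE/WRITE_ONCE in the kernel) for the unlocked fast path.

#include <pthread.h>
#include <stdlib.h>

struct widget { int value; };

static struct widget *cached_widget;		/* published only on success */
static pthread_mutex_t widget_lock = PTHREAD_MUTEX_INITIALIZER;

static struct widget *widget_create(void)
{
	struct widget *w = malloc(sizeof(*w));	/* NULL plays the role of ERR_PTR here */

	if (w)
		w->value = 42;
	return w;
}

static struct widget *widget_get(void)
{
	struct widget *w = cached_widget;

	if (w)					/* fast path: already created */
		return w;

	pthread_mutex_lock(&widget_lock);
	w = cached_widget;			/* re-check under the lock */
	if (!w) {
		w = widget_create();
		if (w)				/* cache only a valid pointer, */
			cached_widget = w;	/* never a failed result */
	}
	pthread_mutex_unlock(&widget_lock);

	return w;
}

int main(void)
{
	struct widget *a = widget_get();	/* slow path: creates and caches */
	struct widget *b = widget_get();	/* fast path: returns the cached pointer */

	return (a && a == b) ? 0 : 1;
}
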
drivers/gpu/drm/i915/i915_irq.c  +11 −30
@@ -1716,7 +1716,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 #define HPD_STORM_DETECT_PERIOD 1000
 #define HPD_STORM_THRESHOLD 5
 
-static int ilk_port_to_hotplug_shift(enum port port)
+static int pch_port_to_hotplug_shift(enum port port)
 {
 	switch (port) {
 	case PORT_A:
@@ -1732,7 +1732,7 @@ static int ilk_port_to_hotplug_shift(enum port port)
 	}
 }
 
-static int g4x_port_to_hotplug_shift(enum port port)
+static int i915_port_to_hotplug_shift(enum port port)
 {
 	switch (port) {
 	case PORT_A:
@@ -1790,12 +1790,12 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
 		if (port && dev_priv->hpd_irq_port[port]) {
 			bool long_hpd;
 
-			if (IS_G4X(dev)) {
-				dig_shift = g4x_port_to_hotplug_shift(port);
-				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
-			} else {
-				dig_shift = ilk_port_to_hotplug_shift(port);
+			if (HAS_PCH_SPLIT(dev)) {
+				dig_shift = pch_port_to_hotplug_shift(port);
 				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
+			} else {
+				dig_shift = i915_port_to_hotplug_shift(port);
+				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
 			}
 
 			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
@@ -1984,27 +1984,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
 				     res1, res2);
 }
 
-void gen8_flip_interrupt(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!dev_priv->rps.is_bdw_sw_turbo)
-		return;
-
-	if(atomic_read(&dev_priv->rps.sw_turbo.flip_received)) {
-		mod_timer(&dev_priv->rps.sw_turbo.flip_timer,
-				usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies);
-	}
-	else {
-		dev_priv->rps.sw_turbo.flip_timer.expires =
-				usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
-		add_timer(&dev_priv->rps.sw_turbo.flip_timer);
-		atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
-	}
-
-	bdw_software_turbo(dev);
-}
-
 /* The RPS events need forcewake, so we add them to a work queue and mask their
  * IMR bits until the work is done. Other interrupts can be processed without
  * the work queue. */
@@ -3494,11 +3473,13 @@ static void gen8_irq_reset(struct drm_device *dev)

 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
+	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+
 	spin_lock_irq(&dev_priv->irq_lock);
 	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
-			  ~dev_priv->de_irq_mask[PIPE_B]);
+			  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
 	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
-			  ~dev_priv->de_irq_mask[PIPE_C]);
+			  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
drivers/gpu/drm/i915/i915_reg.h  +3 −4
@@ -2474,6 +2474,7 @@ enum punit_power_well {
 #define _PIPEASRC	0x6001c
 #define _BCLRPAT_A	0x60020
 #define _VSYNCSHIFT_A	0x60028
+#define _PIPE_MULT_A	0x6002c
 
 /* Pipe B timing regs */
 #define _HTOTAL_B	0x61000
@@ -2485,6 +2486,7 @@ enum punit_power_well {
 #define _PIPEBSRC	0x6101c
 #define _BCLRPAT_B	0x61020
 #define _VSYNCSHIFT_B	0x61028
+#define _PIPE_MULT_B	0x6102c
 
 #define TRANSCODER_A_OFFSET 0x60000
 #define TRANSCODER_B_OFFSET 0x61000
@@ -2505,6 +2507,7 @@ enum punit_power_well {
 #define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
 #define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
 #define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
+#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
 
 /* HSW+ eDP PSR registers */
 #define EDP_PSR_BASE(dev)                       (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
@@ -5766,10 +5769,6 @@ enum punit_power_well {
 #define GEN8_UCGCTL6				0x9430
 #define   GEN8_SDEUNIT_CLOCK_GATE_DISABLE	(1<<14)
 
-#define TIMESTAMP_CTR		0x44070
-#define FREQ_1_28_US(us)	(((us) * 100) >> 7)
-#define MCHBAR_PCU_C0		(MCHBAR_MIRROR_BASE_SNB + 0x5960)
-
 #define GEN6_GFXPAUSE				0xA000
 #define GEN6_RPNSWREQ				0xA008
 #define   GEN6_TURBO_DISABLE			(1<<31)
drivers/gpu/drm/i915/intel_display.c  +32 −19
@@ -73,9 +73,6 @@ static const uint32_t intel_cursor_formats[] = {
 	DRM_FORMAT_ARGB8888,
 };
 
-#define DIV_ROUND_CLOSEST_ULL(ll, d)	\
-({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
-
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -4265,6 +4262,11 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)

 	intel_set_pipe_timings(intel_crtc);
 
+	if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
+		I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
+			   intel_crtc->config.pixel_multiplier - 1);
+	}
+
 	if (intel_crtc->config.has_pch_encoder) {
 		intel_cpu_transcoder_set_m_n(intel_crtc,
 				     &intel_crtc->config.fdi_m_n, NULL);
@@ -7937,7 +7939,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
 			(I915_READ(IPS_CTL) & IPS_ENABLE);
 
+	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
+		pipe_config->pixel_multiplier =
+			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+	} else {
 		pipe_config->pixel_multiplier = 1;
+	}
 
 	return true;
 }
@@ -9773,9 +9780,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct intel_engine_cs *ring;
 	int ret;
 
-	//trigger software GT busyness calculation
-	gen8_flip_interrupt(dev);
-
 	/*
 	 * drm_mode_page_flip_ioctl() should already catch this, but double
 	 * check to be safe.  In the future we may enable pageflipping from
@@ -12223,28 +12227,37 @@ static void intel_setup_outputs(struct drm_device *dev)
 		if (I915_READ(PCH_DP_D) & DP_DETECTED)
 			intel_dp_init(dev, PCH_DP_D, PORT_D);
 	} else if (IS_VALLEYVIEW(dev)) {
-		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
+		/*
+		 * The DP_DETECTED bit is the latched state of the DDC
+		 * SDA pin at boot. However since eDP doesn't require DDC
+		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
+		 * eDP ports may have been muxed to an alternate function.
+		 * Thus we can't rely on the DP_DETECTED bit alone to detect
+		 * eDP ports. Consult the VBT as well as DP_DETECTED to
+		 * detect eDP ports.
+		 */
+		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED)
 			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
 					PORT_B);
-			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
+		if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
+		    intel_dp_is_edp(dev, PORT_B))
 			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
-		}
 
-		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
+		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED)
 			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
 					PORT_C);
-			if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
+		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
+		    intel_dp_is_edp(dev, PORT_C))
 			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
-		}
 
 		if (IS_CHERRYVIEW(dev)) {
-			if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) {
+			if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
 				intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
 						PORT_D);
+			/* eDP not supported on port D, so don't check VBT */
 			if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
 				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
-			}
 		}
 
 		intel_dsi_init(dev);
 	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
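
Aside: the PIPE_MULT hunks above program a pixel multiplier of N into the register as N - 1 (haswell_crtc_enable) and recover it on readout by adding 1 (haswell_get_pipe_config). Below is a tiny self-contained sketch of that encode/decode round trip; write_reg()/read_reg() are hypothetical stand-ins for the driver's register accessors (I915_WRITE()/I915_READ()), not real i915 functions.

#include <assert.h>
#include <stdint.h>

static uint32_t pipe_mult_reg;			/* stands in for the PIPE_MULT register */

static void write_reg(uint32_t val) { pipe_mult_reg = val; }
static uint32_t read_reg(void)      { return pipe_mult_reg; }

/* Enable path: a pixel multiplier of N is programmed as N - 1. */
static void set_pixel_multiplier(uint32_t mult)
{
	write_reg(mult - 1);
}

/* Readout path: hardware value + 1 recovers the multiplier. */
static uint32_t get_pixel_multiplier(void)
{
	return read_reg() + 1;
}

int main(void)
{
	set_pixel_multiplier(2);		/* e.g. a pixel-doubled mode */
	assert(get_pixel_multiplier() == 2);	/* round-trips back to 2 */
	return 0;
}
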