
Commit 8dd0eb35 authored by Dave Airlie

Merge tag 'drm-intel-next-2015-02-27' of git://anongit.freedesktop.org/drm-intel into drm-next

- Y tiling support for scanout from Tvrtko&Damien
- Remove more UMS support
- some small prep patches for OLR removal from John Harrison
- first few patches for dynamic pagetable allocation from Ben Widawsky, rebased
  by tons of other people
- DRRS support patches (Sonika&Vandana)
- fbc patches from Paulo
- make sure our vblank callbacks aren't called when the pipes are off
- various patches all over

* tag 'drm-intel-next-2015-02-27' of git://anongit.freedesktop.org/drm-intel: (61 commits)
  drm/i915: Update DRIVER_DATE to 20150227
  drm/i915: Clarify obj->map_and_fenceable
  drm/i915/skl: Allow Y (and Yf) frame buffer creation
  drm/i915/skl: Update watermarks for Y tiling
  drm/i915/skl: Updated watermark programming
  drm/i915/skl: Adjust get_plane_config() to support Yb/Yf tiling
  drm/i915/skl: Teach pin_and_fence_fb_obj() about Y tiling constraints
  drm/i915/skl: Adjust intel_fb_align_height() for Yb/Yf tiling
  drm/i915/skl: Allow scanning out Y and Yf fbs
  drm/i915/skl: Add new displayable tiling formats
  drm/i915: Remove DRIVER_MODESET checks from modeset code
  drm/i915: Remove regfile code&data for UMS suspend/resume
  drm/i915: Remove DRIVER_MODESET checks from gem code
  drm/i915: Remove DRIVER_MODESET checks in the gpu reset code
  drm/i915: Remove DRIVER_MODESET checks from suspend/resume code
  drm/i915: Remove DRIVER_MODESET checks in load/unload/close code
  drm/i915: fix a printk format
  drm/i915: Add media rc6 residency file to sysfs
  drm/i915: Add missing description to parameter in alloc_pt_range
  drm/i915: Removed the read of RP_STATE_CAP from sysfs/debugfs functions
  ...
parents d136dfee f89fe1ff
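
For context on the "make sure our vblank callbacks aren't called when the pipes are off" item above and the new drm_crtc_vblank_reset() exported in the drm_irq.c hunk below, here is a minimal usage sketch. It is illustrative only, not code from this series; the my_driver_* hook names are invented. Per the new kerneldoc, reset differs from off in that it does not save the vblank counter and so needs no driver hooks, which is why it suits load time when the pipe is already off.

#include <drm/drmP.h>	/* drm_crtc_vblank_{reset,off,on} declarations in this era */

static int my_driver_crtc_init(struct drm_crtc *crtc)		/* hypothetical */
{
	/*
	 * At driver load the pipe is off and there is no hardware vblank
	 * counter worth saving, so reset the state instead of calling off.
	 */
	drm_crtc_vblank_reset(crtc);
	return 0;
}

static void my_driver_crtc_disable(struct drm_crtc *crtc)	/* hypothetical */
{
	/*
	 * Save the counter and reject further drm_vblank_get() while the
	 * pipe can still generate vblanks, then shut the pipe down.
	 */
	drm_crtc_vblank_off(crtc);
	/* ... actually disable the pipe ... */
}

static void my_driver_crtc_enable(struct drm_crtc *crtc)	/* hypothetical */
{
	/* ... actually enable the pipe ... */

	/* Re-arm vblank processing and restore the counter. */
	drm_crtc_vblank_on(crtc);
}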
+11 −0
@@ -4051,6 +4051,17 @@ int num_ioctls;</synopsis>
	<title>Frame Buffer Compression (FBC)</title>
!Pdrivers/gpu/drm/i915/intel_fbc.c Frame Buffer Compression (FBC)
!Idrivers/gpu/drm/i915/intel_fbc.c
      </sect2>
      <sect2>
        <title>Display Refresh Rate Switching (DRRS)</title>
!Pdrivers/gpu/drm/i915/intel_dp.c Display Refresh Rate Switching (DRRS)
!Fdrivers/gpu/drm/i915/intel_dp.c intel_dp_set_drrs_state
!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_enable
!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_disable
!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_invalidate
!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_flush
!Fdrivers/gpu/drm/i915/intel_dp.c intel_dp_drrs_init

      </sect2>
      <sect2>
        <title>DPIO</title>
+46 −10
@@ -276,7 +276,6 @@ static void vblank_disable_fn(unsigned long arg)
void drm_vblank_cleanup(struct drm_device *dev)
{
	int crtc;
	unsigned long irqflags;

	/* Bail if the driver didn't call drm_vblank_init() */
	if (dev->num_crtcs == 0)
@@ -285,11 +284,10 @@ void drm_vblank_cleanup(struct drm_device *dev)
	for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
		struct drm_vblank_crtc *vblank = &dev->vblank[crtc];

		del_timer_sync(&vblank->disable_timer);
		WARN_ON(vblank->enabled &&
			drm_core_check_feature(dev, DRIVER_MODESET));

		spin_lock_irqsave(&dev->vbl_lock, irqflags);
		vblank_disable_and_save(dev, crtc);
		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
		del_timer_sync(&vblank->disable_timer);
	}

	kfree(dev->vblank);
@@ -475,17 +473,23 @@ int drm_irq_uninstall(struct drm_device *dev)
	dev->irq_enabled = false;

	/*
	 * Wake up any waiters so they don't hang.
	 * Wake up any waiters so they don't hang. This is just to paper over
	 * isssues for UMS drivers which aren't in full control of their
	 * vblank/irq handling. KMS drivers must ensure that vblanks are all
	 * disabled when uninstalling the irq handler.
	 */
	if (dev->num_crtcs) {
		spin_lock_irqsave(&dev->vbl_lock, irqflags);
		for (i = 0; i < dev->num_crtcs; i++) {
			struct drm_vblank_crtc *vblank = &dev->vblank[i];

			if (!vblank->enabled)
				continue;

			WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET));

			vblank_disable_and_save(dev, i);
			wake_up(&vblank->queue);
			vblank->enabled = false;
			vblank->last =
				dev->driver->get_vblank_counter(dev, i);
		}
		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
	}
@@ -1232,6 +1236,38 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_vblank_off);

/**
 * drm_crtc_vblank_reset - reset vblank state to off on a CRTC
 * @crtc: CRTC in question
 *
 * Drivers can use this function to reset the vblank state to off at load time.
 * Drivers should use this together with the drm_crtc_vblank_off() and
 * drm_crtc_vblank_on() functions. The difference compared to
 * drm_crtc_vblank_off() is that this function doesn't save the vblank counter
 * and hence doesn't need to call any driver hooks.
 */
void drm_crtc_vblank_reset(struct drm_crtc *drm_crtc)
{
	struct drm_device *dev = drm_crtc->dev;
	unsigned long irqflags;
	int crtc = drm_crtc_index(drm_crtc);
	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];

	spin_lock_irqsave(&dev->vbl_lock, irqflags);
	/*
	 * Prevent subsequent drm_vblank_get() from enabling the vblank
	 * interrupt by bumping the refcount.
	 */
	if (!vblank->inmodeset) {
		atomic_inc(&vblank->refcount);
		vblank->inmodeset = 1;
	}
	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

	WARN_ON(!list_empty(&dev->vblank_event_list));
}
EXPORT_SYMBOL(drm_crtc_vblank_reset);

/**
 * drm_vblank_on - enable vblank events on a CRTC
 * @dev: DRM device
@@ -1653,7 +1689,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
	struct timeval tvblank;
	unsigned long irqflags;

	if (!dev->num_crtcs)
	if (WARN_ON_ONCE(!dev->num_crtcs))
		return false;

	if (WARN_ON(crtc >= dev->num_crtcs))
+1 −2
@@ -87,8 +87,7 @@ i915-y += dvo_ch7017.o \
i915-y += i915_vgpu.o

# legacy horrors
i915-y += i915_dma.o \
	  i915_ums.o
i915-y += i915_dma.o

obj-$(CONFIG_DRM_I915)  += i915.o

+34 −40
@@ -818,24 +818,26 @@ static bool valid_reg(const u32 *table, int count, u32 addr)
	return false;
}

static u32 *vmap_batch(struct drm_i915_gem_object *obj)
static u32 *vmap_batch(struct drm_i915_gem_object *obj,
		       unsigned start, unsigned len)
{
	int i;
	void *addr = NULL;
	struct sg_page_iter sg_iter;
	int first_page = start >> PAGE_SHIFT;
	int last_page = (len + start + 4095) >> PAGE_SHIFT;
	int npages = last_page - first_page;
	struct page **pages;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	pages = drm_malloc_ab(npages, sizeof(*pages));
	if (pages == NULL) {
		DRM_DEBUG_DRIVER("Failed to get space for pages\n");
		goto finish;
	}

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		pages[i] = sg_page_iter_page(&sg_iter);
		i++;
	}
	for_each_sg_page(obj->pages->sgl, &sg_iter, npages, first_page)
		pages[i++] = sg_page_iter_page(&sg_iter);

	addr = vmap(pages, i, 0, PAGE_KERNEL);
	if (addr == NULL) {
@@ -855,61 +857,61 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
		       u32 batch_start_offset,
		       u32 batch_len)
{
	int ret = 0;
	int needs_clflush = 0;
	u32 *src_base, *dest_base = NULL;
	u32 *src_addr, *dest_addr;
	u32 offset = batch_start_offset / sizeof(*dest_addr);
	u32 end = batch_start_offset + batch_len;
	void *src_base, *src;
	void *dst = NULL;
	int ret;

	if (end > dest_obj->base.size || end > src_obj->base.size)
	if (batch_len > dest_obj->base.size ||
	    batch_len + batch_start_offset > src_obj->base.size)
		return ERR_PTR(-E2BIG);

	ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
	if (ret) {
		DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
		DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
		return ERR_PTR(ret);
	}

	src_base = vmap_batch(src_obj);
	src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
	if (!src_base) {
		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
		ret = -ENOMEM;
		goto unpin_src;
	}

	src_addr = src_base + offset;

	if (needs_clflush)
		drm_clflush_virt_range((char *)src_addr, batch_len);
	ret = i915_gem_object_get_pages(dest_obj);
	if (ret) {
		DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n");
		goto unmap_src;
	}
	i915_gem_object_pin_pages(dest_obj);

	ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
		DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
		goto unmap_src;
	}

	dest_base = vmap_batch(dest_obj);
	if (!dest_base) {
	dst = vmap_batch(dest_obj, 0, batch_len);
	if (!dst) {
		DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
		i915_gem_object_unpin_pages(dest_obj);
		ret = -ENOMEM;
		goto unmap_src;
	}

	dest_addr = dest_base + offset;

	if (batch_start_offset != 0)
		memset((u8 *)dest_base, 0, batch_start_offset);
	src = src_base + offset_in_page(batch_start_offset);
	if (needs_clflush)
		drm_clflush_virt_range(src, batch_len);

	memcpy(dest_addr, src_addr, batch_len);
	memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
	memcpy(dst, src, batch_len);

unmap_src:
	vunmap(src_base);
unpin_src:
	i915_gem_object_unpin_pages(src_obj);

	return ret ? ERR_PTR(ret) : dest_base;
	return ret ? ERR_PTR(ret) : dst;
}

/**
@@ -1046,34 +1048,26 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
		    u32 batch_len,
		    bool is_master)
{
	int ret = 0;
	u32 *cmd, *batch_base, *batch_end;
	struct drm_i915_cmd_descriptor default_desc = { 0 };
	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */

	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
		return -1;
	}
	int ret = 0;

	batch_base = copy_batch(shadow_batch_obj, batch_obj,
				batch_start_offset, batch_len);
	if (IS_ERR(batch_base)) {
		DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
		i915_gem_object_ggtt_unpin(shadow_batch_obj);
		return PTR_ERR(batch_base);
	}

	cmd = batch_base + (batch_start_offset / sizeof(*cmd));

	/*
	 * We use the batch length as size because the shadow object is as
	 * large or larger and copy_batch() will write MI_NOPs to the extra
	 * space. Parsing should be faster in some cases this way.
	 */
	batch_end = cmd + (batch_len / sizeof(*batch_end));
	batch_end = batch_base + (batch_len / sizeof(*batch_end));

	cmd = batch_base;
	while (cmd < batch_end) {
		const struct drm_i915_cmd_descriptor *desc;
		u32 length;
@@ -1132,7 +1126,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
	}

	vunmap(batch_base);
	i915_gem_object_ggtt_unpin(shadow_batch_obj);
	i915_gem_object_unpin_pages(shadow_batch_obj);

	return ret;
}
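
A quick worked example of the page-window arithmetic used by the reworked vmap_batch() above; the numbers are invented for illustration and do not come from the patch:

	/* Invented example values, assuming 4 KiB pages (PAGE_SHIFT = 12):
	 *   start = 0x1800, len = 0x2000
	 *   first_page = 0x1800 >> 12                     = 1
	 *   last_page  = (0x2000 + 0x1800 + 4095) >> 12   = 4
	 *   npages     = last_page - first_page           = 3
	 * A batch spanning bytes 0x1800..0x37ff touches pages 1, 2 and 3, so
	 * only those three pages are gathered and vmap()ed instead of every
	 * page of the object as the old code did.  The +4095 rounds the end
	 * of the range up to the next page boundary.
	 */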
+93 −28
@@ -139,9 +139,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, vma_link)
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (vma->pin_count > 0)
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
@@ -580,7 +581,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   drm_vblank_count(dev, crtc->pipe));
				   drm_crtc_vblank_count(&crtc->base));
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
@@ -2185,7 +2186,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);

		ppgtt->debug_dump(ppgtt, m);
	}
@@ -4191,7 +4192,7 @@ i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rp_state_cap, hw_max, hw_min;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
@@ -4208,18 +4209,10 @@ i915_max_freq_set(void *data, u64 val)
	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;
	} else {
		val = intel_freq_opcode(dev_priv, val);

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.max_freq;
		hw_min = (rp_state_cap >> 16) & 0xff;
	}

	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4266,7 +4259,7 @@ i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rp_state_cap, hw_max, hw_min;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
@@ -4283,18 +4276,10 @@ i915_min_freq_set(void *data, u64 val)
	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;
	} else {
		val = intel_freq_opcode(dev_priv, val);

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.max_freq;
		hw_min = (rp_state_cap >> 16) & 0xff;
	}

	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4370,6 +4355,85 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0;

	if (INTEL_INFO(dev)->gen < 9)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	seq_printf(m, "  Available Slice Total: %u\n",
		   INTEL_INFO(dev)->slice_total);
	seq_printf(m, "  Available Subslice Total: %u\n",
		   INTEL_INFO(dev)->subslice_total);
	seq_printf(m, "  Available Subslice Per Slice: %u\n",
		   INTEL_INFO(dev)->subslice_per_slice);
	seq_printf(m, "  Available EU Total: %u\n",
		   INTEL_INFO(dev)->eu_total);
	seq_printf(m, "  Available EU Per Subslice: %u\n",
		   INTEL_INFO(dev)->eu_per_subslice);
	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_eu_pg));

	seq_puts(m, "SSEU Device Status\n");
	if (IS_SKYLAKE(dev)) {
		const int s_max = 3, ss_max = 4;
		int s, ss;
		u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];

		s_reg[0] = I915_READ(GEN9_SLICE0_PGCTL_ACK);
		s_reg[1] = I915_READ(GEN9_SLICE1_PGCTL_ACK);
		s_reg[2] = I915_READ(GEN9_SLICE2_PGCTL_ACK);
		eu_reg[0] = I915_READ(GEN9_SLICE0_SS01_EU_PGCTL_ACK);
		eu_reg[1] = I915_READ(GEN9_SLICE0_SS23_EU_PGCTL_ACK);
		eu_reg[2] = I915_READ(GEN9_SLICE1_SS01_EU_PGCTL_ACK);
		eu_reg[3] = I915_READ(GEN9_SLICE1_SS23_EU_PGCTL_ACK);
		eu_reg[4] = I915_READ(GEN9_SLICE2_SS01_EU_PGCTL_ACK);
		eu_reg[5] = I915_READ(GEN9_SLICE2_SS23_EU_PGCTL_ACK);
		eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
			     GEN9_PGCTL_SSA_EU19_ACK |
			     GEN9_PGCTL_SSA_EU210_ACK |
			     GEN9_PGCTL_SSA_EU311_ACK;
		eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
			     GEN9_PGCTL_SSB_EU19_ACK |
			     GEN9_PGCTL_SSB_EU210_ACK |
			     GEN9_PGCTL_SSB_EU311_ACK;

		for (s = 0; s < s_max; s++) {
			if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
				/* skip disabled slice */
				continue;

			s_tot++;
			ss_per = INTEL_INFO(dev)->subslice_per_slice;
			ss_tot += ss_per;
			for (ss = 0; ss < ss_max; ss++) {
				unsigned int eu_cnt;

				eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
						       eu_mask[ss%2]);
				eu_tot += eu_cnt;
				eu_per = max(eu_per, eu_cnt);
			}
		}
	}
	seq_printf(m, "  Enabled Slice Total: %u\n", s_tot);
	seq_printf(m, "  Enabled Subslice Total: %u\n", ss_tot);
	seq_printf(m, "  Enabled Subslice Per Slice: %u\n", ss_per);
	seq_printf(m, "  Enabled EU Total: %u\n", eu_tot);
	seq_printf(m, "  Enabled EU Per Subslice: %u\n", eu_per);

	return 0;
}

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
@@ -4483,6 +4547,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
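
To make the EU counting in the new i915_sseu_status() above easier to follow, here is a worked example with an invented masked-register value (an assumption for illustration, not real hardware data):

	/* Invented example, not from real hardware.  The mask names
	 * (GEN9_PGCTL_SSA_EU08_ACK, ..._EU19_ACK, ...) suggest that each ack
	 * bit covers a pair of EUs (EU0/EU8, EU1/EU9, ...), which is why the
	 * code multiplies by 2.  If the masked value
	 *   eu_reg[2*s + ss/2] & eu_mask[ss%2]
	 * has 3 bits set, then
	 *   eu_cnt = 2 * hweight32(...) = 2 * 3 = 6
	 * enabled EUs are counted for that subslice; eu_tot accumulates them
	 * and eu_per keeps the largest per-subslice count seen so far.
	 */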
