Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 436e94a4 authored by Dave Airlie's avatar Dave Airlie
Browse files

Merge tag 'drm-intel-next-fixes-2014-10-03' of...

Merge tag 'drm-intel-next-fixes-2014-10-03' of git://anongit.freedesktop.org/drm-intel into drm-next

Bunch of fixes for 3.18. Major parts:
- ppgtt fixes (but full ppgtt is for 3.19) from Chris, Michel, ...
- hdmi pixel replication fixes (Clint Taylor)
- leftover i830M patches from Ville
- small things all over

* tag 'drm-intel-next-fixes-2014-10-03' of git://anongit.freedesktop.org/drm-intel: (21 commits)
  drm/i915: Enable pixel replicated modes on BDW and HSW.
  drm/i915: Don't spam dmesg with rps messages on vlv/chv
  drm/i915: Do not leak pages when freeing userptr objects
  drm/i915: Do not store the error pointer for a failed userptr registration
  Revert "drm/i915/bdw: BDW Software Turbo"
  drm/i915/bdw: Cleanup pre prod workarounds
  drm/i915: Use EIO instead of EAGAIN for sink CRC error.
  drm/i915: Extend BIOS stolen mem handling to all platform
  drm/i915: Match GTT space sanity checker with implementation
  drm/i915: HSW always use GGTT selector for secure batches
  drm/i915: add cherryview specfic forcewake in execlists_elsp_write
  drm/i915: fix another use-after-free in i915_gem_evict_everything
  drm/i915: Don't reinit hpd interrupts after gpu reset
  drm/i915: Wrap -EIO send-vblank event for failed pageflip in spinlock
  drm/i915: Drop any active reference before unbinding
  drm/i915: Objects on the unbound list may still have an active reference
  drm/i915/edp: use lane count and link rate from DPCD for eDP
  drm/i915/dp: add missing \n in the TPS3 debug message
  drm/i915/hdmi, dp: Do not dereference the encoder in the connector destroy
  drm/i915: Limit the watermark to at least 8 entries on gen2/3
  ...
parents ccb09a8e ebb69c95
Loading
Loading
Loading
Loading
+4 −30
Original line number Diff line number Diff line
@@ -3826,7 +3826,6 @@ i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -3846,36 +3845,11 @@ i915_drop_caches_set(void *data, u64 val)
	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND) {
		list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
					 global_list) {
			struct i915_vma *vma, *v;
	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);

			ret = 0;
			drm_gem_object_reference(&obj->base);
			list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) {
				if (vma->pin_count)
					continue;

				ret = i915_vma_unbind(vma);
				if (ret)
					break;
			}
			drm_gem_object_unreference(&obj->base);
			if (ret)
				goto unlock;
		}
	}

	if (val & DROP_UNBOUND) {
		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
					 global_list)
			if (obj->pages_pin_count == 0) {
				ret = i915_gem_object_put_pages(obj);
				if (ret)
					goto unlock;
			}
	}
	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);

unlock:
	mutex_unlock(&dev->struct_mutex);
+0 −2
Original line number Diff line number Diff line
@@ -871,8 +871,6 @@ int i915_reset(struct drm_device *dev)
		 */
		if (INTEL_INFO(dev)->gen > 5)
			intel_reset_gt_powersave(dev);

		intel_hpd_init(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}
+6 −22
Original line number Diff line number Diff line
@@ -946,23 +946,6 @@ struct intel_rps_ei {
	u32 media_c0;
};

/*
 * Per-direction calibration state for the BDW software turbo algorithm.
 * (This struct is on the removed side of the diff: the commit reverts
 * "drm/i915/bdw: BDW Software Turbo" and deletes it.)
 */
struct intel_rps_bdw_cal {
	u32 it_threshold_pct; /* interrupt, in percentage */
	u32 eval_interval; /* evaluation interval, in us */
	u32 last_ts; /* NOTE(review): presumably timestamp of last evaluation — confirm against users */
	u32 last_c0; /* NOTE(review): presumably last sampled C0 residency counter — confirm */
	bool is_up; /* true when this instance tracks the up (frequency-raise) direction */
};

/*
 * Aggregate state for the BDW software turbo implementation: one
 * calibration record per direction plus the timer/work machinery that
 * drove re-evaluation. (Removed by this commit together with
 * intel_rps_bdw_cal as part of the "BDW Software Turbo" revert.)
 */
struct intel_rps_bdw_turbo {
	struct intel_rps_bdw_cal up;   /* calibration for raising frequency */
	struct intel_rps_bdw_cal down; /* calibration for lowering frequency */
	struct timer_list flip_timer; /* NOTE(review): presumably re-arms evaluation after page flips — confirm */
	u32 timeout;
	atomic_t flip_received; /* flag set from flip interrupt context; read by the timer path */
	struct work_struct work_max_freq; /* deferred work to push the GPU to max frequency */
};

struct intel_gen6_power_mgmt {
	/* work and pm_iir are protected by dev_priv->irq_lock */
	struct work_struct work;
@@ -996,9 +979,6 @@ struct intel_gen6_power_mgmt {
	bool enabled;
	struct delayed_work delayed_resume_work;

	bool is_bdw_sw_turbo;	/* Switch of BDW software turbo */
	struct intel_rps_bdw_turbo sw_turbo; /* Calculate RP interrupt timing */

	/* manual wa residency calculations */
	struct intel_rps_ei up_ei, down_ei;

@@ -2369,6 +2349,12 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
			      long target,
			      unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2823,8 +2809,6 @@ extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void bdw_software_turbo(struct drm_device *dev);
extern void gen8_flip_interrupt(struct drm_device *dev);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);
+78 −56
Original line number Diff line number Diff line
@@ -60,7 +60,6 @@ static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
static int i915_gem_shrinker_oom(struct notifier_block *nb,
				 unsigned long event,
				 void *ptr);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);

static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1741,7 +1740,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
	 * offsets on purgeable objects by truncating it and marking it purged,
	 * which prevents userspace from ever using that object again.
	 */
	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
	i915_gem_shrink(dev_priv,
			obj->base.size >> PAGE_SHIFT,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_PURGEABLE);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;
@@ -1938,12 +1941,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
	return 0;
}

static unsigned long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
		  bool purgeable_only)
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		long target, unsigned flags)
{
	struct list_head still_in_list;
	struct drm_i915_gem_object *obj;
	const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
	unsigned long count = 0;

	/*
@@ -1965,8 +1967,13 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equals 0.
	 */
	if (flags & I915_SHRINK_UNBOUND) {
		struct list_head still_in_list;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
			struct drm_i915_gem_object *obj;

			obj = list_first_entry(&dev_priv->mm.unbound_list,
					       typeof(*obj), global_list);
			list_move_tail(&obj->global_list, &still_in_list);
@@ -1982,9 +1989,14 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
			drm_gem_object_unreference(&obj->base);
		}
		list_splice(&still_in_list, &dev_priv->mm.unbound_list);
	}

	if (flags & I915_SHRINK_BOUND) {
		struct list_head still_in_list;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
			struct drm_i915_gem_object *obj;
			struct i915_vma *vma, *v;

			obj = list_first_entry(&dev_priv->mm.bound_list,
@@ -2006,21 +2018,17 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
			drm_gem_object_unreference(&obj->base);
		}
		list_splice(&still_in_list, &dev_priv->mm.bound_list);

	return count;
	}

static unsigned long
i915_gem_purge(struct drm_i915_private *dev_priv, long target)
{
	return __i915_gem_shrink(dev_priv, target, true);
	return count;
}

static unsigned long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	i915_gem_evict_everything(dev_priv->dev);
	return __i915_gem_shrink(dev_priv, LONG_MAX, false);
	return i915_gem_shrink(dev_priv, LONG_MAX,
			       I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
}

static int
@@ -2067,7 +2075,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_purge(dev_priv, page_count);
			i915_gem_shrink(dev_priv,
					page_count,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
@@ -2944,6 +2956,9 @@ int i915_vma_unbind(struct i915_vma *vma)
	 * cause memory corruption through use-after-free.
	 */

	/* Throw away the active reference before moving to the unbound list */
	i915_gem_object_retire(obj);

	if (i915_is_ggtt(vma->vm)) {
		i915_gem_object_finish_gtt(obj);

@@ -3336,17 +3351,20 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
	return 0;
}

static bool i915_gem_valid_gtt_space(struct drm_device *dev,
				     struct drm_mm_node *gtt_space,
static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
				     unsigned long cache_level)
{
	struct drm_mm_node *gtt_space = &vma->node;
	struct drm_mm_node *other;

	/* On non-LLC machines we have to be careful when putting differing
	 * types of snoopable memory together to avoid the prefetcher
	 * crossing memory domains and dying.
	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (HAS_LLC(dev))
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	if (!drm_mm_node_allocated(gtt_space))
@@ -3484,8 +3502,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,

		goto err_free_vma;
	}
	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
					      obj->cache_level))) {
	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
		ret = -EINVAL;
		goto err_remove_node;
	}
@@ -3695,7 +3712,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
	}

	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
		if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
			ret = i915_vma_unbind(vma);
			if (ret)
				return ret;
@@ -5261,11 +5278,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += __i915_gem_shrink(dev_priv,
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					   false);
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	if (unlock)
		mutex_unlock(&dev->struct_mutex);

+2 −2
Original line number Diff line number Diff line
@@ -243,7 +243,7 @@ int
i915_gem_evict_everything(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm;
	struct i915_address_space *vm, *v;
	bool lists_empty = true;
	int ret;

@@ -270,7 +270,7 @@ i915_gem_evict_everything(struct drm_device *dev)
	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
	list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
		WARN_ON(i915_gem_evict_vm(vm, false));

	return 0;
Loading