
Commit 4daad1b2 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2017-03-14' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes

drm/i915 fixes for v4.11-rc3

* tag 'drm-intel-fixes-2017-03-14' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: Fix forcewake active domain tracking
  drm/i915: Nuke skl_update_plane debug message from the pipe update critical section
  drm/i915: use correct node for handling cache domain eviction
  drm/i915: Drain the freed state from the tail of the next commit
  drm/i915: Nuke debug messages from the pipe update critical section
  drm/i915: Use pagecache write to prepopulate shmemfs from pwrite-ioctl
  drm/i915: Store a permanent error in obj->mm.pages
  drm/i915: Move updating color management to before vblank evasion
  drm/i915/gen9: Increase PCODE request timeout to 50ms
  drm/i915: Avoid tweaking evaluation thresholds on Baytrail v3
  drm/i915: Remove the vma from the drm_mm if binding fails
  drm/i915/fbdev: Stop repeating tile configuration on stagnation
  drm/i915/glk: Fix watermark computations for third sprite plane
  drm/i915: Squelch any ktime/jiffie rounding errors for wait-ioctl
parents 490b8981 6aef6603
drivers/gpu/drm/i915/i915_drv.h +1 −0
@@ -293,6 +293,7 @@ enum plane_id {
	PLANE_PRIMARY,
	PLANE_SPRITE0,
	PLANE_SPRITE1,
+	PLANE_SPRITE2,
	PLANE_CURSOR,
	I915_MAX_PLANES,
};
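
The one-line change above works because I915_MAX_PLANES is a count terminator: every per-plane array indexed by enum plane_id grows automatically when a new plane is inserted before it. A minimal standalone sketch of that idiom (plain C; the watermark value is a hypothetical stand-in, not the kernel's code):

#include <stdio.h>

enum plane_id {
	PLANE_PRIMARY,
	PLANE_SPRITE0,
	PLANE_SPRITE1,
	PLANE_SPRITE2,	/* the new GLK sprite, inserted before the terminator */
	PLANE_CURSOR,
	I915_MAX_PLANES,
};

int main(void)
{
	/* Any array sized with the terminator picks up the new plane. */
	unsigned int wm_blocks[I915_MAX_PLANES] = { 0 };
	enum plane_id id;

	for (id = PLANE_PRIMARY; id < I915_MAX_PLANES; id++)
		wm_blocks[id] = 8;	/* hypothetical per-plane value */

	printf("planes tracked: %d, cursor wm: %u\n",
	       I915_MAX_PLANES, wm_blocks[PLANE_CURSOR]);
	return 0;
}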
drivers/gpu/drm/i915/i915_gem.c +94 −3
@@ -1434,6 +1434,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

+	ret = -ENODEV;
+	if (obj->ops->pwrite)
+		ret = obj->ops->pwrite(obj, args);
+	if (ret != -ENODEV)
+		goto err;
+
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
@@ -2119,6 +2125,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
+	obj->mm.pages = ERR_PTR(-EFAULT);
}

/* Try to discard unwanted pages */
@@ -2218,7 +2225,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,

	__i915_gem_object_reset_page_iter(obj);

-	obj->ops->put_pages(obj, pages);
+	if (!IS_ERR(pages))
+		obj->ops->put_pages(obj, pages);
+
unlock:
	mutex_unlock(&obj->mm.lock);
}
@@ -2437,7 +2446,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
	if (err)
		return err;

-	if (unlikely(!obj->mm.pages)) {
+	if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;
@@ -2515,7 +2524,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,

	pinned = true;
	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-		if (unlikely(!obj->mm.pages)) {
+		if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
			ret = ____i915_gem_object_get_pages(obj);
			if (ret)
				goto err_unlock;
@@ -2563,6 +2572,75 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
	goto out_unlock;
}

+static int
+i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
+			   const struct drm_i915_gem_pwrite *arg)
+{
+	struct address_space *mapping = obj->base.filp->f_mapping;
+	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
+	u64 remain, offset;
+	unsigned int pg;
+
+	/* Before we instantiate/pin the backing store for our use, we
+	 * can prepopulate the shmemfs filp efficiently using a write into
+	 * the pagecache. We avoid the penalty of instantiating all the
+	 * pages, important if the user is just writing to a few and never
+	 * uses the object on the GPU, and using a direct write into shmemfs
+	 * allows it to avoid the cost of retrieving a page (either swapin
+	 * or clearing-before-use) before it is overwritten.
+	 */
+	if (READ_ONCE(obj->mm.pages))
+		return -ENODEV;
+
+	/* Before the pages are instantiated the object is treated as being
+	 * in the CPU domain. The pages will be clflushed as required before
+	 * use, and we can freely write into the pages directly. If userspace
+	 * races pwrite with any other operation; corruption will ensue -
+	 * that is userspace's prerogative!
+	 */
+
+	remain = arg->size;
+	offset = arg->offset;
+	pg = offset_in_page(offset);
+
+	do {
+		unsigned int len, unwritten;
+		struct page *page;
+		void *data, *vaddr;
+		int err;
+
+		len = PAGE_SIZE - pg;
+		if (len > remain)
+			len = remain;
+
+		err = pagecache_write_begin(obj->base.filp, mapping,
+					    offset, len, 0,
+					    &page, &data);
+		if (err < 0)
+			return err;
+
+		vaddr = kmap(page);
+		unwritten = copy_from_user(vaddr + pg, user_data, len);
+		kunmap(page);
+
+		err = pagecache_write_end(obj->base.filp, mapping,
+					  offset, len, len - unwritten,
+					  page, data);
+		if (err < 0)
+			return err;
+
+		if (unwritten)
+			return -EFAULT;
+
+		remain -= len;
+		user_data += len;
+		offset += len;
+		pg = 0;
+	} while (remain);
+
+	return 0;
+}
+
static bool ban_context(const struct i915_gem_context *ctx)
{
	return (i915_gem_context_is_bannable(ctx) &&
@@ -3029,6 +3107,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;
+
+		/*
+		 * Apparently ktime isn't accurate enough and occasionally has a
+		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+		 * things up to make the test happy. We allow up to 1 jiffy.
+		 *
+		 * This is a regression from the timespec->ktime conversion.
+		 */
+		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
+			args->timeout_ns = 0;
	}

	i915_gem_object_put(obj);
@@ -3974,8 +4062,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
+
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
+
+	.pwrite = i915_gem_object_pwrite_gtt,
};

struct drm_i915_gem_object *
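
The core of the new pwrite fast path is the page-at-a-time copy loop in i915_gem_object_pwrite_gtt(): the first chunk may start mid-page (pg = offset_in_page(offset)), every later chunk starts on a page boundary, and no chunk ever crosses one. A standalone sketch of that loop, with a plain buffer standing in for the shmemfs pagecache and memcpy standing in for the pagecache_write_begin()/kmap()/copy_from_user() sequence:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096u
#define offset_in_page(off) ((unsigned int)((off) & (PAGE_SIZE - 1)))

/* Copy `size` bytes from src into a paged store at `offset`,
 * one page-bounded chunk at a time, mirroring the loop above. */
static void copy_paged(uint8_t *store, const uint8_t *src,
		       uint64_t offset, uint64_t size)
{
	uint64_t remain = size;
	unsigned int pg = offset_in_page(offset);

	do {
		/* The first chunk may start mid-page; never cross a boundary. */
		unsigned int len = PAGE_SIZE - pg;

		if (len > remain)
			len = (unsigned int)remain;

		/* Stands in for pagecache_write_begin + kmap + copy_from_user. */
		memcpy(store + offset, src, len);

		remain -= len;
		src += len;
		offset += len;
		pg = 0;	/* all subsequent chunks are page-aligned */
	} while (remain);
}

int main(void)
{
	uint8_t *store = calloc(4, PAGE_SIZE);
	uint8_t data[6000];

	if (!store)
		return 1;
	memset(data, 0xab, sizeof(data));
	copy_paged(store, data, 100, sizeof(data));	/* unaligned, multi-page write */
	printf("store[100] = 0x%02x\n", (unsigned int)store[100]);
	free(store);
	return 0;
}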
drivers/gpu/drm/i915/i915_gem_evict.c +4 −4
@@ -293,12 +293,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
		 * those as well to make room for our guard pages.
		 */
		if (check_color) {
-			if (vma->node.start + vma->node.size == node->start) {
-				if (vma->node.color == node->color)
+			if (node->start + node->size == target->start) {
+				if (node->color == target->color)
					continue;
			}
-			if (vma->node.start == node->start + node->size) {
-				if (vma->node.color == node->color)
+			if (node->start == target->start + target->size) {
+				if (node->color == target->color)
					continue;
			}
		}
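
The i915_gem_evict_for_node() fix is easiest to read in isolation: when cache colouring (guard pages) is in play, the scan must test each candidate node's adjacency and colour against the target node it is evicting for, not against the stale vma state the old code used. A simplified sketch of the corrected test (plain structs, not the kernel's drm_mm types):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct node { uint64_t start, size; unsigned long color; };

/* A neighbour of the same colour needs no guard page between itself
 * and the target, so the eviction scan can skip it (the `continue`
 * in the hunk above). */
static bool same_color_neighbour(const struct node *node,
				 const struct node *target)
{
	if (node->start + node->size == target->start)	/* abuts from below */
		return node->color == target->color;
	if (node->start == target->start + target->size)	/* abuts from above */
		return node->color == target->color;
	return false;
}

int main(void)
{
	struct node target = { .start = 4096,  .size = 4096, .color = 1 };
	struct node below  = { .start = 0,     .size = 4096, .color = 1 };
	struct node apart  = { .start = 16384, .size = 4096, .color = 1 };

	printf("below: %d, apart: %d\n",
	       same_color_neighbour(&below, &target),
	       same_color_neighbour(&apart, &target));
	return 0;
}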
drivers/gpu/drm/i915/i915_gem_object.h +3 −0
@@ -54,6 +54,9 @@ struct drm_i915_gem_object_ops {
	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

+	int (*pwrite)(struct drm_i915_gem_object *,
+		      const struct drm_i915_gem_pwrite *);
+
	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};
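
The new ->pwrite hook is optional, which is why the caller in i915_gem_pwrite_ioctl() seeds ret with -ENODEV: a NULL hook, or a hook returning -ENODEV, both mean "no fast path here, fall through to the general path", while any other value (success or a real error) short-circuits. A sketch of that pattern with simplified userspace types (names here are illustrative, not the kernel's):

#include <errno.h>
#include <stdio.h>

struct object;

struct object_ops {
	/* Optional fast path; NULL or -ENODEV means "not handled". */
	int (*pwrite)(struct object *obj, const void *args);
};

struct object {
	const struct object_ops *ops;
};

static int slow_pwrite(struct object *obj, const void *args)
{
	(void)obj; (void)args;
	return 0;	/* the general path always works */
}

static int do_pwrite(struct object *obj, const void *args)
{
	int ret = -ENODEV;

	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		return ret;	/* hook handled it: success or a real error */

	return slow_pwrite(obj, args);
}

int main(void)
{
	static const struct object_ops no_hook = { .pwrite = NULL };
	struct object obj = { .ops = &no_hook };

	printf("ret = %d\n", do_pwrite(&obj, NULL));	/* falls to slow path */
	return 0;
}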
drivers/gpu/drm/i915/i915_vma.c +37 −20
@@ -512,10 +512,36 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
	return ret;
}

+static void
+i915_vma_remove(struct i915_vma *vma)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+
+	drm_mm_remove_node(&vma->node);
+	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+	/* Since the unbound list is global, only move to that list if
+	 * no more VMAs exist.
+	 */
+	if (--obj->bind_count == 0)
+		list_move_tail(&obj->global_link,
+			       &to_i915(obj->base.dev)->mm.unbound_list);
+
+	/* And finally now the object is completely decoupled from this vma,
+	 * we can drop its hold on the backing storage and allow it to be
+	 * reaped by the shrinker.
+	 */
+	i915_gem_object_unpin_pages(obj);
+	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
-	unsigned int bound = vma->flags;
+	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
-		goto err;
+		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
-			goto err;
+			goto err_unpin;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
-		goto err;
+		goto err_remove;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);
@@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

-err:
+err_remove:
+	if ((bound & I915_VMA_BIND_MASK) == 0) {
+		GEM_BUG_ON(vma->pages);
+		i915_vma_remove(vma);
+	}
+err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}
@@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma)
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

-	drm_mm_remove_node(&vma->node);
-	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
-
	if (vma->pages != obj->mm.pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
@@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma)
	}
	vma->pages = NULL;

-	/* Since the unbound list is global, only move to that list if
-	 * no more VMAs exist. */
-	if (--obj->bind_count == 0)
-		list_move_tail(&obj->global_link,
-			       &to_i915(obj->base.dev)->mm.unbound_list);
-
-	/* And finally now the object is completely decoupled from this vma,
-	 * we can drop its hold on the backing storage and allow it to be
-	 * reaped by the shrinker.
-	 */
-	i915_gem_object_unpin_pages(obj);
-	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+	i915_vma_remove(vma);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
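
The refactor above also tightens the error unwinding in __i915_vma_do_pin(): the cleanup labels now run in reverse order of setup, and a node inserted by this call is removed again if the subsequent bind fails. A compact sketch of that goto-ladder shape (stub functions, not the kernel's code):

#include <errno.h>
#include <stdio.h>

static int  insert_node(void)  { return 0; }
static void remove_node(void)  { }
static int  bind(void)         { return -EINVAL; }	/* simulate a bind failure */
static void unpin(void)        { }

static int do_pin(int already_bound)
{
	int ret;

	if (!already_bound) {
		ret = insert_node();
		if (ret)
			goto err_unpin;	/* nothing to remove yet */
	}

	ret = bind();
	if (ret)
		goto err_remove;

	return 0;

err_remove:
	/* Only undo an insert this call itself performed. */
	if (!already_bound)
		remove_node();
err_unpin:
	unpin();
	return ret;
}

int main(void)
{
	printf("do_pin = %d\n", do_pin(0));	/* bind failed, fully unwound */
	return 0;
}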