Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f1fa4f44 authored by Chris Wilson
Browse files

drm/i915: Refactor testing obj->mm.pages



Since we occasionally stuff an error pointer into obj->mm.pages for a
semi-permanent or even permanent failure, we have to be more careful and
not just test against NULL when deciding if the object has a complete
set of its concurrent pages.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171013202621.7276-1-chris@chris-wilson.co.uk
parent e4ffc83d
Loading
Loading
Loading
Loading
+8 −2
Original line number Original line Diff line number Diff line
@@ -3566,10 +3566,16 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
	return __i915_gem_object_get_pages(obj);
	return __i915_gem_object_get_pages(obj);
}
}


static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
{
	GEM_BUG_ON(!obj->mm.pages);
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));


	atomic_inc(&obj->mm.pages_pin_count);
	atomic_inc(&obj->mm.pages_pin_count);
}
}
@@ -3583,8 +3589,8 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
static inline void
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(!obj->mm.pages);


	atomic_dec(&obj->mm.pages_pin_count);
	atomic_dec(&obj->mm.pages_pin_count);
}
}
+10 −9
Original line number Original line Diff line number Diff line
@@ -2196,7 +2196,7 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
	struct address_space *mapping;
	struct address_space *mapping;


	lockdep_assert_held(&obj->mm.lock);
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(obj->mm.pages);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));


	switch (obj->mm.madv) {
	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_DONTNEED:
@@ -2259,7 +2259,7 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
		return;
		return;


	GEM_BUG_ON(obj->bind_count);
	GEM_BUG_ON(obj->bind_count);
	if (!READ_ONCE(obj->mm.pages))
	if (!i915_gem_object_has_pages(obj))
		return;
		return;


	/* May be called by shrinker from within get_pages() (on another bo) */
	/* May be called by shrinker from within get_pages() (on another bo) */
@@ -2563,7 +2563,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
	if (err)
	if (err)
		return err;
		return err;


	if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));


		err = ____i915_gem_object_get_pages(obj);
		err = ____i915_gem_object_get_pages(obj);
@@ -2648,7 +2648,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
	type &= ~I915_MAP_OVERRIDE;
	type &= ~I915_MAP_OVERRIDE;


	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));


			ret = ____i915_gem_object_get_pages(obj);
			ret = ____i915_gem_object_get_pages(obj);
@@ -2660,7 +2660,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
		atomic_inc(&obj->mm.pages_pin_count);
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
		pinned = false;
	}
	}
	GEM_BUG_ON(!obj->mm.pages);
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));


	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
	if (ptr && has_type != type) {
@@ -2715,7 +2715,7 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 * or clearing-before-use) before it is overwritten.
	 */
	 */
	if (READ_ONCE(obj->mm.pages))
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;
		return -ENODEV;


	/* Before the pages are instantiated the object is treated as being
	/* Before the pages are instantiated the object is treated as being
@@ -4278,7 +4278,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
	if (err)
	if (err)
		goto out;
		goto out;


	if (obj->mm.pages &&
	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
@@ -4297,7 +4297,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		obj->mm.madv = args->madv;
		obj->mm.madv = args->madv;


	/* if the object is no longer attached, discard its backing storage */
	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);
		i915_gem_object_truncate(obj);


	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	args->retained = obj->mm.madv != __I915_MADV_PURGED;
@@ -4514,7 +4515,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
			atomic_set(&obj->mm.pages_pin_count, 0);
			atomic_set(&obj->mm.pages_pin_count, 0);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		GEM_BUG_ON(obj->mm.pages);
		GEM_BUG_ON(i915_gem_object_has_pages(obj));


		if (obj->base.import_attach)
		if (obj->base.import_attach)
			drm_prime_gem_destroy(&obj->base, NULL);
			drm_prime_gem_destroy(&obj->base, NULL);
+1 −0
Original line number Original line Diff line number Diff line
@@ -70,6 +70,7 @@ static const struct dma_fence_ops i915_clflush_ops = {


static void __i915_do_clflush(struct drm_i915_gem_object *obj)
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);
	drm_clflush_sg(obj->mm.pages);
	intel_fb_obj_flush(obj, ORIGIN_CPU);
	intel_fb_obj_flush(obj, ORIGIN_CPU);
}
}
+1 −1
Original line number Original line Diff line number Diff line
@@ -229,7 +229,7 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
		return 0;
		return 0;


	/* Recreate the page after shrinking */
	/* Recreate the page after shrinking */
	if (!so->vma->obj->mm.pages)
	if (!i915_gem_object_has_pages(so->vma->obj))
		so->batch_offset = -1;
		so->batch_offset = -1;


	ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+5 −5
Original line number Original line Diff line number Diff line
@@ -97,7 +97,7 @@ static bool swap_available(void)


static bool can_release_pages(struct drm_i915_gem_object *obj)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
{
	if (!obj->mm.pages)
	if (!i915_gem_object_has_pages(obj))
		return false;
		return false;


	/* Consider only shrinkable objects. */
	/* Consider only shrinkable ojects. */
@@ -129,7 +129,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
{
	if (i915_gem_object_unbind(obj) == 0)
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
	return !READ_ONCE(obj->mm.pages);
	return !i915_gem_object_has_pages(obj);
}
}


/**
/**
@@ -247,7 +247,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
				/* May arrive from get_pages on another bo */
				/* May arrive from get_pages on another bo */
				mutex_lock_nested(&obj->mm.lock,
				mutex_lock_nested(&obj->mm.lock,
						  I915_MM_SHRINKER);
						  I915_MM_SHRINKER);
				if (!obj->mm.pages) {
				if (!i915_gem_object_has_pages(obj)) {
					__i915_gem_object_invalidate(obj);
					__i915_gem_object_invalidate(obj);
					list_del_init(&obj->global_link);
					list_del_init(&obj->global_link);
					count += obj->base.size >> PAGE_SHIFT;
					count += obj->base.size >> PAGE_SHIFT;
@@ -413,7 +413,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
	 */
	 */
	unbound = bound = unevictable = 0;
	unbound = bound = unevictable = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		if (!obj->mm.pages)
		if (!i915_gem_object_has_pages(obj))
			continue;
			continue;


		if (!can_release_pages(obj))
		if (!can_release_pages(obj))
@@ -422,7 +422,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
			unbound += obj->base.size >> PAGE_SHIFT;
			unbound += obj->base.size >> PAGE_SHIFT;
	}
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (!obj->mm.pages)
		if (!i915_gem_object_has_pages(obj))
			continue;
			continue;


		if (!can_release_pages(obj))
		if (!can_release_pages(obj))
Loading