
Commit a4f5ea64 authored by Chris Wilson

drm/i915: Refactor object page API



The plan is to make obtaining the backing storage for the object avoid
struct_mutex (i.e. use its own locking). The first step is to update the
API so that normal users only call pin/unpin whilst working on the
backing storage.
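
As an illustrative sketch (not part of this patch), the typical call-site
conversion under the new rules looks like the following, assuming a caller
that only needs the backing storage temporarily:

	/* Before: acquire the pages, then take an explicit pin. */
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;
	i915_gem_object_pin_pages(obj);
	/* ... work on obj->pages ... */
	i915_gem_object_unpin_pages(obj);

	/* After: a single pin call acquires the pages on first use and
	 * reports errors; every pin is balanced by an unpin once the
	 * caller is done with the backing storage.
	 */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;
	/* ... work on obj->mm.pages ... */
	i915_gem_object_unpin_pages(obj);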

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-12-chris@chris-wilson.co.uk
parent d2a84a76
drivers/gpu/drm/i915/i915_cmd_parser.c +1 −1
@@ -1290,7 +1290,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
	}

	if (ret == 0 && needs_clflush_after)
-		drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
+		drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
	i915_gem_object_unpin_map(shadow_batch_obj);

	return ret;
drivers/gpu/drm/i915/i915_debugfs.c +9 −8
@@ -112,7 +112,7 @@ static char get_global_flag(struct drm_i915_gem_object *obj)

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
-	return obj->mapping ? 'M' : ' ';
+	return obj->mm.mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
@@ -158,8 +158,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
		   i915_gem_active_get_seqno(&obj->last_write,
					     &obj->base.dev->struct_mutex),
		   i915_cache_level_str(dev_priv, obj->cache_level),
-		   obj->dirty ? " dirty" : "",
-		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+		   obj->mm.dirty ? " dirty" : "",
+		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
@@ -403,12 +403,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
		size += obj->base.size;
		++count;

-		if (obj->madv == I915_MADV_DONTNEED) {
+		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

-		if (obj->mapping) {
+		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
@@ -425,12 +425,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
			++dpy_count;
		}

-		if (obj->madv == I915_MADV_DONTNEED) {
+		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

-		if (obj->mapping) {
+		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
@@ -2028,7 +2028,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
		seq_printf(m, "\tBound in GGTT at 0x%08x\n",
			   i915_ggtt_offset(vma));

-	if (i915_gem_object_get_pages(vma->obj)) {
+	if (i915_gem_object_pin_pages(vma->obj)) {
		seq_puts(m, "\tFailed to get pages for context object\n\n");
		return;
	}
@@ -2047,6 +2047,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
		kunmap_atomic(reg_state);
	}

+	i915_gem_object_unpin_pages(vma->obj);
	seq_putc(m, '\n');
}

drivers/gpu/drm/i915/i915_drv.h +64 −30
@@ -2252,17 +2252,6 @@ struct drm_i915_gem_object {
	 */
#define I915_BO_ACTIVE_REF (I915_BO_ACTIVE_SHIFT + I915_NUM_ENGINES)

-	/**
-	 * This is set if the object has been written to since last bound
-	 * to the GTT
-	 */
-	unsigned int dirty:1;
-
-	/**
-	 * Advice: are the backing pages purgeable?
-	 */
-	unsigned int madv:2;
-
	/*
	 * Is the object to be mapped as read-only to the GPU
	 * Only honoured if hardware has relevant pte bit
@@ -2284,8 +2273,12 @@ struct drm_i915_gem_object {
	unsigned int bind_count;
	unsigned int pin_display;

+	struct {
+		unsigned int pages_pin_count;
+
		struct sg_table *pages;
-	int pages_pin_count;
+		void *mapping;

		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */
@@ -2293,7 +2286,18 @@ struct drm_i915_gem_object {
			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;
-	void *mapping;

+		/**
+		 * Advice: are the backing pages purgeable?
+		 */
+		unsigned int madv:2;
+
+		/**
+		 * This is set if the object has been written to since the
+		 * pages were last acquired.
+		 */
+		bool dirty:1;
+	} mm;

	/** Breadcrumb of last rendering to the buffer.
	 * There can only be one writer, but we allow for multiple readers.
@@ -3182,14 +3186,11 @@ void i915_vma_close(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);

int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
-int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);

-int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-
-static inline int __sg_page_count(struct scatterlist *sg)
+static inline int __sg_page_count(const struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}
@@ -3210,19 +3211,52 @@ dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

-static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+
+static inline int __must_check
+i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
+	lockdep_assert_held(&obj->base.dev->struct_mutex);

+	if (obj->mm.pages_pin_count++)
+		return 0;

+	return __i915_gem_object_get_pages(obj);
}

+static inline void
+__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	GEM_BUG_ON(!obj->mm.pages);
+
+	obj->mm.pages_pin_count++;
+}

+static inline bool
+i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
+{
-	GEM_BUG_ON(obj->pages == NULL);
-	obj->pages_pin_count++;
+	return obj->mm.pages_pin_count;
+}

+static inline void
+__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+	GEM_BUG_ON(!obj->mm.pages);
+
+	obj->mm.pages_pin_count--;
+	GEM_BUG_ON(obj->mm.pages_pin_count < obj->bind_count);
+}

static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
-	GEM_BUG_ON(obj->pages_pin_count == 0);
-	obj->pages_pin_count--;
-	GEM_BUG_ON(obj->pages_pin_count < obj->bind_count);
+	__i915_gem_object_unpin_pages(obj);
}

+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
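
For context only, a minimal usage sketch of the helpers above (not taken from the patch): page lookup helpers now assert i915_gem_object_has_pinned_pages() (see the change to i915_gem_object_get_sg() further below), so a caller is expected to hold a pin across the access:

	struct page *page;
	unsigned int n = 0;	/* page index within the object */
	int ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	page = i915_gem_object_get_page(obj, n);	/* pages must be pinned here */
	/* ... use the page ... */

	i915_gem_object_unpin_pages(obj);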
drivers/gpu/drm/i915/i915_gem.c +95 −110
@@ -216,7 +216,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

-	obj->pages = st;
+	obj->mm.pages = st;
	return 0;
}

@@ -225,7 +225,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

-	BUG_ON(obj->madv == __I915_MADV_PURGED);
+	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
@@ -235,10 +235,10 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

-	if (obj->madv == I915_MADV_DONTNEED)
-		obj->dirty = 0;
+	if (obj->mm.madv == I915_MADV_DONTNEED)
+		obj->mm.dirty = false;

-	if (obj->dirty) {
+	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;
@@ -257,22 +257,23 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
			kunmap_atomic(dst);

			set_page_dirty(page);
-			if (obj->madv == I915_MADV_WILLNEED)
+			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
-		obj->dirty = 0;
+		obj->mm.dirty = false;
	}

-	sg_free_table(obj->pages);
-	kfree(obj->pages);
+	sg_free_table(obj->mm.pages);
+	kfree(obj->mm.pages);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
+	i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
@@ -507,7 +508,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
		return 0;
	}

-	if (obj->madv != I915_MADV_WILLNEED)
+	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
@@ -517,7 +518,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
	if (ret)
		return ret;

-	ret = i915_gem_object_put_pages(obj);
+	ret = __i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

@@ -529,7 +530,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

-	return i915_gem_object_get_pages(obj);
+	return i915_gem_object_pin_pages(obj);
}

static int
@@ -725,12 +726,10 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
	if (ret)
		return ret;

-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

-	i915_gem_object_pin_pages(obj);
-
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu read domain, set ourself into the gtt
@@ -778,12 +777,10 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
	if (ret)
		return ret;

-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

-	i915_gem_object_pin_pages(obj);
-
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu write domain, set ourself into the
@@ -813,7 +810,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
		obj->cache_dirty = true;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-	obj->dirty = 1;
+	obj->mm.dirty = true;
	/* return with the pages pinned */
	return 0;

@@ -951,13 +948,11 @@ i915_gem_gtt_pread(struct drm_device *dev,
		if (ret)
			goto out;

-		ret = i915_gem_object_get_pages(obj);
+		ret = i915_gem_object_pin_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

-		i915_gem_object_pin_pages(obj);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -1064,7 +1059,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
	offset = args->offset;
	remain = args->size;

-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+	for_each_sg_page(obj->mm.pages->sgl, &sg_iter, obj->mm.pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

@@ -1254,13 +1249,11 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
		if (ret)
			goto out;

-		ret = i915_gem_object_get_pages(obj);
+		ret = i915_gem_object_pin_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

-		i915_gem_object_pin_pages(obj);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
@@ -1268,7 +1261,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
		goto out_unpin;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-	obj->dirty = true;
+	obj->mm.dirty = true;

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
@@ -1439,7 +1432,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
	offset = args->offset;
	remain = args->size;

-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+	for_each_sg_page(obj->mm.pages->sgl, &sg_iter, obj->mm.pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;
@@ -2266,7 +2259,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
-	obj->madv = __I915_MADV_PURGED;
+	obj->mm.madv = __I915_MADV_PURGED;
}

/* Try to discard unwanted pages */
@@ -2275,7 +2268,7 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

-	switch (obj->madv) {
+	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
@@ -2296,7 +2289,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
	struct page *page;
	int ret;

-	BUG_ON(obj->madv == __I915_MADV_PURGED);
+	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
@@ -2312,22 +2305,22 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

-	if (obj->madv == I915_MADV_DONTNEED)
-		obj->dirty = 0;
+	if (obj->mm.madv == I915_MADV_DONTNEED)
+		obj->mm.dirty = false;

-	for_each_sgt_page(page, sgt_iter, obj->pages) {
-		if (obj->dirty)
+	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
+		if (obj->mm.dirty)
			set_page_dirty(page);

-		if (obj->madv == I915_MADV_WILLNEED)
+		if (obj->mm.madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		put_page(page);
	}
-	obj->dirty = 0;
+	obj->mm.dirty = false;

-	sg_free_table(obj->pages);
-	kfree(obj->pages);
+	sg_free_table(obj->mm.pages);
+	kfree(obj->mm.pages);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
@@ -2335,21 +2328,20 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
	struct radix_tree_iter iter;
	void **slot;

-	radix_tree_for_each_slot(slot, &obj->get_page.radix, &iter, 0)
-		radix_tree_delete(&obj->get_page.radix, iter.index);
+	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
+		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
}

-int
-i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

-	if (obj->pages == NULL)
+	if (!obj->mm.pages)
		return 0;

-	if (obj->pages_pin_count)
+	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	GEM_BUG_ON(obj->bind_count);
@@ -2359,22 +2351,22 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
	 * lists early. */
	list_del(&obj->global_list);

-	if (obj->mapping) {
+	if (obj->mm.mapping) {
		void *ptr;

-		ptr = ptr_mask_bits(obj->mapping);
+		ptr = ptr_mask_bits(obj->mm.mapping);
		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

-		obj->mapping = NULL;
+		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);

	ops->put_pages(obj);
-	obj->pages = NULL;
+	obj->mm.pages = NULL;

	i915_gem_object_invalidate(obj);

@@ -2474,7 +2466,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
	}
	if (sg) /* loop terminated early; short sg table */
		sg_mark_end(sg);
-	obj->pages = st;
+	obj->mm.pages = st;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret)
@@ -2485,7 +2477,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)

	if (i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
-		i915_gem_object_pin_pages(obj);
+		__i915_gem_object_pin_pages(obj);

	return 0;

@@ -2517,8 +2509,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
-int
-i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	const struct drm_i915_gem_object_ops *ops = obj->ops;
@@ -2526,24 +2517,25 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)

	lockdep_assert_held(&obj->base.dev->struct_mutex);

-	if (obj->pages)
+	if (obj->mm.pages)
		return 0;

-	if (obj->madv != I915_MADV_WILLNEED) {
+	if (obj->mm.madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
+		__i915_gem_object_unpin_pages(obj);
		return -EFAULT;
	}

-	BUG_ON(obj->pages_pin_count);
-
	ret = ops->get_pages(obj);
-	if (ret)
+	if (ret) {
+		__i915_gem_object_unpin_pages(obj);
		return ret;
+	}

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);

-	obj->get_page.sg_pos = obj->pages->sgl;
-	obj->get_page.sg_idx = 0;
+	obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
+	obj->mm.get_page.sg_idx = 0;

	return 0;
}
@@ -2553,7 +2545,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
-	struct sg_table *sgt = obj->pages;
+	struct sg_table *sgt = obj->mm.pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
@@ -2607,14 +2599,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ERR_PTR(ret);

-	i915_gem_object_pin_pages(obj);
-	pinned = obj->pages_pin_count > 1;
+	pinned = obj->mm.pages_pin_count > 1;

-	ptr = ptr_unpack_bits(obj->mapping, has_type);
+	ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ret = -EBUSY;
@@ -2626,7 +2617,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
		else
			kunmap(kmap_to_page(ptr));

-		ptr = obj->mapping = NULL;
+		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
@@ -2636,7 +2627,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			goto err;
		}

-		obj->mapping = ptr_pack_bits(ptr, type);
+		obj->mm.mapping = ptr_pack_bits(ptr, type);
	}

	return ptr;
@@ -3087,7 +3078,7 @@ int i915_vma_unbind(struct i915_vma *vma)
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
-	GEM_BUG_ON(!obj->pages);
+	GEM_BUG_ON(!obj->mm.pages);

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
@@ -3111,7 +3102,7 @@ int i915_vma_unbind(struct i915_vma *vma)
	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

-	if (vma->pages != obj->pages) {
+	if (vma->pages != obj->mm.pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
@@ -3244,12 +3235,10 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
		return -E2BIG;
	}

-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

-	i915_gem_object_pin_pages(obj);
-
	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (offset & (alignment - 1) || offset > end - size) {
@@ -3331,7 +3320,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
-	if (obj->pages == NULL)
+	if (!obj->mm.pages)
		return false;

	/*
@@ -3355,7 +3344,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
	}

	trace_i915_gem_object_clflush(obj);
-	drm_clflush_sg(obj->pages);
+	drm_clflush_sg(obj->mm.pages);
	obj->cache_dirty = false;

	return true;
@@ -3469,7 +3458,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

@@ -3493,7 +3482,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
-		obj->dirty = 1;
+		obj->mm.dirty = true;
	}

	trace_i915_gem_object_change_domain(obj,
@@ -3502,6 +3491,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)

	/* And bump the LRU for this access */
	i915_gem_object_bump_inactive_ggtt(obj);
+	i915_gem_object_unpin_pages(obj);

	return 0;
}
@@ -4304,23 +4294,23 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		goto unlock;
	}

-	if (obj->pages &&
+	if (obj->mm.pages &&
	    i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
-		if (obj->madv == I915_MADV_WILLNEED)
-			i915_gem_object_unpin_pages(obj);
+		if (obj->mm.madv == I915_MADV_WILLNEED)
+			__i915_gem_object_unpin_pages(obj);
		if (args->madv == I915_MADV_WILLNEED)
-			i915_gem_object_pin_pages(obj);
+			__i915_gem_object_pin_pages(obj);
	}

-	if (obj->madv != __I915_MADV_PURGED)
-		obj->madv = args->madv;
+	if (obj->mm.madv != __I915_MADV_PURGED)
+		obj->mm.madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
-	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
+	if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
		i915_gem_object_truncate(obj);

-	args->retained = obj->madv != __I915_MADV_PURGED;
+	args->retained = obj->mm.madv != __I915_MADV_PURGED;

	i915_gem_object_put(obj);
unlock:
@@ -4347,9 +4337,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
	obj->ops = ops;

	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
-	obj->madv = I915_MADV_WILLNEED;
-	INIT_RADIX_TREE(&obj->get_page.radix, GFP_KERNEL | __GFP_NOWARN);
-	mutex_init(&obj->get_page.lock);
+
+	obj->mm.madv = I915_MADV_WILLNEED;
+	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
+	mutex_init(&obj->mm.get_page.lock);

	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}
@@ -4441,7 +4432,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
	 * back the contents from the GPU.
	 */

-	if (obj->madv != I915_MADV_WILLNEED)
+	if (obj->mm.madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
@@ -4483,32 +4474,27 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
	}
	GEM_BUG_ON(obj->bind_count);

-	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
-	 * before progressing. */
-	if (obj->stolen)
-		i915_gem_object_unpin_pages(obj);
-
	WARN_ON(atomic_read(&obj->frontbuffer_bits));

-	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
+	if (obj->mm.pages && obj->mm.madv == I915_MADV_WILLNEED &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
	    i915_gem_object_is_tiled(obj))
-		i915_gem_object_unpin_pages(obj);
+		__i915_gem_object_unpin_pages(obj);

-	if (WARN_ON(obj->pages_pin_count))
-		obj->pages_pin_count = 0;
+	if (obj->ops->release)
+		obj->ops->release(obj);
+
+	if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
+		obj->mm.pages_pin_count = 0;
	if (discard_backing_storage(obj))
-		obj->madv = I915_MADV_DONTNEED;
-	i915_gem_object_put_pages(obj);
+		obj->mm.madv = I915_MADV_DONTNEED;
+	__i915_gem_object_put_pages(obj);

-	BUG_ON(obj->pages);
+	GEM_BUG_ON(obj->mm.pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

-	if (obj->ops->release)
-		obj->ops->release(obj);
-
	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

@@ -5063,14 +5049,13 @@ i915_gem_object_create_from_data(struct drm_device *dev,
	if (ret)
		goto fail;

-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto fail;

-	i915_gem_object_pin_pages(obj);
-	sg = obj->pages;
+	sg = obj->mm.pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
-	obj->dirty = 1;		/* Backing store is now out of date */
+	obj->mm.dirty = true; /* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
@@ -5091,13 +5076,13 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
-	struct i915_gem_object_page_iter *iter = &obj->get_page;
+	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
-	GEM_BUG_ON(obj->pages_pin_count == 0);
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
@@ -5222,7 +5207,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
-	if (!obj->dirty)
+	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
drivers/gpu/drm/i915/i915_gem_batch_pool.c +1 −2
@@ -130,11 +130,10 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			return obj;
	}

-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	list_move_tail(&obj->batch_pool_link, list);
-	i915_gem_object_pin_pages(obj);
	return obj;
}