
Commit 03ac84f1 authored by Chris Wilson

drm/i915: Pass around sg_table to get_pages/put_pages backend



The plan is to move obj->pages out from under the struct_mutex into its
own per-object lock. We need to prune any assumption of the struct_mutex
from the get_pages/put_pages backends, and to make that easier we pass
around the sg_table to operate on directly rather than indirectly via the obj.
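
In outline, a backend's get_pages now returns the sg_table (or an
ERR_PTR-encoded errno) instead of installing it on the object as a side
effect, and the caller publishes the table in one place. A minimal
sketch of that calling convention, mirroring ____i915_gem_object_get_pages()
in the diff below but using a cut-down stand-in object type rather than
the real driver headers:

	#include <linux/err.h>		/* IS_ERR(), PTR_ERR() */
	#include <linux/scatterlist.h>	/* struct sg_table */

	/* Stand-in for drm_i915_gem_object with only the fields this
	 * sketch needs; the real layout lives in i915_drv.h. */
	struct sketch_object {
		struct sg_table *pages;
		struct sg_table *(*get_pages)(struct sketch_object *obj);
	};

	static int sketch_get_pages(struct sketch_object *obj)
	{
		struct sg_table *pages;

		pages = obj->get_pages(obj);	/* backend only produces */
		if (IS_ERR(pages))
			return PTR_ERR(pages);	/* e.g. -ENOMEM, -EINVAL */

		obj->pages = pages;	/* caller publishes, in one place */
		return 0;
	}

Keeping publication in the caller is what lets obj->mm.pages later move
under its own lock: no backend touches the field, so no backend needs
the struct_mutex.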

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-13-chris@chris-wilson.co.uk
parent a4f5ea64
drivers/gpu/drm/i915/i915_drv.h  +27 −9
@@ -2185,8 +2185,8 @@ struct drm_i915_gem_object_ops {
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
@@ -2321,8 +2321,6 @@ struct drm_i915_gem_object {
	struct i915_gem_userptr {
		uintptr_t ptr;
		unsigned read_only :1;
		unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15

		struct i915_mm_struct *mm;
		struct i915_mmu_object *mmu_object;
@@ -2383,6 +2381,19 @@ i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);

static inline bool
i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->base.refcount.refcount) == 0;
}

#if IS_ENABLED(CONFIG_LOCKDEP)
#define lockdep_assert_held_unless(lock, cond) \
	GEM_BUG_ON(debug_locks && !lockdep_is_held(lock) && !(cond))
#else
#define lockdep_assert_held_unless(lock, cond)
#endif

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
@@ -3211,6 +3222,8 @@ dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
@@ -3227,7 +3240,8 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	lockdep_assert_held_unless(&obj->base.dev->struct_mutex,
				   i915_gem_object_is_dead(obj));
	GEM_BUG_ON(!obj->mm.pages);

	obj->mm.pages_pin_count++;
@@ -3242,7 +3256,8 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	lockdep_assert_held_unless(&obj->base.dev->struct_mutex,
				   i915_gem_object_is_dead(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(!obj->mm.pages);

@@ -3255,7 +3270,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);

enum i915_map_type {
	I915_MAP_WB = 0,
@@ -3480,8 +3496,10 @@ i915_vma_unpin_fence(struct i915_vma *vma)
void i915_gem_restore_fences(struct drm_device *dev);

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
				       struct sg_table *pages);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
					 struct sg_table *pages);

/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
drivers/gpu/drm/i915/i915_gem.c  +83 −89
@@ -169,7 +169,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
	return 0;
}

static int
static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
@@ -179,7 +179,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;
		return ERR_PTR(-EINVAL);

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
@@ -187,7 +187,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);
			return ERR_CAST(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
@@ -202,11 +202,11 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
@@ -216,28 +216,30 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->mm.pages = st;
	return 0;
	return st;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj)
{
	int ret;

	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		i915_gem_clflush_object(obj, false);

	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
@@ -265,8 +267,8 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
		obj->mm.dirty = false;
	}

	sg_free_table(obj->mm.pages);
	kfree(obj->mm.pages);
	sg_free_table(pages);
	kfree(pages);
}

static void
@@ -518,9 +520,9 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
	if (ret)
		return ret;

	ret = __i915_gem_object_put_pages(obj);
	if (ret)
		return ret;
	__i915_gem_object_put_pages(obj);
	if (obj->mm.pages)
		return -EBUSY;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
@@ -536,7 +538,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
		     struct drm_file *file)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
@@ -552,7 +554,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file_priv));
				   to_rps_client(file));
	if (ret)
		return ret;

@@ -2263,8 +2265,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
}

/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

@@ -2283,32 +2284,20 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
			      struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}
	__i915_gem_object_release_shmem(obj);

	i915_gem_gtt_finish_object(obj);
	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

@@ -2319,8 +2308,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
	}
	obj->mm.dirty = false;

	sg_free_table(obj->mm.pages);
	kfree(obj->mm.pages);
	sg_free_table(pages);
	kfree(pages);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
@@ -2332,24 +2321,22 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	struct sg_table *pages;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!obj->mm.pages)
		return 0;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;
		return;

	GEM_BUG_ON(obj->bind_count);

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->global_list);
	pages = fetch_and_zero(&obj->mm.pages);
	GEM_BUG_ON(!pages);

	if (obj->mm.mapping) {
		void *ptr;
@@ -2365,12 +2352,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)

	__i915_gem_object_reset_page_iter(obj);

	ops->put_pages(obj);
	obj->mm.pages = NULL;

	i915_gem_object_invalidate(obj);

	return 0;
	obj->ops->put_pages(obj, pages);
}

static unsigned int swiotlb_max_size(void)
@@ -2382,7 +2364,7 @@ static unsigned int swiotlb_max_size(void)
#endif
}

static int
static struct sg_table *
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
@@ -2401,8 +2383,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	max_segment = swiotlb_max_size();
	if (!max_segment)
@@ -2410,12 +2392,12 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;
		return ERR_PTR(-ENOMEM);

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
		return ERR_PTR(-ENOMEM);
	}

	/* Get the list of pages out of our struct file.  They'll be pinned
@@ -2466,20 +2448,19 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
	}
	if (sg) /* loop terminated early; short sg table */
		sg_mark_end(sg);
	obj->mm.pages = st;

	ret = i915_gem_gtt_prepare_object(obj);
	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret)
		goto err_pages;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);
		i915_gem_object_do_bit_17_swizzle(obj, st);

	if (i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		__i915_gem_object_pin_pages(obj);

	return 0;
	return st;

err_pages:
	sg_mark_end(sg);
@@ -2499,7 +2480,35 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
	return ERR_PTR(ret);
}

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;
}

static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	pages = obj->ops->get_pages(obj);
	if (unlikely(IS_ERR(pages)))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages);
	return 0;
}

/* Ensure that the associated pages are gathered from the backing storage
@@ -2511,33 +2520,18 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;
	int err;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (obj->mm.pages)
		return 0;

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		__i915_gem_object_unpin_pages(obj);
		return -EFAULT;
	}

	ret = ops->get_pages(obj);
	if (ret) {
		__i915_gem_object_unpin_pages(obj);
		return ret;
	}
	err = ____i915_gem_object_get_pages(obj);
	if (err)
		__i915_gem_object_unpin_pages(obj);

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);

	obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	return 0;
	return err;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
drivers/gpu/drm/i915/i915_gem_dmabuf.c  +8 −12
@@ -289,22 +289,18 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
	return dma_buf;
}

static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
static struct sg_table *
i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->mm.pages = sg;
	return 0;
	return dma_buf_map_attachment(obj->base.import_attach,
				      DMA_BIDIRECTIONAL);
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->mm.pages, DMA_BIDIRECTIONAL);
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
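
The dmabuf conversion above is the smallest worked example of the new
contract: get_pages just forwards whatever dma_buf_map_attachment()
produced (already a struct sg_table * or an ERR_PTR), and put_pages
consumes the table it is handed instead of reading obj->mm.pages. A toy
backend pair in the same shape, with hypothetical sketch_* names and
the same allocate/free pattern the phys backend uses:

	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	/* Produce: allocate and return a (here empty, single-entry)
	 * table, or an ERR_PTR on failure -- never touch the object. */
	static struct sg_table *sketch_get_pages(void)
	{
		struct sg_table *st;

		st = kmalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			return ERR_PTR(-ENOMEM);

		if (sg_alloc_table(st, 1, GFP_KERNEL)) {
			kfree(st);
			return ERR_PTR(-ENOMEM);
		}
		return st;
	}

	/* Consume: the table arrives as an argument, so the backend no
	 * longer dereferences obj->mm.pages under the struct_mutex. */
	static void sketch_put_pages(struct sg_table *pages)
	{
		sg_free_table(pages);
		kfree(pages);
	}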
drivers/gpu/drm/i915/i915_gem_fence.c  +10 −7
@@ -644,6 +644,7 @@ i915_gem_swizzle_page(struct page *page)
/**
 * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function fixes up the swizzling in case any page frame number for this
 * object has changed in bit 17 since that state has been saved with
@@ -654,7 +655,8 @@ i915_gem_swizzle_page(struct page *page)
 * by swapping them out and back in again).
 */
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
				  struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;
@@ -664,10 +666,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
		return;

	i = 0;
	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
	for_each_sgt_page(page, sgt_iter, pages) {
		char new_bit_17 = page_to_phys(page) >> 17;
		if ((new_bit_17 & 0x1) !=
		    (test_bit(i, obj->bit_17) != 0)) {
		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
			i915_gem_swizzle_page(page);
			set_page_dirty(page);
		}
@@ -678,17 +679,19 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
/**
 * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function saves the bit 17 of each page frame number so that swizzling
 * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
 * be called before the backing storage can be unpinned.
 */
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
				    struct sg_table *pages)
{
	const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
	struct sgt_iter sgt_iter;
	struct page *page;
	int page_count = obj->base.size >> PAGE_SHIFT;
	int i;

	if (obj->bit_17 == NULL) {
@@ -703,7 +706,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)

	i = 0;

	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
	for_each_sgt_page(page, sgt_iter, pages) {
		if (page_to_phys(page) & (1 << 17))
			__set_bit(i, obj->bit_17);
		else
drivers/gpu/drm/i915/i915_gem_gtt.c  +10 −9
@@ -2370,14 +2370,15 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
	i915_ggtt_flush(dev_priv);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->mm.pages->sgl, obj->mm.pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
	if (dma_map_sg(&obj->base.dev->pdev->dev,
		       pages->sgl, pages->nents,
		       PCI_DMA_BIDIRECTIONAL))
		return 0;

	return -ENOSPC;
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
@@ -2696,7 +2697,8 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
					 vma->node.start, size);
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct device *kdev = &dev_priv->drm.pdev->dev;
@@ -2710,8 +2712,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
		}
	}

	dma_unmap_sg(kdev, obj->mm.pages->sgl, obj->mm.pages->nents,
		     PCI_DMA_BIDIRECTIONAL);
	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}

static void i915_gtt_color_adjust(struct drm_mm_node *node,