Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 84e8978e authored by Matthew Auld, committed by Chris Wilson
Browse files

drm/i915: s/sg_mask/sg_page_sizes/



It's a little unclear what the sg_mask actually is, so prefer the more
meaningful name of sg_page_sizes.

Suggested-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171009110024.29114-1-matthew.auld@intel.com


Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 43ae70d9
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -3537,7 +3537,7 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_mask);
				 unsigned int sg_page_sizes);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
+14 −14
Original line number Diff line number Diff line
@@ -2333,7 +2333,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	unsigned int max_segment = i915_sg_segment_size();
	unsigned int sg_mask;
	unsigned int sg_page_sizes;
	gfp_t noreclaim;
	int ret;

@@ -2365,7 +2365,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)

	sg = st->sgl;
	st->nents = 0;
	sg_mask = 0;
	sg_page_sizes = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
@@ -2419,7 +2419,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i) {
				sg_mask |= sg->length;
				sg_page_sizes |= sg->length;
				sg = sg_next(sg);
			}
			st->nents++;
@@ -2433,7 +2433,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
	if (sg) { /* loop terminated early; short sg table */
		sg_mask |= sg->length;
		sg_page_sizes |= sg->length;
		sg_mark_end(sg);
	}

@@ -2464,7 +2464,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	__i915_gem_object_set_pages(obj, st, sg_mask);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

@@ -2492,7 +2492,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_mask)
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
@@ -2512,16 +2512,16 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_mask);
	obj->mm.page_sizes.phys = sg_mask;
	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given sg_mask.
	 * This will give us the page-sizes which we may be able to use
	 * opportunistically when later inserting into the GTT. For example if
	 * phys=2G, then in theory we should be able to use 1G, 2M, 64K or 4K
	 * pages, although in practice this will depend on a number of other
	 * factors.
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+3 −3
Original line number Diff line number Diff line
@@ -259,16 +259,16 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
/*
 * i915_gem_object_get_pages_dmabuf - acquire backing pages for an
 * imported dma-buf object.
 *
 * Maps the dma-buf attachment to obtain the exporter's sg_table, derives
 * the set of physical page sizes present in the scatterlist (via
 * i915_sg_page_sizes(); presumably a bitmask of segment sizes — see the
 * sg_page_sizes rename in commit 84e8978e), and publishes the pages on
 * the object with __i915_gem_object_set_pages().
 *
 * Returns 0 on success, or the PTR_ERR() from the failed mapping attempt.
 *
 * Note: the scraped diff rendering showed both the pre- and post-change
 * lines of this hunk; this is the reconstructed post-commit body.
 */
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_page_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}
+4 −4
Original line number Diff line number Diff line
@@ -49,7 +49,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_mask;
	unsigned int sg_page_sizes;
	unsigned int npages;
	int max_order;
	gfp_t gfp;
@@ -88,7 +88,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)

	sg = st->sgl;
	st->nents = 0;
	sg_mask = 0;
	sg_page_sizes = 0;

	do {
		int order = min(fls(npages) - 1, max_order);
@@ -106,7 +106,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		sg_mask |= PAGE_SIZE << order;
		sg_page_sizes |= PAGE_SIZE << order;
		st->nents++;

		npages -= 1 << order;
@@ -135,7 +135,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
	 */
	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, st, sg_mask);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

+3 −3
Original line number Diff line number Diff line
@@ -405,7 +405,7 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
{
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_mask;
	unsigned int sg_page_sizes;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -435,9 +435,9 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
		return ERR_PTR(ret);
	}

	sg_mask = i915_sg_page_sizes(st->sgl);
	sg_page_sizes = i915_sg_page_sizes(st->sgl);

	__i915_gem_object_set_pages(obj, st, sg_mask);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return st;
}
Loading