Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 73f522ba authored by Ville Syrjälä
Browse files

drm/i915: Use i915_gem_object_get_dma_address() to populate rotated vmas



Replace the kvmalloc_array() with i915_gem_object_get_dma_address() when
populating rotated vmas. One random access mechanism ought to be enough
for everyone?

To calculate the size of the radix tree I think we can do
something like this (assuming 64bit pointers):
 num_pages = obj_size / 4096
 tree_height = ceil(log64(num_pages))
 num_nodes = sum(64^n, n, 0, tree_height-1)
 tree_size = num_nodes * 576

If we compare that with the object size we should get a relative
overhead of around 0.2% to 1% for reasonable sized objects,
which framebuffers tend to be.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181016150413.11577-1-ville.syrjala@linux.intel.com


Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
parent 198a2a2f
Loading
Loading
Loading
Loading
+6 −25
Original line number Original line Diff line number Diff line
@@ -3637,7 +3637,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
}
}


static struct scatterlist *
static struct scatterlist *
rotate_pages(const dma_addr_t *in, unsigned int offset,
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int width, unsigned int height,
	     unsigned int stride,
	     unsigned int stride,
	     struct sg_table *st, struct scatterlist *sg)
	     struct sg_table *st, struct scatterlist *sg)
@@ -3646,7 +3646,7 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
	unsigned int src_idx;
	unsigned int src_idx;


	for (column = 0; column < width; column++) {
	for (column = 0; column < width; column++) {
		src_idx = stride * (height - 1) + column;
		src_idx = stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
		for (row = 0; row < height; row++) {
			st->nents++;
			st->nents++;
			/* We don't need the pages, but need to initialize
			/* We don't need the pages, but need to initialize
@@ -3654,7 +3654,8 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
			 * The only thing we need are DMA addresses.
			 * The only thing we need are DMA addresses.
			 */
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) = in[offset + src_idx];
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			sg = sg_next(sg);
			src_idx -= stride;
			src_idx -= stride;
@@ -3668,22 +3669,11 @@ static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
		   struct drm_i915_gem_object *obj)
{
{
	const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE;
	unsigned int size = intel_rotation_info_size(rot_info);
	unsigned int size = intel_rotation_info_size(rot_info);
	struct sgt_iter sgt_iter;
	dma_addr_t dma_addr;
	unsigned long i;
	dma_addr_t *page_addr_list;
	struct sg_table *st;
	struct sg_table *st;
	struct scatterlist *sg;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int ret = -ENOMEM;

	int i;
	/* Allocate a temporary list of source pages for random access. */
	page_addr_list = kvmalloc_array(n_pages,
					sizeof(dma_addr_t),
					GFP_KERNEL);
	if (!page_addr_list)
		return ERR_PTR(ret);


	/* Allocate target SG list. */
	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -3694,29 +3684,20 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
	if (ret)
	if (ret)
		goto err_sg_alloc;
		goto err_sg_alloc;


	/* Populate source page list from the object. */
	i = 0;
	for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
		page_addr_list[i++] = dma_addr;

	GEM_BUG_ON(i != n_pages);
	st->nents = 0;
	st->nents = 0;
	sg = st->sgl;
	sg = st->sgl;


	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
		sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].stride, st, sg);
				  rot_info->plane[i].stride, st, sg);
	}
	}


	kvfree(page_addr_list);

	return st;
	return st;


err_sg_alloc:
err_sg_alloc:
	kfree(st);
	kfree(st);
err_st_alloc:
err_st_alloc:
	kvfree(page_addr_list);


	DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
	DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
			 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
			 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);