
Commit e8cb909a authored by Chris Wilson

drm/i915: Fallback to single page GTT mmappings for relocations

If we cannot pin the entire object into the mappable region of the GTT,
try to pin a single page instead. This is much more likely to succeed,
and prevents us falling back to the clflush slow path.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160818161718.27187-14-chris@chris-wilson.co.uk
parent d50415cc
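
For readers skimming the diff below, here is a minimal, self-contained sketch of the fallback strategy the commit message describes, using hypothetical stand-in names (fake_node, pin_whole_object, reserve_single_page, map_offset) rather than the real i915/drm_mm API. It shows only the shape of the logic: try to pin the whole object into the mappable aperture, and if that fails, reserve one page-sized GTT slot and rebind pages into it on demand.

/* Sketch only: every name below is a hypothetical stand-in, not the i915 API. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct fake_node {
	uint64_t start;		/* GTT offset of the reservation */
	size_t size;		/* size of the reservation in bytes */
	bool allocated;		/* true => single-page fallback in use */
};

/* Pretend the mappable aperture is full, so whole-object pinning fails. */
static bool pin_whole_object(size_t obj_size, struct fake_node *node)
{
	(void)obj_size;
	(void)node;
	return false;		/* simulate PIN_MAPPABLE | PIN_NONBLOCK failing */
}

/* Fallback: reserve a single page of aperture space instead. */
static bool reserve_single_page(struct fake_node *node)
{
	node->start = 0x10000;	/* wherever the allocator found room */
	node->size = (size_t)1 << PAGE_SHIFT;
	node->allocated = true;
	return true;
}

/*
 * Resolve the CPU-visible offset for @page of the object:
 * - whole-object pin: node->start + (page << PAGE_SHIFT)
 * - single-page fallback: rebind @page into the one reserved slot
 *   (the real code does this with ggtt->base.insert_page()), so the
 *   offset is always node->start.
 */
static uint64_t map_offset(struct fake_node *node, int page)
{
	if (node->allocated)
		return node->start;
	return node->start + ((uint64_t)page << PAGE_SHIFT);
}

int main(void)
{
	struct fake_node node = {0};

	if (!pin_whole_object((size_t)64 << PAGE_SHIFT, &node) &&
	    !reserve_single_page(&node))
		return 1;	/* only now fall back to the clflush slow path */

	for (int page = 0; page < 3; page++)
		printf("page %d -> GTT offset 0x%llx\n", page,
		       (unsigned long long)map_offset(&node, page));
	return 0;
}

The patch keeps both paths behind the same reloc_cache, so the rest of the relocation code never needs to know which pinning strategy won.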
+51 −11
@@ -331,6 +331,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
 	cache->vaddr = 0;
 	cache->i915 = i915;
 	cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
+	cache->node.allocated = false;
 }
 
 static inline void *unmask_page(unsigned long p)
@@ -360,10 +361,21 @@ static void reloc_cache_fini(struct reloc_cache *cache)
 		kunmap_atomic(vaddr);
 		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
 	} else {
+		wmb();
 		io_mapping_unmap_atomic((void __iomem *)vaddr);
+		if (cache->node.allocated) {
+			struct i915_ggtt *ggtt = &cache->i915->ggtt;
+
+			ggtt->base.clear_range(&ggtt->base,
+					       cache->node.start,
+					       cache->node.size,
+					       true);
+			drm_mm_remove_node(&cache->node);
+		} else {
 			i915_vma_unpin((struct i915_vma *)cache->node.mm);
+		}
 	}
 }
 
 static void *reloc_kmap(struct drm_i915_gem_object *obj,
 			struct reloc_cache *cache,
@@ -401,8 +413,19 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 			 struct reloc_cache *cache,
 			 int page)
 {
+	struct i915_ggtt *ggtt = &cache->i915->ggtt;
+	unsigned long offset;
 	void *vaddr;
 
+	if (cache->node.allocated) {
+		wmb();
+		ggtt->base.insert_page(&ggtt->base,
+				       i915_gem_object_get_dma_address(obj, page),
+				       cache->node.start, I915_CACHE_NONE, 0);
+		cache->page = page;
+		return unmask_page(cache->vaddr);
+	}
+
 	if (cache->vaddr) {
 		io_mapping_unmap_atomic(unmask_page(cache->vaddr));
 	} else {
@@ -418,9 +441,17 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 
 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 					       PIN_MAPPABLE | PIN_NONBLOCK);
-		if (IS_ERR(vma))
-			return NULL;
-
+		if (IS_ERR(vma)) {
+			memset(&cache->node, 0, sizeof(cache->node));
+			ret = drm_mm_insert_node_in_range_generic
+				(&ggtt->base.mm, &cache->node,
+				 4096, 0, 0,
+				 0, ggtt->mappable_end,
+				 DRM_MM_SEARCH_DEFAULT,
+				 DRM_MM_CREATE_DEFAULT);
+			if (ret)
+				return ERR_PTR(ret);
+		} else {
 			ret = i915_gem_object_put_fence(obj);
 			if (ret) {
 				i915_vma_unpin(vma);
@@ -430,9 +461,18 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 			cache->node.start = vma->node.start;
 			cache->node.mm = (void *)vma;
+		}
 	}
 
+	offset = cache->node.start;
+	if (cache->node.allocated) {
+		ggtt->base.insert_page(&ggtt->base,
+				       i915_gem_object_get_dma_address(obj, page),
+				       offset, I915_CACHE_NONE, 0);
+	} else {
+		offset += page << PAGE_SHIFT;
+	}
+
-	vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable,
-					 cache->node.start + (page << PAGE_SHIFT));
+	vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable, offset);
 	cache->page = page;
 	cache->vaddr = (unsigned long)vaddr;
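
A note on the mapping arithmetic in the final hunk, assuming the usual 4 KiB pages: on the ordinary path the object is pinned contiguously, so the CPU-visible offset is cache->node.start + (page << PAGE_SHIFT); on the fallback path, insert_page() rebinds the requested page into the single reserved slot, so the offset is always cache->node.start. For example, with cache->node.start = 0x10000, page 3 maps at 0x10000 + 0x3000 = 0x13000 on the pinned path, but at 0x10000 itself on the fallback path.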