
Commit 542c6f6d authored by Thomas Hellstrom, committed by Dave Airlie

drm/ttm: Fix ttm in-kernel copying of pages with non-standard caching attributes.



For x86 this affected highmem pages only, since they were always kmapped
cache-coherent, and this is fixed using kmap_atomic_prot().
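
As a rough illustration only (not part of the commit message, and the helper below is hypothetical), the x86 side amounts to mapping the page with the caller-supplied protection instead of the default cache-coherent one; the three-argument kmap_atomic_prot()/KM_USER0 form matches kernels of this era:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical sketch: copy a page's contents while it is mapped with an
 * explicit protection (write-combined here) rather than the cache-coherent
 * mapping a plain kmap()/kmap_atomic() would give. */
static void copy_from_page_wc(void *dst, struct page *page)
{
	pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
	void *src = kmap_atomic_prot(page, KM_USER0, prot);

	memcpy(dst, src, PAGE_SIZE);
	kunmap_atomic(src, KM_USER0);
}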

For other architectures that may not modify the linear kernel map we
resort to vmap() for now, since kmap_atomic_prot() generally uses the
linear kernel map for lowmem pages. This of course comes with a
performance impact and should be optimized when possible.
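
A hedged sketch of the vmap() fallback described above (helper names are hypothetical; the patch open-codes the same branches): when the requested protection is not PAGE_KERNEL, vmap() builds a temporary one-page mapping with that protection, because kmap() of a lowmem page would simply return its linear-map address, whose attributes these architectures cannot change:

#include <linux/highmem.h>
#include <linux/vmalloc.h>

/* Hypothetical helpers mirroring the portable (non-x86) branch. */
static void *map_page_prot(struct page *p, pgprot_t prot)
{
	/* The patch compares pgprot_t values directly; pgprot_val() is the
	 * portable spelling of the same test. */
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		return vmap(&p, 1, 0, prot);	/* fresh mapping, requested caching */
	return kmap(p);				/* default protection: cheap kmap() */
}

static void unmap_page_prot(struct page *p, void *vaddr, pgprot_t prot)
{
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(vaddr);
	else
		kunmap(p);
}

In the diff below, the same two branches are open-coded in ttm_copy_io_ttm_page() and ttm_copy_ttm_io_page(), and the callers derive prot from the memory placement via ttm_io_prot().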

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 4677f15c
+52 −11
@@ -136,7 +136,8 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
 }
 
 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
-				unsigned long page)
+				unsigned long page,
+				pgprot_t prot)
 {
 	struct page *d = ttm_tt_get_page(ttm, page);
 	void *dst;
@@ -145,17 +146,35 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 		return -ENOMEM;
 
 	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-	dst = kmap(d);
+
+#ifdef CONFIG_X86
+	dst = kmap_atomic_prot(d, KM_USER0, prot);
+#else
+	if (prot != PAGE_KERNEL)
+		dst = vmap(&d, 1, 0, prot);
+	else
+		dst = kmap(d);
+#endif
 	if (!dst)
 		return -ENOMEM;
 
 	memcpy_fromio(dst, src, PAGE_SIZE);
-	kunmap(d);
+
+#ifdef CONFIG_X86
+	kunmap_atomic(dst, KM_USER0);
+#else
+	if (prot != PAGE_KERNEL)
+		vunmap(dst);
+	else
+		kunmap(d);
+#endif
+
 	return 0;
 }
 
 static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
-				unsigned long page)
+				unsigned long page,
+				pgprot_t prot)
 {
 	struct page *s = ttm_tt_get_page(ttm, page);
 	void *src;
@@ -164,12 +183,28 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 		return -ENOMEM;
 
 	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-	src = kmap(s);
+#ifdef CONFIG_X86
+	src = kmap_atomic_prot(s, KM_USER0, prot);
+#else
+	if (prot != PAGE_KERNEL)
+		src = vmap(&s, 1, 0, prot);
+	else
+		src = kmap(s);
+#endif
 	if (!src)
 		return -ENOMEM;
 
 	memcpy_toio(dst, src, PAGE_SIZE);
-	kunmap(s);
+
+#ifdef CONFIG_X86
+	kunmap_atomic(src, KM_USER0);
+#else
+	if (prot != PAGE_KERNEL)
+		vunmap(src);
+	else
+		kunmap(s);
+#endif
+
 	return 0;
 }
 
@@ -214,11 +249,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 
 	for (i = 0; i < new_mem->num_pages; ++i) {
 		page = i * dir + add;
-		if (old_iomap == NULL)
-			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
-		else if (new_iomap == NULL)
-			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
-		else
+		if (old_iomap == NULL) {
+			pgprot_t prot = ttm_io_prot(old_mem->placement,
+						    PAGE_KERNEL);
+			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+						   prot);
+		} else if (new_iomap == NULL) {
+			pgprot_t prot = ttm_io_prot(new_mem->placement,
+						    PAGE_KERNEL);
+			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+						   prot);
+		} else
 			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
 		if (ret)
 			goto out1;