
Commit 11a3a881 authored by Mitchel Humpherys

ion: invalidate cache after zero'ing buffers even if uncached



Currently, we don't invalidate the cache if we're working with uncached
buffers. However, the memory for those mappings might actually be stale
in the cache since someone else might have touched it. Just invalidate
the cache for everyone, cached or uncached.

Without this change, corruption was observed on the display due to stale
data in graphics buffers from Ion.

CRs-Fixed: 585524
Change-Id: I0037beaadfde983af15d8f45918f7798264dd3ca
Signed-off-by: Mitchel Humpherys <mitchelh@codeaurora.org>
parent b96d42eb
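Before the file-by-file diff, a minimal C sketch of the post-change zeroing path may help. It condenses the vmap batching of ion_heap_pages_zero() down to a per-page loop; kmap_atomic(), memset(), kunmap_atomic(), and outer_inv_range() all appear in the diff context below, while dmac_flush_range() is an assumed ARM cache helper for the inner-cache maintenance that the hunks do not show:

/*
 * Sketch only: zero each page, then invalidate its cache lines
 * unconditionally. The pre-change code skipped the invalidation for
 * uncached buffers, so stale lines from an earlier cached mapping of
 * the same physical pages could mask the zeroed (or later DMA'd) data.
 */
static int sketch_pages_zero(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		void *ptr = kmap_atomic(pages[i]);
		phys_addr_t phys = page_to_phys(pages[i]);

		memset(ptr, 0, PAGE_SIZE);
		dmac_flush_range(ptr, ptr + PAGE_SIZE);	/* assumed helper */
		outer_inv_range(phys, phys + PAGE_SIZE);
		kunmap_atomic(ptr);
	}
	return 0;
}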
+15 −21
@@ -108,8 +108,7 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
 *
 * Note that the `pages' array should be composed of all 4K pages.
 */
-int ion_heap_pages_zero(struct page **pages, int num_pages,
-				bool should_invalidate)
+int ion_heap_pages_zero(struct page **pages, int num_pages)
{
	int i, j, k, npages_to_vmap;
	void *ptr = NULL;
@@ -143,7 +142,6 @@ int ion_heap_pages_zero(struct page **pages, int num_pages,
			return -ENOMEM;

		memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
-		if (should_invalidate) {
		/*
		 * invalidate the cache to pick up the zeroing
		 */
@@ -156,7 +154,6 @@ int ion_heap_pages_zero(struct page **pages, int num_pages,
			outer_inv_range(phys, phys + PAGE_SIZE);
			kunmap_atomic(p);
		}
-		}
		vunmap(ptr);
	}

@@ -196,8 +193,7 @@ static void ion_heap_free_pages_mem(struct pages_mem *pages_mem)
	pages_mem->free_fn(pages_mem->pages);
}

-int ion_heap_high_order_page_zero(struct page *page,
-				int order, bool should_invalidate)
+int ion_heap_high_order_page_zero(struct page *page, int order)
{
	int i, ret;
	struct pages_mem pages_mem;
@@ -210,8 +206,7 @@ int ion_heap_high_order_page_zero(struct page *page,
	for (i = 0; i < (1 << order); ++i)
		pages_mem.pages[i] = page + i;

-	ret = ion_heap_pages_zero(pages_mem.pages, npages,
-				should_invalidate);
+	ret = ion_heap_pages_zero(pages_mem.pages, npages);
	ion_heap_free_pages_mem(&pages_mem);
	return ret;
}
@@ -240,8 +235,7 @@ int ion_heap_buffer_zero(struct ion_buffer *buffer)
			pages_mem.pages[npages++] = page + j;
	}

-	ret = ion_heap_pages_zero(pages_mem.pages, npages,
-				ion_buffer_cached(buffer));
+	ret = ion_heap_pages_zero(pages_mem.pages, npages);
	ion_heap_free_pages_mem(&pages_mem);
	return ret;
}
+2 −5
@@ -40,8 +40,7 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
		return NULL;

	if (pool->gfp_mask & __GFP_ZERO)
-		if (ion_heap_high_order_page_zero(
-				page, pool->order, pool->should_invalidate))
+		if (ion_heap_high_order_page_zero(page, pool->order))
			goto error_free_pages;

	sg_init_table(&sg, 1);
@@ -175,8 +174,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
	return nr_freed;
}

-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
-	bool should_invalidate)
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
{
	struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
					     GFP_KERNEL);
@@ -188,7 +186,6 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
	INIT_LIST_HEAD(&pool->high_items);
	pool->gfp_mask = gfp_mask;
	pool->order = order;
-	pool->should_invalidate = should_invalidate;
	mutex_init(&pool->mutex);
	plist_node_init(&pool->list, order);

+3 −9
@@ -234,11 +234,9 @@ void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
			struct vm_area_struct *);
-int ion_heap_pages_zero(struct page **pages, int num_pages,
-			bool should_invalidate);
+int ion_heap_pages_zero(struct page **pages, int num_pages);
int ion_heap_buffer_zero(struct ion_buffer *buffer);
-int ion_heap_high_order_page_zero(struct page *page,
-				int order, bool should_invalidate);
+int ion_heap_high_order_page_zero(struct page *page, int order);

/**
 * ion_heap_init_deferred_free -- initialize deferred free functionality
@@ -357,8 +355,6 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
 * @gfp_mask:		gfp_mask to use from alloc
 * @order:		order of pages in the pool
 * @list:		plist node for list of pools
- * @should_invalidate:	whether or not the cache needs to be invalidated at
- *			page allocation time.
 *
 * Allows you to keep a pool of pre allocated pages to use from your heap.
 * Keeping a pool of pages that is ready for dma, ie any cached mapping have
@@ -374,11 +370,9 @@ struct ion_page_pool {
	gfp_t gfp_mask;
	unsigned int order;
	struct plist_node list;
-	bool should_invalidate;
};

-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
-	bool should_invalidate);
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
void ion_page_pool_destroy(struct ion_page_pool *);
void *ion_page_pool_alloc(struct ion_page_pool *);
void ion_page_pool_free(struct ion_page_pool *, struct page *);
+4 −6
@@ -355,8 +355,7 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
 * nothing. If it succeeds you'll eventually need to use
 * ion_system_heap_destroy_pools to destroy the pools.
 */
-static int ion_system_heap_create_pools(struct ion_page_pool **pools,
-					bool should_invalidate)
+static int ion_system_heap_create_pools(struct ion_page_pool **pools)
{
	int i;
	for (i = 0; i < num_orders; i++) {
@@ -365,8 +364,7 @@ static int ion_system_heap_create_pools(struct ion_page_pool **pools,

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
-		pool = ion_page_pool_create(gfp_flags, orders[i],
-					should_invalidate);
+		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		pools[i] = pool;
@@ -397,10 +395,10 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
	if (!heap->cached_pools)
		goto err_alloc_cached_pools;

-	if (ion_system_heap_create_pools(heap->uncached_pools, false))
+	if (ion_system_heap_create_pools(heap->uncached_pools))
		goto err_create_uncached_pools;

-	if (ion_system_heap_create_pools(heap->cached_pools, true))
+	if (ion_system_heap_create_pools(heap->cached_pools))
		goto err_create_cached_pools;

	heap->heap.shrinker.shrink = ion_system_heap_shrink;
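With the flag gone, cached and uncached pools are created identically. A hypothetical caller of the new two-argument ion_page_pool_create() might look like the following; the GFP flags and orders here are illustrative, not taken from this commit:

/*
 * Hypothetical usage of the simplified API: no should_invalidate
 * argument, because invalidation now always happens when pages are
 * zeroed rather than being gated per pool.
 */
static int example_create_pool_pair(struct ion_page_pool **out)
{
	out[0] = ion_page_pool_create(GFP_KERNEL | __GFP_ZERO, 0);
	if (!out[0])
		return -ENOMEM;

	out[1] = ion_page_pool_create(GFP_KERNEL | __GFP_ZERO, 4);
	if (!out[1]) {
		ion_page_pool_destroy(out[0]);
		return -ENOMEM;
	}
	return 0;
}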