Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dc333aea authored by Jordan Crouse
Browse files

msm: kgsl: Use DMA APIs for memory pool cache maintenance



After allocating and zeroing pages from system memory or the pool we need
to ensure that the cache is synchronized so that it doesn't cause problems
down the road.  Use dma_sync_sg_for_device to make sure the allocated pages
are clean. This isn't the best way to handle this but we haven't yet come
up with a better way and this does the job.

Change-Id: Ic0dedbade48b700015bec172cf9b64e436364b4a
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent 880a71f5
Loading
Loading
Loading
Loading
+27 −14
Original line number Diff line number Diff line
@@ -57,20 +57,36 @@ _kgsl_get_pool_from_order(int order)
	return index >= 0 ? &kgsl_pools[index] : NULL;
}

/*
 * kgsl_pool_sync_for_device - perform cache maintenance on freshly
 * zeroed pool pages before the GPU can observe them.
 * @dev: device to synchronize for; may be NULL, in which case no cache
 *       maintenance is performed (per the commit message, the caller may
 *       deliberately opt out by passing no device)
 * @page: first page of the (physically contiguous) allocation
 * @size: size of the allocation in bytes
 *
 * Wraps @page in a one-entry scatterlist and calls
 * dma_sync_sg_for_device() so the CPU's zeroed writes are made visible
 * to the device.
 */
static void kgsl_pool_sync_for_device(struct device *dev, struct page *page,
		size_t size)
{
	struct scatterlist sg;

	/* The caller may choose not to specify a device on purpose */
	if (!dev)
		return;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * NOTE(review): the DMA address is set straight from the physical
	 * address rather than via dma_map_sg() — this assumes a 1:1
	 * phys-to-DMA mapping (no IOMMU translation) is acceptable for a
	 * cache-maintenance-only sync; confirm on platforms with an IOMMU.
	 */
	sg_dma_address(&sg) = page_to_phys(page);

	dma_sync_sg_for_device(dev, &sg, 1, DMA_BIDIRECTIONAL);
}

/* Map the page into kernel and zero it out */
static void
_kgsl_pool_zero_page(struct page *p, unsigned int pool_order)
_kgsl_pool_zero_page(struct page *p, unsigned int pool_order,
		struct device *dev)
{
	int i;

	for (i = 0; i < (1 << pool_order); i++) {
		struct page *page = nth_page(p, i);
		void *addr = kmap_atomic(page);

		memset(addr, 0, PAGE_SIZE);
		dmac_flush_range(addr, addr + PAGE_SIZE);
		kunmap_atomic(addr);
		clear_highpage(page);
	}

	kgsl_pool_sync_for_device(dev, p, PAGE_SIZE << pool_order);
}

/* Add a page to specified pool */
@@ -80,8 +96,6 @@ _kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
	if (!p)
		return;

	_kgsl_pool_zero_page(p, pool->pool_order);

	spin_lock(&pool->list_lock);
	list_add_tail(&p->lru, &pool->page_list);
	pool->page_count++;
@@ -327,7 +341,8 @@ static int kgsl_get_page_size(size_t size, unsigned int align)
 * Return total page count on success and negative value on failure
 */
static int kgsl_pool_alloc_page(int *page_size, struct page **pages,
			unsigned int pages_len, unsigned int *align)
			unsigned int pages_len, unsigned int *align,
			struct device *dev)
{
	int j;
	int pcount = 0;
@@ -355,7 +370,6 @@ static int kgsl_pool_alloc_page(int *page_size, struct page **pages,
			} else
				return -ENOMEM;
		}
		_kgsl_pool_zero_page(page, order);
		goto done;
	}

@@ -375,7 +389,6 @@ static int kgsl_pool_alloc_page(int *page_size, struct page **pages,
			page = alloc_pages(gfp_mask, order);
			if (page == NULL)
				return -ENOMEM;
			_kgsl_pool_zero_page(page, order);
			goto done;
		}
	}
@@ -405,11 +418,11 @@ static int kgsl_pool_alloc_page(int *page_size, struct page **pages,
			} else
				return -ENOMEM;
		}

		_kgsl_pool_zero_page(page, order);
	}

done:
	_kgsl_pool_zero_page(page, order, dev);

	for (j = 0; j < (*page_size >> PAGE_SHIFT); j++) {
		p = nth_page(page, j);
		pages[pcount] = p;
@@ -426,7 +439,7 @@ static int kgsl_pool_alloc_page(int *page_size, struct page **pages,
	return -EAGAIN;
}

int kgsl_pool_alloc_pages(u64 size, struct page ***pages)
int kgsl_pool_alloc_pages(u64 size, struct page ***pages, struct device *dev)
{
	int count = 0;
	int npages = size >> PAGE_SHIFT;
@@ -444,7 +457,7 @@ int kgsl_pool_alloc_pages(u64 size, struct page ***pages)

	while (len) {
		int ret = kgsl_pool_alloc_page(&page_size, &local[count],
			npages, &align);
			npages, &align, dev);

		if (ret == -EAGAIN)
			continue;
+6 −1
Original line number Diff line number Diff line
@@ -11,12 +11,17 @@ void kgsl_pool_free_sgt(struct sg_table *sgt);
 * kgsl_pool_alloc_pages - Allocate an array of pages from the pool
 * @size: Size of the allocation
 * @pages: Pointer to an array of pages
 * @dev: A &struct device pointer
 *
 * Allocate a list of pages and store it in the pointer pointed to by @pages.
 * @dev specifies a &struct device that is used to call dma_sync_sg_for_device
 * to synchronize the caches. If @dev isn't specified, no cache maintenance
 * will be performed.
 *
 * Return: The number of entries in the array pointed to by @page or negative
 * on error.
 */
int kgsl_pool_alloc_pages(u64 size, struct page ***pages);
int kgsl_pool_alloc_pages(u64 size, struct page ***pages, struct device *dev);

/**
 * kgsl_pool_free_pages - Free pages in an pages array
+2 −2
Original line number Diff line number Diff line
@@ -1074,7 +1074,7 @@ static int kgsl_alloc_secure_pages(struct kgsl_device *device,
	memdesc->priv |= priv;

	memdesc->ops = &kgsl_secure_pool_ops;
	count = kgsl_pool_alloc_pages(size, &pages);
	count = kgsl_pool_alloc_pages(size, &pages, device->dev);

	if (count < 0)
		return count;
@@ -1129,7 +1129,7 @@ static int kgsl_alloc_pages(struct kgsl_device *device,
	memdesc->priv |= priv;

	memdesc->ops = &kgsl_pool_ops;
	count = kgsl_pool_alloc_pages(size, &pages);
	count = kgsl_pool_alloc_pages(size, &pages, device->dev);

	if (count < 0)
		return count;