Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e689cf86 authored by Colin Cross's avatar Colin Cross Committed by Mitchel Humpherys
Browse files

ion: fix dma APIs



__dma_page_cpu_to_dev is a private ARM api that is not available
on 3.10 and was never available on other architectures.  We can
get the same behavior by calling dma_sync_sg_for_device with a
scatterlist containing a single page.  It's still not quite a
kosher use of the DMA APIs; we still conflate physical addresses
with bus addresses, but it should at least compile on all
platforms, and work on any platform that doesn't have a physical
to bus address translation.

Change-Id: I8451c2dae4bf85841015c016640684ac28430a5a
Signed-off-by: default avatarColin Cross <ccross@android.com>
Git-commit: 51e3580bf9da57da9c860330293b73b0dbb296e0
Git-repo: http://android.googlesource.com/kernel/common/


[mitchelh@codeaurora.org: conflicts due to different cache APIs being
 used. Earlier we changed __dma_page_cpu_to_dev to
 arm_dma_ops.sync_single_for_device so there was a conflict at that call
 site.]
Signed-off-by: default avatarMitchel Humpherys <mitchelh@codeaurora.org>
parent 98eb96b9
Loading
Loading
Loading
Loading
+19 −2
Original line number Diff line number Diff line
@@ -1014,6 +1014,22 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
{
}

/*
 * ion_pages_sync_for_device - flush/sync a run of pages for use by a device
 * @dev:  device the pages will be used with (callers in this file pass NULL)
 * @page: first page of the region to sync
 * @size: length of the region in bytes
 * @dir:  direction of the upcoming DMA transfer
 *
 * Builds a one-entry scatterlist covering @size bytes starting at @page and
 * hands it to dma_sync_sg_for_device(), replacing the ARM-private
 * __dma_page_cpu_to_dev call so this compiles on all architectures.
 */
void ion_pages_sync_for_device(struct device *dev, struct page *page,
		size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware (i.e. any platform with a 1:1 physical-to-bus mapping).
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

/*
 * ion_vma_list - node tracking one userspace mapping of a buffer.
 * Entries are linked onto a buffer's vmas list and walked by
 * ion_buffer_sync_for_device() when the buffer is synced for DMA.
 */
struct ion_vma_list {
	struct list_head list;		/* entry in the buffer's vmas list */
	struct vm_area_struct *vma;	/* the userspace mapping being tracked */
};
@@ -1038,8 +1054,9 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			arm_dma_ops.sync_single_for_device(
				dev, page_to_phys(page), PAGE_SIZE, dir);
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
							PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
+7 −4
Original line number Diff line number Diff line
@@ -104,6 +104,10 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)

	ion_heap_buffer_zero(buffer);

	if (ion_buffer_cached(buffer))
		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
                                       DMA_BIDIRECTIONAL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		if (ion_buffer_cached(buffer))
			dma_sync_sg_for_device(NULL, sg, 1, DMA_BIDIRECTIONAL);
@@ -140,7 +144,6 @@ static struct ion_heap_ops chunk_heap_ops = {
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_chunk_heap *chunk_heap;
	struct scatterlist sg;

	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
@@ -157,9 +160,9 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
	chunk_heap->size = heap_data->size;
	chunk_heap->allocated = 0;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, phys_to_page(heap_data->base), heap_data->size, 0);
	dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
	ion_pages_sync_for_device(NULL, pfn_to_page(PFN_DOWN(heap_data->base)),
			heap_data->size, DMA_BIDIRECTIONAL);

	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
+2 −5
Original line number Diff line number Diff line
@@ -32,7 +32,6 @@ struct ion_page_pool_item {
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
	struct page *page;
	struct scatterlist sg;

	page = alloc_pages(pool->gfp_mask & ~__GFP_ZERO, pool->order);

@@ -43,10 +42,8 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
		if (ion_heap_high_order_page_zero(page, pool->order))
			goto error_free_pages;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, PAGE_SIZE << pool->order, 0);
	sg_dma_address(&sg) = sg_phys(&sg);
	dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
	ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
						DMA_BIDIRECTIONAL);

	return page;
error_free_pages:
+12 −0
Original line number Diff line number Diff line
@@ -18,6 +18,7 @@
#ifndef _ION_PRIV_H
#define _ION_PRIV_H

#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
@@ -400,6 +401,17 @@ void ion_page_pool_free(struct ion_page_pool *, struct page *);
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
			  int nr_to_scan);

/**
 * ion_pages_sync_for_device - cache flush pages for use with the specified
 *                             device
 * @dev:		the device the pages will be used with
 * @page:		the first page to be flushed
 * @size:		size in bytes of region to be flushed
 * @dir:		direction of dma transfer
 */
void ion_pages_sync_for_device(struct device *dev, struct page *page,
		size_t size, enum dma_data_direction dir);

int ion_walk_heaps(struct ion_client *client, int heap_id, void *data,
			int (*f)(struct ion_heap *heap, void *data));