Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a3056906 authored by Colin Cross's avatar Colin Cross Committed by Greg Kroah-Hartman
Browse files

ion: remove ion_heap_alloc_pages



Now that ion_vm_fault doesn't need a struct page with a nonzero
refcount, there is no need to allocate heap memory for cached pages using
split_page.  Remove the ion_heap_alloc_pages and ion_heap_free_pages
helpers in favor of direct calls to alloc_pages and __free_pages,
and remove the special handling in the system heap.

Signed-off-by: default avatarColin Cross <ccross@android.com>
Signed-off-by: default avatarJohn Stultz <john.stultz@linaro.org>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 462be0c6
Loading
Loading
Loading
Loading
+0 −27
Original line number Diff line number Diff line
@@ -148,33 +148,6 @@ int ion_heap_buffer_zero(struct ion_buffer *buffer)
	return ret;
}

/*
 * Allocate pages of the given order for an ion buffer.
 *
 * For buffers whose user mappings are faulted in page-by-page, the
 * high-order allocation is split into individual order-0 pages so each
 * page carries its own refcount.  Returns NULL on allocation failure.
 */
struct page *ion_heap_alloc_pages(struct ion_buffer *buffer, gfp_t gfp_flags,
				  unsigned int order)
{
	struct page *page;

	page = alloc_pages(gfp_flags, order);
	if (page && ion_buffer_fault_user_mappings(buffer))
		split_page(page, order);

	return page;
}

/*
 * Free pages previously obtained via ion_heap_alloc_pages().
 *
 * Buffers with faulted user mappings had their allocation split into
 * order-0 pages, so each page must be released individually; otherwise
 * the whole high-order allocation is freed in one call.
 */
void ion_heap_free_pages(struct ion_buffer *buffer, struct page *page,
			 unsigned int order)
{
	int i;

	if (ion_buffer_fault_user_mappings(buffer)) {
		/* allocation was split; release each order-0 page */
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}

void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer * buffer)
{
	rt_mutex_lock(&heap->lock);
+0 −13
Original line number Diff line number Diff line
@@ -216,19 +216,6 @@ int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
			struct vm_area_struct *);
int ion_heap_buffer_zero(struct ion_buffer *buffer);

/**
 * ion_heap_alloc_pages - allocate pages from alloc_pages
 * @buffer:		the buffer to allocate for, used to extract the flags
 * @gfp_flags:		the gfp_t for the allocation
 * @order:		the order of the allocation
 *
 * This function allocates from alloc_pages and also performs any other
 * necessary operations based on buffer->flags.  For buffers which
 * will be faulted in, the pages are split using split_page.
 */
struct page *ion_heap_alloc_pages(struct ion_buffer *buffer, gfp_t gfp_flags,
				  unsigned int order);

/**
 * ion_heap_init_deferred_free -- initialize deferred free functionality
 * @heap:		the heap
+1 −6
Original line number Diff line number Diff line
@@ -74,7 +74,7 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = ion_heap_alloc_pages(buffer, gfp_flags, order);
		page = alloc_pages(gfp_flags, order);
		if (!page)
			return 0;
		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
@@ -91,15 +91,10 @@ static void free_buffer_page(struct ion_system_heap *heap,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}