Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a93f3584 authored by Patrick Daly
Browse files

ion: ion_page_pool: Use -ENOMEM instead of NULL



Convert all non-static functions in ion_page_pool which return
a struct page to use -ENOMEM as an error value instead of NULL.
This prepares for future changes which use alternate error codes.

Change-Id: Ie661c5c07d280ef6a4018c29ad4e4a316650e0b5
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
parent cdf68d23
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -92,6 +92,9 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)
		page = ion_page_pool_alloc_pages(pool);
		*from_pool = false;
	}

	if (!page)
		return ERR_PTR(-ENOMEM);
	return page;
}

@@ -103,7 +106,7 @@ struct page *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool)
	struct page *page = NULL;

	if (!pool)
		return NULL;
		return ERR_PTR(-EINVAL);

	if (mutex_trylock(&pool->mutex)) {
		if (pool->high_count)
@@ -113,6 +116,8 @@ struct page *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool)
		mutex_unlock(&pool->mutex);
	}

	if (!page)
		return ERR_PTR(-ENOMEM);
	return page;
}

+13 −11
Original line number Diff line number Diff line
@@ -76,8 +76,8 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,

	page = ion_page_pool_alloc(pool, from_pool);

	if (!page)
		return 0;
	if (IS_ERR(page))
		return page;

	if ((MAKE_ION_ALLOC_DMA_READY && vmid <= 0) || !(*from_pool))
		ion_pages_sync_for_device(dev, page, PAGE_SIZE << order,
@@ -128,7 +128,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap,

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < order_to_size(orders[i]))
@@ -137,7 +137,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
			continue;
		from_pool = !(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC);
		page = alloc_buffer_page(heap, buffer, orders[i], &from_pool);
		if (!page)
		if (IS_ERR(page))
			continue;

		info->page = page;
@@ -148,7 +148,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
	}
	kfree(info);

	return NULL;
	return ERR_PTR(-ENOMEM);
}

static struct page_info *alloc_from_pool_preferred(
@@ -161,7 +161,7 @@ static struct page_info *alloc_from_pool_preferred(

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < order_to_size(orders[i]))
@@ -170,7 +170,7 @@ static struct page_info *alloc_from_pool_preferred(
			continue;

		page = alloc_from_secure_pool_order(heap, buffer, orders[i]);
		if (!page)
		if (IS_ERR(page))
			continue;

		info->page = page;
@@ -181,7 +181,7 @@ static struct page_info *alloc_from_pool_preferred(
	}

	page = split_page_from_secure_pool(heap, buffer);
	if (page) {
	if (!IS_ERR(page)) {
		info->page = page;
		info->order = 0;
		info->from_pool = true;
@@ -265,7 +265,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
	struct sg_table table_sync = {0};
	struct scatterlist *sg;
	struct scatterlist *sg_sync;
	int ret;
	int ret = -ENOMEM;
	struct list_head pages;
	struct list_head pages_from_pool;
	struct page_info *info, *tmp_info;
@@ -294,8 +294,10 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
					sys_heap, buffer, size_remaining,
					max_order);

		if (!info)
		if (IS_ERR(info)) {
			ret = PTR_ERR(info);
			goto err;
		}

		sz = (1 << info->order) * PAGE_SIZE;

@@ -402,7 +404,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
	return ret;
}

void ion_system_heap_free(struct ion_buffer *buffer)
+5 −5
Original line number Diff line number Diff line
@@ -376,7 +376,7 @@ struct page *alloc_from_secure_pool_order(struct ion_system_heap *heap,
	struct ion_page_pool *pool;

	if (!is_secure_vmid_valid(vmid))
		return NULL;
		return ERR_PTR(-EINVAL);

	pool = heap->secure_pools[vmid][order_to_index(order)];
	return ion_page_pool_alloc_pool_only(pool);
@@ -398,13 +398,13 @@ struct page *split_page_from_secure_pool(struct ion_system_heap *heap,
	 * possible.
	 */
	page = alloc_from_secure_pool_order(heap, buffer, 0);
	if (page)
	if (!IS_ERR(page))
		goto got_page;

	for (i = NUM_ORDERS - 2; i >= 0; i--) {
		order = orders[i];
		page = alloc_from_secure_pool_order(heap, buffer, order);
		if (!page)
		if (IS_ERR(page))
			continue;

		split_page(page, order);
@@ -414,7 +414,7 @@ struct page *split_page_from_secure_pool(struct ion_system_heap *heap,
	 * Return the remaining order-0 pages to the pool.
	 * SetPagePrivate flag to mark memory as secure.
	 */
	if (page) {
	if (!IS_ERR(page)) {
		for (j = 1; j < (1 << order); j++) {
			SetPagePrivate(page + j);
			free_buffer_page(heap, buffer, page + j, 0);
@@ -443,7 +443,7 @@ int ion_secure_page_pool_shrink(

	while (freed < nr_to_scan) {
		page = ion_page_pool_alloc_pool_only(pool);
		if (!page)
		if (IS_ERR(page))
			break;
		list_add(&page->lru, &pages);
		freed += (1 << order);