
Commit 0734b477 authored by Sudarshan Rajagopalan

ion: Implement ion heap pool auto refill



Refill the ion heap pools automatically when the pool count drops
below the configured low mark. The refilling is done by a kworker
thread, which is woken asynchronously when the pool count drops below
the low mark and refills the pool until its fill mark is reached.

Change-Id: Idea83e68cd73e640c2420f5f34e6f156f26819ef
Signed-off-by: Sudarshan Rajagopalan <sudaraja@codeaurora.org>
parent 05e912b7
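In outline, the mechanism added by this patch works as follows (a condensed sketch assembled from the hunks below; locking, error handling and the low-memory check are omitted):

	/* Allocation path (system heap): wake the refill worker when a pool runs low. */
	page = ion_page_pool_alloc(pool, from_pool);
	if (pool_auto_refill_en && pool_count_below_lowmark(pool))
		wake_up_process(heap->kworker[cached]);

	/* Worker thread (ion_sys_heap_worker): top each pool back up, then sleep until woken again. */
	for (i = 0; i < NUM_ORDERS; i++)
		if (pool_count_below_lowmark(pools[i]))
			ion_page_pool_refill(pools[i]); /* allocates pages until pool_fillmark_reached() */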
+19 −0
@@ -67,3 +67,22 @@ config ION_DEFER_FREE_NO_SCHED_IDLE
	  Choose this option to remove the SCHED_IDLE flag in case of defer
	  free thereby increasing the priority of defer free thread.
	  if you're not sure say Y here.

config ION_POOL_AUTO_REFILL
	bool "Refill the ION heap pools automatically"
	depends on ION
	help
	  Choose this option to refill the ION system heap pools (non-secure)
	  automatically when the pool page count falls below a set low mark.
	  The refilling is done by a worker thread that is woken asynchronously
	  when the pool count drops below the low mark.
	  If you're not sure, say Y here.

config ION_POOL_FILL_MARK
	int "ion pool fillmark size in MB"
	depends on ION_POOL_AUTO_REFILL
	range 16 256
	default 100
	help
	  Set the fillmark of the pool in megabytes; the lowmark is
	  ION_POOL_LOW_MARK_PERCENT of the fillmark value.
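
As an example, a defconfig fragment enabling this feature (hypothetical values, using the default fill mark) would look like:

	CONFIG_ION_POOL_AUTO_REFILL=y
	CONFIG_ION_POOL_FILL_MARK=100

With these values, each non-secure system heap pool is refilled back up to 100 MB worth of pages whenever it drops below 40 MB worth (40% of the fill mark; see POOL_LOW_MARK_PERCENT in the header change below).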
+37 −1
@@ -17,10 +17,12 @@
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/bitops.h>
#include <linux/vmstat.h>
#include "ion_kernel.h"
#include "../uapi/ion.h"
#include "../uapi/msm_ion.h"
@@ -54,6 +56,16 @@
#define MAKE_ION_ALLOC_DMA_READY 0
#endif

/* ION page pool marks in bytes */
#ifdef CONFIG_ION_POOL_AUTO_REFILL
#define ION_POOL_FILL_MARK (CONFIG_ION_POOL_FILL_MARK * SZ_1M)
#define POOL_LOW_MARK_PERCENT	40UL
#define ION_POOL_LOW_MARK ((ION_POOL_FILL_MARK * POOL_LOW_MARK_PERCENT) / 100)
#else
#define ION_POOL_FILL_MARK 0UL
#define ION_POOL_LOW_MARK 0UL
#endif

/**
 * struct ion_platform_heap - defines a heap in the given platform
 * @type:	type of the heap from ion_heap_type enum
@@ -400,6 +412,7 @@ ion_secure_carveout_heap_create(struct ion_platform_heap *heap);
 * struct ion_page_pool - pagepool struct
 * @high_count:		number of highmem items in the pool
 * @low_count:		number of lowmem items in the pool
 * @count:		total number of pages/items in the pool
 * @high_items:		list of highmem items
 * @low_items:		list of lowmem items
 * @mutex:		lock protecting this struct and especially the count
@@ -408,6 +421,7 @@ ion_secure_carveout_heap_create(struct ion_platform_heap *heap);
 * @order:		order of pages in the pool
 * @list:		plist node for list of pools
 * @cached:		it's cached pool or not
 * @heap:		ion heap associated to this pool
 *
 * Allows you to keep a pool of pre allocated pages to use from your heap.
 * Keeping a pool of pages that is ready for dma, ie any cached mapping have
@@ -417,6 +431,7 @@ ion_secure_carveout_heap_create(struct ion_platform_heap *heap);
struct ion_page_pool {
	int high_count;
	int low_count;
	atomic_t count;
	bool cached;
	struct list_head high_items;
	struct list_head low_items;
@@ -425,10 +440,12 @@ struct ion_page_pool {
	gfp_t gfp_mask;
	unsigned int order;
	struct plist_node list;
	struct ion_heap heap;
};

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
					   bool cached);
void ion_page_pool_refill(struct ion_page_pool *pool);
void ion_page_pool_destroy(struct ion_page_pool *pool);
struct page *ion_page_pool_alloc(struct ion_page_pool *a, bool *from_pool);
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
@@ -468,4 +485,23 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);

int ion_query_heaps(struct ion_heap_query *query);

static __always_inline int get_pool_fillmark(struct ion_page_pool *pool)
{
	return ION_POOL_FILL_MARK / (PAGE_SIZE << pool->order);
}

static __always_inline int get_pool_lowmark(struct ion_page_pool *pool)
{
	return ION_POOL_LOW_MARK / (PAGE_SIZE << pool->order);
}

static __always_inline bool pool_count_below_lowmark(struct ion_page_pool *pool)
{
	return atomic_read(&pool->count) < get_pool_lowmark(pool);
}

static __always_inline bool pool_fillmark_reached(struct ion_page_pool *pool)
{
	return atomic_read(&pool->count) >= get_pool_fillmark(pool);
}
#endif /* _ION_H */
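
As a worked example of the mark helpers above, assuming 4 KiB pages and the default CONFIG_ION_POOL_FILL_MARK of 100:

	/*
	 * ION_POOL_FILL_MARK = 100 * SZ_1M                 = 104857600 bytes
	 * ION_POOL_LOW_MARK  = 40% of ION_POOL_FILL_MARK   =  41943040 bytes
	 *
	 * order-0 pool: fillmark = 104857600 / 4096  = 25600 pages, lowmark = 10240 pages
	 * order-4 pool: fillmark = 104857600 / 65536 =  1600 pages, lowmark =   640 pages
	 */

The actual pool orders depend on the heap configuration; the figures only illustrate how the byte marks scale per order.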
+40 −3
@@ -12,6 +12,25 @@

#include "ion.h"

/* do a simple check to see if we are in any low memory situation */
static bool pool_refill_ok(struct ion_page_pool *pool)
{
	struct zonelist *zonelist;
	struct zoneref *z;
	struct zone *zone;
	int mark;
	int classzone_idx = (int)gfp_zone(pool->gfp_mask);

	zonelist = node_zonelist(numa_node_id(), pool->gfp_mask);
	for_each_zone_zonelist(zone, z, zonelist, classzone_idx) {
		mark = high_wmark_pages(zone);
		if (!zone_watermark_ok_safe(zone, 0, mark, classzone_idx))
			return false;
	}

	return true;
}

static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
	return alloc_pages(pool->gfp_mask, pool->order);
@@ -34,11 +53,30 @@ static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
		pool->low_count++;
	}

	atomic_inc(&pool->count);
	mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
			    (1 << (PAGE_SHIFT + pool->order)));
	mutex_unlock(&pool->mutex);
}

void ion_page_pool_refill(struct ion_page_pool *pool)
{
	struct page *page;
	gfp_t gfp_refill = (pool->gfp_mask | __GFP_RECLAIM) & ~__GFP_NORETRY;
	struct device *dev = pool->heap.priv;

	while (!pool_fillmark_reached(pool) && pool_refill_ok(pool)) {
		page = alloc_pages(gfp_refill, pool->order);
		if (!page)
			break;
		if (!pool->cached)
			ion_pages_sync_for_device(dev, page,
						  PAGE_SIZE << pool->order,
						  DMA_BIDIRECTIONAL);
		ion_page_pool_add(pool, page);
	}
}

static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
{
	struct page *page;
@@ -53,6 +91,7 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
		pool->low_count--;
	}

	atomic_dec(&pool->count);
	list_del(&page->lru);
	mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
			    -(1 << (PAGE_SHIFT + pool->order)));
@@ -165,12 +204,10 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
					   bool cached)
{
	struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	struct ion_page_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return NULL;
	pool->high_count = 0;
	pool->low_count = 0;
	INIT_LIST_HEAD(&pool->low_items);
	INIT_LIST_HEAD(&pool->high_items);
	pool->gfp_mask = gfp_mask;
+98 −7
@@ -16,6 +16,8 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/types.h>
#include <linux/sched.h>
#include <soc/qcom/secure_buffer.h>
#include "ion_system_heap.h"
#include "ion.h"
@@ -27,6 +29,9 @@ static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
				     __GFP_NORETRY) & ~__GFP_RECLAIM;
static gfp_t low_order_gfp_flags  = GFP_HIGHUSER | __GFP_ZERO;

bool pool_auto_refill_en  __read_mostly =
		IS_ENABLED(CONFIG_ION_POOL_AUTO_REFILL);

int order_to_index(unsigned int order)
{
	int i;
@@ -58,7 +63,7 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      unsigned long order,
				      bool *from_pool)
{
	bool cached = ion_buffer_cached(buffer);
	int cached = (int)ion_buffer_cached(buffer);
	struct page *page;
	struct ion_page_pool *pool;
	int vmid = get_secure_vmid(buffer->flags);
@@ -73,6 +78,11 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,

	page = ion_page_pool_alloc(pool, from_pool);

	if (pool_auto_refill_en &&
	    pool_count_below_lowmark(pool)) {
		wake_up_process(heap->kworker[cached]);
	}

	if (IS_ERR(page))
		return page;

@@ -624,7 +634,8 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
 * nothing. If it succeeds you'll eventually need to use
 * ion_system_heap_destroy_pools to destroy the pools.
 */
static int ion_system_heap_create_pools(struct ion_page_pool **pools,
static int ion_system_heap_create_pools(struct ion_system_heap *sys_heap,
					struct ion_page_pool **pools,
					bool cached)
{
	int i;
@@ -638,6 +649,7 @@ static int ion_system_heap_create_pools(struct ion_page_pool **pools,
		pool = ion_page_pool_create(gfp_flags, orders[i], cached);
		if (!pool)
			goto err_create_pool;
		pool->heap = sys_heap->heap;
		pools[i] = pool;
	}
	return 0;
@@ -646,9 +658,70 @@ static int ion_system_heap_create_pools(struct ion_page_pool **pools,
	return -ENOMEM;
}

static int ion_sys_heap_worker(void *data)
{
	struct ion_page_pool **pools = (struct ion_page_pool **)data;
	int i;

	for (;;) {
		for (i = 0; i < NUM_ORDERS; i++) {
			if (pool_count_below_lowmark(pools[i]))
				ion_page_pool_refill(pools[i]);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}
		schedule();

		set_current_state(TASK_RUNNING);
	}

	return 0;
}

static struct task_struct *ion_create_kworker(struct ion_page_pool **pools,
					      bool cached)
{
	struct sched_attr attr = { 0 };
	struct task_struct *thread;
	int ret;
	char *buf;
	cpumask_t *cpumask;
	DECLARE_BITMAP(bmap, nr_cpumask_bits);

	attr.sched_nice = ION_KTHREAD_NICE_VAL;
	buf = cached ? "cached" : "uncached";
	/*
	 * Affine the kthreads to min capacity CPUs
	 * TODO: remove this hack once is_min_capability_cpu is available
	 */
	bitmap_fill(bmap, 0x4);
	cpumask = to_cpumask(bmap);

	thread = kthread_create(ion_sys_heap_worker, pools,
				"ion-pool-%s-worker", buf);
	if (IS_ERR(thread)) {
		pr_err("%s: failed to create %s worker thread: %ld\n",
		       __func__, buf, PTR_ERR(thread));
		return thread;
	}
	ret = sched_setattr(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set task priority for %s worker thread: ret = %d\n",
			__func__, buf, ret);
		return ERR_PTR(ret);
	}
	kthread_bind_mask(thread, cpumask);
	return thread;
}

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)
{
	struct ion_system_heap *heap;
	int ret = -ENOMEM;
	int i;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
@@ -660,21 +733,39 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)

	for (i = 0; i < VMID_LAST; i++)
		if (is_secure_vmid_valid(i))
			if (ion_system_heap_create_pools(heap->secure_pools[i],
			if (ion_system_heap_create_pools(heap,
							 heap->secure_pools[i],
							 false))
				goto destroy_secure_pools;

	if (ion_system_heap_create_pools(heap->uncached_pools, false))
	if (ion_system_heap_create_pools(heap, heap->uncached_pools, false))
		goto destroy_secure_pools;

	if (ion_system_heap_create_pools(heap->cached_pools, true))
	if (ion_system_heap_create_pools(heap, heap->cached_pools, true))
		goto destroy_uncached_pools;

	if (pool_auto_refill_en) {
		heap->kworker[ION_KTHREAD_UNCACHED] =
				ion_create_kworker(heap->uncached_pools, false);
		if (IS_ERR(heap->kworker[ION_KTHREAD_UNCACHED])) {
			ret = PTR_ERR(heap->kworker[ION_KTHREAD_UNCACHED]);
			goto destroy_pools;
		}
		heap->kworker[ION_KTHREAD_CACHED] =
				ion_create_kworker(heap->cached_pools, true);
		if (IS_ERR(heap->kworker[ION_KTHREAD_CACHED])) {
			kthread_stop(heap->kworker[ION_KTHREAD_UNCACHED]);
			ret = PTR_ERR(heap->kworker[ION_KTHREAD_CACHED]);
			goto destroy_pools;
		}
	}

	mutex_init(&heap->split_page_mutex);

	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;

destroy_pools:
	ion_system_heap_destroy_pools(heap->cached_pools);
destroy_uncached_pools:
	ion_system_heap_destroy_pools(heap->uncached_pools);
destroy_secure_pools:
@@ -683,7 +774,7 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)
			ion_system_heap_destroy_pools(heap->secure_pools[i]);
	}
	kfree(heap);
	return ERR_PTR(-ENOMEM);
	return ERR_PTR(ret);
}

static int ion_system_contig_heap_allocate(struct ion_heap *heap,
+11 −1
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 */
#include <soc/qcom/secure_buffer.h>
#include "ion.h"
@@ -20,10 +20,20 @@ static const unsigned int orders[] = {0};

#define NUM_ORDERS ARRAY_SIZE(orders)

#define ION_KTHREAD_NICE_VAL 10

enum ion_kthread_type {
	ION_KTHREAD_UNCACHED,
	ION_KTHREAD_CACHED,
	ION_MAX_NUM_KTHREADS
};

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool *uncached_pools[MAX_ORDER];
	struct ion_page_pool *cached_pools[MAX_ORDER];
	/* worker threads to refill the pool */
	struct task_struct *kworker[ION_MAX_NUM_KTHREADS];
	struct ion_page_pool *secure_pools[VMID_LAST][MAX_ORDER];
	/* Prevents unnecessary page splitting */
	struct mutex split_page_mutex;