
Commit 1fbb233d authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "defconfig: Enable ion heap pool auto refill"

parents fc596f63 fb7ee4f2
+1 −0
@@ -494,6 +494,7 @@ CONFIG_UIO=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ION=y
CONFIG_ION_POOL_AUTO_REFILL=y
CONFIG_QPNP_REVID=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
+1 −0
@@ -513,6 +513,7 @@ CONFIG_UIO=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ION=y
CONFIG_ION_POOL_AUTO_REFILL=y
CONFIG_QPNP_REVID=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
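Both hunks enable the same option, one per target defconfig. On a device running a kernel built from either config, the setting can be confirmed at runtime with zcat /proc/config.gz | grep ION_POOL, assuming CONFIG_IKCONFIG_PROC is also enabled (it is what exposes /proc/config.gz).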
+19 −0
@@ -67,3 +67,22 @@ config ION_DEFER_FREE_NO_SCHED_IDLE
	  Choose this option to remove the SCHED_IDLE flag in case of defer
	  free, thereby increasing the priority of the defer-free thread.
	  If you're not sure, say Y here.

config ION_POOL_AUTO_REFILL
	bool "Refill the ION heap pools automatically"
	depends on ION
	help
	  Choose this option to refill the non-secure ION system heap pools
	  automatically when the pool page count drops below a set low mark.
	  The refilling is done by a worker thread that is woken asynchronously
	  when the pool count falls below the low mark.
	  If you're not sure, say Y here.

config ION_POOL_FILL_MARK
	int "ion pool fillmark size in MB"
	depends on ION_POOL_AUTO_REFILL
	range 16 256
	default 100
	help
	  Set the fillmark of the pool in megabytes. The lowmark is
	  ION_POOL_LOW_MARK_PERCENT of the fillmark value.
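To make the marks concrete, here is a sketch of the arithmetic (not part of the commit), assuming the default fillmark of 100 MB, the 40% low-mark ratio defined in ion.h below, 4 KiB pages, and an order-4 pool:

/* Sketch only: per-pool page thresholds for PAGE_SIZE = 4 KiB, order = 4. */
#define ION_POOL_FILL_MARK	(100 * SZ_1M)	/* Kconfig default: 100 MB */
#define POOL_LOW_MARK_PERCENT	40UL
#define ION_POOL_LOW_MARK	((ION_POOL_FILL_MARK * POOL_LOW_MARK_PERCENT) / 100)

/*
 * get_pool_fillmark(pool) = 100 MB / (4 KiB << 4) = 1600 pages
 * get_pool_lowmark(pool)  =  40 MB / (4 KiB << 4) =  640 pages
 *
 * A refill is triggered when fewer than 640 items remain in the pool
 * and stops once the pool holds 1600 items again.
 */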
+37 −1
@@ -17,10 +17,12 @@
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/bitops.h>
#include <linux/vmstat.h>
#include "ion_kernel.h"
#include "../uapi/ion.h"
#include "../uapi/msm_ion.h"
@@ -54,6 +56,16 @@
#define MAKE_ION_ALLOC_DMA_READY 0
#endif

/* ION page pool marks in bytes */
#ifdef CONFIG_ION_POOL_AUTO_REFILL
#define ION_POOL_FILL_MARK (CONFIG_ION_POOL_FILL_MARK * SZ_1M)
#define POOL_LOW_MARK_PERCENT	40UL
#define ION_POOL_LOW_MARK ((ION_POOL_FILL_MARK * POOL_LOW_MARK_PERCENT) / 100)
#else
#define ION_POOL_FILL_MARK 0UL
#define ION_POOL_LOW_MARK 0UL
#endif

/**
 * struct ion_platform_heap - defines a heap in the given platform
 * @type:	type of the heap from ion_heap_type enum
@@ -400,6 +412,7 @@ ion_secure_carveout_heap_create(struct ion_platform_heap *heap);
 * struct ion_page_pool - pagepool struct
 * @high_count:		number of highmem items in the pool
 * @low_count:		number of lowmem items in the pool
 * @count:		total number of pages/items in the pool
 * @high_items:		list of highmem items
 * @low_items:		list of lowmem items
 * @mutex:		lock protecting this struct and especially the count
@@ -408,6 +421,7 @@ ion_secure_carveout_heap_create(struct ion_platform_heap *heap);
 * @order:		order of pages in the pool
 * @list:		plist node for list of pools
 * @cached:		it's cached pool or not
 * @heap:		ion heap associated to this pool
 *
 * Allows you to keep a pool of pre allocated pages to use from your heap.
 * Keeping a pool of pages that is ready for dma, ie any cached mapping have
@@ -417,6 +431,7 @@ ion_secure_carveout_heap_create(struct ion_platform_heap *heap);
struct ion_page_pool {
	int high_count;
	int low_count;
	atomic_t count;
	bool cached;
	struct list_head high_items;
	struct list_head low_items;
@@ -425,10 +440,12 @@ struct ion_page_pool {
	gfp_t gfp_mask;
	unsigned int order;
	struct plist_node list;
	struct ion_heap heap;
};

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
					   bool cached);
void ion_page_pool_refill(struct ion_page_pool *pool);
void ion_page_pool_destroy(struct ion_page_pool *pool);
struct page *ion_page_pool_alloc(struct ion_page_pool *a, bool *from_pool);
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
@@ -468,4 +485,23 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);

int ion_query_heaps(struct ion_heap_query *query);

static __always_inline int get_pool_fillmark(struct ion_page_pool *pool)
{
	return ION_POOL_FILL_MARK / (PAGE_SIZE << pool->order);
}

static __always_inline int get_pool_lowmark(struct ion_page_pool *pool)
{
	return ION_POOL_LOW_MARK / (PAGE_SIZE << pool->order);
}

static __always_inline bool pool_count_below_lowmark(struct ion_page_pool *pool)
{
	return atomic_read(&pool->count) < get_pool_lowmark(pool);
}

static __always_inline bool pool_fillmark_reached(struct ion_page_pool *pool)
{
	return atomic_read(&pool->count) >= get_pool_fillmark(pool);
}
#endif /* _ION_H */
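The consumer of these helpers, the heap-side refill worker, is not part of this diff. As a minimal sketch, assuming a dedicated kthread per pool (the function and variable names below are hypothetical, not from this commit), the wakeup logic might look like:

/* Hypothetical sketch: a worker that sleeps until the allocation path
 * wakes it, then tops the pool back up to the fillmark. */
static int ion_pool_refill_worker(void *data)
{
	struct ion_page_pool *pool = data;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!pool_count_below_lowmark(pool))
			schedule();	/* wait for a wakeup from the alloc path */
		__set_current_state(TASK_RUNNING);
		if (pool_count_below_lowmark(pool))
			ion_page_pool_refill(pool);
	}
	return 0;
}

On the allocation side, the heap would wake the worker after taking pages out of the pool, e.g. with wake_up_process(refill_worker) whenever pool_count_below_lowmark(pool) turns true.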
+40 −3
@@ -12,6 +12,25 @@

#include "ion.h"

/* do a simple check to see if we are in any low memory situation */
static bool pool_refill_ok(struct ion_page_pool *pool)
{
	struct zonelist *zonelist;
	struct zoneref *z;
	struct zone *zone;
	int mark;
	int classzone_idx = (int)gfp_zone(pool->gfp_mask);

	zonelist = node_zonelist(numa_node_id(), pool->gfp_mask);
	for_each_zone_zonelist(zone, z, zonelist, classzone_idx) {
		mark = high_wmark_pages(zone);
		if (!zone_watermark_ok_safe(zone, 0, mark, classzone_idx))
			return false;
	}

	return true;
}

static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
	return alloc_pages(pool->gfp_mask, pool->order);
@@ -34,11 +53,30 @@ static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
		pool->low_count++;
	}

	atomic_inc(&pool->count);
	mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
			    (1 << (PAGE_SHIFT + pool->order)));
	mutex_unlock(&pool->mutex);
}

void ion_page_pool_refill(struct ion_page_pool *pool)
{
	struct page *page;
	gfp_t gfp_refill = (pool->gfp_mask | __GFP_RECLAIM) & ~__GFP_NORETRY;
	struct device *dev = pool->heap.priv;

	while (!pool_fillmark_reached(pool) && pool_refill_ok(pool)) {
		page = alloc_pages(gfp_refill, pool->order);
		if (!page)
			break;
		if (!pool->cached)
			ion_pages_sync_for_device(dev, page,
						  PAGE_SIZE << pool->order,
						  DMA_BIDIRECTIONAL);
		ion_page_pool_add(pool, page);
	}
}

static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
{
	struct page *page;
@@ -53,6 +91,7 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
		pool->low_count--;
	}

	atomic_dec(&pool->count);
	list_del(&page->lru);
	mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
			    -(1 << (PAGE_SHIFT + pool->order)));
@@ -165,12 +204,10 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
					   bool cached)
{
-	struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	struct ion_page_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return NULL;
-	pool->high_count = 0;
-	pool->low_count = 0;
	INIT_LIST_HEAD(&pool->low_items);
	INIT_LIST_HEAD(&pool->high_items);
	pool->gfp_mask = gfp_mask;
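One detail of ion_page_pool_refill() worth spelling out is the GFP mask adjustment. Pools on the hot allocation path are typically created with a quiet, fail-fast mask; the asynchronous refill can afford to block, so it re-enables direct reclaim and drops the fail-fast flag. A sketch, where the creation-time mask is an assumption typical of ION system heap pools rather than something quoted from this diff:

/* Assumed creation-time mask: quiet and fail-fast, since the hot
 * allocation path can fall back to a direct page allocation. */
gfp_t pool_gfp = GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

/* Refill-time mask, as computed above: __GFP_RECLAIM re-enables direct
 * reclaim, and clearing __GFP_NORETRY lets the allocator try harder,
 * which is acceptable in the asynchronous worker context. */
gfp_t gfp_refill = (pool_gfp | __GFP_RECLAIM) & ~__GFP_NORETRY;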