Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8b21a243 authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "ion: Restore GKI system heap implementation"

parents 3a3cee06 e2bc9cef
Loading
Loading
Loading
Loading
+5 −3
Original line number Diff line number Diff line
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_sys_heap.o
ion_sys_heap-y := ion_system_heap.o ion_page_pool.o
obj-$(CONFIG_ION_MSM_HEAPS) += msm_ion_heaps.o
msm_ion_heaps-objs += msm_ion.o msm_ion_dma_buf.o ion_page_pool.o \
		ion_system_heap.o ion_carveout_heap.o ion_system_secure_heap.o \
		ion_cma_heap.o ion_secure_util.o
msm_ion_heaps-objs += msm_ion.o msm_ion_dma_buf.o ion_msm_page_pool.o \
		ion_msm_system_heap.o ion_carveout_heap.o \
		ion_system_secure_heap.o ion_cma_heap.o ion_secure_util.o
+257 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator page pool helpers
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/sched/signal.h>

#include "msm_ion_priv.h"
#include "ion_msm_page_pool.h"

/*
 * Fall back to the buddy allocator for @pool->order pages, bailing out
 * early (NULL) when the task has a fatal signal pending.
 */
static inline struct page *
ion_msm_page_pool_alloc_pages(struct ion_msm_page_pool *pool)
{
	return fatal_signal_pending(current) ?
		NULL : alloc_pages(pool->gfp_mask, pool->order);
}

/* Hand @page (an order @pool->order allocation) back to the buddy allocator. */
static void ion_msm_page_pool_free_pages(struct ion_msm_page_pool *pool,
					 struct page *page)
{
	__free_pages(page, pool->order);
}

/*
 * Stash @page in @pool, on the highmem or lowmem list as appropriate,
 * and account it as kernel-misc-reclaimable memory.
 */
static void ion_msm_page_pool_add(struct ion_msm_page_pool *pool,
				  struct page *page)
{
	struct list_head *items;

	mutex_lock(&pool->mutex);
	if (PageHighMem(page)) {
		items = &pool->high_items;
		pool->high_count++;
	} else {
		items = &pool->low_items;
		pool->low_count++;
	}
	list_add_tail(&page->lru, items);

	atomic_inc(&pool->count);
	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
			    (1 << pool->order));
	mutex_unlock(&pool->mutex);
}

#ifdef CONFIG_ION_POOL_AUTO_REFILL
/*
 * Do a simple check to see if we are in any low memory situation.
 *
 * Returns true only when (a) we are outside the defer window started the
 * last time a zone watermark was seen low, and (b) every zone usable for
 * this pool's gfp_mask can give up a pool->order allocation without
 * dropping below its high watermark.
 */
static bool pool_refill_ok(struct ion_msm_page_pool *pool)
{
	struct zonelist *zonelist;
	struct zoneref *z;
	struct zone *zone;
	int mark;
	enum zone_type classzone_idx = gfp_zone(pool->gfp_mask);
	s64 delta;

	/* check if we are within the refill defer window */
	delta = ktime_ms_delta(ktime_get(), pool->last_low_watermark_ktime);
	if (delta < ION_POOL_REFILL_DEFER_WINDOW_MS)
		return false;

	zonelist = node_zonelist(numa_node_id(), pool->gfp_mask);
	/*
	 * make sure that if we allocate a pool->order page from buddy,
	 * we don't push the zone watermarks below the high threshold.
	 * This makes sure there's no unwanted repetitive refilling and
	 * reclaiming of buddy pages on the pool.
	 */
	for_each_zone_zonelist(zone, z, zonelist, classzone_idx) {
		mark = high_wmark_pages(zone);
		mark += 1 << pool->order;
		if (!zone_watermark_ok_safe(zone, pool->order, mark,
					    classzone_idx)) {
			pool->last_low_watermark_ktime = ktime_get();
			return false;
		}
	}

	return true;
}

/*
 * Refill @pool up to its fill mark from the buddy allocator.
 *
 * The refill gfp mask allows reclaim (__GFP_RECLAIM, no __GFP_NORETRY),
 * and the loop stops early if pool_refill_ok() detects memory pressure or
 * an allocation fails.  Pages destined for an uncached pool are synced
 * for device use before being added.
 */
void ion_msm_page_pool_refill(struct ion_msm_page_pool *pool)
{
	struct page *page;
	gfp_t gfp_refill = (pool->gfp_mask | __GFP_RECLAIM) & ~__GFP_NORETRY;
	struct device *dev = pool->heap_dev;

	/* skip refilling order 0 pools */
	if (!pool->order)
		return;

	while (!pool_fillmark_reached(pool) && pool_refill_ok(pool)) {
		page = alloc_pages(gfp_refill, pool->order);
		if (!page)
			break;
		if (!pool->cached)
			ion_pages_sync_for_device(dev, page,
						  PAGE_SIZE << pool->order,
						  DMA_BIDIRECTIONAL);
		ion_msm_page_pool_add(pool, page);
	}
}
#endif /* CONFIG_ION_POOL_AUTO_REFILL */

/*
 * Detach the oldest page from the selected (@high or lowmem) list and
 * undo the pool accounting.  Caller must hold pool->mutex and must have
 * verified the corresponding count is non-zero.
 */
static struct page *ion_msm_page_pool_remove(struct ion_msm_page_pool *pool,
					     bool high)
{
	struct list_head *items = high ? &pool->high_items : &pool->low_items;
	struct page *page;

	if (high) {
		BUG_ON(!pool->high_count);
		pool->high_count--;
	} else {
		BUG_ON(!pool->low_count);
		pool->low_count--;
	}
	page = list_first_entry(items, struct page, lru);

	atomic_dec(&pool->count);
	list_del(&page->lru);
	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
					-(1 << pool->order));
	return page;
}

/*
 * Allocate a page, preferring the pool when the caller permits it.
 *
 * On return *@from_pool reports whether the page actually came from the
 * pool; it is cleared whenever we fell through to the buddy allocator.
 * Returns ERR_PTR(-EINTR) on a pending fatal signal and
 * ERR_PTR(-ENOMEM) when no page could be obtained.
 */
struct page *ion_msm_page_pool_alloc(struct ion_msm_page_pool *pool,
				     bool *from_pool)
{
	struct page *page = NULL;

	BUG_ON(!pool);

	if (fatal_signal_pending(current))
		return ERR_PTR(-EINTR);

	/* Dip into the pool only if asked to and the lock is uncontended. */
	if (*from_pool && mutex_trylock(&pool->mutex)) {
		if (pool->high_count)
			page = ion_msm_page_pool_remove(pool, true);
		else if (pool->low_count)
			page = ion_msm_page_pool_remove(pool, false);
		mutex_unlock(&pool->mutex);
	}

	if (!page) {
		*from_pool = false;
		page = ion_msm_page_pool_alloc_pages(pool);
	}

	return page ? page : ERR_PTR(-ENOMEM);
}

/*
 * Tries to allocate from only the specified pool (never the buddy
 * allocator).  Returns ERR_PTR(-EINVAL) for a NULL pool and
 * ERR_PTR(-ENOMEM) when the pool is empty or its lock is contended.
 */
struct page *ion_msm_page_pool_alloc_pool_only(struct ion_msm_page_pool *pool)
{
	struct page *page = NULL;

	if (!pool)
		return ERR_PTR(-EINVAL);

	/* trylock: this path must never block on the pool lock */
	if (mutex_trylock(&pool->mutex)) {
		if (pool->high_count)
			page = ion_msm_page_pool_remove(pool, true);
		else if (pool->low_count)
			page = ion_msm_page_pool_remove(pool, false);
		mutex_unlock(&pool->mutex);
	}

	if (!page)
		return ERR_PTR(-ENOMEM);
	return page;
}

/* Default free path: cache @page in @pool for later reuse. */
void ion_msm_page_pool_free(struct ion_msm_page_pool *pool, struct page *page)
{
	ion_msm_page_pool_add(pool, page);
}

/* Bypass the pool and release @page straight to the buddy allocator. */
void ion_msm_page_pool_free_immediate(struct ion_msm_page_pool *pool,
				      struct page *page)
{
	ion_msm_page_pool_free_pages(pool, page);
}

/*
 * Number of pages held in the pool, in order-0 units; counts the highmem
 * items only when @high is set.  Reads the counts locklessly, so the
 * result is an estimate under concurrent add/remove.
 */
int ion_msm_page_pool_total(struct ion_msm_page_pool *pool, bool high)
{
	int items = high ? pool->low_count + pool->high_count
			 : pool->low_count;

	return items << pool->order;
}

/*
 * Release up to @nr_to_scan order-0 pages' worth of pool memory back to
 * the buddy allocator; with @nr_to_scan == 0, just report the pool size.
 * Highmem items are eligible for kswapd or __GFP_HIGHMEM reclaimers;
 * lowmem items are drained first.  Returns pages freed (or the total).
 */
int ion_msm_page_pool_shrink(struct ion_msm_page_pool *pool, gfp_t gfp_mask,
			     int nr_to_scan)
{
	bool high = current_is_kswapd() || !!(gfp_mask & __GFP_HIGHMEM);
	int freed;

	if (nr_to_scan == 0)
		return ion_msm_page_pool_total(pool, high);

	for (freed = 0; freed < nr_to_scan; freed += 1 << pool->order) {
		struct page *page = NULL;

		/* hold the lock only while unlinking, not while freeing */
		mutex_lock(&pool->mutex);
		if (pool->low_count)
			page = ion_msm_page_pool_remove(pool, false);
		else if (high && pool->high_count)
			page = ion_msm_page_pool_remove(pool, true);
		mutex_unlock(&pool->mutex);

		if (!page)
			break;
		ion_msm_page_pool_free_pages(pool, page);
	}

	return freed;
}

/*
 * Allocate and initialize an empty page pool for @order pages allocated
 * with @gfp_mask.  Returns NULL on allocation failure.  kzalloc leaves
 * the counts, lists' backing fields and heap_dev zeroed.
 */
struct ion_msm_page_pool *ion_msm_page_pool_create(gfp_t gfp_mask,
						   unsigned int order,
						   bool cached)
{
	struct ion_msm_page_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	INIT_LIST_HEAD(&pool->high_items);
	INIT_LIST_HEAD(&pool->low_items);
	mutex_init(&pool->mutex);
	plist_node_init(&pool->list, order);
	pool->gfp_mask = gfp_mask;
	pool->order = order;
	pool->cached = cached;

	return pool;
}

/*
 * Free the pool bookkeeping structure itself.  Note: does not free pages
 * still linked on the item lists — drain the pool first (e.g. via
 * ion_msm_page_pool_shrink()) or those pages are leaked.
 */
void ion_msm_page_pool_destroy(struct ion_msm_page_pool *pool)
{
	kfree(pool);
}
+148 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ION Page Pool kernel interface header
 *
 * Copyright (C) 2011 Google, Inc.
 */

#ifndef _ION_MSM_PAGE_POOL_H
#define _ION_MSM_PAGE_POOL_H

#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>
#include <linux/types.h>

/* ION page pool marks in bytes */
#ifdef CONFIG_ION_POOL_AUTO_REFILL
#define ION_POOL_FILL_MARK (CONFIG_ION_POOL_FILL_MARK * SZ_1M)
#define POOL_LOW_MARK_PERCENT	40UL
/* a pool below this is considered low (see pool_count_below_lowmark()) */
#define ION_POOL_LOW_MARK ((ION_POOL_FILL_MARK * POOL_LOW_MARK_PERCENT) / 100)
#else
/* auto-refill compiled out: both marks collapse to zero */
#define ION_POOL_FILL_MARK 0UL
#define ION_POOL_LOW_MARK 0UL
#endif

/* if low watermark of zones have reached, defer the refill in this window */
#define ION_POOL_REFILL_DEFER_WINDOW_MS	10

/**
 * functions for creating and destroying a heap pool -- allows you
 * to keep a pool of pre allocated memory to use from your heap.  Keeping
 * a pool of memory that is ready for dma, ie any cached mapping have been
 * invalidated from the cache, provides a significant performance benefit on
 * many systems
 */

/**
 * struct ion_msm_page_pool - pagepool struct
 * @high_count:		number of highmem items in the pool
 * @low_count:		number of lowmem items in the pool
 * @count:		total number of pages/items in the pool
 * @high_items:		list of highmem items
 * @low_items:		list of lowmem items
 * @last_low_watermark_ktime: most recent time at which the zone watermarks were
 *			low
 * @mutex:		lock protecting this struct, in particular the counts
 *			and item lists
 * @gfp_mask:		gfp_mask to use from alloc
 * @order:		order of pages in the pool
 * @list:		plist node for list of pools
 * @cached:		it's cached pool or not
 * @heap_dev:		device for the ion heap associated with this pool
 *
 * Allows you to keep a pool of pre allocated pages to use from your heap.
 * Keeping a pool of pages that is ready for dma, ie any cached mapping have
 * been invalidated from the cache, provides a significant performance benefit
 * on many systems
 */
struct ion_msm_page_pool {
	int high_count;
	int low_count;
	atomic_t count;
	struct list_head high_items;
	struct list_head low_items;
	ktime_t last_low_watermark_ktime;
	struct mutex mutex;
	gfp_t gfp_mask;
	unsigned int order;
	struct plist_node list;
	bool cached;
	struct device *heap_dev;
};

/* Pool lifetime and page alloc/free entry points (see ion_msm_page_pool.c). */
struct ion_msm_page_pool *ion_msm_page_pool_create(gfp_t gfp_mask,
						   unsigned int order,
						   bool cached);
void ion_msm_page_pool_destroy(struct ion_msm_page_pool *pool);
struct page *ion_msm_page_pool_alloc(struct ion_msm_page_pool *pool,
				     bool *from_pool);
void ion_msm_page_pool_free(struct ion_msm_page_pool *pool, struct page *page);
struct page *ion_msm_page_pool_alloc_pool_only(struct ion_msm_page_pool *pool);
void ion_msm_page_pool_free_immediate(struct ion_msm_page_pool *pool,
				      struct page *page);
int ion_msm_page_pool_total(struct ion_msm_page_pool *pool, bool high);
size_t ion_system_heap_secure_page_pool_total(struct ion_heap *heap, int vmid);

/**
 * ion_msm_page_pool_shrink - shrinks the size of the memory cached in the pool
 * @pool:		the pool
 * @gfp_mask:		the memory type to reclaim
 * @nr_to_scan:		number of items to shrink in pages
 *
 * returns the number of items freed in pages
 */
int ion_msm_page_pool_shrink(struct ion_msm_page_pool *pool, gfp_t gfp_mask,
			     int nr_to_scan);

#ifdef CONFIG_ION_POOL_AUTO_REFILL
void ion_msm_page_pool_refill(struct ion_msm_page_pool *pool);

/* Fill mark for this pool, converted from bytes to pool-order allocations. */
static __always_inline int get_pool_fillmark(struct ion_msm_page_pool *pool)
{
	return ION_POOL_FILL_MARK / (PAGE_SIZE << pool->order);
}

/* Low mark for this pool, converted from bytes to pool-order allocations. */
static __always_inline int get_pool_lowmark(struct ion_msm_page_pool *pool)
{
	return ION_POOL_LOW_MARK / (PAGE_SIZE << pool->order);
}

/* True when the pool has drained below its low mark. */
static __always_inline bool
pool_count_below_lowmark(struct ion_msm_page_pool *pool)
{
	return atomic_read(&pool->count) < get_pool_lowmark(pool);
}

/* True once the pool holds at least its fill mark worth of items. */
static __always_inline bool
pool_fillmark_reached(struct ion_msm_page_pool *pool)
{
	return atomic_read(&pool->count) >= get_pool_fillmark(pool);
}
#else
/* Auto-refill compiled out: refill is a no-op and the marks never trigger. */
static inline void ion_msm_page_pool_refill(struct ion_msm_page_pool *pool)
{
}

static __always_inline int get_pool_fillmark(struct ion_msm_page_pool *pool)
{
	return 0;
}

static __always_inline int get_pool_lowmark(struct ion_msm_page_pool *pool)
{
	return 0;
}

/* Never low: no autonomous refilling to drive. */
static __always_inline bool
pool_count_below_lowmark(struct ion_msm_page_pool *pool)
{
	return false;
}

/* Never "full": any refill loop keyed on this exits immediately. */
static __always_inline bool
pool_fillmark_reached(struct ion_msm_page_pool *pool)
{
	return false;
}
#endif /* CONFIG_ION_POOL_AUTO_REFILL */
#endif /* _ION_MSM_PAGE_POOL_H */
+789 −0

File added.

Preview size limit exceeded, changes collapsed.

+11 −11
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */
#include <soc/qcom/secure_buffer.h>
#include "msm_ion_priv.h"

#ifndef _ION_SYSTEM_HEAP_H
#define _ION_SYSTEM_HEAP_H
#ifndef _ION_MSM_SYSTEM_HEAP_H
#define _ION_MSM_SYSTEM_HEAP_H

#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
#if defined(CONFIG_IOMMU_IO_PGTABLE_ARMV7S)
@@ -22,8 +22,8 @@ static const unsigned int orders[] = {0};

#define ION_KTHREAD_NICE_VAL 10

#define to_system_heap(_heap) \
	container_of(to_msm_ion_heap(_heap), struct ion_system_heap, heap)
#define to_msm_system_heap(_heap) \
	container_of(to_msm_ion_heap(_heap), struct ion_msm_system_heap, heap)

enum ion_kthread_type {
	ION_KTHREAD_UNCACHED,
@@ -31,13 +31,13 @@ enum ion_kthread_type {
	ION_MAX_NUM_KTHREADS
};

struct ion_system_heap {
struct ion_msm_system_heap {
	struct msm_ion_heap heap;
	struct ion_page_pool *uncached_pools[MAX_ORDER];
	struct ion_page_pool *cached_pools[MAX_ORDER];
	struct ion_msm_page_pool *uncached_pools[MAX_ORDER];
	struct ion_msm_page_pool *cached_pools[MAX_ORDER];
	/* worker threads to refill the pool */
	struct task_struct *kworker[ION_MAX_NUM_KTHREADS];
	struct ion_page_pool *secure_pools[VMID_LAST][MAX_ORDER];
	struct ion_msm_page_pool *secure_pools[VMID_LAST][MAX_ORDER];
	/* Prevents unnecessary page splitting */
	struct mutex split_page_mutex;
};
@@ -51,8 +51,8 @@ struct page_info {

int order_to_index(unsigned int order);

void free_buffer_page(struct ion_system_heap *heap,
void free_buffer_page(struct ion_msm_system_heap *heap,
		      struct ion_buffer *buffer, struct page *page,
		      unsigned int order);

#endif /* _ION_SYSTEM_HEAP_H */
#endif /* _ION_MSM_SYSTEM_HEAP_H */
Loading