Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9cae2378 authored by Isaac J. Manjarres
Browse files

ion: Move the msm system heap source into its own files



The MSM implementation of the system heap currently lives
in the same source files as where the GKI system heap source
lives. This is problematic, as the MSM system heap needs to
co-exist with the GKI system heap. While they share a lot of
code, it is not possible for them to use the same source files,
as the same symbols would have to be available for GKI for the
system heap, as well as for the MSM system heap. Additionally,
the MSM system heap changes some of the signatures of the shared
functions, which makes it even more complicated for the two heaps
to share source files. Thus, in preparation to bring back the
GKI system heap, move the MSM system heap to its own files. This
change does not introduce any functional impact. Also, namespace
the page pool interface, as it has common global symbols with the
GKI ION page pool interface, which will lead to linking issues
with duplicate symbols.

Change-Id: I7cadb752add96d2f3a392240946c643c673711a1
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
parent c65a75e3
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ION_MSM_HEAPS) += msm_ion_heaps.o
msm_ion_heaps-objs += msm_ion.o msm_ion_dma_buf.o ion_page_pool.o \
		ion_system_heap.o ion_carveout_heap.o ion_system_secure_heap.o \
		ion_cma_heap.o ion_secure_util.o
msm_ion_heaps-objs += msm_ion.o msm_ion_dma_buf.o ion_msm_page_pool.o \
		ion_msm_system_heap.o ion_carveout_heap.o \
		ion_system_secure_heap.o ion_cma_heap.o ion_secure_util.o
+37 −31
Original line number Diff line number Diff line
@@ -11,22 +11,24 @@
#include <linux/sched/signal.h>

#include "msm_ion_priv.h"
#include "ion_page_pool.h"
#include "ion_msm_page_pool.h"

static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
static inline struct page
*ion_msm_page_pool_alloc_pages(struct ion_msm_page_pool *pool)
{
	if (fatal_signal_pending(current))
		return NULL;
	return alloc_pages(pool->gfp_mask, pool->order);
}

static void ion_page_pool_free_pages(struct ion_page_pool *pool,
static void ion_msm_page_pool_free_pages(struct ion_msm_page_pool *pool,
					 struct page *page)
{
	__free_pages(page, pool->order);
}

static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
static void ion_msm_page_pool_add(struct ion_msm_page_pool *pool,
				  struct page *page)
{
	mutex_lock(&pool->mutex);
	if (PageHighMem(page)) {
@@ -45,7 +47,7 @@ static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)

#ifdef CONFIG_ION_POOL_AUTO_REFILL
/* do a simple check to see if we are in any low memory situation */
static bool pool_refill_ok(struct ion_page_pool *pool)
static bool pool_refill_ok(struct ion_msm_page_pool *pool)
{
	struct zonelist *zonelist;
	struct zoneref *z;
@@ -79,7 +81,7 @@ static bool pool_refill_ok(struct ion_page_pool *pool)
	return true;
}

void ion_page_pool_refill(struct ion_page_pool *pool)
void ion_msm_page_pool_refill(struct ion_msm_page_pool *pool)
{
	struct page *page;
	gfp_t gfp_refill = (pool->gfp_mask | __GFP_RECLAIM) & ~__GFP_NORETRY;
@@ -97,12 +99,13 @@ void ion_page_pool_refill(struct ion_page_pool *pool)
			ion_pages_sync_for_device(dev, page,
						  PAGE_SIZE << pool->order,
						  DMA_BIDIRECTIONAL);
		ion_page_pool_add(pool, page);
		ion_msm_page_pool_add(pool, page);
	}
}
#endif /* CONFIG_ION_POOL_AUTO_REFILL */

static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
static struct page *ion_msm_page_pool_remove(struct ion_msm_page_pool *pool,
					     bool high)
{
	struct page *page;

@@ -123,7 +126,8 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
	return page;
}

struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)
struct page *ion_msm_page_pool_alloc(struct ion_msm_page_pool *pool,
				     bool *from_pool)
{
	struct page *page = NULL;

@@ -134,13 +138,13 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)

	if (*from_pool && mutex_trylock(&pool->mutex)) {
		if (pool->high_count)
			page = ion_page_pool_remove(pool, true);
			page = ion_msm_page_pool_remove(pool, true);
		else if (pool->low_count)
			page = ion_page_pool_remove(pool, false);
			page = ion_msm_page_pool_remove(pool, false);
		mutex_unlock(&pool->mutex);
	}
	if (!page) {
		page = ion_page_pool_alloc_pages(pool);
		page = ion_msm_page_pool_alloc_pages(pool);
		*from_pool = false;
	}

@@ -152,7 +156,7 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)
/*
 * Tries to allocate from only the specified Pool and returns NULL otherwise
 */
struct page *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool)
struct page *ion_msm_page_pool_alloc_pool_only(struct ion_msm_page_pool *pool)
{
	struct page *page = NULL;

@@ -161,9 +165,9 @@ struct page *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool)

	if (mutex_trylock(&pool->mutex)) {
		if (pool->high_count)
			page = ion_page_pool_remove(pool, true);
			page = ion_msm_page_pool_remove(pool, true);
		else if (pool->low_count)
			page = ion_page_pool_remove(pool, false);
			page = ion_msm_page_pool_remove(pool, false);
		mutex_unlock(&pool->mutex);
	}

@@ -172,17 +176,18 @@ struct page *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool)
	return page;
}

void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
void ion_msm_page_pool_free(struct ion_msm_page_pool *pool, struct page *page)
{
	ion_page_pool_add(pool, page);
	ion_msm_page_pool_add(pool, page);
}

void ion_page_pool_free_immediate(struct ion_page_pool *pool, struct page *page)
void ion_msm_page_pool_free_immediate(struct ion_msm_page_pool *pool,
				      struct page *page)
{
	ion_page_pool_free_pages(pool, page);
	ion_msm_page_pool_free_pages(pool, page);
}

int ion_page_pool_total(struct ion_page_pool *pool, bool high)
int ion_msm_page_pool_total(struct ion_msm_page_pool *pool, bool high)
{
	int count = pool->low_count;

@@ -192,7 +197,7 @@ int ion_page_pool_total(struct ion_page_pool *pool, bool high)
	return count << pool->order;
}

int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
int ion_msm_page_pool_shrink(struct ion_msm_page_pool *pool, gfp_t gfp_mask,
			     int nr_to_scan)
{
	int freed = 0;
@@ -204,32 +209,33 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
		high = !!(gfp_mask & __GFP_HIGHMEM);

	if (nr_to_scan == 0)
		return ion_page_pool_total(pool, high);
		return ion_msm_page_pool_total(pool, high);

	while (freed < nr_to_scan) {
		struct page *page;

		mutex_lock(&pool->mutex);
		if (pool->low_count) {
			page = ion_page_pool_remove(pool, false);
			page = ion_msm_page_pool_remove(pool, false);
		} else if (high && pool->high_count) {
			page = ion_page_pool_remove(pool, true);
			page = ion_msm_page_pool_remove(pool, true);
		} else {
			mutex_unlock(&pool->mutex);
			break;
		}
		mutex_unlock(&pool->mutex);
		ion_page_pool_free_pages(pool, page);
		ion_msm_page_pool_free_pages(pool, page);
		freed += (1 << pool->order);
	}

	return freed;
}

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
struct ion_msm_page_pool *ion_msm_page_pool_create(gfp_t gfp_mask,
						   unsigned int order,
						   bool cached)
{
	struct ion_page_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	struct ion_msm_page_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return NULL;
@@ -245,7 +251,7 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
	return pool;
}

void ion_page_pool_destroy(struct ion_page_pool *pool)
void ion_msm_page_pool_destroy(struct ion_msm_page_pool *pool)
{
	kfree(pool);
}
+33 −27
Original line number Diff line number Diff line
@@ -5,8 +5,8 @@
 * Copyright (C) 2011 Google, Inc.
 */

#ifndef _ION_PAGE_POOL_H
#define _ION_PAGE_POOL_H
#ifndef _ION_MSM_PAGE_POOL_H
#define _ION_MSM_PAGE_POOL_H

#include <linux/mm_types.h>
#include <linux/mutex.h>
@@ -35,7 +35,7 @@
 */

/**
 * struct ion_page_pool - pagepool struct
 * struct ion_msm_page_pool - pagepool struct
 * @high_count:		number of highmem items in the pool
 * @low_count:		number of lowmem items in the pool
 * @count:		total number of pages/items in the pool
@@ -56,7 +56,7 @@
 * been invalidated from the cache, provides a significant performance benefit
 * on many systems
 */
struct ion_page_pool {
struct ion_msm_page_pool {
	int high_count;
	int low_count;
	atomic_t count;
@@ -71,72 +71,78 @@ struct ion_page_pool {
	struct device *heap_dev;
};

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
struct ion_msm_page_pool *ion_msm_page_pool_create(gfp_t gfp_mask,
						   unsigned int order,
						   bool cached);
void ion_page_pool_destroy(struct ion_page_pool *pool);
struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool);
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
struct page *ion_page_pool_alloc_pool_only(struct ion_page_pool *a);
void ion_page_pool_free_immediate(struct ion_page_pool *pool,
void ion_msm_page_pool_destroy(struct ion_msm_page_pool *pool);
struct page *ion_msm_page_pool_alloc(struct ion_msm_page_pool *pool,
				     bool *from_pool);
void ion_msm_page_pool_free(struct ion_msm_page_pool *pool, struct page *page);
struct page *ion_msm_page_pool_alloc_pool_only(struct ion_msm_page_pool *a);
void ion_msm_page_pool_free_immediate(struct ion_msm_page_pool *pool,
				      struct page *page);
int ion_page_pool_total(struct ion_page_pool *pool, bool high);
int ion_msm_page_pool_total(struct ion_msm_page_pool *pool, bool high);
size_t ion_system_heap_secure_page_pool_total(struct ion_heap *heap, int vmid);

/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
/** ion_msm_page_pool_shrink - shrinks the size of the memory cached in the pool
 * @pool:		the pool
 * @gfp_mask:		the memory type to reclaim
 * @nr_to_scan:		number of items to shrink in pages
 *
 * returns the number of items freed in pages
 */
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
int ion_msm_page_pool_shrink(struct ion_msm_page_pool *pool, gfp_t gfp_mask,
			     int nr_to_scan);

#ifdef CONFIG_ION_POOL_AUTO_REFILL
void ion_page_pool_refill(struct ion_page_pool *pool);
void ion_msm_page_pool_refill(struct ion_msm_page_pool *pool);

static __always_inline int get_pool_fillmark(struct ion_page_pool *pool)
static __always_inline int get_pool_fillmark(struct ion_msm_page_pool *pool)
{
	return ION_POOL_FILL_MARK / (PAGE_SIZE << pool->order);
}

static __always_inline int get_pool_lowmark(struct ion_page_pool *pool)
static __always_inline int get_pool_lowmark(struct ion_msm_page_pool *pool)
{
	return ION_POOL_LOW_MARK / (PAGE_SIZE << pool->order);
}

static __always_inline bool pool_count_below_lowmark(struct ion_page_pool *pool)
static __always_inline bool
pool_count_below_lowmark(struct ion_msm_page_pool *pool)
{
	return atomic_read(&pool->count) < get_pool_lowmark(pool);
}

static __always_inline bool pool_fillmark_reached(struct ion_page_pool *pool)
static __always_inline bool
pool_fillmark_reached(struct ion_msm_page_pool *pool)
{
	return atomic_read(&pool->count) >= get_pool_fillmark(pool);
}
#else
static inline void ion_page_pool_refill(struct ion_page_pool *pool)
static inline void ion_msm_page_pool_refill(struct ion_msm_page_pool *pool)
{
}

static __always_inline int get_pool_fillmark(struct ion_page_pool *pool)
static __always_inline int get_pool_fillmark(struct ion_msm_page_pool *pool)
{
	return 0;
}

static __always_inline int get_pool_lowmark(struct ion_page_pool *pool)
static __always_inline int get_pool_lowmark(struct ion_msm_page_pool *pool)
{
	return 0;
}

static __always_inline bool pool_count_below_lowmark(struct ion_page_pool *pool)
static __always_inline bool
pool_count_below_lowmark(struct ion_msm_page_pool *pool)
{
	return false;
}

static __always_inline bool pool_fillmark_reached(struct ion_page_pool *pool)
static __always_inline bool
pool_fillmark_reached(struct ion_msm_page_pool *pool)
{
	return false;
}
#endif /* CONFIG_ION_POOL_AUTO_REFILL */
#endif /* _ION_PAGE_POOL_H */
#endif /* _ION_MSM_PAGE_POOL_H */
+76 −73
Original line number Diff line number Diff line
@@ -21,10 +21,9 @@
#include <uapi/linux/sched/types.h>
#include <linux/seq_file.h>
#include <soc/qcom/secure_buffer.h>
#include "ion_system_heap.h"
#include "ion_page_pool.h"
#include "ion_msm_system_heap.h"
#include "ion_msm_page_pool.h"
#include "msm_ion_priv.h"
#include "ion_system_heap.h"
#include "ion_system_secure_heap.h"
#include "ion_secure_util.h"

@@ -32,7 +31,7 @@ static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
				     __GFP_NORETRY) & ~__GFP_RECLAIM;
static gfp_t low_order_gfp_flags  = GFP_HIGHUSER | __GFP_ZERO;

bool pool_auto_refill_en  __read_mostly =
static bool pool_auto_refill_en  __read_mostly =
IS_ENABLED(CONFIG_ION_POOL_AUTO_REFILL);

int order_to_index(unsigned int order)
@@ -51,19 +50,19 @@ static inline unsigned int order_to_size(int order)
	return PAGE_SIZE << order;
}

int ion_heap_is_system_heap_type(enum ion_heap_type type)
static int ion_heap_is_system_heap_type(enum ion_heap_type type)
{
	return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM);
}

static struct page *alloc_buffer_page(struct ion_system_heap *sys_heap,
static struct page *alloc_buffer_page(struct ion_msm_system_heap *sys_heap,
				      struct ion_buffer *buffer,
				      unsigned long order,
				      bool *from_pool)
{
	int cached = (int)ion_buffer_cached(buffer);
	struct page *page;
	struct ion_page_pool *pool;
	struct ion_msm_page_pool *pool;
	int vmid = get_secure_vmid(buffer->flags);
	struct device *dev = sys_heap->heap.dev;
	int order_ind = order_to_index(order);
@@ -81,12 +80,12 @@ static struct page *alloc_buffer_page(struct ion_system_heap *sys_heap,
		if (!(*from_pool && pool_auto_refill_en))
			goto normal_alloc;

		page = ion_page_pool_alloc_pool_only(pool);
		page = ion_msm_page_pool_alloc_pool_only(pool);
		if (!IS_ERR(page))
			return page;

		pool = sys_heap->uncached_pools[order_ind];
		page = ion_page_pool_alloc_pool_only(pool);
		page = ion_msm_page_pool_alloc_pool_only(pool);
		if (IS_ERR(page)) {
			pool = sys_heap->secure_pools[vmid][order_ind];
			goto normal_alloc;
@@ -112,7 +111,7 @@ static struct page *alloc_buffer_page(struct ion_system_heap *sys_heap,
	}

normal_alloc:
	page = ion_page_pool_alloc(pool, from_pool);
	page = ion_msm_page_pool_alloc(pool, from_pool);

	if (pool_auto_refill_en && pool->order &&
	    pool_count_below_lowmark(pool) && vmid <= 0)
@@ -132,7 +131,7 @@ static struct page *alloc_buffer_page(struct ion_system_heap *sys_heap,
 * For secure pages that need to be freed and not added back to the pool; the
 *  hyp_unassign should be called before calling this function
 */
void free_buffer_page(struct ion_system_heap *heap,
void free_buffer_page(struct ion_msm_system_heap *heap,
		      struct ion_buffer *buffer, struct page *page,
		      unsigned int order)
{
@@ -140,7 +139,7 @@ void free_buffer_page(struct ion_system_heap *heap,
	int vmid = get_secure_vmid(buffer->flags);

	if (!(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)) {
		struct ion_page_pool *pool;
		struct ion_msm_page_pool *pool;

		if (vmid > 0)
			pool = heap->secure_pools[vmid][order_to_index(order)];
@@ -150,9 +149,9 @@ void free_buffer_page(struct ion_system_heap *heap,
			pool = heap->uncached_pools[order_to_index(order)];

		if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
			ion_page_pool_free_immediate(pool, page);
			ion_msm_page_pool_free_immediate(pool, page);
		else
			ion_page_pool_free(pool, page);
			ion_msm_page_pool_free(pool, page);

#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
		mod_node_page_state(page_pgdat(page), NR_UNRECLAIMABLE_PAGES,
@@ -169,7 +168,8 @@ void free_buffer_page(struct ion_system_heap *heap,
	}
}

static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
static struct
page_info *alloc_largest_available(struct ion_msm_system_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size,
				   unsigned int max_order)
@@ -205,7 +205,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
}

static struct page_info *
alloc_from_pool_preferred(struct ion_system_heap *heap,
alloc_from_pool_preferred(struct ion_msm_system_heap *heap,
			  struct ion_buffer *buffer,
			  unsigned long size,
			  unsigned int max_order)
@@ -274,12 +274,12 @@ static void process_info(struct page_info *info,
	kfree(info);
}

static int ion_system_heap_allocate(struct ion_heap *heap,
static int ion_msm_system_heap_allocate(struct ion_heap *heap,
					struct ion_buffer *buffer,
					unsigned long size,
					unsigned long flags)
{
	struct ion_system_heap *sys_heap = to_system_heap(heap);
	struct ion_msm_system_heap *sys_heap = to_msm_system_heap(heap);
	struct msm_ion_buf_lock_state *lock_state;
	struct sg_table *table;
	struct sg_table table_sync = {0};
@@ -441,10 +441,10 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
	return ret;
}

static void ion_system_heap_free(struct ion_buffer *buffer)
static void ion_msm_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = to_system_heap(heap);
	struct ion_msm_system_heap *sys_heap = to_msm_system_heap(heap);
	struct msm_ion_buf_lock_state *lock_state = buffer->priv_virt;
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
@@ -474,16 +474,16 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
	kfree(buffer->priv_virt);
}

static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
static int ion_msm_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
				      int nr_to_scan)
{
	struct ion_system_heap *sys_heap;
	struct ion_msm_system_heap *sys_heap;
	int nr_total = 0;
	int i, j, nr_freed = 0;
	int only_scan = 0;
	struct ion_page_pool *pool;
	struct ion_msm_page_pool *pool;

	sys_heap = to_system_heap(heap);
	sys_heap = to_msm_system_heap(heap);

	if (!nr_to_scan)
		only_scan = 1;
@@ -501,10 +501,12 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
		}

		pool = sys_heap->uncached_pools[i];
		nr_freed += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
		nr_freed +=
			ion_msm_page_pool_shrink(pool, gfp_mask, nr_to_scan);

		pool = sys_heap->cached_pools[i];
		nr_freed += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
		nr_freed +=
			ion_msm_page_pool_shrink(pool, gfp_mask, nr_to_scan);
		nr_total += nr_freed;

		if (!only_scan) {
@@ -519,23 +521,23 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.shrink = ion_system_heap_shrink,
	.allocate = ion_msm_system_heap_allocate,
	.free = ion_msm_system_heap_free,
	.shrink = ion_msm_system_heap_shrink,
};

static int ion_system_heap_debug_show(struct ion_heap *heap,
static int ion_msm_system_heap_debug_show(struct ion_heap *heap,
					  struct seq_file *s, void *unused)
{
	struct ion_system_heap *sys_heap;
	struct ion_msm_system_heap *sys_heap;
	bool use_seq = s;
	unsigned long uncached_total = 0;
	unsigned long cached_total = 0;
	unsigned long secure_total = 0;
	struct ion_page_pool *pool;
	struct ion_msm_page_pool *pool;
	int i, j;

	sys_heap = to_system_heap(heap);
	sys_heap = to_msm_system_heap(heap);
	for (i = 0; i < NUM_ORDERS; i++) {
		pool = sys_heap->uncached_pools[i];
		if (use_seq) {
@@ -624,40 +626,43 @@ static int ion_system_heap_debug_show(struct ion_heap *heap,
}

static struct msm_ion_heap_ops msm_system_heap_ops = {
	.debug_show = ion_system_heap_debug_show,
	.debug_show = ion_msm_system_heap_debug_show,
};

static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
static void ion_msm_system_heap_destroy_pools(struct ion_msm_page_pool **pools)
{
	int i;

	if (!pools)
		return;

	for (i = 0; i < NUM_ORDERS; i++)
		if (pools[i]) {
			ion_page_pool_destroy(pools[i]);
			ion_msm_page_pool_destroy(pools[i]);
			pools[i] = NULL;
		}
}

/**
 * ion_system_heap_create_pools - Creates pools for all orders
 * ion_msm_system_heap_create_pools - Creates pools for all orders
 *
 * If this fails you don't need to destroy any pools. It's all or
 * nothing. If it succeeds you'll eventually need to use
 * ion_system_heap_destroy_pools to destroy the pools.
 * ion_msm_system_heap_destroy_pools to destroy the pools.
 */
static int ion_system_heap_create_pools(struct ion_system_heap *sys_heap,
					struct ion_page_pool **pools,
					bool cached)
static int
ion_msm_system_heap_create_pools(struct ion_msm_system_heap *sys_heap,
				 struct ion_msm_page_pool **pools, bool cached)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		struct ion_page_pool *pool;
		struct ion_msm_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i])
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i], cached);
		pool = ion_msm_page_pool_create(gfp_flags, orders[i], cached);
		if (!pool)
			goto err_create_pool;
		pool->heap_dev = sys_heap->heap.dev;
@@ -666,19 +671,19 @@ static int ion_system_heap_create_pools(struct ion_system_heap *sys_heap,

	return 0;
err_create_pool:
	ion_system_heap_destroy_pools(pools);
	ion_msm_system_heap_destroy_pools(pools);
	return -ENOMEM;
}

static int ion_sys_heap_worker(void *data)
static int ion_msm_sys_heap_worker(void *data)
{
	struct ion_page_pool **pools = (struct ion_page_pool **)data;
	struct ion_msm_page_pool **pools = (struct ion_msm_page_pool **)data;
	int i;

	for (;;) {
		for (i = 0; i < NUM_ORDERS; i++) {
			if (pool_count_below_lowmark(pools[i]))
				ion_page_pool_refill(pools[i]);
				ion_msm_page_pool_refill(pools[i]);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		if (unlikely(kthread_should_stop())) {
@@ -693,7 +698,7 @@ static int ion_sys_heap_worker(void *data)
	return 0;
}

static struct task_struct *ion_create_kworker(struct ion_page_pool **pools,
static struct task_struct *ion_create_kworker(struct ion_msm_page_pool **pools,
					      bool cached)
{
	struct sched_attr attr = { 0 };
@@ -704,7 +709,7 @@ static struct task_struct *ion_create_kworker(struct ion_page_pool **pools,
	attr.sched_nice = ION_KTHREAD_NICE_VAL;
	buf = cached ? "cached" : "uncached";

	thread = kthread_run(ion_sys_heap_worker, pools,
	thread = kthread_run(ion_msm_sys_heap_worker, pools,
			     "ion-pool-%s-worker", buf);
	if (IS_ERR(thread)) {
		pr_err("%s: failed to create %s worker thread: %ld\n",
@@ -722,9 +727,9 @@ static struct task_struct *ion_create_kworker(struct ion_page_pool **pools,
	return thread;
}

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)
struct ion_heap *ion_msm_system_heap_create(struct ion_platform_heap *data)
{
	struct ion_system_heap *heap;
	struct ion_msm_system_heap *heap;
	int ret = -ENOMEM;
	int i;

@@ -739,16 +744,16 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)
	heap->heap.ion_heap.flags = ION_HEAP_FLAG_DEFER_FREE;

	for (i = 0; i < VMID_LAST; i++)
		if (is_secure_vmid_valid(i))
			if (ion_system_heap_create_pools(heap,
		if (is_secure_vmid_valid(i) &&
		    ion_msm_system_heap_create_pools(heap,
						     heap->secure_pools[i],
						     false))
			goto destroy_secure_pools;

	if (ion_system_heap_create_pools(heap, heap->uncached_pools, false))
	if (ion_msm_system_heap_create_pools(heap, heap->uncached_pools, false))
		goto destroy_secure_pools;

	if (ion_system_heap_create_pools(heap, heap->cached_pools, true))
	if (ion_msm_system_heap_create_pools(heap, heap->cached_pools, true))
		goto destroy_uncached_pools;

	if (pool_auto_refill_en) {
@@ -771,14 +776,12 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)

	return &heap->heap.ion_heap;
destroy_pools:
	ion_system_heap_destroy_pools(heap->cached_pools);
	ion_msm_system_heap_destroy_pools(heap->cached_pools);
destroy_uncached_pools:
	ion_system_heap_destroy_pools(heap->uncached_pools);
	ion_msm_system_heap_destroy_pools(heap->uncached_pools);
destroy_secure_pools:
	for (i = 0; i < VMID_LAST; i++) {
		if (heap->secure_pools[i])
			ion_system_heap_destroy_pools(heap->secure_pools[i]);
	}
	for (i = 0; i < VMID_LAST; i++)
		ion_msm_system_heap_destroy_pools(heap->secure_pools[i]);
	kfree(heap);
	return ERR_PTR(ret);
}
+11 −11
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */
#include <soc/qcom/secure_buffer.h>
#include "msm_ion_priv.h"

#ifndef _ION_SYSTEM_HEAP_H
#define _ION_SYSTEM_HEAP_H
#ifndef _ION_MSM_SYSTEM_HEAP_H
#define _ION_MSM_SYSTEM_HEAP_H

#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
#if defined(CONFIG_IOMMU_IO_PGTABLE_ARMV7S)
@@ -22,8 +22,8 @@ static const unsigned int orders[] = {0};

#define ION_KTHREAD_NICE_VAL 10

#define to_system_heap(_heap) \
	container_of(to_msm_ion_heap(_heap), struct ion_system_heap, heap)
#define to_msm_system_heap(_heap) \
	container_of(to_msm_ion_heap(_heap), struct ion_msm_system_heap, heap)

enum ion_kthread_type {
	ION_KTHREAD_UNCACHED,
@@ -31,13 +31,13 @@ enum ion_kthread_type {
	ION_MAX_NUM_KTHREADS
};

struct ion_system_heap {
struct ion_msm_system_heap {
	struct msm_ion_heap heap;
	struct ion_page_pool *uncached_pools[MAX_ORDER];
	struct ion_page_pool *cached_pools[MAX_ORDER];
	struct ion_msm_page_pool *uncached_pools[MAX_ORDER];
	struct ion_msm_page_pool *cached_pools[MAX_ORDER];
	/* worker threads to refill the pool */
	struct task_struct *kworker[ION_MAX_NUM_KTHREADS];
	struct ion_page_pool *secure_pools[VMID_LAST][MAX_ORDER];
	struct ion_msm_page_pool *secure_pools[VMID_LAST][MAX_ORDER];
	/* Prevents unnecessary page splitting */
	struct mutex split_page_mutex;
};
@@ -51,8 +51,8 @@ struct page_info {

int order_to_index(unsigned int order);

void free_buffer_page(struct ion_system_heap *heap,
void free_buffer_page(struct ion_msm_system_heap *heap,
		      struct ion_buffer *buffer, struct page *page,
		      unsigned int order);

#endif /* _ION_SYSTEM_HEAP_H */
#endif /* _ION_MSM_SYSTEM_HEAP_H */
Loading