Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 85eb1252 authored by Jordan Crouse's avatar Jordan Crouse
Browse files

msm: kgsl: Move kgsl_pool_allocate_pages()



For reasons unknown, while most of the pool code properly lived in
kgsl_pool.c, kgsl_pool_allocate_pages() was in kgsl_sharedmem.c and
depended on several other exported functions just to make it work.  Move
the function and fix up the helper functions.

Change-Id: Ic0dedbad7a7c06adce4f87f7915853eb44a4151c
Signed-off-by: default avatarJordan Crouse <jcrouse@codeaurora.org>
parent 505c34f3
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
#include "kgsl_debugfs.h"
#include "kgsl_device.h"
#include "kgsl_mmu.h"
#include "kgsl_pool.h"
#include "kgsl_sync.h"
#include "kgsl_trace.h"

+130 −91
Original line number Diff line number Diff line
@@ -39,19 +39,28 @@ static struct kgsl_page_pool kgsl_pools[KGSL_MAX_POOLS];
static int kgsl_num_pools;
static int kgsl_pool_max_pages;

static void kgsl_pool_free_page(struct page *page);

/* Returns KGSL pool corresponding to input page order*/
static struct kgsl_page_pool *
_kgsl_get_pool_from_order(unsigned int order)
/* Return the index of the pool for the specified order */
static int kgsl_get_pool_index(int order)
{
	int i;

	for (i = 0; i < kgsl_num_pools; i++) {
		if (kgsl_pools[i].pool_order == order)
			return &kgsl_pools[i];
			return i;
	}

	return NULL;
	return -EINVAL;
}

/* Returns KGSL pool corresponding to input page order*/
static struct kgsl_page_pool *
_kgsl_get_pool_from_order(int order)
{
	int index = kgsl_get_pool_index(order);

	return index >= 0 ? &kgsl_pools[index] : NULL;
}

/* Map the page into kernel and zero it out */
@@ -102,27 +111,20 @@ _kgsl_pool_get_page(struct kgsl_page_pool *pool)
	return p;
}

/* Returns the number of pages in specified pool */
static int
kgsl_pool_size(struct kgsl_page_pool *kgsl_pool)
{
	int size;

	spin_lock(&kgsl_pool->list_lock);
	size = kgsl_pool->page_count * (1 << kgsl_pool->pool_order);
	spin_unlock(&kgsl_pool->list_lock);

	return size;
}

/* Returns the number of pages in all kgsl page pools */
static int kgsl_pool_size_total(void)
{
	int i;
	int total = 0;

	for (i = 0; i < kgsl_num_pools; i++)
		total += kgsl_pool_size(&kgsl_pools[i]);
	for (i = 0; i < kgsl_num_pools; i++) {
		struct kgsl_page_pool *kgsl_pool = &kgsl_pools[i];

		spin_lock(&kgsl_pool->list_lock);
		total += kgsl_pool->page_count * (1 << kgsl_pool->pool_order);
		spin_unlock(&kgsl_pool->list_lock);
	}

	return total;
}

@@ -142,13 +144,11 @@ _kgsl_pool_shrink(struct kgsl_page_pool *pool, int num_pages)
	for (j = 0; j < num_pages >> pool->pool_order; j++) {
		struct page *page = _kgsl_pool_get_page(pool);

		if (page != NULL) {
		if (!page)
			break;

		__free_pages(page, pool->pool_order);
		pcount += (1 << pool->pool_order);
		} else {
			/* Break as this pool is empty */
			break;
		}
	}

	return pcount;
@@ -238,19 +238,11 @@ void kgsl_pool_free_sgt(struct sg_table *sgt)
	}
}

/**
 * kgsl_pool_free_pages() - Free pages in the pages array
 * @pages: pointer of the pages array
 *
 * Free the pages by collapsing any physical adjacent pages.
 * Pages are added back to the pool, if pool has sufficient space
 * otherwise they are given back to system.
 */
void kgsl_pool_free_pages(struct page **pages, unsigned int pcount)
{
	int i;

	if (pages == NULL || pcount == 0)
	if (!pages)
		return;

	if (WARN(!kern_addr_valid((unsigned long)pages),
@@ -271,16 +263,6 @@ void kgsl_pool_free_pages(struct page **pages, unsigned int pcount)
		kgsl_pool_free_page(p);
	}
}
/* Return the index of the pool matching @order, or -ENOMEM if none exists */
static int kgsl_pool_idx_lookup(unsigned int order)
{
	int idx = 0;

	while (idx < kgsl_num_pools) {
		if (kgsl_pools[idx].pool_order == order)
			return idx;
		idx++;
	}

	return -ENOMEM;
}

static int kgsl_pool_get_retry_order(unsigned int order)
{
@@ -293,9 +275,9 @@ static int kgsl_pool_get_retry_order(unsigned int order)
	return 0;
}

static unsigned int kgsl_gfp_mask(unsigned int page_order)
static gfp_t kgsl_gfp_mask(int page_order)
{
	unsigned int gfp_mask = __GFP_HIGHMEM;
	gfp_t gfp_mask = __GFP_HIGHMEM;

	if (page_order > 0) {
		gfp_mask |= __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
@@ -309,7 +291,36 @@ static unsigned int kgsl_gfp_mask(unsigned int page_order)
	return gfp_mask;
}

/**
/*
 * Return true if the pool of specified page size is supported
 * or no pools are supported otherwise return false.
 */
/*
 * Return true if a pool serving the given page size exists, or if no
 * pools are configured at all; otherwise return false.
 */
static bool kgsl_pool_available(unsigned int page_size)
{
	if (!kgsl_num_pools)
		return true;

	return kgsl_get_pool_index(ilog2(page_size >> PAGE_SHIFT)) >= 0;
}

/*
 * Pick the largest page size that fits the requested size and alignment
 * and is backed by an available pool; fall back to PAGE_SIZE.
 */
static int kgsl_get_page_size(size_t size, unsigned int align)
{
	if (align >= ilog2(SZ_1M) && size >= SZ_1M &&
		kgsl_pool_available(SZ_1M))
		return SZ_1M;

	if (align >= ilog2(SZ_64K) && size >= SZ_64K &&
		kgsl_pool_available(SZ_64K))
		return SZ_64K;

	if (align >= ilog2(SZ_8K) && size >= SZ_8K &&
		kgsl_pool_available(SZ_8K))
		return SZ_8K;

	return PAGE_SIZE;
}

/*
 * kgsl_pool_alloc_page() - Allocate a page of requested size
 * @page_size: Size of the page to be allocated
 * @pages: pointer to hold list of pages, should be big enough to hold
@@ -318,7 +329,7 @@ static unsigned int kgsl_gfp_mask(unsigned int page_order)
 *
 * Return total page count on success and negative value on failure
 */
int kgsl_pool_alloc_page(int *page_size, struct page **pages,
static int kgsl_pool_alloc_page(int *page_size, struct page **pages,
			unsigned int pages_len, unsigned int *align)
{
	int j;
@@ -372,7 +383,7 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
		}
	}

	pool_idx = kgsl_pool_idx_lookup(order);
	pool_idx = kgsl_get_pool_index(order);
	page = _kgsl_pool_get_page(pool);

	/* Allocate a new page if not allocated from pool */
@@ -413,13 +424,65 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
	return pcount;

eagain:
	*page_size = kgsl_get_page_size(size,
			ilog2(size));
	*page_size = kgsl_get_page_size(size, ilog2(size));
	*align = ilog2(*page_size);
	return -EAGAIN;
}

void kgsl_pool_free_page(struct page *page)
/*
 * kgsl_pool_alloc_pages() - Allocate an array of pages from the pools
 * @size: Total size of the allocation in bytes (page aligned)
 * @pages: Pointer to be filled in with the newly allocated page array
 *
 * Return: number of entries written to the array on success, or a
 * negative error code on failure.
 */
int kgsl_pool_alloc_pages(u64 size, struct page ***pages)
{
	int count = 0;
	int npages = size >> PAGE_SHIFT;
	struct page **local = kvcalloc(npages, sizeof(*local), GFP_KERNEL);
	u32 page_size, align;
	u64 len = size;

	if (!local)
		return -ENOMEM;

	/* Start with 1MB alignment to get the biggest page we can */
	align = ilog2(SZ_1M);

	page_size = kgsl_get_page_size(len, align);

	while (len) {
		int ret = kgsl_pool_alloc_page(&page_size, &local[count],
			npages, &align);

		/* -EAGAIN means retry with the (downgraded) page_size/align */
		if (ret == -EAGAIN)
			continue;
		else if (ret <= 0) {
			int i;

			/*
			 * Unwind: free what was allocated so far, stepping by
			 * the compound page size of each entry
			 */
			for (i = 0; i < count; ) {
				int n = 1 << compound_order(local[i]);

				kgsl_pool_free_page(local[i]);
				i += n;
			}
			kvfree(local);

			if (!kgsl_sharedmem_get_noretry())
				pr_err_ratelimited("kgsl: out of memory: only allocated %lldKb of %lldKb requested\n",
					(size - len) >> 10, size >> 10);

			/*
			 * Always return here: 'local' has been freed, so
			 * continuing the loop would be a use-after-free.
			 * Previously -ENOMEM was only returned when the
			 * noretry flag was clear, letting a noretry failure
			 * fall through into the freed array.
			 */
			return -ENOMEM;
		}

		count += ret;
		npages -= ret;
		len -= page_size;

		page_size = kgsl_get_page_size(len, align);
	}

	*pages = local;

	return count;
}

static void kgsl_pool_free_page(struct page *page)
{
	struct kgsl_page_pool *pool;
	int page_order;
@@ -445,24 +508,6 @@ void kgsl_pool_free_page(struct page *page)
	__free_pages(page, page_order);
}

/*
 * Return true if a pool serving the specified page size exists, or if
 * no pools are configured at all; otherwise return false.
 */
bool kgsl_pool_avaialable(int page_size)
{
	int order = ilog2(page_size >> PAGE_SHIFT);
	int i = 0;

	if (!kgsl_num_pools)
		return true;

	while (i < kgsl_num_pools) {
		if (kgsl_pools[i].pool_order == order)
			return true;
		i++;
	}

	return false;
}

static void kgsl_pool_reserve_pages(void)
{
	int i, j;
@@ -517,12 +562,9 @@ static struct shrinker kgsl_pool_shrinker = {
static void kgsl_pool_config(unsigned int order, unsigned int reserved_pages,
		bool allocation_allowed)
{
#ifdef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
	if (order > 0) {
		pr_err("kgsl: pool order:%d not supprted\n", order);
	if (kgsl_num_pools == KGSL_MAX_POOLS)
		return;
	}
#endif

	if ((order > KGSL_MAX_POOL_ORDER) ||
			(reserved_pages > KGSL_MAX_RESERVED_PAGES))
		return;
@@ -538,21 +580,15 @@ static void kgsl_pool_config(unsigned int order, unsigned int reserved_pages,
static void kgsl_of_parse_mempools(struct device_node *node)
{
	struct device_node *child;
	unsigned int page_size, reserved_pages = 0;
	unsigned int page_size;
	bool allocation_allowed;

	for_each_child_of_node(node, child) {
		unsigned int index;

		if (of_property_read_u32(child, "reg", &index))
			return;

		if (index >= KGSL_MAX_POOLS)
			continue;
		unsigned int reserved_pages = 0;

		if (of_property_read_u32(child, "qcom,mempool-page-size",
					&page_size))
			return;
			continue;

		of_property_read_u32(child, "qcom,mempool-reserved",
				&reserved_pages);
@@ -570,12 +606,15 @@ static void kgsl_of_get_mempools(struct device_node *parent)
	struct device_node *node;

	node = of_find_compatible_node(parent, NULL, "qcom,gpu-mempools");
	if (node != NULL) {
	if (!node)
		return;

	/* Get Max pages limit for mempool */
	of_property_read_u32(node, "qcom,mempool-max-pages",
			&kgsl_pool_max_pages);
	kgsl_of_parse_mempools(node);
	}

	of_node_put(node);
}

void kgsl_init_page_pools(struct platform_device *pdev)
+21 −4
Original line number Diff line number Diff line
@@ -6,12 +6,29 @@
#define __KGSL_POOL_H

void kgsl_pool_free_sgt(struct sg_table *sgt);

/**
 * kgsl_pool_alloc_pages - Allocate an array of pages from the pool
 * @size: Size of the allocation
 * @pages: Pointer to an array of pages
 *
 * Allocate a list of pages and store it in the pointer pointed to by @pages.
 * Return: The number of entries in the array pointed to by @pages or negative
 * on error.
 */
int kgsl_pool_alloc_pages(u64 size, struct page ***pages);

/**
 * kgsl_pool_free_pages - Free pages in a pages array
 * @pages: pointer to an array of page structs
 * @page_count: Number of entries in @pages
 *
 * Free the pages by collapsing any physical adjacent pages.
 * Pages are added back to the pool, if pool has sufficient space
 * otherwise they are given back to system.
 */
void kgsl_pool_free_pages(struct page **pages, unsigned int page_count);
void kgsl_init_page_pools(struct platform_device *pdev);
void kgsl_exit_page_pools(void);
int kgsl_pool_alloc_page(int *page_size, struct page **pages,
			unsigned int pages_len, unsigned int *align);
void kgsl_pool_free_page(struct page *p);
bool kgsl_pool_avaialable(int size);
#endif /* __KGSL_POOL_H */
+7 −59
Original line number Diff line number Diff line
@@ -10,6 +10,7 @@
#include <linux/random.h>

#include "kgsl_device.h"
#include "kgsl_pool.h"
#include "kgsl_sharedmem.h"

/*
@@ -966,59 +967,6 @@ int kgsl_memdesc_sg_dma(struct kgsl_memdesc *memdesc,
	return 0;
}

/*
 * Allocate @size bytes worth of pages from the kgsl pools.  On success
 * returns a kvcalloc'd array of page pointers and stores the entry count
 * in @pages_count; on failure returns an ERR_PTR.  The caller owns the
 * returned array (freed with kvfree) and the pages within it.
 */
static struct page **kgsl_pool_alloc_pages(u64 size, u32 *pages_count)
{
	int count = 0;
	int npages = size >> PAGE_SHIFT;
	struct page **pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL);
	u32 page_size;
	u64 len = size;
	u32 align = ilog2(SZ_1M);

	if (!pages)
		return ERR_PTR(-ENOMEM);

	/*
	 * Start with an 1M alignment to get the largest supported pagesize we
	 * can
	 */
	page_size = kgsl_get_page_size(len, align);

	while (len) {
		int ret = kgsl_pool_alloc_page(&page_size, &pages[count],
			npages, &align);

		/* -EAGAIN: page_size/align were downgraded; retry as-is */
		if (ret == -EAGAIN)
			continue;
		else if (ret <= 0) {
			int i;

			/*
			 * Unwind: free everything allocated so far, stepping
			 * by each entry's compound page size
			 */
			for (i = 0; i < count; ) {
				int n = 1 << compound_order(pages[i]);

				kgsl_pool_free_page(pages[i]);
				i += n;
			}
			kvfree(pages);

			/* With noretry set, failure is expected; stay quiet */
			if (!sharedmem_noretry_flag)
				pr_err_ratelimited("kgsl: out of memory: only allocated %lldKb of %lldKb requested\n",
					(size - len) >> 10, size >> 10);

			return ERR_PTR(-ENOMEM);
		}

		count += ret;
		npages -= ret;
		len -= page_size;

		page_size = kgsl_get_page_size(len, align);
	}

	*pages_count = count;
	return pages;
}

static void _kgsl_contiguous_free(struct kgsl_memdesc *memdesc)
{
	dma_free_attrs(memdesc->dev, memdesc->size,
@@ -1126,10 +1074,10 @@ static int kgsl_alloc_secure_pages(struct kgsl_device *device,
	memdesc->priv |= priv;

	memdesc->ops = &kgsl_secure_pool_ops;
	pages = kgsl_pool_alloc_pages(size, &count);
	count = kgsl_pool_alloc_pages(size, &pages);

	if (IS_ERR(pages))
		return PTR_ERR(pages);
	if (count < 0)
		return count;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
@@ -1181,10 +1129,10 @@ static int kgsl_alloc_pages(struct kgsl_device *device,
	memdesc->priv |= priv;

	memdesc->ops = &kgsl_pool_ops;
	pages = kgsl_pool_alloc_pages(size, &count);
	count = kgsl_pool_alloc_pages(size, &pages);

	if (IS_ERR(pages))
		return PTR_ERR(pages);
	if (count < 0)
		return count;

	memdesc->pages = pages;
	memdesc->size = size;
+0 −31
Original line number Diff line number Diff line
@@ -350,35 +350,4 @@ static inline void kgsl_free_sgt(struct sg_table *sgt)
	}
}

#include "kgsl_pool.h"

/**
 * kgsl_get_page_size() - Get supported pagesize
 * @size: Size of the page
 * @align: Desired alignment of the size
 *
 * Return supported pagesize
 */
#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
static inline int kgsl_get_page_size(size_t size, unsigned int align)
{
	/* Prefer the largest pool-backed page that fits size and alignment */
	if (align >= ilog2(SZ_1M) && size >= SZ_1M &&
		kgsl_pool_avaialable(SZ_1M))
		return SZ_1M;

	if (align >= ilog2(SZ_64K) && size >= SZ_64K &&
		kgsl_pool_avaialable(SZ_64K))
		return SZ_64K;

	if (align >= ilog2(SZ_8K) && size >= SZ_8K &&
		kgsl_pool_avaialable(SZ_8K))
		return SZ_8K;

	return PAGE_SIZE;
}
#else
static inline int kgsl_get_page_size(size_t size, unsigned int align)
{
	return PAGE_SIZE;
}
#endif

#endif /* __KGSL_SHAREDMEM_H */