Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Unverified commit f59f70f1, authored by derfelot
Browse files

staging: ion: msm: Add Sony modifications

Taken from Sony 47.2.A.10.107 stock kernel
parent 849a4067
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -15,6 +15,11 @@
 * GNU General Public License for more details.
 *
 */
/*
 * NOTE: This file has been modified by Sony Mobile Communications Inc.
 * Modifications are Copyright (c) 2017 Sony Mobile Communications Inc,
 * and licensed under the license of the file.
 */

#include <linux/device.h>
#include <linux/ion.h>
@@ -397,7 +402,7 @@ static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
	 * would cause a deadlock in several places so don't shrink if that
	 * happens.
	 */
	if (!mutex_trylock(&sheap->chunk_lock))
	if (!mutex_trylock_spin(&sheap->chunk_lock))
		return -1;

	freed = __ion_secure_cma_shrink_pool(sheap, nr_to_scan);
+176 −6
Original line number Diff line number Diff line
@@ -13,18 +13,29 @@
 * GNU General Public License for more details.
 *
 */
/*
 * NOTE: This file has been modified by Sony Mobile Communications Inc.
 * Modifications are Copyright (c) 2016 Sony Mobile Communications Inc,
 * and licensed under the license of the file.
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/migrate.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/page-flags.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/compaction.h>
#include "ion_priv.h"

#define ION_PAGE_CACHE	1

static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
	struct page *page;
@@ -51,12 +62,18 @@ static void ion_page_pool_free_pages(struct ion_page_pool *pool,
				     struct page *page)
{
	ion_page_pool_free_set_cache_policy(pool, page);
	if (pool->inode && pool->order == 0) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	__free_pages(page, pool->order);
}

static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
{
	mutex_lock(&pool->mutex);
	page->private = ION_PAGE_CACHE;
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &pool->high_items);
		pool->high_count++;
@@ -65,6 +82,8 @@ static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
		pool->low_count++;
	}

	if (pool->inode && pool->order == 0)
		__SetPageMovable(page, pool->inode->i_mapping);
	mod_zone_page_state(page_zone(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
			    (1 << (PAGE_SHIFT + pool->order)));
	mutex_unlock(&pool->mutex);
@@ -85,7 +104,9 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
		pool->low_count--;
	}

	list_del(&page->lru);
	clear_bit(ION_PAGE_CACHE, &page->private);

	list_del_init(&page->lru);
	mod_zone_page_state(page_zone(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
			    -(1 << (PAGE_SHIFT + pool->order)));
	return page;
@@ -99,7 +120,7 @@ void *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)

	*from_pool = true;

	if (mutex_trylock(&pool->mutex)) {
	if (mutex_trylock_spin(&pool->mutex)) {
		if (pool->high_count)
			page = ion_page_pool_remove(pool, true);
		else if (pool->low_count)
@@ -109,6 +130,10 @@ void *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)
	if (!page) {
		page = ion_page_pool_alloc_pages(pool);
		*from_pool = false;
	} else {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	return page;
}
@@ -122,14 +147,18 @@ void *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool)

	BUG_ON(!pool);

	if (mutex_trylock(&pool->mutex)) {
	if (mutex_trylock_spin(&pool->mutex)) {
		if (pool->high_count)
			page = ion_page_pool_remove(pool, true);
		else if (pool->low_count)
			page = ion_page_pool_remove(pool, false);
		mutex_unlock(&pool->mutex);
	}

	if (page) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	return page;
}

@@ -191,8 +220,144 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
	return freed;
}

/*
 * ion_page_pool_isolate - migration callback: detach a pooled page so the
 * compaction/migration core can move it.
 *
 * @page: candidate page (must not already be isolated - see VM_BUG_ON_PAGE)
 * @mode: isolation mode from the migration core (unused here)
 *
 * Returns true if the page was removed from its pool's free list and may
 * be migrated, false if it is no longer a movable pool page.
 */
static bool ion_page_pool_isolate(struct page *page, isolate_mode_t mode)
{
	struct ion_page_pool *pool;
	struct address_space *mapping = page_mapping(page);

	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (!mapping)
		return false;
	/* The owning pool was stashed in the mapping by register_migration. */
	pool = mapping->private_data;

	mutex_lock(&pool->mutex);
	/* could be removed from the cache pool and thus become unmovable */
	if (!__PageMovable(page)) {
		mutex_unlock(&pool->mutex);
		return false;
	}

	/*
	 * NOTE(review): ion_page_pool_add() assigns page->private =
	 * ION_PAGE_CACHE (value 1, i.e. bit 0), but this tests *bit*
	 * ION_PAGE_CACHE (bit 1). If the extraction is faithful, this
	 * check can never succeed and pool pages are never isolated -
	 * confirm against the original tree whether set_bit() was
	 * intended in the add path.
	 */
	if (unlikely(!test_bit(ION_PAGE_CACHE, &page->private))) {
		mutex_unlock(&pool->mutex);
		return false;
	}

	/* Unhook from the free list and fix up the per-zone counters. */
	list_del(&page->lru);
	if (PageHighMem(page))
		pool->high_count--;
	else
		pool->low_count--;
	mutex_unlock(&pool->mutex);

	return true;
}

/*
 * ion_page_pool_migrate - migration callback: replace an isolated pool
 * page with @newpage.
 *
 * @mapping: the pool's pseudo-inode mapping (private_data is the pool)
 * @newpage: destination page that takes the old page's place in the pool
 * @page:    the previously isolated source page
 * @mode:    migration mode (unused here)
 *
 * The old page's movable state is cleared under its page lock, then
 * @newpage is marked movable, given an extra reference, and linked onto
 * the pool's high/low free list under the pool mutex. Always returns 0
 * (the data itself needs no copying - pool pages are free pages).
 */
static int ion_page_pool_migrate(struct address_space *mapping,
				 struct page *newpage,
				 struct page *page, enum migrate_mode mode)
{
	struct ion_page_pool *pool = mapping->private_data;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	lock_page(page);
	/* Transfer the pool-cache marker and movable state to newpage. */
	newpage->private = ION_PAGE_CACHE;
	__SetPageMovable(newpage, page_mapping(page));
	/* Pin newpage before publishing it on the pool list. */
	get_page(newpage);
	__ClearPageMovable(page);
	ClearPagePrivate(page);
	unlock_page(page);
	mutex_lock(&pool->mutex);
	if (PageHighMem(newpage)) {
		list_add_tail(&newpage->lru, &pool->high_items);
		pool->high_count++;
	} else {
		list_add_tail(&newpage->lru, &pool->low_items);
		pool->low_count++;
	}
	mutex_unlock(&pool->mutex);
	/* Drop the reference the migration core gave us for the old page. */
	put_page(page);
	return 0;
}

/* putback_page callback - intentionally a no-op, see comment below. */
static void ion_page_pool_putback(struct page *page)
{
	/*
	 * The migrate function either succeeds or returns -EAGAIN, which
	 * results in calling it again until it succeeds, so this callback
	 * is not needed.
	 */
}

/*
 * Mount callback for the "ion_pool" pseudo filesystem. The filesystem
 * exists only to supply anonymous inodes whose mappings carry the
 * migration address_space_operations; 0x77 is the superblock magic
 * passed to mount_pseudo() (arbitrary, not user-visible).
 */
static struct dentry *ion_pool_do_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname = simple_dname,
	};

	return mount_pseudo(fs_type, "ion_pool:", NULL, &ops, 0x77);
}

/* Internal pseudo filesystem backing the pools' movable-page inodes. */
static struct file_system_type ion_pool_fs = {
	.name		= "ion_pool",
	.mount		= ion_pool_do_mount,
	.kill_sb	= kill_anon_super,
};

/* Pin count and mount point shared by all pools (see simple_pin_fs). */
static int ion_pool_cnt;
static struct vfsmount *ion_pool_mnt;

/*
 * Kernel-mount the ion_pool pseudo filesystem once at init time.
 * Returns 0 on success or the PTR_ERR() of the failed mount.
 */
static int ion_pool_mount(void)
{
	int ret = 0;

	ion_pool_mnt = kern_mount(&ion_pool_fs);
	if (IS_ERR(ion_pool_mnt))
		ret = PTR_ERR(ion_pool_mnt);

	return ret;
}

/*
 * Address-space ops installed on each pool's anon inode so the page
 * migration core can isolate/migrate/put back pooled free pages.
 */
static const struct address_space_operations ion_pool_aops = {
	.isolate_page = ion_page_pool_isolate,
	.migratepage = ion_page_pool_migrate,
	.putback_page = ion_page_pool_putback,
};

/*
 * ion_pool_register_migration - make @pool's pages eligible for migration.
 *
 * Pins the ion_pool pseudo filesystem, allocates an anonymous inode and
 * wires its mapping to @pool and to ion_pool_aops. On success pool->inode
 * is set; on failure it stays NULL, the fs pin is dropped, and a negative
 * errno is returned. Undone by ion_pool_unregister_migration().
 */
static int ion_pool_register_migration(struct ion_page_pool *pool)
{
	int  ret = simple_pin_fs(&ion_pool_fs, &ion_pool_mnt, &ion_pool_cnt);

	if (ret < 0) {
		pr_err("Cannot mount pseudo fs: %d\n", ret);
		return ret;
	}
	pool->inode = alloc_anon_inode(ion_pool_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		ret = PTR_ERR(pool->inode);
		pool->inode = NULL;
		simple_release_fs(&ion_pool_mnt, &ion_pool_cnt);
		return ret;
	}

	/* Let the migration callbacks find the pool from the mapping. */
	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &ion_pool_aops;
	return 0;
}

/*
 * ion_pool_unregister_migration - undo ion_pool_register_migration().
 *
 * Releases the pool's anon inode and drops the pseudo-fs pin. Safe to
 * call on a pool that never registered (pool->inode == NULL).
 */
static void ion_pool_unregister_migration(struct ion_page_pool *pool)
{
	if (!pool->inode)
		return;

	iput(pool->inode);
	pool->inode = NULL;
	simple_release_fs(&ion_pool_mnt, &ion_pool_cnt);
}

struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask,
					   unsigned int order)
					   unsigned int order, bool movable)
{
	struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
					     GFP_KERNEL);
@@ -208,16 +373,21 @@ struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask,
	mutex_init(&pool->mutex);
	plist_node_init(&pool->list, order);

	pool->inode = NULL;
	if (movable)
		ion_pool_register_migration(pool);

	return pool;
}

/*
 * ion_page_pool_destroy - tear down a pool created by
 * ion_page_pool_create(): drop its migration registration, then free it.
 */
void ion_page_pool_destroy(struct ion_page_pool *pool)
{
	ion_pool_unregister_migration(pool);
	kfree(pool);
}

/*
 * One-time init: mount the ion_pool pseudo filesystem used for page
 * migration support. (The diff residue left both the old `return 0;`
 * and the replacement statement in the body, which is invalid C; only
 * the new return belongs here.)
 */
static int __init ion_page_pool_init(void)
{
	return ion_pool_mount();
}
device_initcall(ion_page_pool_init);
+8 −1
Original line number Diff line number Diff line
@@ -14,6 +14,11 @@
 * GNU General Public License for more details.
 *
 */
/*
 * NOTE: This file has been modified by Sony Mobile Communications Inc.
 * Modifications are Copyright (c) 2016 Sony Mobile Communications Inc,
 * and licensed under the license of the file.
 */

#ifndef _ION_PRIV_H
#define _ION_PRIV_H
@@ -422,6 +427,7 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
 * @gfp_mask:		gfp_mask to use from alloc
 * @order:		order of pages in the pool
 * @list:		plist node for list of pools
 * @inode:		inode for ion_pool pseudo filesystem
 *
 * Allows you to keep a pool of pre allocated pages to use from your heap.
 * Keeping a pool of pages that is ready for dma, ie any cached mapping have
@@ -438,10 +444,11 @@ struct ion_page_pool {
	gfp_t gfp_mask;
	unsigned int order;
	struct plist_node list;
	struct inode *inode;
};

struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask,
					   unsigned int order);
					   unsigned int order, bool movable);
void ion_page_pool_destroy(struct ion_page_pool *);
void *ion_page_pool_alloc(struct ion_page_pool *, bool *from_pool);
void *ion_page_pool_alloc_pool_only(struct ion_page_pool *);
+12 −5
Original line number Diff line number Diff line
@@ -14,6 +14,11 @@
 * GNU General Public License for more details.
 *
 */
/*
 * NOTE: This file has been modified by Sony Mobile Communications Inc.
 * Modifications are Copyright (c) 2017 Sony Mobile Communications Inc,
 * and licensed under the license of the file.
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
@@ -784,7 +789,8 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
 * ion_system_heap_destroy_pools to destroy the pools.
 */
static int ion_system_heap_create_pools(struct device *dev,
					struct ion_page_pool **pools)
					struct ion_page_pool **pools,
					bool movable)
{
	int i;
	for (i = 0; i < num_orders; i++) {
@@ -793,7 +799,8 @@ static int ion_system_heap_create_pools(struct device *dev,

		if (orders[i])
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(dev, gfp_flags, orders[i]);
		pool = ion_page_pool_create(dev, gfp_flags, orders[i],
					movable);
		if (!pool)
			goto err_create_pool;
		pools[i] = pool;
@@ -832,15 +839,15 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)
			if (!heap->secure_pools[i])
				goto err_create_secure_pools;
			if (ion_system_heap_create_pools(
					dev, heap->secure_pools[i]))
					dev, heap->secure_pools[i], false))
				goto err_create_secure_pools;
		}
	}

	if (ion_system_heap_create_pools(dev, heap->uncached_pools))
	if (ion_system_heap_create_pools(dev, heap->uncached_pools, false))
		goto err_create_uncached_pools;

	if (ion_system_heap_create_pools(dev, heap->cached_pools))
	if (ion_system_heap_create_pools(dev, heap->cached_pools, true))
		goto err_create_cached_pools;

	mutex_init(&heap->split_page_mutex);