Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 64ae2308 authored by Puranam V G Tejaswi's avatar Puranam V G Tejaswi
Browse files

msm: kgsl: use shmem for kgsl allocations



Use shmem for graphics allocations. This helps make kgsl pages
movable and also swappable to zram. By using shmem we forgo the
benefit of higher-order pages and the memory pools, but the gains —
more free memory available in the system and larger contiguous
chunks of memory — can outweigh what we give up.

Change-Id: If613d721c581317967ea0603ea151fe85aca8afe
Signed-off-by: default avatarPuranam V G Tejaswi <pvgtejas@codeaurora.org>
parent 16dd4e63
Loading
Loading
Loading
Loading
+9 −0
Original line number Diff line number Diff line
@@ -26,3 +26,12 @@ config QCOM_ADRENO_DEFAULT_GOVERNOR
config QCOM_KGSL_IOMMU
	bool
	default y if QCOM_KGSL && (MSM_IOMMU || ARM_SMMU)

config QCOM_KGSL_USE_SHMEM
	bool "Enable using shmem for memory allocations"
	depends on QCOM_KGSL
	help
	  Say 'Y' to enable using shmem for memory allocations. If enabled,
	  there will be no support for the memory pools and higher order pages.
	  But using shmem will help in making kgsl pages available for
	  reclaiming.
+4 −1
Original line number Diff line number Diff line
@@ -12,7 +12,6 @@ msm_kgsl_core-y = \
	kgsl_mmu.o \
	kgsl_snapshot.o \
	kgsl_events.o \
	kgsl_pool.o \
	kgsl_gmu_core.o \
	kgsl_gmu.o \
	kgsl_rgmu.o \
@@ -23,6 +22,10 @@ msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o
msm_kgsl_core-$(CONFIG_SYNC_FILE) += kgsl_sync.o
msm_kgsl_core-$(CONFIG_COMPAT) += kgsl_compat.o

ifndef CONFIG_QCOM_KGSL_USE_SHMEM
	msm_kgsl_core-y += kgsl_pool.o
endif

msm_adreno-y += \
	adreno_ioctl.o \
	adreno_ringbuffer.o \
+3 −1
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2008-2020, The Linux Foundation. All rights reserved.
 */
#ifndef __KGSL_H
#define __KGSL_H
@@ -211,6 +211,7 @@ struct kgsl_memdesc_ops {
 * @pages: An array of pointers to allocated pages
 * @page_count: Total number of pages allocated
 * @cur_bindings: Number of sparse pages actively bound
 * @shmem_filp: Pointer to the shmem file backing this memdesc
 */
struct kgsl_memdesc {
	struct kgsl_pagetable *pagetable;
@@ -230,6 +231,7 @@ struct kgsl_memdesc {
	struct page **pages;
	unsigned int page_count;
	unsigned int cur_bindings;
	struct file *shmem_filp;
};

/*
+4 −73
Original line number Diff line number Diff line
@@ -57,27 +57,11 @@ _kgsl_get_pool_from_order(unsigned int order)
	return NULL;
}

/*
 * Zero out every constituent 4K page of a (possibly higher-order)
 * allocation. Each page is temporarily mapped into the kernel,
 * cleared, and flushed from the cache.
 */
static void
_kgsl_pool_zero_page(struct page *p, unsigned int pool_order)
{
	unsigned int count = 1 << pool_order;
	unsigned int idx;

	for (idx = 0; idx < count; idx++) {
		void *kaddr = kmap_atomic(nth_page(p, idx));

		memset(kaddr, 0, PAGE_SIZE);
		dmac_flush_range(kaddr, kaddr + PAGE_SIZE);
		kunmap_atomic(kaddr);
	}
}

/* Add a page to specified pool */
static void
_kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
{
	_kgsl_pool_zero_page(p, pool->pool_order);
	kgsl_zero_page(p, pool->pool_order);

	spin_lock(&pool->list_lock);
	list_add_tail(&p->lru, &pool->page_list);
@@ -207,43 +191,6 @@ kgsl_pool_reduce(unsigned int target_pages, bool exit)
	return pcount;
}

/**
 * kgsl_pool_free_sgt() - Free scatter-gather list
 * @sgt: pointer of the sg list
 *
 * Walk every scatterlist entry and release its backing pages through
 * kgsl_pool_free_page(), which returns them to a pool when there is
 * room and otherwise gives them back to the system.
 */

void kgsl_pool_free_sgt(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		/*
		 * sg_alloc_table_from_pages() collapses physically adjacent
		 * pages into one scatterlist entry, so an entry's length is
		 * not guaranteed to be a single whole order. We therefore
		 * cannot __free_pages() the entry in one shot; walk it one
		 * page or compound page group at a time instead.
		 */
		struct page *cur = sg_page(sg);
		unsigned int npages = sg->length / PAGE_SIZE;
		unsigned int freed = 0;

		while (freed < npages) {
			unsigned int nr = 1 << compound_order(cur);
			struct page *nxt = nth_page(cur, nr);

			kgsl_pool_free_page(cur);

			cur = nxt;
			freed += nr;
		}
	}
}

/**
 * kgsl_pool_free_pages() - Free pages in the pages array
 * @pages: pointer of the pages array
@@ -299,22 +246,6 @@ static int kgsl_pool_get_retry_order(unsigned int order)
	return 0;
}

/*
 * Build the gfp flags for a kgsl page allocation of the given order.
 * Higher-order requests are best-effort: they opt out of reclaim,
 * retries, and allocation-failure warnings, while order-0 requests
 * use the normal GFP_KERNEL policy.
 */
static unsigned int kgsl_gfp_mask(unsigned int page_order)
{
	unsigned int flags = __GFP_HIGHMEM;

	if (page_order == 0) {
		flags |= GFP_KERNEL;
	} else {
		flags |= __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
		flags &= ~__GFP_RECLAIM;
	}

	/* Global "no retry" knob applies regardless of order. */
	if (kgsl_sharedmem_get_noretry())
		flags |= __GFP_NORETRY | __GFP_NOWARN;

	return flags;
}

/**
 * kgsl_pool_alloc_page() - Allocate a page of requested size
 * @page_size: Size of the page to be allocated
@@ -353,7 +284,7 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
			} else
				return -ENOMEM;
		}
		_kgsl_pool_zero_page(page, order);
		kgsl_zero_page(page, order);
		goto done;
	}

@@ -373,7 +304,7 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
			page = alloc_pages(gfp_mask, order);
			if (page == NULL)
				return -ENOMEM;
			_kgsl_pool_zero_page(page, order);
			kgsl_zero_page(page, order);
			goto done;
		}
	}
@@ -404,7 +335,7 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
				return -ENOMEM;
		}

		_kgsl_pool_zero_page(page, order);
		kgsl_zero_page(page, order);
	}

done:
+7 −3
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2017,2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2016-2017, 2019-2020, The Linux Foundation. All rights reserved.
 */
#ifndef __KGSL_POOL_H
#define __KGSL_POOL_H

void kgsl_pool_free_sgt(struct sg_table *sgt);
void kgsl_pool_free_pages(struct page **pages, unsigned int page_count);
#ifdef CONFIG_QCOM_KGSL_USE_SHMEM
/*
 * With shmem-backed allocations the kgsl page pools are compiled out:
 * the init/exit hooks become no-ops and the pool allocation API below
 * is not declared at all.
 */
static inline void kgsl_init_page_pools(struct platform_device *pdev) { }
static inline void kgsl_exit_page_pools(void) { }
#else
void kgsl_init_page_pools(struct platform_device *pdev);
void kgsl_exit_page_pools(void);
void kgsl_pool_free_pages(struct page **pages, unsigned int page_count);
int kgsl_pool_alloc_page(int *page_size, struct page **pages,
			unsigned int pages_len, unsigned int *align);
void kgsl_pool_free_page(struct page *p);
/* NOTE: "avaialable" typo is the symbol's real name upstream; keep it. */
bool kgsl_pool_avaialable(int size);
#endif
#endif /* __KGSL_POOL_H */
Loading