Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 19969933 authored by Puranam V G Tejaswi
Browse files

msm: kgsl: improve memory stats maintenance



While freeing memory, the page_alloc memstat wrongly shows
that the memory is already freed while the free is still in
progress. While allocating memory, if something goes wrong
partway through, the memory allocated up to that point is not
reflected in page_alloc. Change the point at which page_alloc
is updated. Also add two stats: one to track allocations in
progress and the other to track frees in progress.

Change-Id: I494c873fd90b8e504d5eea6966855b50493d9c09
Signed-off-by: Puranam V G Tejaswi <pvgtejas@codeaurora.org>
parent f9d8e0ff
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -4684,6 +4684,8 @@ struct kgsl_driver kgsl_driver = {
	.stats.secure_max = ATOMIC_LONG_INIT(0),
	.stats.mapped = ATOMIC_LONG_INIT(0),
	.stats.mapped_max = ATOMIC_LONG_INIT(0),
	.stats.page_free_pending = ATOMIC_LONG_INIT(0),
	.stats.page_alloc_pending = ATOMIC_LONG_INIT(0),
};
EXPORT_SYMBOL(kgsl_driver);

+2 −0
Original line number Diff line number Diff line
@@ -158,6 +158,8 @@ struct kgsl_driver {
		atomic_long_t secure_max;
		atomic_long_t mapped;
		atomic_long_t mapped_max;
		atomic_long_t page_free_pending;
		atomic_long_t page_alloc_pending;
	} stats;
	unsigned int full_cache_threshold;
	struct workqueue_struct *workqueue;
+12 −1
Original line number Diff line number Diff line
@@ -533,7 +533,8 @@ static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)

		atomic_long_sub(memdesc->size, &kgsl_driver.stats.secure);
	} else {
		atomic_long_sub(memdesc->size, &kgsl_driver.stats.page_alloc);
		atomic_long_add(memdesc->size,
			&kgsl_driver.stats.page_free_pending);
	}

	/* Free pages using the pages array for non secure paged memory */
@@ -542,6 +543,11 @@ static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
	else
		kgsl_pool_free_sgt(memdesc->sgt);

	if (!(memdesc->priv & KGSL_MEMDESC_TZ_LOCKED)) {
		atomic_long_sub(memdesc->size, &kgsl_driver.stats.page_alloc);
		atomic_long_sub(memdesc->size,
			&kgsl_driver.stats.page_free_pending);
	}
}

/*
@@ -886,6 +892,8 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
	 * are allocated by kgsl. This helps with improving the vm fault
	 * routine by finding the faulted page in constant time.
	 */
	if (!(memdesc->flags & KGSL_MEMFLAGS_SECURE))
		atomic_long_add(size, &kgsl_driver.stats.page_alloc_pending);

	memdesc->pages = kgsl_malloc(len_alloc * sizeof(struct page *));
	memdesc->page_count = 0;
@@ -980,6 +988,9 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
		&kgsl_driver.stats.page_alloc_max);

done:
	if (!(memdesc->flags & KGSL_MEMFLAGS_SECURE))
		atomic_long_sub(size, &kgsl_driver.stats.page_alloc_pending);

	if (ret) {
		if (memdesc->pages) {
			unsigned int count = 1;