Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit de531b6f authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "gpu: ion: Add trace events for secure cma"

parents 1619fe63 250adcdc
Loading
Loading
Loading
Loading
+39 −21
Original line number Diff line number Diff line
@@ -3,7 +3,7 @@
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
@@ -23,6 +23,7 @@
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>

#include <asm/cacheflush.h>

@@ -114,7 +115,8 @@ int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,

static int ion_secure_cma_add_to_pool(
					struct ion_cma_secure_heap *sheap,
					unsigned long len)
					unsigned long len,
					bool prefetch)
{
	void *cpu_addr;
	dma_addr_t handle;
@@ -122,6 +124,9 @@ static int ion_secure_cma_add_to_pool(
	int ret = 0;
	struct ion_cma_alloc_chunk *chunk;


	trace_ion_secure_cma_add_to_pool_start(len,
				atomic_read(&sheap->total_pool_size), prefetch);
	mutex_lock(&sheap->chunk_lock);

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
@@ -155,6 +160,10 @@ out_free:
	kfree(chunk);
out:
	mutex_unlock(&sheap->chunk_lock);

	trace_ion_secure_cma_add_to_pool_end(len,
				atomic_read(&sheap->total_pool_size), prefetch);

	return ret;
}

@@ -163,7 +172,7 @@ static void ion_secure_pool_pages(struct work_struct *work)
	struct ion_cma_secure_heap *sheap = container_of(work,
			struct ion_cma_secure_heap, work);

	ion_secure_cma_add_to_pool(sheap, sheap->last_alloc);
	ion_secure_cma_add_to_pool(sheap, sheap->last_alloc, true);
}
/*
 * @s1: start of the first region
@@ -236,6 +245,7 @@ int ion_secure_cma_prefetch(struct ion_heap *heap, void *data)
		len = diff;

	sheap->last_alloc = len;
	trace_ion_prefetching(sheap->last_alloc);
	schedule_work(&sheap->work);

	return 0;
@@ -319,20 +329,39 @@ static void ion_secure_cma_free_chunk(struct ion_cma_secure_heap *sheap,

}

int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr)
{
	struct ion_cma_secure_heap *sheap =
		container_of(heap, struct ion_cma_secure_heap, heap);
	struct list_head *entry, *_n;
	unsigned long drained_size = 0, skipped_size = 0;

	trace_ion_secure_cma_shrink_pool_start(drained_size, skipped_size);

	mutex_lock(&sheap->chunk_lock);
	list_for_each_safe(entry, _n, &sheap->chunks) {
		struct ion_cma_alloc_chunk *chunk = container_of(entry,
					struct ion_cma_alloc_chunk, entry);

		if (atomic_read(&chunk->cnt) == 0)
		if (max_nr < 0)
			break;

		if (atomic_read(&chunk->cnt) == 0) {
			max_nr -= chunk->chunk_size;
			drained_size += chunk->chunk_size;
			ion_secure_cma_free_chunk(sheap, chunk);
		} else {
			skipped_size += chunk->chunk_size;
		}
	}

	trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size);
}

int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
{
	struct ion_cma_secure_heap *sheap =
		container_of(heap, struct ion_cma_secure_heap, heap);

	mutex_lock(&sheap->chunk_lock);
	__ion_secure_cma_shrink_pool(sheap, INT_MAX);
	mutex_unlock(&sheap->chunk_lock);

	return 0;
@@ -344,7 +373,6 @@ static int ion_secure_cma_shrinker(struct shrinker *shrinker,
	struct ion_cma_secure_heap *sheap = container_of(shrinker,
					struct ion_cma_secure_heap, shrinker);
	int nr_to_scan = sc->nr_to_scan;
	struct list_head *entry, *_n;

	if (nr_to_scan == 0)
		return atomic_read(&sheap->total_pool_size);
@@ -364,18 +392,8 @@ static int ion_secure_cma_shrinker(struct shrinker *shrinker,
	if (!mutex_trylock(&sheap->chunk_lock))
		return -1;

	list_for_each_safe(entry, _n, &sheap->chunks) {
		struct ion_cma_alloc_chunk *chunk = container_of(entry,
					struct ion_cma_alloc_chunk, entry);

		if (nr_to_scan < 0)
			break;
	__ion_secure_cma_shrink_pool(sheap, nr_to_scan);

		if (atomic_read(&chunk->cnt) == 0) {
			nr_to_scan -= chunk->chunk_size;
			ion_secure_cma_free_chunk(sheap, chunk);
		}
	}
	mutex_unlock(&sheap->chunk_lock);

	return atomic_read(&sheap->total_pool_size);
@@ -440,7 +458,7 @@ static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
	ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len);

	if (ret) {
		ret = ion_secure_cma_add_to_pool(sheap, len);
		ret = ion_secure_cma_add_to_pool(sheap, len, false);
		if (ret) {
			dev_err(sheap->dev, "Fail to allocate buffer\n");
			goto err;
+98 −0
Original line number Diff line number Diff line
@@ -599,6 +599,104 @@ DEFINE_EVENT(smmu_map, iommu_map_range,
	TP_ARGS(va, pa, chunk_size, len)
	);

/*
 * Event class for the secure-CMA pool-fill tracepoints.
 *
 * Records the requested fill length in bytes, the pool's total size
 * (as read from the heap's atomic counter at the call site), and
 * whether the fill was triggered by a prefetch request (is_prefetch
 * true, from the prefetch worker) or by the allocation-path fallback
 * (is_prefetch false). len and pool_total are printed in hex.
 */
DECLARE_EVENT_CLASS(ion_secure_cma_add_to_pool,

	TP_PROTO(unsigned long len,
		 int pool_total,
		 bool is_prefetch),

	TP_ARGS(len, pool_total, is_prefetch),

	TP_STRUCT__entry(
		__field(unsigned long, len)
		__field(int, pool_total)
		__field(bool, is_prefetch)
		),

	TP_fast_assign(
		__entry->len = len;
		__entry->pool_total = pool_total;
		__entry->is_prefetch = is_prefetch;
		),

	TP_printk("len %lx, pool total %x is_prefetch %d",
		__entry->len,
		__entry->pool_total,
		__entry->is_prefetch)
	);

/* Emitted on entry to ion_secure_cma_add_to_pool(), before the pool lock
 * is taken; pool_total reflects the size before this fill. */
DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_start,
	TP_PROTO(unsigned long len,
		int pool_total,
		bool is_prefetch),

	TP_ARGS(len, pool_total, is_prefetch)
	);

/* Emitted on exit from ion_secure_cma_add_to_pool(), after the pool lock
 * is dropped; pool_total reflects the size after the fill attempt. */
DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_end,
	TP_PROTO(unsigned long len,
		int pool_total,
		bool is_prefetch),

	TP_ARGS(len, pool_total, is_prefetch)
	);

/*
 * Event class for the secure-CMA pool-shrink tracepoints.
 *
 * drained_size: bytes of idle chunks (refcount zero) freed back to CMA.
 * skipped_size: bytes of in-use chunks left in the pool.
 * Both values are printed in hex. NOTE(review): the _start event is
 * emitted before any chunk is walked, so both fields are zero there;
 * only the _end event carries the final totals.
 */
DECLARE_EVENT_CLASS(ion_secure_cma_shrink_pool,

	TP_PROTO(unsigned long drained_size,
		 unsigned long skipped_size),

	TP_ARGS(drained_size, skipped_size),

	TP_STRUCT__entry(
		__field(unsigned long, drained_size)
		__field(unsigned long, skipped_size)
		),

	TP_fast_assign(
		__entry->drained_size = drained_size;
		__entry->skipped_size = skipped_size;
		),

	TP_printk("drained size %lx, skipped size %lx",
		__entry->drained_size,
		__entry->skipped_size)
	);

/* Emitted on entry to __ion_secure_cma_shrink_pool(); counters are
 * still zero at this point (marks the start of the walk only). */
DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_start,
	TP_PROTO(unsigned long drained_size,
		 unsigned long skipped_size),

	TP_ARGS(drained_size, skipped_size)
	);

/* Emitted after the chunk-list walk completes, with the final drained
 * and skipped byte totals for this shrink pass. */
DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_end,
	TP_PROTO(unsigned long drained_size,
		 unsigned long skipped_size),

	TP_ARGS(drained_size, skipped_size)
	);

/*
 * Emitted from ion_secure_cma_prefetch() when a prefetch of len bytes
 * (the heap's last_alloc) is handed to the pool-fill worker via
 * schedule_work(). len is printed in hex.
 */
TRACE_EVENT(ion_prefetching,

	TP_PROTO(unsigned long len),

	TP_ARGS(len),

	TP_STRUCT__entry(
		__field(unsigned long, len)
		),

	TP_fast_assign(
		__entry->len = len;
		),

	TP_printk("prefetch size %lx",
		__entry->len)
	);



#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */