Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d8506176 authored by Liam Mark
Browse files

staging: android: ion: add ftrace logging for cache maintenance



Trace when ION applies, and when it skips, cache maintenance.

This will be useful during debugging to both catch when cache maintenance
is missed and when there is unnecessary cache maintenance.

Change-Id: I1432454a7ab735a63970b977656e9c4c9b5f5be7
Signed-off-by: Liam Mark <lmark@codeaurora.org>
parent 9905cf67
Loading
Loading
Loading
Loading
+128 −8
Original line number Diff line number Diff line
@@ -40,6 +40,8 @@
#include <linux/sched/task.h>
#include <linux/bitops.h>
#include <linux/msm_dma_iommu_mapping.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ion.h>
#include <soc/qcom/secure_buffer.h>

#include "ion.h"
@@ -328,6 +330,21 @@ static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
	    !hlos_accessible_buffer(buffer))
		map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC)
		trace_ion_dma_map_cmo_skip(attachment->dev,
					   attachment->dmabuf->name,
					   ion_buffer_cached(buffer),
					   hlos_accessible_buffer(buffer),
					   attachment->dma_map_attrs,
					   direction);
	else
		trace_ion_dma_map_cmo_apply(attachment->dev,
					    attachment->dmabuf->name,
					    ion_buffer_cached(buffer),
					    hlos_accessible_buffer(buffer),
					    attachment->dma_map_attrs,
					    direction);

	if (map_attrs & DMA_ATTR_DELAYED_UNMAP) {
		count = msm_dma_map_sg_attrs(attachment->dev, table->sgl,
					     table->nents, direction,
@@ -358,6 +375,21 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
	    !hlos_accessible_buffer(buffer))
		map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC)
		trace_ion_dma_unmap_cmo_skip(attachment->dev,
					     attachment->dmabuf->name,
					     ion_buffer_cached(buffer),
					     hlos_accessible_buffer(buffer),
					     attachment->dma_map_attrs,
					     direction);
	else
		trace_ion_dma_unmap_cmo_apply(attachment->dev,
					      attachment->dmabuf->name,
					      ion_buffer_cached(buffer),
					      hlos_accessible_buffer(buffer),
					      attachment->dma_map_attrs,
					      direction);

	if (map_attrs & DMA_ATTR_DELAYED_UNMAP)
		msm_dma_unmap_sg_attrs(attachment->dev, table->sgl,
				       table->nents, direction,
@@ -539,6 +571,10 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
	int ret = 0;

	if (!hlos_accessible_buffer(buffer)) {
		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name,
						    ion_buffer_cached(buffer),
						    false, direction,
						    sync_only_mapped);
		ret = -EPERM;
		goto out;
	}
@@ -552,8 +588,12 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
		mutex_unlock(&buffer->lock);
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
	if (!(buffer->flags & ION_FLAG_CACHED)) {
		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
						    true, direction,
						    sync_only_mapped);
		goto out;
	}

	mutex_lock(&buffer->lock);

@@ -561,6 +601,10 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
		struct device *dev = buffer->heap->priv;
		struct sg_table *table = buffer->sg_table;

		trace_ion_begin_cpu_access_cmo_apply(dev, dmabuf->name,
						     true, true, direction,
						     sync_only_mapped);

		if (sync_only_mapped)
			ion_sgl_sync_mapped(dev, table->sgl,
					    table->nents, &buffer->vmas,
@@ -574,8 +618,18 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
	}

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->dma_mapped)
		if (!a->dma_mapped) {
			trace_ion_begin_cpu_access_notmapped(a->dev,
							     dmabuf->name,
							     true, true,
							     direction,
							     sync_only_mapped);
			continue;
		}

		trace_ion_begin_cpu_access_cmo_apply(a->dev, dmabuf->name,
						     true, true, direction,
						     sync_only_mapped);

		if (sync_only_mapped)
			ion_sgl_sync_mapped(a->dev, a->table->sgl,
@@ -600,6 +654,10 @@ static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
	int ret = 0;

	if (!hlos_accessible_buffer(buffer)) {
		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name,
						  ion_buffer_cached(buffer),
						  false, direction,
						  sync_only_mapped);
		ret = -EPERM;
		goto out;
	}
@@ -610,14 +668,22 @@ static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
		mutex_unlock(&buffer->lock);
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
	if (!(buffer->flags & ION_FLAG_CACHED)) {
		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
						  true, direction,
						  sync_only_mapped);
		goto out;
	}

	mutex_lock(&buffer->lock);
	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		struct device *dev = buffer->heap->priv;
		struct sg_table *table = buffer->sg_table;

		trace_ion_end_cpu_access_cmo_apply(dev, dmabuf->name,
						   true, true, direction,
						   sync_only_mapped);

		if (sync_only_mapped)
			ion_sgl_sync_mapped(dev, table->sgl,
					    table->nents, &buffer->vmas,
@@ -630,8 +696,18 @@ static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
	}

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->dma_mapped)
		if (!a->dma_mapped) {
			trace_ion_end_cpu_access_notmapped(a->dev,
							   dmabuf->name,
							   true, true,
							   direction,
							   sync_only_mapped);
			continue;
		}

		trace_ion_end_cpu_access_cmo_apply(a->dev, dmabuf->name,
						   true, true, direction,
						   sync_only_mapped);

		if (sync_only_mapped)
			ion_sgl_sync_mapped(a->dev, a->table->sgl,
@@ -682,6 +758,10 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
	int ret = 0;

	if (!hlos_accessible_buffer(buffer)) {
		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name,
						    ion_buffer_cached(buffer),
						    false, dir,
						    false);
		ret = -EPERM;
		goto out;
	}
@@ -695,14 +775,22 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
		mutex_unlock(&buffer->lock);
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
	if (!(buffer->flags & ION_FLAG_CACHED)) {
		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
						    true, dir,
						    false);
		goto out;
	}

	mutex_lock(&buffer->lock);
	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		struct device *dev = buffer->heap->priv;
		struct sg_table *table = buffer->sg_table;

		trace_ion_begin_cpu_access_cmo_apply(dev, dmabuf->name,
						     true, true, dir,
						     false);

		ion_sgl_sync_range(dev, table->sgl, table->nents,
				   offset, len, dir, true);

@@ -711,8 +799,18 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
	}

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->dma_mapped)
		if (!a->dma_mapped) {
			trace_ion_begin_cpu_access_notmapped(a->dev,
							     dmabuf->name,
							     true, true,
							     dir,
							     false);
			continue;
		}

		trace_ion_begin_cpu_access_cmo_apply(a->dev, dmabuf->name,
						     true, true, dir,
						     false);

		ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents,
				   offset, len, dir, true);
@@ -733,6 +831,10 @@ static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
	int ret = 0;

	if (!hlos_accessible_buffer(buffer)) {
		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name,
						  ion_buffer_cached(buffer),
						  false, direction,
						  false);
		ret = -EPERM;
		goto out;
	}
@@ -743,14 +845,22 @@ static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
		mutex_unlock(&buffer->lock);
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
	if (!(buffer->flags & ION_FLAG_CACHED)) {
		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
						  true, direction,
						  false);
		goto out;
	}

	mutex_lock(&buffer->lock);
	if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
		struct device *dev = buffer->heap->priv;
		struct sg_table *table = buffer->sg_table;

		trace_ion_end_cpu_access_cmo_apply(dev, dmabuf->name,
						   true, true, direction,
						   false);

		ion_sgl_sync_range(dev, table->sgl, table->nents,
				   offset, len, direction, false);

@@ -759,8 +869,18 @@ static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
	}

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->dma_mapped)
		if (!a->dma_mapped) {
			trace_ion_end_cpu_access_notmapped(a->dev,
							   dmabuf->name,
							   true, true,
							   direction,
							   false);
			continue;
		}

		trace_ion_end_cpu_access_cmo_apply(a->dev, dmabuf->name,
						   true, true, direction,
						   false);

		ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents,
				   offset, len, direction, false);
+180 −0
Original line number Diff line number Diff line
/* Copyright (c) 2018 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM ion

#if !defined(_TRACE_ION_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_ION_H

#include <linux/types.h>
#include <linux/tracepoint.h>

#define DEV_NAME_NONE "None"

/*
 * ion_dma_map_cmo_class - event class shared by the ION dma_buf map/unmap
 * cache-maintenance (CMO) tracepoints.
 *
 * Records whether cache maintenance was applied or skipped when a buffer
 * is DMA-mapped or unmapped, together with the context needed to explain
 * the decision:
 *   dev_name        - name of the mapping device, or "None" if @dev is NULL
 *   name            - dma_buf name of the buffer
 *   cached          - true if the ION buffer is CPU-cached
 *   hlos_accessible - true if the buffer is accessible to the HLOS
 *   map_attrs       - DMA_ATTR_* flags in effect for the mapping
 *   dir             - DMA data direction of the transfer
 */
DECLARE_EVENT_CLASS(ion_dma_map_cmo_class,

	TP_PROTO(const struct device *dev, const char *name,
		 bool cached, bool hlos_accessible, unsigned long map_attrs,
		 enum dma_data_direction dir),

	TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir),

	TP_STRUCT__entry(
		/* @dev may be NULL (e.g. skip events with no device context) */
		__string(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE)
		__string(name, name)
		__field(bool, cached)
		__field(bool, hlos_accessible)
		__field(unsigned long, map_attrs)
		__field(enum dma_data_direction, dir)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE);
		__assign_str(name, name);
		__entry->cached = cached;
		__entry->hlos_accessible = hlos_accessible;
		__entry->map_attrs = map_attrs;
		__entry->dir = dir;
	),

	TP_printk("dev=%s name=%s cached=%d access=%d map_attrs=0x%lx dir=%d",
		__get_str(dev_name),
		__get_str(name),
		__entry->cached,
		__entry->hlos_accessible,
		__entry->map_attrs,
		__entry->dir)
);

/* Emitted when cache maintenance IS applied during dma_buf map. */
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_map_cmo_apply,

	TP_PROTO(const struct device *dev, const char *name,
		 bool cached, bool hlos_accessible, unsigned long map_attrs,
		 enum dma_data_direction dir),

	TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir)
);

/* Emitted when cache maintenance is SKIPPED during dma_buf map. */
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_map_cmo_skip,

	TP_PROTO(const struct device *dev, const char *name,
		 bool cached, bool hlos_accessible, unsigned long map_attrs,
		 enum dma_data_direction dir),

	TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir)
);

/* Emitted when cache maintenance IS applied during dma_buf unmap. */
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_unmap_cmo_apply,

	TP_PROTO(const struct device *dev, const char *name,
		 bool cached, bool hlos_accessible, unsigned long map_attrs,
		 enum dma_data_direction dir),

	TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir)
);

/* Emitted when cache maintenance is SKIPPED during dma_buf unmap. */
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_unmap_cmo_skip,

	TP_PROTO(const struct device *dev, const char *name,
		 bool cached, bool hlos_accessible, unsigned long map_attrs,
		 enum dma_data_direction dir),

	TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir)
);

/*
 * ion_access_cmo_class - event class shared by the ION begin/end CPU-access
 * cache-maintenance (CMO) tracepoints.
 *
 * Records whether cache maintenance was applied or skipped for a CPU-access
 * transition on a dma_buf, together with the context needed to explain
 * the decision:
 *   dev_name        - name of the attached device, or "None" if @dev is NULL
 *   name            - dma_buf name of the buffer
 *   cached          - true if the ION buffer is CPU-cached
 *   hlos_accessible - true if the buffer is accessible to the HLOS
 *   dir             - DMA data direction of the access
 *   only_mapped     - true when maintenance is restricted to currently
 *                     dma-mapped regions (sync_only_mapped path)
 */
DECLARE_EVENT_CLASS(ion_access_cmo_class,

	TP_PROTO(const struct device *dev, const char *name,
		 bool cached, bool hlos_accessible, enum dma_data_direction dir,
		 bool only_mapped),

	TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped),

	TP_STRUCT__entry(
		/* @dev may be NULL (e.g. skip events with no device context) */
		__string(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE)
		__string(name, name)
		__field(bool, cached)
		__field(bool, hlos_accessible)
		__field(enum dma_data_direction, dir)
		__field(bool, only_mapped)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE);
		__assign_str(name, name);
		__entry->cached = cached;
		__entry->hlos_accessible = hlos_accessible;
		__entry->dir = dir;
		__entry->only_mapped = only_mapped;
	),

	/*
	 * Space-separated key=value pairs, matching ion_dma_map_cmo_class
	 * (the original had a stray comma before only_mapped).
	 */
	TP_printk("dev=%s name=%s cached=%d access=%d dir=%d only_mapped=%d",
		  __get_str(dev_name),
		  __get_str(name),
		  __entry->cached,
		  __entry->hlos_accessible,
		  __entry->dir,
		  __entry->only_mapped)
);

/* Emitted when cache maintenance IS applied in begin_cpu_access. */
DEFINE_EVENT(ion_access_cmo_class, ion_begin_cpu_access_cmo_apply,
	TP_PROTO(const struct device *dev, const char *name,
		 bool cached, bool hlos_accessible, enum dma_data_direction dir,
		 bool only_mapped),

	TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
);

/* Emitted when cache maintenance is SKIPPED in begin_cpu_access. */
DEFINE_EVENT(ion_access_cmo_class, ion_begin_cpu_access_cmo_skip,
	TP_PROTO(const struct device *dev, const char *name,
		 bool cached, bool hlos_accessible, enum dma_data_direction dir,
		 bool only_mapped),

	TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
);

/* Emitted in begin_cpu_access for attachments that are not dma-mapped. */
DEFINE_EVENT(ion_access_cmo_class, ion_begin_cpu_access_notmapped,
	TP_PROTO(const struct device *dev, const char *name,
		 bool cached, bool hlos_accessible, enum dma_data_direction dir,
		 bool only_mapped),

	TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
);

/* Emitted when cache maintenance IS applied in end_cpu_access. */
DEFINE_EVENT(ion_access_cmo_class, ion_end_cpu_access_cmo_apply,
	TP_PROTO(const struct device *dev, const char *name,
		 bool cached, bool hlos_accessible, enum dma_data_direction dir,
		 bool only_mapped),

	TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
);

/* Emitted when cache maintenance is SKIPPED in end_cpu_access. */
DEFINE_EVENT(ion_access_cmo_class, ion_end_cpu_access_cmo_skip,
	TP_PROTO(const struct device *dev, const char *name,
		 bool cached, bool hlos_accessible, enum dma_data_direction dir,
		 bool only_mapped),

	TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
);

/* Emitted in end_cpu_access for attachments that are not dma-mapped. */
DEFINE_EVENT(ion_access_cmo_class, ion_end_cpu_access_notmapped,
	TP_PROTO(const struct device *dev, const char *name,
		 bool cached, bool hlos_accessible, enum dma_data_direction dir,
		 bool only_mapped),

	TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
);
#endif /* _TRACE_ION_H */

#include <trace/define_trace.h>