Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ca2b84cb authored by Eduard - Gabriel Munteanu, committed by Ingo Molnar
Browse files

kmemtrace: use tracepoints



kmemtrace now uses tracepoints instead of markers. We no longer need to
use format specifiers to pass arguments.

Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
[ folded: Use the new TP_PROTO and TP_ARGS to fix the build.     ]
[ folded: fix build when CONFIG_KMEMTRACE is disabled.           ]
[ folded: define tracepoints when CONFIG_TRACEPOINTS is enabled. ]
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
LKML-Reference: <ae61c0f37156db8ec8dc0d5778018edde60a92e3.1237813499.git.eduard.munteanu@linux360.ro>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ac44021f
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -73,7 +73,7 @@ found:

		ret = kmem_cache_alloc_notrace(cachep, flags);

		kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
		trace_kmalloc(_THIS_IP_, ret,
			      size, slab_buffer_size(cachep), flags);

		return ret;
@@ -128,8 +128,8 @@ found:

		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);

		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
					  ret, size, slab_buffer_size(cachep),
		trace_kmalloc_node(_THIS_IP_, ret,
				   size, slab_buffer_size(cachep),
				   flags, node);

		return ret;
+4 −8
Original line number Diff line number Diff line
@@ -233,8 +233,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
	unsigned int order = get_order(size);
	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);

	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
			     size, PAGE_SIZE << order, flags);
	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);

	return ret;
}
@@ -255,9 +254,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)

			ret = kmem_cache_alloc_notrace(s, flags);

			kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
					     _THIS_IP_, ret,
					     size, s->size, flags);
			trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);

			return ret;
		}
@@ -296,8 +293,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)

		ret = kmem_cache_alloc_node_notrace(s, flags, node);

		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
					  _THIS_IP_, ret,
		trace_kmalloc_node(_THIS_IP_, ret,
				   size, s->size, flags, node);

		return ret;
+40 −52
Original line number Diff line number Diff line
@@ -9,65 +9,53 @@

#ifdef __KERNEL__

#include <linux/tracepoint.h>
#include <linux/types.h>
#include <linux/marker.h>

/*
 * Which allocator family an event came from; recorded in trace entries
 * so kmalloc-style and kmem_cache-style events can be told apart.
 */
enum kmemtrace_type_id {
	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
};

#ifdef CONFIG_KMEMTRACE

extern void kmemtrace_init(void);

extern void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
					     unsigned long call_site,
					     const void *ptr,
					     size_t bytes_req,
					     size_t bytes_alloc,
					     gfp_t gfp_flags,
					     int node);

extern void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
				       unsigned long call_site,
				       const void *ptr);

#else /* CONFIG_KMEMTRACE */

#else
static inline void kmemtrace_init(void)
{
}
#endif

static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
					     unsigned long call_site,
/*
 * Tracepoint fired from the kmalloc() paths: reports call site, the
 * returned pointer, requested vs. actually-allocated size, and GFP flags.
 */
DECLARE_TRACE(kmalloc,
	      TP_PROTO(unsigned long call_site,
		      const void *ptr,
		      size_t bytes_req,
		      size_t bytes_alloc,
		      gfp_t gfp_flags),
	      TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));
/*
 * Tracepoint for kmem_cache_alloc(): same payload as the kmalloc
 * tracepoint, but for slab-cache allocations.
 */
DECLARE_TRACE(kmem_cache_alloc,
	      TP_PROTO(unsigned long call_site,
		      const void *ptr,
		      size_t bytes_req,
		      size_t bytes_alloc,
		      gfp_t gfp_flags),
	      TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));
DECLARE_TRACE(kmalloc_node,
	      TP_PROTO(unsigned long call_site,
		      const void *ptr,
		      size_t bytes_req,
		      size_t bytes_alloc,
		      gfp_t gfp_flags,
					     int node)
{
}

static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
				       unsigned long call_site,
				       const void *ptr)
{
}

#endif /* CONFIG_KMEMTRACE */

static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
					unsigned long call_site,
		      int node),
	      TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));
DECLARE_TRACE(kmem_cache_alloc_node,
	      TP_PROTO(unsigned long call_site,
		      const void *ptr,
		      size_t bytes_req,
		      size_t bytes_alloc,
					gfp_t gfp_flags)
{
	kmemtrace_mark_alloc_node(type_id, call_site, ptr,
				  bytes_req, bytes_alloc, gfp_flags, -1);
}
		      gfp_t gfp_flags,
		      int node),
	      TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));
/* Tracepoint for kfree(): only the call site and freed pointer. */
DECLARE_TRACE(kfree,
	      TP_PROTO(unsigned long call_site, const void *ptr),
	      TP_ARGS(call_site, ptr));
/* Tracepoint for kmem_cache_free(): same payload as the kfree one. */
DECLARE_TRACE(kmem_cache_free,
	      TP_PROTO(unsigned long call_site, const void *ptr),
	      TP_ARGS(call_site, ptr));

#endif /* __KERNEL__ */

+145 −61
Original line number Diff line number Diff line
@@ -10,6 +10,7 @@
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/tracepoint.h>
#include <trace/kmemtrace.h>

#include "trace.h"
@@ -29,10 +30,150 @@ static struct tracer_flags kmem_tracer_flags = {
	.opts = kmem_opts
};


static bool kmem_tracing_enabled __read_mostly;
static struct trace_array *kmemtrace_array;

/*
 * Trace allocations: emit one TRACE_KMEM_ALLOC entry into the kmemtrace
 * ring buffer.
 *
 * @type_id:     allocator family of the event.
 *               NOTE(review): not stored into the entry here — confirm
 *               whether kmemtrace_alloc_entry.type_id should be set.
 * @call_site:   address of the code that requested the allocation.
 * @ptr:         address of the allocated object.
 * @bytes_req:   size the caller asked for.
 * @bytes_alloc: size actually handed out by the allocator.
 * @gfp_flags:   allocation flags used.
 * @node:        NUMA node, or -1 when callers have no node preference.
 *
 * The event is silently dropped if ring buffer reservation fails.
 */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
				   unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	struct ring_buffer_event *event;
	struct kmemtrace_alloc_entry *entry;
	struct trace_array *tr = kmemtrace_array;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;		/* buffer full or disabled: drop the event */
	entry	= ring_buffer_event_data(event);
	/* Fill in the common trace_entry header (flags = 0, pc = 0). */
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type = TRACE_KMEM_ALLOC;
	entry->call_site = call_site;
	entry->ptr = ptr;
	entry->bytes_req = bytes_req;
	entry->bytes_alloc = bytes_alloc;
	entry->gfp_flags = gfp_flags;
	entry->node	=	node;

	ring_buffer_unlock_commit(tr->buffer, event);

	/* Presumably wakes readers blocked on the trace — name-based. */
	trace_wake_up();
}

/*
 * Trace frees: emit one TRACE_KMEM_FREE entry into the kmemtrace
 * ring buffer.
 *
 * @type_id:   allocator family of the event (stored in the entry).
 * @call_site: address of the code that freed the object.
 * @ptr:       address of the freed object.
 *
 * The event is silently dropped if ring buffer reservation fails.
 */
static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
				  unsigned long call_site,
				  const void *ptr)
{
	struct ring_buffer_event *event;
	struct kmemtrace_free_entry *entry;
	struct trace_array *tr = kmemtrace_array;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;		/* buffer full or disabled: drop the event */
	entry	= ring_buffer_event_data(event);
	/* Fill in the common trace_entry header (flags = 0, pc = 0). */
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type = TRACE_KMEM_FREE;
	entry->type_id	= type_id;
	entry->call_site = call_site;
	entry->ptr = ptr;

	ring_buffer_unlock_commit(tr->buffer, event);

	/* Presumably wakes readers blocked on the trace — name-based. */
	trace_wake_up();
}

/*
 * Probe attached to the kmalloc tracepoint: log the allocation as a
 * KMALLOC-family event with no NUMA node preference (-1).
 */
static void kmemtrace_kmalloc(unsigned long call_site, const void *ptr,
			      size_t bytes_req, size_t bytes_alloc,
			      gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr, bytes_req,
			bytes_alloc, gfp_flags, -1);
}

/*
 * Probe attached to the kmem_cache_alloc tracepoint: log the allocation
 * as a CACHE-family event with no NUMA node preference (-1).
 */
static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
				       const void *ptr, size_t bytes_req,
				       size_t bytes_alloc, gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr, bytes_req,
			bytes_alloc, gfp_flags, -1);
}

/*
 * Probe attached to the kmalloc_node tracepoint: log a KMALLOC-family
 * event bound to the requested NUMA @node.
 */
static void kmemtrace_kmalloc_node(unsigned long call_site, const void *ptr,
				   size_t bytes_req, size_t bytes_alloc,
				   gfp_t gfp_flags, int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr, bytes_req,
			bytes_alloc, gfp_flags, node);
}

/*
 * Probe attached to the kmem_cache_alloc_node tracepoint: log a
 * CACHE-family event bound to the requested NUMA @node.
 */
static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
					    const void *ptr, size_t bytes_req,
					    size_t bytes_alloc,
					    gfp_t gfp_flags, int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr, bytes_req,
			bytes_alloc, gfp_flags, node);
}

/* Probe for the kfree tracepoint; kfree() events carry the KMALLOC id. */
static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_KMALLOC,
		       call_site, ptr);
}

/* Probe for the kmem_cache_free tracepoint (CACHE-family event). */
static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_CACHE,
		       call_site, ptr);
}

/*
 * Attach the kmemtrace probe functions to the allocator tracepoints.
 *
 * Returns 0 on success, or the negative error from the failing
 * register_trace_*() call.  On failure every probe registered so far is
 * unregistered again, so no stale probes are left attached (the original
 * code returned early and leaked the earlier registrations).
 */
static int kmemtrace_start_probes(void)
{
	int err;

	err = register_trace_kmalloc(kmemtrace_kmalloc);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	if (err)
		goto out_kmalloc;
	err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
	if (err)
		goto out_cache_alloc;
	err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	if (err)
		goto out_kmalloc_node;
	err = register_trace_kfree(kmemtrace_kfree);
	if (err)
		goto out_cache_alloc_node;
	err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
	if (err)
		goto out_kfree;

	return 0;

	/* Unwind in reverse order of registration. */
out_kfree:
	unregister_trace_kfree(kmemtrace_kfree);
out_cache_alloc_node:
	unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
out_kmalloc_node:
	unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
out_cache_alloc:
	unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
out_kmalloc:
	unregister_trace_kmalloc(kmemtrace_kmalloc);
	return err;
}

/*
 * Detach all six kmemtrace probes from their tracepoints; the
 * counterpart of kmemtrace_start_probes().
 */
static void kmemtrace_stop_probes(void)
{
	unregister_trace_kmalloc(kmemtrace_kmalloc);
	unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
	unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	unregister_trace_kfree(kmemtrace_kfree);
	unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
}

static int kmem_trace_init(struct trace_array *tr)
{
	int cpu;
@@ -41,14 +182,14 @@ static int kmem_trace_init(struct trace_array *tr)
	for_each_cpu_mask(cpu, cpu_possible_map)
		tracing_reset(tr, cpu);

	kmem_tracing_enabled = true;
	kmemtrace_start_probes();

	return 0;
}

/*
 * Tracer teardown hook.
 * NOTE(review): this span appears to be an unmarked diff — the
 * kmem_tracing_enabled store looks like the pre-patch line and the
 * kmemtrace_stop_probes() call its replacement; confirm against the
 * applied tree before relying on this body as-is.
 */
static void kmem_trace_reset(struct trace_array *tr)
{
	kmem_tracing_enabled = false;
	kmemtrace_stop_probes();
}

static void kmemtrace_headers(struct seq_file *s)
@@ -260,63 +401,6 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
	}
}

/*
 * Trace allocations — legacy marker-era entry point, removed by this
 * patch in favor of the trace_kmalloc*/trace_kmem_cache_alloc*
 * tracepoints.  Writes one TRACE_KMEM_ALLOC entry; a no-op unless
 * kmem_tracing_enabled is set.
 *
 * NOTE(review): @type_id is accepted but never stored in the entry.
 */
void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
			     unsigned long call_site,
			     const void *ptr,
			     size_t bytes_req,
			     size_t bytes_alloc,
			     gfp_t gfp_flags,
			     int node)
{
	struct ring_buffer_event *event;
	struct kmemtrace_alloc_entry *entry;
	struct trace_array *tr = kmemtrace_array;

	if (!kmem_tracing_enabled)
		return;

	event = trace_buffer_lock_reserve(tr, TRACE_KMEM_ALLOC,
					  sizeof(*entry), 0, 0);
	if (!event)
		return;		/* reservation failed: drop the event */
	entry	= ring_buffer_event_data(event);

	entry->call_site = call_site;
	entry->ptr = ptr;
	entry->bytes_req = bytes_req;
	entry->bytes_alloc = bytes_alloc;
	entry->gfp_flags = gfp_flags;
	entry->node	=	node;

	trace_buffer_unlock_commit(tr, event, 0, 0);
}
EXPORT_SYMBOL(kmemtrace_mark_alloc_node);

/*
 * Trace frees — legacy marker-era entry point, removed by this patch in
 * favor of the trace_kfree/trace_kmem_cache_free tracepoints.  Writes
 * one TRACE_KMEM_FREE entry; a no-op unless kmem_tracing_enabled is set.
 */
void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
		       unsigned long call_site,
		       const void *ptr)
{
	struct ring_buffer_event *event;
	struct kmemtrace_free_entry *entry;
	struct trace_array *tr = kmemtrace_array;

	if (!kmem_tracing_enabled)
		return;

	event = trace_buffer_lock_reserve(tr, TRACE_KMEM_FREE,
					  sizeof(*entry), 0, 0);
	if (!event)
		return;		/* reservation failed: drop the event */
	entry	= ring_buffer_event_data(event);
	entry->type_id	= type_id;
	entry->call_site = call_site;
	entry->ptr = ptr;

	trace_buffer_unlock_commit(tr, event, 0, 0);
}
EXPORT_SYMBOL(kmemtrace_mark_free);

static struct tracer kmem_tracer __read_mostly = {
	.name		= "kmemtrace",
	.init		= kmem_trace_init,
+6 −0
Original line number Diff line number Diff line
@@ -182,6 +182,12 @@ struct trace_power {
	struct power_trace	state_data;
};

/*
 * Allocator family recorded in kmemtrace entries (moved here into
 * trace.h by this patch); distinguishes kmalloc-style from
 * kmem_cache-style events.
 */
enum kmemtrace_type_id {
	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
};

struct kmemtrace_alloc_entry {
	struct trace_entry	ent;
	enum kmemtrace_type_id type_id;
Loading