
Commit df95e795 authored by Dennis Zhou, committed by Tejun Heo

percpu: add tracepoint support for percpu memory



Add tracepoints for the following events: chunk allocation, chunk free,
area allocation, area free, and area allocation failure. These events
should let us replay percpu memory requests and evaluate the allocator's
corresponding decisions.

Signed-off-by: Dennis Zhou <dennisz@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 30a5b536
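
Editor's note: the new events map directly onto the allocator's public entry points. alloc_percpu()/__alloc_percpu() reach pcpu_alloc(), which fires percpu_alloc_percpu on success and percpu_alloc_percpu_fail on failure; free_percpu() fires percpu_free_percpu; and the chunk events fire whenever the allocator creates or destroys a chunk (including the first chunk at boot). As a rough, hypothetical illustration only (this module and names such as percpu_trace_demo are not part of the commit), a trivial module exercising the area events might look like:

/*
 * Hypothetical example (not part of this commit): a trivial module that
 * exercises the new area events.  All names are invented for illustration.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

static u64 __percpu *demo_counter;

static int __init percpu_trace_demo_init(void)
{
	/*
	 * alloc_percpu() reaches pcpu_alloc(), which fires
	 * percpu_alloc_percpu on success or percpu_alloc_percpu_fail on
	 * failure.
	 */
	demo_counter = alloc_percpu(u64);
	return demo_counter ? 0 : -ENOMEM;
}

static void __exit percpu_trace_demo_exit(void)
{
	/* free_percpu() fires percpu_free_percpu. */
	free_percpu(demo_counter);
}

module_init(percpu_trace_demo_init);
module_exit(percpu_trace_demo_exit);
MODULE_LICENSE("GPL");

With the percpu events enabled in the tracing filesystem (they appear under an events/percpu/ group, since TRACE_SYSTEM is percpu), each load/unload cycle of such a module should produce one percpu_alloc_percpu and one percpu_free_percpu record, plus a percpu_create_chunk record if the allocation happened to require a new chunk.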
include/trace/events/percpu.h  +125 −0
#undef TRACE_SYSTEM
#define TRACE_SYSTEM percpu

#if !defined(_TRACE_PERCPU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_PERCPU_H

#include <linux/tracepoint.h>

TRACE_EVENT(percpu_alloc_percpu,

	TP_PROTO(bool reserved, bool is_atomic, size_t size,
		 size_t align, void *base_addr, int off, void __percpu *ptr),

	TP_ARGS(reserved, is_atomic, size, align, base_addr, off, ptr),

	TP_STRUCT__entry(
		__field(	bool,			reserved	)
		__field(	bool,			is_atomic	)
		__field(	size_t,			size		)
		__field(	size_t,			align		)
		__field(	void *,			base_addr	)
		__field(	int,			off		)
		__field(	void __percpu *,	ptr		)
	),

	TP_fast_assign(
		__entry->reserved	= reserved;
		__entry->is_atomic	= is_atomic;
		__entry->size		= size;
		__entry->align		= align;
		__entry->base_addr	= base_addr;
		__entry->off		= off;
		__entry->ptr		= ptr;
	),

	TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p",
		  __entry->reserved, __entry->is_atomic,
		  __entry->size, __entry->align,
		  __entry->base_addr, __entry->off, __entry->ptr)
);

TRACE_EVENT(percpu_free_percpu,

	TP_PROTO(void *base_addr, int off, void __percpu *ptr),

	TP_ARGS(base_addr, off, ptr),

	TP_STRUCT__entry(
		__field(	void *,			base_addr	)
		__field(	int,			off		)
		__field(	void __percpu *,	ptr		)
	),

	TP_fast_assign(
		__entry->base_addr	= base_addr;
		__entry->off		= off;
		__entry->ptr		= ptr;
	),

	TP_printk("base_addr=%p off=%d ptr=%p",
		__entry->base_addr, __entry->off, __entry->ptr)
);

TRACE_EVENT(percpu_alloc_percpu_fail,

	TP_PROTO(bool reserved, bool is_atomic, size_t size, size_t align),

	TP_ARGS(reserved, is_atomic, size, align),

	TP_STRUCT__entry(
		__field(	bool,	reserved	)
		__field(	bool,	is_atomic	)
		__field(	size_t,	size		)
		__field(	size_t, align		)
	),

	TP_fast_assign(
		__entry->reserved	= reserved;
		__entry->is_atomic	= is_atomic;
		__entry->size		= size;
		__entry->align		= align;
	),

	TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu",
		  __entry->reserved, __entry->is_atomic,
		  __entry->size, __entry->align)
);

TRACE_EVENT(percpu_create_chunk,

	TP_PROTO(void *base_addr),

	TP_ARGS(base_addr),

	TP_STRUCT__entry(
		__field(	void *, base_addr	)
	),

	TP_fast_assign(
		__entry->base_addr	= base_addr;
	),

	TP_printk("base_addr=%p", __entry->base_addr)
);

TRACE_EVENT(percpu_destroy_chunk,

	TP_PROTO(void *base_addr),

	TP_ARGS(base_addr),

	TP_STRUCT__entry(
		__field(	void *,	base_addr	)
	),

	TP_fast_assign(
		__entry->base_addr	= base_addr;
	),

	TP_printk("base_addr=%p", __entry->base_addr)
);

#endif /* _TRACE_PERCPU_H */

#include <trace/define_trace.h>
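
Editor's note: besides the ftrace text output formatted by TP_printk, the TRACE_EVENT definitions above also generate register_trace_<event>()/unregister_trace_<event>() hooks, which is what makes the "replay requests and evaluate decisions" goal from the commit message practical: a probe can record every allocation request as it happens. A rough sketch follows; the probe and initcall names are invented, and because this commit does not export the tracepoint symbols, the direct register_trace_* form shown assumes built-in kernel code rather than a loadable module.

/*
 * Rough sketch (not part of this commit): registering a probe on the
 * percpu_alloc_percpu event from built-in kernel code.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <trace/events/percpu.h>

/* Probe signature: the TP_PROTO arguments prefixed with a void *data cookie. */
static void percpu_alloc_probe(void *data, bool reserved, bool is_atomic,
			       size_t size, size_t align, void *base_addr,
			       int off, void __percpu *ptr)
{
	/* A replay tool would record size/align/flags here. */
	pr_debug("percpu alloc: size=%zu align=%zu atomic=%d reserved=%d\n",
		 size, align, is_atomic, reserved);
}

static int __init percpu_alloc_probe_init(void)
{
	/* register_trace_<event>() is generated by the header above. */
	return register_trace_percpu_alloc_percpu(percpu_alloc_probe, NULL);
}
late_initcall(percpu_alloc_probe_init);

The leading void *data argument is the cookie passed at registration time (NULL here); the rest of the probe's signature must mirror TP_PROTO exactly.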
mm/percpu-km.c  +2 −0
@@ -73,6 +73,7 @@ static struct pcpu_chunk *pcpu_create_chunk(void)
 	spin_unlock_irq(&pcpu_lock);
 
 	pcpu_stats_chunk_alloc();
+	trace_percpu_create_chunk(chunk->base_addr);
 
 	return chunk;
 }
@@ -82,6 +83,7 @@ static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
 	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
 
 	pcpu_stats_chunk_dealloc();
+	trace_percpu_destroy_chunk(chunk->base_addr);
 
 	if (chunk && chunk->data)
 		__free_pages(chunk->data, order_base_2(nr_pages));
mm/percpu-vm.c  +2 −0
@@ -345,6 +345,7 @@ static struct pcpu_chunk *pcpu_create_chunk(void)
 	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
 
 	pcpu_stats_chunk_alloc();
+	trace_percpu_create_chunk(chunk->base_addr);
 
 	return chunk;
 }
@@ -352,6 +353,7 @@ static struct pcpu_chunk *pcpu_create_chunk(void)
 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
 {
 	pcpu_stats_chunk_dealloc();
+	trace_percpu_destroy_chunk(chunk->base_addr);
 
 	if (chunk && chunk->data)
 		pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
mm/percpu.c  +12 −0
@@ -76,6 +76,9 @@
 #include <asm/tlbflush.h>
 #include <asm/io.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/percpu.h>
+
 #include "percpu-internal.h"
 
 #define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
@@ -1015,11 +1018,17 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
 	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
 	kmemleak_alloc_percpu(ptr, size, gfp);
+
+	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
+			chunk->base_addr, off, ptr);
+
 	return ptr;
 
 fail_unlock:
 	spin_unlock_irqrestore(&pcpu_lock, flags);
 fail:
+	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
+
 	if (!is_atomic && warn_limit) {
 		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
 			size, align, is_atomic, err);
@@ -1269,6 +1278,8 @@ void free_percpu(void __percpu *ptr)
 			}
 	}
 
+	trace_percpu_free_percpu(chunk->base_addr, off, ptr);
+
 	spin_unlock_irqrestore(&pcpu_lock, flags);
 }
 EXPORT_SYMBOL_GPL(free_percpu);
@@ -1719,6 +1730,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
 
 	pcpu_stats_chunk_alloc();
+	trace_percpu_create_chunk(base_addr);
 
 	/* we're done */
 	pcpu_base_addr = base_addr;