
Commit 5b882be4 authored by Eduard - Gabriel Munteanu, committed by Pekka Enberg

kmemtrace: SLUB hooks.



This adds hooks for the SLUB allocator, to allow tracing with kmemtrace.

Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
parent 3eae2cb2
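
The change applies one pattern to every SLUB entry point: perform the allocation as before, capture the returned pointer, and report the event to kmemtrace before returning; the free paths report the pointer being released. A condensed sketch of that shape, using the signatures visible in the diff below (illustrative only, not additional patch content):

/* The hook pattern this patch applies to each allocator entry point.
 * kmemtrace_mark_alloc() records: event type, call site, object,
 * bytes requested, bytes actually allocated, and the GFP flags;
 * kmemtrace_mark_free() records the event type, call site, and object. */
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);

	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
			     s->objsize, s->size, gfpflags);

	return ret;
}

void kmem_cache_free(struct kmem_cache *s, void *x)
{
	slab_free(s, virt_to_head_page(x), x, _RET_IP_);

	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
}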
include/linux/slub_def.h +50 −3
@@ -10,6 +10,7 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
+#include <linux/kmemtrace.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -204,13 +205,31 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return kmem_cache_alloc(s, gfpflags);
+}
+#endif
+
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
-	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+	unsigned int order = get_order(size);
+	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+			     size, PAGE_SIZE << order, flags);
+
+	return ret;
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
+	void *ret;
+
 	if (__builtin_constant_p(size)) {
 		if (size > PAGE_SIZE)
 			return kmalloc_large(size, flags);
@@ -221,7 +240,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 			if (!s)
 				return ZERO_SIZE_PTR;
 
-			return kmem_cache_alloc(s, flags);
+			ret = kmem_cache_alloc_notrace(s, flags);
+
+			kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+					     _THIS_IP_, ret,
+					     size, s->size, flags);
+
+			return ret;
 		}
 	}
 	return __kmalloc(size, flags);
@@ -231,8 +256,24 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+					   gfp_t gfpflags,
+					   int node);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+			      gfp_t gfpflags,
+			      int node)
+{
+	return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif
+
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+	void *ret;
+
 	if (__builtin_constant_p(size) &&
 		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
@@ -240,7 +281,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 		if (!s)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc_node(s, flags, node);
+		ret = kmem_cache_alloc_node_notrace(s, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _THIS_IP_, ret,
+					  size, s->size, flags, node);
+
+		return ret;
 	}
 	return __kmalloc_node(size, flags, node);
 }
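
One subtlety in the header changes above: the inline kmalloc()/kmalloc_node() fast paths now call the new _notrace variants instead of kmem_cache_alloc()/kmem_cache_alloc_node(). With CONFIG_KMEMTRACE=y those two functions themselves emit a KMEMTRACE_TYPE_CACHE event (see the mm/slub.c hunks below), so calling them from the inline wrapper would log each allocation twice and would not capture the caller's requested size; the _notrace variant allocates silently, and the wrapper emits a single KMEMTRACE_TYPE_KMALLOC event carrying both the requested size and the slab's object size. With CONFIG_KMEMTRACE unset, the _notrace helpers simply fall back to the plain calls. A minimal sketch of what a constant-size kmalloc() expands to under CONFIG_KMEMTRACE=y (hypothetical caller, for illustration only):

/* Roughly what kmalloc(64, GFP_KERNEL) inlines to when the size is a
 * compile-time constant, <= PAGE_SIZE, and not a DMA allocation:
 * exactly one event is emitted, attributed to the caller's address. */
static void *example_caller(void)
{
	struct kmem_cache *s = kmalloc_slab(64);
	void *ret = kmem_cache_alloc_notrace(s, GFP_KERNEL);

	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
			     64, s->size, GFP_KERNEL);
	return ret;
}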
mm/slub.c +59 −6
@@ -24,6 +24,7 @@
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
+#include <linux/kmemtrace.h>
 
 /*
  * Lock order:
@@ -1613,18 +1614,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     s->objsize, s->size, gfpflags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  s->objsize, s->size, gfpflags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+				    gfp_t gfpflags,
+				    int node)
+{
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1732,6 +1761,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	page = virt_to_head_page(x);
 
 	slab_free(s, page, x, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2650,6 +2681,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, flags);
@@ -2659,7 +2691,12 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+			     size, s->size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2678,16 +2715,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
-		return kmalloc_large_node(size, flags, node);
+	if (unlikely(size > PAGE_SIZE)) {
+		ret = kmalloc_large_node(size, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << get_order(size),
+					  flags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc(s, flags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+				  size, s->size, flags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2745,6 +2796,8 @@ void kfree(const void *x)
 		return;
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);