
Commit 3eae2cb2 authored by Eduard - Gabriel Munteanu, committed by Pekka Enberg

kmemtrace: SLOB hooks.



This adds hooks for the SLOB allocator, to allow tracing with kmemtrace.

We also convert some inline functions to __always_inline to make sure
_RET_IP_, which expands to __builtin_return_address(0), always works
as expected.

Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
parent 36555751
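
For context on the __always_inline half of the change: _RET_IP_ evaluates to the return address of the function it appears in, so if the header wrappers below were emitted out of line, every allocation would be attributed to the wrapper itself rather than to its real caller. A minimal userspace sketch of the effect, not taken from this patch, assuming GCC's always_inline attribute and builtins:

#include <stddef.h>
#include <stdio.h>

/* Backend that records its immediate caller, the way __kmalloc_node()
 * records _RET_IP_ in this patch. */
static void *backend_alloc(size_t size)
{
	(void)size;
	printf("allocation attributed to call site %p\n",
	       __builtin_return_address(0));
	return NULL;
}

/* Wrapper, playing the role of kmalloc() in the header.  With plain
 * "static inline", GCC may still emit it out of line, in which case
 * backend_alloc() would see the wrapper as its caller.  Forcing
 * inlining merges the wrapper body into the real caller, so the
 * recorded call site is the one the trace actually wants. */
static inline __attribute__((always_inline)) void *wrapped_alloc(size_t size)
{
	return backend_alloc(size);
}

int main(void)
{
	wrapped_alloc(32);	/* attributed to main(), not wrapped_alloc() */
	return 0;
}

Note that always_inline does not depend on the optimization level, which is why it is a stronger guarantee here than plain inline.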
include/linux/slob_def.h  +5 −4
@@ -3,14 +3,15 @@
 
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
+					      gfp_t flags)
 {
 	return kmem_cache_alloc_node(cachep, flags, -1);
 }
 
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __kmalloc_node(size, flags, node);
 }
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  * kmalloc is the normal method of allocating memory
  * in the kernel.
  */
-static inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	return __kmalloc_node(size, flags, -1);
 }
 
-static inline void *__kmalloc(size_t size, gfp_t flags)
+static __always_inline void *__kmalloc(size_t size, gfp_t flags)
 {
 	return kmalloc(size, flags);
 }
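
Before the mm/slob.c hunks, a note on the API these hooks call. Each allocation event records the event type, the call site, the returned pointer, the bytes requested versus the bytes actually consumed, the GFP flags, and the NUMA node; frees record just the type, call site, and pointer. A sketch of the prototypes as inferred from the call sites in this patch; the authoritative declarations live in <linux/kmemtrace.h>:

#include <stddef.h>

/* Inferred from the call sites below; illustration only. */
enum kmemtrace_type_id {
	KMEMTRACE_TYPE_KMALLOC,		/* kmalloc()/kfree() events */
	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*() events */
};

void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
			       unsigned long call_site,	/* _RET_IP_ */
			       const void *ptr,		/* object handed back */
			       size_t bytes_req,	/* size the caller asked for */
			       size_t bytes_alloc,	/* size actually consumed */
			       unsigned int gfp_flags,	/* gfp_t in kernel code */
			       int node);		/* NUMA node, -1 for any */

void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
			 unsigned long call_site,
			 const void *ptr);

Reporting bytes_req and bytes_alloc separately is what lets kmemtrace's userspace side measure internal fragmentation per call site.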
mm/slob.c  +31 −6
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <linux/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -463,27 +464,38 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	void *ret;
 
 	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;
 
 		m = slob_alloc(size + align, gfp, align, node);
+
 		if (!m)
 			return NULL;
 		*m = size;
-		return (void *)m + align;
+		ret = (void *)m + align;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, size + align, gfp, node);
 	} else {
-		void *ret;
+		unsigned int order = get_order(size);
 
-		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+		ret = slob_new_page(gfp | __GFP_COMP, order, node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
 			page->private = size;
 		}
-		return ret;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << order, gfp, node);
 	}
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -501,6 +513,8 @@ void kfree(const void *block)
 		slob_free(m, *m + align);
 	} else
 		put_page(&sp->page);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -569,10 +583,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
-	if (c->size < PAGE_SIZE)
+	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-	else
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  SLOB_UNITS(c->size) * SLOB_UNIT,
+					  flags, node);
+	} else {
 		b = slob_new_page(flags, get_order(c->size), node);
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  PAGE_SIZE << get_order(c->size),
+					  flags, node);
+	}
 
 	if (c->ctor)
 		c->ctor(b);
@@ -608,6 +631,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
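
A closing note on the bytes_alloc values passed above: the small kmalloc() path charges size + align because SLOB prepends an align-sized header that stores the object size (the *m = size line); the kmem_cache path charges the request rounded up to whole SLOB free-list units; and both page-backed paths charge PAGE_SIZE << order. A small sketch of that arithmetic, with SLOB_UNIT and SLOB_UNITS paraphrased from mm/slob.c and an 8-byte unit assumed for illustration:

#include <stdio.h>

/* Paraphrased from mm/slob.c: each free-list unit is one slob_t, and
 * requests are rounded up to whole units.  SLOB_UNIT = sizeof(slob_t),
 * assumed to be 8 bytes here. */
#define SLOB_UNIT	8UL
#define SLOB_UNITS(size)	(((size) + SLOB_UNIT - 1) / SLOB_UNIT)

int main(void)
{
	unsigned long size = 100;	/* hypothetical request / c->size */
	unsigned long align = 8;	/* hypothetical ARCH_KMALLOC_MINALIGN */

	/* kmalloc path: request plus the prepended size header. */
	printf("kmalloc bytes_alloc: %lu\n", size + align);	/* 108 */

	/* kmem_cache path: request rounded up to whole SLOB units. */
	printf("cache bytes_alloc:   %lu\n",
	       SLOB_UNITS(size) * SLOB_UNIT);			/* 104 */
	return 0;
}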