
Commit a45b0616 authored by Pekka Enberg

Merge branch 'slab/next' into for-linus

parents 3c0eee3f 8165984a
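
This merge pulls the slab/next queue into the branch bound for Linus. Apart from dropping the slabinfo tool from the Documentation/vm build, the bulk of the diff inverts the CONFIG_TRACING hooks: the *_notrace() variants of kmem_cache_alloc() and kmem_cache_alloc_node() become *_trace() variants that take the requested size and fire trace_kmalloc()/trace_kmalloc_node() themselves. The always-inline kmalloc()/kmalloc_node() fast paths then collapse to a single call instead of expanding a tracepoint at every call site. A minimal userspace sketch of that pattern — hypothetical my_* names standing in for the kernel functions, with printf() as the stand-in tracepoint:

#include <stdio.h>
#include <stdlib.h>

static void *my_alloc(size_t size)		/* stand-in allocator */
{
	return malloc(size);
}

#ifdef CONFIG_TRACING
/* Tracing build: allocation and tracepoint live together in one
 * out-of-line function, so inlined callers stay small. */
static void *my_alloc_trace(size_t size)
{
	void *ret = my_alloc(size);

	printf("trace: alloc %zu -> %p\n", size, ret);	/* stand-in tracepoint */
	return ret;
}
#else
/* Non-tracing build: the wrapper collapses to the plain allocation. */
static inline void *my_alloc_trace(size_t size)
{
	return my_alloc(size);
}
#endif

static inline void *my_kmalloc(size_t size)
{
	/* One call either way; before this rework, every inlined call
	 * site also expanded the tracepoint arguments. */
	return my_alloc_trace(size);
}

int main(void)
{
	void *p = my_kmalloc(32);

	free(p);
	return 0;
}

Compile with -DCONFIG_TRACING to get the out-of-line traced path; without it the wrapper folds down to the bare allocation. Either way a call site emits one call, which is the code-size win the diffs below are after.
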
Documentation/vm/Makefile  +1 −1
@@ -2,7 +2,7 @@
 obj- := dummy.o
 
 # List of programs to build
-hostprogs-y := slabinfo page-types hugepage-mmap hugepage-shm map_hugetlb
+hostprogs-y := page-types hugepage-mmap hugepage-shm map_hugetlb
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
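
The Makefile change stands alone: slabinfo (the SLUB statistics tool) is dropped from the hostprogs list here — in this series it appears to move out of Documentation/vm into tools/ — while the page-types and hugepage example programs keep building as before.
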
include/linux/slab_def.h  +13 −20
@@ -138,11 +138,12 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern void *kmem_cache_alloc_trace(size_t size,
+				    struct kmem_cache *cachep, gfp_t flags);
 extern size_t slab_buffer_size(struct kmem_cache *cachep);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
@@ -179,10 +180,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 #endif
 			cachep = malloc_sizes[i].cs_cachep;
 
-		ret = kmem_cache_alloc_notrace(cachep, flags);
-
-		trace_kmalloc(_THIS_IP_, ret,
-			      size, slab_buffer_size(cachep), flags);
+		ret = kmem_cache_alloc_trace(size, cachep, flags);
 
 		return ret;
 	}
@@ -194,12 +192,14 @@ extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-					   gfp_t flags,
-					   int nodeid);
+extern void *kmem_cache_alloc_node_trace(size_t size,
+					 struct kmem_cache *cachep,
+					 gfp_t flags,
+					 int nodeid);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-			      gfp_t flags,
-			      int nodeid)
+kmem_cache_alloc_node_trace(size_t size,
+			    struct kmem_cache *cachep,
+			    gfp_t flags,
+			    int nodeid)
 {
@@ -210,7 +210,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *cachep;
-	void *ret;
 
 	if (__builtin_constant_p(size)) {
 		int i = 0;
@@ -234,13 +233,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 #endif
 			cachep = malloc_sizes[i].cs_cachep;
 
-		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-		trace_kmalloc_node(_THIS_IP_, ret,
-				   size, slab_buffer_size(cachep),
-				   flags, node);
-
-		return ret;
+		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 	}
 	return __kmalloc_node(size, flags, node);
 }
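
In this SLAB header, kmem_cache_alloc_trace() and kmem_cache_alloc_node_trace() now take the requested size so the out-of-line code can report it, and the constant-size branch of the inline kmalloc()/kmalloc_node() shrinks to a single tail call. That branch is guarded by __builtin_constant_p(), the compile-time dispatch that makes the inlining worthwhile. A standalone sketch of that idiom — hypothetical my_* names, with a toy power-of-two bucket search in place of the kernel's malloc_sizes[] scan:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static void *my_alloc_bucket(int bucket, size_t size)
{
	printf("compile-time path: bucket %d for %zu bytes\n", bucket, size);
	return malloc(size);
}

static void *my_alloc_generic(size_t size)
{
	printf("runtime path for %zu bytes\n", size);
	return malloc(size);
}

static inline void *my_kmalloc(size_t size)
{
	if (__builtin_constant_p(size)) {
		/* With optimization on, this whole search folds away and
		 * the call site becomes a direct call for its bucket. */
		int bucket = 0;
		size_t c = 8;

		while (c < size) {
			c <<= 1;
			bucket++;
		}
		return my_alloc_bucket(bucket, size);
	}
	return my_alloc_generic(size);
}

int main(void)
{
	size_t n = (size_t)(time(NULL) % 100) + 1;	/* not a compile-time constant */
	void *a = my_kmalloc(64);	/* literal size: bucket path */
	void *b = my_kmalloc(n);	/* runtime size: generic path */

	free(a);
	free(b);
	return 0;
}

Under -O0 the predicate can evaluate false even for the literal, so both calls may take the runtime path; the kernel forces the issue by marking these helpers __always_inline.
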
include/linux/slub_def.h  +26 −29
@@ -10,9 +10,8 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemleak.h>
 
-#include <trace/events/kmem.h>
+#include <linux/kmemleak.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -216,31 +215,40 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+	kmemleak_alloc(ret, size, 1, flags);
+	return ret;
+}
+
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+extern void *
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	return kmem_cache_alloc(s, gfpflags);
 }
+
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	return kmalloc_order(size, flags, order);
+}
 #endif
 
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
 	unsigned int order = get_order(size);
-	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
-
-	kmemleak_alloc(ret, size, 1, flags);
-	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
-
-	return ret;
+	return kmalloc_order_trace(size, flags, order);
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-	void *ret;
-
 	if (__builtin_constant_p(size)) {
 		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
@@ -251,11 +259,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 			if (!s)
 				return ZERO_SIZE_PTR;
 
-			ret = kmem_cache_alloc_notrace(s, flags);
-
-			trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
-
-			return ret;
+			return kmem_cache_alloc_trace(s, flags, size);
 		}
 	}
 	return __kmalloc(size, flags);
@@ -266,14 +270,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 					   gfp_t gfpflags,
-					   int node);
+					   int node, size_t size);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
 			      gfp_t gfpflags,
-			      int node)
+			      int node, size_t size)
 {
 	return kmem_cache_alloc_node(s, gfpflags, node);
 }
@@ -281,8 +285,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *s,
 
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	void *ret;
-
 	if (__builtin_constant_p(size) &&
 		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
@@ -290,12 +292,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 		if (!s)
 			return ZERO_SIZE_PTR;
 
-		ret = kmem_cache_alloc_node_notrace(s, flags, node);
-
-		trace_kmalloc_node(_THIS_IP_, ret,
-				   size, s->size, flags, node);
-
-		return ret;
+		return kmem_cache_alloc_node_trace(s, flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
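
The SLUB header gains kmalloc_order(), so the page-allocator path for large requests is shared by both configurations, and kmalloc_large() becomes a thin wrapper around kmalloc_order_trace(). Note what the tracepoint reports: both the requested size and PAGE_SIZE << order, the actual backing. A standalone sketch of that rounding — a hypothetical my_get_order() with a fixed 4 KiB page standing in for the kernel's get_order():

#include <stdio.h>

#define MY_PAGE_SIZE 4096UL	/* assume 4 KiB pages for the example */

/* Smallest order such that MY_PAGE_SIZE << order covers the request. */
static unsigned int my_get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((MY_PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 4096, 5000, 16384, 100000 };

	for (int i = 0; i < 4; i++) {
		unsigned int order = my_get_order(sizes[i]);

		printf("request %6lu -> order %u (%lu bytes backing)\n",
		       sizes[i], order, MY_PAGE_SIZE << order);
	}
	return 0;
}

So a 100000-byte request lands on an order-5 (131072-byte) block, and that larger figure is what trace_kmalloc() records as the allocated size.
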
mm/slab.c  +23 −15
@@ -3653,11 +3653,18 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+void *
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret;
+
+	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	trace_kmalloc(_RET_IP_, ret,
+		      size, slab_buffer_size(cachep), flags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
 /**
@@ -3705,31 +3712,32 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-				    gfp_t flags,
-				    int nodeid)
+void *kmem_cache_alloc_node_trace(size_t size,
+				  struct kmem_cache *cachep,
+				  gfp_t flags,
+				  int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
+	void *ret;
+
+	ret = __cache_alloc_node(cachep, flags, nodeid,
 				  __builtin_return_address(0));
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, slab_buffer_size(cachep),
+			   flags, nodeid);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
-	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-	trace_kmalloc_node((unsigned long) caller, ret,
-			   size, cachep->buffer_size, flags, node);
-
-	return ret;
+	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
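
With the tracepoints now inside out-of-line functions, mm/slab.c identifies the original call site through _RET_IP_ and __builtin_return_address(0) instead of _THIS_IP_ in an inline body: the return address of the *_trace() function is exactly the inlined caller. A minimal sketch of that attribution trick — hypothetical names, using the GCC/Clang builtin directly:

#include <stdio.h>
#include <stdlib.h>

/* noinline keeps a real call frame so the return address is the caller. */
static __attribute__((noinline)) void *my_alloc_trace(size_t size)
{
	void *ret = malloc(size);

	/* Stand-in for trace_kmalloc(_RET_IP_, ...): tag the allocation
	 * with the address of whoever called us. */
	printf("alloc %zu at call site %p -> %p\n",
	       size, __builtin_return_address(0), ret);
	return ret;
}

int main(void)
{
	void *p = my_alloc_trace(16);

	free(p);
	return 0;
}
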
mm/slub.c  +23 −7
@@ -28,6 +28,8 @@
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
 
+#include <trace/events/kmem.h>
+
 /*
  * Lock order:
  *   1. slab_lock(page)
@@ -1774,11 +1776,21 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
+
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret = kmalloc_order(size, flags, order);
+	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_NUMA
@@ -1794,13 +1806,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 				    gfp_t gfpflags,
-				    int node)
+				    int node, size_t size)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, s->size, gfpflags, node);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 #endif
 
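Finally, mm/slub.c picks up the direct #include <trace/events/kmem.h> that left slub_def.h, so the tracepoint definitions stop leaking into every file that includes the allocator header, and it exports the new kmalloc_order_trace() used by the inline kmalloc_large(). Net effect across both allocators: the same trace data (requested size, backing size, gfp flags, node) is still emitted under CONFIG_TRACING, but from a handful of exported functions instead of every inlined call site.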