
Commit 057685cf authored by Ingo Molnar

Merge branch 'for-ingo' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6 into tracing/kmemtrace

Conflicts:
	mm/slub.c

Parents: 64b36ca7 fe1200b6
include/linux/slub_def.h  +16 −3
@@ -121,11 +121,24 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
+/*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow so we need this value sufficiently high so that
+ * performance critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -231,7 +244,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 	void *ret;
 
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE)
+		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -275,7 +288,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	void *ret;
 
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
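
For context: with these definitions, any kmalloc() whose compile-time-constant size exceeds SLUB_MAX_SIZE bypasses the slab caches and is passed straight to the page allocator via kmalloc_large(); everything else is served from kmalloc_caches[], whose SLUB_PAGE_SHIFT slots cover the power-of-two sizes up to PAGE_SIZE. Below is a standalone userspace sketch of that dispatch, not part of the commit, assuming 4 KiB pages (PAGE_SHIFT = 12) and using a simplified cache_index() in place of the kernel's kmalloc_index():

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumption: 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SLUB_MAX_SIZE	(PAGE_SIZE)		/* as defined above */
#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 1)	/* valid indices are 0..PAGE_SHIFT */

/* Simplified stand-in for the kernel's kmalloc_index(): index of the
 * smallest power-of-two general cache that fits the request. */
static int cache_index(unsigned long size)
{
	int i = 0;
	unsigned long n = 1;

	while (n < size) {
		n <<= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned long sizes[] = { 64, 4096, 4097, 8192 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		if (sizes[i] > SLUB_MAX_SIZE)
			printf("%5lu bytes -> kmalloc_large() (page allocator)\n",
			       sizes[i]);
		else
			printf("%5lu bytes -> kmalloc_caches[%d]\n",
			       sizes[i], cache_index(sizes[i]));
	}
	return 0;
}

As the new comment notes, the cutoff stays at a full PAGE_SIZE only because the page allocator's own fastpath is still comparatively slow; it is meant to drop to PAGE_SIZE / 2 once that improves.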
mm/slub.c  +8 −8
@@ -2506,7 +2506,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2568,7 +2568,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2690,7 +2690,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2724,7 +2724,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE)) {
+	if (unlikely(size > SLUB_MAX_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
 		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
@@ -3039,7 +3039,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3076,7 +3076,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3277,7 +3277,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3300,7 +3300,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
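
One note on the two loop changes in kmem_cache_init(): since SLUB_PAGE_SHIFT is defined as PAGE_SHIFT + 1, the new bound i < SLUB_PAGE_SHIFT walks exactly the same indices as the old i <= PAGE_SHIFT; the substitution just ties the loops to the real size of kmalloc_caches[]. A minimal sketch, assuming PAGE_SHIFT = 12 and KMALLOC_SHIFT_LOW = 3 (an 8-byte minimum object, an assumption not taken from this diff), of the cache names those loops produce:

#include <stdio.h>

#define PAGE_SHIFT		12		/* assumption: 4 KiB pages */
#define SLUB_PAGE_SHIFT		(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_LOW	3		/* assumption: 8-byte minimum */

int main(void)
{
	int i;

	/* Same index range as the old "i <= PAGE_SHIFT" bound. */
	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
		printf("kmalloc-%d\n", 1 << i);	/* kmalloc-8 .. kmalloc-4096 */
	return 0;
}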