
Commit 08afe22c authored by Pekka Enberg

Merge branch 'slab/next' into slab/for-linus



Fix up a trivial merge conflict with commit baaf1dd4 ("mm/slob: use
min_t() to compare ARCH_SLAB_MINALIGN") that did not go through the slab
tree.

Conflicts:
	mm/slob.c

Signed-off-by: Pekka Enberg <penberg@kernel.org>
parents a304f836 45906855
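
For context on the conflict note above: min_t()/max_t() from <linux/kernel.h> evaluate both operands as one explicit type before comparing, which is how constants of differing signedness such as ARCH_SLAB_MINALIGN and ARCH_KMALLOC_MINALIGN can be compared without compiler warnings. A minimal, hypothetical usage sketch (not the conflicting slob code itself):

#include <linux/kernel.h>
#include <linux/slab.h>

static size_t example_min_align(void)
{
	/* Both macros are cast to size_t before the comparison. */
	return min_t(size_t, ARCH_SLAB_MINALIGN, ARCH_KMALLOC_MINALIGN);
}
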
+2 −5
@@ -128,10 +128,7 @@ struct page {
		};

		struct list_head list;	/* slobs list of pages */
		struct {		/* slab fields */
			struct kmem_cache *slab_cache;
			struct slab *slab_page;
		};
		struct slab *slab_page; /* slab fields */
	};

	/* Remainder is not double word aligned */
@@ -146,7 +143,7 @@ struct page {
#if USE_SPLIT_PTLOCKS
		spinlock_t ptl;
#endif
		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
		struct page *first_page;	/* Compound tail pages */
	};
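
The two hunks above touch struct page (defined in include/linux/mm_types.h): SLAB's private anonymous struct is dropped and SLUB's page->slab pointer is renamed to page->slab_cache, so both allocators reach an object's cache through the same field. A minimal sketch of the kind of shared lookup this enables; the helper name is hypothetical, while virt_to_head_page() and the struct page field come from the diff:

#include <linux/mm.h>
#include <linux/slab.h>

/*
 * Illustrative only: with a single field name, code shared between
 * SLAB and SLUB can map an allocated object back to its kmem_cache
 * without allocator-specific #ifdefs.
 */
static inline struct kmem_cache *example_cache_from_obj(const void *obj)
{
	struct page *page = virt_to_head_page(obj);

	return page->slab_cache;	/* was page->slab under SLUB */
}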

+8 −1
@@ -128,7 +128,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
unsigned int kmem_cache_size(struct kmem_cache *);

/*
 * Please use this macro to create slab caches. Simply specify the
@@ -388,6 +387,14 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

/*
 * Determine the size of a slab object
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */
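
With the change above (include/linux/slab.h), kmem_cache_size() stops being a per-allocator exported function — the SLAB copy disappears further down in the mm/slab.c diff — and becomes a header inline that simply reads object_size. A hedged usage sketch; the cache name and object size below are made up:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/slab.h>

static int __init example_init(void)
{
	/* Hypothetical cache, purely for illustration. */
	struct kmem_cache *cachep = kmem_cache_create("example_cache", 128, 0,
						      SLAB_HWCACHE_ALIGN, NULL);

	if (!cachep)
		return -ENOMEM;

	/* Now a plain field read; reports the requested object size (128 here). */
	pr_info("object size: %u\n", kmem_cache_size(cachep));

	kmem_cache_destroy(cachep);
	return 0;
}
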
+5 −1
@@ -89,9 +89,13 @@ struct kmem_cache {
	 * (see kmem_cache_init())
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 *
	 * We also need to guarantee that the list is able to accommodate a
	 * pointer for each node since "nodelists" uses the remainder of
	 * available pointers.
	 */
	struct kmem_list3 **nodelists;
	struct array_cache *array[NR_CPUS];
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/*
	 * Do not add fields after array[]
	 */
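
The enlarged array[] above (include/linux/slab_def.h) reserves enough slots that, during early boot, the per-node nodelists table can live directly behind the last per-cpu pointer — see setup_nodelists_pointer() and the offsetof()-based size calculation in the mm/slab.c diff below. A minimal userspace sketch of that single-allocation layout, using stand-in type and function names:

#include <stddef.h>
#include <stdlib.h>

#define NR_CPUS 8			/* stand-in for the kernel constant */

struct node_list;			/* stand-in for struct kmem_list3 */
struct pcpu_cache;			/* stand-in for struct array_cache */

struct cache_example {
	struct node_list **nodelists;	/* will point into the tail below */
	struct pcpu_cache *array[NR_CPUS];
	/* Do not add fields after array[] */
};

/*
 * Allocate room for only nr_cpu_ids per-cpu slots plus nr_node_ids
 * trailing node pointers, then aim nodelists just past the last used
 * array[] slot -- the same trick setup_nodelists_pointer() relies on.
 */
static struct cache_example *cache_alloc_example(int nr_cpu_ids, int nr_node_ids)
{
	size_t size = offsetof(struct cache_example, array) +
		      nr_cpu_ids * sizeof(struct pcpu_cache *) +
		      nr_node_ids * sizeof(struct node_list *);
	struct cache_example *c = calloc(1, size);

	if (c)
		c->nodelists = (struct node_list **)&c->array[nr_cpu_ids];
	return c;
}

The statically defined boot cache cannot size itself at run time, so its array[] is declared for the worst case (NR_CPUS + MAX_NUMNODES), which is also why no field may be added after it.
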
+44 −127
@@ -162,23 +162,6 @@
 */
static bool pfmemalloc_active __read_mostly;

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
#endif

/*
 * kmem_bufctl_t:
 *
@@ -564,15 +547,11 @@ static struct cache_names __initdata cache_names[] = {
#undef CACHE
};

static struct arraycache_init initarray_cache __initdata =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
static struct kmem_cache kmem_cache_boot = {
	.nodelists = kmem_cache_nodelists,
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
@@ -1576,29 +1555,34 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index)
	}
}

/*
 * The memory after the last cpu cache pointer is used for the
 * nodelists pointer.
 */
static void setup_nodelists_pointer(struct kmem_cache *cachep)
{
	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
}

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	size_t left_over;
	struct cache_sizes *sizes;
	struct cache_names *names;
	int i;
	int order;
	int node;

	kmem_cache = &kmem_cache_boot;
	setup_nodelists_pointer(kmem_cache);

	if (num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++) {
	for (i = 0; i < NUM_INIT_LISTS; i++)
		kmem_list3_init(&initkmem_list3[i]);
		if (i < MAX_NUMNODES)
			kmem_cache->nodelists[i] = NULL;
	}

	set_up_list3s(kmem_cache, CACHE_CACHE);

	/*
@@ -1629,37 +1613,16 @@ void __init kmem_cache_init(void)
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	node = numa_mem_id();

	/* 1) create the kmem_cache */
	INIT_LIST_HEAD(&slab_caches);
	list_add(&kmem_cache->list, &slab_caches);
	kmem_cache->colour_off = cache_line_size();
	kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
	kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];

	/*
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
	 */
	kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
				  nr_node_ids * sizeof(struct kmem_list3 *);
	kmem_cache->object_size = kmem_cache->size;
	kmem_cache->size = ALIGN(kmem_cache->object_size,
					cache_line_size());
	kmem_cache->reciprocal_buffer_size =
		reciprocal_value(kmem_cache->size);

	for (order = 0; order < MAX_ORDER; order++) {
		cache_estimate(order, kmem_cache->size,
			cache_line_size(), 0, &left_over, &kmem_cache->num);
		if (kmem_cache->num)
			break;
	}
	BUG_ON(!kmem_cache->num);
	kmem_cache->gfporder = order;
	kmem_cache->colour = left_over / kmem_cache->colour_off;
	kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
				      sizeof(struct slab), cache_line_size());
	create_boot_cache(kmem_cache, "kmem_cache",
		offsetof(struct kmem_cache, array[nr_cpu_ids]) +
				  nr_node_ids * sizeof(struct kmem_list3 *),
				  SLAB_HWCACHE_ALIGN);
	list_add(&kmem_cache->list, &slab_caches);

	/* 2+3) create the kmalloc caches */
	sizes = malloc_sizes;
@@ -1671,23 +1634,13 @@ void __init kmem_cache_init(void)
	 * bug.
	 */

	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
	sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
	sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
	sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
	sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
	sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
					sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);

	if (INDEX_AC != INDEX_L3) {
		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
		sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
		sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
		sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
		sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
	}
	if (INDEX_AC != INDEX_L3)
		sizes[INDEX_L3].cs_cachep =
			create_kmalloc_cache(names[INDEX_L3].name,
				sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);

	slab_early_init = 0;

@@ -1699,24 +1652,14 @@ void __init kmem_cache_init(void)
		 * Note for systems short on memory removing the alignment will
		 * allow tighter packing of the smaller caches.
		 */
		if (!sizes->cs_cachep) {
			sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
			sizes->cs_cachep->name = names->name;
			sizes->cs_cachep->size = sizes->cs_size;
			sizes->cs_cachep->object_size = sizes->cs_size;
			sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
			__kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
			list_add(&sizes->cs_cachep->list, &slab_caches);
		}
		if (!sizes->cs_cachep)
			sizes->cs_cachep = create_kmalloc_cache(names->name,
					sizes->cs_size, ARCH_KMALLOC_FLAGS);

#ifdef CONFIG_ZONE_DMA
		sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
		sizes->cs_dmacachep->name = names->name_dma;
		sizes->cs_dmacachep->size = sizes->cs_size;
		sizes->cs_dmacachep->object_size = sizes->cs_size;
		sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
		__kmem_cache_create(sizes->cs_dmacachep,
			       ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
		list_add(&sizes->cs_dmacachep->list, &slab_caches);
		sizes->cs_dmacachep = create_kmalloc_cache(
			names->name_dma, sizes->cs_size,
			SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
#endif
		sizes++;
		names++;
@@ -1727,7 +1670,6 @@ void __init kmem_cache_init(void)

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

		BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
		memcpy(ptr, cpu_cache_get(kmem_cache),
		       sizeof(struct arraycache_init));
		/*
@@ -2282,7 +2224,15 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)

	if (slab_state == DOWN) {
		/*
		 * Note: the first kmem_cache_create must create the cache
		 * Note: Creation of first cache (kmem_cache).
		 * The setup_list3s is taken care
		 * of by the caller of __kmem_cache_create
		 */
		cachep->array[smp_processor_id()] = &initarray_generic.cache;
		slab_state = PARTIAL;
	} else if (slab_state == PARTIAL) {
		/*
		 * Note: the second kmem_cache_create must create the cache
		 * that's used by kmalloc(24), otherwise the creation of
		 * further caches will BUG().
		 */
@@ -2290,7 +2240,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)

		/*
		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
		 * the first cache, then we need to set up all its list3s,
		 * the second cache, then we need to set up all its list3s,
		 * otherwise the creation of further caches will BUG().
		 */
		set_up_list3s(cachep, SIZE_AC);
@@ -2299,6 +2249,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
		else
			slab_state = PARTIAL_ARRAYCACHE;
	} else {
		/* Remaining boot caches */
		cachep->array[smp_processor_id()] =
			kmalloc(sizeof(struct arraycache_init), gfp);

@@ -2331,11 +2282,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)

/**
 * __kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @cachep: cache management descriptor
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
@@ -2378,11 +2326,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
	if (flags & SLAB_DESTROY_BY_RCU)
		BUG_ON(flags & SLAB_POISON);
#endif
	/*
	 * Always checks flags, a caller might be expecting debug support which
	 * isn't available.
	 */
	BUG_ON(flags & ~CREATE_MASK);

	/*
	 * Check that size is in terms of words.  This is needed to avoid
@@ -2394,22 +2337,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
		size &= ~(BYTES_PER_WORD - 1);
	}

	/* calculate the final buffer alignment: */

	/* 1) arch recommendation: can be overridden for debug */
	if (flags & SLAB_HWCACHE_ALIGN) {
		/*
		 * Default alignment: as specified by the arch code.  Except if
		 * an object is really small, then squeeze multiple objects into
		 * one cacheline.
		 */
		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
	} else {
		ralign = BYTES_PER_WORD;
	}

	/*
	 * Redzoning and user store require word alignment or possibly larger.
	 * Note this will be overridden by architecture or caller mandated
@@ -2426,10 +2353,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
		size &= ~(REDZONE_ALIGN - 1);
	}

	/* 2) arch mandated alignment */
	if (ralign < ARCH_SLAB_MINALIGN) {
		ralign = ARCH_SLAB_MINALIGN;
	}
	/* 3) caller mandated alignment */
	if (ralign < cachep->align) {
		ralign = cachep->align;
@@ -2447,7 +2370,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
	else
		gfp = GFP_NOWAIT;

	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
	setup_nodelists_pointer(cachep);
#if DEBUG

	/*
@@ -3969,12 +3892,6 @@ void kfree(const void *objp)
}
EXPORT_SYMBOL(kfree);

unsigned int kmem_cache_size(struct kmem_cache *cachep)
{
	return cachep->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

/*
 * This initializes kmem_list3 or resizes various caches for all nodes.
 */
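
For reference when reading setup_cpu_cache() above: the bootstrap states it switches on (DOWN, PARTIAL, PARTIAL_ARRAYCACHE, ...) come from the shared mm/slab.h. Approximately, for kernels of this era, the progression looks like the sketch below; the identifiers match the diff, the comments are my paraphrase:

enum slab_state {
	DOWN,			/* no slab allocator available yet */
	PARTIAL,		/* SLUB: kmem_cache_node cache usable */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache usable */
	PARTIAL_L3,		/* SLAB: kmalloc size for kmem_list3 usable */
	UP,			/* slab caches usable, extras still pending */
	FULL			/* everything is initialized */
};
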
+33 −0
@@ -32,9 +32,17 @@ extern struct list_head slab_caches;
/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

#ifdef CONFIG_SLUB
struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *));
@@ -45,6 +53,31 @@ static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t siz
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

int __kmem_cache_shutdown(struct kmem_cache *);

struct seq_file;
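
The declarations added above, together with the call sites in the mm/slab.c diff, move boot-time cache creation into common code; the per-allocator CREATE_MASK removed from mm/slab.c is likewise replaced by the shared CACHE_CREATE_MASK so flag validation can live in one place. The helper bodies themselves sit in mm/slab_common.c and are not shown on this page — the sketch below is only an approximation inferred from the declarations and call sites here (field setup, calculate_alignment(), __kmem_cache_create(), registration on slab_caches):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "slab.h"	/* slab_caches, calculate_alignment(), __kmem_cache_create() */

/* Approximation only -- not the actual mm/slab_common.c source. */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
			      size_t size, unsigned long flags)
{
	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);

	if (__kmem_cache_create(s, flags))
		panic("Creation of slab %s size=%zu failed\n", name, size);

	s->refcount = -1;	/* keep boot caches out of cache merging */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
					       unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}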