
Commit 81cda662 authored by Christoph Lameter, committed by Linus Torvalds

Slab allocators: Cleanup zeroing allocations



It now becomes easy to support the zeroing allocs with generic inline
functions in slab.h.  Provide inline definitions to allow the continued use of
kzalloc, kmem_cache_zalloc, etc., but remove the other definitions of zeroing
functions from the slab allocators and util.c.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ce15fea8
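
The practical effect for callers: every zeroing variant now reduces to passing
__GFP_ZERO, so the allocator itself zeroes the object instead of each allocator
carrying its own memset()-based copy. A minimal caller-side sketch of the
equivalence (struct foo and foo_cachep are hypothetical, not part of this
commit):

#include <linux/slab.h>

/* Hypothetical example type and cache, not from this commit. */
struct foo {
	int a;
	char name[16];
};

static void zalloc_demo(struct kmem_cache *foo_cachep)
{
	/* kzalloc(size, flags) is now kmalloc(size, flags | __GFP_ZERO):
	 * f->a == 0 and f->name is all zeroes on success. */
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	/* kmem_cache_zalloc(c, flags) is now
	 * kmem_cache_alloc(c, flags | __GFP_ZERO): same guarantee, with
	 * no allocator-private memset() needed. */
	struct foo *g = kmem_cache_zalloc(foo_cachep, GFP_KERNEL);

	kfree(f);			/* kfree(NULL) is a no-op */
	if (g)
		kmem_cache_free(foo_cachep, g);
}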
include/linux/slab.h  +46 −31
@@ -55,7 +55,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
 			void (*)(void *, struct kmem_cache *, unsigned long));
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
-void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
 const char *kmem_cache_name(struct kmem_cache *);
@@ -91,11 +90,37 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
 /*
  * Common kmalloc functions provided by all allocators
  */
-void *__kzalloc(size_t, gfp_t);
 void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
 size_t ksize(const void *);
 
+/*
+ * Allocator specific definitions. These are mainly used to establish optimized
+ * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
+ * selecting the appropriate general cache at compile time.
+ *
+ * Allocators must define at least:
+ *
+ *	kmem_cache_alloc()
+ *	__kmalloc()
+ *	kmalloc()
+ *
+ * Those wishing to support NUMA must also define:
+ *
+ *	kmem_cache_alloc_node()
+ *	kmalloc_node()
+ *
+ * See each allocator definition file for additional comments and
+ * implementation notes.
+ */
+#ifdef CONFIG_SLUB
+#include <linux/slub_def.h>
+#elif defined(CONFIG_SLOB)
+#include <linux/slob_def.h>
+#else
+#include <linux/slab_def.h>
+#endif
+
 /**
  * kcalloc - allocate memory for an array. The memory is set to zero.
  * @n: number of elements.
@@ -151,37 +176,9 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 {
 	if (n != 0 && size > ULONG_MAX / n)
 		return NULL;
-	return __kzalloc(n * size, flags);
+	return __kmalloc(n * size, flags | __GFP_ZERO);
 }
 
-/*
- * Allocator specific definitions. These are mainly used to establish optimized
- * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
- * selecting the appropriate general cache at compile time.
- *
- * Allocators must define at least:
- *
- *	kmem_cache_alloc()
- *	__kmalloc()
- *	kmalloc()
- *	kzalloc()
- *
- * Those wishing to support NUMA must also define:
- *
- *	kmem_cache_alloc_node()
- *	kmalloc_node()
- *
- * See each allocator definition file for additional comments and
- * implementation notes.
- */
-#ifdef CONFIG_SLUB
-#include <linux/slub_def.h>
-#elif defined(CONFIG_SLOB)
-#include <linux/slob_def.h>
-#else
-#include <linux/slab_def.h>
-#endif
-
 #if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
 /**
  * kmalloc_node - allocate memory from a specific node
@@ -255,5 +252,23 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
 
 #endif /* DEBUG_SLAB */
 
+/*
+ * Shortcuts
+ */
+static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
+{
+	return kmem_cache_alloc(k, flags | __GFP_ZERO);
+}
+
+/**
+ * kzalloc - allocate memory. The memory is set to zero.
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kmalloc).
+ */
+static inline void *kzalloc(size_t size, gfp_t flags)
+{
+	return kmalloc(size, flags | __GFP_ZERO);
+}
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SLAB_H */
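
One subtlety in the kcalloc() hunk above: the overflow guard is kept while the
zeroing moves into __GFP_ZERO, so a product n * size that would wrap past
ULONG_MAX still fails cleanly instead of under-allocating. A hypothetical
illustration (not part of the commit; struct item is an assumed example type):

#include <linux/slab.h>
#include <linux/kernel.h>

/* Hypothetical example type, not from this commit. */
struct item {
	char buf[64];
};

static void kcalloc_overflow_demo(void)
{
	/* n * sizeof(struct item) exceeds ULONG_MAX here, so the guard
	 * (n != 0 && size > ULONG_MAX / n) trips and NULL is returned
	 * before __kmalloc(..., flags | __GFP_ZERO) is ever reached. */
	size_t n = ULONG_MAX / sizeof(struct item) + 1;
	struct item *a = kcalloc(n, sizeof(struct item), GFP_KERNEL);

	WARN_ON(a != NULL);	/* expected: allocation refused */
	kfree(a);		/* kfree(NULL) is a no-op */
}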
include/linux/slab_def.h  +0 −30
@@ -58,36 +58,6 @@ found:
 	return __kmalloc(size, flags);
 }
 
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size)) {
-		int i = 0;
-
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-		{
-			extern void __you_cannot_kzalloc_that_much(void);
-			__you_cannot_kzalloc_that_much();
-		}
-found:
-#ifdef CONFIG_ZONE_DMA
-		if (flags & GFP_DMA)
-			return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
-						flags);
-#endif
-		return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
-	}
-	return __kzalloc(size, flags);
-}
-
 #ifdef CONFIG_NUMA
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
include/linux/slub_def.h  +0 −13
@@ -179,19 +179,6 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 		return __kmalloc(size, flags);
 }
 
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
-
-		if (!s)
-			return ZERO_SIZE_PTR;
-
-		return kmem_cache_zalloc(s, flags);
-	} else
-		return __kzalloc(size, flags);
-}
-
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
mm/slab.c  +0 −17
@@ -3589,23 +3589,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
-/**
- * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
- * @cache: The cache to allocate from.
- * @flags: See kmalloc().
- *
- * Allocate an object from this cache and set the allocated memory to zero.
- * The flags are only relevant if the cache has no available objects.
- */
-void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
-{
-	void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
-	if (ret)
-		memset(ret, 0, obj_size(cache));
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_zalloc);
-
 /**
  * kmem_ptr_validate - check if an untrusted pointer might
  *	be a slab entry.
mm/slob.c  +0 −10
@@ -543,16 +543,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
-{
-	void *ret = kmem_cache_alloc(c, flags);
-	if (ret)
-		memset(ret, 0, c->size);
-
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_zalloc);
-
 static void __kmem_cache_free(void *b, int size)
 {
 	if (size < PAGE_SIZE)