
Commit bb193c98 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slub: fix slab_pad_check()
  slub: release kobject if sysfs_create_group failed in sysfs_slab_add
  SLUB: fix ARCH_KMALLOC_MINALIGN cases 64 and 256
  SLUB: Fix some coding style issues
  SLUB: Drop write permission to /proc/slabinfo
  slab: remove duplicate kmem_cache_init_late() declarations
  slub: change kmem_cache->align to record the real alignment
  slub: use size and objsize orders to disable debug flags
  slub: add option to disable higher order debugging slabs
parents f65ac45e aceda773
Documentation/vm/slub.txt  +10 −0
@@ -41,6 +41,8 @@ Possible debug options are
 	P		Poisoning (object and padding)
 	U		User tracking (free and alloc)
 	T		Trace (please only use on single slabs)
+	O		Switch debugging off for caches that would have
+			caused higher minimum slab orders
 	-		Switch all debugging off (useful if the kernel is
 			configured with CONFIG_SLUB_DEBUG_ON)

@@ -59,6 +61,14 @@ to the dentry cache with
 
 	slub_debug=F,dentry
 
+Debugging options may require the minimum possible slab order to increase as
+a result of storing the metadata (for example, caches with PAGE_SIZE object
+sizes).  This has a higher likelihood of resulting in slab allocation errors
+in low memory situations or if there's high fragmentation of memory.  To
+switch off debugging for such caches by default, use
+
+	slub_debug=O
+
 In case you forgot to enable debugging on the kernel command line: It is
 possible to enable debugging manually when the kernel is up. Look at the
 contents of:
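
As an aside on the arithmetic here: a cache's minimum order goes up exactly
when the object size plus debug metadata no longer fits in the old page
count. This is the get_order(s->size) > get_order(s->objsize) test used in
mm/slub.c below. A userspace C sketch (the 128-byte overhead is an assumed
example value, not taken from the patch):

/*
 * Illustrative sketch, not kernel code: debug metadata pushing a
 * PAGE_SIZE-object cache from order-0 to order-1 slabs.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Smallest order such that (PAGE_SIZE << order) >= size. */
static int get_order(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long objsize = PAGE_SIZE;	/* object size */
	unsigned long overhead = 128;		/* assumed red zone + tracking */

	printf("without debugging: order %d\n", get_order(objsize));
	printf("with debugging:    order %d\n", get_order(objsize + overhead));
	return 0;
}

This is precisely the situation slub_debug=O is for: such caches silently
drop their metadata-storing debug flags instead of doubling their slab size.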
include/linux/slob_def.h  +0 −5
@@ -34,9 +34,4 @@ static __always_inline void *__kmalloc(size_t size, gfp_t flags)
 	return kmalloc(size, flags);
 }
 
-static inline void kmem_cache_init_late(void)
-{
-	/* Nothing to do */
-}
-
 #endif /* __LINUX_SLOB_DEF_H */
include/linux/slub_def.h  +2 −6
@@ -153,12 +153,10 @@ static __always_inline int kmalloc_index(size_t size)
 	if (size <= KMALLOC_MIN_SIZE)
 		return KMALLOC_SHIFT_LOW;
 
-#if KMALLOC_MIN_SIZE <= 64
-	if (size > 64 && size <= 96)
+	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
 		return 1;
-	if (size > 128 && size <= 192)
+	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
 		return 2;
-#endif
 	if (size <=          8) return 3;
 	if (size <=         16) return 4;
 	if (size <=         32) return 5;
@@ -304,6 +302,4 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 }
 #endif
 
-void __init kmem_cache_init_late(void);
-
 #endif /* _LINUX_SLUB_DEF_H */
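
To see what the old #if got wrong, take KMALLOC_MIN_SIZE == 64: the 96-byte
cache must be skipped (96 is not a multiple of 64, so its objects would be
misaligned) while the 192-byte cache is still usable. A userspace sketch of
the fixed lookup, truncated to sizes up to 256 bytes (KMALLOC_SHIFT_LOW is
set by hand for the demo):

/*
 * Sketch of the fixed kmalloc_index() logic for the
 * ARCH_KMALLOC_MINALIGN == 64 case; not kernel code.
 */
#include <stdio.h>

#define KMALLOC_MIN_SIZE	64
#define KMALLOC_SHIFT_LOW	6	/* log2(KMALLOC_MIN_SIZE) */

static int kmalloc_index(size_t size)
{
	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;
	/* Runtime guards replace the old '#if KMALLOC_MIN_SIZE <= 64'. */
	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <= 128)
		return 7;
	if (size <= 256)
		return 8;
	return -1;	/* sketch stops at 256 bytes */
}

int main(void)
{
	/* 96 now falls through to the 128-byte cache (index 7); with the
	 * old #if it returned index 1, even though the 96-byte cache is
	 * no longer created when the minimum size is 64. */
	printf("96  -> %d\n", kmalloc_index(96));
	printf("192 -> %d\n", kmalloc_index(192));
	return 0;
}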
mm/slob.c  +5 −0
@@ -692,3 +692,8 @@ void __init kmem_cache_init(void)
 {
 	slob_ready = 1;
 }
+
+void __init kmem_cache_init_late(void)
+{
+	/* Nothing to do */
+}
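
Taken together with the slob_def.h and slub_def.h hunks above, this removes
the duplicate kmem_cache_init_late() declarations: the allocators share one
non-static declaration in the common slab header, and SLOB's empty stub moves
out of line because C forbids redeclaring that name as static inline. A
standalone demo of the underlying rule (not kernel code):

/*
 * demo.c — 'cc -c demo.c' fails with "static declaration of
 * 'kmem_cache_init_late' follows non-static declaration".
 */
void kmem_cache_init_late(void);		/* shared declaration */

static inline void kmem_cache_init_late(void)	/* old header stub */
{
	/* Nothing to do */
}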
mm/slub.c  +68 −14
@@ -140,6 +140,13 @@
 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
 				SLAB_POISON | SLAB_STORE_USER)
 
+/*
+ * Debugging flags that require metadata to be stored in the slab.  These get
+ * disabled when slub_debug=O is used and a cache's min order increases with
+ * metadata.
+ */
+#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
+
 /*
  * Set of flags that will prevent slab merging
  */
@@ -325,6 +332,7 @@ static int slub_debug;
 #endif
 
 static char *slub_debug_slabs;
+static int disable_higher_order_debug;
 
 /*
  * Object debugging
@@ -646,7 +654,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
 	print_section("Padding", end - remainder, remainder);
 
-	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
+	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
 	return 0;
 }
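
The replaced line is subtle: the padding being checked is only the remainder
at the end of the slab, so only [end - remainder, end) may be re-poisoned.
Restoring from 'start' repainted POISON_INUSE over every live object in the
slab. A userspace sketch with arbitrary example sizes:

/* Sketch of the slab_pad_check() fix; not kernel code. */
#include <stdio.h>
#include <string.h>

#define POISON_INUSE 0x5a

int main(void)
{
	unsigned char slab[64];			/* one pretend slab */
	size_t size = 24;			/* object size */
	size_t remainder = sizeof(slab) % size;	/* 16 bytes of padding */
	unsigned char *start = slab;
	unsigned char *end = slab + sizeof(slab);

	memset(start, 0xaa, sizeof(slab));	/* live object data */
	end[-1] = 0;				/* corrupted padding byte */

	/* Old code: memset(start, POISON_INUSE, end - start) — wipes
	 * the objects too. Fixed code repoisons the padding only: */
	memset(end - remainder, POISON_INUSE, remainder);

	printf("object byte 0x%02x, padding byte 0x%02x\n",
	       start[0], end[-1]);
	return 0;
}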

@@ -976,6 +984,15 @@ static int __init setup_slub_debug(char *str)
 		 */
 		goto check_slabs;
 
+	if (tolower(*str) == 'o') {
+		/*
+		 * Avoid enabling debugging on caches whose minimum order
+		 * would increase as a result.
+		 */
+		disable_higher_order_debug = 1;
+		goto out;
+	}
+
 	slub_debug = 0;
 	if (*str == '-')
 		/*
@@ -1026,7 +1043,7 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
 	 * Enable debugging if selected on the kernel commandline.
 	 */
 	if (slub_debug && (!slub_debug_slabs ||
-	    strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
+	    !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
 		flags |= slub_debug;
 
 	return flags;
@@ -1109,8 +1126,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	}
 
 	if (kmemcheck_enabled
-		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
-	{
+		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);
 
 		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
@@ -1560,6 +1576,10 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 		"default order: %d, min order: %d\n", s->name, s->objsize,
 		s->size, oo_order(s->oo), oo_order(s->min));
 
+	if (oo_order(s->min) > get_order(s->objsize))
+		printk(KERN_WARNING "  %s debugging increased min order, use "
+		       "slub_debug=O to disable.\n", s->name);
+
 	for_each_online_node(node) {
 		struct kmem_cache_node *n = get_node(s, node);
 		unsigned long nr_slabs;
@@ -2400,6 +2420,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 * on bootup.
 	 */
 	align = calculate_alignment(flags, align, s->objsize);
+	s->align = align;
 
 	/*
 	 * SLUB stores one object immediately after another beginning from
@@ -2452,6 +2473,18 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 
 	if (!calculate_sizes(s, -1))
 		goto error;
+	if (disable_higher_order_debug) {
+		/*
+		 * Disable debugging flags that store metadata if the min slab
+		 * order increased.
+		 */
+		if (get_order(s->size) > get_order(s->objsize)) {
+			s->flags &= ~DEBUG_METADATA_FLAGS;
+			s->offset = 0;
+			if (!calculate_sizes(s, -1))
+				goto error;
+		}
+	}
 
 	/*
 	 * The larger the object size is, the more pages we want on the partial
@@ -2790,6 +2823,11 @@ static s8 size_index[24] = {
 	2	/* 192 */
 };
 
+static inline int size_index_elem(size_t bytes)
+{
+	return (bytes - 1) / 8;
+}
+
 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 {
 	int index;
@@ -2798,7 +2836,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 		if (!size)
 			return ZERO_SIZE_PTR;
 
-		index = size_index[(size - 1) / 8];
+		index = size_index[size_index_elem(size)];
 	} else
 		index = fls(size - 1);
@@ -3156,10 +3194,12 @@ void __init kmem_cache_init(void)
 	slab_state = PARTIAL;
 
 	/* Caches that are not of the two-to-the-power-of size */
-	if (KMALLOC_MIN_SIZE <= 64) {
+	if (KMALLOC_MIN_SIZE <= 32) {
 		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_NOWAIT);
 		caches++;
+	}
+	if (KMALLOC_MIN_SIZE <= 64) {
 		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_NOWAIT);
 		caches++;
@@ -3186,17 +3226,28 @@ void __init kmem_cache_init(void)
 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
 
-	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
-		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
+	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
+		int elem = size_index_elem(i);
+		if (elem >= ARRAY_SIZE(size_index))
+			break;
+		size_index[elem] = KMALLOC_SHIFT_LOW;
+	}
 
-	if (KMALLOC_MIN_SIZE == 128) {
+	if (KMALLOC_MIN_SIZE == 64) {
+		/*
+		 * The 96 byte size cache is not used if the alignment
+		 * is 64 byte.
+		 */
+		for (i = 64 + 8; i <= 96; i += 8)
+			size_index[size_index_elem(i)] = 7;
+	} else if (KMALLOC_MIN_SIZE == 128) {
 		/*
 		 * The 192 byte sized cache is not used if the alignment
 		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
 		 * instead.
 		 */
 		for (i = 128 + 8; i <= 192; i += 8)
-			size_index[(i - 1) / 8] = 8;
+			size_index[size_index_elem(i)] = 8;
 	}
 
 	slab_state = UP;
@@ -4543,8 +4594,11 @@ static int sysfs_slab_add(struct kmem_cache *s)
 	}
 
 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
-	if (err)
+	if (err) {
+		kobject_del(&s->kobj);
+		kobject_put(&s->kobj);
 		return err;
+	}
 	kobject_uevent(&s->kobj, KOBJ_ADD);
 	if (!unmergeable) {
 		/* Setup first alias */
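
The sysfs fix applies the usual kobject unwind rule: once kobject_add() has
succeeded, a later failure must be undone with kobject_del() plus
kobject_put(), otherwise the kobject and its sysfs name are leaked. A minimal
kernel-style sketch of the pattern (my_ktype and register_with_sysfs are
placeholder names, and this only builds inside a kernel tree):

/* Sketch of the unwind pattern sysfs_slab_add() now follows. */
#include <linux/kobject.h>
#include <linux/sysfs.h>

static struct kobj_type my_ktype;	/* placeholder, no release() */

static int register_with_sysfs(struct kobject *kobj, struct kobject *parent,
			       const struct attribute_group *grp,
			       const char *name)
{
	int err;

	err = kobject_init_and_add(kobj, &my_ktype, parent, "%s", name);
	if (err) {
		kobject_put(kobj);	/* drop the init reference */
		return err;
	}

	err = sysfs_create_group(kobj, grp);
	if (err) {
		kobject_del(kobj);	/* undo kobject_add() */
		kobject_put(kobj);	/* drop the last reference */
		return err;
	}
	kobject_uevent(kobj, KOBJ_ADD);
	return 0;
}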
@@ -4726,7 +4780,7 @@ static const struct file_operations proc_slabinfo_operations = {
 
 static int __init slab_proc_init(void)
 {
-	proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
+	proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
 	return 0;
 }
 module_init(slab_proc_init);