
Commit b57bdda5 authored by Linus Torvalds
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slub: Allow removal of slab caches during boot
  Revert "slub: Allow removal of slab caches during boot"
  slub numa: Fix rare allocation from unexpected node
  slab: use deferable timers for its periodic housekeeping
  slub: Use kmem_cache flags to detect if slab is in debugging mode.
  slub: Allow removal of slab caches during boot
  slub: Check kasprintf results in kmem_cache_init()
  SLUB: Constants need UL
  slub: Use a constant for a unspecified node.
  SLOB: Free objects to their own list
  slab: fix caller tracking on !CONFIG_DEBUG_SLAB && CONFIG_TRACING
parents cc41f5ce 415cb479
include/linux/page-flags.h  +0 −2
@@ -128,7 +128,6 @@ enum pageflags {
 
 	/* SLUB */
 	PG_slub_frozen = PG_active,
-	PG_slub_debug = PG_error,
 };
 
 #ifndef __GENERATING_BOUNDS_H
@@ -215,7 +214,6 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
 __PAGEFLAG(SlobFree, slob_free)
 
 __PAGEFLAG(SlubFrozen, slub_frozen)
-__PAGEFLAG(SlubDebug, slub_debug)
 
 /*
  * Private page markings that may be used by the filesystem that owns the page
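With PG_slub_debug gone, SLUB no longer consumes an aliased page flag for per-page debug state; debugging is instead derived from the cache's own flags (see kmem_cache_debug() in the mm/slub.c diff below). For readers unfamiliar with the macro, __PAGEFLAG(SlubFrozen, slub_frozen) generates non-atomic test/set/clear helpers roughly like the following simplified sketch (not the verbatim macro expansion):

static inline int PageSlubFrozen(struct page *page)
{
	return test_bit(PG_slub_frozen, &page->flags);
}

static inline void __SetPageSlubFrozen(struct page *page)
{
	__set_bit(PG_slub_frozen, &page->flags);
}

static inline void __ClearPageSlubFrozen(struct page *page)
{
	__clear_bit(PG_slub_frozen, &page->flags);
}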
include/linux/slab.h  +4 −2
@@ -268,7 +268,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc_track_caller(size, flags, _RET_IP_)
@@ -286,7 +287,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  * standard allocator where we care about the real place the memory
  * allocation request comes from.
  */
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
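The effect of this hunk is that kmalloc_track_caller() and kmalloc_node_track_caller() also resolve to the _RET_IP_-recording variants on plain SLAB kernels built with CONFIG_TRACING, so trace output attributes allocations to the real call site rather than to an intermediate wrapper. A minimal sketch of the intended usage pattern (my_strdup is hypothetical; kstrdup() in mm/util.c is the canonical in-tree example):

#include <linux/slab.h>
#include <linux/string.h>

static char *my_strdup(const char *s, gfp_t gfp)
{
	size_t len = strlen(s) + 1;
	/*
	 * The recorded caller is whoever called my_strdup(), not
	 * my_strdup() itself, because the macro captures _RET_IP_
	 * (the return address) at this expansion point.
	 */
	char *buf = kmalloc_track_caller(len, gfp);

	if (buf)
		memcpy(buf, s, len);
	return buf;
}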
mm/slab.c  +1 −1
@@ -860,7 +860,7 @@ static void __cpuinit start_cpu_timer(int cpu)
 	 */
 	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_DELAYED_WORK(reap_work, cache_reap);
+		INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work,
 					__round_jiffies_relative(HZ, cpu));
 	}
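A deferrable delayed work item uses a deferrable timer underneath: on an idle CPU the timer does not force a wakeup, and the work simply runs the next time the CPU wakes up for some other reason, which suits cache_reap()'s non-urgent housekeeping. A minimal sketch of the same pattern in a hypothetical module (all names illustrative):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work hk_work;

static void housekeeping(struct work_struct *unused)
{
	/* ... periodic, non-urgent cleanup ... */

	/* Re-arm; round to a jiffy boundary to batch wakeups. */
	schedule_delayed_work(&hk_work, round_jiffies_relative(HZ));
}

static int __init hk_init(void)
{
	/* _DEFERRABLE: the underlying timer will not wake an idle CPU. */
	INIT_DELAYED_WORK_DEFERRABLE(&hk_work, housekeeping);
	schedule_delayed_work(&hk_work, round_jiffies_relative(HZ));
	return 0;
}
module_init(hk_init);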
mm/slob.c  +8 −1
@@ -396,6 +396,7 @@ static void slob_free(void *block, int size)
 	slob_t *prev, *next, *b = (slob_t *)block;
 	slobidx_t units;
 	unsigned long flags;
+	struct list_head *slob_list;
 
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
@@ -424,7 +425,13 @@ static void slob_free(void *block, int size)
 		set_slob(b, units,
 			(void *)((unsigned long)(b +
 					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
-		set_slob_page_free(sp, &free_slob_small);
+		if (size < SLOB_BREAK1)
+			slob_list = &free_slob_small;
+		else if (size < SLOB_BREAK2)
+			slob_list = &free_slob_medium;
+		else
+			slob_list = &free_slob_large;
+		set_slob_page_free(sp, slob_list);
 		goto out;
 	}
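Previously slob_free() always returned a page to free_slob_small, even though slob_alloc() picks its starting list by request size, so a page freed from the medium or large list could land on the small list and get fragmented by small allocations. The fix mirrors the allocation-side selection. In this tree the break points are SLOB_BREAK1 = 256 and SLOB_BREAK2 = 1024 bytes, so the mapping is (hypothetical helper, same logic as the hunk above):

static struct list_head *slob_list_for(int size)
{
	if (size < SLOB_BREAK1)		/* < 256 bytes  -> small  */
		return &free_slob_small;
	if (size < SLOB_BREAK2)		/* < 1024 bytes -> medium */
		return &free_slob_medium;
	return &free_slob_large;	/* everything else -> large */
}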

mm/slub.c  +43 −43
@@ -106,11 +106,17 @@
  * 			the fast path and disables lockless freelists.
  */
 
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+		SLAB_TRACE | SLAB_DEBUG_FREE)
+
+static inline int kmem_cache_debug(struct kmem_cache *s)
+{
 #ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG 1
+	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
 #else
-#define SLABDEBUG 0
+	return 0;
 #endif
+}
 
 /*
  * Issues still to be resolved:
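kmem_cache_debug() collapses the old compile-time SLABDEBUG switch plus per-page PG_slub_debug test into a single per-cache test: it is true whenever any of the five debug flags is set on the cache, and with CONFIG_SLUB_DEBUG=n it is a constant 0, so the compiler drops the debug branches entirely. The flags correspond to the slub_debug= boot parameter (syntax per Documentation/vm/slub.txt); for example:

	slub_debug=ZPU,kmalloc-128

enables red zoning (Z), poisoning (P) and user tracking (U) for just the kmalloc-128 cache, which in turn makes kmem_cache_debug() true for that cache.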
@@ -161,8 +167,8 @@
 #define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
 
 /* Internal SLUB flags */
-#define __OBJECT_POISON		0x80000000 /* Poison object */
-#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
+#define __OBJECT_POISON		0x80000000UL /* Poison object */
+#define __SYSFS_ADD_DEFERRED	0x40000000UL /* Not yet visible via sysfs */
 
 static int kmem_size = sizeof(struct kmem_cache);
 
@@ -1072,7 +1078,7 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 
 	flags |= __GFP_NOTRACK;
 
-	if (node == -1)
+	if (node == NUMA_NO_NODE)
 		return alloc_pages(flags, order);
 	else
 		return alloc_pages_exact_node(node, flags, order);
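NUMA_NO_NODE gives the magic -1 ("no node preference") a name. At the allocation API level, passing NUMA_NO_NODE makes the node-aware variant behave exactly like the plain call, as the slab_alloc() hunks further down show. A minimal sketch (foo_cache is a hypothetical cache created elsewhere):

#include <linux/slab.h>

static void *alloc_foo_any_node(struct kmem_cache *foo_cache)
{
	/* Same behaviour as kmem_cache_alloc(foo_cache, GFP_KERNEL): */
	return kmem_cache_alloc_node(foo_cache, GFP_KERNEL, NUMA_NO_NODE);
}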
@@ -1156,9 +1162,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
-	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
-			SLAB_STORE_USER | SLAB_TRACE))
-		__SetPageSlubDebug(page);
 
 	start = page_address(page);
 
@@ -1185,14 +1188,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	int order = compound_order(page);
 	int pages = 1 << order;
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
+	if (kmem_cache_debug(s)) {
 		void *p;
 
 		slab_pad_check(s, page);
 		for_each_object(p, s, page_address(page),
 						page->objects)
 			check_object(s, page, p, 0);
-		__ClearPageSlubDebug(page);
 	}
 
 	kmemcheck_free_shadow(page, compound_order(page));
@@ -1386,10 +1388,10 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
-	int searchnode = (node == -1) ? numa_node_id() : node;
+	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
 	page = get_partial_node(get_node(s, searchnode));
-	if (page || (flags & __GFP_THISNODE))
+	if (page || node != -1)
 		return page;
 
 	return get_any_partial(s, flags);
@@ -1414,8 +1416,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 		} else {
 			stat(s, DEACTIVATE_FULL);
-			if (SLABDEBUG && PageSlubDebug(page) &&
-						(s->flags & SLAB_STORE_USER))
+			if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
 				add_full(n, page);
 		}
 		slab_unlock(page);
@@ -1514,7 +1515,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != -1 && c->node != node)
+	if (node != NUMA_NO_NODE && c->node != node)
 		return 0;
 #endif
 	return 1;
@@ -1623,7 +1624,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	object = c->page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 	c->freelist = get_freepointer(s, object);
@@ -1726,7 +1727,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
 
@@ -1737,7 +1738,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_notrace);
 #endif
@@ -1782,7 +1783,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	stat(s, FREE_SLOWPATH);
 	slab_lock(page);
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 checks_ok:
@@ -2489,7 +2490,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	s->refcount--;
 	if (!s->refcount) {
 		list_del(&s->list);
-		up_write(&slub_lock);
 		if (kmem_cache_close(s)) {
 			printk(KERN_ERR "SLUB %s: %s called for cache that "
 				"still has objects.\n", s->name, __func__);
@@ -2498,7 +2498,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 		sysfs_slab_remove(s);
-	} else
-		up_write(&slub_lock);
+	}
+	up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
@@ -2727,7 +2727,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
@@ -3117,9 +3117,12 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
-		kmalloc_caches[i]. name =
-			kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+
+		BUG_ON(!s);
+		kmalloc_caches[i].name = s;
+	}
 
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
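kasprintf() kmalloc()s the formatted string and returns NULL on allocation failure; before this change a failure would silently leave kmalloc_caches[i].name NULL and blow up later when the name is handed to sysfs. During early boot there is no sane recovery path, hence BUG_ON(). In ordinary code the result is checked and eventually kfree()d, roughly like this sketch (make_name is hypothetical):

#include <linux/slab.h>
#include <linux/kernel.h>

static char *make_name(int shift)
{
	char *name = kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << shift);

	if (!name)
		return NULL;	/* let the caller unwind */
	return name;		/* caller must kfree() it eventually */
}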
@@ -3222,14 +3225,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		 */
 		s->objsize = max(s->objsize, (int)size);
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
-		up_write(&slub_lock);
 
 		if (sysfs_slab_alias(s, name)) {
-			down_write(&slub_lock);
 			s->refcount--;
-			up_write(&slub_lock);
 			goto err;
 		}
+		up_write(&slub_lock);
 		return s;
 	}
 
@@ -3238,14 +3239,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		if (kmem_cache_open(s, GFP_KERNEL, name,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
-			up_write(&slub_lock);
 			if (sysfs_slab_add(s)) {
-				down_write(&slub_lock);
 				list_del(&s->list);
-				up_write(&slub_lock);
 				kfree(s);
 				goto err;
 			}
+			up_write(&slub_lock);
 			return s;
 		}
 		kfree(s);
@@ -3311,7 +3310,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
 
 	/* Honor the call site pointer we recieved. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -3394,16 +3393,6 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page,
 	} else
 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
 			s->name, page);
-
-	if (s->flags & DEBUG_DEFAULT_FLAGS) {
-		if (!PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug not set "
-				"on slab 0x%p\n", s->name, page);
-	} else {
-		if (PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug set on "
-				"slab 0x%p\n", s->name, page);
-	}
 }
 
 static int validate_slab_node(struct kmem_cache *s,
@@ -4503,6 +4492,13 @@ static int sysfs_slab_add(struct kmem_cache *s)
 
 static void sysfs_slab_remove(struct kmem_cache *s)
 {
+	if (slab_state < SYSFS)
+		/*
+		 * Sysfs has not been setup yet so no need to remove the
+		 * cache from sysfs.
+		 */
+		return;
+
 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
 	kobject_del(&s->kobj);
 	kobject_put(&s->kobj);
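This guard makes sysfs_slab_remove() safe for caches destroyed before slab_sysfs_init() has run, when no kobject exists yet; that is what allows slab caches to be removed during boot. For context, slab_state in mm/slub.c progresses through roughly these stages (paraphrased from this kernel's source, comments abridged):

static enum {
	DOWN,		/* No slab functionality available */
	PARTIAL,	/* kmem_cache_open() works */
	UP,		/* Everything works but does not show up in sysfs */
	SYSFS		/* Sysfs support is up */
} slab_state = DOWN;

Until slab_state reaches SYSFS (set in slab_sysfs_init()), there is nothing in sysfs to remove, so the function can simply return.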
@@ -4548,8 +4544,11 @@ static int __init slab_sysfs_init(void)
 	struct kmem_cache *s;
 	int err;
 
+	down_write(&slub_lock);
+
 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
 	if (!slab_kset) {
+		up_write(&slub_lock);
 		printk(KERN_ERR "Cannot register slab subsystem.\n");
 		return -ENOSYS;
 	}
@@ -4574,6 +4573,7 @@ static int __init slab_sysfs_init(void)
 		kfree(al);
 	}
 
+	up_write(&slub_lock);
 	resiliency_test();
 	return 0;
 }