
Commit 4867faab authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slub: Deal with hyperthetical case of PAGE_SIZE > 2M
  slub: Remove node check in slab_free
  slub: avoid label inside conditional
  slub: Make CONFIG_DEBUG_PAGE_ALLOC work with new fastpath
  slub: Avoid warning for !CONFIG_SLUB_DEBUG
  slub: Remove CONFIG_CMPXCHG_LOCAL ifdeffery
  slub: Move debug handlign in __slab_free
  slub: Move node determination out of hotpath
  slub: Eliminate repeated use of c->page through a new page variable
  slub: get_map() function to establish map of free objects in a slab
  slub: Use NUMA_NO_NODE in get_partial
  slub: Fix a typo in config name
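The thread running through this series is SLUB's lockless fastpath: the per-cpu freelist pointer is paired with a transaction id (tid), and allocation/free commit both with a single cmpxchg_double instead of disabling interrupts; the commits above make that path unconditional and drop the CONFIG_CMPXCHG_LOCAL fallback. As a rough userspace sketch of the idea only — the 32/32-bit index+tid packing, the pool, and all function names below are invented for illustration and are not the kernel code:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NOBJ 8
static int pool[NOBJ];			/* stand-in for the objects of one slab */
static int next_free[NOBJ];		/* per-object "free pointer", like get_freepointer() */
static _Atomic uint64_t slot;		/* low 32 bits: freelist head (+1, 0 = empty); high 32: tid */

static int *alloc_fast(void)
{
	uint64_t cur = atomic_load(&slot);
	for (;;) {
		uint32_t head = (uint32_t)cur;
		uint32_t tid  = (uint32_t)(cur >> 32);
		if (!head)
			return NULL;	/* empty: the kernel falls back to __slab_alloc() */
		uint64_t next = ((uint64_t)(tid + 1) << 32) |
				(uint32_t)next_free[head - 1];
		/* head and tid must both still match, or somebody got in between */
		if (atomic_compare_exchange_weak(&slot, &cur, next))
			return &pool[head - 1];	/* ALLOC_FASTPATH */
		/* cur was reloaded by the failed CAS: note_cmpxchg_failure(), redo */
	}
}

int main(void)
{
	for (int i = 0; i < NOBJ; i++)
		next_free[i] = i;	/* object i links to object i-1; 0 terminates */
	atomic_store(&slot, (uint64_t)NOBJ);	/* head = last object, tid = 0 */
	for (int *p; (p = alloc_fast()) != NULL; )
		printf("allocated object %td\n", p - pool);
	return 0;
}

A failed compare-exchange means some other operation ran on the freelist in between (an interrupt, or migration to another cpu in the kernel case), so the caller simply redoes the read-modify-update — which is what note_cmpxchg_failure() and the redo label implement in slab_alloc()/slab_free() below.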
parents 6fad2b5b bfb91fb6
include/linux/slub_def.h +4 −4
@@ -37,9 +37,7 @@ enum stat_item {

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
-#ifdef CONFIG_CMPXCHG_LOCAL
	unsigned long tid;	/* Globally unique transaction id */
-#endif
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
@@ -179,7 +177,8 @@ static __always_inline int kmalloc_index(size_t size)
	if (size <=   4 * 1024) return 12;
/*
 * The following is only needed to support architectures with a larger page
- * size than 4k.
+ * size than 4k. We need to support 2 * PAGE_SIZE here. So for a 64k page
+ * size we would have to go up to 128k.
 */
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
@@ -190,7 +189,8 @@ static __always_inline int kmalloc_index(size_t size)
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
-	return -1;
+	BUG();
+	return -1; /* Will never be reached */

/*
 * What we really wanted to do and cannot do because of compiler issues is:
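The kmalloc_index() hunk above encodes a simple rule: SLUB's table of power-of-two kmalloc caches must reach 2 * PAGE_SIZE, so with a 64 KiB page size the table has to extend to 128 KiB, i.e. index 17. A minimal sketch of that size-to-index mapping, ignoring the special 96- and 192-byte caches; kmalloc_index_sketch is an invented name, not the kernel function:

#include <assert.h>

/* power-of-two size-to-index mapping, as in the table above */
static int kmalloc_index_sketch(unsigned long size)
{
	for (int i = 3; i <= 21; i++)	/* 8 bytes ... 2 MiB */
		if (size <= (1UL << i))
			return i;
	return -1;
}

int main(void)
{
	/* 4 KiB pages: the largest required cache is 2 * 4 KiB = 8 KiB, index 13 */
	assert(kmalloc_index_sketch(2 * 4 * 1024) == 13);
	/* 64 KiB pages: 2 * 64 KiB = 128 KiB, so the table must reach index 17 */
	assert(kmalloc_index_sketch(2 * 64 * 1024) == 17);
	return 0;
}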
mm/slub.c +65 −100
@@ -261,6 +261,18 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
	return *(void **)(object + s->offset);
}

+static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
+{
+	void *p;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
+#else
+	p = get_freepointer(s, object);
+#endif
+	return p;
+}
+
static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	*(void **)(object + s->offset) = fp;
@@ -271,10 +283,6 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
			__p += (__s)->size)

-/* Scan freelist */
-#define for_each_free_object(__p, __s, __free) \
-	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
-
/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
@@ -331,6 +339,21 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
}

#ifdef CONFIG_SLUB_DEBUG
+/*
+ * Determine a map of object in use on a page.
+ *
+ * Slab lock or node listlock must be held to guarantee that the page does
+ * not vanish from under us.
+ */
+static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
+{
+	void *p;
+	void *addr = page_address(page);
+
+	for (p = page->freelist; p; p = get_freepointer(s, p))
+		set_bit(slab_index(p, s, addr), map);
+}
+
/*
 * Debug settings:
 */
@@ -1487,7 +1510,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;

	page = get_partial_node(get_node(s, searchnode));
-	if (page || node != -1)
+	if (page || node != NUMA_NO_NODE)
		return page;

	return get_any_partial(s, flags);
@@ -1540,7 +1563,6 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
	}
}

-#ifdef CONFIG_CMPXCHG_LOCAL
#ifdef CONFIG_PREEMPT
/*
 * Calculate the next globally unique transaction for disambiguiation
@@ -1600,17 +1622,12 @@ static inline void note_cmpxchg_failure(const char *n,
	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
}

-#endif
-
void init_kmem_cache_cpus(struct kmem_cache *s)
{
-#ifdef CONFIG_CMPXCHG_LOCAL
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
-#endif
-
}
/*
 * Remove the cpu slab
@@ -1643,9 +1660,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
		page->inuse--;
	}
	c->page = NULL;
-#ifdef CONFIG_CMPXCHG_LOCAL
	c->tid = next_tid(c->tid);
-#endif
	unfreeze_slab(s, page, tail);
}

@@ -1779,8 +1794,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
			  unsigned long addr, struct kmem_cache_cpu *c)
{
	void **object;
-	struct page *new;
-#ifdef CONFIG_CMPXCHG_LOCAL
+	struct page *page;
	unsigned long flags;

	local_irq_save(flags);
@@ -1791,38 +1805,36 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
	 * pointer.
	 */
	c = this_cpu_ptr(s->cpu_slab);
#endif
-#endif

	/* We handle __GFP_ZERO in the caller */
	gfpflags &= ~__GFP_ZERO;

-	if (!c->page)
+	page = c->page;
+	if (!page)
		goto new_slab;

-	slab_lock(c->page);
+	slab_lock(page);
	if (unlikely(!node_match(c, node)))
		goto another_slab;

	stat(s, ALLOC_REFILL);

load_freelist:
-	object = c->page->freelist;
+	object = page->freelist;
	if (unlikely(!object))
		goto another_slab;
	if (kmem_cache_debug(s))
		goto debug;

	c->freelist = get_freepointer(s, object);
-	c->page->inuse = c->page->objects;
-	c->page->freelist = NULL;
-	c->node = page_to_nid(c->page);
+	page->inuse = page->objects;
+	page->freelist = NULL;

unlock_out:
-	slab_unlock(c->page);
-#ifdef CONFIG_CMPXCHG_LOCAL
+	slab_unlock(page);
	c->tid = next_tid(c->tid);
	local_irq_restore(flags);
-#endif
	stat(s, ALLOC_SLOWPATH);
	return object;

@@ -1830,10 +1842,11 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
	deactivate_slab(s, c);

new_slab:
-	new = get_partial(s, gfpflags, node);
-	if (new) {
-		c->page = new;
+	page = get_partial(s, gfpflags, node);
+	if (page) {
		stat(s, ALLOC_FROM_PARTIAL);
+		c->node = page_to_nid(page);
+		c->page = page;
		goto load_freelist;
	}

@@ -1841,33 +1854,35 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
	if (gfpflags & __GFP_WAIT)
		local_irq_enable();

-	new = new_slab(s, gfpflags, node);
+	page = new_slab(s, gfpflags, node);

	if (gfpflags & __GFP_WAIT)
		local_irq_disable();

-	if (new) {
+	if (page) {
		c = __this_cpu_ptr(s->cpu_slab);
		stat(s, ALLOC_SLAB);
		if (c->page)
			flush_slab(s, c);
-		slab_lock(new);
-		__SetPageSlubFrozen(new);
-		c->page = new;
+
+		slab_lock(page);
+		__SetPageSlubFrozen(page);
+		c->node = page_to_nid(page);
+		c->page = page;
		goto load_freelist;
	}
	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
		slab_out_of_memory(s, gfpflags, node);
-#ifdef CONFIG_CMPXCHG_LOCAL
	local_irq_restore(flags);
-#endif
	return NULL;
debug:
-	if (!alloc_debug_processing(s, c->page, object, addr))
+	if (!alloc_debug_processing(s, page, object, addr))
		goto another_slab;

-	c->page->inuse++;
-	c->page->freelist = get_freepointer(s, object);
+	page->inuse++;
+	page->freelist = get_freepointer(s, object);
+	deactivate_slab(s, c);
+	c->page = NULL;
	c->node = NUMA_NO_NODE;
	goto unlock_out;
}
@@ -1887,20 +1902,12 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
{
	void **object;
	struct kmem_cache_cpu *c;
-#ifdef CONFIG_CMPXCHG_LOCAL
	unsigned long tid;
-#else
-	unsigned long flags;
-#endif

	if (slab_pre_alloc_hook(s, gfpflags))
		return NULL;

-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_save(flags);
-#else
redo:
-#endif

	/*
	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
@@ -1910,7 +1917,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
	 */
	c = __this_cpu_ptr(s->cpu_slab);

-#ifdef CONFIG_CMPXCHG_LOCAL
	/*
	 * The transaction ids are globally unique per cpu and per operation on
	 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
@@ -1919,7 +1925,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
	 */
	tid = c->tid;
	barrier();
-#endif

	object = c->freelist;
	if (unlikely(!object || !node_match(c, node)))
@@ -1927,7 +1932,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
		object = __slab_alloc(s, gfpflags, node, addr, c);

	else {
-#ifdef CONFIG_CMPXCHG_LOCAL
		/*
		 * The cmpxchg will only match if there was no additional
		 * operation and if we are on the right processor.
@@ -1943,21 +1947,14 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
		if (unlikely(!irqsafe_cpu_cmpxchg_double(
				s->cpu_slab->freelist, s->cpu_slab->tid,
				object, tid,
-				get_freepointer(s, object), next_tid(tid)))) {
+				get_freepointer_safe(s, object), next_tid(tid)))) {

			note_cmpxchg_failure("slab_alloc", s, tid);
			goto redo;
		}
-#else
-		c->freelist = get_freepointer(s, object);
-#endif
		stat(s, ALLOC_FASTPATH);
	}

-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_restore(flags);
-#endif
-
	if (unlikely(gfpflags & __GFP_ZERO) && object)
		memset(object, 0, s->objsize);

@@ -2034,18 +2031,15 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
{
	void *prior;
	void **object = (void *)x;
-#ifdef CONFIG_CMPXCHG_LOCAL
	unsigned long flags;

	local_irq_save(flags);
-#endif
	slab_lock(page);
	stat(s, FREE_SLOWPATH);

-	if (kmem_cache_debug(s))
-		goto debug;
+	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
+		goto out_unlock;

-checks_ok:
	prior = page->freelist;
	set_freepointer(s, object, prior);
	page->freelist = object;
@@ -2070,9 +2064,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,

out_unlock:
	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
	local_irq_restore(flags);
-#endif
	return;

slab_empty:
@@ -2084,17 +2076,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
		stat(s, FREE_REMOVE_PARTIAL);
	}
	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
	local_irq_restore(flags);
-#endif
	stat(s, FREE_SLAB);
	discard_slab(s, page);
	return;

-debug:
-	if (!free_debug_processing(s, page, x, addr))
-		goto out_unlock;
-	goto checks_ok;
}

/*
@@ -2113,20 +2097,11 @@ static __always_inline void slab_free(struct kmem_cache *s,
{
	void **object = (void *)x;
	struct kmem_cache_cpu *c;
-#ifdef CONFIG_CMPXCHG_LOCAL
	unsigned long tid;
-#else
-	unsigned long flags;
-#endif

	slab_free_hook(s, x);

-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_save(flags);
-
-#else
redo:
-#endif

	/*
	 * Determine the currently cpus per cpu slab.
@@ -2136,15 +2111,12 @@ static __always_inline void slab_free(struct kmem_cache *s,
	 */
	c = __this_cpu_ptr(s->cpu_slab);

-#ifdef CONFIG_CMPXCHG_LOCAL
	tid = c->tid;
	barrier();
-#endif

-	if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
+	if (likely(page == c->page)) {
		set_freepointer(s, object, c->freelist);

-#ifdef CONFIG_CMPXCHG_LOCAL
		if (unlikely(!irqsafe_cpu_cmpxchg_double(
				s->cpu_slab->freelist, s->cpu_slab->tid,
				c->freelist, tid,
@@ -2153,16 +2125,10 @@ static __always_inline void slab_free(struct kmem_cache *s,
			note_cmpxchg_failure("slab_free", s, tid);
			goto redo;
		}
-#else
-		c->freelist = object;
-#endif
		stat(s, FREE_FASTPATH);
	} else
		__slab_free(s, page, x, addr);

-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_restore(flags);
-#endif
}

void kmem_cache_free(struct kmem_cache *s, void *x)
@@ -2673,9 +2639,8 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
		return;
	slab_err(s, page, "%s", text);
	slab_lock(page);
-	for_each_free_object(p, s, page->freelist)
-		set_bit(slab_index(p, s, addr), map);

+	get_map(s, page, map);
	for_each_object(p, s, addr, page->objects) {

		if (!test_bit(slab_index(p, s, addr), map)) {
@@ -3203,7 +3168,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
			list_for_each_entry(p, &n->partial, lru)
				p->slab = s;

-#ifdef CONFIG_SLAB_DEBUG
+#ifdef CONFIG_SLUB_DEBUG
			list_for_each_entry(p, &n->full, lru)
				p->slab = s;
#endif
@@ -3610,8 +3575,9 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
	/* Now we know that a valid freelist exists */
	bitmap_zero(map, page->objects);

-	for_each_free_object(p, s, page->freelist) {
-		set_bit(slab_index(p, s, addr), map);
+	get_map(s, page, map);
+	for_each_object(p, s, addr, page->objects) {
+		if (test_bit(slab_index(p, s, addr), map))
			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
				return 0;
	}
@@ -3821,8 +3787,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
	void *p;

	bitmap_zero(map, page->objects);
-	for_each_free_object(p, s, page->freelist)
-		set_bit(slab_index(p, s, addr), map);
+	get_map(s, page, map);

	for_each_object(p, s, addr, page->objects)
		if (!test_bit(slab_index(p, s, addr), map))
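The last three hunks all collapse the same open-coded loop into the new get_map() helper: walk the page's freelist once, set a bit for every free object, and afterwards treat every clear bit as an allocated object. A small freestanding sketch of that bitmap technique — array indices stand in for object pointers, -1 for the NULL terminator, and all names are invented for the illustration:

#include <stdio.h>
#include <string.h>

#define OBJECTS 16

/* mark every object reachable from the freelist, like get_map() */
static void get_map_sketch(const int *next, int free_head, unsigned char *map)
{
	for (int i = free_head; i >= 0; i = next[i])
		map[i / 8] |= 1u << (i % 8);
}

int main(void)
{
	int next[OBJECTS];
	unsigned char map[OBJECTS / 8] = { 0 };

	memset(next, -1, sizeof(next));	/* -1 terminates, as NULL does in the kernel */
	next[2] = 5;			/* freelist: 2 -> 5 -> 11 */
	next[5] = 11;

	get_map_sketch(next, 2, map);
	/* every clear bit is an object in use, as in validate_slab()/process_slab() */
	for (int i = 0; i < OBJECTS; i++)
		if (!(map[i / 8] & (1u << (i % 8))))
			printf("object %d is allocated\n", i);
	return 0;
}

Replacing three copies of the loop with one helper is also what lets the for_each_free_object() macro be deleted in the earlier hunk.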