
Commit b28a02de authored by Pekka Enberg, committed by Linus Torvalds

[PATCH] slab: fix code formatting



The slab allocator code is inconsistent in coding style and messy.  For this
patch, I ran Lindent for mm/slab.c and fixed up goofs by hand.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
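
For reference, Lindent is the kernel's wrapper around GNU indent(1), shipped in the source tree as scripts/Lindent. The bulk of this reformatting was presumably generated with an invocation along these lines, run from the top of the kernel tree, before the remaining goofs were fixed up by hand (a sketch of the workflow, not something recorded in the patch itself):

	# Reformat mm/slab.c in place using the kernel's standard indent options
	scripts/Lindent mm/slab.c
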
parent 4d268eba
mm/slab.c: +500 −464
@@ -130,7 +130,6 @@
 #define	FORCED_DEBUG	0
 #endif
 
-
 /* Shouldn't this be in a header file somewhere? */
 #define	BYTES_PER_WORD		sizeof(void *)
 
@@ -523,7 +522,8 @@ static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 	if (cachep->flags & SLAB_STORE_USER)
-		return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD);
+		return (unsigned long *)(objp + cachep->objsize -
+					 2 * BYTES_PER_WORD);
 	return (unsigned long *)(objp + cachep->objsize - BYTES_PER_WORD);
 }
 
@@ -812,7 +812,8 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 	kfree(ac_ptr);
 }
 
-static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache *ac, int node)
+static inline void __drain_alien_cache(kmem_cache_t *cachep,
+				       struct array_cache *ac, int node)
 {
 	struct kmem_list3 *rl3 = cachep->nodelists[node];
 
@@ -900,7 +901,8 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 			BUG_ON(!l3);
 			if (!l3->shared) {
 				if (!(nc = alloc_arraycache(node,
-					cachep->shared*cachep->batchcount,
+							    cachep->shared *
+							    cachep->batchcount,
 							    0xbaadf00d)))
 					goto bad;
 
@@ -985,8 +987,7 @@ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
 /*
  * swap the static kmem_list3 with kmalloced memory
  */
-static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list,
-		int nodeid)
+static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, int nodeid)
 {
 	struct kmem_list3 *ptr;
 
@@ -1074,14 +1075,18 @@ void __init kmem_cache_init(void)
 	 */
 
 	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
-				sizes[INDEX_AC].cs_size, ARCH_KMALLOC_MINALIGN,
-				(ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
+						      sizes[INDEX_AC].cs_size,
+						      ARCH_KMALLOC_MINALIGN,
+						      (ARCH_KMALLOC_FLAGS |
+						       SLAB_PANIC), NULL, NULL);
 
 	if (INDEX_AC != INDEX_L3)
 		sizes[INDEX_L3].cs_cachep =
 		    kmem_cache_create(names[INDEX_L3].name,
-				sizes[INDEX_L3].cs_size, ARCH_KMALLOC_MINALIGN,
-				(ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
+				      sizes[INDEX_L3].cs_size,
+				      ARCH_KMALLOC_MINALIGN,
+				      (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL,
+				      NULL);
 
 	while (sizes->cs_size != ULONG_MAX) {
 		/*
@@ -1093,8 +1098,11 @@ void __init kmem_cache_init(void)
 		 */
 		if (!sizes->cs_cachep)
 			sizes->cs_cachep = kmem_cache_create(names->name,
-				sizes->cs_size, ARCH_KMALLOC_MINALIGN,
-				(ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
+							     sizes->cs_size,
+							     ARCH_KMALLOC_MINALIGN,
+							     (ARCH_KMALLOC_FLAGS
+							      | SLAB_PANIC),
+							     NULL, NULL);
 
 		/* Inc off-slab bufctl limit until the ceiling is hit. */
 		if (!(OFF_SLAB(sizes->cs_cachep))) {
@@ -1103,9 +1111,12 @@ void __init kmem_cache_init(void)
 		}
 
 		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
-			sizes->cs_size, ARCH_KMALLOC_MINALIGN,
-			(ARCH_KMALLOC_FLAGS | SLAB_CACHE_DMA | SLAB_PANIC),
-			NULL, NULL);
+							sizes->cs_size,
+							ARCH_KMALLOC_MINALIGN,
+							(ARCH_KMALLOC_FLAGS |
+							 SLAB_CACHE_DMA |
+							 SLAB_PANIC), NULL,
+							NULL);
 
 		sizes++;
 		names++;
@@ -1358,7 +1369,8 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
 			/* Mismatch ! */
 			/* Print header */
 			if (lines == 0) {
-				printk(KERN_ERR "Slab corruption: start=%p, len=%d\n",
+				printk(KERN_ERR
+				       "Slab corruption: start=%p, len=%d\n",
 				       realobj, size);
 				print_objinfo(cachep, objp, 0);
 			}
@@ -1416,8 +1428,11 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
 
 		if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-			if ((cachep->objsize%PAGE_SIZE)==0 && OFF_SLAB(cachep))
-				kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE,1);
+			if ((cachep->objsize % PAGE_SIZE) == 0
+			    && OFF_SLAB(cachep))
+				kernel_map_pages(virt_to_page(objp),
+						 cachep->objsize / PAGE_SIZE,
+						 1);
 			else
 				check_poison_obj(cachep, objp);
 #else
@@ -1568,8 +1583,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if ((!name) ||
 	    in_interrupt() ||
 	    (size < BYTES_PER_WORD) ||
-		(size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
-		(dtor && !ctor)) {
+	    (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
 		printk(KERN_ERR "%s: Early error in slab %s\n",
 		       __FUNCTION__, name);
 		BUG();
@@ -1612,7 +1626,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
		       "requested - %s\n", __FUNCTION__, name);
		       "requested - %s\n", __FUNCTION__, name);
		flags &= ~SLAB_DEBUG_INITIAL;
		flags &= ~SLAB_DEBUG_INITIAL;
	}
	}

#if FORCED_DEBUG
#if FORCED_DEBUG
	/*
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * Enable redzoning and last user accounting, except for caches with
@@ -1620,7 +1633,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * above the next power of two: caches with object sizes just above a
 	 * power of two have a significant amount of internal fragmentation.
 	 */
-	if ((size < 4096 || fls(size-1) == fls(size-1+3*BYTES_PER_WORD)))
+	if ((size < 4096
+	     || fls(size - 1) == fls(size - 1 + 3 * BYTES_PER_WORD)))
 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
 	if (!(flags & SLAB_DESTROY_BY_RCU))
 		flags |= SLAB_POISON;
@@ -1703,7 +1717,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		size += BYTES_PER_WORD;
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-	if (size >= malloc_sizes[INDEX_L3+1].cs_size && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
+	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
+	    && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
 		cachep->dbghead += PAGE_SIZE - size;
 		size = PAGE_SIZE;
 	}
@@ -1752,7 +1767,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
 	if (flags & CFLGS_OFF_SLAB) {
 		/* really off slab. No need for manual alignment */
-		slab_size = cachep->num*sizeof(kmem_bufctl_t)+sizeof(struct slab);
+		slab_size =
+		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
 	}
 
 	cachep->colour_off = cache_line_size();
@@ -1800,8 +1816,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 				g_cpucache_up = PARTIAL_AC;
 		} else {
 			cachep->array[smp_processor_id()] =
-				kmalloc(sizeof(struct arraycache_init),
-						GFP_KERNEL);
+			    kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
 
 			if (g_cpucache_up == PARTIAL_AC) {
 				set_up_list3s(cachep, SIZE_L3);
@@ -1811,10 +1826,12 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 				for_each_online_node(node) {
 
 					cachep->nodelists[node] =
-						kmalloc_node(sizeof(struct kmem_list3),
+					    kmalloc_node(sizeof
+							 (struct kmem_list3),
 							 GFP_KERNEL, node);
 					BUG_ON(!cachep->nodelists[node]);
-					kmem_list3_init(cachep->nodelists[node]);
+					kmem_list3_init(cachep->
+							nodelists[node]);
 				}
 			}
 		}
@@ -1895,8 +1912,8 @@ static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
 	preempt_enable();
 }
 
-static void drain_array_locked(kmem_cache_t* cachep,
-				struct array_cache *ac, int force, int node);
+static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+				int force, int node);
 
 static void do_drain(void *arg)
 {
@@ -1958,8 +1975,7 @@ static int __node_shrink(kmem_cache_t *cachep, int node)
 		slab_destroy(cachep, slabp);
 		spin_lock_irq(&l3->list_lock);
 	}
-	ret = !list_empty(&l3->slabs_full) ||
-		!list_empty(&l3->slabs_partial);
+	ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
 	return ret;
 }
 
@@ -2116,7 +2132,8 @@ static void cache_init_objs(kmem_cache_t *cachep,
 		 * Otherwise, deadlock. They must also be threaded.
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
-			cachep->ctor(objp+obj_dbghead(cachep), cachep, ctor_flags);
+			cachep->ctor(objp + obj_dbghead(cachep), cachep,
+				     ctor_flags);
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2126,8 +2143,10 @@ static void cache_init_objs(kmem_cache_t *cachep,
 				slab_error(cachep, "constructor overwrote the"
 					   " start of an object");
 		}
-		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
-	       		kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0);
+		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
+		    && cachep->flags & SLAB_POISON)
+			kernel_map_pages(virt_to_page(objp),
+					 cachep->objsize / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
 			cachep->ctor(objp, cachep, ctor_flags);
@@ -2273,7 +2292,8 @@ static void kfree_debugcheck(const void *objp)
 	}
 	page = virt_to_page(objp);
 	if (!PageSlab(page)) {
-		printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", (unsigned long)objp);
+		printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n",
+		       (unsigned long)objp);
 		BUG();
 	}
 }
@@ -2290,20 +2310,26 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
 	page = virt_to_page(objp);
 
 	if (page_get_cache(page) != cachep) {
-		printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n",
+		printk(KERN_ERR
+		       "mismatch in kmem_cache_free: expected cache %p, got %p\n",
 		       page_get_cache(page), cachep);
 		printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
-		printk(KERN_ERR "%p is %s.\n", page_get_cache(page), page_get_cache(page)->name);
+		printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
+		       page_get_cache(page)->name);
 		WARN_ON(1);
 	}
 	slabp = page_get_slab(page);
 
 	if (cachep->flags & SLAB_RED_ZONE) {
-		if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
-			slab_error(cachep, "double free, or memory outside"
+		if (*dbg_redzone1(cachep, objp) != RED_ACTIVE
+		    || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
+			slab_error(cachep,
+				   "double free, or memory outside"
 				   " object was overwritten");
-			printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
-					objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
+			printk(KERN_ERR
+			       "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
+			       objp, *dbg_redzone1(cachep, objp),
+			       *dbg_redzone2(cachep, objp));
 		}
 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
@@ -2334,7 +2360,8 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
 			store_stackinfo(cachep, objp, (unsigned long)caller);
-	       		kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0);
+			kernel_map_pages(virt_to_page(objp),
+					 cachep->objsize / PAGE_SIZE, 0);
 		} else {
 			poison_obj(cachep, objp, POISON_FREE);
 		}
@@ -2358,9 +2385,12 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
 	}
 	if (entries != cachep->num - slabp->inuse) {
 	      bad:
-		printk(KERN_ERR "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
+		printk(KERN_ERR
+		       "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
 		       cachep->name, cachep->num, slabp, slabp->inuse);
-		for (i=0;i<sizeof(slabp)+cachep->num*sizeof(kmem_bufctl_t);i++) {
+		for (i = 0;
+		     i < sizeof(slabp) + cachep->num * sizeof(kmem_bufctl_t);
+		     i++) {
 			if ((i % 16) == 0)
 				printk("\n%03x:", i);
 			printk(" %02x", ((unsigned char *)slabp)[i]);
@@ -2485,16 +2515,16 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
 }
 
 #if DEBUG
-static void *
-cache_alloc_debugcheck_after(kmem_cache_t *cachep,
-			gfp_t flags, void *objp, void *caller)
+static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
+					void *objp, void *caller)
 {
 	if (!objp)
 		return objp;
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
-			kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 1);
+			kernel_map_pages(virt_to_page(objp),
+					 cachep->objsize / PAGE_SIZE, 1);
 		else
 			check_poison_obj(cachep, objp);
 #else
@@ -2506,11 +2536,15 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
 		*dbg_userword(cachep, objp) = caller;
 
 	if (cachep->flags & SLAB_RED_ZONE) {
-		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
-			slab_error(cachep, "double free, or memory outside"
+		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE
+		    || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
+			slab_error(cachep,
+				   "double free, or memory outside"
 				   " object was overwritten");
-			printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
-					objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
+			printk(KERN_ERR
+			       "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
+			       objp, *dbg_redzone1(cachep, objp),
+			       *dbg_redzone2(cachep, objp));
 		}
 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
@@ -2638,7 +2672,8 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 /*
  * Caller needs to acquire correct kmem_list's list_lock
  */
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node)
+static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
+		       int node)
 {
 	int i;
 	struct kmem_list3 *l3;
@@ -2710,8 +2745,7 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
 			if (batchcount > max)
 				batchcount = max;
 			memcpy(&(shared_array->entry[shared_array->avail]),
-					ac->entry,
-					sizeof(void*)*batchcount);
+			       ac->entry, sizeof(void *) * batchcount);
 			shared_array->avail += batchcount;
 			goto free_done;
 		}
@@ -2743,7 +2777,6 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
 		sizeof(void *) * ac->avail);
 }
 
-
 /*
  * __cache_free
  * Release an obj back to its cache. If the obj has a constructed
@@ -2768,7 +2801,8 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 		if (unlikely(slabp->nodeid != numa_node_id())) {
 			struct array_cache *alien = NULL;
 			int nodeid = slabp->nodeid;
-			struct kmem_list3 *l3 = cachep->nodelists[numa_node_id()];
+			struct kmem_list3 *l3 =
+			    cachep->nodelists[numa_node_id()];
 
 			STATS_INC_NODEFREES(cachep);
 			if (l3->alien && l3->alien[nodeid]) {
@@ -2880,7 +2914,9 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 
 	if (unlikely(!cachep->nodelists[nodeid])) {
 		/* Fall back to __cache_alloc if we run into trouble */
-		printk(KERN_WARNING "slab: not allocating in inactive node %d for cache %s\n", nodeid, cachep->name);
+		printk(KERN_WARNING
+		       "slab: not allocating in inactive node %d for cache %s\n",
+		       nodeid, cachep->name);
 		return __cache_alloc(cachep, flags);
 	}
 
@@ -2891,7 +2927,9 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 	else
 		ptr = __cache_alloc_node(cachep, flags, nodeid);
 	local_irq_restore(save_flags);
-	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0));
+	ptr =
+	    cache_alloc_debugcheck_after(cachep, flags, ptr,
+					 __builtin_return_address(0));
 
 	return ptr;
 }
@@ -3059,8 +3097,7 @@ EXPORT_SYMBOL(kfree);
  * Don't free memory not originally allocated by alloc_percpu()
  * The complemented objp is to check for that.
  */
-void
-free_percpu(const void *objp)
+void free_percpu(const void *objp)
 {
 	int i;
 	struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp);
@@ -3104,15 +3141,15 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
 			goto fail;
 #endif
 		if (!(new = alloc_arraycache(node, (cachep->shared *
-				cachep->batchcount), 0xbaadf00d)))
+						    cachep->batchcount),
+					     0xbaadf00d)))
 			goto fail;
 		if ((l3 = cachep->nodelists[node])) {
 
 			spin_lock_irq(&l3->list_lock);
 
 			if ((nc = cachep->nodelists[node]->shared))
-				free_block(cachep, nc->entry,
-							nc->avail, node);
+				free_block(cachep, nc->entry, nc->avail, node);
 
 			l3->shared = new;
 			if (!cachep->nodelists[node]->alien) {
@@ -3162,7 +3199,6 @@ static void do_ccupdate_local(void *info)
 	new->new[smp_processor_id()] = old;
 }
 
-
 static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
 			    int shared)
 {
@@ -3171,9 +3207,11 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
 
 	memset(&new.new, 0, sizeof(new.new));
 	for_each_online_cpu(i) {
-		new.new[i] = alloc_arraycache(cpu_to_node(i), limit, batchcount);
+		new.new[i] =
+		    alloc_arraycache(cpu_to_node(i), limit, batchcount);
 		if (!new.new[i]) {
-			for (i--; i >= 0; i--) kfree(new.new[i]);
+			for (i--; i >= 0; i--)
+				kfree(new.new[i]);
 			return -ENOMEM;
 		}
 	}
@@ -3207,7 +3245,6 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
 	return 0;
 }
 
-
 static void enable_cpucache(kmem_cache_t *cachep)
 {
 	int err;
@@ -3260,8 +3297,8 @@ static void enable_cpucache(kmem_cache_t *cachep)
 		       cachep->name, -err);
 }
 
-static void drain_array_locked(kmem_cache_t *cachep,
-				struct array_cache *ac, int force, int node)
+static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+				int force, int node)
 {
 	int tofree;
 
@@ -3299,7 +3336,8 @@ static void cache_reap(void *unused)
 
 	if (down_trylock(&cache_chain_sem)) {
 		/* Give up. Setup the next iteration. */
-		schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
+		schedule_delayed_work(&__get_cpu_var(reap_work),
+				      REAPTIMEOUT_CPUC);
 		return;
 	}
 
@@ -3338,7 +3376,9 @@ static void cache_reap(void *unused)
 			goto next_unlock;
 		}
 
-		tofree = (l3->free_limit+5*searchp->num-1)/(5*searchp->num);
+		tofree =
+		    (l3->free_limit + 5 * searchp->num -
+		     1) / (5 * searchp->num);
 		do {
 			p = l3->slabs_free.next;
 			if (p == &(l3->slabs_free))
@@ -3491,8 +3531,7 @@ static int s_show(struct seq_file *m, void *p)
 		   name, active_objs, num_objs, cachep->objsize,
 		   cachep->num, (1 << cachep->gfporder));
 	seq_printf(m, " : tunables %4u %4u %4u",
-			cachep->limit, cachep->batchcount,
-			cachep->shared);
+		   cachep->limit, cachep->batchcount, cachep->shared);
 	seq_printf(m, " : slabdata %6lu %6lu %6lu",
 		   active_slabs, num_slabs, shared_avail);
 #if STATS
@@ -3507,9 +3546,7 @@ static int s_show(struct seq_file *m, void *p)
 		unsigned long node_frees = cachep->node_frees;
 
 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
-				%4lu %4lu %4lu %4lu",
-				allocs, high, grown, reaped, errors,
-				max_freeable, node_allocs, node_frees);
+				%4lu %4lu %4lu %4lu", allocs, high, grown, reaped, errors, max_freeable, node_allocs, node_frees);
 	}
 	/* cpu stats */
 	{
@@ -3586,8 +3623,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		if (!strcmp(cachep->name, kbuf)) {
 			if (limit < 1 ||
 			    batchcount < 1 ||
-			    batchcount > limit ||
-			    shared < 0) {
+			    batchcount > limit || shared < 0) {
 				res = 0;
 			} else {
 				res = do_tune_cpucache(cachep, limit,