Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3dafccf2 authored by Manfred Spraul, committed by Linus Torvalds
Browse files

[PATCH] slab: distinguish between object and buffer size



An object cache has two different object lengths:

  - the amount of memory available for the user (object size)
  - the amount of memory allocated internally (buffer size)

This patch does some renames to make the code reflect that better.

Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent e965f963
Loading
Loading
Loading
Loading
+80 −74
Original line number Original line Diff line number Diff line
@@ -375,7 +375,7 @@ struct kmem_cache {
	unsigned int batchcount;
	unsigned int batchcount;
	unsigned int limit;
	unsigned int limit;
	unsigned int shared;
	unsigned int shared;
	unsigned int objsize;
	unsigned int buffer_size;
/* 2) touched by every alloc & free from the backend */
/* 2) touched by every alloc & free from the backend */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	unsigned int flags;	/* constant flags */
	unsigned int flags;	/* constant flags */
@@ -423,8 +423,14 @@ struct kmem_cache {
	atomic_t freemiss;
	atomic_t freemiss;
#endif
#endif
#if DEBUG
#if DEBUG
	int dbghead;
	/*
	int reallen;
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif
#endif
};
};


@@ -495,50 +501,50 @@ struct kmem_cache {


/* memory layout of objects:
/* memory layout of objects:
 * 0		: objp
 * 0		: objp
 * 0 .. cachep->dbghead - BYTES_PER_WORD - 1: padding. This ensures that
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->dbghead - BYTES_PER_WORD .. cachep->dbghead - 1:
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * 		redzone word.
 * cachep->dbghead: The real object.
 * cachep->obj_offset: The real object.
 * cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
 */
 */
static int obj_dbghead(kmem_cache_t *cachep)
static int obj_offset(kmem_cache_t *cachep)
{
{
	return cachep->dbghead;
	return cachep->obj_offset;
}
}


static int obj_reallen(kmem_cache_t *cachep)
static int obj_size(kmem_cache_t *cachep)
{
{
	return cachep->reallen;
	return cachep->obj_size;
}
}


static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
{
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long*) (objp+obj_dbghead(cachep)-BYTES_PER_WORD);
	return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
}
}


static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
{
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long *)(objp + cachep->objsize -
		return (unsigned long *)(objp + cachep->buffer_size -
					 2 * BYTES_PER_WORD);
					 2 * BYTES_PER_WORD);
	return (unsigned long *)(objp + cachep->objsize - BYTES_PER_WORD);
	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
}
}


static void **dbg_userword(kmem_cache_t *cachep, void *objp)
static void **dbg_userword(kmem_cache_t *cachep, void *objp)
{
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->objsize - BYTES_PER_WORD);
	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}
}


#else
#else


#define obj_dbghead(x)			0
#define obj_offset(x)			0
#define obj_reallen(cachep)		(cachep->objsize)
#define obj_size(cachep)		(cachep->buffer_size)
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
@@ -623,12 +629,12 @@ static kmem_cache_t cache_cache = {
	.batchcount = 1,
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.shared = 1,
	.objsize = sizeof(kmem_cache_t),
	.buffer_size = sizeof(kmem_cache_t),
	.flags = SLAB_NO_REAP,
	.flags = SLAB_NO_REAP,
	.spinlock = SPIN_LOCK_UNLOCKED,
	.spinlock = SPIN_LOCK_UNLOCKED,
	.name = "kmem_cache",
	.name = "kmem_cache",
#if DEBUG
#if DEBUG
	.reallen = sizeof(kmem_cache_t),
	.obj_size = sizeof(kmem_cache_t),
#endif
#endif
};
};


@@ -1057,9 +1063,9 @@ void __init kmem_cache_init(void)
	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
	cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
	cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];


	cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size());
	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());


	cache_estimate(0, cache_cache.objsize, cache_line_size(), 0,
	cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
		       &left_over, &cache_cache.num);
		       &left_over, &cache_cache.num);
	if (!cache_cache.num)
	if (!cache_cache.num)
		BUG();
		BUG();
@@ -1274,9 +1280,9 @@ static void kmem_rcu_free(struct rcu_head *head)
static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
			    unsigned long caller)
			    unsigned long caller)
{
{
	int size = obj_reallen(cachep);
	int size = obj_size(cachep);


	addr = (unsigned long *)&((char *)addr)[obj_dbghead(cachep)];
	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];


	if (size < 5 * sizeof(unsigned long))
	if (size < 5 * sizeof(unsigned long))
		return;
		return;
@@ -1306,8 +1312,8 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,


static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
{
{
	int size = obj_reallen(cachep);
	int size = obj_size(cachep);
	addr = &((char *)addr)[obj_dbghead(cachep)];
	addr = &((char *)addr)[obj_offset(cachep)];


	memset(addr, val, size);
	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
	*(unsigned char *)(addr + size - 1) = POISON_END;
@@ -1344,8 +1350,8 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
			     (unsigned long)*dbg_userword(cachep, objp));
			     (unsigned long)*dbg_userword(cachep, objp));
		printk("\n");
		printk("\n");
	}
	}
	realobj = (char *)objp + obj_dbghead(cachep);
	realobj = (char *)objp + obj_offset(cachep);
	size = obj_reallen(cachep);
	size = obj_size(cachep);
	for (i = 0; i < size && lines; i += 16, lines--) {
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		int limit;
		limit = 16;
		limit = 16;
@@ -1361,8 +1367,8 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
	int size, i;
	int size, i;
	int lines = 0;
	int lines = 0;


	realobj = (char *)objp + obj_dbghead(cachep);
	realobj = (char *)objp + obj_offset(cachep);
	size = obj_reallen(cachep);
	size = obj_size(cachep);


	for (i = 0; i < size; i++) {
	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		char exp = POISON_FREE;
@@ -1398,17 +1404,17 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
		struct slab *slabp = page_get_slab(virt_to_page(objp));
		struct slab *slabp = page_get_slab(virt_to_page(objp));
		int objnr;
		int objnr;


		objnr = (unsigned)(objp - slabp->s_mem) / cachep->objsize;
		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
		if (objnr) {
		if (objnr) {
			objp = slabp->s_mem + (objnr - 1) * cachep->objsize;
			objp = slabp->s_mem + (objnr - 1) * cachep->buffer_size;
			realobj = (char *)objp + obj_dbghead(cachep);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
			       realobj, size);
			       realobj, size);
			print_objinfo(cachep, objp, 2);
			print_objinfo(cachep, objp, 2);
		}
		}
		if (objnr + 1 < cachep->num) {
		if (objnr + 1 < cachep->num) {
			objp = slabp->s_mem + (objnr + 1) * cachep->objsize;
			objp = slabp->s_mem + (objnr + 1) * cachep->buffer_size;
			realobj = (char *)objp + obj_dbghead(cachep);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
			       realobj, size);
			       realobj, size);
			print_objinfo(cachep, objp, 2);
			print_objinfo(cachep, objp, 2);
@@ -1428,14 +1434,14 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
#if DEBUG
#if DEBUG
	int i;
	int i;
	for (i = 0; i < cachep->num; i++) {
	for (i = 0; i < cachep->num; i++) {
		void *objp = slabp->s_mem + cachep->objsize * i;
		void *objp = slabp->s_mem + cachep->buffer_size * i;


		if (cachep->flags & SLAB_POISON) {
		if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
#ifdef CONFIG_DEBUG_PAGEALLOC
			if ((cachep->objsize % PAGE_SIZE) == 0
			if ((cachep->buffer_size % PAGE_SIZE) == 0
			    && OFF_SLAB(cachep))
			    && OFF_SLAB(cachep))
				kernel_map_pages(virt_to_page(objp),
				kernel_map_pages(virt_to_page(objp),
						 cachep->objsize / PAGE_SIZE,
						 cachep->buffer_size / PAGE_SIZE,
						 1);
						 1);
			else
			else
				check_poison_obj(cachep, objp);
				check_poison_obj(cachep, objp);
@@ -1452,13 +1458,13 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
					   "was overwritten");
					   "was overwritten");
		}
		}
		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
			(cachep->dtor) (objp + obj_dbghead(cachep), cachep, 0);
			(cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
	}
	}
#else
#else
	if (cachep->dtor) {
	if (cachep->dtor) {
		int i;
		int i;
		for (i = 0; i < cachep->num; i++) {
		for (i = 0; i < cachep->num; i++) {
			void *objp = slabp->s_mem + cachep->objsize * i;
			void *objp = slabp->s_mem + cachep->buffer_size * i;
			(cachep->dtor) (objp, cachep, 0);
			(cachep->dtor) (objp, cachep, 0);
		}
		}
	}
	}
@@ -1478,7 +1484,7 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
	}
	}
}
}


/* For setting up all the kmem_list3s for cache whose objsize is same
/* For setting up all the kmem_list3s for cache whose buffer_size is same
   as size of kmem_list3. */
   as size of kmem_list3. */
static inline void set_up_list3s(kmem_cache_t *cachep, int index)
static inline void set_up_list3s(kmem_cache_t *cachep, int index)
{
{
@@ -1611,7 +1617,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
		set_fs(old_fs);
		set_fs(old_fs);
		if (res) {
		if (res) {
			printk("SLAB: cache with size %d has lost its name\n",
			printk("SLAB: cache with size %d has lost its name\n",
			       pc->objsize);
			       pc->buffer_size);
			continue;
			continue;
		}
		}


@@ -1702,14 +1708,14 @@ kmem_cache_create (const char *name, size_t size, size_t align,
	memset(cachep, 0, sizeof(kmem_cache_t));
	memset(cachep, 0, sizeof(kmem_cache_t));


#if DEBUG
#if DEBUG
	cachep->reallen = size;
	cachep->obj_size = size;


	if (flags & SLAB_RED_ZONE) {
	if (flags & SLAB_RED_ZONE) {
		/* redzoning only works with word aligned caches */
		/* redzoning only works with word aligned caches */
		align = BYTES_PER_WORD;
		align = BYTES_PER_WORD;


		/* add space for red zone words */
		/* add space for red zone words */
		cachep->dbghead += BYTES_PER_WORD;
		cachep->obj_offset += BYTES_PER_WORD;
		size += 2 * BYTES_PER_WORD;
		size += 2 * BYTES_PER_WORD;
	}
	}
	if (flags & SLAB_STORE_USER) {
	if (flags & SLAB_STORE_USER) {
@@ -1722,8 +1728,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
	}
	}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
	    && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
	    && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
		cachep->dbghead += PAGE_SIZE - size;
		cachep->obj_offset += PAGE_SIZE - size;
		size = PAGE_SIZE;
		size = PAGE_SIZE;
	}
	}
#endif
#endif
@@ -1786,7 +1792,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
	if (flags & SLAB_CACHE_DMA)
	if (flags & SLAB_CACHE_DMA)
		cachep->gfpflags |= GFP_DMA;
		cachep->gfpflags |= GFP_DMA;
	spin_lock_init(&cachep->spinlock);
	spin_lock_init(&cachep->spinlock);
	cachep->objsize = size;
	cachep->buffer_size = size;


	if (flags & CFLGS_OFF_SLAB)
	if (flags & CFLGS_OFF_SLAB)
		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
@@ -2118,7 +2124,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
	int i;
	int i;


	for (i = 0; i < cachep->num; i++) {
	for (i = 0; i < cachep->num; i++) {
		void *objp = slabp->s_mem + cachep->objsize * i;
		void *objp = slabp->s_mem + cachep->buffer_size * i;
#if DEBUG
#if DEBUG
		/* need to poison the objs? */
		/* need to poison the objs? */
		if (cachep->flags & SLAB_POISON)
		if (cachep->flags & SLAB_POISON)
@@ -2136,7 +2142,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
		 * Otherwise, deadlock. They must also be threaded.
		 * Otherwise, deadlock. They must also be threaded.
		 */
		 */
		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
			cachep->ctor(objp + obj_dbghead(cachep), cachep,
			cachep->ctor(objp + obj_offset(cachep), cachep,
				     ctor_flags);
				     ctor_flags);


		if (cachep->flags & SLAB_RED_ZONE) {
		if (cachep->flags & SLAB_RED_ZONE) {
@@ -2147,10 +2153,10 @@ static void cache_init_objs(kmem_cache_t *cachep,
				slab_error(cachep, "constructor overwrote the"
				slab_error(cachep, "constructor overwrote the"
					   " start of an object");
					   " start of an object");
		}
		}
		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
		    && cachep->flags & SLAB_POISON)
		    && cachep->flags & SLAB_POISON)
			kernel_map_pages(virt_to_page(objp),
			kernel_map_pages(virt_to_page(objp),
					 cachep->objsize / PAGE_SIZE, 0);
					 cachep->buffer_size / PAGE_SIZE, 0);
#else
#else
		if (cachep->ctor)
		if (cachep->ctor)
			cachep->ctor(objp, cachep, ctor_flags);
			cachep->ctor(objp, cachep, ctor_flags);
@@ -2309,7 +2315,7 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
	unsigned int objnr;
	unsigned int objnr;
	struct slab *slabp;
	struct slab *slabp;


	objp -= obj_dbghead(cachep);
	objp -= obj_offset(cachep);
	kfree_debugcheck(objp);
	kfree_debugcheck(objp);
	page = virt_to_page(objp);
	page = virt_to_page(objp);


@@ -2341,31 +2347,31 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
	if (cachep->flags & SLAB_STORE_USER)
	if (cachep->flags & SLAB_STORE_USER)
		*dbg_userword(cachep, objp) = caller;
		*dbg_userword(cachep, objp) = caller;


	objnr = (unsigned)(objp - slabp->s_mem) / cachep->objsize;
	objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;


	BUG_ON(objnr >= cachep->num);
	BUG_ON(objnr >= cachep->num);
	BUG_ON(objp != slabp->s_mem + objnr * cachep->objsize);
	BUG_ON(objp != slabp->s_mem + objnr * cachep->buffer_size);


	if (cachep->flags & SLAB_DEBUG_INITIAL) {
	if (cachep->flags & SLAB_DEBUG_INITIAL) {
		/* Need to call the slab's constructor so the
		/* Need to call the slab's constructor so the
		 * caller can perform a verify of its state (debugging).
		 * caller can perform a verify of its state (debugging).
		 * Called without the cache-lock held.
		 * Called without the cache-lock held.
		 */
		 */
		cachep->ctor(objp + obj_dbghead(cachep),
		cachep->ctor(objp + obj_offset(cachep),
			     cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
			     cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
	}
	}
	if (cachep->flags & SLAB_POISON && cachep->dtor) {
	if (cachep->flags & SLAB_POISON && cachep->dtor) {
		/* we want to cache poison the object,
		/* we want to cache poison the object,
		 * call the destruction callback
		 * call the destruction callback
		 */
		 */
		cachep->dtor(objp + obj_dbghead(cachep), cachep, 0);
		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
	}
	}
	if (cachep->flags & SLAB_POISON) {
	if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
			store_stackinfo(cachep, objp, (unsigned long)caller);
			store_stackinfo(cachep, objp, (unsigned long)caller);
			kernel_map_pages(virt_to_page(objp),
			kernel_map_pages(virt_to_page(objp),
					 cachep->objsize / PAGE_SIZE, 0);
					 cachep->buffer_size / PAGE_SIZE, 0);
		} else {
		} else {
			poison_obj(cachep, objp, POISON_FREE);
			poison_obj(cachep, objp, POISON_FREE);
		}
		}
@@ -2468,7 +2474,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)


			/* get obj pointer */
			/* get obj pointer */
			ac->entry[ac->avail++] = slabp->s_mem +
			ac->entry[ac->avail++] = slabp->s_mem +
			    slabp->free * cachep->objsize;
			    slabp->free * cachep->buffer_size;


			slabp->inuse++;
			slabp->inuse++;
			next = slab_bufctl(slabp)[slabp->free];
			next = slab_bufctl(slabp)[slabp->free];
@@ -2526,9 +2532,9 @@ static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
		return objp;
		return objp;
	if (cachep->flags & SLAB_POISON) {
	if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
			kernel_map_pages(virt_to_page(objp),
			kernel_map_pages(virt_to_page(objp),
					 cachep->objsize / PAGE_SIZE, 1);
					 cachep->buffer_size / PAGE_SIZE, 1);
		else
		else
			check_poison_obj(cachep, objp);
			check_poison_obj(cachep, objp);
#else
#else
@@ -2553,7 +2559,7 @@ static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
	}
	}
	objp += obj_dbghead(cachep);
	objp += obj_offset(cachep);
	if (cachep->ctor && cachep->flags & SLAB_POISON) {
	if (cachep->ctor && cachep->flags & SLAB_POISON) {
		unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
		unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;


@@ -2648,7 +2654,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
	BUG_ON(slabp->inuse == cachep->num);
	BUG_ON(slabp->inuse == cachep->num);


	/* get obj pointer */
	/* get obj pointer */
	obj = slabp->s_mem + slabp->free * cachep->objsize;
	obj = slabp->s_mem + slabp->free * cachep->buffer_size;
	slabp->inuse++;
	slabp->inuse++;
	next = slab_bufctl(slabp)[slabp->free];
	next = slab_bufctl(slabp)[slabp->free];
#if DEBUG
#if DEBUG
@@ -2699,7 +2705,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
		slabp = page_get_slab(virt_to_page(objp));
		slabp = page_get_slab(virt_to_page(objp));
		l3 = cachep->nodelists[node];
		l3 = cachep->nodelists[node];
		list_del(&slabp->list);
		list_del(&slabp->list);
		objnr = (unsigned)(objp - slabp->s_mem) / cachep->objsize;
		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
		check_spinlock_acquired_node(cachep, node);
		check_spinlock_acquired_node(cachep, node);
		check_slabp(cachep, slabp);
		check_slabp(cachep, slabp);


@@ -2881,7 +2887,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
	unsigned long addr = (unsigned long)ptr;
	unsigned long addr = (unsigned long)ptr;
	unsigned long min_addr = PAGE_OFFSET;
	unsigned long min_addr = PAGE_OFFSET;
	unsigned long align_mask = BYTES_PER_WORD - 1;
	unsigned long align_mask = BYTES_PER_WORD - 1;
	unsigned long size = cachep->objsize;
	unsigned long size = cachep->buffer_size;
	struct page *page;
	struct page *page;


	if (unlikely(addr < min_addr))
	if (unlikely(addr < min_addr))
@@ -3083,7 +3089,7 @@ void kfree(const void *objp)
	local_irq_save(flags);
	local_irq_save(flags);
	kfree_debugcheck(objp);
	kfree_debugcheck(objp);
	c = page_get_cache(virt_to_page(objp));
	c = page_get_cache(virt_to_page(objp));
	mutex_debug_check_no_locks_freed(objp, obj_reallen(c));
	mutex_debug_check_no_locks_freed(objp, obj_size(c));
	__cache_free(c, (void *)objp);
	__cache_free(c, (void *)objp);
	local_irq_restore(flags);
	local_irq_restore(flags);
}
}
@@ -3114,7 +3120,7 @@ EXPORT_SYMBOL(free_percpu);


unsigned int kmem_cache_size(kmem_cache_t *cachep)
unsigned int kmem_cache_size(kmem_cache_t *cachep)
{
{
	return obj_reallen(cachep);
	return obj_size(cachep);
}
}
EXPORT_SYMBOL(kmem_cache_size);
EXPORT_SYMBOL(kmem_cache_size);


@@ -3258,13 +3264,13 @@ static void enable_cpucache(kmem_cache_t *cachep)
	 * The numbers are guessed, we should auto-tune as described by
	 * The numbers are guessed, we should auto-tune as described by
	 * Bonwick.
	 * Bonwick.
	 */
	 */
	if (cachep->objsize > 131072)
	if (cachep->buffer_size > 131072)
		limit = 1;
		limit = 1;
	else if (cachep->objsize > PAGE_SIZE)
	else if (cachep->buffer_size > PAGE_SIZE)
		limit = 8;
		limit = 8;
	else if (cachep->objsize > 1024)
	else if (cachep->buffer_size > 1024)
		limit = 24;
		limit = 24;
	else if (cachep->objsize > 256)
	else if (cachep->buffer_size > 256)
		limit = 54;
		limit = 54;
	else
	else
		limit = 120;
		limit = 120;
@@ -3279,7 +3285,7 @@ static void enable_cpucache(kmem_cache_t *cachep)
	 */
	 */
	shared = 0;
	shared = 0;
#ifdef CONFIG_SMP
#ifdef CONFIG_SMP
	if (cachep->objsize <= PAGE_SIZE)
	if (cachep->buffer_size <= PAGE_SIZE)
		shared = 8;
		shared = 8;
#endif
#endif


@@ -3528,7 +3534,7 @@ static int s_show(struct seq_file *m, void *p)
		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);


	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   name, active_objs, num_objs, cachep->objsize,
		   name, active_objs, num_objs, cachep->buffer_size,
		   cachep->num, (1 << cachep->gfporder));
		   cachep->num, (1 << cachep->gfporder));
	seq_printf(m, " : tunables %4u %4u %4u",
	seq_printf(m, " : tunables %4u %4u %4u",
		   cachep->limit, cachep->batchcount, cachep->shared);
		   cachep->limit, cachep->batchcount, cachep->shared);
@@ -3656,5 +3662,5 @@ unsigned int ksize(const void *objp)
	if (unlikely(objp == NULL))
	if (unlikely(objp == NULL))
		return 0;
		return 0;


	return obj_reallen(page_get_cache(virt_to_page(objp)));
	return obj_size(page_get_cache(virt_to_page(objp)));
}
}