
Commit 4ce4b1b7 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  SLUB: Fix early boot GFP_DMA allocations
  SLUB: Don't print out OOM warning for __GFP_NOFAIL
  SLUB: fix build when !SLUB_DEBUG
  SLUB: Out-of-memory diagnostics
  slab: document kzfree() zeroing behavior
  slab: fix generic PAGE_POISONING conflict with SLAB_RED_ZONE
  slob: use PG_slab for identifying SLOB pages
parents 9cb0fbf7 5caf5c7d
include/linux/page-flags.h (+0 −2)
@@ -118,7 +118,6 @@ enum pageflags {
 	PG_savepinned = PG_dirty,
 
 	/* SLOB */
-	PG_slob_page = PG_active,
 	PG_slob_free = PG_private,
 
 	/* SLUB */
@@ -201,7 +200,6 @@ PAGEFLAG(SavePinned, savepinned); /* Xen */
 PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
 PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
 
-__PAGEFLAG(SlobPage, slob_page)
 __PAGEFLAG(SlobFree, slob_free)
 
 __PAGEFLAG(SlubFrozen, slub_frozen)
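
Deleting the single __PAGEFLAG(SlobPage, slob_page) invocation is what removes PageSlobPage(), __SetPageSlobPage() and __ClearPageSlobPage() all at once, because the macro stamps out the test/set/clear accessors together. A minimal userspace sketch of that pattern (simplified: the real macros in include/linux/page-flags.h build on test_bit()/__set_bit()/__clear_bit() and generate more variants):

#include <stdio.h>

/* Stand-in for struct page: only the flags word matters for this demo. */
struct page { unsigned long flags; };

enum pageflags { PG_locked, PG_error, PG_slab };	/* trimmed-down list */

/*
 * Sketch of the kernel's __PAGEFLAG(uname, lname) pattern: one macro
 * invocation generates the test, set and clear helpers, which is why
 * removing __PAGEFLAG(SlobPage, slob_page) deletes three functions.
 */
#define __PAGEFLAG(uname, lname)					\
static int Page##uname(const struct page *page)				\
	{ return (page->flags >> PG_##lname) & 1; }			\
static void __SetPage##uname(struct page *page)				\
	{ page->flags |= 1UL << PG_##lname; }				\
static void __ClearPage##uname(struct page *page)			\
	{ page->flags &= ~(1UL << PG_##lname); }

__PAGEFLAG(Slab, slab)

int main(void)
{
	struct page p = { 0 };

	__SetPageSlab(&p);
	printf("PageSlab: %d\n", PageSlab(&p));	/* 1 */
	__ClearPageSlab(&p);
	printf("PageSlab: %d\n", PageSlab(&p));	/* 0 */
	return 0;
}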
mm/slab.c (+9 −0)
@@ -2308,6 +2308,15 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		/* really off slab. No need for manual alignment */
 		slab_size =
 		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
+
+#ifdef CONFIG_PAGE_POISONING
+		/* If we're going to use the generic kernel_map_pages()
+		 * poisoning, then it's going to smash the contents of
+		 * the redzone and userword anyhow, so switch them off.
+		 */
+		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
+			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
+#endif
 	}
 
 	cachep->colour_off = cache_line_size();
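
The new check only fires for caches whose objects tile whole pages exactly, since that is the case where SLAB's poisoning goes through the generic kernel_map_pages() page poisoning, which rewrites the full page and would trample the red zones and user-tracking words anyway. A standalone sketch of the flag arithmetic, using flag values copied from that era's include/linux/slab.h (the helper name is a demo invention):

#include <stdio.h>

#define SLAB_RED_ZONE	0x00000400UL
#define SLAB_POISON	0x00000800UL
#define SLAB_STORE_USER	0x00010000UL
#define PAGE_SIZE	4096UL

/* Demo helper mirroring the mm/slab.c hunk above, not a kernel function. */
static unsigned long fixup_debug_flags(unsigned long size, unsigned long flags)
{
	/*
	 * If the object size is an exact multiple of the page size and
	 * poisoning is requested, page-level poisoning will overwrite
	 * the whole page anyway, so red zones and user words are
	 * pointless and get dropped.
	 */
	if (size % PAGE_SIZE == 0 && (flags & SLAB_POISON))
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
	return flags;
}

int main(void)
{
	unsigned long f = SLAB_POISON | SLAB_RED_ZONE | SLAB_STORE_USER;

	/* 4096-byte objects: red zone and store-user are cleared. */
	printf("%#lx\n", fixup_debug_flags(4096, f));	/* 0x800 */
	/* 192-byte objects: flags pass through untouched. */
	printf("%#lx\n", fixup_debug_flags(192, f));	/* 0x10c00 */
	return 0;
}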
mm/slob.c (+3 −3)
@@ -133,17 +133,17 @@ static LIST_HEAD(free_slob_large);
  */
 static inline int is_slob_page(struct slob_page *sp)
 {
-	return PageSlobPage((struct page *)sp);
+	return PageSlab((struct page *)sp);
 }
 
 static inline void set_slob_page(struct slob_page *sp)
 {
-	__SetPageSlobPage((struct page *)sp);
+	__SetPageSlab((struct page *)sp);
 }
 
 static inline void clear_slob_page(struct slob_page *sp)
 {
-	__ClearPageSlobPage((struct page *)sp);
+	__ClearPageSlab((struct page *)sp);
 }
 
 static inline struct slob_page *slob_page(const void *addr)
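
The rename is mechanical: SLOB previously aliased its private PG_slob_page to PG_active, and now reuses the PG_slab bit that SLAB and SLUB already set, so one flag identifies slab-managed pages regardless of allocator. Inside SLOB the bit drives kfree()'s dispatch: objects on slab-marked pages go back onto SLOB's free lists, while large allocations were given whole pages and return straight to the page allocator. A toy sketch of that branch (demo names, not the kernel API):

#include <stdio.h>
#include <stdbool.h>

/* Toy model: one bit on the page says who owns it, as PG_slab now does. */
struct page { bool slab; };

static void slob_free_demo(struct page *sp)
{
	printf("small object: back onto SLOB free lists\n");
}

static void put_page_demo(struct page *sp)
{
	printf("large allocation: whole page back to the page allocator\n");
}

/* Sketch of the branch kfree() takes in mm/slob.c. */
static void kfree_demo(struct page *sp)
{
	if (sp->slab)		/* was PageSlobPage(), now PageSlab() */
		slob_free_demo(sp);
	else
		put_page_demo(sp);
}

int main(void)
{
	struct page small = { .slab = true };
	struct page large = { .slab = false };

	kfree_demo(&small);
	kfree_demo(&large);
	return 0;
}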
mm/slub.c (+82 −23)
@@ -840,6 +840,11 @@ static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 	return atomic_long_read(&n->nr_slabs);
 }
 
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+{
+	return atomic_long_read(&n->nr_slabs);
+}
+
 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);
@@ -1058,6 +1063,8 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 
 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 							{ return 0; }
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+							{ return 0; }
 static inline void inc_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
@@ -1514,6 +1521,65 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
 	return 1;
 }
 
+static int count_free(struct page *page)
+{
+	return page->objects - page->inuse;
+}
+
+static unsigned long count_partial(struct kmem_cache_node *n,
+					int (*get_count)(struct page *))
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += get_count(page);
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
+static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
+{
+#ifdef CONFIG_SLUB_DEBUG
+	return atomic_long_read(&n->total_objects);
+#else
+	return 0;
+#endif
+}
+
+static noinline void
+slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
+{
+	int node;
+
+	printk(KERN_WARNING
+		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+		nid, gfpflags);
+	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
+		"default order: %d, min order: %d\n", s->name, s->objsize,
+		s->size, oo_order(s->oo), oo_order(s->min));
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+		unsigned long nr_slabs;
+		unsigned long nr_objs;
+		unsigned long nr_free;
+
+		if (!n)
+			continue;
+
+		nr_free  = count_partial(n, count_free);
+		nr_slabs = node_nr_slabs(n);
+		nr_objs  = node_nr_objs(n);
+
+		printk(KERN_WARNING
+			"  node %d: slabs: %ld, objs: %ld, free: %ld\n",
+			node, nr_slabs, nr_objs, nr_free);
+	}
+}
+
 /*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
@@ -1595,6 +1661,8 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		c->page = new;
 		goto load_freelist;
 	}
+	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+		slab_out_of_memory(s, gfpflags, node);
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, c->page, object, addr))
@@ -2636,6 +2704,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	struct kmem_cache *s;
 	char *text;
 	size_t realsize;
+	unsigned long slabflags;
 
 	s = kmalloc_caches_dma[index];
 	if (s)
@@ -2657,10 +2726,18 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 			 (unsigned int)realsize);
 	s = kmalloc(kmem_size, flags & ~SLUB_DMA);
 
+	/*
+	 * Must defer sysfs creation to a workqueue because we don't know
+	 * what context we are called from. Before sysfs comes up, we don't
+	 * need to do anything because our sysfs initcall will start by
+	 * adding all existing slabs to sysfs.
+	 */
+	slabflags = SLAB_CACHE_DMA|SLAB_NOTRACK;
+	if (slab_state >= SYSFS)
+		slabflags |= __SYSFS_ADD_DEFERRED;
+
 	if (!s || !text || !kmem_cache_open(s, flags, text,
-			realsize, ARCH_KMALLOC_MINALIGN,
-			SLAB_CACHE_DMA|SLAB_NOTRACK|__SYSFS_ADD_DEFERRED,
-			NULL)) {
+			realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
 		kfree(s);
 		kfree(text);
 		goto unlock_out;
@@ -2669,6 +2746,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	list_add(&s->list, &slab_caches);
 	kmalloc_caches_dma[index] = s;
 
-	schedule_work(&sysfs_add_work);
+	if (slab_state >= SYSFS)
+		schedule_work(&sysfs_add_work);
 
 unlock_out:
@@ -3368,20 +3446,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 
 #ifdef CONFIG_SLUB_DEBUG
-static unsigned long count_partial(struct kmem_cache_node *n,
-					int (*get_count)(struct page *))
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 static int count_inuse(struct page *page)
 {
 	return page->inuse;
@@ -3392,11 +3456,6 @@ static int count_total(struct page *page)
 	return page->objects;
 }
 
-static int count_free(struct page *page)
-{
-	return page->objects - page->inuse;
-}
-
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
 {
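
The two deletion hunks are the other half of a move: count_partial() and count_free() previously lived under CONFIG_SLUB_DEBUG, so the new OOM report relocates them to the unconditional part of the file rather than duplicating them. The function-pointer parameter is what lets one locked walk of the partial list compute any per-page statistic; a minimal userspace sketch of that accumulator pattern (demo types and names, not the kernel API):

#include <stdio.h>

struct page_stats {
	int objects;	/* capacity of the slab page */
	int inuse;	/* objects currently allocated */
	struct page_stats *next;
};

/* Same shape as SLUB's count_free()/count_inuse() callbacks. */
static int count_free(const struct page_stats *p)  { return p->objects - p->inuse; }
static int count_inuse(const struct page_stats *p) { return p->inuse; }

/* One traversal, parameterised by the per-page counter, like count_partial(). */
static unsigned long count_partial(struct page_stats *head,
				   int (*get_count)(const struct page_stats *))
{
	unsigned long x = 0;

	for (struct page_stats *p = head; p; p = p->next)
		x += get_count(p);
	return x;
}

int main(void)
{
	struct page_stats c = { 32, 32, NULL };
	struct page_stats b = { 32,  7, &c };
	struct page_stats a = { 32, 20, &b };

	printf("free:  %lu\n", count_partial(&a, count_free));	/* 37 */
	printf("inuse: %lu\n", count_partial(&a, count_inuse));	/* 59 */
	return 0;
}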
mm/util.c (+4 −0)
@@ -168,6 +168,10 @@ EXPORT_SYMBOL(krealloc);
  *
  * The memory of the object @p points to is zeroed before freed.
  * If @p is %NULL, kzfree() does nothing.
+ *
+ * Note: this function zeroes the whole allocated buffer which can be a good
+ * deal bigger than the requested buffer size passed to kmalloc(). So be
+ * careful when using this function in performance sensitive code.
+ */
 void kzfree(const void *p)
 {
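
The warning exists because kzfree() zeroes ksize(p) bytes, the full size of the underlying slab object, which kmalloc() may have rounded up well past the requested length. A usage sketch in kernel style for the typical case, wiping key material before release (do_crypto_op() is a hypothetical caller, not part of this commit):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/errno.h>

/* Hypothetical caller: holds a secret, wipes it on the way out. */
static int do_crypto_op(const void *key_in, size_t key_len)
{
	u8 *key;

	/* kmalloc() may round e.g. 20 bytes up to a 32-byte object... */
	key = kmalloc(key_len, GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	memcpy(key, key_in, key_len);

	/* ... use the key here ... */

	/*
	 * kzfree() zeroes all ksize(key) bytes, not just key_len, before
	 * freeing: right for secrets, needlessly expensive on hot paths
	 * with large rounded-up buffers.
	 */
	kzfree(key);
	return 0;
}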