
Commit 7b383bef authored by Linus Torvalds
Pull SLAB changes from Pekka Enberg:
 "Random bug fixes that have accumulated in my inbox over the past few
  months"

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  mm: Fix warning on make htmldocs caused by slab.c
  mm: slub: work around unneeded lockdep warning
  mm: sl[uo]b: fix misleading comments
  slub: Fix possible format string bug.
  slub: use lockdep_assert_held
  slub: Fix calculation of cpu slabs
  slab.h: remove duplicate kmalloc declaration and fix kernel-doc warnings
parents 87af5e5c cb8ee1a3
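Most of the mm/slub.c changes below follow a single pattern from the "slub: use lockdep_assert_held" patch: comments of the form "list_lock must be held" are dropped and a lockdep_assert_held() call is added at the top of the function, so lockdep reports any caller that reaches the code without the lock instead of relying on reviewers to spot it. A minimal sketch of the idea, with hypothetical names rather than the actual slub helpers:

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_list);

/* Callers must hold demo_lock; with lockdep enabled the assertion
 * warns about any caller that does not hold it. */
static void demo_add(struct list_head *item)
{
	lockdep_assert_held(&demo_lock);
	list_add(item, &demo_list);
}

The assertion is a no-op in builds without lockdep. One hunk below also takes a node's list_lock in early_kmem_cache_node_alloc() even though nothing can race that early in boot; as its new comment notes, the lock is taken only so the assertion in add_partial() is satisfied.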
include/linux/slab.h  +4 −4
@@ -205,8 +205,8 @@ struct kmem_cache {

#ifdef CONFIG_SLUB
/*
- * SLUB allocates up to order 2 pages directly and otherwise
- * passes the request to the page allocator.
+ * SLUB directly allocates requests fitting in to an order-1 page
+ * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
@@ -217,12 +217,12 @@ struct kmem_cache {

#ifdef CONFIG_SLOB
/*
- * SLOB passes all page size and larger requests to the page allocator.
+ * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
-#define KMALLOC_SHIFT_MAX	30
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
+#define KMALLOC_SHIFT_MAX	30
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
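For reference, the rewritten comments describe what the neighbouring defines already encode. Assuming 4 KiB pages (PAGE_SHIFT = 12): under SLUB, KMALLOC_SHIFT_HIGH = PAGE_SHIFT + 1 = 13, so the largest kmalloc cache serves objects up to 2^13 bytes = 8 KiB = 2*PAGE_SIZE (an order-1 allocation), and anything larger is passed straight to the page allocator; under SLOB, KMALLOC_SHIFT_HIGH = PAGE_SHIFT, so the cutoff is a single 4 KiB page, matching the new wording "all requests larger than one page".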
mm/slab.c  +1 −1
@@ -1946,7 +1946,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
+ * @page: page pointer being destroyed
 *
 * Destroy all the objs in a slab, and release the mem back to the system.
 * Before calling the slab must have been unlinked from the cache.  The
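The mm/slab.c hunk is purely a kernel-doc fix: slab_destroy() now takes a struct page pointer, so the stale @slabp line has to become @page or scripts/kernel-doc warns during "make htmldocs" about a description with no matching parameter. The convention, sketched with a hypothetical function:

/**
 * demo_destroy - release every object held by a page  (hypothetical example)
 * @cachep: cache the page belongs to
 * @page: page whose objects are released
 *
 * Each "@name:" line must name an actual parameter of the function that
 * follows, otherwise kernel-doc complains.
 */
static void demo_destroy(struct kmem_cache *cachep, struct page *page);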
mm/slub.c  +34 −22
@@ -1000,23 +1000,22 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)

/*
 * Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct page *page)
{
+	lockdep_assert_held(&n->list_lock);
+
	if (!(s->flags & SLAB_STORE_USER))
		return;

	list_add(&page->lru, &n->full);
}

-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
+	lockdep_assert_held(&n->list_lock);
+
	if (!(s->flags & SLAB_STORE_USER))
		return;

@@ -1265,7 +1264,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+					struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
@@ -1519,12 +1519,12 @@ static void discard_slab(struct kmem_cache *s, struct page *page)

/*
 * Management of partially allocated slabs.
- *
- * list_lock must be held.
 */
static inline void add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
{
+	lockdep_assert_held(&n->list_lock);
+
	n->nr_partial++;
	if (tail == DEACTIVATE_TO_TAIL)
		list_add_tail(&page->lru, &n->partial);
@@ -1532,12 +1532,11 @@ static inline void add_partial(struct kmem_cache_node *n,
		list_add(&page->lru, &n->partial);
}

-/*
- * list_lock must be held.
- */
static inline void remove_partial(struct kmem_cache_node *n,
					struct page *page)
{
+	lockdep_assert_held(&n->list_lock);
+
	list_del(&page->lru);
	n->nr_partial--;
}
@@ -1547,8 +1546,6 @@ static inline void remove_partial(struct kmem_cache_node *n,
 * return the pointer to the freelist.
 *
 * Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
 */
static inline void *acquire_slab(struct kmem_cache *s,
		struct kmem_cache_node *n, struct page *page,
@@ -1558,6 +1555,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
	unsigned long counters;
	struct page new;

+	lockdep_assert_held(&n->list_lock);
+
	/*
	 * Zap the freelist and set the frozen bit.
	 * The old freelist is the list of objects for the
@@ -1902,7 +1901,7 @@ redo:

		else if (l == M_FULL)

-			remove_full(s, page);
+			remove_full(s, n, page);

		if (m == M_PARTIAL) {

@@ -2556,7 +2555,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
		new.inuse--;
		if ((!new.inuse || !prior) && !was_frozen) {

-			if (kmem_cache_has_cpu_partial(s) && !prior)
+			if (kmem_cache_has_cpu_partial(s) && !prior) {

				/*
				 * Slab was on no list before and will be
@@ -2566,7 +2565,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
				 */
				new.frozen = 1;

-			else { /* Needs to be taken off a list */
+			} else { /* Needs to be taken off a list */

	                        n = get_node(s, page_to_nid(page));
				/*
@@ -2615,7 +2614,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
	 */
	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
		if (kmem_cache_debug(s))
-			remove_full(s, page);
+			remove_full(s, n, page);
		add_partial(n, page, DEACTIVATE_TO_TAIL);
		stat(s, FREE_ADD_PARTIAL);
	}
@@ -2629,9 +2628,10 @@ slab_empty:
		 */
		remove_partial(n, page);
		stat(s, FREE_REMOVE_PARTIAL);
-	} else
+	} else {
		/* Slab must be on the full list */
-		remove_full(s, page);
+		remove_full(s, n, page);
+	}

	spin_unlock_irqrestore(&n->list_lock, flags);
	stat(s, FREE_SLAB);
@@ -2905,7 +2905,13 @@ static void early_kmem_cache_node_alloc(int node)
	init_kmem_cache_node(n);
	inc_slabs_node(kmem_cache_node, node, page->objects);

+	/*
+	 * the lock is for lockdep's sake, not for any actual
+	 * race protection
+	 */
+	spin_lock(&n->list_lock);
	add_partial(n, page, DEACTIVATE_TO_HEAD);
+	spin_unlock(&n->list_lock);
}

static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -4314,7 +4320,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,

			page = ACCESS_ONCE(c->partial);
			if (page) {
-				x = page->pobjects;
+				node = page_to_nid(page);
+				if (flags & SO_TOTAL)
+					WARN_ON_ONCE(1);
+				else if (flags & SO_OBJECTS)
+					WARN_ON_ONCE(1);
+				else
+					x = page->pages;
				total += x;
				nodes[node] += x;
			}
@@ -5178,7 +5190,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
	}

	s->kobj.kset = slab_kset;
-	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
+	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
	if (err) {
		kobject_put(&s->kobj);
		return err;
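Two of the remaining fixes deserve a note. The show_slab_objects() hunk stops adding page->pobjects (an object estimate) for the per-cpu partial list when the sysfs query only asks for slab counts, using page->pages instead, and it derives the node from the partial page itself. The final hunk is the "slub: Fix possible format string bug." change: kobject_init_and_add() takes a printf-style format string, so passing the cache name directly would let a name containing '%' be parsed as conversion specifiers, while routing it through "%s" treats it as plain data. The same rule holds for any printk-style sink; a hypothetical illustration, not code from this commit:

#include <linux/printk.h>

static void log_cache_name(const char *name)
{
	pr_info(name);         /* unsafe: a '%' in name is parsed as a format */
	pr_info("%s\n", name); /* safe: name is passed as data, not as the format */
}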