
Commit bb799dca authored by Linus Torvalds
* 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm:
  slub: fix typo in Documentation/vm/slub.txt
  slab: NUMA slab allocator migration bugfix
  slub: Do not cross cacheline boundaries for very small objects
  slab - use angle brackets for include of kmalloc_sizes.h
  slab numa fallback logic: Do not pass unfiltered flags to page allocator
  slub statistics: Fix check for DEACTIVATE_REMOTE_FREES
parents bb641ab4 989a7241
Documentation/vm/slub.txt  +2 −2
@@ -50,14 +50,14 @@ F.e. in order to boot just with sanity checks and red zoning one would specify:

Trying to find an issue in the dentry cache? Try

-	slub_debug=,dentry_cache
+	slub_debug=,dentry

to only enable debugging on the dentry cache.

Red zoning and tracking may realign the slab.  We can just apply sanity checks
to the dentry cache with

-	slub_debug=F,dentry_cache
+	slub_debug=F,dentry

In case you forgot to enable debugging on the kernel command line: It is
possible to enable debugging manually when the kernel is up. Look at the
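For comparison with the "FZ" example referenced in the hunk header above, the single-character debug options can also be combined with a cache name; by the same syntax, sanity checks plus red zoning on only the dentry cache would be written as

	slub_debug=FZ,dentry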
include/linux/slab_def.h  +2 −2
@@ -41,7 +41,7 @@ static inline void *kmalloc(size_t size, gfp_t flags)
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#include <linux/kmalloc_sizes.h>
#undef CACHE
		{
			extern void __you_cannot_kmalloc_that_much(void);
@@ -75,7 +75,7 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#include <linux/kmalloc_sizes.h>
#undef CACHE
		{
			extern void __you_cannot_kmalloc_that_much(void);
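The include being switched to angle brackets here drives a small X-macro trick: kmalloc_sizes.h is just a list of CACHE(<size>) entries, and slab_def.h re-includes it with a different definition of CACHE() each time, so one table feeds several expansions. Below is a minimal userspace sketch of that pattern, with the size list inlined as a macro instead of a separate header; the names and sizes are illustrative, not the kernel's.

#include <stdio.h>

/* One table of sizes; the kernel keeps its table in <linux/kmalloc_sizes.h>. */
#define CACHE_SIZES(X) X(32) X(64) X(128) X(256)

/* Expand the table once: map a request size to the index of the smallest
 * cache that fits it, much as the kmalloc() inline above does. */
static int size_to_index(size_t n)
{
	int i = 0;
#define CACHE(x) if (n <= (x)) return i; else i++;
	CACHE_SIZES(CACHE)
#undef CACHE
	return -1;	/* larger than every fixed-size cache */
}

int main(void)
{
	printf("%d\n", size_to_index(100));	/* prints 2: the 128-byte slot */
	return 0;
}

Re-including the same list with another CACHE() definition, as the kmalloc_node() inline does, reuses the table without duplicating it.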
mm/slab.c  +4 −5
@@ -333,7 +333,7 @@ static __always_inline int index_of(const size_t size)
		return i; \
	else \
		i++;
#include "linux/kmalloc_sizes.h"
#include <linux/kmalloc_sizes.h>
#undef CACHE
		__bad_size();
	} else
@@ -2964,11 +2964,10 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
	struct array_cache *ac;
	int node;

-	node = numa_node_id();
-
-retry:
	check_irq_off();
+	node = numa_node_id();
	ac = cpu_cache_get(cachep);
+retry:
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
		/*
@@ -3280,7 +3279,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
		if (local_flags & __GFP_WAIT)
			local_irq_enable();
		kmem_flagcheck(cache, flags);
-		obj = kmem_getpages(cache, flags, -1);
+		obj = kmem_getpages(cache, local_flags, -1);
		if (local_flags & __GFP_WAIT)
			local_irq_disable();
		if (obj) {
mm/slub.c  +8 −5
@@ -1368,7 +1368,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
	struct page *page = c->page;
	int tail = 1;

-	if (c->freelist)
+	if (page->freelist)
		stat(c, DEACTIVATE_REMOTE_FREES);
	/*
	 * Merge cpu freelist into slab freelist. Typically we get here
@@ -1856,12 +1856,15 @@ static unsigned long calculate_alignment(unsigned long flags,
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
-	if ((flags & SLAB_HWCACHE_ALIGN) &&
-			size > cache_line_size() / 2)
-		return max_t(unsigned long, align, cache_line_size());
+	if (flags & SLAB_HWCACHE_ALIGN) {
+		unsigned long ralign = cache_line_size();
+		while (size <= ralign / 2)
+			ralign /= 2;
+		align = max(align, ralign);
+	}

	if (align < ARCH_SLAB_MINALIGN)
-		return ARCH_SLAB_MINALIGN;
+		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
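The rounding loop added to calculate_alignment() can be exercised on its own: it halves the cache-line-derived alignment while the object still fits at least twice into it, so small objects get a smaller alignment yet never straddle a cache line. A standalone sketch, assuming a 64-byte cache line (the kernel queries cache_line_size() instead; the names here are illustrative):

#include <stdio.h>

#define CACHE_LINE_SIZE 64UL	/* assumption for this example */

/* Same rounding as the new SLAB_HWCACHE_ALIGN branch above. */
static unsigned long hwcache_align(unsigned long size)
{
	unsigned long ralign = CACHE_LINE_SIZE;

	while (size <= ralign / 2)
		ralign /= 2;
	return ralign;
}

int main(void)
{
	/* 12-byte objects: 64 -> 32 -> 16, so they are padded to 16 bytes,
	 * four per cache line, and none crosses a line boundary. */
	printf("%lu\n", hwcache_align(12));	/* 16 */
	/* Objects bigger than half a line still get full line alignment. */
	printf("%lu\n", hwcache_align(40));	/* 64 */
	return 0;
}

Under the old test (size > cache_line_size() / 2), those 12-byte objects would have received no cache-line-derived alignment at all.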