
Commit 76c39e4f authored by Linus Torvalds
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6: (27 commits)
  SLUB: Fix memory hotplug with !NUMA
  slub: Move functions to reduce #ifdefs
  slub: Enable sysfs support for !CONFIG_SLUB_DEBUG
  SLUB: Optimize slab_free() debug check
  slub: Move NUMA-related functions under CONFIG_NUMA
  slub: Add lock release annotation
  slub: Fix signedness warnings
  slub: extract common code to remove objects from partial list without locking
  SLUB: Pass active and inactive redzone flags instead of boolean to debug functions
  slub: reduce differences between SMP and NUMA
  Revert "Slub: UP bandaid"
  percpu: clear memory allocated with the km allocator
  percpu: use percpu allocator on UP too
  percpu: reduce PCPU_MIN_UNIT_SIZE to 32k
  vmalloc: pcpu_get/free_vm_areas() aren't needed on UP
  SLUB: Fix merged slab cache names
  Slub: UP bandaid
  slub: fix SLUB_RESILIENCY_TEST for dynamic kmalloc caches
  slub: Fix up missing kmalloc_cache -> kmem_cache_node case for memoryhotplug
  slub: Add dummy functions for the !SLUB_DEBUG case
  ...
parents 1765a1fe 6d4121f6
include/linux/slub_def.h +4 −10
@@ -87,7 +87,7 @@ struct kmem_cache {
	unsigned long min_partial;
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
-#ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif

@@ -96,11 +96,8 @@ struct kmem_cache {
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
-	struct kmem_cache_node *node[MAX_NUMNODES];
-#else
-	/* Avoid an extra cache line for UP */
-	struct kmem_cache_node local_node;
#endif
+	struct kmem_cache_node *node[MAX_NUMNODES];
};

/*
@@ -139,19 +136,16 @@ struct kmem_cache {

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
-/* Reserve extra caches for potential DMA use */
-#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
-#define KMALLOC_CACHES SLUB_PAGE_SHIFT
#endif

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
-extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
+extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];

/*
 * Sorry that the following has to be that ugly but some versions of GCC
@@ -216,7 +210,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
	if (index == 0)
		return NULL;

-	return &kmalloc_caches[index];
+	return kmalloc_caches[index];
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
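The slub_def.h hunks above turn kmalloc_caches from a statically sized array of struct kmem_cache into an array of pointers, which is why kmalloc_slab() drops its address-of operator. A minimal user-space sketch of the difference, using hypothetical stand-in types and an assumed array size rather than the real kernel definitions:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins; the real definitions live in the kernel. */
struct kmem_cache { const char *name; size_t object_size; };

#define NCACHES 16	/* assumed size, playing the role of SLUB_PAGE_SHIFT */

/* Before: storage for every cache is reserved statically, and a
 * lookup must take the element's address. */
static struct kmem_cache caches_by_value[NCACHES];

static struct kmem_cache *lookup_by_value(int index)
{
	return &caches_by_value[index];
}

/* After: the array holds pointers, so each cache can be created at
 * runtime and the element itself is already the address. */
static struct kmem_cache *caches_by_pointer[NCACHES];

static struct kmem_cache *lookup_by_pointer(int index)
{
	return caches_by_pointer[index];
}

int main(void)
{
	caches_by_value[3].name = "kmalloc-8";
	caches_by_pointer[3] = &caches_by_value[3];
	printf("%s %s\n", lookup_by_value(3)->name,
	       lookup_by_pointer(3)->name);
	return 0;
}

The pointer form is what lets the series create the general kmalloc caches dynamically during bootstrap instead of reserving KMALLOC_CACHES worth of static storage.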
init/Kconfig +1 −1
@@ -353,7 +353,7 @@ config SLUB_DEBUG_ON
config SLUB_STATS
	default n
	bool "Enable SLUB performance statistics"
-	depends on SLUB && SLUB_DEBUG && SYSFS
+	depends on SLUB && SYSFS
	help
	  SLUB statistics are useful to debug SLUBs allocation behavior in
	  order find ways to optimize the allocator. This should never be
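This dependency change pairs with the sysfs hunk above: the per-cache statistics no longer require CONFIG_SLUB_DEBUG, only CONFIG_SLUB and CONFIG_SYSFS. With SLUB_STATS enabled, each counter is exported as a file under /sys/kernel/slab/<cache>/. A hedged user-space sketch of reading one such counter; the kmalloc-64 cache name and the alloc_fastpath stat are examples, and the files actually present depend on the running kernel's configuration:

#include <stdio.h>

int main(void)
{
	/* alloc_fastpath is one of the SLUB_STATS counters; the
	 * kmalloc-64 cache name is only an example. */
	FILE *f = fopen("/sys/kernel/slab/kmalloc-64/alloc_fastpath", "r");
	char buf[128];

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("kmalloc-64 alloc_fastpath: %s", buf);
	fclose(f);
	return 0;
}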
mm/slob.c +3 −1
@@ -500,7 +500,9 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
	} else {
		unsigned int order = get_order(size);

-		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
+		if (likely(order))
+			gfp |= __GFP_COMP;
+		ret = slob_new_pages(gfp, order, node);
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
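The slob.c fix stops passing __GFP_COMP for order-0 allocations: the compound-page flag only means something when more than one page is allocated, and the old line also recomputed get_order(size) although order was already at hand. A small user-space sketch of the same guard pattern, with a stand-in flag value rather than the kernel's __GFP_COMP:

#include <stdio.h>

#define FAKE_GFP_COMP 0x4000u	/* stand-in for __GFP_COMP */

/* Mirrors the patched logic: request a compound page only for
 * multi-page (order > 0) allocations. */
static unsigned int flags_for_order(unsigned int gfp, unsigned int order)
{
	if (order)
		gfp |= FAKE_GFP_COMP;
	return gfp;
}

int main(void)
{
	printf("order 0 flags: %#x\n", flags_for_order(0, 0));
	printf("order 2 flags: %#x\n", flags_for_order(0, 2));
	return 0;
}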
mm/slub.c +416 −372 (diff collapsed: preview size limit exceeded)