
Commit 23910c50 authored by Pekka Enberg

Merge branch 'slub/cleanups' into slab/next



* Fix a merge conflict in mm/slub.c::acquire_slab() due to commit 02d7633f
  ("slub: fix a memory leak in get_partial_node()").

Conflicts:
	mm/slub.c

Signed-off-by: Pekka Enberg <penberg@kernel.org>
parents f8f5701b 57d437d2
include/linux/slub_def.h +0 −1
@@ -48,7 +48,6 @@ struct kmem_cache_cpu {
 	unsigned long tid;	/* Globally unique transaction id */
 	struct page *page;	/* The slab from which we are allocating */
 	struct page *partial;	/* Partially allocated frozen slabs */
-	int node;		/* The node of the page (or -1 for debug) */
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
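
With the cached field gone, the node of the cpu slab is derived from the page itself wherever it is still needed (see the node_match() and show_slab_objects() hunks below). A minimal sketch of the idea; cpu_slab_node() is a hypothetical name invented here for illustration, not a helper in this tree:

	/* Hypothetical helper: the node of the cpu slab is now derived, not cached. */
	static inline int cpu_slab_node(struct kmem_cache_cpu *c)
	{
		return c->page ? page_to_nid(c->page) : NUMA_NO_NODE;
	}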
mm/slub.c +92 −81
@@ -1490,12 +1490,12 @@ static inline void remove_partial(struct kmem_cache_node *n,
 }
 
 /*
- * Lock slab, remove from the partial list and put the object into the
- * per cpu freelist.
+ * Remove slab from the partial list, freeze it and
+ * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
  *
- * Must hold list_lock.
+ * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
@@ -1510,7 +1510,6 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	 * The old freelist is the list of objects for the
 	 * per cpu allocation list.
 	 */
-	do {
 	freelist = page->freelist;
 	counters = page->counters;
 	new.counters = counters;
@@ -1524,12 +1523,14 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	VM_BUG_ON(new.frozen);
 	new.frozen = 1;
 
-	} while (!__cmpxchg_double_slab(s, page,
+	if (!__cmpxchg_double_slab(s, page,
 			freelist, counters,
 			new.freelist, new.counters,
-			"lock and freeze"));
+			"acquire_slab"))
+		return NULL;
 
 	remove_partial(n, page);
+	WARN_ON(!freelist);
 	return freelist;
 }

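Taken together, the hunks above replace the cmpxchg retry loop with a single attempt that returns NULL on failure, letting the caller move on to the next partial slab instead of spinning. A hedged reconstruction of the resulting function; the mode handling between the hunks is filled in from the surrounding series and may not be verbatim:

	static inline void *acquire_slab(struct kmem_cache *s,
			struct kmem_cache_node *n, struct page *page,
			int mode)
	{
		void *freelist;
		unsigned long counters;
		struct page new;

		/*
		 * Zap the freelist and set the frozen bit.
		 * The old freelist is the list of objects for the
		 * per cpu allocation list.
		 */
		freelist = page->freelist;
		counters = page->counters;
		new.counters = counters;
		if (mode) {
			new.inuse = page->objects;
			new.freelist = NULL;
		} else {
			new.freelist = freelist;
		}

		VM_BUG_ON(new.frozen);
		new.frozen = 1;

		/* One attempt only: on contention, give up rather than retry. */
		if (!__cmpxchg_double_slab(s, page,
				freelist, counters,
				new.freelist, new.counters,
				"acquire_slab"))
			return NULL;

		remove_partial(n, page);
		WARN_ON(!freelist);
		return freelist;
	}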
@@ -1563,7 +1564,6 @@ static void *get_partial_node(struct kmem_cache *s,
 
 		if (!object) {
 			c->page = page;
-			c->node = page_to_nid(page);
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
 			available =  page->objects - page->inuse;
@@ -1731,14 +1731,12 @@ void init_kmem_cache_cpus(struct kmem_cache *s)
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
+static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
 {
 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
-	struct page *page = c->page;
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 	int lock = 0;
 	enum slab_modes l = M_NONE, m = M_NONE;
-	void *freelist;
 	void *nextfree;
 	int tail = DEACTIVATE_TO_HEAD;
 	struct page new;
@@ -1749,11 +1747,6 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 		tail = DEACTIVATE_TO_TAIL;
 	}
 
-	c->tid = next_tid(c->tid);
-	c->page = NULL;
-	freelist = c->freelist;
-	c->freelist = NULL;
-
 	/*
 	 * Stage one: Free all available per cpu objects back
 	 * to the page freelist while it is still frozen. Leave the
@@ -2011,7 +2004,11 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
 	stat(s, CPUSLAB_FLUSH);
-	deactivate_slab(s, c);
+	deactivate_slab(s, c->page, c->freelist);
+
+	c->tid = next_tid(c->tid);
+	c->page = NULL;
+	c->freelist = NULL;
 }
 
 /*
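
The flush_slab() hunk above spells out the new division of labour: deactivate_slab() only consumes the page and freelist it is handed, and every caller resets its own kmem_cache_cpu state afterwards. A minimal sketch of that contract; drop_cpu_slab() is a hypothetical name for illustration, not a function in this tree:

	/* Hypothetical helper illustrating the caller contract. */
	static void drop_cpu_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
	{
		deactivate_slab(s, c->page, c->freelist);

		/* deactivate_slab() no longer clears the per cpu state, so the caller must. */
		c->tid = next_tid(c->tid);
		c->page = NULL;
		c->freelist = NULL;
	}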
@@ -2055,10 +2052,10 @@ static void flush_all(struct kmem_cache *s)
  * Check if the objects in a per cpu structure fit numa
  * locality expectations.
  */
-static inline int node_match(struct kmem_cache_cpu *c, int node)
+static inline int node_match(struct page *page, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != NUMA_NO_NODE && c->node != node)
+	if (node != NUMA_NO_NODE && page_to_nid(page) != node)
 		return 0;
 #endif
 	return 1;
@@ -2130,10 +2127,16 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 			int node, struct kmem_cache_cpu **pc)
 {
-	void *object;
-	struct kmem_cache_cpu *c;
-	struct page *page = new_slab(s, flags, node);
+	void *freelist;
+	struct kmem_cache_cpu *c = *pc;
+	struct page *page;
 
+	freelist = get_partial(s, flags, node, c);
+
+	if (freelist)
+		return freelist;
+
+	page = new_slab(s, flags, node);
 	if (page) {
 		c = __this_cpu_ptr(s->cpu_slab);
 		if (c->page)
@@ -2143,17 +2146,16 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 		 * No other reference to the page yet so we can
 		 * muck around with it freely without cmpxchg
 		 */
-		object = page->freelist;
+		freelist = page->freelist;
 		page->freelist = NULL;
 
 		stat(s, ALLOC_SLAB);
-		c->node = page_to_nid(page);
 		c->page = page;
 		*pc = c;
 	} else
-		object = NULL;
+		freelist = NULL;
 
-	return object;
+	return freelist;
 }
 
 /*
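
Because the rewritten new_slab_objects() is split across the two hunks above, a hedged reconstruction of the whole function may help; the flush_slab() call in the middle is an assumption based on the `if (c->page)` context line and may not be verbatim:

	static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
				int node, struct kmem_cache_cpu **pc)
	{
		void *freelist;
		struct kmem_cache_cpu *c = *pc;
		struct page *page;

		/* Cheap case first: refill from the node's partial lists. */
		freelist = get_partial(s, flags, node, c);
		if (freelist)
			return freelist;

		/* Otherwise allocate a fresh slab. */
		page = new_slab(s, flags, node);
		if (page) {
			c = __this_cpu_ptr(s->cpu_slab);
			if (c->page)
				flush_slab(s, c);

			/*
			 * No other reference to the page yet so we can
			 * muck around with it freely without cmpxchg
			 */
			freelist = page->freelist;
			page->freelist = NULL;

			stat(s, ALLOC_SLAB);
			c->page = page;
			*pc = c;
		} else
			freelist = NULL;

		return freelist;
	}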
@@ -2173,6 +2175,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
 	do {
 		freelist = page->freelist;
 		counters = page->counters;
+
 		new.counters = counters;
 		VM_BUG_ON(!new.frozen);
 
@@ -2206,7 +2209,8 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			  unsigned long addr, struct kmem_cache_cpu *c)
 {
-	void **object;
+	void *freelist;
+	struct page *page;
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -2219,25 +2223,29 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-	if (!c->page)
+	page = c->page;
+	if (!page)
 		goto new_slab;
 redo:
-	if (unlikely(!node_match(c, node))) {
+
+	if (unlikely(!node_match(page, node))) {
 		stat(s, ALLOC_NODE_MISMATCH);
-		deactivate_slab(s, c);
+		deactivate_slab(s, page, c->freelist);
+		c->page = NULL;
+		c->freelist = NULL;
 		goto new_slab;
 	}
 
 	/* must check again c->freelist in case of cpu migration or IRQ */
-	object = c->freelist;
-	if (object)
+	freelist = c->freelist;
+	if (freelist)
 		goto load_freelist;
 
 	stat(s, ALLOC_SLOWPATH);
 
-	object = get_freelist(s, c->page);
+	freelist = get_freelist(s, page);
 
-	if (!object) {
+	if (!freelist) {
 		c->page = NULL;
 		stat(s, DEACTIVATE_BYPASS);
 		goto new_slab;
@@ -2246,50 +2254,50 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	stat(s, ALLOC_REFILL);
 
 load_freelist:
-	c->freelist = get_freepointer(s, object);
+	/*
+	 * freelist is pointing to the list of objects to be used.
+	 * page is pointing to the page from which the objects are obtained.
+	 * That page must be frozen for per cpu allocations to work.
+	 */
+	VM_BUG_ON(!c->page->frozen);
+	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
 	local_irq_restore(flags);
-	return object;
+	return freelist;
 
 new_slab:
 
 	if (c->partial) {
-		c->page = c->partial;
-		c->partial = c->page->next;
-		c->node = page_to_nid(c->page);
+		page = c->page = c->partial;
+		c->partial = page->next;
 		stat(s, CPU_PARTIAL_ALLOC);
 		c->freelist = NULL;
 		goto redo;
 	}
 
-	/* Then do expensive stuff like retrieving pages from the partial lists */
-	object = get_partial(s, gfpflags, node, c);
-
-	if (unlikely(!object)) {
-
-		object = new_slab_objects(s, gfpflags, node, &c);
+	freelist = new_slab_objects(s, gfpflags, node, &c);
 
-		if (unlikely(!object)) {
+	if (unlikely(!freelist)) {
 		if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
 			slab_out_of_memory(s, gfpflags, node);
 
 		local_irq_restore(flags);
 		return NULL;
 	}
-	}
 
+	page = c->page;
 	if (likely(!kmem_cache_debug(s)))
 		goto load_freelist;
 
 	/* Only entered in the debug case */
-	if (!alloc_debug_processing(s, c->page, object, addr))
+	if (!alloc_debug_processing(s, page, freelist, addr))
 		goto new_slab;	/* Slab failed checks. Next slab needed */
 
-	c->freelist = get_freepointer(s, object);
-	deactivate_slab(s, c);
-	c->node = NUMA_NO_NODE;
+	deactivate_slab(s, page, get_freepointer(s, freelist));
+	c->page = NULL;
+	c->freelist = NULL;
 	local_irq_restore(flags);
-	return object;
+	return freelist;
 }
 
 /*
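
The __slab_alloc() hunks interleave many renames, so the post-merge control flow reads more easily in one piece. A hedged skeleton reconstructed from the hunks above; the preemption-related refetch of c and the out-of-memory reporting are elided:

	static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
				  unsigned long addr, struct kmem_cache_cpu *c)
	{
		void *freelist;
		struct page *page;
		unsigned long flags;

		local_irq_save(flags);
		page = c->page;
		if (!page)
			goto new_slab;
	redo:
		if (unlikely(!node_match(page, node))) {
			stat(s, ALLOC_NODE_MISMATCH);
			deactivate_slab(s, page, c->freelist);
			c->page = NULL;
			c->freelist = NULL;
			goto new_slab;
		}

		/* must check again c->freelist in case of cpu migration or IRQ */
		freelist = c->freelist;
		if (freelist)
			goto load_freelist;

		stat(s, ALLOC_SLOWPATH);

		freelist = get_freelist(s, page);
		if (!freelist) {
			c->page = NULL;
			stat(s, DEACTIVATE_BYPASS);
			goto new_slab;
		}

		stat(s, ALLOC_REFILL);

	load_freelist:
		/* The cpu slab must be frozen for lockless per cpu allocation to work. */
		VM_BUG_ON(!c->page->frozen);
		c->freelist = get_freepointer(s, freelist);
		c->tid = next_tid(c->tid);
		local_irq_restore(flags);
		return freelist;

	new_slab:
		if (c->partial) {
			page = c->page = c->partial;
			c->partial = page->next;
			stat(s, CPU_PARTIAL_ALLOC);
			c->freelist = NULL;
			goto redo;
		}

		freelist = new_slab_objects(s, gfpflags, node, &c);
		if (unlikely(!freelist)) {
			/* out-of-memory reporting elided */
			local_irq_restore(flags);
			return NULL;
		}

		page = c->page;
		if (likely(!kmem_cache_debug(s)))
			goto load_freelist;

		/* Only entered in the debug case */
		if (!alloc_debug_processing(s, page, freelist, addr))
			goto new_slab;	/* Slab failed checks. Next slab needed */

		deactivate_slab(s, page, get_freepointer(s, freelist));
		c->page = NULL;
		c->freelist = NULL;
		local_irq_restore(flags);
		return freelist;
	}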
@@ -2307,6 +2315,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct kmem_cache_cpu *c;
+	struct page *page;
 	unsigned long tid;
 
 	if (slab_pre_alloc_hook(s, gfpflags))
@@ -2332,7 +2341,8 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	barrier();
 
 	object = c->freelist;
-	if (unlikely(!object || !node_match(c, node)))
+	page = c->page;
+	if (unlikely(!object || !node_match(page, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
@@ -4500,13 +4510,14 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
 		for_each_possible_cpu(cpu) {
 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
-			int node = ACCESS_ONCE(c->node);
+			int node;
 			struct page *page;
 
-			if (node < 0)
-				continue;
 			page = ACCESS_ONCE(c->page);
-			if (page) {
+			if (!page)
+				continue;
+
+			node = page_to_nid(page);
 			if (flags & SO_TOTAL)
 				x = page->objects;
 			else if (flags & SO_OBJECTS)
@@ -4516,14 +4527,14 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
 			total += x;
 			nodes[node] += x;
-			}
-			page = c->partial;
 
+			page = ACCESS_ONCE(c->partial);
 			if (page) {
 				x = page->pobjects;
 				total += x;
 				nodes[node] += x;
 			}
+
 			per_cpu[node]++;
 		}
 	}