Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 57d437d2 authored by Christoph Lameter, committed by Pekka Enberg
Browse files

slub: pass page to node_match() instead of kmem_cache_cpu structure



Avoid passing the kmem_cache_cpu pointer to node_match. This makes the
node_match function more generic and easier to understand.

Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent f6e7def7
Loading
Loading
Loading
Loading
+6 −4
Original line number Diff line number Diff line
@@ -2050,10 +2050,10 @@ static void flush_all(struct kmem_cache *s)
 * Check if the objects in a per cpu structure fit numa
 * locality expectations.
 */
static inline int node_match(struct kmem_cache_cpu *c, int node)
static inline int node_match(struct page *page, int node)
{
#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
	if (node != NUMA_NO_NODE && page_to_nid(page) != node)
		return 0;
#endif
	return 1;
@@ -2226,7 +2226,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
		goto new_slab;
redo:

	if (unlikely(!node_match(c, node))) {
	if (unlikely(!node_match(page, node))) {
		stat(s, ALLOC_NODE_MISMATCH);
		deactivate_slab(s, page, c->freelist);
		c->page = NULL;
@@ -2313,6 +2313,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
{
	void **object;
	struct kmem_cache_cpu *c;
	struct page *page;
	unsigned long tid;

	if (slab_pre_alloc_hook(s, gfpflags))
@@ -2338,7 +2339,8 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
	barrier();

	object = c->freelist;
	if (unlikely(!object || !node_match(c, node)))
	page = c->page;
	if (unlikely(!object || !node_match(page, node)))

		object = __slab_alloc(s, gfpflags, node, addr, c);