Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b49af68f authored by Christoph Lameter, committed by Linus Torvalds
Browse files

Add virt_to_head_page and consolidate code in slab and slub

parent 6d777953
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -286,6 +286,12 @@ static inline void get_page(struct page *page)
	atomic_inc(&page->_count);
}

/*
 * virt_to_head_page - map a kernel virtual address to the head page
 * of the (possibly compound) page backing it.
 *
 * Collapses the virt_to_page() + compound_head() pair that callers in
 * slab/slub previously open-coded.
 */
static inline struct page *virt_to_head_page(const void *x)
{
	return compound_head(virt_to_page(x));
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
+4 −5
Original line number Diff line number Diff line
@@ -614,20 +614,19 @@ static inline void page_set_slab(struct page *page, struct slab *slab)

/*
 * page_get_slab - return the struct slab stashed on a slab page.
 *
 * Normalizes to the head page first so tail pages of a compound slab
 * page resolve correctly; the slab pointer lives in lru.prev of the
 * head page. BUG if the page is not a slab page.
 */
static inline struct slab *page_get_slab(struct page *page)
{
	struct page *head = compound_head(page);

	BUG_ON(!PageSlab(head));
	return (struct slab *)head->lru.prev;
}

/*
 * virt_to_cache - find the kmem_cache that owns the given object.
 *
 * Uses virt_to_head_page() (rather than plain virt_to_page()) so that
 * objects inside compound slab pages resolve to the head page, where
 * the cache pointer is stored.
 */
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page_get_cache(page);
}

/*
 * virt_to_slab - find the struct slab that the given object lives in.
 *
 * Uses virt_to_head_page() (rather than plain virt_to_page()) so that
 * objects inside compound slab pages resolve to the head page, where
 * the slab pointer is stored.
 */
static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page_get_slab(page);
}

@@ -2876,7 +2875,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,

	objp -= obj_offset(cachep);
	kfree_debugcheck(objp);
	page = virt_to_page(objp);
	page = virt_to_head_page(objp);

	slabp = page_get_slab(page);

@@ -3100,7 +3099,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
		struct slab *slabp;
		unsigned objnr;

		slabp = page_get_slab(virt_to_page(objp));
		slabp = page_get_slab(virt_to_head_page(objp));
		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
	}
+4 −6
Original line number Diff line number Diff line
@@ -1323,9 +1323,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
{
	struct page * page;

	page = virt_to_page(x);

	page = compound_head(page);
	page = virt_to_head_page(x);

	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
		set_tracking(s, x, TRACK_FREE);
@@ -1336,7 +1334,7 @@ EXPORT_SYMBOL(kmem_cache_free);
/* Figure out on which slab object the object resides */
static struct page *get_object_page(const void *x)
{
	struct page *page = compound_head(virt_to_page(x));
	struct page *page = virt_to_head_page(x);

	if (!PageSlab(page))
		return NULL;
@@ -2076,7 +2074,7 @@ void kfree(const void *x)
	if (!x)
		return;

	page = compound_head(virt_to_page(x));
	page = virt_to_head_page(x);

	s = page->slab;

@@ -2112,7 +2110,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
		return NULL;
	}

	page = compound_head(virt_to_page(p));
	page = virt_to_head_page(p);

	new_cache = get_slab(new_size, flags);