
Commit 7c0cb9c6 authored by Ezequiel Garcia, committed by Pekka Enberg

mm, slab: Replace 'caller' type, void* -> unsigned long



This allows using _RET_IP_ instead of __builtin_return_address(0), thus
achieving implementation consistency across all three allocators.
Though perhaps a nitpick, the real goal behind this patch is
to be able to share common code between SLAB and SLUB.

Signed-off-by: Ezequiel Garcia <elezegarcia@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent f3f74101
mm/slab.c +24 −26
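For context, _RET_IP_ is defined in include/linux/kernel.h as
(unsigned long)__builtin_return_address(0), so typing 'caller' as
unsigned long lets every entry point pass the macro directly and
confines (void *) casts to the few debug sites that store the address.
Below is a minimal userspace sketch of the same idiom, assuming a
GCC/Clang compiler; the toy_* names are hypothetical stand-ins for the
slab debug hooks, not kernel interfaces:

#include <stdio.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's _RET_IP_ macro. */
#define RET_IP	((unsigned long)__builtin_return_address(0))

/* Toy stand-in for a debug hook such as the SLAB_STORE_USER
 * bookkeeping; it just reports the recorded caller address. */
static void *toy_cache_alloc(size_t size, unsigned long caller)
{
	printf("alloc of %zu bytes requested from ip 0x%lx\n", size, caller);
	return NULL;	/* real allocation elided in this sketch */
}

/* With 'caller' typed unsigned long, the entry point passes the
 * macro directly; no (void *) casts along the call chain. */
void *toy_kmalloc(size_t size)
{
	return toy_cache_alloc(size, RET_IP);
}

int main(void)
{
	toy_kmalloc(32);
	return 0;
}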
@@ -3084,7 +3084,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
}

static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
-				   void *caller)
+				   unsigned long caller)
{
	struct page *page;
	unsigned int objnr;
@@ -3104,7 +3104,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
	}
	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+		*dbg_userword(cachep, objp) = (void *)caller;

	objnr = obj_to_index(cachep, slabp, objp);

@@ -3117,7 +3117,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
	if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
-			store_stackinfo(cachep, objp, (unsigned long)caller);
+			store_stackinfo(cachep, objp, caller);
			kernel_map_pages(virt_to_page(objp),
					 cachep->size / PAGE_SIZE, 0);
		} else {
@@ -3270,7 +3270,7 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,

#if DEBUG
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
-				gfp_t flags, void *objp, void *caller)
+				gfp_t flags, void *objp, unsigned long caller)
{
	if (!objp)
		return objp;
@@ -3287,7 +3287,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
		poison_obj(cachep, objp, POISON_INUSE);
	}
	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+		*dbg_userword(cachep, objp) = (void *)caller;

	if (cachep->flags & SLAB_RED_ZONE) {
		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
@@ -3562,7 +3562,7 @@ done:
 */
static __always_inline void *
__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
-		   void *caller)
+		   unsigned long caller)
{
	unsigned long save_flags;
	void *ptr;
@@ -3648,7 +3648,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
#endif /* CONFIG_NUMA */

static __always_inline void *
-__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+__cache_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
{
	unsigned long save_flags;
	void *objp;
@@ -3784,7 +3784,7 @@ free_done:
 * be in this state _before_ it is released.  Called with disabled ints.
 */
static inline void __cache_free(struct kmem_cache *cachep, void *objp,
-				void *caller)
+				unsigned long caller)
{
	struct array_cache *ac = cpu_cache_get(cachep);

@@ -3824,7 +3824,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
-	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = __cache_alloc(cachep, flags, _RET_IP_);

	trace_kmem_cache_alloc(_RET_IP_, ret,
			       cachep->object_size, cachep->size, flags);
@@ -3839,7 +3839,7 @@ kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
{
	void *ret;

-	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+	ret = __cache_alloc(cachep, flags, _RET_IP_);

	trace_kmalloc(_RET_IP_, ret,
		      size, cachep->size, flags);
@@ -3851,8 +3851,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
-	void *ret = __cache_alloc_node(cachep, flags, nodeid,
-				       __builtin_return_address(0));
+	void *ret = __cache_alloc_node(cachep, flags, nodeid, _RET_IP_);

	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    cachep->object_size, cachep->size,
@@ -3870,8 +3869,8 @@ void *kmem_cache_alloc_node_trace(size_t size,
{
	void *ret;

-	ret = __cache_alloc_node(cachep, flags, nodeid,
-				  __builtin_return_address(0));
+	ret = __cache_alloc_node(cachep, flags, nodeid, _RET_IP_);

	trace_kmalloc_node(_RET_IP_, ret,
			   size, cachep->size,
			   flags, nodeid);
@@ -3881,7 +3880,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif

static __always_inline void *
-__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
+__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *cachep;

@@ -3894,21 +3893,20 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
-	return __do_kmalloc_node(size, flags, node,
-			__builtin_return_address(0));
+	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
		int node, unsigned long caller)
{
-	return __do_kmalloc_node(size, flags, node, (void *)caller);
+	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#else
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
-	return __do_kmalloc_node(size, flags, node, NULL);
+	return __do_kmalloc_node(size, flags, node, 0);
}
EXPORT_SYMBOL(__kmalloc_node);
#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
@@ -3921,7 +3919,7 @@ EXPORT_SYMBOL(__kmalloc_node);
 * @caller: function caller for debug tracking of the caller
 */
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
-					  void *caller)
+					  unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;
@@ -3936,7 +3934,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
		return cachep;
	ret = __cache_alloc(cachep, flags, caller);

-	trace_kmalloc((unsigned long) caller, ret,
+	trace_kmalloc(caller, ret,
		      size, cachep->size, flags);

	return ret;
@@ -3946,20 +3944,20 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc(size_t size, gfp_t flags)
{
-	return __do_kmalloc(size, flags, __builtin_return_address(0));
+	return __do_kmalloc(size, flags, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
{
-	return __do_kmalloc(size, flags, (void *)caller);
+	return __do_kmalloc(size, flags, caller);
}
EXPORT_SYMBOL(__kmalloc_track_caller);

#else
void *__kmalloc(size_t size, gfp_t flags)
{
-	return __do_kmalloc(size, flags, NULL);
+	return __do_kmalloc(size, flags, 0);
}
EXPORT_SYMBOL(__kmalloc);
#endif
@@ -3980,7 +3978,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
	debug_check_no_locks_freed(objp, cachep->object_size);
	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(objp, cachep->object_size);
-	__cache_free(cachep, objp, __builtin_return_address(0));
+	__cache_free(cachep, objp, _RET_IP_);
	local_irq_restore(flags);

	trace_kmem_cache_free(_RET_IP_, objp);
@@ -4011,7 +4009,7 @@ void kfree(const void *objp)
	debug_check_no_locks_freed(objp, c->object_size);

	debug_check_no_obj_freed(objp, c->object_size);
-	__cache_free(c, (void *)objp, __builtin_return_address(0));
+	__cache_free(c, (void *)objp, _RET_IP_);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);
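A usage note: the *_track_caller entry points keep their unsigned long
parameter precisely so that wrappers can forward their own caller's
address, letting SLAB_STORE_USER debugging record the real call site
rather than the wrapper. A hedged sketch of such a wrapper follows;
my_zalloc_wrapper is hypothetical, while __kmalloc_track_caller and
_RET_IP_ are the interfaces touched by this patch:

#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical wrapper: passes its own return address so debug
 * bookkeeping attributes the allocation to the wrapper's caller. */
static inline void *my_zalloc_wrapper(size_t size, gfp_t flags)
{
	void *p = __kmalloc_track_caller(size, flags, _RET_IP_);

	if (p)
		memset(p, 0, size);
	return p;
}

This mirrors how the kernel's own kmalloc_track_caller() macro expands
to __kmalloc_track_caller(size, flags, _RET_IP_).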