
Commit 505f5dcb authored by Alexander Potapenko, committed by Linus Torvalds

mm, kasan: add GFP flags to KASAN API



Add GFP flags to KASAN hooks for future patches to use.

This patch is based on the "mm: kasan: unified support for SLUB and SLAB
allocators" patch originally prepared by Dmitry Chernenkov.

Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrey Konovalov <adech.fo@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7ed2f9e6
include/linux/kasan.h +11 −8
@@ -55,13 +55,14 @@ void kasan_poison_slab(struct page *page);
 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
 void kasan_poison_object_data(struct kmem_cache *cache, void *object);
 
-void kasan_kmalloc_large(const void *ptr, size_t size);
+void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
 void kasan_kfree_large(const void *ptr);
 void kasan_kfree(void *ptr);
-void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
-void kasan_krealloc(const void *object, size_t new_size);
+void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
+		  gfp_t flags);
+void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
 
-void kasan_slab_alloc(struct kmem_cache *s, void *object);
+void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
 void kasan_slab_free(struct kmem_cache *s, void *object);
 
 struct kasan_cache {
@@ -94,14 +95,16 @@ static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 static inline void kasan_poison_object_data(struct kmem_cache *cache,
 					void *object) {}
 
-static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
+static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
 static inline void kasan_kfree_large(const void *ptr) {}
 static inline void kasan_kfree(void *ptr) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
-				size_t size) {}
-static inline void kasan_krealloc(const void *object, size_t new_size) {}
+				size_t size, gfp_t flags) {}
+static inline void kasan_krealloc(const void *object, size_t new_size,
+				 gfp_t flags) {}
 
-static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
+static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
+				   gfp_t flags) {}
 static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
 
 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
include/linux/slab.h +2 −2
@@ -376,7 +376,7 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
 {
 	void *ret = kmem_cache_alloc(s, flags);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, flags);
 	return ret;
 }
 
@@ -387,7 +387,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 {
 	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
 #endif /* CONFIG_TRACING */
mm/kasan/kasan.c +8 −7
@@ -434,9 +434,9 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 }
 #endif
 
-void kasan_slab_alloc(struct kmem_cache *cache, void *object)
+void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 {
-	kasan_kmalloc(cache, object, cache->object_size);
+	kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
 void kasan_slab_free(struct kmem_cache *cache, void *object)
@@ -462,7 +462,8 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
 	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 }
 
-void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
+void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
+		   gfp_t flags)
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
@@ -491,7 +492,7 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
-void kasan_kmalloc_large(const void *ptr, size_t size)
+void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
 {
 	struct page *page;
 	unsigned long redzone_start;
@@ -510,7 +511,7 @@ void kasan_kmalloc_large(const void *ptr, size_t size)
 		KASAN_PAGE_REDZONE);
 }
 
-void kasan_krealloc(const void *object, size_t size)
+void kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
 	struct page *page;
 
@@ -520,9 +521,9 @@ void kasan_krealloc(const void *object, size_t size)
 	page = virt_to_head_page(object);
 
 	if (unlikely(!PageSlab(page)))
-		kasan_kmalloc_large(object, size);
+		kasan_kmalloc_large(object, size, flags);
 	else
-		kasan_kmalloc(page->slab_cache, object, size);
+		kasan_kmalloc(page->slab_cache, object, size, flags);
 }
 
 void kasan_kfree(void *ptr)
mm/mempool.c +8 −8
@@ -112,12 +112,12 @@ static void kasan_poison_element(mempool_t *pool, void *element)
 		kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
 
-static void kasan_unpoison_element(mempool_t *pool, void *element)
+static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
 {
 	if (pool->alloc == mempool_alloc_slab)
-		kasan_slab_alloc(pool->pool_data, element);
+		kasan_slab_alloc(pool->pool_data, element, flags);
 	if (pool->alloc == mempool_kmalloc)
-		kasan_krealloc(element, (size_t)pool->pool_data);
+		kasan_krealloc(element, (size_t)pool->pool_data, flags);
 	if (pool->alloc == mempool_alloc_pages)
 		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 }
@@ -130,12 +130,12 @@ static void add_element(mempool_t *pool, void *element)
 	pool->elements[pool->curr_nr++] = element;
 }
 
-static void *remove_element(mempool_t *pool)
+static void *remove_element(mempool_t *pool, gfp_t flags)
 {
 	void *element = pool->elements[--pool->curr_nr];
 
 	BUG_ON(pool->curr_nr < 0);
-	kasan_unpoison_element(pool, element);
+	kasan_unpoison_element(pool, element, flags);
 	check_element(pool, element);
 	return element;
 }
@@ -154,7 +154,7 @@ void mempool_destroy(mempool_t *pool)
 		return;
 
 	while (pool->curr_nr) {
-		void *element = remove_element(pool);
+		void *element = remove_element(pool, GFP_KERNEL);
 		pool->free(element, pool->pool_data);
 	}
 	kfree(pool->elements);
@@ -250,7 +250,7 @@ int mempool_resize(mempool_t *pool, int new_min_nr)
 	spin_lock_irqsave(&pool->lock, flags);
 	if (new_min_nr <= pool->min_nr) {
 		while (new_min_nr < pool->curr_nr) {
-			element = remove_element(pool);
+			element = remove_element(pool, GFP_KERNEL);
 			spin_unlock_irqrestore(&pool->lock, flags);
 			pool->free(element, pool->pool_data);
 			spin_lock_irqsave(&pool->lock, flags);
@@ -347,7 +347,7 @@ void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 
 	spin_lock_irqsave(&pool->lock, flags);
 	if (likely(pool->curr_nr)) {
-		element = remove_element(pool);
+		element = remove_element(pool, gfp_temp);
 		spin_unlock_irqrestore(&pool->lock, flags);
 		/* paired with rmb in mempool_free(), read comment there */
 		smp_wmb();
mm/slab.c +8 −7
@@ -3378,7 +3378,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
-	kasan_slab_alloc(cachep, ret);
+	kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc(_RET_IP_, ret,
 			       cachep->object_size, cachep->size, flags);
 
@@ -3444,7 +3444,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 
 	ret = slab_alloc(cachep, flags, _RET_IP_);
 
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc(_RET_IP_, ret,
 		      size, cachep->size, flags);
 	return ret;
@@ -3468,7 +3468,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
-	kasan_slab_alloc(cachep, ret);
+	kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    cachep->object_size, cachep->size,
 				    flags, nodeid);
@@ -3486,7 +3486,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 	void *ret;
 
 	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
-	kasan_kmalloc(cachep, ret, size);
+
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, cachep->size,
 			   flags, nodeid);
@@ -3505,7 +3506,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
 	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 
 	return ret;
 }
@@ -3541,7 +3542,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 		return cachep;
 	ret = slab_alloc(cachep, flags, caller);
 
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc(caller, ret,
 		      size, cachep->size, flags);
 
@@ -4323,7 +4324,7 @@ size_t ksize(const void *objp)
 	/* We assume that ksize callers could use the whole allocated area,
 	 * so we need to unpoison this area.
 	 */
-	kasan_krealloc(objp, size);
+	kasan_krealloc(objp, size, GFP_NOWAIT);
 
 	return size;
 }