
Commit 80a9201a authored by Alexander Potapenko, committed by Linus Torvalds

mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB

For KASAN builds:
 - switch SLUB allocator to using stackdepot instead of storing the
   allocation/deallocation stacks in the objects;
 - change the freelist hook so that parts of the freelist can be put
   into the quarantine.
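As background for the two points above (not part of the patch text), here is a minimal sketch of what the stackdepot switch means for the per-object metadata, assuming the 2016-era <linux/stackdepot.h> interface (struct stack_trace plus depot_save_stack()): each object keeps only a small handle, while the deduplicated call chains live in the stack depot.

/*
 * Sketch only: the per-object track record shrinks to a depot handle.
 * Assumes save_stack_trace()/depot_save_stack() as they existed in 2016.
 */
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

struct kasan_track {
	u32 pid;
	depot_stack_handle_t stack;	/* replaces an inline array of return addresses */
};

static depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[64];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.nr_entries	= 0,
		.skip		= 0,
	};

	save_stack_trace(&trace);		/* capture the current call chain */
	return depot_save_stack(&trace, flags);	/* dedupe, store, return a handle */
}

The freelist-hook change can be pictured the same way. The helper names below (next_free(), kasan_claim_for_quarantine(), really_free()) are hypothetical stand-ins, not the functions this commit adds: on free, KASAN may claim an object for the quarantine, in which case it is withheld from the allocator and only released when the quarantine is drained.

/*
 * Conceptual sketch of putting parts of a freelist into the quarantine;
 * every helper name here is hypothetical.
 */
static void free_objects_with_quarantine(struct kmem_cache *s, void *head)
{
	void *object = head;

	while (object) {
		void *next = next_free(s, object);	/* read the object's free pointer */

		if (kasan_claim_for_quarantine(s, object)) {
			/* object is poisoned and queued; it goes back to the
			 * allocator later, when the quarantine is flushed */
		} else {
			really_free(s, object);		/* release immediately */
		}
		object = next;
	}
}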

[aryabinin@virtuozzo.com: fixes]
  Link: http://lkml.kernel.org/r/1468601423-28676-1-git-send-email-aryabinin@virtuozzo.com
Link: http://lkml.kernel.org/r/1468347165-41906-3-git-send-email-glider@google.com


Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <adech.fo@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Steven Rostedt (Red Hat) <rostedt@goodmis.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kostya Serebryany <kcc@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Kuthonuzo Luruo <kuthonuzo.luruo@hpe.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c146a2b9
include/linux/kasan.h  +2 −0
@@ -77,6 +77,7 @@ void kasan_free_shadow(const struct vm_struct *vm);

size_t ksize(const void *);
static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
+size_t kasan_metadata_size(struct kmem_cache *cache);

#else /* CONFIG_KASAN */

@@ -121,6 +122,7 @@ static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

static inline void kasan_unpoison_slab(const void *ptr) { }
+static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }

#endif /* CONFIG_KASAN */

include/linux/slab_def.h  +2 −1
@@ -88,7 +88,8 @@ struct kmem_cache {
};

static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
-				void *x) {
+				void *x)
+{
	void *object = x - (x - page->s_mem) % cache->size;
	void *last_object = page->s_mem + (cache->num - 1) * cache->size;

include/linux/slub_def.h  +4 −0
@@ -104,6 +104,10 @@ struct kmem_cache {
	unsigned int *random_seq;
#endif

+#ifdef CONFIG_KASAN
+	struct kasan_cache kasan_info;
+#endif
+
	struct kmem_cache_node *node[MAX_NUMNODES];
};

lib/Kconfig.kasan  +2 −2
@@ -5,9 +5,9 @@ if HAVE_ARCH_KASAN

config KASAN
	bool "KASan: runtime memory debugger"
-	depends on SLUB_DEBUG || (SLAB && !DEBUG_SLAB)
+	depends on SLUB || (SLAB && !DEBUG_SLAB)
	select CONSTRUCTORS
-	select STACKDEPOT if SLAB
+	select STACKDEPOT
	help
	  Enables kernel address sanitizer - runtime memory debugger,
	  designed to find out-of-bounds accesses and use-after-free bugs.
mm/kasan/Makefile  +1 −2
@@ -7,5 +7,4 @@ CFLAGS_REMOVE_kasan.o = -pg
# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)

-obj-y := kasan.o report.o kasan_init.o
-obj-$(CONFIG_SLAB) += quarantine.o
+obj-y := kasan.o report.o kasan_init.o quarantine.o