
Commit b1eeab67 authored by Vegard Nossum

kmemcheck: add hooks for the page allocator



This adds support for tracking the initializedness of memory that
was allocated with the page allocator. Highmem requests are not
tracked.

Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>

[build fix for !CONFIG_KMEMCHECK]
Signed-off-by: Ingo Molnar <mingo@elte.hu>

[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
parent 9b5cab31
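
For orientation before the hunks: the new kmemcheck_pagealloc_alloc() hook declared and implemented below is meant to be invoked from the page allocator's success path. The mm/page_alloc.c hunk that wires it up is not part of this excerpt, so the following is only a minimal sketch of how such a call site could look; the wrapper name and exact placement are assumptions:

/*
 * Sketch only: shows where the hook fits, not the actual
 * mm/page_alloc.c change (which this excerpt does not include).
 */
static struct page *example_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	page = alloc_pages(gfp_mask, order);
	if (page)
		/* Record the fresh pages in kmemcheck's shadow memory. */
		kmemcheck_pagealloc_alloc(page, order, gfp_mask);

	return page;
}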
arch/x86/include/asm/thread_info.h  +2 −2
@@ -154,9 +154,9 @@ struct thread_info {
 
 /* thread information allocation */
 #ifdef CONFIG_DEBUG_STACK_USAGE
-#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 #else
-#define THREAD_FLAGS GFP_KERNEL
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
 #endif
 
 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
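
Adding __GFP_NOTRACK here exempts kernel-stack pages from kmemcheck tracking, presumably because taking kmemcheck's page faults while running on the very stack being tracked would be unsafe. For context, THREAD_FLAGS feeds the x86 thread-stack allocator, roughly as below; the exact macro body is an assumption based on kernels of this era:

/* Assumed consumer of THREAD_FLAGS on x86 at the time of this commit. */
#define alloc_thread_info(tsk)						\
	((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))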
arch/x86/mm/kmemcheck/shadow.c  +8 −0
@@ -116,6 +116,14 @@ void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
 		kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
 }
 
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 0; i < n; ++i)
+		kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE);
+}
+
 enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
 {
 	uint8_t *x;
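
kmemcheck keeps one shadow byte per tracked data byte, and the mark_*_pages helpers above simply set whole-page runs of shadow bytes to a given state. A self-contained toy model of that bookkeeping, for illustration only (the real code lives in arch/x86/mm/kmemcheck/shadow.c; the state names mirror enum kmemcheck_shadow, visible in the context lines above):

#include <string.h>

/* States mirroring the kernel's enum kmemcheck_shadow. */
enum shadow_state {
	SHADOW_UNALLOCATED,
	SHADOW_UNINITIALIZED,
	SHADOW_INITIALIZED,
	SHADOW_FREED,
};

#define TOY_PAGE_SIZE 4096

/* One shadow byte per tracked byte of a single toy page. */
static unsigned char shadow[TOY_PAGE_SIZE];

/* Toy analogue of kmemcheck_mark_initialized(addr, PAGE_SIZE). */
static void toy_mark_initialized(unsigned int offset, unsigned int n)
{
	memset(shadow + offset, SHADOW_INITIALIZED, n);
}

/* A read is reported unless every covered shadow byte is initialized. */
static int toy_read_ok(unsigned int offset, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i)
		if (shadow[offset + i] != SHADOW_INITIALIZED)
			return 0;
	return 1;
}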
include/linux/gfp.h  +5 −0
@@ -51,7 +51,12 @@ struct vm_area_struct;
 #define __GFP_THISNODE	((__force gfp_t)0x40000u)/* No fallback, no policies */
 #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
 #define __GFP_MOVABLE	((__force gfp_t)0x100000u)  /* Page is movable */
 
+#ifdef CONFIG_KMEMCHECK
+#define __GFP_NOTRACK	((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
+#else
+#define __GFP_NOTRACK	((__force gfp_t)0)
+#endif
 
 /*
  * This may seem redundant, but it's a way of annotating false positives vs.
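
Defining __GFP_NOTRACK as 0 when CONFIG_KMEMCHECK is disabled is the usual kernel idiom for optional GFP bits: callers can OR the flag in unconditionally and it compiles away when the feature is off, exactly as the alloc_pages_node() change further down relies on. A minimal illustration (the function name is hypothetical):

/*
 * Needs no #ifdef CONFIG_KMEMCHECK: OR-ing in a zero flag is a no-op
 * when kmemcheck is compiled out.
 */
static struct page *alloc_untracked_pages(gfp_t flags, unsigned int order)
{
	return alloc_pages(flags | __GFP_NOTRACK, order);
}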
include/linux/kmemcheck.h  +29 −6
@@ -8,13 +8,15 @@
 extern int kmemcheck_enabled;
 
 /* The slab-related functions. */
-void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
-			    struct page *page, int order);
-void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order);
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
+void kmemcheck_free_shadow(struct page *page, int order);
 void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
 			  size_t size);
 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
 
+void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
+			       gfp_t gfpflags);
+
 void kmemcheck_show_pages(struct page *p, unsigned int n);
 void kmemcheck_hide_pages(struct page *p, unsigned int n);

@@ -27,6 +29,7 @@ void kmemcheck_mark_freed(void *address, unsigned int n);
 
 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
 
 int kmemcheck_show_addr(unsigned long address);
 int kmemcheck_hide_addr(unsigned long address);
@@ -34,13 +37,12 @@ int kmemcheck_hide_addr(unsigned long address);
 #define kmemcheck_enabled 0
 
 static inline void
-kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
-		       struct page *page, int order)
+kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
 {
 }
 
 static inline void
-kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
+kmemcheck_free_shadow(struct page *page, int order)
 {
 }

@@ -55,6 +57,11 @@ static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
 {
 }
 
+static inline void kmemcheck_pagealloc_alloc(struct page *p,
+	unsigned int order, gfp_t gfpflags)
+{
+}
+
 static inline bool kmemcheck_page_is_tracked(struct page *p)
 {
 	return false;
@@ -75,6 +82,22 @@ static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
 static inline void kmemcheck_mark_freed(void *address, unsigned int n)
 {
 }
+
+static inline void kmemcheck_mark_unallocated_pages(struct page *p,
+						    unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
+						      unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized_pages(struct page *p,
+						    unsigned int n)
+{
+}
+
 #endif /* CONFIG_KMEMCHECK */
 
 #endif /* LINUX_KMEMCHECK_H */
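
Note how every declaration in the CONFIG_KMEMCHECK branch is paired with an empty static inline stub in the #else branch, so call sites compile unchanged in both configurations and the optimizer discards the no-op calls. A hedged example of a caller relying on this (the function is hypothetical, not from the patch):

/* Builds identically with or without CONFIG_KMEMCHECK. */
static void example_release_pages(struct page *page, int order)
{
	/* Empty static inline stub when kmemcheck is compiled out. */
	kmemcheck_free_shadow(page, order);
	__free_pages(page, order);
}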
mm/kmemcheck.c  +32 −13
 #include <linux/gfp.h>
 #include <linux/mm_types.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/kmemcheck.h>
 
-void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
-			   struct page *page, int order)
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
 {
 	struct page *shadow;
 	int pages;
@@ -16,7 +16,7 @@ void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
 	 * With kmemcheck enabled, we need to allocate a memory area for the
 	 * shadow bits as well.
 	 */
-	shadow = alloc_pages_node(node, flags, order);
+	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
 	if (!shadow) {
 		if (printk_ratelimit())
 			printk(KERN_ERR "kmemcheck: failed to allocate "
@@ -33,23 +33,17 @@ void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
 	 * the memory accesses.
 	 */
 	kmemcheck_hide_pages(page, pages);
-
-	/*
-	 * Objects from caches that have a constructor don't get
-	 * cleared when they're allocated, so we need to do it here.
-	 */
-	if (s->ctor)
-		kmemcheck_mark_uninitialized_pages(page, pages);
-	else
-		kmemcheck_mark_unallocated_pages(page, pages);
 }
 
-void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
+void kmemcheck_free_shadow(struct page *page, int order)
 {
 	struct page *shadow;
 	int pages;
 	int i;
 
+	if (!kmemcheck_page_is_tracked(page))
+		return;
+
 	pages = 1 << order;
 
 	kmemcheck_show_pages(page, pages);
@@ -101,3 +95,28 @@ void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
 	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
 		kmemcheck_mark_freed(object, size);
 }
+
+void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
+			       gfp_t gfpflags)
+{
+	int pages;
+
+	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
+		return;
+
+	pages = 1 << order;
+
+	/*
+	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
+	 * can become uninitialized by copying uninitialized memory
+	 * into them.
+	 */
+
+	/* XXX: Can use zone->node for node? */
+	kmemcheck_alloc_shadow(page, order, gfpflags, -1);
+
+	if (gfpflags & __GFP_ZERO)
+		kmemcheck_mark_initialized_pages(page, pages);
+	else
+		kmemcheck_mark_uninitialized_pages(page, pages);
+}
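
Putting the hook together: highmem and __GFP_NOTRACK requests are skipped entirely, __GFP_ZERO pages start out marked initialized, and all other pages start uninitialized. A hedged sketch of a caller deliberately opting a buffer out of tracking, assuming the hook is wired into the allocator by this commit's page_alloc.c change (not shown here); the names and the device-buffer scenario are illustrative:

/*
 * Illustrative opt-out: a buffer that hardware will fill, where
 * kmemcheck's uninitialized-read reports would be false positives.
 */
static void *example_alloc_hw_buffer(unsigned int order)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, order);
	if (!page)
		return NULL;

	/* kmemcheck_pagealloc_alloc() returns early for __GFP_NOTRACK,
	 * so accesses to this buffer are never checked. */
	return page_address(page);
}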