
Commit dcc7cd01 authored by Linus Torvalds

Merge branch 'kmemleak' of git://linux-arm.org/linux-2.6

* 'kmemleak' of git://linux-arm.org/linux-2.6:
  kmemleak: fix kconfig for crc32 build error
  kmemleak: Reduce the false positives by checking for modified objects
  kmemleak: Show the age of an unreferenced object
  kmemleak: Release the object lock before calling put_object()
  kmemleak: Scan the _ftrace_events section in modules
  kmemleak: Simplify the kmemleak_scan_area() function prototype
  kmemleak: Do not use off-slab management with SLAB_NOLEAKTRACE
parents bf931a01 b60e26a2
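
The user-visible API change in this series is the simpler kmemleak_scan_area() prototype: callers now pass the start address and size of the sub-range to scan directly, instead of an offset/length pair relative to the enclosing object (compare the include/linux/kmemleak.h and mm/slab.c hunks below). A minimal caller sketch under the new prototype; struct foo and its fields are hypothetical and only illustrate the calling convention:

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

/* hypothetical object: only 'deps' contains pointers worth scanning */
struct foo {
	char name[64];		/* plain data, scanning it is wasted effort */
	struct list_head deps;	/* the only field holding pointers */
};

static struct foo *foo_create(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	/*
	 * Old API: kmemleak_scan_area(f, offsetof(struct foo, deps),
	 *                             sizeof(f->deps), GFP_KERNEL);
	 * New API: pass the address and size of the range directly; kmemleak
	 * looks up the enclosing tracked object from the interior pointer.
	 */
	kmemleak_scan_area(&f->deps, sizeof(f->deps), GFP_KERNEL);
	return f;
}

This mirrors the mm/slab.c hunk below, where the &slabp->list range replaces the old offsetof(struct slab, list) form.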
include/linux/kmemleak.h (+2 −4)
@@ -32,8 +32,7 @@ extern void kmemleak_padding(const void *ptr, unsigned long offset,
 			     size_t size) __ref;
 extern void kmemleak_not_leak(const void *ptr) __ref;
 extern void kmemleak_ignore(const void *ptr) __ref;
-extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
-			       size_t length, gfp_t gfp) __ref;
+extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
 extern void kmemleak_no_scan(const void *ptr) __ref;
 
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
@@ -84,8 +83,7 @@ static inline void kmemleak_not_leak(const void *ptr)
 static inline void kmemleak_ignore(const void *ptr)
 {
 }
-static inline void kmemleak_scan_area(const void *ptr, unsigned long offset,
-				      size_t length, gfp_t gfp)
+static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
 }
 static inline void kmemleak_erase(void **ptr)
kernel/module.c (+8 −5)
@@ -1910,9 +1910,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
 	unsigned int i;
 
 	/* only scan the sections containing data */
-	kmemleak_scan_area(mod->module_core, (unsigned long)mod -
-			   (unsigned long)mod->module_core,
-			   sizeof(struct module), GFP_KERNEL);
+	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
 
 	for (i = 1; i < hdr->e_shnum; i++) {
 		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -1921,8 +1919,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
 		    && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
 			continue;
 
-		kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
-				   (unsigned long)mod->module_core,
-				   sechdrs[i].sh_size, GFP_KERNEL);
+		kmemleak_scan_area((void *)sechdrs[i].sh_addr,
+				   sechdrs[i].sh_size, GFP_KERNEL);
 	}
 }
@@ -2250,6 +2247,12 @@ static noinline struct module *load_module(void __user *umod,
 					 "_ftrace_events",
 					 sizeof(*mod->trace_events),
 					 &mod->num_trace_events);
+	/*
+	 * This section contains pointers to allocated objects in the trace
+	 * code and not scanning it leads to false positives.
+	 */
+	kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
+			   mod->num_trace_events, GFP_KERNEL);
 #endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 	/* sechdrs[0].sh_size is always zero */
lib/Kconfig.debug (+1 −0)
@@ -360,6 +360,7 @@ config DEBUG_KMEMLEAK
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
 	select KALLSYMS
+	select CRC32
 	help
 	  Say Y here if you want to enable the memory leak
 	  detector. The memory allocation/freeing is traced in a way
mm/kmemleak.c (+101 −87)
@@ -93,6 +93,7 @@
 #include <linux/nodemask.h>
 #include <linux/mm.h>
 #include <linux/workqueue.h>
+#include <linux/crc32.h>
 
 #include <asm/sections.h>
 #include <asm/processor.h>
@@ -108,7 +109,6 @@
 #define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
 #define SECS_FIRST_SCAN		60	/* delay before the first scan */
 #define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
-#define GRAY_LIST_PASSES	25	/* maximum number of gray list scans */
 #define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */
 
 #define BYTES_PER_POINTER	sizeof(void *)
@@ -119,8 +119,8 @@
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
 	struct hlist_node node;
-	unsigned long offset;
-	size_t length;
+	unsigned long start;
+	size_t size;
 };
 
 #define KMEMLEAK_GREY	0
@@ -149,6 +149,8 @@ struct kmemleak_object {
 	int min_count;
 	/* the total number of pointers found pointing to this object */
 	int count;
+	/* checksum for detecting modified objects */
+	u32 checksum;
 	/* memory ranges to be scanned inside an object (empty for all) */
 	struct hlist_head area_list;
 	unsigned long trace[MAX_TRACE];
@@ -164,8 +166,6 @@ struct kmemleak_object {
 #define OBJECT_REPORTED		(1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN		(1 << 2)
-/* flag set on newly allocated objects */
-#define OBJECT_NEW		(1 << 3)
 
 /* number of bytes to print per line; must be 16 or 32 */
 #define HEX_ROW_SIZE		16
@@ -241,8 +241,6 @@ struct early_log {
 	const void *ptr;		/* allocated/freed memory block */
 	size_t size;			/* memory block size */
 	int min_count;			/* minimum reference count */
-	unsigned long offset;		/* scan area offset */
-	size_t length;			/* scan area length */
 	unsigned long trace[MAX_TRACE];	/* stack trace */
 	unsigned int trace_len;		/* stack trace length */
 };
@@ -323,11 +321,6 @@ static bool color_gray(const struct kmemleak_object *object)
 		object->count >= object->min_count;
 }
 
-static bool color_black(const struct kmemleak_object *object)
-{
-	return object->min_count == KMEMLEAK_BLACK;
-}
-
 /*
  * Objects are considered unreferenced only if their color is white, they have
  * not be deleted and have a minimum age to avoid false positives caused by
@@ -335,7 +328,7 @@ static bool color_black(const struct kmemleak_object *object)
  */
 static bool unreferenced_object(struct kmemleak_object *object)
 {
-	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
+	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
 		time_before_eq(object->jiffies + jiffies_min_age,
 			       jiffies_last_scan);
 }
@@ -348,11 +341,13 @@ static void print_unreferenced(struct seq_file *seq,
 			       struct kmemleak_object *object)
 {
 	int i;
+	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
 
 	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
 		   object->pointer, object->size);
-	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
-		   object->comm, object->pid, object->jiffies);
+	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
+		   object->comm, object->pid, object->jiffies,
+		   msecs_age / 1000, msecs_age % 1000);
 	hex_dump_object(seq, object);
 	seq_printf(seq, "  backtrace:\n");
 
@@ -381,6 +376,7 @@ static void dump_object_info(struct kmemleak_object *object)
 	pr_notice("  min_count = %d\n", object->min_count);
 	pr_notice("  count = %d\n", object->count);
 	pr_notice("  flags = 0x%lx\n", object->flags);
+	pr_notice("  checksum = %d\n", object->checksum);
 	pr_notice("  backtrace:\n");
 	print_stack_trace(&trace, 4);
 }
@@ -522,12 +518,13 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	INIT_HLIST_HEAD(&object->area_list);
 	spin_lock_init(&object->lock);
 	atomic_set(&object->use_count, 1);
-	object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
+	object->flags = OBJECT_ALLOCATED;
 	object->pointer = ptr;
 	object->size = size;
 	object->min_count = min_count;
-	object->count = -1;			/* no color initially */
+	object->count = 0;			/* white color initially */
 	object->jiffies = jiffies;
+	object->checksum = 0;
 
 	/* task information */
 	if (in_irq()) {
@@ -720,14 +717,13 @@ static void make_black_object(unsigned long ptr)
  * Add a scanning area to the object. If at least one such area is added,
  * kmemleak will only scan these ranges rather than the whole memory block.
  */
-static void add_scan_area(unsigned long ptr, unsigned long offset,
-			  size_t length, gfp_t gfp)
+static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 {
 	unsigned long flags;
 	struct kmemleak_object *object;
 	struct kmemleak_scan_area *area;
 
-	object = find_and_get_object(ptr, 0);
+	object = find_and_get_object(ptr, 1);
 	if (!object) {
 		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
 			      ptr);
@@ -741,7 +737,7 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
 	}
 
 	spin_lock_irqsave(&object->lock, flags);
-	if (offset + length > object->size) {
+	if (ptr + size > object->pointer + object->size) {
 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 		dump_object_info(object);
 		kmem_cache_free(scan_area_cache, area);
@@ -749,8 +745,8 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
 	}
 
 	INIT_HLIST_NODE(&area->node);
-	area->offset = offset;
-	area->length = length;
+	area->start = ptr;
+	area->size = size;
 
 	hlist_add_head(&area->node, &object->area_list);
 out_unlock:
@@ -786,7 +782,7 @@ static void object_no_scan(unsigned long ptr)
  * processed later once kmemleak is fully initialized.
  */
 static void __init log_early(int op_type, const void *ptr, size_t size,
-			     int min_count, unsigned long offset, size_t length)
+			     int min_count)
 {
 	unsigned long flags;
 	struct early_log *log;
@@ -808,8 +804,6 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
 	log->ptr = ptr;
 	log->size = size;
 	log->min_count = min_count;
-	log->offset = offset;
-	log->length = length;
 	if (op_type == KMEMLEAK_ALLOC)
 		log->trace_len = __save_stack_trace(log->trace);
 	crt_early_log++;
@@ -858,7 +852,7 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		create_object((unsigned long)ptr, size, min_count, gfp);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
+		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
@@ -873,7 +867,7 @@ void __ref kmemleak_free(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		delete_object_full((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_FREE, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
 
@@ -888,7 +882,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		delete_object_part((unsigned long)ptr, size);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
@@ -903,7 +897,7 @@ void __ref kmemleak_not_leak(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		make_gray_object((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_not_leak);
 
@@ -919,22 +913,21 @@ void __ref kmemleak_ignore(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		make_black_object((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
 /*
  * Limit the range to be scanned in an allocated memory block.
  */
-void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
-			      size_t length, gfp_t gfp)
+void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
-		add_scan_area((unsigned long)ptr, offset, length, gfp);
+		add_scan_area((unsigned long)ptr, size, gfp);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
+		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
 }
 EXPORT_SYMBOL(kmemleak_scan_area);
 
@@ -948,10 +941,24 @@ void __ref kmemleak_no_scan(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		object_no_scan((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
 
+/*
+ * Update an object's checksum and return true if it was modified.
+ */
+static bool update_checksum(struct kmemleak_object *object)
+{
+	u32 old_csum = object->checksum;
+
+	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
+		return false;
+
+	object->checksum = crc32(0, (void *)object->pointer, object->size);
+	return object->checksum != old_csum;
+}
+
 /*
  * Memory scanning is a long process and it needs to be interruptable. This
  * function checks whether such interrupt condition occured.
@@ -1031,11 +1038,14 @@ static void scan_block(void *_start, void *_end,
 		 * added to the gray_list.
 		 */
 		object->count++;
-		if (color_gray(object))
+		if (color_gray(object)) {
 			list_add_tail(&object->gray_list, &gray_list);
-		else
-			put_object(object);
-		spin_unlock_irqrestore(&object->lock, flags);
+			spin_unlock_irqrestore(&object->lock, flags);
+			continue;
+		}
+
+		spin_unlock_irqrestore(&object->lock, flags);
+		put_object(object);
 	}
 }
 
@@ -1075,13 +1085,46 @@ static void scan_object(struct kmemleak_object *object)
 		}
 	} else
 		hlist_for_each_entry(area, elem, &object->area_list, node)
-			scan_block((void *)(object->pointer + area->offset),
-				   (void *)(object->pointer + area->offset
-					    + area->length), object, 0);
+			scan_block((void *)area->start,
+				   (void *)(area->start + area->size),
+				   object, 0);
 out:
 	spin_unlock_irqrestore(&object->lock, flags);
 }
 
+/*
+ * Scan the objects already referenced (gray objects). More objects will be
+ * referenced and, if there are no memory leaks, all the objects are scanned.
+ */
+static void scan_gray_list(void)
+{
+	struct kmemleak_object *object, *tmp;
+
+	/*
+	 * The list traversal is safe for both tail additions and removals
+	 * from inside the loop. The kmemleak objects cannot be freed from
+	 * outside the loop because their use_count was incremented.
+	 */
+	object = list_entry(gray_list.next, typeof(*object), gray_list);
+	while (&object->gray_list != &gray_list) {
+		cond_resched();
+
+		/* may add new objects to the list */
+		if (!scan_should_stop())
+			scan_object(object);
+
+		tmp = list_entry(object->gray_list.next, typeof(*object),
+				 gray_list);
+
+		/* remove the object from the list and release it */
+		list_del(&object->gray_list);
+		put_object(object);
+
+		object = tmp;
+	}
+	WARN_ON(!list_empty(&gray_list));
+}
+
 /*
  * Scan data sections and all the referenced memory blocks allocated via the
  * kernel's standard allocators. This function must be called with the
@@ -1090,10 +1133,9 @@ out:
 static void kmemleak_scan(void)
 {
 	unsigned long flags;
-	struct kmemleak_object *object, *tmp;
+	struct kmemleak_object *object;
 	int i;
 	int new_leaks = 0;
-	int gray_list_pass = 0;
 
 	jiffies_last_scan = jiffies;
 
@@ -1114,7 +1156,6 @@ static void kmemleak_scan(void)
 #endif
 		/* reset the reference count (whiten the object) */
 		object->count = 0;
-		object->flags &= ~OBJECT_NEW;
 		if (color_gray(object) && get_object(object))
 			list_add_tail(&object->gray_list, &gray_list);
 
@@ -1172,62 +1213,36 @@ static void kmemleak_scan(void)
 
 	/*
 	 * Scan the objects already referenced from the sections scanned
-	 * above. More objects will be referenced and, if there are no memory
-	 * leaks, all the objects will be scanned. The list traversal is safe
-	 * for both tail additions and removals from inside the loop. The
-	 * kmemleak objects cannot be freed from outside the loop because their
-	 * use_count was increased.
+	 * above.
 	 */
-repeat:
-	object = list_entry(gray_list.next, typeof(*object), gray_list);
-	while (&object->gray_list != &gray_list) {
-		cond_resched();
-
-		/* may add new objects to the list */
-		if (!scan_should_stop())
-			scan_object(object);
-
-		tmp = list_entry(object->gray_list.next, typeof(*object),
-				 gray_list);
-
-		/* remove the object from the list and release it */
-		list_del(&object->gray_list);
-		put_object(object);
-
-		object = tmp;
-	}
-
-	if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
-		goto scan_end;
+	scan_gray_list();
 
 	/*
-	 * Check for new objects allocated during this scanning and add them
-	 * to the gray list.
+	 * Check for new or unreferenced objects modified since the previous
+	 * scan and color them gray until the next scan.
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
 		spin_lock_irqsave(&object->lock, flags);
-		if ((object->flags & OBJECT_NEW) && !color_black(object) &&
-		    get_object(object)) {
-			object->flags &= ~OBJECT_NEW;
+		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
+		    && update_checksum(object) && get_object(object)) {
+			/* color it gray temporarily */
+			object->count = object->min_count;
 			list_add_tail(&object->gray_list, &gray_list);
 		}
 		spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
-	if (!list_empty(&gray_list))
-		goto repeat;
-
-scan_end:
-	WARN_ON(!list_empty(&gray_list));
+	/*
+	 * Re-scan the gray list for modified unreferenced objects.
+	 */
+	scan_gray_list();
 
 	/*
-	 * If scanning was stopped or new objects were being allocated at a
-	 * higher rate than gray list scanning, do not report any new
-	 * unreferenced objects.
+	 * If scanning was stopped do not report any new unreferenced objects.
 	 */
-	if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
+	if (scan_should_stop())
 		return;
 
 	/*
@@ -1642,8 +1657,7 @@ void __init kmemleak_init(void)
 			kmemleak_ignore(log->ptr);
 			break;
 		case KMEMLEAK_SCAN_AREA:
-			kmemleak_scan_area(log->ptr, log->offset, log->length,
-					   GFP_KERNEL);
+			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
 			break;
 		case KMEMLEAK_NO_SCAN:
 			kmemleak_no_scan(log->ptr);
mm/slab.c (+6 −4)
@@ -2275,9 +2275,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	/*
 	 * Determine if the slab management is 'on' or 'off' slab.
 	 * (bootstrapping cannot cope with offslab caches so don't do
-	 * it too early on.)
+	 * it too early on. Always use on-slab management when
+	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
 	 */
-	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
+	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
+	    !(flags & SLAB_NOLEAKTRACE))
 		/*
 		 * Size is large, assume best to place the slab management obj
 		 * off-slab (should allow better packing of objs).
@@ -2596,8 +2598,8 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 		 * kmemleak does not treat the ->s_mem pointer as a reference
 		 * to the object. Otherwise we will not report the leak.
 		 */
-		kmemleak_scan_area(slabp, offsetof(struct slab, list),
-				   sizeof(struct list_head), local_flags);
+		kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
+				   local_flags);
 		if (!slabp)
 			return NULL;
 	} else {
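
For context on the false-positive fix in mm/kmemleak.c above: instead of flagging objects with OBJECT_NEW and rescanning the gray list up to GRAY_LIST_PASSES times, the scanner now keeps a CRC32 of each object's contents. After a scan, a still-white (leak-candidate) object whose checksum changed since the previous pass is temporarily colored gray and scanned once more, because the pointers it now holds have not been followed yet. A rough user-space model of that decision, not kernel code (builds with -lz for zlib's crc32()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>

struct object {
	void *mem;
	size_t size;
	int count;		/* references found during the last scan */
	int min_count;		/* references required to be considered in use */
	uint32_t checksum;
};

/* white = fewer references than required, i.e. a leak candidate */
static bool color_white(const struct object *o)
{
	return o->count < o->min_count;
}

/* recompute the CRC of the object's contents; report whether it changed */
static bool update_checksum(struct object *o)
{
	uint32_t old = o->checksum;

	o->checksum = (uint32_t)crc32(0L, o->mem, o->size);
	return o->checksum != old;
}

/*
 * Post-scan decision: a still-white object whose contents changed since the
 * previous scan is deferred (re-grayed) rather than reported, because the
 * pointers it now holds have not been followed yet.
 */
static bool defer_report(struct object *o)
{
	return color_white(o) && update_checksum(o);
}

int main(void)
{
	char buf[32] = "initial contents";
	struct object o = { .mem = buf, .size = sizeof(buf), .min_count = 1 };

	printf("scan 1: defer=%d\n", defer_report(&o)); /* 1: checksum was 0 */
	printf("scan 2: defer=%d\n", defer_report(&o)); /* 0: unchanged */
	strcpy(buf, "modified contents");
	printf("scan 3: defer=%d\n", defer_report(&o)); /* 1: contents changed */
	return 0;
}

A freshly created kmemleak object starts with checksum 0 (see the create_object() hunk), so in practice it is deferred on its first scan after allocation, which is what replaces the old OBJECT_NEW handling.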