
Commit 6a62bbe8 authored by Catalin Marinas, committed by Greg Kroah-Hartman

kmemleak: powerpc: skip scanning holes in the .bss section

[ Upstream commit 298a32b132087550d3fa80641ca58323c5dfd4d9 ]

Commit 2d4f5671 ("KVM: PPC: Introduce kvm_tmp framework") adds
kvm_tmp[] to the .bss section and then frees the unused remainder back
to the page allocator:

kernel_init
  kvm_guest_init
    kvm_free_tmp
      free_reserved_area
        free_unref_page
          free_unref_page_prepare
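
Roughly, the layout looks like this (a sketch only: the 1 MB size and the
kvm_tmp_index name are assumed from arch/powerpc/kernel/kvm.c, and
kvm_free_tmp_sketch is an illustrative name, not the real function):

static char kvm_tmp[1024 * 1024];	/* placed in the .bss section */
static int kvm_tmp_index;		/* bytes of kvm_tmp[] actually used */

static __init void kvm_free_tmp_sketch(void)
{
	/*
	 * Give the unused tail of kvm_tmp[] back to the page allocator,
	 * leaving a hole in the middle of .bss.
	 */
	free_reserved_area(&kvm_tmp[kvm_tmp_index],
			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}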

With DEBUG_PAGEALLOC=y, those freed pages are also unmapped from the
kernel.  As a result, the kmemleak scan triggers a panic when it walks
the .bss section and hits the unmapped pages.
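
A simplified picture of where this blows up (not kmemleak's actual
scan_block() implementation, just the access pattern it performs on .bss):

#include <asm/sections.h>	/* __bss_start, __bss_stop */

static void scan_bss_sketch(void)
{
	unsigned long *ptr;

	/* kmemleak walks .bss word by word looking for pointer values */
	for (ptr = (unsigned long *)__bss_start;
	     ptr < (unsigned long *)__bss_stop; ptr++) {
		unsigned long word = *ptr;	/* faults on pages unmapped by
						 * DEBUG_PAGEALLOC after
						 * free_reserved_area() */

		/* ... check whether 'word' points into a tracked object ... */
		(void)word;
	}
}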

This patch creates dedicated kmemleak objects for the .data, .bss and,
where it is not already contained in .data, the .data..ro_after_init
sections, allowing partial freeing via kmemleak_free_part() in the
powerpc kvm_free_tmp() function.
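
For reference, kmemleak_free_part(const void *ptr, size_t size), declared
in <linux/kmemleak.h>, tells kmemleak that a sub-range of an already
tracked object is gone, so that range is no longer scanned.  A usage
sketch of that pattern, with made-up names (this is essentially what the
kvm.c hunk below does):

#include <linux/kmemleak.h>

/* Illustrative only: 'buf', 'used' and 'total' are not from the patch. */
static void release_unused_tail(char *buf, size_t used, size_t total)
{
	/* Stop kmemleak from scanning the range we are about to free. */
	kmemleak_free_part(buf + used, total - used);

	/* ... then hand the pages back, e.g. via free_reserved_area() ... */
}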

Link: http://lkml.kernel.org/r/20190321171917.62049-1-catalin.marinas@arm.com


Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Qian Cai <cai@lca.pw>
Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
Tested-by: Qian Cai <cai@lca.pw>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Avi Kivity <avi@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 82e8da1f
arch/powerpc/kernel/kvm.c  +7 −0
@@ -22,6 +22,7 @@
 #include <linux/kvm_host.h>
 #include <linux/init.h>
 #include <linux/export.h>
+#include <linux/kmemleak.h>
 #include <linux/kvm_para.h>
 #include <linux/slab.h>
 #include <linux/of.h>
@@ -712,6 +713,12 @@ static void kvm_use_magic_page(void)
 
 static __init void kvm_free_tmp(void)
 {
+	/*
+	 * Inform kmemleak about the hole in the .bss section since the
+	 * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
+	 */
+	kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
+			   ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
 	free_reserved_area(&kvm_tmp[kvm_tmp_index],
 			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
 }
mm/kmemleak.c  +11 −5
@@ -1501,11 +1501,6 @@ static void kmemleak_scan(void)
 	}
 	rcu_read_unlock();
 
-	/* data/bss scanning */
-	scan_large_block(_sdata, _edata);
-	scan_large_block(__bss_start, __bss_stop);
-	scan_large_block(__start_ro_after_init, __end_ro_after_init);
-
 #ifdef CONFIG_SMP
 	/* per-cpu sections scanning */
 	for_each_possible_cpu(i)
@@ -2036,6 +2031,17 @@ void __init kmemleak_init(void)
 	}
 	local_irq_restore(flags);
 
+	/* register the data/bss sections */
+	create_object((unsigned long)_sdata, _edata - _sdata,
+		      KMEMLEAK_GREY, GFP_ATOMIC);
+	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
+		      KMEMLEAK_GREY, GFP_ATOMIC);
+	/* only register .data..ro_after_init if not within .data */
+	if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
+		create_object((unsigned long)__start_ro_after_init,
+			      __end_ro_after_init - __start_ro_after_init,
+			      KMEMLEAK_GREY, GFP_ATOMIC);
+
 	/*
 	 * This is the point where tracking allocations is safe. Automatic
 	 * scanning is started during the late initcall. Add the early logged