Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 309381fe authored by Sasha Levin, committed by Linus Torvalds
Browse files

mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE



Most of the VM_BUG_ON assertions are performed on a page.  Usually, when
one of these assertions fails we'll get a BUG_ON with a call stack and
the registers.

Based on recent requests to add a small piece of page-dumping code at
various VM_BUG_ON sites, I've noticed that the page dump is quite useful
to people debugging issues in mm.

This patch adds a VM_BUG_ON_PAGE(cond, page) which beyond doing what
VM_BUG_ON() does, also dumps the page before executing the actual
BUG_ON.

[akpm@linux-foundation.org: fix up includes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e3bba3c3
Loading
Loading
Loading
Loading
+4 −4
Original line number Original line Diff line number Diff line
@@ -108,8 +108,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,


static inline void get_head_page_multiple(struct page *page, int nr)
static inline void get_head_page_multiple(struct page *page, int nr)
{
{
	VM_BUG_ON(page != compound_head(page));
	VM_BUG_ON_PAGE(page != compound_head(page), page);
	VM_BUG_ON(page_count(page) == 0);
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_add(nr, &page->_count);
	atomic_add(nr, &page->_count);
	SetPageReferenced(page);
	SetPageReferenced(page);
}
}
@@ -135,7 +135,7 @@ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
	head = pte_page(pte);
	head = pte_page(pte);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
	do {
		VM_BUG_ON(compound_head(page) != head);
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		pages[*nr] = page;
		if (PageTail(page))
		if (PageTail(page))
			get_huge_page_tail(page);
			get_huge_page_tail(page);
@@ -212,7 +212,7 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
	head = pte_page(pte);
	head = pte_page(pte);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
	do {
		VM_BUG_ON(compound_head(page) != head);
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		pages[*nr] = page;
		if (PageTail(page))
		if (PageTail(page))
			get_huge_page_tail(page);
			get_huge_page_tail(page);
+1 −0
Original line number Original line Diff line number Diff line
#ifndef __LINUX_GFP_H
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H
#define __LINUX_GFP_H


#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/linkage.h>
+2 −1
Original line number Original line Diff line number Diff line
@@ -2,6 +2,7 @@
#define _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H


#include <linux/mm_types.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/cgroup.h>
@@ -354,7 +355,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,


static inline struct hstate *page_hstate(struct page *page)
static inline struct hstate *page_hstate(struct page *page)
{
{
	VM_BUG_ON(!PageHuge(page));
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}
}


+3 −2
Original line number Original line Diff line number Diff line
@@ -15,6 +15,7 @@
#ifndef _LINUX_HUGETLB_CGROUP_H
#ifndef _LINUX_HUGETLB_CGROUP_H
#define _LINUX_HUGETLB_CGROUP_H
#define _LINUX_HUGETLB_CGROUP_H


#include <linux/mmdebug.h>
#include <linux/res_counter.h>
#include <linux/res_counter.h>


struct hugetlb_cgroup;
struct hugetlb_cgroup;
@@ -28,7 +29,7 @@ struct hugetlb_cgroup;


static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
{
{
	VM_BUG_ON(!PageHuge(page));
	VM_BUG_ON_PAGE(!PageHuge(page), page);


	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
		return NULL;
		return NULL;
@@ -38,7 +39,7 @@ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
static inline
static inline
int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
{
{
	VM_BUG_ON(!PageHuge(page));
	VM_BUG_ON_PAGE(!PageHuge(page), page);


	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
		return -1;
		return -1;
+13 −16
Original line number Original line Diff line number Diff line
@@ -5,6 +5,7 @@


#ifdef __KERNEL__
#ifdef __KERNEL__


#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/list.h>
@@ -303,7 +304,7 @@ static inline int get_freepage_migratetype(struct page *page)
 */
 */
static inline int put_page_testzero(struct page *page)
static inline int put_page_testzero(struct page *page)
{
{
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
	return atomic_dec_and_test(&page->_count);
	return atomic_dec_and_test(&page->_count);
}
}


@@ -364,7 +365,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
static inline void compound_lock(struct page *page)
static inline void compound_lock(struct page *page)
{
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(PageSlab(page));
	VM_BUG_ON_PAGE(PageSlab(page), page);
	bit_spin_lock(PG_compound_lock, &page->flags);
	bit_spin_lock(PG_compound_lock, &page->flags);
#endif
#endif
}
}
@@ -372,7 +373,7 @@ static inline void compound_lock(struct page *page)
static inline void compound_unlock(struct page *page)
static inline void compound_unlock(struct page *page)
{
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(PageSlab(page));
	VM_BUG_ON_PAGE(PageSlab(page), page);
	bit_spin_unlock(PG_compound_lock, &page->flags);
	bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
#endif
}
}
@@ -447,7 +448,7 @@ static inline bool __compound_tail_refcounted(struct page *page)
 */
 */
static inline bool compound_tail_refcounted(struct page *page)
static inline bool compound_tail_refcounted(struct page *page)
{
{
	VM_BUG_ON(!PageHead(page));
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return __compound_tail_refcounted(page);
	return __compound_tail_refcounted(page);
}
}


@@ -456,9 +457,9 @@ static inline void get_huge_page_tail(struct page *page)
	/*
	/*
	 * __split_huge_page_refcount() cannot run from under us.
	 * __split_huge_page_refcount() cannot run from under us.
	 */
	 */
	VM_BUG_ON(!PageTail(page));
	VM_BUG_ON_PAGE(!PageTail(page), page);
	VM_BUG_ON(page_mapcount(page) < 0);
	VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
	if (compound_tail_refcounted(page->first_page))
	if (compound_tail_refcounted(page->first_page))
		atomic_inc(&page->_mapcount);
		atomic_inc(&page->_mapcount);
}
}
@@ -474,7 +475,7 @@ static inline void get_page(struct page *page)
	 * Getting a normal page or the head of a compound page
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_count.
	 * requires to already have an elevated page->_count.
	 */
	 */
	VM_BUG_ON(atomic_read(&page->_count) <= 0);
	VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
	atomic_inc(&page->_count);
	atomic_inc(&page->_count);
}
}


@@ -511,13 +512,13 @@ static inline int PageBuddy(struct page *page)


static inline void __SetPageBuddy(struct page *page)
static inline void __SetPageBuddy(struct page *page)
{
{
	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}
}


static inline void __ClearPageBuddy(struct page *page)
static inline void __ClearPageBuddy(struct page *page)
{
{
	VM_BUG_ON(!PageBuddy(page));
	VM_BUG_ON_PAGE(!PageBuddy(page), page);
	atomic_set(&page->_mapcount, -1);
	atomic_set(&page->_mapcount, -1);
}
}


@@ -1401,7 +1402,7 @@ static inline bool ptlock_init(struct page *page)
	 * slab code uses page->slab_cache and page->first_page (for tail
	 * slab code uses page->slab_cache and page->first_page (for tail
	 * pages), which share storage with page->ptl.
	 * pages), which share storage with page->ptl.
	 */
	 */
	VM_BUG_ON(*(unsigned long *)&page->ptl);
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
	if (!ptlock_alloc(page))
		return false;
		return false;
	spin_lock_init(ptlock_ptr(page));
	spin_lock_init(ptlock_ptr(page));
@@ -1492,7 +1493,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
static inline void pgtable_pmd_page_dtor(struct page *page)
static inline void pgtable_pmd_page_dtor(struct page *page)
{
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(page->pmd_huge_pte);
	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
#endif
#endif
	ptlock_free(page);
	ptlock_free(page);
}
}
@@ -2029,10 +2030,6 @@ extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages;
extern atomic_long_t num_poisoned_pages;
extern int soft_offline_page(struct page *page, int flags);
extern int soft_offline_page(struct page *page, int flags);


extern void dump_page(struct page *page, char *reason);
extern void dump_page_badflags(struct page *page, char *reason,
			       unsigned long badflags);

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
extern void clear_huge_page(struct page *page,
			    unsigned long addr,
			    unsigned long addr,
Loading