
Commit 7bfec6f4 authored by Mel Gorman, committed by Linus Torvalds

mm, page_alloc: check multiple page fields with a single branch



Every page allocated or freed is checked for sanity to avoid corruptions
that are difficult to detect later.  A bad page could be due to a number
of fields.  Instead of using multiple branches, this patch combines
multiple field checks into a single branch.  A detailed per-field check
is only necessary if that combined check fails.
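
To illustrate the technique outside the kernel context: OR-ing together
several fields that should all be zero lets one predicted-true branch cover
all of them, and the per-field diagnosis runs only when that branch fails.
This is a minimal standalone sketch, not the patch itself; the struct, field
names and flag mask below are invented for the example.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical object with fields that must all be "empty" while it is free. */
struct obj {
	void *mapping;		/* must be NULL when free */
	int refcount;		/* must be 0 when free */
	unsigned long flags;	/* none of the checked flags may be set */
};

#define OBJ_FLAGS_CHECK_AT_FREE	0x3UL	/* invented flag mask */

/*
 * Fast path: OR the fields together so a single branch covers all of them.
 * The combined value is zero only if every individual field is clean.
 */
static bool obj_expected_state(const struct obj *o, unsigned long check_flags)
{
	return ((unsigned long)o->mapping |
		(unsigned long)o->refcount |
		(o->flags & check_flags)) == 0;
}

/*
 * Slow path: reached only when the combined check failed, so the extra
 * branches needed to name the exact culprit cost nothing in the common case.
 */
static int obj_check_free(const struct obj *o)
{
	const char *bad_reason = NULL;

	if (obj_expected_state(o, OBJ_FLAGS_CHECK_AT_FREE))
		return 0;

	if (o->mapping != NULL)
		bad_reason = "non-NULL mapping";
	if (o->refcount != 0)
		bad_reason = "nonzero refcount";
	if (o->flags & OBJ_FLAGS_CHECK_AT_FREE)
		bad_reason = "unexpected flags set";

	fprintf(stderr, "bad object: %s\n", bad_reason);
	return 1;
}

int main(void)
{
	struct obj good = { NULL, 0, 0 };
	struct obj bad  = { NULL, 1, 0 };

	printf("good: %d, bad: %d\n", obj_check_free(&good), obj_check_free(&bad));
	return 0;
}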

                                             4.6.0-rc2                  4.6.0-rc2
                                        initonce-v1r20            multcheck-v1r20
  Min      alloc-odr0-1               359.00 (  0.00%)           348.00 (  3.06%)
  Min      alloc-odr0-2               260.00 (  0.00%)           254.00 (  2.31%)
  Min      alloc-odr0-4               214.00 (  0.00%)           213.00 (  0.47%)
  Min      alloc-odr0-8               186.00 (  0.00%)           186.00 (  0.00%)
  Min      alloc-odr0-16              173.00 (  0.00%)           173.00 (  0.00%)
  Min      alloc-odr0-32              165.00 (  0.00%)           166.00 ( -0.61%)
  Min      alloc-odr0-64              162.00 (  0.00%)           162.00 (  0.00%)
  Min      alloc-odr0-128             161.00 (  0.00%)           160.00 (  0.62%)
  Min      alloc-odr0-256             170.00 (  0.00%)           169.00 (  0.59%)
  Min      alloc-odr0-512             181.00 (  0.00%)           180.00 (  0.55%)
  Min      alloc-odr0-1024            190.00 (  0.00%)           188.00 (  1.05%)
  Min      alloc-odr0-2048            196.00 (  0.00%)           194.00 (  1.02%)
  Min      alloc-odr0-4096            202.00 (  0.00%)           199.00 (  1.49%)
  Min      alloc-odr0-8192            205.00 (  0.00%)           202.00 (  1.46%)
  Min      alloc-odr0-16384           205.00 (  0.00%)           203.00 (  0.98%)

Again, the benefit is marginal but avoiding excessive branches is
important.  Ideally the paths would not have to check these conditions
at all but regrettably abandoning the tests would make use-after-free
bugs much harder to detect.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 93ea9964
mm/page_alloc.c: +43 −12
@@ -784,10 +784,42 @@ static inline void __free_one_page(struct page *page,
 	zone->free_area[order].nr_free++;
 }
 
+/*
+ * A bad page could be due to a number of fields. Instead of multiple branches,
+ * try and check multiple fields with one check. The caller must do a detailed
+ * check if necessary.
+ */
+static inline bool page_expected_state(struct page *page,
+					unsigned long check_flags)
+{
+	if (unlikely(atomic_read(&page->_mapcount) != -1))
+		return false;
+
+	if (unlikely((unsigned long)page->mapping |
+			page_ref_count(page) |
+#ifdef CONFIG_MEMCG
+			(unsigned long)page->mem_cgroup |
+#endif
+			(page->flags & check_flags)))
+		return false;
+
+	return true;
+}
+
 static inline int free_pages_check(struct page *page)
 {
-	const char *bad_reason = NULL;
-	unsigned long bad_flags = 0;
+	const char *bad_reason;
+	unsigned long bad_flags;
+
+	if (page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)) {
+		page_cpupid_reset_last(page);
+		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+		return 0;
+	}
+
+	/* Something has gone sideways, find it */
+	bad_reason = NULL;
+	bad_flags = 0;
 
 	if (unlikely(atomic_read(&page->_mapcount) != -1))
 		bad_reason = "nonzero mapcount";
@@ -803,15 +835,9 @@ static inline int free_pages_check(struct page *page)
 	if (unlikely(page->mem_cgroup))
 		bad_reason = "page still charged to cgroup";
 #endif
-	if (unlikely(bad_reason)) {
-		bad_page(page, bad_reason, bad_flags);
-		return 1;
-	}
-	page_cpupid_reset_last(page);
-	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
-		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
-	return 0;
+	bad_page(page, bad_reason, bad_flags);
+	return 1;
 }
 
 /*
  * Frees a number of pages from the PCP lists
@@ -1492,9 +1518,14 @@ static inline void expand(struct zone *zone, struct page *page,
  */
 static inline int check_new_page(struct page *page)
 {
-	const char *bad_reason = NULL;
-	unsigned long bad_flags = 0;
+	const char *bad_reason;
+	unsigned long bad_flags;
+
+	if (page_expected_state(page, PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))
+		return 0;
 
+	bad_reason = NULL;
+	bad_flags = 0;
 	if (unlikely(atomic_read(&page->_mapcount) != -1))
 		bad_reason = "nonzero mapcount";
 	if (unlikely(page->mapping != NULL))