Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2c888cfb authored by Rik van Riel, committed by Linus Torvalds
Browse files

thp: fix anon memory statistics with transparent hugepages



Count each transparent hugepage as HPAGE_PMD_NR pages in the LRU
statistics, so the Active(anon) and Inactive(anon) statistics in
/proc/meminfo are correct.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 97562cd2
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -117,11 +117,19 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
		return;
	__vma_adjust_trans_huge(vma, start, end, adjust_next);
}
/*
 * hpage_nr_pages - number of base pages backing @page.
 *
 * Returns HPAGE_PMD_NR for a transparent hugepage head page, 1 for a
 * regular page.  Used so LRU/meminfo statistics count a THP as
 * HPAGE_PMD_NR pages rather than one.
 */
static inline int hpage_nr_pages(struct page *page)
{
	/* THPs are rare relative to ordinary pages, hence unlikely(). */
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
+5 −3
Original line number Diff line number Diff line
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/huge_mm.h>

/**
 * page_is_file_cache - should the page be on a file LRU or anon LRU?
 * @page: the page to test
@@ -24,7 +26,7 @@ __add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
		       struct list_head *head)
{
	list_add(&page->lru, head);
	__inc_zone_state(zone, NR_LRU_BASE + l);
	__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
	mem_cgroup_add_lru_list(page, l);
}

@@ -38,7 +40,7 @@ static inline void
del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
	list_del(&page->lru);
	__dec_zone_state(zone, NR_LRU_BASE + l);
	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
	mem_cgroup_del_lru_list(page, l);
}

@@ -73,7 +75,7 @@ del_page_from_lru(struct zone *zone, struct page *page)
			l += LRU_ACTIVE;
		}
	}
	__dec_zone_state(zone, NR_LRU_BASE + l);
	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
	mem_cgroup_del_lru_list(page, l);
}

+10 −0
Original line number Diff line number Diff line
@@ -1143,6 +1143,7 @@ static void __split_huge_page_refcount(struct page *page)
	int i;
	unsigned long head_index = page->index;
	struct zone *zone = page_zone(page);
	int zonestat;

	/* prevent PageLRU to go away from under us, and freeze lru stats */
	spin_lock_irq(&zone->lru_lock);
@@ -1207,6 +1208,15 @@ static void __split_huge_page_refcount(struct page *page)
	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);

	/*
	 * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
	 * so adjust those appropriately if this page is on the LRU.
	 */
	if (PageLRU(page)) {
		zonestat = NR_LRU_BASE + page_lru(page);
		__mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
	}

	ClearPageCompound(page);
	compound_unlock(page);
	spin_unlock_irq(&zone->lru_lock);
+1 −1
Original line number Diff line number Diff line
@@ -1091,7 +1091,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
		case 0:
			list_move(&page->lru, dst);
			mem_cgroup_del_lru(page);
			nr_taken++;
			nr_taken += hpage_nr_pages(page);
			break;
		case -EBUSY:
			/* we don't affect global LRU but rotate in our LRU */
+6 −5
Original line number Diff line number Diff line
@@ -1045,7 +1045,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		case 0:
			list_move(&page->lru, dst);
			mem_cgroup_del_lru(page);
			nr_taken++;
			nr_taken += hpage_nr_pages(page);
			break;

		case -EBUSY:
@@ -1103,7 +1103,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
				list_move(&cursor_page->lru, dst);
				mem_cgroup_del_lru(cursor_page);
				nr_taken++;
				nr_taken += hpage_nr_pages(page);
				nr_lumpy_taken++;
				if (PageDirty(cursor_page))
					nr_lumpy_dirty++;
@@ -1158,14 +1158,15 @@ static unsigned long clear_active_flags(struct list_head *page_list,
	struct page *page;

	list_for_each_entry(page, page_list, lru) {
		int numpages = hpage_nr_pages(page);
		lru = page_lru_base_type(page);
		if (PageActive(page)) {
			lru += LRU_ACTIVE;
			ClearPageActive(page);
			nr_active++;
			nr_active += numpages;
		}
		if (count)
			count[lru]++;
			count[lru] += numpages;
	}

	return nr_active;
@@ -1483,7 +1484,7 @@ static void move_active_pages_to_lru(struct zone *zone,

		list_move(&page->lru, &zone->lru[lru].list);
		mem_cgroup_add_lru_list(page, lru);
		pgmoved++;
		pgmoved += hpage_nr_pages(page);

		if (!pagevec_add(&pvec, page) || list_empty(list)) {
			spin_unlock_irq(&zone->lru_lock);