
Commit 5576646f authored by Andrew Morton, committed by Linus Torvalds

revert "mm: fix-up zone present pages"



Revert commit 7f1290f2 ("mm: fix-up zone present pages")

That patch tried to fix an issue when calculating zone->present_pages,
but it caused a regression on 32-bit systems with HIGHMEM.  With that
change, reset_zone_present_pages() resets all zone->present_pages to
zero, and fixup_zone_present_pages() is called to recalculate
zone->present_pages when the boot allocator frees core memory pages into
the buddy allocator.  Because highmem pages are not freed by the bootmem
allocator, all highmem zones' present_pages become zero.
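
For illustration only (not part of this commit): a minimal userspace sketch, with
made-up zone names, page counts, and a hypothetical "bootmem_freed" field, of why
that scheme zeroes the highmem counters.  reset_zone_present_pages() clears every
zone's present_pages, but fixup_zone_present_pages() only runs for ranges released
through the bootmem/nobootmem paths, so a zone whose pages bypass the boot
allocator is left at zero.

/* zone_sketch.c -- illustrative only, NOT kernel code. */
#include <stdio.h>

struct zone_sketch {
	const char *name;
	unsigned long present_pages;	/* counter that the patch reset and fixed up */
	unsigned long bootmem_freed;	/* pages released via the boot allocator (0 for highmem) */
};

static struct zone_sketch zones[] = {
	{ "DMA",       4096,   4096 },
	{ "Normal",  221184, 221184 },
	{ "HighMem",  32768,      0 },	/* highmem goes straight to the buddy allocator */
};

int main(void)
{
	int i;

	/* reset_zone_present_pages(): every counter goes to zero */
	for (i = 0; i < 3; i++)
		zones[i].present_pages = 0;

	/* fixup_zone_present_pages(): re-accumulate, but only for the
	 * pages the bootmem/nobootmem code actually hands back */
	for (i = 0; i < 3; i++)
		zones[i].present_pages += zones[i].bootmem_freed;

	for (i = 0; i < 3; i++)
		printf("%-8s present_pages=%lu\n",
		       zones[i].name, zones[i].present_pages);
	/* HighMem prints 0 -- the regression this revert addresses */
	return 0;
}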

Various options for improving the situation are being discussed but for
now, let's return to the 3.6 code.

Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Petr Tesarik <ptesarik@suse.cz>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: David Rientjes <rientjes@google.com>
Tested-by: Chris Clayton <chris2553@googlemail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0f3c42f5
arch/ia64/mm/init.c  +0 −1
@@ -637,7 +637,6 @@ mem_init (void)
 
 	high_memory = __va(max_low_pfn * PAGE_SIZE);
 
-	reset_zone_present_pages();
 	for_each_online_pgdat(pgdat)
 		if (pgdat->bdata->node_bootmem_map)
 			totalram_pages += free_all_bootmem_node(pgdat);
include/linux/mm.h  +0 −4
@@ -1684,9 +1684,5 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
 static inline bool page_is_guard(struct page *page) { return false; }
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
-extern void reset_zone_present_pages(void);
-extern void fixup_zone_present_pages(int nid, unsigned long start_pfn,
-				unsigned long end_pfn);
-
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
mm/bootmem.c  +1 −9
@@ -198,8 +198,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 			int order = ilog2(BITS_PER_LONG);
 
 			__free_pages_bootmem(pfn_to_page(start), order);
-			fixup_zone_present_pages(page_to_nid(pfn_to_page(start)),
-					start, start + BITS_PER_LONG);
 			count += BITS_PER_LONG;
 			start += BITS_PER_LONG;
 		} else {
@@ -210,9 +208,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 				if (vec & 1) {
 					page = pfn_to_page(start + off);
 					__free_pages_bootmem(page, 0);
-					fixup_zone_present_pages(
-						page_to_nid(page),
-						start + off, start + off + 1);
 					count++;
 				}
 				vec >>= 1;
@@ -226,11 +221,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 	pages = bdata->node_low_pfn - bdata->node_min_pfn;
 	pages = bootmem_bootmap_pages(pages);
 	count += pages;
-	while (pages--) {
-		fixup_zone_present_pages(page_to_nid(page),
-				page_to_pfn(page), page_to_pfn(page) + 1);
+	while (pages--)
 		__free_pages_bootmem(page++, 0);
-	}
 
 	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
 
mm/memory_hotplug.c  +0 −7
@@ -106,7 +106,6 @@ static void get_page_bootmem(unsigned long info, struct page *page,
 void __ref put_page_bootmem(struct page *page)
 {
 	unsigned long type;
-	struct zone *zone;
 
 	type = (unsigned long) page->lru.next;
 	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
@@ -117,12 +116,6 @@ void __ref put_page_bootmem(struct page *page)
 		set_page_private(page, 0);
 		INIT_LIST_HEAD(&page->lru);
 		__free_pages_bootmem(page, 0);
-
-		zone = page_zone(page);
-		zone_span_writelock(zone);
-		zone->present_pages++;
-		zone_span_writeunlock(zone);
-		totalram_pages++;
 	}
 
 }
mm/nobootmem.c  +0 −3
@@ -116,8 +116,6 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
 		return 0;
 
 	__free_pages_memory(start_pfn, end_pfn);
-	fixup_zone_present_pages(pfn_to_nid(start >> PAGE_SHIFT),
-			start_pfn, end_pfn);
 
 	return end_pfn - start_pfn;
 }
@@ -128,7 +126,6 @@ unsigned long __init free_low_memory_core_early(int nodeid)
 	phys_addr_t start, end, size;
 	u64 i;
 
-	reset_zone_present_pages();
 	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
 		count += __free_memory_core(start, end);
 