
Commit f2260e6b authored by Mel Gorman, committed by Linus Torvalds

page allocator: update NR_FREE_PAGES only as necessary

When pages are being freed to the buddy allocator, the zone NR_FREE_PAGES
counter must be updated.  In the case of bulk per-cpu page freeing, it's
updated once per page.  This retouches cache lines more than necessary.
Update the counter once per per-cpu bulk free.
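To illustrate the pattern (a minimal user-space sketch under assumed names; mod_free_pages() is a hypothetical stand-in for __mod_zone_page_state() and is not the kernel code), the change amounts to hoisting the counter update out of the per-page loop:

#include <stdio.h>

/* Stand-in for a zone's NR_FREE_PAGES vmstat counter. */
static long nr_free_pages;

/* Hypothetical stand-in for __mod_zone_page_state(zone, NR_FREE_PAGES, delta). */
static void mod_free_pages(long delta)
{
	nr_free_pages += delta;
}

/* Free 'count' blocks of 2^order pages taken from a per-cpu list. */
static void free_pages_bulk(int count, unsigned int order)
{
	/* One update covers the whole batch... */
	mod_free_pages((long)count << order);

	while (count--) {
		/* ...instead of mod_free_pages(1 << order) here, which
		 * would dirty the counter's cache line once per page. */
	}
}

int main(void)
{
	free_pages_bulk(32, 0);	/* free a batch of 32 order-0 pages */
	printf("NR_FREE_PAGES: %ld\n", nr_free_pages);	/* prints 32 */
	return 0;
}

The allocation side gets the same treatment: rmqueue_bulk() decrements the counter once by i << order after the whole batch has been removed from the free lists.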

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 41858966
mm/page_alloc.c +7 −6
@@ -456,7 +456,6 @@ static inline void __free_one_page(struct page *page,
 		int migratetype)
 {
 	unsigned long page_idx;
-	int order_size = 1 << order;
 
 	if (unlikely(PageCompound(page)))
 		if (unlikely(destroy_compound_page(page, order)))
@@ -466,10 +465,9 @@ static inline void __free_one_page(struct page *page,
 
 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
-	VM_BUG_ON(page_idx & (order_size - 1));
+	VM_BUG_ON(page_idx & ((1 << order) - 1));
 	VM_BUG_ON(bad_range(zone, page));
 
-	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
 	while (order < MAX_ORDER-1) {
 		unsigned long combined_idx;
 		struct page *buddy;
@@ -524,6 +522,8 @@ static void free_pages_bulk(struct zone *zone, int count,
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
+
+	__mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
 	while (count--) {
 		struct page *page;
 
@@ -542,6 +542,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
+
+	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 	__free_one_page(page, zone, order, migratetype);
 	spin_unlock(&zone->lock);
 }
@@ -686,7 +688,6 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 		list_del(&page->lru);
 		rmv_page_order(page);
 		area->nr_free--;
-		__mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
 		expand(zone, page, order, current_order, area, migratetype);
 		return page;
 	}
@@ -826,8 +827,6 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			/* Remove the page from the freelists */
 			list_del(&page->lru);
 			rmv_page_order(page);
-			__mod_zone_page_state(zone, NR_FREE_PAGES,
-							-(1UL << order));
 
 			if (current_order == pageblock_order)
 				set_pageblock_migratetype(page,
@@ -900,6 +899,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		set_page_private(page, migratetype);
 		list = &page->lru;
 	}
+	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
 	spin_unlock(&zone->lock);
 	return i;
 }
@@ -1129,6 +1129,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
+		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 		spin_unlock(&zone->lock);
 		if (!page)
 			goto failed;