include/linux/mmzone.h +1 −0

```diff
@@ -104,6 +104,7 @@ static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
 struct free_area {
 	struct list_head	free_list[MIGRATE_TYPES];
 	unsigned long		nr_free;
+	unsigned long		nr_free_cma;
 };

 struct pglist_data;
```

include/linux/page-isolation.h +2 −2

```diff
@@ -33,10 +33,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 			 bool skip_hwpoisoned_pages);
 void set_pageblock_migratetype(struct page *page, int migratetype);
 int move_freepages_block(struct zone *zone, struct page *page,
-				int migratetype);
+				int migratetype, int old_mt);
 int move_freepages(struct zone *zone,
 			  struct page *start_page, struct page *end_page,
-			  int migratetype);
+			  int migratetype, int old_mt);

 /*
  * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
```

mm/page_alloc.c +75 −23

```diff
@@ -563,7 +563,9 @@ static inline void __free_one_page(struct page *page,
 	unsigned long combined_idx;
 	unsigned long uninitialized_var(buddy_idx);
 	struct page *buddy;
-	int max_order = MAX_ORDER;
+	unsigned int max_order;
+
+	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);

 	VM_BUG_ON(!zone_is_initialized(zone));
@@ -572,28 +574,20 @@ static inline void __free_one_page(struct page *page,
 		return;

 	VM_BUG_ON(migratetype == -1);
-	if (is_migrate_isolate(migratetype)) {
-		/*
-		 * We restrict max order of merging to prevent merge
-		 * between freepages on isolate pageblock and normal
-		 * pageblock. Without this, pageblock isolation
-		 * could cause incorrect freepage accounting.
-		 */
-		max_order = min(MAX_ORDER, pageblock_order + 1);
-	} else {
+	if (likely(!is_migrate_isolate(migratetype)))
 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
-	}

-	page_idx = pfn & ((1 << max_order) - 1);
+	page_idx = pfn & ((1 << MAX_ORDER) - 1);

 	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
 	VM_BUG_ON_PAGE(bad_range(zone, page), page);

+continue_merging:
 	while (order < max_order - 1) {
 		buddy_idx = __find_buddy_index(page_idx, order);
 		buddy = page + (buddy_idx - page_idx);
 		if (!page_is_buddy(page, buddy, order))
-			break;
+			goto done_merging;
 		/*
 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
 		 * merge with it and move up one order.
@@ -608,6 +602,8 @@ static inline void __free_one_page(struct page *page,
 		} else {
 			list_del(&buddy->lru);
 			zone->free_area[order].nr_free--;
+			if (is_migrate_cma(migratetype))
+				zone->free_area[order].nr_free_cma--;
 			rmv_page_order(buddy);
 		}
 		combined_idx = buddy_idx & page_idx;
@@ -615,6 +611,33 @@ static inline void __free_one_page(struct page *page,
 		page_idx = combined_idx;
 		order++;
 	}
+	if (max_order < MAX_ORDER) {
+		/* If we are here, it means order is >= pageblock_order.
+		 * We want to prevent merge between freepages on isolate
+		 * pageblock and normal pageblock. Without this, pageblock
+		 * isolation could cause incorrect freepage or CMA accounting.
+		 *
+		 * We don't want to hit this code for the more frequent
+		 * low-order merging.
+		 */
+		if (unlikely(has_isolate_pageblock(zone))) {
+			int buddy_mt;
+
+			buddy_idx = __find_buddy_index(page_idx, order);
+			buddy = page + (buddy_idx - page_idx);
+			buddy_mt = get_pageblock_migratetype(buddy);
+
+			if (migratetype != buddy_mt
+					&& (is_migrate_isolate(migratetype) ||
+						is_migrate_isolate(buddy_mt)))
+				goto done_merging;
+		}
+		max_order++;
+		goto continue_merging;
+	}
+
+done_merging:
 	set_page_order(page, order);

 	/*
@@ -641,6 +664,8 @@ static inline void __free_one_page(struct page *page,
 	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
 out:
 	zone->free_area[order].nr_free++;
+	if (is_migrate_cma(migratetype))
+		zone->free_area[order].nr_free_cma++;
 }

 static inline int free_pages_check(struct page *page)
```
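To make the intent of the reworked `__free_one_page()` merge path concrete, here is a distilled, userspace-only model of the extra check it adds past `pageblock_order`. The `toy_mt` enum, `is_isolate()`, and `may_merge_past_pageblock()` names are stand-ins invented for this sketch, not kernel symbols; only the condition itself is lifted from the hunk above.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's migratetypes; the values are arbitrary. */
enum toy_mt { MT_UNMOVABLE, MT_MOVABLE, MT_CMA, MT_ISOLATE };

static bool is_isolate(enum toy_mt mt) { return mt == MT_ISOLATE; }

/*
 * Models the check __free_one_page() now performs before merging past
 * pageblock_order: a merge may only cross a pageblock boundary if it
 * does not join an isolated pageblock with a non-isolated one, because
 * that would mix up the freepage/CMA counters of the two blocks.
 */
static bool may_merge_past_pageblock(enum toy_mt mt, enum toy_mt buddy_mt,
				     bool zone_has_isolate_pageblock)
{
	if (!zone_has_isolate_pageblock)
		return true;	/* fast path: nothing isolated in the zone */
	if (mt != buddy_mt && (is_isolate(mt) || is_isolate(buddy_mt)))
		return false;	/* would merge isolated with non-isolated */
	return true;
}

int main(void)
{
	printf("movable + movable : %d\n",
	       may_merge_past_pageblock(MT_MOVABLE, MT_MOVABLE, true));
	printf("movable + isolate : %d\n",
	       may_merge_past_pageblock(MT_MOVABLE, MT_ISOLATE, true));
	printf("isolate + isolate : %d\n",
	       may_merge_past_pageblock(MT_ISOLATE, MT_ISOLATE, true));
	return 0;
}
```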
mm/page_alloc.c (continued)

```diff
@@ -903,6 +928,8 @@ static inline void expand(struct zone *zone, struct page *page,
 #endif
 		list_add(&page[size].lru, &area->free_list[migratetype]);
 		area->nr_free++;
+		if (is_migrate_cma(migratetype))
+			area->nr_free_cma++;
 		set_page_order(&page[size], high);
 	}
 }
@@ -983,6 +1010,8 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 		list_del(&page->lru);
 		rmv_page_order(page);
 		area->nr_free--;
+		if (is_migrate_cma(migratetype))
+			area->nr_free_cma--;
 		expand(zone, page, order, current_order, area, migratetype);
 		set_freepage_migratetype(page, migratetype);
 		return page;
@@ -1032,7 +1061,7 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
  */
 int move_freepages(struct zone *zone,
 			  struct page *start_page, struct page *end_page,
-			  int migratetype)
+			  int migratetype, int old_mt)
 {
 	struct page *page;
 	unsigned long order;
@@ -1066,6 +1095,12 @@ int move_freepages(struct zone *zone,
 		order = page_order(page);
 		list_move(&page->lru,
 			  &zone->free_area[order].free_list[migratetype]);
+
+		if (is_migrate_cma(migratetype))
+			zone->free_area[order].nr_free_cma++;
+		else if (is_migrate_cma(old_mt))
+			zone->free_area[order].nr_free_cma--;
+
 		set_freepage_migratetype(page, migratetype);
 		page += 1 << order;
 		pages_moved += 1 << order;
@@ -1075,7 +1110,7 @@ int move_freepages(struct zone *zone,
 }

 int move_freepages_block(struct zone *zone, struct page *page,
-				int migratetype)
+				int migratetype, int old_mt)
 {
 	unsigned long start_pfn, end_pfn;
 	struct page *start_page, *end_page;
@@ -1092,7 +1127,7 @@ int move_freepages_block(struct zone *zone, struct page *page,
 	if (!zone_spans_pfn(zone, end_pfn))
 		return 0;

-	return move_freepages(zone, start_page, end_page, migratetype);
+	return move_freepages(zone, start_page, end_page, migratetype, old_mt);
 }

 static void change_pageblock_range(struct page *pageblock_page,
@@ -1138,7 +1173,8 @@ static void try_to_steal_freepages(struct zone *zone, struct page *page,
 	    page_group_by_mobility_disabled) {
 		int pages;

-		pages = move_freepages_block(zone, page, start_type);
+		pages = move_freepages_block(zone, page,
+						start_type, 0);

 		/* Claim the whole block if over half of it is free */
 		if (pages >= (1 << (pageblock_order-1)) ||
@@ -1175,6 +1211,8 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 			page = list_entry(area->free_list[migratetype].next,
 					struct page, lru);
 			area->nr_free--;
+			if (is_migrate_cma(migratetype))
+				area->nr_free_cma--;

 			try_to_steal_freepages(zone, page, start_migratetype,
 								migratetype);
@@ -1586,6 +1624,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	/* Remove page from free list */
 	list_del(&page->lru);
 	zone->free_area[order].nr_free--;
+	if (is_migrate_cma(mt))
+		zone->free_area[order].nr_free_cma--;
 	rmv_page_order(page);

 	/* Set the pageblock if the isolated page is at least a pageblock */
@@ -1812,7 +1852,6 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
 	/* free_pages my go negative - that's OK */
 	long min = mark;
 	int o;
-	long free_cma = 0;

 	free_pages -= (1 << order) - 1;
 	if (alloc_flags & ALLOC_HIGH)
@@ -1822,14 +1861,23 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
 #ifdef CONFIG_CMA
 	/* If allocation can't use CMA areas don't use free CMA pages */
 	if (!(alloc_flags & ALLOC_CMA))
-		free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
+		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
 #endif

-	if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
+	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
 		return false;
 	for (o = 0; o < order; o++) {
 		/* At the next order, this order's pages become unavailable */
-		free_pages -= z->free_area[o].nr_free << o;
+		if (!(alloc_flags & ALLOC_CMA)) {
+			long free = z->free_area[o].nr_free -
+					z->free_area[o].nr_free_cma;
+
+			if (free < 0)
+				free = 0;
+			free_pages -= free << o;
+		} else {
+			free_pages -= z->free_area[o].nr_free << o;
+		}

 		/* Require fewer higher order pages to be free */
 		min >>= min_free_order_shift;
```
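The `__zone_watermark_ok()` hunk is where the new per-order counter pays off: when `ALLOC_CMA` is not set, each lower order's contribution is reduced by that order's own CMA pages instead of only subtracting the zone-wide `NR_FREE_CMA_PAGES` once. Below is a rough userspace sketch of that loop under stated assumptions: `toy_watermark_ok`, `toy_free_area`, `TOY_MAX_ORDER`, and all the numbers are made up for illustration, and the lowmem reserves and `ALLOC_HIGH`/`ALLOC_HARDER` adjustments of the real function are omitted.

```c
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_ORDER		4
#define MIN_FREE_ORDER_SHIFT	1

/* Per-order counters, mirroring the two fields in struct free_area. */
struct toy_free_area {
	long nr_free;		/* free pages of this order		*/
	long nr_free_cma;	/* subset sitting on the CMA free list	*/
};

/*
 * Simplified version of the reworked watermark loop: pages below the
 * requested order are treated as unavailable, and when the allocation
 * may not dip into CMA, each order is discounted by its own CMA pages.
 */
static bool toy_watermark_ok(const struct toy_free_area area[TOY_MAX_ORDER],
			     long free_pages, long mark,
			     unsigned int order, bool alloc_cma)
{
	long min = mark;
	unsigned int o;

	free_pages -= (1L << order) - 1;
	if (free_pages <= min)
		return false;

	for (o = 0; o < order; o++) {
		long free = area[o].nr_free;

		if (!alloc_cma) {
			free -= area[o].nr_free_cma;
			if (free < 0)
				free = 0;
		}
		free_pages -= free << o;

		/* Require fewer higher-order pages to be free. */
		min >>= MIN_FREE_ORDER_SHIFT;
		if (free_pages <= min)
			return false;
	}
	return true;
}

int main(void)
{
	/* A made-up zone where most low-order free memory is CMA. */
	struct toy_free_area area[TOY_MAX_ORDER] = {
		{ .nr_free = 64, .nr_free_cma = 60 },
		{ .nr_free = 16, .nr_free_cma = 14 },
		{ .nr_free = 4,  .nr_free_cma = 4  },
		{ .nr_free = 2,  .nr_free_cma = 0  },
	};
	long total_free = 64 + 16 * 2 + 4 * 4 + 2 * 8;	/* 128 */
	long total_cma  = 60 + 14 * 2 + 4 * 4;		/* 104 */

	/* ALLOC_CMA set: every free page counts. */
	printf("CMA allowed:   %d\n",
	       toy_watermark_ok(area, total_free, 8, 3, true));
	/*
	 * ALLOC_CMA clear: the zone-wide CMA total is subtracted up front
	 * (as before), and the loop no longer re-subtracts those same CMA
	 * pages order by order.
	 */
	printf("CMA forbidden: %d\n",
	       toy_watermark_ok(area, total_free - total_cma, 8, 3, false));
	return 0;
}
```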
mm/page_alloc.c (continued)

```diff
@@ -4186,7 +4234,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)

 			set_pageblock_migratetype(page, MIGRATE_RESERVE);
 			move_freepages_block(zone, page,
-						MIGRATE_RESERVE);
+						MIGRATE_RESERVE, 0);
 			reserve--;
 			continue;
 		}
@@ -4204,7 +4252,8 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 		 */
 		if (block_migratetype == MIGRATE_RESERVE) {
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-			move_freepages_block(zone, page, MIGRATE_MOVABLE);
+			move_freepages_block(zone, page,
+						MIGRATE_MOVABLE, 0);
 		}
 	}
 }
@@ -4282,6 +4331,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 	for_each_migratetype_order(order, t) {
 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
 		zone->free_area[order].nr_free = 0;
+		zone->free_area[order].nr_free_cma = 0;
 	}
 }
@@ -6668,6 +6718,8 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 		list_del(&page->lru);
 		rmv_page_order(page);
 		zone->free_area[order].nr_free--;
+		if (is_migrate_cma(get_pageblock_migratetype(page)))
+			zone->free_area[order].nr_free_cma--;
 		for (i = 0; i < (1 << order); i++)
 			SetPageReserved((page+i));
 		pfn += (1 << order);
```

mm/page_isolation.c +4 −2

```diff
@@ -61,7 +61,8 @@ out:
 		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
 		zone->nr_isolate_pageblock++;
-		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
+		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
+						migratetype);

 		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
 	}
@@ -116,7 +117,8 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 	 * pageblock scanning for freepage moving.
 	 */
 	if (!isolated_page) {
-		nr_pages = move_freepages_block(zone, page, migratetype);
+		nr_pages = move_freepages_block(zone, page,
+						migratetype, 0);
 		__mod_zone_freepage_state(zone, nr_pages, migratetype);
 	}
 	set_pageblock_migratetype(page, migratetype);
```
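Taken together, the page_isolation.c callers suggest why a single `old_mt` argument is enough to keep `nr_free_cma` consistent: the isolate path (`set_migratetype_isolate()`) passes the block's previous migratetype, so CMA pages moved onto the isolate list stop being counted, while `unset_migratetype_isolate()` passes 0, which is presumably fine because the isolate list is never the CMA list. A toy round trip under those assumptions follows; `toy_move_freepages`, `toy_free_area`, and the `toy_mt` enum are stand-ins for this sketch, and only the increment/decrement rule is lifted from the `move_freepages()` hunk above.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum toy_mt { MT_UNMOVABLE, MT_MOVABLE, MT_CMA, MT_ISOLATE };

struct toy_free_area {
	long nr_free;
	long nr_free_cma;
};

static bool is_cma(enum toy_mt mt) { return mt == MT_CMA; }

/* The accounting rule move_freepages() applies to the pages it moves. */
static void toy_move_freepages(struct toy_free_area *area, long pages,
			       enum toy_mt new_mt, enum toy_mt old_mt)
{
	if (is_cma(new_mt))
		area->nr_free_cma += pages;
	else if (is_cma(old_mt))
		area->nr_free_cma -= pages;
	/* nr_free is untouched: the pages stay on some free list. */
}

int main(void)
{
	struct toy_free_area area = { .nr_free = 8, .nr_free_cma = 8 };

	/* Isolating a CMA pageblock: pages move to the MIGRATE_ISOLATE list,
	 * old_mt tells the callee they used to be counted as CMA. */
	toy_move_freepages(&area, 8, MT_ISOLATE, MT_CMA);
	assert(area.nr_free_cma == 0);

	/* Undoing isolation: back onto the CMA list; an old_mt of 0
	 * (MIGRATE_UNMOVABLE) works because the isolate list is not CMA. */
	toy_move_freepages(&area, 8, MT_CMA, MT_UNMOVABLE);
	assert(area.nr_free_cma == 8);

	printf("nr_free=%ld nr_free_cma=%ld\n", area.nr_free, area.nr_free_cma);
	return 0;
}
```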