--- zzzz-none-000/linux-3.10.107/mm/page_isolation.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/mm/page_isolation.c	2021-02-04 17:41:59.000000000 +0000
@@ -6,9 +6,11 @@
 #include <linux/page-isolation.h>
 #include <linux/pageblock-flags.h>
 #include <linux/memory.h>
+#include <linux/hugetlb.h>
 #include "internal.h"
 
-int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
+static int set_migratetype_isolate(struct page *page,
+				bool skip_hwpoisoned_pages)
 {
 	struct zone *zone;
 	unsigned long flags, pfn;
@@ -59,6 +61,7 @@
 		int migratetype = get_pageblock_migratetype(page);
 
 		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+		zone->nr_isolate_pageblock++;
 		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
 
 		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
@@ -66,24 +69,64 @@
 
 	spin_unlock_irqrestore(&zone->lock, flags);
 	if (!ret)
-		drain_all_pages();
+		drain_all_pages(zone);
 	return ret;
 }
 
-void unset_migratetype_isolate(struct page *page, unsigned migratetype)
+static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 {
 	struct zone *zone;
 	unsigned long flags, nr_pages;
+	struct page *isolated_page = NULL;
+	unsigned int order;
+	unsigned long page_idx, buddy_idx;
+	struct page *buddy;
 
 	zone = page_zone(page);
 	spin_lock_irqsave(&zone->lock, flags);
 	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
 		goto out;
-	nr_pages = move_freepages_block(zone, page, migratetype);
-	__mod_zone_freepage_state(zone, nr_pages, migratetype);
+
+	/*
+	 * Because freepage with more than pageblock_order on isolated
+	 * pageblock is restricted to merge due to freepage counting problem,
+	 * it is possible that there is free buddy page.
+	 * move_freepages_block() doesn't care of merge so we need other
+	 * approach in order to merge them. Isolation and free will make
+	 * these pages to be merged.
+	 */
+	if (PageBuddy(page)) {
+		order = page_order(page);
+		if (order >= pageblock_order) {
+			page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
+			buddy_idx = __find_buddy_index(page_idx, order);
+			buddy = page + (buddy_idx - page_idx);
+
+			if (pfn_valid_within(page_to_pfn(buddy)) &&
+			    !is_migrate_isolate_page(buddy)) {
+				__isolate_free_page(page, order);
+				kernel_map_pages(page, (1 << order), 1);
+				set_page_refcounted(page);
+				isolated_page = page;
+			}
+		}
+	}
+
+	/*
+	 * If we isolate freepage with more than pageblock_order, there
+	 * should be no freepage in the range, so we could avoid costly
+	 * pageblock scanning for freepage moving.
+	 */
+	if (!isolated_page) {
+		nr_pages = move_freepages_block(zone, page, migratetype);
+		__mod_zone_freepage_state(zone, nr_pages, migratetype);
+	}
 	set_pageblock_migratetype(page, migratetype);
+	zone->nr_isolate_pageblock--;
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
+	if (isolated_page)
+		__free_pages(isolated_page, order);
 }
 
 static inline struct page *
@@ -181,34 +224,16 @@
 			continue;
 		}
 		page = pfn_to_page(pfn);
-		if (PageBuddy(page)) {
+		if (PageBuddy(page))
 			/*
-			 * If race between isolatation and allocation happens,
-			 * some free pages could be in MIGRATE_MOVABLE list
-			 * although pageblock's migratation type of the page
-			 * is MIGRATE_ISOLATE. Catch it and move the page into
-			 * MIGRATE_ISOLATE list.
+			 * If the page is on a free list, it has to be on
+			 * the correct MIGRATE_ISOLATE freelist. There is no
+			 * simple way to verify that as VM_BUG_ON(), though.
 			 */
-			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
-				struct page *end_page;
-
-				end_page = page + (1 << page_order(page)) - 1;
-				move_freepages(page_zone(page), page, end_page,
-						MIGRATE_ISOLATE);
-			}
 			pfn += 1 << page_order(page);
-		}
-		else if (page_count(page) == 0 &&
-			get_freepage_migratetype(page) == MIGRATE_ISOLATE)
-			pfn += 1;
-		else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
-			/*
-			 * The HWPoisoned page may be not in buddy
-			 * system, and page_count() is not 0.
-			 */
+		else if (skip_hwpoisoned_pages && PageHWPoison(page))
+			/* A HWPoisoned page cannot be also PageBuddy */
 			pfn++;
-			continue;
-		}
 		else
 			break;
 	}
@@ -226,9 +251,9 @@
 	int ret;
 
 	/*
-	 * Note: pageblock_nr_page != MAX_ORDER. Then, chunks of free page
-	 * is not aligned to pageblock_nr_pages.
-	 * Then we just check pagetype fist.
+	 * Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages
+	 * are not aligned to pageblock_nr_pages.
+	 * Then we just check migratetype first.
 	 */
 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
 		page = __first_valid_page(pfn, pageblock_nr_pages);
@@ -238,7 +263,7 @@
 	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
 	if ((pfn < end_pfn) || !page)
 		return -EBUSY;
-	/* Check all pages are free or Marked as ISOLATED */
+	/* Check all pages are free or marked as ISOLATED */
 	zone = page_zone(page);
 	spin_lock_irqsave(&zone->lock, flags);
 	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
@@ -252,6 +277,19 @@
 {
 	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
 
+	/*
+	 * TODO: allocate a destination hugepage from a nearest neighbor node,
+	 * accordance with memory policy of the user process if possible. For
+	 * now as a simple work-around, we use the next node for destination.
+	 */
+	if (PageHuge(page)) {
+		int node = next_online_node(page_to_nid(page));
+		if (node == MAX_NUMNODES)
+			node = first_online_node;
+		return alloc_huge_page_node(page_hstate(compound_head(page)),
+					    node);
+	}
+
 	if (PageHighMem(page))
 		gfp_mask |= __GFP_HIGHMEM;
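Note on the unset_migratetype_isolate() hunk: the merge fix relies on the buddy allocator's index arithmetic. Within a MAX_ORDER-aligned group of pages, the buddy of a free block of order N is found by flipping bit N of the block's index, which is what the kernel's __find_buddy_index() computes. The following standalone sketch (plain userspace C, not part of the patch; find_buddy_index is an illustrative rename of the kernel helper) demonstrates the calculation:

#include <assert.h>
#include <stdio.h>

/* Same arithmetic as the kernel's __find_buddy_index(): flip bit 'order'. */
static unsigned long find_buddy_index(unsigned long page_idx,
				      unsigned int order)
{
	return page_idx ^ (1UL << order);
}

int main(void)
{
	/*
	 * A free block of 2^5 = 32 pages at index 64 has its buddy at
	 * index 96 and vice versa; the pair merges into one order-6
	 * block starting at index 64.
	 */
	assert(find_buddy_index(64, 5) == 96);
	assert(find_buddy_index(96, 5) == 64);
	printf("buddy of 64 at order 5: %lu\n", find_buddy_index(64, 5));
	return 0;
}

Because, per the patch comment, merging is restricted for free blocks of pageblock_order and above while a pageblock is isolated, unset_migratetype_isolate() has to perform this buddy check itself and re-free the block so the deferred merge can happen.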
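Note on the __test_page_isolated_in_pageblock() hunk: after the cleanup, the scan advances by 1 << order past each free buddy block, by one pfn past each hwpoisoned page, and fails on anything else. A standalone userspace sketch of that loop (struct fake_page and range_isolated are illustrative stand-ins, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

#define NPAGES 16

/* Illustrative stand-in for the page state the kernel reads under zone->lock. */
struct fake_page {
	bool buddy;		/* head of a free buddy block */
	unsigned int order;	/* order of that block, if buddy */
	bool hwpoison;
};

/*
 * Mirrors the post-patch loop: free buddy blocks are skipped in one jump
 * of 1 << order pages, hwpoisoned pages one at a time; any other page
 * means the range is still in use and therefore not isolated.
 */
static bool range_isolated(const struct fake_page *pages, bool skip_hwpoison)
{
	unsigned long pfn = 0;

	while (pfn < NPAGES) {
		if (pages[pfn].buddy)
			pfn += 1UL << pages[pfn].order;
		else if (skip_hwpoison && pages[pfn].hwpoison)
			pfn++;
		else
			return false;
	}
	return true;
}

int main(void)
{
	struct fake_page pages[NPAGES] = { { false } };

	pages[0] = (struct fake_page){ .buddy = true, .order = 3 };  /* pfns 0..7 */
	pages[8] = (struct fake_page){ .buddy = true, .order = 2 };  /* pfns 8..11 */
	pages[12].hwpoison = true;
	pages[13].hwpoison = true;
	pages[14] = (struct fake_page){ .buddy = true, .order = 1 }; /* pfns 14..15 */
	printf("isolated: %d\n", range_isolated(pages, true));      /* prints 1 */
	return 0;
}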
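Note on the alloc_migrate_target() hunk: the destination node for a hugepage is the next online node after the source page's node, wrapping to the first online node once next_online_node() runs off the end of the node map (MAX_NUMNODES). A standalone sketch of that wrap-around (the online[] array and the *_sketch helpers are illustrative stand-ins for the kernel's online-node bitmap, not kernel APIs):

#include <stdio.h>

#define MAX_NUMNODES 8

/* Illustrative stand-in for the kernel's online-node bitmap. */
static const int online[MAX_NUMNODES] = { 1, 0, 1, 1, 0, 0, 0, 0 };

/* Next online node strictly after 'nid', or MAX_NUMNODES if there is none. */
static int next_online_node_sketch(int nid)
{
	for (int n = nid + 1; n < MAX_NUMNODES; n++)
		if (online[n])
			return n;
	return MAX_NUMNODES;
}

static int first_online_node_sketch(void)
{
	return next_online_node_sketch(-1);
}

int main(void)
{
	/* Mirrors the PageHuge() branch: step to the next online node,
	 * wrapping to the first one past the end of the node map. */
	for (int src = 0; src < MAX_NUMNODES; src++) {
		int node;

		if (!online[src])
			continue;
		node = next_online_node_sketch(src);
		if (node == MAX_NUMNODES)
			node = first_online_node_sketch();
		printf("hugepage on node %d -> allocate on node %d\n",
		       src, node);
	}
	return 0;
}

As the TODO in the patch comment says, this round-robin choice is a stated simplification; it does not consult the owning task's memory policy or pick the nearest node.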