Lines matching full:cc — uses of the compact_control variable cc in mm/compaction.c. Each entry lists the file line number, the matched source line, the enclosing function, and whether cc is a function argument or a local variable there.

220 static inline bool isolation_suitable(struct compact_control *cc,  in isolation_suitable()  argument
223 if (cc->ignore_skip_hint) in isolation_suitable()
451 static bool test_and_set_skip(struct compact_control *cc, struct page *page) in test_and_set_skip() argument
456 if (cc->ignore_skip_hint) in test_and_set_skip()
460 if (!skip && !cc->no_set_skip_hint) in test_and_set_skip()
466 static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) in update_cached_migrate() argument
468 struct zone *zone = cc->zone; in update_cached_migrate()
471 if (cc->no_set_skip_hint) in update_cached_migrate()
479 if (cc->mode != MIGRATE_ASYNC && in update_cached_migrate()
488 static void update_pageblock_skip(struct compact_control *cc, in update_pageblock_skip() argument
491 struct zone *zone = cc->zone; in update_pageblock_skip()
493 if (cc->no_set_skip_hint) in update_pageblock_skip()
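All four skip-hint helpers above gate on the same pair of compact_control flags: ignore_skip_hint makes the scanner look at pageblocks regardless of their skip bit, and no_set_skip_hint prevents it from recording new hints. A minimal stand-alone model of those gates, using stand-in types rather than the kernel structures:

#include <stdbool.h>

/* Simplified stand-ins, not the kernel definitions. */
struct page_model {
        bool skip_bit;                  /* models the pageblock skip bit */
};

struct cc_skip_model {
        bool ignore_skip_hint;          /* scan pageblocks even if the skip bit is set */
        bool no_set_skip_hint;          /* never record new skip hints or cached pfns */
};

/* Same gate as isolation_suitable(): honour the skip bit unless told to ignore it. */
static bool isolation_suitable_model(const struct cc_skip_model *cc,
                                     const struct page_model *page)
{
        if (cc->ignore_skip_hint)
                return true;
        return !page->skip_bit;
}

/* Same gates as test_and_set_skip(): report the old bit, set it only if allowed. */
static bool test_and_set_skip_model(const struct cc_skip_model *cc,
                                    struct page_model *page)
{
        bool skip;

        if (cc->ignore_skip_hint)
                return false;
        skip = page->skip_bit;
        if (!skip && !cc->no_set_skip_hint)
                page->skip_bit = true;
        return skip;
}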
502 static inline bool isolation_suitable(struct compact_control *cc, in isolation_suitable() argument
513 static inline void update_pageblock_skip(struct compact_control *cc, in update_pageblock_skip() argument
518 static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) in update_cached_migrate() argument
522 static bool test_and_set_skip(struct compact_control *cc, struct page *page) in test_and_set_skip() argument
538 struct compact_control *cc) in compact_lock_irqsave() argument
542 if (cc->mode == MIGRATE_ASYNC && !cc->contended) { in compact_lock_irqsave()
546 cc->contended = true; in compact_lock_irqsave()
566 unsigned long flags, bool *locked, struct compact_control *cc) in compact_unlock_should_abort() argument
574 cc->contended = true; in compact_unlock_should_abort()
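compact_lock_irqsave() and compact_unlock_should_abort() implement the contention policy visible at file lines 538-574: asynchronous compaction first trylocks and, if that fails, records cc->contended so the scan can bail out rather than block. A simplified model of the lock-side decision; the lock primitives are stand-ins, not kernel APIs:

#include <stdbool.h>

/* Stand-ins for the spinlock primitives. */
static bool lock_try(void)  { return false; }   /* pretend the trylock failed */
static void lock_wait(void) { }                 /* pretend to take the lock */

struct cc_lock_model {
        bool mode_async;     /* models cc->mode == MIGRATE_ASYNC */
        bool contended;      /* models cc->contended */
};

/* Mirrors the matched lines: async compaction records contention instead of
 * silently sleeping on the lock. */
static bool compact_lock_model(struct cc_lock_model *cc)
{
        if (cc->mode_async && !cc->contended) {
                if (lock_try())
                        return true;
                cc->contended = true;    /* the scan loops check this and bail out */
        }
        lock_wait();
        return true;
}

The unlock-side helper plays the matching role: it drops the lock and, per the match at file line 574, flags cc->contended when the caller should abort the scan.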
588 static unsigned long isolate_freepages_block(struct compact_control *cc, in isolate_freepages_block() argument
618 && compact_unlock_should_abort(&cc->zone->lock, flags, in isolate_freepages_block()
619 &locked, cc)) in isolate_freepages_block()
647 locked = compact_lock_irqsave(&cc->zone->lock, in isolate_freepages_block()
648 &flags, cc); in isolate_freepages_block()
664 cc->nr_freepages += isolated; in isolate_freepages_block()
667 if (!strict && cc->nr_migratepages <= cc->nr_freepages) { in isolate_freepages_block()
683 spin_unlock_irqrestore(&cc->zone->lock, flags); in isolate_freepages_block()
705 cc->total_free_scanned += nr_scanned; in isolate_freepages_block()
713 * @cc: Compaction control structure.
719 * undo its actions and return zero. cc->freepages[] are empty.
723 * a free page). cc->freepages[] contain free pages isolated.
726 isolate_freepages_range(struct compact_control *cc, in isolate_freepages_range() argument
733 INIT_LIST_HEAD(&cc->freepages[order]); in isolate_freepages_range()
737 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_freepages_range()
738 block_start_pfn = cc->zone->zone_start_pfn; in isolate_freepages_range()
760 block_end_pfn, cc->zone)) in isolate_freepages_range()
763 isolated = isolate_freepages_block(cc, &isolate_start_pfn, in isolate_freepages_range()
764 block_end_pfn, cc->freepages, 0, true); in isolate_freepages_range()
783 release_free_list(cc->freepages); in isolate_freepages_range()
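The kernel-doc fragments at file lines 713-723 state this function's contract: on any failure it undoes its work, returning zero with cc->freepages[] empty; otherwise every free page in the range ends up isolated. A stand-alone restatement of that all-or-nothing loop, with block granularity and helpers as stand-ins:

/* Fake per-block result: blocks 0-3 of each group of five isolate fully,
 * the fifth contains a non-free page. */
static unsigned long isolate_block_strict_model(unsigned long block)
{
        return (block % 5) == 4 ? 0 : 4;
}

static void release_free_list_model(unsigned long *isolated)
{
        *isolated = 0;                           /* models release_free_list(cc->freepages) */
}

static unsigned long isolate_freepages_range_model(unsigned long nblocks)
{
        unsigned long isolated = 0;

        for (unsigned long b = 0; b < nblocks; b++) {
                unsigned long got = isolate_block_strict_model(b);

                if (!got) {                      /* a page in this block wasn't free */
                        release_free_list_model(&isolated);
                        return 0;                /* caller sees: undone, nothing isolated */
                }
                isolated += got;
        }
        return isolated;                         /* the kernel returns the pfn past the range */
}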
792 static bool too_many_isolated(struct compact_control *cc) in too_many_isolated() argument
794 pg_data_t *pgdat = cc->zone->zone_pgdat; in too_many_isolated()
812 if (cc->gfp_mask & __GFP_FS) { in too_many_isolated()
853 * @cc: Compaction control structure.
862 * cc->migrate_pfn will contain the next pfn to scan.
864 * The pages are isolated on cc->migratepages list (not required to be empty),
865 * and cc->nr_migratepages is updated accordingly.
868 isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, in isolate_migratepages_block() argument
871 pg_data_t *pgdat = cc->zone->zone_pgdat; in isolate_migratepages_block()
885 cc->migrate_pfn = low_pfn; in isolate_migratepages_block()
892 while (unlikely(too_many_isolated(cc))) { in isolate_migratepages_block()
894 if (cc->nr_migratepages) in isolate_migratepages_block()
898 if (cc->mode == MIGRATE_ASYNC) in isolate_migratepages_block()
909 if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) { in isolate_migratepages_block()
911 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
937 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
952 cc->contended = true; in isolate_migratepages_block()
972 low_pfn == cc->zone->zone_start_pfn)) { in isolate_migratepages_block()
973 if (!isolation_suitable(cc, page)) { in isolate_migratepages_block()
987 if (!cc->alloc_contig) { in isolate_migratepages_block()
1002 ret = isolate_or_dissolve_huge_page(page, &cc->migratepages); in isolate_migratepages_block()
1020 * on the cc->migratepages list. in isolate_migratepages_block()
1064 if (PageCompound(page) && !cc->alloc_contig) { in isolate_migratepages_block()
1068 if (skip_isolation_on_order(order, cc->order)) { in isolate_migratepages_block()
1125 if (!(cc->gfp_mask & __GFP_FS) && mapping) in isolate_migratepages_block()
1197 compact_lock_irqsave(&lruvec->lru_lock, &flags, cc); in isolate_migratepages_block()
1209 if (test_and_set_skip(cc, valid_page) && in isolate_migratepages_block()
1210 !cc->finish_pageblock) { in isolate_migratepages_block()
1220 cc->order) && in isolate_migratepages_block()
1221 !cc->alloc_contig)) { in isolate_migratepages_block()
1240 list_add(&folio->lru, &cc->migratepages); in isolate_migratepages_block()
1242 cc->nr_migratepages += folio_nr_pages(folio); in isolate_migratepages_block()
1252 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX && in isolate_migratepages_block()
1253 !cc->finish_pageblock && !cc->contended) { in isolate_migratepages_block()
1274 * instead of migrating, as we cannot form the cc->order buddy in isolate_migratepages_block()
1282 putback_movable_pages(&cc->migratepages); in isolate_migratepages_block()
1283 cc->nr_migratepages = 0; in isolate_migratepages_block()
1293 next_skip_pfn += 1UL << cc->order; in isolate_migratepages_block()
1325 if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) { in isolate_migratepages_block()
1326 if (!cc->no_set_skip_hint && valid_page && !skip_updated) in isolate_migratepages_block()
1328 update_cached_migrate(cc, low_pfn); in isolate_migratepages_block()
1335 cc->total_migrate_scanned += nr_scanned; in isolate_migratepages_block()
1339 cc->migrate_pfn = low_pfn; in isolate_migratepages_block()
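The comment fragments at file lines 853-865 spell out the contract of this scanner: isolated folios are added to cc->migratepages (which need not start empty), cc->nr_migratepages tracks the base pages, and cc->migrate_pfn ends up at the next pfn to scan. The match at lines 1252-1253 also shows the early-exit rule. A compact stand-alone model of that loop skeleton; everything not present in the matches, including the cluster size, is an assumption:

#include <stdbool.h>

#define CLUSTER_MAX_MODEL 32U   /* stands in for COMPACT_CLUSTER_MAX */

struct cc_iso_model {
        unsigned long migrate_pfn;      /* models cc->migrate_pfn: next pfn to scan */
        unsigned int nr_migratepages;   /* models cc->nr_migratepages */
        bool contended;                 /* models cc->contended */
        bool finish_pageblock;          /* models cc->finish_pageblock: scan the whole block */
};

/* Exit rule seen in the matches: keep accumulating folios until the cluster
 * limit is hit, unless the whole pageblock must be finished or a lock is
 * already contended. */
static void isolate_block_exit_model(struct cc_iso_model *cc,
                                     unsigned long low_pfn, unsigned long end_pfn)
{
        for (; low_pfn < end_pfn; low_pfn++) {
                /* page checks and folio isolation onto cc->migratepages elided */
                cc->nr_migratepages++;   /* the kernel adds folio_nr_pages(folio) */
                if (cc->nr_migratepages >= CLUSTER_MAX_MODEL &&
                    !cc->finish_pageblock && !cc->contended) {
                        low_pfn++;
                        break;           /* enough work batched for one migrate_pages() call */
                }
        }
        cc->migrate_pfn = low_pfn;       /* callers resume scanning from here */
}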
1346 * @cc: Compaction control structure.
1354 isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, in isolate_migratepages_range() argument
1363 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_migratepages_range()
1364 block_start_pfn = cc->zone->zone_start_pfn; in isolate_migratepages_range()
1374 block_end_pfn, cc->zone)) in isolate_migratepages_range()
1377 ret = isolate_migratepages_block(cc, pfn, block_end_pfn, in isolate_migratepages_range()
1383 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX) in isolate_migratepages_range()
1393 static bool suitable_migration_source(struct compact_control *cc, in suitable_migration_source() argument
1401 if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction) in suitable_migration_source()
1406 if (cc->migratetype == MIGRATE_MOVABLE) in suitable_migration_source()
1409 return block_mt == cc->migratetype; in suitable_migration_source()
1413 static bool suitable_migration_target(struct compact_control *cc, in suitable_migration_target() argument
1418 int order = cc->order > 0 ? cc->order : pageblock_order; in suitable_migration_target()
1429 if (cc->ignore_block_suitable) in suitable_migration_target()
1441 freelist_scan_limit(struct compact_control *cc) in freelist_scan_limit() argument
1445 return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1; in freelist_scan_limit()
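freelist_scan_limit() caps how many free-list entries the fast scanners will inspect, halving the budget with each consecutive fast-search failure but never dropping below one. Reconstructed around the matched return statement; the clamp to the word size (BITS_PER_LONG - 1 in mainline) is stated as an assumption:

#define COMPACT_CLUSTER_MAX_M 32U   /* assumed value of COMPACT_CLUSTER_MAX */

static unsigned int freelist_scan_limit_model(unsigned int fast_search_fail)
{
        unsigned int shift = sizeof(long) * 8 - 1;                    /* assumed clamp */
        unsigned int s = fast_search_fail < shift ? fast_search_fail : shift;

        return (COMPACT_CLUSTER_MAX_M >> s) + 1;                      /* 33, 17, 9, 5, 3, 2, then 1 */
}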
1452 static inline bool compact_scanners_met(struct compact_control *cc) in compact_scanners_met() argument
1454 return (cc->free_pfn >> pageblock_order) in compact_scanners_met()
1455 <= (cc->migrate_pfn >> pageblock_order); in compact_scanners_met()
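compact_scanners_met() is visible almost in full at file lines 1452-1455: the scanners have met once the free scanner's pageblock index is at or below the migrate scanner's. Restated as a stand-alone helper with a fixed pageblock order; the real pageblock_order is configuration dependent:

#include <stdbool.h>

#define PAGEBLOCK_ORDER_M 9   /* assumed: typical x86-64 value */

static bool compact_scanners_met_model(unsigned long free_pfn, unsigned long migrate_pfn)
{
        return (free_pfn >> PAGEBLOCK_ORDER_M) <= (migrate_pfn >> PAGEBLOCK_ORDER_M);
}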
1492 fast_isolate_around(struct compact_control *cc, unsigned long pfn) in fast_isolate_around() argument
1498 if (cc->nr_freepages >= cc->nr_migratepages) in fast_isolate_around()
1502 if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC) in fast_isolate_around()
1506 start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn); in fast_isolate_around()
1507 end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)); in fast_isolate_around()
1509 page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone); in fast_isolate_around()
1513 isolate_freepages_block(cc, &start_pfn, end_pfn, cc->freepages, 1, false); in fast_isolate_around()
1516 if (start_pfn == end_pfn && !cc->no_set_skip_hint) in fast_isolate_around()
1521 static int next_search_order(struct compact_control *cc, int order) in next_search_order() argument
1525 order = cc->order - 1; in next_search_order()
1528 if (order == cc->search_order) { in next_search_order()
1529 cc->search_order--; in next_search_order()
1530 if (cc->search_order < 0) in next_search_order()
1531 cc->search_order = cc->order - 1; in next_search_order()
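next_search_order() round-robins through the free-list orders below cc->order; the wrap-around handling is what the matches at file lines 1525-1531 show. A reconstruction with the non-matching glue lines filled in as assumptions:

struct cc_search_model {
        int order;          /* models cc->order */
        int search_order;   /* models cc->search_order */
};

static int next_search_order_model(struct cc_search_model *cc, int order)
{
        order--;
        if (order < 0)
                order = cc->order - 1;

        if (order == cc->search_order) {         /* wrapped around to where we started */
                cc->search_order--;
                if (cc->search_order < 0)
                        cc->search_order = cc->order - 1;
                return -1;                       /* tells the caller to end this pass */
        }
        return order;
}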
1538 static void fast_isolate_freepages(struct compact_control *cc) in fast_isolate_freepages() argument
1540 unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1); in fast_isolate_freepages()
1550 if (cc->order <= 0) in fast_isolate_freepages()
1557 if (cc->free_pfn >= cc->zone->compact_init_free_pfn) { in fast_isolate_freepages()
1566 distance = (cc->free_pfn - cc->migrate_pfn); in fast_isolate_freepages()
1567 low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2)); in fast_isolate_freepages()
1568 min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1)); in fast_isolate_freepages()
1577 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); in fast_isolate_freepages()
1579 for (order = cc->search_order; in fast_isolate_freepages()
1581 order = next_search_order(cc, order)) { in fast_isolate_freepages()
1582 struct free_area *area = &cc->zone->free_area[order]; in fast_isolate_freepages()
1592 spin_lock_irqsave(&cc->zone->lock, flags); in fast_isolate_freepages()
1603 cc->zone->zone_start_pfn); in fast_isolate_freepages()
1606 cc->fast_search_fail = 0; in fast_isolate_freepages()
1607 cc->search_order = order; in fast_isolate_freepages()
1641 cc->nr_freepages += nr_isolated; in fast_isolate_freepages()
1642 list_add_tail(&page->lru, &cc->freepages[order]); in fast_isolate_freepages()
1646 order = cc->search_order + 1; in fast_isolate_freepages()
1651 spin_unlock_irqrestore(&cc->zone->lock, flags); in fast_isolate_freepages()
1654 if (cc->nr_freepages >= cc->nr_migratepages) in fast_isolate_freepages()
1665 trace_mm_compaction_fast_isolate_freepages(min_pfn, cc->free_pfn, in fast_isolate_freepages()
1669 cc->fast_search_fail++; in fast_isolate_freepages()
1678 cc->free_pfn = highest; in fast_isolate_freepages()
1680 if (cc->direct_compaction && pfn_valid(min_pfn)) { in fast_isolate_freepages()
1683 zone_end_pfn(cc->zone)), in fast_isolate_freepages()
1684 cc->zone); in fast_isolate_freepages()
1685 if (page && !suitable_migration_target(cc, page)) in fast_isolate_freepages()
1688 cc->free_pfn = min_pfn; in fast_isolate_freepages()
1694 if (highest && highest >= cc->zone->compact_cached_free_pfn) { in fast_isolate_freepages()
1696 cc->zone->compact_cached_free_pfn = highest; in fast_isolate_freepages()
1699 cc->total_free_scanned += nr_scanned; in fast_isolate_freepages()
1704 fast_isolate_around(cc, low_pfn); in fast_isolate_freepages()
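The arithmetic at file lines 1566-1568 above bounds the fast free-page search: only free pages in roughly the last quarter of the span between the two scanners are preferred, with the last half as the absolute floor. A tiny restatement of those bounds, with pageblock rounding reduced to a mask and the pageblock size assumed:

#define PAGEBLOCK_PAGES_M (1UL << 9)            /* assumed pageblock size in pages */
#define PAGEBLOCK_MASK_M  (~(PAGEBLOCK_PAGES_M - 1))

struct fast_free_bounds_model {
        unsigned long low_pfn;   /* preferred lower bound (last quarter of the gap) */
        unsigned long min_pfn;   /* absolute lower bound (last half of the gap) */
};

static struct fast_free_bounds_model
fast_free_bounds(unsigned long free_pfn, unsigned long migrate_pfn)
{
        unsigned long distance = free_pfn - migrate_pfn;
        struct fast_free_bounds_model b = {
                .low_pfn = (free_pfn - (distance >> 2)) & PAGEBLOCK_MASK_M,
                .min_pfn = (free_pfn - (distance >> 1)) & PAGEBLOCK_MASK_M,
        };

        return b;
}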
1711 static void isolate_freepages(struct compact_control *cc) in isolate_freepages() argument
1713 struct zone *zone = cc->zone; in isolate_freepages()
1722 fast_isolate_freepages(cc); in isolate_freepages()
1723 if (cc->nr_freepages) in isolate_freepages()
1737 isolate_start_pfn = cc->free_pfn; in isolate_freepages()
1741 low_pfn = pageblock_end_pfn(cc->migrate_pfn); in isolate_freepages()
1742 stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1; in isolate_freepages()
1746 * pages on cc->migratepages. We stop searching if the migrate in isolate_freepages()
1775 if (!suitable_migration_target(cc, page)) in isolate_freepages()
1779 if (!isolation_suitable(cc, page)) in isolate_freepages()
1783 nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn, in isolate_freepages()
1784 block_end_pfn, cc->freepages, stride, false); in isolate_freepages()
1788 update_pageblock_skip(cc, page, block_start_pfn - in isolate_freepages()
1792 if (cc->nr_freepages >= cc->nr_migratepages) { in isolate_freepages()
1824 cc->free_pfn = isolate_start_pfn; in isolate_freepages()
1833 struct compact_control *cc = (struct compact_control *)data; in compaction_alloc_noprof() local
1843 if (!list_empty(&cc->freepages[start_order])) in compaction_alloc_noprof()
1850 isolate_freepages(cc); in compaction_alloc_noprof()
1855 freepage = list_first_entry(&cc->freepages[start_order], struct page, in compaction_alloc_noprof()
1865 list_add(&freepage[size].lru, &cc->freepages[start_order]); in compaction_alloc_noprof()
1873 cc->nr_freepages -= 1 << order; in compaction_alloc_noprof()
1874 cc->nr_migratepages -= 1 << order; in compaction_alloc_noprof()
1890 struct compact_control *cc = (struct compact_control *)data; in compaction_free() local
1896 list_add(&dst->lru, &cc->freepages[order]); in compaction_free()
1897 cc->nr_freepages += 1 << order; in compaction_free()
1899 cc->nr_migratepages += 1 << order; in compaction_free()
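compaction_alloc_noprof() and compaction_free() are the target-page callbacks handed to migrate_pages(); the matches at file lines 1873-1874 and 1896-1899 show the counter bookkeeping: handing out an order-N target debits both counters, and an unused target credits them back. Restated as a tiny model:

struct cc_alloc_model {
        long nr_freepages;      /* models cc->nr_freepages */
        long nr_migratepages;   /* models cc->nr_migratepages */
};

static void compaction_alloc_model(struct cc_alloc_model *cc, unsigned int order)
{
        cc->nr_freepages -= 1L << order;     /* target pages consumed */
        cc->nr_migratepages -= 1L << order;  /* that many source pages are now covered */
}

static void compaction_free_model(struct cc_alloc_model *cc, unsigned int order)
{
        cc->nr_freepages += 1L << order;     /* target returned unused */
        cc->nr_migratepages += 1L << order;
}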
1928 update_fast_start_pfn(struct compact_control *cc, unsigned long pfn) in update_fast_start_pfn() argument
1930 if (cc->fast_start_pfn == ULONG_MAX) in update_fast_start_pfn()
1933 if (!cc->fast_start_pfn) in update_fast_start_pfn()
1934 cc->fast_start_pfn = pfn; in update_fast_start_pfn()
1936 cc->fast_start_pfn = min(cc->fast_start_pfn, pfn); in update_fast_start_pfn()
1940 reinit_migrate_pfn(struct compact_control *cc) in reinit_migrate_pfn() argument
1942 if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX) in reinit_migrate_pfn()
1943 return cc->migrate_pfn; in reinit_migrate_pfn()
1945 cc->migrate_pfn = cc->fast_start_pfn; in reinit_migrate_pfn()
1946 cc->fast_start_pfn = ULONG_MAX; in reinit_migrate_pfn()
1948 return cc->migrate_pfn; in reinit_migrate_pfn()
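update_fast_start_pfn() and reinit_migrate_pfn() are visible nearly in full at file lines 1928-1948: ULONG_MAX marks the cache as disabled, zero as unset, otherwise the lowest pfn seen is kept, and the reinit helper rewinds cc->migrate_pfn to it exactly once. Reconstructed as a stand-alone pair:

#include <limits.h>

struct cc_fast_pfn_model {
        unsigned long fast_start_pfn;   /* models cc->fast_start_pfn */
        unsigned long migrate_pfn;      /* models cc->migrate_pfn */
};

static void update_fast_start_pfn_model(struct cc_fast_pfn_model *cc, unsigned long pfn)
{
        if (cc->fast_start_pfn == ULONG_MAX)
                return;                          /* caching disabled for this run */
        if (!cc->fast_start_pfn)
                cc->fast_start_pfn = pfn;        /* first candidate */
        if (pfn < cc->fast_start_pfn)
                cc->fast_start_pfn = pfn;        /* keep the minimum */
}

static unsigned long reinit_migrate_pfn_model(struct cc_fast_pfn_model *cc)
{
        if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
                return cc->migrate_pfn;          /* nothing cached */
        cc->migrate_pfn = cc->fast_start_pfn;
        cc->fast_start_pfn = ULONG_MAX;          /* consumed */
        return cc->migrate_pfn;
}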
1956 static unsigned long fast_find_migrateblock(struct compact_control *cc) in fast_find_migrateblock() argument
1958 unsigned int limit = freelist_scan_limit(cc); in fast_find_migrateblock()
1961 unsigned long pfn = cc->migrate_pfn; in fast_find_migrateblock()
1967 if (cc->ignore_skip_hint) in fast_find_migrateblock()
1974 if (cc->finish_pageblock) in fast_find_migrateblock()
1982 if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) in fast_find_migrateblock()
1990 if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) in fast_find_migrateblock()
1999 if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE) in fast_find_migrateblock()
2008 distance = (cc->free_pfn - cc->migrate_pfn) >> 1; in fast_find_migrateblock()
2009 if (cc->migrate_pfn != cc->zone->zone_start_pfn) in fast_find_migrateblock()
2011 high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance); in fast_find_migrateblock()
2013 for (order = cc->order - 1; in fast_find_migrateblock()
2016 struct free_area *area = &cc->zone->free_area[order]; in fast_find_migrateblock()
2024 spin_lock_irqsave(&cc->zone->lock, flags); in fast_find_migrateblock()
2048 update_fast_start_pfn(cc, free_pfn); in fast_find_migrateblock()
2050 if (pfn < cc->zone->zone_start_pfn) in fast_find_migrateblock()
2051 pfn = cc->zone->zone_start_pfn; in fast_find_migrateblock()
2052 cc->fast_search_fail = 0; in fast_find_migrateblock()
2057 spin_unlock_irqrestore(&cc->zone->lock, flags); in fast_find_migrateblock()
2060 cc->total_migrate_scanned += nr_scanned; in fast_find_migrateblock()
2067 cc->fast_search_fail++; in fast_find_migrateblock()
2068 pfn = reinit_migrate_pfn(cc); in fast_find_migrateblock()
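fast_find_migrateblock() applies the mirror-image heuristic to the migrate scanner: the matches at file lines 2008-2011 limit the search to the first half of the scanner gap, and lines 2052 and 2067-2068 show the success/failure bookkeeping. A sketch of just that window-and-fallback logic; the capped free-list walk is replaced by a stand-in helper:

#include <stdbool.h>

/* Stand-in for the capped walk of the zone free lists. */
static bool find_candidate_model(unsigned long lo, unsigned long hi, unsigned long *pfn)
{
        if (hi - lo < 2)
                return false;
        *pfn = lo + (hi - lo) / 2;   /* pretend a free page was found mid-window */
        return true;
}

static unsigned long fast_find_migrateblock_model(unsigned long migrate_pfn,
                                                  unsigned long free_pfn,
                                                  unsigned int *fast_search_fail)
{
        unsigned long distance = (free_pfn - migrate_pfn) >> 1;
        unsigned long high_pfn = migrate_pfn + distance;
        unsigned long found;

        if (find_candidate_model(migrate_pfn, high_pfn, &found)) {
                *fast_search_fail = 0;           /* success resets the failure count */
                return found;
        }
        (*fast_search_fail)++;                   /* shrinks the next freelist_scan_limit() */
        return migrate_pfn;                      /* kernel falls back via reinit_migrate_pfn() */
}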
2078 static isolate_migrate_t isolate_migratepages(struct compact_control *cc) in isolate_migratepages() argument
2086 (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); in isolate_migratepages()
2094 low_pfn = fast_find_migrateblock(cc); in isolate_migratepages()
2096 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_migratepages()
2097 block_start_pfn = cc->zone->zone_start_pfn; in isolate_migratepages()
2104 fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail; in isolate_migratepages()
2113 for (; block_end_pfn <= cc->free_pfn; in isolate_migratepages()
2115 cc->migrate_pfn = low_pfn = block_end_pfn, in isolate_migratepages()
2128 block_end_pfn, cc->zone); in isolate_migratepages()
2134 block_end_pfn = min(next_pfn, cc->free_pfn); in isolate_migratepages()
2146 low_pfn == cc->zone->zone_start_pfn) && in isolate_migratepages()
2147 !fast_find_block && !isolation_suitable(cc, page)) in isolate_migratepages()
2158 if (!suitable_migration_source(cc, page)) { in isolate_migratepages()
2159 update_cached_migrate(cc, block_end_pfn); in isolate_migratepages()
2164 if (isolate_migratepages_block(cc, low_pfn, block_end_pfn, in isolate_migratepages()
2176 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; in isolate_migratepages()
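isolate_migratepages() stitches the pieces above together; its skeleton as implied by the matches: start from the block suggested by fast_find_migrateblock(), walk pageblocks no further than cc->free_pfn, skip unsuitable sources while updating the cached migrate pfn, scan one suitable block per call, and report ISOLATE_SUCCESS iff cc->nr_migratepages is non-zero (file line 2176). A loose stand-alone model of that shape; helpers and the block size are stand-ins:

#include <stdbool.h>

/* ISOLATE_ABORT_M corresponds to contention/fatal-signal aborts; not modelled here. */
enum isolate_result_model { ISOLATE_NONE_M, ISOLATE_ABORT_M, ISOLATE_SUCCESS_M };

struct cc_mscan_model {
        unsigned long migrate_pfn;
        unsigned long free_pfn;
        unsigned long nr_migratepages;
};

static bool suitable_source_model(unsigned long pfn)          { return (pfn >> 9) & 1; }
static unsigned long scan_one_block_model(unsigned long pfn)  { return pfn % 7; }

static enum isolate_result_model
isolate_migratepages_shape(struct cc_mscan_model *cc, unsigned long block_pages)
{
        unsigned long low = cc->migrate_pfn;
        unsigned long end = low + block_pages;

        for (; end <= cc->free_pfn;
             cc->migrate_pfn = low = end, end += block_pages) {
                if (!suitable_source_model(low)) {
                        /* the kernel also updates the cached migrate pfn here */
                        continue;
                }
                cc->nr_migratepages += scan_one_block_model(low);
                break;   /* one block per call; compact_zone() decides what happens next */
        }
        return cc->nr_migratepages ? ISOLATE_SUCCESS_M : ISOLATE_NONE_M;
}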
2271 static enum compact_result __compact_finished(struct compact_control *cc) in __compact_finished() argument
2274 const int migratetype = cc->migratetype; in __compact_finished()
2278 if (compact_scanners_met(cc)) { in __compact_finished()
2280 reset_cached_positions(cc->zone); in __compact_finished()
2288 if (cc->direct_compaction) in __compact_finished()
2289 cc->zone->compact_blockskip_flush = true; in __compact_finished()
2291 if (cc->whole_zone) in __compact_finished()
2297 if (cc->proactive_compaction) { in __compact_finished()
2301 pgdat = cc->zone->zone_pgdat; in __compact_finished()
2305 score = fragmentation_score_zone(cc->zone); in __compact_finished()
2316 if (is_via_compact_memory(cc->order)) in __compact_finished()
2325 if (!pageblock_aligned(cc->migrate_pfn)) in __compact_finished()
2330 for (order = cc->order; order < NR_PAGE_ORDERS; order++) { in __compact_finished()
2331 struct free_area *area = &cc->zone->free_area[order]; in __compact_finished()
2362 if (cc->contended || fatal_signal_pending(current)) in __compact_finished()
2368 static enum compact_result compact_finished(struct compact_control *cc) in compact_finished() argument
2372 ret = __compact_finished(cc); in compact_finished()
2373 trace_mm_compaction_finished(cc->zone, cc->order, ret); in compact_finished()
2507 compact_zone(struct compact_control *cc, struct capture_control *capc) in compact_zone() argument
2510 unsigned long start_pfn = cc->zone->zone_start_pfn; in compact_zone()
2511 unsigned long end_pfn = zone_end_pfn(cc->zone); in compact_zone()
2513 const bool sync = cc->mode != MIGRATE_ASYNC; in compact_zone()
2522 cc->total_migrate_scanned = 0; in compact_zone()
2523 cc->total_free_scanned = 0; in compact_zone()
2524 cc->nr_migratepages = 0; in compact_zone()
2525 cc->nr_freepages = 0; in compact_zone()
2527 INIT_LIST_HEAD(&cc->freepages[order]); in compact_zone()
2528 INIT_LIST_HEAD(&cc->migratepages); in compact_zone()
2530 cc->migratetype = gfp_migratetype(cc->gfp_mask); in compact_zone()
2532 if (!is_via_compact_memory(cc->order)) { in compact_zone()
2533 ret = compaction_suit_allocation_order(cc->zone, cc->order, in compact_zone()
2534 cc->highest_zoneidx, in compact_zone()
2535 cc->alloc_flags); in compact_zone()
2544 if (compaction_restarting(cc->zone, cc->order)) in compact_zone()
2545 __reset_isolation_suitable(cc->zone); in compact_zone()
2553 cc->fast_start_pfn = 0; in compact_zone()
2554 if (cc->whole_zone) { in compact_zone()
2555 cc->migrate_pfn = start_pfn; in compact_zone()
2556 cc->free_pfn = pageblock_start_pfn(end_pfn - 1); in compact_zone()
2558 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; in compact_zone()
2559 cc->free_pfn = cc->zone->compact_cached_free_pfn; in compact_zone()
2560 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { in compact_zone()
2561 cc->free_pfn = pageblock_start_pfn(end_pfn - 1); in compact_zone()
2562 cc->zone->compact_cached_free_pfn = cc->free_pfn; in compact_zone()
2564 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { in compact_zone()
2565 cc->migrate_pfn = start_pfn; in compact_zone()
2566 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; in compact_zone()
2567 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; in compact_zone()
2570 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) in compact_zone()
2571 cc->whole_zone = true; in compact_zone()
2585 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; in compact_zone()
2587 trace_mm_compaction_begin(cc, start_pfn, end_pfn, sync); in compact_zone()
2592 while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) { in compact_zone()
2594 unsigned long iteration_start_pfn = cc->migrate_pfn; in compact_zone()
2604 cc->finish_pageblock = false; in compact_zone()
2607 cc->finish_pageblock = true; in compact_zone()
2611 switch (isolate_migratepages(cc)) { in compact_zone()
2614 putback_movable_pages(&cc->migratepages); in compact_zone()
2615 cc->nr_migratepages = 0; in compact_zone()
2619 cc->zone->compact_cached_migrate_pfn[1] = in compact_zone()
2620 cc->zone->compact_cached_migrate_pfn[0]; in compact_zone()
2626 * previous cc->order aligned block. in compact_zone()
2631 last_migrated_pfn = max(cc->zone->zone_start_pfn, in compact_zone()
2632 pageblock_start_pfn(cc->migrate_pfn - 1)); in compact_zone()
2637 * compaction_alloc/free() will update cc->nr_migratepages in compact_zone()
2640 nr_migratepages = cc->nr_migratepages; in compact_zone()
2641 err = migrate_pages(&cc->migratepages, compaction_alloc, in compact_zone()
2642 compaction_free, (unsigned long)cc, cc->mode, in compact_zone()
2648 cc->nr_migratepages = 0; in compact_zone()
2650 putback_movable_pages(&cc->migratepages); in compact_zone()
2655 if (err == -ENOMEM && !compact_scanners_met(cc)) { in compact_zone()
2670 if (!pageblock_aligned(cc->migrate_pfn) && in compact_zone()
2671 !cc->ignore_skip_hint && !cc->finish_pageblock && in compact_zone()
2672 (cc->mode < MIGRATE_SYNC)) { in compact_zone()
2673 cc->finish_pageblock = true; in compact_zone()
2680 if (cc->order == COMPACTION_HPAGE_ORDER) in compact_zone()
2696 * cc->order aligned block where we migrated from? If yes, in compact_zone()
2701 if (cc->order > 0 && last_migrated_pfn) { in compact_zone()
2703 block_start_pfn(cc->migrate_pfn, cc->order); in compact_zone()
2706 lru_add_drain_cpu_zone(cc->zone); in compact_zone()
2718 if (cc->nr_freepages > 0) { in compact_zone()
2719 unsigned long free_pfn = release_free_list(cc->freepages); in compact_zone()
2721 cc->nr_freepages = 0; in compact_zone()
2729 if (free_pfn > cc->zone->compact_cached_free_pfn) in compact_zone()
2730 cc->zone->compact_cached_free_pfn = free_pfn; in compact_zone()
2733 count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); in compact_zone()
2734 count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); in compact_zone()
2736 trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret); in compact_zone()
2738 VM_BUG_ON(!list_empty(&cc->migratepages)); in compact_zone()
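Taken together, the compact_zone() matches trace the top-level loop: reset the per-attempt counters, position the two scanners from the zone's cached pfns (or the whole zone), then alternate isolate_migratepages() and migrate_pages() until compact_finished() stops returning COMPACT_CONTINUE, finally releasing any leftover isolated free pages. A stub-based sketch of just that control flow; all helpers are stand-ins and deferral, capture and tracing are omitted (the stub bodies only exist so the model terminates):

enum cf_model  { COMPACT_CONTINUE_M, COMPACT_DONE_M };
enum imr_model { IM_NONE, IM_ABORT, IM_SUCCESS };

struct cc_zone_model {
        unsigned long nr_migratepages;
        unsigned long nr_freepages;
        unsigned long progress;
};

static enum cf_model compact_finished_stub(struct cc_zone_model *cc)
{
        return cc->progress > 8 ? COMPACT_DONE_M : COMPACT_CONTINUE_M;
}

static enum imr_model isolate_migratepages_stub(struct cc_zone_model *cc)
{
        cc->nr_migratepages += 4;
        return IM_SUCCESS;
}

static int migrate_pages_stub(struct cc_zone_model *cc)
{
        cc->nr_migratepages = 0;
        cc->progress++;
        return 0;                                 /* 0 = everything migrated */
}

static void putback_stub(struct cc_zone_model *cc)      { cc->nr_migratepages = 0; }
static void release_free_stub(struct cc_zone_model *cc) { cc->nr_freepages = 0; }

static void compact_zone_shape(struct cc_zone_model *cc)
{
        cc->nr_migratepages = 0;                  /* per-attempt counters reset, as in the matches */
        cc->nr_freepages = 0;
        cc->progress = 0;

        while (compact_finished_stub(cc) == COMPACT_CONTINUE_M) {
                enum imr_model r = isolate_migratepages_stub(cc);

                if (r == IM_ABORT) {
                        putback_stub(cc);
                        break;                    /* kernel records the contended result here */
                }
                if (r == IM_NONE)
                        continue;                 /* kernel handles rescan/defer decisions here */
                if (migrate_pages_stub(cc))
                        putback_stub(cc);         /* folios that failed to migrate go back */
        }
        release_free_stub(cc);                    /* leftover isolated free pages are returned */
}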
2749 struct compact_control cc = { in compact_zone_order() local
2764 .cc = &cc, in compact_zone_order()
2776 ret = compact_zone(&cc, &capc); in compact_zone_order()
2891 struct compact_control cc = { in compact_node() local
2908 cc.zone = zone; in compact_node()
2910 compact_zone(&cc, NULL); in compact_node()
2914 cc.total_migrate_scanned); in compact_node()
2916 cc.total_free_scanned); in compact_node()
3054 struct compact_control cc = { in kcompactd_do_work() local
3064 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, in kcompactd_do_work()
3065 cc.highest_zoneidx); in kcompactd_do_work()
3068 for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) { in kcompactd_do_work()
3075 if (compaction_deferred(zone, cc.order)) in kcompactd_do_work()
3079 cc.order, zoneid, ALLOC_WMARK_MIN); in kcompactd_do_work()
3086 cc.zone = zone; in kcompactd_do_work()
3087 status = compact_zone(&cc, NULL); in kcompactd_do_work()
3090 compaction_defer_reset(zone, cc.order, false); in kcompactd_do_work()
3095 * order >= cc.order. This is ratelimited by the in kcompactd_do_work()
3104 defer_compaction(zone, cc.order); in kcompactd_do_work()
3108 cc.total_migrate_scanned); in kcompactd_do_work()
3110 cc.total_free_scanned); in kcompactd_do_work()
3118 if (pgdat->kcompactd_max_order <= cc.order) in kcompactd_do_work()
3120 if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx) in kcompactd_do_work()
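kcompactd_do_work() applies the same machinery per node: the matches show it looping over zones up to cc.highest_zoneidx, skipping zones where compaction is currently deferred, running compact_zone(), and resetting or extending the deferral based on the outcome. A stub-based model of that loop; all helpers are stand-ins:

#include <stdbool.h>

enum kc_status_model { KC_FAIL, KC_SUCCESS };

static bool compaction_deferred_stub(int zoneid, int order) { return zoneid == 0 && order > 3; }
static enum kc_status_model compact_zone_stub(int zoneid, int order)
{
        return ((zoneid + order) & 1) ? KC_SUCCESS : KC_FAIL;
}
static void compaction_defer_reset_stub(int zoneid) { (void)zoneid; }
static void defer_compaction_stub(int zoneid)       { (void)zoneid; }

static void kcompactd_do_work_shape(int highest_zoneidx, int order)
{
        for (int zoneid = 0; zoneid <= highest_zoneidx; zoneid++) {
                if (compaction_deferred_stub(zoneid, order))
                        continue;                              /* this zone failed recently; back off */

                if (compact_zone_stub(zoneid, order) == KC_SUCCESS)
                        compaction_defer_reset_stub(zoneid);   /* allow future attempts immediately */
                else
                        defer_compaction_stub(zoneid);         /* push the next attempt further out */
        }
}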