Lines matching +full:mixed +full:- +full:signals (code-search hits, apparently from the Linux kernel's mm/memory_hotplug.c; the number opening each hit is that file's own line number)
1 // SPDX-License-Identifier: GPL-2.0-only
27 #include <linux/page-isolation.h>
31 #include <linux/firmware-map.h>
92 *((int *)kp->arg) = mode; in set_memmap_mode()
97 memmap_pages - PFN_UP(memory_block_memmap_size())); in set_memmap_mode()
104 int mode = *((int *)kp->arg); in get_memmap_mode()
137 [ONLINE_POLICY_CONTIG_ZONES] = "contig-zones",
138 [ONLINE_POLICY_AUTO_MOVABLE] = "auto-movable",
147 *((int *)kp->arg) = ret; in set_online_policy()
153 return sprintf(buffer, "%s\n", online_policy_to_str[*((int *)kp->arg)]); in get_online_policy()
160 * "contig-zones": keep zone contiguous
161 * "auto-movable": online memory to ZONE_MOVABLE if the configuration
171 "Set the online policy (\"contig-zones\", \"auto-movable\") "
172 "Default: \"contig-zones\"");
178 * online some memory to ZONE_MOVABLE -- e.g., because hotplugged KERNEL memory
185 "in percent for \"auto-movable\" online policy. Default: 301");
195 "\"auto-movable\" online policy. Default: true");
264 return ERR_PTR(-E2BIG); in register_memory_resource()
269 * refer to the documentation of 'mem=' in kernel-parameters.txt for more in register_memory_resource()
273 return ERR_PTR(-E2BIG); in register_memory_resource()
284 pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n", in register_memory_resource()
286 return ERR_PTR(-EEXIST); in register_memory_resource()
302 * Disallow all operations smaller than a sub-section and only in check_pfn_span()
317 return -EINVAL; in check_pfn_span()
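
Only two lines of check_pfn_span() match above; a minimal reconstruction of
the rule it enforces, as a sketch rather than the verbatim kernel function:

	/*
	 * Sketch: disallow anything smaller than a sub-section, and
	 * require whole sections unless SPARSEMEM_VMEMMAP provides
	 * sub-section granularity.
	 */
	static int check_pfn_span_sketch(unsigned long pfn, unsigned long nr_pages)
	{
		unsigned long min_align;

		if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
			min_align = PAGES_PER_SUBSECTION;
		else
			min_align = PAGES_PER_SECTION;
		if (!IS_ALIGNED(pfn | nr_pages, min_align))
			return -EINVAL;
		return 0;
	}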
323 * walkers which rely on the fully initialized page->flags and others
356 * get_dev_pagemap() can determine sub-section online status. in pfn_to_online_page()
375 struct vmem_altmap *altmap = params->altmap; in __add_pages()
377 if (WARN_ON_ONCE(!pgprot_val(params->pgprot))) in __add_pages()
378 return -EINVAL; in __add_pages()
386 if (altmap->base_pfn != pfn in __add_pages()
389 return -EINVAL; in __add_pages()
391 altmap->alloc = 0; in __add_pages()
395 WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1); in __add_pages()
396 return -EINVAL; in __add_pages()
401 cur_nr_pages = min(end_pfn - pfn, in __add_pages()
402 SECTION_ALIGN_UP(pfn + 1) - pfn); in __add_pages()
404 params->pgmap); in __add_pages()
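
The min()/SECTION_ALIGN_UP() pair above is the per-section chunking idiom;
the same pattern reappears in __remove_pages() below. Spelled out as a
sketch:

	/*
	 * Walk [pfn, end_pfn) without crossing a section boundary: each
	 * step takes the remaining pages, capped at the start of the
	 * next section (SECTION_ALIGN_UP(pfn + 1)).
	 */
	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		/* operate on [pfn, pfn + cur_nr_pages) here */
	}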
442 pfn = end_pfn - 1; in find_biggest_section_pfn()
443 for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) { in find_biggest_section_pfn()
465 if (zone->zone_start_pfn == start_pfn) { in shrink_zone_span()
468 * shrink zone->zone_start_pfn and zone->zone_spanned_pages. in shrink_zone_span()
475 zone->spanned_pages = zone_end_pfn(zone) - pfn; in shrink_zone_span()
476 zone->zone_start_pfn = pfn; in shrink_zone_span()
478 zone->zone_start_pfn = 0; in shrink_zone_span()
479 zone->spanned_pages = 0; in shrink_zone_span()
484 * shrink zone->spanned_pages. in shrink_zone_span()
488 pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn, in shrink_zone_span()
491 zone->spanned_pages = pfn - zone->zone_start_pfn + 1; in shrink_zone_span()
493 zone->zone_start_pfn = 0; in shrink_zone_span()
494 zone->spanned_pages = 0; in shrink_zone_span()
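
A worked example for the two branches of shrink_zone_span(): say a zone
spans PFNs [0x1000, 0x5000) and [0x1000, 0x2000) is removed. The first
branch asks find_smallest_section_pfn() (the counterpart of
find_biggest_section_pfn() above) for the first still-online section in
[0x2000, 0x5000); if that is 0x2000, zone_start_pfn becomes 0x2000 and
spanned_pages 0x3000. If nothing online remains, both fields are zeroed,
the empty-zone case in the excerpt.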
504 for (zone = pgdat->node_zones; in update_pgdat_span()
505 zone < pgdat->node_zones + MAX_NR_ZONES; zone++) { in update_pgdat_span()
509 if (!zone->spanned_pages) in update_pgdat_span()
512 node_start_pfn = zone->zone_start_pfn; in update_pgdat_span()
519 if (zone->zone_start_pfn < node_start_pfn) in update_pgdat_span()
520 node_start_pfn = zone->zone_start_pfn; in update_pgdat_span()
523 pgdat->node_start_pfn = node_start_pfn; in update_pgdat_span()
524 pgdat->node_spanned_pages = node_end_pfn - node_start_pfn; in update_pgdat_span()
532 struct pglist_data *pgdat = zone->zone_pgdat; in remove_pfn_range_from_zone()
541 min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn); in remove_pfn_range_from_zone()
548 * we will not try to shrink the zones - which is okay as in remove_pfn_range_from_zone()
563 * __remove_pages() - remove sections of pages
580 WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1); in __remove_pages()
587 cur_nr_pages = min(end_pfn - pfn, in __remove_pages()
588 SECTION_ALIGN_UP(pfn + 1) - pfn); in __remove_pages()
595 int rc = -EINVAL; in set_online_page_callback()
614 int rc = -EINVAL; in restore_online_page_callback()
631 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
649 * MAX_ORDER_NR_PAGES - 1, but pageblock aligned. __ffs() will detect in online_pages_range()
659 * MAX_PAGE_ORDER-aligned; set order to MAX_PAGE_ORDER for in online_pages_range()
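
The comment above motivates the chunk-order choice; a sketch close to what
online_pages_range() does with it (reconstructed, not verbatim):

	/*
	 * Online in the largest naturally aligned chunk __ffs() allows.
	 * __ffs(0) is undefined, and pfn == 0 is MAX_PAGE_ORDER-aligned,
	 * so that case gets MAX_PAGE_ORDER directly.
	 */
	order = pfn ? min_t(int, MAX_PAGE_ORDER, __ffs(pfn)) : MAX_PAGE_ORDER;
	(*online_page_callback)(pfn_to_page(pfn), order);
	pfn += 1UL << order;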
681 arg->status_change_nid = NUMA_NO_NODE; in node_states_check_changes_online()
682 arg->status_change_nid_normal = NUMA_NO_NODE; in node_states_check_changes_online()
685 arg->status_change_nid = nid; in node_states_check_changes_online()
687 arg->status_change_nid_normal = nid; in node_states_check_changes_online()
692 if (arg->status_change_nid_normal >= 0) in node_states_set_node()
695 if (arg->status_change_nid >= 0) in node_states_set_node()
704 if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn) in resize_zone_range()
705 zone->zone_start_pfn = start_pfn; in resize_zone_range()
707 zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn; in resize_zone_range()
715 if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn) in resize_pgdat_range()
716 pgdat->node_start_pfn = start_pfn; in resize_pgdat_range()
718 pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn; in resize_pgdat_range()
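
Worked example for the two resize helpers: a pgdat spanning PFNs
[1000, 3000) that gains hotplugged range [500, 600) ends up with
node_start_pfn = 500 and node_spanned_pages = max(600, 3000) - 500 = 2500.
The span only grows here (holes inside it are fine); shrinking is handled
separately by shrink_zone_span()/update_pgdat_span() above.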
727 ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE; in section_taint_zone_device()
748 struct pglist_data *pgdat = zone->zone_pgdat; in move_pfn_range_to_zone()
749 int nid = pgdat->node_id; in move_pfn_range_to_zone()
792 stats->movable_pages += zone->present_pages; in auto_movable_stats_account_zone()
794 stats->kernel_early_pages += zone->present_early_pages; in auto_movable_stats_account_zone()
800 stats->movable_pages += zone->cma_pages; in auto_movable_stats_account_zone()
801 stats->kernel_early_pages -= zone->cma_pages; in auto_movable_stats_account_zone()
818 * We don't support modifying the config while the auto-movable online in auto_movable_stats_account_group()
828 pages = group->present_movable_pages * 100 / ratio; in auto_movable_stats_account_group()
829 pages -= group->present_kernel_pages; in auto_movable_stats_account_group()
832 stats->req_kernel_early_pages += pages; in auto_movable_stats_account_group()
833 stats->movable_pages += group->present_movable_pages; in auto_movable_stats_account_group()
855 zone = pgdat->node_zones + i; in auto_movable_can_online_movable()
873 kernel_early_pages -= group_stats.req_kernel_early_pages; in auto_movable_can_online_movable()
874 movable_pages -= group_stats.movable_pages; in auto_movable_can_online_movable()
876 if (group && group->is_dynamic) in auto_movable_can_online_movable()
877 kernel_early_pages += group->present_kernel_pages; in auto_movable_can_online_movable()
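
After subtracting what other groups still require (the two -= lines) and
crediting a dynamic group's own kernel pages back, the function comes down
to one ratio test. Roughly, as a sketch of the final lines rather than the
verbatim code:

	/*
	 * Accept onlining nr_pages to ZONE_MOVABLE iff the configured
	 * movable:kernel ratio still holds afterwards.
	 */
	movable_pages += nr_pages;
	return movable_pages <= (auto_movable_ratio * kernel_early_pages) / 100;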
899 struct zone *zone = &pgdat->node_zones[zid]; in default_kernel_zone_for_pfn()
905 return &pgdat->node_zones[ZONE_NORMAL]; in default_kernel_zone_for_pfn()
937 * memory within the same memory group -- because in that case, there is
953 * hotplugged by virtio-mem, look like they are completely present, however,
969 if (group && !group->is_dynamic) { in auto_movable_zone_for_pfn()
970 max_pages = group->s.max_pages; in auto_movable_zone_for_pfn()
971 online_pages = group->present_movable_pages; in auto_movable_zone_for_pfn()
974 if (group->present_kernel_pages) in auto_movable_zone_for_pfn()
976 } else if (!group || group->d.unit_pages == nr_pages) { in auto_movable_zone_for_pfn()
979 max_pages = group->d.unit_pages; in auto_movable_zone_for_pfn()
986 pfn = ALIGN_DOWN(pfn, group->d.unit_pages); in auto_movable_zone_for_pfn()
987 end_pfn = pfn + group->d.unit_pages; in auto_movable_zone_for_pfn()
1004 nr_pages = max_pages - online_pages; in auto_movable_zone_for_pfn()
1014 return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; in auto_movable_zone_for_pfn()
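
Worked example for the dynamic-group branch: for a virtio-mem style group
with unit_pages = 32768 (128 MiB of 4 KiB pages), the pfn is aligned down
to its unit and the whole unit [pfn, pfn + unit_pages) is considered;
max_pages - online_pages is then the part of the unit still to be onlined,
and the unit goes to ZONE_MOVABLE only if all of it can, so a single unit
never ends up with mixed zones.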
1024 struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; in default_zone_for_pfn()
1051 return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; in zone_for_pfn_range()
1074 zone->present_early_pages += nr_pages; in adjust_present_page_count()
1075 zone->present_pages += nr_pages; in adjust_present_page_count()
1076 zone->zone_pgdat->node_present_pages += nr_pages; in adjust_present_page_count()
1079 group->present_movable_pages += nr_pages; in adjust_present_page_count()
1081 group->present_kernel_pages += nr_pages; in adjust_present_page_count()
1164 return -EINVAL; in online_pages()
1183 spin_lock_irqsave(&zone->lock, flags); in online_pages()
1184 zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages; in online_pages()
1185 spin_unlock_irqrestore(&zone->lock, flags); in online_pages()
1211 * across the whole freelist - to create an initial shuffle. in online_pages()
1227 pr_debug("online_pages [mem %#010llx-%#010llx] failed\n", in online_pages()
1229 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1); in online_pages()
1235 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
1253 * to access a not-yet-initialized zonelist, build it here. in hotadd_init_pgdat()
1261 * __try_online_node - online a node if offlined
1267 * 1 -> a new node has been allocated
1268 * 0 -> the node is already online
1269 * -ENOMEM -> the node could not be allocated
1282 ret = -ENOMEM; in __try_online_node()
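
A hypothetical caller sketch interpreting the documented return values (the
label and variable names are made up):

	ret = __try_online_node(nid, true);
	if (ret < 0)			/* -ENOMEM: pgdat allocation failed */
		goto out;
	new_node = (ret == 1);		/* 1: freshly allocated, 0: already online */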
1315 return -EINVAL; in check_hotplug_memory_range()
1323 mem->online_type = mhp_default_online_type; in online_memory_block()
1324 return device_online(&mem->dev); in online_memory_block()
1354 * code requires applicable ranges to be page-aligned, for example, to in mhp_supports_memmap_on_memory()
1395 * For memmap_on_memory, the altmaps were added on a per-memblock in remove_memory_blocks_and_altmaps()
1407 altmap = mem->altmap; in remove_memory_blocks_and_altmaps()
1408 mem->altmap = NULL; in remove_memory_blocks_and_altmaps()
1415 WARN(altmap->alloc, "Altmap not fully unmapped"); in remove_memory_blocks_and_altmaps()
1433 .end_pfn = PHYS_PFN(cur_start + memblock_size - 1), in create_altmaps_and_memory_blocks()
1442 ret = -ENOMEM; in create_altmaps_and_memory_blocks()
1466 remove_memory_blocks_and_altmaps(start, cur_start - start); in create_altmaps_and_memory_blocks()
1474 * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
1485 start = res->start; in add_memory_resource()
1495 return -EINVAL; in add_memory_resource()
1496 nid = group->nid; in add_memory_resource()
1501 return -EINVAL; in add_memory_resource()
1507 if (res->flags & IORESOURCE_SYSRAM_DRIVER_MANAGED) in add_memory_resource()
1542 * can't be hot-added. There is no way to roll back at this point. in add_memory_resource()
1552 PFN_UP(start + size - 1), in add_memory_resource()
1556 if (!strcmp(res->name, "System RAM")) in add_memory_resource()
1611 * Add special, driver-managed memory to the system as system RAM. Such
1612 * memory is not exposed via the raw firmware-provided memmap as system
1613 * RAM; instead, it is detected and added by a driver - during cold boot,
1618 * - The booting kernel is in charge of determining how this memory will be
1620 * - Coordination with a hypervisor is required before this memory
1623 * For this memory, no entries in /sys/firmware/memmap ("raw firmware-provided
1625 * with IORESOURCE_SYSRAM_DRIVER_MANAGED, so in-kernel users can special-case
1639 resource_name[strlen(resource_name) - 1] != ')') in add_memory_driver_managed()
1640 return -EINVAL; in add_memory_driver_managed()
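
The strlen()/')' test above enforces the "System RAM ($DRIVER)" naming
convention from the kernel-doc. A hedged usage sketch, with the driver
name, nid, range and flag chosen purely for illustration:

	/*
	 * Hypothetical driver hot-adding a detected range as
	 * driver-managed system RAM.
	 */
	rc = add_memory_driver_managed(nid, start, size,
				       "System RAM (example_driver)",
				       MHP_MERGE_RESOURCE);
	if (rc)
		pr_err("example_driver: hot-add failed: %d\n", rc);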
1666 * - range.start <= range.end
1667 * - Range includes both end points [range.start..range.end]
1677 .end = -1ULL, in arch_get_mappable_range()
1707 if (start < end && start >= mhp_range.start && (end - 1) <= mhp_range.end) in mhp_range_allowed()
1710 pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n", in mhp_range_allowed()
1718 * non-lru movable pages and hugepages). Will skip over most unmovable
1724 * -ENOENT in case no movable page was found.
1725 * -EBUSY in case a definitely unmovable page was found.
1751 return -EBUSY; in scan_movable_pages()
1765 pfn |= folio_nr_pages(folio) - 1; in scan_movable_pages()
1767 return -ENOENT; in scan_movable_pages()
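
A sketch of how offline_pages() consumes these return codes (compare the
ret != -ENOENT test excerpted further down): keep migrating until no
movable page remains; -ENOENT ends the scan cleanly, anything else aborts
the offline attempt:

	do {
		ret = scan_movable_pages(pfn, end_pfn, &pfn);
		if (!ret)
			do_migrate_range(pfn, end_pfn);
	} while (!ret);
	if (ret != -ENOENT)
		goto failed_removal_isolated;	/* e.g. -EBUSY: unmovable page */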
1796 pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1; in do_migrate_range()
1859 dump_page(&folio->page, in do_migrate_range()
1879 struct pglist_data *pgdat = zone->zone_pgdat; in node_states_check_changes_offline()
1883 arg->status_change_nid = NUMA_NO_NODE; in node_states_check_changes_offline()
1884 arg->status_change_nid_normal = NUMA_NO_NODE; in node_states_check_changes_offline()
1895 present_pages += pgdat->node_zones[zt].present_pages; in node_states_check_changes_offline()
1897 arg->status_change_nid_normal = zone_to_nid(zone); in node_states_check_changes_offline()
1908 present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages; in node_states_check_changes_offline()
1911 arg->status_change_nid = zone_to_nid(zone); in node_states_check_changes_offline()
1916 if (arg->status_change_nid_normal >= 0) in node_states_clear_node()
1919 if (arg->status_change_nid >= 0) in node_states_clear_node()
1955 return -EINVAL; in offline_pages()
1960 * via the hotplug path - online_pages() - as hotplugged memory has in offline_pages()
1968 ret = -EINVAL; in offline_pages()
1979 page_zone(pfn_to_page(end_pfn - 1)) != zone)) { in offline_pages()
1980 ret = -EINVAL; in offline_pages()
2018 * can't limit it to fatal signals without eventually in offline_pages()
2022 ret = -EINTR; in offline_pages()
2039 if (ret != -ENOENT) { in offline_pages()
2068 spin_lock_irqsave(&zone->lock, flags); in offline_pages()
2069 zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages; in offline_pages()
2070 spin_unlock_irqrestore(&zone->lock, flags); in offline_pages()
2076 adjust_managed_page_count(pfn_to_page(start_pfn), -managed_pages); in offline_pages()
2077 adjust_present_page_count(pfn_to_page(start_pfn), group, -nr_pages); in offline_pages()
2083 * Make sure to mark the node as memory-less before rebuilding the zone in offline_pages()
2111 pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n", in offline_pages()
2113 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1, in offline_pages()
2122 *nid = mem->nid; in check_memblock_offlined_cb()
2123 if (unlikely(mem->state != MEM_OFFLINE)) { in check_memblock_offlined_cb()
2126 beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr)); in check_memblock_offlined_cb()
2127 endpa = beginpa + memory_block_size_bytes() - 1; in check_memblock_offlined_cb()
2128 pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n", in check_memblock_offlined_cb()
2131 return -EBUSY; in check_memblock_offlined_cb()
2140 if (mem->altmap) in count_memory_range_altmaps_cb()
2156 return -EBUSY; in check_cpu_on_node()
2171 return mem->nid == nid ? -EEXIST : 0; in check_no_memblock_for_node_cb()
2231 return -EINVAL; in memory_blocks_have_altmaps()
2247 * While at it, determine the nid. Note that if we had mixed nodes, in try_remove_memory()
2248 * we'd only try to offline the last determined one -- which is good in try_remove_memory()
2290 * __remove_memory - Remove memory if every memory block is offline
2310 * Remove memory if every memory block is offline, otherwise return -EBUSY if
2337 page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr)); in try_offline_memory_block()
2341 rc = device_offline(&mem->dev); in try_offline_memory_block()
2343 * Default is MMOP_OFFLINE - change it only if offlining succeeded, in try_offline_memory_block()
2360 mem->online_type = **online_types; in try_reonline_memory_block()
2361 rc = device_online(&mem->dev); in try_reonline_memory_block()
2363 pr_warn("%s: Failed to re-online memory: %d\n", in try_reonline_memory_block()
2386 return -EINVAL; in offline_and_remove_memory()
2396 return -ENOMEM; in offline_and_remove_memory()