Searched refs:PAGES_PER_SECTION (Results 1 – 20 of 20) sorted by relevance

/linux-6.12.1/mm/
page_ext.c 294 table_size = page_ext_size * PAGES_PER_SECTION; in init_section_page_ext()
324 table_size = page_ext_size * PAGES_PER_SECTION; in free_page_ext()
391 for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) in online_page_ext()
397 end = pfn - PAGES_PER_SECTION; in online_page_ext()
398 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) in online_page_ext()
421 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) in offline_page_ext()
426 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) in offline_page_ext()
478 pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) { in page_ext_init()
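The online_page_ext()/offline_page_ext() hits above all use the same idiom: walk a pfn range one memory section at a time. A minimal standalone sketch of that loop, with the per-section work replaced by a printf and PAGES_PER_SECTION hard-coded to a common x86_64 value (an assumption; the real value is architecture-dependent):

#include <stdio.h>

#define PAGES_PER_SECTION 32768UL   /* assumed: 128 MiB sections, 4 KiB pages */

/* Visit each section overlapping [start_pfn, end_pfn), as the loops above do. */
static void walk_sections(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        for (pfn = start_pfn & ~(PAGES_PER_SECTION - 1); pfn < end_pfn;
             pfn += PAGES_PER_SECTION)
                printf("section starting at pfn %lu\n", pfn);
}

int main(void)
{
        walk_sections(100000, 200000);  /* arbitrary example range */
        return 0;
}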
sparse.c 202 pfns = min(nr_pages, PAGES_PER_SECTION in subsection_map_init()
228 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { in memory_present()
416 return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE); in section_map_size()
422 return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION); in section_map_size()
526 map = __populate_section_memmap(pfn, PAGES_PER_SECTION, in sparse_init_nid()
598 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in online_mem_sections()
616 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in offline_mem_sections()
652 unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION); in free_map_bootmem()
711 PAGES_PER_SECTION), GFP_KERNEL, nid); in populate_section_memmap()
726 nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page)) in free_map_bootmem()
[all …]
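The two section_map_size() hits above show how large the struct page array for one section is: sizeof(struct page) * PAGES_PER_SECTION, rounded up to PMD_SIZE when the vmemmap uses huge mappings, otherwise to PAGE_SIZE. A back-of-envelope sketch with assumed x86_64 values (64-byte struct page, 32768 pages per section, 2 MiB PMD), none of which come from this search result:

#include <stdio.h>

#define STRUCT_PAGE_SIZE  64UL          /* assumed sizeof(struct page) */
#define PAGES_PER_SECTION 32768UL       /* assumed: 128 MiB sections, 4 KiB pages */
#define PMD_SIZE          (2UL << 20)   /* assumed: 2 MiB huge mapping */
#define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long raw = STRUCT_PAGE_SIZE * PAGES_PER_SECTION;

        printf("memmap per section:    %lu bytes\n", raw);                    /* 2 MiB */
        printf("PMD-aligned (vmemmap): %lu bytes\n", ALIGN_UP(raw, PMD_SIZE));
        return 0;
}

With these assumed values the per-section memmap is exactly one 2 MiB huge page, which also appears to be what the init_64.c hit below (PAGES_PER_SECTION * sizeof(struct page)) is sized against.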
bootmem_info.c 61 mapsize = sizeof(struct page) * PAGES_PER_SECTION; in register_page_bootmem_info_section()
90 register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION); in register_page_bootmem_info_section()
118 for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in register_page_bootmem_info_node()
memory_hotplug.c 315 min_align = PAGES_PER_SECTION; in check_pfn_span()
765 if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION)) in move_pfn_range_to_zone()
767 if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)) in move_pfn_range_to_zone()
988 for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in auto_movable_zone_for_pfn()
995 online_pages += PAGES_PER_SECTION; in auto_movable_zone_for_pfn()
1117 if (nr_pages >= PAGES_PER_SECTION) in mhp_init_memmap_on_memory()
1118 online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION)); in mhp_init_memmap_on_memory()
1132 if (nr_pages >= PAGES_PER_SECTION) in mhp_deinit_memmap_on_memory()
1133 offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION)); in mhp_deinit_memmap_on_memory()
1163 !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION))) in online_pages()
[all …]
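The check_pfn_span() and move_pfn_range_to_zone() hits above reflect the rule that hotplugged ranges are handled in whole sections: both the start pfn and the end of the range are expected to be section-aligned. A small standalone sketch of that check, with IS_ALIGNED re-implemented locally and the section size assumed:

#include <stdio.h>
#include <stdbool.h>

#define PAGES_PER_SECTION 32768UL               /* assumed x86_64 value */
#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

/* True when [start_pfn, start_pfn + nr_pages) covers whole sections only. */
static bool span_is_section_aligned(unsigned long start_pfn, unsigned long nr_pages)
{
        return IS_ALIGNED(start_pfn, PAGES_PER_SECTION) &&
               IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION);
}

int main(void)
{
        printf("%d\n", span_is_section_aligned(32768, 65536));  /* 1: two whole sections */
        printf("%d\n", span_is_section_aligned(32768, 40000));  /* 0: partial section */
        return 0;
}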
mm_init.c 701 if ((nr_initialised > PAGES_PER_SECTION) && in defer_init()
702 (pfn & (PAGES_PER_SECTION - 1)) == 0) { in defer_init()
971 end_pfn = round_up(end_pfn, PAGES_PER_SECTION); in memmap_init()
2130 first_init_pfn = ALIGN(epfn, PAGES_PER_SECTION); in deferred_init_memmap()
2136 .align = PAGES_PER_SECTION, in deferred_init_memmap()
2137 .min_chunk = PAGES_PER_SECTION, in deferred_init_memmap()
2168 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); in deferred_grow_zone()
2212 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) in deferred_grow_zone()
numa_memblks.c 407 if (pfn_align && pfn_align < PAGES_PER_SECTION) { in numa_register_meminfo()
410 unsigned long sect_align_mb = PFN_PHYS(PAGES_PER_SECTION) >> 20; in numa_register_meminfo()
memblock.c 2085 start = min(start, ALIGN(prev_end, PAGES_PER_SECTION)); in free_unused_memmap()
2110 if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) { in free_unused_memmap()
2112 free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION)); in free_unused_memmap()
compaction.c 272 return section_nr_to_pfn(start_nr) + PAGES_PER_SECTION; in skip_offline_sections_reverse()
page_alloc.c 344 pfn &= (PAGES_PER_SECTION-1); in pfn_to_bitidx()
/linux-6.12.1/drivers/xen/
unpopulated-alloc.c 39 unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION); in fill_list()
54 PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); in fill_list()
balloon.c 250 PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); in additional_memory_resource()
277 balloon_hotplug = round_up(credit, PAGES_PER_SECTION); in reserve_additional_memory()
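Both Xen hits round their request up to a whole number of sections before adding memory, since hotplug is done in section-sized units. A standalone sketch of the same rounding, with round_up() re-implemented locally and the section size assumed:

#include <stdio.h>

#define PAGES_PER_SECTION 32768UL       /* assumed x86_64 value */
#define ROUND_UP(x, a)    ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
        unsigned long credit = 5000;    /* pages actually wanted */

        printf("pages reserved: %lu\n", ROUND_UP(credit, PAGES_PER_SECTION));  /* 32768 */
        return 0;
}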
/linux-6.12.1/drivers/base/
memory.c 189 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; in memory_block_online()
260 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; in memory_block_offline()
454 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; in valid_zones_show()
555 unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block; in probe_store()
706 const unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; in early_node_zone_for_memory_block()
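The memory.c hits above all compute the same quantity: a sysfs memory block spans sections_per_block sections, so its page count is PAGES_PER_SECTION * sections_per_block. A tiny illustration with assumed values (the section size is the usual x86_64 default; 2 sections per block is purely illustrative, not a kernel default):

#include <stdio.h>

#define PAGES_PER_SECTION  32768UL      /* assumed */
#define SECTIONS_PER_BLOCK 2UL          /* illustrative only */
#define PAGE_SIZE          4096UL       /* assumed */

int main(void)
{
        unsigned long nr_pages = PAGES_PER_SECTION * SECTIONS_PER_BLOCK;

        printf("pages per memory block: %lu (%lu MiB)\n",
               nr_pages, nr_pages * PAGE_SIZE >> 20);
        return 0;
}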
node.c 801 pfn = round_down(pfn + PAGES_PER_SECTION, in register_mem_block_under_node_early()
802 PAGES_PER_SECTION) - 1; in register_mem_block_under_node_early()
/linux-6.12.1/include/linux/
mmzone.h 1771 #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT) macro
1772 #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
1790 #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
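These three mmzone.h lines are the core of SPARSEMEM's section arithmetic, and most of the hits above are applications of them: shifting a pfn right by PFN_SECTION_SHIFT gives the section number, masking with PAGE_SECTION_MASK aligns down, and the pfn_to_bitidx() hit masks with PAGES_PER_SECTION - 1 to get the offset within a section. A standalone sketch mirroring the macros, assuming PFN_SECTION_SHIFT = 15 (the x86_64 default of 128 MiB sections with 4 KiB pages); SECTION_ALIGN_DOWN is added for symmetry and is not among the lines shown above:

#include <stdio.h>

#define PFN_SECTION_SHIFT 15UL                  /* assumed, architecture-dependent */
#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION - 1))
#define SECTION_ALIGN_UP(pfn)   (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)

int main(void)
{
        unsigned long pfn = 100000;             /* arbitrary example pfn */

        printf("PAGES_PER_SECTION = %lu\n", PAGES_PER_SECTION);             /* 32768 */
        printf("section number    = %lu\n", pfn >> PFN_SECTION_SHIFT);      /* 3 */
        printf("offset in section = %lu\n", pfn & (PAGES_PER_SECTION - 1)); /* 1696 */
        printf("aligned down      = %lu\n", SECTION_ALIGN_DOWN(pfn));       /* 98304 */
        printf("aligned up        = %lu\n", SECTION_ALIGN_UP(pfn));         /* 131072 */
        return 0;
}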
/linux-6.12.1/scripts/gdb/linux/
mm.py 72 self.PAGES_PER_SECTION = 1 << self.PFN_SECTION_SHIFT
73 self.PAGE_SECTION_MASK = (~(self.PAGES_PER_SECTION - 1)) & ((1 << 64) - 1)
/linux-6.12.1/arch/powerpc/platforms/powernv/
memtrace.c 98 if (IS_ALIGNED(pfn, PAGES_PER_SECTION)) in memtrace_clear_range()
/linux-6.12.1/drivers/virtio/
virtio_mem.c 2179 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in virtio_mem_bbm_offline_remove_and_unplug_bb()
2184 rc = virtio_mem_fake_offline(vm, pfn, PAGES_PER_SECTION); in virtio_mem_bbm_offline_remove_and_unplug_bb()
2208 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in virtio_mem_bbm_offline_remove_and_unplug_bb()
2212 virtio_mem_fake_online(pfn, PAGES_PER_SECTION); in virtio_mem_bbm_offline_remove_and_unplug_bb()
2230 pfn += PAGES_PER_SECTION) { in virtio_mem_bbm_bb_is_offline()
2250 pfn += PAGES_PER_SECTION) { in virtio_mem_bbm_bb_is_movable()
/linux-6.12.1/fs/proc/
page.c 34 return round_up(max_pfn, PAGES_PER_SECTION); in get_max_dump_pfn()
/linux-6.12.1/arch/x86/mm/
init_64.c 1539 if (end - start < PAGES_PER_SECTION * sizeof(struct page)) in vmemmap_populate()
/linux-6.12.1/arch/arm64/mm/
mmu.c 1396 for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in prevent_bootmem_remove_notifier()