Lines matching: boot, page, step

1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
33 * constant relationship between address of struct page and its PFN.
35 * During boot or memory hotplug operation when a new memory section is
41 * ----------------------------------------------
43 * ----------------------------------------------
46 * vmemmap +--------------+ +--------------+
47 * + | page struct | +--------------> | page struct |
48 * | +--------------+ +--------------+
49 * | | page struct | +--------------> | page struct |
50 * | +--------------+ | +--------------+
51 * | | page struct | + +------> | page struct |
52 * | +--------------+ | +--------------+
53 * | | page struct | | +--> | page struct |
54 * | +--------------+ | | +--------------+
55 * | | page struct | | |
56 * | +--------------+ | |
57 * | | page struct | | |
58 * | +--------------+ | |
59 * | | page struct | | |
60 * | +--------------+ | |
61 * | | page struct | | |
62 * | +--------------+ | |
63 * | | page struct | +-------+ |
64 * | +--------------+ |
65 * | | page struct | +-----------+
66 * | +--------------+
67 * | | page struct | No mapping
68 * | +--------------+
69 * | | page struct | No mapping
70 * v +--------------+
72 * -----------------------------------------
74 * -----------------------------------------
76 * vmemmap +--------------+ +---------------+
77 * + | page struct | +-------------> | PFN |
78 * | +--------------+ +---------------+
79 * | | page struct | +-------------> | PFN |
80 * | +--------------+ +---------------+
81 * | | page struct | +-------------> | PFN |
82 * | +--------------+ +---------------+
83 * | | page struct | +-------------> | PFN |
84 * | +--------------+ +---------------+
86 * | +--------------+
88 * | +--------------+
90 * | +--------------+ +---------------+
91 * | | page struct | +-------------> | PFN |
92 * | +--------------+ +---------------+
94 * | +--------------+
96 * | +--------------+ +---------------+
97 * | | page struct | +-------------> | PFN |
98 * | +--------------+ +---------------+
99 * | | page struct | +-------------> | PFN |
100 * v +--------------+ +---------------+
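
The "constant relationship" noted at line 33 and pictured above is plain pointer arithmetic: every possible PFN owns a slot in the virtual array starting at vmemmap, whether or not a struct page is ever physically allocated behind it. With CONFIG_SPARSEMEM_VMEMMAP the generic accessors in include/asm-generic/memory_model.h reduce to exactly that:

        #define __pfn_to_page(pfn)      (vmemmap + (pfn))
        #define __page_to_pfn(page)     (unsigned long)((page) - vmemmap)

Only the slots covering real memory sections are ever backed (left diagram), and each backed struct page then describes exactly one PFN (right diagram).
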
103 * On hash-based CPUs, the vmemmap is bolted in the hash table.
114 return -1; in hash__vmemmap_create_mapping()
124 BUG_ON(rc2 && (rc2 != -ENOENT)); in hash__vmemmap_create_mapping()
136 BUG_ON((rc < 0) && (rc != -ENOENT)); in hash__vmemmap_remove_mapping()
137 WARN_ON(rc == -ENOENT); in hash__vmemmap_remove_mapping()
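
Lines 114-137 are the error handling around bolting the vmemmap into the hash table. A hedged reconstruction of the create side, assuming the usual book3s64 helpers htab_bolt_mapping()/htab_remove_mapping(); treating the early "return -1" as a bounds check on the vmemmap window is also an assumption (H_VMEMMAP_END below is not in the listing):

        if ((start + page_size) >= H_VMEMMAP_END)       /* assumed bounds check */
                return -1;

        rc = htab_bolt_mapping(start, start + page_size, phys,
                               pgprot_val(PAGE_KERNEL),
                               mmu_vmemmap_psize, mmu_kernel_ssize);
        if (rc < 0) {
                /* roll back a partial bolt; -ENOENT just means nothing had
                 * been inserted yet for this range */
                int rc2 = htab_remove_mapping(start, start + page_size,
                                              mmu_vmemmap_psize,
                                              mmu_kernel_ssize);
                BUG_ON(rc2 && (rc2 != -ENOENT));
        }
        return rc;

The remove path at lines 136-137 applies the same rule in reverse: -ENOENT is not treated as fatal by the BUG_ON, but is still surprising enough to WARN about, since a vmemmap section being removed is expected to have been bolted.
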
144 * map_kernel_page adds an entry to the ioremap page table
161 return -ENOMEM; in hash__map_kernel_page()
164 return -ENOMEM; in hash__map_kernel_page()
167 return -ENOMEM; in hash__map_kernel_page()
172 * linux page table entry for this mapping. Simply bolt an in hash__map_kernel_page()
173 * entry in the hardware page table. in hash__map_kernel_page()
180 return -ENOMEM; in hash__map_kernel_page()
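
The three -ENOMEM returns at lines 161-167 fall out of walking, and allocating where missing, each level of the kernel page table for the ioremap address; the comment at lines 172-173 covers the early-boot fallback, where no Linux page table can be allocated yet and the translation is bolted straight into the hash table. A condensed sketch of that shape, assuming the generic pgd/p4d/pud/pmd/pte helpers and the usual hash bolting call (the function name and any detail beyond what the listing shows are assumptions):

        static int map_kernel_page_sketch(unsigned long ea, unsigned long pa,
                                          pgprot_t prot)
        {
                if (slab_is_available()) {
                        pgd_t *pgdp = pgd_offset_k(ea);
                        p4d_t *p4dp = p4d_offset(pgdp, ea);
                        pud_t *pudp = pud_alloc(&init_mm, p4dp, ea);
                        pmd_t *pmdp;
                        pte_t *ptep;

                        if (!pudp)
                                return -ENOMEM;
                        pmdp = pmd_alloc(&init_mm, pudp, ea);
                        if (!pmdp)
                                return -ENOMEM;
                        ptep = pte_alloc_kernel(pmdp, ea);
                        if (!ptep)
                                return -ENOMEM;
                        set_pte_at(&init_mm, ea, ptep,
                                   pfn_pte(pa >> PAGE_SHIFT, prot));
                } else if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa,
                                             pgprot_val(prot), mmu_io_psize,
                                             mmu_kernel_ssize)) {
                        /* too early for Linux page tables: bolt a hash entry */
                        return -ENOMEM;
                }
                return 0;
        }
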
205 bne- 1b \n\ in hash__pmd_hugepage_update()
209 bne- 1b" in hash__pmd_hugepage_update()
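
The two "bne- 1b" branches at lines 205 and 209 are the retry points of an ldarx/stdcx. loop: the first spins while the entry is marked busy, the second retries when the store-conditional loses its reservation. Below is a standalone, userspace model of that read-modify-write in C11 atomics, not the kernel asm; BUSY_BIT is a stand-in for H_PAGE_BUSY, and the real code additionally operates on the big-endian PTE image (cpu_to_be64):

        #include <stdatomic.h>
        #include <stdint.h>

        #define BUSY_BIT (1ULL << 63)           /* stand-in for H_PAGE_BUSY */

        /* Return the old value and atomically store (old & ~clr) | set,
         * never modifying an entry while its busy bit is set. */
        static uint64_t pmd_update_model(_Atomic uint64_t *pmd,
                                         uint64_t clr, uint64_t set)
        {
                uint64_t old, new;

                do {
                        old = atomic_load_explicit(pmd, memory_order_relaxed);
                        while (old & BUSY_BIT)          /* "and. ; bne- 1b"   */
                                old = atomic_load_explicit(pmd,
                                                           memory_order_relaxed);
                        new = (old & ~clr) | set;       /* "andc ; or"        */
                } while (!atomic_compare_exchange_weak(pmd, &old, new));
                                                        /* "stdcx. ; bne- 1b" */
                return old;
        }
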
239 * page fault will see a none pmd and take the slow path that in hash__pmdp_collapse_flush()
241 * hash_page with local ptep pointer value. Such a hash page in hash__pmdp_collapse_flush()
243 * That means we could be modifying the page content as we in hash__pmdp_collapse_flush()
244 * copy them to a huge page. So wait for parallel hash_page in hash__pmdp_collapse_flush()
249 serialize_against_pte_lookup(vma->vm_mm); in hash__pmdp_collapse_flush()
259 flush_hash_table_pmd_range(vma->vm_mm, &pmd, address); in hash__pmdp_collapse_flush()
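
The comment at lines 239-244 is about ordering during THP collapse: a racing hash_page() may still hold a pointer to one of the old PTEs and could insert fresh HPTEs for the small pages while their contents are being copied into the huge page. Hence the sequence implied by the listing, clear the pmd first, drain lockless walkers, and only then invalidate the hash entries. Condensed, using the helpers named above (everything else is assumed):

        pmd_t pmd = *pmdp;

        pmd_clear(pmdp);                /* 1: new walkers now see a none pmd  */
        serialize_against_pte_lookup(vma->vm_mm);
                                        /* 2: IPI-wait for in-flight lockless
                                         *    hash_page() walks to finish     */
        flush_hash_table_pmd_range(vma->vm_mm, &pmd, address);
                                        /* 3: safe to kill the stale HPTEs    */
        return pmd;
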
265 * the base page size hptes
320 /* get the base page size, vsid and segment size */ in hpte_do_hugepage_flush()
332 vsid = get_user_vsid(&mm->context, addr, ssize); in hpte_do_hugepage_flush()
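
Before it can invalidate anything, hpte_do_hugepage_flush() has to recover the hash parameters for the mapping (line 320): the base page size encoded in the pmd, and a VSID/segment size pair, which is derived differently for user and kernel addresses (line 332 shows the user case). A hedged sketch of that selection; the H_PAGE_COMBO test for 4K-subpage THP and the kernel-VSID branch are assumptions based on the usual book3s64 conventions:

        /* base page size: a H_PAGE_COMBO pmd is backed by 4K subpages */
        if (pmd_val(pmd) & H_PAGE_COMBO)
                psize = MMU_PAGE_4K;
        else
                psize = MMU_PAGE_64K;

        /* VSID and segment size depend on whether this is a user address */
        if (!is_kernel_addr(addr)) {
                ssize = user_segment_size(addr);
                vsid = get_user_vsid(&mm->context, addr, ssize);
        } else {
                ssize = mmu_kernel_ssize;
                vsid = get_kernel_vsid(addr, ssize);
        }
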
382 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE in hash__has_transparent_hugepage()
389 (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1)) in hash__has_transparent_hugepage()
394 if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1) in hash__has_transparent_hugepage()
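
The checks at lines 389 and 394 consult the penc (page-size encoding) table: hash THP needs the MMU to express a 16M huge page inside a segment whose base page size is the one the kernel runs with, and an encoding of -1 means that combination does not exist on this CPU. Roughly, with the surrounding feature checks omitted:

        /* if 64K HPTEs are available they will be used, so 64K segments
         * must be able to encode a 16M page ... */
        if (mmu_psize_defs[MMU_PAGE_64K].shift &&
            mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1)
                return 0;
        /* ... otherwise we fall back to 4K HPTEs, which must encode it too */
        if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
                return 0;
        return 1;
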
407 unsigned int step, nr_cpus; member
419 unsigned int step, unsigned long newpp) in change_memory_range() argument
423 pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n", in change_memory_range()
424 start, end, newpp, step); in change_memory_range()
426 for (idx = start; idx < end; idx += step) in change_memory_range()
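
The loop at line 426 walks the range in step-sized units; on hash, changing kernel page protections means rewriting each bolted HPTE rather than editing Linux PTEs. The loop body is not in the listing; a plausible completion, assuming the standard mmu_hash_ops hook for bolted entries:

        for (idx = start; idx < end; idx += step)
                mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
                                                 mmu_kernel_ssize);
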
437 p = &parms->cpu_counter.counter; in chmem_secondary_loop()
452 "addic %[tmp], %[tmp], -1 ;" in chmem_secondary_loop()
454 "bne- 1b ;" in chmem_secondary_loop()
460 "bne- 2b ;" in chmem_secondary_loop()
483 if (atomic_xchg(&parms->master_cpu, 1) == 1) in change_memory_range_fn()
486 // Wait for all but one CPU (this one) to call-in in change_memory_range_fn()
487 while (atomic_read(&parms->cpu_counter) > 1) in change_memory_range_fn()
490 change_memory_range(parms->start, parms->end, parms->step, parms->newpp); in change_memory_range_fn()
495 atomic_dec(&parms->cpu_counter); in change_memory_range_fn()
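
Lines 407-495 describe a stop_machine-style rendezvous: every CPU enters change_memory_range_fn(), the first one to win the atomic_xchg on master_cpu (line 483) becomes the master, and the rest sit in chmem_secondary_loop(), whose asm (lines 437-460) decrements cpu_counter once to call in and then spins, in real mode with the hash MMU off, until the counter reaches zero. The master waits for the counter to drop to 1, rewrites the bolted entries, then decrements it to release everyone. A standalone C11 model of just that counter protocol; cpu_counter is assumed to start at the number of participating CPUs, do_change() stands in for the call at line 490, and the real-mode entry/exit and memory barriers are omitted:

        #include <stdatomic.h>

        struct change_params {
                atomic_int master_cpu;          /* 0 until one CPU claims the work */
                atomic_int cpu_counter;         /* starts at nr_cpus               */
        };

        static void change_memory_range_model(struct change_params *p,
                                              void (*do_change)(void))
        {
                /* first CPU through becomes the master, the rest are secondaries */
                if (atomic_exchange(&p->master_cpu, 1) == 1) {
                        atomic_fetch_sub(&p->cpu_counter, 1);   /* call in */
                        while (atomic_load(&p->cpu_counter) != 0)
                                ;                       /* spin until released */
                        return;
                }

                /* master: wait for all but one CPU (this one) to call in */
                while (atomic_load(&p->cpu_counter) > 1)
                        ;
                do_change();                            /* rewrite the bolted HPTEs */
                atomic_fetch_sub(&p->cpu_counter, 1);   /* hits 0: release secondaries */
        }
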
503 unsigned int step, shift; in hash__change_memory_range() local
506 step = 1 << shift; in hash__change_memory_range()
508 start = ALIGN_DOWN(start, step); in hash__change_memory_range()
509 end = ALIGN(end, step); // aligns up in hash__change_memory_range()
519 chmem_parms.step = step; in hash__change_memory_range()
536 change_memory_range(start, end, step, newpp); in hash__change_memory_range()
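
hash__change_memory_range() works in units of one linear-mapping page: lines 503-509 derive step as 1 << shift (the shift presumably coming from the linear mapping's page-size definition) and widen the range to step boundaries before any bolted entry is touched. For a power-of-two step the two macros are plain mask arithmetic; with a 16M linear page size (shift == 24, step == 0x1000000), for example:

        /* ALIGN_DOWN(x, step) == x & ~(step - 1)
         * ALIGN(x, step)      == (x + step - 1) & ~(step - 1)          */
        start = ALIGN_DOWN(start, step);  /* e.g. 0x...01234000 -> 0x...01000000 */
        end   = ALIGN(end, step);         /* e.g. 0x...02345000 -> 0x...03000000 */

Widening rather than narrowing the range means every bolted entry that overlaps the requested region gets the new protection, at the cost of possibly touching a little extra memory at each end.
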