// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
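
/*
 * Illustration only (a sketch of the generic memory model, not definitions
 * made in this file): once the vmemmap area is populated, the pfn <-> page
 * conversions reduce to pointer arithmetic against the vmemmap base, roughly:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */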
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, otherwise fall back to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* Need to make sure the size is the same for all allocations during the early stage. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

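/*
 * First pfn in the altmap that has not been handed out yet: the base pfn
 * plus the reserved area, the pages already allocated and any alignment
 * padding consumed so far.
 */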
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

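/*
 * Pages remaining in the altmap: the free area minus what has already been
 * allocated, including alignment padding.
 */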
static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

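/*
 * Carve a page-aligned block out of the altmap. The start pfn is aligned to
 * the largest power-of-two factor of the requested size, and both the
 * allocation and the padding consumed for that alignment are charged to the
 * altmap.
 */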
static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
			     __func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
		 __func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

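/*
 * Warn once if the page backing this pte does not live on (or near) the node
 * the range was requested for, i.e. the page structs ended up off-node.
 */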
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(ptep_get(pte));
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn_once("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

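/*
 * Populate one vmemmap PTE. If @reuse is NULL a fresh page is allocated
 * (from the altmap when one is supplied); otherwise the existing @reuse page
 * is mapped again with an extra reference taken on it.
 */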
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap,
				       struct page *reuse)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(ptep_get(pte))) {
		pte_t entry;
		void *p;

		if (!reuse) {
			p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
			if (!p)
				return NULL;
		} else {
			/*
			 * When a PTE/PMD entry is freed from the init_mm
			 * there's a free_pages() call to this page allocated
			 * above. Thus this get_page() is paired with the
			 * put_page_testzero() on the freeing path.
			 * This can only be called by certain ZONE_DEVICE paths,
			 * and through vmemmap_populate_compound_pages() when
			 * slab is available.
			 */
			get_page(reuse);
			p = page_to_virt(reuse);
		}
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

void __weak __meminit kernel_pte_init(void *addr)
{
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		kernel_pte_init(p);
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

void __weak __meminit pmd_init(void *addr)
{
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_init(p);
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

void __weak __meminit pud_init(void *addr)
{
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_init(p);
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

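/*
 * Walk (and populate as needed) every page table level for a single vmemmap
 * address and return the resulting PTE, or NULL on allocation failure.
 */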
static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
						  struct vmem_altmap *altmap,
						  struct page *reuse)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = vmemmap_pgd_populate(addr, node);
	if (!pgd)
		return NULL;
	p4d = vmemmap_p4d_populate(pgd, addr, node);
	if (!p4d)
		return NULL;
	pud = vmemmap_pud_populate(p4d, addr, node);
	if (!pud)
		return NULL;
	pmd = vmemmap_pmd_populate(pud, addr, node);
	if (!pmd)
		return NULL;
	pte = vmemmap_pte_populate(pmd, addr, node, altmap, reuse);
	if (!pte)
		return NULL;
	vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

	return pte;
}

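/*
 * Populate the vmemmap range [start, end) one base page at a time,
 * optionally reusing the same backing page for every PTE.
 */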
static int __meminit vmemmap_populate_range(unsigned long start,
					    unsigned long end, int node,
					    struct vmem_altmap *altmap,
					    struct page *reuse)
{
	unsigned long addr = start;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pte = vmemmap_populate_address(addr, node, altmap, reuse);
		if (!pte)
			return -ENOMEM;
	}

	return 0;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	return vmemmap_populate_range(start, end, node, altmap, NULL);
}

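/*
 * Usage sketch (an assumption about callers, not code in this file): an
 * architecture whose vmemmap is mapped with base pages can implement its
 * vmemmap_populate() hook as a thin wrapper, roughly:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node, altmap);
 *	}
 */
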
void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
				      unsigned long addr, unsigned long next)
{
}

int __weak __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				       unsigned long addr, unsigned long next)
{
	return 0;
}

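/*
 * Populate the range with PMD-sized vmemmap blocks where possible, falling
 * back to base pages when a PMD-sized allocation cannot be satisfied. With
 * an altmap the fallback is skipped and the failure is reported instead.
 */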
int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;

		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(READ_ONCE(*pmd))) {
			void *p;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
			if (p) {
				vmemmap_set_pmd(pmd, p, node, addr, next);
				continue;
			} else if (altmap) {
				/*
				 * No fallback: In any case we care about, the
				 * altmap should be reasonably sized and aligned
				 * such that vmemmap_alloc_block_buf() will always
				 * succeed. For consistency with the PTE case,
				 * return an error here as failure could indicate
				 * a configuration issue with the size of the altmap.
				 */
				return -ENOMEM;
			}
		} else if (vmemmap_check_pmd(pmd, node, addr, next))
			continue;
		if (vmemmap_populate_basepages(addr, next, node, altmap))
			return -ENOMEM;
	}
	return 0;
}

#ifndef vmemmap_populate_compound_pages
/*
 * For compound pages bigger than section size (e.g. x86 1G compound
 * pages with 2M subsection size) fill the rest of sections as tail
 * pages.
 *
 * Note that memremap_pages() resets @nr_range value and will increment
 * it after each range is successfully onlined. Thus the value of @nr_range
 * at section memmap populate corresponds to the in-progress range
 * being onlined here.
 */
static bool __meminit reuse_compound_section(unsigned long start_pfn,
					     struct dev_pagemap *pgmap)
{
	unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
	unsigned long offset = start_pfn -
		PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);

	return !IS_ALIGNED(offset, nr_pages) && nr_pages > PAGES_PER_SUBSECTION;
}

static pte_t * __meminit compound_section_tail_page(unsigned long addr)
{
	pte_t *pte;

	addr -= PAGE_SIZE;

	/*
	 * Assuming sections are populated sequentially, the previous section's
	 * page data can be reused.
	 */
	pte = pte_offset_kernel(pmd_off_k(addr), addr);
	if (!pte)
		return NULL;

	return pte;
}

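/*
 * Populate the memmap for a range backing compound device pages. Only the
 * head vmemmap page and one tail vmemmap page are allocated per compound
 * page; the remaining tail PTEs are pointed at that shared tail page (see
 * Documentation/mm/vmemmap_dedup.rst).
 */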
static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
						     unsigned long start,
						     unsigned long end, int node,
						     struct dev_pagemap *pgmap)
{
	unsigned long size, addr;
	pte_t *pte;
	int rc;

	if (reuse_compound_section(start_pfn, pgmap)) {
		pte = compound_section_tail_page(start);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the page that was populated in the prior iteration
		 * with just tail struct pages.
		 */
		return vmemmap_populate_range(start, end, node, NULL,
					      pte_page(ptep_get(pte)));
	}

	size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));
	for (addr = start; addr < end; addr += size) {
		unsigned long next, last = addr + size;

		/* Populate the head page vmemmap page */
		pte = vmemmap_populate_address(addr, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/* Populate the tail pages vmemmap page */
		next = addr + PAGE_SIZE;
		pte = vmemmap_populate_address(next, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the previous page for the rest of tail pages
		 * See layout diagram in Documentation/mm/vmemmap_dedup.rst
		 */
		next += PAGE_SIZE;
		rc = vmemmap_populate_range(next, last, node, NULL,
					    pte_page(ptep_get(pte)));
		if (rc)
			return -ENOMEM;
	}

	return 0;
}

#endif

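/*
 * Populate the memmap for a subsection-aligned pfn range and account the
 * vmemmap pages consumed, returning the first struct page or NULL on failure.
 */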
struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);
	int r;

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
			 !IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (vmemmap_can_optimize(altmap, pgmap))
		r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);
	else
		r = vmemmap_populate(start, end, nid, altmap);

	if (r < 0)
		return NULL;

	if (system_state == SYSTEM_BOOTING)
		memmap_boot_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));
	else
		memmap_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));

	return pfn_to_page(pfn);
}