// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/rcupdate.h>
#include <linux/pgalloc_tag.h>

/*
 * struct page extension
 *
 * This feature manages memory for extended data per page.
 *
 * Without it, storing extra data per page requires modifying struct page
 * itself, which means rebuilding the kernel - a time consuming process that
 * is sometimes impossible due to third party module dependencies. Worse,
 * enlarging struct page can change system behaviour in unwanted ways.
 *
 * This feature is intended to overcome the above problems. It allocates
 * memory for extended per-page data in a separate place rather than in
 * struct page itself. That memory is accessed through the accessor
 * functions provided by this code. During boot it checks whether allocating
 * this (potentially huge) chunk of memory is needed at all and, if not,
 * avoids the allocation entirely. Thanks to that, the feature can be built
 * into the kernel by default without forcing rebuilds or wasting memory.
 *
 * To make this work, clients provide two callbacks. The need callback is
 * mandatory if the client wants to avoid useless memory allocation at boot
 * time. The init callback is optional and performs proper initialization
 * after the memory has been allocated.
 *
 * The need callback decides whether extended memory must be allocated.
 * Sometimes a feature is deactivated for a given boot and its extra memory
 * would be unnecessary. To avoid allocating a huge chunk of memory in that
 * case, each client reports its need for extra memory through the need
 * callback. If any of the need callbacks returns true, someone needs extra
 * memory and the page extension core allocates it. If none of them returns
 * true, no memory is needed for this boot and the page extension core skips
 * the allocation, so no memory is wasted.
 *
 * When a need callback returns true, page_ext checks whether extra space
 * was requested through the size field in struct page_ext_operations. If it
 * is non-zero, that much extra space is reserved in each page_ext entry and
 * its location is reported back to the client through the offset field in
 * struct page_ext_operations.
 *
 * The init callback performs proper initialization once page extension is
 * completely set up. On sparse memory systems the extra memory is allocated
 * some time after the memmap, i.e. the lifetime of page extension memory is
 * not the same as that of the memmap for struct page. Clients therefore
 * cannot store extra data until page extension is initialized, even though
 * pages may already be allocated and freely used. This could leave the
 * per-page extra data in an inadequate state, so clients can use this
 * callback to initialize it correctly.
 */

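/*
 * Illustrative sketch of a hypothetical client (not part of this file;
 * "foo" and its members are made-up names). The client declares its
 * struct page_ext_operations, optionally requesting extra space through
 * @size, and later reaches its data behind a page_ext via the @offset
 * chosen by invoke_need_callbacks():
 *
 *	static bool need_foo(void)
 *	{
 *		return foo_enabled;
 *	}
 *
 *	static void init_foo(void)
 *	{
 *		...initialize per-page state once page_ext is ready...
 *	}
 *
 *	struct page_ext_operations foo_ops = {
 *		.size = sizeof(struct foo_data),
 *		.need = need_foo,
 *		.init = init_foo,
 *	};
 *
 *	static struct foo_data *get_foo_data(struct page_ext *page_ext)
 *	{
 *		return (void *)page_ext + foo_ops.offset;
 *	}
 */
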
#ifdef CONFIG_SPARSEMEM
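/*
 * The low bit of mem_section->page_ext is used to tag a section's page_ext
 * array as invalid while it is being torn down during memory hot-remove
 * (see __invalidate_page_ext() and page_ext_invalid() below), so lookups
 * racing with the teardown return NULL instead of a stale pointer.
 */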
#define PAGE_EXT_INVALID       (0x1)
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
static bool need_page_idle(void)
{
	return true;
}
static struct page_ext_operations page_idle_ops __initdata = {
	.need = need_page_idle,
	.need_shared_flags = true,
};
#endif

static struct page_ext_operations *page_ext_ops[] __initdata = {
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
	&page_alloc_tagging_ops,
#endif
#ifdef CONFIG_PAGE_TABLE_CHECK
	&page_table_check_ops,
#endif
};

unsigned long page_ext_size;

static unsigned long total_usage;

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
/*
 * To ensure correct allocation tagging for pages, page_ext should be available
 * before the first page allocation. Otherwise early task stacks will be
 * allocated before page_ext initialization and missing tags will be flagged.
 */
bool early_page_ext __meminitdata = true;
#else
bool early_page_ext __meminitdata;
#endif
static int __init setup_early_page_ext(char *str)
{
	early_page_ext = true;
	return 0;
}
early_param("early_page_ext", setup_early_page_ext);

static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

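	/*
	 * Lay out one page_ext entry. First, if any enabled client needs the
	 * shared page flags (need_shared_flags), reserve room for struct
	 * page_ext itself at the start of every entry. Then give each enabled
	 * client its own chunk of @size bytes and record where it starts in
	 * @offset. E.g. with the shared flags needed and two clients of sizes
	 * a and b, page_ext_size ends up sizeof(struct page_ext) + a + b and
	 * the clients' offsets are sizeof(struct page_ext) and
	 * sizeof(struct page_ext) + a respectively.
	 */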
	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need()) {
			if (page_ext_ops[i]->need_shared_flags) {
				page_ext_size = sizeof(struct page_ext);
				break;
			}
		}
	}

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = page_ext_size;
			page_ext_size += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}

static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}

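/*
 * page_ext entries for consecutive pfns are laid out back to back as
 * page_ext_size-byte records, so entry @index lives at
 * base + page_ext_size * index.
 */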
static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + page_ext_size * index;
}

#ifndef CONFIG_SPARSEMEM
void __init page_ext_init_flatmem_late(void)
{
	invoke_init_callbacks();
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

static struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	WARN_ON_ONCE(!rcu_read_lock_held());
	base = NODE_DATA(page_to_nid(page))->node_page_ext;
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}

static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Extra space is needed if the node range is not aligned to
	 * MAX_ORDER_NR_PAGES: when the page allocator's buddy algorithm
	 * checks a buddy's status, it may look at pfns just outside the
	 * exact node range (lookup_page_ext() indexes from node_start_pfn
	 * rounded down to MAX_ORDER_NR_PAGES), so pad the table by one
	 * MAX_ORDER block.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = page_ext_size * nr_pages;

	base = memblock_alloc_try_nid(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	memmap_boot_pages_add(DIV_ROUND_UP(table_size, PAGE_SIZE));
	return 0;
}

void __init page_ext_init_flatmem(void)
{

	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid)  {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */
static bool page_ext_invalid(struct page_ext *page_ext)
{
	return !page_ext || (((unsigned long)page_ext & PAGE_EXT_INVALID) == PAGE_EXT_INVALID);
}

static struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
	struct page_ext *page_ext = READ_ONCE(section->page_ext);

	WARN_ON_ONCE(!rcu_read_lock_held());
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (page_ext_invalid(page_ext))
		return NULL;
	return get_entry(page_ext, pfn);
}

static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr)
		kmemleak_alloc(addr, size, 1, flags);
	else
		addr = vzalloc_node(size, nid);

	if (addr)
		memmap_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));

	return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = page_ext_size * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - section start pfn *
	 * page_ext_size), which does not point to the memory block allocated
	 * above and would otherwise cause kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION, so mask it down to
	 * the section start. Storing base biased by the section start pfn
	 * lets get_entry(section->page_ext, pfn) resolve directly for any
	 * pfn in the section.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - page_ext_size * pfn;
	total_usage += table_size;
	return 0;
}

static void free_page_ext(void *addr)
{
	size_t table_size;
	struct page *page;

	table_size = page_ext_size * PAGES_PER_SECTION;
	memmap_pages_add(-1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));

	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		page = virt_to_page(addr);
		BUG_ON(PageReserved(page));
		kmemleak_free(addr);
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;

	base = READ_ONCE(ms->page_ext);
	/*
	 * page_ext here can still be valid (i.e. not tagged with
	 * PAGE_EXT_INVALID) when called from the rollback path in
	 * online_page_ext().
	 */
	if (page_ext_invalid(base))
		base = (void *)base - PAGE_EXT_INVALID;
	WRITE_ONCE(ms->page_ext, NULL);

	base = get_entry(base, pfn);
	free_page_ext(base);
}

static void __invalidate_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	void *val;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	val = (void *)ms->page_ext + PAGE_EXT_INVALID;
	WRITE_ONCE(ms->page_ext, val);
}

static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == NUMA_NO_NODE) {
		/*
		 * In this case the node already exists and contains valid
		 * memory. "start_pfn" passed to us is the pfn that was handed
		 * to online_pages(), so it must belong to an existing node.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_online(nid));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
		fail = init_section_page_ext(pfn, nid);
	if (!fail)
		return 0;

	/* rollback */
	end = pfn - PAGES_PER_SECTION;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}

static void __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	/*
	 * Freeing of page_ext is done in 3 steps to avoid
	 * use-after-free of it:
	 * 1) Traverse all the sections and mark their page_ext
	 *    as invalid.
	 * 2) Wait for all the existing users of page_ext who
	 *    started before invalidation to finish.
	 * 3) Free the page_ext.
	 *
	 * Readers access page_ext via page_ext_get()/page_ext_put(), which
	 * hold rcu_read_lock() for the duration of the access, so the
	 * synchronize_rcu() below provides step 2.
	 */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__invalidate_page_ext(pfn);

	synchronize_rcu();

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
}

static int __meminit page_ext_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of pages outside the node are not initialized.
		 * So we scan [start_pfn, the biggest section's pfn < end_pfn)
		 * here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfn ranges can overlap.
			 * We know some architectures can have a node layout
			 * such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
			cond_resched();
		}
	}
	hotplug_memory_notifier(page_ext_callback, DEFAULT_CALLBACK_PRI);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif

/**
 * page_ext_get() - Get the extended information for a page.
 * @page: The page we're interested in.
 *
 * Ensures that the page_ext will remain valid until page_ext_put()
 * is called.
 *
 * Return: The page's page_ext, or NULL if no page_ext exists for this page.
 * Context: Any context.  Caller may not sleep until they have called
 * page_ext_put().
 */
struct page_ext *page_ext_get(const struct page *page)
{
	struct page_ext *page_ext;

	rcu_read_lock();
	page_ext = lookup_page_ext(page);
	if (!page_ext) {
		rcu_read_unlock();
		return NULL;
	}

	return page_ext;
}

/**
 * page_ext_put() - Done working with a page's extended information.
 * @page_ext: Page extended information received from page_ext_get().
 *
 * The page's extended information may no longer be valid after this
 * function is called.
 *
 * Return: None.
 * Context: Any context in which the corresponding page_ext_get() was called.
 */
void page_ext_put(struct page_ext *page_ext)
{
	if (unlikely(!page_ext))
		return;

	rcu_read_unlock();
}
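
/*
 * Typical usage of the accessors above (an illustrative sketch; the data
 * reached behind page_ext is whatever the calling client registered):
 *
 *	struct page_ext *page_ext = page_ext_get(page);
 *
 *	if (page_ext) {
 *		...read or update the client's data behind page_ext...
 *		page_ext_put(page_ext);
 *	}
 */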