// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <kunit/visibility.h>

#include "internal.h"
#include "swap.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
noinline
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in .rodata section, otherwise
 * fall back to kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
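
/*
 * Example (illustrative sketch, not part of this file): pairing
 * kstrdup_const() with kfree_const() so that string literals living in
 * .rodata are neither copied nor freed. The struct and helpers below are
 * hypothetical.
 *
 *	struct widget {
 *		const char *name;
 *	};
 *
 *	static int widget_set_name(struct widget *w, const char *name)
 *	{
 *		w->name = kstrdup_const(name, GFP_KERNEL);
 *		return w->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void widget_release(struct widget *w)
 *	{
 *		kfree_const(w->name);
 *	}
 */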

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup_noprof);

/**
 * kmemdup_array - duplicate a given array.
 *
 * @src: array to duplicate.
 * @count: number of elements to duplicate from array.
 * @element_size: size of each element of array.
 * @gfp: GFP mask to use.
 *
 * Return: duplicated array of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
{
	return kmemdup(src, size_mul(element_size, count), gfp);
}
EXPORT_SYMBOL(kmemdup_array);
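
/*
 * Example (illustrative sketch, not part of this file): duplicating a
 * caller-supplied table with kmemdup_array(). The size_mul() above
 * saturates on overflow, so an overflowing count * element_size makes the
 * allocation fail rather than under-allocate. The variable names are
 * hypothetical.
 *
 *	u32 *copy;
 *
 *	copy = kmemdup_array(table, nr_entries, sizeof(*table), GFP_KERNEL);
 *	if (!copy)
 *		return -ENOMEM;
 *	...
 *	kfree(copy);
 */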

/**
 * kvmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result may not be physically contiguous. Use kvfree() to free.
 */
void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kvmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kvmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

static kmem_buckets *user_buckets __ro_after_init;

static int __init init_user_buckets(void)
{
	user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL);

	return 0;
}
subsys_initcall(init_user_buckets);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
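
/*
 * Example (illustrative sketch, not part of this file): copying a
 * fixed-size argument structure from user space in an ioctl handler and
 * checking the ERR_PTR() return convention. The struct name and user
 * pointer are hypothetical.
 *
 *	struct foo_args *args;
 *
 *	args = memdup_user(uarg, sizeof(*args));
 *	if (IS_ERR(args))
 *		return PTR_ERR(args);
 *	...
 *	kfree(args);
 */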

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_valloc(user_buckets, len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
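
/*
 * Example (illustrative sketch, not part of this file): copying a
 * NUL-terminated path from user space while bounding its length. The
 * variable names and the PATH_MAX bound are illustrative choices, not a
 * requirement of the API.
 *
 *	char *path;
 *
 *	path = strndup_user(user_path, PATH_MAX);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...
 *	kfree(path);
 */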

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
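
/*
 * Example (illustrative sketch, not part of this file): a procfs/sysfs
 * style write handler that needs the user buffer as a C string. The
 * handler name and arguments are hypothetical.
 *
 *	static ssize_t foo_write(struct file *file, const char __user *ubuf,
 *				 size_t count, loff_t *ppos)
 *	{
 *		char *kbuf;
 *
 *		kbuf = memdup_user_nul(ubuf, count);
 *		if (IS_ERR(kbuf))
 *			return PTR_ERR(kbuf);
 *		...
 *		kfree(kbuf);
 *		return count;
 *	}
 */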

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32-bit? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* On parisc the stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout. */
	if (rlim_stack->rlim_cur == RLIM_INFINITY &&
		!IS_ENABLED(CONFIG_STACK_GROWSUP))
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
#ifdef CONFIG_STACK_GROWSUP
	/*
	 * For an upwards growing stack the calculation is much simpler.
	 * Memory for the maximum stack size is reserved at the top of the
	 * task. mmap_base starts directly below the stack and grows
	 * downwards.
	 */
	return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
#else
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
#endif
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		clear_bit(MMF_TOPDOWN, &mm->flags);
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		set_bit(MMF_TOPDOWN, &mm->flags);
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	clear_bit(MMF_TOPDOWN, &mm->flags);
}
#endif
#ifdef CONFIG_MMU
EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
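
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * pins user pages charges them against RLIMIT_MEMLOCK before pinning and
 * un-charges them on teardown. The surrounding pin/unpin code is assumed,
 * not shown.
 *
 *	ret = account_locked_vm(current->mm, nr_pages, true);
 *	if (ret)
 *		return ret;
 *	...
 *	account_locked_vm(current->mm, nr_pages, false);
 */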

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
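
/*
 * Example (illustrative sketch, not part of this file): mapping the first
 * megabyte of an already-opened file read-only from kernel code, checking
 * the result with IS_ERR_VALUE() since the return is an unsigned address.
 * The file pointer and size are hypothetical.
 *
 *	unsigned long addr;
 *
 *	addr = vm_mmap(filp, 0, SZ_1M, PROT_READ, MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (long)addr;
 *	...
 *	vm_munmap(addr, SZ_1M);
 */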

static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
{
	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long-term fragmentation than the vmalloc
	 * fallback. However, make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings, as we
	 * have a fallback.
	 */
	if (size > PAGE_SIZE) {
		flags |= __GFP_NOWARN;

		if (!(flags & __GFP_RETRY_MAYFAIL))
			flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		flags &= ~__GFP_NOFAIL;
	}

	return flags;
}

/**
 * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @b: which set of kmalloc buckets to allocate from.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
{
	void *ret;

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b),
				    kmalloc_gfp_adjust(flags, size),
				    node);
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* non-sleeping allocations are not supported by vmalloc */
	if (!gfpflags_allow_blocking(flags))
		return NULL;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kvmalloc_node_noprof);
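
/*
 * Example (illustrative sketch, not part of this file): callers normally
 * use the kvmalloc()/kvmalloc_array() wrappers from <linux/slab.h> rather
 * than this function directly, and release the buffer with kvfree()
 * because it may be either slab or vmalloc memory. The table below is
 * hypothetical.
 *
 *	struct entry *table;
 *
 *	table = kvmalloc_array(nr_entries, sizeof(*table), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	kvfree(table);
 */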

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);
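
/*
 * Example (illustrative sketch, not part of this file): releasing a
 * buffer that held key material, so its contents are wiped before the
 * memory is returned. The variable names are hypothetical.
 *
 *	u8 *keybuf = kvmalloc(key_len, GFP_KERNEL);
 *
 *	if (!keybuf)
 *		return -ENOMEM;
 *	...
 *	kvfree_sensitive(keybuf, key_len);
 */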

/**
 * kvrealloc - reallocate memory; contents remain unchanged
 * @p: object to reallocate memory for
 * @size: the size to reallocate
 * @flags: the flags for the page level allocator
 *
 * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
 * and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
 * initial memory allocation, every subsequent call to this API for the same
 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
 * __GFP_ZERO is not fully honored by this API.
 *
 * In any case, the contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.
 *
 * This function must not be called concurrently with itself or kvfree() for the
 * same memory allocation.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
{
	void *n;

	if (is_vmalloc_addr(p))
		return vrealloc_noprof(p, size, flags);

	n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
	if (!n) {
		/* We failed to krealloc(), fall back to kvmalloc(). */
		n = kvmalloc_noprof(size, flags);
		if (!n)
			return NULL;

		if (p) {
			/* We already know that `p` is not a vmalloc address. */
			kasan_disable_current();
			memcpy(n, kasan_reset_tag(p), ksize(p));
			kasan_enable_current();

			kfree(p);
		}
	}

	return n;
}
EXPORT_SYMBOL(kvrealloc_noprof);
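
/*
 * Example (illustrative sketch, not part of this file): growing a
 * kvmalloc()'ed buffer via the kvrealloc() wrapper, assigning to a
 * temporary so the old buffer is not leaked on failure. The names are
 * hypothetical.
 *
 *	void *bigger;
 *
 *	bigger = kvrealloc(buf, new_size, GFP_KERNEL);
 *	if (!bigger)
 *		return -ENOMEM;	/* buf is still valid here */
 *	buf = bigger;
 */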

/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc_noprof(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array_noprof);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array_noprof);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc_noprof);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc_noprof);
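
/*
 * Example (illustrative sketch, not part of this file): allocating a
 * zeroed, overflow-checked array that only needs to be virtually
 * contiguous, then freeing it with vfree(). The element type and count
 * are hypothetical.
 *
 *	struct record *recs;
 *
 *	recs = vcalloc(nr_records, sizeof(*recs));
 *	if (!recs)
 *		return -ENOMEM;
 *	...
 *	vfree(recs);
 */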

struct anon_vma *folio_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on a slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio->swap);

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	return mapping;
}
EXPORT_SYMBOL(folio_mapping);

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}
EXPORT_SYMBOL(folio_copy);

int folio_mc_copy(struct folio *dst, struct folio *src)
{
	long nr = folio_nr_pages(src);
	long i = 0;

	for (;;) {
		if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i)))
			return -EHWPOISON;
		if (++i == nr)
			break;
		cond_resched();
	}

	return 0;
}
EXPORT_SYMBOL(folio_mc_copy);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with a loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing the policy to
	 * the strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition (even
	 * though users usually won't switch to OVERCOMMIT_NEVER very often),
	 * the switch is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as is in its own cacheline, not shared with other
 * variables, as it can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;
	unsigned long bytes_failed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	bytes_failed = pages << PAGE_SHIFT;
	pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n",
			    __func__, current->pid, current->comm, bytes_failed);
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_local_page(page1);
	addr2 = kmap_local_page(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_local(addr2);
	kunmap_local(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_dump_obj(object))
		return;

	if (vmalloc_dump_obj(object))
		return;

	if (is_vmalloc_addr(object))
		type = "vmalloc memory";
	else if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);

#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif