Lines matching "non-contiguous" (excerpts from mm/util.c)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/elf-randomize.h>
 * kfree_const - conditionally free memory
 * kstrdup - allocate space for and copy an existing string
 * kstrdup_const - conditionally duplicate an existing const string
 * kstrndup - allocate space for and copy an existing string
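/*
 * Hedged example (not part of mm/util.c): a minimal sketch of the
 * kstrdup_const()/kfree_const() contract named in the one-liners above.
 * "struct widget" and widget_set_label() are hypothetical names.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

struct widget {
        const char *label;      /* may point into .rodata or at a dup'd copy */
};

static int widget_set_label(struct widget *w, const char *label)
{
        /* kstrdup_const() skips the copy when @label lives in .rodata */
        const char *dup = kstrdup_const(label, GFP_KERNEL);

        if (!dup)
                return -ENOMEM;
        kfree_const(w->label);  /* frees only if it was actually duplicated */
        w->label = dup;
        return 0;
}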
 * kmemdup - duplicate region of memory
 * result is physically contiguous. Use kfree() to free.
 * kmemdup_array - duplicate a given array.
 * result is physically contiguous. Use kfree() to free.
 * kvmemdup - duplicate region of memory
 * result may not be physically contiguous. Use kvfree() to free.
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
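/*
 * Hedged example: choosing between the duplicators above. kmemdup() is
 * kfree()-backed and physically contiguous; kvmemdup() (present in
 * recent kernels) may fall back to vmalloc and pairs with kvfree().
 * snapshot_blob() is a hypothetical name.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

static void *snapshot_blob(const void *src, size_t len)
{
        if (len <= PAGE_SIZE)
                return kmemdup(src, len, GFP_KERNEL);  /* physically contiguous */
        return kvmemdup(src, len, GFP_KERNEL);         /* may be vmalloc-backed */
}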
 * memdup_user - duplicate memory region from user space
 * contiguous, to be freed by kfree().
        return ERR_PTR(-ENOMEM); in memdup_user()
        return ERR_PTR(-EFAULT); in memdup_user()
 * vmemdup_user - duplicate memory region from user space
 * physically contiguous. Use kvfree() to free.
        return ERR_PTR(-ENOMEM); in vmemdup_user()
        return ERR_PTR(-EFAULT); in vmemdup_user()
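/*
 * Hedged example: the usual ioctl-style pattern around memdup_user().
 * The copy is kmalloc-backed (kfree() to free); vmemdup_user() is the
 * kvfree() analogue for potentially large buffers. foo_ioctl_set() is
 * a hypothetical name.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static long foo_ioctl_set(void __user *uptr, size_t len)
{
        void *buf = memdup_user(uptr, len);

        if (IS_ERR(buf))
                return PTR_ERR(buf);    /* -ENOMEM or -EFAULT, as above */
        /* ... consume buf ... */
        kfree(buf);
        return 0;
}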
 * strndup_user - duplicate an existing string from user space
        return ERR_PTR(-EFAULT); in strndup_user()
        return ERR_PTR(-EINVAL); in strndup_user()
        p[length - 1] = '\0'; in strndup_user()
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
        return ERR_PTR(-ENOMEM); in memdup_user_nul()
        return ERR_PTR(-EFAULT); in memdup_user_nul()
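/*
 * Hedged example: copying a bounded, NUL-terminated string from user
 * space; the bound stops a hostile caller from pinning arbitrary
 * amounts of kernel memory. foo_set_path() is a hypothetical name.
 */
#include <linux/err.h>
#include <linux/limits.h>
#include <linux/slab.h>
#include <linux/string.h>

static int foo_set_path(const char __user *upath)
{
        char *path = strndup_user(upath, PATH_MAX);

        if (IS_ERR(path))
                return PTR_ERR(path);   /* -EFAULT, -EINVAL or -ENOMEM */
        /* ... use path ... */
        kfree(path);
        return 0;
}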
        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); in vma_is_stack_for_current()
        swap(vma->vm_file, file); in vma_set_file()
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))    /* 8MB of VA */
        if (current->flags & PF_RANDOMIZE) { in randomize_stack_top()
        return PAGE_ALIGN(stack_top) - random_variable; in randomize_stack_top()
 * randomize_page - Generate a random, page aligned address
        range -= PAGE_ALIGN(start) - start; in randomize_page()
        if (start > ULONG_MAX - range) in randomize_page()
                range = ULONG_MAX - start; in randomize_page()
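/*
 * Hedged illustration of the two adjustments above (standalone sketch,
 * assuming 4 KiB pages): aligning @start up eats into the usable range,
 * and the bound check is phrased start > ULONG_MAX - range, rather than
 * start + range > ULONG_MAX, so the check itself cannot overflow.
 */
#include <linux/limits.h>
#include <linux/mm.h>

static unsigned long clamped_range(unsigned long start, unsigned long range)
{
        /* e.g. start = 0x10000100, range = 0x2000: alignment costs 0xf00 */
        range -= PAGE_ALIGN(start) - start;
        if (start > ULONG_MAX - range)          /* overflow-safe comparison */
                range = ULONG_MAX - start;
        return range;                           /* 0x1100 in the example */
}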
                return randomize_page(mm->brk, SZ_32M); in arch_randomize_brk()
        return randomize_page(mm->brk, SZ_1G); in arch_randomize_brk()
        rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1); in arch_mmap_rnd()
        rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1); in arch_mmap_rnd()
        if (current->personality & ADDR_COMPAT_LAYOUT) in mmap_is_legacy()
        /* On parisc the stack always grows up - so an unlimited stack should
         * not be an indicator to use the legacy memory layout. */ in mmap_is_legacy()
        if (rlim_stack->rlim_cur == RLIM_INFINITY && in mmap_is_legacy()
        return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd); in mmap_base()
        unsigned long gap = rlim_stack->rlim_cur; in mmap_base()
        if (current->flags & PF_RANDOMIZE) in mmap_base()
        return PAGE_ALIGN(STACK_TOP - gap - rnd); in mmap_base()
        if (current->flags & PF_RANDOMIZE) in arch_pick_mmap_layout()
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; in arch_pick_mmap_layout()
                clear_bit(MMF_TOPDOWN, &mm->flags); in arch_pick_mmap_layout()
                mm->mmap_base = mmap_base(random_factor, rlim_stack); in arch_pick_mmap_layout()
                set_bit(MMF_TOPDOWN, &mm->flags); in arch_pick_mmap_layout()
        mm->mmap_base = TASK_UNMAPPED_BASE; in arch_pick_mmap_layout()
        clear_bit(MMF_TOPDOWN, &mm->flags); in arch_pick_mmap_layout()
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
        locked_vm = mm->locked_vm; in __account_locked_vm()
                        ret = -ENOMEM; in __account_locked_vm()
                        mm->locked_vm = locked_vm + pages; in __account_locked_vm()
                mm->locked_vm = locked_vm - pages; in __account_locked_vm()
        pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid, in __account_locked_vm()
                 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT, in __account_locked_vm()
                 ret ? " - exceeded" : ""); in __account_locked_vm()
 * account_locked_vm - account locked pages to an mm's locked_vm
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
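/*
 * Hedged example: a driver that pins user pages typically charges them
 * against RLIMIT_MEMLOCK up front and uncharges on teardown. The foo_*
 * names are hypothetical; the pinning itself is elided.
 */
#include <linux/mm.h>
#include <linux/sched.h>

static int foo_charge_pinned(unsigned long npages)
{
        int ret;

        /* may fail with -ENOMEM if RLIMIT_MEMLOCK would be exceeded */
        ret = account_locked_vm(current->mm, npages, true);
        if (ret)
                return ret;
        /* ... pin_user_pages() and friends ... */
        return 0;
}

static void foo_uncharge_pinned(unsigned long npages)
{
        account_locked_vm(current->mm, npages, false);
}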
        struct mm_struct *mm = current->mm; in vm_mmap_pgoff()
                return -EINTR; in vm_mmap_pgoff()
                return -EINVAL; in vm_mmap()
                return -EINVAL; in vm_mmap()
         * We want to attempt a large physically contiguous block first because in kmalloc_gfp_adjust()
         * However make sure that larger requests are not too disruptive - no in kmalloc_gfp_adjust()
         * OOM killer and no allocation failure warnings as we have a fallback.
 * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
        /* non-sleeping allocations are not supported by vmalloc */ in __kvmalloc_node_noprof()
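/*
 * Hedged example: kvmalloc_array() tries kmalloc first and falls back
 * to vmalloc, which is why (per the comment above) it needs a sleepable
 * gfp mask such as GFP_KERNEL. alloc_big_table() is a hypothetical name.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>

static u32 *alloc_big_table(size_t nentries)
{
        /* overflow-checked n * size; NULL on failure */
        u32 *tbl = kvmalloc_array(nentries, sizeof(*tbl), GFP_KERNEL);

        /* whichever path succeeded, kvfree() is the matching free */
        return tbl;
}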
 * kvfree() - Free memory.
 * Context: Either preemptible task context or non-NMI interrupt.
 * kvfree_sensitive - Free a data object containing sensitive information.
 * kvrealloc - reallocate memory; contents remain unchanged
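/*
 * Hedged example: buffers holding key material should go through
 * kvfree_sensitive(), which zeroes the memory before freeing it,
 * whether it came from kmalloc or vmalloc. struct foo_key and
 * foo_key_destroy() are hypothetical names.
 */
#include <linux/mm.h>
#include <linux/slab.h>

struct foo_key {
        void *data;
        size_t len;
};

static void foo_key_destroy(struct foo_key *key)
{
        kvfree_sensitive(key->data, key->len);  /* zero, then kvfree() */
        key->data = NULL;
        key->len = 0;
}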
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * vcalloc - allocate and zero memory for a virtually contiguous array.
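/*
 * Hedged example: vcalloc() is the overflow-checked, zero-initializing
 * way to get a virtually contiguous array; vfree() releases it.
 * struct foo_slot and alloc_slots() are hypothetical names.
 */
#include <linux/types.h>
#include <linux/vmalloc.h>

struct foo_slot {
        u64 key;
        u64 value;
};

static struct foo_slot *alloc_slots(size_t n)
{
        /* NULL if n * sizeof(struct foo_slot) overflows or allocation fails */
        return vcalloc(n, sizeof(struct foo_slot));
}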
        unsigned long mapping = (unsigned long)folio->mapping; in folio_anon_vma()
        return (void *)(mapping - PAGE_MAPPING_ANON); in folio_anon_vma()
 * folio_mapping - Find the mapping where this folio is stored.
                return swap_address_space(folio->swap); in folio_mapping()
        mapping = folio->mapping; in folio_mapping()
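/*
 * Hedged sketch (illustrative only, not a kernel helper): per the code
 * above, folio_mapping() returns NULL for anonymous folios and the swap
 * address space for folios in the swap cache, so it can serve as a
 * coarse filter for file-backed folios.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>

static bool foo_folio_is_file_backed(struct folio *folio)
{
        return folio_mapping(folio) && !folio_test_swapcache(folio);
}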
 * folio_copy - Copy the contents of one folio to another.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
                return -EHWPOISON; in folio_mc_copy()
        int new_policy = -1; in overcommit_policy_handler()
        if (ret || new_policy == -1) in overcommit_policy_handler()
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10); in vm_commit_limit()
                allowed = ((totalram_pages() - hugetlb_total_pages()) in vm_commit_limit()
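/*
 * Hedged illustration of the shift above (standalone sketch, not kernel
 * API): the sysctl is in kbytes and the accounting is in pages, so
 * kbytes >> (PAGE_SHIFT - 10) divides by PAGE_SIZE / 1024.
 */
#include <linux/mm.h>

static inline unsigned long foo_kbytes_to_pages(unsigned long kbytes)
{
        /* with 4 KiB pages (PAGE_SHIFT = 12): 8192 KiB >> 2 == 2048 pages */
        return kbytes >> (PAGE_SHIFT - 10);
}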
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * adding or removing memory based on the guest memory commitment.
 * succeed and -ENOMEM implies there is not.
 * vm.overcommit_memory sysctl. See Documentation/mm/overcommit-accounting.rst
                allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10); in __vm_enough_memory()
                long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10); in __vm_enough_memory()
                allowed -= min_t(long, mm->total_vm / 32, reserve); in __vm_enough_memory()
                 __func__, current->pid, current->comm, bytes_failed); in __vm_enough_memory()
        return -ENOMEM; in __vm_enough_memory()
 * get_cmdline() - copy the cmdline value to a buffer.
        if (!mm->arg_end) in get_cmdline()
        spin_lock(&mm->arg_lock); in get_cmdline()
        arg_start = mm->arg_start; in get_cmdline()
        arg_end = mm->arg_end; in get_cmdline()
        env_start = mm->env_start; in get_cmdline()
        env_end = mm->env_end; in get_cmdline()
        spin_unlock(&mm->arg_lock); in get_cmdline()
        len = arg_end - arg_start; in get_cmdline()
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) { in get_cmdline()
                len = env_end - env_start; in get_cmdline()
                if (len > buflen - res) in get_cmdline()
                        len = buflen - res; in get_cmdline()
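/*
 * Hedged example: get_cmdline() returns the number of bytes copied and,
 * as the check above shows, does not guarantee NUL termination. The
 * arguments in the buffer are separated by embedded NULs, so %s prints
 * only the first one. foo_log_cmdline() is a hypothetical name.
 */
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>

static void foo_log_cmdline(struct task_struct *task)
{
        char buf[128];
        int n = get_cmdline(task, buf, sizeof(buf) - 1);

        buf[n] = '\0';
        pr_info("cmdline of %d starts with: %s\n", task->pid, buf);
}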
 * mem_dump_obj - Print available provenance information
 * For example, for a slab-cache object, the slab name is printed, and,
                type = "non-slab/vmalloc memory"; in mem_dump_obj()
                type = "zero-size pointer"; in mem_dump_obj()
                type = "non-paged memory"; in mem_dump_obj()
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline().