Lines Matching +full:async +full:- +full:prefix
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
19 #include <linux/tracepoint-defs.h>
70 #define FOLIO_PAGES_MAPPED (ENTIRELY_MAPPED - 1)
86 return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED; in folio_nr_pages_mapped()
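FOLIO_PAGES_MAPPED is defined a few lines up (line 70) as ENTIRELY_MAPPED - 1, so as long as ENTIRELY_MAPPED is a single bit sitting above the largest possible per-page count, the AND above strips that flag bit and leaves only the number of individually mapped pages. A minimal userspace sketch of the same bit trick; the 0x800000 value mirrors recent mm/internal.h and should be treated as an assumption:

    /* masking off ENTIRELY_MAPPED keeps only the per-page mapcount */
    #include <stdio.h>

    #define ENTIRELY_MAPPED     0x800000              /* single flag bit (assumed value) */
    #define FOLIO_PAGES_MAPPED  (ENTIRELY_MAPPED - 1) /* mask of all the lower bits */

    int main(void)
    {
            /* e.g. an entirely-mapped folio that also has 3 pages PTE-mapped */
            int nr_pages_mapped = ENTIRELY_MAPPED + 3;

            printf("%d\n", nr_pages_mapped & FOLIO_PAGES_MAPPED);  /* prints 3 */
            return 0;
    }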
91 * folio. We cannot rely on folio->swap as there is no guarantee that it has
106 unsigned long mapping = (unsigned long)folio->mapping; in folio_raw_mapping()
112 * This is a file-backed mapping, and is about to be memory mapped - invoke its
133 vma->vm_ops = &vma_dummy_vm_ops; in mmap_file()
145 if (vma->vm_ops && vma->vm_ops->close) { in vma_close()
146 vma->vm_ops->close(vma); in vma_close()
152 vma->vm_ops = &vma_dummy_vm_ops; in vma_close()
164 /* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
177 * folio_pte_batch - detect a PTE batch for a large folio
196 * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
261 return min(ptep - start_ptep, max_nr); in folio_pte_batch()
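The return on line 261 reports how many PTEs the scan consumed, capped at max_nr. A rough userspace analogue of that batching idea, not the kernel helper itself (the struct and function below are invented for illustration): start from the first entry, keep walking while the next entry maps the next consecutive frame, and return the distance covered. The kernel version additionally normalizes bits such as soft-dirty before comparing, per the FPB_IGNORE_SOFT_DIRTY flag mentioned above.

    #include <stddef.h>

    /* hypothetical stand-in for a PTE: just the page frame number it maps */
    struct fake_pte { unsigned long pfn; };

    /*
     * Count how many consecutive entries map consecutive frames, looking at no
     * more than max_nr entries -- the same pointer-difference shape as
     * "return min(ptep - start_ptep, max_nr)".
     * Precondition: max_nr >= 1 and start[0] is known to belong to the folio.
     */
    static size_t batch_consecutive(const struct fake_pte *start, size_t max_nr)
    {
            size_t i;

            for (i = 1; i < max_nr; i++)
                    if (start[i].pfn != start[0].pfn + i)
                            break;
            return i;
    }

For frames {100, 101, 102, 200} and max_nr = 4 this returns 3: the first three PTEs can be handled as one batch.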
265 * pte_move_swp_offset - Move the swap entry offset field of a swap pte
293 * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
306 * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
311 * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
344 return ptep - start_ptep; in swap_pte_batch()
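swap_pte_batch() applies the same pattern to non-present PTEs: pte_next_swp_offset() (line 293) produces "the same swap PTE with offset + 1", and the scan counts how many entries in a row match that expectation before returning the pointer difference on line 344. A hedged userspace analogue; the entry layout and names below are invented:

    #include <stddef.h>

    /* hypothetical swap entry: a swap device (type) plus an offset within it */
    struct fake_swp_entry { unsigned int type; unsigned long offset; };

    /*
     * Count consecutive entries that stay on the same swap device and whose
     * offsets increase by exactly one per entry.
     * Precondition: max_nr >= 1 and start[0] is a valid swap entry.
     */
    static size_t swap_batch(const struct fake_swp_entry *start, size_t max_nr)
    {
            size_t i;

            for (i = 1; i < max_nr; i++)
                    if (start[i].type != start[0].type ||
                        start[i].offset != start[0].offset + i)
                            break;
            return i;
    }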
353 int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled); in acct_reclaim_writeback()
363 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED]; in wake_throttle_isolated()
374 vma_end_read(vmf->vma); in vmf_anon_prepare()
401 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index); in force_page_cache_readahead()
418 * folio_evictable - Test whether a folio is evictable.
421 * Test whether @folio is evictable -- i.e., should be placed on
441 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
452 * Return true if a folio needs ->release_folio() calling upon it.
485 #define K(x) ((x) << (PAGE_SHIFT-10))
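K(x) converts a page count to kibibytes: a page is 1 << PAGE_SHIFT bytes and a KiB is 1 << 10 bytes, so shifting left by PAGE_SHIFT - 10 does the whole conversion in one step. With 4 KiB pages (PAGE_SHIFT == 12) that is a shift by 2, so K(25) == 100. A trivial standalone check of the arithmetic, with the page size hard-coded as an assumption:

    #include <stdio.h>

    #define PAGE_SHIFT 12                         /* assume 4 KiB pages */
    #define K(x) ((x) << (PAGE_SHIFT-10))         /* pages -> KiB, as above */

    int main(void)
    {
            printf("%d pages = %d KiB\n", 25, K(25));   /* 25 pages = 100 KiB */
            return 0;
    }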
534 * general, page_zone(page)->lock must be held by the caller to prevent the
536 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
568 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
620 * function is used in the performance-critical __free_one_page().
636 buddy = page + (__buddy_pfn - pfn); in find_buddy_page_pfn()
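The buddy of an order-N block is found by flipping bit N of its PFN (__find_buddy_pfn() is pfn ^ (1 << order)); line 636 then adds the resulting PFN difference to the struct page pointer to reach the buddy's page. A standalone sketch of the PFN half of that calculation:

    #include <stdio.h>

    /* same XOR trick as __find_buddy_pfn() in mm/internal.h */
    static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
    {
            return pfn ^ (1UL << order);
    }

    int main(void)
    {
            /* an order-2 block at pfn 8 pairs with the one at pfn 12, and vice versa */
            printf("%lu %lu\n", find_buddy_pfn(8, 2), find_buddy_pfn(12, 2));  /* 12 8 */
            return 0;
    }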
651 if (zone->contiguous) in pageblock_pfn_to_page()
661 zone->contiguous = false; in clear_zone_contiguous()
674 * caller passes in a non-large folio.
681 folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order; in folio_set_order()
683 folio->_folio_nr_pages = 1U << order; in folio_set_order()
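folio_set_order() packs the order into the low byte of _flags_1, clearing that byte first with ~0xffUL so the other flag bits survive, and (where the separate counter field exists) caches the page count as 1 << order. A quick illustration of that encoding, using order 9, i.e. a 2 MiB folio with 4 KiB pages:

    #include <stdio.h>

    int main(void)
    {
            unsigned long flags_1 = 0xabcdef00UL;   /* arbitrary upper bits to preserve */
            unsigned int order = 9;

            /* same encoding as folio_set_order(): the order lives in the low byte */
            flags_1 = (flags_1 & ~0xffUL) | order;

            printf("order=%lu nr_pages=%u\n", flags_1 & 0xff, 1U << order);  /* 9, 512 */
            return 0;
    }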
698 if (data_race(list_empty(&folio->_deferred_list))) in folio_unqueue_deferred_split()
718 atomic_set(&folio->_large_mapcount, -1); in prep_compound_head()
719 atomic_set(&folio->_entire_mapcount, -1); in prep_compound_head()
720 atomic_set(&folio->_nr_pages_mapped, 0); in prep_compound_head()
721 atomic_set(&folio->_pincount, 0); in prep_compound_head()
723 INIT_LIST_HEAD(&folio->_deferred_list); in prep_compound_head()
730 p->mapping = TAIL_MAPPING; in prep_compound_tail()
794 enum migrate_mode mode; /* Async or sync migration mode */
840 return list_empty(&area->free_list[migratetype]); in free_area_empty()
881 if (start < vma->vm_start) in folio_within_range()
882 start = vma->vm_start; in folio_within_range()
884 if (end > vma->vm_end) in folio_within_range()
885 end = vma->vm_end; in folio_within_range()
890 if (!in_range(pgoff, vma->vm_pgoff, vma_pglen)) in folio_within_range()
893 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in folio_within_range()
895 return !(addr < start || end - addr < folio_size(folio)); in folio_within_range()
901 return folio_within_range(folio, vma, vma->vm_start, vma->vm_end); in folio_within_vma()
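folio_within_vma() is just folio_within_range() over the whole VMA. The core of the range check maps the folio's file offset back to a user address with vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT) and then requires the entire folio_size() to fit between start and end, as on line 895. A standalone rendition of that final test, with the page size hard-coded, the pgoff-range check and clamping omitted, and the comparison rearranged into an overflow-free form:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assume 4 KiB pages */

    /*
     * Does a folio of 'size' bytes, whose first page has file offset 'pgoff',
     * lie entirely inside [start, end) of a mapping that starts at vm_start
     * and maps the file from page vm_pgoff?
     */
    static bool folio_in_range(unsigned long pgoff, unsigned long size,
                               unsigned long vm_start, unsigned long vm_pgoff,
                               unsigned long start, unsigned long end)
    {
            unsigned long addr = vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT);

            return addr >= start && addr <= end && end - addr >= size;
    }

    int main(void)
    {
            /* a 64 KiB folio at file page 16, file mapped from page 0 at 0x100000 */
            printf("%d\n", folio_in_range(16, 0x10000, 0x100000, 0,
                                          0x100000, 0x200000));   /* prints 1 */
            return 0;
    }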
919 * 1) VM_IO check prevents migration from double-counting during mlock. in mlock_vma_folio()
922 * file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may in mlock_vma_folio()
925 if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED)) in mlock_vma_folio()
942 if (unlikely(vma->vm_flags & VM_LOCKED)) in munlock_vma_folio()
954 * vma_address - Find the virtual address a page range is mapped at
960 * where any of these pages appear. Otherwise, return -EFAULT.
967 if (pgoff >= vma->vm_pgoff) { in vma_address()
968 address = vma->vm_start + in vma_address()
969 ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in vma_address()
971 if (address < vma->vm_start || address >= vma->vm_end) in vma_address()
972 address = -EFAULT; in vma_address()
973 } else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) { in vma_address()
974 /* Test above avoids possibility of wrap to 0 on 32-bit */ in vma_address()
975 address = vma->vm_start; in vma_address()
977 address = -EFAULT; in vma_address()
988 struct vm_area_struct *vma = pvmw->vma; in vma_address_end()
992 /* Common case, plus ->pgoff is invalid for KSM */ in vma_address_end()
993 if (pvmw->nr_pages == 1) in vma_address_end()
994 return pvmw->address + PAGE_SIZE; in vma_address_end()
996 pgoff = pvmw->pgoff + pvmw->nr_pages; in vma_address_end()
997 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in vma_address_end()
999 if (address < vma->vm_start || address > vma->vm_end) in vma_address_end()
1000 address = vma->vm_end; in vma_address_end()
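vma_address() and vma_address_end() share the same offset arithmetic: file page pgoff lands at vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT) if that address is actually inside the VMA, and the "end" variant computes the address just past the last page of interest and clamps it to vm_end. A compact worked example of the forward mapping, with the page size hard-coded and a sentinel standing in for the -EFAULT return; this is an illustration, not the kernel code:

    #include <stdio.h>

    #define PAGE_SHIFT  12                          /* assume 4 KiB pages */
    #define ADDR_EFAULT ((unsigned long)-14)        /* sentinel for the -EFAULT case */

    /*
     * Where does file page 'pgoff' land in a VMA [vm_start, vm_end) that maps
     * its file starting at page vm_pgoff?
     */
    static unsigned long page_addr_in_vma(unsigned long pgoff, unsigned long vm_start,
                                          unsigned long vm_end, unsigned long vm_pgoff)
    {
            unsigned long addr;

            if (pgoff < vm_pgoff)
                    return ADDR_EFAULT;
            addr = vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT);
            return (addr < vm_start || addr >= vm_end) ? ADDR_EFAULT : addr;
    }

    int main(void)
    {
            /* a mapping at 0x700000000000 covering file pages [10, 26) */
            unsigned long start = 0x700000000000UL, end = start + (16UL << PAGE_SHIFT);

            printf("%#lx\n", page_addr_in_vma(12, start, end, 10));  /* start + 0x2000 */
            printf("%#lx\n", page_addr_in_vma(40, start, end, 10));  /* sentinel value */
            return 0;
    }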
1007 int flags = vmf->flags; in maybe_unlock_mmap_for_io()
1019 fpin = get_file(vmf->vma->vm_file); in maybe_unlock_mmap_for_io()
1052 #define mminit_dprintk(level, prefix, fmt, arg...) \
1056 pr_warn("mminit::" prefix " " fmt, ##arg); \
1058 printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
1067 const char *prefix, const char *fmt, ...) in mminit_dprintk()
1080 #define NODE_RECLAIM_NOSCAN -2
1081 #define NODE_RECLAIM_FULL -1
1101 * mm/memory-failure.c
1140 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1147 #define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
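The watermark selectors occupy the lowest flag values (min/low/high as 0, 1, 2) and ALLOC_NO_WATERMARKS is the next power of two, so ALLOC_NO_WATERMARKS - 1 is a mask that recovers the zone->watermark[] index from a full set of allocation flags. A small check of that masking; the 0x04 value is quoted from recent kernels and the extra flag bit below is made up:

    #include <stdio.h>

    enum { WMARK_MIN, WMARK_LOW, WMARK_HIGH };      /* watermark indices 0, 1, 2 */
    #define ALLOC_NO_WATERMARKS  0x04               /* value as in mm/internal.h */
    #define ALLOC_WMARK_MASK     (ALLOC_NO_WATERMARKS - 1)
    #define SOME_OTHER_FLAG      0x10               /* stands in for any non-watermark bit */

    int main(void)
    {
            unsigned int alloc_flags = WMARK_HIGH | SOME_OTHER_FLAG;

            printf("%u\n", alloc_flags & ALLOC_WMARK_MASK);   /* prints the index 2 */
            return 0;
    }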
1150 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
1246 return -EINVAL; in vmap_pages_range_noflush()
1284 /* we are working on non-current tsk/mm */
1288 /* gup_fast: prevent fall-back to slow gup */
1301 * Indicates for which pages that are write-protected in the page table,
1309 * * GUP-fast and fork(): mm->write_protect_seq
1310 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
1315 * PTE-mapped THP.
1317 * If the vma is NULL, we're coming from the GUP-fast path and might have
1325 * has to be writable -- and if it references (part of) an anonymous in gup_must_unshare()
1336 * We only care about R/O long-term pining: R/O short-term in gup_must_unshare()
1351 return is_cow_mapping(vma->vm_flags); in gup_must_unshare()
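The final answer on line 1351 hinges on whether the VMA is a copy-on-write mapping, i.e. private but write-capable. In the kernel, is_cow_mapping() tests that VM_MAYWRITE is set while VM_SHARED is clear; a minimal sketch of that predicate with illustrative flag values (the real VM_* constants live in include/linux/mm.h):

    #include <stdbool.h>
    #include <stdio.h>

    #define VM_SHARED   0x08    /* illustrative values */
    #define VM_MAYWRITE 0x20

    /* a COW mapping is one that may be written but is not shared */
    static bool is_cow_mapping(unsigned long flags)
    {
            return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
    }

    int main(void)
    {
            printf("%d\n", is_cow_mapping(VM_MAYWRITE));              /* 1: private, writable */
            printf("%d\n", is_cow_mapping(VM_SHARED | VM_MAYWRITE));  /* 0: shared mapping */
            return 0;
    }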
1372 vma->vm_start = start; in vma_set_range()
1373 vma->vm_end = end; in vma_set_range()
1374 vma->vm_pgoff = pgoff; in vma_set_range()
1380 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty in vma_soft_dirty_enabled()
1381 * enablements, because when without soft-dirty being compiled in, in vma_soft_dirty_enabled()
1389 * Soft-dirty is kind of special: its tracking is enabled when the in vma_soft_dirty_enabled()
1392 return !(vma->vm_flags & VM_SOFTDIRTY); in vma_soft_dirty_enabled()
1416 return -EINVAL; in can_do_mseal()
1424 return -EPERM; in can_do_mseal()
1432 shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap); in shrinker_debugfs_name_alloc()
1434 return shrinker->name ? 0 : -ENOMEM; in shrinker_debugfs_name_alloc()
1439 kfree_const(shrinker->name); in shrinker_debugfs_name_free()
1440 shrinker->name = NULL; in shrinker_debugfs_name_free()
1464 *debugfs_id = -1; in shrinker_debugfs_detach()