/linux-6.12.1/block/ |
D | blk-lib.c |
     39  sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)  in blk_alloc_discard_bio()
     64  sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)  in __blkdev_issue_discard()
     86  sector_t nr_sects, gfp_t gfp_mask)  in blkdev_issue_discard()
    122  sector_t sector, sector_t nr_sects, gfp_t gfp_mask,  in __blkdev_issue_write_zeroes()
    196  sector_t sector, sector_t nr_sects, gfp_t gfp_mask,  in __blkdev_issue_zero_pages()
    274  sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,  in __blkdev_issue_zeroout()
    309  sector_t nr_sects, gfp_t gfp_mask, unsigned flags)  in blkdev_issue_zeroout()
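These block-layer helpers discard or zero a sector range synchronously; the gfp_mask only controls how the bios for the operation are allocated. A minimal sketch of a caller, with the device, range, and error handling invented for illustration:

#include <linux/blkdev.h>

/* Hypothetical helper: zero a range of @bdev, then discard it so the
 * device can reclaim the blocks. GFP_KERNEL is fine from a context
 * that may sleep. */
static int example_wipe_range(struct block_device *bdev, sector_t start,
                              sector_t nr_sects)
{
        int ret;

        ret = blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL, 0);
        if (ret)
                return ret;

        return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL);
}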
|
D | blk-map.c |
     22  gfp_t gfp_mask)  in bio_alloc_map_data()
    132  struct iov_iter *iter, gfp_t gfp_mask)  in bio_copy_user_iov()
    254  unsigned int nr_vecs, gfp_t gfp_mask)  in blk_rq_map_bio_alloc()
    273  gfp_t gfp_mask)  in bio_map_user_iov()
    390  unsigned int len, gfp_t gfp_mask)  in bio_map_kern()
    475  unsigned int len, gfp_t gfp_mask, int reading)  in bio_copy_kern()
    610  const struct iov_iter *iter, gfp_t gfp_mask)  in blk_rq_map_user_iov()
    665  unsigned long len, gfp_t gfp_mask)  in blk_rq_map_user()
    678  void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,  in blk_rq_map_user_io()
    762  unsigned int len, gfp_t gfp_mask)  in blk_rq_map_kern()
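blk_rq_map_kern() and blk_rq_map_user() attach a kernel or user buffer to a passthrough request, using gfp_mask for any bios and bounce pages they need. A rough sketch of a synchronous passthrough submission; the queue, opcode, and buffer are placeholders, not taken from the listing:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/* Hypothetical: map @buf into a driver-private read request and run it. */
static int example_passthrough(struct request_queue *q, void *buf,
                               unsigned int len)
{
        struct request *rq;
        int ret;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* GFP_NOIO: we may already be on the I/O submission path. */
        ret = blk_rq_map_kern(q, rq, buf, len, GFP_NOIO);
        if (!ret)
                ret = blk_status_to_errno(blk_execute_rq(rq, false));

        blk_mq_free_request(rq);
        return ret;
}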
|
D | blk-crypto.c |
     92  const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)  in bio_crypt_set_ctx()
    116  int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)  in __bio_crypt_clone()
    304  gfp_t gfp_mask)  in __blk_crypto_rq_bio_prep()
|
/linux-6.12.1/include/linux/ |
D | gfp.h |
    245  static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)  in warn_if_node_offline()
    264  __alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)  in __alloc_pages_node_noprof()
    290  static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,  in alloc_pages_node_noprof()
    311  static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)  in alloc_pages_noprof()
    339  #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)  argument
    365  #define __get_free_page(gfp_mask) \  argument
    368  #define __get_dma_pages(gfp_mask, order) \  argument
    381  unsigned int fragsz, gfp_t gfp_mask,  in page_frag_alloc_align()
    389  unsigned int fragsz, gfp_t gfp_mask)  in page_frag_alloc()
    429  static inline bool gfp_compaction_allowed(gfp_t gfp_mask)  in gfp_compaction_allowed()
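The *_noprof entries are the allocation-profiling backends behind the familiar alloc_pages()/alloc_page()/__get_free_page() wrappers. Typical use of those wrappers, as a short sketch:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate four contiguous pages (order 2), then a single page by address. */
static int example_page_alloc(void)
{
        struct page *page = alloc_pages(GFP_KERNEL, 2);
        unsigned long addr;

        if (!page)
                return -ENOMEM;
        pr_info("order-2 block mapped at %p\n", page_address(page));
        __free_pages(page, 2);

        /* __get_free_page() returns a kernel virtual address instead;
         * GFP_ATOMIC if the caller cannot sleep. */
        addr = __get_free_page(GFP_ATOMIC);
        if (!addr)
                return -ENOMEM;
        free_page(addr);
        return 0;
}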
|
D | cpuset.h |
     87  static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in __cpuset_zone_allowed()
     92  static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in cpuset_zone_allowed()
    224  static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in __cpuset_zone_allowed()
    229  static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in cpuset_zone_allowed()
|
D | page_owner.h |
     29  unsigned short order, gfp_t gfp_mask)  in set_page_owner()
     61  unsigned short order, gfp_t gfp_mask)  in set_page_owner()
|
/linux-6.12.1/fs/nfs/blocklayout/ |
D | dev.c |
    296  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_simple()
    387  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_scsi()
    441  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_slice()
    457  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_concat()
    486  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_stripe()
    515  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_deviceid()
    538  gfp_t gfp_mask)  in bl_alloc_deviceid_node()
|
/linux-6.12.1/mm/ |
D | mempool.c |
    197  gfp_t gfp_mask, int node_id)  in mempool_init_node()
    272  gfp_t gfp_mask, int node_id)  in mempool_create_node_noprof()
    384  void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask)  in mempool_alloc_noprof()
    555  void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)  in mempool_alloc_slab()
    574  void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)  in mempool_kmalloc()
    587  void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data)  in mempool_kvmalloc()
    604  void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)  in mempool_alloc_pages()
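mempool_alloc() falls back to a preallocated reserve when the underlying allocator fails, so callers on the writeback/I-O path can make forward progress. A minimal sketch with a kmalloc-backed pool; the element size and pool depth are made up:

#include <linux/mempool.h>

/* Pool guaranteeing at least 16 in-flight 256-byte elements under pressure. */
static mempool_t *example_pool;

static int example_pool_init(void)
{
        example_pool = mempool_create_kmalloc_pool(16, 256);
        return example_pool ? 0 : -ENOMEM;
}

static void *example_pool_get(void)
{
        /* With a sleeping mask like GFP_NOIO this waits for a free element
         * rather than returning NULL. */
        return mempool_alloc(example_pool, GFP_NOIO);
}

static void example_pool_put(void *elem)
{
        mempool_free(elem, example_pool);   /* tear down with mempool_destroy() */
}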
|
D | page_alloc.c |
   3209  unsigned int alloc_flags, gfp_t gfp_mask)  in zone_watermark_fast()
   3288  alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)  in alloc_flags_nofragment()
   3320  static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,  in gfp_to_alloc_flags_cma()
   3335  get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,  in get_page_from_freelist()
   3508  static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)  in warn_alloc_show_mem()
   3527  void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)  in warn_alloc()
   3553  __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,  in __alloc_pages_cpuset_fallback()
   3573  __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,  in __alloc_pages_may_oom()
   3668  __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,  in __alloc_pages_direct_compact()
   3793  __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,  in __alloc_pages_direct_compact()
   [all …]
|
D | swap.h |
    133  gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)  in swap_cluster_readahead()
    138  static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,  in swapin_readahead()
    177  gfp_t gfp_mask, void **shadowp)  in add_to_swap_cache()
|
D | page_owner.c |
     27  gfp_t gfp_mask;  member
    166  gfp_t gfp_mask)  in add_stack_record_to_list()
    194  static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask,  in inc_stack_record_count()
    235  gfp_t gfp_mask,  in __update_page_owner_handle()
    314  gfp_t gfp_mask)  in __set_page_owner()
    598  gfp_t gfp_mask;  in __dump_page_owner() local
|
D | swap_state.c |
    432  struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,  in __read_swap_cache_async()
    558  struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,  in read_swap_cache_async()
    656  struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,  in swap_cluster_readahead()
    799  static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,  in swap_vma_readahead()
    872  struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,  in swapin_readahead()
|
D | memcontrol-v1.h |
     13  static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,  in try_charge()
    154  gfp_t gfp_mask) { return true; }  in memcg1_charge_skmem()
|
/linux-6.12.1/fs/btrfs/ |
D | ulist.c |
     99  struct ulist *ulist_alloc(gfp_t gfp_mask)  in ulist_alloc()
    111  void ulist_prealloc(struct ulist *ulist, gfp_t gfp_mask)  in ulist_prealloc()
    201  int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)  in ulist_add()
    207  u64 *old_aux, gfp_t gfp_mask)  in ulist_add_merge()
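ulist is btrfs's small "unique list" used while walking backrefs: ulist_add() inserts a value only once and takes a gfp_mask because it may need to grow the set. A rough usage sketch; the inserted value and the GFP_NOFS context are illustrative:

#include "ulist.h"      /* btrfs-private header */

static int example_collect(void)
{
        struct ulist *seen;
        struct ulist_iterator uiter;
        struct ulist_node *node;
        int ret;

        seen = ulist_alloc(GFP_NOFS);
        if (!seen)
                return -ENOMEM;

        /* Returns 1 if the value was new, 0 if already present, <0 on error. */
        ret = ulist_add(seen, 4096, 0, GFP_NOFS);
        if (ret < 0)
                goto out;

        ULIST_ITER_INIT(&uiter);
        while ((node = ulist_next(seen, &uiter)))
                pr_debug("val=%llu aux=%llu\n", node->val, node->aux);
        ret = 0;
out:
        ulist_free(seen);
        return ret;
}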
|
/linux-6.12.1/net/sunrpc/auth_gss/ |
D | gss_krb5_keys.c |
    152  const struct xdr_netobj *in_constant, gfp_t gfp_mask)  in krb5_DK()
    271  gfp_t gfp_mask)  in krb5_derive_key_v2()
    372  gfp_t gfp_mask)  in krb5_kdf_feedback_cmac()
    504  gfp_t gfp_mask)  in krb5_kdf_hmac_sha2()
|
D | gss_krb5_mech.c |
    297  gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask)  in gss_krb5_import_ctx_v2()
    396  gfp_t gfp_mask)  in gss_import_v2_context()
    470  time64_t *endtime, gfp_t gfp_mask)  in gss_krb5_import_sec_context()
|
/linux-6.12.1/lib/ |
D | generic-radix-tree.c |
     24  gfp_t gfp_mask)  in __genradix_ptr_alloc()
    211  gfp_t gfp_mask)  in __genradix_prealloc()
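__genradix_ptr_alloc() is the worker behind the genradix_ptr_alloc() macro in <linux/generic-radix-tree.h>, a flat sparse array that grows on demand; the gfp_mask covers the radix nodes it may have to add. A sketch assuming the DEFINE_GENRADIX/genradix_ptr_alloc wrappers from that header:

#include <linux/generic-radix-tree.h>

/* A sparse, dynamically grown array of u64 counters. */
static DEFINE_GENRADIX(example_counters, u64);

static int example_count(size_t idx)
{
        /* Allocates any missing interior nodes plus the target entry. */
        u64 *slot = genradix_ptr_alloc(&example_counters, idx, GFP_KERNEL);

        if (!slot)
                return -ENOMEM;
        (*slot)++;
        return 0;
}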
|
D | scatterlist.c |
    152  static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)  in sg_kmalloc()
    288  unsigned int nents_first_chunk, gfp_t gfp_mask,  in __sg_alloc_table()
    375  int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)  in sg_alloc_table()
    390  gfp_t gfp_mask)  in get_next_sg()
    458  unsigned int left_pages, gfp_t gfp_mask)  in sg_alloc_append_table_from_pages()
    582  gfp_t gfp_mask)  in sg_alloc_table_from_pages_segment()
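sg_alloc_table() allocates the (possibly chained) scatterlist array with gfp_mask; DMA code then maps the table. A small sketch that wires an existing page array into a table; the page source is hypothetical:

#include <linux/scatterlist.h>

/* Build a scatterlist describing @nr_pages full pages. */
static int example_build_sgt(struct sg_table *sgt, struct page **pages,
                             unsigned int nr_pages)
{
        struct scatterlist *sg;
        unsigned int i;
        int ret;

        ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
        if (ret)
                return ret;

        for_each_sg(sgt->sgl, sg, nr_pages, i)
                sg_set_page(sg, pages[i], PAGE_SIZE, 0);

        /* ... dma_map_sgtable() etc.; undo with sg_free_table(sgt). */
        return 0;
}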
|
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx4/ |
D | icm.c |
     99  gfp_t gfp_mask, int node)  in mlx4_alloc_icm_pages()
    115  int order, gfp_t gfp_mask)  in mlx4_alloc_icm_coherent()
    133  gfp_t gfp_mask, int coherent)  in mlx4_alloc_icm()
|
/linux-6.12.1/include/linux/sched/ |
D | mm.h |
    278  static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }  in fs_reclaim_acquire()
    279  static inline void fs_reclaim_release(gfp_t gfp_mask) { }  in fs_reclaim_release()
    316  static inline void might_alloc(gfp_t gfp_mask)  in might_alloc()
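might_alloc() is the debug hook that allocation entry points call: for a sleeping gfp_mask it asserts the context may block and lets lockdep's fs_reclaim tracking see a potential recursion into reclaim. A sketch of an allocator-style wrapper using it; the wrapper itself is invented:

#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Complain early (even if the fast path would have succeeded) when called
 * with a sleeping mask from a context that must not sleep. */
static void *example_alloc(size_t size, gfp_t gfp_mask)
{
        might_alloc(gfp_mask);
        return kmalloc(size, gfp_mask);
}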
|
/linux-6.12.1/kernel/power/ |
D | snapshot.c |
    191  static void *get_image_page(gfp_t gfp_mask, int safe_needed)  in get_image_page()
    210  static void *__get_safe_page(gfp_t gfp_mask)  in __get_safe_page()
    222  unsigned long get_safe_page(gfp_t gfp_mask)  in get_safe_page()
    227  static struct page *alloc_image_page(gfp_t gfp_mask)  in alloc_image_page()
    297  gfp_t gfp_mask; /* mask for allocating pages */  member
    301  static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,  in chain_init()
    442  static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,  in alloc_rtree_node()
    468  static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,  in add_rtree_block()
    538  static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,  in create_zone_bm_rtree()
    628  static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)  in create_mem_extents()
    [all …]
|
/linux-6.12.1/drivers/connector/ |
D | connector.c |
     62  gfp_t gfp_mask, netlink_filter_fn filter,  in cn_netlink_send_mult()
    124  gfp_t gfp_mask)  in cn_netlink_send()
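cn_netlink_send() broadcasts a connector message to a netlink multicast group; it is frequently called from contexts that cannot sleep, so the skb allocation honours the caller's gfp_mask. A sketch with a made-up cb_id and payload:

#include <linux/connector.h>
#include <linux/slab.h>

/* Hypothetical event broadcast on an invented connector index/group. */
static int example_notify(const void *payload, u16 len)
{
        struct cn_msg *msg;
        int ret;

        msg = kzalloc(sizeof(*msg) + len, GFP_ATOMIC);
        if (!msg)
                return -ENOMEM;

        msg->id.idx = 0x123;    /* placeholder for a real CN_IDX_* value */
        msg->id.val = 0x456;
        msg->len = len;
        memcpy(msg->data, payload, len);

        /* portid 0 = multicast to the group; GFP_ATOMIC as we may not sleep. */
        ret = cn_netlink_send(msg, 0, msg->id.idx, GFP_ATOMIC);
        kfree(msg);
        return ret;
}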
|
/linux-6.12.1/rust/helpers/ |
D | page.c |
      6  struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)  in rust_helper_alloc_pages()
|
/linux-6.12.1/kernel/ |
D | umh.c |
    357  char **envp, gfp_t gfp_mask,  in call_usermodehelper_setup()
    486  gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;  in call_usermodehelper() local
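call_usermodehelper() wraps call_usermodehelper_setup()/call_usermodehelper_exec(); as the line-486 entry shows, the setup allocation uses GFP_ATOMIC for UMH_NO_WAIT callers and GFP_KERNEL otherwise. A minimal sketch; the helper path and arguments are examples only:

#include <linux/umh.h>

/* Run a hypothetical userspace helper and wait for it to exit. */
static int example_run_helper(void)
{
        char *argv[] = { "/sbin/example-helper", "--oneshot", NULL };
        char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

        return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}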
|
/linux-6.12.1/drivers/infiniband/hw/mthca/ |
D | mthca_memfree.c |
    107  static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)  in mthca_alloc_icm_pages()
    124  int order, gfp_t gfp_mask)  in mthca_alloc_icm_coherent()
    138  gfp_t gfp_mask, int coherent)  in mthca_alloc_icm()
|