Searched refs:gfp_mask (Results 1 – 25 of 236) sorted by relevance

/linux-6.12.1/include/linux/
gfp.h
245 static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask) in warn_if_node_offline() argument
247 gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN); in warn_if_node_offline()
255 pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node); in warn_if_node_offline()
264 __alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node_noprof() argument
267 warn_if_node_offline(nid, gfp_mask); in __alloc_pages_node_noprof()
269 return __alloc_pages_noprof(gfp_mask, order, nid, NULL); in __alloc_pages_node_noprof()
290 static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask, in alloc_pages_node_noprof() argument
296 return __alloc_pages_node_noprof(nid, gfp_mask, order); in alloc_pages_node_noprof()
311 static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order) in alloc_pages_noprof() argument
313 return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order); in alloc_pages_noprof()
[all …]
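
These are the internal *_noprof implementations; normal callers use the alloc_pages*() macro wrappers, which layer allocation profiling on top. A minimal caller-side sketch (not from the tree; the demo_* names are hypothetical):

	#include <linux/gfp.h>
	#include <linux/mm.h>

	static void *demo_alloc_on_node(int nid, unsigned int order)
	{
		/* nid may be NUMA_NO_NODE, which the wrappers above turn into
		 * the local node; an offline nid trips warn_if_node_offline()
		 * when __GFP_THISNODE is set without __GFP_NOWARN. */
		struct page *page = alloc_pages_node(nid, GFP_KERNEL, order);

		if (!page)
			return NULL;
		return page_address(page);	/* lowmem pages have a linear mapping */
	}

	static void demo_free_on_node(void *addr, unsigned int order)
	{
		__free_pages(virt_to_page(addr), order);
	}
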
mempool.h
15 typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
43 gfp_t gfp_mask, int node_id);
55 gfp_t gfp_mask, int nid);
66 extern void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) __malloc;
78 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
90 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
100 void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data);
117 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
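
The mempool_alloc_slab/mempool_kmalloc/mempool_alloc_pages entries above are stock alloc/free callbacks, with pool_data carrying the kmem_cache, allocation size, or page order. A hedged sketch of the usual setup (demo_* names are hypothetical):

	#include <linux/mempool.h>
	#include <linux/slab.h>

	struct demo_req {
		int id;
	};

	static struct kmem_cache *demo_cache;
	static mempool_t *demo_pool;

	static int demo_pool_init(void)
	{
		demo_cache = kmem_cache_create("demo_req", sizeof(struct demo_req),
					       0, 0, NULL);
		if (!demo_cache)
			return -ENOMEM;

		/* Keep at least four elements in reserve; the pool hands the
		 * kmem_cache to mempool_alloc_slab() as pool_data. */
		demo_pool = mempool_create(4, mempool_alloc_slab,
					   mempool_free_slab, demo_cache);
		if (!demo_pool) {
			kmem_cache_destroy(demo_cache);
			return -ENOMEM;
		}
		return 0;
	}
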
cpuset.h
85 extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);
87 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed() argument
89 return cpuset_node_allowed(zone_to_nid(z), gfp_mask); in __cpuset_zone_allowed()
92 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument
95 return __cpuset_zone_allowed(z, gfp_mask); in cpuset_zone_allowed()
224 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed() argument
229 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument
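
A hedged sketch of how an allocator-side loop consults this predicate, modeled loosely on get_page_from_freelist() below (demo_* is hypothetical; when CONFIG_CPUSETS is off, the stub variants shown above compile the check away):

	#include <linux/cpuset.h>
	#include <linux/gfp.h>
	#include <linux/mmzone.h>
	#include <linux/topology.h>

	static struct zone *demo_first_allowed_zone(gfp_t gfp_mask)
	{
		struct zonelist *zonelist = node_zonelist(numa_node_id(), gfp_mask);
		struct zoneref *zref;
		struct zone *zone;

		for_each_zone_zonelist(zone, zref, zonelist, gfp_zone(gfp_mask))
			if (cpuset_zone_allowed(zone, gfp_mask))
				return zone;
		return NULL;
	}
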
blk-crypto.h
84 gfp_t gfp_mask);
115 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
128 gfp_t gfp_mask) in bio_crypt_clone() argument
131 return __bio_crypt_clone(dst, src, gfp_mask); in bio_crypt_clone()
page_owner.h
13 unsigned short order, gfp_t gfp_mask);
29 unsigned short order, gfp_t gfp_mask) in set_page_owner() argument
32 __set_page_owner(page, order, gfp_mask); in set_page_owner()
61 unsigned short order, gfp_t gfp_mask) in set_page_owner() argument
/linux-6.12.1/mm/
mempool.c
197 gfp_t gfp_mask, int node_id) in mempool_init_node() argument
207 gfp_mask, node_id); in mempool_init_node()
217 element = pool->alloc(gfp_mask, pool->pool_data); in mempool_init_node()
272 gfp_t gfp_mask, int node_id) in mempool_create_node_noprof() argument
276 pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id); in mempool_create_node_noprof()
281 gfp_mask, node_id)) { in mempool_create_node_noprof()
384 void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) in mempool_alloc_noprof() argument
391 VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO); in mempool_alloc_noprof()
392 might_alloc(gfp_mask); in mempool_alloc_noprof()
394 gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */ in mempool_alloc_noprof()
[all …]
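
Note what mempool_alloc_noprof() does to the caller's mask: __GFP_ZERO is rejected (line 391) because elements are recycled rather than re-zeroed, and __GFP_NOMEMALLOC is added (line 394) so the underlying allocator leaves its emergency reserves alone; the pool's preallocated elements are the reserve. A hedged caller-side sketch, reusing the hypothetical demo_pool from the mempool.h sketch above:

	static struct demo_req *demo_get_req(bool can_sleep)
	{
		/* With a blocking mask, mempool_alloc() waits for an element
		 * to be freed rather than failing; without
		 * __GFP_DIRECT_RECLAIM (e.g. GFP_ATOMIC) it can return NULL
		 * and the caller must cope. */
		return mempool_alloc(demo_pool, can_sleep ? GFP_NOIO : GFP_ATOMIC);
	}
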
page_alloc.c
3209 unsigned int alloc_flags, gfp_t gfp_mask) in zone_watermark_fast() argument
3288 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
3296 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); in alloc_flags_nofragment()
3320 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, in gfp_to_alloc_flags_cma() argument
3324 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) in gfp_to_alloc_flags_cma()
3335 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
3358 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
3420 gfp_mask)) in get_page_from_freelist()
3429 gfp_mask)) { in get_page_from_freelist()
3452 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
[all …]
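
The caller-visible meaning of the bits this fast path inspects, as a hedged summary: __GFP_KSWAPD_RECLAIM (extracted by alloc_flags_nofragment() above) permits waking kswapd, while __GFP_DIRECT_RECLAIM decides whether the caller itself may block and reclaim. A hypothetical demo_* sketch of the mask choice:

	static struct page *demo_alloc(bool atomic_ctx)
	{
		/* GFP_KERNEL: may sleep and enter direct reclaim.
		 * GFP_ATOMIC: never sleeps, may dip into high-priority
		 * reserves. Both allow waking kswapd; GFP_NOWAIT would too,
		 * but fails early instead of reclaiming directly. */
		return alloc_pages(atomic_ctx ? GFP_ATOMIC : GFP_KERNEL, 0);
	}
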
fail_page_alloc.c
26 bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
32 if (gfp_mask & __GFP_NOFAIL) in should_fail_alloc_page()
34 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) in should_fail_alloc_page()
37 (gfp_mask & __GFP_DIRECT_RECLAIM)) in should_fail_alloc_page()
41 if (gfp_mask & __GFP_NOWARN) in should_fail_alloc_page()
page_owner.c
27 gfp_t gfp_mask; member
166 gfp_t gfp_mask) in add_stack_record_to_list() argument
172 stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask)); in add_stack_record_to_list()
194 static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask, in inc_stack_record_count() argument
214 add_stack_record_to_list(stack_record, gfp_mask); in inc_stack_record_count()
235 gfp_t gfp_mask, in __update_page_owner_handle() argument
246 page_owner->gfp_mask = gfp_mask; in __update_page_owner_handle()
314 gfp_t gfp_mask) in __set_page_owner() argument
320 handle = save_stack(gfp_mask); in __set_page_owner()
325 __update_page_owner_handle(page_ext, handle, order, gfp_mask, -1, in __set_page_owner()
[all …]
swap_state.c
432 struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in __read_swap_cache_async() argument
477 new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id()); in __read_swap_cache_async()
518 if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry)) in __read_swap_cache_async()
522 if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) in __read_swap_cache_async()
558 struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in read_swap_cache_async() argument
568 folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx, in read_swap_cache_async()
656 struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, in swap_cluster_readahead() argument
686 gfp_mask, mpol, ilx, &page_allocated, false); in swap_cluster_readahead()
703 folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx, in swap_cluster_readahead()
799 static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask, in swap_vma_readahead() argument
[all …]
memcontrol-v1.h
10 int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
13 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, in try_charge() argument
19 return try_charge_memcg(memcg, gfp_mask, nr_pages); in try_charge()
116 gfp_t gfp_mask);
154 gfp_t gfp_mask) { return true; } in memcg1_charge_skmem() argument
/linux-6.12.1/block/
blk-lib.c
39 sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask) in blk_alloc_discard_bio() argument
47 bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask); in blk_alloc_discard_bio()
64 sector_t nr_sects, gfp_t gfp_mask, struct bio **biop) in __blkdev_issue_discard() argument
69 gfp_mask))) in __blkdev_issue_discard()
86 sector_t nr_sects, gfp_t gfp_mask) in blkdev_issue_discard() argument
93 ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio); in blkdev_issue_discard()
122 sector_t sector, sector_t nr_sects, gfp_t gfp_mask, in __blkdev_issue_write_zeroes() argument
134 bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask); in __blkdev_issue_write_zeroes()
196 sector_t sector, sector_t nr_sects, gfp_t gfp_mask, in __blkdev_issue_zero_pages() argument
203 bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask); in __blkdev_issue_zero_pages()
[all …]
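
blkdev_issue_discard() is the synchronous caller-facing entry point; gfp_mask covers the discard bios the helper chains internally. A hedged sketch of a typical caller (demo_* is hypothetical), roughly what a filesystem's FITRIM path does:

	#include <linux/blkdev.h>

	static int demo_discard_range(struct block_device *bdev, sector_t start,
				      sector_t nr_sects)
	{
		/* GFP_KERNEL: this path may sleep while allocating bios. */
		return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL);
	}
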
blk-map.c
22 gfp_t gfp_mask) in bio_alloc_map_data() argument
29 bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask); in bio_alloc_map_data()
132 struct iov_iter *iter, gfp_t gfp_mask) in bio_copy_user_iov() argument
142 bmd = bio_alloc_map_data(iter, gfp_mask); in bio_copy_user_iov()
157 bio = bio_kmalloc(nr_pages, gfp_mask); in bio_copy_user_iov()
185 page = alloc_page(GFP_NOIO | gfp_mask); in bio_copy_user_iov()
254 unsigned int nr_vecs, gfp_t gfp_mask) in blk_rq_map_bio_alloc() argument
259 bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask, in blk_rq_map_bio_alloc()
264 bio = bio_kmalloc(nr_vecs, gfp_mask); in blk_rq_map_bio_alloc()
273 gfp_t gfp_mask) in bio_map_user_iov() argument
[all …]
blk-crypto.c
92 const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask) in bio_crypt_set_ctx() argument
100 WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM)); in bio_crypt_set_ctx()
102 bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask); in bio_crypt_set_ctx()
116 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask) in __bio_crypt_clone() argument
118 dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask); in __bio_crypt_clone()
304 gfp_t gfp_mask) in __blk_crypto_rq_bio_prep() argument
307 rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask); in __blk_crypto_rq_bio_prep()
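
Note the WARN_ON_ONCE at line 100: bio_crypt_set_ctx() insists on a mask that allows direct reclaim, because the mempool behind bio_crypt_ctx_pool only guarantees forward progress for blocking allocations. A hedged sketch of a caller in the style of fs/crypto (demo_* is hypothetical; key and dun are assumed prepared elsewhere):

	#include <linux/bio.h>
	#include <linux/blk-crypto.h>

	static void demo_attach_crypt_ctx(struct bio *bio,
					  const struct blk_crypto_key *key,
					  const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
	{
		/* GFP_NOIO satisfies the __GFP_DIRECT_RECLAIM requirement
		 * without recursing into the I/O path. */
		bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
	}
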
/linux-6.12.1/fs/nfs/blocklayout/
dev.c
291 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
296 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_simple() argument
302 dev = bl_resolve_deviceid(server, v, gfp_mask); in bl_parse_simple()
387 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_scsi() argument
441 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_slice() argument
446 ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask); in bl_parse_slice()
457 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_concat() argument
464 sizeof(struct pnfs_block_dev), gfp_mask); in bl_parse_concat()
470 volumes, v->concat.volumes[i], gfp_mask); in bl_parse_concat()
486 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_stripe() argument
[all …]
/linux-6.12.1/fs/btrfs/
ulist.h
50 struct ulist *ulist_alloc(gfp_t gfp_mask);
53 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
55 u64 *old_aux, gfp_t gfp_mask);
60 void **old_aux, gfp_t gfp_mask) in ulist_add_merge_ptr() argument
64 int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask); in ulist_add_merge_ptr()
68 return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask); in ulist_add_merge_ptr()
ulist.c
99 struct ulist *ulist_alloc(gfp_t gfp_mask) in ulist_alloc() argument
101 struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask); in ulist_alloc()
111 void ulist_prealloc(struct ulist *ulist, gfp_t gfp_mask) in ulist_prealloc() argument
114 ulist->prealloc = kzalloc(sizeof(*ulist->prealloc), gfp_mask); in ulist_prealloc()
201 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask) in ulist_add() argument
203 return ulist_add_merge(ulist, val, aux, NULL, gfp_mask); in ulist_add()
207 u64 *old_aux, gfp_t gfp_mask) in ulist_add_merge() argument
223 node = kmalloc(sizeof(*node), gfp_mask); in ulist_add_merge()
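
ulist is btrfs's small set-of-u64 helper: ulist_add() inserts only when val is not already present (it goes through ulist_add_merge() with old_aux == NULL). A hedged usage sketch (demo_* is hypothetical):

	#include "ulist.h"

	static int demo_collect(void)
	{
		struct ulist *ul = ulist_alloc(GFP_NOFS);
		struct ulist_iterator uiter;
		struct ulist_node *node;

		if (!ul)
			return -ENOMEM;

		ulist_add(ul, 42, 0, GFP_NOFS);
		ulist_add(ul, 42, 0, GFP_NOFS);	/* duplicate: set keeps one entry */

		ULIST_ITER_INIT(&uiter);
		while ((node = ulist_next(ul, &uiter)))
			pr_info("val %llu aux %llu\n", node->val, node->aux);

		ulist_free(ul);
		return 0;
	}
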
/linux-6.12.1/include/linux/sched/
mm.h
273 extern void fs_reclaim_acquire(gfp_t gfp_mask);
274 extern void fs_reclaim_release(gfp_t gfp_mask);
278 static inline void fs_reclaim_acquire(gfp_t gfp_mask) { } in fs_reclaim_acquire() argument
279 static inline void fs_reclaim_release(gfp_t gfp_mask) { } in fs_reclaim_release() argument
316 static inline void might_alloc(gfp_t gfp_mask) in might_alloc() argument
318 fs_reclaim_acquire(gfp_mask); in might_alloc()
319 fs_reclaim_release(gfp_mask); in might_alloc()
321 might_sleep_if(gfpflags_allow_blocking(gfp_mask)); in might_alloc()
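
might_alloc() makes an allocation site's constraints checkable even when no allocation actually happens: fs_reclaim_acquire/release teach lockdep about the reclaim dependency, and might_sleep_if() fires for blocking masks. A hedged sketch of the intended pattern (demo_* is hypothetical, and the freelist is unlocked for brevity):

	#include <linux/sched/mm.h>
	#include <linux/slab.h>

	static void *demo_objs[16];
	static int demo_nr;

	static void *demo_cached_alloc(gfp_t gfp_mask)
	{
		/* Annotate unconditionally, so a GFP_KERNEL caller in atomic
		 * context is caught even when the fast path below would
		 * otherwise hide the bug. */
		might_alloc(gfp_mask);

		if (demo_nr > 0)
			return demo_objs[--demo_nr];
		return kmalloc(64, gfp_mask);
	}
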
/linux-6.12.1/net/sunrpc/auth_gss/
gss_krb5_keys.c
152 const struct xdr_netobj *in_constant, gfp_t gfp_mask) in krb5_DK() argument
174 inblockdata = kmalloc(blocksize, gfp_mask); in krb5_DK()
178 outblockdata = kmalloc(blocksize, gfp_mask); in krb5_DK()
271 gfp_t gfp_mask) in krb5_derive_key_v2() argument
277 inblock.data = kmalloc(inblock.len, gfp_mask); in krb5_derive_key_v2()
281 ret = krb5_DK(gk5e, inkey, inblock.data, label, gfp_mask); in krb5_derive_key_v2()
372 gfp_t gfp_mask) in krb5_kdf_feedback_cmac() argument
401 step.data = kzalloc(step.len, gfp_mask); in krb5_kdf_feedback_cmac()
406 DR.data = kmalloc(DR.len, gfp_mask); in krb5_kdf_feedback_cmac()
504 gfp_t gfp_mask) in krb5_kdf_hmac_sha2() argument
[all …]
gss_krb5_mech.c
297 gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask) in gss_krb5_import_ctx_v2() argument
306 keyout.data = kmalloc(GSS_KRB5_MAX_KEYLEN, gfp_mask); in gss_krb5_import_ctx_v2()
313 KEY_USAGE_SEED_ENCRYPTION, gfp_mask)) in gss_krb5_import_ctx_v2()
329 KEY_USAGE_SEED_ENCRYPTION, gfp_mask)) in gss_krb5_import_ctx_v2()
346 KEY_USAGE_SEED_CHECKSUM, gfp_mask)) in gss_krb5_import_ctx_v2()
354 KEY_USAGE_SEED_CHECKSUM, gfp_mask)) in gss_krb5_import_ctx_v2()
363 KEY_USAGE_SEED_INTEGRITY, gfp_mask)) in gss_krb5_import_ctx_v2()
371 KEY_USAGE_SEED_INTEGRITY, gfp_mask)) in gss_krb5_import_ctx_v2()
396 gfp_t gfp_mask) in gss_import_v2_context() argument
447 gss_kerberos_mech.gm_oid.len, gfp_mask); in gss_import_v2_context()
[all …]
gss_krb5_internal.h
40 gfp_t gfp_mask);
110 gfp_t gfp_mask);
116 gfp_t gfp_mask);
122 gfp_t gfp_mask);
141 u32 usage, u8 seed, gfp_t gfp_mask) in krb5_derive_key() argument
153 return gk5e->derive_key(gk5e, inkey, outkey, &label, gfp_mask); in krb5_derive_key()
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx4/
icm.c
99 gfp_t gfp_mask, int node) in mlx4_alloc_icm_pages() argument
103 page = alloc_pages_node(node, gfp_mask, order); in mlx4_alloc_icm_pages()
105 page = alloc_pages(gfp_mask, order); in mlx4_alloc_icm_pages()
115 int order, gfp_t gfp_mask) in mlx4_alloc_icm_coherent() argument
118 &buf->dma_addr, gfp_mask); in mlx4_alloc_icm_coherent()
133 gfp_t gfp_mask, int coherent) in mlx4_alloc_icm() argument
142 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); in mlx4_alloc_icm()
145 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN), in mlx4_alloc_icm()
149 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); in mlx4_alloc_icm()
162 gfp_mask & ~(__GFP_HIGHMEM | in mlx4_alloc_icm()
[all …]
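
mlx4_alloc_icm_pages() prefers the device's NUMA node and falls back to any node rather than fail. A hedged sketch of that pattern (demo_* is hypothetical; the __GFP_THISNODE on the first attempt is an addition here to make that try strictly node-local, which the mlx4 code shown above does not do):

	static struct page *demo_alloc_near(int node, gfp_t gfp_mask, int order)
	{
		struct page *page;

		page = alloc_pages_node(node, gfp_mask | __GFP_THISNODE, order);
		if (!page)
			page = alloc_pages(gfp_mask, order);	/* any node */
		return page;
	}
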
/linux-6.12.1/drivers/connector/
connector.c
62 gfp_t gfp_mask, netlink_filter_fn filter, in cn_netlink_send_mult() argument
97 skb = nlmsg_new(size, gfp_mask); in cn_netlink_send_mult()
115 gfp_mask, filter, in cn_netlink_send_mult()
118 !gfpflags_allow_blocking(gfp_mask)); in cn_netlink_send_mult()
124 gfp_t gfp_mask) in cn_netlink_send() argument
126 return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask, in cn_netlink_send()
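
cn_netlink_send() broadcasts a connector message to a netlink group; gfp_mask is forwarded to the skb allocation, and gfpflags_allow_blocking() (line 118 above) is consulted for the non-blocking send case. A hedged caller sketch (demo_* and the cb_id values are hypothetical):

	#include <linux/connector.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static int demo_cn_broadcast(const void *payload, u16 len)
	{
		struct cn_msg *msg;
		int ret;

		msg = kzalloc(sizeof(*msg) + len, GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		msg->id.idx = 0x123;	/* hypothetical registered index */
		msg->id.val = 0x1;
		msg->len = len;
		memcpy(msg->data, payload, len);

		ret = cn_netlink_send(msg, 0, msg->id.idx, GFP_KERNEL);
		kfree(msg);
		return ret;
	}
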
/linux-6.12.1/kernel/power/
snapshot.c
191 static void *get_image_page(gfp_t gfp_mask, int safe_needed) in get_image_page() argument
195 res = (void *)get_zeroed_page(gfp_mask); in get_image_page()
201 res = (void *)get_zeroed_page(gfp_mask); in get_image_page()
210 static void *__get_safe_page(gfp_t gfp_mask) in __get_safe_page() argument
219 return get_image_page(gfp_mask, PG_SAFE); in __get_safe_page()
222 unsigned long get_safe_page(gfp_t gfp_mask) in get_safe_page() argument
224 return (unsigned long)__get_safe_page(gfp_mask); in get_safe_page()
227 static struct page *alloc_image_page(gfp_t gfp_mask) in alloc_image_page() argument
231 page = alloc_page(gfp_mask); in alloc_image_page()
297 gfp_t gfp_mask; /* mask for allocating pages */ member
[all …]
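
get_image_page() wraps the ordinary get_zeroed_page() primitive with hibernation bookkeeping: when safe_needed is set, it retries until it gets a "safe" page, one that does not collide with the image being restored. A hedged sketch of the underlying primitive only (demo_* is hypothetical):

	#include <linux/gfp.h>

	static int demo_scratch_page(gfp_t gfp_mask)
	{
		unsigned long addr = get_zeroed_page(gfp_mask);

		if (!addr)
			return -ENOMEM;
		/* ... use the zero-filled page via its kernel address ... */
		free_page(addr);
		return 0;
	}
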
/linux-6.12.1/lib/
generic-radix-tree.c
24 gfp_t gfp_mask) in __genradix_ptr_alloc() argument
44 new_node = genradix_alloc_node(gfp_mask); in __genradix_ptr_alloc()
69 new_node = genradix_alloc_node(gfp_mask); in __genradix_ptr_alloc()
211 gfp_t gfp_mask) in __genradix_prealloc() argument
216 if (!__genradix_ptr_alloc(radix, offset, NULL, gfp_mask)) in __genradix_prealloc()
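
__genradix_ptr_alloc() backs the public genradix API: a sparse flat array indexed by a large integer, grown node-by-node with the caller's gfp mask. A hedged usage sketch (demo_* is hypothetical):

	#include <linux/generic-radix-tree.h>

	struct demo_entry {
		u64 seq;
	};

	static GENRADIX(struct demo_entry) demo_gr;

	static int demo_genradix(void)
	{
		struct demo_entry *e;

		genradix_init(&demo_gr);

		/* Allocates any missing intermediate nodes with GFP_KERNEL. */
		e = genradix_ptr_alloc(&demo_gr, 1000, GFP_KERNEL);
		if (!e) {
			genradix_free(&demo_gr);
			return -ENOMEM;
		}
		e->seq = 1;

		genradix_free(&demo_gr);
		return 0;
	}
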
