/linux-6.12.1/net/core/ |
D | page_pool.c |
     43  #define alloc_stat_inc(pool, __stat)  (pool->alloc_stats.__stat++)   argument
     45  #define recycle_stat_inc(pool, __stat) \   argument
     47          struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
     51  #define recycle_stat_add(pool, __stat, val) \   argument
     53          struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
     82  bool page_pool_get_stats(const struct page_pool *pool,   in page_pool_get_stats() argument
     91          stats->alloc_stats.fast += pool->alloc_stats.fast;   in page_pool_get_stats()
     92          stats->alloc_stats.slow += pool->alloc_stats.slow;   in page_pool_get_stats()
     93          stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;   in page_pool_get_stats()
     94          stats->alloc_stats.empty += pool->alloc_stats.empty;   in page_pool_get_stats()
    [all …]
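A minimal usage sketch (not from the tree): page_pool_get_stats() accumulates into the caller's struct page_pool_stats, so a driver can sum one zeroed struct across all of its RX queues, e.g. for ethtool reporting. The my_netdev/rxq names are hypothetical.

    #ifdef CONFIG_PAGE_POOL_STATS
    /* Sum page_pool counters over every RX queue of a hypothetical driver. */
    static void my_sum_pool_stats(struct my_netdev *priv,
                                  struct page_pool_stats *stats)
    {
            int i;

            for (i = 0; i < priv->num_rx_queues; i++)
                    page_pool_get_stats(priv->rxq[i].page_pool, stats);
    }
    #endif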
|
D | page_pool_user.c |
     34  typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
     40          struct page_pool *pool;   in netdev_nl_page_pool_get_do() local
     45          pool = xa_load(&page_pools, id);   in netdev_nl_page_pool_get_do()
     46          if (!pool || hlist_unhashed(&pool->user.list) ||   in netdev_nl_page_pool_get_do()
     47              !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) {   in netdev_nl_page_pool_get_do()
     58          err = fill(rsp, pool, info);   in netdev_nl_page_pool_get_do()
     86          struct page_pool *pool;   in netdev_nl_page_pool_get_dump() local
     92          hlist_for_each_entry(pool, &netdev->page_pools, user.list) {   in netdev_nl_page_pool_get_dump()
     93                  if (state->pp_id && state->pp_id < pool->user.id)   in netdev_nl_page_pool_get_dump()
     96                  state->pp_id = pool->user.id;   in netdev_nl_page_pool_get_dump()
    [all …]
|
/linux-6.12.1/net/xdp/ |
D | xsk_buff_pool.c |
     11  void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)   in xp_add_xsk() argument
     18          spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);   in xp_add_xsk()
     19          list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);   in xp_add_xsk()
     20          spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);   in xp_add_xsk()
     23  void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)   in xp_del_xsk() argument
     30          spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);   in xp_del_xsk()
     32          spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);   in xp_del_xsk()
     35  void xp_destroy(struct xsk_buff_pool *pool)   in xp_destroy() argument
     37          if (!pool)   in xp_destroy()
     40          kvfree(pool->tx_descs);   in xp_destroy()
    [all …]
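The xp_add_xsk()/xp_del_xsk() pair above is an instance of a common pattern: writers mutate an RCU list under a spinlock while readers traverse it locklessly. A generic sketch with hypothetical types:

    /* Writers serialize on l->lock; list_add_rcu() publishes the entry safely. */
    static void my_add(struct my_list *l, struct my_entry *e)
    {
            unsigned long flags;

            spin_lock_irqsave(&l->lock, flags);
            list_add_rcu(&e->node, &l->head);
            spin_unlock_irqrestore(&l->lock, flags);
    }

    /* Readers need only rcu_read_lock(), never the spinlock. */
    static void my_walk(struct my_list *l)
    {
            struct my_entry *e;

            rcu_read_lock();
            list_for_each_entry_rcu(e, &l->head, node)
                    my_consume(e);          /* hypothetical */
            rcu_read_unlock();
    }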
|
/linux-6.12.1/mm/ |
D | mempool.c |
     24  static void poison_error(mempool_t *pool, void *element, size_t size,   in poison_error() argument
     27          const int nr = pool->curr_nr;   in poison_error()
     33          pr_err("Mempool %p size %zu\n", pool, size);   in poison_error()
     41  static void __check_element(mempool_t *pool, void *element, size_t size)   in __check_element() argument
     50                  poison_error(pool, element, size, i);   in __check_element()
     57  static void check_element(mempool_t *pool, void *element)   in check_element() argument
     64          if (pool->free == mempool_kfree) {   in check_element()
     65                  __check_element(pool, element, (size_t)pool->pool_data);   in check_element()
     66          } else if (pool->free == mempool_free_slab) {   in check_element()
     67                  __check_element(pool, element, kmem_cache_size(pool->pool_data));   in check_element()
    [all …]
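The lines above are mempool's element-poisoning debug checks. For context, a minimal sketch of ordinary mempool usage (struct my_obj and the reserve of 16 are hypothetical): backing the pool with a slab cache guarantees a minimum number of elements stays available under memory pressure.

    #include <linux/mempool.h>
    #include <linux/slab.h>

    struct my_obj { int data; };            /* hypothetical */

    static struct kmem_cache *my_cache;
    static mempool_t *my_pool;

    static int my_pool_setup(void)
    {
            my_cache = KMEM_CACHE(my_obj, 0);
            if (!my_cache)
                    return -ENOMEM;

            /* reserve at least 16 elements for forward progress */
            my_pool = mempool_create_slab_pool(16, my_cache);
            if (!my_pool) {
                    kmem_cache_destroy(my_cache);
                    return -ENOMEM;
            }
            return 0;
    }
    /* later: obj = mempool_alloc(my_pool, GFP_NOIO);
     *        mempool_free(obj, my_pool); */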
|
D | dmapool.c |
     74          struct dma_pool *pool;   in pools_show() local
     80          list_for_each_entry(pool, &dev->dma_pools, pools) {   in pools_show()
     83                          pool->name, pool->nr_active,   in pools_show()
     84                          pool->nr_blocks, pool->size,   in pools_show()
     85                          pool->nr_pages);   in pools_show()
     95  static void pool_check_block(struct dma_pool *pool, struct dma_block *block,   in pool_check_block() argument
    101          for (i = sizeof(struct dma_block); i < pool->size; i++) {   in pool_check_block()
    104                  dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,   in pool_check_block()
    105                          pool->name, block);   in pool_check_block()
    112                          data, pool->size, 1);   in pool_check_block()
    [all …]
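A minimal sketch of the dma_pool API whose sysfs/poison paths appear above; sizes and names are hypothetical. Each pool hands out fixed-size coherent blocks carved from larger DMA pages:

    static int my_desc_demo(struct device *dev)
    {
            struct dma_pool *pool;
            dma_addr_t dma;
            void *desc;

            /* 64-byte blocks, 8-byte aligned, no boundary constraint */
            pool = dma_pool_create("my_desc", dev, 64, 8, 0);
            if (!pool)
                    return -ENOMEM;

            desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
            if (!desc) {
                    dma_pool_destroy(pool);
                    return -ENOMEM;
            }

            /* ... hand 'dma' to the hardware, touch 'desc' from the CPU ... */

            dma_pool_free(pool, desc, dma);
            dma_pool_destroy(pool);
            return 0;
    }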
|
D | zbud.c |
    202          struct zbud_pool *pool;   in zbud_create_pool() local
    205          pool = kzalloc(sizeof(struct zbud_pool), gfp);   in zbud_create_pool()
    206          if (!pool)   in zbud_create_pool()
    208          spin_lock_init(&pool->lock);   in zbud_create_pool()
    210                  INIT_LIST_HEAD(&pool->unbuddied[i]);   in zbud_create_pool()
    211          INIT_LIST_HEAD(&pool->buddied);   in zbud_create_pool()
    212          pool->pages_nr = 0;   in zbud_create_pool()
    213          return pool;   in zbud_create_pool()
    222  static void zbud_destroy_pool(struct zbud_pool *pool)   in zbud_destroy_pool() argument
    224          kfree(pool);   in zbud_destroy_pool()
    [all …]
|
/linux-6.12.1/drivers/net/ethernet/ti/ |
D | k3-cppi-desc-pool.c |
     28  void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)   in k3_cppi_desc_pool_destroy() argument
     30          if (!pool)   in k3_cppi_desc_pool_destroy()
     33          WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),   in k3_cppi_desc_pool_destroy()
     35               gen_pool_size(pool->gen_pool),   in k3_cppi_desc_pool_destroy()
     36               gen_pool_avail(pool->gen_pool));   in k3_cppi_desc_pool_destroy()
     37          if (pool->cpumem)   in k3_cppi_desc_pool_destroy()
     38                  dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,   in k3_cppi_desc_pool_destroy()
     39                                    pool->dma_addr);   in k3_cppi_desc_pool_destroy()
     41          kfree(pool->desc_infos);   in k3_cppi_desc_pool_destroy()
     43          gen_pool_destroy(pool->gen_pool);       /* frees pool->name */   in k3_cppi_desc_pool_destroy()
    [all …]
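The WARN above is a leak check: a genpool is fully idle only when gen_pool_avail() has climbed back to gen_pool_size(). A generic teardown sketch of the same idea (not from the driver):

    static void my_genpool_teardown(struct gen_pool *gp)
    {
            WARN(gen_pool_size(gp) != gen_pool_avail(gp),
                 "destroying pool with %zu of %zu bytes still allocated\n",
                 gen_pool_size(gp) - gen_pool_avail(gp), gen_pool_size(gp));
            gen_pool_destroy(gp);   /* gen_pool_destroy() expects an unused pool */
    }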
|
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/ |
D | mlx5hws_pool.c |
      9          switch (resource->pool->type) {   in hws_pool_free_one_resource()
     11                  mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id);   in hws_pool_free_one_resource()
     14                  mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id);   in hws_pool_free_one_resource()
     23  static void hws_pool_resource_free(struct mlx5hws_pool *pool,   in hws_pool_resource_free() argument
     26          hws_pool_free_one_resource(pool->resource[resource_idx]);   in hws_pool_resource_free()
     27          pool->resource[resource_idx] = NULL;   in hws_pool_resource_free()
     29          if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {   in hws_pool_resource_free()
     30                  hws_pool_free_one_resource(pool->mirror_resource[resource_idx]);   in hws_pool_resource_free()
     31                  pool->mirror_resource[resource_idx] = NULL;   in hws_pool_resource_free()
     36  hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range,   in hws_pool_create_one_resource() argument
    [all …]
|
/linux-6.12.1/drivers/md/ |
D | dm-thin.c |
    232  struct pool {   struct
    290  static void metadata_operation_failed(struct pool *pool, const char *op, int r);   argument
    292  static enum pool_mode get_pool_mode(struct pool *pool)   in get_pool_mode() argument
    294          return pool->pf.mode;   in get_pool_mode()
    297  static void notify_of_pool_mode_change(struct pool *pool)   in notify_of_pool_mode_change() argument
    307          enum pool_mode mode = get_pool_mode(pool);   in notify_of_pool_mode_change()
    310          if (!pool->pf.error_if_no_space)   in notify_of_pool_mode_change()
    316          dm_table_event(pool->ti->table);   in notify_of_pool_mode_change()
    318                    dm_device_name(pool->pool_md),   in notify_of_pool_mode_change()
    327          struct pool *pool;   member
    [all …]
|
/linux-6.12.1/net/ceph/ |
D | msgpool.c |
     14          struct ceph_msgpool *pool = arg;   in msgpool_alloc() local
     17          msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items,   in msgpool_alloc()
     20                  dout("msgpool_alloc %s failed\n", pool->name);   in msgpool_alloc()
     22                  dout("msgpool_alloc %s %p\n", pool->name, msg);   in msgpool_alloc()
     23                  msg->pool = pool;   in msgpool_alloc()
     30          struct ceph_msgpool *pool = arg;   in msgpool_free() local
     33          dout("msgpool_release %s %p\n", pool->name, msg);   in msgpool_free()
     34          msg->pool = NULL;   in msgpool_free()
     38  int ceph_msgpool_init(struct ceph_msgpool *pool, int type,   in ceph_msgpool_init() argument
     43          pool->type = type;   in ceph_msgpool_init()
    [all …]
|
/linux-6.12.1/sound/core/seq/ |
D | seq_memory.c |
     22  static inline int snd_seq_pool_available(struct snd_seq_pool *pool)   in snd_seq_pool_available() argument
     24          return pool->total_elements - atomic_read(&pool->counter);   in snd_seq_pool_available()
     27  static inline int snd_seq_output_ok(struct snd_seq_pool *pool)   in snd_seq_output_ok() argument
     29          return snd_seq_pool_available(pool) >= pool->room;   in snd_seq_output_ok()
    225  static inline void free_cell(struct snd_seq_pool *pool,   in free_cell() argument
    228          cell->next = pool->free;   in free_cell()
    229          pool->free = cell;   in free_cell()
    230          atomic_dec(&pool->counter);   in free_cell()
    235          struct snd_seq_pool *pool;   in snd_seq_cell_free() local
    239          pool = cell->pool;   in snd_seq_cell_free()
    [all …]
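The free path above keeps a plain intrusive free list and derives "available" from a counter instead of walking the list. A generic sketch (hypothetical types; the real code holds the pool's lock around the list update):

    static void my_push_free(struct my_pool *pool, struct my_cell *cell)
    {
            cell->next = pool->free;        /* LIFO push onto the free list */
            pool->free = cell;
            atomic_dec(&pool->in_use);
    }

    static int my_available(struct my_pool *pool)
    {
            return pool->total - atomic_read(&pool->in_use);
    }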
|
/linux-6.12.1/lib/ |
D | objpool.c |
     19  objpool_init_percpu_slot(struct objpool_head *pool,   in objpool_init_percpu_slot() argument
     24          void *obj = (void *)&slot->entries[pool->capacity];   in objpool_init_percpu_slot()
     28          slot->mask = pool->capacity - 1;   in objpool_init_percpu_slot()
     37                  obj = obj + pool->obj_size;   in objpool_init_percpu_slot()
     40                  pool->nr_objs++;   in objpool_init_percpu_slot()
     48  objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,   in objpool_init_percpu_slots() argument
     63                  nodes = nr_objs / pool->nr_possible_cpus;   in objpool_init_percpu_slots()
     64                  if (cpu_count < (nr_objs % pool->nr_possible_cpus))   in objpool_init_percpu_slots()
     68                  size = struct_size(slot, entries, pool->capacity) +   in objpool_init_percpu_slots()
     69                         pool->obj_size * nodes;   in objpool_init_percpu_slots()
    [all …]
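A usage sketch, assuming the objpool API declared in <linux/objpool.h> (objpool_init/objpool_pop/objpool_push/objpool_fini); the per-CPU slot sizing above is what objpool_init() performs internally. struct my_node and the count of 128 are hypothetical:

    #include <linux/objpool.h>

    struct my_node { int val; };            /* hypothetical */

    static struct objpool_head my_objpool;

    static int my_objpool_demo(void)
    {
            struct my_node *node;
            int ret;

            /* 128 preallocated objects, no init callback or context */
            ret = objpool_init(&my_objpool, 128, sizeof(struct my_node),
                               GFP_KERNEL, NULL, NULL, NULL);
            if (ret)
                    return ret;

            node = objpool_pop(&my_objpool);        /* NULL when empty */
            if (node)
                    objpool_push(node, &my_objpool);

            objpool_fini(&my_objpool);
            return 0;
    }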
|
/linux-6.12.1/include/net/ |
D | xdp_sock_drv.h |
     23  void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
     24  bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
     25  u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
     26  void xsk_tx_release(struct xsk_buff_pool *pool);
     29  void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
     30  void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
     31  void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
     32  void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
     33  bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
     35  static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)   in xsk_pool_get_headroom() argument
    [all …]
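A sketch of how a driver's TX poll might consume these declarations for zero-copy AF_XDP transmit; the my_ring_* helpers are hypothetical, while xsk_buff_raw_get_dma() and xsk_buff_raw_dma_sync_for_device() are companion helpers from the same header:

    static void my_xsk_xmit(struct my_ring *ring, struct xsk_buff_pool *pool)
    {
            struct xdp_desc desc;

            while (my_ring_has_room(ring) && xsk_tx_peek_desc(pool, &desc)) {
                    dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);

                    xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
                    my_ring_post(ring, dma, desc.len);
            }
            /* hand the consumed descriptors back to user space */
            xsk_tx_release(pool);
    }
    /* on TX completion: xsk_tx_completed(pool, done); */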
|
D | xsk_buff_pool.h |
     30          struct xsk_buff_pool *pool;   member
    105  int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
    107  int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
    109  int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
    110  void xp_destroy(struct xsk_buff_pool *pool);
    111  void xp_get_pool(struct xsk_buff_pool *pool);
    112  bool xp_put_pool(struct xsk_buff_pool *pool);
    113  void xp_clear_dev(struct xsk_buff_pool *pool);
    114  void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
    115  void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
    [all …]
|
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/steering/ |
D | dr_icm_pool.c |
     82          u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);   in mlx5dr_icm_pool_get_chunk_mr_addr()
     94          u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);   in mlx5dr_icm_pool_get_chunk_icm_addr()
    102                                                   chunk->buddy_mem->pool->icm_type);   in mlx5dr_icm_pool_get_chunk_byte_size()
    111  dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)   in dr_icm_pool_mr_create() argument
    113          struct mlx5_core_dev *mdev = pool->dmn->mdev;   in dr_icm_pool_mr_create()
    123          icm_mr->dmn = pool->dmn;   in dr_icm_pool_mr_create()
    125          icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,   in dr_icm_pool_mr_create()
    126                                                                 pool->icm_type);   in dr_icm_pool_mr_create()
    128          switch (pool->icm_type) {   in dr_icm_pool_mr_create()
    144                  WARN_ON(pool->icm_type);   in dr_icm_pool_mr_create()
    [all …]
|
D | dr_arg.c |
     31  static int dr_arg_pool_alloc_objs(struct dr_arg_pool *pool)   in dr_arg_pool_alloc_objs() argument
     43                  pool->dmn->info.caps.log_header_modify_argument_granularity;   in dr_arg_pool_alloc_objs()
     46                  max_t(u32, pool->dmn->info.caps.log_header_modify_argument_granularity,   in dr_arg_pool_alloc_objs()
     49                  min_t(u32, pool->dmn->info.caps.log_header_modify_argument_max_alloc,   in dr_arg_pool_alloc_objs()
     52          if (pool->log_chunk_size > object_range) {   in dr_arg_pool_alloc_objs()
     53                  mlx5dr_err(pool->dmn, "Required chunk size (%d) is not supported\n",   in dr_arg_pool_alloc_objs()
     54                             pool->log_chunk_size);   in dr_arg_pool_alloc_objs()
     58          num_of_objects = (1 << (object_range - pool->log_chunk_size));   in dr_arg_pool_alloc_objs()
     60          ret = mlx5dr_cmd_create_modify_header_arg(pool->dmn->mdev,   in dr_arg_pool_alloc_objs()
     62                                                    pool->dmn->pdn,   in dr_arg_pool_alloc_objs()
    [all …]
|
/linux-6.12.1/drivers/staging/media/atomisp/pci/runtime/rmgr/src/ |
D | rmgr_vbuf.c |
    128  int ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool)   in ia_css_rmgr_init_vbuf() argument
    134          assert(pool);   in ia_css_rmgr_init_vbuf()
    135          if (!pool)   in ia_css_rmgr_init_vbuf()
    138          if (pool->recycle && pool->size) {   in ia_css_rmgr_init_vbuf()
    142                                 pool->size;   in ia_css_rmgr_init_vbuf()
    143                  pool->handles = kvmalloc(bytes_needed, GFP_KERNEL);   in ia_css_rmgr_init_vbuf()
    144                  if (pool->handles)   in ia_css_rmgr_init_vbuf()
    145                          memset(pool->handles, 0, bytes_needed);   in ia_css_rmgr_init_vbuf()
    150                  pool->size = 0;   in ia_css_rmgr_init_vbuf()
    151                  pool->handles = NULL;   in ia_css_rmgr_init_vbuf()
    [all …]
|
/linux-6.12.1/drivers/gpu/drm/panthor/ |
D | panthor_heap.c |
    110  static int panthor_get_heap_ctx_offset(struct panthor_heap_pool *pool, int id)   in panthor_get_heap_ctx_offset() argument
    112          return panthor_heap_ctx_stride(pool->ptdev) * id;   in panthor_get_heap_ctx_offset()
    115  static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id)   in panthor_get_heap_ctx() argument
    117          return pool->gpu_contexts->kmap +   in panthor_get_heap_ctx()
    118                 panthor_get_heap_ctx_offset(pool, id);   in panthor_get_heap_ctx()
    221  panthor_heap_destroy_locked(struct panthor_heap_pool *pool, u32 handle)   in panthor_heap_destroy_locked() argument
    225          heap = xa_erase(&pool->xa, handle);   in panthor_heap_destroy_locked()
    229          panthor_free_heap_chunks(pool->vm, heap);   in panthor_heap_destroy_locked()
    240  int panthor_heap_destroy(struct panthor_heap_pool *pool, u32 handle)   in panthor_heap_destroy() argument
    244          down_write(&pool->lock);   in panthor_heap_destroy()
    [all …]
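The destroy path above follows the common XArray handle-pool pattern: xa_alloc() hands out small ids and xa_erase() detaches an object before teardown. A generic sketch with hypothetical names (the array must be initialized with XA_FLAGS_ALLOC):

    static int my_pool_add(struct xarray *xa, void *obj, u32 *handle)
    {
            /* ids limited to 0..127, hypothetical range */
            return xa_alloc(xa, handle, obj, XA_LIMIT(0, 127), GFP_KERNEL);
    }

    static void *my_pool_remove(struct xarray *xa, u32 handle)
    {
            return xa_erase(xa, handle);    /* NULL if no such handle */
    }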
|
/linux-6.12.1/drivers/gpu/drm/amd/display/dc/resource/dce80/ |
D | dce80_resource.c |
    803  static void dce80_resource_destruct(struct dce110_resource_pool *pool)   in dce80_resource_destruct() argument
    807          for (i = 0; i < pool->base.pipe_count; i++) {   in dce80_resource_destruct()
    808                  if (pool->base.opps[i] != NULL)   in dce80_resource_destruct()
    809                          dce110_opp_destroy(&pool->base.opps[i]);   in dce80_resource_destruct()
    811                  if (pool->base.transforms[i] != NULL)   in dce80_resource_destruct()
    812                          dce80_transform_destroy(&pool->base.transforms[i]);   in dce80_resource_destruct()
    814                  if (pool->base.ipps[i] != NULL)   in dce80_resource_destruct()
    815                          dce_ipp_destroy(&pool->base.ipps[i]);   in dce80_resource_destruct()
    817                  if (pool->base.mis[i] != NULL) {   in dce80_resource_destruct()
    818                          kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));   in dce80_resource_destruct()
    [all …]
|
/linux-6.12.1/drivers/gpu/drm/amd/display/dc/dce60/ |
D | dce60_resource.c |
    797  static void dce60_resource_destruct(struct dce110_resource_pool *pool)   in dce60_resource_destruct() argument
    801          for (i = 0; i < pool->base.pipe_count; i++) {   in dce60_resource_destruct()
    802                  if (pool->base.opps[i] != NULL)   in dce60_resource_destruct()
    803                          dce110_opp_destroy(&pool->base.opps[i]);   in dce60_resource_destruct()
    805                  if (pool->base.transforms[i] != NULL)   in dce60_resource_destruct()
    806                          dce60_transform_destroy(&pool->base.transforms[i]);   in dce60_resource_destruct()
    808                  if (pool->base.ipps[i] != NULL)   in dce60_resource_destruct()
    809                          dce_ipp_destroy(&pool->base.ipps[i]);   in dce60_resource_destruct()
    811                  if (pool->base.mis[i] != NULL) {   in dce60_resource_destruct()
    812                          kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));   in dce60_resource_destruct()
    [all …]
|
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/ |
D | irq_affinity.c |
      8  static void cpu_put(struct mlx5_irq_pool *pool, int cpu)   in cpu_put() argument
     10          pool->irqs_per_cpu[cpu]--;   in cpu_put()
     13  static void cpu_get(struct mlx5_irq_pool *pool, int cpu)   in cpu_get() argument
     15          pool->irqs_per_cpu[cpu]++;   in cpu_get()
     19  static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,   in cpu_get_least_loaded() argument
     27                  if (!pool->irqs_per_cpu[cpu]) {   in cpu_get_least_loaded()
     33                  if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu])   in cpu_get_least_loaded()
     38                  mlx5_core_err(pool->dev, "NO online CPUs in req_mask (%*pbl)\n",   in cpu_get_least_loaded()
     42          pool->irqs_per_cpu[best_cpu]++;   in cpu_get_least_loaded()
     48  irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)   in irq_pool_request_irq() argument
    [all …]
|
/linux-6.12.1/arch/arm64/kvm/hyp/nvhe/ |
D | page_alloc.c |
     33  static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,   in __find_buddy_nocheck() argument
     45          if (addr < pool->range_start || addr >= pool->range_end)   in __find_buddy_nocheck()
     52  static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,   in __find_buddy_avail() argument
     56          struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);   in __find_buddy_avail()
     93  static void __hyp_attach_page(struct hyp_pool *pool,   in __hyp_attach_page() argument
    103          if (phys < pool->range_start || phys >= pool->range_end)   in __hyp_attach_page()
    113          for (; (order + 1) <= pool->max_order; order++) {   in __hyp_attach_page()
    114                  buddy = __find_buddy_avail(pool, p, order);   in __hyp_attach_page()
    127          page_add_to_list(p, &pool->free_area[order]);   in __hyp_attach_page()
    130  static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,   in __hyp_extract_page() argument
    [all …]
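Behind __find_buddy_nocheck() is the classic buddy identity: at order n, a block's buddy is the block whose address differs only in bit (PAGE_SHIFT + n), so merging can walk up the orders while each buddy is free, as __hyp_attach_page() does above. As a one-line sketch:

    /* buddy of 'addr' at the given order */
    static unsigned long buddy_addr(unsigned long addr, unsigned int order)
    {
            return addr ^ (PAGE_SIZE << order);
    }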
|
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/lib/ |
D | crypto.c |
     19  #define MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) MLX5_CRYPTO_DEK_CALC_FREED(pool)   argument
    288  mlx5_crypto_dek_bulk_create(struct mlx5_crypto_dek_pool *pool)   in mlx5_crypto_dek_bulk_create() argument
    290          struct mlx5_crypto_dek_priv *dek_priv = pool->mdev->mlx5e_res.dek_priv;   in mlx5_crypto_dek_bulk_create()
    291          struct mlx5_core_dev *mdev = pool->mdev;   in mlx5_crypto_dek_bulk_create()
    313          err = mlx5_crypto_create_dek_bulk(mdev, pool->key_purpose,   in mlx5_crypto_dek_bulk_create()
    334  mlx5_crypto_dek_pool_add_bulk(struct mlx5_crypto_dek_pool *pool)   in mlx5_crypto_dek_pool_add_bulk() argument
    338          bulk = mlx5_crypto_dek_bulk_create(pool);   in mlx5_crypto_dek_pool_add_bulk()
    342          pool->avail_deks += bulk->num_deks;   in mlx5_crypto_dek_pool_add_bulk()
    343          pool->num_deks += bulk->num_deks;   in mlx5_crypto_dek_pool_add_bulk()
    344          list_add(&bulk->entry, &pool->partial_list);   in mlx5_crypto_dek_pool_add_bulk()
    [all …]
|
/linux-6.12.1/net/rds/ |
D | ib_rdma.c |
    198  struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)   in rds_ib_reuse_mr() argument
    204          spin_lock_irqsave(&pool->clean_lock, flags);   in rds_ib_reuse_mr()
    205          ret = llist_del_first(&pool->clean_list);   in rds_ib_reuse_mr()
    206          spin_unlock_irqrestore(&pool->clean_lock, flags);   in rds_ib_reuse_mr()
    209                  if (pool->pool_type == RDS_IB_MR_8K_POOL)   in rds_ib_reuse_mr()
    275          struct rds_ib_mr_pool *pool = ibmr->pool;   in rds_ib_teardown_mr() local
    277          atomic_sub(pinned, &pool->free_pinned);   in rds_ib_teardown_mr()
    281  static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)   in rds_ib_flush_goal() argument
    285          item_count = atomic_read(&pool->item_count);   in rds_ib_flush_goal()
    342  int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,   in rds_ib_flush_mr_pool() argument
    [all …]
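Note the locking in rds_ib_reuse_mr(): llist_add() may run lock-free from any number of producers, but llist_del_first() callers must serialize among themselves, which is exactly what pool->clean_lock provides. A generic sketch of that split:

    /* Serialized pop from a lock-free llist; producers use llist_add(). */
    static struct llist_node *my_take_one(struct llist_head *list,
                                          spinlock_t *consumer_lock)
    {
            struct llist_node *node;
            unsigned long flags;

            spin_lock_irqsave(consumer_lock, flags);
            node = llist_del_first(list);
            spin_unlock_irqrestore(consumer_lock, flags);
            return node;
    }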
|
/linux-6.12.1/include/net/page_pool/ |
D | helpers.h |
     67  bool page_pool_get_stats(const struct page_pool *pool,
     92  static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)   in page_pool_dev_alloc_pages() argument
     96          return page_pool_alloc_pages(pool, gfp);   in page_pool_dev_alloc_pages()
    110  static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,   in page_pool_dev_alloc_frag() argument
    116          return page_pool_alloc_frag(pool, offset, size, gfp);   in page_pool_dev_alloc_frag()
    119  static inline struct page *page_pool_alloc(struct page_pool *pool,   in page_pool_alloc() argument
    123          unsigned int max_size = PAGE_SIZE << pool->p.order;   in page_pool_alloc()
    129                  return page_pool_alloc_pages(pool, gfp);   in page_pool_alloc()
    132          page = page_pool_alloc_frag(pool, offset, *size, gfp);   in page_pool_alloc()
    140          if (pool->frag_offset + *size > max_size) {   in page_pool_alloc()
    [all …]
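A sketch of a driver RX refill loop built on the helpers above; page_pool_dev_alloc_pages() hardwires GFP_ATOMIC | __GFP_NOWARN, so it suits NAPI context, and page_pool_get_dma_addr() is valid when the pool was created with PP_FLAG_DMA_MAP. The my_ring_* names are hypothetical:

    static int my_rx_refill(struct my_rx_ring *ring)
    {
            struct page *page;

            while (my_ring_space(ring)) {
                    page = page_pool_dev_alloc_pages(ring->pp);
                    if (!page)
                            return -ENOMEM;         /* retry on next poll */
                    my_ring_post(ring, page_pool_get_dma_addr(page));
            }
            return 0;
    }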
|