Searched refs:pools (Results 1 – 25 of 63) sorted by relevance

/linux-6.12.1/tools/testing/selftests/drivers/net/mlxsw/
sharedbuffer_configuration.py
28 def __init__(self, pools): argument
30 for pool in pools:
187 pools = PoolList()
190 pools.append(Pool(pooldict))
191 return pools
194 def do_check_pools(dlname, pools, vp): argument
195 for pool in pools:
219 def check_pools(dlname, pools): argument
221 record_vp = RecordValuePicker(pools)
224 do_check_pools(dlname, pools, RandomValuePicker(pools))
[all …]
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_arg.c
28 struct dr_arg_pool *pools[DR_ARG_CHUNK_SIZE_MAX]; member
201 arg_obj = dr_arg_pool_get_arg_obj(mgr->pools[size]); in mlx5dr_arg_get_obj()
226 dr_arg_pool_put_arg_obj(mgr->pools[arg_obj->log_chunk_size], arg_obj); in mlx5dr_arg_put_obj()
245 pool_mgr->pools[i] = dr_arg_pool_create(dmn, i); in mlx5dr_arg_mgr_create()
246 if (!pool_mgr->pools[i]) in mlx5dr_arg_mgr_create()
254 dr_arg_pool_destroy(pool_mgr->pools[i]); in mlx5dr_arg_mgr_create()
262 struct dr_arg_pool **pools; in mlx5dr_arg_mgr_destroy() local
268 pools = mgr->pools; in mlx5dr_arg_mgr_destroy()
270 dr_arg_pool_destroy(pools[i]); in mlx5dr_arg_mgr_destroy()
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
pool.c
25 if (!xsk->pools) { in mlx5e_xsk_get_pools()
26 xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS, in mlx5e_xsk_get_pools()
27 sizeof(*xsk->pools), GFP_KERNEL); in mlx5e_xsk_get_pools()
28 if (unlikely(!xsk->pools)) in mlx5e_xsk_get_pools()
41 kfree(xsk->pools); in mlx5e_xsk_put_pools()
42 xsk->pools = NULL; in mlx5e_xsk_put_pools()
54 xsk->pools[ix] = pool; in mlx5e_xsk_add_pool()
60 xsk->pools[ix] = NULL; in mlx5e_xsk_remove_pool()
pool.h
12 if (!xsk || !xsk->pools) in mlx5e_xsk_get_pool()
18 return xsk->pools[ix]; in mlx5e_xsk_get_pool()
/linux-6.12.1/arch/sparc/kernel/
iommu-common.c
82 spin_lock_init(&(iommu->pools[i].lock)); in iommu_tbl_pool_init()
83 iommu->pools[i].start = start; in iommu_tbl_pool_init()
84 iommu->pools[i].hint = start; in iommu_tbl_pool_init()
86 iommu->pools[i].end = start - 1; in iommu_tbl_pool_init()
131 pool = &(iommu->pools[pool_nr]); in iommu_tbl_range_alloc()
161 pool = &(iommu->pools[0]); in iommu_tbl_range_alloc()
193 pool = &(iommu->pools[pool_nr]); in iommu_tbl_range_alloc()
237 p = &tbl->pools[pool_nr]; in get_pool()
/linux-6.12.1/tools/net/ynl/samples/
page-pool.c
60 struct netdev_page_pool_get_list *pools; in main() local
76 pools = netdev_page_pool_get_dump(ys); in main()
77 if (!pools) in main()
80 ynl_dump_foreach(pools, pp) { in main()
87 netdev_page_pool_get_list_free(pools); in main()
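
page-pool.c is the YNL sample that dumps every page pool on the system through the netdev generic netlink family. Below is a minimal sketch of the same flow, assuming the libynl API and the generated netdev-user.h helpers the sample relies on (ynl_sock_create(), netdev_page_pool_get_dump(), ynl_dump_foreach(), netdev_page_pool_get_list_free()); the loop only counts entries so it makes no assumption about the response layout.

/* Sketch only: assumes libynl (<ynl.h>) and the generated netdev
 * user-space header from tools/net/ynl; names follow the hits above. */
#include <stdio.h>
#include <ynl.h>
#include "netdev-user.h"

int main(void)
{
	struct netdev_page_pool_get_list *pools;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	unsigned int n = 0;

	ys = ynl_sock_create(&ynl_netdev_family, &yerr);
	if (!ys)
		return 1;

	pools = netdev_page_pool_get_dump(ys);	/* dump all page pools */
	if (pools) {
		ynl_dump_foreach(pools, pp)
			n += !!pp;		/* one entry per pool */
		netdev_page_pool_get_list_free(pools);
	}
	printf("%u page pools\n", n);

	ynl_sock_destroy(ys);
	return 0;
}
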
/linux-6.12.1/mm/
dmapool.c
60 struct list_head pools; member
80 list_for_each_entry(pool, &dev->dma_pools, pools) { in pools_show()
92 static DEVICE_ATTR_RO(pools);
267 INIT_LIST_HEAD(&retval->pools); in dma_pool_create()
280 list_add(&retval->pools, &dev->dma_pools); in dma_pool_create()
288 list_del(&retval->pools); in dma_pool_create()
370 list_del(&pool->pools); in dma_pool_destroy()
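
mm/dmapool.c implements the per-device DMA pool allocator; the pools list in the hits above chains every dma_pool of a device for the sysfs "pools" attribute. A minimal usage sketch of the public API (dma_pool_create()/dma_pool_alloc()/dma_pool_free()/dma_pool_destroy()); the pool name, sizes and the example_* wrapper are illustrative.

#include <linux/dmapool.h>
#include <linux/errno.h>

/* Illustrative only: carve fixed-size, 16-byte-aligned DMA descriptors
 * out of a per-device pool instead of calling dma_alloc_coherent() for
 * each tiny buffer. "dev" is whatever struct device the driver owns. */
static int example_use_dma_pool(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *desc;

	pool = dma_pool_create("example-desc", dev, 64, 16, 0);
	if (!pool)
		return -ENOMEM;

	desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!desc) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand "dma" to the hardware, use "desc" from the CPU ... */

	dma_pool_free(pool, desc, dma);
	dma_pool_destroy(pool);
	return 0;
}
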
/linux-6.12.1/drivers/net/ethernet/chelsio/libcxgb/
libcxgb_ppm.c
348 struct cxgbi_ppm_pool __percpu *pools; in ppm_alloc_cpu_pool() local
350 unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3; in ppm_alloc_cpu_pool()
367 alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; in ppm_alloc_cpu_pool()
368 pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool)); in ppm_alloc_cpu_pool()
370 if (!pools) in ppm_alloc_cpu_pool()
374 struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu); in ppm_alloc_cpu_pool()
384 return pools; in ppm_alloc_cpu_pool()
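
ppm_alloc_cpu_pool() sizes one pool structure plus an inline bitmap and hands each CPU its own copy via __alloc_percpu(), capping the bitmap so the allocation fits within PCPU_MIN_UNIT_SIZE. A reduced sketch of that pattern; struct example_pool and example_alloc_cpu_pools() are hypothetical stand-ins for the cxgbi types.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

/* Hypothetical per-CPU pool: a lock plus a variable-length bitmap,
 * mirroring the cxgbi_ppm_pool pattern in libcxgb_ppm.c. */
struct example_pool {
	spinlock_t lock;
	unsigned int next;
	unsigned long bmap[];	/* bitmap of in-use entries */
};

static struct example_pool __percpu *example_alloc_cpu_pools(unsigned int nr_entries)
{
	size_t sz = sizeof(struct example_pool) +
		    BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
	struct example_pool __percpu *pools;
	unsigned int cpu;

	/* One pool instance per possible CPU, aligned like the struct. */
	pools = __alloc_percpu(sz, __alignof__(struct example_pool));
	if (!pools)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct example_pool *p = per_cpu_ptr(pools, cpu);

		spin_lock_init(&p->lock);
		p->next = 0;
	}

	return pools;	/* release later with free_percpu() */
}
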
/linux-6.12.1/drivers/net/ethernet/freescale/dpaa2/
dpaa2-xsk.c
162 pools_params->pools[curr_bp].priority_mask |= (1 << j); in dpaa2_xsk_set_bp_per_qdbin()
163 if (!pools_params->pools[curr_bp].priority_mask) in dpaa2_xsk_set_bp_per_qdbin()
166 pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid; in dpaa2_xsk_set_bp_per_qdbin()
167 pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size; in dpaa2_xsk_set_bp_per_qdbin()
168 pools_params->pools[curr_bp++].backup_pool = 0; in dpaa2_xsk_set_bp_per_qdbin()
/linux-6.12.1/drivers/soc/ti/
knav_qmss.h
203 struct list_head pools; member
304 struct list_head pools; member
363 list_for_each_entry(pool, &kdev->pools, list)
knav_qmss_queue.c
820 node = &region->pools; in knav_pool_create()
821 list_for_each_entry(iter, &region->pools, region_inst) { in knav_pool_create()
835 list_add_tail(&pool->list, &kdev->pools); in knav_pool_create()
1037 list_add(&pool->region_inst, &region->pools); in knav_queue_setup_region()
1122 INIT_LIST_HEAD(&region->pools); in knav_queue_setup_regions()
1364 list_for_each_entry_safe(pool, tmp, &region->pools, region_inst) in knav_queue_free_regions()
1811 INIT_LIST_HEAD(&kdev->pools); in knav_queue_probe()
/linux-6.12.1/Documentation/devicetree/bindings/soc/ti/
keystone-navigator-qmss.txt
6 processors(PDSP), linking RAM, descriptor pools and infrastructure
49 - queue-pools : child node classifying the queue ranges into pools.
50 Queue ranges are grouped into 3 type of pools:
151 queue-pools {
/linux-6.12.1/Documentation/core-api/
workqueue.rst
60 * Use per-CPU unified worker pools shared by all wq to provide
85 worker-pools.
89 which manages worker-pools and processes the queued work items.
91 There are two worker-pools, one for normal work items and the other
93 worker-pools to serve work items queued on unbound workqueues - the
94 number of these backing pools is dynamic.
140 For unbound workqueues, the number of backing pools is dynamic.
143 backing worker pools matching the attributes. The responsibility of
188 worker-pools which host workers which are not bound to any
191 worker-pools try to start execution of work items as soon as
[all …]
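
The workqueue.rst hits describe how queued work items are executed by shared worker-pools (per-CPU pools for bound workqueues, a dynamic set of pools for unbound ones) rather than by per-workqueue threads. A minimal sketch of the driver-facing side, with illustrative names; the pools themselves are created and managed by the workqueue core.

#include <linux/workqueue.h>
#include <linux/printk.h>
#include <linux/errno.h>

static void example_work_fn(struct work_struct *work)
{
	/* Runs in process context on a worker borrowed from a
	 * worker-pool matching the workqueue's attributes. */
	pr_info("example work executed\n");
}

static DECLARE_WORK(example_work, example_work_fn);
static struct workqueue_struct *example_wq;

static int example_init(void)
{
	/* WQ_UNBOUND: served by the dynamically created unbound
	 * worker-pools described above rather than per-CPU pools. */
	example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);
	return 0;
}

static void example_exit(void)
{
	/* destroy_workqueue() drains any remaining work first. */
	destroy_workqueue(example_wq);
}
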
swiotlb.rst
142 as one or more "pools". The default pool is allocated during system boot with a
146 CONFIG_SWIOTLB_DYNAMIC is enabled, additional pools may be allocated later in
224 New pools added via dynamic swiotlb are linked together in a linear list.
227 large number of dynamic pools. The data structures could be improved for
232 not wasted, with dynamic pools making more space available if needed (as long
239 which includes the default memory pool and any dynamic or transient pools
307 Restricted pools
309 The swiotlb machinery is also used for "restricted pools", which are pools of
311 use by a particular device. Restricted pools provide a level of DMA memory
318 Restricted pools add swiotlb_alloc() and swiotlb_free() APIs, which are called
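
swiotlb pools are internal to the DMA mapping layer: a driver never selects a pool, it maps buffers as usual and the core bounces the data through the default, dynamic or restricted pool when the device cannot address the original memory. A hedged sketch of that driver-side view; the function and names are illustrative.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative: if "buf" is not directly addressable by the device,
 * the DMA API transparently bounces it through a swiotlb pool. */
static int example_dma_map(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... start the device on "dma" ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}
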
mm-api.rst
88 Memory pools
94 DMA pools
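
The mm-api.rst entries indexed here are the "Memory pools" (mempool) and "DMA pools" kerneldoc sections. The dma_pool sketch appears earlier; here is a minimal mempool sketch that reserves a guaranteed minimum of objects from an illustrative slab cache.

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical object type; the cache and pool names are illustrative. */
struct example_obj { int id; };

static struct kmem_cache *example_cache;
static mempool_t *example_pool;

static int example_mempool_init(void)
{
	example_cache = kmem_cache_create("example_obj",
					  sizeof(struct example_obj),
					  0, 0, NULL);
	if (!example_cache)
		return -ENOMEM;

	/* Reserve at least 4 objects so allocation can make forward
	 * progress even under memory pressure. */
	example_pool = mempool_create_slab_pool(4, example_cache);
	if (!example_pool) {
		kmem_cache_destroy(example_cache);
		return -ENOMEM;
	}
	return 0;
}

static void example_mempool_use(void)
{
	struct example_obj *obj = mempool_alloc(example_pool, GFP_KERNEL);

	if (obj)
		mempool_free(obj, example_pool);
}
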
/linux-6.12.1/drivers/md/
dm-table.c
1051 struct dm_md_mempools *pools; in dm_table_alloc_md_mempools() local
1060 pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); in dm_table_alloc_md_mempools()
1061 if (!pools) in dm_table_alloc_md_mempools()
1087 if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags)) in dm_table_alloc_md_mempools()
1090 bioset_integrity_create(&pools->io_bs, pool_size)) in dm_table_alloc_md_mempools()
1093 if (bioset_init(&pools->bs, pool_size, front_pad, 0)) in dm_table_alloc_md_mempools()
1096 bioset_integrity_create(&pools->bs, pool_size)) in dm_table_alloc_md_mempools()
1099 t->mempools = pools; in dm_table_alloc_md_mempools()
1103 dm_free_md_mempools(pools); in dm_table_alloc_md_mempools()
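
dm_table_alloc_md_mempools() backs each DM table with two bio_sets so bio and per-io allocations can always make forward progress. A minimal sketch of the underlying bioset API as I understand it; the pool size, front pad and names are illustrative, and bios would subsequently be allocated from the set with bio_alloc_bioset().

#include <linux/bio.h>

static struct bio_set example_bs;

static int example_bioset_init(void)
{
	/* Back bio allocations with a mempool of 16 bios, reserve bvecs
	 * too, and ask for 0 bytes of per-bio front padding. */
	return bioset_init(&example_bs, 16, 0, BIOSET_NEED_BVECS);
}

static void example_bioset_exit(void)
{
	bioset_exit(&example_bs);
}
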
/linux-6.12.1/arch/sparc/include/asm/
iommu-common.h
26 struct iommu_pool pools[IOMMU_NR_POOLS]; member
/linux-6.12.1/Documentation/networking/
page_pool.rst
46 Information about page pools on the system can be accessed via the netdev
51 The number of pools created **must** match the number of hardware queues
106 with fragmented page pools.
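
page_pool.rst documents the per-RX-queue page pools that the netdev netlink interface (and the YNL sample above) reports. A hedged sketch of the usual driver pattern, one pool per hardware queue with DMA mapping delegated to the pool; the sizes, names and the include path are assumptions based on recent kernels.

#include <net/page_pool/helpers.h>
#include <linux/dma-mapping.h>

/* Illustrative: one pool per RX queue, pages DMA-mapped by the pool.
 * "dev", "nid" and the pool size are placeholders for driver values. */
static struct page_pool *example_create_rx_pool(struct device *dev, int nid)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP,
		.order		= 0,
		.pool_size	= 256,		/* roughly the RX ring size */
		.nid		= nid,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp);	/* ERR_PTR() on failure */
}

static void example_rx_cycle(struct page_pool *pool)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return;

	/* ... post the page to the RX ring, receive, then recycle ... */
	page_pool_put_full_page(pool, page, false);
}
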
/linux-6.12.1/drivers/net/ethernet/wangxun/libwx/
wx_hw.c
604 static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools, in wx_set_rar() argument
620 wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); in wx_set_rar()
622 wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32); in wx_set_rar()
797 wx->mac_table[i].pools, in wx_sync_mac_table()
811 wx->mac_table[0].pools = 1ULL; in wx_mac_set_default_filter()
814 wx->mac_table[0].pools, in wx_mac_set_default_filter()
830 wx->mac_table[i].pools = 0; in wx_flush_sw_mac_table()
846 if (wx->mac_table[i].pools != (1ULL << pool)) { in wx_add_mac_filter()
848 wx->mac_table[i].pools |= (1ULL << pool); in wx_add_mac_filter()
860 wx->mac_table[i].pools |= (1ULL << pool); in wx_add_mac_filter()
[all …]
/linux-6.12.1/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/
ethernet-driver.rst
26 - buffer pools
69 DPBPs represent hardware buffer pools. Packet I/O is performed in the context
124 The role of hardware buffer pools is storage of ingress frame data. Each network
/linux-6.12.1/Documentation/arch/arm/keystone/
knav-qmss.rst
12 processors(PDSP), linking RAM, descriptor pools and infrastructure
25 allocate descriptor pools, map the descriptors, push/pop to queues etc. For
/linux-6.12.1/kernel/dma/
swiotlb.c
91 .pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
310 list_add_rcu(&pool->node, &mem->pools); in add_mem_pool()
783 list_for_each_entry_rcu(pool, &mem->pools, node) { in __swiotlb_find_pool()
1151 list_for_each_entry_rcu(pool, &mem->pools, node) { in swiotlb_search_area()
1332 list_for_each_entry_rcu(pool, &mem->pools, node) in mem_used()
1841 INIT_LIST_HEAD_RCU(&mem->pools); in rmem_swiotlb_device_init()
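
With CONFIG_SWIOTLB_DYNAMIC the pools of an io_tlb_mem sit on an RCU-protected list: add_mem_pool() publishes new pools with list_add_rcu() while lookups such as __swiotlb_find_pool() walk the list locklessly with list_for_each_entry_rcu(). A generic sketch of that pattern with hypothetical example_* types (lifetime handling simplified).

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/* Illustrative pool registry: a spinlock serializes writers, RCU lets
 * readers traverse without locks, like io_tlb_mem.pools. */
struct example_pool {
	struct list_head node;
	unsigned long start, end;
};

static LIST_HEAD(example_pools);
static DEFINE_SPINLOCK(example_pools_lock);

static void example_add_pool(struct example_pool *pool)
{
	spin_lock(&example_pools_lock);
	list_add_rcu(&pool->node, &example_pools);
	spin_unlock(&example_pools_lock);
}

static struct example_pool *example_find_pool(unsigned long addr)
{
	struct example_pool *pool, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &example_pools, node) {
		if (addr >= pool->start && addr < pool->end) {
			found = pool;
			break;
		}
	}
	rcu_read_unlock();

	/* Simplified: a real caller must keep holding rcu_read_lock()
	 * or otherwise pin the pool before using the result. */
	return found;
}
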
/linux-6.12.1/arch/powerpc/kernel/
iommu.c
253 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc()
281 pool = &(tbl->pools[0]); in iommu_range_alloc()
303 pool = &tbl->pools[pool_nr]; in iommu_range_alloc()
430 p = &tbl->pools[pool_nr]; in get_pool()
756 p = &tbl->pools[i]; in iommu_init_table()
/linux-6.12.1/drivers/soc/fsl/qbman/
qman_priv.h
177 u32 pools; member
/linux-6.12.1/arch/arm/boot/dts/ti/keystone/
keystone-k2g-netcp.dtsi
36 queue-pools {
