/linux-6.12.1/drivers/net/ethernet/marvell/octeontx2/nic/

qos_sq.c
     19  static void otx2_qos_aura_pool_free(struct otx2_nic *pfvf, int pool_id)  in otx2_qos_aura_pool_free() argument
     26  pool = &pfvf->qset.pool[pool_id];  in otx2_qos_aura_pool_free()
     36  int pool_id, stack_pages, num_sqbs;  in otx2_qos_sq_aura_pool_init() local
     56  pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);  in otx2_qos_sq_aura_pool_init()
     57  pool = &pfvf->qset.pool[pool_id];  in otx2_qos_sq_aura_pool_init()
     60  err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);  in otx2_qos_sq_aura_pool_init()
     65  err = otx2_pool_init(pfvf, pool_id, stack_pages,  in otx2_qos_sq_aura_pool_init()
     88  pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);  in otx2_qos_sq_aura_pool_init()
    104  otx2_aura_allocptr(pfvf, pool_id);  in otx2_qos_sq_aura_pool_init()
    238  int pool_id, sq_idx, err;  in otx2_qos_enable_sq() local
    [all …]

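The qos_sq.c hits above sketch the send-queue buffer setup: a queue index is mapped to a pool index (otx2_get_pool_idx), an aura/pool pair with that id is initialized, and the pool is then pre-filled with buffer pointers via aura_freeptr. Below is a minimal userspace sketch of that shape; the toy_* names, pool counts, index layout, and addresses are illustrative assumptions, not the driver's helpers.

/*
 * Userspace sketch of the qos_sq.c pattern: map a send queue to a pool
 * index, then pre-fill that pool with buffer pointers.  All names, sizes,
 * and the pool-index layout are assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_RQ_POOLS 8    /* assumed: receive-queue pools occupy the low indices */
#define TOY_NUM_SQBS 32   /* assumed: buffers pre-loaded per send-queue pool */

struct toy_pool {
	uint64_t bufs[TOY_NUM_SQBS];  /* free buffer "pointers" (IOVAs) */
	int top;
};

static struct toy_pool pools[TOY_RQ_POOLS * 2];

/* Stand-in for otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); the offset is assumed. */
static int toy_get_sq_pool_idx(int qidx)
{
	return TOY_RQ_POOLS + qidx;
}

/* Stand-in for hw_ops->aura_freeptr(): push a free buffer into the pool. */
static void toy_aura_freeptr(int pool_id, uint64_t bufptr)
{
	struct toy_pool *pool = &pools[pool_id];

	if (pool->top < TOY_NUM_SQBS)
		pool->bufs[pool->top++] = bufptr;
}

static int toy_sq_pool_init(int qidx)
{
	int pool_id = toy_get_sq_pool_idx(qidx);
	int i;

	/* Pre-fill the pool so the send queue always finds a buffer to pull. */
	for (i = 0; i < TOY_NUM_SQBS; i++)
		toy_aura_freeptr(pool_id, 0x100000ULL + (uint64_t)i * 0x1000);
	return pool_id;
}

int main(void)
{
	printf("SQ 0 uses pool %d\n", toy_sq_pool_init(0));
	return 0;
}
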
otx2_common.c
    988  int err, pool_id, non_xdp_queues;  in otx2_cq_init() local
   1030  pool_id = ((cq->cq_type == CQ_RX) &&  in otx2_cq_init()
   1032  cq->rbpool = &qset->pool[pool_id];  in otx2_cq_init()
   1233  int pool_id, pool_start = 0, pool_end = 0, size = 0;  in otx2_free_aura_ptr() local
   1249  for (pool_id = pool_start; pool_id < pool_end; pool_id++) {  in otx2_free_aura_ptr()
   1250  iova = otx2_aura_allocptr(pfvf, pool_id);  in otx2_free_aura_ptr()
   1251  pool = &pfvf->qset.pool[pool_id];  in otx2_free_aura_ptr()
   1258  iova = otx2_aura_allocptr(pfvf, pool_id);  in otx2_free_aura_ptr()
   1266  int pool_id;  in otx2_aura_pool_free() local
   1271  for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) {  in otx2_aura_pool_free()
   [all …]

otx2_txrx.c
   1221  u16 pool_id;  in otx2_cleanup_rx_cqes() local
   1230  pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);  in otx2_cleanup_rx_cqes()
   1231  pool = &pfvf->qset.pool[pool_id];  in otx2_cleanup_rx_cqes()

otx2_common.h
    995  int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
    998  int pool_id, int numptrs);

otx2_pf.c
   1613  int pool_id;  in otx2_free_hw_resources() local
   1654  pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);  in otx2_free_hw_resources()
   1655  pool = &pf->qset.pool[pool_id];  in otx2_free_hw_resources()

/linux-6.12.1/drivers/net/ethernet/marvell/

mvneta_bm.c
     40  static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id)  in mvneta_bm_pool_enable() argument
     44  val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));  in mvneta_bm_pool_enable()
     46  mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);  in mvneta_bm_pool_enable()
     52  static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id)  in mvneta_bm_pool_disable() argument
     56  val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));  in mvneta_bm_pool_disable()
     58  mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);  in mvneta_bm_pool_disable()
     79  static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id,  in mvneta_bm_pool_target_set() argument
     84  val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id));  in mvneta_bm_pool_target_set()
     85  val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id);  in mvneta_bm_pool_target_set()
     86  val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id);  in mvneta_bm_pool_target_set()
    [all …]

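mvneta_bm.c drives each pool through per-pool registers with a read-modify-write: read the pool's register, set or clear the relevant field, write it back. The sketch below models that pattern in userspace; the register array, bit positions, and field widths are invented for the example and do not match the MVNETA_BM_* definitions.

/*
 * Userspace model of the per-pool read-modify-write register accesses in
 * mvneta_bm.c.  The register array, enable bit, and target field are
 * invented for the example and do not match the MVNETA_BM_* layout.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_BM_POOLS        4
#define TOY_POOL_ENABLE_BIT (1u << 0)      /* assumed enable bit */
#define TOY_TARGET_SHIFT    4
#define TOY_TARGET_MASK     (0xfu << TOY_TARGET_SHIFT)

static uint32_t pool_base_reg[TOY_BM_POOLS];   /* stands in for MMIO registers */

static uint32_t toy_bm_read(int pool_id)            { return pool_base_reg[pool_id]; }
static void toy_bm_write(int pool_id, uint32_t val) { pool_base_reg[pool_id] = val; }

/* Like mvneta_bm_pool_enable(): read, set the enable bit, write back. */
static void toy_bm_pool_enable(int pool_id)
{
	uint32_t val = toy_bm_read(pool_id);

	val |= TOY_POOL_ENABLE_BIT;
	toy_bm_write(pool_id, val);
}

/* Like mvneta_bm_pool_target_set(): clear the old field before setting it. */
static void toy_bm_pool_target_set(int pool_id, uint32_t target)
{
	uint32_t val = toy_bm_read(pool_id);

	val &= ~TOY_TARGET_MASK;
	val |= (target << TOY_TARGET_SHIFT) & TOY_TARGET_MASK;
	toy_bm_write(pool_id, val);
}

int main(void)
{
	toy_bm_pool_target_set(2, 0x3);
	toy_bm_pool_enable(2);
	printf("pool 2 register = 0x%x\n", pool_base_reg[2]);
	return 0;
}
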
mvneta_bm.h
    144  struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
    175  u8 pool_id,  in mvneta_bm_pool_use() argument

mvneta.c
   1067  u8 pool_id)  in mvneta_bm_pool_bufsize_set() argument
   1078  val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));  in mvneta_bm_pool_bufsize_set()
   1080  mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);  in mvneta_bm_pool_bufsize_set()
   1999  u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);  in mvneta_rxq_drop_pkts() local
   2002  bm_pool = &pp->bm_priv->bm_pools[pool_id];  in mvneta_rxq_drop_pkts()
   2564  u8 pool_id;  in mvneta_rx_hwbm() local
   2571  pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);  in mvneta_rx_hwbm()
   2572  bm_pool = &pp->bm_priv->bm_pools[pool_id];  in mvneta_rx_hwbm()

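In the RX paths listed above, mvneta.c recovers the owning buffer-manager pool straight from the descriptor: the hardware records the pool id in the descriptor status word, and the driver uses it to index its pool array. A compact model follows, with an assumed bit position for the field and trimmed structures.

/*
 * Sketch of the lookup in mvneta_rx_hwbm()/mvneta_rxq_drop_pkts(): extract
 * the pool id the hardware wrote into the RX descriptor and index the pool
 * array with it.  The bit position and structures are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_RX_BM_POOL_SHIFT 13
#define TOY_RX_BM_POOL_MASK  (0x3u << TOY_RX_BM_POOL_SHIFT)

struct toy_rx_desc { uint32_t status; };       /* trimmed RX descriptor */
struct toy_bm_pool { uint16_t buf_size; };     /* trimmed pool state */

static struct toy_bm_pool bm_pools[4] = {
	{ .buf_size = 256 }, { .buf_size = 512 },
	{ .buf_size = 1536 }, { .buf_size = 9216 },
};

/* Stand-in for MVNETA_RX_GET_BM_POOL_ID(rx_desc). */
static uint8_t toy_rx_get_bm_pool_id(const struct toy_rx_desc *desc)
{
	return (desc->status & TOY_RX_BM_POOL_MASK) >> TOY_RX_BM_POOL_SHIFT;
}

int main(void)
{
	struct toy_rx_desc desc = { .status = 2u << TOY_RX_BM_POOL_SHIFT };
	uint8_t pool_id = toy_rx_get_bm_pool_id(&desc);

	printf("packet buffer came from pool %u (%u-byte buffers)\n",
	       pool_id, bm_pools[pool_id].buf_size);
	return 0;
}
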
/linux-6.12.1/fs/ceph/

util.c
     37  fl->pool_id = le32_to_cpu(legacy->fl_pg_pool);  in ceph_file_layout_from_legacy()
     38  if (fl->pool_id == 0 && fl->stripe_unit == 0 &&  in ceph_file_layout_from_legacy()
     40  fl->pool_id = -1;  in ceph_file_layout_from_legacy()
     49  if (fl->pool_id >= 0)  in ceph_file_layout_to_legacy()
     50  legacy->fl_pg_pool = cpu_to_le32(fl->pool_id);  in ceph_file_layout_to_legacy()

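util.c converts between the legacy wire layout (32-bit pool field, all-zero meaning "unset") and the in-memory layout (signed 64-bit pool_id, -1 meaning "unset"). Below is a simplified, endianness-free model of that conversion; the struct names are stand-ins, not the kernel types.

/*
 * Simplified model of ceph_file_layout_from_legacy()/_to_legacy(): the
 * legacy wire layout uses a 32-bit pool field where all-zero means "unset",
 * the in-memory layout uses a signed 64-bit pool_id where -1 means "unset".
 * Struct names and the dropped byte-order conversions are simplifications.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_legacy_layout {       /* wire format */
	uint32_t stripe_unit;
	uint32_t object_size;
	uint32_t pg_pool;
};

struct toy_file_layout {         /* in-memory format */
	uint32_t stripe_unit;
	uint32_t object_size;
	int64_t  pool_id;        /* -1 when no pool has been set */
};

static void toy_layout_from_legacy(struct toy_file_layout *fl,
				   const struct toy_legacy_layout *legacy)
{
	fl->stripe_unit = legacy->stripe_unit;
	fl->object_size = legacy->object_size;
	fl->pool_id = legacy->pg_pool;
	/* An all-zero legacy layout means "no layout", not pool 0. */
	if (fl->pool_id == 0 && fl->stripe_unit == 0 && fl->object_size == 0)
		fl->pool_id = -1;
}

static void toy_layout_to_legacy(const struct toy_file_layout *fl,
				 struct toy_legacy_layout *legacy)
{
	legacy->stripe_unit = fl->stripe_unit;
	legacy->object_size = fl->object_size;
	/* Only a valid (non-negative) pool id can be put on the wire. */
	legacy->pg_pool = fl->pool_id >= 0 ? (uint32_t)fl->pool_id : 0;
}

int main(void)
{
	struct toy_legacy_layout wire = { 0, 0, 0 };
	struct toy_file_layout fl;

	toy_layout_from_legacy(&fl, &wire);
	printf("pool_id = %lld\n", (long long)fl.pool_id);   /* prints -1 */
	return 0;
}
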
ioctl.c
     29  l.data_pool = ci->i_layout.pool_id;  in ceph_ioctl_get_layout()
     99  nl.data_pool = ci->i_layout.pool_id;  in ceph_ioctl_set_layout()
    212  oloc.pool = ci->i_layout.pool_id;  in ceph_ioctl_get_dataloc()

xattr.c
     53  fl->object_size > 0 || fl->pool_id >= 0 ||  in ceph_vxattrcb_layout_exists()
     64  s64 pool = ci->i_layout.pool_id;  in ceph_vxattrcb_layout()
    167  s64 pool = ci->i_layout.pool_id;  in ceph_vxattrcb_layout_pool()

addr.c
   2210  pool = ci->i_layout.pool_id;  in ceph_pool_perm_check()
   2238  if (pool == ci->i_layout.pool_id &&  in ceph_pool_perm_check()
   2242  pool = ci->i_layout.pool_id;  in ceph_pool_perm_check()

/linux-6.12.1/drivers/net/ethernet/apm/xgene/

xgene_enet_cle.c
    701  u16 pool_id;  in xgene_cle_set_rss_idt() local
    705  pool_id = pdata->rx_ring[idx]->buf_pool->id;  in xgene_cle_set_rss_idt()
    706  fpsel = xgene_enet_get_fpsel(pool_id);  in xgene_cle_set_rss_idt()
    710  pool_id = pdata->rx_ring[idx]->page_pool->id;  in xgene_cle_set_rss_idt()
    711  nfpsel = xgene_enet_get_fpsel(pool_id);  in xgene_cle_set_rss_idt()
    764  u32 def_qid, def_fpsel, def_nxtfpsel, pool_id;  in xgene_enet_cle_init() local
    783  pool_id = pdata->rx_ring[0]->buf_pool->id;  in xgene_enet_cle_init()
    784  def_fpsel = xgene_enet_get_fpsel(pool_id);  in xgene_enet_cle_init()
    787  pool_id = pdata->rx_ring[0]->page_pool->id;  in xgene_enet_cle_init()
    788  def_nxtfpsel = xgene_enet_get_fpsel(pool_id);  in xgene_enet_cle_init()

/linux-6.12.1/drivers/net/wireless/ath/ath12k/

dp.c
   1156  u32 pool_id, tx_spt_page;  in ath12k_dp_cc_cleanup() local
   1218  for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {  in ath12k_dp_cc_cleanup()
   1219  spin_lock_bh(&dp->tx_desc_lock[pool_id]);  in ath12k_dp_cc_cleanup()
   1222  tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;  in ath12k_dp_cc_cleanup()
   1230  spin_unlock_bh(&dp->tx_desc_lock[pool_id]);  in ath12k_dp_cc_cleanup()
   1401  u32 i, j, pool_id, tx_spt_page;  in ath12k_dp_cc_desc_init() local
   1433  for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {  in ath12k_dp_cc_desc_init()
   1434  spin_lock_bh(&dp->tx_desc_lock[pool_id]);  in ath12k_dp_cc_desc_init()
   1440  spin_unlock_bh(&dp->tx_desc_lock[pool_id]);  in ath12k_dp_cc_desc_init()
   1445  tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;  in ath12k_dp_cc_desc_init()
   [all …]

dp_tx.c
     81  u8 pool_id)  in ath12k_dp_tx_release_txbuf() argument
     83  spin_lock_bh(&dp->tx_desc_lock[pool_id]);  in ath12k_dp_tx_release_txbuf()
     84  list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]);  in ath12k_dp_tx_release_txbuf()
     85  spin_unlock_bh(&dp->tx_desc_lock[pool_id]);  in ath12k_dp_tx_release_txbuf()
     89  u8 pool_id)  in ath12k_dp_tx_assign_buffer() argument
     93  spin_lock_bh(&dp->tx_desc_lock[pool_id]);  in ath12k_dp_tx_assign_buffer()
     94  desc = list_first_entry_or_null(&dp->tx_desc_free_list[pool_id],  in ath12k_dp_tx_assign_buffer()
     98  spin_unlock_bh(&dp->tx_desc_lock[pool_id]);  in ath12k_dp_tx_assign_buffer()
    103  list_move_tail(&desc->list, &dp->tx_desc_used_list[pool_id]);  in ath12k_dp_tx_assign_buffer()
    104  spin_unlock_bh(&dp->tx_desc_lock[pool_id]);  in ath12k_dp_tx_assign_buffer()
    [all …]

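dp_tx.c keeps one descriptor free list and one used list per pool_id, each guarded by that pool's own lock, and moves descriptors between them on assign and release. The userspace sketch below models only the free-list half, with a pthread mutex standing in for the spinlock and a hand-rolled singly linked list in place of list_head; the per-pool used-list bookkeeping of the real driver is noted but omitted.

/*
 * Userspace model of the per-pool TX descriptor recycling in dp_tx.c: each
 * pool_id owns a lock and a free list, and descriptors are popped on assign
 * and pushed back on release under that lock.  A pthread mutex and a singly
 * linked list stand in for the kernel spinlock and list_head; the per-pool
 * "used" list kept by the real driver is omitted here.
 */
#include <pthread.h>
#include <stdio.h>

#define TOY_MAX_POOLS 4

struct toy_tx_desc {
	int id;
	struct toy_tx_desc *next;
};

static pthread_mutex_t tx_desc_lock[TOY_MAX_POOLS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};
static struct toy_tx_desc *tx_desc_free_list[TOY_MAX_POOLS];

/* Pop a free descriptor from the pool; NULL means the pool is exhausted. */
static struct toy_tx_desc *toy_tx_assign_buffer(int pool_id)
{
	struct toy_tx_desc *desc;

	pthread_mutex_lock(&tx_desc_lock[pool_id]);
	desc = tx_desc_free_list[pool_id];
	if (desc)
		tx_desc_free_list[pool_id] = desc->next;
	pthread_mutex_unlock(&tx_desc_lock[pool_id]);
	return desc;
}

/* Return a completed descriptor to its pool's free list. */
static void toy_tx_release_txbuf(struct toy_tx_desc *desc, int pool_id)
{
	pthread_mutex_lock(&tx_desc_lock[pool_id]);
	desc->next = tx_desc_free_list[pool_id];
	tx_desc_free_list[pool_id] = desc;
	pthread_mutex_unlock(&tx_desc_lock[pool_id]);
}

int main(void)
{
	static struct toy_tx_desc descs[2] = { { .id = 0 }, { .id = 1 } };
	struct toy_tx_desc *desc;

	descs[0].next = &descs[1];
	tx_desc_free_list[0] = &descs[0];

	desc = toy_tx_assign_buffer(0);
	printf("got descriptor %d from pool 0\n", desc->id);
	toy_tx_release_txbuf(desc, 0);
	return 0;
}
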
/linux-6.12.1/drivers/block/

rbd.c
    188  u64 pool_id;  member
    992  rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?  in rbd_init_layout()
    993  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;  in rbd_init_layout()
   1450  req->r_base_oloc.pool = rbd_dev->layout.pool_id;  in __rbd_obj_add_osd_request()
   5106  (unsigned long long) rbd_dev->spec->pool_id);  in rbd_pool_id_show()
   5182  spec->pool_id, spec->pool_name,  in rbd_parent_show()
   5219  static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
   5288  spec->pool_id = CEPH_NOPOOL;  in rbd_spec_alloc()
   5357  rbd_dev->header_oloc.pool = spec->pool_id;  in __rbd_dev_create()
   5588  u64 pool_id;  member
   [all …]

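rbd_init_layout() picks the pool that data objects are written to: the header's separate data pool if one is set, otherwise the pool the image itself lives in (spec->pool_id). A small model of that selection follows; CEPH_NOPOOL is represented as -1 here, which is an assumption of the sketch, and the structures are trimmed stand-ins.

/*
 * Sketch of the pool selection visible in rbd_init_layout(): if the image
 * header names a separate data pool, objects go there, otherwise they go to
 * the pool the image itself lives in.  CEPH_NOPOOL is modeled as -1, which
 * is an assumption of this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_CEPH_NOPOOL ((int64_t)-1)

struct toy_rbd_spec   { int64_t pool_id; };       /* pool holding the image */
struct toy_rbd_header { int64_t data_pool_id; };  /* optional separate data pool */

static int64_t toy_rbd_data_pool(const struct toy_rbd_spec *spec,
				 const struct toy_rbd_header *header)
{
	return header->data_pool_id == TOY_CEPH_NOPOOL ? spec->pool_id
						       : header->data_pool_id;
}

int main(void)
{
	struct toy_rbd_spec spec = { .pool_id = 3 };
	struct toy_rbd_header plain = { .data_pool_id = TOY_CEPH_NOPOOL };
	struct toy_rbd_header split = { .data_pool_id = 7 };

	printf("plain image writes to pool %lld\n",
	       (long long)toy_rbd_data_pool(&spec, &plain));   /* 3 */
	printf("image with a data pool writes to pool %lld\n",
	       (long long)toy_rbd_data_pool(&spec, &split));   /* 7 */
	return 0;
}
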
/linux-6.12.1/include/xen/interface/

xen.h
    744  int32_t pool_id;  member

/linux-6.12.1/tools/workqueue/

wq_dump.py
    185  pool_id = per_cpu_ptr(wq.cpu_pwq, cpu)[0].pool.id.value_()  variable

/linux-6.12.1/drivers/gpu/drm/amd/amdgpu/

amdgpu_ttm.c
    680  int32_t pool_id;  member
   1110  gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);  in amdgpu_ttm_tt_create()
   1112  gtt->pool_id = abo->xcp_id;  in amdgpu_ttm_tt_create()
   1154  if (adev->mman.ttm_pools && gtt->pool_id >= 0)  in amdgpu_ttm_tt_populate()
   1155  pool = &adev->mman.ttm_pools[gtt->pool_id];  in amdgpu_ttm_tt_populate()
   1199  if (adev->mman.ttm_pools && gtt->pool_id >= 0)  in amdgpu_ttm_tt_unpopulate()
   1200  pool = &adev->mman.ttm_pools[gtt->pool_id];  in amdgpu_ttm_tt_unpopulate()

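amdgpu_ttm.c records a per-partition pool id on the TT at creation time and, at populate/unpopulate, indexes mman.ttm_pools with it when the id is non-negative and the array exists. The sketch below models only that guard; the fallback to a device-wide default pool is an assumption, as are the trimmed structures.

/*
 * Sketch of the pool selection guard seen in amdgpu_ttm_tt_populate(): a TT
 * remembers which memory-partition pool it belongs to, and a negative
 * pool_id or a missing per-partition array sends it elsewhere.  The trimmed
 * structures and the explicit default-pool fallback are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_ttm_pool { const char *name; };

struct toy_mman {
	struct toy_ttm_pool  default_pool;
	struct toy_ttm_pool *ttm_pools;   /* per-partition pools, may be NULL */
};

struct toy_gtt { int32_t pool_id; };      /* -1 when not tied to a partition */

static struct toy_ttm_pool *toy_pick_pool(struct toy_mman *mman,
					  struct toy_gtt *gtt)
{
	if (mman->ttm_pools && gtt->pool_id >= 0)
		return &mman->ttm_pools[gtt->pool_id];
	return &mman->default_pool;       /* assumed fallback for the sketch */
}

int main(void)
{
	struct toy_ttm_pool partition_pools[2] = { { "xcp0" }, { "xcp1" } };
	struct toy_mman mman = { { "default" }, partition_pools };
	struct toy_gtt gtt = { .pool_id = 1 };

	printf("pages come from pool '%s'\n", toy_pick_pool(&mman, &gtt)->name);
	return 0;
}
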
/linux-6.12.1/net/ipv4/

tcp_ao.c
   1554  int err, pool_id;  in tcp_ao_key_alloc() local
   1567  pool_id = tcp_sigpool_alloc_ahash(algo, 60);  in tcp_ao_key_alloc()
   1568  if (pool_id < 0)  in tcp_ao_key_alloc()
   1569  return ERR_PTR(pool_id);  in tcp_ao_key_alloc()
   1571  err = tcp_sigpool_start(pool_id, &hp);  in tcp_ao_key_alloc()
   1586  key->tcp_sigpool_id = pool_id;  in tcp_ao_key_alloc()
   1591  tcp_sigpool_release(pool_id);  in tcp_ao_key_alloc()

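tcp_ao_key_alloc() treats the signing pool as an id-managed resource: allocate an id for the algorithm (negative values are errors), store it in the key, and release it on the failure path so the reference is not leaked. Below is a userspace model of that lifecycle; the toy_* functions and the refcounting details are assumptions, not the tcp_sigpool API.

/*
 * Userspace model of the sigpool-id lifecycle in tcp_ao_key_alloc(): allocate
 * an id for a hash algorithm, keep it in the key on success, release it on
 * the error path.  The toy_* helpers are illustrative only.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define TOY_MAX_POOLS 4

static struct { char algo[16]; int users; } pools[TOY_MAX_POOLS];

/* Return an existing pool id for 'algo' or allocate one; negative errno on failure. */
static int toy_sigpool_alloc(const char *algo)
{
	int i, free_slot = -1;

	for (i = 0; i < TOY_MAX_POOLS; i++) {
		if (pools[i].users && !strcmp(pools[i].algo, algo)) {
			pools[i].users++;
			return i;
		}
		if (!pools[i].users && free_slot < 0)
			free_slot = i;
	}
	if (free_slot < 0)
		return -ENOMEM;
	snprintf(pools[free_slot].algo, sizeof(pools[free_slot].algo), "%s", algo);
	pools[free_slot].users = 1;
	return free_slot;
}

static void toy_sigpool_release(int pool_id)
{
	pools[pool_id].users--;
}

static int toy_sigpool_start(int pool_id)
{
	/* The real code checks out a crypto context here; always succeed. */
	(void)pool_id;
	return 0;
}

static int toy_key_alloc(const char *algo)
{
	int pool_id, err;

	pool_id = toy_sigpool_alloc(algo);
	if (pool_id < 0)
		return pool_id;                /* propagate the errno */

	err = toy_sigpool_start(pool_id);
	if (err) {
		toy_sigpool_release(pool_id);  /* don't leak the reference */
		return err;
	}
	return pool_id;                        /* stored as key->tcp_sigpool_id */
}

int main(void)
{
	int id = toy_key_alloc("hmac(sha256)");

	printf("key bound to sigpool %d\n", id);
	if (id >= 0)
		toy_sigpool_release(id);
	return 0;
}
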
/linux-6.12.1/kernel/

workqueue.c
    397  u32 pool_id;  member
    800  int pool_id, unsigned long flags)  in set_work_pool_and_keep_pending() argument
    802  set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |  in set_work_pool_and_keep_pending()
    807  int pool_id, unsigned long flags)  in set_work_pool_and_clear_pending() argument
    816  set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |  in set_work_pool_and_clear_pending()
    882  int pool_id;  in get_work_pool() local
    889  pool_id = data >> WORK_OFFQ_POOL_SHIFT;  in get_work_pool()
    890  if (pool_id == WORK_OFFQ_POOL_NONE)  in get_work_pool()
    893  return idr_find(&worker_pool_idr, pool_id);  in get_work_pool()
    905  offqd->pool_id = shift_and_mask(data, WORK_OFFQ_POOL_SHIFT,  in work_offqd_unpack()
    [all …]

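workqueue.c packs the last worker pool's id into the upper bits of work->data while the low bits keep off-queue flags; get_work_pool() reverses the shift and looks the id up in an IDR. The sketch below shows just the pack/unpack arithmetic with assumed shift and flag values; the real constants are WORK_OFFQ_POOL_SHIFT and friends.

/*
 * Minimal model of how workqueue.c keeps a worker-pool id in the upper bits
 * of work->data for an off-queue work item, with flag bits in the low bits.
 * The shift and flag values are assumptions; in the kernel an all-ones pool
 * field (WORK_OFFQ_POOL_NONE) means "no pool recorded".
 */
#include <stdio.h>

#define TOY_OFFQ_POOL_SHIFT 5                       /* assumed: 5 flag bits */
#define TOY_OFFQ_FLAG_MASK  ((1UL << TOY_OFFQ_POOL_SHIFT) - 1)

/* Pack: pool id in the high bits, off-queue flags in the low bits. */
static unsigned long toy_pack_work_data(unsigned long pool_id,
					unsigned long flags)
{
	return (pool_id << TOY_OFFQ_POOL_SHIFT) | (flags & TOY_OFFQ_FLAG_MASK);
}

/* Unpack: recover the pool id the way get_work_pool() does before idr_find(). */
static unsigned long toy_unpack_pool_id(unsigned long data)
{
	return data >> TOY_OFFQ_POOL_SHIFT;
}

int main(void)
{
	unsigned long data = toy_pack_work_data(42, 0x3);

	printf("flags=0x%lx pool_id=%lu\n",
	       data & TOY_OFFQ_FLAG_MASK, toy_unpack_pool_id(data));
	return 0;
}
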
/linux-6.12.1/drivers/net/wireless/ath/ath11k/

dp_tx.c
     94  u8 pool_id;  in ath11k_dp_tx() local
    108  pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);  in ath11k_dp_tx()
    141  FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);  in ath11k_dp_tx()

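ath11k derives the TX pool id by masking the skb's queue mapping down to the number of hardware queues, then encodes it into a bit field of the descriptor id (FIELD_PREP in the kernel). A self-contained version with an assumed field layout:

/*
 * Sketch of the ath11k_dp_tx() pattern above: mask the queue mapping down to
 * a pool id, then place it in a bit field of the descriptor id.  The field
 * layout used here is an assumption for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_HW_MAX_QUEUES       4u           /* must be a power of two */
#define TOY_DESC_ID_POOL_SHIFT  16
#define TOY_DESC_ID_POOL_MASK   (0xffu << TOY_DESC_ID_POOL_SHIFT)
#define TOY_DESC_ID_INDEX_MASK  0xffffu

static uint32_t toy_build_desc_id(uint16_t queue_mapping, uint16_t desc_index)
{
	uint8_t pool_id = queue_mapping & (TOY_HW_MAX_QUEUES - 1);

	return ((uint32_t)pool_id << TOY_DESC_ID_POOL_SHIFT) |
	       (desc_index & TOY_DESC_ID_INDEX_MASK);
}

int main(void)
{
	uint32_t desc_id = toy_build_desc_id(6, 123);   /* queue 6 -> pool 2 */

	printf("desc_id=0x%08x pool=%u\n", desc_id,
	       (desc_id & TOY_DESC_ID_POOL_MASK) >> TOY_DESC_ID_POOL_SHIFT);
	return 0;
}
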
/linux-6.12.1/Documentation/ABI/testing/

sysfs-bus-rbd
    112  What: /sys/bus/rbd/devices/<dev-id>/pool_id

/linux-6.12.1/drivers/s390/block/

dasd_eckd.h
    430  __u16 pool_id;  member

/linux-6.12.1/include/linux/ceph/

ceph_fs.h
     66  s64 pool_id; /* rados pool id */  member
|