/linux-6.12.1/drivers/net/ethernet/cavium/thunder/ |
D | nicvf_queues.c |
     32  static int nicvf_poll_reg(struct nicvf *nic, int qidx,  in nicvf_poll_reg() argument
     43  reg_val = nicvf_queue_reg_read(nic, reg, qidx);  in nicvf_poll_reg()
    505  struct snd_queue *sq, int q_len, int qidx)  in nicvf_init_snd_queue() argument
    525  qidx += ((nic->sqs_id + 1) * MAX_SND_QUEUES_PER_QS);  in nicvf_init_snd_queue()
    526  if (qidx < nic->pnicvf->xdp_tx_queues) {  in nicvf_init_snd_queue()
    628  struct queue_set *qs, int qidx)  in nicvf_reclaim_snd_queue() argument
    631  nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);  in nicvf_reclaim_snd_queue()
    633  if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))  in nicvf_reclaim_snd_queue()
    636  nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);  in nicvf_reclaim_snd_queue()
    640  struct queue_set *qs, int qidx)  in nicvf_reclaim_rcv_queue() argument
    [all …]
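The reclaim path above hangs off a poll-until-bitfield-matches helper: read the per-queue register, isolate a field, and retry until it equals the expected value or the budget runs out. A minimal userspace sketch of that pattern, with a fake register read standing in for nicvf_queue_reg_read(); the 0x840 offset, retry budget, and delay are stand-ins, not the driver's exact values:

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Stand-in for nicvf_queue_reg_read(): pretend bit 21 of the SQ
     * status register becomes 1 after a few reads. */
    static uint64_t queue_reg_read(uint64_t reg, int qidx)
    {
        static int reads;

        (void)reg; (void)qidx;
        return (reads++ < 3) ? 0 : (1ULL << 21);
    }

    /* Mirrors the nicvf_poll_reg() loop: extract the `bits`-wide field
     * at `bit_pos` and retry until it equals `val`. */
    static int poll_reg(int qidx, uint64_t reg, int bit_pos, int bits, uint64_t val)
    {
        uint64_t mask = (1ULL << bits) - 1;
        int timeout = 10;

        while (timeout--) {
            if (((queue_reg_read(reg, qidx) >> bit_pos) & mask) == val)
                return 0;
            usleep(2000);           /* the driver uses usleep_range() */
        }
        return -1;                  /* the driver returns -EBUSY */
    }

    int main(void)
    {
        /* as in nicvf_reclaim_snd_queue(): wait for STATUS bit 21 == 1 */
        printf("poll result: %d\n", poll_reg(0, 0x840, 21, 1, 0x01));
        return 0;
    }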
|
D | nicvf_main.c |
     75  static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)  in nicvf_netdev_qidx() argument
     78  return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);  in nicvf_netdev_qidx()
     80  return qidx;  in nicvf_netdev_qidx()
    104  u64 qidx, u64 val)  in nicvf_queue_reg_write() argument
    108  writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));  in nicvf_queue_reg_write()
    111  u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)  in nicvf_queue_reg_read() argument
    115  return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));  in nicvf_queue_reg_read()
    989  int qidx;  in nicvf_handle_qs_err() local
    995  for (qidx = 0; qidx < qs->cq_cnt; qidx++) {  in nicvf_handle_qs_err()
    997  qidx);  in nicvf_handle_qs_err()
    [all …]
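Both accessors above select a queue's copy of a qset register by shifting qidx into the MMIO offset. A compilable model of that addressing scheme; the static array stands in for the mapped BAR, and the stride value and register offset are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define NIC_Q_NUM_SHIFT 18      /* assumed per-queue stride; check nic_reg.h */
    #define MAX_QUEUES      8

    static uint8_t bar[MAX_QUEUES << NIC_Q_NUM_SHIFT];   /* fake MMIO window */

    static void queue_reg_write(uint64_t offset, uint64_t qidx, uint64_t val)
    {
        /* the driver: writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT)) */
        *(volatile uint64_t *)(bar + offset + (qidx << NIC_Q_NUM_SHIFT)) = val;
    }

    static uint64_t queue_reg_read(uint64_t offset, uint64_t qidx)
    {
        return *(volatile uint64_t *)(bar + offset + (qidx << NIC_Q_NUM_SHIFT));
    }

    int main(void)
    {
        queue_reg_write(0x010, 3, 0xabcdULL);   /* hypothetical register, queue 3 */
        printf("0x%llx\n", (unsigned long long)queue_reg_read(0x010, 3));
        return 0;
    }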
|
D | nicvf_ethtool.c |
    214  int stats, qidx;  in nicvf_get_qset_strings() local
    217  for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {  in nicvf_get_qset_strings()
    219  sprintf(*data, "rxq%d: %s", qidx + start_qidx,  in nicvf_get_qset_strings()
    225  for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {  in nicvf_get_qset_strings()
    227  sprintf(*data, "txq%d: %s", qidx + start_qidx,  in nicvf_get_qset_strings()
    301  int stat, qidx;  in nicvf_get_qset_stats() local
    306  for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {  in nicvf_get_qset_stats()
    307  nicvf_update_rq_stats(nic, qidx);  in nicvf_get_qset_stats()
    309  *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)  in nicvf_get_qset_stats()
    313  for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {  in nicvf_get_qset_stats()
    [all …]
|
D | nicvf_queues.h |
    336  int qidx, bool enable);
    338  void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
    339  void nicvf_sq_disable(struct nicvf *nic, int qidx);
    342  struct snd_queue *sq, int qidx);
    363  u64 qidx, u64 val);
    365  u64 offset, u64 qidx);
|
/linux-6.12.1/drivers/net/ethernet/marvell/octeontx2/nic/ |
D | qos_sq.c |
     33  static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)  in otx2_qos_sq_aura_pool_init() argument
     56  pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);  in otx2_qos_sq_aura_pool_init()
     76  sq = &qset->sq[qidx];  in otx2_qos_sq_aura_pool_init()
    116  static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx)  in otx2_qos_sq_free_sqbs() argument
    124  sq = &qset->sq[qidx];  in otx2_qos_sq_free_sqbs()
    140  sq = &qset->sq[qidx];  in otx2_qos_sq_free_sqbs()
    151  static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx)  in otx2_qos_sqb_flush() argument
    157  incr = (u64)qidx << 32;  in otx2_qos_sqb_flush()
    165  static int otx2_qos_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)  in otx2_qos_ctx_disable() argument
    176  cn10k_sq_aq->qidx = qidx;  in otx2_qos_ctx_disable()
    [all …]
|
D | otx2_common.c |
     20  struct otx2_nic *pfvf, int qidx)  in otx2_nix_rq_op_stats() argument
     22  u64 incr = (u64)qidx << 32;  in otx2_nix_rq_op_stats()
     33  struct otx2_nic *pfvf, int qidx)  in otx2_nix_sq_op_stats() argument
     35  u64 incr = (u64)qidx << 32;  in otx2_nix_sq_op_stats()
     76  int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)  in otx2_update_rq_stats() argument
     78  struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];  in otx2_update_rq_stats()
     83  otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);  in otx2_update_rq_stats()
     87  int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)  in otx2_update_sq_stats() argument
     89  struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx];  in otx2_update_sq_stats()
     94  if (qidx >= pfvf->hw.non_qos_queues) {  in otx2_update_sq_stats()
    [all …]
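The repeated `(u64)qidx << 32` lines above feed otx2's OP-register mechanism: the queue index rides in the upper 32 bits of the value atomically added to a read-only stats register, and the hardware returns that queue's counter. A toy model in which an array plays the part of the hardware:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t rq_octs[64];        /* per-queue counters, stand-in for HW */

    /* models otx2_atomic64_add() on an LF_RQ_OP_* register */
    static uint64_t op_reg_read(uint64_t incr)
    {
        uint64_t qidx = incr >> 32;     /* HW decodes qidx from bits 63:32 */
        return rq_octs[qidx];
    }

    int main(void)
    {
        int qidx = 5;

        rq_octs[qidx] = 12345;
        uint64_t incr = (uint64_t)qidx << 32;   /* as in otx2_nix_rq_op_stats() */
        printf("rq%d octets: %llu\n", qidx, (unsigned long long)op_reg_read(incr));
        return 0;
    }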
|
D | cn10k.c |
     75  int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)  in cn10k_sq_aq_init() argument
     85  aq->sq.cq = pfvf->hw.rx_queues + qidx;  in cn10k_sq_aq_init()
     89  aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);  in cn10k_sq_aq_init()
    102  aq->qidx = qidx;  in cn10k_sq_aq_init()
    138  void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)  in cn10k_sqe_flush() argument
    321  aq->qidx = rq_idx;  in cn10k_map_unmap_rq_policer()
    345  int qidx, rc;  in cn10k_free_matchall_ipolicer() local
    350  for (qidx = 0; qidx < hw->rx_queues; qidx++)  in cn10k_free_matchall_ipolicer()
    351  cn10k_map_unmap_rq_policer(pfvf, qidx,  in cn10k_free_matchall_ipolicer()
    457  aq->qidx = profile;  in cn10k_set_ipolicer_rate()
    [all …]
|
D | otx2_txrx.c |
    196  int qidx)  in otx2_skb_add_frag() argument
    227  pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);  in otx2_skb_add_frag()
    255  int qidx)  in otx2_free_rcv_seg() argument
    268  pfvf->hw_ops->aura_freeptr(pfvf, qidx,  in otx2_free_rcv_seg()
    275  struct nix_cqe_rx_s *cqe, int qidx)  in otx2_check_rcv_errors() argument
    283  qidx, parse->errlev, parse->errcode);  in otx2_check_rcv_errors()
    334  otx2_free_rcv_seg(pfvf, cqe, qidx);  in otx2_check_rcv_errors()
    453  int tx_pkts = 0, tx_bytes = 0, qidx;  in otx2_tx_napi_handler() local
    465  qidx = cq->cq_idx - pfvf->hw.rx_queues;  in otx2_tx_napi_handler()
    466  sq = &pfvf->qset.sq[qidx];  in otx2_tx_napi_handler()
    [all …]
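Line 465 undoes the CQ numbering that cn10k_sq_aq_init() sets up (line 85 in cn10k.c above): RX completion queues occupy indexes [0, rx_queues) and TX CQs follow, so a TX CQ index maps back to its send queue by subtracting rx_queues. A sketch with made-up queue counts:

    #include <assert.h>

    #define RX_QUEUES 8
    #define TX_QUEUES 8

    /* aq->sq.cq = pfvf->hw.rx_queues + qidx in cn10k_sq_aq_init() */
    static int sq_to_cq(int qidx)   { return RX_QUEUES + qidx; }

    /* qidx = cq->cq_idx - pfvf->hw.rx_queues in otx2_tx_napi_handler() */
    static int cq_to_sq(int cq_idx) { return cq_idx - RX_QUEUES; }

    int main(void)
    {
        for (int q = 0; q < TX_QUEUES; q++)
            assert(cq_to_sq(sq_to_cq(q)) == q);   /* the mappings are inverses */
        return 0;
    }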
|
D | otx2_pf.c |
    1271  u64 qidx = 0;  in otx2_q_intr_handler() local
    1274  for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {  in otx2_q_intr_handler()
    1276  val = otx2_atomic64_add((qidx << 44), ptr);  in otx2_q_intr_handler()
    1278  otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |  in otx2_q_intr_handler()
    1286  qidx, otx2_read64(pf, NIX_LF_ERR_INT));  in otx2_q_intr_handler()
    1290  qidx);  in otx2_q_intr_handler()
    1294  qidx);  in otx2_q_intr_handler()
    1301  for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {  in otx2_q_intr_handler()
    1305  sq = &pf->qset.sq[qidx];  in otx2_q_intr_handler()
    1315  val = otx2_atomic64_add((qidx << 44), ptr);  in otx2_q_intr_handler()
    [all …]
|
D | otx2_dcbnl.c |
    161  cn10k_sq_aq->qidx = prio;  in otx2_pfc_update_sq_smq_mapping()
    176  sq_aq->qidx = prio;  in otx2_pfc_update_sq_smq_mapping()
    330  void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,  in otx2_update_bpid_in_rqctx() argument
    338  if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) {  in otx2_update_bpid_in_rqctx()
    341  pfvf->queue_to_pfc_map[qidx], qidx);  in otx2_update_bpid_in_rqctx()
    350  pfvf->queue_to_pfc_map[qidx] = vlan_prio;  in otx2_update_bpid_in_rqctx()
    362  aq->qidx = qidx;  in otx2_update_bpid_in_rqctx()
    377  npa_aq->aura_id = qidx;  in otx2_update_bpid_in_rqctx()
    391  qidx, err);  in otx2_update_bpid_in_rqctx()
|
D | otx2_ethtool.c |
     86  int qidx, stats;  in otx2_get_qset_strings() local
     88  for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {  in otx2_get_qset_strings()
     90  sprintf(*data, "rxq%d: %s", qidx + start_qidx,  in otx2_get_qset_strings()
     96  for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {  in otx2_get_qset_strings()
     98  if (qidx >= pfvf->hw.non_qos_queues)  in otx2_get_qset_strings()
    100  qidx + start_qidx - pfvf->hw.non_qos_queues,  in otx2_get_qset_strings()
    103  sprintf(*data, "txq%d: %s", qidx + start_qidx,  in otx2_get_qset_strings()
    153  int stat, qidx;  in otx2_get_qset_stats() local
    157  for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {  in otx2_get_qset_stats()
    158  if (!otx2_update_rq_stats(pfvf, qidx)) {  in otx2_get_qset_stats()
    [all …]
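The string builders above stamp out one ethtool stat name per queue/stat pair, offset by start_qidx so a secondary queue set continues the primary's numbering. A minimal model of the rxq naming loop; the stat names, counts, and buffer handling are simplified stand-ins:

    #include <stdio.h>

    static const char *rq_stats[] = { "octs", "pkts" };
    #define NUM_RQ_STATS 2

    static void get_qset_strings(int rx_queues, int start_qidx)
    {
        for (int qidx = 0; qidx < rx_queues; qidx++)
            for (int s = 0; s < NUM_RQ_STATS; s++)
                /* the driver sprintf()s into an ETH_GSTRING_LEN slot */
                printf("rxq%d: %s\n", qidx + start_qidx, rq_stats[s]);
    }

    int main(void)
    {
        get_qset_strings(2, 0);     /* primary qset */
        get_qset_strings(2, 8);     /* hypothetical secondary qset offset */
        return 0;
    }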
|
D | qos.h |
     27  void otx2_qos_free_qid(struct otx2_nic *pfvf, int qidx);
     28  int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx);
     29  void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx);
|
D | otx2_common.h |
    370  int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
    372  int size, int qidx);
    913  static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)  in otx2_get_smq_idx() argument
    917  if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])  in otx2_get_smq_idx()
    918  return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];  in otx2_get_smq_idx()
    921  if (qidx >= pfvf->hw.non_qos_queues)  in otx2_get_smq_idx()
    922  smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];  in otx2_get_smq_idx()
    961  void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
    988  void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
    990  int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
    [all …]
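otx2_get_smq_idx() above resolves a send-queue index to a scheduler SMQ in three tiers: PFC-priority queues get their dedicated SMQ, QoS queues past non_qos_queues look up a mapped SMQ, and everything else falls through to the default. A hedged standalone sketch; the table sizes and contents below are invented for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PFC_PRIO_MAX   8
    #define NON_QOS_QUEUES 16

    static bool     pfc_alloc_status[PFC_PRIO_MAX];
    static uint16_t pfc_smq_list[PFC_PRIO_MAX];
    static uint16_t qid_to_sqmap[64];
    static uint16_t default_smq;

    static uint16_t get_smq_idx(uint16_t qidx)
    {
        if (qidx < PFC_PRIO_MAX && pfc_alloc_status[qidx])
            return pfc_smq_list[qidx];                   /* per-priority PFC SMQ */
        if (qidx >= NON_QOS_QUEUES)
            return qid_to_sqmap[qidx - NON_QOS_QUEUES];  /* QoS send queue */
        return default_smq;                              /* regular TX queue */
    }

    int main(void)
    {
        default_smq = 100;
        qid_to_sqmap[0] = 200;
        printf("txq0 -> smq %u, qos q16 -> smq %u\n",
               get_smq_idx(0), get_smq_idx(16));
        return 0;
    }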
|
D | otx2_txrx.h |
    171  struct sk_buff *skb, u16 qidx);
    173  int size, int qidx);
    175  int size, int qidx);
|
D | cn10k.h |
     28  void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
     29  int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
|
/linux-6.12.1/drivers/net/ethernet/fungible/funeth/ |
D | funeth_trace.h |
     23  __field(u32, qidx)
     31  __entry->qidx = txq->qidx;
     39  __get_str(devname), __entry->qidx, __entry->sqe_idx,
     53  __field(u32, qidx)
     61  __entry->qidx = txq->qidx;
     69  __get_str(devname), __entry->qidx, __entry->sqe_idx,
     84  __field(u32, qidx)
     94  __entry->qidx = rxq->qidx;
    104  __get_str(devname), __entry->qidx, __entry->cq_head,
|
D | funeth_tx.c |
    624  unsigned int qidx,  in fun_txq_create_sw() argument
    635  numa_node = cpu_to_node(qidx); /* XDP Tx queue */  in fun_txq_create_sw()
    651  q->qidx = qidx;  in fun_txq_create_sw()
    661  irq ? "Tx" : "XDP", qidx);  in fun_txq_create_sw()
    709  q->ndq = netdev_get_tx_queue(q->netdev, q->qidx);  in fun_txq_create_dev()
    718  irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx,  in fun_txq_create_dev()
    727  irq ? "Tx" : "XDP", q->qidx, err);  in fun_txq_create_dev()
    740  q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid,  in fun_txq_free_dev()
    759  int funeth_txq_create(struct net_device *dev, unsigned int qidx,  in funeth_txq_create() argument
    767  q = fun_txq_create_sw(dev, qidx, ndesc, irq);  in funeth_txq_create()
|
D | funeth_rx.c |
    432  skb_record_rx_queue(skb, q->qidx);  in fun_handle_cqe_pkt()
    614  unsigned int qidx,  in fun_rxq_create_sw() argument
    629  q->qidx = qidx;  in fun_rxq_create_sw()
    673  netdev_err(dev, "Unable to allocate memory for Rx queue %u\n", qidx);  in fun_rxq_create_sw()
    704  err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx,  in fun_rxq_create_dev()
    750  q->qidx, ncqe, nrqe, q->hw_cqid, q->hw_sqid, irq->irq_idx,  in fun_rxq_create_dev()
    761  q->qidx, err);  in fun_rxq_create_dev()
    776  q->qidx, q->hw_cqid, q->hw_sqid, irq->irq_idx);  in fun_rxq_free_dev()
    788  int funeth_rxq_create(struct net_device *dev, unsigned int qidx,  in funeth_rxq_create() argument
    796  q = fun_rxq_create_sw(dev, qidx, ncqe, nrqe, irq);  in funeth_rxq_create()
|
D | funeth_txrx.h |
    117  u16 qidx; /* queue index within net_device */  member
    173  u16 qidx; /* queue index within net_device */  member
    254  int funeth_txq_create(struct net_device *dev, unsigned int qidx,
    259  int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
|
/linux-6.12.1/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/ |
D | chcr_ipsec.c |
    422  u32 qidx;  in copy_esn_pktxt() local
    430  qidx = skb->queue_mapping;  in copy_esn_pktxt()
    431  q = &adap->sge.ethtxq[qidx + pi->first_qset];  in copy_esn_pktxt()
    472  u32 ctrl0, qidx;  in copy_cpltx_pktxt() local
    478  qidx = skb->queue_mapping;  in copy_cpltx_pktxt()
    479  q = &adap->sge.ethtxq[qidx + pi->first_qset];  in copy_cpltx_pktxt()
    517  unsigned int qidx;  in copy_key_cpltx_pktxt() local
    521  qidx = skb->queue_mapping;  in copy_key_cpltx_pktxt()
    522  q = &adap->sge.ethtxq[qidx + pi->first_qset];  in copy_key_cpltx_pktxt()
    577  int qidx = skb_get_queue_mapping(skb);  in ch_ipsec_crypto_wreq() local
    [all …]
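Every function listed above recovers the port's TX queue the same way: the skb's queue_mapping indexes into the port's slice of the adapter-wide sge.ethtxq[] array, which begins at pi->first_qset. A toy standalone model; the struct layouts and sizes are invented:

    #include <stdio.h>

    struct txq  { int id; };
    struct port { int first_qset; };

    #define NQUEUES 32
    static struct txq ethtxq[NQUEUES];    /* one array shared by all ports */

    static struct txq *skb_txq(const struct port *pi, int queue_mapping)
    {
        /* q = &adap->sge.ethtxq[qidx + pi->first_qset] in the driver */
        return &ethtxq[queue_mapping + pi->first_qset];
    }

    int main(void)
    {
        struct port p1 = { .first_qset = 8 };   /* second port's slice starts at 8 */

        printf("slot %ld\n", (long)(skb_txq(&p1, 2) - ethtxq));   /* prints 10 */
        return 0;
    }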
|
/linux-6.12.1/drivers/dma/ptdma/ |
D | ptdma-dev.c |
     72  u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];  in pt_core_execute_cmd()
     84  cmd_q->qidx = (cmd_q->qidx + 1) % CMD_Q_LEN;  in pt_core_execute_cmd()
     90  tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);  in pt_core_execute_cmd()
    136  tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);  in pt_do_cmd_complete()
    216  cmd_q->qidx = 0;  in pt_core_init()
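The hits in pt_core_execute_cmd() describe a classic submission ring: copy the descriptor into slot qidx, advance qidx modulo CMD_Q_LEN, then derive the doorbell value from the low 32 bits of the new tail address. A compact userspace model; the descriptor layout, queue length, and DMA address are stand-ins:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CMD_Q_LEN 32

    struct desc { uint32_t dw[8]; };        /* stand-in descriptor layout */
    #define Q_DESC_SIZE sizeof(struct desc)

    struct cmd_q {
        struct desc qbase[CMD_Q_LEN];
        unsigned int qidx;                  /* next free slot */
        uint64_t qdma_tail;                 /* DMA address of qbase[0] */
    };

    static uint32_t queue_cmd(struct cmd_q *q, const struct desc *d)
    {
        memcpy(&q->qbase[q->qidx], d, Q_DESC_SIZE);
        q->qidx = (q->qidx + 1) % CMD_Q_LEN;    /* wrap the ring */
        /* lower_32_bits(qdma_tail + qidx * Q_DESC_SIZE) in the driver */
        return (uint32_t)(q->qdma_tail + q->qidx * Q_DESC_SIZE);
    }

    int main(void)
    {
        struct cmd_q q = { .qdma_tail = 0x10000000 };
        struct desc d = { { 0 } };

        printf("doorbell tail: 0x%x\n", queue_cmd(&q, &d));
        return 0;
    }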
|
/linux-6.12.1/drivers/net/ethernet/broadcom/bnxt/ |
D | bnxt_dcb.c |
     55  u8 qidx;  in bnxt_hwrm_queue_pri2cos_cfg() local
     60  qidx = bp->tc_to_qidx[ets->prio_tc[i]];  in bnxt_hwrm_queue_pri2cos_cfg()
     61  pri2cos[i] = bp->q_info[qidx].queue_id;  in bnxt_hwrm_queue_pri2cos_cfg()
    108  u8 qidx = bp->tc_to_qidx[i];  in bnxt_hwrm_queue_cos2bw_cfg() local
    112  qidx);  in bnxt_hwrm_queue_cos2bw_cfg()
    115  cos2bw.queue_id = bp->q_info[qidx].queue_id;  in bnxt_hwrm_queue_cos2bw_cfg()
    131  if (qidx == 0) {  in bnxt_hwrm_queue_cos2bw_cfg()
    277  u8 qidx = bp->tc_to_qidx[i];  in bnxt_hwrm_queue_pfc_cfg() local
    279  if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) {  in bnxt_hwrm_queue_pfc_cfg()
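Lines 60–61 chain two table lookups: the ETS config maps an IEEE priority to a traffic class, tc_to_qidx maps the class to a driver queue index, and q_info[] supplies the firmware queue ID. A sketch of that chain; the table contents below are invented:

    #include <stdint.h>
    #include <stdio.h>

    #define IEEE_8021QAZ_MAX_TCS 8

    static uint8_t prio_tc[IEEE_8021QAZ_MAX_TCS]    = { 0, 0, 1, 1, 2, 2, 3, 3 };
    static uint8_t tc_to_qidx[IEEE_8021QAZ_MAX_TCS] = { 0, 1, 2, 3, 0, 0, 0, 0 };
    static uint8_t queue_id[IEEE_8021QAZ_MAX_TCS]   = { 10, 11, 12, 13 };

    static uint8_t prio_to_hw_queue(int prio)
    {
        uint8_t qidx = tc_to_qidx[prio_tc[prio]];   /* prio -> TC -> qidx */
        return queue_id[qidx];                      /* qidx -> firmware queue ID */
    }

    int main(void)
    {
        for (int prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
            printf("prio %d -> hw queue %u\n", prio, prio_to_hw_queue(prio));
        return 0;
    }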
|
/linux-6.12.1/drivers/scsi/csiostor/ |
D | csio_wr.c |
    745  csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)  in csio_wr_cleanup_eq_stpg() argument
    747  struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];  in csio_wr_cleanup_eq_stpg()
    762  csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)  in csio_wr_cleanup_iq_ftr() argument
    765  struct csio_q *q = wrm->q_arr[qidx];  in csio_wr_cleanup_iq_ftr()
    863  csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size,  in csio_wr_get() argument
    867  struct csio_q *q = wrm->q_arr[qidx];  in csio_wr_get()
    878  CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));  in csio_wr_get()
    982  csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)  in csio_wr_issue() argument
    985  struct csio_q *q = wrm->q_arr[qidx];  in csio_wr_issue()
    987  CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));  in csio_wr_issue()
    [all …]
|
/linux-6.12.1/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_amdkfd_gfx_v9.c |
    1028  int qidx;  in kgd_gfx_v9_get_cu_occupancy() local
    1057  for (qidx = 0; qidx < max_queue_cnt; qidx++) {  in kgd_gfx_v9_get_cu_occupancy()
    1061  if (!test_bit(qidx, cp_queue_bitmap))  in kgd_gfx_v9_get_cu_occupancy()
    1064  if (!(queue_map & (1 << qidx)))  in kgd_gfx_v9_get_cu_occupancy()
    1068  get_wave_count(adev, qidx, &cu_occupancy[qidx],  in kgd_gfx_v9_get_cu_occupancy()
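kgd_gfx_v9_get_cu_occupancy() walks every possible queue slot and applies a double filter before reading wave counts: the slot must be reserved for compute (cp_queue_bitmap) and must currently have a queue mapped (queue_map). A minimal model of that filter; the driver uses test_bit() on a real bitmap, while the sketch uses a plain u32, and the bit patterns are invented:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_QUEUES 32

    int main(void)
    {
        uint32_t cp_queue_bitmap = 0x00ff00ff;  /* slots reserved for compute */
        uint32_t queue_map       = 0x00000f0f;  /* slots with a queue mapped  */

        for (int qidx = 0; qidx < MAX_QUEUES; qidx++) {
            if (!(cp_queue_bitmap & (1u << qidx)))
                continue;                       /* not a compute slot */
            if (!(queue_map & (1u << qidx)))
                continue;                       /* nothing mapped here */
            printf("counting waves on queue %d\n", qidx);
        }
        return 0;
    }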
|
/linux-6.12.1/drivers/scsi/qla2xxx/ |
D | qla_nvme.c |
    116  unsigned int qidx, u16 qsize, void **handle)  in qla_nvme_alloc_queue() argument
    123  if (qidx)  in qla_nvme_alloc_queue()
    124  qidx--;  in qla_nvme_alloc_queue()
    131  __func__, handle, qidx, qsize);  in qla_nvme_alloc_queue()
    133  if (qidx > qla_nvme_fc_transport.max_hw_queues) {  in qla_nvme_alloc_queue()
    136  __func__, qidx, qla_nvme_fc_transport.max_hw_queues);  in qla_nvme_alloc_queue()
    144  if (ha->queue_pair_map[qidx]) {  in qla_nvme_alloc_queue()
    145  *handle = ha->queue_pair_map[qidx];  in qla_nvme_alloc_queue()
    148  *handle, qidx);  in qla_nvme_alloc_queue()
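The qidx handling above reflects the FC-NVMe transport convention that index 0 is the admin queue: nonzero indexes are shifted down by one before indexing queue_pair_map[], and out-of-range values are rejected. A sketch of that normalization; the bound is invented, and the driver compares with > against the transport's max_hw_queues where the sketch uses >= against its own array size:

    #include <stdio.h>

    #define MAX_HW_QUEUES 8
    static void *queue_pair_map[MAX_HW_QUEUES];

    static void *nvme_queue_handle(unsigned int qidx)
    {
        if (qidx)
            qidx--;                 /* transport qidx 0 (admin) and 1 share slot 0 */
        if (qidx >= MAX_HW_QUEUES)  /* driver: qidx > max_hw_queues -> error */
            return NULL;
        return queue_pair_map[qidx];
    }

    int main(void)
    {
        /* admin queue and first I/O queue resolve to the same slot */
        printf("%p %p\n", nvme_queue_handle(0), nvme_queue_handle(1));
        return 0;
    }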
|