/linux-6.12.1/drivers/net/ethernet/marvell/octeon_ep_vf/ |
D | octep_vf_cnxk.c |
    17  static void cnxk_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
    21      dev_info(dev, "IQ-%d register dump\n", qno);
    23               qno, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno),
    24               octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno)));
    26               qno, CNXK_VF_SDP_R_IN_CONTROL(qno),
    27               octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(qno)));
    29               qno, CNXK_VF_SDP_R_IN_ENABLE(qno),
    30               octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(qno)));
    32               qno, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno),
    33               octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno)));
    [all …]
|
D | octep_vf_cn9k.c |
    17  static void cn93_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
    21      dev_info(dev, "IQ-%d register dump\n", qno);
    23               qno, CN93_VF_SDP_R_IN_INSTR_DBELL(qno),
    24               octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(qno)));
    26               qno, CN93_VF_SDP_R_IN_CONTROL(qno),
    27               octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(qno)));
    29               qno, CN93_VF_SDP_R_IN_ENABLE(qno),
    30               octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(qno)));
    32               qno, CN93_VF_SDP_R_IN_INSTR_BADDR(qno),
    33               octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(qno)));
    [all …]
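Both VF dump helpers above (and the PF variants under octeon_ep further down) print and read one 64-bit CSR per input-queue register, with qno selecting the ring. A minimal userspace sketch of the presumed macro shape, assuming each per-ring register sits at a fixed base plus a per-ring stride; the base and stride values below are invented for illustration, the real ones live in the octep_vf register headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented values for illustration only; see the octep_vf register
     * headers for the real base addresses and ring stride. */
    #define SDP_R_IN_INSTR_DBELL_BASE  0x10010ULL
    #define SDP_R_RING_STRIDE          0x10000ULL

    /* Presumed shape of the CNXK_VF_SDP_R_IN_*(qno) macros: a fixed base
     * plus qno times a per-ring stride. */
    #define SDP_R_IN_INSTR_DBELL(qno) \
            (SDP_R_IN_INSTR_DBELL_BASE + (uint64_t)(qno) * SDP_R_RING_STRIDE)

    int main(void)
    {
            for (int qno = 0; qno < 4; qno++)
                    printf("IQ-%d doorbell CSR at 0x%llx\n", qno,
                           (unsigned long long)SDP_R_IN_INSTR_DBELL(qno));
            return 0;
    }

This is why the same macro appears twice per dev_info() call in the listing: once to print the register offset and once inside octep_vf_read_csr64() to fetch its value.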
|
/linux-6.12.1/drivers/net/wwan/t7xx/ |
D | t7xx_cldma.c |
    63  void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
    71      val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
   105  bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno)
   107      u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE;
   112  void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address,
   115      u32 offset = qno * ADDR_SIZE;
   123  void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
   129      iowrite32(BIT(qno), base + REG_CLDMA_DL_RESUME_CMD);
   131      iowrite32(BIT(qno), base + REG_CLDMA_UL_RESUME_CMD);
   134  unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
   [all …]
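t7xx_cldma_hw_start_queue() turns qno into a doorbell mask: the CLDMA_ALL_Q sentinel is already a full mask, while any single queue number becomes BIT(qno). A compilable sketch of that selection; the CLDMA_ALL_Q value is an assumption here, the driver defines the real one in t7xx_cldma.h:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)      (1U << (n))
    #define CLDMA_ALL_Q 0xFF    /* assumed sentinel/full-mask value */

    /* Mirrors the selection on line 71 of t7xx_cldma.c: the sentinel is
     * passed through as a full mask, anything else becomes one bit. */
    static uint32_t queue_mask(unsigned int qno)
    {
            return qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
    }

    int main(void)
    {
            printf("qno=3       -> mask 0x%02x\n", queue_mask(3));
            printf("CLDMA_ALL_Q -> mask 0x%02x\n", queue_mask(CLDMA_ALL_Q));
            return 0;
    }

The same single-bit form reappears in t7xx_cldma_hw_resume_queue(), which writes BIT(qno) into the UL or DL resume command register.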
|
D | t7xx_cldma.h |
   153  void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
   155  void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno,
   157  void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
   159  void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx);
   160  unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
   163  void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
   166  void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
   172                                 unsigned int qno, u64 address, enum mtk_txrx tx_rx);
   179  bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno);
|
D | t7xx_hif_cldma.c |
   875  static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
   881      if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) {
   883          t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX);
   884          md_ctrl->txq_started &= ~BIT(qno);
   887      if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) {
   888          if (md_ctrl->txq_started & BIT(qno))
   889              t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX);
   891              t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);
   893          md_ctrl->txq_started |= BIT(qno);
   921  int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
   [all …]
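t7xx_cldma_hw_start_send() keeps a txq_started bitmap so that an idle queue is resumed if it has been started before and started from scratch otherwise. A minimal model of that decision, with the hardware status query reduced to a boolean parameter:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1U << (n))

    struct tx_ctrl {
            uint32_t txq_started;   /* bit n set => queue n was started once */
    };

    /* Sketch of the start-or-resume branch around lines 887..893 of
     * t7xx_hif_cldma.c; the real code first (re)programs the queue start
     * address when t7xx_cldma_tx_addr_is_set() reports it unset. */
    static void hw_start_send(struct tx_ctrl *c, int qno, bool queue_active)
    {
            if (queue_active)
                    return;                         /* still running */
            if (c->txq_started & BIT(qno))
                    printf("resume queue %d\n", qno);
            else
                    printf("start queue %d\n", qno);
            c->txq_started |= BIT(qno);
    }

    int main(void)
    {
            struct tx_ctrl c = { 0 };
            hw_start_send(&c, 2, false);    /* first use:  start  */
            hw_start_send(&c, 2, false);    /* afterwards: resume */
            return 0;
    }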
|
D | t7xx_dpmaif.c |
   143  static int t7xx_mask_dlq_intr(struct dpmaif_hw_info *hw_info, unsigned int qno)
   148      q_done = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE;
   164  void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno)
   168      mask = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE;
   182      unsigned int qno)                                in t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr()
   184      if (qno == DPF_RX_QNO0)
   193      unsigned int qno)                                in t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr()
   195      if (qno == DPF_RX_QNO0)
   265      struct dpmaif_hw_intr_st_para *para, int qno)    in t7xx_dpmaif_hw_check_rx_intr()
   267      if (qno == DPF_RX_QNO_DFT) {
   [all …]
|
D | t7xx_dpmaif.h |
   155                          struct dpmaif_hw_intr_st_para *para, int qno);
   164                          unsigned int qno);
   165  void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno);
   166  bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno);
|
D | t7xx_netdev.c |
   442  static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
   448      net_queue = netdev_get_tx_queue(ccmni->dev, qno);
   454  static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
   460      netdev_err(ccmni->dev, "TX queue %d is full\n", qno);
   461      net_queue = netdev_get_tx_queue(ccmni->dev, qno);
   467      enum dpmaif_txq_state state, int qno)    in t7xx_ccmni_queue_state_notify()
   480      t7xx_ccmni_queue_tx_irq_notify(ctlb, qno);
   482      t7xx_ccmni_queue_tx_full_notify(ctlb, qno);
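t7xx_ccmni_queue_state_notify() fans a DPMAIF TX-queue event out to the matching netdev queue, looked up with netdev_get_tx_queue(ccmni->dev, qno). A stubbed model of the dispatch; the event names and the wake/stop behaviour are assumptions read off the helper names, the kernel path uses the netif_tx_* helpers on the struct netdev_queue:

    #include <stdio.h>

    /* Illustrative event names; the driver has its own dpmaif_txq_state enum. */
    enum txq_event { TXQ_EVT_IRQ, TXQ_EVT_FULL };

    /* Stubs standing in for the two notify helpers in t7xx_netdev.c. */
    static void tx_irq_notify(int qno)  { printf("wake TX queue %d\n", qno); }
    static void tx_full_notify(int qno) { printf("TX queue %d is full\n", qno); }

    static void queue_state_notify(enum txq_event ev, int qno)
    {
            if (ev == TXQ_EVT_IRQ)
                    tx_irq_notify(qno);
            else if (ev == TXQ_EVT_FULL)
                    tx_full_notify(qno);
    }

    int main(void)
    {
            queue_state_notify(TXQ_EVT_FULL, 1);
            queue_state_notify(TXQ_EVT_IRQ, 1);
            return 0;
    }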
|
D | t7xx_hif_dpmaif.c |
   424      int qno;    in t7xx_dpmaif_unmask_dlq_intr() (local)
   426      for (qno = 0; qno < DPMAIF_RXQ_NUM; qno++)
   427          t7xx_dpmaif_dlq_unmask_rx_done(&dpmaif_ctrl->hw_info, qno);
|
D | t7xx_hif_dpmaif_rx.c |
   893      int qno, ret;    in t7xx_dpmaif_irq_rx_done() (local)
   895      qno = ffs(que_mask) - 1;
   896      if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
   897          dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %u\n", qno);
   901      rxq = &dpmaif_ctrl->rxq[qno];
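t7xx_dpmaif_irq_rx_done() derives qno from the lowest set bit of the interrupt status word: ffs() returns 0 for an empty mask, so qno becomes -1 and is rejected by the range check. A runnable restatement; DPMAIF_RXQ_NUM is assumed to be 2, matching the two DLQs visible in t7xx_dpmaif.c:

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    #define DPMAIF_RXQ_NUM 2    /* assumed queue count */

    static void irq_rx_done(unsigned int que_mask)
    {
            int qno = ffs(que_mask) - 1;    /* lowest set bit, or -1 if none */

            if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1)
                    printf("Invalid RXQ number: %d\n", qno);
            else
                    printf("servicing RXQ %d\n", qno);
    }

    int main(void)
    {
            irq_rx_done(0x2);   /* DLQ1 done -> RXQ 1 */
            irq_rx_done(0x0);   /* empty mask -> rejected */
            return 0;
    }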
|
D | t7xx_hif_cldma.h | 131 int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
|
/linux-6.12.1/drivers/net/ethernet/marvell/octeon_ep/ |
D | octep_cn9k_pf.c |
    40  static void cn93_dump_regs(struct octep_device *oct, int qno)
    44      dev_info(dev, "IQ-%d register dump\n", qno);
    46               qno, CN93_SDP_R_IN_INSTR_DBELL(qno),
    47               octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno)));
    49               qno, CN93_SDP_R_IN_CONTROL(qno),
    50               octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno)));
    52               qno, CN93_SDP_R_IN_ENABLE(qno),
    53               octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno)));
    55               qno, CN93_SDP_R_IN_INSTR_BADDR(qno),
    56               octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno)));
    [all …]
|
D | octep_cnxk_pf.c |
    60  static void cnxk_dump_regs(struct octep_device *oct, int qno)
    64      dev_info(dev, "IQ-%d register dump\n", qno);
    66               qno, CNXK_SDP_R_IN_INSTR_DBELL(qno),
    67               octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(qno)));
    69               qno, CNXK_SDP_R_IN_CONTROL(qno),
    70               octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(qno)));
    72               qno, CNXK_SDP_R_IN_ENABLE(qno),
    73               octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(qno)));
    75               qno, CNXK_SDP_R_IN_INSTR_BADDR(qno),
    76               octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(qno)));
    [all …]
|
/linux-6.12.1/drivers/crypto/cavium/cpt/ |
D | cptvf_reqmanager.c |
    35                  int qno)    in pending_queue_inc_front()
    37      struct pending_queue *queue = &pqinfo->queue[qno];
   224                  u32 qno)    in send_cpt_command()
   233      if (unlikely(qno >= cptvf->nr_queues)) {
   235              qno, cptvf->nr_queues);
   240      queue = &qinfo->queue[qno];
   326                  int qno)    in process_pending_queue()
   329      struct pending_queue *pqueue = &pqinfo->queue[qno];
   346          pending_queue_inc_front(pqinfo, qno);
   362          pending_queue_inc_front(pqinfo, qno);
   [all …]
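pending_queue_inc_front() takes qno only to pick the per-queue pending_queue, and process_pending_queue() calls it after retiring an entry, which suggests it advances the queue's front index with wraparound. A plausible model; the field name and the wrap at a fixed depth are assumptions, not the driver's actual definition:

    #include <stdio.h>

    #define QLEN 4    /* illustrative queue depth */

    struct pending_queue {
            unsigned int front;    /* assumed consumer index */
    };

    /* Assumed behaviour: advance the consumer index, wrapping at QLEN. */
    static void pending_queue_inc_front(struct pending_queue *q)
    {
            q->front = (q->front + 1) % QLEN;
    }

    int main(void)
    {
            struct pending_queue q = { .front = QLEN - 1 };
            pending_queue_inc_front(&q);
            printf("front wrapped to %u\n", q.front);   /* prints 0 */
            return 0;
    }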
|
D | cptvf_main.c |
    17      u32 qno;    (member)
    29      vq_post_process(cwqe->cptvf, cwqe->qno);    in vq_work_handler()
    50          cwqe_info->vq_wqe[i].qno = i;           in init_worker_threads()
   554                int qno)                          in get_cptvf_vq_wqe()
   558      if (unlikely(qno >= cptvf->nr_queues))
   562      return &nwqe_info->vq_wqe[qno];
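get_cptvf_vq_wqe() shows the guard that recurs across these drivers (the OcteonTX copy in otx_cptvf_main.c below is nearly identical): validate qno against the queue count before indexing the per-queue array, returning NULL for out-of-range values. A self-contained sketch of the pattern:

    #include <stdio.h>

    #define NR_QUEUES 8    /* stands in for cptvf->nr_queues */

    struct vq_wqe { int qno; };
    static struct vq_wqe vq_wqe[NR_QUEUES];

    static struct vq_wqe *get_vq_wqe(unsigned int qno)
    {
            if (qno >= NR_QUEUES)   /* mirrors unlikely(qno >= nr_queues) */
                    return NULL;
            return &vq_wqe[qno];
    }

    int main(void)
    {
            printf("qno=3 -> %p\n", (void *)get_vq_wqe(3));
            printf("qno=9 -> %p\n", (void *)get_vq_wqe(9));   /* NULL */
            return 0;
    }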
|
D | cptvf.h | 127 void vq_post_process(struct cpt_vf *cptvf, u32 qno);
|
D | request_manager.h | 144 void vq_post_process(struct cpt_vf *cptvf, u32 qno);
|
/linux-6.12.1/drivers/crypto/cavium/nitrox/ |
D | nitrox_reqmgr.c |
   388      int qno, ret = 0;    in nitrox_process_se_request() (local)
   425      qno = smp_processor_id() % ndev->nr_queues;
   427      sr->cmdq = &ndev->pkt_inq[qno];
   464      sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
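nitrox_process_se_request() spreads submissions across the packet input queues by submitting CPU, then folds the same qno into the instruction's destination port. A userspace sketch with the CPU id stubbed to a constant:

    #include <stdio.h>

    /* Stub standing in for the kernel's smp_processor_id(). */
    static int current_cpu(void) { return 5; }

    int main(void)
    {
            int nr_queues = 4;                      /* ndev->nr_queues */
            int qno = current_cpu() % nr_queues;    /* 5 % 4 == 1 */

            printf("CPU %d submits on queue %d\n", current_cpu(), qno);
            return 0;
    }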
|
D | nitrox_hal.h | 21 void enable_aqm_ring(struct nitrox_device *ndev, int qno);
|
D | nitrox_lib.c |
   113      cmdq->qno = i;    in nitrox_alloc_aqm_queues()
   167      cmdq->qno = i;    in nitrox_alloc_pktin_queues()
|
D | nitrox_dev.h | 60  u8 qno;    (member)
|
/linux-6.12.1/drivers/net/ethernet/cavium/liquidio/ |
D | octeon_network.h |
   574      int i, qno;    in wake_txqs() (local)
   577          qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;
   580          INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
|
/linux-6.12.1/drivers/crypto/marvell/octeontx/ |
D | otx_cptvf_main.c |
   537                int qno)    in get_cptvf_vq_wqe()
   541      if (unlikely(qno >= cptvf->num_queues))
   545      return &nwqe_info->vq_wqe[qno];
|
/linux-6.12.1/drivers/scsi/lpfc/ |
D | lpfc_nvmet.c |
  2199      uint32_t *payload, qno;    in lpfc_nvmet_process_rcv_fcp_req() (local)
  2273      qno = nvmebuf->idx;
  2275          phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
  2276          phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
  2391      uint32_t size, oxid, sid, qno;    in lpfc_nvmet_unsol_fcp_buffer() (local)
  2457      qno = nvmebuf->idx;
  2459          phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
  2460          phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
|
/linux-6.12.1/drivers/net/wireless/rsi/ |
D | rsi_mgmt.h |
   708  static inline void rsi_set_len_qno(__le16 *addr, u16 len, u8 qno)
   710      *addr = cpu_to_le16(len | ((qno & 7) << 12));
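rsi_set_len_qno() packs a 12-bit frame length and a 3-bit queue number into one little-endian descriptor word, with qno occupying bits 12..14. A userspace restatement that also unpacks the word to show the layout; cpu_to_le16() is dropped, only the bit packing is reproduced:

    #include <stdint.h>
    #include <stdio.h>

    /* len in bits 0..11, qno (masked to 3 bits) in bits 12..14. */
    static uint16_t set_len_qno(uint16_t len, uint8_t qno)
    {
            return (uint16_t)(len | ((qno & 7) << 12));
    }

    int main(void)
    {
            uint16_t w = set_len_qno(0x123, 5);
            printf("word=0x%04x len=0x%03x qno=%u\n",
                   w, w & 0xFFF, (w >> 12) & 7);    /* 0x5123, 0x123, 5 */
            return 0;
    }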
|