/linux-6.12.1/drivers/net/ethernet/marvell/octeon_ep/
octep_rx.c
  in octep_oq_reset_indices() (oq is an argument):
    15  static void octep_oq_reset_indices(struct octep_oq *oq)
    17      oq->host_read_idx = 0;
    18      oq->host_refill_idx = 0;
    19      oq->refill_count = 0;
    20      oq->last_pkt_count = 0;
    21      oq->pkts_pending = 0;
  in octep_oq_fill_ring_buffers() (oq is an argument):
    32  static int octep_oq_fill_ring_buffers(struct octep_oq *oq)
    34      struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
    38      for (i = 0; i < oq->max_count; i++) {
    41          dev_err(oq->dev, "Rx buffer alloc failed\n");
  [all …]
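The octep_rx.c hits above cover resetting the OQ (output/Rx queue) indices and filling its descriptor ring with receive buffers. Below is a minimal, hypothetical sketch of that refill loop using generic kernel DMA APIs; the demo_* structures, the buff_info shadow array, and the page-per-descriptor choice are assumptions for illustration, not the driver's actual code.

```c
/*
 * Hedged sketch of the refill pattern suggested by octep_oq_fill_ring_buffers():
 * allocate a page per descriptor, DMA-map it, and publish the bus address in
 * the hardware descriptor ring. Structure layouts here are simplified and
 * assumed, not the driver's real definitions.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/types.h>

struct demo_oq_desc_hw {
        u64 buffer_ptr;                         /* bus address the NIC DMAs into */
};

struct demo_oq {
        struct device *dev;
        struct demo_oq_desc_hw *desc_ring;      /* host view of the HW ring */
        struct page **buff_info;                /* assumed software shadow of buffers */
        u32 max_count;                          /* number of descriptors */
};

static int demo_oq_fill_ring_buffers(struct demo_oq *oq)
{
        u32 i;

        for (i = 0; i < oq->max_count; i++) {
                struct page *page = alloc_page(GFP_KERNEL);
                dma_addr_t dma;

                if (!page) {
                        dev_err(oq->dev, "Rx buffer alloc failed\n");
                        goto unwind;
                }
                dma = dma_map_page(oq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
                if (dma_mapping_error(oq->dev, dma)) {
                        __free_page(page);
                        goto unwind;
                }
                oq->buff_info[i] = page;
                oq->desc_ring[i].buffer_ptr = dma;
        }
        return 0;

unwind:
        /* release whatever was mapped before the failure */
        while (i--) {
                dma_unmap_page(oq->dev, (dma_addr_t)oq->desc_ring[i].buffer_ptr,
                               PAGE_SIZE, DMA_FROM_DEVICE);
                __free_page(oq->buff_info[i]);
        }
        return -ENOMEM;
}
```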
octep_config.h
  oq accessor macros and struct member:
    67  #define CFG_GET_OQ_NUM_DESC(cfg)          ((cfg)->oq.num_descs)
    68  #define CFG_GET_OQ_BUF_SIZE(cfg)          ((cfg)->oq.buf_size)
    69  #define CFG_GET_OQ_REFILL_THRESHOLD(cfg)  ((cfg)->oq.refill_threshold)
    70  #define CFG_GET_OQ_INTR_PKT(cfg)          ((cfg)->oq.oq_intr_pkt)
    71  #define CFG_GET_OQ_INTR_TIME(cfg)         ((cfg)->oq.oq_intr_time)
    72  #define CFG_GET_OQ_WMARK(cfg)             ((cfg)->oq.wmark)
   235  struct octep_oq_config oq;                (member)
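The CFG_GET_OQ_* macros above are thin accessors over the oq member of the shared config object. A small usage sketch, with trimmed stand-ins for the config structures (only fields named by the macros are reproduced; the ring-sizing helper is hypothetical):

```c
/* Hedged usage sketch: reading OQ parameters through CFG_GET_* style accessors. */
#include <linux/types.h>

struct demo_oq_config {
        u32 num_descs;
        u32 buf_size;
        u32 refill_threshold;
        u32 oq_intr_pkt;
        u32 oq_intr_time;
        u32 wmark;
};

struct demo_config {
        struct demo_oq_config oq;
};

#define DEMO_CFG_GET_OQ_NUM_DESC(cfg)  ((cfg)->oq.num_descs)
#define DEMO_CFG_GET_OQ_BUF_SIZE(cfg)  ((cfg)->oq.buf_size)

/* e.g. size the descriptor-ring allocation from the shared config object */
static size_t demo_oq_ring_bytes(const struct demo_config *cfg, size_t desc_size)
{
        return (size_t)DEMO_CFG_GET_OQ_NUM_DESC(cfg) * desc_size;
}
```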
octep_cnxk_pf.c
  in octep_init_config_cnxk_pf():
   256      conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS;
   257      conf->oq.buf_size = OCTEP_OQ_BUF_SIZE;
   258      conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD;
   259      conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD;
   260      conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD;
   261      conf->oq.wmark = OCTEP_OQ_WMARK_MIN;
  in octep_setup_oq_regs_cnxk_pf() (oq is a local):
   335      struct octep_oq *oq = oct->oq[oq_no];
   360          oq->desc_ring_dma);
   362          oq->max_count);
   370      oq_ctl |= (oq->buffer_size & 0xffff);
  [all …]
octep_cn9k_pf.c
  in octep_init_config_cn93_pf():
   237      conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS;
   238      conf->oq.buf_size = OCTEP_OQ_BUF_SIZE;
   239      conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD;
   240      conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD;
   241      conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD;
  in octep_setup_oq_regs_cn93_pf() (oq is a local):
   315      struct octep_oq *oq = oct->oq[oq_no];
   340          oq->desc_ring_dma);
   342          oq->max_count);
   346      oq_ctl |= (oq->buffer_size & 0xffff); //populate the BSIZE (15-0)
   350      oq->pkts_sent_reg = oct->mmio[0].hw_addr + CN93_SDP_R_OUT_CNTS(oq_no);
  [all …]
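Both octep_setup_oq_regs_cn93_pf() and octep_setup_oq_regs_cnxk_pf() follow the same shape: write the ring's DMA base and depth to per-queue CSRs, pack the buffer size into bits 15:0 of the queue control word, and cache the MMIO address of the packet counter. A hedged sketch of that pattern, with placeholder register offsets rather than the real SDP CSR map:

```c
/*
 * Hedged sketch of per-queue OQ register programming. DEMO_* offsets and the
 * direct BAR0 arithmetic are assumptions for illustration only.
 */
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_R_OUT_SLIST_BADDR(q)  (0x10100ULL + (q) * 0x10000ULL)  /* assumed */
#define DEMO_R_OUT_SLIST_RSIZE(q)  (0x10110ULL + (q) * 0x10000ULL)  /* assumed */
#define DEMO_R_OUT_CONTROL(q)      (0x10150ULL + (q) * 0x10000ULL)  /* assumed */
#define DEMO_R_OUT_CNTS(q)         (0x10180ULL + (q) * 0x10000ULL)  /* assumed */

struct demo_oq {
        dma_addr_t desc_ring_dma;       /* bus address of the descriptor ring */
        u32 max_count;                  /* number of descriptors */
        u32 buffer_size;                /* per-buffer size, fits in 16 bits */
        u8 __iomem *pkts_sent_reg;      /* cached MMIO address of the count CSR */
};

static void demo_setup_oq_regs(u8 __iomem *bar0, struct demo_oq *oq, int q)
{
        u64 oq_ctl;

        /* point the hardware at the ring and tell it the ring depth */
        writeq(oq->desc_ring_dma, bar0 + DEMO_R_OUT_SLIST_BADDR(q));
        writeq(oq->max_count, bar0 + DEMO_R_OUT_SLIST_RSIZE(q));

        /* populate the BSIZE field (bits 15-0) of the queue control word */
        oq_ctl = readq(bar0 + DEMO_R_OUT_CONTROL(q));
        oq_ctl &= ~GENMASK_ULL(15, 0);
        oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0));
        writeq(oq_ctl, bar0 + DEMO_R_OUT_CONTROL(q));

        /* remember where the "packets sent" counter lives for the hot path */
        oq->pkts_sent_reg = bar0 + DEMO_R_OUT_CNTS(q);
}
```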
octep_ethtool.c
  in octep_get_ethtool_stats() (oq is a local):
   162      struct octep_oq *oq = oct->oq[q];
   168      rx_packets += oq->stats.packets;
   169      rx_bytes += oq->stats.bytes;
   170      rx_alloc_errors += oq->stats.alloc_failures;
   219      struct octep_oq *oq = oct->oq[q];
   221      data[i++] = oq->stats.packets;
   222      data[i++] = oq->stats.bytes;
   223      data[i++] = oq->stats.alloc_failures;
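octep_get_ethtool_stats() walks the per-queue OQ stats twice: once to fold them into netdev-level totals and once to emit a per-queue breakdown into the flat ethtool data[] array. A simplified sketch of that aggregation, using stand-in structures:

```c
/* Hedged sketch of two-pass per-queue stats aggregation; types are stand-ins. */
#include <linux/types.h>

struct demo_oq_stats {
        u64 packets;
        u64 bytes;
        u64 alloc_failures;
};

struct demo_oq {
        struct demo_oq_stats stats;
};

static int demo_fill_rx_stats(struct demo_oq **oqs, int num_oqs, u64 *data)
{
        u64 rx_packets = 0, rx_bytes = 0, rx_alloc_errors = 0;
        int i = 0, q;

        /* pass 1: netdev-level totals */
        for (q = 0; q < num_oqs; q++) {
                rx_packets += oqs[q]->stats.packets;
                rx_bytes += oqs[q]->stats.bytes;
                rx_alloc_errors += oqs[q]->stats.alloc_failures;
        }
        data[i++] = rx_packets;
        data[i++] = rx_bytes;
        data[i++] = rx_alloc_errors;

        /* pass 2: per-queue breakdown, in the same order as the stat strings */
        for (q = 0; q < num_oqs; q++) {
                data[i++] = oqs[q]->stats.packets;
                data[i++] = oqs[q]->stats.bytes;
                data[i++] = oqs[q]->stats.alloc_failures;
        }
        return i;       /* number of u64 slots written */
}
```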
octep_main.c
  in octep_alloc_ioq_vectors():
    66      ioq_vector->oq = oct->oq[i];
  in octep_enable_ioq_irq() (oq is an argument):
   563  static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
   565      u32 pkts_pend = oq->pkts_pending;
   573      if (oq->last_pkt_count - pkts_pend) {
   574          writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
   575          oq->last_pkt_count = pkts_pend;
   580      writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
  in octep_napi_poll():
   597      rx_done = octep_oq_process_rx(ioq_vector->oq, budget);
   606      octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
  in octep_napi_add():
   623      oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
  [all …]
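octep_napi_poll() and octep_enable_ioq_irq() implement the usual NAPI re-arm dance: drain the queue up to the budget, and only when work stops early acknowledge the consumed count in the hardware counter and request an interrupt resend for anything still pending. A hedged sketch under assumed register semantics (the resend bit position and counter behaviour are illustrative, not the SDP hardware spec):

```c
/*
 * Hedged sketch of the NAPI poll + interrupt re-arm flow implied above.
 * All demo_* names and DEMO_OQ_INTR_RESEND_BIT are placeholders.
 */
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/types.h>

#define DEMO_OQ_INTR_RESEND_BIT 59      /* assumed bit position */

struct demo_oq {
        u32 pkts_pending;               /* reported by HW, not yet consumed by host */
        u32 last_pkt_count;             /* last value read from the HW counter */
        u8 __iomem *pkts_sent_reg;
};

struct demo_ioq_vector {
        struct napi_struct napi;
        struct demo_oq *oq;
};

/* stand-in for the real Rx work (octep_oq_process_rx() in the driver) */
static int demo_oq_process_rx(struct demo_oq *oq, int budget)
{
        return 0;       /* pretend no packets were processed */
}

static void demo_enable_oq_irq(struct demo_oq *oq)
{
        u32 pkts_pend = oq->pkts_pending;

        /* ack what was already consumed so the counter reflects only pending work */
        if (oq->last_pkt_count - pkts_pend) {
                writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
                oq->last_pkt_count = pkts_pend;
        }
        /* ask the device to re-evaluate and re-raise the interrupt if needed */
        writeq(1ULL << DEMO_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
}

static int demo_napi_poll(struct napi_struct *napi, int budget)
{
        struct demo_ioq_vector *vec = container_of(napi, struct demo_ioq_vector, napi);
        int rx_done = demo_oq_process_rx(vec->oq, budget);

        /* only re-arm the interrupt once NAPI has genuinely completed */
        if (rx_done < budget && napi_complete_done(napi, rx_done))
                demo_enable_oq_irq(vec->oq);

        return rx_done;
}
```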
octep_main.h
   151  struct octep_oq *oq;                      (member)
   263  struct octep_oq *oq[OCTEP_MAX_OQ];        (member)
   404  int octep_oq_process_rx(struct octep_oq *oq, int budget);
/linux-6.12.1/drivers/net/ethernet/marvell/octeon_ep_vf/
octep_vf_rx.c
  in octep_vf_oq_reset_indices() (oq is an argument):
    15  static void octep_vf_oq_reset_indices(struct octep_vf_oq *oq)
    17      oq->host_read_idx = 0;
    18      oq->host_refill_idx = 0;
    19      oq->refill_count = 0;
    20      oq->last_pkt_count = 0;
    21      oq->pkts_pending = 0;
  in octep_vf_oq_fill_ring_buffers() (oq is an argument):
    32  static int octep_vf_oq_fill_ring_buffers(struct octep_vf_oq *oq)
    34      struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
    38      for (i = 0; i < oq->max_count; i++) {
    41          dev_err(oq->dev, "Rx buffer alloc failed\n");
  [all …]
octep_vf_config.h
  oq accessor macros and struct member:
    63  #define CFG_GET_OQ_NUM_DESC(cfg)          ((cfg)->oq.num_descs)
    64  #define CFG_GET_OQ_BUF_SIZE(cfg)          ((cfg)->oq.buf_size)
    65  #define CFG_GET_OQ_REFILL_THRESHOLD(cfg)  ((cfg)->oq.refill_threshold)
    66  #define CFG_GET_OQ_INTR_PKT(cfg)          ((cfg)->oq.oq_intr_pkt)
    67  #define CFG_GET_OQ_INTR_TIME(cfg)         ((cfg)->oq.oq_intr_time)
    68  #define CFG_GET_OQ_WMARK(cfg)             ((cfg)->oq.wmark)
   152  struct octep_vf_oq_config oq;             (member)
octep_vf_cnxk.c
  in octep_vf_init_config_cnxk_vf():
   153      conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
   154      conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
   155      conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
   156      conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
   157      conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
   158      conf->oq.wmark = OCTEP_VF_OQ_WMARK_MIN;
  in octep_vf_setup_oq_regs_cnxk() (oq is a local):
   204      struct octep_vf_oq *oq = oct->oq[oq_no];
   230      octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
   231      octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
   237      oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0));
  [all …]
octep_vf_cn9k.c
  in octep_vf_init_config_cn93_vf():
   151      conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
   152      conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
   153      conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
   154      conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
   155      conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
  in octep_vf_setup_oq_regs_cn93() (oq is a local):
   201      struct octep_vf_oq *oq = oct->oq[oq_no];
   227      octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
   228      octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
   232      oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0)); //populate the BSIZE (15-0)
   236      oq->pkts_sent_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_CNTS(oq_no);
  [all …]
octep_vf_ethtool.c
  in octep_vf_get_ethtool_stats() (oq is a local):
   126      struct octep_vf_oq *oq = oct->oq[q];
   129      rx_alloc_errors += oq->stats.alloc_failures;
   155      struct octep_vf_oq *oq = oct->oq[q];
   157      data[i++] = oq->stats.packets;
   158      data[i++] = oq->stats.bytes;
   159      data[i++] = oq->stats.alloc_failures;
octep_vf_main.c
  in octep_vf_alloc_ioq_vectors():
    65      ioq_vector->oq = oct->oq[i];
  in octep_vf_enable_ioq_irq() (oq is an argument):
   296  static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
   298      u32 pkts_pend = oq->pkts_pending;
   306      if (oq->last_pkt_count - pkts_pend) {
   307          writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
   308          oq->last_pkt_count = pkts_pend;
   313      writeq(1UL << OCTEP_VF_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
  in octep_vf_napi_poll():
   330      rx_done = octep_vf_oq_process_rx(ioq_vector->oq, budget);
   339      octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
  in octep_vf_napi_add():
   356      oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
  [all …]
octep_vf_main.h
   127  struct octep_vf_oq *oq;                       (member)
   252  struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ];      (member)
   329  int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget);
/linux-6.12.1/drivers/net/ethernet/cavium/liquidio/
cn23xx_vf_regs.h
  per-queue register address macros (oq is the macro argument):
   164  #define CN23XX_VF_SLI_OQ_PKT_CONTROL(oq) \
   165          (CN23XX_VF_SLI_OQ_PKT_CONTROL_START + ((oq) * CN23XX_VF_OQ_OFFSET))
   167  #define CN23XX_VF_SLI_OQ_BASE_ADDR64(oq) \
   168          (CN23XX_VF_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN23XX_VF_OQ_OFFSET))
   170  #define CN23XX_VF_SLI_OQ_SIZE(oq) \
   171          (CN23XX_VF_SLI_OQ_SIZE_START + ((oq) * CN23XX_VF_OQ_OFFSET))
   173  #define CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq) \
   174          (CN23XX_VF_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN23XX_VF_OQ_OFFSET))
   176  #define CN23XX_VF_SLI_OQ_PKTS_SENT(oq) \
   177          (CN23XX_VF_SLI_OQ_PKT_SENT_START + ((oq) * CN23XX_VF_OQ_OFFSET))
  [all …]
cn23xx_pf_regs.h
  per-queue register address macros (oq is the macro argument):
   282  #define CN23XX_SLI_OQ_PKT_CONTROL(oq) \
   283          (CN23XX_SLI_OQ_PKT_CONTROL_START + ((oq) * CN23XX_OQ_OFFSET))
   285  #define CN23XX_SLI_OQ_BASE_ADDR64(oq) \
   286          (CN23XX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN23XX_OQ_OFFSET))
   288  #define CN23XX_SLI_OQ_SIZE(oq) \
   289          (CN23XX_SLI_OQ_SIZE_START + ((oq) * CN23XX_OQ_OFFSET))
   291  #define CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq) \
   292          (CN23XX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN23XX_OQ_OFFSET))
   294  #define CN23XX_SLI_OQ_PKTS_SENT(oq) \
   295          (CN23XX_SLI_OQ_PKT_SENT_START + ((oq) * CN23XX_OQ_OFFSET))
  [all …]
cn66xx_regs.h
  per-queue register address macros (oq is the macro argument):
   277  #define CN6XXX_SLI_OQ_BASE_ADDR64(oq) \
   278          (CN6XXX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN6XXX_OQ_OFFSET))
   280  #define CN6XXX_SLI_OQ_SIZE(oq) \
   281          (CN6XXX_SLI_OQ_SIZE_START + ((oq) * CN6XXX_OQ_OFFSET))
   283  #define CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq) \
   284          (CN6XXX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN6XXX_OQ_OFFSET))
   286  #define CN6XXX_SLI_OQ_PKTS_SENT(oq) \
   287          (CN6XXX_SLI_OQ_PKT_SENT_START + ((oq) * CN6XXX_OQ_OFFSET))
   289  #define CN6XXX_SLI_OQ_PKTS_CREDIT(oq) \
   290          (CN6XXX_SLI_OQ_PKT_CREDITS_START + ((oq) * CN6XXX_OQ_OFFSET))
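All three liquidio register headers use the same addressing convention: each output queue owns a register block at a fixed stride from queue 0, so a per-queue CSR address is simply start + queue * offset. A small sketch of that convention with placeholder values (the DEMO_* numbers are not the real CSR map):

```c
/* Hedged sketch of the "base + queue * stride" register addressing pattern. */
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_SLI_OQ_BASE_ADDR_START64   0x10020ULL      /* assumed */
#define DEMO_OQ_OFFSET                  0x20000ULL      /* assumed per-queue stride */

#define DEMO_SLI_OQ_BASE_ADDR64(oq) \
        (DEMO_SLI_OQ_BASE_ADDR_START64 + ((oq) * DEMO_OQ_OFFSET))

/* e.g. program queue oq's ring base through BAR0 */
static void demo_set_oq_base(u8 __iomem *bar0, int oq, dma_addr_t ring_dma)
{
        writeq(ring_dma, bar0 + DEMO_SLI_OQ_BASE_ADDR64(oq));
}
```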
octeon_config.h
  oq accessor macros and struct member:
   131  #define CFG_GET_OQ_MAX_Q(cfg)            ((cfg)->oq.max_oqs)
   132  #define CFG_GET_OQ_PKTS_PER_INTR(cfg)    ((cfg)->oq.pkts_per_intr)
   133  #define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
   134  #define CFG_GET_OQ_INTR_PKT(cfg)         ((cfg)->oq.oq_intr_pkt)
   135  #define CFG_GET_OQ_INTR_TIME(cfg)        ((cfg)->oq.oq_intr_time)
   136  #define CFG_SET_OQ_INTR_PKT(cfg, val)    (cfg)->oq.oq_intr_pkt = val
   137  #define CFG_SET_OQ_INTR_TIME(cfg, val)   (cfg)->oq.oq_intr_time = val
   413  struct octeon_oq_config oq;              (member)
cn66xx_device.c
  in lio_cn6xxx_enable_io_queues():
   343      mask |= oct->io_qmask.oq;
  in lio_cn6xxx_disable_io_queues():
   378      mask ^= oct->io_qmask.oq;
   383      mask = (u32)oct->io_qmask.oq;
   393      if (!(oct->io_qmask.oq & BIT_ULL(i)))
  in lio_cn6xxx_process_droq_intr_regs():
   521      droq_mask &= oct->io_qmask.oq;
   557      droq_time_mask &= oct->io_qmask.oq;
   558      droq_cnt_mask &= oct->io_qmask.oq;
octeon_device.c
  designated initializers (.oq member of the default configs):
    52      .oq = {
   161      .oq = {
   327      .oq = {
   430      .oq = {
  in octeon_free_device_mem():
   651      if (oct->io_qmask.oq & BIT_ULL(i))
  in octeon_get_rx_qsize():
  1296          (oct->io_qmask.oq & BIT_ULL(q_no)))
octeon_droq.c
  in octeon_delete_droq():
   208      oct->io_qmask.oq &= ~(1ULL << q_no);
  in octeon_init_droq():
   303      oct->io_qmask.oq |= BIT_ULL(q_no);
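octeon_init_droq()/octeon_delete_droq() maintain io_qmask.oq as a one-bit-per-queue liveness map, and the other liquidio files test BIT_ULL(i) against it before touching per-queue state. A hedged sketch of that bitmap discipline with simplified types:

```c
/* Hedged sketch of the io_qmask.oq bitmap pattern; types are stand-ins. */
#include <linux/bits.h>
#include <linux/types.h>

struct demo_io_qmask {
        u64 oq;         /* bit i set => output queue i is live */
        u64 iq;         /* same idea for input queues */
};

static void demo_mark_oq_live(struct demo_io_qmask *m, u32 q_no)
{
        m->oq |= BIT_ULL(q_no);         /* set on queue init */
}

static void demo_mark_oq_dead(struct demo_io_qmask *m, u32 q_no)
{
        m->oq &= ~BIT_ULL(q_no);        /* clear on queue delete */
}

static void demo_for_each_live_oq(const struct demo_io_qmask *m, u32 max_oqs,
                                  void (*fn)(u32 q_no))
{
        u32 i;

        for (i = 0; i < max_oqs; i++) {
                if (!(m->oq & BIT_ULL(i)))
                        continue;       /* skip queues that were never set up */
                fn(i);
        }
}
```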
octeon_device.h
   189  u64 oq;          (member)
cn23xx_vf_device.c
  in cn23xx_enable_vf_io_queues():
   345      if (oct->io_qmask.oq & BIT_ULL(q_no)) {
lio_ethtool.c
  in lio_reset_queues():
  1154      if (!(oct->io_qmask.oq & BIT_ULL(i)))
  in lio_get_ethtool_stats():
  1683      if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
  in lio_get_strings():
  1882      if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
  in lio_vf_get_strings():
  1930      if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
/linux-6.12.1/fs/xfs/
xfs_trans_dquot.c
  in xfs_trans_dup_dqinfo() (oq is a local):
    79      struct xfs_dqtrx *oq, *nq;
    97      oq = &oqa[i];
   100      if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
   101          blk_res_used = oq->qt_bcount_delta;
   103      nq->qt_dquot = oq->qt_dquot;
   110      nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
   111      oq->qt_blk_res = blk_res_used;
   113      nq->qt_rtblk_res = oq->qt_rtblk_res -
   114                         oq->qt_rtblk_res_used;
   115      oq->qt_rtblk_res = oq->qt_rtblk_res_used;
  [all …]
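xfs_trans_dup_dqinfo() splits each per-dquot reservation between the old transaction ("oq") and its duplicate ("nq"): the old side keeps only the blocks it has actually consumed and the unused remainder migrates to the new side, so the total held against the dquot is unchanged. A trimmed sketch of the block-count half of that arithmetic (the struct is a reduced stand-in, not xfs_dqtrx):

```c
/* Hedged sketch of the reservation split in xfs_trans_dup_dqinfo(). */
#include <linux/types.h>

struct demo_dqtrx {
        u64 qt_blk_res;         /* blocks reserved for this transaction */
        s64 qt_bcount_delta;    /* blocks actually used so far */
};

static void demo_split_blk_res(struct demo_dqtrx *oq, struct demo_dqtrx *nq)
{
        u64 blk_res_used = 0;

        /* what the old transaction really consumed out of its reservation */
        if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
                blk_res_used = oq->qt_bcount_delta;

        nq->qt_blk_res = oq->qt_blk_res - blk_res_used; /* remainder moves over */
        oq->qt_blk_res = blk_res_used;                  /* old side keeps only usage */
}
```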