Lines Matching +full:pa +full:- +full:stats in drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
1 // SPDX-License-Identifier: GPL-2.0
19 static void otx2_nix_rq_op_stats(struct queue_stats *stats, in otx2_nix_rq_op_stats() argument
26 stats->bytes = otx2_atomic64_add(incr, ptr); in otx2_nix_rq_op_stats()
29 stats->pkts = otx2_atomic64_add(incr, ptr); in otx2_nix_rq_op_stats()
32 static void otx2_nix_sq_op_stats(struct queue_stats *stats, in otx2_nix_sq_op_stats() argument
39 stats->bytes = otx2_atomic64_add(incr, ptr); in otx2_nix_sq_op_stats()
42 stats->pkts = otx2_atomic64_add(incr, ptr); in otx2_nix_sq_op_stats()
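
Both helpers above read hardware counters through NIX LF "op" registers: the queue index rides in the upper bits of the operand, and otx2_atomic64_add() performs an atomic 64-bit add whose return value is the counter. A sketch of the elided body, assuming the NIX_LF_RQ_OP_OCTS/NIX_LF_RQ_OP_PKTS register names from otx2_reg.h:

	u64 incr = (u64)qidx << 32;	/* queue index in bits 63:32 */
	u64 *ptr;

	ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
	stats->bytes = otx2_atomic64_add(incr, ptr);	/* byte counter */

	ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
	stats->pkts = otx2_atomic64_add(incr, ptr);	/* packet counter */

The SQ variant is identical but targets the NIX_LF_SQ_OP_* registers.
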
49 if (!netif_running(pfvf->netdev)) in otx2_update_lmac_stats()
52 mutex_lock(&pfvf->mbox.lock); in otx2_update_lmac_stats()
53 req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox); in otx2_update_lmac_stats()
55 mutex_unlock(&pfvf->mbox.lock); in otx2_update_lmac_stats()
59 otx2_sync_mbox_msg(&pfvf->mbox); in otx2_update_lmac_stats()
60 mutex_unlock(&pfvf->mbox.lock); in otx2_update_lmac_stats()
67 if (!netif_running(pfvf->netdev)) in otx2_update_lmac_fec_stats()
69 mutex_lock(&pfvf->mbox.lock); in otx2_update_lmac_fec_stats()
70 req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox); in otx2_update_lmac_fec_stats()
72 otx2_sync_mbox_msg(&pfvf->mbox); in otx2_update_lmac_fec_stats()
73 mutex_unlock(&pfvf->mbox.lock); in otx2_update_lmac_fec_stats()
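
Every AF mailbox exchange in this file follows the same locked request/response shape, visible in both LMAC-stats helpers above. A generic sketch (the allocator name is a placeholder; each caller uses its own otx2_mbox_alloc_msg_*() helper):

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_example(&pfvf->mbox);	/* hypothetical name */
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	/* fill request fields here */
	err = otx2_sync_mbox_msg(&pfvf->mbox);	/* send and wait for the AF */
	mutex_unlock(&pfvf->mbox.lock);
	return err;
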
78 struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx]; in otx2_update_rq_stats()
80 if (!pfvf->qset.rq) in otx2_update_rq_stats()
83 otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx); in otx2_update_rq_stats()
89 struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx]; in otx2_update_sq_stats()
91 if (!pfvf->qset.sq) in otx2_update_sq_stats()
94 if (qidx >= pfvf->hw.non_qos_queues) { in otx2_update_sq_stats()
95 if (!test_bit(qidx - pfvf->hw.non_qos_queues, pfvf->qos.qos_sq_bmap)) in otx2_update_sq_stats()
99 otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx); in otx2_update_sq_stats()
105 struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats; in otx2_get_dev_stats()
107 dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS); in otx2_get_dev_stats()
108 dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP); in otx2_get_dev_stats()
109 dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST); in otx2_get_dev_stats()
110 dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST); in otx2_get_dev_stats()
111 dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST); in otx2_get_dev_stats()
112 dev_stats->rx_frames = dev_stats->rx_bcast_frames + in otx2_get_dev_stats()
113 dev_stats->rx_mcast_frames + in otx2_get_dev_stats()
114 dev_stats->rx_ucast_frames; in otx2_get_dev_stats()
116 dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS); in otx2_get_dev_stats()
117 dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP); in otx2_get_dev_stats()
118 dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST); in otx2_get_dev_stats()
119 dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST); in otx2_get_dev_stats()
120 dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST); in otx2_get_dev_stats()
121 dev_stats->tx_frames = dev_stats->tx_bcast_frames + in otx2_get_dev_stats()
122 dev_stats->tx_mcast_frames + in otx2_get_dev_stats()
123 dev_stats->tx_ucast_frames; in otx2_get_dev_stats()
127 struct rtnl_link_stats64 *stats) in otx2_get_stats64() argument
134 dev_stats = &pfvf->hw.dev_stats; in otx2_get_stats64()
135 stats->rx_bytes = dev_stats->rx_bytes; in otx2_get_stats64()
136 stats->rx_packets = dev_stats->rx_frames; in otx2_get_stats64()
137 stats->rx_dropped = dev_stats->rx_drops; in otx2_get_stats64()
138 stats->multicast = dev_stats->rx_mcast_frames; in otx2_get_stats64()
140 stats->tx_bytes = dev_stats->tx_bytes; in otx2_get_stats64()
141 stats->tx_packets = dev_stats->tx_frames; in otx2_get_stats64()
142 stats->tx_dropped = dev_stats->tx_drops; in otx2_get_stats64()
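
otx2_get_stats64() feeds the standard rtnl_link_stats64 interface; it is wired up as the .ndo_get_stats64 callback during netdev setup. A sketch of that hookup (registration details assumed from the PF/VF probe paths):

	static const struct net_device_ops otx2_netdev_ops = {
		/* ... */
		.ndo_get_stats64 = otx2_get_stats64,
		/* ... */
	};
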
152 mutex_lock(&pfvf->mbox.lock); in otx2_hw_set_mac_addr()
153 req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox); in otx2_hw_set_mac_addr()
155 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_set_mac_addr()
156 return -ENOMEM; in otx2_hw_set_mac_addr()
159 ether_addr_copy(req->mac_addr, mac); in otx2_hw_set_mac_addr()
161 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_hw_set_mac_addr()
162 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_set_mac_addr()
174 mutex_lock(&pfvf->mbox.lock); in otx2_hw_get_mac_addr()
175 req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox); in otx2_hw_get_mac_addr()
177 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_get_mac_addr()
178 return -ENOMEM; in otx2_hw_get_mac_addr()
181 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_hw_get_mac_addr()
183 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_get_mac_addr()
187 msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); in otx2_hw_get_mac_addr()
189 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_get_mac_addr()
193 eth_hw_addr_set(netdev, rsp->mac_addr); in otx2_hw_get_mac_addr()
194 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_get_mac_addr()
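
When the AF reply carries a payload, it is fetched against the request header after the synchronous send completes. The lookup pattern used by otx2_hw_get_mac_addr() above, with the response struct type hedged:

	msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(msghdr)) {	/* AF reported an error */
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(msghdr);
	}
	rsp = (struct nix_get_mac_addr_rsp *)msghdr;	/* assumed type */
	eth_hw_addr_set(netdev, rsp->mac_addr);
	mutex_unlock(&pfvf->mbox.lock);
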
204 if (!is_valid_ether_addr(addr->sa_data)) in otx2_set_mac_address()
205 return -EADDRNOTAVAIL; in otx2_set_mac_address()
207 if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) { in otx2_set_mac_address()
208 eth_hw_addr_set(netdev, addr->sa_data); in otx2_set_mac_address()
211 pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) in otx2_set_mac_address()
214 if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) in otx2_set_mac_address()
217 return -EPERM; in otx2_set_mac_address()
232 mutex_lock(&pfvf->mbox.lock); in otx2_hw_set_mtu()
233 req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox); in otx2_hw_set_mtu()
235 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_set_mtu()
236 return -ENOMEM; in otx2_hw_set_mtu()
239 req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; in otx2_hw_set_mtu()
242 if (is_otx2_lbkvf(pfvf->pdev)) in otx2_hw_set_mtu()
243 req->maxlen = maxlen; in otx2_hw_set_mtu()
245 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_hw_set_mtu()
246 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_set_mtu()
255 if (is_otx2_lbkvf(pfvf->pdev)) in otx2_config_pause_frm()
258 mutex_lock(&pfvf->mbox.lock); in otx2_config_pause_frm()
259 req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); in otx2_config_pause_frm()
261 err = -ENOMEM; in otx2_config_pause_frm()
265 req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED); in otx2_config_pause_frm()
266 req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED); in otx2_config_pause_frm()
267 req->set = 1; in otx2_config_pause_frm()
269 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_config_pause_frm()
271 mutex_unlock(&pfvf->mbox.lock); in otx2_config_pause_frm()
278 struct otx2_rss_info *rss = &pfvf->hw.rss_info; in otx2_set_flowkey_cfg()
283 mutex_lock(&pfvf->mbox.lock); in otx2_set_flowkey_cfg()
284 req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox); in otx2_set_flowkey_cfg()
286 mutex_unlock(&pfvf->mbox.lock); in otx2_set_flowkey_cfg()
287 return -ENOMEM; in otx2_set_flowkey_cfg()
289 req->mcam_index = -1; /* Default or reserved index */ in otx2_set_flowkey_cfg()
290 req->flowkey_cfg = rss->flowkey_cfg; in otx2_set_flowkey_cfg()
291 req->group = DEFAULT_RSS_CONTEXT_GROUP; in otx2_set_flowkey_cfg()
293 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_set_flowkey_cfg()
298 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); in otx2_set_flowkey_cfg()
304 pfvf->hw.flowkey_alg_idx = rsp->alg_idx; in otx2_set_flowkey_cfg()
306 mutex_unlock(&pfvf->mbox.lock); in otx2_set_flowkey_cfg()
312 struct otx2_rss_info *rss = &pfvf->hw.rss_info; in otx2_set_rss_table()
313 const int index = rss->rss_size * ctx_id; in otx2_set_rss_table()
314 struct mbox *mbox = &pfvf->mbox; in otx2_set_rss_table()
319 mutex_lock(&mbox->lock); in otx2_set_rss_table()
320 rss_ctx = rss->rss_ctx[ctx_id]; in otx2_set_rss_table()
322 for (idx = 0; idx < rss->rss_size; idx++) { in otx2_set_rss_table()
330 mutex_unlock(&mbox->lock); in otx2_set_rss_table()
335 mutex_unlock(&mbox->lock); in otx2_set_rss_table()
336 return -ENOMEM; in otx2_set_rss_table()
340 aq->rss.rq = rss_ctx->ind_tbl[idx]; in otx2_set_rss_table()
343 aq->qidx = index + idx; in otx2_set_rss_table()
344 aq->ctype = NIX_AQ_CTYPE_RSS; in otx2_set_rss_table()
345 aq->op = NIX_AQ_INSTOP_INIT; in otx2_set_rss_table()
348 mutex_unlock(&mbox->lock); in otx2_set_rss_table()
354 struct otx2_rss_info *rss = &pfvf->hw.rss_info; in otx2_set_rss_key()
355 u64 *key = (u64 *)&rss->key[4]; in otx2_set_rss_key()
367 (u64)(*((u32 *)&rss->key)) << 32); in otx2_set_rss_key()
368 idx = sizeof(rss->key) / sizeof(u64); in otx2_set_rss_key()
370 idx--; in otx2_set_rss_key()
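
otx2_set_rss_key() above programs the RSS hash key into the NIX_LF_RX_SECRETX register file. A worked layout, assuming sizeof(rss->key) == 44 so idx starts at 44 / 8 = 5:

	/* SECRETX(5)[63:32] = key bytes 0..3   (the << 32 write above)
	 * SECRETX(4)        = key bytes 4..11  (first *key++, key = &key[4])
	 * SECRETX(3)        = key bytes 12..19
	 * ...
	 * SECRETX(0)        = key bytes 36..43
	 */
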
377 struct otx2_rss_info *rss = &pfvf->hw.rss_info; in otx2_rss_init()
381 rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]); in otx2_rss_init()
384 if (!rss->enable) in otx2_rss_init()
385 netdev_rss_key_fill(rss->key, sizeof(rss->key)); in otx2_rss_init()
388 if (!netif_is_rxfh_configured(pfvf->netdev)) { in otx2_rss_init()
390 rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size, in otx2_rss_init()
392 if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]) in otx2_rss_init()
393 return -ENOMEM; in otx2_rss_init()
395 rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]; in otx2_rss_init()
396 for (idx = 0; idx < rss->rss_size; idx++) in otx2_rss_init()
397 rss_ctx->ind_tbl[idx] = in otx2_rss_init()
399 pfvf->hw.rx_queues); in otx2_rss_init()
406 rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg : in otx2_rss_init()
416 rss->enable = true; in otx2_rss_init()
425 field = (struct nix_lso_format *)&lso->fields[0]; in otx2_setup_udp_segmentation()
426 lso->field_mask = GENMASK(18, 0); in otx2_setup_udp_segmentation()
429 field->layer = NIX_TXLAYER_OL3; in otx2_setup_udp_segmentation()
431 field->offset = v4 ? 2 : 4; in otx2_setup_udp_segmentation()
432 field->sizem1 = 1; /* i.e. 2 bytes */ in otx2_setup_udp_segmentation()
433 field->alg = NIX_LSOALG_ADD_PAYLEN; in otx2_setup_udp_segmentation()
439 field->layer = NIX_TXLAYER_OL3; in otx2_setup_udp_segmentation()
440 field->offset = 4; in otx2_setup_udp_segmentation()
441 field->sizem1 = 1; /* i.e. 2 bytes */ in otx2_setup_udp_segmentation()
442 field->alg = NIX_LSOALG_ADD_SEGNUM; in otx2_setup_udp_segmentation()
447 field->layer = NIX_TXLAYER_OL4; in otx2_setup_udp_segmentation()
448 field->offset = 4; in otx2_setup_udp_segmentation()
449 field->sizem1 = 1; in otx2_setup_udp_segmentation()
450 field->alg = NIX_LSOALG_ADD_PAYLEN; in otx2_setup_udp_segmentation()
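
The three LSO fields above name the header words the hardware must rewrite for each UDP segment. A worked reading against the standard header layouts (offsets are within the outer L3/L4 headers):

	/* field 0: OL3, offset 2 (v4) / 4 (v6), 2 bytes, ADD_PAYLEN
	 *          -> iphdr->tot_len or ipv6hdr->payload_len
	 * field 1: OL3, offset 4, 2 bytes, ADD_SEGNUM
	 *          -> iphdr->id, bumped per segment (IPv4 only)
	 * field 2: OL4, offset 4, 2 bytes, ADD_PAYLEN
	 *          -> udphdr->len
	 */
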
458 struct otx2_hw *hw = &pfvf->hw; in otx2_setup_segmentation()
461 mutex_lock(&pfvf->mbox.lock); in otx2_setup_segmentation()
464 lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox); in otx2_setup_segmentation()
471 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_setup_segmentation()
476 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr); in otx2_setup_segmentation()
480 hw->lso_udpv4_idx = rsp->lso_format_idx; in otx2_setup_segmentation()
483 lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox); in otx2_setup_segmentation()
490 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_setup_segmentation()
495 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr); in otx2_setup_segmentation()
499 hw->lso_udpv6_idx = rsp->lso_format_idx; in otx2_setup_segmentation()
500 mutex_unlock(&pfvf->mbox.lock); in otx2_setup_segmentation()
503 mutex_unlock(&pfvf->mbox.lock); in otx2_setup_segmentation()
504 netdev_info(pfvf->netdev, in otx2_setup_segmentation()
506 pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4; in otx2_setup_segmentation()
518 ((u64)(pfvf->hw.cq_time_wait * 10) << 48) | in otx2_config_irq_coalescing()
519 ((u64)pfvf->hw.cq_qcount_wait << 32) | in otx2_config_irq_coalescing()
520 (pfvf->hw.cq_ecount_wait - 1)); in otx2_config_irq_coalescing()
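
otx2_config_irq_coalescing() packs all three coalescing knobs into one CINT wait register. A worked example with illustrative settings cq_time_wait = 10, cq_qcount_wait = 16, cq_ecount_wait = 32:

	/* bits 63:48 = 10 * 10 = 100  (time threshold; the x10 scaling
	 *                              suggests hardware units of 0.1 us)
	 * bits 47:32 = 16             (CQ count threshold)
	 * low bits   = 32 - 1 = 31    (CQE entry count, stored minus one)
	 */
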
530 sz = SKB_DATA_ALIGN(pool->rbsize); in otx2_alloc_pool_buf()
533 page = page_pool_alloc_frag(pool->page_pool, &offset, sz, GFP_ATOMIC); in otx2_alloc_pool_buf()
535 return -ENOMEM; in otx2_alloc_pool_buf()
546 if (pool->page_pool) in __otx2_alloc_rbuf()
549 buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN); in __otx2_alloc_rbuf()
551 return -ENOMEM; in __otx2_alloc_rbuf()
553 *dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize, in __otx2_alloc_rbuf()
555 if (unlikely(dma_mapping_error(pfvf->dev, *dma))) { in __otx2_alloc_rbuf()
557 return -ENOMEM; in __otx2_alloc_rbuf()
577 if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) in otx2_alloc_buffer()
578 return -ENOMEM; in otx2_alloc_buffer()
586 schedule_work(&pfvf->reset_task); in otx2_tx_timeout()
597 dev_warn(pfvf->dev, "Failed to read mac from hardware\n"); in otx2_get_mac_from_af()
600 if (!is_valid_ether_addr(netdev->dev_addr)) in otx2_get_mac_from_af()
608 struct otx2_hw *hw = &pfvf->hw; in otx2_txschq_config()
613 dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); in otx2_txschq_config()
615 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); in otx2_txschq_config()
617 return -ENOMEM; in otx2_txschq_config()
619 req->lvl = lvl; in otx2_txschq_config()
620 req->num_regs = 1; in otx2_txschq_config()
622 schq_list = hw->txschq_list; in otx2_txschq_config()
625 schq_list = pfvf->pfc_schq_list; in otx2_txschq_config()
631 req->reg[0] = NIX_AF_SMQX_CFG(schq); in otx2_txschq_config()
632 req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU; in otx2_txschq_config()
633 req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) | in otx2_txschq_config()
636 if (!is_dev_otx2(pfvf->pdev)) in otx2_txschq_config()
637 req->regval[0] |= FIELD_PREP(GENMASK_ULL(58, 57), in otx2_txschq_config()
638 (u64)hw->smq_link_type); in otx2_txschq_config()
639 req->num_regs++; in otx2_txschq_config()
642 req->reg[1] = NIX_AF_MDQX_PARENT(schq); in otx2_txschq_config()
643 req->regval[1] = parent << 16; in otx2_txschq_config()
644 req->num_regs++; in otx2_txschq_config()
646 req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq); in otx2_txschq_config()
647 req->regval[2] = dwrr_val; in otx2_txschq_config()
650 req->reg[0] = NIX_AF_TL4X_PARENT(schq); in otx2_txschq_config()
651 req->regval[0] = (u64)parent << 16; in otx2_txschq_config()
652 req->num_regs++; in otx2_txschq_config()
653 req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq); in otx2_txschq_config()
654 req->regval[1] = dwrr_val; in otx2_txschq_config()
657 req->reg[0] = NIX_AF_TL3X_PARENT(schq); in otx2_txschq_config()
658 req->regval[0] = (u64)parent << 16; in otx2_txschq_config()
659 req->num_regs++; in otx2_txschq_config()
660 req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq); in otx2_txschq_config()
661 req->regval[1] = dwrr_val; in otx2_txschq_config()
662 if (lvl == hw->txschq_link_cfg_lvl) { in otx2_txschq_config()
663 req->num_regs++; in otx2_txschq_config()
664 req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); in otx2_txschq_config()
668 req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio; in otx2_txschq_config()
672 req->reg[0] = NIX_AF_TL2X_PARENT(schq); in otx2_txschq_config()
673 req->regval[0] = (u64)parent << 16; in otx2_txschq_config()
675 req->num_regs++; in otx2_txschq_config()
676 req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq); in otx2_txschq_config()
677 req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val; in otx2_txschq_config()
679 if (lvl == hw->txschq_link_cfg_lvl) { in otx2_txschq_config()
680 req->num_regs++; in otx2_txschq_config()
681 req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); in otx2_txschq_config()
685 req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio; in otx2_txschq_config()
696 req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq); in otx2_txschq_config()
697 req->regval[0] = TXSCH_TL1_DFLT_RR_QTM; in otx2_txschq_config()
699 req->num_regs++; in otx2_txschq_config()
700 req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq); in otx2_txschq_config()
701 req->regval[1] = hw->txschq_aggr_lvl_rr_prio << 1; in otx2_txschq_config()
703 req->num_regs++; in otx2_txschq_config()
704 req->reg[2] = NIX_AF_TL1X_CIR(schq); in otx2_txschq_config()
705 req->regval[2] = 0; in otx2_txschq_config()
708 return otx2_sync_mbox_msg(&pfvf->mbox); in otx2_txschq_config()
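
otx2_txschq_config() programs one level of the NIX transmit scheduler tree per call. The hierarchy assembled across calls, as a sketch:

	/* SMQ/MDQ -> TL4 -> TL3 -> TL2 -> TL1 -> physical/LBK link
	 *
	 * Each child sets NIX_AF_<LVL>X_PARENT(schq) = parent << 16 and a
	 * DWRR weight via NIX_AF_<LVL>X_SCHEDULE(schq); whichever of TL3/TL2
	 * is the link-config level (hw->txschq_link_cfg_lvl) also enables
	 * the link through NIX_AF_TL3_TL2X_LINKX_CFG. TL1 is the root and
	 * gets a default round-robin quantum instead of a parent.
	 */
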
717 mutex_lock(&pfvf->mbox.lock); in otx2_smq_flush()
719 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); in otx2_smq_flush()
721 mutex_unlock(&pfvf->mbox.lock); in otx2_smq_flush()
722 return -ENOMEM; in otx2_smq_flush()
725 req->lvl = NIX_TXSCH_LVL_SMQ; in otx2_smq_flush()
726 req->reg[0] = NIX_AF_SMQX_CFG(smq); in otx2_smq_flush()
727 req->regval[0] |= BIT_ULL(49); in otx2_smq_flush()
728 req->num_regs++; in otx2_smq_flush()
730 rc = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_smq_flush()
731 mutex_unlock(&pfvf->mbox.lock); in otx2_smq_flush()
743 req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox); in otx2_txsch_alloc()
745 return -ENOMEM; in otx2_txsch_alloc()
749 req->schq[lvl] = 1; in otx2_txsch_alloc()
750 rc = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_txsch_alloc()
755 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); in otx2_txsch_alloc()
761 for (schq = 0; schq < rsp->schq[lvl]; schq++) in otx2_txsch_alloc()
762 pfvf->hw.txschq_list[lvl][schq] = in otx2_txsch_alloc()
763 rsp->schq_list[lvl][schq]; in otx2_txsch_alloc()
765 pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl; in otx2_txsch_alloc()
766 pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio; in otx2_txsch_alloc()
776 mutex_lock(&pfvf->mbox.lock); in otx2_txschq_free_one()
778 free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox); in otx2_txschq_free_one()
780 mutex_unlock(&pfvf->mbox.lock); in otx2_txschq_free_one()
781 netdev_err(pfvf->netdev, in otx2_txschq_free_one()
786 free_req->schq_lvl = lvl; in otx2_txschq_free_one()
787 free_req->schq = schq; in otx2_txschq_free_one()
789 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_txschq_free_one()
791 netdev_err(pfvf->netdev, in otx2_txschq_free_one()
795 mutex_unlock(&pfvf->mbox.lock); in otx2_txschq_free_one()
806 pfvf->hw.txschq_list[lvl][0]); in otx2_txschq_stop()
811 pfvf->hw.txschq_list[lvl][schq] = 0; in otx2_txschq_stop()
824 sq = &pfvf->qset.sq[qidx]; in otx2_sqb_flush()
825 if (!sq->sqb_ptrs) in otx2_sqb_flush()
849 #define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
850 #define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
851 #define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
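
The aura thresholds above are expressed in 1/256ths of the aura size, counting down from 255. Worked out:

	/* RQ_BP_LVL_AURA   = 255 - (85 * 256) / 100 = 255 - 217 = 38
	 * RQ_PASS_LVL_AURA = 255 - (95 * 256) / 100 = 255 - 243 = 12
	 * RQ_DROP_LVL_AURA = 255 - (99 * 256) / 100 = 255 - 253 = 2
	 *
	 * i.e. backpressure asserts with ~15% of buffers left, RED marking
	 * starts with ~5% left, and drops become unconditional at ~1%.
	 */
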
855 struct otx2_qset *qset = &pfvf->qset; in otx2_rq_init()
859 aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); in otx2_rq_init()
861 return -ENOMEM; in otx2_rq_init()
863 aq->rq.cq = qidx; in otx2_rq_init()
864 aq->rq.ena = 1; in otx2_rq_init()
865 aq->rq.pb_caching = 1; in otx2_rq_init()
866 aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */ in otx2_rq_init()
867 aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1; in otx2_rq_init()
868 aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */ in otx2_rq_init()
869 aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */ in otx2_rq_init()
870 aq->rq.qint_idx = 0; in otx2_rq_init()
871 aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */ in otx2_rq_init()
872 aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */ in otx2_rq_init()
873 aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); in otx2_rq_init()
874 aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); in otx2_rq_init()
875 aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA; in otx2_rq_init()
876 aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA; in otx2_rq_init()
879 aq->qidx = qidx; in otx2_rq_init()
880 aq->ctype = NIX_AQ_CTYPE_RQ; in otx2_rq_init()
881 aq->op = NIX_AQ_INSTOP_INIT; in otx2_rq_init()
883 return otx2_sync_mbox_msg(&pfvf->mbox); in otx2_rq_init()
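
otx2_rq_init() is one instance of the NIX admin-queue (AQ) context-init pattern used for RQs, SQs, CQs and RSS contexts alike. The skeleton:

	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	/* 1. fill the context, e.g. aq->rq / aq->sq / aq->cq */
	/* 2. address the target context */
	aq->qidx  = qidx;
	aq->ctype = NIX_AQ_CTYPE_RQ;	/* or _SQ / _CQ / _RSS */
	aq->op    = NIX_AQ_INSTOP_INIT;
	/* 3. post it to the AF */
	return otx2_sync_mbox_msg(&pfvf->mbox);
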
892 sq = &pfvf->qset.sq[qidx]; in otx2_sq_aq_init()
893 sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx)); in otx2_sq_aq_init()
895 aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); in otx2_sq_aq_init()
897 return -ENOMEM; in otx2_sq_aq_init()
899 aq->sq.cq = pfvf->hw.rx_queues + qidx; in otx2_sq_aq_init()
900 aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */ in otx2_sq_aq_init()
901 aq->sq.cq_ena = 1; in otx2_sq_aq_init()
902 aq->sq.ena = 1; in otx2_sq_aq_init()
903 aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); in otx2_sq_aq_init()
904 aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); in otx2_sq_aq_init()
905 aq->sq.default_chan = pfvf->hw.tx_chan_base; in otx2_sq_aq_init()
906 aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ in otx2_sq_aq_init()
907 aq->sq.sqb_aura = sqb_aura; in otx2_sq_aq_init()
908 aq->sq.sq_int_ena = NIX_SQINT_BITS; in otx2_sq_aq_init()
909 aq->sq.qint_idx = 0; in otx2_sq_aq_init()
913 aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt)); in otx2_sq_aq_init()
916 aq->qidx = qidx; in otx2_sq_aq_init()
917 aq->ctype = NIX_AQ_CTYPE_SQ; in otx2_sq_aq_init()
918 aq->op = NIX_AQ_INSTOP_INIT; in otx2_sq_aq_init()
920 return otx2_sync_mbox_msg(&pfvf->mbox); in otx2_sq_aq_init()
925 struct otx2_qset *qset = &pfvf->qset; in otx2_sq_init()
930 pool = &pfvf->qset.pool[sqb_aura]; in otx2_sq_init()
931 sq = &qset->sq[qidx]; in otx2_sq_init()
932 sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128; in otx2_sq_init()
933 sq->sqe_cnt = qset->sqe_cnt; in otx2_sq_init()
935 err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size); in otx2_sq_init()
939 if (qidx < pfvf->hw.tx_queues) { in otx2_sq_init()
940 err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, in otx2_sq_init()
946 sq->sqe_base = sq->sqe->base; in otx2_sq_init()
947 sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL); in otx2_sq_init()
948 if (!sq->sg) in otx2_sq_init()
949 return -ENOMEM; in otx2_sq_init()
951 if (pfvf->ptp && qidx < pfvf->hw.tx_queues) { in otx2_sq_init()
952 err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt, in otx2_sq_init()
953 sizeof(*sq->timestamps)); in otx2_sq_init()
955 kfree(sq->sg); in otx2_sq_init()
956 sq->sg = NULL; in otx2_sq_init()
961 sq->head = 0; in otx2_sq_init()
962 sq->cons_head = 0; in otx2_sq_init()
963 sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1; in otx2_sq_init()
964 sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb; in otx2_sq_init()
966 sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100; in otx2_sq_init()
967 sq->aura_id = sqb_aura; in otx2_sq_init()
968 sq->aura_fc_addr = pool->fc_addr->base; in otx2_sq_init()
969 sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0)); in otx2_sq_init()
971 sq->stats.bytes = 0; in otx2_sq_init()
972 sq->stats.pkts = 0; in otx2_sq_init()
974 err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura); in otx2_sq_init()
976 kfree(sq->sg); in otx2_sq_init()
977 sq->sg = NULL; in otx2_sq_init()
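
The SQB accounting in otx2_sq_init() sizes the queue in send-buffer units. A worked example assuming a 4 KB SQB and 128-byte SQEs (illustrative values; hw->sqb_size actually comes from the AF):

	/* sqe_per_sqb = (4096 / 128) - 1 = 31  (one SQE slot per SQB is
	 *                                       held back, e.g. for chaining)
	 * with sqe_cnt = 1024: num_sqbs = (1024 + 31) / 31 = 34
	 * sqe_thresh  = (34 * 31 * 10) / 100 = 105  (~10% kept free)
	 */
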
987 struct otx2_qset *qset = &pfvf->qset; in otx2_cq_init()
992 cq = &qset->cq[qidx]; in otx2_cq_init()
993 cq->cq_idx = qidx; in otx2_cq_init()
994 non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues; in otx2_cq_init()
995 if (qidx < pfvf->hw.rx_queues) { in otx2_cq_init()
996 cq->cq_type = CQ_RX; in otx2_cq_init()
997 cq->cint_idx = qidx; in otx2_cq_init()
998 cq->cqe_cnt = qset->rqe_cnt; in otx2_cq_init()
999 if (pfvf->xdp_prog) in otx2_cq_init()
1000 xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0); in otx2_cq_init()
1002 cq->cq_type = CQ_TX; in otx2_cq_init()
1003 cq->cint_idx = qidx - pfvf->hw.rx_queues; in otx2_cq_init()
1004 cq->cqe_cnt = qset->sqe_cnt; in otx2_cq_init()
1006 if (pfvf->hw.xdp_queues && in otx2_cq_init()
1007 qidx < non_xdp_queues + pfvf->hw.xdp_queues) { in otx2_cq_init()
1008 cq->cq_type = CQ_XDP; in otx2_cq_init()
1009 cq->cint_idx = qidx - non_xdp_queues; in otx2_cq_init()
1010 cq->cqe_cnt = qset->sqe_cnt; in otx2_cq_init()
1012 cq->cq_type = CQ_QOS; in otx2_cq_init()
1013 cq->cint_idx = qidx - non_xdp_queues - in otx2_cq_init()
1014 pfvf->hw.xdp_queues; in otx2_cq_init()
1015 cq->cqe_cnt = qset->sqe_cnt; in otx2_cq_init()
1018 cq->cqe_size = pfvf->qset.xqe_size; in otx2_cq_init()
1021 err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size); in otx2_cq_init()
1026 cq->cqe_base = cq->cqe->base; in otx2_cq_init()
1030 pool_id = ((cq->cq_type == CQ_RX) && in otx2_cq_init()
1031 (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx; in otx2_cq_init()
1032 cq->rbpool = &qset->pool[pool_id]; in otx2_cq_init()
1033 cq->refill_task_sched = false; in otx2_cq_init()
1036 aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); in otx2_cq_init()
1038 return -ENOMEM; in otx2_cq_init()
1040 aq->cq.ena = 1; in otx2_cq_init()
1041 aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4); in otx2_cq_init()
1042 aq->cq.caching = 1; in otx2_cq_init()
1043 aq->cq.base = cq->cqe->iova; in otx2_cq_init()
1044 aq->cq.cint_idx = cq->cint_idx; in otx2_cq_init()
1045 aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS; in otx2_cq_init()
1046 aq->cq.qint_idx = 0; in otx2_cq_init()
1047 aq->cq.avg_level = 255; in otx2_cq_init()
1049 if (qidx < pfvf->hw.rx_queues) { in otx2_cq_init()
1050 aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt); in otx2_cq_init()
1051 aq->cq.drop_ena = 1; in otx2_cq_init()
1053 if (!is_otx2_lbkvf(pfvf->pdev)) { in otx2_cq_init()
1055 aq->cq.bp_ena = 1; in otx2_cq_init()
1057 aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]]; in otx2_cq_init()
1059 aq->cq.bpid = pfvf->bpid[0]; in otx2_cq_init()
1063 aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); in otx2_cq_init()
1068 aq->qidx = qidx; in otx2_cq_init()
1069 aq->ctype = NIX_AQ_CTYPE_CQ; in otx2_cq_init()
1070 aq->op = NIX_AQ_INSTOP_INIT; in otx2_cq_init()
1072 return otx2_sync_mbox_msg(&pfvf->mbox); in otx2_cq_init()
1083 pfvf = wrk->pf; in otx2_pool_refill_task()
1084 qidx = wrk - pfvf->refill_wrk; in otx2_pool_refill_task()
1085 cq = &pfvf->qset.cq[qidx]; in otx2_pool_refill_task()
1087 cq->refill_task_sched = false; in otx2_pool_refill_task()
1090 napi_schedule(wrk->napi); in otx2_pool_refill_task()
1099 for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { in otx2_config_nix_queues()
1108 for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) { in otx2_config_nix_queues()
1117 for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) { in otx2_config_nix_queues()
1123 pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf, in otx2_config_nix_queues()
1127 pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt, in otx2_config_nix_queues()
1129 if (!pfvf->refill_wrk) in otx2_config_nix_queues()
1130 return -ENOMEM; in otx2_config_nix_queues()
1132 for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) { in otx2_config_nix_queues()
1133 pfvf->refill_wrk[qidx].pf = pfvf; in otx2_config_nix_queues()
1134 INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work, in otx2_config_nix_queues()
1146 pfvf->qset.xqe_size = pfvf->hw.xqe_size; in otx2_config_nix()
1149 nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox); in otx2_config_nix()
1151 return -ENOMEM; in otx2_config_nix()
1154 nixlf->rq_cnt = pfvf->hw.rx_queues; in otx2_config_nix()
1155 nixlf->sq_cnt = otx2_get_total_tx_queues(pfvf); in otx2_config_nix()
1156 nixlf->cq_cnt = pfvf->qset.cq_cnt; in otx2_config_nix()
1157 nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE; in otx2_config_nix()
1158 nixlf->rss_grps = MAX_RSS_GROUPS; in otx2_config_nix()
1159 nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64; in otx2_config_nix()
1164 nixlf->npa_func = RVU_DEFAULT_PF_FUNC; in otx2_config_nix()
1168 nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37); in otx2_config_nix()
1170 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_config_nix()
1174 rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, in otx2_config_nix()
1175 &nixlf->hdr); in otx2_config_nix()
1179 if (rsp->qints < 1) in otx2_config_nix()
1180 return -ENXIO; in otx2_config_nix()
1182 return rsp->hdr.rc; in otx2_config_nix()
1187 struct otx2_qset *qset = &pfvf->qset; in otx2_sq_free_sqbs()
1188 struct otx2_hw *hw = &pfvf->hw; in otx2_sq_free_sqbs()
1191 u64 iova, pa; in otx2_sq_free_sqbs() local
1194 sq = &qset->sq[qidx]; in otx2_sq_free_sqbs()
1195 if (!sq->sqb_ptrs) in otx2_sq_free_sqbs()
1197 for (sqb = 0; sqb < sq->sqb_count; sqb++) { in otx2_sq_free_sqbs()
1198 if (!sq->sqb_ptrs[sqb]) in otx2_sq_free_sqbs()
1200 iova = sq->sqb_ptrs[sqb]; in otx2_sq_free_sqbs()
1201 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); in otx2_sq_free_sqbs()
1202 dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size, in otx2_sq_free_sqbs()
1205 put_page(virt_to_page(phys_to_virt(pa))); in otx2_sq_free_sqbs()
1207 sq->sqb_count = 0; in otx2_sq_free_sqbs()
1215 u64 pa; in otx2_free_bufs() local
1217 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); in otx2_free_bufs()
1218 page = virt_to_head_page(phys_to_virt(pa)); in otx2_free_bufs()
1220 if (pool->page_pool) { in otx2_free_bufs()
1221 page_pool_put_full_page(pool->page_pool, page, true); in otx2_free_bufs()
1223 dma_unmap_page_attrs(pfvf->dev, iova, size, in otx2_free_bufs()
1239 pool_end = pool_start + pfvf->hw.sqpool_cnt; in otx2_free_aura_ptr()
1240 size = pfvf->hw.sqb_size; in otx2_free_aura_ptr()
1244 pool_end = pfvf->hw.rqpool_cnt; in otx2_free_aura_ptr()
1245 size = pfvf->rbsize; in otx2_free_aura_ptr()
1251 pool = &pfvf->qset.pool[pool_id]; in otx2_free_aura_ptr()
1254 iova -= OTX2_HEAD_ROOM; in otx2_free_aura_ptr()
1268 if (!pfvf->qset.pool) in otx2_aura_pool_free()
1271 for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) { in otx2_aura_pool_free()
1272 pool = &pfvf->qset.pool[pool_id]; in otx2_aura_pool_free()
1273 qmem_free(pfvf->dev, pool->stack); in otx2_aura_pool_free()
1274 qmem_free(pfvf->dev, pool->fc_addr); in otx2_aura_pool_free()
1275 page_pool_destroy(pool->page_pool); in otx2_aura_pool_free()
1276 pool->page_pool = NULL; in otx2_aura_pool_free()
1278 devm_kfree(pfvf->dev, pfvf->qset.pool); in otx2_aura_pool_free()
1279 pfvf->qset.pool = NULL; in otx2_aura_pool_free()
1289 pool = &pfvf->qset.pool[pool_id]; in otx2_aura_init()
1294 if (!pool->fc_addr) { in otx2_aura_init()
1295 err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN); in otx2_aura_init()
1301 aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); in otx2_aura_init()
1304 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_aura_init()
1307 aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); in otx2_aura_init()
1309 return -ENOMEM; in otx2_aura_init()
1312 aq->aura_id = aura_id; in otx2_aura_init()
1314 aq->aura.pool_addr = pool_id; in otx2_aura_init()
1315 aq->aura.pool_caching = 1; in otx2_aura_init()
1316 aq->aura.shift = ilog2(numptrs) - 8; in otx2_aura_init()
1317 aq->aura.count = numptrs; in otx2_aura_init()
1318 aq->aura.limit = numptrs; in otx2_aura_init()
1319 aq->aura.avg_level = 255; in otx2_aura_init()
1320 aq->aura.ena = 1; in otx2_aura_init()
1321 aq->aura.fc_ena = 1; in otx2_aura_init()
1322 aq->aura.fc_addr = pool->fc_addr->iova; in otx2_aura_init()
1323 aq->aura.fc_hyst_bits = 0; /* Store count on all updates */ in otx2_aura_init()
1326 if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) { in otx2_aura_init()
1327 aq->aura.bp_ena = 0; in otx2_aura_init()
1336 * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to in otx2_aura_init()
1337 * NIX-RX based on [BP] level. One bit per NIX-RX; index in otx2_aura_init()
1340 if (pfvf->nix_blkaddr == BLKADDR_NIX1) in otx2_aura_init()
1341 aq->aura.bp_ena = 1; in otx2_aura_init()
1343 aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]]; in otx2_aura_init()
1345 aq->aura.nix0_bpid = pfvf->bpid[0]; in otx2_aura_init()
1349 aq->aura.bp = RQ_BP_LVL_AURA; in otx2_aura_init()
1353 aq->ctype = NPA_AQ_CTYPE_AURA; in otx2_aura_init()
1354 aq->op = NPA_AQ_INSTOP_INIT; in otx2_aura_init()
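
The aura shift above scales occupancy to the 8-bit pass/drop levels. A worked example, under the reading that the hardware compares (count >> shift) against those levels:

	/* numptrs = 1024 -> shift = ilog2(1024) - 8 = 2
	 * so RQ_BP_LVL_AURA = 38 corresponds to 38 << 2 = 152 buffers.
	 */
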
1367 pool = &pfvf->qset.pool[pool_id]; in otx2_pool_init()
1369 err = qmem_alloc(pfvf->dev, &pool->stack, in otx2_pool_init()
1370 stack_pages, pfvf->hw.stack_pg_bytes); in otx2_pool_init()
1374 pool->rbsize = buf_size; in otx2_pool_init()
1377 aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); in otx2_pool_init()
1380 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_pool_init()
1382 qmem_free(pfvf->dev, pool->stack); in otx2_pool_init()
1385 aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); in otx2_pool_init()
1387 qmem_free(pfvf->dev, pool->stack); in otx2_pool_init()
1388 return -ENOMEM; in otx2_pool_init()
1392 aq->aura_id = pool_id; in otx2_pool_init()
1393 aq->pool.stack_base = pool->stack->iova; in otx2_pool_init()
1394 aq->pool.stack_caching = 1; in otx2_pool_init()
1395 aq->pool.ena = 1; in otx2_pool_init()
1396 aq->pool.buf_size = buf_size / 128; in otx2_pool_init()
1397 aq->pool.stack_max_pages = stack_pages; in otx2_pool_init()
1398 aq->pool.shift = ilog2(numptrs) - 8; in otx2_pool_init()
1399 aq->pool.ptr_start = 0; in otx2_pool_init()
1400 aq->pool.ptr_end = ~0ULL; in otx2_pool_init()
1403 aq->ctype = NPA_AQ_CTYPE_POOL; in otx2_pool_init()
1404 aq->op = NPA_AQ_INSTOP_INIT; in otx2_pool_init()
1407 pool->page_pool = NULL; in otx2_pool_init()
1415 pp_params.dev = pfvf->dev; in otx2_pool_init()
1417 pool->page_pool = page_pool_create(&pp_params); in otx2_pool_init()
1418 if (IS_ERR(pool->page_pool)) { in otx2_pool_init()
1419 netdev_err(pfvf->netdev, "Creation of page pool failed\n"); in otx2_pool_init()
1420 return PTR_ERR(pool->page_pool); in otx2_pool_init()
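
The page-pool setup partially visible above follows the standard page_pool API. A minimal sketch of the elided parameter fill, with sizing and flags hedged:

	struct page_pool_params pp_params = { 0 };

	pp_params.flags = PP_FLAG_DMA_MAP;	/* pool handles DMA mapping */
	pp_params.order = 0;			/* 4 KB pages */
	pp_params.pool_size = numptrs;		/* assumed sizing */
	pp_params.nid = NUMA_NO_NODE;
	pp_params.dev = pfvf->dev;
	pp_params.dma_dir = DMA_FROM_DEVICE;	/* Rx buffers */
	pool->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(pool->page_pool)) {
		netdev_err(pfvf->netdev, "Creation of page pool failed\n");
		return PTR_ERR(pool->page_pool);
	}
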
1429 struct otx2_qset *qset = &pfvf->qset; in otx2_sq_aura_pool_init()
1430 struct otx2_hw *hw = &pfvf->hw; in otx2_sq_aura_pool_init()
1441 num_sqbs = (hw->sqb_size / 128) - 1; in otx2_sq_aura_pool_init()
1442 num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs; in otx2_sq_aura_pool_init()
1446 (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; in otx2_sq_aura_pool_init()
1448 for (qidx = 0; qidx < hw->non_qos_queues; qidx++) { in otx2_sq_aura_pool_init()
1457 num_sqbs, hw->sqb_size, AURA_NIX_SQ); in otx2_sq_aura_pool_init()
1463 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_sq_aura_pool_init()
1468 for (qidx = 0; qidx < hw->non_qos_queues; qidx++) { in otx2_sq_aura_pool_init()
1470 pool = &pfvf->qset.pool[pool_id]; in otx2_sq_aura_pool_init()
1472 sq = &qset->sq[qidx]; in otx2_sq_aura_pool_init()
1473 sq->sqb_count = 0; in otx2_sq_aura_pool_init()
1474 sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL); in otx2_sq_aura_pool_init()
1475 if (!sq->sqb_ptrs) { in otx2_sq_aura_pool_init()
1476 err = -ENOMEM; in otx2_sq_aura_pool_init()
1484 pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr); in otx2_sq_aura_pool_init()
1485 sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr; in otx2_sq_aura_pool_init()
1490 return err ? -ENOMEM : 0; in otx2_sq_aura_pool_init()
1493 otx2_mbox_reset(&pfvf->mbox.mbox, 0); in otx2_sq_aura_pool_init()
1500 struct otx2_hw *hw = &pfvf->hw; in otx2_rq_aura_pool_init()
1506 num_ptrs = pfvf->qset.rqe_cnt; in otx2_rq_aura_pool_init()
1509 (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; in otx2_rq_aura_pool_init()
1511 for (rq = 0; rq < hw->rx_queues; rq++) { in otx2_rq_aura_pool_init()
1518 for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { in otx2_rq_aura_pool_init()
1520 num_ptrs, pfvf->rbsize, AURA_NIX_RQ); in otx2_rq_aura_pool_init()
1526 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_rq_aura_pool_init()
1531 for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { in otx2_rq_aura_pool_init()
1532 pool = &pfvf->qset.pool[pool_id]; in otx2_rq_aura_pool_init()
1536 return -ENOMEM; in otx2_rq_aura_pool_init()
1537 pfvf->hw_ops->aura_freeptr(pfvf, pool_id, in otx2_rq_aura_pool_init()
1543 otx2_mbox_reset(&pfvf->mbox.mbox, 0); in otx2_rq_aura_pool_init()
1550 struct otx2_qset *qset = &pfvf->qset; in otx2_config_npa()
1552 struct otx2_hw *hw = &pfvf->hw; in otx2_config_npa()
1555 /* Pool - Stack of free buffer pointers in otx2_config_npa()
1556 * Aura - Alloc/frees pointers from/to pool for NIX DMA. in otx2_config_npa()
1559 if (!hw->pool_cnt) in otx2_config_npa()
1560 return -EINVAL; in otx2_config_npa()
1562 qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt, in otx2_config_npa()
1564 if (!qset->pool) in otx2_config_npa()
1565 return -ENOMEM; in otx2_config_npa()
1568 npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox); in otx2_config_npa()
1570 return -ENOMEM; in otx2_config_npa()
1573 npalf->nr_pools = hw->pool_cnt; in otx2_config_npa()
1574 aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt)); in otx2_config_npa()
1575 npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1; in otx2_config_npa()
1577 return otx2_sync_mbox_msg(&pfvf->mbox); in otx2_config_npa()
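
The aura_sz computation above encodes the aura count logarithmically, with code 1 meaning 128 auras and each increment doubling that (hedged reading of the NPA encoding). Worked examples:

	/* pool_cnt =  64 -> aura_cnt = 6, 6 < ilog2(128) -> aura_sz = 1
	 * pool_cnt = 128 -> aura_cnt = 7 -> aura_sz = 7 - 6 = 1 (128 auras)
	 * pool_cnt = 256 -> aura_cnt = 8 -> aura_sz = 8 - 6 = 2 (256 auras)
	 */
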
1584 mutex_lock(&mbox->lock); in otx2_detach_resources()
1587 mutex_unlock(&mbox->lock); in otx2_detach_resources()
1588 return -ENOMEM; in otx2_detach_resources()
1592 detach->partial = false; in otx2_detach_resources()
1596 mutex_unlock(&mbox->lock); in otx2_detach_resources()
1607 mutex_lock(&pfvf->mbox.lock); in otx2_attach_npa_nix()
1609 attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox); in otx2_attach_npa_nix()
1611 mutex_unlock(&pfvf->mbox.lock); in otx2_attach_npa_nix()
1612 return -ENOMEM; in otx2_attach_npa_nix()
1615 attach->npalf = true; in otx2_attach_npa_nix()
1616 attach->nixlf = true; in otx2_attach_npa_nix()
1619 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_attach_npa_nix()
1621 mutex_unlock(&pfvf->mbox.lock); in otx2_attach_npa_nix()
1625 pfvf->nix_blkaddr = BLKADDR_NIX0; in otx2_attach_npa_nix()
1631 pfvf->nix_blkaddr = BLKADDR_NIX1; in otx2_attach_npa_nix()
1634 msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox); in otx2_attach_npa_nix()
1636 mutex_unlock(&pfvf->mbox.lock); in otx2_attach_npa_nix()
1637 return -ENOMEM; in otx2_attach_npa_nix()
1640 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_attach_npa_nix()
1642 mutex_unlock(&pfvf->mbox.lock); in otx2_attach_npa_nix()
1645 mutex_unlock(&pfvf->mbox.lock); in otx2_attach_npa_nix()
1647 if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID || in otx2_attach_npa_nix()
1648 pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) { in otx2_attach_npa_nix()
1649 dev_err(pfvf->dev, in otx2_attach_npa_nix()
1651 return -EINVAL; in otx2_attach_npa_nix()
1662 mutex_lock(&mbox->lock); in otx2_ctx_disable()
1670 mutex_unlock(&mbox->lock); in otx2_ctx_disable()
1674 req->ctype = type; in otx2_ctx_disable()
1677 dev_err(mbox->pfvf->dev, "%s failed to disable context\n", in otx2_ctx_disable()
1680 mutex_unlock(&mbox->lock); in otx2_ctx_disable()
1688 req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox); in otx2_nix_config_bp()
1690 req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox); in otx2_nix_config_bp()
1693 return -ENOMEM; in otx2_nix_config_bp()
1695 req->chan_base = 0; in otx2_nix_config_bp()
1697 req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1; in otx2_nix_config_bp()
1698 req->bpid_per_chan = pfvf->pfc_en ? 1 : 0; in otx2_nix_config_bp()
1700 req->chan_cnt = 1; in otx2_nix_config_bp()
1701 req->bpid_per_chan = 0; in otx2_nix_config_bp()
1704 return otx2_sync_mbox_msg(&pfvf->mbox); in otx2_nix_config_bp()
1715 pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id]; in mbox_handler_cgx_stats()
1717 pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id]; in mbox_handler_cgx_stats()
1723 pfvf->hw.cgx_fec_corr_blks += rsp->fec_corr_blks; in mbox_handler_cgx_fec_stats()
1724 pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks; in mbox_handler_cgx_fec_stats()
1730 pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs; in mbox_handler_npa_lf_alloc()
1731 pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes; in mbox_handler_npa_lf_alloc()
1738 pfvf->hw.sqb_size = rsp->sqb_size; in mbox_handler_nix_lf_alloc()
1739 pfvf->hw.rx_chan_base = rsp->rx_chan_base; in mbox_handler_nix_lf_alloc()
1740 pfvf->hw.tx_chan_base = rsp->tx_chan_base; in mbox_handler_nix_lf_alloc()
1741 pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx; in mbox_handler_nix_lf_alloc()
1742 pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; in mbox_handler_nix_lf_alloc()
1743 pfvf->hw.cgx_links = rsp->cgx_links; in mbox_handler_nix_lf_alloc()
1744 pfvf->hw.lbk_links = rsp->lbk_links; in mbox_handler_nix_lf_alloc()
1745 pfvf->hw.tx_link = rsp->tx_link; in mbox_handler_nix_lf_alloc()
1752 pfvf->hw.npa_msixoff = rsp->npa_msixoff; in mbox_handler_msix_offset()
1753 pfvf->hw.nix_msixoff = rsp->nix_msixoff; in mbox_handler_msix_offset()
1762 for (chan = 0; chan < rsp->chan_cnt; chan++) { in mbox_handler_nix_bp_enable()
1763 chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F); in mbox_handler_nix_bp_enable()
1764 pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF; in mbox_handler_nix_bp_enable()
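
Each entry of rsp->chan_bpid packs the channel id and the backpressure id into one word, decoded by the two masks above:

	/* bits 16:10 -> channel id       (>> 10, & 0x7F)
	 * bits  9:0  -> backpressure id  (& 0x3FF)
	 * e.g. chan_bpid = 0x0C05 -> chan_id = 3, bpid = 5
	 */
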
1771 struct otx2_qset *qset = &pfvf->qset; in otx2_free_cints()
1772 struct otx2_hw *hw = &pfvf->hw; in otx2_free_cints()
1775 for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START; in otx2_free_cints()
1778 int vector = pci_irq_vector(pfvf->pdev, irq); in otx2_free_cints()
1781 free_cpumask_var(hw->affinity_mask[irq]); in otx2_free_cints()
1782 free_irq(vector, &qset->napi[qidx]); in otx2_free_cints()
1788 struct otx2_hw *hw = &pfvf->hw; in otx2_set_cints_affinity()
1791 vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START; in otx2_set_cints_affinity()
1795 for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) { in otx2_set_cints_affinity()
1796 if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL)) in otx2_set_cints_affinity()
1799 cpumask_set_cpu(cpu, hw->affinity_mask[vec]); in otx2_set_cints_affinity()
1801 irq = pci_irq_vector(pfvf->pdev, vec); in otx2_set_cints_affinity()
1802 irq_set_affinity_hint(irq, hw->affinity_mask[vec]); in otx2_set_cints_affinity()
1812 if (is_otx2_lbkvf(pfvf->pdev)) { in get_dwrr_mtu()
1813 pfvf->hw.smq_link_type = SMQ_LINK_TYPE_LBK; in get_dwrr_mtu()
1814 return hw->lbk_dwrr_mtu; in get_dwrr_mtu()
1817 pfvf->hw.smq_link_type = SMQ_LINK_TYPE_RPM; in get_dwrr_mtu()
1818 return hw->rpm_dwrr_mtu; in get_dwrr_mtu()
1828 mutex_lock(&pfvf->mbox.lock); in otx2_get_max_mtu()
1830 req = otx2_mbox_alloc_msg_nix_get_hw_info(&pfvf->mbox); in otx2_get_max_mtu()
1832 rc = -ENOMEM; in otx2_get_max_mtu()
1836 rc = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_get_max_mtu()
1839 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); in otx2_get_max_mtu()
1847 max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN; in otx2_get_max_mtu()
1850 pfvf->hw.dwrr_mtu = get_dwrr_mtu(pfvf, rsp); in otx2_get_max_mtu()
1851 if (!pfvf->hw.dwrr_mtu) in otx2_get_max_mtu()
1852 pfvf->hw.dwrr_mtu = 1; in otx2_get_max_mtu()
1856 mutex_unlock(&pfvf->mbox.lock); in otx2_get_max_mtu()
1858 dev_warn(pfvf->dev, in otx2_get_max_mtu()
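
The usable-MTU arithmetic above discounts fixed hardware overheads from the AF-reported maximum. As a worked formula (the 8-byte term is a hedged reading: a budget the hardware reserves for double-VLAN tag insertion whether or not tags are inserted):

	/* max_mtu = rsp->max_mtu
	 *         - 8             (double-VLAN insertion budget)
	 *         - OTX2_ETH_HLEN (Ethernet header accounted separately)
	 */
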
1868 netdev_features_t changed = features ^ netdev->features; in otx2_handle_ntuple_tc_features()
1877 if (!pfvf->flow_cfg->max_flows) { in otx2_handle_ntuple_tc_features()
1880 return -EINVAL; in otx2_handle_ntuple_tc_features()
1887 return -EBUSY; in otx2_handle_ntuple_tc_features()
1894 return -EINVAL; in otx2_handle_ntuple_tc_features()
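
otx2_handle_ntuple_tc_features() gates NETIF_F_NTUPLE/NETIF_F_HW_TC flips on driver state. A hedged summary of the policy visible in this fragment (the exact elided branches may differ):

	/* - clearing NETIF_F_NTUPLE flushes the installed flow entries
	 * - enabling it with no MCAM flow entries available fails (-EINVAL)
	 * - a flip that would strand installed filters is refused (-EBUSY)
	 * - ntuple and hardware TC may not be enabled together (-EINVAL)
	 */
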