Lines matching refs: vsi (Linux ice driver AF_XDP zero-copy support, drivers/net/ethernet/intel/ice/ice_xsk.c)

25 static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)  in ice_qp_reset_stats()  argument
30 pf = vsi->back; in ice_qp_reset_stats()
34 vsi_stat = pf->vsi_stats[vsi->idx]; in ice_qp_reset_stats()
42 if (vsi->xdp_rings) in ice_qp_reset_stats()
43 memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0, in ice_qp_reset_stats()
44 sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats)); in ice_qp_reset_stats()
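
The matches at lines 25-44 come from ice_qp_reset_stats(), which zeroes the per-queue counters for one queue pair; the XDP Tx ring's stats are cleared only when XDP rings exist. A minimal sketch assembled from the matched lines; the two memsets through vsi_stat sit on unmatched lines, so their field names are assumptions (taken from how mainline ice lays out ice_vsi_stats):

    static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
    {
            struct ice_vsi_stats *vsi_stat;
            struct ice_pf *pf;

            pf = vsi->back;
            vsi_stat = pf->vsi_stats[vsi->idx];

            /* assumption: the unmatched lines zero the queue's Rx/Tx
             * counters roughly like this */
            memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
                   sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
            memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
                   sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));

            if (vsi->xdp_rings)
                    memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
                           sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
    }
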
52 static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx) in ice_qp_clean_rings() argument
54 ice_clean_tx_ring(vsi->tx_rings[q_idx]); in ice_qp_clean_rings()
55 if (vsi->xdp_rings) in ice_qp_clean_rings()
56 ice_clean_tx_ring(vsi->xdp_rings[q_idx]); in ice_qp_clean_rings()
57 ice_clean_rx_ring(vsi->rx_rings[q_idx]); in ice_qp_clean_rings()
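
ice_qp_clean_rings() (lines 52-57) drains one queue pair in order: the regular Tx ring, the optional XDP Tx ring, then the Rx ring. The matched lines are contiguous, so only statements that never mention vsi (if any) are missing from this sketch:

    static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
    {
            ice_clean_tx_ring(vsi->tx_rings[q_idx]);
            if (vsi->xdp_rings)
                    ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
            ice_clean_rx_ring(vsi->rx_rings[q_idx]);
    }
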
67 ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector, in ice_qvec_toggle_napi() argument
70 if (!vsi->netdev || !q_vector) in ice_qvec_toggle_napi()
86 ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring, in ice_qvec_dis_irq() argument
89 struct ice_pf *pf = vsi->back; in ice_qvec_dis_irq()
116 ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid) in ice_qvec_cfg_msix() argument
119 struct ice_pf *pf = vsi->back; in ice_qvec_cfg_msix()
126 ice_cfg_txq_interrupt(vsi, _qid, reg_idx, q_vector->tx.itr_idx); in ice_qvec_cfg_msix()
133 ice_cfg_rxq_interrupt(vsi, _qid, reg_idx, q_vector->rx.itr_idx); in ice_qvec_cfg_msix()
145 static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector) in ice_qvec_ena_irq() argument
147 struct ice_pf *pf = vsi->back; in ice_qvec_ena_irq()
150 ice_irq_dynamic_ena(hw, vsi, q_vector); in ice_qvec_ena_irq()
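
Lines 67-150 touch the four per-vector helpers: ice_qvec_toggle_napi() bails for VSIs without a netdev or a vector, ice_qvec_dis_irq() and ice_qvec_ena_irq() mask and unmask the queue pair's MSI-X vector through the PF's hardware handle, and ice_qvec_cfg_msix() points the queue interrupts back at the vector. A sketch of the last one; the loop structure around the two matched calls is inferred from the _qid counter, and the ITR/flush calls are assumptions beyond the matched lines:

    static void
    ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
    {
            u16 reg_idx = q_vector->reg_idx;
            struct ice_pf *pf = vsi->back;
            struct ice_hw *hw = &pf->hw;
            int q, _qid = qid;

            ice_cfg_itr(hw, q_vector);      /* assumption: not in the matches */

            for (q = 0; q < q_vector->num_ring_tx; q++) {   /* loop inferred */
                    ice_cfg_txq_interrupt(vsi, _qid, reg_idx,
                                          q_vector->tx.itr_idx);
                    _qid++;
            }

            _qid = qid;

            for (q = 0; q < q_vector->num_ring_rx; q++) {   /* loop inferred */
                    ice_cfg_rxq_interrupt(vsi, _qid, reg_idx,
                                          q_vector->rx.itr_idx);
                    _qid++;
            }

            ice_flush(hw);                  /* assumption: not in the matches */
    }
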
162 static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) in ice_qp_dis() argument
171 if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq) in ice_qp_dis()
174 tx_ring = vsi->tx_rings[q_idx]; in ice_qp_dis()
175 rx_ring = vsi->rx_rings[q_idx]; in ice_qp_dis()
179 netif_carrier_off(vsi->netdev); in ice_qp_dis()
180 netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); in ice_qp_dis()
182 ice_qvec_dis_irq(vsi, rx_ring, q_vector); in ice_qp_dis()
183 ice_qvec_toggle_napi(vsi, q_vector, false); in ice_qp_dis()
185 ice_fill_txq_meta(vsi, tx_ring, &txq_meta); in ice_qp_dis()
186 err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta); in ice_qp_dis()
189 if (vsi->xdp_rings) { in ice_qp_dis()
190 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx]; in ice_qp_dis()
193 ice_fill_txq_meta(vsi, xdp_ring, &txq_meta); in ice_qp_dis()
194 err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring, in ice_qp_dis()
200 ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false); in ice_qp_dis()
201 ice_qp_clean_rings(vsi, q_idx); in ice_qp_dis()
202 ice_qp_reset_stats(vsi, q_idx); in ice_qp_dis()
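
ice_qp_dis() (lines 162-202) is the teardown half of the pause/resume dance around pool changes: stop the stack's queue, quiesce the interrupt and NAPI, stop the Tx ring (and the XDP Tx ring when present), disable the Rx ring, then clean the rings and reset their stats. A condensed sketch assembled from the matched lines; the -EINVAL return, the err checks, the q_vector lookup, and anything on unmatched lines are assumptions:

    static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
    {
            struct ice_txq_meta txq_meta = { };
            struct ice_q_vector *q_vector;
            struct ice_tx_ring *tx_ring;
            struct ice_rx_ring *rx_ring;
            int err;

            if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
                    return -EINVAL;                 /* assumed error code */

            tx_ring = vsi->tx_rings[q_idx];
            rx_ring = vsi->rx_rings[q_idx];
            q_vector = rx_ring->q_vector;           /* assumption: not matched */

            netif_carrier_off(vsi->netdev);
            netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

            ice_qvec_dis_irq(vsi, rx_ring, q_vector);
            ice_qvec_toggle_napi(vsi, q_vector, false);

            ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
            err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
            if (err)
                    return err;                     /* assumed unwind */

            if (vsi->xdp_rings) {
                    struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

                    memset(&txq_meta, 0, sizeof(txq_meta));
                    ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
                    err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
                                               &txq_meta);
                    if (err)
                            return err;             /* assumed unwind */
            }

            ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
            ice_qp_clean_rings(vsi, q_idx);
            ice_qp_reset_stats(vsi, q_idx);

            return 0;
    }
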
214 static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) in ice_qp_ena() argument
221 err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx); in ice_qp_ena()
225 if (ice_is_xdp_ena_vsi(vsi)) { in ice_qp_ena()
226 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx]; in ice_qp_ena()
228 err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx); in ice_qp_ena()
232 ice_tx_xsk_pool(vsi, q_idx); in ice_qp_ena()
235 err = ice_vsi_cfg_single_rxq(vsi, q_idx); in ice_qp_ena()
239 q_vector = vsi->rx_rings[q_idx]->q_vector; in ice_qp_ena()
240 ice_qvec_cfg_msix(vsi, q_vector, q_idx); in ice_qp_ena()
242 err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true); in ice_qp_ena()
246 ice_qvec_toggle_napi(vsi, q_vector, true); in ice_qp_ena()
247 ice_qvec_ena_irq(vsi, q_vector); in ice_qp_ena()
251 ice_get_link_status(vsi->port_info, &link_up); in ice_qp_ena()
253 netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); in ice_qp_ena()
254 netif_carrier_on(vsi->netdev); in ice_qp_ena()
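
ice_qp_ena() (lines 214-254) mirrors the teardown: reconfigure the Tx, XDP Tx, and Rx queues, re-point the MSI-X vector, start the Rx ring, wake NAPI and the IRQ, then restart the stack's queue. A sketch; the err checks and the link_up gate around the queue restart are assumptions, as is everything on unmatched lines:

    static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
    {
            struct ice_q_vector *q_vector;
            bool link_up = false;
            int err;

            err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
            if (err)
                    return err;

            if (ice_is_xdp_ena_vsi(vsi)) {
                    /* matched line 226 declares xdp_ring; its use sits on
                     * unmatched lines */
                    struct ice_tx_ring *xdp_ring __maybe_unused =
                            vsi->xdp_rings[q_idx];

                    err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
                    if (err)
                            return err;
                    /* presumably binds the queue's XSK pool to the XDP ring */
                    ice_tx_xsk_pool(vsi, q_idx);
            }

            err = ice_vsi_cfg_single_rxq(vsi, q_idx);
            if (err)
                    return err;

            q_vector = vsi->rx_rings[q_idx]->q_vector;
            ice_qvec_cfg_msix(vsi, q_vector, q_idx);

            err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
            if (err)
                    return err;

            ice_qvec_toggle_napi(vsi, q_vector, true);
            ice_qvec_ena_irq(vsi, q_vector);

            ice_get_link_status(vsi->port_info, &link_up);
            if (link_up) {                          /* gate assumed */
                    netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
                    netif_carrier_on(vsi->netdev);
            }

            return 0;
    }
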
267 static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid) in ice_xsk_pool_disable() argument
269 struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid); in ice_xsk_pool_disable()
288 ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) in ice_xsk_pool_enable() argument
292 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) in ice_xsk_pool_enable()
295 if (qid >= vsi->netdev->real_num_rx_queues || in ice_xsk_pool_enable()
296 qid >= vsi->netdev->real_num_tx_queues) in ice_xsk_pool_enable()
299 err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back), in ice_xsk_pool_enable()
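
ice_xsk_pool_disable() (line 269) looks the pool up by queue id, and ice_xsk_pool_enable() (lines 288-299) gates on the VSI type (PF or subfunction) and on the queue id fitting the netdev's real queue counts before DMA-mapping the pool against the PF's device. Sketch of the enable path; the error codes, the DMA-attr argument on the unmatched continuation line, and everything after the mapping are assumptions:

    static int
    ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
    {
            int err;

            if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF)
                    return -EINVAL;                 /* assumed error code */

            if (qid >= vsi->netdev->real_num_rx_queues ||
                qid >= vsi->netdev->real_num_tx_queues)
                    return -EINVAL;                 /* assumed error code */

            err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
                                   ICE_RX_DMA_ATTR); /* attr arg assumed */
            if (err)
                    return err;

            /* ... remaining setup not visible in the matches ... */
            return 0;
    }
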
348 int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc) in ice_realloc_zc_buf() argument
353 ice_for_each_rxq(vsi, i) { in ice_realloc_zc_buf()
354 rx_ring = vsi->rx_rings[i]; in ice_realloc_zc_buf()
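
ice_realloc_zc_buf() (lines 348-354) walks every Rx ring to swap its buffer backing between copy and zero-copy mode; only the loop scaffolding mentions vsi, so the per-ring work stays a comment here:

    int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
    {
            struct ice_rx_ring *rx_ring;
            int i;

            ice_for_each_rxq(vsi, i) {
                    rx_ring = vsi->rx_rings[i];
                    /* per-ring realloc between copy and zero-copy buffers;
                     * the body sits on unmatched lines */
                    (void)rx_ring;
            }

            return 0;                               /* assumed */
    }
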
373 int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) in ice_xsk_pool_setup() argument
378 if (qid >= vsi->num_rxq || qid >= vsi->num_txq) { in ice_xsk_pool_setup()
379 netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n"); in ice_xsk_pool_setup()
384 if_running = !test_bit(ICE_VSI_DOWN, vsi->state) && in ice_xsk_pool_setup()
385 ice_is_xdp_ena_vsi(vsi); in ice_xsk_pool_setup()
388 struct ice_rx_ring *rx_ring = vsi->rx_rings[qid]; in ice_xsk_pool_setup()
390 ret = ice_qp_dis(vsi, qid); in ice_xsk_pool_setup()
392 netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret); in ice_xsk_pool_setup()
401 pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) : in ice_xsk_pool_setup()
402 ice_xsk_pool_disable(vsi, qid); in ice_xsk_pool_setup()
406 ret = ice_qp_ena(vsi, qid); in ice_xsk_pool_setup()
408 napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi); in ice_xsk_pool_setup()
410 netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret); in ice_xsk_pool_setup()
415 netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n", in ice_xsk_pool_setup()
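
ice_xsk_pool_setup() (lines 373-415) ties the pieces together as the driver's entry point for attaching or detaching an XSK buffer pool: validate the queue id, and if the interface is running with XDP enabled, disable the queue pair, enable or disable the pool, re-enable the pair, and kick NAPI on the XDP ring so the zero-copy path starts polling. A sketch of that control flow; the label name, the exact returns, and the use of rx_ring from line 388 (presumably an Rx buffer rework on unmatched lines) are assumptions:

    int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
    {
            bool if_running, pool_present = !!pool;
            int ret = 0, pool_failure = 0;

            if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
                    netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
                    return -EINVAL;                 /* assumed error code */
            }

            if_running = !test_bit(ICE_VSI_DOWN, vsi->state) &&
                         ice_is_xdp_ena_vsi(vsi);

            if (if_running) {
                    /* matched line 388 declares rx_ring; its use sits on
                     * unmatched lines */
                    struct ice_rx_ring *rx_ring __maybe_unused =
                            vsi->rx_rings[qid];

                    ret = ice_qp_dis(vsi, qid);
                    if (ret) {
                            netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
                            goto xsk_pool_if_up;    /* label name assumed */
                    }
            }

            pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
                                          ice_xsk_pool_disable(vsi, qid);

    xsk_pool_if_up:
            if (if_running) {
                    ret = ice_qp_ena(vsi, qid);
                    if (!ret && pool_present)
                            napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
                    else if (ret)
                            netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
            }

            if (pool_failure) {
                    netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
                               pool_present ? "en" : "dis", pool_failure);
                    return pool_failure;
            }

            return ret;
    }
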
1076 if (!netif_carrier_ok(xdp_ring->vsi->netdev) || in ice_xmit_zc()
1077 !netif_running(xdp_ring->vsi->netdev)) in ice_xmit_zc()
1121 struct ice_vsi *vsi = np->vsi; in ice_xsk_wakeup() local
1124 if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev)) in ice_xsk_wakeup()
1127 if (!ice_is_xdp_ena_vsi(vsi)) in ice_xsk_wakeup()
1130 if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq) in ice_xsk_wakeup()
1133 ring = vsi->rx_rings[queue_id]->xdp_ring; in ice_xsk_wakeup()
1146 ice_trigger_sw_intr(&vsi->back->hw, q_vector); in ice_xsk_wakeup()
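
ice_xsk_wakeup() (lines 1121-1146) backs the AF_XDP wakeup path: bail out when the VSI is down, the carrier is off, XDP is not enabled, or the queue id is out of range, then fire a software interrupt on the XDP ring's vector so its NAPI poll runs. Sketch; the error codes and the xsk_pool check are assumptions:

    int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags)
    {
            struct ice_netdev_priv *np = netdev_priv(netdev);
            struct ice_q_vector *q_vector;
            struct ice_vsi *vsi = np->vsi;
            struct ice_tx_ring *ring;

            if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev))
                    return -ENETDOWN;               /* assumed error code */

            if (!ice_is_xdp_ena_vsi(vsi))
                    return -EINVAL;                 /* assumed error code */

            if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
                    return -EINVAL;                 /* assumed error code */

            ring = vsi->rx_rings[queue_id]->xdp_ring;

            if (!ring->xsk_pool)                    /* check assumed */
                    return -EINVAL;

            q_vector = ring->q_vector;              /* assumption: not matched */
            ice_trigger_sw_intr(&vsi->back->hw, q_vector);

            return 0;
    }
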
1157 bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi) in ice_xsk_any_rx_ring_ena() argument
1161 ice_for_each_rxq(vsi, i) { in ice_xsk_any_rx_ring_ena()
1162 if (xsk_get_pool_from_qid(vsi->netdev, i)) in ice_xsk_any_rx_ring_ena()
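
ice_xsk_any_rx_ring_ena() (lines 1157-1162) just asks whether any Rx queue id has a pool registered against the netdev; only the return statements are assumed here:

    bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
    {
            int i;

            ice_for_each_rxq(vsi, i) {
                    if (xsk_get_pool_from_qid(vsi->netdev, i))
                            return true;
            }

            return false;
    }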