Lines Matching +full:queue +full:- +full:rx
1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
8 * This file contains HFI1 support for netdev RX functionality
20 static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_rx *rx, in hfi1_netdev_setup_ctxt() argument
24 struct hfi1_devdata *dd = rx->dd; in hfi1_netdev_setup_ctxt()
27 uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions; in hfi1_netdev_setup_ctxt()
28 uctxt->do_interrupt = &handle_receive_interrupt_napi_sp; in hfi1_netdev_setup_ctxt()
30 /* Now allocate the RcvHdr queue and eager buffers. */ in hfi1_netdev_setup_ctxt()
44 if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR)) in hfi1_netdev_setup_ctxt()
46 if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL)) in hfi1_netdev_setup_ctxt()
48 if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL)) in hfi1_netdev_setup_ctxt()
50 if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL)) in hfi1_netdev_setup_ctxt()
53 hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt); in hfi1_netdev_setup_ctxt()
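The setup path above ORs receive-control operations together based on which capability bits are set on the context, then applies them in a single hfi1_rcvctrl() call. Below is a standalone sketch of that "capability bits to op mask" pattern; the CAP_* and OP_* names are hypothetical stand-ins, not the driver's real constants.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical capability bits and op bits, standing in for the driver's
 * HFI1_CAP_* flags and its receive-control operations. */
#define CAP_MULTI_PKT_EGR	(1u << 0)
#define CAP_NODROP_EGR_FULL	(1u << 1)
#define CAP_DMA_RTAIL		(1u << 2)

#define OP_CTXT_ENB		(1u << 0)
#define OP_ONE_PKT_EGR_ENB	(1u << 1)
#define OP_NO_EGR_DROP_ENB	(1u << 2)
#define OP_TAILUPD_ENB		(1u << 3)

static uint32_t build_rcvctrl_ops(uint32_t caps)
{
	uint32_t ops = OP_CTXT_ENB;		/* always enable the context */

	if (!(caps & CAP_MULTI_PKT_EGR))	/* no multi-packet eager bufs */
		ops |= OP_ONE_PKT_EGR_ENB;	/* -> one packet per buffer   */
	if (caps & CAP_NODROP_EGR_FULL)
		ops |= OP_NO_EGR_DROP_ENB;
	if (caps & CAP_DMA_RTAIL)
		ops |= OP_TAILUPD_ENB;
	return ops;				/* applied in one control call */
}

int main(void)
{
	printf("ops = %#x\n", build_rcvctrl_ops(CAP_NODROP_EGR_FULL | CAP_DMA_RTAIL));
	return 0;
}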
64 if (dd->flags & HFI1_FROZEN) in hfi1_netdev_allocate_ctxt()
65 return -EIO; in hfi1_netdev_allocate_ctxt()
67 ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt); in hfi1_netdev_allocate_ctxt()
70 return -ENOMEM; in hfi1_netdev_allocate_ctxt()
73 uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) | in hfi1_netdev_allocate_ctxt()
78 uctxt->fast_handler = handle_receive_interrupt_napi_fp; in hfi1_netdev_allocate_ctxt()
79 uctxt->slow_handler = handle_receive_interrupt_napi_sp; in hfi1_netdev_allocate_ctxt()
81 uctxt->is_vnic = true; in hfi1_netdev_allocate_ctxt()
85 dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt); in hfi1_netdev_allocate_ctxt()
107 if (uctxt->msix_intr != CCE_NUM_MSIX_VECTORS) in hfi1_netdev_deallocate_ctxt()
108 msix_free_irq(dd, uctxt->msix_intr); in hfi1_netdev_deallocate_ctxt()
110 uctxt->msix_intr = CCE_NUM_MSIX_VECTORS; in hfi1_netdev_deallocate_ctxt()
111 uctxt->event_flags = 0; in hfi1_netdev_deallocate_ctxt()
116 hfi1_stats.sps_ctxts--; in hfi1_netdev_deallocate_ctxt()
121 static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_rx *rx, in hfi1_netdev_allot_ctxt() argument
125 struct hfi1_devdata *dd = rx->dd; in hfi1_netdev_allot_ctxt()
133 rc = hfi1_netdev_setup_ctxt(rx, *ctxt); in hfi1_netdev_allot_ctxt()
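hfi1_netdev_allot_ctxt() composes allocation and configuration: a context is allocated first and then passed to the setup step. The lines handling a failed setup are elided above, so the rollback here is an assumption; this is a minimal userspace sketch of the allocate/configure/roll-back shape with generic names, not the driver's code.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct ctxt { int configured; };

static int setup_ctxt(struct ctxt *c, int fail)
{
	if (fail)
		return -EINVAL;		/* simulate a setup failure */
	c->configured = 1;
	return 0;
}

/* Allocate a context and configure it; undo the allocation if setup fails
 * so the caller never sees a half-initialized context. */
static int allot_ctxt(struct ctxt **out, int fail_setup)
{
	struct ctxt *c = calloc(1, sizeof(*c));
	int rc;

	if (!c)
		return -ENOMEM;

	rc = setup_ctxt(c, fail_setup);
	if (rc) {
		free(c);		/* roll back the allocation */
		*out = NULL;
		return rc;
	}
	*out = c;
	return 0;
}

int main(void)
{
	struct ctxt *c;

	printf("ok path:   %d\n", allot_ctxt(&c, 0));
	free(c);
	printf("fail path: %d\n", allot_ctxt(&c, 1));
	return 0;
}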
144 * hfi1_num_netdev_contexts - Count of netdev recv contexts to use.
176 cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node)); in hfi1_num_netdev_contexts()
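hfi1_num_netdev_contexts() intersects the caller's CPU mask with the CPUs local to the device's NUMA node before deciding how many receive contexts to dedicate to netdev traffic. The sketch below assumes the elided lines clamp that count by the free receive contexts and a driver maximum; the constant and helper names are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define MAX_NETDEV_CTXTS 8u	/* stand-in for the driver's upper bound */

static uint32_t min3_u32(uint32_t a, uint32_t b, uint32_t c)
{
	uint32_t m = a < b ? a : b;

	return m < c ? m : c;
}

/* Contexts to use: CPUs that are both allowed and local to the device's
 * node, capped by free contexts and the driver maximum. */
static uint32_t num_netdev_contexts(uint32_t node_local_cpus,
				    uint32_t available_contexts)
{
	if (!available_contexts)	/* user contexts keep priority */
		return 0;
	return min3_u32(node_local_cpus, available_contexts, MAX_NETDEV_CTXTS);
}

int main(void)
{
	printf("%u\n", num_netdev_contexts(12, 6));	/* -> 6 */
	printf("%u\n", num_netdev_contexts(12, 20));	/* -> 8 */
	return 0;
}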
186 static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx) in hfi1_netdev_rxq_init() argument
190 struct hfi1_devdata *dd = rx->dd; in hfi1_netdev_rxq_init()
191 struct net_device *dev = rx->rx_napi; in hfi1_netdev_rxq_init()
193 rx->num_rx_q = dd->num_netdev_contexts; in hfi1_netdev_rxq_init()
194 rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq), in hfi1_netdev_rxq_init()
195 GFP_KERNEL, dd->node); in hfi1_netdev_rxq_init()
197 if (!rx->rxq) { in hfi1_netdev_rxq_init()
198 dd_dev_err(dd, "Unable to allocate netdev queue data\n"); in hfi1_netdev_rxq_init()
199 return (-ENOMEM); in hfi1_netdev_rxq_init()
202 for (i = 0; i < rx->num_rx_q; i++) { in hfi1_netdev_rxq_init()
203 struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; in hfi1_netdev_rxq_init()
205 rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd); in hfi1_netdev_rxq_init()
209 hfi1_rcd_get(rxq->rcd); in hfi1_netdev_rxq_init()
210 rxq->rx = rx; in hfi1_netdev_rxq_init()
211 rxq->rcd->napi = &rxq->napi; in hfi1_netdev_rxq_init()
212 dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n", in hfi1_netdev_rxq_init()
213 i, rxq->rcd->ctxt); in hfi1_netdev_rxq_init()
218 set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state); in hfi1_netdev_rxq_init()
219 netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi); in hfi1_netdev_rxq_init()
220 rc = msix_netdev_request_rcd_irq(rxq->rcd); in hfi1_netdev_rxq_init()
229 for (; i >= 0; i--) { in hfi1_netdev_rxq_init()
230 struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; in hfi1_netdev_rxq_init()
232 if (rxq->rcd) { in hfi1_netdev_rxq_init()
233 hfi1_netdev_deallocate_ctxt(dd, rxq->rcd); in hfi1_netdev_rxq_init()
234 hfi1_rcd_put(rxq->rcd); in hfi1_netdev_rxq_init()
235 rxq->rcd = NULL; in hfi1_netdev_rxq_init()
238 kfree(rx->rxq); in hfi1_netdev_rxq_init()
239 rx->rxq = NULL; in hfi1_netdev_rxq_init()
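hfi1_netdev_rxq_init() allocates the queue array on the device's NUMA node, then initializes each queue in a loop; on any per-queue failure the bail path walks back over the queues set up so far, releasing their contexts before freeing the array. A compact userspace sketch of that loop-with-unwind pattern (plain malloc stands in for the context and IRQ resources):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct rxq { void *rcd; };

static int init_one(struct rxq *q, int fail)
{
	if (fail)
		return -EIO;			/* simulate a per-queue failure */
	q->rcd = malloc(1);
	return q->rcd ? 0 : -ENOMEM;
}

/* Allocate the queue array, set up each queue, unwind on any failure. */
static int rxq_init(struct rxq **out, int n, int fail_at)
{
	struct rxq *rxq = calloc(n, sizeof(*rxq));
	int i, rc;

	if (!rxq)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		rc = init_one(&rxq[i], i == fail_at);
		if (rc)
			goto bail;
	}
	*out = rxq;
	return 0;

bail:
	for (; i >= 0; i--) {			/* release newest-first */
		free(rxq[i].rcd);
		rxq[i].rcd = NULL;
	}
	free(rxq);
	*out = NULL;
	return rc;
}

int main(void)
{
	struct rxq *q;
	int i;

	if (!rxq_init(&q, 4, -1)) {		/* success: tear down manually */
		for (i = 0; i < 4; i++)
			free(q[i].rcd);
		free(q);
	}
	printf("forced failure: %d\n", rxq_init(&q, 4, 2));
	return 0;
}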
244 static void hfi1_netdev_rxq_deinit(struct hfi1_netdev_rx *rx) in hfi1_netdev_rxq_deinit() argument
247 struct hfi1_devdata *dd = rx->dd; in hfi1_netdev_rxq_deinit()
249 for (i = 0; i < rx->num_rx_q; i++) { in hfi1_netdev_rxq_deinit()
250 struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; in hfi1_netdev_rxq_deinit()
252 netif_napi_del(&rxq->napi); in hfi1_netdev_rxq_deinit()
253 hfi1_netdev_deallocate_ctxt(dd, rxq->rcd); in hfi1_netdev_rxq_deinit()
254 hfi1_rcd_put(rxq->rcd); in hfi1_netdev_rxq_deinit()
255 rxq->rcd = NULL; in hfi1_netdev_rxq_deinit()
258 kfree(rx->rxq); in hfi1_netdev_rxq_deinit()
259 rx->rxq = NULL; in hfi1_netdev_rxq_deinit()
260 rx->num_rx_q = 0; in hfi1_netdev_rxq_deinit()
263 static void enable_queues(struct hfi1_netdev_rx *rx) in enable_queues() argument
267 for (i = 0; i < rx->num_rx_q; i++) { in enable_queues()
268 struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; in enable_queues()
270 dd_dev_info(rx->dd, "enabling queue %d on context %d\n", i, in enable_queues()
271 rxq->rcd->ctxt); in enable_queues()
272 napi_enable(&rxq->napi); in enable_queues()
273 hfi1_rcvctrl(rx->dd, in enable_queues()
275 rxq->rcd); in enable_queues()
279 static void disable_queues(struct hfi1_netdev_rx *rx) in disable_queues() argument
283 msix_netdev_synchronize_irq(rx->dd); in disable_queues()
285 for (i = 0; i < rx->num_rx_q; i++) { in disable_queues()
286 struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; in disable_queues()
288 dd_dev_info(rx->dd, "disabling queue %d on context %d\n", i, in disable_queues()
289 rxq->rcd->ctxt); in disable_queues()
292 hfi1_rcvctrl(rx->dd, in disable_queues()
294 rxq->rcd); in disable_queues()
295 napi_synchronize(&rxq->napi); in disable_queues()
296 napi_disable(&rxq->napi); in disable_queues()
301  * hfi1_netdev_rx_init - Increments the netdevs counter. When called the first time,
302 * it allocates receive queue data and calls netif_napi_add
303 * for each queue.
309 struct hfi1_netdev_rx *rx = dd->netdev_rx; in hfi1_netdev_rx_init() local
312 if (atomic_fetch_inc(&rx->netdevs)) in hfi1_netdev_rx_init()
316 res = hfi1_netdev_rxq_init(rx); in hfi1_netdev_rx_init()
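The init path uses an atomic fetch-and-increment on the netdevs counter as a gate: only the caller that moves it from 0 to 1 allocates the receive queues, later callers just take a reference. A C11 sketch of that gate, assuming <stdatomic.h> (the kernel code uses atomic_t and atomic_fetch_inc() instead):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int netdevs;
static int queues_ready;

static int rx_init(void)
{
	/* Old value 0 means we are the first user and must do the setup. */
	if (atomic_fetch_add(&netdevs, 1) == 0)
		queues_ready = 1;	/* stands in for the queue allocation */
	return 0;
}

int main(void)
{
	rx_init();
	rx_init();
	printf("netdevs=%d ready=%d\n", atomic_load(&netdevs), queues_ready);
	return 0;
}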
322  * hfi1_netdev_rx_destroy - Decrements the netdevs counter; when it reaches 0
329 struct hfi1_netdev_rx *rx = dd->netdev_rx; in hfi1_netdev_rx_destroy() local
331 /* destroy the RX queues only if it is the last netdev going away */ in hfi1_netdev_rx_destroy()
332 if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) { in hfi1_netdev_rx_destroy()
334 hfi1_netdev_rxq_deinit(rx); in hfi1_netdev_rx_destroy()
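The destroy path decrements with atomic_fetch_add_unless(..., -1, 0): the counter never drops below zero, and only the caller that observes the old value 1 tears the queues down. In userspace that primitive can be expressed as a compare-exchange loop; a sketch, assuming C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

/* Add 'a' to *v unless *v equals 'u'; return the old value.  This mirrors
 * the kernel's atomic_fetch_add_unless() used in the destroy path above. */
static int fetch_add_unless(atomic_int *v, int a, int u)
{
	int old = atomic_load(v);

	do {
		if (old == u)
			break;		/* never move past the limit value */
	} while (!atomic_compare_exchange_weak(v, &old, old + a));
	return old;
}

static atomic_int netdevs = 2;

int main(void)
{
	if (fetch_add_unless(&netdevs, -1, 0) == 1)
		printf("last user: tear down the rx queues\n");
	else
		printf("%d user(s) still registered\n", atomic_load(&netdevs));
	return 0;
}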
342 * hfi1_alloc_rx - Allocates the rx support structure
345 * Allocate the rx structure to support gathering the receive
350  * Return: 0 on success, -error on failure
355 struct hfi1_netdev_rx *rx; in hfi1_alloc_rx() local
357 dd_dev_info(dd, "allocating rx size %ld\n", sizeof(*rx)); in hfi1_alloc_rx()
358 rx = kzalloc_node(sizeof(*rx), GFP_KERNEL, dd->node); in hfi1_alloc_rx()
360 if (!rx) in hfi1_alloc_rx()
361 return -ENOMEM; in hfi1_alloc_rx()
362 rx->dd = dd; in hfi1_alloc_rx()
363 rx->rx_napi = alloc_netdev_dummy(0); in hfi1_alloc_rx()
364 if (!rx->rx_napi) { in hfi1_alloc_rx()
365 kfree(rx); in hfi1_alloc_rx()
366 return -ENOMEM; in hfi1_alloc_rx()
369 xa_init(&rx->dev_tbl); in hfi1_alloc_rx()
370 atomic_set(&rx->enabled, 0); in hfi1_alloc_rx()
371 atomic_set(&rx->netdevs, 0); in hfi1_alloc_rx()
372 dd->netdev_rx = rx; in hfi1_alloc_rx()
379 if (dd->netdev_rx) { in hfi1_free_rx()
380 dd_dev_info(dd, "hfi1 rx freed\n"); in hfi1_free_rx()
381 free_netdev(dd->netdev_rx->rx_napi); in hfi1_free_rx()
382 kfree(dd->netdev_rx); in hfi1_free_rx()
383 dd->netdev_rx = NULL; in hfi1_free_rx()
388  * hfi1_netdev_enable_queues - This is the napi enable function.
392 * calls napi_disable for every queue.
398 struct hfi1_netdev_rx *rx; in hfi1_netdev_enable_queues() local
400 if (!dd->netdev_rx) in hfi1_netdev_enable_queues()
403 rx = dd->netdev_rx; in hfi1_netdev_enable_queues()
404 if (atomic_fetch_inc(&rx->enabled)) in hfi1_netdev_enable_queues()
408 enable_queues(rx); in hfi1_netdev_enable_queues()
414 struct hfi1_netdev_rx *rx; in hfi1_netdev_disable_queues() local
416 if (!dd->netdev_rx) in hfi1_netdev_disable_queues()
419 rx = dd->netdev_rx; in hfi1_netdev_disable_queues()
420 if (atomic_dec_if_positive(&rx->enabled)) in hfi1_netdev_disable_queues()
424 disable_queues(rx); in hfi1_netdev_disable_queues()
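The enable/disable wrappers keep a second counter, rx->enabled: the first enabler (old value 0) actually enables the queues, and the disable side uses a decrement-if-positive so the counter cannot underflow and only the 1 -> 0 transition really disables. A sketch of decrement-if-positive as a compare-exchange loop, assuming C11 atomics (the kernel provides atomic_dec_if_positive() for this):

#include <stdatomic.h>
#include <stdio.h>

/* Decrement *v unless that would make it negative; return old - 1 either
 * way, mirroring the kernel's atomic_dec_if_positive(). */
static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);
	int dec;

	do {
		dec = old - 1;
		if (dec < 0)
			break;		/* counter already at 0: no change */
	} while (!atomic_compare_exchange_weak(v, &old, dec));
	return dec;
}

static atomic_int enabled = 1;

int main(void)
{
	if (dec_if_positive(&enabled) == 0)
		printf("last enabler gone: disable the queues\n");
	else
		printf("queues stay enabled (or were never enabled)\n");
	return 0;
}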
429  * hfi1_netdev_add_data - Registers data with a unique identifier
440 struct hfi1_netdev_rx *rx = dd->netdev_rx; in hfi1_netdev_add_data() local
442 return xa_insert(&rx->dev_tbl, id, data, GFP_NOWAIT); in hfi1_netdev_add_data()
446  * hfi1_netdev_remove_data - Removes data with a previously given id.
454 struct hfi1_netdev_rx *rx = dd->netdev_rx; in hfi1_netdev_remove_data() local
456 return xa_erase(&rx->dev_tbl, id); in hfi1_netdev_remove_data()
460  * hfi1_netdev_get_data - Gets data with the given id
467 struct hfi1_netdev_rx *rx = dd->netdev_rx; in hfi1_netdev_get_data() local
469 return xa_load(&rx->dev_tbl, id); in hfi1_netdev_get_data()
473  * hfi1_netdev_get_first_data - Gets the first entry with an id greater than or equal to the given one.
480 struct hfi1_netdev_rx *rx = dd->netdev_rx; in hfi1_netdev_get_first_data() local
484 ret = xa_find(&rx->dev_tbl, &index, UINT_MAX, XA_PRESENT); in hfi1_netdev_get_first_data()
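The *_data helpers wrap an XArray keyed by an integer id: xa_insert() registers a pointer, xa_erase() removes it, xa_load() looks it up, and xa_find() returns the first present entry at or above a starting index. Below is a tiny userspace stand-in that uses a fixed-size pointer table instead of the kernel XArray, just to show the same add/remove/get/get-first semantics; the function names mirror the driver's helpers but the code is not the driver's.

#include <stdio.h>

#define TBL_SIZE 16

static void *dev_tbl[TBL_SIZE];

static int add_data(int id, void *data)
{
	if (id < 0 || id >= TBL_SIZE || dev_tbl[id])
		return -1;		/* xa_insert() also rejects duplicates */
	dev_tbl[id] = data;
	return 0;
}

static void *remove_data(int id)
{
	void *old = dev_tbl[id];

	dev_tbl[id] = NULL;		/* xa_erase() returns the old entry */
	return old;
}

static void *get_data(int id)
{
	return (id >= 0 && id < TBL_SIZE) ? dev_tbl[id] : NULL;	/* xa_load() */
}

/* First entry with index >= *id, in the spirit of xa_find(..., XA_PRESENT). */
static void *get_first_data(int *id)
{
	for (; *id < TBL_SIZE; (*id)++)
		if (dev_tbl[*id])
			return dev_tbl[*id];
	return NULL;
}

int main(void)
{
	int values[2] = { 10, 20 }, start = 0;
	int *first;

	add_data(3, &values[0]);
	add_data(7, &values[1]);
	first = get_first_data(&start);
	printf("get(7) -> %d, first id -> %d (%d)\n",
	       *(int *)get_data(7), start, *first);
	remove_data(3);
	return 0;
}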