Lines Matching "num-tx-queues"
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
56 if (txqueue >= adapter->num_io_queues) { in ena_tx_timeout()
57 netdev_err(dev, "TX timeout on invalid queue %u\n", txqueue); in ena_tx_timeout()
61 threshold = jiffies_to_usecs(dev->watchdog_timeo); in ena_tx_timeout()
62 tx_ring = &adapter->tx_ring[txqueue]; in ena_tx_timeout()
64 time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies); in ena_tx_timeout()
65 napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED); in ena_tx_timeout()
68 …"TX q %d is paused for too long (threshold %u). Time since last napi %u usec. napi scheduled: %d\n… in ena_tx_timeout()
83 if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) in ena_tx_timeout()
87 ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp); in ena_tx_timeout()
94 for (i = 0; i < adapter->num_io_queues; i++) in update_rx_ring_mtu()
95 adapter->rx_ring[i].mtu = mtu; in update_rx_ring_mtu()
103 ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); in ena_change_mtu()
107 WRITE_ONCE(dev->mtu, new_mtu); in ena_change_mtu()
125 if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq, in ena_xmit_common()
127 netif_dbg(adapter, tx_queued, adapter->netdev, in ena_xmit_common()
128 "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n", in ena_xmit_common()
129 ring->qid); in ena_xmit_common()
134 rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx, in ena_xmit_common()
142 netif_err(adapter, tx_queued, adapter->netdev, in ena_xmit_common()
143 "Failed to prepare tx bufs\n"); in ena_xmit_common()
144 ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp); in ena_xmit_common()
145 if (rc != -ENOMEM) in ena_xmit_common()
150 u64_stats_update_begin(&ring->syncp); in ena_xmit_common()
151 ring->tx_stats.cnt++; in ena_xmit_common()
152 ring->tx_stats.bytes += bytes; in ena_xmit_common()
153 u64_stats_update_end(&ring->syncp); in ena_xmit_common()
155 tx_info->tx_descs = nb_hw_desc; in ena_xmit_common()
156 tx_info->total_tx_size = bytes; in ena_xmit_common()
157 tx_info->last_jiffies = jiffies; in ena_xmit_common()
158 tx_info->print_once = 0; in ena_xmit_common()
160 ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, in ena_xmit_common()
161 ring->ring_size); in ena_xmit_common()
171 adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues); in ena_init_rx_cpu_rmap()
172 if (!adapter->netdev->rx_cpu_rmap) in ena_init_rx_cpu_rmap()
173 return -ENOMEM; in ena_init_rx_cpu_rmap()
174 for (i = 0; i < adapter->num_io_queues; i++) { in ena_init_rx_cpu_rmap()
177 rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap, in ena_init_rx_cpu_rmap()
178 pci_irq_vector(adapter->pdev, irq_idx)); in ena_init_rx_cpu_rmap()
180 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); in ena_init_rx_cpu_rmap()
181 adapter->netdev->rx_cpu_rmap = NULL; in ena_init_rx_cpu_rmap()
192 ring->qid = qid; in ena_init_io_rings_common()
193 ring->pdev = adapter->pdev; in ena_init_io_rings_common()
194 ring->dev = &adapter->pdev->dev; in ena_init_io_rings_common()
195 ring->netdev = adapter->netdev; in ena_init_io_rings_common()
196 ring->napi = &adapter->ena_napi[qid].napi; in ena_init_io_rings_common()
197 ring->adapter = adapter; in ena_init_io_rings_common()
198 ring->ena_dev = adapter->ena_dev; in ena_init_io_rings_common()
199 ring->per_napi_packets = 0; in ena_init_io_rings_common()
200 ring->cpu = 0; in ena_init_io_rings_common()
201 ring->numa_node = 0; in ena_init_io_rings_common()
202 ring->no_interrupt_event_cnt = 0; in ena_init_io_rings_common()
203 u64_stats_init(&ring->syncp); in ena_init_io_rings_common()
213 ena_dev = adapter->ena_dev; in ena_init_io_rings()
216 txr = &adapter->tx_ring[i]; in ena_init_io_rings()
217 rxr = &adapter->rx_ring[i]; in ena_init_io_rings()
219 /* TX common ring state */ in ena_init_io_rings()
222 /* TX specific ring state */ in ena_init_io_rings()
223 txr->ring_size = adapter->requested_tx_ring_size; in ena_init_io_rings()
224 txr->tx_max_header_size = ena_dev->tx_max_header_size; in ena_init_io_rings()
225 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; in ena_init_io_rings()
226 txr->sgl_size = adapter->max_tx_sgl_size; in ena_init_io_rings()
227 txr->smoothed_interval = in ena_init_io_rings()
229 txr->disable_meta_caching = adapter->disable_meta_caching; in ena_init_io_rings()
230 spin_lock_init(&txr->xdp_tx_lock); in ena_init_io_rings()
232 /* Don't init RX queues for xdp queues */ in ena_init_io_rings()
238 rxr->ring_size = adapter->requested_rx_ring_size; in ena_init_io_rings()
239 rxr->rx_copybreak = adapter->rx_copybreak; in ena_init_io_rings()
240 rxr->sgl_size = adapter->max_rx_sgl_size; in ena_init_io_rings()
241 rxr->smoothed_interval = in ena_init_io_rings()
243 rxr->empty_rx_queue = 0; in ena_init_io_rings()
244 rxr->rx_headroom = NET_SKB_PAD; in ena_init_io_rings()
245 adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in ena_init_io_rings()
246 rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues]; in ena_init_io_rings()
251 /* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
259 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_setup_tx_resources()
260 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)]; in ena_setup_tx_resources()
263 if (tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
265 adapter->netdev, "tx_buffer_info is not NULL"); in ena_setup_tx_resources()
266 return -EEXIST; in ena_setup_tx_resources()
269 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; in ena_setup_tx_resources()
270 node = cpu_to_node(ena_irq->cpu); in ena_setup_tx_resources()
272 tx_ring->tx_buffer_info = vzalloc_node(size, node); in ena_setup_tx_resources()
273 if (!tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
274 tx_ring->tx_buffer_info = vzalloc(size); in ena_setup_tx_resources()
275 if (!tx_ring->tx_buffer_info) in ena_setup_tx_resources()
279 size = sizeof(u16) * tx_ring->ring_size; in ena_setup_tx_resources()
280 tx_ring->free_ids = vzalloc_node(size, node); in ena_setup_tx_resources()
281 if (!tx_ring->free_ids) { in ena_setup_tx_resources()
282 tx_ring->free_ids = vzalloc(size); in ena_setup_tx_resources()
283 if (!tx_ring->free_ids) in ena_setup_tx_resources()
287 size = tx_ring->tx_max_header_size; in ena_setup_tx_resources()
288 tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node); in ena_setup_tx_resources()
289 if (!tx_ring->push_buf_intermediate_buf) { in ena_setup_tx_resources()
290 tx_ring->push_buf_intermediate_buf = vzalloc(size); in ena_setup_tx_resources()
291 if (!tx_ring->push_buf_intermediate_buf) in ena_setup_tx_resources()
295 /* Req id ring for TX out of order completions */ in ena_setup_tx_resources()
296 for (i = 0; i < tx_ring->ring_size; i++) in ena_setup_tx_resources()
297 tx_ring->free_ids[i] = i; in ena_setup_tx_resources()
299 /* Reset tx statistics */ in ena_setup_tx_resources()
300 memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats)); in ena_setup_tx_resources()
302 tx_ring->next_to_use = 0; in ena_setup_tx_resources()
303 tx_ring->next_to_clean = 0; in ena_setup_tx_resources()
304 tx_ring->cpu = ena_irq->cpu; in ena_setup_tx_resources()
305 tx_ring->numa_node = node; in ena_setup_tx_resources()
309 vfree(tx_ring->free_ids); in ena_setup_tx_resources()
310 tx_ring->free_ids = NULL; in ena_setup_tx_resources()
312 vfree(tx_ring->tx_buffer_info); in ena_setup_tx_resources()
313 tx_ring->tx_buffer_info = NULL; in ena_setup_tx_resources()
315 return -ENOMEM; in ena_setup_tx_resources()
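
The allocation above first tries vzalloc_node() on the NUMA node of the queue's IRQ and only falls back to a plain vzalloc() when node-local memory is unavailable. A minimal sketch of that fallback pattern follows; the alloc_ring_array() helper name is hypothetical and not part of the driver:

#include <linux/vmalloc.h>

/*
 * Hypothetical helper (not in the driver): allocate a zeroed ring array,
 * preferring memory on @node and falling back to any node, mirroring the
 * vzalloc_node()/vzalloc() pattern used by ena_setup_tx_resources() above.
 */
static void *alloc_ring_array(unsigned long size, int node)
{
	void *buf;

	buf = vzalloc_node(size, node);	/* try NUMA-local memory first */
	if (!buf)
		buf = vzalloc(size);	/* fall back to any node */

	return buf;			/* NULL only if both attempts failed */
}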
318 /* ena_free_tx_resources - Free I/O Tx Resources per Queue
326 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_free_tx_resources()
328 vfree(tx_ring->tx_buffer_info); in ena_free_tx_resources()
329 tx_ring->tx_buffer_info = NULL; in ena_free_tx_resources()
331 vfree(tx_ring->free_ids); in ena_free_tx_resources()
332 tx_ring->free_ids = NULL; in ena_free_tx_resources()
334 vfree(tx_ring->push_buf_intermediate_buf); in ena_free_tx_resources()
335 tx_ring->push_buf_intermediate_buf = NULL; in ena_free_tx_resources()
353 netif_err(adapter, ifup, adapter->netdev, in ena_setup_tx_resources_in_range()
354 "Tx queue %d: allocation failed\n", i); in ena_setup_tx_resources_in_range()
357 while (first_index < i--) in ena_setup_tx_resources_in_range()
371 /* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
380 adapter->xdp_num_queues + in ena_free_all_io_tx_resources()
381 adapter->num_io_queues); in ena_free_all_io_tx_resources()
384 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
393 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_setup_rx_resources()
394 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)]; in ena_setup_rx_resources()
397 if (rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
398 netif_err(adapter, ifup, adapter->netdev, in ena_setup_rx_resources()
400 return -EEXIST; in ena_setup_rx_resources()
406 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1); in ena_setup_rx_resources()
407 node = cpu_to_node(ena_irq->cpu); in ena_setup_rx_resources()
409 rx_ring->rx_buffer_info = vzalloc_node(size, node); in ena_setup_rx_resources()
410 if (!rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
411 rx_ring->rx_buffer_info = vzalloc(size); in ena_setup_rx_resources()
412 if (!rx_ring->rx_buffer_info) in ena_setup_rx_resources()
413 return -ENOMEM; in ena_setup_rx_resources()
416 size = sizeof(u16) * rx_ring->ring_size; in ena_setup_rx_resources()
417 rx_ring->free_ids = vzalloc_node(size, node); in ena_setup_rx_resources()
418 if (!rx_ring->free_ids) { in ena_setup_rx_resources()
419 rx_ring->free_ids = vzalloc(size); in ena_setup_rx_resources()
420 if (!rx_ring->free_ids) { in ena_setup_rx_resources()
421 vfree(rx_ring->rx_buffer_info); in ena_setup_rx_resources()
422 rx_ring->rx_buffer_info = NULL; in ena_setup_rx_resources()
423 return -ENOMEM; in ena_setup_rx_resources()
428 for (i = 0; i < rx_ring->ring_size; i++) in ena_setup_rx_resources()
429 rx_ring->free_ids[i] = i; in ena_setup_rx_resources()
432 memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats)); in ena_setup_rx_resources()
434 rx_ring->next_to_clean = 0; in ena_setup_rx_resources()
435 rx_ring->next_to_use = 0; in ena_setup_rx_resources()
436 rx_ring->cpu = ena_irq->cpu; in ena_setup_rx_resources()
437 rx_ring->numa_node = node; in ena_setup_rx_resources()
442 /* ena_free_rx_resources - Free I/O Rx Resources
451 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_resources()
453 vfree(rx_ring->rx_buffer_info); in ena_free_rx_resources()
454 rx_ring->rx_buffer_info = NULL; in ena_free_rx_resources()
456 vfree(rx_ring->free_ids); in ena_free_rx_resources()
457 rx_ring->free_ids = NULL; in ena_free_rx_resources()
460 /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
469 for (i = 0; i < adapter->num_io_queues; i++) { in ena_setup_all_rx_resources()
479 netif_err(adapter, ifup, adapter->netdev, in ena_setup_all_rx_resources()
483 while (i--) in ena_setup_all_rx_resources()
488 /* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
497 for (i = 0; i < adapter->num_io_queues; i++) in ena_free_all_io_rx_resources()
511 ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp); in ena_alloc_map_page()
512 return ERR_PTR(-ENOSPC); in ena_alloc_map_page()
515 /* To enable NIC-side port-mirroring, AKA SPAN port, in ena_alloc_map_page()
518 *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, in ena_alloc_map_page()
520 if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) { in ena_alloc_map_page()
521 ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1, in ena_alloc_map_page()
522 &rx_ring->syncp); in ena_alloc_map_page()
524 return ERR_PTR(-EIO); in ena_alloc_map_page()
533 int headroom = rx_ring->rx_headroom; in ena_alloc_rx_buffer()
540 rx_info->buf_offset = headroom; in ena_alloc_rx_buffer()
543 if (unlikely(rx_info->page)) in ena_alloc_rx_buffer()
551 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_alloc_rx_buffer()
556 rx_info->page = page; in ena_alloc_rx_buffer()
557 rx_info->dma_addr = dma; in ena_alloc_rx_buffer()
558 rx_info->page_offset = 0; in ena_alloc_rx_buffer()
559 ena_buf = &rx_info->ena_buf; in ena_alloc_rx_buffer()
560 ena_buf->paddr = dma + headroom; in ena_alloc_rx_buffer()
561 ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom; in ena_alloc_rx_buffer()
570 dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL, in ena_unmap_rx_buff_attrs()
577 struct page *page = rx_info->page; in ena_free_rx_page()
580 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_free_rx_page()
588 rx_info->page = NULL; in ena_free_rx_page()
591 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) in ena_refill_rx_bufs() argument
597 next_to_use = rx_ring->next_to_use; in ena_refill_rx_bufs()
599 for (i = 0; i < num; i++) { in ena_refill_rx_bufs()
602 req_id = rx_ring->free_ids[next_to_use]; in ena_refill_rx_bufs()
604 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_refill_rx_bufs()
608 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_refill_rx_bufs()
610 rx_ring->qid); in ena_refill_rx_bufs()
613 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, in ena_refill_rx_bufs()
614 &rx_info->ena_buf, in ena_refill_rx_bufs()
617 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_rx_bufs()
619 rx_ring->qid); in ena_refill_rx_bufs()
623 rx_ring->ring_size); in ena_refill_rx_bufs()
626 if (unlikely(i < num)) { in ena_refill_rx_bufs()
627 ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1, in ena_refill_rx_bufs()
628 &rx_ring->syncp); in ena_refill_rx_bufs()
629 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_refill_rx_bufs()
631 rx_ring->qid, i, num); in ena_refill_rx_bufs()
636 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); in ena_refill_rx_bufs()
638 rx_ring->next_to_use = next_to_use; in ena_refill_rx_bufs()
646 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_bufs()
649 for (i = 0; i < rx_ring->ring_size; i++) { in ena_free_rx_bufs()
650 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; in ena_free_rx_bufs()
652 if (rx_info->page) in ena_free_rx_bufs()
657 /* ena_refill_all_rx_bufs - allocate all queues Rx buffers
665 for (i = 0; i < adapter->num_io_queues; i++) { in ena_refill_all_rx_bufs()
666 rx_ring = &adapter->rx_ring[i]; in ena_refill_all_rx_bufs()
667 bufs_num = rx_ring->ring_size - 1; in ena_refill_all_rx_bufs()
671 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_all_rx_bufs()
681 for (i = 0; i < adapter->num_io_queues; i++) in ena_free_all_rx_bufs()
692 ena_buf = tx_info->bufs; in ena_unmap_tx_buff()
693 cnt = tx_info->num_of_bufs; in ena_unmap_tx_buff()
698 if (tx_info->map_linear_data) { in ena_unmap_tx_buff()
699 dma_unmap_single(tx_ring->dev, in ena_unmap_tx_buff()
704 cnt--; in ena_unmap_tx_buff()
709 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), in ena_unmap_tx_buff()
715 /* ena_free_tx_bufs - Free Tx Buffers per Queue
716 * @tx_ring: TX ring for which buffers are to be freed
724 is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid); in ena_free_tx_bufs()
726 for (i = 0; i < tx_ring->ring_size; i++) { in ena_free_tx_bufs()
727 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; in ena_free_tx_bufs()
729 if (!tx_info->skb) in ena_free_tx_bufs()
733 netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev, in ena_free_tx_bufs()
734 "Free uncompleted tx skb qid %d idx 0x%x\n", in ena_free_tx_bufs()
735 tx_ring->qid, i); in ena_free_tx_bufs()
738 netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev, in ena_free_tx_bufs()
739 "Free uncompleted tx skb qid %d idx 0x%x\n", in ena_free_tx_bufs()
740 tx_ring->qid, i); in ena_free_tx_bufs()
746 xdp_return_frame(tx_info->xdpf); in ena_free_tx_bufs()
748 dev_kfree_skb_any(tx_info->skb); in ena_free_tx_bufs()
752 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, in ena_free_tx_bufs()
753 tx_ring->qid)); in ena_free_tx_bufs()
761 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { in ena_free_all_tx_bufs()
762 tx_ring = &adapter->tx_ring[i]; in ena_free_all_tx_bufs()
772 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { in ena_destroy_all_tx_queues()
774 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); in ena_destroy_all_tx_queues()
783 for (i = 0; i < adapter->num_io_queues; i++) { in ena_destroy_all_rx_queues()
785 cancel_work_sync(&adapter->ena_napi[i].dim.work); in ena_destroy_all_rx_queues()
786 ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]); in ena_destroy_all_rx_queues()
787 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); in ena_destroy_all_rx_queues()
801 netif_err(ring->adapter, in handle_invalid_req_id()
803 ring->netdev, in handle_invalid_req_id()
805 is_xdp ? "xdp frame" : "skb", ring->qid, req_id); in handle_invalid_req_id()
807 netif_err(ring->adapter, in handle_invalid_req_id()
809 ring->netdev, in handle_invalid_req_id()
811 req_id, ring->qid); in handle_invalid_req_id()
813 ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp); in handle_invalid_req_id()
814 ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID); in handle_invalid_req_id()
816 return -EFAULT; in handle_invalid_req_id()
823 tx_info = &tx_ring->tx_buffer_info[req_id]; in validate_tx_req_id()
824 if (likely(tx_info->skb)) in validate_tx_req_id()
841 next_to_clean = tx_ring->next_to_clean; in ena_clean_tx_irq()
842 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid); in ena_clean_tx_irq()
848 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, in ena_clean_tx_irq()
851 if (unlikely(rc == -EINVAL)) in ena_clean_tx_irq()
861 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_clean_tx_irq()
862 skb = tx_info->skb; in ena_clean_tx_irq()
865 prefetch(&skb->end); in ena_clean_tx_irq()
867 tx_info->skb = NULL; in ena_clean_tx_irq()
868 tx_info->last_jiffies = 0; in ena_clean_tx_irq()
872 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_tx_irq()
873 "tx_poll: q %d skb %p completed\n", tx_ring->qid, in ena_clean_tx_irq()
876 tx_bytes += tx_info->total_tx_size; in ena_clean_tx_irq()
879 total_done += tx_info->tx_descs; in ena_clean_tx_irq()
881 tx_ring->free_ids[next_to_clean] = req_id; in ena_clean_tx_irq()
883 tx_ring->ring_size); in ena_clean_tx_irq()
886 tx_ring->next_to_clean = next_to_clean; in ena_clean_tx_irq()
887 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); in ena_clean_tx_irq()
891 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_tx_irq()
893 tx_ring->qid, tx_pkts); in ena_clean_tx_irq()
900 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_clean_tx_irq()
905 ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_clean_tx_irq()
908 test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { in ena_clean_tx_irq()
910 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, in ena_clean_tx_irq()
911 &tx_ring->syncp); in ena_clean_tx_irq()
924 skb = napi_alloc_skb(rx_ring->napi, len); in ena_alloc_skb()
929 ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1, in ena_alloc_skb()
930 &rx_ring->syncp); in ena_alloc_skb()
932 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_alloc_skb()
943 struct ena_com_buf *ena_buf = &rx_info->ena_buf; in ena_try_rx_buf_page_reuse()
948 if (SKB_DATA_ALIGN(len + pkt_offset) + ENA_MIN_RX_BUF_SIZE <= ena_buf->len) { in ena_try_rx_buf_page_reuse()
949 page_ref_inc(rx_info->page); in ena_try_rx_buf_page_reuse()
950 rx_info->page_offset += buf_len; in ena_try_rx_buf_page_reuse()
951 ena_buf->paddr += buf_len; in ena_try_rx_buf_page_reuse()
952 ena_buf->len -= buf_len; in ena_try_rx_buf_page_reuse()
980 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
982 if (unlikely(!rx_info->page)) { in ena_rx_skb()
983 adapter = rx_ring->adapter; in ena_rx_skb()
984 netif_err(adapter, rx_err, rx_ring->netdev, in ena_rx_skb()
985 "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id); in ena_rx_skb()
986 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp); in ena_rx_skb()
991 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
993 rx_info, rx_info->page); in ena_rx_skb()
995 buf_offset = rx_info->buf_offset; in ena_rx_skb()
996 pkt_offset = buf_offset - rx_ring->rx_headroom; in ena_rx_skb()
997 page_offset = rx_info->page_offset; in ena_rx_skb()
998 buf_addr = page_address(rx_info->page) + page_offset; in ena_rx_skb()
1000 if (len <= rx_ring->rx_copybreak) { in ena_rx_skb()
1006 dma_sync_single_for_device(rx_ring->dev, in ena_rx_skb()
1007 dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset, in ena_rx_skb()
1012 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
1013 "RX allocated small packet. len %d.\n", skb->len); in ena_rx_skb()
1014 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ena_rx_skb()
1015 rx_ring->free_ids[*next_to_clean] = req_id; in ena_rx_skb()
1017 rx_ring->ring_size); in ena_rx_skb()
1037 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ena_rx_skb()
1040 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
1042 skb->len, skb->data_len); in ena_rx_skb()
1045 rx_info->page = NULL; in ena_rx_skb()
1047 rx_ring->free_ids[*next_to_clean] = req_id; in ena_rx_skb()
1050 rx_ring->ring_size); in ena_rx_skb()
1051 if (likely(--descs == 0)) in ena_rx_skb()
1058 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
1060 /* rx_info->buf_offset includes rx_ring->rx_headroom */ in ena_rx_skb()
1061 buf_offset = rx_info->buf_offset; in ena_rx_skb()
1062 pkt_offset = buf_offset - rx_ring->rx_headroom; in ena_rx_skb()
1064 page_offset = rx_info->page_offset; in ena_rx_skb()
1066 pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr); in ena_rx_skb()
1071 dma_sync_single_for_cpu(rx_ring->dev, in ena_rx_skb()
1079 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, in ena_rx_skb()
1087 /* ena_rx_checksum - indicate in skb if hw indicated a good cksum
1097 if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) { in ena_rx_checksum()
1098 skb->ip_summed = CHECKSUM_NONE; in ena_rx_checksum()
1103 if (ena_rx_ctx->frag) { in ena_rx_checksum()
1104 skb->ip_summed = CHECKSUM_NONE; in ena_rx_checksum()
1109 if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && in ena_rx_checksum()
1110 (ena_rx_ctx->l3_csum_err))) { in ena_rx_checksum()
1112 skb->ip_summed = CHECKSUM_NONE; in ena_rx_checksum()
1113 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, in ena_rx_checksum()
1114 &rx_ring->syncp); in ena_rx_checksum()
1115 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
1121 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || in ena_rx_checksum()
1122 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) { in ena_rx_checksum()
1123 if (unlikely(ena_rx_ctx->l4_csum_err)) { in ena_rx_checksum()
1125 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, in ena_rx_checksum()
1126 &rx_ring->syncp); in ena_rx_checksum()
1127 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
1129 skb->ip_summed = CHECKSUM_NONE; in ena_rx_checksum()
1133 if (likely(ena_rx_ctx->l4_csum_checked)) { in ena_rx_checksum()
1134 skb->ip_summed = CHECKSUM_UNNECESSARY; in ena_rx_checksum()
1135 ena_increase_stat(&rx_ring->rx_stats.csum_good, 1, in ena_rx_checksum()
1136 &rx_ring->syncp); in ena_rx_checksum()
1138 ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1, in ena_rx_checksum()
1139 &rx_ring->syncp); in ena_rx_checksum()
1140 skb->ip_summed = CHECKSUM_NONE; in ena_rx_checksum()
1143 skb->ip_summed = CHECKSUM_NONE; in ena_rx_checksum()
1155 if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) { in ena_set_rx_hash()
1156 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || in ena_set_rx_hash()
1157 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) in ena_set_rx_hash()
1164 if (ena_rx_ctx->frag) in ena_set_rx_hash()
1167 skb_set_hash(skb, ena_rx_ctx->hash, hash_type); in ena_set_rx_hash()
1176 /* XDP multi-buffer packets not supported */ in ena_xdp_handle_buff()
1178 netdev_err_once(rx_ring->adapter->netdev, in ena_xdp_handle_buff()
1179 "xdp: dropped unsupported multi-buffer packets\n"); in ena_xdp_handle_buff()
1180 ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp); in ena_xdp_handle_buff()
1184 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; in ena_xdp_handle_buff()
1185 xdp_prepare_buff(xdp, page_address(rx_info->page), in ena_xdp_handle_buff()
1186 rx_info->buf_offset, in ena_xdp_handle_buff()
1187 rx_ring->ena_bufs[0].len, false); in ena_xdp_handle_buff()
1193 rx_info->buf_offset = xdp->data - xdp->data_hard_start; in ena_xdp_handle_buff()
1194 rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data; in ena_xdp_handle_buff()
1200 /* ena_clean_rx_irq - Cleanup RX irq
1210 u16 next_to_clean = rx_ring->next_to_clean; in ena_clean_rx_irq()
1227 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1228 "%s qid %d\n", __func__, rx_ring->qid); in ena_clean_rx_irq()
1230 xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq); in ena_clean_rx_irq()
1235 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; in ena_clean_rx_irq()
1236 ena_rx_ctx.max_bufs = rx_ring->sgl_size; in ena_clean_rx_irq()
1239 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, in ena_clean_rx_irq()
1240 rx_ring->ena_com_io_sq, in ena_clean_rx_irq()
1249 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; in ena_clean_rx_irq()
1251 rx_info->buf_offset += pkt_offset; in ena_clean_rx_irq()
1253 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1255 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, in ena_clean_rx_irq()
1258 dma_sync_single_for_cpu(rx_ring->dev, in ena_clean_rx_irq()
1259 dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset, in ena_clean_rx_irq()
1260 rx_ring->ena_bufs[0].len, in ena_clean_rx_irq()
1269 rx_ring->ena_bufs, in ena_clean_rx_irq()
1275 int req_id = rx_ring->ena_bufs[i].req_id; in ena_clean_rx_irq()
1277 rx_ring->free_ids[next_to_clean] = req_id; in ena_clean_rx_irq()
1280 rx_ring->ring_size); in ena_clean_rx_irq()
1287 &rx_ring->rx_buffer_info[req_id], in ena_clean_rx_irq()
1289 rx_ring->rx_buffer_info[req_id].page = NULL; in ena_clean_rx_irq()
1295 res_budget--; in ena_clean_rx_irq()
1305 skb_record_rx_queue(skb, rx_ring->qid); in ena_clean_rx_irq()
1307 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) in ena_clean_rx_irq()
1310 total_len += skb->len; in ena_clean_rx_irq()
1314 res_budget--; in ena_clean_rx_irq()
1317 work_done = budget - res_budget; in ena_clean_rx_irq()
1318 rx_ring->per_napi_packets += work_done; in ena_clean_rx_irq()
1319 u64_stats_update_begin(&rx_ring->syncp); in ena_clean_rx_irq()
1320 rx_ring->rx_stats.bytes += total_len; in ena_clean_rx_irq()
1321 rx_ring->rx_stats.cnt += work_done; in ena_clean_rx_irq()
1322 rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt; in ena_clean_rx_irq()
1323 u64_stats_update_end(&rx_ring->syncp); in ena_clean_rx_irq()
1325 rx_ring->next_to_clean = next_to_clean; in ena_clean_rx_irq()
1327 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); in ena_clean_rx_irq()
1329 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, in ena_clean_rx_irq()
1345 adapter = netdev_priv(rx_ring->netdev); in ena_clean_rx_irq()
1347 if (rc == -ENOSPC) { in ena_clean_rx_irq()
1348 ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp); in ena_clean_rx_irq()
1350 } else if (rc == -EFAULT) { in ena_clean_rx_irq()
1353 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, in ena_clean_rx_irq()
1354 &rx_ring->syncp); in ena_clean_rx_irq()
1364 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); in ena_dim_work()
1367 ena_napi->rx_ring->smoothed_interval = cur_moder.usec; in ena_dim_work()
1368 dim->state = DIM_START_MEASURE; in ena_dim_work()
1374 struct ena_ring *rx_ring = ena_napi->rx_ring; in ena_adjust_adaptive_rx_intr_moderation()
1376 if (!rx_ring->per_napi_packets) in ena_adjust_adaptive_rx_intr_moderation()
1379 rx_ring->non_empty_napi_events++; in ena_adjust_adaptive_rx_intr_moderation()
1381 dim_update_sample(rx_ring->non_empty_napi_events, in ena_adjust_adaptive_rx_intr_moderation()
1382 rx_ring->rx_stats.cnt, in ena_adjust_adaptive_rx_intr_moderation()
1383 rx_ring->rx_stats.bytes, in ena_adjust_adaptive_rx_intr_moderation()
1386 net_dim(&ena_napi->dim, dim_sample); in ena_adjust_adaptive_rx_intr_moderation()
1388 rx_ring->per_napi_packets = 0; in ena_adjust_adaptive_rx_intr_moderation()
1394 u32 rx_interval = tx_ring->smoothed_interval; in ena_unmask_interrupt()
1397 /* Rx ring can be NULL for XDP tx queues, which don't have an in ena_unmask_interrupt()
1401 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ? in ena_unmask_interrupt()
1402 rx_ring->smoothed_interval : in ena_unmask_interrupt()
1403 ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev); in ena_unmask_interrupt()
1406 * tx intr delay and interrupt unmask in ena_unmask_interrupt()
1410 tx_ring->smoothed_interval, in ena_unmask_interrupt()
1413 ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1, in ena_unmask_interrupt()
1414 &tx_ring->syncp); in ena_unmask_interrupt()
1416 /* It is a shared MSI-X. in ena_unmask_interrupt()
1417 * Tx and Rx CQ have pointer to it. in ena_unmask_interrupt()
1419 * The Tx ring is used because the rx_ring is NULL for XDP queues in ena_unmask_interrupt()
1421 ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg); in ena_unmask_interrupt()
1431 if (likely(tx_ring->cpu == cpu)) in ena_update_ring_numa_node()
1434 tx_ring->cpu = cpu; in ena_update_ring_numa_node()
1436 rx_ring->cpu = cpu; in ena_update_ring_numa_node()
1440 if (likely(tx_ring->numa_node == numa_node)) in ena_update_ring_numa_node()
1446 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); in ena_update_ring_numa_node()
1447 tx_ring->numa_node = numa_node; in ena_update_ring_numa_node()
1449 rx_ring->numa_node = numa_node; in ena_update_ring_numa_node()
1450 ena_com_update_numa_node(rx_ring->ena_com_io_cq, in ena_update_ring_numa_node()
1470 tx_ring = ena_napi->tx_ring; in ena_io_poll()
1471 rx_ring = ena_napi->rx_ring; in ena_io_poll()
1473 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; in ena_io_poll()
1475 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || in ena_io_poll()
1476 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) { in ena_io_poll()
1483 * tx completions. in ena_io_poll()
1491 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || in ena_io_poll()
1492 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) { in ena_io_poll()
1503 READ_ONCE(ena_napi->interrupts_masked)) { in ena_io_poll()
1505 WRITE_ONCE(ena_napi->interrupts_masked, false); in ena_io_poll()
1507 * Tx uses static interrupt moderation. in ena_io_poll()
1509 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) in ena_io_poll()
1521 u64_stats_update_begin(&tx_ring->syncp); in ena_io_poll()
1522 tx_ring->tx_stats.napi_comp += napi_comp_call; in ena_io_poll()
1523 tx_ring->tx_stats.tx_poll++; in ena_io_poll()
1524 u64_stats_update_end(&tx_ring->syncp); in ena_io_poll()
1526 tx_ring->tx_stats.last_napi_jiffies = jiffies; in ena_io_poll()
1535 ena_com_admin_q_comp_intr_handler(adapter->ena_dev); in ena_intr_msix_mgmnt()
1538 if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))) in ena_intr_msix_mgmnt()
1539 ena_com_aenq_intr_handler(adapter->ena_dev, data); in ena_intr_msix_mgmnt()
1544 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
1553 WRITE_ONCE(ena_napi->first_interrupt, true); in ena_intr_msix_io()
1555 WRITE_ONCE(ena_napi->interrupts_masked, true); in ena_intr_msix_io()
1558 napi_schedule_irqoff(&ena_napi->napi); in ena_intr_msix_io()
1563 /* Reserve a single MSI-X vector for management (admin + aenq).
1565 * the number of potential io queues is the minimum of what the device
1572 if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { in ena_enable_msix()
1573 netif_err(adapter, probe, adapter->netdev, in ena_enable_msix()
1574 "Error, MSI-X is already enabled\n"); in ena_enable_msix()
1575 return -EPERM; in ena_enable_msix()
1579 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues); in ena_enable_msix()
1580 netif_dbg(adapter, probe, adapter->netdev, in ena_enable_msix()
1581 "Trying to enable MSI-X, vectors %d\n", msix_vecs); in ena_enable_msix()
1583 irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC, in ena_enable_msix()
1587 netif_err(adapter, probe, adapter->netdev, in ena_enable_msix()
1588 "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt); in ena_enable_msix()
1589 return -ENOSPC; in ena_enable_msix()
1593 netif_notice(adapter, probe, adapter->netdev, in ena_enable_msix()
1594 "Enable only %d MSI-X (out of %d), reduce the number of queues\n", in ena_enable_msix()
1596 adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC; in ena_enable_msix()
1600 netif_warn(adapter, probe, adapter->netdev, in ena_enable_msix()
1603 adapter->msix_vecs = irq_cnt; in ena_enable_msix()
1604 set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags); in ena_enable_msix()
1613 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, in ena_setup_mgmnt_intr()
1614 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", in ena_setup_mgmnt_intr()
1615 pci_name(adapter->pdev)); in ena_setup_mgmnt_intr()
1616 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = in ena_setup_mgmnt_intr()
1618 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; in ena_setup_mgmnt_intr()
1619 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = in ena_setup_mgmnt_intr()
1620 pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX); in ena_setup_mgmnt_intr()
1622 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu; in ena_setup_mgmnt_intr()
1624 &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask); in ena_setup_mgmnt_intr()
1633 netdev = adapter->netdev; in ena_setup_io_intr()
1634 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; in ena_setup_io_intr()
1640 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, in ena_setup_io_intr()
1641 "%s-Tx-Rx-%d", netdev->name, i); in ena_setup_io_intr()
1642 adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io; in ena_setup_io_intr()
1643 adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i]; in ena_setup_io_intr()
1644 adapter->irq_tbl[irq_idx].vector = in ena_setup_io_intr()
1645 pci_irq_vector(adapter->pdev, irq_idx); in ena_setup_io_intr()
1646 adapter->irq_tbl[irq_idx].cpu = cpu; in ena_setup_io_intr()
1649 &adapter->irq_tbl[irq_idx].affinity_hint_mask); in ena_setup_io_intr()
1659 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; in ena_request_mgmnt_irq()
1660 rc = request_irq(irq->vector, irq->handler, flags, irq->name, in ena_request_mgmnt_irq()
1661 irq->data); in ena_request_mgmnt_irq()
1663 netif_err(adapter, probe, adapter->netdev, in ena_request_mgmnt_irq()
1668 netif_dbg(adapter, probe, adapter->netdev, in ena_request_mgmnt_irq()
1670 irq->affinity_hint_mask.bits[0], irq->vector); in ena_request_mgmnt_irq()
1672 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); in ena_request_mgmnt_irq()
1679 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; in ena_request_io_irq()
1684 if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { in ena_request_io_irq()
1685 netif_err(adapter, ifup, adapter->netdev, in ena_request_io_irq()
1686 "Failed to request I/O IRQ: MSI-X is not enabled\n"); in ena_request_io_irq()
1687 return -EINVAL; in ena_request_io_irq()
1691 irq = &adapter->irq_tbl[i]; in ena_request_io_irq()
1692 rc = request_irq(irq->vector, irq->handler, flags, irq->name, in ena_request_io_irq()
1693 irq->data); in ena_request_io_irq()
1695 netif_err(adapter, ifup, adapter->netdev, in ena_request_io_irq()
1701 netif_dbg(adapter, ifup, adapter->netdev, in ena_request_io_irq()
1703 i, irq->affinity_hint_mask.bits[0], irq->vector); in ena_request_io_irq()
1705 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); in ena_request_io_irq()
1712 irq = &adapter->irq_tbl[k]; in ena_request_io_irq()
1713 free_irq(irq->vector, irq->data); in ena_request_io_irq()
1723 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; in ena_free_mgmnt_irq()
1724 synchronize_irq(irq->vector); in ena_free_mgmnt_irq()
1725 irq_set_affinity_hint(irq->vector, NULL); in ena_free_mgmnt_irq()
1726 free_irq(irq->vector, irq->data); in ena_free_mgmnt_irq()
1731 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; in ena_free_io_irq()
1736 if (adapter->msix_vecs >= 1) { in ena_free_io_irq()
1737 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); in ena_free_io_irq()
1738 adapter->netdev->rx_cpu_rmap = NULL; in ena_free_io_irq()
1743 irq = &adapter->irq_tbl[i]; in ena_free_io_irq()
1744 irq_set_affinity_hint(irq->vector, NULL); in ena_free_io_irq()
1745 free_irq(irq->vector, irq->data); in ena_free_io_irq()
1751 if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) in ena_disable_msix()
1752 pci_free_irq_vectors(adapter->pdev); in ena_disable_msix()
1757 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; in ena_disable_io_intr_sync()
1760 if (!netif_running(adapter->netdev)) in ena_disable_io_intr_sync()
1764 synchronize_irq(adapter->irq_tbl[i].vector); in ena_disable_io_intr_sync()
1774 netif_napi_del(&adapter->ena_napi[i].napi); in ena_del_napi_in_range()
1777 adapter->ena_napi[i].rx_ring); in ena_del_napi_in_range()
1788 struct ena_napi *napi = &adapter->ena_napi[i]; in ena_init_napi_in_range()
1793 rx_ring = &adapter->rx_ring[i]; in ena_init_napi_in_range()
1794 tx_ring = &adapter->tx_ring[i]; in ena_init_napi_in_range()
1800 netif_napi_add(adapter->netdev, &napi->napi, napi_handler); in ena_init_napi_in_range()
1803 napi->rx_ring = rx_ring; in ena_init_napi_in_range()
1805 napi->tx_ring = tx_ring; in ena_init_napi_in_range()
1806 napi->qid = i; in ena_init_napi_in_range()
1817 napi_disable(&adapter->ena_napi[i].napi); in ena_napi_disable_in_range()
1827 napi_enable(&adapter->ena_napi[i].napi); in ena_napi_enable_in_range()
1833 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_rss_configure()
1837 if (!ena_dev->rss.tbl_log_size) { in ena_rss_configure()
1839 if (rc && (rc != -EOPNOTSUPP)) { in ena_rss_configure()
1840 netif_err(adapter, ifup, adapter->netdev, "Failed to init RSS rc: %d\n", rc); in ena_rss_configure()
1847 if (unlikely(rc && rc != -EOPNOTSUPP)) in ena_rss_configure()
1852 if (unlikely(rc && (rc != -EOPNOTSUPP))) in ena_rss_configure()
1857 if (unlikely(rc && (rc != -EOPNOTSUPP))) in ena_rss_configure()
1871 ena_change_mtu(adapter->netdev, adapter->netdev->mtu); in ena_up_complete()
1876 netif_tx_start_all_queues(adapter->netdev); in ena_up_complete()
1880 adapter->xdp_num_queues + adapter->num_io_queues); in ena_up_complete()
1894 ena_dev = adapter->ena_dev; in ena_create_io_tx_queue()
1896 tx_ring = &adapter->tx_ring[qid]; in ena_create_io_tx_queue()
1904 ctx.mem_queue_type = ena_dev->tx_mem_queue_type; in ena_create_io_tx_queue()
1906 ctx.queue_size = tx_ring->ring_size; in ena_create_io_tx_queue()
1907 ctx.numa_node = tx_ring->numa_node; in ena_create_io_tx_queue()
1911 netif_err(adapter, ifup, adapter->netdev, in ena_create_io_tx_queue()
1912 "Failed to create I/O TX queue num %d rc: %d\n", in ena_create_io_tx_queue()
1918 &tx_ring->ena_com_io_sq, in ena_create_io_tx_queue()
1919 &tx_ring->ena_com_io_cq); in ena_create_io_tx_queue()
1921 netif_err(adapter, ifup, adapter->netdev, in ena_create_io_tx_queue()
1922 "Failed to get TX queue handlers. TX queue num %d rc: %d\n", in ena_create_io_tx_queue()
1928 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); in ena_create_io_tx_queue()
1935 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_create_io_tx_queues_in_range()
1947 while (i-- > first_index) in ena_create_io_tx_queues_in_range()
1962 ena_dev = adapter->ena_dev; in ena_create_io_rx_queue()
1964 rx_ring = &adapter->rx_ring[qid]; in ena_create_io_rx_queue()
1974 ctx.queue_size = rx_ring->ring_size; in ena_create_io_rx_queue()
1975 ctx.numa_node = rx_ring->numa_node; in ena_create_io_rx_queue()
1979 netif_err(adapter, ifup, adapter->netdev, in ena_create_io_rx_queue()
1980 "Failed to create I/O RX queue num %d rc: %d\n", in ena_create_io_rx_queue()
1986 &rx_ring->ena_com_io_sq, in ena_create_io_rx_queue()
1987 &rx_ring->ena_com_io_cq); in ena_create_io_rx_queue()
1989 netif_err(adapter, ifup, adapter->netdev, in ena_create_io_rx_queue()
1990 "Failed to get RX queue handlers. RX queue num %d rc: %d\n", in ena_create_io_rx_queue()
1995 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); in ena_create_io_rx_queue()
2005 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_create_all_io_rx_queues()
2008 for (i = 0; i < adapter->num_io_queues; i++) { in ena_create_all_io_rx_queues()
2012 INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work); in ena_create_all_io_rx_queues()
2014 ena_xdp_register_rxq_info(&adapter->rx_ring[i]); in ena_create_all_io_rx_queues()
2020 while (i--) { in ena_create_all_io_rx_queues()
2021 ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]); in ena_create_all_io_rx_queues()
2022 cancel_work_sync(&adapter->ena_napi[i].dim.work); in ena_create_all_io_rx_queues()
2035 for (i = 0; i < adapter->num_io_queues; i++) { in set_io_rings_size()
2036 adapter->tx_ring[i].ring_size = new_tx_size; in set_io_rings_size()
2037 adapter->rx_ring[i].ring_size = new_rx_size; in set_io_rings_size()
2042 * low on memory. If there is not enough memory to allocate io queues
2043 * the driver will try to allocate smaller queues.
2046 * 1. Try to allocate TX and RX; if successful, return success.
2049 * 2. Divide by 2 the size of the larger of RX and TX queues (or both if their size is the same).
2051 * 3. If TX or RX is smaller than 256, return failure; otherwise go back to step 1.
2064 set_io_rings_size(adapter, adapter->requested_tx_ring_size, in create_queues_with_size_backoff()
2065 adapter->requested_rx_ring_size); in create_queues_with_size_backoff()
2076 adapter->num_io_queues); in create_queues_with_size_backoff()
2082 adapter->num_io_queues); in create_queues_with_size_backoff()
2103 if (rc != -ENOMEM) { in create_queues_with_size_backoff()
2104 netif_err(adapter, ifup, adapter->netdev, in create_queues_with_size_backoff()
2110 cur_tx_ring_size = adapter->tx_ring[0].ring_size; in create_queues_with_size_backoff()
2111 cur_rx_ring_size = adapter->rx_ring[0].ring_size; in create_queues_with_size_backoff()
2113 netif_err(adapter, ifup, adapter->netdev, in create_queues_with_size_backoff()
2114 "Not enough memory to create queues with sizes TX=%d, RX=%d\n", in create_queues_with_size_backoff()
2130 netif_err(adapter, ifup, adapter->netdev, in create_queues_with_size_backoff()
2131 …ed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n… in create_queues_with_size_backoff()
2136 netif_err(adapter, ifup, adapter->netdev, in create_queues_with_size_backoff()
2137 "Retrying queue creation with sizes TX=%d, RX=%d\n", in create_queues_with_size_backoff()
2150 netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__); in ena_up()
2152 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; in ena_up()
2165 if (ena_com_interrupt_moderation_supported(adapter->ena_dev)) in ena_up()
2166 ena_com_enable_adaptive_moderation(adapter->ena_dev); in ena_up()
2180 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) in ena_up()
2181 netif_carrier_on(adapter->netdev); in ena_up()
2183 ena_increase_stat(&adapter->dev_stats.interface_up, 1, in ena_up()
2184 &adapter->syncp); in ena_up()
2186 set_bit(ENA_FLAG_DEV_UP, &adapter->flags); in ena_up()
2188 /* Enable completion queues interrupt */ in ena_up()
2189 for (i = 0; i < adapter->num_io_queues; i++) in ena_up()
2190 ena_unmask_interrupt(&adapter->tx_ring[i], in ena_up()
2191 &adapter->rx_ring[i]); in ena_up()
2197 napi_schedule(&adapter->ena_napi[i].napi); in ena_up()
2216 int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; in ena_down()
2218 netif_dbg(adapter, ifdown, adapter->netdev, "%s\n", __func__); in ena_down()
2220 clear_bit(ENA_FLAG_DEV_UP, &adapter->flags); in ena_down()
2222 ena_increase_stat(&adapter->dev_stats.interface_down, 1, in ena_down()
2223 &adapter->syncp); in ena_down()
2225 netif_carrier_off(adapter->netdev); in ena_down()
2226 netif_tx_disable(adapter->netdev); in ena_down()
2228 /* After this point the napi handler won't enable the tx queue */ in ena_down()
2231 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) { in ena_down()
2234 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); in ena_down()
2236 netif_err(adapter, ifdown, adapter->netdev, in ena_down()
2239 ena_com_set_admin_running_state(adapter->ena_dev, false); in ena_down()
2254 /* ena_open - Called when a network interface is made active
2271 rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues); in ena_open()
2273 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n"); in ena_open()
2277 rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues); in ena_open()
2279 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n"); in ena_open()
2290 /* ena_close - Disables a network interface
2295 * The close entry point is called when an interface is de-activated
2306 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) in ena_close()
2309 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) in ena_close()
2314 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { in ena_close()
2315 netif_err(adapter, ifdown, adapter->netdev, in ena_close()
2334 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); in ena_update_queue_params()
2335 ena_close(adapter->netdev); in ena_update_queue_params()
2336 adapter->requested_tx_ring_size = new_tx_size; in ena_update_queue_params()
2337 adapter->requested_rx_ring_size = new_rx_size; in ena_update_queue_params()
2340 adapter->xdp_num_queues + in ena_update_queue_params()
2341 adapter->num_io_queues); in ena_update_queue_params()
2343 large_llq_changed = adapter->ena_dev->tx_mem_queue_type == in ena_update_queue_params()
2346 new_llq_header_len != adapter->ena_dev->tx_max_header_size; in ena_update_queue_params()
2350 adapter->large_llq_header_enabled = !adapter->large_llq_header_enabled; in ena_update_queue_params()
2364 if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE)) in ena_set_rx_copybreak()
2365 return -EINVAL; in ena_set_rx_copybreak()
2367 adapter->rx_copybreak = rx_copybreak; in ena_set_rx_copybreak()
2369 for (i = 0; i < adapter->num_io_queues; i++) { in ena_set_rx_copybreak()
2370 rx_ring = &adapter->rx_ring[i]; in ena_set_rx_copybreak()
2371 rx_ring->rx_copybreak = rx_copybreak; in ena_set_rx_copybreak()
2379 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_update_queue_count()
2383 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); in ena_update_queue_count()
2384 ena_close(adapter->netdev); in ena_update_queue_count()
2385 prev_channel_count = adapter->num_io_queues; in ena_update_queue_count()
2386 adapter->num_io_queues = new_channel_count; in ena_update_queue_count()
2389 adapter->xdp_first_ring = new_channel_count; in ena_update_queue_count()
2390 adapter->xdp_num_queues = new_channel_count; in ena_update_queue_count()
2398 adapter->xdp_bpf_prog, in ena_update_queue_count()
2409 adapter->xdp_num_queues + in ena_update_queue_count()
2410 adapter->num_io_queues); in ena_update_queue_count()
2411 return dev_was_up ? ena_open(adapter->netdev) : 0; in ena_update_queue_count()
2418 u32 mss = skb_shinfo(skb)->gso_size; in ena_tx_csum()
2419 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; in ena_tx_csum()
2422 if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) { in ena_tx_csum()
2423 ena_tx_ctx->l4_csum_enable = 1; in ena_tx_csum()
2425 ena_tx_ctx->tso_enable = 1; in ena_tx_csum()
2426 ena_meta->l4_hdr_len = tcp_hdr(skb)->doff; in ena_tx_csum()
2427 ena_tx_ctx->l4_csum_partial = 0; in ena_tx_csum()
2429 ena_tx_ctx->tso_enable = 0; in ena_tx_csum()
2430 ena_meta->l4_hdr_len = 0; in ena_tx_csum()
2431 ena_tx_ctx->l4_csum_partial = 1; in ena_tx_csum()
2434 switch (ip_hdr(skb)->version) { in ena_tx_csum()
2436 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; in ena_tx_csum()
2437 if (ip_hdr(skb)->frag_off & htons(IP_DF)) in ena_tx_csum()
2438 ena_tx_ctx->df = 1; in ena_tx_csum()
2440 ena_tx_ctx->l3_csum_enable = 1; in ena_tx_csum()
2441 l4_protocol = ip_hdr(skb)->protocol; in ena_tx_csum()
2444 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; in ena_tx_csum()
2445 l4_protocol = ipv6_hdr(skb)->nexthdr; in ena_tx_csum()
2452 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; in ena_tx_csum()
2454 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; in ena_tx_csum()
2456 ena_meta->mss = mss; in ena_tx_csum()
2457 ena_meta->l3_hdr_len = skb_network_header_len(skb); in ena_tx_csum()
2458 ena_meta->l3_hdr_offset = skb_network_offset(skb); in ena_tx_csum()
2459 ena_tx_ctx->meta_valid = 1; in ena_tx_csum()
2462 ena_tx_ctx->meta_valid = 1; in ena_tx_csum()
2464 ena_tx_ctx->meta_valid = 0; in ena_tx_csum()
2473 num_frags = skb_shinfo(skb)->nr_frags; in ena_check_and_linearize_skb()
2476 if (num_frags < tx_ring->sgl_size) in ena_check_and_linearize_skb()
2479 if ((num_frags == tx_ring->sgl_size) && in ena_check_and_linearize_skb()
2480 (header_len < tx_ring->tx_max_header_size)) in ena_check_and_linearize_skb()
2483 ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp); in ena_check_and_linearize_skb()
2487 ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1, in ena_check_and_linearize_skb()
2488 &tx_ring->syncp); in ena_check_and_linearize_skb()
2500 struct ena_adapter *adapter = tx_ring->adapter; in ena_tx_map_skb()
2509 tx_info->skb = skb; in ena_tx_map_skb()
2510 ena_buf = tx_info->bufs; in ena_tx_map_skb()
2512 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_tx_map_skb()
2523 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); in ena_tx_map_skb()
2525 tx_ring->push_buf_intermediate_buf); in ena_tx_map_skb()
2527 if (unlikely(skb->data != *push_hdr)) { in ena_tx_map_skb()
2528 ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1, in ena_tx_map_skb()
2529 &tx_ring->syncp); in ena_tx_map_skb()
2531 delta = push_len - skb_head_len; in ena_tx_map_skb()
2536 tx_ring->tx_max_header_size); in ena_tx_map_skb()
2539 netif_dbg(adapter, tx_queued, adapter->netdev, in ena_tx_map_skb()
2540 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, in ena_tx_map_skb()
2544 dma = dma_map_single(tx_ring->dev, skb->data + push_len, in ena_tx_map_skb()
2545 skb_head_len - push_len, DMA_TO_DEVICE); in ena_tx_map_skb()
2546 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in ena_tx_map_skb()
2549 ena_buf->paddr = dma; in ena_tx_map_skb()
2550 ena_buf->len = skb_head_len - push_len; in ena_tx_map_skb()
2553 tx_info->num_of_bufs++; in ena_tx_map_skb()
2554 tx_info->map_linear_data = 1; in ena_tx_map_skb()
2556 tx_info->map_linear_data = 0; in ena_tx_map_skb()
2559 last_frag = skb_shinfo(skb)->nr_frags; in ena_tx_map_skb()
2562 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in ena_tx_map_skb()
2567 delta -= frag_len; in ena_tx_map_skb()
2571 dma = skb_frag_dma_map(tx_ring->dev, frag, delta, in ena_tx_map_skb()
2572 frag_len - delta, DMA_TO_DEVICE); in ena_tx_map_skb()
2573 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in ena_tx_map_skb()
2576 ena_buf->paddr = dma; in ena_tx_map_skb()
2577 ena_buf->len = frag_len - delta; in ena_tx_map_skb()
2579 tx_info->num_of_bufs++; in ena_tx_map_skb()
2586 ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1, in ena_tx_map_skb()
2587 &tx_ring->syncp); in ena_tx_map_skb()
2588 netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n"); in ena_tx_map_skb()
2590 tx_info->skb = NULL; in ena_tx_map_skb()
2592 tx_info->num_of_bufs += i; in ena_tx_map_skb()
2595 return -EINVAL; in ena_tx_map_skb()
2611 /* Determine which tx ring we will be placed on */ in ena_start_xmit()
2613 tx_ring = &adapter->tx_ring[qid]; in ena_start_xmit()
2620 next_to_use = tx_ring->next_to_use; in ena_start_xmit()
2621 req_id = tx_ring->free_ids[next_to_use]; in ena_start_xmit()
2622 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_start_xmit()
2623 tx_info->num_of_bufs = 0; in ena_start_xmit()
2625 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); in ena_start_xmit()
2632 ena_tx_ctx.ena_bufs = tx_info->bufs; in ena_start_xmit()
2634 ena_tx_ctx.num_bufs = tx_info->num_of_bufs; in ena_start_xmit()
2639 ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching); in ena_start_xmit()
2646 skb->len); in ena_start_xmit()
2650 netdev_tx_sent_queue(txq, skb->len); in ena_start_xmit()
2656 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_start_xmit()
2657 tx_ring->sgl_size + 2))) { in ena_start_xmit()
2662 ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1, in ena_start_xmit()
2663 &tx_ring->syncp); in ena_start_xmit()
2675 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_start_xmit()
2678 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, in ena_start_xmit()
2679 &tx_ring->syncp); in ena_start_xmit()
2695 tx_info->skb = NULL; in ena_start_xmit()
2704 struct device *dev = &pdev->dev; in ena_config_host_info()
2716 host_info = ena_dev->host_attr.host_info; in ena_config_host_info()
2718 host_info->bdf = pci_dev_id(pdev); in ena_config_host_info()
2719 host_info->os_type = ENA_ADMIN_OS_LINUX; in ena_config_host_info()
2720 host_info->kernel_ver = LINUX_VERSION_CODE; in ena_config_host_info()
2721 ret = strscpy(host_info->kernel_ver_str, utsname()->version, in ena_config_host_info()
2722 sizeof(host_info->kernel_ver_str)); in ena_config_host_info()
2727 host_info->os_dist = 0; in ena_config_host_info()
2728 ret = strscpy(host_info->os_dist_str, utsname()->release, in ena_config_host_info()
2729 sizeof(host_info->os_dist_str)); in ena_config_host_info()
2734 host_info->driver_version = in ena_config_host_info()
2739 host_info->num_cpus = num_online_cpus(); in ena_config_host_info()
2741 host_info->driver_supported_features = in ena_config_host_info()
2750 if (rc == -EOPNOTSUPP) in ena_config_host_info()
2769 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS); in ena_config_debug_area()
2771 netif_err(adapter, drv, adapter->netdev, in ena_config_debug_area()
2779 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size); in ena_config_debug_area()
2781 netif_err(adapter, drv, adapter->netdev, in ena_config_debug_area()
2786 rc = ena_com_set_host_attributes(adapter->ena_dev); in ena_config_debug_area()
2788 if (rc == -EOPNOTSUPP) in ena_config_debug_area()
2789 netif_warn(adapter, drv, adapter->netdev, "Cannot set host attributes\n"); in ena_config_debug_area()
2791 netif_err(adapter, drv, adapter->netdev, in ena_config_debug_area()
2798 ena_com_delete_debug_area(adapter->ena_dev); in ena_config_debug_area()
2812 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) in ena_get_stats64()
2815 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { in ena_get_stats64()
2818 tx_ring = &adapter->tx_ring[i]; in ena_get_stats64()
2821 start = u64_stats_fetch_begin(&tx_ring->syncp); in ena_get_stats64()
2822 packets = tx_ring->tx_stats.cnt; in ena_get_stats64()
2823 bytes = tx_ring->tx_stats.bytes; in ena_get_stats64()
2824 } while (u64_stats_fetch_retry(&tx_ring->syncp, start)); in ena_get_stats64()
2826 stats->tx_packets += packets; in ena_get_stats64()
2827 stats->tx_bytes += bytes; in ena_get_stats64()
2833 rx_ring = &adapter->rx_ring[i]; in ena_get_stats64()
2836 start = u64_stats_fetch_begin(&rx_ring->syncp); in ena_get_stats64()
2837 packets = rx_ring->rx_stats.cnt; in ena_get_stats64()
2838 bytes = rx_ring->rx_stats.bytes; in ena_get_stats64()
2839 xdp_rx_drops = rx_ring->rx_stats.xdp_drop; in ena_get_stats64()
2840 } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); in ena_get_stats64()
2842 stats->rx_packets += packets; in ena_get_stats64()
2843 stats->rx_bytes += bytes; in ena_get_stats64()
2848 start = u64_stats_fetch_begin(&adapter->syncp); in ena_get_stats64()
2849 rx_drops = adapter->dev_stats.rx_drops; in ena_get_stats64()
2850 tx_drops = adapter->dev_stats.tx_drops; in ena_get_stats64()
2851 } while (u64_stats_fetch_retry(&adapter->syncp, start)); in ena_get_stats64()
2853 stats->rx_dropped = rx_drops + total_xdp_rx_drops; in ena_get_stats64()
2854 stats->tx_dropped = tx_drops; in ena_get_stats64()
2856 stats->multicast = 0; in ena_get_stats64()
2857 stats->collisions = 0; in ena_get_stats64()
2859 stats->rx_length_errors = 0; in ena_get_stats64()
2860 stats->rx_crc_errors = 0; in ena_get_stats64()
2861 stats->rx_frame_errors = 0; in ena_get_stats64()
2862 stats->rx_fifo_errors = 0; in ena_get_stats64()
2863 stats->rx_missed_errors = 0; in ena_get_stats64()
2864 stats->tx_window_errors = 0; in ena_get_stats64()
2866 stats->rx_errors = 0; in ena_get_stats64()
2867 stats->tx_errors = 0; in ena_get_stats64()
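/* Editor's sketch (not driver code): ena_get_stats64() above folds per-ring
 * counters into the netdev stats inside a u64_stats sequence-count retry loop,
 * so a reader never observes a torn 64-bit value while the datapath updates
 * the ring. The self-contained snippet below illustrates only that reader
 * pattern, with plain-C stand-ins for u64_stats_fetch_begin()/..._retry();
 * memory ordering and the writer side are deliberately left out.
 */
#include <stdint.h>

struct ring_stats {
	volatile unsigned int seq;	/* stand-in for the ring's u64_stats syncp */
	uint64_t packets;
	uint64_t bytes;
};

/* Retry while a writer is mid-update (odd sequence) or the sequence moved. */
static void read_ring_stats(const struct ring_stats *s,
			    uint64_t *packets, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = s->seq;				/* u64_stats_fetch_begin() */
		*packets = s->packets;
		*bytes = s->bytes;
	} while ((start & 1u) || s->seq != start);	/* u64_stats_fetch_retry() */
}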
2885 struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq; in ena_calc_io_queue_size()
2886 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_calc_io_queue_size()
2895 if (adapter->tx_ring->ring_size) in ena_calc_io_queue_size()
2896 tx_queue_size = adapter->tx_ring->ring_size; in ena_calc_io_queue_size()
2898 if (adapter->rx_ring->ring_size) in ena_calc_io_queue_size()
2899 rx_queue_size = adapter->rx_ring->ring_size; in ena_calc_io_queue_size()
2901 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { in ena_calc_io_queue_size()
2903 &get_feat_ctx->max_queue_ext.max_queue_ext; in ena_calc_io_queue_size()
2904 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth, in ena_calc_io_queue_size()
2905 max_queue_ext->max_rx_sq_depth); in ena_calc_io_queue_size()
2906 max_tx_queue_size = max_queue_ext->max_tx_cq_depth; in ena_calc_io_queue_size()
2908 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) in ena_calc_io_queue_size()
2910 llq->max_llq_depth); in ena_calc_io_queue_size()
2913 max_queue_ext->max_tx_sq_depth); in ena_calc_io_queue_size()
2915 adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, in ena_calc_io_queue_size()
2916 max_queue_ext->max_per_packet_tx_descs); in ena_calc_io_queue_size()
2917 adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, in ena_calc_io_queue_size()
2918 max_queue_ext->max_per_packet_rx_descs); in ena_calc_io_queue_size()
2921 &get_feat_ctx->max_queues; in ena_calc_io_queue_size()
2922 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth, in ena_calc_io_queue_size()
2923 max_queues->max_sq_depth); in ena_calc_io_queue_size()
2924 max_tx_queue_size = max_queues->max_cq_depth; in ena_calc_io_queue_size()
2926 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) in ena_calc_io_queue_size()
2928 llq->max_llq_depth); in ena_calc_io_queue_size()
2931 max_queues->max_sq_depth); in ena_calc_io_queue_size()
2933 adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, in ena_calc_io_queue_size()
2934 max_queues->max_packet_tx_descs); in ena_calc_io_queue_size()
2935 adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, in ena_calc_io_queue_size()
2936 max_queues->max_packet_rx_descs); in ena_calc_io_queue_size()
2943 netdev_err(adapter->netdev, "Device max TX queue size: %d < minimum: %d\n", in ena_calc_io_queue_size()
2945 return -EINVAL; in ena_calc_io_queue_size()
2949 netdev_err(adapter->netdev, "Device max RX queue size: %d < minimum: %d\n", in ena_calc_io_queue_size()
2951 return -EINVAL; in ena_calc_io_queue_size()
2955 * the queue size by 2, leaving the amount of memory used by the queues unchanged. in ena_calc_io_queue_size()
2957 if (adapter->large_llq_header_enabled) { in ena_calc_io_queue_size()
2958 if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) && in ena_calc_io_queue_size()
2959 ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_calc_io_queue_size()
2961 dev_info(&adapter->pdev->dev, in ena_calc_io_queue_size()
2962 "Forcing large headers and decreasing maximum TX queue size to %d\n", in ena_calc_io_queue_size()
2965 dev_err(&adapter->pdev->dev, in ena_calc_io_queue_size()
2968 adapter->large_llq_header_enabled = false; in ena_calc_io_queue_size()
2980 adapter->max_tx_ring_size = max_tx_queue_size; in ena_calc_io_queue_size()
2981 adapter->max_rx_ring_size = max_rx_queue_size; in ena_calc_io_queue_size()
2982 adapter->requested_tx_ring_size = tx_queue_size; in ena_calc_io_queue_size()
2983 adapter->requested_rx_ring_size = rx_queue_size; in ena_calc_io_queue_size()
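/* Editor's sketch (not driver code): ena_calc_io_queue_size() bounds the TX
 * depth by the device's CQ/SQ limits and, for LLQ placement, by the LLQ depth;
 * the comment at source line 2955 explains that forcing 256 B LLQ entries
 * halves the depth so the BAR memory footprint stays the same. The power-of-two
 * rounding below is an assumption (it happens in lines elided from this
 * listing, and is the usual convention for ring sizes); min_u32()/rounddown_pow2()
 * are plain-C stand-ins for min_t() and rounddown_pow_of_two().
 */
#include <stdint.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Round down to the nearest power of two; assumes v >= 1. */
static uint32_t rounddown_pow2(uint32_t v)
{
	uint32_t p = 1;

	while (p * 2 <= v)
		p *= 2;
	return p;
}

static uint32_t calc_tx_queue_size(uint32_t max_cq_depth, uint32_t max_sq_depth,
				   uint32_t max_llq_depth, int llq_placement,
				   int large_llq_headers)
{
	uint32_t size = min_u32(max_cq_depth, max_sq_depth);

	if (llq_placement) {
		size = min_u32(size, max_llq_depth);
		if (large_llq_headers)
			size /= 2;	/* 256 B entries: half as many fit in the BAR */
	}

	return rounddown_pow2(size);
}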
2991 struct net_device *netdev = adapter->netdev; in ena_device_validate_params()
2994 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr, in ena_device_validate_params()
2995 adapter->mac_addr); in ena_device_validate_params()
2999 return -EINVAL; in ena_device_validate_params()
3002 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) { in ena_device_validate_params()
3005 return -EINVAL; in ena_device_validate_params()
3015 struct ena_com_dev *ena_dev = adapter->ena_dev; in set_default_llq_configurations()
3017 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; in set_default_llq_configurations()
3018 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; in set_default_llq_configurations()
3019 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; in set_default_llq_configurations()
3021 adapter->large_llq_header_supported = in set_default_llq_configurations()
3022 !!(ena_dev->supported_features & BIT(ENA_ADMIN_LLQ)); in set_default_llq_configurations()
3023 adapter->large_llq_header_supported &= in set_default_llq_configurations()
3024 !!(llq->entry_size_ctrl_supported & in set_default_llq_configurations()
3027 if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) && in set_default_llq_configurations()
3028 adapter->large_llq_header_enabled) { in set_default_llq_configurations()
3029 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B; in set_default_llq_configurations()
3030 llq_config->llq_ring_entry_size_value = 256; in set_default_llq_configurations()
3032 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; in set_default_llq_configurations()
3033 llq_config->llq_ring_entry_size_value = 128; in set_default_llq_configurations()
3046 if (!(ena_dev->supported_features & llq_feature_mask)) { in ena_set_queues_placement_policy()
3047 dev_warn(&pdev->dev, in ena_set_queues_placement_policy()
3049 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; in ena_set_queues_placement_policy()
3053 if (!ena_dev->mem_bar) { in ena_set_queues_placement_policy()
3054 netdev_err(ena_dev->net_device, in ena_set_queues_placement_policy()
3056 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; in ena_set_queues_placement_policy()
3062 dev_err(&pdev->dev, in ena_set_queues_placement_policy()
3064 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; in ena_set_queues_placement_policy()
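/* Editor's sketch (not driver code): ena_set_queues_placement_policy() only
 * commits to LLQ (device-memory) TX placement when the device advertises the
 * LLQ feature, the LLQ memory BAR was mapped, and the LLQ configuration call
 * succeeded; any failure falls back to host-memory SQs, as the three fallback
 * assignments above show. A compact decision-table version of that logic:
 */
#include <stdbool.h>

enum tx_placement { PLACEMENT_HOST, PLACEMENT_DEV };

static enum tx_placement pick_tx_placement(bool llq_supported,
					   bool mem_bar_mapped,
					   bool llq_config_ok)
{
	if (!llq_supported || !mem_bar_mapped || !llq_config_ok)
		return PLACEMENT_HOST;	/* regular host-memory submission queues */
	return PLACEMENT_DEV;		/* low-latency queues in device memory */
}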
3078 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, in ena_map_llq_mem_bar()
3082 if (!ena_dev->mem_bar) in ena_map_llq_mem_bar()
3083 return -EFAULT; in ena_map_llq_mem_bar()
3092 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_device_init()
3093 struct net_device *netdev = adapter->netdev; in ena_device_init()
3095 struct device *dev = &pdev->dev; in ena_device_init()
3110 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ); in ena_device_init()
3147 * of queues. So the driver uses polling mode to retrieve this in ena_device_init()
3168 aenq_groups &= get_feat_ctx->aenq.supported_groups; in ena_device_init()
3178 set_default_llq_configurations(adapter, &llq_config, &get_feat_ctx->llq); in ena_device_init()
3180 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq, in ena_device_init()
3183 netdev_err(netdev, "Cannot set queues placement policy rc= %d\n", rc); in ena_device_init()
3206 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_enable_msix_and_set_admin_interrupts()
3207 struct device *dev = &adapter->pdev->dev; in ena_enable_msix_and_set_admin_interrupts()
3238 struct net_device *netdev = adapter->netdev; in ena_destroy_device()
3239 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_destroy_device()
3243 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) in ena_destroy_device()
3248 del_timer_sync(&adapter->timer_service); in ena_destroy_device()
3250 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); in ena_destroy_device()
3251 adapter->dev_up_before_reset = dev_up; in ena_destroy_device()
3261 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) in ena_destroy_device()
3262 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); in ena_destroy_device()
3277 adapter->reset_reason = ENA_REGS_RESET_NORMAL; in ena_destroy_device()
3279 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); in ena_destroy_device()
3280 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); in ena_destroy_device()
3288 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_restore_device()
3289 struct pci_dev *pdev = adapter->pdev; in ena_restore_device()
3294 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); in ena_restore_device()
3295 rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, &wd_state); in ena_restore_device()
3297 dev_err(&pdev->dev, "Can not initialize device\n"); in ena_restore_device()
3300 adapter->wd_state = wd_state; in ena_restore_device()
3302 count = adapter->xdp_num_queues + adapter->num_io_queues; in ena_restore_device()
3304 txr = &adapter->tx_ring[i]; in ena_restore_device()
3305 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; in ena_restore_device()
3306 txr->tx_max_header_size = ena_dev->tx_max_header_size; in ena_restore_device()
3311 dev_err(&pdev->dev, "Validation of device parameters failed\n"); in ena_restore_device()
3317 dev_err(&pdev->dev, "Enable MSI-X failed\n"); in ena_restore_device()
3321 if (adapter->dev_up_before_reset) { in ena_restore_device()
3324 dev_err(&pdev->dev, "Failed to create I/O queues\n"); in ena_restore_device()
3329 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); in ena_restore_device()
3331 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); in ena_restore_device()
3332 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) in ena_restore_device()
3333 netif_carrier_on(adapter->netdev); in ena_restore_device()
3335 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); in ena_restore_device()
3336 adapter->last_keep_alive_jiffies = jiffies; in ena_restore_device()
3349 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); in ena_restore_device()
3350 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); in ena_restore_device()
3351 dev_err(&pdev->dev, in ena_restore_device()
3366 if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { in ena_fw_reset_device()
3369 adapter->dev_stats.reset_fail += !!rc; in ena_fw_reset_device()
3371 dev_err(&adapter->pdev->dev, "Device reset completed successfully\n"); in ena_fw_reset_device()
3380 struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi); in check_for_rx_interrupt_queue()
3382 if (likely(READ_ONCE(ena_napi->first_interrupt))) in check_for_rx_interrupt_queue()
3385 if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) in check_for_rx_interrupt_queue()
3388 rx_ring->no_interrupt_event_cnt++; in check_for_rx_interrupt_queue()
3390 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { in check_for_rx_interrupt_queue()
3391 netif_err(adapter, rx_err, adapter->netdev, in check_for_rx_interrupt_queue()
3393 rx_ring->qid); in check_for_rx_interrupt_queue()
3396 return -EIO; in check_for_rx_interrupt_queue()
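/* Editor's sketch (not driver code): check_for_rx_interrupt_queue() is run
 * from the periodic watchdog; if the RX CQ has pending work but the queue has
 * never fired its first interrupt, it counts the observation and, after
 * ENA_MAX_NO_INTERRUPT_ITERATIONS consecutive passes, declares the vector
 * broken and requests a reset (-EIO). Resetting the counter on an empty CQ is
 * an assumption here (that line is elided from the listing); the threshold
 * value below is a placeholder.
 */
#include <errno.h>
#include <stdbool.h>

#define MAX_NO_INTERRUPT_ITERATIONS 3	/* placeholder for ENA_MAX_NO_INTERRUPT_ITERATIONS */

static int check_rx_interrupt(bool first_interrupt_seen, bool cq_empty,
			      unsigned int *no_interrupt_cnt)
{
	if (first_interrupt_seen)
		return 0;			/* interrupts are clearly working */

	if (cq_empty) {
		*no_interrupt_cnt = 0;		/* nothing pending, nothing to miss */
		return 0;
	}

	if (++(*no_interrupt_cnt) >= MAX_NO_INTERRUPT_ITERATIONS)
		return -EIO;			/* caller schedules a device reset */

	return 0;
}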
3405 struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi); in check_missing_comp_in_tx_queue()
3416 missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to); in check_missing_comp_in_tx_queue()
3418 for (i = 0; i < tx_ring->ring_size; i++) { in check_missing_comp_in_tx_queue()
3419 tx_buf = &tx_ring->tx_buffer_info[i]; in check_missing_comp_in_tx_queue()
3420 last_jiffies = tx_buf->last_jiffies; in check_missing_comp_in_tx_queue()
3423 /* no pending Tx at this location */ in check_missing_comp_in_tx_queue()
3427 2 * adapter->missing_tx_completion_to); in check_missing_comp_in_tx_queue()
3429 if (unlikely(!READ_ONCE(ena_napi->first_interrupt) && is_tx_comp_time_expired)) { in check_missing_comp_in_tx_queue()
3433 netif_err(adapter, tx_err, adapter->netdev, in check_missing_comp_in_tx_queue()
3434 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n", in check_missing_comp_in_tx_queue()
3435 tx_ring->qid); in check_missing_comp_in_tx_queue()
3437 return -EIO; in check_missing_comp_in_tx_queue()
3441 adapter->missing_tx_completion_to); in check_missing_comp_in_tx_queue()
3445 jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies); in check_missing_comp_in_tx_queue()
3446 napi_scheduled = !!(ena_napi->napi.state & NAPIF_STATE_SCHED); in check_missing_comp_in_tx_queue()
3454 2 * adapter->missing_tx_completion_to)) in check_missing_comp_in_tx_queue()
3462 if (tx_buf->print_once) in check_missing_comp_in_tx_queue()
3465 netif_notice(adapter, tx_err, adapter->netdev, in check_missing_comp_in_tx_queue()
3466 "TX hasn't completed, qid %d, index %d. %u usecs from last napi execution, napi scheduled: %d\n", in check_missing_comp_in_tx_queue()
3467 tx_ring->qid, i, time_since_last_napi, napi_scheduled); in check_missing_comp_in_tx_queue()
3469 tx_buf->print_once = 1; in check_missing_comp_in_tx_queue()
3473 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) { in check_missing_comp_in_tx_queue()
3474 netif_err(adapter, tx_err, adapter->netdev, in check_missing_comp_in_tx_queue()
3475 "Lost TX completions are above the threshold (%d > %d). Completion transmission timeout: %u.\n", in check_missing_comp_in_tx_queue()
3477 adapter->missing_tx_completion_threshold, in check_missing_comp_in_tx_queue()
3479 netif_err(adapter, tx_err, adapter->netdev, in check_missing_comp_in_tx_queue()
3483 rc = -EIO; in check_missing_comp_in_tx_queue()
3486 ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx, in check_missing_comp_in_tx_queue()
3487 &tx_ring->syncp); in check_missing_comp_in_tx_queue()
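/* Editor's sketch (not driver code): check_missing_comp_in_tx_queue() walks
 * every slot of a TX ring and treats a buffer whose last_jiffies timestamp is
 * older than the missing-completion timeout as a lost completion; crossing the
 * threshold triggers a device reset. The real function also factors in NAPI
 * scheduling state and the first-interrupt flag (visible in the fragment
 * above); this self-contained version keeps only the jiffies arithmetic.
 */
#include <errno.h>
#include <stdint.h>

/* Wrap-safe "a is later than b", in the spirit of the kernel's time_after(). */
static int time_after_sketch(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

static int count_missed_tx(const uint64_t *last_jiffies, unsigned int ring_size,
			   uint64_t now, uint64_t completion_timeout,
			   unsigned int reset_threshold)
{
	unsigned int i, missed = 0;

	for (i = 0; i < ring_size; i++) {
		if (!last_jiffies[i])
			continue;		/* no pending TX at this slot */
		if (time_after_sketch(now, last_jiffies[i] + completion_timeout))
			missed++;
	}

	return missed > reset_threshold ? -EIO : (int)missed;
}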
3499 io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues; in check_for_missing_completions()
3504 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) in check_for_missing_completions()
3507 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) in check_for_missing_completions()
3510 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) in check_for_missing_completions()
3515 qid = adapter->last_monitored_tx_qid; in check_for_missing_completions()
3520 tx_ring = &adapter->tx_ring[qid]; in check_for_missing_completions()
3521 rx_ring = &adapter->rx_ring[qid]; in check_for_missing_completions()
3532 budget--; in check_for_missing_completions()
3535 adapter->last_monitored_tx_qid = qid; in check_for_missing_completions()
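/* Editor's sketch (not driver code): check_for_missing_completions() only
 * inspects a small budget of queues per watchdog tick and remembers where it
 * stopped in last_monitored_tx_qid, so successive ticks cover all queues
 * without one long scan. A minimal round-robin version of that loop (the
 * per-tick budget constant is elided from the listing, so it is a parameter):
 */
static unsigned int monitor_next_queues(unsigned int last_qid,
					unsigned int io_queue_count,
					unsigned int budget,
					void (*check_one)(unsigned int qid))
{
	unsigned int qid = last_qid;

	while (budget--) {
		qid = (qid + 1) % io_queue_count;
		check_one(qid);
	}

	return qid;	/* stored back as last_monitored_tx_qid for the next tick */
}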
3552 * When such a situation is detected, reschedule NAPI.

3559 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) in check_for_empty_rx_ring()
3562 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) in check_for_empty_rx_ring()
3565 for (i = 0; i < adapter->num_io_queues; i++) { in check_for_empty_rx_ring()
3566 rx_ring = &adapter->rx_ring[i]; in check_for_empty_rx_ring()
3568 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); in check_for_empty_rx_ring()
3569 if (unlikely(refill_required == (rx_ring->ring_size - 1))) { in check_for_empty_rx_ring()
3570 rx_ring->empty_rx_queue++; in check_for_empty_rx_ring()
3572 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { in check_for_empty_rx_ring()
3573 ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1, in check_for_empty_rx_ring()
3574 &rx_ring->syncp); in check_for_empty_rx_ring()
3576 netif_err(adapter, drv, adapter->netdev, in check_for_empty_rx_ring()
3579 napi_schedule(rx_ring->napi); in check_for_empty_rx_ring()
3580 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()
3583 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()
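/* Editor's sketch (not driver code): check_for_empty_rx_ring() counts
 * consecutive watchdog passes in which an RX ring is completely unrefilled
 * (free entries == ring_size - 1) and, once the EMPTY_RX_REFILL threshold is
 * reached, reschedules NAPI so the refill path runs again. The reset-to-zero
 * behaviour mirrors the fragment above; the threshold semantics are inferred.
 */
static int rx_ring_needs_kick(unsigned int free_entries, unsigned int ring_size,
			      unsigned int *empty_passes,
			      unsigned int passes_threshold)
{
	if (free_entries != ring_size - 1) {
		*empty_passes = 0;	/* ring has buffers again */
		return 0;
	}

	if (++(*empty_passes) >= passes_threshold) {
		*empty_passes = 0;
		return 1;		/* caller does napi_schedule(rx_ring->napi) */
	}

	return 0;
}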
3593 if (!adapter->wd_state) in check_for_missing_keep_alive()
3596 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) in check_for_missing_keep_alive()
3599 keep_alive_expired = adapter->last_keep_alive_jiffies + in check_for_missing_keep_alive()
3600 adapter->keep_alive_timeout; in check_for_missing_keep_alive()
3602 netif_err(adapter, drv, adapter->netdev, in check_for_missing_keep_alive()
3604 ena_increase_stat(&adapter->dev_stats.wd_expired, 1, in check_for_missing_keep_alive()
3605 &adapter->syncp); in check_for_missing_keep_alive()
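/* Editor's sketch (not driver code): check_for_missing_keep_alive() adds the
 * keep-alive timeout to last_keep_alive_jiffies (refreshed by the AENQ handler
 * ena_keep_alive_wd() further down) and resets the device once that deadline
 * passes. The wrap-safe comparison below plays the role of the kernel's
 * jiffies deadline check (time_is_before_jiffies() is assumed; the exact
 * helper is elided from this listing).
 */
#include <stdint.h>

static int keep_alive_expired(uint64_t now, uint64_t last_keep_alive,
			      uint64_t keep_alive_timeout)
{
	return (int64_t)(now - (last_keep_alive + keep_alive_timeout)) > 0;
}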
3612 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) { in check_for_admin_com_state()
3613 netif_err(adapter, drv, adapter->netdev, in check_for_admin_com_state()
3615 ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1, in check_for_admin_com_state()
3616 &adapter->syncp); in check_for_admin_com_state()
3624 struct net_device *netdev = adapter->netdev; in ena_update_hints()
3626 if (hints->admin_completion_tx_timeout) in ena_update_hints()
3627 adapter->ena_dev->admin_queue.completion_timeout = in ena_update_hints()
3628 hints->admin_completion_tx_timeout * 1000; in ena_update_hints()
3630 if (hints->mmio_read_timeout) in ena_update_hints()
3632 adapter->ena_dev->mmio_read.reg_read_to = in ena_update_hints()
3633 hints->mmio_read_timeout * 1000; in ena_update_hints()
3635 if (hints->missed_tx_completion_count_threshold_to_reset) in ena_update_hints()
3636 adapter->missing_tx_completion_threshold = in ena_update_hints()
3637 hints->missed_tx_completion_count_threshold_to_reset; in ena_update_hints()
3639 if (hints->missing_tx_completion_timeout) { in ena_update_hints()
3640 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT) in ena_update_hints()
3641 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT; in ena_update_hints()
3643 adapter->missing_tx_completion_to = in ena_update_hints()
3644 msecs_to_jiffies(hints->missing_tx_completion_timeout); in ena_update_hints()
3647 if (hints->netdev_wd_timeout) in ena_update_hints()
3648 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout); in ena_update_hints()
3650 if (hints->driver_watchdog_timeout) { in ena_update_hints()
3651 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) in ena_update_hints()
3652 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; in ena_update_hints()
3654 adapter->keep_alive_timeout = in ena_update_hints()
3655 msecs_to_jiffies(hints->driver_watchdog_timeout); in ena_update_hints()
3662 host_info->supported_network_features[0] = in ena_update_host_info()
3663 netdev->features & GENMASK_ULL(31, 0); in ena_update_host_info()
3664 host_info->supported_network_features[1] = in ena_update_host_info()
3665 (netdev->features & GENMASK_ULL(63, 32)) >> 32; in ena_update_host_info()
3671 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr; in ena_timer_service()
3673 adapter->ena_dev->host_attr.host_info; in ena_timer_service()
3687 ena_update_host_info(host_info, adapter->netdev); in ena_timer_service()
3689 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { in ena_timer_service()
3690 netif_err(adapter, drv, adapter->netdev, in ena_timer_service()
3693 queue_work(ena_wq, &adapter->reset_task); in ena_timer_service()
3698 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); in ena_timer_service()
3707 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { in ena_calc_max_io_queue_num()
3709 &get_feat_ctx->max_queue_ext.max_queue_ext; in ena_calc_max_io_queue_num()
3710 io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num, in ena_calc_max_io_queue_num()
3711 max_queue_ext->max_rx_cq_num); in ena_calc_max_io_queue_num()
3713 io_tx_sq_num = max_queue_ext->max_tx_sq_num; in ena_calc_max_io_queue_num()
3714 io_tx_cq_num = max_queue_ext->max_tx_cq_num; in ena_calc_max_io_queue_num()
3717 &get_feat_ctx->max_queues; in ena_calc_max_io_queue_num()
3718 io_tx_sq_num = max_queues->max_sq_num; in ena_calc_max_io_queue_num()
3719 io_tx_cq_num = max_queues->max_cq_num; in ena_calc_max_io_queue_num()
3723 /* In case of LLQ use the llq fields for the tx SQ/CQ */ in ena_calc_max_io_queue_num()
3724 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) in ena_calc_max_io_queue_num()
3725 io_tx_sq_num = get_feat_ctx->llq.max_llq_num; in ena_calc_max_io_queue_num()
3732 max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1); in ena_calc_max_io_queue_num()
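/* Editor's sketch (not driver code): ena_calc_max_io_queue_num() takes the
 * smallest of every independent limit: RX and TX queue counts reported by the
 * device (or the LLQ count when LLQ placement is used), the number of online
 * CPUs, the driver's own cap, and the MSI-X vectors left after reserving one
 * for management (the "- 1" on the line above). A plain-C restatement of that
 * min-reduction:
 */
static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

static unsigned int calc_max_io_queues(unsigned int online_cpus,
					unsigned int driver_cap,
					unsigned int rx_limit,
					unsigned int tx_sq_limit,
					unsigned int tx_cq_limit,
					unsigned int msix_vectors)
{
	unsigned int n = min_u(online_cpus, driver_cap);

	n = min_u(n, rx_limit);
	n = min_u(n, tx_sq_limit);
	n = min_u(n, tx_cq_limit);
	n = min_u(n, msix_vectors - 1);	/* one vector reserved for admin/management */

	return n;
}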
3743 if (feat->offload.tx & in ena_set_dev_offloads()
3747 if (feat->offload.tx & in ena_set_dev_offloads()
3751 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) in ena_set_dev_offloads()
3754 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) in ena_set_dev_offloads()
3757 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) in ena_set_dev_offloads()
3760 if (feat->offload.rx_supported & in ena_set_dev_offloads()
3764 if (feat->offload.rx_supported & in ena_set_dev_offloads()
3768 netdev->features = in ena_set_dev_offloads()
3774 netdev->hw_features |= netdev->features; in ena_set_dev_offloads()
3775 netdev->vlan_features |= netdev->features; in ena_set_dev_offloads()
3781 struct net_device *netdev = adapter->netdev; in ena_set_conf_feat_params()
3784 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) { in ena_set_conf_feat_params()
3786 ether_addr_copy(adapter->mac_addr, netdev->dev_addr); in ena_set_conf_feat_params()
3788 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr); in ena_set_conf_feat_params()
3789 eth_hw_addr_set(netdev, adapter->mac_addr); in ena_set_conf_feat_params()
3795 adapter->max_mtu = feat->dev_attr.max_mtu; in ena_set_conf_feat_params()
3796 netdev->max_mtu = adapter->max_mtu; in ena_set_conf_feat_params()
3797 netdev->min_mtu = ENA_MIN_MTU; in ena_set_conf_feat_params()
3802 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_rss_init_default()
3803 struct device *dev = &adapter->pdev->dev; in ena_rss_init_default()
3814 val = ethtool_rxfh_indir_default(i, adapter->num_io_queues); in ena_rss_init_default()
3825 if (unlikely(rc && (rc != -EOPNOTSUPP))) { in ena_rss_init_default()
3831 if (unlikely(rc && (rc != -EOPNOTSUPP))) { in ena_rss_init_default()
3852 /* ena_probe - Device Initialization Routine
3873 dev_dbg(&pdev->dev, "%s\n", __func__); in ena_probe()
3877 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n"); in ena_probe()
3881 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS)); in ena_probe()
3883 dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc); in ena_probe()
3891 rc = -ENOMEM; in ena_probe()
3898 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n", in ena_probe()
3903 ena_dev->reg_bar = devm_ioremap(&pdev->dev, in ena_probe()
3906 if (!ena_dev->reg_bar) { in ena_probe()
3907 dev_err(&pdev->dev, "Failed to remap regs bar\n"); in ena_probe()
3908 rc = -EFAULT; in ena_probe()
3912 ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US; in ena_probe()
3914 ena_dev->dmadev = &pdev->dev; in ena_probe()
3918 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n"); in ena_probe()
3919 rc = -ENOMEM; in ena_probe()
3923 SET_NETDEV_DEV(netdev, &pdev->dev); in ena_probe()
3925 adapter->ena_dev = ena_dev; in ena_probe()
3926 adapter->netdev = netdev; in ena_probe()
3927 adapter->pdev = pdev; in ena_probe()
3928 adapter->msg_enable = DEFAULT_MSG_ENABLE; in ena_probe()
3930 ena_dev->net_device = netdev; in ena_probe()
3942 dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n"); in ena_probe()
3948 dev_err(&pdev->dev, "ENA device init failed\n"); in ena_probe()
3949 if (rc == -ETIME) in ena_probe()
3950 rc = -EPROBE_DEFER; in ena_probe()
3954 /* Initial TX and RX interrupt delay. Assumes 1 usec granularity. in ena_probe()
3957 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS; in ena_probe()
3958 ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS; in ena_probe()
3959 ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; in ena_probe()
3962 rc = -EFAULT; in ena_probe()
3968 adapter->reset_reason = ENA_REGS_RESET_NORMAL; in ena_probe()
3970 adapter->num_io_queues = max_num_io_queues; in ena_probe()
3971 adapter->max_num_io_queues = max_num_io_queues; in ena_probe()
3972 adapter->last_monitored_tx_qid = 0; in ena_probe()
3974 adapter->xdp_first_ring = 0; in ena_probe()
3975 adapter->xdp_num_queues = 0; in ena_probe()
3977 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK; in ena_probe()
3978 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) in ena_probe()
3979 adapter->disable_meta_caching = in ena_probe()
3983 adapter->wd_state = wd_state; in ena_probe()
3985 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found); in ena_probe()
3987 rc = ena_com_init_interrupt_moderation(adapter->ena_dev); in ena_probe()
3989 dev_err(&pdev->dev, in ena_probe()
3996 adapter->xdp_num_queues + in ena_probe()
3997 adapter->num_io_queues); in ena_probe()
3999 netdev->netdev_ops = &ena_netdev_ops; in ena_probe()
4000 netdev->watchdog_timeo = TX_TIMEOUT; in ena_probe()
4003 netdev->priv_flags |= IFF_UNICAST_FLT; in ena_probe()
4005 u64_stats_init(&adapter->syncp); in ena_probe()
4009 dev_err(&pdev->dev, in ena_probe()
4014 if (rc && (rc != -EOPNOTSUPP)) { in ena_probe()
4015 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc); in ena_probe()
4021 if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues)) in ena_probe()
4022 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | in ena_probe()
4025 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len); in ena_probe()
4031 dev_err(&pdev->dev, "Cannot register net device\n"); in ena_probe()
4035 INIT_WORK(&adapter->reset_task, ena_fw_reset_device); in ena_probe()
4037 adapter->last_keep_alive_jiffies = jiffies; in ena_probe()
4038 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; in ena_probe()
4039 adapter->missing_tx_completion_to = TX_TIMEOUT; in ena_probe()
4040 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS; in ena_probe()
4044 timer_setup(&adapter->timer_service, ena_timer_service, 0); in ena_probe()
4045 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); in ena_probe()
4047 dev_info(&pdev->dev, in ena_probe()
4050 netdev->dev_addr); in ena_probe()
4052 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); in ena_probe()
4068 del_timer(&adapter->timer_service); in ena_probe()
4087 /* __ena_shutoff - Helper used in both PCI remove/shutdown routines
4101 ena_dev = adapter->ena_dev; in __ena_shutoff()
4102 netdev = adapter->netdev; in __ena_shutoff()
4105 if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) { in __ena_shutoff()
4106 free_irq_cpu_rmap(netdev->rx_cpu_rmap); in __ena_shutoff()
4107 netdev->rx_cpu_rmap = NULL; in __ena_shutoff()
4114 del_timer_sync(&adapter->timer_service); in __ena_shutoff()
4115 cancel_work_sync(&adapter->reset_task); in __ena_shutoff()
4117 rtnl_lock(); /* lock released inside the below if-else block */ in __ena_shutoff()
4118 adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN; in __ena_shutoff()
4146 /* ena_remove - Device Removal Routine
4158 /* ena_shutdown - Device Shutdown Routine
4170 /* ena_suspend - PM suspend callback
4178 ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp); in ena_suspend()
4181 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { in ena_suspend()
4182 dev_err(&pdev->dev, in ena_suspend()
4184 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); in ena_suspend()
4191 /* ena_resume - PM resume callback
4199 ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp); in ena_resume()
4226 return -ENOMEM; in ena_init()
4258 int status = aenq_desc->flags & in ena_update_on_link_change()
4262 netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__); in ena_update_on_link_change()
4263 set_bit(ENA_FLAG_LINK_UP, &adapter->flags); in ena_update_on_link_change()
4264 if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags)) in ena_update_on_link_change()
4265 netif_carrier_on(adapter->netdev); in ena_update_on_link_change()
4267 clear_bit(ENA_FLAG_LINK_UP, &adapter->flags); in ena_update_on_link_change()
4268 netif_carrier_off(adapter->netdev); in ena_update_on_link_change()
4281 adapter->last_keep_alive_jiffies = jiffies; in ena_keep_alive_wd()
4283 rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low; in ena_keep_alive_wd()
4284 tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low; in ena_keep_alive_wd()
4286 u64_stats_update_begin(&adapter->syncp); in ena_keep_alive_wd()
4290 adapter->dev_stats.rx_drops = rx_drops; in ena_keep_alive_wd()
4291 adapter->dev_stats.tx_drops = tx_drops; in ena_keep_alive_wd()
4292 u64_stats_update_end(&adapter->syncp); in ena_keep_alive_wd()
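/* Editor's sketch (not driver code): the keep-alive AENQ descriptor reports
 * the device's RX/TX drop counters split into 32-bit halves, and
 * ena_keep_alive_wd() stitches them back into 64-bit values under
 * u64_stats_update_begin()/end() (the writer side that pairs with the reader
 * retry loop sketched after ena_get_stats64() above).
 */
#include <stdint.h>

static uint64_t combine_drop_counter(uint32_t high, uint32_t low)
{
	return ((uint64_t)high << 32) | low;
}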
4301 WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, in ena_notification()
4303 aenq_e->aenq_common_desc.group, in ena_notification()
4306 switch (aenq_e->aenq_common_desc.syndrome) { in ena_notification()
4309 (&aenq_e->inline_data_w4); in ena_notification()
4313 netif_err(adapter, drv, adapter->netdev, in ena_notification()
4315 aenq_e->aenq_common_desc.syndrome); in ena_notification()
4325 netif_err(adapter, drv, adapter->netdev, in unimplemented_aenq_handler()