Lines matching "num-tx-queues"

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * Copyright (c) 2014-2015 Hisilicon Limited.
11 * a set of queues provided by AE
13 * the channel between the upper layer and the AE; it can do tx and rx
15 * a tx or rx channel within a rbq
21 * "num" means a static number set as a parameter, "count" mean a dynamic
68 /* Some have said the RX and TX RCB formats should not be the same in the future, but
79 #define RCB_REG_OFFSET 0x24 /* pkt num to be handled */
194 } tx;
231 /* hnae_ring->flags fields */
232 #define RINGF_DIR 0x1 /* TX or RX ring, set if TX */
233 #define is_tx_ring(ring) ((ring)->flags & RINGF_DIR)
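RINGF_DIR packs the ring direction into bit 0 of hnae_ring->flags, so a direction check is a single mask. A usage sketch (helper name invented for illustration):

static const char *sketch_ring_dir_name(struct hnae_ring *ring)
{
	/* bit set means TX, clear means RX, per the comment on file line 232 */
	return is_tx_ring(ring) ? "tx" : "rx";
}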
278 u32 buf_size; /* size for hnae_desc->addr, preset by AE */
301 ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
303 ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
311 assert((idx) >= 0 && (idx) < (ring)->desc_num)
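File lines 301 and 303 are the forward and backward index moves of the ring: both wrap modulo desc_num, and the backward move adds desc_num before subtracting so the left operand of % never goes negative. A minimal standalone model of the same arithmetic (names invented):

struct toy_ring {
	int idx;	/* stands in for next_to_use / next_to_clean */
	int desc_num;	/* total descriptor count, must be > 0 */
};

static void toy_move_fw(struct toy_ring *r)
{
	r->idx = (r->idx + 1) % r->desc_num;	/* desc_num - 1 wraps to 0 */
}

static void toy_move_bw(struct toy_ring *r)
{
	/* + desc_num keeps the dividend non-negative when idx == 0 */
	r->idx = (r->idx - 1 + r->desc_num) % r->desc_num;
}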
321 return (end - begin + ring->desc_num) % ring->desc_num; in ring_dist()
326 return ring->desc_num - in ring_space()
327 ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1; in ring_space()
332 assert_is_ring_idx(ring, ring->next_to_use); in is_ring_empty()
333 assert_is_ring_idx(ring, ring->next_to_clean); in is_ring_empty()
335 return ring->next_to_use == ring->next_to_clean; in is_ring_empty()
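ring_dist() measures how far begin trails end with wraparound, and ring_space() subtracts that distance plus one from desc_num: one slot is deliberately kept unused so that next_to_use == next_to_clean can only mean "empty", never "full". A worked sketch of the arithmetic (standalone, hypothetical values):

static int toy_ring_space(int desc_num, int next_to_clean, int next_to_use)
{
	int dist = (next_to_use - next_to_clean + desc_num) % desc_num;

	return desc_num - dist - 1;	/* the reserved slot keeps full != empty */
}

/* e.g. toy_ring_space(1024, 5, 10) == 1024 - 5 - 1 == 1018 */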
338 #define hnae_buf_size(_ring) ((_ring)->buf_size)
388 * the AE driver should manage the space used by handle and its queues while
390 * queues.
394 * Enable the hardware, including all queues
403 * non-ok
416 * get the TX and RX pause frame settings
418 * set the TX and RX pause frame settings
420 * get the number of usecs to delay a TX interrupt after a packet is sent
422 * get the maximum number of packets to be sent before a TX interrupt
424 * set the number of usecs to delay a TX interrupt after a packet is sent
426 * set the maximum number of packets to be sent before a TX interrupt
428 * get the current number of RX/TX rings
430 * get the maximum number of RX/TX rings
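The get/set pairs above describe per-handle pause and interrupt-coalescing knobs. A hypothetical shape for the coalescing hooks (member names invented for illustration; the real hnae_ae_ops layout lives in the full header, not in this excerpt):

struct sketch_coalesce_ops {
	u32 (*get_tx_usecs)(struct hnae_handle *h);	/* delay after last packet */
	u32 (*get_tx_frames)(struct hnae_handle *h);	/* packets per interrupt */
	int (*set_tx_usecs)(struct hnae_handle *h, u32 usecs);
	int (*set_tx_frames)(struct hnae_handle *h, u32 frames);
};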
555 u32 dport_id; /* v2 TX BDs must be filled with the dport_id */
559 struct list_head node; /* node in hnae_ae_dev->handle_list */
561 struct hnae_queue *qs[]; /* flexible array of all queues */
564 #define ring_to_dev(ring) ((ring)->q->dev->dev)
580 (q)->tx_ring.io_base + RCB_REG_TAIL)
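File line 580 is the tail of the TX doorbell macro: the count of newly filled descriptors is written to the TX ring's tail register so the RCB engine starts fetching them. A minimal sketch of that pattern, assuming the full macro wraps a relaxed MMIO write (RCB_REG_TAIL is defined alongside RCB_REG_OFFSET in the header):

#include <linux/io.h>
#include <linux/types.h>

static inline void sketch_queue_xmit(void __iomem *tx_io_base, u32 buf_num)
{
	/* hand buf_num new TX descriptors to the hardware in one write */
	writel_relaxed(buf_num, tx_io_base + RCB_REG_TAIL);
}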
589 struct hnae_buf_ops *bops = ring->q->handle->bops; in hnae_reserve_buffer_map()
592 ret = bops->alloc_buffer(ring, cb); in hnae_reserve_buffer_map()
596 ret = bops->map_buffer(ring, cb); in hnae_reserve_buffer_map()
603 bops->free_buffer(ring, cb); in hnae_reserve_buffer_map()
610 int ret = hnae_reserve_buffer_map(ring, &ring->desc_cb[i]); in hnae_alloc_buffer_attach()
615 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); in hnae_alloc_buffer_attach()
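Lines 589-603 show the reserve-and-map pattern behind hnae_reserve_buffer_map(): allocate a buffer through the handle's buf_ops, DMA-map it, and free it again if the mapping fails; hnae_alloc_buffer_attach() (lines 610 and 615) then publishes the mapped DMA address into the descriptor. A hedged reconstruction of that control flow (label names and error handling are assumptions; only the alloc -> map -> unwind shape comes from the excerpt):

static int sketch_reserve_buffer_map(struct hnae_ring *ring,
				     struct hnae_desc_cb *cb)
{
	struct hnae_buf_ops *bops = ring->q->handle->bops;
	int ret;

	ret = bops->alloc_buffer(ring, cb);	/* obtain the buffer memory */
	if (ret)
		return ret;

	ret = bops->map_buffer(ring, cb);	/* DMA-map it for the device */
	if (ret)
		goto out_free;

	return 0;

out_free:
	bops->free_buffer(ring, cb);		/* unwind the allocation */
	return ret;
}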
622 ring->q->handle->bops->unmap_buffer(ring, &ring->desc_cb[i]); in hnae_buffer_detach()
623 ring->desc[i].addr = 0; in hnae_buffer_detach()
628 struct hnae_buf_ops *bops = ring->q->handle->bops; in hnae_free_buffer_detach()
629 struct hnae_desc_cb *cb = &ring->desc_cb[i]; in hnae_free_buffer_detach()
631 if (!ring->desc_cb[i].dma) in hnae_free_buffer_detach()
635 bops->free_buffer(ring, cb); in hnae_free_buffer_detach()
638 /* detach an in-use buffer and replace it with a reserved one */
642 struct hnae_buf_ops *bops = ring->q->handle->bops; in hnae_replace_buffer()
644 bops->unmap_buffer(ring, &ring->desc_cb[i]); in hnae_replace_buffer()
645 ring->desc_cb[i] = *res_cb; in hnae_replace_buffer()
646 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); in hnae_replace_buffer()
647 ring->desc[i].rx.ipoff_bnum_pid_flag = 0; in hnae_replace_buffer()
652 ring->desc_cb[i].reuse_flag = 0; in hnae_reuse_buffer()
653 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma in hnae_reuse_buffer()
654 + ring->desc_cb[i].page_offset); in hnae_reuse_buffer()
655 ring->desc[i].rx.ipoff_bnum_pid_flag = 0; in hnae_reuse_buffer()
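hnae_replace_buffer() and hnae_reuse_buffer() are the two RX-refill paths: swap in a pre-reserved buffer, or rearm the same page at its recorded page_offset; both clear rx.ipoff_bnum_pid_flag so stale status bits are not misread. A sketch of how a receive path might choose between them (signatures inferred from the excerpts; the decision predicate is an assumption):

static void sketch_rx_refill(struct hnae_ring *ring, int i,
			     struct hnae_desc_cb *spare)
{
	if (ring->desc_cb[i].reuse_flag)	/* page can be handed back as-is */
		hnae_reuse_buffer(ring, i);
	else					/* page went to the stack; swap in a spare */
		hnae_replace_buffer(ring, i, spare);
}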
664 for (i = 0; i < h->q_num; i++) { in hnae_reinit_all_ring_desc()
665 ring = &h->qs[i]->rx_ring; in hnae_reinit_all_ring_desc()
666 for (j = 0; j < ring->desc_num; j++) in hnae_reinit_all_ring_desc()
667 ring->desc[j].addr = cpu_to_le64(ring->desc_cb[j].dma); in hnae_reinit_all_ring_desc()
679 for (i = 0; i < h->q_num; i++) { in hnae_reinit_all_ring_page_off()
680 ring = &h->qs[i]->rx_ring; in hnae_reinit_all_ring_page_off()
681 for (j = 0; j < ring->desc_num; j++) { in hnae_reinit_all_ring_page_off()
682 ring->desc_cb[j].page_offset = 0; in hnae_reinit_all_ring_page_off()
683 if (ring->desc[j].addr != in hnae_reinit_all_ring_page_off()
684 cpu_to_le64(ring->desc_cb[j].dma)) in hnae_reinit_all_ring_page_off()
685 ring->desc[j].addr = in hnae_reinit_all_ring_page_off()
686 cpu_to_le64(ring->desc_cb[j].dma); in hnae_reinit_all_ring_page_off()
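Both loops walk every RX ring of the handle: hnae_reinit_all_ring_desc() unconditionally re-publishes each buffer's DMA address into its descriptor, while hnae_reinit_all_ring_page_off() also resets page_offset to 0 and only rewrites addresses that have drifted. A hedged usage sketch, assuming these run while the rings are quiesced (the reset hook that calls them is invented):

static void sketch_rings_after_reset(struct hnae_handle *h)
{
	hnae_reinit_all_ring_page_off(h);	/* page offsets back to 0 */
	hnae_reinit_all_ring_desc(h);		/* desc[j].addr = cpu_to_le64(dma) */
}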