Lines Matching +full:queue +full:- +full:rx

1 /* SPDX-License-Identifier: GPL-2.0-only */
19 /* Mailbox Queue */
27 /* Number of descriptors in a queue should be a multiple of 32. RX queue
66 * given RX completion queue has descriptors. This includes _ALL_ buffer
68 * you have a total of 1024 buffers so your RX queue _must_ have at least that
69 * many descriptors. This macro divides a given number of RX descriptors by
70 * number of buffer queues to calculate how many descriptors each buffer queue
71 * can have without overrunning the RX queue.
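
A minimal sketch of the arithmetic the comment above describes, assuming plain integer division (in the driver this macro is IDPF_RX_BUFQ_DESC_COUNT; the EXAMPLE_ name and values below are illustrative):

/* Split the RX completion queue's descriptor budget evenly across
 * buffer queues: e.g. 1024 RX descriptors over 2 buffer queues leaves
 * each buffer queue at most 512 descriptors, so their combined buffers
 * can never outnumber the RX completion queue's descriptors.
 */
#define EXAMPLE_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ)	((RXD) / (NUM_BUFQ))
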
83 #define IDPF_RX_BUFQ_WORKING_SET(rxq) ((rxq)->desc_count - 1)
87 if (unlikely(++(ntc) == (rxq)->desc_count)) { \
95 if (unlikely(++(idx) == (q)->desc_count)) \
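
Both fragments above are the same wrap-around idiom; a self-contained sketch as a hypothetical helper (kernel u16 type and unlikely() assumed), not the driver's macros:

/* Advance a ring index, wrapping back to zero at desc_count. */
static inline u16 example_bump_ring_idx(u16 idx, u16 desc_count)
{
	return unlikely(++idx == desc_count) ? 0 : idx;
}
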
117 ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
118 (txq)->next_to_clean - (txq)->next_to_use - 1)
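
A worked example of the macro above: with desc_count = 512, next_to_clean = 10 and next_to_use = 500, next_to_clean is not greater than next_to_use, so desc_count is added in: 512 + 10 - 500 - 1 = 21 unused descriptors. The trailing "- 1" keeps one slot permanently unused so a completely full ring can be told apart from an empty one.
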
120 #define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top)
122 (txq)->desc_count >> 2)
124 #define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
126 * completions that are expected to arrive on the TX completion queue.
129 (((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
131 (txq)->num_completions_pending - (txq)->complq->num_completions)
136 ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
137 0 : (txq)->compl_tag_cur_gen)
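
The generation counter wraps the same way a ring index does; a minimal stand-alone sketch of IDPF_TX_ADJ_COMPL_TAG_GEN as a hypothetical helper (kernel u16 assumed):

/* Bump the completion tag generation, wrapping to 0 at gen_max. */
static inline u16 example_adj_compl_tag_gen(u16 cur_gen, u16 gen_max)
{
	return (++cur_gen >= gen_max) ? 0 : cur_gen;
}
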
147 struct idpf_flex_tx_desc q; /* queue based scheduling */
154 * struct idpf_buf_lifo - LIFO for managing OOO completions
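
Since out-of-order completions are stashed on a stack, push/pop is all the structure needs; a hedged sketch, assuming the fields are u16 top, u16 size and a struct idpf_tx_stash **bufs array (the helpers below are illustrative, not the driver's):

/* Hypothetical LIFO helpers for the OOO-completion stash. */
static inline int example_buf_lifo_push(struct idpf_buf_lifo *stack,
					struct idpf_tx_stash *buf)
{
	if (stack->top == stack->size)
		return -ENOSPC;	/* stash full */

	stack->bufs[stack->top++] = buf;

	return 0;
}

static inline struct idpf_tx_stash *
example_buf_lifo_pop(struct idpf_buf_lifo *stack)
{
	return stack->top ? stack->bufs[--stack->top] : NULL;
}
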
166 * struct idpf_tx_offload_params - Offload parameters for a given packet
168 * @hdr_offsets: Offset parameter for single queue model
169 * @cd_tunneling: Type of tunneling enabled for single queue model
246 /* The size limit for a transmit buffer in a descriptor is (16K - 1).
251 #define IDPF_TX_MAX_DESC_DATA (SZ_16K - 1)
262 DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
265 #define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)
298 * 0->1 or 1->0 on each ring wrap. SW maintains its own
307 * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
310 * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
325 #define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags)
326 #define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags)
327 #define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags)
328 #define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags)
331 __test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
333 __assign_bit(__IDPF_Q_##f, (q)->flags, v)
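
Typical usage of these accessors; GEN_CHK is one of the real __IDPF_Q_* flag names, while the surrounding snippet is illustrative:

/* Flip the generation-check flag when the RX ring wraps, and test it
 * while parsing descriptors.
 */
if (idpf_queue_has(GEN_CHK, rxq))
	idpf_queue_change(GEN_CHK, rxq);	/* flip 0<->1 on wrap */
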
357 * @rx_itr: RX ITR register
379 * @num_rxq: Number of RX queues
383 * @rx: Array of RX queues to service
395 * @rx_dim: Data for RX net_dim algorithm
396 * @rx_itr_value: RX interrupt throttling rate
398 * @rx_itr_idx: RX ITR index
410 struct idpf_rx_queue **rx;
479 * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
490 * struct idpf_rx_queue - software structure representing a receive queue
491 * @rx: universal receive descriptor array
495 * @napi: NAPI instance corresponding to this queue (splitq)
498 * @netdev: &net_device corresponding to this queue
499 * @tail: Tail offset. Used for both queue models, single and split.
501 * @idx: For RX queue, it is used to index to total RX queue across groups and
504 * @rxdids: Supported RX descriptor ids
505 * @rx_ptype_lkup: LUT of Rx ptypes
508 * @next_to_alloc: RX buffer to allocate at
513 * @q_id: Queue id
517 * @rx_buffer_low_watermark: RX buffer low watermark
520 * @rx_max_pkt_size: RX max packet size
525 union virtchnl2_rx_desc *rx;
581 * struct idpf_tx_queue - software structure representing a transmit queue
590 * @tail: Tail offset. Used for both queue models, single and split
592 * @idx: For TX queue, it is used as index to map between TX queue group and
603 * --------------------------------
604 * | GEN = 0-1023      | IDX = 0-63 |
605 * --------------------------------
611 * --------------------------------
612 * | GEN              | IDX = 0-8159 |
613 * --------------------------------
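
A sketch of packing and unpacking a completion tag under the first layout above (10-bit GEN, 6-bit IDX); the names and GENMASK-based masks are illustrative, not the driver's:

#define EXAMPLE_COMPL_TAG_IDX_BITS	6
#define EXAMPLE_COMPL_TAG_IDX_MASK	GENMASK(EXAMPLE_COMPL_TAG_IDX_BITS - 1, 0)

static inline u16 example_compl_tag_pack(u16 gen, u16 idx)
{
	return (gen << EXAMPLE_COMPL_TAG_IDX_BITS) |
	       (idx & EXAMPLE_COMPL_TAG_IDX_MASK);
}

static inline u16 example_compl_tag_to_gen(u16 tag)
{
	return tag >> EXAMPLE_COMPL_TAG_IDX_BITS;
}

static inline u16 example_compl_tag_to_idx(u16 tag)
{
	return tag & EXAMPLE_COMPL_TAG_IDX_MASK;
}
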
616 * @netdev: &net_device corresponding to this queue
620 * the TX completion queue, it can be for any TXQ associated
621 * with that completion queue. This means we can clean up to
622 * N TXQs during a single call to clean the completion queue.
624 * that single call to clean the completion queue. By doing so,
627 * @clean_budget: singleq only, queue cleaning budget
629 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
630 * @stash: Tx buffer stash for Flow-based scheduling mode
636 * @q_id: Queue id
700 * struct idpf_buf_queue - software structure representing a buffer queue
711 * @next_to_alloc: RX buffer to allocate at
714 * @q_id: Queue id
718 * @rx_buffer_low_watermark: RX buffer low watermark
759 * struct idpf_compl_queue - software structure representing a completion queue
764 * @clean_budget: queue cleaning budget
765 * @netdev: &net_device corresponding to this queue
769 * @num_completions: Only relevant for TX completion queue. It tracks the
773 * @q_id: Queue id
838 * @rxq: RX queue
852 * @bufq: Buffer queue
874 * @singleq: Struct with single queue related members
875 * @singleq.num_rxq: Number of RX queues associated
876 * @singleq.rxqs: Array of RX queue pointers
877 * @splitq: Struct with split queue related members
878 * @splitq.num_rxq_sets: Number of RX queue sets
879 * @splitq.rxq_sets: Array of RX queue sets
880 * @splitq.bufq_sets: Buffer queue set pointer
906 * @txqs: Array of TX queue pointers
908 * @complq: Associated completion queue pointer, split queue only
910 * completion queue, accumulated for all TX queues
911 * associated with that completion queue.
936 cpu = cpumask_first(q_vector->affinity_mask); /* in idpf_q_vector_to_mem() */
942 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
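
The helper's body is not among the matches; a plausible sketch, assuming each data descriptor carries at most IDPF_TX_MAX_DESC_DATA bytes (the driver may round this limit down for DMA alignment):

/* Number of data descriptors needed for a fragment of @size bytes. */
static inline unsigned int example_size_to_txd_count(unsigned int size)
{
	return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA);
}
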
955 * idpf_tx_singleq_build_ctob - populate command tag offset and size
980 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
990 if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2) /* in idpf_tx_splitq_build_desc() */
997 * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback on disabled interrupts
998 * @q_vector: pointer to queue vector struct
1004 if (q_vector->wb_on_itr) /* in idpf_vport_intr_set_wb_on_itr() */
1007 q_vector->wb_on_itr = true;
1008 reg = &q_vector->intr_reg;
1010 writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
1011        (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s),
1012        reg->dyn_ctl);
1057 return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, /* in idpf_tx_maybe_stop_common() */
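
The matched line is cut off mid-argument list; a hedged sketch of the complete pattern, assuming the remaining arguments are the free-descriptor count plus stop/restart thresholds, which is what netif_subqueue_maybe_stop() expects:

/* Stop the subqueue when fewer than @descs_needed descriptors remain;
 * returns true when the caller must halt transmission.  The threshold
 * arguments are assumptions, not the driver's exact code.
 */
static inline bool example_tx_maybe_stop(struct idpf_tx_queue *tx_q,
					 unsigned int descs_needed)
{
	return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
					  IDPF_DESC_UNUSED(tx_q),
					  descs_needed, descs_needed);
}
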