Lines Matching +full:rx +full:- +full:inactive

1 /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
36 /* Rx CQ descriptor size. */
40 #define FUNETH_CQE_INFO_OFFSET (FUNETH_CQE_SIZE - sizeof(struct fun_cqe_info))
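
The FUNETH_CQE_INFO_OFFSET define places a struct fun_cqe_info trailer at the end of each Rx CQ descriptor. A minimal sketch of how such an offset can be used to reach the trailer from a CQE pointer; the helper name cqe_to_info is illustrative and not taken from the driver:

/* Illustrative helper (not from the driver): the fun_cqe_info trailer
 * starts FUNETH_CQE_INFO_OFFSET bytes into the CQ descriptor.
 */
static inline const struct fun_cqe_info *cqe_to_info(const void *cqe)
{
        return (const struct fun_cqe_info *)((const char *)cqe +
                                             FUNETH_CQE_INFO_OFFSET);
}
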
55 /* Per packet tailroom. Present only for 1-frag packets. */
72 FUN_IRQ_INIT, /* initialized and in the XArray but inactive */
84 u64 tx_tso; /* # of non-encapsulated TSO super-packets */
85 u64 tx_encap_tso; /* # of encapsulated TSO super-packets */
86 u64 tx_uso; /* # of non-encapsulated UDP LSO super-packets */
93 u64 tx_tls_bytes; /* Tx bytes of HW-handled TLS payload */
107 u32 mask; /* queue depth - 1 */
112 volatile __be64 *hw_wb; /* HW write-back location */
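
A depth-minus-one mask implies the ring depth is a power of two, so a free-running producer or consumer counter wraps onto a slot with a bitwise AND rather than a modulo. A minimal sketch of the pattern (the helper name ring_index is illustrative):

/* Illustrative only: map a free-running counter onto a power-of-two ring. */
static inline unsigned int ring_index(unsigned int counter, unsigned int mask)
{
        return counter & mask;  /* mask == ring depth - 1 */
}
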
131 struct funeth_rxq_stats { /* per Rx queue SW counters */
135 u64 rx_bufs; /* total # of Rx buffers provided to device */
138 u64 rx_page_alloc; /* # of page allocations for Rx buffers */
148 struct funeth_rxbuf { /* per Rx buffer state */
152 int node; /* page node, or -1 if it is PF_MEMALLOC */
155 struct funeth_rx_cache { /* cache of DMA-mapped previously used buffers */
156 struct funeth_rxbuf *bufs; /* base of Rx buffer state ring */
159 unsigned int mask; /* depth - 1 */
162 /* An Rx queue consists of a CQ and an SQ used to provide Rx buffers. */
171 unsigned int cq_mask; /* CQ depth - 1 */
176 struct funeth_rxbuf *bufs; /* base of Rx buffer state ring */
180 unsigned int rq_mask; /* RQ depth - 1 */
203 u64_stats_update_begin(&(q)->syncp); \
204 (q)->stats.counter++; \
205 u64_stats_update_end(&(q)->syncp); \
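
These three lines are the body of the driver's per-queue statistics increment macro; the excerpt omits the macro name and the do/while wrapper around it. A reconstruction of the usual u64_stats_sync pattern is sketched below; the macro name FUN_QSTAT_INC and the exact wrapper are assumptions:

/* Sketch, assuming the standard u64_stats_sync idiom; the macro name is assumed. */
#define FUN_QSTAT_INC(q, counter) \
        do { \
                u64_stats_update_begin(&(q)->syncp); \
                (q)->stats.counter++; \
                u64_stats_update_end(&(q)->syncp); \
        } while (0)
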
210 seq = u64_stats_fetch_begin(&(q)->syncp); \
211 stats_copy = (q)->stats; \
212 } while (u64_stats_fetch_retry(&(q)->syncp, (seq)))
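
The retry loop above is the reader side of the same scheme: the whole stats structure is copied and the copy is retried if a writer raced with it. A hedged sketch of how a caller might take a consistent snapshot, assuming the surrounding macro is named FUN_QSTAT_READ (the name is not shown in this excerpt):

/* Sketch: snapshot a queue's counters; FUN_QSTAT_READ is the assumed name
 * of the macro whose body appears above.
 */
struct funeth_rxq_stats snap;
unsigned int seq;

FUN_QSTAT_READ(q, seq, snap);   /* repeats the copy until no writer raced it */
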
221 u16 irq_idx; /* index of MSI-X interrupt */
228 /* Return the start address of the idx-th Tx descriptor. */
232 return q->desc + idx * FUNETH_SQE_SIZE; /* in fun_tx_desc_addr() */
237 unsigned int tail = q->prod_cnt & q->mask; /* in fun_txq_wr_db() */
239 writel(tail, q->db); /* in fun_txq_wr_db() */
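
Taken together, fun_tx_desc_addr() and fun_txq_wr_db() cover the transmit fast path: the driver fills the descriptor at the slot selected by the producer counter, advances the counter, and writes the new tail to the doorbell so the hardware starts fetching. A hedged sketch of that sequence; fill_tx_descriptor() is a hypothetical stand-in for the real descriptor setup code:

/* Sketch of a typical post-and-ring sequence; fill_tx_descriptor() is
 * hypothetical and stands in for the driver's actual descriptor setup.
 */
static void post_one_tx_desc(struct funeth_txq *q, const struct sk_buff *skb)
{
        void *desc = fun_tx_desc_addr(q, q->prod_cnt & q->mask);

        fill_tx_descriptor(desc, skb); /* hypothetical helper */
        q->prod_cnt++;                 /* free-running producer counter */
        fun_txq_wr_db(q);              /* write the new tail to the doorbell */
}
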
244 return cpu_to_mem(cpumask_first(&p->affinity_mask)); /* in fun_irq_node() */
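
fun_irq_node() maps an interrupt's CPU affinity to the memory node of the first CPU in the mask, so per-queue state can be allocated close to the CPU that will service the interrupt. A small usage sketch; the struct fun_irq type name is inferred from the FUN_IRQ_* states above and is an assumption, and the Rx queue allocation is just one plausible caller:

/* Sketch: allocate Rx queue state on the node that services the interrupt.
 * "p" is the per-vector interrupt state that fun_irq_node() takes.
 */
static struct funeth_rxq *alloc_rxq_near_irq(struct fun_irq *p)
{
        return kzalloc_node(sizeof(struct funeth_rxq), GFP_KERNEL,
                            fun_irq_node(p));
}
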