/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_LI_RX_H_
#define _DP_LI_RX_H_

#include <dp_types.h>
#include <dp_rx.h>
#include "dp_li.h"

/**
 * dp_rx_process_li() - Brain of the Rx processing functionality.
 *			Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the REO ring to be serviced
 * @quota: No. of units (packets) that can be serviced in one shot
 *
 * This function implements the core of the Rx functionality. It is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process_li(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota);
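
/*
 * Illustrative only: dp_rx_process_li() is the Lithium Rx fast-path entry
 * point. Its registration site is assumed to live outside this header
 * (dp_li.c) and is expected to look roughly like:
 *
 *	arch_ops->dp_rx_process = dp_rx_process_li;
 *
 * after which the Rx bottom half invokes it per REO destination ring with
 * the remaining quota for that interrupt context.
 */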
/**
 * dp_rx_chain_msdus_li() - Function to chain all msdus of an mpdu
 *			    to the pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
bool dp_rx_chain_msdus_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			  uint8_t *rx_tlv_hdr, uint8_t mac_id);

/**
 * dp_rx_desc_pool_init_li() - Initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_rx_desc_pool_init_li(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id);

/**
 * dp_rx_desc_pool_deinit_li() - De-initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: None
 */
void dp_rx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id);

/**
 * dp_wbm_get_rx_desc_from_hal_desc_li() - Get corresponding Rx Desc
 *					   address from WBM ring Desc
 * @soc: Handle to DP Soc structure
 * @ring_desc: ring descriptor structure pointer
 * @r_rx_desc: pointer to a pointer of Rx Desc
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_li(
					struct dp_soc *soc,
					void *ring_desc,
					struct dp_rx_desc **r_rx_desc);

/**
 * dp_rx_get_reo_qdesc_addr_li() - API to get the qdesc address from the
 *				   REO destination ring desc
 * @hal_soc: Handle to HAL Soc structure
 * @dst_ring_desc: reo dest ring descriptor (used for Lithium DP)
 * @buf: pointer to the start of RX PKT TLV headers
 * @txrx_peer: pointer to txrx_peer
 * @tid: tid value
 *
 * Return: qdesc address in reo destination ring buffer
 */
static inline
uint64_t dp_rx_get_reo_qdesc_addr_li(hal_soc_handle_t hal_soc,
				     uint8_t *dst_ring_desc,
				     uint8_t *buf,
				     struct dp_txrx_peer *txrx_peer,
				     unsigned int tid)
{
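	/*
	 * Note: txrx_peer and tid are unused here; keeping them in the
	 * prototype is assumed to be for parity with other DP architecture
	 * variants. Lithium reads the qdesc address straight from the REO
	 * destination ring descriptor / pkt TLVs.
	 */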
	return hal_rx_get_qdesc_addr(hal_soc, dst_ring_desc, buf);
}

/**
 * dp_rx_desc_cookie_2_va_li() - Convert RX Desc cookie ID to VA
 * @soc: Handle to DP Soc structure
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Rx descriptor virtual address
 */
static inline
struct dp_rx_desc *dp_rx_desc_cookie_2_va_li(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_rx_cookie_2_va_rxdma_buf(soc, cookie);
}

#define DP_PEER_METADATA_VDEV_ID_MASK	0x003f0000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16
#define DP_PEER_METADATA_OFFLOAD_MASK	0x01000000
#define DP_PEER_METADATA_OFFLOAD_SHIFT	24

#define DP_PEER_METADATA_VDEV_ID_GET_LI(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)

#define DP_PEER_METADATA_OFFLOAD_GET_LI(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_OFFLOAD_MASK)	\
			>> DP_PEER_METADATA_OFFLOAD_SHIFT)
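
/*
 * Worked example (illustrative value only): for peer_metadata 0x011a0005,
 * DP_PEER_METADATA_VDEV_ID_GET_LI() yields (0x011a0005 & 0x003f0000) >> 16
 * = 0x1a and DP_PEER_METADATA_OFFLOAD_GET_LI() yields
 * (0x011a0005 & 0x01000000) >> 24 = 1.
 */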

static inline uint16_t
dp_rx_peer_metadata_peer_id_get_li(struct dp_soc *soc, uint32_t peer_metadata)
{
	struct htt_rx_peer_metadata_v0 *metadata =
			(struct htt_rx_peer_metadata_v0 *)&peer_metadata;

	return metadata->peer_id;
}

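/**
 * dp_rx_intrabss_handle_nawds_li() - Handle NAWDS processing for an
 *				      intra-BSS multicast/broadcast nbuf copy
 * @soc: core DP main context
 * @ta_peer: transmitting (source) txrx peer entry
 * @nbuf_copy: copy of the received nbuf
 * @tid_stats: pointer to the per-TID Rx stats
 * @link_id: link id on which the packet is received
 *
 * Return: true if the nbuf copy is consumed by NAWDS handling, false otherwise
 */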
bool
dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats,
			       uint8_t link_id);

#ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
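/**
 * dp_rx_prefetch_nbuf_data() - Prefetch the next nbuf and its RX pkt TLVs
 * @nbuf: nbuf being processed in the current iteration
 * @next: next nbuf in the list, whose cb fields and pkt TLV area get
 *	  prefetched
 *
 * Return: None
 */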
static inline
void dp_rx_prefetch_nbuf_data(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
	struct rx_pkt_tlvs *pkt_tlvs;

	if (next) {
		/* prefetch skb->next and first few bytes of skb->cb */
		qdf_prefetch(next);
		/* skb->cb spans 2 cache lines, hence the extra prefetches */
		qdf_prefetch(&next->_skb_refdst);
		qdf_prefetch(&next->len);
		qdf_prefetch(&next->protocol);
		pkt_tlvs = (struct rx_pkt_tlvs *)next->data;
		/* sa_idx, da_idx, l3_pad in RX msdu_end TLV */
		qdf_prefetch(pkt_tlvs);
		/* msdu_done in RX attention TLV */
		qdf_prefetch(&pkt_tlvs->attn_tlv);
		/* fr_ds & to_ds in RX MPDU start TLV */
		if (qdf_nbuf_is_rx_chfrag_end(nbuf))
			qdf_prefetch(&pkt_tlvs->mpdu_start_tlv);
	}
}
#else
static inline
void dp_rx_prefetch_nbuf_data(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
}
#endif

#ifdef QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH
/**
 * dp_rx_cookie_2_va_rxdma_buf_prefetch() - function to prefetch the SW desc
 * @soc: Handle to DP Soc structure
 * @cookie: cookie used to lookup virtual address
 *
 * Return: prefetched Rx descriptor virtual address
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf_prefetch(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	void *prefetch_desc;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	prefetch_desc = &soc->rx_desc_buf[pool_id].array[index].rx_desc;
	qdf_prefetch(prefetch_desc);
	return prefetch_desc;
}

/**
 * dp_rx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
 * @soc: Handle to DP Soc structure
 * @hal_soc: Handle to HAL Soc structure
 * @num_entries: valid number of HW descriptors
 * @hal_ring_hdl: Destination ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: input/output param for the last prefetched SW desc
 *
 * Return: None
 */
static inline
void dp_rx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_entries,
				    hal_ring_handle_t hal_ring_hdl,
				    hal_ring_desc_t *last_prefetched_hw_desc,
				    struct dp_rx_desc **last_prefetched_sw_desc)
{
	if (*last_prefetched_sw_desc) {
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_entries) {
		*last_prefetched_sw_desc =
			dp_rx_cookie_2_va_rxdma_buf_prefetch(soc,
							     HAL_RX_REO_BUF_COOKIE_GET(*last_prefetched_hw_desc));
		*last_prefetched_hw_desc =
			hal_srng_dst_prefetch_next_cached_desc(hal_soc,
							       hal_ring_hdl,
							       (uint8_t *)*last_prefetched_hw_desc);
	}
}
#else
static inline
void dp_rx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t quota,
				    hal_ring_handle_t hal_ring_hdl,
				    hal_ring_desc_t *last_prefetched_hw_desc,
				    struct dp_rx_desc **last_prefetched_sw_desc)
{
}
#endif
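
/*
 * Rough usage sketch for the prefetch helpers above (illustrative only;
 * hal_srng_dst_prefetch(), hal_srng_dst_peek() and the reap-loop variable
 * names are assumed from the Rx fast path, not defined in this header):
 *
 *	last_prefetched_hw_desc = hal_srng_dst_prefetch(hal_soc, hal_ring_hdl,
 *							num_pending);
 *	while (quota &&
 *	       (ring_desc = hal_srng_dst_peek(hal_soc, hal_ring_hdl))) {
 *		... reap ring_desc, build the nbuf list ...
 *		dp_rx_prefetch_hw_sw_nbuf_desc(soc, hal_soc, num_pending,
 *					       hal_ring_hdl,
 *					       &last_prefetched_hw_desc,
 *					       &last_prefetched_sw_desc);
 *	}
 */

/**
 * dp_peer_rx_reorder_queue_setup_li() - Send REO reorder queue setup to the
 *					 target for each TID in the bitmap
 * @soc: Handle to DP Soc structure
 * @peer: DP peer for which the Rx reorder queues are set up
 * @tid_bitmap: bitmap of TIDs to be set up
 * @ba_window_size: BlockAck window size
 *
 * Invokes the ctrl-plane peer_rx_reorder_queue_setup callback for every TID
 * set in @tid_bitmap that has a valid hw_qdesc_paddr; TIDs without a HW
 * queue descriptor are skipped.
 *
 * Return: QDF_STATUS_SUCCESS on success (or when the callback is not
 *	   registered), QDF_STATUS_E_FAILURE if any per-TID setup call fails
 */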
static inline
QDF_STATUS dp_peer_rx_reorder_queue_setup_li(struct dp_soc *soc,
					     struct dp_peer *peer,
					     uint32_t tid_bitmap,
					     uint32_t ba_window_size)
{
	int tid;
	struct dp_rx_tid *rx_tid;

	if (!soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		dp_peer_debug("peer_rx_reorder_queue_setup NULL");
		return QDF_STATUS_SUCCESS;
	}

	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		if (!(BIT(tid) & tid_bitmap))
			continue;

		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->hw_qdesc_paddr) {
			tid_bitmap &= ~BIT(tid);
			continue;
		}

		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
		    soc->ctrl_psoc,
		    peer->vdev->pdev->pdev_id,
		    peer->vdev->vdev_id,
		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
		    1, ba_window_size)) {
			dp_peer_err("%pK: Fail to send reo q setup. tid %d",
				    soc, tid);
			return QDF_STATUS_E_FAILURE;
		}

		if (!tid_bitmap) {
			dp_peer_err("tid_bitmap=0. All tids setup fail");
			return QDF_STATUS_E_FAILURE;
		}
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_wbm_err_reap_desc_li() - Function to reap and replenish
 *                                WBM RX Error descriptors
 *
 * @int_ctx: pointer to DP interrupt context
 * @soc: core DP main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, to be serviced
 * @quota: No. of units (packets) that can be serviced in one shot
 * @rx_bufs_used: No. of descriptors reaped
 *
 * This function implements the core Rx error-path functionality: it reaps
 * and replenishes the RX error ring descriptors and creates an nbuf list
 * out of them. It also reads WBM error information from the descriptors
 * and updates the nbuf TLV area.
 *
 * Return: qdf_nbuf_t: head pointer of the nbuf list created
 */
qdf_nbuf_t
dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota,
			   uint32_t *rx_bufs_used);

/**
 * dp_rx_null_q_desc_handle_li() - Function to handle NULL Queue
 *                                 descriptor violations on either a
 *                                 REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @txrx_peer: txrx peer handle
 * @is_reo_exception: flag to check if the error is from REO or WBM
 * @link_id: link id on which the packet is received
 *
 * This function handles NULL queue descriptor violations arising out of
 * a missing REO queue for a given peer or a given TID. This typically
 * happens if a packet is received on a QoS-enabled TID before the ADDBA
 * negotiation for that TID has set up the TID queue. It may also happen
 * for MC/BC frames if they are not routed to the non-QoS TID queue in the
 * absence of any other default TID queue.
 * This error can show up in both a REO destination ring and a WBM
 * release ring.
 *
 * Return: QDF_STATUS_SUCCESS if the nbuf was handled successfully,
 *         an appropriate QDF error code if it could not be handled or
 *         was dropped.
 */
QDF_STATUS
dp_rx_null_q_desc_handle_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			    uint8_t *rx_tlv_hdr, uint8_t pool_id,
			    struct dp_txrx_peer *txrx_peer,
			    bool is_reo_exception,
			    uint8_t link_id);
#endif