xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_rx.h (revision 45c28558a520fd0e975b20c0ad534a0aa7f08021)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _DP_BE_RX_H_
21 #define _DP_BE_RX_H_
22 
23 #include <dp_types.h>
24 #include "dp_be.h"
25 #include "dp_peer.h"
26 #include <dp_rx.h>
27 #include "hal_be_rx.h"
28 
/**
 * struct dp_be_intrabss_params - params for intrabss fwd
 * @dest_soc: dest soc to forward the packet to
 * @tx_vdev_id: vdev id retrieved from dest peer
 */
struct dp_be_intrabss_params {
	struct dp_soc *dest_soc;
	uint8_t tx_vdev_id;
};
39 
40 #ifndef QCA_HOST_MODE_WIFI_DISABLED
41 
/**
 * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
 *  pkt with DA not equal to vdev mac addr, fwd is not allowed.
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
			   struct dp_txrx_peer *ta_txrx_peer,
			   uint8_t *rx_tlv_hdr,
			   qdf_nbuf_t nbuf,
			   struct hal_rx_msdu_metadata msdu_metadata);
59 #endif
60 
/**
 * dp_rx_intrabss_handle_nawds_be() - Forward mcbc intrabss pkts in nawds case
 * @soc: core txrx main context
 * @ta_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intrabss forwarded
 * @tid_stats: tid_stats structure
 *
 * Return: true if it is forwarded else false
 */
bool
dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats);
74 
/**
 * dp_rx_word_mask_subscribe_be() - Update msg word with the rx tlv word
 *				    mask subscription from the rx filter
 * @soc: Handle to DP Soc structure
 * @msg_word: HTT msg word to be updated
 * @rx_filter: rx ring filter configuration
 *
 * Return: None
 */
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
				  uint32_t *msg_word,
				  void *rx_filter);
78 
/**
 * dp_rx_process_be() - Rx processing for the BE REO destination ring
 * @int_ctx: interrupt context
 * @hal_ring_hdl: REO destination ring handle
 * @reo_ring_num: REO ring number
 * @quota: quota of work that can be done in this call
 *
 * Return: work done (number of ring entries processed)
 */
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota);
82 
83 /**
84  * dp_rx_desc_pool_init_be() - Initialize Rx Descriptor pool(s)
85  * @soc: Handle to DP Soc structure
86  * @rx_desc_pool: Rx descriptor pool handler
87  * @pool_id: Rx descriptor pool ID
88  *
89  * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
90  */
91 QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
92 				   struct rx_desc_pool *rx_desc_pool,
93 				   uint32_t pool_id);
94 
95 /**
96  * dp_rx_desc_pool_deinit_be() - De-initialize Rx Descriptor pool(s)
97  * @soc: Handle to DP Soc structure
98  * @rx_desc_pool: Rx descriptor pool handler
99  * @pool_id: Rx descriptor pool ID
100  *
101  * Return: None
102  */
103 void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
104 			       struct rx_desc_pool *rx_desc_pool,
105 			       uint32_t pool_id);
106 
107 /**
108  * dp_wbm_get_rx_desc_from_hal_desc_be() - Get corresponding Rx Desc
109  *					address from WBM ring Desc
110  * @soc: Handle to DP Soc structure
111  * @ring_desc: ring descriptor structure pointer
112  * @r_rx_desc: pointer to a pointer of Rx Desc
113  *
114  * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
115  */
116 QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
117 					       void *ring_desc,
118 					       struct dp_rx_desc **r_rx_desc);
119 
120 /**
121  * dp_rx_desc_cookie_2_va_be() - Convert RX Desc cookie ID to VA
122  * @soc:Handle to DP Soc structure
123  * @cookie: cookie used to lookup virtual address
124  *
125  * Return: Rx descriptor virtual address
126  */
127 struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
128 					     uint32_t cookie);
129 
130 #if !defined(DP_FEATURE_HW_COOKIE_CONVERSION) || \
131 		defined(DP_HW_COOKIE_CONVERT_EXCEPTION)
132 /**
133  * dp_rx_desc_sw_cc_check() - check if RX desc VA is got correctly,
134 			      if not, do SW cookie conversion.
135  * @soc:Handle to DP Soc structure
136  * @rx_buf_cookie: RX desc cookie ID
137  * @r_rx_desc: double pointer for RX desc
138  *
139  * Return: None
140  */
141 static inline void
142 dp_rx_desc_sw_cc_check(struct dp_soc *soc,
143 		       uint32_t rx_buf_cookie,
144 		       struct dp_rx_desc **r_rx_desc)
145 {
146 	if (qdf_unlikely(!(*r_rx_desc))) {
147 		*r_rx_desc = (struct dp_rx_desc *)
148 				dp_cc_desc_find(soc,
149 						rx_buf_cookie);
150 	}
151 }
152 #else
/* HW cookie conversion is always usable in this build: SW check is a no-op */
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
}
#endif /* !DP_FEATURE_HW_COOKIE_CONVERSION || DP_HW_COOKIE_CONVERT_EXCEPTION */
160 
161 #define DP_PEER_METADATA_OFFLOAD_GET_BE(_peer_metadata)		(0)
162 
163 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
164 static inline uint16_t
165 dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
166 {
167 	struct htt_rx_peer_metadata_v1 *metadata =
168 			(struct htt_rx_peer_metadata_v1 *)&peer_metadata;
169 	uint16_t peer_id;
170 
171 	peer_id = metadata->peer_id |
172 		  (metadata->ml_peer_valid << soc->peer_id_shift);
173 
174 	return peer_id;
175 }
176 #else
177 /* Combine ml_peer_valid and peer_id field */
178 #define DP_BE_PEER_METADATA_PEER_ID_MASK	0x00003fff
179 #define DP_BE_PEER_METADATA_PEER_ID_SHIFT	0
180 
181 static inline uint16_t
182 dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
183 {
184 	return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
185 		DP_BE_PEER_METADATA_PEER_ID_SHIFT);
186 }
187 #endif
188 
189 static inline uint16_t
190 dp_rx_peer_metadata_vdev_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
191 {
192 	struct htt_rx_peer_metadata_v1 *metadata =
193 			(struct htt_rx_peer_metadata_v1 *)&peer_metadata;
194 
195 	return metadata->vdev_id;
196 }
197 
198 static inline uint8_t
199 dp_rx_peer_metadata_lmac_id_get_be(uint32_t peer_metadata)
200 {
201 	return HTT_RX_PEER_META_DATA_V1_LMAC_ID_GET(peer_metadata);
202 }
203 
204 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
205 /**
206  * dp_rx_nf_process() - Near Full state handler for RX rings.
207  * @int_ctx: interrupt context
208  * @hal_ring_hdl: Rx ring handle
209  * @reo_ring_num: RX ring number
210  * @quota: Quota of work to be done
211  *
212  * Return: work done in the handler
213  */
214 uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
215 			  hal_ring_handle_t hal_ring_hdl,
216 			  uint8_t reo_ring_num,
217 			  uint32_t quota);
218 #else
/* WLAN_FEATURE_NEAR_FULL_IRQ disabled: near-full handling is a no-op,
 * report zero work done.
 */
static inline
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota)
{
	return 0;
}
227 #endif /*WLAN_FEATURE_NEAR_FULL_IRQ */
228 
229 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
230 struct dp_soc *
231 dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id);
232 #else
/* Single-chip build: the soc used for replenish is always the local soc.
 * NOTE(review): "replensih" looks like a typo for "replenish"; renaming
 * would break external callers, so the name is left as-is.
 */
static inline struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	return soc;
}
238 #endif
239 
240 #ifdef WLAN_FEATURE_11BE_MLO
241 /**
242  * dp_rx_mlo_igmp_handler() - Rx handler for Mcast packets
243  * @soc: Handle to DP Soc structure
244  * @vdev: DP vdev handle
245  * @peer: DP peer handle
246  * @nbuf: nbuf to be enqueued
247  *
248  * Return: true when packet sent to stack, false failure
249  */
250 bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
251 			    struct dp_vdev *vdev,
252 			    struct dp_txrx_peer *peer,
253 			    qdf_nbuf_t nbuf);
254 
255 /**
256  * dp_peer_rx_reorder_queue_setup() - Send reo queue setup wmi cmd to FW
257 				      per peer type
258  * @soc: DP Soc handle
259  * @peer: dp peer to operate on
260  * @tid: TID
261  * @ba_window_size: BlockAck window size
262  *
263  * Return: 0 - success, others - failure
264  */
265 static inline
266 QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
267 					     struct dp_peer *peer,
268 					     int tid,
269 					     uint32_t ba_window_size)
270 {
271 	uint8_t i;
272 	struct dp_mld_link_peers link_peers_info;
273 	struct dp_peer *link_peer;
274 	struct dp_rx_tid *rx_tid;
275 	struct dp_soc *link_peer_soc;
276 
277 	rx_tid = &peer->rx_tid[tid];
278 	if (!rx_tid->hw_qdesc_paddr)
279 		return QDF_STATUS_E_INVAL;
280 
281 	if (!hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
282 		if (IS_MLO_DP_MLD_PEER(peer)) {
283 			/* get link peers with reference */
284 			dp_get_link_peers_ref_from_mld_peer(soc, peer,
285 							    &link_peers_info,
286 							    DP_MOD_ID_CDP);
287 			/* send WMI cmd to each link peers */
288 			for (i = 0; i < link_peers_info.num_links; i++) {
289 				link_peer = link_peers_info.link_peers[i];
290 				link_peer_soc = link_peer->vdev->pdev->soc;
291 				if (link_peer_soc->cdp_soc.ol_ops->
292 						peer_rx_reorder_queue_setup) {
293 					if (link_peer_soc->cdp_soc.ol_ops->
294 						peer_rx_reorder_queue_setup(
295 					    link_peer_soc->ctrl_psoc,
296 					    link_peer->vdev->pdev->pdev_id,
297 					    link_peer->vdev->vdev_id,
298 					    link_peer->mac_addr.raw,
299 					    rx_tid->hw_qdesc_paddr,
300 					    tid, tid,
301 					    1, ba_window_size)) {
302 						dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
303 							    link_peer_soc, tid);
304 						return QDF_STATUS_E_FAILURE;
305 					}
306 				}
307 			}
308 			/* release link peers reference */
309 			dp_release_link_peers_ref(&link_peers_info,
310 						  DP_MOD_ID_CDP);
311 		} else if (peer->peer_type == CDP_LINK_PEER_TYPE) {
312 			if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
313 				if (soc->cdp_soc.ol_ops->
314 					peer_rx_reorder_queue_setup(
315 				    soc->ctrl_psoc,
316 				    peer->vdev->pdev->pdev_id,
317 				    peer->vdev->vdev_id,
318 				    peer->mac_addr.raw,
319 				    rx_tid->hw_qdesc_paddr,
320 				    tid, tid,
321 				    1, ba_window_size)) {
322 					dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
323 						    soc, tid);
324 					return QDF_STATUS_E_FAILURE;
325 				}
326 			}
327 		} else {
328 			dp_peer_err("invalid peer type %d", peer->peer_type);
329 			return QDF_STATUS_E_FAILURE;
330 		}
331 	} else {
332 		/* Some BE targets dont require WMI and use shared
333 		 * table managed by host for storing Reo queue ref structs
334 		 */
335 		if (IS_MLO_DP_LINK_PEER(peer) ||
336 		    peer->peer_id == HTT_INVALID_PEER) {
337 			/* Return if this is for MLD link peer and table
338 			 * is not used in MLD link peer case as MLD peer's
339 			 * qref is written to LUT in peer setup or peer map.
340 			 * At this point peer setup for link peer is called
341 			 * before peer map, hence peer id is not assigned.
342 			 * This could happen if peer_setup is called before
343 			 * host receives HTT peer map. In this case return
344 			 * success with no op and let peer map handle
345 			 * writing the reo_qref to LUT.
346 			 */
347 			dp_peer_debug("Invalid peer id for dp_peer:%pK", peer);
348 			return QDF_STATUS_SUCCESS;
349 		}
350 
351 		hal_reo_shared_qaddr_write(soc->hal_soc,
352 					   peer->peer_id,
353 					   tid, peer->rx_tid[tid].hw_qdesc_paddr);
354 	}
355 	return QDF_STATUS_SUCCESS;
356 }
357 #else
358 static inline
359 QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
360 					     struct dp_peer *peer,
361 					     int tid,
362 					     uint32_t ba_window_size)
363 {
364 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
365 
366 	if (!rx_tid->hw_qdesc_paddr)
367 		return QDF_STATUS_E_INVAL;
368 
369 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
370 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
371 		    soc->ctrl_psoc,
372 		    peer->vdev->pdev->pdev_id,
373 		    peer->vdev->vdev_id,
374 		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
375 		    1, ba_window_size)) {
376 			dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
377 				    soc, tid);
378 			return QDF_STATUS_E_FAILURE;
379 		}
380 	}
381 
382 	return QDF_STATUS_SUCCESS;
383 }
384 #endif /* WLAN_FEATURE_11BE_MLO */
385 
386 #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
387 static inline
388 void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
389 {
390 	if (next) {
391 		/* prefetch skb->next and first few bytes of skb->cb */
392 		qdf_prefetch(next);
393 		/* skb->cb spread across 2 cache lines hence below prefetch */
394 		qdf_prefetch(&next->_skb_refdst);
395 		qdf_prefetch(&next->len);
396 		qdf_prefetch(&next->protocol);
397 		qdf_prefetch(next->data);
398 		qdf_prefetch(next->data + 64);
399 		qdf_prefetch(next->data + 128);
400 	}
401 }
402 #else
/* QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH disabled: prefetch is a no-op */
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
}
407 #endif
408 
409 #ifdef QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH
/**
 * dp_rx_va_prefetch() - prefetch the SW desc whose VA is carried in the
 *			 given HW REO destination descriptor
 * @last_prefetched_hw_desc: HW descriptor to read the REO desc VA from
 *
 * Return: prefetched Rx SW descriptor virtual address
 */
static inline
void *dp_rx_va_prefetch(void *last_prefetched_hw_desc)
{
	void *prefetch_desc;

	prefetch_desc = (void *)hal_rx_get_reo_desc_va(last_prefetched_hw_desc);
	qdf_prefetch(prefetch_desc);
	return prefetch_desc;
}
426 
/**
 * dp_rx_prefetch_hw_sw_nbuf_32_byte_desc() - function to prefetch HW and SW
 *					      desc (32-byte HW descriptors)
 * @soc: Handle to DP Soc structure
 * @hal_soc: Handle to HAL Soc structure
 * @num_entries: valid number of HW descriptors
 * @hal_ring_hdl: Destination ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: input & output param of last prefetch SW desc
 *
 * Return: None
 */
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
			       hal_soc_handle_t hal_soc,
			       uint32_t num_entries,
			       hal_ring_handle_t hal_ring_hdl,
			       hal_ring_desc_t *last_prefetched_hw_desc,
			       struct dp_rx_desc **last_prefetched_sw_desc)
{
	/* Warm the first two cache lines of the previously found nbuf */
	if (*last_prefetched_sw_desc) {
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_entries) {
		*last_prefetched_sw_desc =
			dp_rx_va_prefetch(*last_prefetched_hw_desc);

		/* NOTE(review): branch keys on the 64B alignment of the
		 * 32-byte HW desc — unaligned descs advance within the
		 * cached line, aligned descs fetch the next line; confirm
		 * against the hal_srng helpers.
		 */
		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
			*last_prefetched_hw_desc =
				hal_srng_dst_prefetch_next_cached_desc(hal_soc,
					  hal_ring_hdl,
					  (uint8_t *)*last_prefetched_hw_desc);
		else
			*last_prefetched_hw_desc =
				hal_srng_dst_get_next_32_byte_desc(hal_soc,
				   hal_ring_hdl,
				   (uint8_t *)*last_prefetched_hw_desc);
	}
}
466 #else
/* QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH disabled: prefetch is a no-op */
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
			       hal_soc_handle_t hal_soc,
			       uint32_t num_entries,
			       hal_ring_handle_t hal_ring_hdl,
			       hal_ring_desc_t *last_prefetched_hw_desc,
			       struct dp_rx_desc **last_prefetched_sw_desc)
{
}
476 #endif
477 #endif
478