xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_rx.h (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _DP_BE_RX_H_
21 #define _DP_BE_RX_H_
22 
23 #include <dp_types.h>
24 #include "dp_be.h"
25 #include "dp_peer.h"
26 #include <dp_rx.h>
27 #include "hal_be_rx.h"
28 
/**
 * struct dp_be_intrabss_params - parameters for BE intra-BSS forwarding
 * @dest_soc: dest soc to forward the packet to
 * @tx_vdev_id: vdev id retrieved from dest peer
 */
struct dp_be_intrabss_params {
	struct dp_soc *dest_soc;
	uint8_t tx_vdev_id;
};
39 
#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
 *  pkt with DA not equal to vdev mac addr, fwd is not allowed.
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */

bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
			   struct dp_txrx_peer *ta_txrx_peer,
			   uint8_t *rx_tlv_hdr,
			   qdf_nbuf_t nbuf,
			   struct hal_rx_msdu_metadata msdu_metadata);
#endif
60 
/**
 * dp_rx_intrabss_handle_nawds_be() - Forward mcbc intrabss pkts in nawds case
 * @soc: core txrx main context
 * @ta_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intrabss forwarded
 * @tid_stats: tid_stats structure
 *
 * Return: true if it is forwarded else false
 */
bool
dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats);
74 
/**
 * dp_rx_word_mask_subscribe_be() - Update the rx pkt tlv word mask
 *				    subscription info in an HTT message word
 * @soc: Handle to DP Soc structure
 * @msg_word: HTT message word to be updated
 * @rx_filter: rx filter carrying the word mask subscription
 *	       (opaque here; see implementation — TODO confirm exact type)
 *
 * Return: None
 */
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
				  uint32_t *msg_word,
				  void *rx_filter);

/**
 * dp_rx_process_be() - Process Rx packets received from the given
 *			REO destination ring (BE targets)
 * @int_ctx: per-interrupt context
 * @hal_ring_hdl: REO destination ring handle
 * @reo_ring_num: REO destination ring number
 * @quota: maximum number of units that can be serviced in one shot
 *
 * Return: amount of work done (units processed, bounded by @quota)
 */
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota);
82 
/**
 * dp_rx_chain_msdus_be() - Function to chain all msdus of a mpdu
 *			    to pdev invalid peer list
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
			  uint8_t *rx_tlv_hdr, uint8_t mac_id);
96 
/**
 * dp_rx_desc_pool_init_be() - Initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id);

/**
 * dp_rx_desc_pool_deinit_be() - De-initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: None
 */
void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id);

/**
 * dp_wbm_get_rx_desc_from_hal_desc_be() - Get corresponding Rx Desc
 *					   address from WBM ring Desc
 * @soc: Handle to DP Soc structure
 * @ring_desc: ring descriptor structure pointer
 * @r_rx_desc: pointer to a pointer of Rx Desc
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc);

/**
 * dp_rx_desc_cookie_2_va_be() - Convert RX Desc cookie ID to VA
 * @soc: Handle to DP Soc structure
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Rx descriptor virtual address
 */
struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
					     uint32_t cookie);
143 
#if !defined(DP_FEATURE_HW_COOKIE_CONVERSION) || \
		defined(DP_HW_COOKIE_CONVERT_EXCEPTION)
/**
 * dp_rx_desc_sw_cc_check() - check if RX desc VA is got correctly,
 *			      if not, do SW cookie conversion.
 * @soc: Handle to DP Soc structure
 * @rx_buf_cookie: RX desc cookie ID
 * @r_rx_desc: double pointer for RX desc
 *
 * Return: None
 */
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
	/* VA not resolved by HW cookie conversion: fall back to the SW
	 * cookie-to-descriptor lookup.
	 */
	if (qdf_unlikely(!(*r_rx_desc))) {
		*r_rx_desc = (struct dp_rx_desc *)
				dp_cc_desc_find(soc,
						rx_buf_cookie);
	}
}
#else
/* HW cookie conversion path needs no SW fallback: no-op */
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
}
#endif /* !DP_FEATURE_HW_COOKIE_CONVERSION || DP_HW_COOKIE_CONVERT_EXCEPTION */
174 
/* No offload indication in BE peer metadata: always evaluates to 0 */
#define DP_PEER_METADATA_OFFLOAD_GET_BE(_peer_metadata)		(0)

#ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
/**
 * dp_rx_peer_metadata_peer_id_get_be() - get peer id from peer metadata
 * @soc: DP Soc handle
 * @peer_metadata: peer metadata word (htt_rx_peer_metadata_v1 layout)
 *
 * Return: peer id with the ml_peer_valid flag folded in at
 *	   soc->peer_id_shift (marks MLO peers)
 */
static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	struct htt_rx_peer_metadata_v1 *metadata =
			(struct htt_rx_peer_metadata_v1 *)&peer_metadata;
	uint16_t peer_id;

	peer_id = metadata->peer_id |
		  (metadata->ml_peer_valid << soc->peer_id_shift);

	return peer_id;
}
#else
/* Combine ml_peer_valid and peer_id field */
#define DP_BE_PEER_METADATA_PEER_ID_MASK	0x00003fff
#define DP_BE_PEER_METADATA_PEER_ID_SHIFT	0

/**
 * dp_rx_peer_metadata_peer_id_get_be() - get peer id from peer metadata
 * @soc: DP Soc handle (unused in this variant)
 * @peer_metadata: peer metadata word
 *
 * Return: peer id field (mask/shift extraction; ml_peer_valid included
 *	   in the combined field)
 */
static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
		DP_BE_PEER_METADATA_PEER_ID_SHIFT);
}
#endif
202 
/**
 * dp_rx_peer_metadata_vdev_id_get_be() - get vdev id from peer metadata
 * @soc: DP Soc handle (unused)
 * @peer_metadata: peer metadata word (htt_rx_peer_metadata_v1 layout)
 *
 * Return: vdev id
 */
static inline uint16_t
dp_rx_peer_metadata_vdev_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	struct htt_rx_peer_metadata_v1 *metadata =
			(struct htt_rx_peer_metadata_v1 *)&peer_metadata;

	return metadata->vdev_id;
}

/**
 * dp_rx_peer_metadata_lmac_id_get_be() - get lmac id from peer metadata
 * @peer_metadata: peer metadata word (v1 layout)
 *
 * Return: lmac id
 */
static inline uint8_t
dp_rx_peer_metadata_lmac_id_get_be(uint32_t peer_metadata)
{
	return HTT_RX_PEER_META_DATA_V1_LMAC_ID_GET(peer_metadata);
}
217 
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_rx_nf_process() - Near Full state handler for RX rings.
 * @int_ctx: interrupt context
 * @hal_ring_hdl: Rx ring handle
 * @reo_ring_num: RX ring number
 * @quota: Quota of work to be done
 *
 * Return: work done in the handler
 */
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota);
#else
/* Near-full IRQ handling compiled out: report no work done */
static inline
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota)
{
	return 0;
}
#endif /*WLAN_FEATURE_NEAR_FULL_IRQ */
242 
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/**
 * dp_rx_replensih_soc_get() - get the soc to replenish rx buffers from,
 *			       for the given chip id
 * @soc: DP soc handle
 * @chip_id: chip id identifying the owning soc
 *
 * NOTE(review): "replensih" is a typo for "replenish"; the name is kept
 * as-is since callers outside this file reference this symbol.
 *
 * Return: soc handle to use for replenish
 */
struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id);
#else
/* Single-chip build: always replenish from the local soc */
static inline struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	return soc;
}
#endif
253 
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_igmp_handler() - Rx handler for Mcast packets
 * @soc: Handle to DP Soc structure
 * @vdev: DP vdev handle
 * @peer: DP txrx_peer handle
 * @nbuf: nbuf to be enqueued
 *
 * Return: true when packet sent to stack, false failure
 */
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf);
268 
269 /**
270  * dp_peer_rx_reorder_queue_setup() - Send reo queue setup wmi cmd to FW
271 				      per peer type
272  * @soc: DP Soc handle
273  * @peer: dp peer to operate on
274  * @tid: TID
275  * @ba_window_size: BlockAck window size
276  *
277  * Return: 0 - success, others - failure
278  */
279 static inline
280 QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
281 					     struct dp_peer *peer,
282 					     int tid,
283 					     uint32_t ba_window_size)
284 {
285 	uint8_t i;
286 	struct dp_mld_link_peers link_peers_info;
287 	struct dp_peer *link_peer;
288 	struct dp_rx_tid *rx_tid;
289 	struct dp_soc *link_peer_soc;
290 
291 	rx_tid = &peer->rx_tid[tid];
292 	if (!rx_tid->hw_qdesc_paddr)
293 		return QDF_STATUS_E_INVAL;
294 
295 	if (!hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
296 		if (IS_MLO_DP_MLD_PEER(peer)) {
297 			/* get link peers with reference */
298 			dp_get_link_peers_ref_from_mld_peer(soc, peer,
299 							    &link_peers_info,
300 							    DP_MOD_ID_CDP);
301 			/* send WMI cmd to each link peers */
302 			for (i = 0; i < link_peers_info.num_links; i++) {
303 				link_peer = link_peers_info.link_peers[i];
304 				link_peer_soc = link_peer->vdev->pdev->soc;
305 				if (link_peer_soc->cdp_soc.ol_ops->
306 						peer_rx_reorder_queue_setup) {
307 					if (link_peer_soc->cdp_soc.ol_ops->
308 						peer_rx_reorder_queue_setup(
309 					    link_peer_soc->ctrl_psoc,
310 					    link_peer->vdev->pdev->pdev_id,
311 					    link_peer->vdev->vdev_id,
312 					    link_peer->mac_addr.raw,
313 					    rx_tid->hw_qdesc_paddr,
314 					    tid, tid,
315 					    1, ba_window_size)) {
316 						dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
317 							    link_peer_soc, tid);
318 						return QDF_STATUS_E_FAILURE;
319 					}
320 				}
321 			}
322 			/* release link peers reference */
323 			dp_release_link_peers_ref(&link_peers_info,
324 						  DP_MOD_ID_CDP);
325 		} else if (peer->peer_type == CDP_LINK_PEER_TYPE) {
326 			if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
327 				if (soc->cdp_soc.ol_ops->
328 					peer_rx_reorder_queue_setup(
329 				    soc->ctrl_psoc,
330 				    peer->vdev->pdev->pdev_id,
331 				    peer->vdev->vdev_id,
332 				    peer->mac_addr.raw,
333 				    rx_tid->hw_qdesc_paddr,
334 				    tid, tid,
335 				    1, ba_window_size)) {
336 					dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
337 						    soc, tid);
338 					return QDF_STATUS_E_FAILURE;
339 				}
340 			}
341 		} else {
342 			dp_peer_err("invalid peer type %d", peer->peer_type);
343 			return QDF_STATUS_E_FAILURE;
344 		}
345 	} else {
346 		/* Some BE targets dont require WMI and use shared
347 		 * table managed by host for storing Reo queue ref structs
348 		 */
349 		if (IS_MLO_DP_LINK_PEER(peer) ||
350 		    peer->peer_id == HTT_INVALID_PEER) {
351 			/* Return if this is for MLD link peer and table
352 			 * is not used in MLD link peer case as MLD peer's
353 			 * qref is written to LUT in peer setup or peer map.
354 			 * At this point peer setup for link peer is called
355 			 * before peer map, hence peer id is not assigned.
356 			 * This could happen if peer_setup is called before
357 			 * host receives HTT peer map. In this case return
358 			 * success with no op and let peer map handle
359 			 * writing the reo_qref to LUT.
360 			 */
361 			dp_peer_debug("Invalid peer id for dp_peer:%pK", peer);
362 			return QDF_STATUS_SUCCESS;
363 		}
364 
365 		hal_reo_shared_qaddr_write(soc->hal_soc,
366 					   peer->peer_id,
367 					   tid, peer->rx_tid[tid].hw_qdesc_paddr);
368 	}
369 	return QDF_STATUS_SUCCESS;
370 }
371 #else
372 static inline
373 QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
374 					     struct dp_peer *peer,
375 					     int tid,
376 					     uint32_t ba_window_size)
377 {
378 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
379 
380 	if (!rx_tid->hw_qdesc_paddr)
381 		return QDF_STATUS_E_INVAL;
382 
383 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
384 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
385 		    soc->ctrl_psoc,
386 		    peer->vdev->pdev->pdev_id,
387 		    peer->vdev->vdev_id,
388 		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
389 		    1, ba_window_size)) {
390 			dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
391 				    soc, tid);
392 			return QDF_STATUS_E_FAILURE;
393 		}
394 	}
395 
396 	return QDF_STATUS_SUCCESS;
397 }
398 #endif /* WLAN_FEATURE_11BE_MLO */
399 
#ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
/**
 * dp_rx_prefetch_nbuf_data_be() - Prefetch the next nbuf and its data
 * @nbuf: current nbuf being processed (unused)
 * @next: next nbuf in the list, to be prefetched
 *
 * Return: None
 */
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
	if (next) {
		/* prefetch skb->next and first few bytes of skb->cb */
		qdf_prefetch(next);
		/* skb->cb spread across 2 cache lines hence below prefetch */
		qdf_prefetch(&next->_skb_refdst);
		qdf_prefetch(&next->len);
		qdf_prefetch(&next->protocol);
		qdf_prefetch(next->data);
		qdf_prefetch(next->data + 64);
		qdf_prefetch(next->data + 128);
	}
}
#else
/* nbuf/data prefetch compiled out: no-op */
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
}
#endif
422 
#ifdef QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH
/**
 * dp_rx_va_prefetch() - prefetch the SW desc whose VA is carried in a
 *			 REO destination ring descriptor
 * @last_prefetched_hw_desc: HW descriptor holding the SW desc VA
 *
 * Return: prefetched Rx descriptor virtual address
 */
static inline
void *dp_rx_va_prefetch(void *last_prefetched_hw_desc)
{
	void *prefetch_desc;

	prefetch_desc = (void *)hal_rx_get_reo_desc_va(last_prefetched_hw_desc);
	qdf_prefetch(prefetch_desc);
	return prefetch_desc;
}
440 
/**
 * dp_rx_prefetch_hw_sw_nbuf_32_byte_desc() - function to prefetch HW and SW
 *					      desc (32-byte HW desc variant)
 * @soc: Handle to DP Soc structure (unused)
 * @hal_soc: Handle to HAL Soc structure
 * @num_entries: valid number of HW descriptors
 * @hal_ring_hdl: Destination ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: input & output param of last prefetch SW desc
 *
 * Return: None
 */
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
			       hal_soc_handle_t hal_soc,
			       uint32_t num_entries,
			       hal_ring_handle_t hal_ring_hdl,
			       hal_ring_desc_t *last_prefetched_hw_desc,
			       struct dp_rx_desc **last_prefetched_sw_desc)
{
	if (*last_prefetched_sw_desc) {
		/* prefetch the first two cache lines of the nbuf */
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_entries) {
		*last_prefetched_sw_desc =
			dp_rx_va_prefetch(*last_prefetched_hw_desc);

		/* NOTE(review): 32-byte descs pack two per 64-byte cache
		 * line; the alignment test presumably picks between staying
		 * in the cached line and prefetching the next one — confirm
		 * against the hal_srng implementation.
		 */
		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
			*last_prefetched_hw_desc =
				hal_srng_dst_prefetch_next_cached_desc(hal_soc,
					  hal_ring_hdl,
					  (uint8_t *)*last_prefetched_hw_desc);
		else
			*last_prefetched_hw_desc =
				hal_srng_dst_get_next_32_byte_desc(hal_soc,
				   hal_ring_hdl,
				   (uint8_t *)*last_prefetched_hw_desc);
	}
}
#else
/* HW/SW desc prefetch compiled out: no-op */
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
			       hal_soc_handle_t hal_soc,
			       uint32_t num_entries,
			       hal_ring_handle_t hal_ring_hdl,
			       hal_ring_desc_t *last_prefetched_hw_desc,
			       struct dp_rx_desc **last_prefetched_sw_desc)
{
}
#endif
#ifdef CONFIG_WORD_BASED_TLV
/**
 * dp_rx_get_reo_qdesc_addr_be() - API to get qdesc address of reo
 *				   entrance ring desc
 * @hal_soc: Handle to HAL Soc structure
 * @dst_ring_desc: reo dest ring descriptor (used for Lithium DP)
 * @buf: pointer to the start of RX PKT TLV headers
 * @txrx_peer: pointer to txrx_peer
 * @tid: tid value
 *
 * Return: qdesc address in reo destination ring buffer
 */
static inline
uint64_t dp_rx_get_reo_qdesc_addr_be(hal_soc_handle_t hal_soc,
				     uint8_t *dst_ring_desc,
				     uint8_t *buf,
				     struct dp_txrx_peer *txrx_peer,
				     unsigned int tid)
{
	struct dp_peer *peer = NULL;
	uint64_t qdesc_addr = 0;

	if (hal_reo_shared_qaddr_is_enable(hal_soc)) {
		/* Shared qaddr table: the peer id stands in for the qdesc
		 * address (presumably resolved via the host-managed LUT —
		 * TODO confirm against hal_reo_shared_qaddr_write users).
		 */
		qdesc_addr = (uint64_t)txrx_peer->peer_id;
	} else {
		peer = dp_peer_get_ref_by_id(txrx_peer->vdev->pdev->soc,
					     txrx_peer->peer_id,
					     DP_MOD_ID_CONFIG);
		/* 0 indicates failure: peer ref could not be taken */
		if (!peer)
			return 0;

		qdesc_addr = (uint64_t)peer->rx_tid[tid].hw_qdesc_paddr;
		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
	}
	return qdesc_addr;
}
#else
/**
 * dp_rx_get_reo_qdesc_addr_be() - API to get qdesc address of reo
 *				   entrance ring desc
 * @hal_soc: Handle to HAL Soc structure
 * @dst_ring_desc: reo dest ring descriptor
 * @buf: pointer to the start of RX PKT TLV headers
 * @txrx_peer: pointer to txrx_peer (unused in this variant)
 * @tid: tid value (unused in this variant)
 *
 * Return: qdesc address read from the ring descriptor / TLVs
 */
static inline
uint64_t dp_rx_get_reo_qdesc_addr_be(hal_soc_handle_t hal_soc,
				     uint8_t *dst_ring_desc,
				     uint8_t *buf,
				     struct dp_txrx_peer *txrx_peer,
				     unsigned int tid)
{
	return hal_rx_get_qdesc_addr(hal_soc, dst_ring_desc, buf);
}
#endif
539 #endif
540