/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_BE_RX_H_
#define _DP_BE_RX_H_

#include <dp_types.h>
#include "dp_be.h"
#include "dp_peer.h"
#include <dp_rx.h>
#include "hal_be_rx.h"

/**
 * struct dp_be_intrabss_params - params for intra-BSS forwarding
 * @dest_soc: dest soc to forward the packet to
 * @tx_vdev_id: vdev id retrieved from dest peer
 */
struct dp_be_intrabss_params {
	struct dp_soc *dest_soc;
	uint8_t tx_vdev_id;
};

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_intrabss_fwd_be() - API for intra-BSS forwarding. Forwarding is
 *			     not allowed for an EAPOL packet whose DA does
 *			     not match the vdev MAC address.
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
			   struct dp_txrx_peer *ta_txrx_peer,
			   uint8_t *rx_tlv_hdr,
			   qdf_nbuf_t nbuf,
			   struct hal_rx_msdu_metadata msdu_metadata);
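
/*
 * A minimal usage sketch (illustrative only; the caller and locals are
 * assumptions): invoked per MSDU from the BE Rx path, and the nbuf is
 * only handed to the stack when forwarding did not consume it:
 *
 *	if (!dp_rx_intrabss_fwd_be(soc, ta_txrx_peer, rx_tlv_hdr, nbuf,
 *				   msdu_metadata))
 *		dp_rx_deliver_to_stack(soc, vdev, ta_txrx_peer, nbuf, NULL);
 */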
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_rx_intrabss_mcast_handler_be() - intrabss mcast handler
 * @soc: core txrx main context
 * @ta_txrx_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intrabss forwarded
 * @tid_stats: tid_stats structure
 *
 * Return: true if it is forwarded else false
 */
bool
dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
				struct dp_txrx_peer *ta_txrx_peer,
				qdf_nbuf_t nbuf_copy,
				struct cdp_tid_rx_stats *tid_stats);

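/**
 * dp_rx_word_mask_subscribe_be() - word mask subscription for the rx
 *				    ring filter
 * @soc: core txrx main context
 * @msg_word: HTT message word to update
 * @rx_filter: rx ring filter to take the word mask subscription from
 *
 * Return: None
 */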
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
				  uint32_t *msg_word,
				  void *rx_filter);

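/**
 * dp_rx_process_be() - BE-specific Rx processing of frames reaped from
 *			a REO destination ring
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: HAL ring handle of the REO destination ring
 * @reo_ring_num: REO destination ring number
 * @quota: number of frames that can be processed in one shot
 *
 * Return: number of frames processed
 */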
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota);

/**
 * dp_rx_chain_msdus_be() - Function to chain all msdus of a mpdu
 *			    to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
			  uint8_t *rx_tlv_hdr, uint8_t mac_id);

/**
 * dp_rx_desc_pool_init_be() - Initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id);

/**
 * dp_rx_desc_pool_deinit_be() - De-initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: None
 */
void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id);

/**
 * dp_wbm_get_rx_desc_from_hal_desc_be() - Get corresponding Rx Desc
 *					   address from WBM ring Desc
 * @soc: Handle to DP Soc structure
 * @ring_desc: ring descriptor structure pointer
 * @r_rx_desc: pointer to a pointer of Rx Desc
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc);

/**
 * dp_rx_desc_cookie_2_va_be() - Convert RX Desc cookie ID to VA
 * @soc: Handle to DP Soc structure
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Rx descriptor virtual address
 */
struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
					     uint32_t cookie);

#if !defined(DP_FEATURE_HW_COOKIE_CONVERSION) || \
		defined(DP_HW_COOKIE_CONVERT_EXCEPTION)
/**
 * dp_rx_desc_sw_cc_check() - check if the Rx desc VA was obtained
 *			      correctly; if not, do SW cookie conversion
 * @soc: Handle to DP Soc structure
 * @rx_buf_cookie: RX desc cookie ID
 * @r_rx_desc: double pointer for RX desc
 *
 * Return: None
 */
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
	if (qdf_unlikely(!(*r_rx_desc))) {
		*r_rx_desc = (struct dp_rx_desc *)
				dp_cc_desc_find(soc,
						rx_buf_cookie);
	}
}
#else
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
}
#endif /* !DP_FEATURE_HW_COOKIE_CONVERSION || DP_HW_COOKIE_CONVERT_EXCEPTION */
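
/*
 * A minimal usage sketch (illustrative only; the local variables are
 * assumptions): in the reap loop the HW cookie-converted VA is tried
 * first and the SW lookup fills it in on exception:
 *
 *	rx_desc = (struct dp_rx_desc *)hal_rx_get_reo_desc_va(ring_desc);
 *	dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);
 */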

#define DP_PEER_METADATA_OFFLOAD_GET_BE(_peer_metadata)		(0)

#ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	struct htt_rx_peer_metadata_v1 *metadata =
			(struct htt_rx_peer_metadata_v1 *)&peer_metadata;
	uint16_t peer_id;

	peer_id = metadata->peer_id |
		  (metadata->ml_peer_valid << soc->peer_id_shift);

	return peer_id;
}
#else
/* Combine ml_peer_valid and peer_id field */
#define DP_BE_PEER_METADATA_PEER_ID_MASK	0x00003fff
#define DP_BE_PEER_METADATA_PEER_ID_SHIFT	0

static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
		DP_BE_PEER_METADATA_PEER_ID_SHIFT);
}
#endif /* DP_USE_REDUCED_PEER_ID_FIELD_WIDTH */
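
/*
 * Worked example (shift value hypothetical): with soc->peer_id_shift
 * of 13, a metadata peer_id of 5 with ml_peer_valid set yields
 * 5 | (1 << 13) == 0x2005. The non-reduced variant reaches the same
 * value by masking bits [13:0], where the ML-valid bit and peer id
 * are already combined.
 */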

static inline uint16_t
dp_rx_peer_metadata_vdev_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	struct htt_rx_peer_metadata_v1 *metadata =
			(struct htt_rx_peer_metadata_v1 *)&peer_metadata;

	return metadata->vdev_id;
}

static inline uint8_t
dp_rx_peer_metadata_lmac_id_get_be(uint32_t peer_metadata)
{
	return HTT_RX_PEER_META_DATA_V1_LMAC_ID_GET(peer_metadata);
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_rx_nf_process() - Near Full state handler for RX rings.
 * @int_ctx: interrupt context
 * @hal_ring_hdl: Rx ring handle
 * @reo_ring_num: RX ring number
 * @quota: Quota of work to be done
 *
 * Return: work done in the handler
 */
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota);
#else
static inline
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota)
{
	return 0;
}
#endif /* WLAN_FEATURE_NEAR_FULL_IRQ */
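
/*
 * A minimal usage sketch (illustrative; the near-full check and the
 * surrounding handler are assumptions): when an Rx ring signals the
 * near-full condition, the handler drains it with a dedicated quota:
 *
 *	if (near_full)
 *		work_done = dp_rx_nf_process(int_ctx, hal_ring_hdl,
 *					     reo_ring_num, remaining_quota);
 */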

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id);

struct dp_soc *
dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id);

uint8_t dp_soc_get_num_soc_be(struct dp_soc *soc);
#else
static inline struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	return soc;
}

static inline uint8_t
dp_soc_get_num_soc_be(struct dp_soc *soc)
{
	return 1;
}
#endif /* WLAN_FEATURE_11BE_MLO && WLAN_MLO_MULTI_CHIP */
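
/*
 * Usage sketch (illustrative; the replenish call and its arguments are
 * elided): on MLO multi-chip systems a reaped buffer may belong to a
 * partner chip, so replenish targets the soc selected by chip id:
 *
 *	replenish_soc = dp_rx_replensih_soc_get(soc, chip_id);
 *	dp_rx_buffers_replenish(replenish_soc, ...);
 */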

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_igmp_handler() - Rx handler for Mcast packets
 * @soc: Handle to DP Soc structure
 * @vdev: DP vdev handle
 * @peer: DP peer handle
 * @nbuf: nbuf to be enqueued
 *
 * Return: true when the packet is sent to the stack, false on failure
 */
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf);

/**
 * dp_peer_rx_reorder_queue_setup_be() - Send reo queue setup wmi cmd to FW
 *					 per peer type
 * @soc: DP Soc handle
 * @peer: dp peer to operate on
 * @tid: TID
 * @ba_window_size: BlockAck window size
 *
 * Return: 0 - success, others - failure
 */
static inline
QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
					     struct dp_peer *peer,
					     int tid,
					     uint32_t ba_window_size)
{
	uint8_t i;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *link_peer;
	struct dp_rx_tid *rx_tid;
	struct dp_soc *link_peer_soc;

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->hw_qdesc_paddr)
		return QDF_STATUS_E_INVAL;

	if (!hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
		if (IS_MLO_DP_MLD_PEER(peer)) {
			/* get link peers with reference */
			dp_get_link_peers_ref_from_mld_peer(soc, peer,
							    &link_peers_info,
							    DP_MOD_ID_CDP);
			/* send WMI cmd to each link peer */
			for (i = 0; i < link_peers_info.num_links; i++) {
				link_peer = link_peers_info.link_peers[i];
				link_peer_soc = link_peer->vdev->pdev->soc;
				if (link_peer_soc->cdp_soc.ol_ops->
						peer_rx_reorder_queue_setup) {
					if (link_peer_soc->cdp_soc.ol_ops->
						peer_rx_reorder_queue_setup(
					    link_peer_soc->ctrl_psoc,
					    link_peer->vdev->pdev->pdev_id,
					    link_peer->vdev->vdev_id,
					    link_peer->mac_addr.raw,
					    rx_tid->hw_qdesc_paddr,
					    tid, tid,
					    1, ba_window_size)) {
						dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
							    link_peer_soc, tid);
						/* drop the link peer
						 * references taken above
						 * before bailing out
						 */
						dp_release_link_peers_ref(
							&link_peers_info,
							DP_MOD_ID_CDP);
						return QDF_STATUS_E_FAILURE;
					}
				}
			}
			/* release link peers reference */
			dp_release_link_peers_ref(&link_peers_info,
						  DP_MOD_ID_CDP);
		} else if (peer->peer_type == CDP_LINK_PEER_TYPE) {
			if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
				if (soc->cdp_soc.ol_ops->
					peer_rx_reorder_queue_setup(
				    soc->ctrl_psoc,
				    peer->vdev->pdev->pdev_id,
				    peer->vdev->vdev_id,
				    peer->mac_addr.raw,
				    rx_tid->hw_qdesc_paddr,
				    tid, tid,
				    1, ba_window_size)) {
					dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
						    soc, tid);
					return QDF_STATUS_E_FAILURE;
				}
			}
		} else {
			dp_peer_err("invalid peer type %d", peer->peer_type);
			return QDF_STATUS_E_FAILURE;
		}
	} else {
		/* Some BE targets don't require WMI and use a shared
		 * table managed by the host for storing REO queue ref
		 * structs.
		 */
		if (IS_MLO_DP_LINK_PEER(peer) ||
		    peer->peer_id == HTT_INVALID_PEER) {
			/* The table is not used for MLD link peers, as an MLD
			 * peer's qref is written to the LUT in peer setup or
			 * peer map. At this point peer setup for a link peer
			 * is called before peer map, hence the peer id is not
			 * yet assigned. This can happen if peer_setup is
			 * called before the host receives the HTT peer map.
			 * In that case return success as a no-op and let peer
			 * map handle writing the reo_qref to the LUT.
			 */
			dp_peer_debug("Invalid peer id for dp_peer:%pK", peer);
			return QDF_STATUS_SUCCESS;
		}

		hal_reo_shared_qaddr_write(soc->hal_soc,
					   peer->peer_id,
					   tid, peer->rx_tid[tid].hw_qdesc_paddr);
	}
	return QDF_STATUS_SUCCESS;
}
#else
static inline
QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
					     struct dp_peer *peer,
					     int tid,
					     uint32_t ba_window_size)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	if (!rx_tid->hw_qdesc_paddr)
		return QDF_STATUS_E_INVAL;

	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
		    soc->ctrl_psoc,
		    peer->vdev->pdev->pdev_id,
		    peer->vdev->vdev_id,
		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
		    1, ba_window_size)) {
			dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
				    soc, tid);
			return QDF_STATUS_E_FAILURE;
		}
	}

	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_FEATURE_11BE_MLO */
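
/*
 * A minimal usage sketch (illustrative; the calling context is an
 * assumption): invoked from rx TID setup, e.g. on ADDBA, for both the
 * MLO and non-MLO variants above:
 *
 *	if (dp_peer_rx_reorder_queue_setup_be(soc, peer, tid,
 *					      ba_window_size))
 *		dp_peer_err("reo queue setup failed for tid %d", tid);
 */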

#ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
	if (next) {
		/* prefetch skb->next and first few bytes of skb->cb */
		qdf_prefetch(next);
		/* skb->cb is spread across 2 cache lines, hence the
		 * prefetches below
		 */
		qdf_prefetch(&next->_skb_refdst);
		qdf_prefetch(&next->len);
		qdf_prefetch(&next->protocol);
		qdf_prefetch(next->data);
		qdf_prefetch(next->data + 64);
		qdf_prefetch(next->data + 128);
	}
}
#else
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
}
#endif /* QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH */
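
/*
 * Usage sketch (illustrative): called from the Rx per-MSDU loop with
 * the next nbuf in the list, so its struct and payload cache lines are
 * warm by the time it is processed:
 *
 *	next = nbuf->next;
 *	dp_rx_prefetch_nbuf_data_be(nbuf, next);
 */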

#ifdef QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH
/**
 * dp_rx_va_prefetch() - prefetch the SW Rx descriptor whose VA is
 *			 carried by a HW descriptor
 * @last_prefetched_hw_desc: last prefetched HW descriptor
 *
 * Return: prefetched Rx descriptor virtual address
 */
static inline
void *dp_rx_va_prefetch(void *last_prefetched_hw_desc)
{
	void *prefetch_desc;

	prefetch_desc = (void *)hal_rx_get_reo_desc_va(last_prefetched_hw_desc);
	qdf_prefetch(prefetch_desc);
	return prefetch_desc;
}

/**
 * dp_rx_prefetch_hw_sw_nbuf_32_byte_desc() - function to prefetch HW and
 *					      SW desc
 * @soc: Handle to DP Soc structure
 * @hal_soc: Handle to HAL Soc structure
 * @num_entries: valid number of HW descriptors
 * @hal_ring_hdl: Destination ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: input & output param of last prefetched SW desc
 *
 * Return: None
 */
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
			       hal_soc_handle_t hal_soc,
			       uint32_t num_entries,
			       hal_ring_handle_t hal_ring_hdl,
			       hal_ring_desc_t *last_prefetched_hw_desc,
			       struct dp_rx_desc **last_prefetched_sw_desc)
{
	if (*last_prefetched_sw_desc) {
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_entries) {
		*last_prefetched_sw_desc =
			dp_rx_va_prefetch(*last_prefetched_hw_desc);

		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
			*last_prefetched_hw_desc =
				hal_srng_dst_prefetch_next_cached_desc(hal_soc,
					  hal_ring_hdl,
					  (uint8_t *)*last_prefetched_hw_desc);
		else
			*last_prefetched_hw_desc =
				hal_srng_dst_get_next_32_byte_desc(hal_soc,
				   hal_ring_hdl,
				   (uint8_t *)*last_prefetched_hw_desc);
	}
}
#else
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
			       hal_soc_handle_t hal_soc,
			       uint32_t num_entries,
			       hal_ring_handle_t hal_ring_hdl,
			       hal_ring_desc_t *last_prefetched_hw_desc,
			       struct dp_rx_desc **last_prefetched_sw_desc)
{
}
#endif /* QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH */
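
/*
 * A minimal usage sketch (illustrative; the loop variables are
 * assumptions): in the REO2SW reap loop, the descriptors for the next
 * iteration are prefetched while the current entry is processed:
 *
 *	dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(soc, hal_soc, num_pending,
 *					       hal_ring_hdl,
 *					       &last_prefetched_hw_desc,
 *					       &last_prefetched_sw_desc);
 */
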
#ifdef CONFIG_WORD_BASED_TLV
/**
 * dp_rx_get_reo_qdesc_addr_be() - API to get the qdesc address of the
 *				   reo entrance ring desc
 * @hal_soc: Handle to HAL Soc structure
 * @dst_ring_desc: reo dest ring descriptor (used for Lithium DP)
 * @buf: pointer to the start of RX PKT TLV headers
 * @txrx_peer: pointer to txrx_peer
 * @tid: tid value
 *
 * Return: qdesc address in reo destination ring buffer
 */
static inline
uint64_t dp_rx_get_reo_qdesc_addr_be(hal_soc_handle_t hal_soc,
				     uint8_t *dst_ring_desc,
				     uint8_t *buf,
				     struct dp_txrx_peer *txrx_peer,
				     unsigned int tid)
{
	struct dp_peer *peer = NULL;
	uint64_t qdesc_addr = 0;

	if (hal_reo_shared_qaddr_is_enable(hal_soc)) {
		qdesc_addr = (uint64_t)txrx_peer->peer_id;
	} else {
		peer = dp_peer_get_ref_by_id(txrx_peer->vdev->pdev->soc,
					     txrx_peer->peer_id,
					     DP_MOD_ID_CONFIG);
		if (!peer)
			return 0;

		qdesc_addr = (uint64_t)peer->rx_tid[tid].hw_qdesc_paddr;
		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
	}
	return qdesc_addr;
}
#else
static inline
uint64_t dp_rx_get_reo_qdesc_addr_be(hal_soc_handle_t hal_soc,
				     uint8_t *dst_ring_desc,
				     uint8_t *buf,
				     struct dp_txrx_peer *txrx_peer,
				     unsigned int tid)
{
	return hal_rx_get_qdesc_addr(hal_soc, dst_ring_desc, buf);
}
#endif /* CONFIG_WORD_BASED_TLV */
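
/*
 * Usage sketch (illustrative; the surrounding flow is an assumption):
 * used when the Rx path needs the REO queue address, or the peer id
 * when the shared qref table is enabled, for a given peer/tid:
 *
 *	qdesc_addr = dp_rx_get_reo_qdesc_addr_be(hal_soc, dst_ring_desc,
 *						 rx_tlv_hdr, txrx_peer, tid);
 */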

/**
 * dp_rx_wbm_err_reap_desc_be() - Function to reap and replenish
 *                                WBM RX Error descriptors
 *
 * @int_ctx: pointer to DP interrupt context
 * @soc: core DP main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, to be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 * @rx_bufs_used: No. of descriptors reaped
 *
 * This function implements the core Rx functionality: it reaps and
 * replenishes the Rx error ring descriptors and creates an nbuf list
 * out of them. It also reads the WBM error information from the
 * descriptors and updates the nbuf TLV area.
 *
 * Return: qdf_nbuf_t: head pointer to the nbuf list created
 */
qdf_nbuf_t
dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota,
			   uint32_t *rx_bufs_used);
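
/*
 * Usage sketch (illustrative; the list walk is elided): the WBM error
 * handler reaps the ring into an nbuf list and then processes each
 * buffer according to the error info recorded in its TLV area:
 *
 *	nbuf_head = dp_rx_wbm_err_reap_desc_be(int_ctx, soc, hal_ring_hdl,
 *					       quota, &rx_bufs_used);
 *	while (nbuf_head) { ... }
 */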

/**
 * dp_rx_null_q_desc_handle_be() - Function to handle NULL Queue
 *                                 descriptor violation on either a
 *                                 REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @txrx_peer: txrx peer handle
 * @is_reo_exception: flag to check if the error is from REO or WBM
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QoS-enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. It
 * may also happen for MC/BC frames if they are not routed to the
 * non-QoS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination ring and a WBM
 * release ring.
 *
 * Return: QDF_STATUS_SUCCESS if the nbuf is handled successfully; an
 *         error QDF status code if the nbuf could not be handled or
 *         was dropped.
 */
QDF_STATUS
dp_rx_null_q_desc_handle_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
			    uint8_t *rx_tlv_hdr, uint8_t pool_id,
			    struct dp_txrx_peer *txrx_peer,
			    bool is_reo_exception);
#endif /* _DP_BE_RX_H_ */