xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c (revision 70a19e16789e308182f63b15c75decec7bf0b342)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #include "dp_types.h"
22 #include "dp_rx.h"
23 #include "dp_tx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 #include "hal_api.h"
27 #include "qdf_trace.h"
28 #include "qdf_nbuf.h"
29 #include "dp_rx_defrag.h"
30 #include "dp_ipa.h"
31 #ifdef WIFI_MONITOR_SUPPORT
32 #include "dp_htt.h"
33 #include <dp_mon.h>
34 #endif
35 #ifdef FEATURE_WDS
36 #include "dp_txrx_wds.h"
37 #endif
38 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
39 #include "qdf_net_types.h"
40 #include "dp_rx_buffer_pool.h"
41 
42 #define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
43 #define dp_rx_err_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
44 #define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
45 #define dp_rx_err_info(params...) \
46 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
47 #define dp_rx_err_info_rl(params...) \
48 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
49 #define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params)
50 
51 #ifndef QCA_HOST_MODE_WIFI_DISABLED
52 
53 
54 /* Max regular Rx packet routing error */
55 #define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20
56 #define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10
57 #define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* micro seconds */
58 
59 #ifdef FEATURE_MEC
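/**
 * dp_rx_mcast_echo_check() - check if a received mcast/bcast frame on a STA
 *			      vdev is an echo of our own transmission
 * @soc: core DP main context
 * @txrx_peer: txrx peer handle
 * @rx_tlv_hdr: start of rx tlv header
 * @nbuf: pointer to the received frame
 *
 * Return: true if the frame is a looped-back multicast echo and should be
 *	   dropped, false otherwise
 */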
60 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
61 			    struct dp_txrx_peer *txrx_peer,
62 			    uint8_t *rx_tlv_hdr,
63 			    qdf_nbuf_t nbuf)
64 {
65 	struct dp_vdev *vdev = txrx_peer->vdev;
66 	struct dp_pdev *pdev = vdev->pdev;
67 	struct dp_mec_entry *mecentry = NULL;
68 	struct dp_ast_entry *ase = NULL;
69 	uint16_t sa_idx = 0;
70 	uint8_t *data;
71 	/*
72 	 * Multicast Echo Check is required only if the vdev is a STA and
73 	 * the received pkt is a multicast/broadcast pkt; otherwise
74 	 * skip the MEC check.
75 	 */
76 	if (vdev->opmode != wlan_op_mode_sta)
77 		return false;
78 	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
79 		return false;
80 
81 	data = qdf_nbuf_data(nbuf);
82 
83 	/*
84 	 * If the received pkt's src mac addr matches the vdev
85 	 * mac address, then drop the pkt as it is looped back
86 	 */
87 	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
88 			  vdev->mac_addr.raw,
89 			  QDF_MAC_ADDR_SIZE)))
90 		return true;
91 
92 	/*
93 	 * In case of qwrap isolation mode, do not drop loopback packets.
94 	 * In isolation mode, all packets from the wired stations need to go
95 	 * to rootap and loop back to reach the wireless stations and
96 	 * vice-versa.
97 	 */
98 	if (qdf_unlikely(vdev->isolation_vdev))
99 		return false;
100 
101 	/*
102 	 * If the received pkt's src mac addr matches the MAC addr
103 	 * of a wired PC behind the STA, or of a wireless STA
104 	 * behind the Repeater, then drop the pkt as it is
105 	 * looped back
106 	 */
107 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
108 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
109 
110 		if ((sa_idx < 0) ||
111 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
112 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
113 				  "invalid sa_idx: %d", sa_idx);
114 			qdf_assert_always(0);
115 		}
116 
117 		qdf_spin_lock_bh(&soc->ast_lock);
118 		ase = soc->ast_table[sa_idx];
119 
120 		/*
121 		 * This check is not strictly needed since MEC does not depend on
122 		 * AST, but without it SON has issues in the dual backhaul
123 		 * scenario. In APS SON mode a client connected to the RE on 2G
124 		 * sends multicast packets; the RE sends them to the CAP over the
125 		 * 5G backhaul and the CAP loops them back to the RE on 2G.
126 		 * On receiving them on the 2G STA vap, we assume the client has
127 		 * roamed and kick out the client.
128 		 */
129 		if (ase && (ase->peer_id != txrx_peer->peer_id)) {
130 			qdf_spin_unlock_bh(&soc->ast_lock);
131 			goto drop;
132 		}
133 
134 		qdf_spin_unlock_bh(&soc->ast_lock);
135 	}
136 
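	/*
	 * Finally, look up the source MAC address in the per-pdev MEC table;
	 * a hit means the frame is treated as a looped-back echo of our own
	 * multicast transmission and is dropped, a miss means it is accepted.
	 */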
137 	qdf_spin_lock_bh(&soc->mec_lock);
138 
139 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
140 						   &data[QDF_MAC_ADDR_SIZE]);
141 	if (!mecentry) {
142 		qdf_spin_unlock_bh(&soc->mec_lock);
143 		return false;
144 	}
145 
146 	qdf_spin_unlock_bh(&soc->mec_lock);
147 
148 drop:
149 	dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT,
150 		       soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));
151 
152 	return true;
153 }
154 #endif
155 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
156 
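/**
 * dp_rx_link_desc_refill_duplicate_check() - check if the same link
 *			descriptor address is being processed back to back
 * @soc: core DP main context
 * @buf_info: buffer info holding the last recorded link descriptor address
 * @ring_buf_info: buffer address info of the current ring descriptor
 *
 * Compares the physical address of the current link descriptor against the
 * previously recorded one; on a match the dup_refill_link_desc stat is
 * incremented. The recorded address is then updated to the current one.
 *
 * Return: None
 */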
157 void dp_rx_link_desc_refill_duplicate_check(
158 				struct dp_soc *soc,
159 				struct hal_buf_info *buf_info,
160 				hal_buff_addrinfo_t ring_buf_info)
161 {
162 	struct hal_buf_info current_link_desc_buf_info = { 0 };
163 
164 	/* do duplicate link desc address check */
165 	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
166 					  &current_link_desc_buf_info);
167 
168 	/*
169 	 * TODO - Check if the hal soc api call can be removed
170 	 * since the cookie is just used for print.
171 	 * buffer_addr_info is the first element of ring_desc
172 	 */
173 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
174 				  (uint32_t *)ring_buf_info,
175 				  &current_link_desc_buf_info);
176 
177 	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
178 			 buf_info->paddr)) {
179 		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
180 			   current_link_desc_buf_info.paddr,
181 			   current_link_desc_buf_info.sw_cookie);
182 		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
183 	}
184 	*buf_info = current_link_desc_buf_info;
185 }
186 
187 /**
188  * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
189  *					HW (WBM) by address
190  *
191  * @soc: core DP main context
192  * @link_desc_addr: link descriptor addr
193  * @bm_action: buffer manager action to be performed on the link descriptor
194  * Return: QDF_STATUS
195  */
196 QDF_STATUS
197 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
198 			       hal_buff_addrinfo_t link_desc_addr,
199 			       uint8_t bm_action)
200 {
201 	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
202 	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
203 	hal_soc_handle_t hal_soc = soc->hal_soc;
204 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
205 	void *src_srng_desc;
206 
207 	if (!wbm_rel_srng) {
208 		dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc);
209 		return status;
210 	}
211 
212 	/* do duplicate link desc address check */
213 	dp_rx_link_desc_refill_duplicate_check(
214 				soc,
215 				&soc->last_op_info.wbm_rel_link_desc,
216 				link_desc_addr);
217 
218 	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
219 
220 		/* TODO */
221 		/*
222 		 * Need API to convert from hal_ring pointer to
223 		 * Ring Type / Ring Id combo
224 		 */
225 		dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK",
226 			      soc, wbm_rel_srng);
227 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
228 		goto done;
229 	}
230 	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
231 	if (qdf_likely(src_srng_desc)) {
232 		/* Return link descriptor through WBM ring (SW2WBM)*/
233 		hal_rx_msdu_link_desc_set(hal_soc,
234 				src_srng_desc, link_desc_addr, bm_action);
235 		status = QDF_STATUS_SUCCESS;
236 	} else {
237 		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
238 
239 		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);
240 
241 		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
242 			   srng->ring_id,
243 			   soc->stats.rx.err.hal_ring_access_full_fail);
244 		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
245 			   *srng->u.src_ring.hp_addr,
246 			   srng->u.src_ring.reap_hp,
247 			   *srng->u.src_ring.tp_addr,
248 			   srng->u.src_ring.cached_tp);
249 		QDF_BUG(0);
250 	}
251 done:
252 	hal_srng_access_end(hal_soc, wbm_rel_srng);
253 	return status;
254 
255 }
256 
257 qdf_export_symbol(dp_rx_link_desc_return_by_addr);
258 
259 /**
260  * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
261  *				(WBM), following error handling
262  *
263  * @soc: core DP main context
264  * @ring_desc: opaque pointer to the REO error ring descriptor
265  * @bm_action: buffer manager action to be performed on the link descriptor
266  * Return: QDF_STATUS
267  */
268 QDF_STATUS
269 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
270 		       uint8_t bm_action)
271 {
272 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
273 
274 	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
275 }
276 
277 #ifndef QCA_HOST_MODE_WIFI_DISABLED
278 
279 /**
280  * dp_rx_msdus_drop() - Drops all MSDUs of an MPDU
281  *
282  * @soc: core txrx main context
283  * @ring_desc: opaque pointer to the REO error ring descriptor
284  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
285  * @mac_id: pointer filled with the mac id (rx descriptor pool id) of the
286  *	    dropped buffers
287  * @quota: No. of units (packets) that can be serviced in one shot.
288  *
289  * This function is used to drop all MSDUs in an MPDU
290  *
291  * Return: uint32_t: No. of elements processed
292  */
293 static uint32_t
294 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
295 		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
296 		 uint8_t *mac_id,
297 		 uint32_t quota)
298 {
299 	uint32_t rx_bufs_used = 0;
300 	void *link_desc_va;
301 	struct hal_buf_info buf_info;
302 	struct dp_pdev *pdev;
303 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
304 	int i;
305 	uint8_t *rx_tlv_hdr;
306 	uint32_t tid;
307 	struct rx_desc_pool *rx_desc_pool;
308 	struct dp_rx_desc *rx_desc;
309 	/* First field in REO Dst ring Desc is buffer_addr_info */
310 	void *buf_addr_info = ring_desc;
311 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
312 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
313 
314 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info);
315 
316 	/* buffer_addr_info is the first element of ring_desc */
317 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
318 				  (uint32_t *)ring_desc,
319 				  &buf_info);
320 
321 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
322 	if (!link_desc_va) {
323 		dp_rx_err_debug("link desc va is null, soc %pK", soc);
324 		return rx_bufs_used;
325 	}
326 
327 more_msdu_link_desc:
328 	/* No UNMAP required -- this is "malloc_consistent" memory */
329 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
330 			     &mpdu_desc_info->msdu_count);
331 
332 	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
333 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
334 						soc, msdu_list.sw_cookie[i]);
335 
336 		qdf_assert_always(rx_desc);
337 
338 		/* all buffers from an MSDU link belong to the same pdev */
339 		*mac_id = rx_desc->pool_id;
340 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
341 		if (!pdev) {
342 			dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
343 					soc, rx_desc->pool_id);
344 			return rx_bufs_used;
345 		}
346 
347 		if (!dp_rx_desc_check_magic(rx_desc)) {
348 			dp_rx_err_err("%pK: Invalid rx_desc cookie=%d",
349 				      soc, msdu_list.sw_cookie[i]);
350 			return rx_bufs_used;
351 		}
352 
353 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
354 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
355 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
356 		rx_desc->unmapped = 1;
357 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
358 
359 		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);
360 
361 		rx_bufs_used++;
362 		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
363 						rx_desc->rx_buf_start);
364 		dp_rx_err_err("%pK: Packet received with PN error for tid :%d",
365 			      soc, tid);
366 
367 		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
368 		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
369 			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);
370 
371 		dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info,
372 				      rx_desc->nbuf,
373 				      QDF_TX_RX_STATUS_DROP, true);
374 		/* Just free the buffers */
375 		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);
376 
377 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
378 					    &pdev->free_list_tail, rx_desc);
379 	}
380 
381 	/*
382 	 * If the msdus are spread across multiple link descriptors,
383 	 * we cannot depend solely on the msdu_count (e.g., if an msdu
384 	 * is spread across multiple buffers). Hence, it is
385 	 * necessary to check the next link descriptor and release
386 	 * all the msdus that are part of it.
387 	 */
388 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
389 			link_desc_va,
390 			&next_link_desc_addr_info);
391 
392 	if (hal_rx_is_buf_addr_info_valid(
393 				&next_link_desc_addr_info)) {
394 		/* Clear the next link desc info for the current link_desc */
395 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
396 
397 		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
398 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
399 		hal_rx_buffer_addr_info_get_paddr(
400 				&next_link_desc_addr_info,
401 				&buf_info);
402 		/* buffer_addr_info is the first element of ring_desc */
403 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
404 					  (uint32_t *)&next_link_desc_addr_info,
405 					  &buf_info);
406 		cur_link_desc_addr_info = next_link_desc_addr_info;
407 		buf_addr_info = &cur_link_desc_addr_info;
408 
409 		link_desc_va =
410 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
411 
412 		goto more_msdu_link_desc;
413 	}
414 	quota--;
415 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
416 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
417 	return rx_bufs_used;
418 }
419 
420 /**
421  * dp_rx_pn_error_handle() - Handles PN check errors
422  *
423  * @soc: core txrx main context
424  * @ring_desc: opaque pointer to the REO error ring descriptor
425  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
426  * @mac_id: pointer filled with the mac id (rx descriptor pool id) of the
427  *	    dropped buffers
428  * @quota: No. of units (packets) that can be serviced in one shot.
429  *
430  * This function implements PN error handling
431  * This function implements PN error handling.
432  * If the peer is configured to ignore PN check errors,
433  * or if DP considers this frame still OK, the frame can be
434  * of REO e.g. duplicate detection/routing to other cores
435  *
436  * Return: uint32_t: No. of elements processed
437  */
438 static uint32_t
439 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
440 		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
441 		      uint8_t *mac_id,
442 		      uint32_t quota)
443 {
444 	uint16_t peer_id;
445 	uint32_t rx_bufs_used = 0;
446 	struct dp_txrx_peer *txrx_peer;
447 	bool peer_pn_policy = false;
448 	dp_txrx_ref_handle txrx_ref_handle = NULL;
449 
450 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
451 					       mpdu_desc_info->peer_meta_data);
452 
453 
454 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
455 						   &txrx_ref_handle,
456 						   DP_MOD_ID_RX_ERR);
457 
458 	if (qdf_likely(txrx_peer)) {
459 		/*
460 		 * TODO: Check for peer specific policies & set peer_pn_policy
461 		 */
462 		dp_err_rl("discard rx due to PN error for peer  %pK",
463 			  txrx_peer);
464 
465 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
466 	}
467 	dp_rx_err_err("%pK: Packet received with PN error", soc);
468 
469 	/* No peer PN policy -- definitely drop */
470 	if (!peer_pn_policy)
471 		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
472 						mpdu_desc_info,
473 						mac_id, quota);
474 
475 	return rx_bufs_used;
476 }
477 
478 #ifdef DP_RX_DELIVER_ALL_OOR_FRAMES
479 /**
480  * dp_rx_deliver_oor_frame() - deliver OOR frames to stack
481  * @soc: Datapath soc handler
482  * @txrx_peer: pointer to the DP txrx peer
483  * @nbuf: pointer to the skb of RX frame
484  * @frame_mask: the mask for special frame needed
485  * @rx_tlv_hdr: start of rx tlv header
486  *
487  * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
488  * single nbuf is expected.
489  *
490  * Return: true - nbuf has been delivered to stack, false - not.
491  */
492 static bool
493 dp_rx_deliver_oor_frame(struct dp_soc *soc,
494 			struct dp_txrx_peer *txrx_peer,
495 			qdf_nbuf_t nbuf, uint32_t frame_mask,
496 			uint8_t *rx_tlv_hdr)
497 {
498 	uint32_t l2_hdr_offset = 0;
499 	uint16_t msdu_len = 0;
500 	uint32_t skip_len;
501 
502 	l2_hdr_offset =
503 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
504 
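	/*
	 * For a scatter-gather (frag) nbuf only the L3 header padding is
	 * skipped here, presumably because the rx TLVs have already been
	 * stripped while building the frag list; for a regular nbuf the
	 * packet length is set first and both the TLVs and padding are
	 * skipped.
	 */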
505 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
506 		skip_len = l2_hdr_offset;
507 	} else {
508 		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
509 		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
510 		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
511 	}
512 
513 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
514 	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
515 	qdf_nbuf_pull_head(nbuf, skip_len);
516 	qdf_nbuf_set_exc_frame(nbuf, 1);
517 
518 	dp_info_rl("OOR frame, mpdu sn 0x%x",
519 		   hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
520 	dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL);
521 	return true;
522 }
523 
524 #else
525 static bool
526 dp_rx_deliver_oor_frame(struct dp_soc *soc,
527 			struct dp_txrx_peer *txrx_peer,
528 			qdf_nbuf_t nbuf, uint32_t frame_mask,
529 			uint8_t *rx_tlv_hdr)
530 {
531 	return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
532 					   rx_tlv_hdr);
533 }
534 #endif
535 
536 /**
537  * dp_rx_oor_handle() - Handles the msdu which is OOR error
538  *
539  * @soc: core txrx main context
540  * @nbuf: pointer to msdu skb
541  * @peer_id: dp peer ID
542  * @rx_tlv_hdr: start of rx tlv header
543  *
544  * This function processes the msdu delivered from the REO2TCL
545  * ring with error type OOR
546  *
547  * Return: None
548  */
549 static void
550 dp_rx_oor_handle(struct dp_soc *soc,
551 		 qdf_nbuf_t nbuf,
552 		 uint16_t peer_id,
553 		 uint8_t *rx_tlv_hdr)
554 {
555 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
556 				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
557 	struct dp_txrx_peer *txrx_peer = NULL;
558 	dp_txrx_ref_handle txrx_ref_handle = NULL;
559 
560 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
561 						   &txrx_ref_handle,
562 						   DP_MOD_ID_RX_ERR);
563 	if (!txrx_peer) {
564 		dp_info_rl("peer not found");
565 		goto free_nbuf;
566 	}
567 
568 	if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask,
569 				    rx_tlv_hdr)) {
570 		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
571 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
572 		return;
573 	}
574 
575 free_nbuf:
576 	if (txrx_peer)
577 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
578 
579 	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
580 	dp_rx_nbuf_free(nbuf);
581 }
582 
583 /**
584  * dp_rx_err_nbuf_pn_check() - Check if the PN number of this current packet
585  *				is a monotonic increment over the packet
586  *				number of the previous successfully
587  *				re-ordered frame.
588  * @soc: Datapath SOC handle
589  * @ring_desc: REO ring descriptor
590  * @nbuf: Current packet
591  *
592  * Return: QDF_STATUS_SUCCESS, if the pn check passes, else QDF_STATUS_E_FAILURE
593  */
594 static inline QDF_STATUS
595 dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc,
596 			qdf_nbuf_t nbuf)
597 {
598 	uint64_t prev_pn, curr_pn[2];
599 
600 	if (!hal_rx_encryption_info_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
601 		return QDF_STATUS_SUCCESS;
602 
603 	hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn);
604 	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), curr_pn);
605 
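	/*
	 * curr_pn[] holds the PN as two 64-bit words; only the lower 64 bits
	 * are compared against the previous PN that REO reported for this
	 * queue.
	 */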
606 	if (curr_pn[0] > prev_pn)
607 		return QDF_STATUS_SUCCESS;
608 
609 	return QDF_STATUS_E_FAILURE;
610 }
611 
612 #ifdef WLAN_SKIP_BAR_UPDATE
613 static
614 void dp_rx_err_handle_bar(struct dp_soc *soc,
615 			  struct dp_peer *peer,
616 			  qdf_nbuf_t nbuf)
617 {
618 	dp_info_rl("BAR update to H.W is skipped");
619 	DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
620 }
621 #else
622 static
623 void dp_rx_err_handle_bar(struct dp_soc *soc,
624 			  struct dp_peer *peer,
625 			  qdf_nbuf_t nbuf)
626 {
627 	uint8_t *rx_tlv_hdr;
628 	unsigned char type, subtype;
629 	uint16_t start_seq_num;
630 	uint32_t tid;
631 	QDF_STATUS status;
632 	struct ieee80211_frame_bar *bar;
633 
634 	/*
635 	 * 1. Is this a BAR frame? If not, discard it.
636 	 * 2. If it is, get the peer id, tid and ssn.
637 	 * 2a. Do a tid update.
638 	 */
639 
640 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
641 	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size);
642 
643 	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
644 	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
645 
646 	if (!(type == IEEE80211_FC0_TYPE_CTL &&
647 	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
648 		dp_err_rl("Not a BAR frame!");
649 		return;
650 	}
651 
652 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
653 	qdf_assert_always(tid < DP_MAX_TIDS);
654 
655 	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
656 
657 	dp_info_rl("tid %u window_size %u start_seq_num %u",
658 		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);
659 
660 	status = dp_rx_tid_update_wifi3(peer, tid,
661 					peer->rx_tid[tid].ba_win_size,
662 					start_seq_num,
663 					true);
664 	if (status != QDF_STATUS_SUCCESS) {
665 		dp_err_rl("failed to handle bar frame update rx tid");
666 		DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
667 	} else {
668 		DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
669 	}
670 }
671 #endif
672 
673 /**
674  * _dp_rx_bar_frame_handle(): Core of the BAR frame handling
675  * @soc: Datapath SoC handle
676  * @nbuf: packet being processed
677  * @mpdu_desc_info: mpdu desc info for the current packet
678  * @tid: tid on which the packet arrived
679  * @err_status: Flag to indicate if REO encountered an error while routing this
680  *		frame
681  * @error_code: REO error code
682  *
683  * Return: None
684  */
685 static void
686 _dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
687 			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
688 			uint32_t tid, uint8_t err_status, uint32_t error_code)
689 {
690 	uint16_t peer_id;
691 	struct dp_peer *peer;
692 
693 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
694 					       mpdu_desc_info->peer_meta_data);
695 	peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
696 	if (!peer)
697 		return;
698 
699 	dp_info_rl("BAR frame: "
700 		" peer_id = %d"
701 		" tid = %u"
702 		" SSN = %d"
703 		" error status = %d",
704 		peer->peer_id,
705 		tid,
706 		mpdu_desc_info->mpdu_seq,
707 		err_status);
708 
709 	if (err_status == HAL_REO_ERROR_DETECTED) {
710 		switch (error_code) {
711 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
712 		case HAL_REO_ERR_BAR_FRAME_OOR:
713 			dp_rx_err_handle_bar(soc, peer, nbuf);
714 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
715 			break;
716 		default:
717 			DP_STATS_INC(soc, rx.bar_frame, 1);
718 		}
719 	}
720 
721 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
722 }
723 
724 /**
725  * dp_rx_bar_frame_handle() - Function to handle err BAR frames
726  * @soc: core DP main context
727  * @ring_desc: Hal ring desc
728  * @rx_desc: dp rx desc
729  * @mpdu_desc_info: mpdu desc info
730  * @err_status: flag indicating if REO encountered an error for this frame
731  * @err_code: REO error code
732  *
733  * Handle the error BAR frames received. Ensure the SOC level stats are
734  * updated based on the REO error code. The BAR frames are further
735  * processed by updating the Rx tids with the start sequence number (SSN)
736  * and BA window size. The desc is returned to the free desc list.
737  * Return: none
738  */
739 static void
740 dp_rx_bar_frame_handle(struct dp_soc *soc,
741 		       hal_ring_desc_t ring_desc,
742 		       struct dp_rx_desc *rx_desc,
743 		       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
744 		       uint8_t err_status,
745 		       uint32_t err_code)
746 {
747 	qdf_nbuf_t nbuf;
748 	struct dp_pdev *pdev;
749 	struct rx_desc_pool *rx_desc_pool;
750 	uint8_t *rx_tlv_hdr;
751 	uint32_t tid;
752 
753 	nbuf = rx_desc->nbuf;
754 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
755 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
756 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
757 	rx_desc->unmapped = 1;
758 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
759 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
760 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
761 					rx_tlv_hdr);
762 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
763 
764 	if (!pdev) {
765 		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
766 				soc, rx_desc->pool_id);
767 		return;
768 	}
769 
770 	_dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status,
771 				err_code);
772 	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf,
773 			      QDF_TX_RX_STATUS_DROP, true);
774 	dp_rx_link_desc_return(soc, ring_desc,
775 			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
776 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
777 				    rx_desc->pool_id);
778 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
779 				    &pdev->free_list_tail,
780 				    rx_desc);
781 }
782 
783 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
784 
785 /**
786  * dp_2k_jump_handle() - Function to handle 2k jump exception
787  *                        on WBM ring
788  *
789  * @soc: core DP main context
790  * @nbuf: buffer pointer
791  * @rx_tlv_hdr: start of rx tlv header
792  * @peer_id: peer id of first msdu
793  * @tid: Tid for which exception occurred
794  *
795  * This function handles 2k jump violations arising out
796  * of receiving aggregates in a non-BA case. This typically
797  * may happen if aggregates are received on a QoS enabled TID
798  * while the Rx window size is still initialized to a value of 2, or
799  * it may also happen if the negotiated window size is 1 but the peer
800  * sends aggregates.
801  * Return: None
802  */
803 
804 void
805 dp_2k_jump_handle(struct dp_soc *soc,
806 		  qdf_nbuf_t nbuf,
807 		  uint8_t *rx_tlv_hdr,
808 		  uint16_t peer_id,
809 		  uint8_t tid)
810 {
811 	struct dp_peer *peer = NULL;
812 	struct dp_rx_tid *rx_tid = NULL;
813 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;
814 
815 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
816 	if (!peer) {
817 		dp_rx_err_info_rl("%pK: peer not found", soc);
818 		goto free_nbuf;
819 	}
820 
821 	if (tid >= DP_MAX_TIDS) {
822 		dp_info_rl("invalid tid");
823 		goto nbuf_deliver;
824 	}
825 
826 	rx_tid = &peer->rx_tid[tid];
827 	qdf_spin_lock_bh(&rx_tid->tid_lock);
828 
829 	/* only if BA session is active, allow send Delba */
830 	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
831 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
832 		goto nbuf_deliver;
833 	}
834 
835 	if (!rx_tid->delba_tx_status) {
836 		rx_tid->delba_tx_retry++;
837 		rx_tid->delba_tx_status = 1;
838 		rx_tid->delba_rcode =
839 			IEEE80211_REASON_QOS_SETUP_REQUIRED;
840 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
841 		if (soc->cdp_soc.ol_ops->send_delba) {
842 			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
843 				     1);
844 			soc->cdp_soc.ol_ops->send_delba(
845 					peer->vdev->pdev->soc->ctrl_psoc,
846 					peer->vdev->vdev_id,
847 					peer->mac_addr.raw,
848 					tid,
849 					rx_tid->delba_rcode,
850 					CDP_DELBA_2K_JUMP);
851 		}
852 	} else {
853 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
854 	}
855 
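	/*
	 * Whether or not a DELBA was sent, attempt to deliver connectivity-
	 * critical special frames (per frame_mask, ARP here) to the stack
	 * before falling back to dropping the buffer.
	 */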
856 nbuf_deliver:
857 	if (dp_rx_deliver_special_frame(soc, peer->txrx_peer, nbuf, frame_mask,
858 					rx_tlv_hdr)) {
859 		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
860 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
861 		return;
862 	}
863 
864 free_nbuf:
865 	if (peer)
866 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
867 	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
868 	dp_rx_nbuf_free(nbuf);
869 }
870 
871 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
872     defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
873 /**
874  * dp_rx_null_q_handle_invalid_peer_id_exception() - handle invalid peer id
875  * @soc: pointer to dp_soc struct
876  * @pool_id: Pool id to find dp_pdev
877  * @rx_tlv_hdr: TLV header of received packet
878  * @nbuf: SKB
879  *
880  * In certain types of packets, if the peer_id is not correct the
881  * driver may not be able to find the peer. Try finding the peer by
882  * addr_2 of the received MPDU. If the peer is found, then most likely
883  * the sw_peer_id & ast_idx are corrupted.
884  *
885  * Return: True if the peer is found by addr_2 of the received MPDU, else false
886  */
887 static bool
888 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
889 					      uint8_t pool_id,
890 					      uint8_t *rx_tlv_hdr,
891 					      qdf_nbuf_t nbuf)
892 {
893 	struct dp_peer *peer = NULL;
894 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
895 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
896 	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;
897 
898 	if (!pdev) {
899 		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
900 				soc, pool_id);
901 		return false;
902 	}
903 	/*
904 	 * WAR: In certain types of packets, if the peer_id is not correct the
905 	 * driver may not be able to find the peer. Try finding the peer by
906 	 * addr_2 of the received MPDU.
907 	 */
908 	if (wh)
909 		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
910 					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
911 	if (peer) {
912 		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
913 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
914 				     QDF_TRACE_LEVEL_DEBUG);
915 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
916 				 1, qdf_nbuf_len(nbuf));
917 		dp_rx_nbuf_free(nbuf);
918 
919 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
920 		return true;
921 	}
922 	return false;
923 }
924 #else
925 static inline bool
926 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
927 					      uint8_t pool_id,
928 					      uint8_t *rx_tlv_hdr,
929 					      qdf_nbuf_t nbuf)
930 {
931 	return false;
932 }
933 #endif
934 
935 /**
936  * dp_rx_check_pkt_len() - Check for pktlen validity
937  * @soc: DP SOC context
938  * @pkt_len: computed length of the pkt from caller in bytes
939  *
940  * Return: true if pkt_len > RX_DATA_BUFFER_SIZE, else return false
941  *
942  */
943 static inline
944 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
945 {
946 	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
947 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
948 				 1, pkt_len);
949 		return true;
950 	} else {
951 		return false;
952 	}
953 }
954 
955 /*
956  * dp_rx_deliver_to_osif_stack() - function to deliver rx pkts to stack
957  * @soc: DP soc
958  * @vdev: DP vdev handle
959  * @txrx_peer: pointer to the txrx_peer object
960  * @nbuf: skb list head
961  * @tail: skb list tail
962  * @is_eapol: eapol pkt check
963  *
964  * Return: None
965  */
966 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
967 static inline void
968 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
969 			    struct dp_vdev *vdev,
970 			    struct dp_txrx_peer *txrx_peer,
971 			    qdf_nbuf_t nbuf,
972 			    qdf_nbuf_t tail,
973 			    bool is_eapol)
974 {
975 	if (is_eapol && soc->eapol_over_control_port)
976 		dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
977 	else
978 		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
979 }
980 #else
981 static inline void
982 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
983 			    struct dp_vdev *vdev,
984 			    struct dp_txrx_peer *txrx_peer,
985 			    qdf_nbuf_t nbuf,
986 			    qdf_nbuf_t tail,
987 			    bool is_eapol)
988 {
989 	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
990 }
991 #endif
992 
993 #ifdef WLAN_FEATURE_11BE_MLO
994 /*
995  * dp_rx_err_match_dhost() - function to check whether dest-mac is correct
996  * @eh: Ethernet header of incoming packet
997  * @vdev: dp_vdev object of the VAP on which this data packet is received
998  *
999  * Return: 1 if the destination mac is correct,
1000  *         0 if this frame is not correctly destined to this VAP/MLD
1001  */
1002 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
1003 {
1004 	return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
1005 			     QDF_MAC_ADDR_SIZE) == 0) ||
1006 		(qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
1007 			     QDF_MAC_ADDR_SIZE) == 0));
1008 }
1009 
1010 #else
1011 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
1012 {
1013 	return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
1014 			    QDF_MAC_ADDR_SIZE) == 0);
1015 }
1016 #endif
1017 
1018 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1019 
1020 /**
1021  * dp_rx_err_drop_3addr_mcast() - Check if feature drop_3ddr_mcast is enabled
1022  * dp_rx_err_drop_3addr_mcast() - Check if feature drop_3addr_mcast is enabled
1023  * @vdev: datapath vdev
1024  * @rx_tlv_hdr: TLV header
1025  *
1026  * Return: true if packet is to be dropped,
1027  *	false, if packet is not dropped.
1028  */
1029 static bool
1030 dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
1031 {
1032 	struct dp_soc *soc = vdev->pdev->soc;
1033 
1034 	if (!vdev->drop_3addr_mcast)
1035 		return false;
1036 
1037 	if (vdev->opmode != wlan_op_mode_sta)
1038 		return false;
1039 
1040 	if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
1041 		return true;
1042 
1043 	return false;
1044 }
1045 
1046 /**
1047  * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed
1048  *				for this frame received in REO error ring.
1049  * @soc: Datapath SOC handle
1050  * @error: REO error detected or not
1051  * @error_code: Error code in case of REO error
1052  *
1053  * Return: true if the pn check is needed in software,
1054  *	false if the pn check is not needed.
1055  */
1056 static inline bool
1057 dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
1058 			     uint32_t error_code)
1059 {
1060 	return (soc->features.pn_in_reo_dest &&
1061 		(error == HAL_REO_ERROR_DETECTED &&
1062 		 (hal_rx_reo_is_2k_jump(error_code) ||
1063 		  hal_rx_reo_is_oor_error(error_code) ||
1064 		  hal_rx_reo_is_bar_oor_2k_jump(error_code))));
1065 }
1066 
1067 /**
1068  * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
1069  *                              descriptor violation on either a
1070  *                              REO or WBM ring
1071  *
1072  * @soc: core DP main context
1073  * @nbuf: buffer pointer
1074  * @rx_tlv_hdr: start of rx tlv header
1075  * @pool_id: mac id
1076  * @txrx_peer: txrx peer handle
1077  *
1078  * This function handles NULL queue descriptor violations arising out
1079  * This function handles NULL queue descriptor violations arising out of
1080  * may happen if a packet is received on a QOS enabled TID before the
1081  * ADDBA negotiation for that TID, when the TID queue is setup. Or
1082  * it may also happen for MC/BC frames if they are not routed to the
1083  * non-QOS TID queue, in the absence of any other default TID queue.
1084  * This error can show up both in a REO destination or WBM release ring.
1085  *
1086  * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
1087  *         if nbuf could not be handled or dropped.
1088  */
1089 static QDF_STATUS
1090 dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
1091 			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
1092 			 struct dp_txrx_peer *txrx_peer)
1093 {
1094 	uint32_t pkt_len;
1095 	uint16_t msdu_len;
1096 	struct dp_vdev *vdev;
1097 	uint8_t tid;
1098 	qdf_ether_header_t *eh;
1099 	struct hal_rx_msdu_metadata msdu_metadata;
1100 	uint16_t sa_idx = 0;
1101 	bool is_eapol = 0;
1102 	bool enh_flag;
1103 
1104 	qdf_nbuf_set_rx_chfrag_start(nbuf,
1105 				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1106 							       rx_tlv_hdr));
1107 	qdf_nbuf_set_rx_chfrag_end(nbuf,
1108 				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
1109 								 rx_tlv_hdr));
1110 	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1111 								  rx_tlv_hdr));
1112 	qdf_nbuf_set_da_valid(nbuf,
1113 			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
1114 							      rx_tlv_hdr));
1115 	qdf_nbuf_set_sa_valid(nbuf,
1116 			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
1117 							      rx_tlv_hdr));
1118 
1119 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
1120 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1121 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
1122 
1123 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
1124 		if (dp_rx_check_pkt_len(soc, pkt_len))
1125 			goto drop_nbuf;
1126 
1127 		/* Set length in nbuf */
1128 		qdf_nbuf_set_pktlen(
1129 			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
1130 		qdf_assert_always(nbuf->data == rx_tlv_hdr);
1131 	}
1132 
1133 	/*
1134 	 * Check if DMA completed -- msdu_done is the last bit
1135 	 * to be written
1136 	 */
1137 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1138 
1139 		dp_err_rl("MSDU DONE failure");
1140 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1141 				     QDF_TRACE_LEVEL_INFO);
1142 		qdf_assert(0);
1143 	}
1144 
1145 	if (!txrx_peer &&
1146 	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
1147 							  rx_tlv_hdr, nbuf))
1148 		return QDF_STATUS_E_FAILURE;
1149 
1150 	if (!txrx_peer) {
1151 		bool mpdu_done = false;
1152 		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
1153 
1154 		if (!pdev) {
1155 			dp_err_rl("pdev is null for pool_id = %d", pool_id);
1156 			return QDF_STATUS_E_FAILURE;
1157 		}
1158 
1159 		dp_err_rl("txrx_peer is NULL");
1160 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1161 				 qdf_nbuf_len(nbuf));
1162 
1163 		/* QCN9000 has the support enabled */
1164 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
1165 			mpdu_done = true;
1166 			nbuf->next = NULL;
1167 			/* Trigger invalid peer handler wrapper */
1168 			dp_rx_process_invalid_peer_wrapper(soc,
1169 					nbuf, mpdu_done, pool_id);
1170 		} else {
1171 			mpdu_done = soc->arch_ops.dp_rx_chain_msdus(soc, nbuf,
1172 								    rx_tlv_hdr,
1173 								    pool_id);
1174 			/* Trigger invalid peer handler wrapper */
1175 			dp_rx_process_invalid_peer_wrapper(soc,
1176 					pdev->invalid_peer_head_msdu,
1177 					mpdu_done, pool_id);
1178 		}
1179 
1180 		if (mpdu_done) {
1181 			pdev->invalid_peer_head_msdu = NULL;
1182 			pdev->invalid_peer_tail_msdu = NULL;
1183 		}
1184 
1185 		return QDF_STATUS_E_FAILURE;
1186 	}
1187 
1188 	vdev = txrx_peer->vdev;
1189 	if (!vdev) {
1190 		dp_err_rl("Null vdev!");
1191 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1192 		goto drop_nbuf;
1193 	}
1194 
1195 	/*
1196 	 * Advance the packet start pointer by total size of
1197 	 * pre-header TLV's
1198 	 */
1199 	if (qdf_nbuf_is_frag(nbuf))
1200 		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
1201 	else
1202 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
1203 				   soc->rx_pkt_tlv_size));
1204 
1205 	DP_STATS_INC_PKT(vdev, rx_i.null_q_desc_pkt, 1, qdf_nbuf_len(nbuf));
1206 
1207 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
1208 
1209 	if (dp_rx_err_drop_3addr_mcast(vdev, rx_tlv_hdr)) {
1210 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.mcast_3addr_drop, 1);
1211 		goto drop_nbuf;
1212 	}
1213 
1214 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
1215 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
1216 
1217 		if ((sa_idx < 0) ||
1218 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
1219 			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
1220 			goto drop_nbuf;
1221 		}
1222 	}
1223 
1224 	if ((!soc->mec_fw_offload) &&
1225 	    dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
1226 		/* this is a looped back MCBC pkt, drop it */
1227 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
1228 					      qdf_nbuf_len(nbuf));
1229 		goto drop_nbuf;
1230 	}
1231 
1232 	/*
1233 	 * In qwrap mode if the received packet matches with any of the vdev
1234 	 * mac addresses, drop it. Do not receive multicast packets
1235 	 * originating from any proxysta.
1236 	 */
1237 	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
1238 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
1239 					      qdf_nbuf_len(nbuf));
1240 		goto drop_nbuf;
1241 	}
1242 
1243 	if (qdf_unlikely(txrx_peer->nawds_enabled &&
1244 			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1245 							rx_tlv_hdr))) {
1246 		dp_err_rl("free buffer for multicast packet");
1247 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1);
1248 		goto drop_nbuf;
1249 	}
1250 
1251 	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
1252 		dp_err_rl("mcast Policy Check Drop pkt");
1253 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1);
1254 		goto drop_nbuf;
1255 	}
1256 	/* WDS Source Port Learning */
1257 	if (!soc->ast_offload_support &&
1258 	    qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
1259 		vdev->wds_enabled))
1260 		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, txrx_peer, nbuf,
1261 					msdu_metadata);
1262 
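	/*
	 * For unicast frames hitting the NULL queue error, set up the Rx TID
	 * (REO queue) on the fly with a BA window size of 1 if it is not
	 * already present, so that subsequent frames on this TID have a REO
	 * queue to land in.
	 */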
1263 	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
1264 		struct dp_peer *peer;
1265 		struct dp_rx_tid *rx_tid;
1266 		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
1267 		peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
1268 					     DP_MOD_ID_RX_ERR);
1269 		if (peer) {
1270 			rx_tid = &peer->rx_tid[tid];
1271 			qdf_spin_lock_bh(&rx_tid->tid_lock);
1272 			if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
1273 				dp_rx_tid_setup_wifi3(peer, tid, 1,
1274 						      IEEE80211_SEQ_MAX);
1275 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1276 			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
1277 			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
1278 		}
1279 	}
1280 
1281 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1282 
1283 	if (!txrx_peer->authorize) {
1284 		is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
1285 			   qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
1286 
1287 		if (is_eapol) {
1288 			if (!dp_rx_err_match_dhost(eh, vdev))
1289 				goto drop_nbuf;
1290 		} else {
1291 			goto drop_nbuf;
1292 		}
1293 	}
1294 
1295 	/*
1296 	 * Drop packets in this path if cce_match is found. Packets will come
1297 	 * in the following paths depending on whether the tidQ is set up:
1298 	 * 1. If tidQ is setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and
1299 	 * cce_match = 1
1300 	 *    Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already
1301 	 *    dropped.
1302 	 * 2. If tidQ is not setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and
1303 	 * cce_match = 1
1304 	 *    These packets need to be dropped and should not get delivered
1305 	 *    to stack.
1306 	 */
1307 	if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr))) {
1308 		goto drop_nbuf;
1309 	}
1310 
1311 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1312 		qdf_nbuf_set_next(nbuf, NULL);
1313 		dp_rx_deliver_raw(vdev, nbuf, txrx_peer);
1314 	} else {
1315 		enh_flag = vdev->pdev->enhanced_stats_en;
1316 		qdf_nbuf_set_next(nbuf, NULL);
1317 		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
1318 					  enh_flag);
1319 		/*
1320 		 * Update the protocol tag in SKB based on
1321 		 * CCE metadata
1322 		 */
1323 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1324 					  EXCEPTION_DEST_RING_ID,
1325 					  true, true);
1326 
1327 		/* Update the flow tag in SKB based on FSE metadata */
1328 		dp_rx_update_flow_tag(soc, vdev, nbuf,
1329 				      rx_tlv_hdr, true);
1330 
1331 		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
1332 				 soc->hal_soc, rx_tlv_hdr) &&
1333 				 (vdev->rx_decap_type ==
1334 				  htt_cmn_pkt_type_ethernet))) {
1335 			DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
1336 					    enh_flag);
1337 
1338 			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
1339 				DP_PEER_BC_INCC_PKT(txrx_peer, 1,
1340 						    qdf_nbuf_len(nbuf),
1341 						    enh_flag);
1342 		}
1343 
1344 		qdf_nbuf_set_exc_frame(nbuf, 1);
1345 		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
1346 					    is_eapol);
1347 	}
1348 	return QDF_STATUS_SUCCESS;
1349 
1350 drop_nbuf:
1351 	dp_rx_nbuf_free(nbuf);
1352 	return QDF_STATUS_E_FAILURE;
1353 }
1354 
1355 #ifdef DP_WAR_INVALID_FIRST_MSDU_FLAG
1356 static inline void
1357 dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
1358 				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1359 				  bool first_msdu_in_mpdu_processed)
1360 {
1361 	if (first_msdu_in_mpdu_processed) {
1362 		/*
1363 		 * This is the 2nd indication of first_msdu in the same mpdu.
1364 		 * Skip re-parsing the mpdu_desc_info and use the cached one,
1365 		 * since this msdu is most probably from the current mpdu
1366 		 * which is being processed
1367 		 */
1368 	} else {
1369 		hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc,
1370 						   qdf_nbuf_data(nbuf),
1371 						   mpdu_desc_info);
1372 	}
1373 }
1374 #else
1375 static inline void
1376 dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
1377 				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1378 				  bool first_msdu_in_mpdu_processed)
1379 {
1380 	hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, qdf_nbuf_data(nbuf),
1381 					   mpdu_desc_info);
1382 }
1383 #endif
1384 
1385 /**
1386  * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
1387  *
1388  * @soc: core txrx main context
1389  * @ring_desc: opaque pointer to the REO error ring descriptor
1390  * @mpdu_desc_info: pointer to mpdu level description info
1391  * @link_desc_va: pointer to msdu_link_desc virtual address
1392  * @err_code: reo error code fetched from ring entry
1393  *
1394  * Function to handle msdus fetched from msdu link desc, currently
1395  * supports the REO errors NULL queue, 2K jump and OOR.
1396  *
1397  * Return: msdu count processed
1398  */
1399 static uint32_t
1400 dp_rx_reo_err_entry_process(struct dp_soc *soc,
1401 			    void *ring_desc,
1402 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1403 			    void *link_desc_va,
1404 			    enum hal_reo_error_code err_code)
1405 {
1406 	uint32_t rx_bufs_used = 0;
1407 	struct dp_pdev *pdev;
1408 	int i;
1409 	uint8_t *rx_tlv_hdr_first;
1410 	uint8_t *rx_tlv_hdr_last;
1411 	uint32_t tid = DP_MAX_TIDS;
1412 	uint16_t peer_id;
1413 	struct dp_rx_desc *rx_desc;
1414 	struct rx_desc_pool *rx_desc_pool;
1415 	qdf_nbuf_t nbuf;
1416 	struct hal_buf_info buf_info;
1417 	struct hal_rx_msdu_list msdu_list;
1418 	uint16_t num_msdus;
1419 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
1420 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
1421 	/* First field in REO Dst ring Desc is buffer_addr_info */
1422 	void *buf_addr_info = ring_desc;
1423 	qdf_nbuf_t head_nbuf = NULL;
1424 	qdf_nbuf_t tail_nbuf = NULL;
1425 	uint16_t msdu_processed = 0;
1426 	QDF_STATUS status;
1427 	bool ret, is_pn_check_needed;
1428 	uint8_t rx_desc_pool_id;
1429 	struct dp_txrx_peer *txrx_peer = NULL;
1430 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1431 	hal_ring_handle_t hal_ring_hdl = soc->reo_exception_ring.hal_srng;
1432 	bool first_msdu_in_mpdu_processed = false;
1433 	bool msdu_dropped = false;
1434 
1435 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1436 					mpdu_desc_info->peer_meta_data);
1437 	is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
1438 							  HAL_REO_ERROR_DETECTED,
1439 							  err_code);
1440 more_msdu_link_desc:
1441 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
1442 			     &num_msdus);
1443 	for (i = 0; i < num_msdus; i++) {
1444 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
1445 						soc,
1446 						msdu_list.sw_cookie[i]);
1447 
1448 		qdf_assert_always(rx_desc);
1449 		nbuf = rx_desc->nbuf;
1450 
1451 		/*
1452 		 * This is an unlikely scenario where the host is reaping
1453 		 * a descriptor which it already reaped just a while ago
1454 		 * but is yet to replenish it back to HW.
1455 		 * In this case host will dump the last 128 descriptors
1456 		 * including the software descriptor rx_desc and assert.
1457 		 */
1458 		if (qdf_unlikely(!rx_desc->in_use) ||
1459 		    qdf_unlikely(!nbuf)) {
1460 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1461 			dp_info_rl("Reaping rx_desc not in use!");
1462 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1463 						   ring_desc, rx_desc);
1464 			/* ignore duplicate RX desc and continue to process */
1465 			/* Pop out the descriptor */
1466 			msdu_dropped = true;
1467 			continue;
1468 		}
1469 
1470 		ret = dp_rx_desc_paddr_sanity_check(rx_desc,
1471 						    msdu_list.paddr[i]);
1472 		if (!ret) {
1473 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1474 			rx_desc->in_err_state = 1;
1475 			msdu_dropped = true;
1476 			continue;
1477 		}
1478 
1479 		rx_desc_pool_id = rx_desc->pool_id;
1480 		/* all buffers from an MSDU link belong to the same pdev */
1481 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id);
1482 
1483 		rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id];
1484 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
1485 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
1486 		rx_desc->unmapped = 1;
1487 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1488 
1489 		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
1490 		rx_bufs_used++;
1491 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1492 					    &pdev->free_list_tail, rx_desc);
1493 
1494 		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);
1495 
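		/*
		 * An MSDU spanning multiple Rx buffers is accumulated in the
		 * head/tail nbuf list above; keep collecting buffers until the
		 * final, non-continuation one is reaped, and only then process
		 * the complete MSDU (as a scatter-gather nbuf if needed).
		 */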
1496 		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
1497 				 HAL_MSDU_F_MSDU_CONTINUATION))
1498 			continue;
1499 
1500 		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
1501 					     rx_desc_pool_id)) {
1502 			/* MSDU queued back to the pool */
1503 			msdu_dropped = true;
1504 			goto process_next_msdu;
1505 		}
1506 
1507 		if (is_pn_check_needed) {
1508 			if (msdu_list.msdu_info[i].msdu_flags &
1509 			    HAL_MSDU_F_FIRST_MSDU_IN_MPDU) {
1510 				dp_rx_err_populate_mpdu_desc_info(soc, nbuf,
1511 						mpdu_desc_info,
1512 						first_msdu_in_mpdu_processed);
1513 				first_msdu_in_mpdu_processed = true;
1514 			} else {
1515 				if (!first_msdu_in_mpdu_processed) {
1516 					/*
1517 					 * If no msdu in this mpdu was dropped
1518 					 * due to failed sanity checks, then
1519 					 * it is not expected to hit this
1520 					 * condition. Hence we assert here.
1521 					 */
1522 					if (!msdu_dropped)
1523 						qdf_assert_always(0);
1524 
1525 					/*
1526 					 * We do not have valid mpdu_desc_info
1527 					 * to process this nbuf, hence drop it.
1528 					 */
1529 					dp_rx_nbuf_free(nbuf);
1530 					/* TODO - Increment stats */
1531 					goto process_next_msdu;
1532 				}
1533 				/*
1534 				 * DO NOTHING -
1535 				 * Continue using the same mpdu_desc_info
1536 				 * details populated from the first msdu in
1537 				 * the mpdu.
1538 				 */
1539 			}
1540 
1541 			status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf);
1542 			if (QDF_IS_STATUS_ERROR(status)) {
1543 				DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail,
1544 					     1);
1545 				dp_rx_nbuf_free(nbuf);
1546 				goto process_next_msdu;
1547 			}
1548 
1549 			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1550 					mpdu_desc_info->peer_meta_data);
1551 
1552 			if (mpdu_desc_info->bar_frame)
1553 				_dp_rx_bar_frame_handle(soc, nbuf,
1554 							mpdu_desc_info, tid,
1555 							HAL_REO_ERROR_DETECTED,
1556 							err_code);
1557 		}
1558 
1559 		if (qdf_unlikely(mpdu_desc_info->mpdu_flags &
1560 				 HAL_MPDU_F_RAW_AMPDU)) {
1561 			dp_err_rl("RAW ampdu in REO error not expected");
1562 			DP_STATS_INC(soc, rx.err.reo_err_raw_mpdu_drop, 1);
1563 			qdf_nbuf_list_free(head_nbuf);
1564 			goto process_next_msdu;
1565 		}
1566 
1567 		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
1568 		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);
1569 
1570 		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
1571 			nbuf = dp_rx_sg_create(soc, head_nbuf);
1572 			qdf_nbuf_set_is_frag(nbuf, 1);
1573 			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
1574 		}
1575 
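		/*
		 * Dispatch the (possibly re-assembled) msdu based on the REO
		 * error code: 2K jump and OOR frames may still be delivered to
		 * the stack, NULL queue errors go through the NULL queue
		 * descriptor handler, anything else is dropped.
		 */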
1576 		switch (err_code) {
1577 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
1578 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
1579 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
1580 			/*
1581 			 * Only the first msdu carries a valid mpdu start
1582 			 * description tlv; use it for the following msdus.
1583 			 */
1584 			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1585 							   rx_tlv_hdr_last))
1586 				tid = hal_rx_mpdu_start_tid_get(
1587 							soc->hal_soc,
1588 							rx_tlv_hdr_first);
1589 
1590 			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
1591 					  peer_id, tid);
1592 			break;
1593 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
1594 		case HAL_REO_ERR_BAR_FRAME_OOR:
1595 			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
1596 			break;
1597 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
1598 			txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(
1599 							soc, peer_id,
1600 							&txrx_ref_handle,
1601 							DP_MOD_ID_RX_ERR);
1602 			if (!txrx_peer)
1603 				dp_info_rl("txrx_peer is null peer_id %u",
1604 					   peer_id);
1605 			dp_rx_null_q_desc_handle(soc, nbuf, rx_tlv_hdr_last,
1606 						 rx_desc_pool_id, txrx_peer);
1607 			if (txrx_peer)
1608 				dp_txrx_peer_unref_delete(txrx_ref_handle,
1609 							  DP_MOD_ID_RX_ERR);
1610 			break;
1611 		default:
1612 			dp_err_rl("Non-support error code %d", err_code);
1613 			dp_rx_nbuf_free(nbuf);
1614 		}
1615 
1616 process_next_msdu:
1617 		msdu_processed++;
1618 		head_nbuf = NULL;
1619 		tail_nbuf = NULL;
1620 	}
1621 
1622 	/*
1623 	 * If the msdus are spread across multiple link descriptors,
1624 	 * we cannot depend solely on the msdu_count (e.g., if an msdu
1625 	 * is spread across multiple buffers). Hence, it is
1626 	 * necessary to check the next link descriptor and release
1627 	 * all the msdus that are part of it.
1628 	 */
1629 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
1630 			link_desc_va,
1631 			&next_link_desc_addr_info);
1632 
1633 	if (hal_rx_is_buf_addr_info_valid(
1634 				&next_link_desc_addr_info)) {
1635 		/* Clear the next link desc info for the current link_desc */
1636 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
1637 		dp_rx_link_desc_return_by_addr(
1638 				soc,
1639 				buf_addr_info,
1640 				HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1641 
1642 		hal_rx_buffer_addr_info_get_paddr(
1643 				&next_link_desc_addr_info,
1644 				&buf_info);
1645 		/* buffer_addr_info is the first element of ring_desc */
1646 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
1647 					  (uint32_t *)&next_link_desc_addr_info,
1648 					  &buf_info);
1649 		link_desc_va =
1650 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1651 		cur_link_desc_addr_info = next_link_desc_addr_info;
1652 		buf_addr_info = &cur_link_desc_addr_info;
1653 
1654 		goto more_msdu_link_desc;
1655 	}
1656 
1657 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
1658 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1659 	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
1660 		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);
1661 
1662 	return rx_bufs_used;
1663 }
1664 
1665 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1666 
1667 /**
1668  * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
1669  *			       or wifi parse error frames to the OS.
1670  * @soc: core DP main context
1671  * @nbuf: buffer pointer
1672  * @rx_tlv_hdr: start of rx tlv header
1673  * @txrx_peer: peer reference
1674  * @err_code: rxdma err code
1675  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1676  * pool_id have the same mapping)
1677  *
1678  * Return: None
1679  */
1680 void
1681 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
1682 			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
1683 			uint8_t err_code, uint8_t mac_id)
1684 {
1685 	uint32_t pkt_len, l2_hdr_offset;
1686 	uint16_t msdu_len;
1687 	struct dp_vdev *vdev;
1688 	qdf_ether_header_t *eh;
1689 	bool is_broadcast;
1690 
1691 	/*
1692 	 * Check if DMA completed -- msdu_done is the last bit
1693 	 * to be written
1694 	 */
1695 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1696 
1697 		dp_err_rl("MSDU DONE failure");
1698 
1699 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1700 				     QDF_TRACE_LEVEL_INFO);
1701 		qdf_assert(0);
1702 	}
1703 
1704 	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
1705 							   rx_tlv_hdr);
1706 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1707 	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
1708 
1709 	if (dp_rx_check_pkt_len(soc, pkt_len)) {
1710 		/* Drop & free packet */
1711 		dp_rx_nbuf_free(nbuf);
1712 		return;
1713 	}
1714 	/* Set length in nbuf */
1715 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
1716 
1717 	qdf_nbuf_set_next(nbuf, NULL);
1718 
1719 	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1720 	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
1721 
1722 	if (!txrx_peer) {
1723 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL");
1724 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1725 				qdf_nbuf_len(nbuf));
1726 		/* Trigger invalid peer handler wrapper */
1727 		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
1728 		return;
1729 	}
1730 
1731 	vdev = txrx_peer->vdev;
1732 	if (!vdev) {
1733 		dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
1734 				 vdev);
1735 		/* Drop & free packet */
1736 		dp_rx_nbuf_free(nbuf);
1737 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1738 		return;
1739 	}
1740 
1741 	/*
1742 	 * Advance the packet start pointer by the total size of
1743 	 * the pre-header TLVs
1744 	 */
1745 	dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset);
1746 
1747 	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
1748 		uint8_t *pkt_type;
1749 
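		/*
		 * pkt_type points at the EtherType field that follows the
		 * DA and SA. For 802.1Q tagged frames, peek past the VLAN
		 * tag: STP frames are counted and taken through the mesh
		 * handling path below, while other tagged frames are
		 * delivered via the regular rx path.
		 */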
1750 		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
1751 		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
1752 			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
1753 							htons(QDF_LLC_STP)) {
1754 				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
1755 				goto process_mesh;
1756 			} else {
1757 				goto process_rx;
1758 			}
1759 		}
1760 	}
1761 	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
1762 		goto process_mesh;
1763 
1764 	/*
1765 	 * WAPI cert AP sends rekey frames as unencrypted.
1766 	 * Thus RXDMA will report unencrypted frame error.
1767 	 * To pass WAPI cert case, SW needs to pass unencrypted
1768 	 * rekey frame to stack.
1769 	 */
1770 	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1771 		goto process_rx;
1772 	}
1773 	/*
1774 	 * In the dynamic WEP case rekey frames are sent unencrypted,
1775 	 * similar to WAPI. Allow EAPOL frames when 802.1X + WEP is
1776 	 * enabled and the key install is already done.
1777 	 */
1778 	if ((vdev->sec_type == cdp_sec_type_wep104) &&
1779 	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
1780 		goto process_rx;
1781 
1782 process_mesh:
1783 
1784 	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
1785 		dp_rx_nbuf_free(nbuf);
1786 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1787 		return;
1788 	}
1789 
1790 	if (vdev->mesh_vdev) {
1791 		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
1792 				      == QDF_STATUS_SUCCESS) {
1793 			dp_rx_err_info("%pK: mesh pkt filtered", soc);
1794 			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);
1795 
1796 			dp_rx_nbuf_free(nbuf);
1797 			return;
1798 		}
1799 		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
1800 	}
1801 process_rx:
1802 	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1803 							rx_tlv_hdr) &&
1804 		(vdev->rx_decap_type ==
1805 				htt_cmn_pkt_type_ethernet))) {
1806 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1807 		is_broadcast = (QDF_IS_ADDR_BROADCAST
1808 				(eh->ether_dhost)) ? 1 : 0 ;
1809 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
1810 					      qdf_nbuf_len(nbuf));
1811 		if (is_broadcast) {
1812 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1,
1813 						      qdf_nbuf_len(nbuf));
1814 		}
1815 	}
1816 
1817 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1818 		dp_rx_deliver_raw(vdev, nbuf, txrx_peer);
1819 	} else {
1820 		/* Update the protocol tag in SKB based on CCE metadata */
1821 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1822 					  EXCEPTION_DEST_RING_ID, true, true);
1823 		/* Update the flow tag in SKB based on FSE metadata */
1824 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
1825 		DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
1826 		qdf_nbuf_set_exc_frame(nbuf, 1);
1827 		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
1828 					    qdf_nbuf_is_ipv4_eapol_pkt(nbuf));
1829 	}
1830 
1831 	return;
1832 }
1833 
1834 /**
1835  * dp_rx_process_mic_error() - Function to pass mic error indication to umac
1836  * @soc: core DP main context
1837  * @nbuf: buffer pointer
1838  * @rx_tlv_hdr: start of rx tlv header
1839  * @txrx_peer: txrx peer handle
1840  *
1841  * Return: void
1842  */
1843 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
1844 			     uint8_t *rx_tlv_hdr,
1845 			     struct dp_txrx_peer *txrx_peer)
1846 {
1847 	struct dp_vdev *vdev = NULL;
1848 	struct dp_pdev *pdev = NULL;
1849 	struct ol_if_ops *tops = NULL;
1850 	uint16_t rx_seq, fragno;
1851 	uint8_t is_raw;
1852 	unsigned int tid;
1853 	QDF_STATUS status;
1854 	struct cdp_rx_mic_err_info mic_failure_info;
1855 
1856 	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1857 					    rx_tlv_hdr))
1858 		return;
1859 
1860 	if (!txrx_peer) {
1861 		dp_info_rl("txrx_peer not found");
1862 		goto fail;
1863 	}
1864 
1865 	vdev = txrx_peer->vdev;
1866 	if (!vdev) {
1867 		dp_info_rl("VDEV not found");
1868 		goto fail;
1869 	}
1870 
1871 	pdev = vdev->pdev;
1872 	if (!pdev) {
1873 		dp_info_rl("PDEV not found");
1874 		goto fail;
1875 	}
1876 
1877 	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
1878 	if (is_raw) {
1879 		fragno = dp_rx_frag_get_mpdu_frag_number(soc,
1880 							 qdf_nbuf_data(nbuf));
1881 		/* Can get only last fragment */
1882 		if (fragno) {
1883 			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
1884 							qdf_nbuf_data(nbuf));
1885 			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
1886 							qdf_nbuf_data(nbuf));
1887 
1888 			status = dp_rx_defrag_add_last_frag(soc, txrx_peer,
1889 							    tid, rx_seq, nbuf);
1890 			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
1891 				   "status %d !", rx_seq, fragno, status);
1892 			return;
1893 		}
1894 	}
1895 
1896 	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
1897 				  &mic_failure_info.da_mac_addr.bytes[0])) {
1898 		dp_err_rl("Failed to get da_mac_addr");
1899 		goto fail;
1900 	}
1901 
1902 	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
1903 				  &mic_failure_info.ta_mac_addr.bytes[0])) {
1904 		dp_err_rl("Failed to get ta_mac_addr");
1905 		goto fail;
1906 	}
1907 
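	/*
	 * Populate the MIC failure info that is indicated to the control
	 * path via the ol_ops->rx_mic_error callback below.
	 */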
1908 	mic_failure_info.key_id = 0;
1909 	mic_failure_info.multicast =
1910 		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
1911 	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
1912 	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
1913 	mic_failure_info.data = NULL;
1914 	mic_failure_info.vdev_id = vdev->vdev_id;
1915 
1916 	tops = pdev->soc->cdp_soc.ol_ops;
1917 	if (tops->rx_mic_error)
1918 		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
1919 				   &mic_failure_info);
1920 
1921 fail:
1922 	dp_rx_nbuf_free(nbuf);
1923 	return;
1924 }
1925 
1926 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1927 	defined(WLAN_MCAST_MLO)
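/**
 * dp_rx_igmp_handler() - Pass multicast frames to the per-arch MLO
 *			  multicast handler, if one is registered
 * @soc: core DP main context
 * @vdev: vdev on which the frame was received
 * @peer: txrx peer handle
 * @nbuf: buffer pointer
 *
 * Return: true if the frame was consumed by the handler, false otherwise
 */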
1928 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1929 			       struct dp_vdev *vdev,
1930 			       struct dp_txrx_peer *peer,
1931 			       qdf_nbuf_t nbuf)
1932 {
1933 	if (soc->arch_ops.dp_rx_mcast_handler) {
1934 		if (soc->arch_ops.dp_rx_mcast_handler(soc, vdev, peer, nbuf))
1935 			return true;
1936 	}
1937 	return false;
1938 }
1939 #else
1940 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1941 			       struct dp_vdev *vdev,
1942 			       struct dp_txrx_peer *peer,
1943 			       qdf_nbuf_t nbuf)
1944 {
1945 	return false;
1946 }
1947 #endif
1948 
1949 /**
1950  * dp_rx_err_route_hdl() - Function to send EAPOL frames to the stack and
1951  *                            free any other packet which comes in
1952  *                            this path.
1953  *
1954  * @soc: core DP main context
1955  * @nbuf: buffer pointer
1956  * @txrx_peer: txrx peer handle
1957  * @rx_tlv_hdr: start of rx tlv header
1958  * @err_src: rxdma/reo
1959  *
1960  * This function indicates an EAPOL frame received in the wbm error ring
1961  * to the stack. Any other frame should be dropped.
1962  *
1963  * Return: None
1964  */
1965 static void
1966 dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
1967 		    struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr,
1968 		    enum hal_rx_wbm_error_source err_src)
1969 {
1970 	uint32_t pkt_len;
1971 	uint16_t msdu_len;
1972 	struct dp_vdev *vdev;
1973 	struct hal_rx_msdu_metadata msdu_metadata;
1974 	bool is_eapol;
1975 
1976 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
1977 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1978 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
1979 
1980 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
1981 		if (dp_rx_check_pkt_len(soc, pkt_len))
1982 			goto drop_nbuf;
1983 
1984 		/* Set length in nbuf */
1985 		qdf_nbuf_set_pktlen(
1986 			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
1987 		qdf_assert_always(nbuf->data == rx_tlv_hdr);
1988 	}
1989 
1990 	/*
1991 	 * Check if DMA completed -- msdu_done is the last bit
1992 	 * to be written
1993 	 */
1994 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1995 		dp_err_rl("MSDU DONE failure");
1996 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1997 				     QDF_TRACE_LEVEL_INFO);
1998 		qdf_assert(0);
1999 	}
2000 
2001 	if (!txrx_peer)
2002 		goto drop_nbuf;
2003 
2004 	vdev = txrx_peer->vdev;
2005 	if (!vdev) {
2006 		dp_err_rl("Null vdev!");
2007 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
2008 		goto drop_nbuf;
2009 	}
2010 
2011 	/*
2012 	 * Advance the packet start pointer by the total size of
2013 	 * the pre-header TLVs
2014 	 */
2015 	if (qdf_nbuf_is_frag(nbuf))
2016 		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
2017 	else
2018 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
2019 				   soc->rx_pkt_tlv_size));
2020 
2021 	if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf))
2022 		return;
2023 
2024 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
2025 
2026 	/*
2027 	 * Indicate EAPOL frame to stack only when vap mac address
2028 	 * matches the destination address.
2029 	 */
2030 	is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
2031 	if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
2032 		qdf_ether_header_t *eh =
2033 			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2034 		if (dp_rx_err_match_dhost(eh, vdev)) {
2035 			DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1,
2036 					 qdf_nbuf_len(nbuf));
2037 
2038 			/*
2039 			 * Update the protocol tag in SKB based on
2040 			 * CCE metadata.
2041 			 */
2042 			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
2043 						  EXCEPTION_DEST_RING_ID,
2044 						  true, true);
2045 			/* Update the flow tag in SKB based on FSE metadata */
2046 			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
2047 					      true);
2048 			DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
2049 						  qdf_nbuf_len(nbuf),
2050 						  vdev->pdev->enhanced_stats_en);
2051 			qdf_nbuf_set_exc_frame(nbuf, 1);
2052 			qdf_nbuf_set_next(nbuf, NULL);
2053 
2054 			dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf,
2055 						    NULL, is_eapol);
2056 
2057 			return;
2058 		}
2059 	}
2060 
2061 drop_nbuf:
2062 
2063 	DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
2064 		      err_src == HAL_RX_WBM_ERR_SRC_REO);
2065 	DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
2066 		      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);
2067 
2068 	dp_rx_nbuf_free(nbuf);
2069 }
2070 
2071 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2072 
2073 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
2074 /**
2075  * dp_rx_link_cookie_check() - Validate link desc cookie
2076  * @ring_desc: ring descriptor
2077  *
2078  * Return: qdf status
2079  */
2080 static inline QDF_STATUS
2081 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
2082 {
2083 	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
2084 		return QDF_STATUS_E_FAILURE;
2085 
2086 	return QDF_STATUS_SUCCESS;
2087 }
2088 
2089 /**
2090  * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
2091  * @ring_desc: ring descriptor
2092  *
2093  * Return: None
2094  */
2095 static inline void
2096 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
2097 {
2098 	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
2099 }
2100 #else
2101 static inline QDF_STATUS
2102 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
2103 {
2104 	return QDF_STATUS_SUCCESS;
2105 }
2106 
2107 static inline void
2108 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
2109 {
2110 }
2111 #endif
2112 
2113 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
2114 /**
2115  * dp_rx_err_ring_record_entry() - Record rx err ring history
2116  * @soc: Datapath soc structure
2117  * @paddr: paddr of the buffer in RX err ring
2118  * @sw_cookie: SW cookie of the buffer in RX err ring
2119  * @rbm: Return buffer manager of the buffer in RX err ring
2120  *
2121  * Return: None
2122  */
2123 static inline void
2124 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
2125 			    uint32_t sw_cookie, uint8_t rbm)
2126 {
2127 	struct dp_buf_info_record *record;
2128 	uint32_t idx;
2129 
2130 	if (qdf_unlikely(!soc->rx_err_ring_history))
2131 		return;
2132 
2133 	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
2134 					DP_RX_ERR_HIST_MAX);
2135 
2136 	/* No NULL check needed for record since it's an array */
2137 	record = &soc->rx_err_ring_history->entry[idx];
2138 
2139 	record->timestamp = qdf_get_log_timestamp();
2140 	record->hbi.paddr = paddr;
2141 	record->hbi.sw_cookie = sw_cookie;
2142 	record->hbi.rbm = rbm;
2143 }
2144 #else
2145 static inline void
2146 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
2147 			    uint32_t sw_cookie, uint8_t rbm)
2148 {
2149 }
2150 #endif
2151 
2152 #ifdef HANDLE_RX_REROUTE_ERR
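/**
 * dp_rx_err_handle_msdu_buf() - Handle an MSDU buffer address received
 *				 directly on the REO error ring
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 *
 * Unmap and free the nbuf and return its rx descriptor to the pdev
 * free list so that the buffer can be replenished.
 *
 * Return: lmac id to account the reaped buffer against, or
 *	   DP_INVALID_LMAC_ID on failure
 */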
2153 static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
2154 				     hal_ring_desc_t ring_desc)
2155 {
2156 	int lmac_id = DP_INVALID_LMAC_ID;
2157 	struct dp_rx_desc *rx_desc;
2158 	struct hal_buf_info hbi;
2159 	struct dp_pdev *pdev;
2160 	struct rx_desc_pool *rx_desc_pool;
2161 
2162 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2163 
2164 	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie);
2165 
2166 	/* sanity */
2167 	if (!rx_desc) {
2168 		DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1);
2169 		goto assert_return;
2170 	}
2171 
2172 	if (!rx_desc->nbuf)
2173 		goto assert_return;
2174 
2175 	dp_rx_err_ring_record_entry(soc, hbi.paddr,
2176 				    hbi.sw_cookie,
2177 				    hal_rx_ret_buf_manager_get(soc->hal_soc,
2178 							       ring_desc));
2179 	if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) {
2180 		DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2181 		rx_desc->in_err_state = 1;
2182 		goto assert_return;
2183 	}
2184 
2185 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2186 	/* After this point the rx_desc and nbuf are valid */
2187 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
2188 	qdf_assert_always(!rx_desc->unmapped);
2189 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
2190 	rx_desc->unmapped = 1;
2191 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2192 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
2193 				    rx_desc->pool_id);
2194 
2195 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
2196 	lmac_id = rx_desc->pool_id;
2197 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
2198 				    &pdev->free_list_tail,
2199 				    rx_desc);
2200 	return lmac_id;
2201 
2202 assert_return:
2203 	qdf_assert(0);
2204 	return lmac_id;
2205 }
2206 
2207 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
2208 {
2209 	int ret;
2210 	uint64_t cur_time_stamp;
2211 
2212 	DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1);
2213 
2214 	/* Recover if overall error count exceeds threshold */
2215 	if (soc->stats.rx.err.reo_err_msdu_buf_rcved >
2216 	    DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) {
2217 		dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
2218 		       soc->stats.rx.err.reo_err_msdu_buf_rcved,
2219 		       soc->rx_route_err_start_pkt_ts);
2220 		qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR);
2221 	}
2222 
2223 	cur_time_stamp = qdf_get_log_timestamp_usecs();
2224 	if (!soc->rx_route_err_start_pkt_ts)
2225 		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
2226 
2227 	/* Recover if a threshold number of packets arrives within the timeout */
2228 	if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) >
2229 						DP_RX_ERR_ROUTE_TIMEOUT_US) {
2230 		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
2231 
2232 		if (soc->rx_route_err_in_window >
2233 		    DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) {
2234 			qdf_trigger_self_recovery(NULL,
2235 						  QDF_RX_REG_PKT_ROUTE_ERR);
2236 			dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
2237 			       soc->stats.rx.err.reo_err_msdu_buf_rcved,
2238 			       soc->rx_route_err_start_pkt_ts);
2239 		} else {
2240 			soc->rx_route_err_in_window = 1;
2241 		}
2242 	} else {
2243 		soc->rx_route_err_in_window++;
2244 	}
2245 
2246 	ret = dp_rx_err_handle_msdu_buf(soc, ring_desc);
2247 
2248 	return ret;
2249 }
2250 #else /* HANDLE_RX_REROUTE_ERR */
2251 
2252 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
2253 {
2254 	qdf_assert_always(0);
2255 
2256 	return DP_INVALID_LMAC_ID;
2257 }
2258 #endif /* HANDLE_RX_REROUTE_ERR */
2259 
2260 #ifdef WLAN_MLO_MULTI_CHIP
2261 /*
2262  * dp_idle_link_bm_id_check() - WAR for HW issue
2263  *
2264  * This is a WAR for a HW issue where a link descriptor
2265  * of a partner soc is received because packets are wrongly
2266  * interpreted as fragments
2267  *
2268  * @soc: DP SOC handle
2269  * @rbm: idle link RBM value
2270  * @ring_desc: reo error link descriptor
2271  *
2272  * Return: true in case the link desc is consumed
2273  *	    false in other cases
2274  */
2275 static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
2276 				     void *ring_desc)
2277 {
2278 	struct dp_soc *replenish_soc = NULL;
2279 
2280 	/* return ok in case the link desc belongs to the same soc */
2281 	if (rbm == soc->idle_link_bm_id)
2282 		return false;
2283 
2284 	if (soc->arch_ops.dp_soc_get_by_idle_bm_id)
2285 		replenish_soc =
2286 			soc->arch_ops.dp_soc_get_by_idle_bm_id(soc, rbm);
2287 
2288 	qdf_assert_always(replenish_soc);
2289 
2290 	/*
2291 	 * For the WIN usecase we should only get fragment packets in
2292 	 * this ring; since fragmentation is not supported for the MLO
2293 	 * case, we should not see link descriptors from another soc.
2294 	 *
2295 	 * Drop all packets from the partner soc and replenish the descriptors.
2296 	 */
2297 	dp_handle_wbm_internal_error(replenish_soc, ring_desc,
2298 				     HAL_WBM_RELEASE_RING_2_DESC_TYPE);
2299 
2300 	return true;
2301 }
2302 #else
2303 static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
2304 				     void *ring_desc)
2305 {
2306 	return false;
2307 }
2308 #endif
2309 
2310 uint32_t
2311 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2312 		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
2313 {
2314 	hal_ring_desc_t ring_desc;
2315 	hal_soc_handle_t hal_soc;
2316 	uint32_t count = 0;
2317 	uint32_t rx_bufs_used = 0;
2318 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
2319 	uint8_t mac_id = 0;
2320 	uint8_t buf_type;
2321 	uint8_t err_status;
2322 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
2323 	struct hal_buf_info hbi;
2324 	struct dp_pdev *dp_pdev;
2325 	struct dp_srng *dp_rxdma_srng;
2326 	struct rx_desc_pool *rx_desc_pool;
2327 	void *link_desc_va;
2328 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
2329 	uint16_t num_msdus;
2330 	struct dp_rx_desc *rx_desc = NULL;
2331 	QDF_STATUS status;
2332 	bool ret;
2333 	uint32_t error_code = 0;
2334 	bool sw_pn_check_needed;
2335 	int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
2336 	int i, rx_bufs_reaped_total;
2337 
2338 	/* Debug -- Remove later */
2339 	qdf_assert(soc && hal_ring_hdl);
2340 
2341 	hal_soc = soc->hal_soc;
2342 
2343 	/* Debug -- Remove later */
2344 	qdf_assert(hal_soc);
2345 
2346 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
2347 
2348 		/* TODO */
2349 		/*
2350 		 * Need API to convert from hal_ring pointer to
2351 		 * Ring Type / Ring Id combo
2352 		 */
2353 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
2354 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc,
2355 			      hal_ring_hdl);
2356 		goto done;
2357 	}
2358 
2359 	while (qdf_likely(quota-- && (ring_desc =
2360 				hal_srng_dst_peek(hal_soc,
2361 						  hal_ring_hdl)))) {
2362 
2363 		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
2364 		err_status = hal_rx_err_status_get(hal_soc, ring_desc);
2365 		buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc);
2366 
2367 		if (err_status == HAL_REO_ERROR_DETECTED)
2368 			error_code = hal_rx_get_reo_error_code(hal_soc,
2369 							       ring_desc);
2370 
2371 		qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0);
2372 		sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
2373 								  err_status,
2374 								  error_code);
2375 		if (!sw_pn_check_needed) {
2376 			/*
2377 			 * MPDU desc info will be present in the REO desc
2378 			 * only in the below scenarios
2379 			 * 1) pn_in_dest_disabled:  always
2380 			 * 2) pn_in_dest enabled: all cases except 2K-jump
2381 			 *			and OOR errors
2382 			 */
2383 			hal_rx_mpdu_desc_info_get(hal_soc, ring_desc,
2384 						  &mpdu_desc_info);
2385 		}
2386 
2387 		if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0)
2388 			goto next_entry;
2389 
2390 		/*
2391 		 * For REO error ring, only MSDU LINK DESC is expected.
2392 		 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
2393 		 */
2394 		if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) {
2395 			int lmac_id;
2396 
2397 			lmac_id = dp_rx_err_exception(soc, ring_desc);
2398 			if (lmac_id >= 0)
2399 				rx_bufs_reaped[lmac_id] += 1;
2400 			goto next_entry;
2401 		}
2402 
2403 		hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
2404 					  &hbi);
2405 		/*
2406 		 * check for the magic number in the sw cookie
2407 		 */
2408 		qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) &
2409 					soc->link_desc_id_start);
2410 
2411 		if (dp_idle_link_bm_id_check(soc, hbi.rbm, ring_desc)) {
2412 			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
2413 			goto next_entry;
2414 		}
2415 
2416 		status = dp_rx_link_cookie_check(ring_desc);
2417 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2418 			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
2419 			break;
2420 		}
2421 
2422 		hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2423 		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
2424 		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
2425 				     &num_msdus);
2426 		if (!num_msdus ||
2427 		    !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) {
2428 			dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x",
2429 					  num_msdus, msdu_list.sw_cookie[0]);
2430 			dp_rx_link_desc_return(soc, ring_desc,
2431 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
2432 			goto next_entry;
2433 		}
2434 
2435 		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
2436 					    msdu_list.sw_cookie[0],
2437 					    msdu_list.rbm[0]);
2438 		/* TODO - BE - Check if the RBM is to be checked for all chips */
2439 		if (qdf_unlikely((msdu_list.rbm[0] !=
2440 					dp_rx_get_rx_bm_id(soc)) &&
2441 				 (msdu_list.rbm[0] !=
2442 				  soc->idle_link_bm_id) &&
2443 				 (msdu_list.rbm[0] !=
2444 					dp_rx_get_defrag_bm_id(soc)))) {
2445 			/* TODO */
2446 			/* Call appropriate handler */
2447 			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2448 				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
2449 				dp_rx_err_err("%pK: Invalid RBM %d",
2450 					      soc, msdu_list.rbm[0]);
2451 			}
2452 
2453 			/* Return link descriptor through WBM ring (SW2WBM)*/
2454 			dp_rx_link_desc_return(soc, ring_desc,
2455 					HAL_BM_ACTION_RELEASE_MSDU_LIST);
2456 			goto next_entry;
2457 		}
2458 
2459 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
2460 						soc,
2461 						msdu_list.sw_cookie[0]);
2462 		qdf_assert_always(rx_desc);
2463 
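		/* pool_id doubles as the lmac/mac id used for reap accounting */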
2464 		mac_id = rx_desc->pool_id;
2465 
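		/*
		 * For errors that need the SW PN check the MPDU desc info is
		 * not populated in the ring descriptor, so skip the BAR and
		 * fragment handling and go straight to the REO error code
		 * processing.
		 */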
2466 		if (sw_pn_check_needed) {
2467 			goto process_reo_error_code;
2468 		}
2469 
2470 		if (mpdu_desc_info.bar_frame) {
2471 			qdf_assert_always(mpdu_desc_info.msdu_count == 1);
2472 
2473 			dp_rx_bar_frame_handle(soc, ring_desc, rx_desc,
2474 					       &mpdu_desc_info, err_status,
2475 					       error_code);
2476 
2477 			rx_bufs_reaped[mac_id] += 1;
2478 			goto next_entry;
2479 		}
2480 
2481 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
2482 			/*
2483 			 * We only handle one msdu per link desc for fragmented
2484 			 * case. We drop the msdus and release the link desc
2485 			 * back if there is more than one msdu in the link desc.
2486 			 */
2487 			if (qdf_unlikely(num_msdus > 1)) {
2488 				count = dp_rx_msdus_drop(soc, ring_desc,
2489 							 &mpdu_desc_info,
2490 							 &mac_id, quota);
2491 				rx_bufs_reaped[mac_id] += count;
2492 				goto next_entry;
2493 			}
2494 
2495 			/*
2496 			 * this is an unlikely scenario where the host is reaping
2497 			 * a descriptor which it already reaped just a while ago
2498 			 * but is yet to replenish it back to HW.
2499 			 * In this case the host will dump the last 128 descriptors
2500 			 * including the software descriptor rx_desc and assert.
2501 			 */
2502 
2503 			if (qdf_unlikely(!rx_desc->in_use)) {
2504 				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
2505 				dp_info_rl("Reaping rx_desc not in use!");
2506 				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2507 							   ring_desc, rx_desc);
2508 				/* ignore duplicate RX desc and continue */
2509 				/* Pop out the descriptor */
2510 				goto next_entry;
2511 			}
2512 
2513 			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
2514 							    msdu_list.paddr[0]);
2515 			if (!ret) {
2516 				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2517 				rx_desc->in_err_state = 1;
2518 				goto next_entry;
2519 			}
2520 
2521 			count = dp_rx_frag_handle(soc,
2522 						  ring_desc, &mpdu_desc_info,
2523 						  rx_desc, &mac_id, quota);
2524 
2525 			rx_bufs_reaped[mac_id] += count;
2526 			DP_STATS_INC(soc, rx.rx_frags, 1);
2527 			goto next_entry;
2528 		}
2529 
2530 process_reo_error_code:
2531 		/*
2532 		 * Expect REO errors to be handled after this point
2533 		 */
2534 		qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED);
2535 
2536 		dp_info_rl("Got pkt with REO ERROR: %d", error_code);
2537 
2538 		switch (error_code) {
2539 		case HAL_REO_ERR_PN_CHECK_FAILED:
2540 		case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
2541 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2542 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2543 			if (dp_pdev)
2544 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
2545 			count = dp_rx_pn_error_handle(soc,
2546 						      ring_desc,
2547 						      &mpdu_desc_info, &mac_id,
2548 						      quota);
2549 
2550 			rx_bufs_reaped[mac_id] += count;
2551 			break;
2552 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2553 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
2554 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2555 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
2556 		case HAL_REO_ERR_BAR_FRAME_OOR:
2557 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2558 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2559 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2560 			if (dp_pdev)
2561 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
2562 			count = dp_rx_reo_err_entry_process(
2563 					soc,
2564 					ring_desc,
2565 					&mpdu_desc_info,
2566 					link_desc_va,
2567 					error_code);
2568 
2569 			rx_bufs_reaped[mac_id] += count;
2570 			break;
2571 		case HAL_REO_ERR_QUEUE_DESC_INVALID:
2572 		case HAL_REO_ERR_AMPDU_IN_NON_BA:
2573 		case HAL_REO_ERR_NON_BA_DUPLICATE:
2574 		case HAL_REO_ERR_BA_DUPLICATE:
2575 		case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION:
2576 		case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN:
2577 		case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET:
2578 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2579 			count = dp_rx_msdus_drop(soc, ring_desc,
2580 						 &mpdu_desc_info,
2581 						 &mac_id, quota);
2582 			rx_bufs_reaped[mac_id] += count;
2583 			break;
2584 		default:
2585 			/* Assert if unexpected error type */
2586 			qdf_assert_always(0);
2587 		}
2588 next_entry:
2589 		dp_rx_link_cookie_invalidate(ring_desc);
2590 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2591 
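		/* Stop reaping once the accumulated buffer count hits the per-loop limit */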
2592 		rx_bufs_reaped_total = 0;
2593 		for (i = 0; i < MAX_PDEV_CNT; i++)
2594 			rx_bufs_reaped_total += rx_bufs_reaped[i];
2595 
2596 		if (dp_rx_reap_loop_pkt_limit_hit(soc, rx_bufs_reaped_total,
2597 						  max_reap_limit))
2598 			break;
2599 	}
2600 
2601 done:
2602 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2603 
2604 	if (soc->rx.flags.defrag_timeout_check) {
2605 		uint32_t now_ms =
2606 			qdf_system_ticks_to_msecs(qdf_system_ticks());
2607 
2608 		if (now_ms >= soc->rx.defrag.next_flush_ms)
2609 			dp_rx_defrag_waitlist_flush(soc);
2610 	}
2611 
2612 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2613 		if (rx_bufs_reaped[mac_id]) {
2614 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2615 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2616 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
2617 
2618 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2619 						rx_desc_pool,
2620 						rx_bufs_reaped[mac_id],
2621 						&dp_pdev->free_list_head,
2622 						&dp_pdev->free_list_tail,
2623 						false);
2624 			rx_bufs_used += rx_bufs_reaped[mac_id];
2625 		}
2626 	}
2627 
2628 	return rx_bufs_used; /* Assume no scale factor for now */
2629 }
2630 
2631 #ifdef DROP_RXDMA_DECRYPT_ERR
2632 /**
2633  * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
2634  *
2635  * Return: true if rxdma decrypt err frames are handled and false otherwise
2636  */
2637 static inline bool dp_handle_rxdma_decrypt_err(void)
2638 {
2639 	return false;
2640 }
2641 #else
2642 static inline bool dp_handle_rxdma_decrypt_err(void)
2643 {
2644 	return true;
2645 }
2646 #endif
2647 
2648 /*
2649  * dp_rx_wbm_sg_list_last_msdu_war() - WAR for HW issue
2650  *
2651  * This is a WAR for a HW issue where the length is only valid in the last MSDU
2652  * @soc: DP SOC handle
2653  */
2654 static inline void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
2655 {
2656 	if (soc->wbm_sg_last_msdu_war) {
2657 		uint32_t len;
2658 		qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail;
2659 
2660 		len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
2661 						     qdf_nbuf_data(temp));
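		/* Propagate the tail MSDU's length to every nbuf in the SG list */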
2662 		temp = soc->wbm_sg_param.wbm_sg_nbuf_head;
2663 		while (temp) {
2664 			QDF_NBUF_CB_RX_PKT_LEN(temp) = len;
2665 			temp = temp->next;
2666 		}
2667 	}
2668 }
2669 
2670 #ifdef RX_DESC_DEBUG_CHECK
2671 /**
2672  * dp_rx_wbm_desc_nbuf_sanity_check - Add a sanity check for WBM rx_desc paddr
2673  *					corruption
2674  * @soc: core txrx main context
2675  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring
2676  * @ring_desc: REO ring descriptor
2677  * @rx_desc: Rx descriptor
2678  *
2679  * Return: NONE
2680  */
2681 static
2682 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2683 					    hal_ring_handle_t hal_ring_hdl,
2684 					    hal_ring_desc_t ring_desc,
2685 					    struct dp_rx_desc *rx_desc)
2686 {
2687 	struct hal_buf_info hbi;
2688 
2689 	hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2690 	/* Sanity check for possible buffer paddr corruption */
2691 	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
2692 		return QDF_STATUS_SUCCESS;
2693 
2694 	hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc);
2695 
2696 	return QDF_STATUS_E_FAILURE;
2697 }
2698 
2699 #else
2700 static
2701 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2702 					    hal_ring_handle_t hal_ring_hdl,
2703 					    hal_ring_desc_t ring_desc,
2704 					    struct dp_rx_desc *rx_desc)
2705 {
2706 	return QDF_STATUS_SUCCESS;
2707 }
2708 #endif
2709 
2710 static inline bool
2711 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
2712 {
2713 	/*
2714 	 * Currently the Null Queue and Unencrypted error handlers have support
2715 	 * for SG. The other error handlers do not deal with SG buffers.
2716 	 */
2717 	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
2718 	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
2719 	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
2720 	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
2721 		return true;
2722 
2723 	return false;
2724 }
2725 
2726 #ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
2727 static inline void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2728 					    qdf_nbuf_t nbuf)
2729 {
2730 	/*
2731 	 * In the fast recycle case the TX driver can skip invalidating
2732 	 * the buffer for SFE forwarding. We need to invalidate
2733 	 * the TLV headers after writing to this location.
2734 	 */
2735 	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2736 				      (void *)(nbuf->data +
2737 					       soc->rx_pkt_tlv_size +
2738 					       L3_HEADER_PAD));
2739 }
2740 #else
2741 static inline void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2742 					    qdf_nbuf_t nbuf)
2743 {
2744 }
2745 #endif
2746 
2747 uint32_t
2748 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2749 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
2750 {
2751 	hal_ring_desc_t ring_desc;
2752 	hal_soc_handle_t hal_soc;
2753 	struct dp_rx_desc *rx_desc;
2754 	union dp_rx_desc_list_elem_t
2755 		*head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
2756 	union dp_rx_desc_list_elem_t
2757 		*tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
2758 	uint32_t rx_bufs_used = 0;
2759 	uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { 0 } };
2760 	uint8_t buf_type;
2761 	uint8_t mac_id;
2762 	struct dp_pdev *dp_pdev;
2763 	struct dp_srng *dp_rxdma_srng;
2764 	struct rx_desc_pool *rx_desc_pool;
2765 	uint8_t *rx_tlv_hdr;
2766 	bool is_tkip_mic_err;
2767 	qdf_nbuf_t nbuf_head = NULL;
2768 	qdf_nbuf_t nbuf_tail = NULL;
2769 	qdf_nbuf_t nbuf, next;
2770 	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
2771 	uint8_t pool_id;
2772 	uint8_t tid = 0;
2773 	uint8_t msdu_continuation = 0;
2774 	bool process_sg_buf = false;
2775 	uint32_t wbm_err_src;
2776 	QDF_STATUS status;
2777 	struct dp_soc *replenish_soc;
2778 	uint8_t chip_id;
2779 	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
2780 
2781 	/* Debug -- Remove later */
2782 	qdf_assert(soc && hal_ring_hdl);
2783 
2784 	hal_soc = soc->hal_soc;
2785 
2786 	/* Debug -- Remove later */
2787 	qdf_assert(hal_soc);
2788 
2789 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
2790 
2791 		/* TODO */
2792 		/*
2793 		 * Need API to convert from hal_ring pointer to
2794 		 * Ring Type / Ring Id combo
2795 		 */
2796 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
2797 			      soc, hal_ring_hdl);
2798 		goto done;
2799 	}
2800 
2801 	while (qdf_likely(quota)) {
2802 		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2803 		if (qdf_unlikely(!ring_desc))
2804 			break;
2805 
2806 		/* XXX */
2807 		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);
2808 
2809 		/*
2810 		 * For WBM ring, expect only MSDU buffers
2811 		 */
2812 		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);
2813 
2814 		wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc);
2815 		qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
2816 			   (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));
2817 
2818 		if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc,
2819 								   ring_desc,
2820 								   &rx_desc)) {
2821 			dp_rx_err_err("get rx desc from hal_desc failed");
2822 			continue;
2823 		}
2824 
2825 		qdf_assert_always(rx_desc);
2826 
2827 		if (!dp_rx_desc_check_magic(rx_desc)) {
2828 			dp_rx_err_err("%pK: Invalid rx_desc %pK",
2829 				      soc, rx_desc);
2830 			continue;
2831 		}
2832 
2833 		/*
2834 		 * this is an unlikely scenario where the host is reaping
2835 		 * a descriptor which it already reaped just a while ago
2836 		 * but is yet to replenish it back to HW.
2837 		 * In this case the host will dump the last 128 descriptors
2838 		 * including the software descriptor rx_desc and assert.
2839 		 */
2840 		if (qdf_unlikely(!rx_desc->in_use)) {
2841 			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
2842 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2843 						   ring_desc, rx_desc);
2844 			continue;
2845 		}
2846 
2847 		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
2848 		nbuf = rx_desc->nbuf;
2849 
2850 		status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
2851 							  ring_desc, rx_desc);
2852 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2853 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2854 			dp_info_rl("Rx error Nbuf %pK sanity check failure!",
2855 				   nbuf);
2856 			rx_desc->in_err_state = 1;
2857 			rx_desc->unmapped = 1;
2858 			rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
2859 
2860 			dp_rx_add_to_free_desc_list(
2861 				&head[rx_desc->chip_id][rx_desc->pool_id],
2862 				&tail[rx_desc->chip_id][rx_desc->pool_id],
2863 				rx_desc);
2864 			continue;
2865 		}
2866 
2867 		/* Get MPDU DESC info */
2868 		hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, &mpdu_desc_info);
2869 
2870 		if (qdf_likely(mpdu_desc_info.mpdu_flags &
2871 			       HAL_MPDU_F_QOS_CONTROL_VALID))
2872 			qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);
2873 
2874 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2875 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
2876 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
2877 		rx_desc->unmapped = 1;
2878 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2879 
2880 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support &&
2881 				 dp_rx_is_sg_formation_required(&wbm_err_info))) {
2882 			/* SG is detected from continuation bit */
2883 			msdu_continuation =
2884 				hal_rx_wbm_err_msdu_continuation_get(hal_soc,
2885 								     ring_desc);
2886 			if (msdu_continuation &&
2887 			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
2888 				/* Update length from first buffer in SG */
2889 				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
2890 					hal_rx_msdu_start_msdu_len_get(
2891 						soc->hal_soc,
2892 						qdf_nbuf_data(nbuf));
2893 				soc->wbm_sg_param.wbm_is_first_msdu_in_sg = true;
2894 			}
2895 
2896 			if (msdu_continuation) {
2897 				/* MSDU continued packets */
2898 				qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
2899 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2900 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
2901 			} else {
2902 				/* This is the terminal packet in SG */
2903 				qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
2904 				qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
2905 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2906 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
2907 				process_sg_buf = true;
2908 			}
2909 		}
2910 
2911 		/*
2912 		 * save the wbm desc info in nbuf TLV. We will need this
2913 		 * info when we do the actual nbuf processing
2914 		 */
2915 		wbm_err_info.pool_id = rx_desc->pool_id;
2916 		hal_rx_priv_info_set_in_tlv(soc->hal_soc,
2917 					    qdf_nbuf_data(nbuf),
2918 					    (uint8_t *)&wbm_err_info,
2919 					    sizeof(wbm_err_info));
2920 
2921 		dp_rx_err_tlv_invalidate(soc, nbuf);
2922 		rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
2923 
2924 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
2925 			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
2926 					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
2927 					  nbuf);
2928 			if (process_sg_buf) {
2929 				if (!dp_rx_buffer_pool_refill(
2930 					soc,
2931 					soc->wbm_sg_param.wbm_sg_nbuf_head,
2932 					rx_desc->pool_id))
2933 					DP_RX_MERGE_TWO_LIST(
2934 						nbuf_head, nbuf_tail,
2935 						soc->wbm_sg_param.wbm_sg_nbuf_head,
2936 						soc->wbm_sg_param.wbm_sg_nbuf_tail);
2937 				dp_rx_wbm_sg_list_last_msdu_war(soc);
2938 				dp_rx_wbm_sg_list_reset(soc);
2939 				process_sg_buf = false;
2940 			}
2941 		} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
2942 						     rx_desc->pool_id)) {
2943 			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
2944 		}
2945 
2946 		dp_rx_add_to_free_desc_list
2947 			(&head[rx_desc->chip_id][rx_desc->pool_id],
2948 			 &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
2949 
2950 		/*
2951 		 * if the continuation bit is set then we have an MSDU spread
2952 		 * across multiple buffers, so let us not decrement the quota
2953 		 * till we reap all buffers of that MSDU.
2954 		 */
2955 		if (qdf_likely(!msdu_continuation))
2956 			quota -= 1;
2957 	}
2958 done:
2959 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2960 
2961 	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
2962 		for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2963 			/*
2964 			 * continue with next mac_id if no pkts were reaped
2965 			 * from that pool
2966 			 */
2967 			if (!rx_bufs_reaped[chip_id][mac_id])
2968 				continue;
2969 
2970 			replenish_soc =
2971 			soc->arch_ops.dp_rx_replenish_soc_get(soc, chip_id);
2972 
2973 			dp_rxdma_srng =
2974 				&replenish_soc->rx_refill_buf_ring[mac_id];
2975 
2976 			rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];
2977 
2978 			dp_rx_buffers_replenish(replenish_soc, mac_id,
2979 						dp_rxdma_srng,
2980 						rx_desc_pool,
2981 						rx_bufs_reaped[chip_id][mac_id],
2982 						&head[chip_id][mac_id],
2983 						&tail[chip_id][mac_id], false);
2984 			rx_bufs_used += rx_bufs_reaped[chip_id][mac_id];
2985 		}
2986 	}
2987 
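	/*
	 * Second pass: walk the reaped nbuf list and dispatch each error
	 * frame based on the WBM error info saved in its TLV area.
	 */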
2988 	nbuf = nbuf_head;
2989 	while (nbuf) {
2990 		struct dp_txrx_peer *txrx_peer;
2991 		struct dp_peer *peer;
2992 		uint16_t peer_id;
2993 		uint8_t err_code;
2994 		uint8_t *tlv_hdr;
2995 		uint32_t peer_meta_data;
2996 		dp_txrx_ref_handle txrx_ref_handle = NULL;
2997 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
2998 
2999 		/*
3000 		 * retrieve the wbm desc info from nbuf TLV, so we can
3001 		 * handle error cases appropriately
3002 		 */
3003 		hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr,
3004 					      (uint8_t *)&wbm_err_info,
3005 					      sizeof(wbm_err_info));
3006 
3007 		peer_meta_data = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
3008 							       rx_tlv_hdr);
3009 		peer_id = dp_rx_peer_metadata_peer_id_get(soc, peer_meta_data);
3010 		txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
3011 							   &txrx_ref_handle,
3012 							   DP_MOD_ID_RX_ERR);
3013 
3014 		if (!txrx_peer)
3015 			dp_info_rl("peer is null peer_id %u err_src %u, "
3016 				   "REO: push_rsn %u err_code %u, "
3017 				   "RXDMA: push_rsn %u err_code %u",
3018 				   peer_id, wbm_err_info.wbm_err_src,
3019 				   wbm_err_info.reo_psh_rsn,
3020 				   wbm_err_info.reo_err_code,
3021 				   wbm_err_info.rxdma_psh_rsn,
3022 				   wbm_err_info.rxdma_err_code);
3023 
3024 		/* Set queue_mapping in nbuf to 0 */
3025 		dp_set_rx_queue(nbuf, 0);
3026 
3027 		next = nbuf->next;
3028 
3029 		/*
3030 		 * Form the SG for msdu continued buffers
3031 		 * QCN9000 has this support
3032 		 */
3033 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
3034 			nbuf = dp_rx_sg_create(soc, nbuf);
3035 			next = nbuf->next;
3036 			/*
3037 			 * SG error handling is not done correctly,
3038 			 * drop SG frames for now.
3039 			 */
3040 			dp_rx_nbuf_free(nbuf);
3041 			dp_info_rl("scattered msdu dropped");
3042 			nbuf = next;
3043 			if (txrx_peer)
3044 				dp_txrx_peer_unref_delete(txrx_ref_handle,
3045 							  DP_MOD_ID_RX_ERR);
3046 			continue;
3047 		}
3048 
3049 		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
3050 			if (wbm_err_info.reo_psh_rsn
3051 					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {
3052 
3053 				DP_STATS_INC(soc,
3054 					rx.err.reo_error
3055 					[wbm_err_info.reo_err_code], 1);
3056 				/* increment @pdev level */
3057 				pool_id = wbm_err_info.pool_id;
3058 				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
3059 				if (dp_pdev)
3060 					DP_STATS_INC(dp_pdev, err.reo_error,
3061 						     1);
3062 
3063 				switch (wbm_err_info.reo_err_code) {
3064 				/*
3065 				 * Handling for packets which have NULL REO
3066 				 * queue descriptor
3067 				 */
3068 				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
3069 					pool_id = wbm_err_info.pool_id;
3070 					dp_rx_null_q_desc_handle(soc, nbuf,
3071 								 rx_tlv_hdr,
3072 								 pool_id,
3073 								 txrx_peer);
3074 					break;
3075 				/* TODO */
3076 				/* Add per error code accounting */
3077 				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
3078 					if (txrx_peer)
3079 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3080 									  rx.err.jump_2k_err,
3081 									  1);
3082 
3083 					pool_id = wbm_err_info.pool_id;
3084 
3085 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
3086 									   rx_tlv_hdr)) {
3087 						tid =
3088 						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
3089 					}
3090 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
3091 					hal_rx_msdu_start_msdu_len_get(
3092 						soc->hal_soc, rx_tlv_hdr);
3093 					nbuf->next = NULL;
3094 					dp_2k_jump_handle(soc, nbuf,
3095 							  rx_tlv_hdr,
3096 							  peer_id, tid);
3097 					break;
3098 				case HAL_REO_ERR_REGULAR_FRAME_OOR:
3099 					if (txrx_peer)
3100 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3101 									  rx.err.oor_err,
3102 									  1);
3103 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
3104 									   rx_tlv_hdr)) {
3105 						tid =
3106 							hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
3107 					}
3108 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
3109 						hal_rx_msdu_start_msdu_len_get(
3110 						soc->hal_soc, rx_tlv_hdr);
3111 					nbuf->next = NULL;
3112 					dp_rx_oor_handle(soc, nbuf,
3113 							 peer_id,
3114 							 rx_tlv_hdr);
3115 					break;
3116 				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
3117 				case HAL_REO_ERR_BAR_FRAME_OOR:
3118 					peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
3119 					if (peer) {
3120 						dp_rx_err_handle_bar(soc, peer,
3121 								     nbuf);
3122 						dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
3123 					}
3124 					dp_rx_nbuf_free(nbuf);
3125 					break;
3126 
3127 				case HAL_REO_ERR_PN_CHECK_FAILED:
3128 				case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
3129 					if (txrx_peer)
3130 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3131 									  rx.err.pn_err,
3132 									  1);
3133 					dp_rx_nbuf_free(nbuf);
3134 					break;
3135 
3136 				default:
3137 					dp_info_rl("Got pkt with REO ERROR: %d",
3138 						   wbm_err_info.reo_err_code);
3139 					dp_rx_nbuf_free(nbuf);
3140 				}
3141 			} else if (wbm_err_info.reo_psh_rsn
3142 					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
3143 				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
3144 						    rx_tlv_hdr,
3145 						    HAL_RX_WBM_ERR_SRC_REO);
3146 			} else {
3147 				/* should not enter here */
3148 				dp_rx_err_alert("invalid reo push reason %u",
3149 						wbm_err_info.reo_psh_rsn);
3150 				dp_rx_nbuf_free(nbuf);
3151 				qdf_assert_always(0);
3152 			}
3153 		} else if (wbm_err_info.wbm_err_src ==
3154 					HAL_RX_WBM_ERR_SRC_RXDMA) {
3155 			if (wbm_err_info.rxdma_psh_rsn
3156 					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
3157 				DP_STATS_INC(soc,
3158 					rx.err.rxdma_error
3159 					[wbm_err_info.rxdma_err_code], 1);
3160 				/* increment @pdev level */
3161 				pool_id = wbm_err_info.pool_id;
3162 				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
3163 				if (dp_pdev)
3164 					DP_STATS_INC(dp_pdev,
3165 						     err.rxdma_error, 1);
3166 
3167 				switch (wbm_err_info.rxdma_err_code) {
3168 				case HAL_RXDMA_ERR_UNENCRYPTED:
3169 
3170 				case HAL_RXDMA_ERR_WIFI_PARSE:
3171 					if (txrx_peer)
3172 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3173 									  rx.err.rxdma_wifi_parse_err,
3174 									  1);
3175 
3176 					pool_id = wbm_err_info.pool_id;
3177 					dp_rx_process_rxdma_err(soc, nbuf,
3178 								rx_tlv_hdr,
3179 								txrx_peer,
3180 								wbm_err_info.
3181 								rxdma_err_code,
3182 								pool_id);
3183 					break;
3184 
3185 				case HAL_RXDMA_ERR_TKIP_MIC:
3186 					dp_rx_process_mic_error(soc, nbuf,
3187 								rx_tlv_hdr,
3188 								txrx_peer);
3189 					if (txrx_peer)
3190 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3191 									  rx.err.mic_err,
3192 									  1);
3193 					break;
3194 
3195 				case HAL_RXDMA_ERR_DECRYPT:
3196 					/* All the TKIP-MIC failures are treated as Decrypt Errors
3197 					 * for QCN9224 Targets
3198 					 */
3199 					is_tkip_mic_err = hal_rx_msdu_end_is_tkip_mic_err(hal_soc, rx_tlv_hdr);
3200 
3201 					if (is_tkip_mic_err && txrx_peer) {
3202 						dp_rx_process_mic_error(soc, nbuf,
3203 									rx_tlv_hdr,
3204 									txrx_peer);
3205 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3206 									  rx.err.mic_err,
3207 									  1);
3208 						break;
3209 					}
3210 
3211 					if (txrx_peer) {
3212 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3213 									  rx.err.decrypt_err,
3214 									  1);
3215 						dp_rx_nbuf_free(nbuf);
3216 						break;
3217 					}
3218 
3219 					if (!dp_handle_rxdma_decrypt_err()) {
3220 						dp_rx_nbuf_free(nbuf);
3221 						break;
3222 					}
3223 
3224 					pool_id = wbm_err_info.pool_id;
3225 					err_code = wbm_err_info.rxdma_err_code;
3226 					tlv_hdr = rx_tlv_hdr;
3227 					dp_rx_process_rxdma_err(soc, nbuf,
3228 								tlv_hdr, NULL,
3229 								err_code,
3230 								pool_id);
3231 					break;
3232 				case HAL_RXDMA_MULTICAST_ECHO:
3233 					if (txrx_peer)
3234 						DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
3235 									      rx.mec_drop, 1,
3236 									      qdf_nbuf_len(nbuf));
3237 					dp_rx_nbuf_free(nbuf);
3238 					break;
3239 				case HAL_RXDMA_UNAUTHORIZED_WDS:
3240 					pool_id = wbm_err_info.pool_id;
3241 					err_code = wbm_err_info.rxdma_err_code;
3242 					tlv_hdr = rx_tlv_hdr;
3243 					dp_rx_process_rxdma_err(soc, nbuf,
3244 								tlv_hdr,
3245 								txrx_peer,
3246 								err_code,
3247 								pool_id);
3248 					break;
3249 				default:
3250 					dp_rx_nbuf_free(nbuf);
3251 					dp_err_rl("RXDMA error %d",
3252 						  wbm_err_info.rxdma_err_code);
3253 				}
3254 			} else if (wbm_err_info.rxdma_psh_rsn
3255 					== HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
3256 				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
3257 						    rx_tlv_hdr,
3258 						    HAL_RX_WBM_ERR_SRC_RXDMA);
3259 			} else if (wbm_err_info.rxdma_psh_rsn
3260 					== HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
3261 				dp_rx_err_err("rxdma push reason %u",
3262 						wbm_err_info.rxdma_psh_rsn);
3263 				DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
3264 				dp_rx_nbuf_free(nbuf);
3265 			} else {
3266 				/* should not enter here */
3267 				dp_rx_err_alert("invalid rxdma push reason %u",
3268 						wbm_err_info.rxdma_psh_rsn);
3269 				dp_rx_nbuf_free(nbuf);
3270 				qdf_assert_always(0);
3271 			}
3272 		} else {
3273 			/* Should not come here */
3274 			qdf_assert(0);
3275 		}
3276 
3277 		if (txrx_peer)
3278 			dp_txrx_peer_unref_delete(txrx_ref_handle,
3279 						  DP_MOD_ID_RX_ERR);
3280 
3281 		nbuf = next;
3282 	}
3283 	return rx_bufs_used; /* Assume no scale factor for now */
3284 }
3285 
3286 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3287 
3288 /**
3289  * dup_desc_dbg() - dump and assert if duplicate rx desc found
3290  *
3291  * @soc: core DP main context
3292  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
3293  * @rx_desc: void pointer to rx descriptor
3294  *
3295  * Return: void
3296  */
3297 static void dup_desc_dbg(struct dp_soc *soc,
3298 			 hal_rxdma_desc_t rxdma_dst_ring_desc,
3299 			 void *rx_desc)
3300 {
3301 	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
3302 	dp_rx_dump_info_and_assert(
3303 			soc,
3304 			soc->rx_rel_ring.hal_srng,
3305 			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
3306 			rx_desc);
3307 }
3308 
3309 /**
3310  * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
3311  *
3312  * @soc: core DP main context
3313  * @mac_id: mac id which is one of 3 mac_ids
3314  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
3315  * @head: head of descs list to be freed
3316  * @tail: tail of descs list to be freed
3317  *
3318  * Return: number of msdus in the MPDU to be popped
3319  */
3320 static inline uint32_t
3321 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
3322 	hal_rxdma_desc_t rxdma_dst_ring_desc,
3323 	union dp_rx_desc_list_elem_t **head,
3324 	union dp_rx_desc_list_elem_t **tail)
3325 {
3326 	void *rx_msdu_link_desc;
3327 	qdf_nbuf_t msdu;
3328 	qdf_nbuf_t last;
3329 	struct hal_rx_msdu_list msdu_list;
3330 	uint16_t num_msdus;
3331 	struct hal_buf_info buf_info;
3332 	uint32_t rx_bufs_used = 0;
3333 	uint32_t msdu_cnt;
3334 	uint32_t i;
3335 	uint8_t push_reason;
3336 	uint8_t rxdma_error_code = 0;
3337 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
3338 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
3339 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
3340 	hal_rxdma_desc_t ring_desc;
3341 	struct rx_desc_pool *rx_desc_pool;
3342 
3343 	if (!pdev) {
3344 		dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
3345 				soc, mac_id);
3346 		return rx_bufs_used;
3347 	}
3348 
3349 	msdu = NULL;
3350 
3351 	last = NULL;
3352 
3353 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
3354 				     &buf_info, &msdu_cnt);
3355 
3356 	push_reason =
3357 		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
3358 	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
3359 		rxdma_error_code =
3360 			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
3361 	}
3362 
3363 	do {
3364 		rx_msdu_link_desc =
3365 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
3366 
3367 		qdf_assert_always(rx_msdu_link_desc);
3368 
3369 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
3370 				     &msdu_list, &num_msdus);
3371 
3372 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
3373 			/* if the msdus belong to an NSS offloaded radio &&
3374 			 * the rbm is not SW1_BM then return the msdu_link
3375 			 * descriptor without freeing the msdus (nbufs). Let
3376 			 * these buffers be given to the NSS completion ring
3377 			 * for NSS to free them.
3378 			 * Else iterate through the msdu link desc list and
3379 			 * free each msdu in the list.
3380 			 */
3381 			if (msdu_list.rbm[0] !=
3382 				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) &&
3383 			    wlan_cfg_get_dp_pdev_nss_enabled(
3384 							pdev->wlan_cfg_ctx))
3385 				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
3386 			else {
3387 				for (i = 0; i < num_msdus; i++) {
3388 					struct dp_rx_desc *rx_desc =
3389 						soc->arch_ops.
3390 						dp_rx_desc_cookie_2_va(
3391 							soc,
3392 							msdu_list.sw_cookie[i]);
3393 					qdf_assert_always(rx_desc);
3394 					msdu = rx_desc->nbuf;
3395 					/*
3396 					 * this is an unlikely scenario
3397 					 * where the host is reaping
3398 					 * a descriptor which
3399 					 * it already reaped just a while ago
3400 					 * but is yet to replenish
3401 					 * it back to HW.
3402 					 * In this case the host will dump
3403 					 * the last 128 descriptors
3404 					 * including the software descriptor
3405 					 * rx_desc and assert.
3406 					 */
3407 					ring_desc = rxdma_dst_ring_desc;
3408 					if (qdf_unlikely(!rx_desc->in_use)) {
3409 						dup_desc_dbg(soc,
3410 							     ring_desc,
3411 							     rx_desc);
3412 						continue;
3413 					}
3414 
3415 					if (rx_desc->unmapped == 0) {
3416 						rx_desc_pool =
3417 							&soc->rx_desc_buf[rx_desc->pool_id];
3418 						dp_ipa_rx_buf_smmu_mapping_lock(soc);
3419 						dp_rx_nbuf_unmap_pool(soc,
3420 								      rx_desc_pool,
3421 								      msdu);
3422 						rx_desc->unmapped = 1;
3423 						dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3424 					}
3425 
3426 					dp_rx_err_debug("%pK: msdu_nbuf=%pK ",
3427 							soc, msdu);
3428 
3429 					dp_rx_buffer_pool_nbuf_free(soc, msdu,
3430 							rx_desc->pool_id);
3431 					rx_bufs_used++;
3432 					dp_rx_add_to_free_desc_list(head,
3433 						tail, rx_desc);
3434 				}
3435 			}
3436 		} else {
3437 			rxdma_error_code = HAL_RXDMA_ERR_WAR;
3438 		}
3439 
3440 		/*
3441 		 * Store the current link buffer into the local structure
3442 		 * to be used for release purposes.
3443 		 */
3444 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3445 					     buf_info.paddr, buf_info.sw_cookie,
3446 					     buf_info.rbm);
3447 
3448 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3449 					      &buf_info);
3450 		dp_rx_link_desc_return_by_addr(soc,
3451 					       (hal_buff_addrinfo_t)
3452 						rx_link_buf_info,
3453 						bm_action);
3454 	} while (buf_info.paddr);
3455 
3456 	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
3457 	if (pdev)
3458 		DP_STATS_INC(pdev, err.rxdma_error, 1);
3459 
3460 	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
3461 		dp_rx_err_err("%pK: Packet received with Decrypt error", soc);
3462 	}
3463 
3464 	return rx_bufs_used;
3465 }
3466 
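/*
 * dp_rxdma_err_process() - RxDMA error destination ring processing
 *
 * @int_ctx: interrupt context
 * @soc: core DP main context
 * @mac_id: lmac id which identifies the RxDMA error destination ring
 * @quota: upper limit of descriptors that can be reaped in one call
 *
 * Reaps the RxDMA error destination ring, frees the errored Rx buffers via
 * dp_rx_err_mpdu_pop() and replenishes the Rx refill ring with the reaped
 * count.
 *
 * Return: number of Rx buffers reaped (work done)
 */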
3467 uint32_t
3468 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3469 		     uint32_t mac_id, uint32_t quota)
3470 {
3471 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
3472 	hal_rxdma_desc_t rxdma_dst_ring_desc;
3473 	hal_soc_handle_t hal_soc;
3474 	void *err_dst_srng;
3475 	union dp_rx_desc_list_elem_t *head = NULL;
3476 	union dp_rx_desc_list_elem_t *tail = NULL;
3477 	struct dp_srng *dp_rxdma_srng;
3478 	struct rx_desc_pool *rx_desc_pool;
3479 	uint32_t work_done = 0;
3480 	uint32_t rx_bufs_used = 0;
3481 
3482 	if (!pdev)
3483 		return 0;
3484 
3485 	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;
3486 
3487 	if (!err_dst_srng) {
3488 		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
3489 			      soc, err_dst_srng);
3490 		return 0;
3491 	}
3492 
3493 	hal_soc = soc->hal_soc;
3494 
3495 	qdf_assert(hal_soc);
3496 
3497 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
3498 		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
3499 			      soc, err_dst_srng);
3500 		return 0;
3501 	}
3502 
3503 	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
3504 		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
3505 
3506 		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
3507 						   rxdma_dst_ring_desc,
3508 						   &head, &tail);
3509 	}
3510 
3511 	dp_srng_access_end(int_ctx, soc, err_dst_srng);
3512 
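	/*
	 * Replenish the Rx refill ring with as many buffers as were reaped;
	 * the per-pdev lmac-ring config decides which refill ring and
	 * descriptor pool to use.
	 */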
3513 	if (rx_bufs_used) {
3514 		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3515 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3516 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
3517 		} else {
3518 			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
3519 			rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
3520 		}
3521 
3522 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3523 			rx_desc_pool, rx_bufs_used, &head, &tail, false);
3524 
3525 		work_done += rx_bufs_used;
3526 	}
3527 
3528 	return work_done;
3529 }
3530 
3531 #ifndef QCA_HOST_MODE_WIFI_DISABLED
3532 
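/*
 * dp_wbm_int_err_mpdu_pop() - Free the MSDUs of an MPDU reported through a
 *			       WBM internal error
 *
 * @soc: core DP main context
 * @mac_id: mac/pool id of the errored buffers
 * @rxdma_dst_ring_desc: HAL descriptor carrying the MSDU link desc address
 * @head: head of the list of freed Rx descriptors
 * @tail: tail of the list of freed Rx descriptors
 * @rx_bufs_used: per-pool count of the Rx buffers reaped here
 *
 * Return: None
 */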
3533 static inline void
3534 dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
3535 			hal_rxdma_desc_t rxdma_dst_ring_desc,
3536 			union dp_rx_desc_list_elem_t **head,
3537 			union dp_rx_desc_list_elem_t **tail,
3538 			uint32_t *rx_bufs_used)
3539 {
3540 	void *rx_msdu_link_desc;
3541 	qdf_nbuf_t msdu;
3542 	qdf_nbuf_t last;
3543 	struct hal_rx_msdu_list msdu_list;
3544 	uint16_t num_msdus;
3545 	struct hal_buf_info buf_info;
3546 	uint32_t msdu_cnt, i;
3547 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
3548 	struct rx_desc_pool *rx_desc_pool;
3549 	struct dp_rx_desc *rx_desc;
3550 
3551 	msdu = NULL;
3552 
3553 	last = NULL;
3554 
3555 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
3556 				     &buf_info, &msdu_cnt);
3557 
3558 	do {
3559 		rx_msdu_link_desc =
3560 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
3561 
3562 		if (!rx_msdu_link_desc) {
3563 			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
3564 			break;
3565 		}
3566 
3567 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
3568 				     &msdu_list, &num_msdus);
3569 
3570 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
3571 			for (i = 0; i < num_msdus; i++) {
3572 				if (!dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[i])) {
3573 					dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x",
3574 							  msdu_list.sw_cookie[i]);
3575 					continue;
3576 				}
3577 
3578 				rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3579 							soc,
3580 							msdu_list.sw_cookie[i]);
3581 				qdf_assert_always(rx_desc);
3582 				rx_desc_pool =
3583 					&soc->rx_desc_buf[rx_desc->pool_id];
3584 				msdu = rx_desc->nbuf;
3585 
3586 				/*
3587 				 * This is an unlikely scenario where the host is reaping
3588 				 * a descriptor which it already reaped just a while ago
3589 				 * but is yet to replenish it back to HW.
3590 				 */
3591 				if (qdf_unlikely(!rx_desc->in_use) ||
3592 				    qdf_unlikely(!msdu)) {
3593 					dp_rx_err_info_rl("Reaping rx_desc not in use!");
3594 					continue;
3595 				}
3596 
3597 				dp_ipa_rx_buf_smmu_mapping_lock(soc);
3598 				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
3599 				rx_desc->unmapped = 1;
3600 				dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3601 
3602 				dp_rx_buffer_pool_nbuf_free(soc, msdu,
3603 							    rx_desc->pool_id);
3604 				rx_bufs_used[rx_desc->pool_id]++;
3605 				dp_rx_add_to_free_desc_list(head,
3606 							    tail, rx_desc);
3607 			}
3608 		}
3609 
3610 		/*
3611 		 * Store the current link buffer into the local structure
3612 		 * to be used for release purposes.
3613 		 */
3614 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3615 					     buf_info.paddr, buf_info.sw_cookie,
3616 					     buf_info.rbm);
3617 
3618 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3619 					      &buf_info);
3620 		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
3621 					       rx_link_buf_info,
3622 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
3623 	} while (buf_info.paddr);
3624 }
3625 
3626 /*
3627  *
3628  * dp_handle_wbm_internal_error() - handles wbm_internal_error case
3629  *
3630  * @soc: core DP main context
3631  * @hal_desc: hal descriptor
3632  * @buf_type: indicates if the buffer is of type link desc or msdu
3633  * Return: None
3634  *
3635  * wbm_internal_error is seen in the following scenarios:
3636  *
3637  * 1.  Null pointers detected in WBM_RELEASE_RING descriptors
3638  * 2.  Null pointers detected during delinking process
3639  *
3640  * Some null pointer cases:
3641  *
3642  * a. MSDU buffer pointer is NULL
3643  * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
3644  * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
3645  */
3646 void
3647 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
3648 			     uint32_t buf_type)
3649 {
3650 	struct hal_buf_info buf_info = {0};
3651 	struct dp_rx_desc *rx_desc = NULL;
3652 	struct rx_desc_pool *rx_desc_pool;
3653 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0};
3654 	union dp_rx_desc_list_elem_t *head = NULL;
3655 	union dp_rx_desc_list_elem_t *tail = NULL;
3656 	uint8_t pool_id;
3657 	uint8_t mac_id;
3658 
3659 	hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info);
3660 
3661 	if (!buf_info.paddr) {
3662 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
3663 		return;
3664 	}
3665 
3666 	/* buffer_addr_info is the first element of ring_desc */
3667 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc,
3668 				  &buf_info);
3669 
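	/*
	 * For a buffer-type release the cookie maps directly to an Rx
	 * descriptor; for a link-desc-type release the cookie maps to an
	 * MSDU link descriptor whose MSDU list has to be walked and freed.
	 */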
3670 	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
3671 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
3672 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3673 							soc,
3674 							buf_info.sw_cookie);
3675 
3676 		if (rx_desc && rx_desc->nbuf) {
3677 			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
3678 			dp_ipa_rx_buf_smmu_mapping_lock(soc);
3679 			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
3680 					      rx_desc->nbuf);
3681 			rx_desc->unmapped = 1;
3682 			dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3683 
3684 			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
3685 						    rx_desc->pool_id);
3686 			dp_rx_add_to_free_desc_list(&head,
3687 						    &tail,
3688 						    rx_desc);
3689 
3690 			rx_bufs_reaped[rx_desc->pool_id]++;
3691 		}
3692 	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
3693 		pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie);
3694 
3695 		dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc,
3696 					&head, &tail, rx_bufs_reaped);
3697 	}
3698 
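	/* Replenish the Rx refill ring of every pool from which buffers
	 * were reaped above.
	 */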
3699 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
3700 		struct rx_desc_pool *rx_desc_pool;
3701 		struct dp_srng *dp_rxdma_srng;
3702 
3703 		if (!rx_bufs_reaped[mac_id])
3704 			continue;
3705 
3706 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
3707 		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3708 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
3709 
3710 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3711 					rx_desc_pool,
3712 					rx_bufs_reaped[mac_id],
3713 					&head, &tail, false);
3714 	}
3715 }
3716 
3717 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3718