xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c (revision cbe81707988efe2ae91b3ee68cb9464251d5e597)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #include "dp_types.h"
22 #include "dp_rx.h"
23 #include "dp_tx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 #include "hal_api.h"
27 #include "qdf_trace.h"
28 #include "qdf_nbuf.h"
29 #include "dp_rx_defrag.h"
30 #include "dp_ipa.h"
31 #ifdef WIFI_MONITOR_SUPPORT
32 #include "dp_htt.h"
33 #include <dp_mon.h>
34 #endif
35 #ifdef FEATURE_WDS
36 #include "dp_txrx_wds.h"
37 #endif
38 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
39 #include "qdf_net_types.h"
40 #include "dp_rx_buffer_pool.h"
41 
42 #define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
43 #define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
44 #define dp_rx_err_info(params...) \
45 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
46 #define dp_rx_err_info_rl(params...) \
47 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
48 #define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params)
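/*
 * The macros above wrap the QDF trace framework for the DP_RX_ERROR module at
 * the corresponding verbosity levels; the _rl variant is the rate-limited
 * form, so repeated error prints do not flood the log.
 */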
49 
50 #ifndef QCA_HOST_MODE_WIFI_DISABLED
51 
52 
53 /* Max regular Rx packet routing error */
54 #define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20
55 #define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10
56 #define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* micro seconds */
57 
58 #ifdef FEATURE_MEC
59 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
60 			    struct dp_txrx_peer *txrx_peer,
61 			    uint8_t *rx_tlv_hdr,
62 			    qdf_nbuf_t nbuf)
63 {
64 	struct dp_vdev *vdev = txrx_peer->vdev;
65 	struct dp_pdev *pdev = vdev->pdev;
66 	struct dp_mec_entry *mecentry = NULL;
67 	struct dp_ast_entry *ase = NULL;
68 	uint16_t sa_idx = 0;
69 	uint8_t *data;
70 	/*
71 	 * Multicast Echo Check is required only if vdev is STA and
72 	 * the received pkt is a multicast/broadcast pkt; otherwise
73 	 * skip the MEC check.
74 	 */
75 	if (vdev->opmode != wlan_op_mode_sta)
76 		return false;
77 	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
78 		return false;
79 
80 	data = qdf_nbuf_data(nbuf);
81 
82 	/*
83 	 * if the received pkts src mac addr matches with vdev
84 	 * mac address then drop the pkt as it is looped back
85 	 */
86 	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
87 			  vdev->mac_addr.raw,
88 			  QDF_MAC_ADDR_SIZE)))
89 		return true;
90 
91 	/*
92 	 * In case of qwrap isolation mode, do not drop loopback packets.
93 	 * In isolation mode, all packets from the wired stations need to go
94 	 * to rootap and loop back to reach the wireless stations and
95 	 * vice-versa.
96 	 */
97 	if (qdf_unlikely(vdev->isolation_vdev))
98 		return false;
99 
100 	/*
101 	 * if the received pkts src mac addr matches with the
102 	 * wired PCs MAC addr which is behind the STA or with
103 	 * wireless STAs MAC addr which are behind the Repeater,
104 	 * then drop the pkt as it is looped back
105 	 */
106 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
107 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
108 
109 		if ((sa_idx < 0) ||
110 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
111 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
112 				  "invalid sa_idx: %d", sa_idx);
113 			qdf_assert_always(0);
114 		}
115 
116 		qdf_spin_lock_bh(&soc->ast_lock);
117 		ase = soc->ast_table[sa_idx];
118 
119 		/*
120 		 * This check was not needed since MEC is not dependent on AST,
121 		 * but without it SON has issues in the dual backhaul scenario.
122 		 * In APS SON mode, a client connected to the RE on 2G sends
123 		 * multicast packets; the RE sends them to the CAP over the 5G
124 		 * backhaul and the CAP loops them back on 2G to the RE.
125 		 * On receiving them on the 2G STA vap, we assume that the
126 		 * client has roamed and kick out the client.
127 		 */
128 		if (ase && (ase->peer_id != txrx_peer->peer_id)) {
129 			qdf_spin_unlock_bh(&soc->ast_lock);
130 			goto drop;
131 		}
132 
133 		qdf_spin_unlock_bh(&soc->ast_lock);
134 	}
135 
136 	qdf_spin_lock_bh(&soc->mec_lock);
137 
138 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
139 						   &data[QDF_MAC_ADDR_SIZE]);
140 	if (!mecentry) {
141 		qdf_spin_unlock_bh(&soc->mec_lock);
142 		return false;
143 	}
144 
145 	qdf_spin_unlock_bh(&soc->mec_lock);
146 
147 drop:
148 	dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT,
149 		       soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));
150 
151 	return true;
152 }
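/*
 * Summary of the check above: a multicast/broadcast frame received on a STA
 * vdev is treated as a multicast echo (return true, drop) when its source MAC
 * matches the vdev's own MAC, hits an AST entry owned by a different peer, or
 * matches an existing MEC entry, i.e. the frame is our own transmission
 * looped back by the upstream AP.
 */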
153 #endif
154 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
155 
156 void dp_rx_link_desc_refill_duplicate_check(
157 				struct dp_soc *soc,
158 				struct hal_buf_info *buf_info,
159 				hal_buff_addrinfo_t ring_buf_info)
160 {
161 	struct hal_buf_info current_link_desc_buf_info = { 0 };
162 
163 	/* do duplicate link desc address check */
164 	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
165 					  &current_link_desc_buf_info);
166 
167 	/*
168 	 * TODO - Check if the hal soc api call can be removed
169 	 * since the cookie is just used for print.
170 	 * buffer_addr_info is the first element of ring_desc
171 	 */
172 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
173 				  (uint32_t *)ring_buf_info,
174 				  &current_link_desc_buf_info);
175 
176 	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
177 			 buf_info->paddr)) {
178 		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
179 			   current_link_desc_buf_info.paddr,
180 			   current_link_desc_buf_info.sw_cookie);
181 		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
182 	}
183 	*buf_info = current_link_desc_buf_info;
184 }
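/*
 * Note: besides flagging a duplicate refill, the helper above also caches the
 * current link descriptor address/cookie into *buf_info, so the next call
 * compares against the most recently released link descriptor.
 */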
185 
186 QDF_STATUS
187 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
188 			       hal_buff_addrinfo_t link_desc_addr,
189 			       uint8_t bm_action)
190 {
191 	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
192 	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
193 	hal_soc_handle_t hal_soc = soc->hal_soc;
194 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
195 	void *src_srng_desc;
196 
197 	if (!wbm_rel_srng) {
198 		dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc);
199 		return status;
200 	}
201 
202 	/* do duplicate link desc address check */
203 	dp_rx_link_desc_refill_duplicate_check(
204 				soc,
205 				&soc->last_op_info.wbm_rel_link_desc,
206 				link_desc_addr);
207 
208 	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
209 
210 		/* TODO */
211 		/*
212 		 * Need API to convert from hal_ring pointer to
213 		 * Ring Type / Ring Id combo
214 		 */
215 		dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK",
216 			      soc, wbm_rel_srng);
217 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
218 		goto done;
219 	}
220 	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
221 	if (qdf_likely(src_srng_desc)) {
222 		/* Return link descriptor through WBM ring (SW2WBM)*/
223 		hal_rx_msdu_link_desc_set(hal_soc,
224 				src_srng_desc, link_desc_addr, bm_action);
225 		status = QDF_STATUS_SUCCESS;
226 	} else {
227 		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
228 
229 		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);
230 
231 		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
232 			   srng->ring_id,
233 			   soc->stats.rx.err.hal_ring_access_full_fail);
234 		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
235 			   *srng->u.src_ring.hp_addr,
236 			   srng->u.src_ring.reap_hp,
237 			   *srng->u.src_ring.tp_addr,
238 			   srng->u.src_ring.cached_tp);
239 		QDF_BUG(0);
240 	}
241 done:
242 	hal_srng_access_end(hal_soc, wbm_rel_srng);
243 	return status;
244 
245 }
246 
247 qdf_export_symbol(dp_rx_link_desc_return_by_addr);
248 
249 QDF_STATUS
250 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
251 		       uint8_t bm_action)
252 {
253 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
254 
255 	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
256 }
257 
258 #ifndef QCA_HOST_MODE_WIFI_DISABLED
259 
260 /**
261  * dp_rx_msdus_drop() - Drops all MSDUs of an MPDU
262  *
263  * @soc: core txrx main context
264  * @ring_desc: opaque pointer to the REO error ring descriptor
265  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
266  * @mac_id: mac ID
267  * @quota: No. of units (packets) that can be serviced in one shot.
268  *
269  * This function is used to drop all MSDUs in an MPDU
270  *
271  * Return: uint32_t: No. of elements processed
272  */
273 static uint32_t
274 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
275 		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
276 		 uint8_t *mac_id,
277 		 uint32_t quota)
278 {
279 	uint32_t rx_bufs_used = 0;
280 	void *link_desc_va;
281 	struct hal_buf_info buf_info;
282 	struct dp_pdev *pdev;
283 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
284 	int i;
285 	uint8_t *rx_tlv_hdr;
286 	uint32_t tid;
287 	struct rx_desc_pool *rx_desc_pool;
288 	struct dp_rx_desc *rx_desc;
289 	/* First field in REO Dst ring Desc is buffer_addr_info */
290 	void *buf_addr_info = ring_desc;
291 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
292 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
293 
294 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info);
295 
296 	/* buffer_addr_info is the first element of ring_desc */
297 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
298 				  (uint32_t *)ring_desc,
299 				  &buf_info);
300 
301 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
302 	if (!link_desc_va) {
303 		dp_rx_err_debug("link desc va is null, soc %pK", soc);
304 		return rx_bufs_used;
305 	}
306 
307 more_msdu_link_desc:
308 	/* No UNMAP required -- this is "malloc_consistent" memory */
309 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
310 			     &mpdu_desc_info->msdu_count);
311 
312 	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
313 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
314 						soc, msdu_list.sw_cookie[i]);
315 
316 		qdf_assert_always(rx_desc);
317 
318 		/* all buffers from an MSDU link desc belong to the same pdev */
319 		*mac_id = rx_desc->pool_id;
320 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
321 		if (!pdev) {
322 			dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
323 					soc, rx_desc->pool_id);
324 			return rx_bufs_used;
325 		}
326 
327 		if (!dp_rx_desc_check_magic(rx_desc)) {
328 			dp_rx_err_err("%pK: Invalid rx_desc cookie=%d",
329 				      soc, msdu_list.sw_cookie[i]);
330 			return rx_bufs_used;
331 		}
332 
333 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
334 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
335 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
336 		rx_desc->unmapped = 1;
337 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
338 
339 		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);
340 
341 		rx_bufs_used++;
342 		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
343 						rx_desc->rx_buf_start);
344 		dp_rx_err_err("%pK: Packet received with PN error for tid :%d",
345 			      soc, tid);
346 
347 		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
348 		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
349 			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);
350 
351 		dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info,
352 				      rx_desc->nbuf,
353 				      QDF_TX_RX_STATUS_DROP, true);
354 		/* Just free the buffers */
355 		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);
356 
357 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
358 					    &pdev->free_list_tail, rx_desc);
359 	}
360 
361 	/*
362 	 * If the msdus are spread across multiple link descriptors,
363 	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
364 	 * spread across multiple buffers). Hence, it is
365 	 * necessary to check the next link descriptor and release
366 	 * all the msdus that are part of it.
367 	 */
368 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
369 			link_desc_va,
370 			&next_link_desc_addr_info);
371 
372 	if (hal_rx_is_buf_addr_info_valid(
373 				&next_link_desc_addr_info)) {
374 		/* Clear the next link desc info for the current link_desc */
375 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
376 
377 		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
378 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
379 		hal_rx_buffer_addr_info_get_paddr(
380 				&next_link_desc_addr_info,
381 				&buf_info);
382 		/* buffer_addr_info is the first element of ring_desc */
383 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
384 					  (uint32_t *)&next_link_desc_addr_info,
385 					  &buf_info);
386 		cur_link_desc_addr_info = next_link_desc_addr_info;
387 		buf_addr_info = &cur_link_desc_addr_info;
388 
389 		link_desc_va =
390 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
391 
392 		goto more_msdu_link_desc;
393 	}
394 	quota--;
395 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
396 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
397 	return rx_bufs_used;
398 }
399 
400 /**
401  * dp_rx_pn_error_handle() - Handles PN check errors
402  *
403  * @soc: core txrx main context
404  * @ring_desc: opaque pointer to the REO error ring descriptor
405  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
406  * @mac_id: mac ID
407  * @quota: No. of units (packets) that can be serviced in one shot.
408  *
409  * This function implements PN error handling
410  * If the peer is configured to ignore the PN check errors
411  * or if DP determines that this frame is still OK, the frame can be
412  * re-injected back to REO to use some of the other features
413  * of REO e.g. duplicate detection/routing to other cores
414  *
415  * Return: uint32_t: No. of elements processed
416  */
417 static uint32_t
418 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
419 		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
420 		      uint8_t *mac_id,
421 		      uint32_t quota)
422 {
423 	uint16_t peer_id;
424 	uint32_t rx_bufs_used = 0;
425 	struct dp_txrx_peer *txrx_peer;
426 	bool peer_pn_policy = false;
427 	dp_txrx_ref_handle txrx_ref_handle = NULL;
428 
429 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
430 					       mpdu_desc_info->peer_meta_data);
431 
432 
433 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
434 						   &txrx_ref_handle,
435 						   DP_MOD_ID_RX_ERR);
436 
437 	if (qdf_likely(txrx_peer)) {
438 		/*
439 		 * TODO: Check for peer specific policies & set peer_pn_policy
440 		 */
441 		dp_err_rl("discard rx due to PN error for peer  %pK",
442 			  txrx_peer);
443 
444 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
445 	}
446 	dp_rx_err_err("%pK: Packet received with PN error", soc);
447 
448 	/* No peer PN policy -- definitely drop */
449 	if (!peer_pn_policy)
450 		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
451 						mpdu_desc_info,
452 						mac_id, quota);
453 
454 	return rx_bufs_used;
455 }
456 
457 #ifdef DP_RX_DELIVER_ALL_OOR_FRAMES
458 /**
459  * dp_rx_deliver_oor_frame() - deliver OOR frames to stack
460  * @soc: Datapath soc handler
461  * @txrx_peer: pointer to DP peer
462  * @nbuf: pointer to the skb of RX frame
463  * @frame_mask: the mask for special frame needed
464  * @rx_tlv_hdr: start of rx tlv header
465  *
466  * Note: msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
467  * a single nbuf is expected.
468  *
469  * return: true - nbuf has been delivered to stack, false - not.
470  */
471 static bool
472 dp_rx_deliver_oor_frame(struct dp_soc *soc,
473 			struct dp_txrx_peer *txrx_peer,
474 			qdf_nbuf_t nbuf, uint32_t frame_mask,
475 			uint8_t *rx_tlv_hdr)
476 {
477 	uint32_t l2_hdr_offset = 0;
478 	uint16_t msdu_len = 0;
479 	uint32_t skip_len;
480 
481 	l2_hdr_offset =
482 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
483 
484 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
485 		skip_len = l2_hdr_offset;
486 	} else {
487 		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
488 		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
489 		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
490 	}
491 
492 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
493 	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
494 	qdf_nbuf_pull_head(nbuf, skip_len);
495 	qdf_nbuf_set_exc_frame(nbuf, 1);
496 
497 	dp_info_rl("OOR frame, mpdu sn 0x%x",
498 		   hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
499 	dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL);
500 	return true;
501 }
502 
503 #else
504 static bool
505 dp_rx_deliver_oor_frame(struct dp_soc *soc,
506 			struct dp_txrx_peer *txrx_peer,
507 			qdf_nbuf_t nbuf, uint32_t frame_mask,
508 			uint8_t *rx_tlv_hdr)
509 {
510 	return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
511 					   rx_tlv_hdr);
512 }
513 #endif
514 
515 /**
516  * dp_rx_oor_handle() - Handles the msdu which is OOR error
517  *
518  * @soc: core txrx main context
519  * @nbuf: pointer to msdu skb
520  * @peer_id: dp peer ID
521  * @rx_tlv_hdr: start of rx tlv header
522  *
523  * This function processes the msdu delivered from the REO2TCL
524  * ring with error type OOR
525  *
526  * Return: None
527  */
528 static void
529 dp_rx_oor_handle(struct dp_soc *soc,
530 		 qdf_nbuf_t nbuf,
531 		 uint16_t peer_id,
532 		 uint8_t *rx_tlv_hdr)
533 {
534 	uint32_t frame_mask = wlan_cfg_get_special_frame_cfg(soc->wlan_cfg_ctx);
535 
536 	struct dp_txrx_peer *txrx_peer = NULL;
537 	dp_txrx_ref_handle txrx_ref_handle = NULL;
538 
539 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
540 						   &txrx_ref_handle,
541 						   DP_MOD_ID_RX_ERR);
542 	if (!txrx_peer) {
543 		dp_info_rl("peer not found");
544 		goto free_nbuf;
545 	}
546 
547 	if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask,
548 				    rx_tlv_hdr)) {
549 		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
550 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
551 		return;
552 	}
553 
554 free_nbuf:
555 	if (txrx_peer)
556 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
557 
558 	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
559 	dp_rx_nbuf_free(nbuf);
560 }
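/*
 * Note: the frame_mask fetched from wlan_cfg above decides which special
 * frames (presumably EAPOL/ARP/DHCP style frames, depending on the platform
 * configuration) are still delivered to the stack despite the OOR error;
 * everything else is dropped and counted under reo_err_oor_drop.
 */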
561 
562  * dp_rx_err_nbuf_pn_check() - Check if the PN of the current packet
563  *				is a monotonic increment of the packet number
564  *				is a monotonous increment of packet number
565  *				from the previous successfully re-ordered
566  *				frame.
567  * @soc: Datapath SOC handle
568  * @ring_desc: REO ring descriptor
569  * @nbuf: Current packet
570  *
571  * Return: QDF_STATUS_SUCCESS, if the pn check passes, else QDF_STATUS_E_FAILURE
572  */
573 static inline QDF_STATUS
574 dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc,
575 			qdf_nbuf_t nbuf)
576 {
577 	uint64_t prev_pn, curr_pn[2];
578 
579 	if (!hal_rx_encryption_info_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
580 		return QDF_STATUS_SUCCESS;
581 
582 	hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn);
583 	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), curr_pn);
584 
585 	if (curr_pn[0] > prev_pn)
586 		return QDF_STATUS_SUCCESS;
587 
588 	return QDF_STATUS_E_FAILURE;
589 }
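/*
 * Note: only the lower 64 bits of the packet number (curr_pn[0]) are compared
 * against the previous PN reported by REO in the ring descriptor; a
 * non-increasing PN is reported as QDF_STATUS_E_FAILURE and the frame is
 * dropped by the caller.
 */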
590 
591 #ifdef WLAN_SKIP_BAR_UPDATE
592 static
593 void dp_rx_err_handle_bar(struct dp_soc *soc,
594 			  struct dp_peer *peer,
595 			  qdf_nbuf_t nbuf)
596 {
597 	dp_info_rl("BAR update to H.W is skipped");
598 	DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
599 }
600 #else
601 static
602 void dp_rx_err_handle_bar(struct dp_soc *soc,
603 			  struct dp_peer *peer,
604 			  qdf_nbuf_t nbuf)
605 {
606 	uint8_t *rx_tlv_hdr;
607 	unsigned char type, subtype;
608 	uint16_t start_seq_num;
609 	uint32_t tid;
610 	QDF_STATUS status;
611 	struct ieee80211_frame_bar *bar;
612 
613 	/*
614 	 * 1. Is this a BAR frame? If not, discard it.
615 	 * 2. If it is, get the peer id, tid, ssn.
616 	 * 2a. Do a tid update.
617 	 */
618 
619 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
620 	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size);
621 
622 	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
623 	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
624 
625 	if (!(type == IEEE80211_FC0_TYPE_CTL &&
626 	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
627 		dp_err_rl("Not a BAR frame!");
628 		return;
629 	}
630 
631 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
632 	qdf_assert_always(tid < DP_MAX_TIDS);
633 
634 	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
635 
636 	dp_info_rl("tid %u window_size %u start_seq_num %u",
637 		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);
638 
639 	status = dp_rx_tid_update_wifi3(peer, tid,
640 					peer->rx_tid[tid].ba_win_size,
641 					start_seq_num,
642 					true);
643 	if (status != QDF_STATUS_SUCCESS) {
644 		dp_err_rl("failed to handle bar frame update rx tid");
645 		DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
646 	} else {
647 		DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
648 	}
649 }
650 #endif
651 
652 /**
653  * _dp_rx_bar_frame_handle(): Core of the BAR frame handling
654  * @soc: Datapath SoC handle
655  * @nbuf: packet being processed
656  * @mpdu_desc_info: mpdu desc info for the current packet
657  * @tid: tid on which the packet arrived
658  * @err_status: Flag to indicate if REO encountered an error while routing this
659  *		frame
660  * @error_code: REO error code
661  *
662  * Return: None
663  */
664 static void
665 _dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
666 			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
667 			uint32_t tid, uint8_t err_status, uint32_t error_code)
668 {
669 	uint16_t peer_id;
670 	struct dp_peer *peer;
671 
672 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
673 					       mpdu_desc_info->peer_meta_data);
674 	peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
675 	if (!peer)
676 		return;
677 
678 	dp_info_rl("BAR frame: "
679 		" peer_id = %d"
680 		" tid = %u"
681 		" SSN = %d"
682 		" error status = %d",
683 		peer->peer_id,
684 		tid,
685 		mpdu_desc_info->mpdu_seq,
686 		err_status);
687 
688 	if (err_status == HAL_REO_ERROR_DETECTED) {
689 		switch (error_code) {
690 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
691 		case HAL_REO_ERR_BAR_FRAME_OOR:
692 			dp_rx_err_handle_bar(soc, peer, nbuf);
693 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
694 			break;
695 		default:
696 			DP_STATS_INC(soc, rx.bar_frame, 1);
697 		}
698 	}
699 
700 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
701 }
702 
703 /**
704  * dp_rx_bar_frame_handle() - Function to handle err BAR frames
705  * @soc: core DP main context
706  * @ring_desc: Hal ring desc
707  * @rx_desc: dp rx desc
708  * @mpdu_desc_info: mpdu desc info
709  * @err_status: error status
710  * @err_code: error code
711  *
712  * Handle the error BAR frames received. Ensure the SOC level
713  * stats are updated based on the REO error code. The BAR frames
714  * are further processed by updating the Rx tids with the start
715  * sequence number (SSN) and BA window size. Desc is returned
716  * to the free desc list
717  *
718  * Return: none
719  */
720 static void
721 dp_rx_bar_frame_handle(struct dp_soc *soc,
722 		       hal_ring_desc_t ring_desc,
723 		       struct dp_rx_desc *rx_desc,
724 		       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
725 		       uint8_t err_status,
726 		       uint32_t err_code)
727 {
728 	qdf_nbuf_t nbuf;
729 	struct dp_pdev *pdev;
730 	struct rx_desc_pool *rx_desc_pool;
731 	uint8_t *rx_tlv_hdr;
732 	uint32_t tid;
733 
734 	nbuf = rx_desc->nbuf;
735 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
736 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
737 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
738 	rx_desc->unmapped = 1;
739 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
740 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
741 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
742 					rx_tlv_hdr);
743 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
744 
745 	if (!pdev) {
746 		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
747 				soc, rx_desc->pool_id);
748 		return;
749 	}
750 
751 	_dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status,
752 				err_code);
753 	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf,
754 			      QDF_TX_RX_STATUS_DROP, true);
755 	dp_rx_link_desc_return(soc, ring_desc,
756 			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
757 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
758 				    rx_desc->pool_id);
759 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
760 				    &pdev->free_list_tail,
761 				    rx_desc);
762 }
763 
764 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
765 
766 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
767 		       uint16_t peer_id, uint8_t tid)
768 {
769 	struct dp_peer *peer = NULL;
770 	struct dp_rx_tid *rx_tid = NULL;
771 	struct dp_txrx_peer *txrx_peer;
772 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;
773 
774 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
775 	if (!peer) {
776 		dp_rx_err_info_rl("%pK: peer not found", soc);
777 		goto free_nbuf;
778 	}
779 
780 	txrx_peer = dp_get_txrx_peer(peer);
781 	if (!txrx_peer) {
782 		dp_rx_err_info_rl("%pK: txrx_peer not found", soc);
783 		goto free_nbuf;
784 	}
785 
786 	if (tid >= DP_MAX_TIDS) {
787 		dp_info_rl("invalid tid");
788 		goto nbuf_deliver;
789 	}
790 
791 	rx_tid = &peer->rx_tid[tid];
792 	qdf_spin_lock_bh(&rx_tid->tid_lock);
793 
794 	/* only if BA session is active, allow send Delba */
795 	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
796 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
797 		goto nbuf_deliver;
798 	}
799 
800 	if (!rx_tid->delba_tx_status) {
801 		rx_tid->delba_tx_retry++;
802 		rx_tid->delba_tx_status = 1;
803 		rx_tid->delba_rcode =
804 			IEEE80211_REASON_QOS_SETUP_REQUIRED;
805 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
806 		if (soc->cdp_soc.ol_ops->send_delba) {
807 			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
808 				     1);
809 			soc->cdp_soc.ol_ops->send_delba(
810 					peer->vdev->pdev->soc->ctrl_psoc,
811 					peer->vdev->vdev_id,
812 					peer->mac_addr.raw,
813 					tid,
814 					rx_tid->delba_rcode,
815 					CDP_DELBA_2K_JUMP);
816 		}
817 	} else {
818 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
819 	}
820 
821 nbuf_deliver:
822 	if (dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
823 					rx_tlv_hdr)) {
824 		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
825 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
826 		return;
827 	}
828 
829 free_nbuf:
830 	if (peer)
831 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
832 	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
833 	dp_rx_nbuf_free(nbuf);
834 }
835 
836 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
837     defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
838 bool
839 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
840 					      uint8_t pool_id,
841 					      uint8_t *rx_tlv_hdr,
842 					      qdf_nbuf_t nbuf)
843 {
844 	struct dp_peer *peer = NULL;
845 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
846 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
847 	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;
848 
849 	if (!pdev) {
850 		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
851 				soc, pool_id);
852 		return false;
853 	}
854 	/*
855 	 * WAR - In certain types of packets, if the peer_id is not correct,
856 	 * the driver may not be able to find the peer. Try finding the peer
857 	 * by addr_2 of the received MPDU.
858 	 */
859 	if (wh)
860 		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
861 					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
862 	if (peer) {
863 		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
864 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
865 				     QDF_TRACE_LEVEL_DEBUG);
866 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
867 				 1, qdf_nbuf_len(nbuf));
868 		dp_rx_nbuf_free(nbuf);
869 
870 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
871 		return true;
872 	}
873 	return false;
874 }
875 #else
876 bool
877 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
878 					      uint8_t pool_id,
879 					      uint8_t *rx_tlv_hdr,
880 					      qdf_nbuf_t nbuf)
881 {
882 	return false;
883 }
884 #endif
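/*
 * The WAR above is compiled in only for the targets listed in the #if; on
 * other targets an invalid peer_id is not given this second lookup by addr_2
 * and the frame continues through the normal null-queue error path.
 */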
885 
886 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
887 {
888 	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
889 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
890 				 1, pkt_len);
891 		return true;
892 	} else {
893 		return false;
894 	}
895 }
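/*
 * Illustrative usage (a sketch mirroring dp_rx_process_rxdma_err() later in
 * this file): callers first compute the full packet length from the rx TLVs
 * and drop the frame when it exceeds the Rx data buffer size, e.g.
 *
 *	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
 *	if (dp_rx_check_pkt_len(soc, pkt_len)) {
 *		dp_rx_nbuf_free(nbuf);
 *		return;
 *	}
 */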
896 
897 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
898 void
899 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
900 			    struct dp_vdev *vdev,
901 			    struct dp_txrx_peer *txrx_peer,
902 			    qdf_nbuf_t nbuf,
903 			    qdf_nbuf_t tail,
904 			    bool is_eapol)
905 {
906 	if (is_eapol && soc->eapol_over_control_port)
907 		dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
908 	else
909 		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
910 }
911 #else
912 void
913 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
914 			    struct dp_vdev *vdev,
915 			    struct dp_txrx_peer *txrx_peer,
916 			    qdf_nbuf_t nbuf,
917 			    qdf_nbuf_t tail,
918 			    bool is_eapol)
919 {
920 	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
921 }
922 #endif
923 
924 #ifdef WLAN_FEATURE_11BE_MLO
925 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
926 {
927 	return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
928 			     QDF_MAC_ADDR_SIZE) == 0) ||
929 		(qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
930 			     QDF_MAC_ADDR_SIZE) == 0));
931 }
932 
933 #else
934 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
935 {
936 	return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
937 			    QDF_MAC_ADDR_SIZE) == 0);
938 }
939 #endif
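/*
 * With 11BE MLO enabled, an exception frame may be addressed either to the
 * link vdev MAC or to the MLD MAC address, so both are accepted as a
 * destination match; the non-MLO variant checks only the vdev MAC.
 */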
940 
941 #ifndef QCA_HOST_MODE_WIFI_DISABLED
942 
943 bool
944 dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
945 {
946 	struct dp_soc *soc = vdev->pdev->soc;
947 
948 	if (!vdev->drop_3addr_mcast)
949 		return false;
950 
951 	if (vdev->opmode != wlan_op_mode_sta)
952 		return false;
953 
954 	if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
955 		return true;
956 
957 	return false;
958 }
959 
960 /**
961  * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed
962  *				for this frame received in REO error ring.
963  * @soc: Datapath SOC handle
964  * @error: REO error detected or not
965  * @error_code: Error code in case of REO error
966  *
967  * Return: true if the pn check is needed in software,
968  *	false if the pn check is not needed.
969  */
970 static inline bool
971 dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
972 			     uint32_t error_code)
973 {
974 	return (soc->features.pn_in_reo_dest &&
975 		(error == HAL_REO_ERROR_DETECTED &&
976 		 (hal_rx_reo_is_2k_jump(error_code) ||
977 		  hal_rx_reo_is_oor_error(error_code) ||
978 		  hal_rx_reo_is_bar_oor_2k_jump(error_code))));
979 }
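/*
 * In other words, the software PN check is needed only when the target
 * reports the previous PN via the REO destination ring
 * (soc->features.pn_in_reo_dest) and the REO error is one of the 2K-jump or
 * OOR variants (including their BAR forms); for all other error codes the
 * check is skipped.
 */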
980 
981 #ifdef DP_WAR_INVALID_FIRST_MSDU_FLAG
982 static inline void
983 dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
984 				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
985 				  bool first_msdu_in_mpdu_processed)
986 {
987 	if (first_msdu_in_mpdu_processed) {
988 		/*
989 		 * This is the 2nd indication of first_msdu in the same mpdu.
990 		 * Skip re-parsing the mpdu_desc_info and use the cached one,
991 		 * since this msdu is most probably from the current mpdu
992 		 * which is being processed.
993 		 */
994 	} else {
995 		hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc,
996 						   qdf_nbuf_data(nbuf),
997 						   mpdu_desc_info);
998 	}
999 }
1000 #else
1001 static inline void
1002 dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
1003 				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1004 				  bool first_msdu_in_mpdu_processed)
1005 {
1006 	hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, qdf_nbuf_data(nbuf),
1007 					   mpdu_desc_info);
1008 }
1009 #endif
1010 
1011 /**
1012  * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
1013  *
1014  * @soc: core txrx main context
1015  * @ring_desc: opaque pointer to the REO error ring descriptor
1016  * @mpdu_desc_info: pointer to mpdu level description info
1017  * @link_desc_va: pointer to msdu_link_desc virtual address
1018  * @err_code: reo error code fetched from ring entry
1019  *
1020  * Function to handle msdus fetched from the msdu link desc; currently
1021  * supports the REO errors NULL queue, 2K jump and OOR.
1022  *
1023  * Return: msdu count processed
1024  */
1025 static uint32_t
1026 dp_rx_reo_err_entry_process(struct dp_soc *soc,
1027 			    void *ring_desc,
1028 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1029 			    void *link_desc_va,
1030 			    enum hal_reo_error_code err_code)
1031 {
1032 	uint32_t rx_bufs_used = 0;
1033 	struct dp_pdev *pdev;
1034 	int i;
1035 	uint8_t *rx_tlv_hdr_first;
1036 	uint8_t *rx_tlv_hdr_last;
1037 	uint32_t tid = DP_MAX_TIDS;
1038 	uint16_t peer_id;
1039 	struct dp_rx_desc *rx_desc;
1040 	struct rx_desc_pool *rx_desc_pool;
1041 	qdf_nbuf_t nbuf;
1042 	qdf_nbuf_t next_nbuf;
1043 	struct hal_buf_info buf_info;
1044 	struct hal_rx_msdu_list msdu_list;
1045 	uint16_t num_msdus;
1046 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
1047 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
1048 	/* First field in REO Dst ring Desc is buffer_addr_info */
1049 	void *buf_addr_info = ring_desc;
1050 	qdf_nbuf_t head_nbuf = NULL;
1051 	qdf_nbuf_t tail_nbuf = NULL;
1052 	uint16_t msdu_processed = 0;
1053 	QDF_STATUS status;
1054 	bool ret, is_pn_check_needed;
1055 	uint8_t rx_desc_pool_id;
1056 	struct dp_txrx_peer *txrx_peer = NULL;
1057 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1058 	hal_ring_handle_t hal_ring_hdl = soc->reo_exception_ring.hal_srng;
1059 	bool first_msdu_in_mpdu_processed = false;
1060 	bool msdu_dropped = false;
1061 	uint8_t link_id = 0;
1062 
1063 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1064 					mpdu_desc_info->peer_meta_data);
1065 	is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
1066 							  HAL_REO_ERROR_DETECTED,
1067 							  err_code);
1068 more_msdu_link_desc:
1069 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
1070 			     &num_msdus);
1071 	for (i = 0; i < num_msdus; i++) {
1072 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
1073 						soc,
1074 						msdu_list.sw_cookie[i]);
1075 
1076 		qdf_assert_always(rx_desc);
1077 		nbuf = rx_desc->nbuf;
1078 
1079 		/*
1080 		 * This is an unlikely scenario where the host is reaping
1081 		 * a descriptor which it already reaped just a while ago
1082 		 * but is yet to replenish it back to HW.
1083 		 * In this case the host will dump the last 128 descriptors
1084 		 * including the software descriptor rx_desc and assert.
1085 		 */
1086 		if (qdf_unlikely(!rx_desc->in_use) ||
1087 		    qdf_unlikely(!nbuf)) {
1088 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1089 			dp_info_rl("Reaping rx_desc not in use!");
1090 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1091 						   ring_desc, rx_desc);
1092 			/* ignore duplicate RX desc and continue to process */
1093 			/* Pop out the descriptor */
1094 			msdu_dropped = true;
1095 			continue;
1096 		}
1097 
1098 		ret = dp_rx_desc_paddr_sanity_check(rx_desc,
1099 						    msdu_list.paddr[i]);
1100 		if (!ret) {
1101 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1102 			rx_desc->in_err_state = 1;
1103 			msdu_dropped = true;
1104 			continue;
1105 		}
1106 
1107 		rx_desc_pool_id = rx_desc->pool_id;
1108 		/* all buffers from a MSDU link belong to same pdev */
1109 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id);
1110 
1111 		rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id];
1112 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
1113 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
1114 		rx_desc->unmapped = 1;
1115 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1116 
1117 		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
1118 		rx_bufs_used++;
1119 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1120 					    &pdev->free_list_tail, rx_desc);
1121 
1122 		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);
1123 
1124 		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
1125 				 HAL_MSDU_F_MSDU_CONTINUATION)) {
1126 			qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
1127 			continue;
1128 		}
1129 
1130 		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
1131 					     rx_desc_pool_id)) {
1132 			/* MSDU queued back to the pool */
1133 			msdu_dropped = true;
1134 			head_nbuf = NULL;
1135 			goto process_next_msdu;
1136 		}
1137 
1138 		if (is_pn_check_needed) {
1139 			if (msdu_list.msdu_info[i].msdu_flags &
1140 			    HAL_MSDU_F_FIRST_MSDU_IN_MPDU) {
1141 				dp_rx_err_populate_mpdu_desc_info(soc, nbuf,
1142 						mpdu_desc_info,
1143 						first_msdu_in_mpdu_processed);
1144 				first_msdu_in_mpdu_processed = true;
1145 			} else {
1146 				if (!first_msdu_in_mpdu_processed) {
1147 					/*
1148 					 * If no msdu in this mpdu was dropped
1149 					 * due to failed sanity checks, then
1150 					 * it is not expected to hit this
1151 					 * condition. Hence we assert here.
1152 					 */
1153 					if (!msdu_dropped)
1154 						qdf_assert_always(0);
1155 
1156 					/*
1157 					 * We do not have valid mpdu_desc_info
1158 					 * to process this nbuf, hence drop it.
1159 					 * TODO - Increment stats
1160 					 */
1161 					goto process_next_msdu;
1162 				}
1163 				/*
1164 				 * DO NOTHING -
1165 				 * Continue using the same mpdu_desc_info
1166 				 * details populated from the first msdu in
1167 				 * the mpdu.
1168 				 */
1169 			}
1170 
1171 			status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf);
1172 			if (QDF_IS_STATUS_ERROR(status)) {
1173 				DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail,
1174 					     1);
1175 				goto process_next_msdu;
1176 			}
1177 
1178 			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1179 					mpdu_desc_info->peer_meta_data);
1180 
1181 			if (mpdu_desc_info->bar_frame)
1182 				_dp_rx_bar_frame_handle(soc, nbuf,
1183 							mpdu_desc_info, tid,
1184 							HAL_REO_ERROR_DETECTED,
1185 							err_code);
1186 		}
1187 
1188 		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
1189 		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);
1190 
1191 		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
1192 			/*
1193 			 * For SG case, only the length of last skb is valid
1194 			 * as HW only populates the msdu_len for the last msdu
1195 			 * in rx link descriptor, use the length from
1196 			 * last skb to overwrite the head skb for further
1197 			 * SG processing.
1198 			 */
1199 			QDF_NBUF_CB_RX_PKT_LEN(head_nbuf) =
1200 					QDF_NBUF_CB_RX_PKT_LEN(tail_nbuf);
1201 			nbuf = dp_rx_sg_create(soc, head_nbuf);
1202 			qdf_nbuf_set_is_frag(nbuf, 1);
1203 			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
1204 		}
1205 		head_nbuf = NULL;
1206 
1207 		switch (err_code) {
1208 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
1209 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
1210 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
1211 			/*
1212 			 * Only the first msdu carries a valid mpdu start
1213 			 * description TLV; use it for the following msdus.
1214 			 */
1215 			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1216 							   rx_tlv_hdr_last))
1217 				tid = hal_rx_mpdu_start_tid_get(
1218 							soc->hal_soc,
1219 							rx_tlv_hdr_first);
1220 
1221 			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
1222 					  peer_id, tid);
1223 			break;
1224 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
1225 		case HAL_REO_ERR_BAR_FRAME_OOR:
1226 			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
1227 			break;
1228 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
1229 			txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(
1230 							soc, peer_id,
1231 							&txrx_ref_handle,
1232 							DP_MOD_ID_RX_ERR);
1233 			if (!txrx_peer)
1234 				dp_info_rl("txrx_peer is null peer_id %u",
1235 					   peer_id);
1236 			soc->arch_ops.dp_rx_null_q_desc_handle(soc, nbuf,
1237 							       rx_tlv_hdr_last,
1238 							       rx_desc_pool_id,
1239 							       txrx_peer,
1240 							       TRUE,
1241 							       link_id);
1242 			if (txrx_peer)
1243 				dp_txrx_peer_unref_delete(txrx_ref_handle,
1244 							  DP_MOD_ID_RX_ERR);
1245 			break;
1246 		default:
1247 			dp_err_rl("Non-support error code %d", err_code);
1248 			dp_rx_nbuf_free(nbuf);
1249 		}
1250 
1251 process_next_msdu:
1252 		nbuf = head_nbuf;
1253 		while (nbuf) {
1254 			next_nbuf = qdf_nbuf_next(nbuf);
1255 			dp_rx_nbuf_free(nbuf);
1256 			nbuf = next_nbuf;
1257 		}
1258 		msdu_processed++;
1259 		head_nbuf = NULL;
1260 		tail_nbuf = NULL;
1261 	}
1262 
1263 	/*
1264 	 * If the msdus are spread across multiple link descriptors,
1265 	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
1266 	 * spread across multiple buffers). Hence, it is
1267 	 * necessary to check the next link descriptor and release
1268 	 * all the msdus that are part of it.
1269 	 */
1270 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
1271 			link_desc_va,
1272 			&next_link_desc_addr_info);
1273 
1274 	if (hal_rx_is_buf_addr_info_valid(
1275 				&next_link_desc_addr_info)) {
1276 		/* Clear the next link desc info for the current link_desc */
1277 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
1278 		dp_rx_link_desc_return_by_addr(
1279 				soc,
1280 				buf_addr_info,
1281 				HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1282 
1283 		hal_rx_buffer_addr_info_get_paddr(
1284 				&next_link_desc_addr_info,
1285 				&buf_info);
1286 		/* buffer_addr_info is the first element of ring_desc */
1287 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
1288 					  (uint32_t *)&next_link_desc_addr_info,
1289 					  &buf_info);
1290 		link_desc_va =
1291 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1292 		cur_link_desc_addr_info = next_link_desc_addr_info;
1293 		buf_addr_info = &cur_link_desc_addr_info;
1294 
1295 		goto more_msdu_link_desc;
1296 	}
1297 
1298 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
1299 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1300 	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
1301 		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);
1302 
1303 	return rx_bufs_used;
1304 }
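/*
 * The rx_bufs_used count returned above covers every Rx buffer reaped from
 * the MSDU link descriptors, so that the caller can replenish the
 * corresponding RXDMA buffer ring accordingly.
 */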
1305 
1306 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1307 
1308 void
1309 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
1310 			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
1311 			uint8_t err_code, uint8_t mac_id, uint8_t link_id)
1312 {
1313 	uint32_t pkt_len, l2_hdr_offset;
1314 	uint16_t msdu_len;
1315 	struct dp_vdev *vdev;
1316 	qdf_ether_header_t *eh;
1317 	bool is_broadcast;
1318 
1319 	/*
1320 	 * Check if DMA completed -- msdu_done is the last bit
1321 	 * to be written
1322 	 */
1323 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1324 
1325 		dp_err_rl("MSDU DONE failure");
1326 
1327 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1328 				     QDF_TRACE_LEVEL_INFO);
1329 		qdf_assert(0);
1330 	}
1331 
1332 	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
1333 							   rx_tlv_hdr);
1334 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1335 	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
1336 
1337 	if (dp_rx_check_pkt_len(soc, pkt_len)) {
1338 		/* Drop & free packet */
1339 		dp_rx_nbuf_free(nbuf);
1340 		return;
1341 	}
1342 	/* Set length in nbuf */
1343 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
1344 
1345 	qdf_nbuf_set_next(nbuf, NULL);
1346 
1347 	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1348 	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
1349 
1350 	if (!txrx_peer) {
1351 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL");
1352 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1353 				qdf_nbuf_len(nbuf));
1354 		/* Trigger invalid peer handler wrapper */
1355 		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
1356 		return;
1357 	}
1358 
1359 	vdev = txrx_peer->vdev;
1360 	if (!vdev) {
1361 		dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
1362 				 vdev);
1363 		/* Drop & free packet */
1364 		dp_rx_nbuf_free(nbuf);
1365 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1366 		return;
1367 	}
1368 
1369 	/*
1370 	 * Advance the packet start pointer by total size of
1371 	 * pre-header TLV's
1372 	 */
1373 	dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset);
1374 
1375 	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
1376 		uint8_t *pkt_type;
1377 
1378 		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
1379 		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
1380 			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
1381 							htons(QDF_LLC_STP)) {
1382 				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
1383 				goto process_mesh;
1384 			} else {
1385 				goto process_rx;
1386 			}
1387 		}
1388 	}
1389 	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
1390 		goto process_mesh;
1391 
1392 	/*
1393 	 * WAPI cert AP sends rekey frames as unencrypted.
1394 	 * Thus RXDMA will report unencrypted frame error.
1395 	 * To pass WAPI cert case, SW needs to pass unencrypted
1396 	 * rekey frame to stack.
1397 	 */
1398 	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1399 		goto process_rx;
1400 	}
1401 	/*
1402 	 * In the dynamic WEP case rekey frames are not encrypted,
1403 	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled and
1404 	 * key install is already done.
1405 	 */
1406 	if ((vdev->sec_type == cdp_sec_type_wep104) &&
1407 	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
1408 		goto process_rx;
1409 
1410 process_mesh:
1411 
1412 	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
1413 		dp_rx_nbuf_free(nbuf);
1414 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1415 		return;
1416 	}
1417 
1418 	if (vdev->mesh_vdev) {
1419 		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
1420 				      == QDF_STATUS_SUCCESS) {
1421 			dp_rx_err_info("%pK: mesh pkt filtered", soc);
1422 			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);
1423 
1424 			dp_rx_nbuf_free(nbuf);
1425 			return;
1426 		}
1427 		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
1428 	}
1429 process_rx:
1430 	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1431 							rx_tlv_hdr) &&
1432 		(vdev->rx_decap_type ==
1433 				htt_cmn_pkt_type_ethernet))) {
1434 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1435 		is_broadcast = (QDF_IS_ADDR_BROADCAST
1436 				(eh->ether_dhost)) ? 1 : 0 ;
1437 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
1438 					      qdf_nbuf_len(nbuf), link_id);
1439 		if (is_broadcast) {
1440 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1,
1441 						      qdf_nbuf_len(nbuf),
1442 						      link_id);
1443 		}
1444 	} else {
1445 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.unicast, 1,
1446 					      qdf_nbuf_len(nbuf),
1447 					      link_id);
1448 	}
1449 
1450 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1451 		dp_rx_deliver_raw(vdev, nbuf, txrx_peer, link_id);
1452 	} else {
1453 		/* Update the protocol tag in SKB based on CCE metadata */
1454 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1455 					  EXCEPTION_DEST_RING_ID, true, true);
1456 		/* Update the flow tag in SKB based on FSE metadata */
1457 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
1458 		DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
1459 		qdf_nbuf_set_exc_frame(nbuf, 1);
1460 		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
1461 					    qdf_nbuf_is_ipv4_eapol_pkt(nbuf));
1462 	}
1463 
1464 	return;
1465 }
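/*
 * In the non-raw delivery path above, frames are tagged as exception frames
 * (qdf_nbuf_set_exc_frame) before being handed to the OSIF stack, so upper
 * layers can tell them apart from the regular Rx path.
 */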
1466 
1467 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
1468 			     uint8_t *rx_tlv_hdr,
1469 			     struct dp_txrx_peer *txrx_peer)
1470 {
1471 	struct dp_vdev *vdev = NULL;
1472 	struct dp_pdev *pdev = NULL;
1473 	struct ol_if_ops *tops = NULL;
1474 	uint16_t rx_seq, fragno;
1475 	uint8_t is_raw;
1476 	unsigned int tid;
1477 	QDF_STATUS status;
1478 	struct cdp_rx_mic_err_info mic_failure_info;
1479 
1480 	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1481 					    rx_tlv_hdr))
1482 		return;
1483 
1484 	if (!txrx_peer) {
1485 		dp_info_rl("txrx_peer not found");
1486 		goto fail;
1487 	}
1488 
1489 	vdev = txrx_peer->vdev;
1490 	if (!vdev) {
1491 		dp_info_rl("VDEV not found");
1492 		goto fail;
1493 	}
1494 
1495 	pdev = vdev->pdev;
1496 	if (!pdev) {
1497 		dp_info_rl("PDEV not found");
1498 		goto fail;
1499 	}
1500 
1501 	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
1502 	if (is_raw) {
1503 		fragno = dp_rx_frag_get_mpdu_frag_number(soc,
1504 							 qdf_nbuf_data(nbuf));
1505 		/* Can get only last fragment */
1506 		if (fragno) {
1507 			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
1508 							qdf_nbuf_data(nbuf));
1509 			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
1510 							qdf_nbuf_data(nbuf));
1511 
1512 			status = dp_rx_defrag_add_last_frag(soc, txrx_peer,
1513 							    tid, rx_seq, nbuf);
1514 			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
1515 				   "status %d !", rx_seq, fragno, status);
1516 			return;
1517 		}
1518 	}
1519 
1520 	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
1521 				  &mic_failure_info.da_mac_addr.bytes[0])) {
1522 		dp_err_rl("Failed to get da_mac_addr");
1523 		goto fail;
1524 	}
1525 
1526 	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
1527 				  &mic_failure_info.ta_mac_addr.bytes[0])) {
1528 		dp_err_rl("Failed to get ta_mac_addr");
1529 		goto fail;
1530 	}
1531 
1532 	mic_failure_info.key_id = 0;
1533 	mic_failure_info.multicast =
1534 		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
1535 	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
1536 	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
1537 	mic_failure_info.data = NULL;
1538 	mic_failure_info.vdev_id = vdev->vdev_id;
1539 
1540 	tops = pdev->soc->cdp_soc.ol_ops;
1541 	if (tops->rx_mic_error)
1542 		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
1543 				   &mic_failure_info);
1544 
1545 fail:
1546 	dp_rx_nbuf_free(nbuf);
1547 	return;
1548 }
1549 
1550 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1551 	defined(WLAN_MCAST_MLO)
1552 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1553 			       struct dp_vdev *vdev,
1554 			       struct dp_txrx_peer *peer,
1555 			       qdf_nbuf_t nbuf,
1556 			       uint8_t link_id)
1557 {
1558 	if (soc->arch_ops.dp_rx_mcast_handler) {
1559 		if (soc->arch_ops.dp_rx_mcast_handler(soc, vdev, peer,
1560 						      nbuf, link_id))
1561 			return true;
1562 	}
1563 	return false;
1564 }
1565 #else
1566 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1567 			       struct dp_vdev *vdev,
1568 			       struct dp_txrx_peer *peer,
1569 			       qdf_nbuf_t nbuf,
1570 			       uint8_t link_id)
1571 {
1572 	return false;
1573 }
1574 #endif
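/*
 * Note: when the arch-specific dp_rx_mcast_handler returns true, the caller
 * in dp_rx_err_route_hdl() returns without freeing the nbuf, so ownership of
 * the buffer is assumed to pass to the MLO multicast handler.
 */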
1575 
1576 /**
1577  * dp_rx_err_route_hdl() - Function to send EAPOL frames to stack and
1578  *                            free any other packet which comes in
1579  *                            this path.
1580  *
1581  * @soc: core DP main context
1582  * @nbuf: buffer pointer
1583  * @txrx_peer: txrx peer handle
1584  * @rx_tlv_hdr: start of rx tlv header
1585  * @err_src: rxdma/reo
1586  * @link_id: link id on which the packet is received
1587  *
1588  * This function indicates EAPOL frames received in the wbm error ring to
1589  * the stack. Any other frame should be dropped.
1590  *
1591  * Return: None
1592  */
1593 static void
1594 dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
1595 		    struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr,
1596 		    enum hal_rx_wbm_error_source err_src,
1597 		    uint8_t link_id)
1598 {
1599 	uint32_t pkt_len;
1600 	uint16_t msdu_len;
1601 	struct dp_vdev *vdev;
1602 	struct hal_rx_msdu_metadata msdu_metadata;
1603 	bool is_eapol;
1604 
1605 	qdf_nbuf_set_rx_chfrag_start(
1606 				nbuf,
1607 				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1608 							       rx_tlv_hdr));
1609 	qdf_nbuf_set_rx_chfrag_end(nbuf,
1610 				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
1611 								 rx_tlv_hdr));
1612 	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1613 								  rx_tlv_hdr));
1614 	qdf_nbuf_set_da_valid(nbuf,
1615 			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
1616 							      rx_tlv_hdr));
1617 	qdf_nbuf_set_sa_valid(nbuf,
1618 			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
1619 							      rx_tlv_hdr));
1620 
1621 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
1622 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1623 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
1624 
1625 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
1626 		if (dp_rx_check_pkt_len(soc, pkt_len))
1627 			goto drop_nbuf;
1628 
1629 		/* Set length in nbuf */
1630 		qdf_nbuf_set_pktlen(
1631 			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
1632 		qdf_assert_always(nbuf->data == rx_tlv_hdr);
1633 	}
1634 
1635 	/*
1636 	 * Check if DMA completed -- msdu_done is the last bit
1637 	 * to be written
1638 	 */
1639 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1640 		dp_err_rl("MSDU DONE failure");
1641 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1642 				     QDF_TRACE_LEVEL_INFO);
1643 		qdf_assert(0);
1644 	}
1645 
1646 	if (!txrx_peer)
1647 		goto drop_nbuf;
1648 
1649 	vdev = txrx_peer->vdev;
1650 	if (!vdev) {
1651 		dp_err_rl("Null vdev!");
1652 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1653 		goto drop_nbuf;
1654 	}
1655 
1656 	/*
1657 	 * Advance the packet start pointer by total size of
1658 	 * pre-header TLV's
1659 	 */
1660 	if (qdf_nbuf_is_frag(nbuf))
1661 		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
1662 	else
1663 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
1664 				   soc->rx_pkt_tlv_size));
1665 
1666 	QDF_NBUF_CB_RX_PEER_ID(nbuf) = txrx_peer->peer_id;
1667 	if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf, link_id))
1668 		return;
1669 
1670 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
1671 
1672 	/*
1673 	 * Indicate EAPOL frame to stack only when vap mac address
1674 	 * matches the destination address.
1675 	 */
1676 	is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
1677 	if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1678 		qdf_ether_header_t *eh =
1679 			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1680 		if (dp_rx_err_match_dhost(eh, vdev)) {
1681 			DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1,
1682 					 qdf_nbuf_len(nbuf));
1683 
1684 			/*
1685 			 * Update the protocol tag in SKB based on
1686 			 * CCE metadata.
1687 			 */
1688 			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1689 						  EXCEPTION_DEST_RING_ID,
1690 						  true, true);
1691 			/* Update the flow tag in SKB based on FSE metadata */
1692 			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
1693 					      true);
1694 			DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
1695 						  qdf_nbuf_len(nbuf),
1696 						  vdev->pdev->enhanced_stats_en);
1697 			qdf_nbuf_set_exc_frame(nbuf, 1);
1698 			qdf_nbuf_set_next(nbuf, NULL);
1699 
1700 			dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf,
1701 						    NULL, is_eapol);
1702 
1703 			return;
1704 		}
1705 	}
1706 
1707 drop_nbuf:
1708 
1709 	DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
1710 		      err_src == HAL_RX_WBM_ERR_SRC_REO);
1711 	DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
1712 		      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);
1713 
1714 	dp_rx_nbuf_free(nbuf);
1715 }
1716 
1717 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1718 
1719 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
1720 /**
1721  * dp_rx_link_cookie_check() - Validate link desc cookie
1722  * @ring_desc: ring descriptor
1723  *
1724  * Return: qdf status
1725  */
1726 static inline QDF_STATUS
1727 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1728 {
1729 	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
1730 		return QDF_STATUS_E_FAILURE;
1731 
1732 	return QDF_STATUS_SUCCESS;
1733 }
1734 
1735 /**
1736  * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
1737  * @ring_desc: ring descriptor
1738  *
1739  * Return: None
1740  */
1741 static inline void
1742 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1743 {
1744 	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
1745 }
1746 #else
1747 static inline QDF_STATUS
1748 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1749 {
1750 	return QDF_STATUS_SUCCESS;
1751 }
1752 
1753 static inline void
1754 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1755 {
1756 }
1757 #endif
1758 
1759 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1760 /**
1761  * dp_rx_err_ring_record_entry() - Record rx err ring history
1762  * @soc: Datapath soc structure
1763  * @paddr: paddr of the buffer in RX err ring
1764  * @sw_cookie: SW cookie of the buffer in RX err ring
1765  * @rbm: Return buffer manager of the buffer in RX err ring
1766  *
1767  * Return: None
1768  */
1769 static inline void
1770 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1771 			    uint32_t sw_cookie, uint8_t rbm)
1772 {
1773 	struct dp_buf_info_record *record;
1774 	uint32_t idx;
1775 
1776 	if (qdf_unlikely(!soc->rx_err_ring_history))
1777 		return;
1778 
1779 	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
1780 					DP_RX_ERR_HIST_MAX);
1781 
1782 	/* No NULL check needed for record since its an array */
1783 	record = &soc->rx_err_ring_history->entry[idx];
1784 
1785 	record->timestamp = qdf_get_log_timestamp();
1786 	record->hbi.paddr = paddr;
1787 	record->hbi.sw_cookie = sw_cookie;
1788 	record->hbi.rbm = rbm;
1789 }
1790 #else
1791 static inline void
1792 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1793 			    uint32_t sw_cookie, uint8_t rbm)
1794 {
1795 }
1796 #endif
1797 
1798 #ifdef HANDLE_RX_REROUTE_ERR
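/**
 * dp_rx_err_handle_msdu_buf() - handle an MSDU buffer that was wrongly
 *	routed to the REO error ring
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 *
 * Unmaps and frees the nbuf attached to the rx descriptor referenced by
 * the ring descriptor and returns the rx descriptor to the pdev free list
 * so it can be replenished.
 *
 * Return: lmac id of the pool that owned the descriptor, or
 *	   DP_INVALID_LMAC_ID on a sanity failure
 */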
1799 static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
1800 				     hal_ring_desc_t ring_desc)
1801 {
1802 	int lmac_id = DP_INVALID_LMAC_ID;
1803 	struct dp_rx_desc *rx_desc;
1804 	struct hal_buf_info hbi;
1805 	struct dp_pdev *pdev;
1806 	struct rx_desc_pool *rx_desc_pool;
1807 
1808 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
1809 
1810 	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie);
1811 
1812 	/* sanity */
1813 	if (!rx_desc) {
1814 		DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1);
1815 		goto assert_return;
1816 	}
1817 
1818 	if (!rx_desc->nbuf)
1819 		goto assert_return;
1820 
1821 	dp_rx_err_ring_record_entry(soc, hbi.paddr,
1822 				    hbi.sw_cookie,
1823 				    hal_rx_ret_buf_manager_get(soc->hal_soc,
1824 							       ring_desc));
1825 	if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) {
1826 		DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1827 		rx_desc->in_err_state = 1;
1828 		goto assert_return;
1829 	}
1830 
1831 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
1832 	/* After this point the rx_desc and nbuf are valid */
1833 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
1834 	qdf_assert_always(!rx_desc->unmapped);
1835 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
1836 	rx_desc->unmapped = 1;
1837 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1838 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
1839 				    rx_desc->pool_id);
1840 
1841 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
1842 	lmac_id = rx_desc->pool_id;
1843 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1844 				    &pdev->free_list_tail,
1845 				    rx_desc);
1846 	return lmac_id;
1847 
1848 assert_return:
1849 	qdf_assert(0);
1850 	return lmac_id;
1851 }
1852 
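/**
 * dp_rx_err_exception() - handle MSDU buffer type entries seen on the
 *	REO error ring
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 *
 * Counts routing errors and triggers self recovery when either the total
 * count or the per-window rate crosses its threshold, then frees the
 * wrongly routed MSDU buffer via dp_rx_err_handle_msdu_buf().
 *
 * Return: lmac id of the pool the buffer belonged to, or
 *	   DP_INVALID_LMAC_ID on failure
 */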
1853 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
1854 {
1855 	int ret;
1856 	uint64_t cur_time_stamp;
1857 
1858 	DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1);
1859 
1860 	/* Recover if overall error count exceeds threshold */
1861 	if (soc->stats.rx.err.reo_err_msdu_buf_rcved >
1862 	    DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) {
1863 		dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
1864 		       soc->stats.rx.err.reo_err_msdu_buf_rcved,
1865 		       soc->rx_route_err_start_pkt_ts);
1866 		qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR);
1867 	}
1868 
1869 	cur_time_stamp = qdf_get_log_timestamp_usecs();
1870 	if (!soc->rx_route_err_start_pkt_ts)
1871 		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
1872 
1873 	/* Recover if threshold number of packets received in threshold time */
1874 	if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) >
1875 						DP_RX_ERR_ROUTE_TIMEOUT_US) {
1876 		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
1877 
1878 		if (soc->rx_route_err_in_window >
1879 		    DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) {
1880 			qdf_trigger_self_recovery(NULL,
1881 						  QDF_RX_REG_PKT_ROUTE_ERR);
1882 			dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
1883 			       soc->stats.rx.err.reo_err_msdu_buf_rcved,
1884 			       soc->rx_route_err_start_pkt_ts);
1885 		} else {
1886 			soc->rx_route_err_in_window = 1;
1887 		}
1888 	} else {
1889 		soc->rx_route_err_in_window++;
1890 	}
1891 
1892 	ret = dp_rx_err_handle_msdu_buf(soc, ring_desc);
1893 
1894 	return ret;
1895 }
1896 #else /* HANDLE_RX_REROUTE_ERR */
1897 
1898 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
1899 {
1900 	qdf_assert_always(0);
1901 
1902 	return DP_INVALID_LMAC_ID;
1903 }
1904 #endif /* HANDLE_RX_REROUTE_ERR */
1905 
1906 #ifdef WLAN_MLO_MULTI_CHIP
1907 /**
1908  * dp_idle_link_bm_id_check() - war for HW issue
1909  *
1910  * @soc: DP SOC handle
1911  * @rbm: idle link RBM value
1912  * @ring_desc: reo error link descriptor
1913  *
1914  * This is a WAR for a HW issue where a link descriptor
1915  * belonging to a partner soc is received because packets are
1916  * wrongly interpreted as fragments.
1917  *
1918  * Return: true in case link desc is consumed
1919  *	   false in other cases
1920  */
1921 static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
1922 				     void *ring_desc)
1923 {
1924 	struct dp_soc *replenish_soc = NULL;
1925 
1926 	/* return false in case the link desc belongs to the same soc */
1927 	if (rbm == soc->idle_link_bm_id)
1928 		return false;
1929 
1930 	if (soc->arch_ops.dp_soc_get_by_idle_bm_id)
1931 		replenish_soc =
1932 			soc->arch_ops.dp_soc_get_by_idle_bm_id(soc, rbm);
1933 
1934 	qdf_assert_always(replenish_soc);
1935 
1936 	/*
1937 	 * For the WIN use case we should only get fragment packets in
1938 	 * this ring. Since fragmentation is not supported for MLO,
1939 	 * we should not see link descriptors from another soc.
1940 	 *
1941 	 * Drop all packets from the partner soc and replenish the descriptors.
1942 	 */
1943 	dp_handle_wbm_internal_error(replenish_soc, ring_desc,
1944 				     HAL_WBM_RELEASE_RING_2_DESC_TYPE);
1945 
1946 	return true;
1947 }
1948 #else
1949 static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
1950 				     void *ring_desc)
1951 {
1952 	return false;
1953 }
1954 #endif
1955 
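/**
 * dp_rx_err_dup_frame() - update vdev stats for a duplicate frame error
 * @soc: core DP main context
 * @mpdu_desc_info: MPDU descriptor info carrying the peer metadata
 *
 * Looks up the txrx peer from the peer metadata and increments the
 * duplicate_count stat on its vdev.
 *
 * Return: None
 */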
1956 static inline void
1957 dp_rx_err_dup_frame(struct dp_soc *soc,
1958 		    struct hal_rx_mpdu_desc_info *mpdu_desc_info)
1959 {
1960 	struct dp_txrx_peer *txrx_peer = NULL;
1961 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1962 	uint16_t peer_id;
1963 
1964 	peer_id =
1965 		dp_rx_peer_metadata_peer_id_get(soc,
1966 						mpdu_desc_info->peer_meta_data);
1967 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
1968 						   &txrx_ref_handle,
1969 						   DP_MOD_ID_RX_ERR);
1970 	if (txrx_peer) {
1971 		DP_STATS_INC(txrx_peer->vdev, rx.duplicate_count, 1);
1972 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
1973 	}
1974 }
1975 
1976 uint32_t
1977 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
1978 		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
1979 {
1980 	hal_ring_desc_t ring_desc;
1981 	hal_soc_handle_t hal_soc;
1982 	uint32_t count = 0;
1983 	uint32_t rx_bufs_used = 0;
1984 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
1985 	uint8_t mac_id = 0;
1986 	uint8_t buf_type;
1987 	uint8_t err_status;
1988 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
1989 	struct hal_buf_info hbi;
1990 	struct dp_pdev *dp_pdev;
1991 	struct dp_srng *dp_rxdma_srng;
1992 	struct rx_desc_pool *rx_desc_pool;
1993 	void *link_desc_va;
1994 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
1995 	uint16_t num_msdus;
1996 	struct dp_rx_desc *rx_desc = NULL;
1997 	QDF_STATUS status;
1998 	bool ret;
1999 	uint32_t error_code = 0;
2000 	bool sw_pn_check_needed;
2001 	int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
2002 	int i, rx_bufs_reaped_total;
2003 	uint16_t peer_id;
2004 	struct dp_txrx_peer *txrx_peer = NULL;
2005 	dp_txrx_ref_handle txrx_ref_handle = NULL;
2006 
2007 	/* Debug -- Remove later */
2008 	qdf_assert(soc && hal_ring_hdl);
2009 
2010 	hal_soc = soc->hal_soc;
2011 
2012 	/* Debug -- Remove later */
2013 	qdf_assert(hal_soc);
2014 
2015 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
2016 
2017 		/* TODO */
2018 		/*
2019 		 * Need API to convert from hal_ring pointer to
2020 		 * Ring Type / Ring Id combo
2021 		 */
2022 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
2023 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc,
2024 			      hal_ring_hdl);
2025 		goto done;
2026 	}
2027 
2028 	while (qdf_likely(quota-- && (ring_desc =
2029 				hal_srng_dst_peek(hal_soc,
2030 						  hal_ring_hdl)))) {
2031 
2032 		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
2033 		err_status = hal_rx_err_status_get(hal_soc, ring_desc);
2034 		buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc);
2035 
2036 		if (err_status == HAL_REO_ERROR_DETECTED)
2037 			error_code = hal_rx_get_reo_error_code(hal_soc,
2038 							       ring_desc);
2039 
2040 		qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0);
2041 		sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
2042 								  err_status,
2043 								  error_code);
2044 		if (!sw_pn_check_needed) {
2045 			/*
2046 			 * MPDU desc info will be present in the REO desc
2047 			 * only in the below scenarios
2048 			 * 1) pn_in_dest_disabled:  always
2049 			 * 2) pn_in_dest enabled: All cases except 2k-jup
2050 			 *			and OOR errors
2051 			 */
2052 			hal_rx_mpdu_desc_info_get(hal_soc, ring_desc,
2053 						  &mpdu_desc_info);
2054 		}
2055 
2056 		if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0)
2057 			goto next_entry;
2058 
2059 		/*
2060 		 * For REO error ring, only MSDU LINK DESC is expected.
2061 		 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
2062 		 */
2063 		if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) {
2064 			int lmac_id;
2065 
2066 			lmac_id = dp_rx_err_exception(soc, ring_desc);
2067 			if (lmac_id >= 0)
2068 				rx_bufs_reaped[lmac_id] += 1;
2069 			goto next_entry;
2070 		}
2071 
2072 		hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
2073 					  &hbi);
2074 		/*
2075 		 * check for the magic number in the sw cookie
2076 		 */
2077 		qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) &
2078 					soc->link_desc_id_start);
2079 
2080 		if (dp_idle_link_bm_id_check(soc, hbi.rbm, ring_desc)) {
2081 			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
2082 			goto next_entry;
2083 		}
2084 
2085 		status = dp_rx_link_cookie_check(ring_desc);
2086 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2087 			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
2088 			break;
2089 		}
2090 
2091 		hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2092 		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
2093 		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
2094 				     &num_msdus);
2095 		if (!num_msdus ||
2096 		    !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) {
2097 			dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x",
2098 					  num_msdus, msdu_list.sw_cookie[0]);
2099 			dp_rx_link_desc_return(soc, ring_desc,
2100 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
2101 			goto next_entry;
2102 		}
2103 
2104 		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
2105 					    msdu_list.sw_cookie[0],
2106 					    msdu_list.rbm[0]);
2107 		// TODO - BE- Check if the RBM is to be checked for all chips
2108 		if (qdf_unlikely((msdu_list.rbm[0] !=
2109 					dp_rx_get_rx_bm_id(soc)) &&
2110 				 (msdu_list.rbm[0] !=
2111 				  soc->idle_link_bm_id) &&
2112 				 (msdu_list.rbm[0] !=
2113 					dp_rx_get_defrag_bm_id(soc)))) {
2114 			/* TODO */
2115 			/* Call appropriate handler */
2116 			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2117 				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
2118 				dp_rx_err_err("%pK: Invalid RBM %d",
2119 					      soc, msdu_list.rbm[0]);
2120 			}
2121 
2122 			/* Return link descriptor through WBM ring (SW2WBM)*/
2123 			dp_rx_link_desc_return(soc, ring_desc,
2124 					HAL_BM_ACTION_RELEASE_MSDU_LIST);
2125 			goto next_entry;
2126 		}
2127 
2128 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
2129 						soc,
2130 						msdu_list.sw_cookie[0]);
2131 		qdf_assert_always(rx_desc);
2132 
2133 		mac_id = rx_desc->pool_id;
2134 
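		/*
		 * For errors that need a SW PN check the MPDU desc info is
		 * not available in the ring descriptor, so skip the BAR and
		 * fragment handling and process the REO error code directly.
		 */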
2135 		if (sw_pn_check_needed) {
2136 			goto process_reo_error_code;
2137 		}
2138 
2139 		if (mpdu_desc_info.bar_frame) {
2140 			qdf_assert_always(mpdu_desc_info.msdu_count == 1);
2141 
2142 			dp_rx_bar_frame_handle(soc, ring_desc, rx_desc,
2143 					       &mpdu_desc_info, err_status,
2144 					       error_code);
2145 
2146 			rx_bufs_reaped[mac_id] += 1;
2147 			goto next_entry;
2148 		}
2149 
2150 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
2151 			/*
2152 			 * We only handle one msdu per link desc for the
2153 			 * fragmented case. We drop the msdus and release the
2154 			 * link desc back if there is more than one msdu in it.
2155 			 */
2156 			if (qdf_unlikely(num_msdus > 1)) {
2157 				count = dp_rx_msdus_drop(soc, ring_desc,
2158 							 &mpdu_desc_info,
2159 							 &mac_id, quota);
2160 				rx_bufs_reaped[mac_id] += count;
2161 				goto next_entry;
2162 			}
2163 
2164 			/*
2165 			 * This is an unlikely scenario where the host is reaping
2166 			 * a descriptor which it already reaped just a while ago
2167 			 * but is yet to replenish it back to HW.
2168 			 * In this case the host will dump the last 128 descriptors
2169 			 * including the software descriptor rx_desc and assert.
2170 			 */
2171 
2172 			if (qdf_unlikely(!rx_desc->in_use)) {
2173 				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
2174 				dp_info_rl("Reaping rx_desc not in use!");
2175 				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2176 							   ring_desc, rx_desc);
2177 				/* ignore duplicate RX desc and continue */
2178 				/* Pop out the descriptor */
2179 				goto next_entry;
2180 			}
2181 
2182 			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
2183 							    msdu_list.paddr[0]);
2184 			if (!ret) {
2185 				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2186 				rx_desc->in_err_state = 1;
2187 				goto next_entry;
2188 			}
2189 
2190 			count = dp_rx_frag_handle(soc,
2191 						  ring_desc, &mpdu_desc_info,
2192 						  rx_desc, &mac_id, quota);
2193 
2194 			rx_bufs_reaped[mac_id] += count;
2195 			DP_STATS_INC(soc, rx.rx_frags, 1);
2196 
2197 			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
2198 					mpdu_desc_info.peer_meta_data);
2199 			txrx_peer =
2200 				dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
2201 							       &txrx_ref_handle,
2202 							       DP_MOD_ID_RX_ERR);
2203 			if (txrx_peer) {
2204 				DP_STATS_INC(txrx_peer->vdev,
2205 					     rx.fragment_count, 1);
2206 				dp_txrx_peer_unref_delete(txrx_ref_handle,
2207 							  DP_MOD_ID_RX_ERR);
2208 			}
2209 			goto next_entry;
2210 		}
2211 
2212 process_reo_error_code:
2213 		/*
2214 		 * Expect REO errors to be handled after this point
2215 		 */
2216 		qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED);
2217 
2218 		dp_info_rl("Got pkt with REO ERROR: %d", error_code);
2219 
2220 		switch (error_code) {
2221 		case HAL_REO_ERR_PN_CHECK_FAILED:
2222 		case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
2223 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2224 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2225 			if (dp_pdev)
2226 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
2227 			count = dp_rx_pn_error_handle(soc,
2228 						      ring_desc,
2229 						      &mpdu_desc_info, &mac_id,
2230 						      quota);
2231 
2232 			rx_bufs_reaped[mac_id] += count;
2233 			break;
2234 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2235 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
2236 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2237 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
2238 		case HAL_REO_ERR_BAR_FRAME_OOR:
2239 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2240 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2241 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2242 			if (dp_pdev)
2243 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
2244 			count = dp_rx_reo_err_entry_process(
2245 					soc,
2246 					ring_desc,
2247 					&mpdu_desc_info,
2248 					link_desc_va,
2249 					error_code);
2250 
2251 			rx_bufs_reaped[mac_id] += count;
2252 			break;
2253 		case HAL_REO_ERR_NON_BA_DUPLICATE:
2254 			dp_rx_err_dup_frame(soc, &mpdu_desc_info);
2255 			fallthrough;
2256 		case HAL_REO_ERR_QUEUE_DESC_INVALID:
2257 		case HAL_REO_ERR_AMPDU_IN_NON_BA:
2258 		case HAL_REO_ERR_BA_DUPLICATE:
2259 		case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION:
2260 		case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN:
2261 		case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET:
2262 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2263 			count = dp_rx_msdus_drop(soc, ring_desc,
2264 						 &mpdu_desc_info,
2265 						 &mac_id, quota);
2266 			rx_bufs_reaped[mac_id] += count;
2267 			break;
2268 		default:
2269 			/* Assert if unexpected error type */
2270 			qdf_assert_always(0);
2271 		}
2272 next_entry:
2273 		dp_rx_link_cookie_invalidate(ring_desc);
2274 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2275 
2276 		rx_bufs_reaped_total = 0;
2277 		for (i = 0; i < MAX_PDEV_CNT; i++)
2278 			rx_bufs_reaped_total += rx_bufs_reaped[i];
2279 
2280 		if (dp_rx_reap_loop_pkt_limit_hit(soc, rx_bufs_reaped_total,
2281 						  max_reap_limit))
2282 			break;
2283 	}
2284 
2285 done:
2286 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2287 
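	/*
	 * Flush stale entries from the defrag waitlist once the next
	 * scheduled flush time has elapsed.
	 */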
2288 	if (soc->rx.flags.defrag_timeout_check) {
2289 		uint32_t now_ms =
2290 			qdf_system_ticks_to_msecs(qdf_system_ticks());
2291 
2292 		if (now_ms >= soc->rx.defrag.next_flush_ms)
2293 			dp_rx_defrag_waitlist_flush(soc);
2294 	}
2295 
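	/*
	 * Replenish the buffers reaped from the error ring back to the
	 * RXDMA refill ring of each pdev/lmac.
	 */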
2296 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2297 		if (rx_bufs_reaped[mac_id]) {
2298 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2299 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2300 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
2301 
2302 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2303 						rx_desc_pool,
2304 						rx_bufs_reaped[mac_id],
2305 						&dp_pdev->free_list_head,
2306 						&dp_pdev->free_list_tail,
2307 						false);
2308 			rx_bufs_used += rx_bufs_reaped[mac_id];
2309 		}
2310 	}
2311 
2312 	return rx_bufs_used; /* Assume no scale factor for now */
2313 }
2314 
2315 #ifdef DROP_RXDMA_DECRYPT_ERR
2316 /**
2317  * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
2318  *
2319  * Return: true if rxdma decrypt err frames are handled and false otherwise
2320  */
2321 static inline bool dp_handle_rxdma_decrypt_err(void)
2322 {
2323 	return false;
2324 }
2325 #else
2326 static inline bool dp_handle_rxdma_decrypt_err(void)
2327 {
2328 	return true;
2329 }
2330 #endif
2331 
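/*
 * WAR: when enabled, the msdu length parsed from the last nbuf in the
 * pending WBM SG list is propagated to every nbuf in that list.
 */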
2332 void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
2333 {
2334 	if (soc->wbm_sg_last_msdu_war) {
2335 		uint32_t len;
2336 		qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail;
2337 
2338 		len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
2339 						     qdf_nbuf_data(temp));
2340 		temp = soc->wbm_sg_param.wbm_sg_nbuf_head;
2341 		while (temp) {
2342 			QDF_NBUF_CB_RX_PKT_LEN(temp) = len;
2343 			temp = temp->next;
2344 		}
2345 	}
2346 }
2347 
2348 #ifdef RX_DESC_DEBUG_CHECK
2349 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2350 					    hal_ring_handle_t hal_ring_hdl,
2351 					    hal_ring_desc_t ring_desc,
2352 					    struct dp_rx_desc *rx_desc)
2353 {
2354 	struct hal_buf_info hbi;
2355 
2356 	hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2357 	/* Sanity check for possible buffer paddr corruption */
2358 	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
2359 		return QDF_STATUS_SUCCESS;
2360 
2361 	hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc);
2362 
2363 	return QDF_STATUS_E_FAILURE;
2364 }
2365 
2366 #else
2367 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2368 					    hal_ring_handle_t hal_ring_hdl,
2369 					    hal_ring_desc_t ring_desc,
2370 					    struct dp_rx_desc *rx_desc)
2371 {
2372 	return QDF_STATUS_SUCCESS;
2373 }
2374 #endif
2375 bool
2376 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
2377 {
2378 	/*
2379 	 * Currently the Null Queue and Unencrypted error handlers have support
2380 	 * for SG. Other error handlers do not deal with SG buffers.
2381 	 */
2382 	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
2383 	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
2384 	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
2385 	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
2386 		return true;
2387 
2388 	return false;
2389 }
2390 
2391 #ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
2392 void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2393 			      qdf_nbuf_t nbuf)
2394 {
2395 	/*
2396 	 * In case of fast recycle, the TX driver can skip invalidating
2397 	 * the buffer for SFE forwarding. We need to invalidate
2398 	 * the TLV headers after writing to this location.
2399 	 */
2400 	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2401 				      (void *)(nbuf->data +
2402 					       soc->rx_pkt_tlv_size +
2403 					       L3_HEADER_PAD));
2404 }
2405 #else
2406 void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2407 			      qdf_nbuf_t nbuf)
2408 {
2409 }
2410 #endif
2411 
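/*
 * Without CONFIG_NBUF_AP_PLATFORM the peer id and WBM error info are
 * parsed from the rx TLV header; with it they are read directly from
 * the nbuf control block fields.
 */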
2412 #ifndef CONFIG_NBUF_AP_PLATFORM
2413 static inline uint16_t
2414 dp_rx_get_peer_id(struct dp_soc *soc,
2415 		  uint8_t *rx_tlv_hdr,
2416 		  qdf_nbuf_t nbuf)
2417 {
2418 	uint32_t peer_mdata = 0;
2419 
2420 	peer_mdata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
2421 						   rx_tlv_hdr);
2422 	return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
2423 }
2424 
2425 static inline void
2426 dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
2427 				 qdf_nbuf_t nbuf,
2428 				 uint8_t *rx_tlv_hdr,
2429 				 union hal_wbm_err_info_u *wbm_err)
2430 {
2431 	hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr,
2432 				      (uint8_t *)&wbm_err->info,
2433 				      sizeof(union hal_wbm_err_info_u));
2434 }
2435 
2436 void
2437 dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
2438 			       qdf_nbuf_t nbuf,
2439 			       union hal_wbm_err_info_u wbm_err)
2440 {
2441 	hal_rx_priv_info_set_in_tlv(soc->hal_soc,
2442 				    qdf_nbuf_data(nbuf),
2443 				    (uint8_t *)&wbm_err.info,
2444 				    sizeof(union hal_wbm_err_info_u));
2445 }
2446 #else
2447 static inline uint16_t
2448 dp_rx_get_peer_id(struct dp_soc *soc,
2449 		  uint8_t *rx_tlv_hdr,
2450 		  qdf_nbuf_t nbuf)
2451 {
2452 	uint32_t peer_mdata = QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf);
2453 
2454 	return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
2455 }
2456 
2457 static inline void
2458 dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
2459 				 qdf_nbuf_t nbuf,
2460 				 uint8_t *rx_tlv_hdr,
2461 				 union hal_wbm_err_info_u *wbm_err)
2462 {
2463 	wbm_err->info = QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf);
2464 }
2465 
2466 void
2467 dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
2468 			       qdf_nbuf_t nbuf,
2469 			       union hal_wbm_err_info_u wbm_err)
2470 {
2471 	QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf) = wbm_err.info;
2472 }
2473 #endif /* CONFIG_NBUF_AP_PLATFORM */
2474 
2475 uint32_t
2476 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2477 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
2478 {
2479 	hal_soc_handle_t hal_soc;
2480 	uint32_t rx_bufs_used = 0;
2481 	struct dp_pdev *dp_pdev;
2482 	uint8_t *rx_tlv_hdr;
2483 	bool is_tkip_mic_err;
2484 	qdf_nbuf_t nbuf_head = NULL;
2485 	qdf_nbuf_t nbuf, next;
2486 	union hal_wbm_err_info_u wbm_err = { 0 };
2487 	uint8_t pool_id;
2488 	uint8_t tid = 0;
2489 	uint8_t link_id = 0;
2490 
2491 	/* Debug -- Remove later */
2492 	qdf_assert(soc && hal_ring_hdl);
2493 
2494 	hal_soc = soc->hal_soc;
2495 
2496 	/* Debug -- Remove later */
2497 	qdf_assert(hal_soc);
2498 
2499 	nbuf_head = soc->arch_ops.dp_rx_wbm_err_reap_desc(int_ctx, soc,
2500 							  hal_ring_hdl,
2501 							  quota,
2502 							  &rx_bufs_used);
2503 	nbuf = nbuf_head;
2504 	while (nbuf) {
2505 		struct dp_txrx_peer *txrx_peer;
2506 		struct dp_peer *peer;
2507 		uint16_t peer_id;
2508 		uint8_t err_code;
2509 		uint8_t *tlv_hdr;
2510 		dp_txrx_ref_handle txrx_ref_handle = NULL;
2511 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
2512 
2513 		/*
2514 		 * retrieve the wbm desc info from nbuf CB/TLV, so we can
2515 		 * handle error cases appropriately
2516 		 */
2517 		dp_rx_get_wbm_err_info_from_nbuf(soc, nbuf,
2518 						 rx_tlv_hdr,
2519 						 &wbm_err);
2520 
2521 		peer_id = dp_rx_get_peer_id(soc,
2522 					    rx_tlv_hdr,
2523 					    nbuf);
2524 		txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
2525 							   &txrx_ref_handle,
2526 							   DP_MOD_ID_RX_ERR);
2527 
2528 		if (!txrx_peer)
2529 			dp_info_rl("peer is null peer_id %u err_src %u, "
2530 				   "REO: push_rsn %u err_code %u, "
2531 				   "RXDMA: push_rsn %u err_code %u",
2532 				   peer_id, wbm_err.info_bit.wbm_err_src,
2533 				   wbm_err.info_bit.reo_psh_rsn,
2534 				   wbm_err.info_bit.reo_err_code,
2535 				   wbm_err.info_bit.rxdma_psh_rsn,
2536 				   wbm_err.info_bit.rxdma_err_code);
2537 
2538 		/* Set queue_mapping in nbuf to 0 */
2539 		dp_set_rx_queue(nbuf, 0);
2540 
2541 		next = nbuf->next;
2542 		/*
2543 		 * Form the SG for msdu continued buffers;
2544 		 * QCN9000 has this support.
2545 		 */
2546 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
2547 			nbuf = dp_rx_sg_create(soc, nbuf);
2548 			next = nbuf->next;
2549 			/*
2550 			 * SG error handling is not done correctly,
2551 			 * drop SG frames for now.
2552 			 */
2553 			dp_rx_nbuf_free(nbuf);
2554 			dp_info_rl("scattered msdu dropped");
2555 			nbuf = next;
2556 			if (txrx_peer)
2557 				dp_txrx_peer_unref_delete(txrx_ref_handle,
2558 							  DP_MOD_ID_RX_ERR);
2559 			continue;
2560 		}
2561 
2562 		dp_rx_nbuf_set_link_id_from_tlv(soc, rx_tlv_hdr, nbuf);
2563 
2564 		pool_id = wbm_err.info_bit.pool_id;
2565 		dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2566 
2567 		if (dp_pdev && dp_pdev->link_peer_stats &&
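		/*
		 * For MLD peers with link level stats enabled, derive the
		 * per-link stats array index from the nbuf's link id.
		 */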
2568 		    txrx_peer && txrx_peer->is_mld_peer) {
2569 			link_id = dp_rx_get_stats_arr_idx_from_link_id(
2570 								nbuf,
2571 								txrx_peer);
2572 		} else {
2573 			link_id = 0;
2574 		}
2575 
2576 		if (wbm_err.info_bit.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
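		/*
		 * Dispatch on the block (REO or RXDMA) that pushed the frame
		 * to WBM and on the corresponding push reason.
		 */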
2577 			if (wbm_err.info_bit.reo_psh_rsn
2578 					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {
2579 
2580 				DP_STATS_INC(soc,
2581 					rx.err.reo_error
2582 					[wbm_err.info_bit.reo_err_code], 1);
2583 				/* increment @pdev level */
2584 				if (dp_pdev)
2585 					DP_STATS_INC(dp_pdev, err.reo_error,
2586 						     1);
2587 
2588 				switch (wbm_err.info_bit.reo_err_code) {
2589 				/*
2590 				 * Handling for packets which have NULL REO
2591 				 * queue descriptor
2592 				 */
2593 				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2594 					pool_id = wbm_err.info_bit.pool_id;
2595 					soc->arch_ops.dp_rx_null_q_desc_handle(
2596 								soc, nbuf,
2597 								rx_tlv_hdr,
2598 								pool_id,
2599 								txrx_peer,
2600 								FALSE,
2601 								link_id);
2602 					break;
2603 				/* TODO */
2604 				/* Add per error code accounting */
2605 				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2606 					if (txrx_peer)
2607 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2608 									  rx.err.jump_2k_err,
2609 									  1,
2610 									  link_id);
2611 
2612 					pool_id = wbm_err.info_bit.pool_id;
2613 
2614 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2615 									   rx_tlv_hdr)) {
2616 						tid =
2617 						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2618 					}
2619 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2620 					hal_rx_msdu_start_msdu_len_get(
2621 						soc->hal_soc, rx_tlv_hdr);
2622 					nbuf->next = NULL;
2623 					dp_2k_jump_handle(soc, nbuf,
2624 							  rx_tlv_hdr,
2625 							  peer_id, tid);
2626 					break;
2627 				case HAL_REO_ERR_REGULAR_FRAME_OOR:
2628 					if (txrx_peer)
2629 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2630 									  rx.err.oor_err,
2631 									  1,
2632 									  link_id);
2633 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2634 									   rx_tlv_hdr)) {
2635 						tid =
2636 							hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2637 					}
2638 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2639 						hal_rx_msdu_start_msdu_len_get(
2640 						soc->hal_soc, rx_tlv_hdr);
2641 					nbuf->next = NULL;
2642 					dp_rx_oor_handle(soc, nbuf,
2643 							 peer_id,
2644 							 rx_tlv_hdr);
2645 					break;
2646 				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2647 				case HAL_REO_ERR_BAR_FRAME_OOR:
2648 					peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
2649 					if (peer) {
2650 						dp_rx_err_handle_bar(soc, peer,
2651 								     nbuf);
2652 						dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
2653 					}
2654 					dp_rx_nbuf_free(nbuf);
2655 					break;
2656 
2657 				case HAL_REO_ERR_PN_CHECK_FAILED:
2658 				case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
2659 					if (txrx_peer)
2660 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2661 									  rx.err.pn_err,
2662 									  1,
2663 									  link_id);
2664 					dp_rx_nbuf_free(nbuf);
2665 					break;
2666 
2667 				default:
2668 					dp_info_rl("Got pkt with REO ERROR: %d",
2669 						   wbm_err.info_bit.
2670 						   reo_err_code);
2671 					dp_rx_nbuf_free(nbuf);
2672 				}
2673 			} else if (wbm_err.info_bit.reo_psh_rsn
2674 					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
2675 				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
2676 						    rx_tlv_hdr,
2677 						    HAL_RX_WBM_ERR_SRC_REO,
2678 						    link_id);
2679 			} else {
2680 				/* should not enter here */
2681 				dp_rx_err_alert("invalid reo push reason %u",
2682 						wbm_err.info_bit.reo_psh_rsn);
2683 				dp_rx_nbuf_free(nbuf);
2684 				qdf_assert_always(0);
2685 			}
2686 		} else if (wbm_err.info_bit.wbm_err_src ==
2687 					HAL_RX_WBM_ERR_SRC_RXDMA) {
2688 			if (wbm_err.info_bit.rxdma_psh_rsn
2689 					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2690 				DP_STATS_INC(soc,
2691 					rx.err.rxdma_error
2692 					[wbm_err.info_bit.rxdma_err_code], 1);
2693 				/* increment @pdev level */
2694 				if (dp_pdev)
2695 					DP_STATS_INC(dp_pdev,
2696 						     err.rxdma_error, 1);
2697 
2698 				switch (wbm_err.info_bit.rxdma_err_code) {
2699 				case HAL_RXDMA_ERR_UNENCRYPTED:
2700 
2701 				case HAL_RXDMA_ERR_WIFI_PARSE:
2702 					if (txrx_peer)
2703 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2704 									  rx.err.rxdma_wifi_parse_err,
2705 									  1,
2706 									  link_id);
2707 
2708 					pool_id = wbm_err.info_bit.pool_id;
2709 					dp_rx_process_rxdma_err(soc, nbuf,
2710 								rx_tlv_hdr,
2711 								txrx_peer,
2712 								wbm_err.
2713 								info_bit.
2714 								rxdma_err_code,
2715 								pool_id,
2716 								link_id);
2717 					break;
2718 
2719 				case HAL_RXDMA_ERR_TKIP_MIC:
2720 					dp_rx_process_mic_error(soc, nbuf,
2721 								rx_tlv_hdr,
2722 								txrx_peer);
2723 					if (txrx_peer)
2724 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2725 									  rx.err.mic_err,
2726 									  1,
2727 									  link_id);
2728 					break;
2729 
2730 				case HAL_RXDMA_ERR_DECRYPT:
2731 					/* All the TKIP-MIC failures are treated as Decrypt Errors
2732 					 * for QCN9224 Targets
2733 					 */
2734 					is_tkip_mic_err = hal_rx_msdu_end_is_tkip_mic_err(hal_soc, rx_tlv_hdr);
2735 
2736 					if (is_tkip_mic_err && txrx_peer) {
2737 						dp_rx_process_mic_error(soc, nbuf,
2738 									rx_tlv_hdr,
2739 									txrx_peer);
2740 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2741 									  rx.err.mic_err,
2742 									  1,
2743 									  link_id);
2744 						break;
2745 					}
2746 
2747 					if (txrx_peer) {
2748 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2749 									  rx.err.decrypt_err,
2750 									  1,
2751 									  link_id);
2752 						dp_rx_nbuf_free(nbuf);
2753 						break;
2754 					}
2755 
2756 					if (!dp_handle_rxdma_decrypt_err()) {
2757 						dp_rx_nbuf_free(nbuf);
2758 						break;
2759 					}
2760 
2761 					pool_id = wbm_err.info_bit.pool_id;
2762 					err_code = wbm_err.info_bit.rxdma_err_code;
2763 					tlv_hdr = rx_tlv_hdr;
2764 					dp_rx_process_rxdma_err(soc, nbuf,
2765 								tlv_hdr, NULL,
2766 								err_code,
2767 								pool_id,
2768 								link_id);
2769 					break;
2770 				case HAL_RXDMA_MULTICAST_ECHO:
2771 					if (txrx_peer)
2772 						DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
2773 									      rx.mec_drop, 1,
2774 									      qdf_nbuf_len(nbuf),
2775 									      link_id);
2776 					dp_rx_nbuf_free(nbuf);
2777 					break;
2778 				case HAL_RXDMA_UNAUTHORIZED_WDS:
2779 					pool_id = wbm_err.info_bit.pool_id;
2780 					err_code = wbm_err.info_bit.rxdma_err_code;
2781 					tlv_hdr = rx_tlv_hdr;
2782 					dp_rx_process_rxdma_err(soc, nbuf,
2783 								tlv_hdr,
2784 								txrx_peer,
2785 								err_code,
2786 								pool_id,
2787 								link_id);
2788 					break;
2789 				default:
2790 					dp_rx_nbuf_free(nbuf);
2791 					dp_err_rl("RXDMA error %d",
2792 						  wbm_err.info_bit.rxdma_err_code);
2793 				}
2794 			} else if (wbm_err.info_bit.rxdma_psh_rsn
2795 					== HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
2796 				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
2797 						    rx_tlv_hdr,
2798 						    HAL_RX_WBM_ERR_SRC_RXDMA,
2799 						    link_id);
2800 			} else if (wbm_err.info_bit.rxdma_psh_rsn
2801 					== HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
2802 				dp_rx_err_err("rxdma push reason %u",
2803 						wbm_err.info_bit.rxdma_psh_rsn);
2804 				DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
2805 				dp_rx_nbuf_free(nbuf);
2806 			} else {
2807 				/* should not enter here */
2808 				dp_rx_err_alert("invalid rxdma push reason %u",
2809 						wbm_err.info_bit.rxdma_psh_rsn);
2810 				dp_rx_nbuf_free(nbuf);
2811 				qdf_assert_always(0);
2812 			}
2813 		} else {
2814 			/* Should not come here */
2815 			qdf_assert(0);
2816 		}
2817 
2818 		if (txrx_peer)
2819 			dp_txrx_peer_unref_delete(txrx_ref_handle,
2820 						  DP_MOD_ID_RX_ERR);
2821 
2822 		nbuf = next;
2823 	}
2824 	return rx_bufs_used; /* Assume no scale factor for now */
2825 }
2826 
2827 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2828 
2829 /**
2830  * dup_desc_dbg() - dump and assert if duplicate rx desc found
2831  *
2832  * @soc: core DP main context
2833  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2834  * @rx_desc: void pointer to rx descriptor
2835  *
2836  * Return: void
2837  */
2838 static void dup_desc_dbg(struct dp_soc *soc,
2839 			 hal_rxdma_desc_t rxdma_dst_ring_desc,
2840 			 void *rx_desc)
2841 {
2842 	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
2843 	dp_rx_dump_info_and_assert(
2844 			soc,
2845 			soc->rx_rel_ring.hal_srng,
2846 			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
2847 			rx_desc);
2848 }
2849 
2850 /**
2851  * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
2852  *
2853  * @soc: core DP main context
2854  * @mac_id: mac id which is one of 3 mac_ids
2855  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2856  * @head: head of descs list to be freed
2857  * @tail: tail of descs list to be freed
2858  *
2859  * Return: number of msdu in MPDU to be popped
2860  */
2861 static inline uint32_t
2862 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
2863 	hal_rxdma_desc_t rxdma_dst_ring_desc,
2864 	union dp_rx_desc_list_elem_t **head,
2865 	union dp_rx_desc_list_elem_t **tail)
2866 {
2867 	void *rx_msdu_link_desc;
2868 	qdf_nbuf_t msdu;
2869 	qdf_nbuf_t last;
2870 	struct hal_rx_msdu_list msdu_list;
2871 	uint16_t num_msdus;
2872 	struct hal_buf_info buf_info;
2873 	uint32_t rx_bufs_used = 0;
2874 	uint32_t msdu_cnt;
2875 	uint32_t i;
2876 	uint8_t push_reason;
2877 	uint8_t rxdma_error_code = 0;
2878 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
2879 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2880 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
2881 	hal_rxdma_desc_t ring_desc;
2882 	struct rx_desc_pool *rx_desc_pool;
2883 
2884 	if (!pdev) {
2885 		dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
2886 				soc, mac_id);
2887 		return rx_bufs_used;
2888 	}
2889 
2890 	msdu = 0;
2891 
2892 	last = NULL;
2893 
2894 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
2895 				     &buf_info, &msdu_cnt);
2896 
2897 	push_reason =
2898 		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
2899 	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2900 		rxdma_error_code =
2901 			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
2902 	}
2903 
2904 	do {
2905 		rx_msdu_link_desc =
2906 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
2907 
2908 		qdf_assert_always(rx_msdu_link_desc);
2909 
2910 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
2911 				     &msdu_list, &num_msdus);
2912 
2913 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
2914 			/* If the msdus belong to an NSS offloaded radio and
2915 			 * the rbm is not SW1_BM, then return the msdu_link
2916 			 * descriptor without freeing the msdus (nbufs); let
2917 			 * these buffers be given to the NSS completion ring
2918 			 * for NSS to free them.
2919 			 * Else iterate through the msdu link desc list and
2920 			 * free each msdu in the list.
2921 			 */
2922 			if (msdu_list.rbm[0] !=
2923 				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) &&
2924 			    wlan_cfg_get_dp_pdev_nss_enabled(
2925 							pdev->wlan_cfg_ctx))
2926 				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
2927 			else {
2928 				for (i = 0; i < num_msdus; i++) {
2929 					struct dp_rx_desc *rx_desc =
2930 						soc->arch_ops.
2931 						dp_rx_desc_cookie_2_va(
2932 							soc,
2933 							msdu_list.sw_cookie[i]);
2934 					qdf_assert_always(rx_desc);
2935 					msdu = rx_desc->nbuf;
2936 					/*
2937 					 * This is an unlikely scenario
2938 					 * where the host is reaping
2939 					 * a descriptor which
2940 					 * it already reaped just a while ago
2941 					 * but is yet to replenish
2942 					 * it back to HW.
2943 					 * In this case the host will dump
2944 					 * the last 128 descriptors
2945 					 * including the software descriptor
2946 					 * rx_desc and assert.
2947 					 */
2948 					ring_desc = rxdma_dst_ring_desc;
2949 					if (qdf_unlikely(!rx_desc->in_use)) {
2950 						dup_desc_dbg(soc,
2951 							     ring_desc,
2952 							     rx_desc);
2953 						continue;
2954 					}
2955 
2956 					if (rx_desc->unmapped == 0) {
2957 						rx_desc_pool =
2958 							&soc->rx_desc_buf[rx_desc->pool_id];
2959 						dp_ipa_rx_buf_smmu_mapping_lock(soc);
2960 						dp_rx_nbuf_unmap_pool(soc,
2961 								      rx_desc_pool,
2962 								      msdu);
2963 						rx_desc->unmapped = 1;
2964 						dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2965 					}
2966 
2967 					dp_rx_err_debug("%pK: msdu_nbuf=%pK ",
2968 							soc, msdu);
2969 
2970 					dp_rx_buffer_pool_nbuf_free(soc, msdu,
2971 							rx_desc->pool_id);
2972 					rx_bufs_used++;
2973 					dp_rx_add_to_free_desc_list(head,
2974 						tail, rx_desc);
2975 				}
2976 			}
2977 		} else {
2978 			rxdma_error_code = HAL_RXDMA_ERR_WAR;
2979 		}
2980 
2981 		/*
2982 		 * Store the current link buffer into the local structure
2983 		 * to be used for release purposes.
2984 		 */
2985 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
2986 					     buf_info.paddr, buf_info.sw_cookie,
2987 					     buf_info.rbm);
2988 
2989 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
2990 					      &buf_info);
2991 		dp_rx_link_desc_return_by_addr(soc,
2992 					       (hal_buff_addrinfo_t)
2993 						rx_link_buf_info,
2994 						bm_action);
2995 	} while (buf_info.paddr);
2996 
2997 	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
2998 	if (pdev)
2999 		DP_STATS_INC(pdev, err.rxdma_error, 1);
3000 
3001 	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
3002 		dp_rx_err_err("%pK: Packet received with Decrypt error", soc);
3003 	}
3004 
3005 	return rx_bufs_used;
3006 }
3007 
3008 uint32_t
3009 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3010 		     uint32_t mac_id, uint32_t quota)
3011 {
3012 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
3013 	hal_rxdma_desc_t rxdma_dst_ring_desc;
3014 	hal_soc_handle_t hal_soc;
3015 	void *err_dst_srng;
3016 	union dp_rx_desc_list_elem_t *head = NULL;
3017 	union dp_rx_desc_list_elem_t *tail = NULL;
3018 	struct dp_srng *dp_rxdma_srng;
3019 	struct rx_desc_pool *rx_desc_pool;
3020 	uint32_t work_done = 0;
3021 	uint32_t rx_bufs_used = 0;
3022 
3023 	if (!pdev)
3024 		return 0;
3025 
3026 	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;
3027 
3028 	if (!err_dst_srng) {
3029 		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
3030 			      soc, err_dst_srng);
3031 		return 0;
3032 	}
3033 
3034 	hal_soc = soc->hal_soc;
3035 
3036 	qdf_assert(hal_soc);
3037 
3038 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
3039 		dp_rx_err_err("%pK: HAL Monitor Destination Ring access failed -- %pK",
3040 			      soc, err_dst_srng);
3041 		return 0;
3042 	}
3043 
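	/*
	 * Reap all entries from the RXDMA error destination ring and free
	 * the MSDU buffers referenced by their link descriptors.
	 */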
3044 	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
3045 		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
3046 
3047 			rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
3048 						rxdma_dst_ring_desc,
3049 						&head, &tail);
3050 	}
3051 
3052 	dp_srng_access_end(int_ctx, soc, err_dst_srng);
3053 
3054 	if (rx_bufs_used) {
3055 		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3056 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3057 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
3058 		} else {
3059 			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
3060 			rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
3061 		}
3062 
3063 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3064 			rx_desc_pool, rx_bufs_used, &head, &tail, false);
3065 
3066 		work_done += rx_bufs_used;
3067 	}
3068 
3069 	return work_done;
3070 }
3071 
3072 #ifndef QCA_HOST_MODE_WIFI_DISABLED
3073 
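/**
 * dp_wbm_int_err_mpdu_pop() - free the MSDUs referenced by a link
 *	descriptor released with a WBM internal error
 * @soc: core DP main context
 * @mac_id: lmac id / pool id the descriptors belong to
 * @rxdma_dst_ring_desc: opaque pointer to the link descriptor buf addr info
 * @head: head of the rx descriptor free list to be updated
 * @tail: tail of the rx descriptor free list to be updated
 * @rx_bufs_used: per-pool count of buffers freed, used for replenish
 *
 * Return: None
 */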
3074 static inline void
3075 dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
3076 			hal_rxdma_desc_t rxdma_dst_ring_desc,
3077 			union dp_rx_desc_list_elem_t **head,
3078 			union dp_rx_desc_list_elem_t **tail,
3079 			uint32_t *rx_bufs_used)
3080 {
3081 	void *rx_msdu_link_desc;
3082 	qdf_nbuf_t msdu;
3083 	qdf_nbuf_t last;
3084 	struct hal_rx_msdu_list msdu_list;
3085 	uint16_t num_msdus;
3086 	struct hal_buf_info buf_info;
3087 	uint32_t msdu_cnt, i;
3088 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
3089 	struct rx_desc_pool *rx_desc_pool;
3090 	struct dp_rx_desc *rx_desc;
3091 
3092 	msdu = 0;
3093 
3094 	last = NULL;
3095 
3096 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
3097 				     &buf_info, &msdu_cnt);
3098 
3099 	do {
3100 		rx_msdu_link_desc =
3101 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
3102 
3103 		if (!rx_msdu_link_desc) {
3104 			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
3105 			break;
3106 		}
3107 
3108 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
3109 				     &msdu_list, &num_msdus);
3110 
3111 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
3112 			for (i = 0; i < num_msdus; i++) {
3113 				if (!dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[i])) {
3114 					dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x",
3115 							  msdu_list.sw_cookie[i]);
3116 					continue;
3117 				}
3118 
3119 				rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3120 							soc,
3121 							msdu_list.sw_cookie[i]);
3122 				qdf_assert_always(rx_desc);
3123 				rx_desc_pool =
3124 					&soc->rx_desc_buf[rx_desc->pool_id];
3125 				msdu = rx_desc->nbuf;
3126 
3127 				/*
3128 				 * This is an unlikely scenario where the host is reaping
3129 				 * a descriptor which it already reaped just a while ago
3130 				 * but is yet to replenish it back to HW.
3131 				 */
3132 				if (qdf_unlikely(!rx_desc->in_use) ||
3133 				    qdf_unlikely(!msdu)) {
3134 					dp_rx_err_info_rl("Reaping rx_desc not in use!");
3135 					continue;
3136 				}
3137 
3138 				dp_ipa_rx_buf_smmu_mapping_lock(soc);
3139 				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
3140 				rx_desc->unmapped = 1;
3141 				dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3142 
3143 				dp_rx_buffer_pool_nbuf_free(soc, msdu,
3144 							    rx_desc->pool_id);
3145 				rx_bufs_used[rx_desc->pool_id]++;
3146 				dp_rx_add_to_free_desc_list(head,
3147 							    tail, rx_desc);
3148 			}
3149 		}
3150 
3151 		/*
3152 		 * Store the current link buffer into the local structure
3153 		 * to be used for release purposes.
3154 		 */
3155 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3156 					     buf_info.paddr, buf_info.sw_cookie,
3157 					     buf_info.rbm);
3158 
3159 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3160 					      &buf_info);
3161 		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
3162 					rx_link_buf_info,
3163 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
3164 	} while (buf_info.paddr);
3165 }
3166 
3167 void
3168 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
3169 			     uint32_t buf_type)
3170 {
3171 	struct hal_buf_info buf_info = {0};
3172 	struct dp_rx_desc *rx_desc = NULL;
3173 	struct rx_desc_pool *rx_desc_pool;
3174 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0};
3175 	union dp_rx_desc_list_elem_t *head = NULL;
3176 	union dp_rx_desc_list_elem_t *tail = NULL;
3177 	uint8_t pool_id;
3178 	uint8_t mac_id;
3179 
3180 	hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info);
3181 
3182 	if (!buf_info.paddr) {
3183 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
3184 		return;
3185 	}
3186 
3187 	/* buffer_addr_info is the first element of ring_desc */
3188 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc,
3189 				  &buf_info);
3190 
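	/*
	 * A buffer type release carries a single MSDU buffer: free its nbuf
	 * and recycle the rx descriptor. A desc type release carries a link
	 * descriptor: pop and free every MSDU it points to.
	 */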
3191 	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
3192 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
3193 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3194 							soc,
3195 							buf_info.sw_cookie);
3196 
3197 		if (rx_desc && rx_desc->nbuf) {
3198 			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
3199 			dp_ipa_rx_buf_smmu_mapping_lock(soc);
3200 			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
3201 					      rx_desc->nbuf);
3202 			rx_desc->unmapped = 1;
3203 			dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3204 
3205 			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
3206 						    rx_desc->pool_id);
3207 			dp_rx_add_to_free_desc_list(&head,
3208 						    &tail,
3209 						    rx_desc);
3210 
3211 			rx_bufs_reaped[rx_desc->pool_id]++;
3212 		}
3213 	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
3214 		pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie);
3215 
3216 		dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc,
3217 					&head, &tail, rx_bufs_reaped);
3218 	}
3219 
3220 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
3221 		struct rx_desc_pool *rx_desc_pool;
3222 		struct dp_srng *dp_rxdma_srng;
3223 
3224 		if (!rx_bufs_reaped[mac_id])
3225 			continue;
3226 
3227 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
3228 		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3229 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
3230 
3231 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3232 					rx_desc_pool,
3233 					rx_bufs_reaped[mac_id],
3234 					&head, &tail, false);
3235 	}
3236 }
3237 
3238 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3239