xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c (revision 3efaabd70475270fea7fcc46621defb016797d6e)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #include "dp_types.h"
22 #include "dp_rx.h"
23 #include "dp_tx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 #include "hal_api.h"
27 #include "qdf_trace.h"
28 #include "qdf_nbuf.h"
29 #include "dp_rx_defrag.h"
30 #include "dp_ipa.h"
31 #include "dp_internal.h"
32 #ifdef WIFI_MONITOR_SUPPORT
33 #include "dp_htt.h"
34 #include <dp_mon.h>
35 #endif
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
40 #include "qdf_net_types.h"
41 #include "dp_rx_buffer_pool.h"
42 
43 #define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
44 #define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
45 #define dp_rx_err_info(params...) \
46 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
47 #define dp_rx_err_info_rl(params...) \
48 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
49 #define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params)
50 
51 #ifndef QCA_HOST_MODE_WIFI_DISABLED
52 
53 
54 /* Max regular Rx packet routing error */
55 #define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20
56 #define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10
57 #define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* micro seconds */
58 
59 #ifdef FEATURE_MEC
60 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
61 			    struct dp_txrx_peer *txrx_peer,
62 			    uint8_t *rx_tlv_hdr,
63 			    qdf_nbuf_t nbuf)
64 {
65 	struct dp_vdev *vdev = txrx_peer->vdev;
66 	struct dp_pdev *pdev = vdev->pdev;
67 	struct dp_mec_entry *mecentry = NULL;
68 	struct dp_ast_entry *ase = NULL;
69 	uint16_t sa_idx = 0;
70 	uint8_t *data;
71 	/*
72 	 * The Multicast Echo Check is required only if the vdev is a STA
73 	 * and the received pkt is a multicast/broadcast pkt; otherwise
74 	 * skip the MEC check.
75 	 */
76 	if (vdev->opmode != wlan_op_mode_sta)
77 		return false;
78 	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
79 		return false;
80 
81 	data = qdf_nbuf_data(nbuf);
82 
83 	/*
84 	 * If the received pkt's src MAC addr matches the vdev
85 	 * MAC address, drop the pkt as it is looped back.
86 	 */
87 	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
88 			  vdev->mac_addr.raw,
89 			  QDF_MAC_ADDR_SIZE)))
90 		return true;
91 
92 	/*
93 	 * In case of qwrap isolation mode, do not drop loopback packets.
94 	 * In isolation mode, all packets from the wired stations need to go
95 	 * to rootap and loop back to reach the wireless stations and
96 	 * vice-versa.
97 	 */
98 	if (qdf_unlikely(vdev->isolation_vdev))
99 		return false;
100 
101 	/*
102 	 * If the received pkt's src MAC addr matches the MAC addr of a
103 	 * wired PC behind the STA, or matches the MAC addr of a
104 	 * wireless STA behind the repeater, then drop the pkt as it
105 	 * is looped back.
106 	 */
107 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
108 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
109 
110 		if ((sa_idx < 0) ||
111 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
112 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
113 				  "invalid sa_idx: %d", sa_idx);
114 			qdf_assert_always(0);
115 		}
116 
117 		qdf_spin_lock_bh(&soc->ast_lock);
118 		ase = soc->ast_table[sa_idx];
119 
120 		/*
121 		 * This check is not strictly needed since MEC does not depend
122 		 * on AST, but without it SON has issues in the dual-backhaul
123 		 * scenario. In APS SON mode, a client connected to the RE on
124 		 * 2G sends multicast packets; the RE forwards them to the CAP
125 		 * over the 5G backhaul and the CAP loops them back on 2G to
126 		 * the RE. On receiving them on the 2G STA vap, we would assume
127 		 * the client has roamed and kick out the client.
128 		 */
129 		if (ase && (ase->peer_id != txrx_peer->peer_id)) {
130 			qdf_spin_unlock_bh(&soc->ast_lock);
131 			goto drop;
132 		}
133 
134 		qdf_spin_unlock_bh(&soc->ast_lock);
135 	}
136 
137 	qdf_spin_lock_bh(&soc->mec_lock);
138 
139 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
140 						   &data[QDF_MAC_ADDR_SIZE]);
141 	if (!mecentry) {
142 		qdf_spin_unlock_bh(&soc->mec_lock);
143 		return false;
144 	}
145 
146 	qdf_spin_unlock_bh(&soc->mec_lock);
147 
148 drop:
149 	dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT,
150 		       soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));
151 
152 	return true;
153 }
154 #endif
155 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
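/*
 * Illustrative usage sketch (not part of the driver): a caller in the Rx
 * data path is assumed to gate multicast/broadcast frames on the MEC
 * check above and drop the looped-back copy; the surrounding handling is
 * an assumption for illustration only.
 *
 *	if (qdf_unlikely(dp_rx_mcast_echo_check(soc, txrx_peer,
 *						rx_tlv_hdr, nbuf))) {
 *		dp_rx_nbuf_free(nbuf);
 *		return;
 *	}
 */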
156 
157 void dp_rx_link_desc_refill_duplicate_check(
158 				struct dp_soc *soc,
159 				struct hal_buf_info *buf_info,
160 				hal_buff_addrinfo_t ring_buf_info)
161 {
162 	struct hal_buf_info current_link_desc_buf_info = { 0 };
163 
164 	/* do duplicate link desc address check */
165 	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
166 					  &current_link_desc_buf_info);
167 
168 	/*
169 	 * TODO - Check if the hal soc api call can be removed
170 	 * since the cookie is just used for print.
171 	 * buffer_addr_info is the first element of ring_desc
172 	 */
173 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
174 				  (uint32_t *)ring_buf_info,
175 				  &current_link_desc_buf_info);
176 
177 	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
178 			 buf_info->paddr)) {
179 		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
180 			   current_link_desc_buf_info.paddr,
181 			   current_link_desc_buf_info.sw_cookie);
182 		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
183 	}
184 	*buf_info = current_link_desc_buf_info;
185 }
186 
187 QDF_STATUS
188 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
189 			       hal_buff_addrinfo_t link_desc_addr,
190 			       uint8_t bm_action)
191 {
192 	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
193 	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
194 	hal_soc_handle_t hal_soc = soc->hal_soc;
195 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
196 	void *src_srng_desc;
197 
198 	if (!wbm_rel_srng) {
199 		dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc);
200 		return status;
201 	}
202 
203 	/* do duplicate link desc address check */
204 	dp_rx_link_desc_refill_duplicate_check(
205 				soc,
206 				&soc->last_op_info.wbm_rel_link_desc,
207 				link_desc_addr);
208 
209 	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
210 
211 		/* TODO */
212 		/*
213 		 * Need API to convert from hal_ring pointer to
214 		 * Ring Type / Ring Id combo
215 		 */
216 		dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK",
217 			      soc, wbm_rel_srng);
218 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
219 		goto done;
220 	}
221 	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
222 	if (qdf_likely(src_srng_desc)) {
223 		/* Return link descriptor through WBM ring (SW2WBM)*/
224 		hal_rx_msdu_link_desc_set(hal_soc,
225 				src_srng_desc, link_desc_addr, bm_action);
226 		status = QDF_STATUS_SUCCESS;
227 	} else {
228 		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
229 
230 		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);
231 
232 		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
233 			   srng->ring_id,
234 			   soc->stats.rx.err.hal_ring_access_full_fail);
235 		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
236 			   *srng->u.src_ring.hp_addr,
237 			   srng->u.src_ring.reap_hp,
238 			   *srng->u.src_ring.tp_addr,
239 			   srng->u.src_ring.cached_tp);
240 		QDF_BUG(0);
241 	}
242 done:
243 	hal_srng_access_end(hal_soc, wbm_rel_srng);
244 	return status;
245 
246 }
247 
248 qdf_export_symbol(dp_rx_link_desc_return_by_addr);
249 
250 QDF_STATUS
251 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
252 		       uint8_t bm_action)
253 {
254 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
255 
256 	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
257 }
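/*
 * Usage sketch only: callers in this file release a link descriptor back
 * to the HW idle list straight from the REO ring descriptor, e.g. as in
 * dp_rx_bar_frame_handle() later in this file:
 *
 *	dp_rx_link_desc_return(soc, ring_desc,
 *			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 */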
258 
259 #ifndef QCA_HOST_MODE_WIFI_DISABLED
260 
261 /**
262  * dp_rx_msdus_drop() - Drops all MSDUs of an MPDU
263  *
264  * @soc: core txrx main context
265  * @ring_desc: opaque pointer to the REO error ring descriptor
266  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
267  * @mac_id: mac ID
268  * @quota: No. of units (packets) that can be serviced in one shot.
269  *
270  * This function is used to drop all MSDUs in an MPDU
271  *
272  * Return: uint32_t: No. of elements processed
273  */
274 static uint32_t
275 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
276 		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
277 		 uint8_t *mac_id,
278 		 uint32_t quota)
279 {
280 	uint32_t rx_bufs_used = 0;
281 	void *link_desc_va;
282 	struct hal_buf_info buf_info;
283 	struct dp_pdev *pdev;
284 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
285 	int i;
286 	uint8_t *rx_tlv_hdr;
287 	uint32_t tid;
288 	struct rx_desc_pool *rx_desc_pool;
289 	struct dp_rx_desc *rx_desc;
290 	/* First field in REO Dst ring Desc is buffer_addr_info */
291 	void *buf_addr_info = ring_desc;
292 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
293 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
294 
295 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info);
296 
297 	/* buffer_addr_info is the first element of ring_desc */
298 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
299 				  (uint32_t *)ring_desc,
300 				  &buf_info);
301 
302 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
303 	if (!link_desc_va) {
304 		dp_rx_err_debug("link desc va is null, soc %pK", soc);
305 		return rx_bufs_used;
306 	}
307 
308 more_msdu_link_desc:
309 	/* No UNMAP required -- this is "malloc_consistent" memory */
310 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
311 			     &mpdu_desc_info->msdu_count);
312 
313 	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
314 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
315 						soc, msdu_list.sw_cookie[i]);
316 
317 		qdf_assert_always(rx_desc);
318 
319 		/* all buffers from an MSDU link belong to the same pdev */
320 		*mac_id = rx_desc->pool_id;
321 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
322 		if (!pdev) {
323 			dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
324 					soc, rx_desc->pool_id);
325 			return rx_bufs_used;
326 		}
327 
328 		if (!dp_rx_desc_check_magic(rx_desc)) {
329 			dp_rx_err_err("%pK: Invalid rx_desc cookie=%d",
330 				      soc, msdu_list.sw_cookie[i]);
331 			return rx_bufs_used;
332 		}
333 
334 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
335 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
336 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
337 		rx_desc->unmapped = 1;
338 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
339 
340 		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);
341 
342 		rx_bufs_used++;
343 		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
344 						rx_desc->rx_buf_start);
345 		dp_rx_err_err("%pK: Packet received with PN error for tid :%d",
346 			      soc, tid);
347 
348 		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
349 		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
350 			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);
351 
352 		dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info,
353 				      rx_desc->nbuf,
354 				      QDF_TX_RX_STATUS_DROP, true);
355 		/* Just free the buffers */
356 		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);
357 
358 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
359 					    &pdev->free_list_tail, rx_desc);
360 	}
361 
362 	/*
363 	 * If the msdus are spread across multiple link descriptors,
364 	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
365 	 * spread across multiple buffers). Hence, it is
366 	 * necessary to check the next link descriptor and release
367 	 * all the msdus that are part of it.
368 	 */
369 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
370 			link_desc_va,
371 			&next_link_desc_addr_info);
372 
373 	if (hal_rx_is_buf_addr_info_valid(
374 				&next_link_desc_addr_info)) {
375 		/* Clear the next link desc info for the current link_desc */
376 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
377 
378 		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
379 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
380 		hal_rx_buffer_addr_info_get_paddr(
381 				&next_link_desc_addr_info,
382 				&buf_info);
383 		/* buffer_addr_info is the first element of ring_desc */
384 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
385 					  (uint32_t *)&next_link_desc_addr_info,
386 					  &buf_info);
387 		cur_link_desc_addr_info = next_link_desc_addr_info;
388 		buf_addr_info = &cur_link_desc_addr_info;
389 
390 		link_desc_va =
391 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
392 
393 		goto more_msdu_link_desc;
394 	}
395 	quota--;
396 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
397 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
398 	return rx_bufs_used;
399 }
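/*
 * Condensed sketch of the link-descriptor walk implemented above
 * (illustration only; the real loop also unmaps, frees and accounts for
 * every MSDU before moving on):
 *
 *	do {
 *		hal_rx_msdu_list_get(hal_soc, link_desc_va, &msdu_list,
 *				     &msdu_count);
 *		... release each MSDU in msdu_list ...
 *		hal_rx_get_next_msdu_link_desc_buf_addr_info(
 *				link_desc_va, &next_link_desc_addr_info);
 *		... translate next_link_desc_addr_info to link_desc_va ...
 *	} while (hal_rx_is_buf_addr_info_valid(&next_link_desc_addr_info));
 */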
400 
401 /**
402  * dp_rx_pn_error_handle() - Handles PN check errors
403  *
404  * @soc: core txrx main context
405  * @ring_desc: opaque pointer to the REO error ring descriptor
406  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
407  * @mac_id: mac ID
408  * @quota: No. of units (packets) that can be serviced in one shot.
409  *
410  * This function implements PN error handling
411  * If the peer is configured to ignore the PN check errors
412  * or if DP feels, that this frame is still OK, the frame can be
413  * re-injected back to REO to use some of the other features
414  * of REO e.g. duplicate detection/routing to other cores
415  *
416  * Return: uint32_t: No. of elements processed
417  */
418 static uint32_t
419 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
420 		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
421 		      uint8_t *mac_id,
422 		      uint32_t quota)
423 {
424 	uint16_t peer_id;
425 	uint32_t rx_bufs_used = 0;
426 	struct dp_txrx_peer *txrx_peer;
427 	bool peer_pn_policy = false;
428 	dp_txrx_ref_handle txrx_ref_handle = NULL;
429 
430 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
431 					       mpdu_desc_info->peer_meta_data);
432 
433 
434 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
435 						   &txrx_ref_handle,
436 						   DP_MOD_ID_RX_ERR);
437 
438 	if (qdf_likely(txrx_peer)) {
439 		/*
440 		 * TODO: Check for peer specific policies & set peer_pn_policy
441 		 */
442 		dp_err_rl("discard rx due to PN error for peer %pK",
443 			  txrx_peer);
444 
445 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
446 	}
447 	dp_rx_err_err("%pK: Packet received with PN error", soc);
448 
449 	/* No peer PN policy -- definitely drop */
450 	if (!peer_pn_policy)
451 		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
452 						mpdu_desc_info,
453 						mac_id, quota);
454 
455 	return rx_bufs_used;
456 }
457 
458 #ifdef DP_RX_DELIVER_ALL_OOR_FRAMES
459 /**
460  * dp_rx_deliver_oor_frame() - deliver OOR frames to stack
461  * @soc: Datapath soc handler
462  * @txrx_peer: pointer to DP peer
463  * @nbuf: pointer to the skb of RX frame
464  * @frame_mask: the mask for special frame needed
465  * @rx_tlv_hdr: start of rx tlv header
466  *
467  * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
468  * single nbuf is expected.
469  *
470  * return: true - nbuf has been delivered to stack, false - not.
471  */
472 static bool
473 dp_rx_deliver_oor_frame(struct dp_soc *soc,
474 			struct dp_txrx_peer *txrx_peer,
475 			qdf_nbuf_t nbuf, uint32_t frame_mask,
476 			uint8_t *rx_tlv_hdr)
477 {
478 	uint32_t l2_hdr_offset = 0;
479 	uint16_t msdu_len = 0;
480 	uint32_t skip_len;
481 
482 	l2_hdr_offset =
483 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
484 
485 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
486 		skip_len = l2_hdr_offset;
487 	} else {
488 		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
489 		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
490 		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
491 	}
492 
493 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
494 	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
495 	qdf_nbuf_pull_head(nbuf, skip_len);
496 	qdf_nbuf_set_exc_frame(nbuf, 1);
497 
498 	dp_info_rl("OOR frame, mpdu sn 0x%x",
499 		   hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
500 	dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL);
501 	return true;
502 }
503 
504 #else
505 static bool
506 dp_rx_deliver_oor_frame(struct dp_soc *soc,
507 			struct dp_txrx_peer *txrx_peer,
508 			qdf_nbuf_t nbuf, uint32_t frame_mask,
509 			uint8_t *rx_tlv_hdr)
510 {
511 	return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
512 					   rx_tlv_hdr);
513 }
514 #endif
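/*
 * Worked example for the non-fragment path in dp_rx_deliver_oor_frame()
 * above (the TLV size is illustrative): with l2_hdr_offset = 2 and
 * soc->rx_pkt_tlv_size = 128, skip_len = 130; the nbuf length is set to
 * msdu_len + 130 and the same 130 bytes are then pulled so the data
 * pointer lands on the MSDU payload before it is handed to the stack.
 */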
515 
516 /**
517  * dp_rx_oor_handle() - Handles the msdu which is OOR error
518  *
519  * @soc: core txrx main context
520  * @nbuf: pointer to msdu skb
521  * @peer_id: dp peer ID
522  * @rx_tlv_hdr: start of rx tlv header
523  *
524  * This function processes the msdu delivered from REO2TCL
525  * ring with error type OOR
526  *
527  * Return: None
528  */
529 static void
530 dp_rx_oor_handle(struct dp_soc *soc,
531 		 qdf_nbuf_t nbuf,
532 		 uint16_t peer_id,
533 		 uint8_t *rx_tlv_hdr)
534 {
535 	uint32_t frame_mask = wlan_cfg_get_special_frame_cfg(soc->wlan_cfg_ctx);
536 
537 	struct dp_txrx_peer *txrx_peer = NULL;
538 	dp_txrx_ref_handle txrx_ref_handle = NULL;
539 
540 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
541 						   &txrx_ref_handle,
542 						   DP_MOD_ID_RX_ERR);
543 	if (!txrx_peer) {
544 		dp_info_rl("peer not found");
545 		goto free_nbuf;
546 	}
547 
548 	if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask,
549 				    rx_tlv_hdr)) {
550 		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
551 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
552 		return;
553 	}
554 
555 free_nbuf:
556 	if (txrx_peer)
557 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
558 
559 	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
560 	dp_rx_nbuf_free(nbuf);
561 }
562 
563 /**
564  * dp_rx_err_nbuf_pn_check() - Check if the PN of the current packet
565  *				is a monotonic increment over the PN
566  *				of the previous successfully re-ordered
567  *				frame.
568  * @soc: Datapath SOC handle
569  * @ring_desc: REO ring descriptor
570  * @nbuf: Current packet
571  *
572  * Return: QDF_STATUS_SUCCESS, if the pn check passes, else QDF_STATUS_E_FAILURE
573  */
574 static inline QDF_STATUS
575 dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc,
576 			qdf_nbuf_t nbuf)
577 {
578 	uint64_t prev_pn, curr_pn[2];
579 
580 	if (!hal_rx_encryption_info_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
581 		return QDF_STATUS_SUCCESS;
582 
583 	hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn);
584 	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), curr_pn);
585 
586 	if (curr_pn[0] > prev_pn)
587 		return QDF_STATUS_SUCCESS;
588 
589 	return QDF_STATUS_E_FAILURE;
590 }
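/*
 * Worked example for the check above: if the REO descriptor carries
 * prev_pn = 0x1000, a frame with curr_pn[0] = 0x1001 passes (strictly
 * greater), while curr_pn[0] = 0x1000 or 0x0fff fails and the frame is
 * treated as a PN error. Note that only the lower 64 bits (curr_pn[0])
 * take part in this comparison.
 */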
591 
592 #ifdef WLAN_SKIP_BAR_UPDATE
593 static
594 void dp_rx_err_handle_bar(struct dp_soc *soc,
595 			  struct dp_peer *peer,
596 			  qdf_nbuf_t nbuf)
597 {
598 	dp_info_rl("BAR update to H.W is skipped");
599 	DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
600 }
601 #else
602 static
603 void dp_rx_err_handle_bar(struct dp_soc *soc,
604 			  struct dp_peer *peer,
605 			  qdf_nbuf_t nbuf)
606 {
607 	uint8_t *rx_tlv_hdr;
608 	unsigned char type, subtype;
609 	uint16_t start_seq_num;
610 	uint32_t tid;
611 	QDF_STATUS status;
612 	struct ieee80211_frame_bar *bar;
613 
614 	/*
615 	 * 1. Is this a BAR frame? If not, discard it.
616 	 * 2. If it is, get the peer id, tid and ssn.
617 	 * 2a. Do a tid update.
618 	 */
619 
620 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
621 	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size);
622 
623 	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
624 	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
625 
626 	if (!(type == IEEE80211_FC0_TYPE_CTL &&
627 	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
628 		dp_err_rl("Not a BAR frame!");
629 		return;
630 	}
631 
632 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
633 	qdf_assert_always(tid < DP_MAX_TIDS);
634 
635 	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
636 
637 	dp_info_rl("tid %u window_size %u start_seq_num %u",
638 		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);
639 
640 	status = dp_rx_tid_update_wifi3(peer, tid,
641 					peer->rx_tid[tid].ba_win_size,
642 					start_seq_num,
643 					true);
644 	if (status != QDF_STATUS_SUCCESS) {
645 		dp_err_rl("failed to handle bar frame update rx tid");
646 		DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
647 	} else {
648 		DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
649 	}
650 }
651 #endif
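/*
 * Worked example for the SSN extraction in dp_rx_err_handle_bar(): the
 * 802.11 sequence control field keeps the fragment number in its low
 * bits (IEEE80211_SEQ_SEQ_SHIFT), so a BAR carrying i_seq = 0x1230 in
 * little-endian order yields start_seq_num = 0x123, which is then used
 * by dp_rx_tid_update_wifi3() to move the reorder window.
 */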
652 
653 /**
654  * _dp_rx_bar_frame_handle() - Core of the BAR frame handling
655  * @soc: Datapath SoC handle
656  * @nbuf: packet being processed
657  * @mpdu_desc_info: mpdu desc info for the current packet
658  * @tid: tid on which the packet arrived
659  * @err_status: Flag to indicate if REO encountered an error while routing this
660  *		frame
661  * @error_code: REO error code
662  *
663  * Return: None
664  */
665 static void
666 _dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
667 			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
668 			uint32_t tid, uint8_t err_status, uint32_t error_code)
669 {
670 	uint16_t peer_id;
671 	struct dp_peer *peer;
672 
673 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
674 					       mpdu_desc_info->peer_meta_data);
675 	peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
676 	if (!peer)
677 		return;
678 
679 	dp_info_rl("BAR frame: "
680 		" peer_id = %d"
681 		" tid = %u"
682 		" SSN = %d"
683 		" error status = %d",
684 		peer->peer_id,
685 		tid,
686 		mpdu_desc_info->mpdu_seq,
687 		err_status);
688 
689 	if (err_status == HAL_REO_ERROR_DETECTED) {
690 		switch (error_code) {
691 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
692 		case HAL_REO_ERR_BAR_FRAME_OOR:
693 			dp_rx_err_handle_bar(soc, peer, nbuf);
694 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
695 			break;
696 		default:
697 			DP_STATS_INC(soc, rx.bar_frame, 1);
698 		}
699 	}
700 
701 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
702 }
703 
704 /**
705  * dp_rx_bar_frame_handle() - Function to handle err BAR frames
706  * @soc: core DP main context
707  * @ring_desc: Hal ring desc
708  * @rx_desc: dp rx desc
709  * @mpdu_desc_info: mpdu desc info
710  * @err_status: error status
711  * @err_code: error code
712  *
713  * Handle the error BAR frames received. Ensure the SOC level
714  * stats are updated based on the REO error code. The BAR frames
715  * are further processed by updating the Rx tids with the start
716  * sequence number (SSN) and BA window size. Desc is returned
717  * to the free desc list
718  *
719  * Return: none
720  */
721 static void
722 dp_rx_bar_frame_handle(struct dp_soc *soc,
723 		       hal_ring_desc_t ring_desc,
724 		       struct dp_rx_desc *rx_desc,
725 		       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
726 		       uint8_t err_status,
727 		       uint32_t err_code)
728 {
729 	qdf_nbuf_t nbuf;
730 	struct dp_pdev *pdev;
731 	struct rx_desc_pool *rx_desc_pool;
732 	uint8_t *rx_tlv_hdr;
733 	uint32_t tid;
734 
735 	nbuf = rx_desc->nbuf;
736 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
737 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
738 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
739 	rx_desc->unmapped = 1;
740 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
741 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
742 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
743 					rx_tlv_hdr);
744 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
745 
746 	if (!pdev) {
747 		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
748 				soc, rx_desc->pool_id);
749 		return;
750 	}
751 
752 	_dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status,
753 				err_code);
754 	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf,
755 			      QDF_TX_RX_STATUS_DROP, true);
756 	dp_rx_link_desc_return(soc, ring_desc,
757 			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
758 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
759 				    rx_desc->pool_id);
760 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
761 				    &pdev->free_list_tail,
762 				    rx_desc);
763 }
764 
765 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
766 
767 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
768 		       uint16_t peer_id, uint8_t tid)
769 {
770 	struct dp_peer *peer = NULL;
771 	struct dp_rx_tid *rx_tid = NULL;
772 	struct dp_txrx_peer *txrx_peer;
773 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;
774 
775 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
776 	if (!peer) {
777 		dp_rx_err_info_rl("%pK: peer not found", soc);
778 		goto free_nbuf;
779 	}
780 
781 	txrx_peer = dp_get_txrx_peer(peer);
782 	if (!txrx_peer) {
783 		dp_rx_err_info_rl("%pK: txrx_peer not found", soc);
784 		goto free_nbuf;
785 	}
786 
787 	if (tid >= DP_MAX_TIDS) {
788 		dp_info_rl("invalid tid");
789 		goto nbuf_deliver;
790 	}
791 
792 	rx_tid = &peer->rx_tid[tid];
793 	qdf_spin_lock_bh(&rx_tid->tid_lock);
794 
795 	/* only if the BA session is active, allow sending a delba */
796 	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
797 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
798 		goto nbuf_deliver;
799 	}
800 
801 	if (!rx_tid->delba_tx_status) {
802 		rx_tid->delba_tx_retry++;
803 		rx_tid->delba_tx_status = 1;
804 		rx_tid->delba_rcode =
805 			IEEE80211_REASON_QOS_SETUP_REQUIRED;
806 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
807 		if (soc->cdp_soc.ol_ops->send_delba) {
808 			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
809 				     1);
810 			soc->cdp_soc.ol_ops->send_delba(
811 					peer->vdev->pdev->soc->ctrl_psoc,
812 					peer->vdev->vdev_id,
813 					peer->mac_addr.raw,
814 					tid,
815 					rx_tid->delba_rcode,
816 					CDP_DELBA_2K_JUMP);
817 		}
818 	} else {
819 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
820 	}
821 
822 nbuf_deliver:
823 	if (dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
824 					rx_tlv_hdr)) {
825 		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
826 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
827 		return;
828 	}
829 
830 free_nbuf:
831 	if (peer)
832 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
833 	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
834 	dp_rx_nbuf_free(nbuf);
835 }
836 
837 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
838     defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
839 bool
840 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
841 					      uint8_t pool_id,
842 					      uint8_t *rx_tlv_hdr,
843 					      qdf_nbuf_t nbuf)
844 {
845 	struct dp_peer *peer = NULL;
846 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
847 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
848 	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;
849 
850 	if (!pdev) {
851 		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
852 				soc, pool_id);
853 		return false;
854 	}
855 	/*
856 	 * WAR- In certain types of packets if peer_id is not correct then
856 	 * WAR - In certain types of packets, if the peer_id is not correct,
857 	 * the driver may not be able to find the peer. Try finding the peer
858 	 * by addr_2 of the received MPDU.
860 	if (wh)
861 		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
862 					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
863 	if (peer) {
864 		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
865 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
866 				     QDF_TRACE_LEVEL_DEBUG);
867 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
868 				 1, qdf_nbuf_len(nbuf));
869 		dp_rx_nbuf_free(nbuf);
870 
871 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
872 		return true;
873 	}
874 	return false;
875 }
876 #else
877 bool
878 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
879 					      uint8_t pool_id,
880 					      uint8_t *rx_tlv_hdr,
881 					      qdf_nbuf_t nbuf)
882 {
883 	return false;
884 }
885 #endif
886 
887 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
888 {
889 	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
890 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
891 				 1, pkt_len);
892 		return true;
893 	} else {
894 		return false;
895 	}
896 }
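/*
 * Worked example: callers build pkt_len as
 * msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size and reject anything
 * that cannot fit a single Rx buffer. Assuming an RX_DATA_BUFFER_SIZE of
 * 2048 (illustrative), a computed pkt_len of 4000 is dropped and counted
 * under rx.err.rx_invalid_pkt_len, while 1600 is accepted.
 */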
897 
898 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
899 void
900 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
901 			    struct dp_vdev *vdev,
902 			    struct dp_txrx_peer *txrx_peer,
903 			    qdf_nbuf_t nbuf,
904 			    qdf_nbuf_t tail,
905 			    bool is_eapol)
906 {
907 	if (is_eapol && soc->eapol_over_control_port)
908 		dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
909 	else
910 		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
911 }
912 #else
913 void
914 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
915 			    struct dp_vdev *vdev,
916 			    struct dp_txrx_peer *txrx_peer,
917 			    qdf_nbuf_t nbuf,
918 			    qdf_nbuf_t tail,
919 			    bool is_eapol)
920 {
921 	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
922 }
923 #endif
924 
925 #ifdef WLAN_FEATURE_11BE_MLO
926 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
927 {
928 	return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
929 			     QDF_MAC_ADDR_SIZE) == 0) ||
930 		(qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
931 			     QDF_MAC_ADDR_SIZE) == 0));
932 }
933 
934 #else
935 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
936 {
937 	return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
938 			    QDF_MAC_ADDR_SIZE) == 0);
939 }
940 #endif
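/*
 * Illustrative use (see dp_rx_err_route_hdl() later in this file):
 * EAPOL/WAPI frames are indicated to the stack only when the destination
 * matches the vap, which for 11be MLO peers means either the link MAC or
 * the MLD MAC:
 *
 *	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
 *	if (dp_rx_err_match_dhost(eh, vdev))
 *		... deliver to stack ...
 *	else
 *		... drop ...
 */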
941 
942 #ifndef QCA_HOST_MODE_WIFI_DISABLED
943 
944 bool
945 dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
946 {
947 	struct dp_soc *soc = vdev->pdev->soc;
948 
949 	if (!vdev->drop_3addr_mcast)
950 		return false;
951 
952 	if (vdev->opmode != wlan_op_mode_sta)
953 		return false;
954 
955 	if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
956 		return true;
957 
958 	return false;
959 }
960 
961 /**
962  * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed
963  *				for this frame received in REO error ring.
964  * @soc: Datapath SOC handle
965  * @error: REO error detected or not
966  * @error_code: Error code in case of REO error
967  *
968  * Return: true if the pn check is needed in software,
969  *	false if the pn check is not needed.
970  */
971 static inline bool
972 dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
973 			     uint32_t error_code)
974 {
975 	return (soc->features.pn_in_reo_dest &&
976 		(error == HAL_REO_ERROR_DETECTED &&
977 		 (hal_rx_reo_is_2k_jump(error_code) ||
978 		  hal_rx_reo_is_oor_error(error_code) ||
979 		  hal_rx_reo_is_bar_oor_2k_jump(error_code))));
980 }
981 
982 #ifdef DP_WAR_INVALID_FIRST_MSDU_FLAG
983 static inline void
984 dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
985 				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
986 				  bool first_msdu_in_mpdu_processed)
987 {
988 	if (first_msdu_in_mpdu_processed) {
989 		/*
990 		 * This is the 2nd indication of first_msdu in the same mpdu.
991 		 * Skip re-parsing the mpdu_desc_info and use the cached one,
992 		 * since this msdu is most probably from the current mpdu
993 		 * which is being processed
994 		 */
995 	} else {
996 		hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc,
997 						   qdf_nbuf_data(nbuf),
998 						   mpdu_desc_info);
999 	}
1000 }
1001 #else
1002 static inline void
1003 dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
1004 				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1005 				  bool first_msdu_in_mpdu_processed)
1006 {
1007 	hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, qdf_nbuf_data(nbuf),
1008 					   mpdu_desc_info);
1009 }
1010 #endif
1011 
1012 /**
1013  * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
1014  *
1015  * @soc: core txrx main context
1016  * @ring_desc: opaque pointer to the REO error ring descriptor
1017  * @mpdu_desc_info: pointer to mpdu level description info
1018  * @link_desc_va: pointer to msdu_link_desc virtual address
1019  * @err_code: reo error code fetched from ring entry
1020  *
1021  * Function to handle msdus fetched from msdu link desc, currently
1022  * supporting the REO error codes NULL queue, 2K jump and OOR.
1023  *
1024  * Return: msdu count processed
1025  */
1026 static uint32_t
1027 dp_rx_reo_err_entry_process(struct dp_soc *soc,
1028 			    void *ring_desc,
1029 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1030 			    void *link_desc_va,
1031 			    enum hal_reo_error_code err_code)
1032 {
1033 	uint32_t rx_bufs_used = 0;
1034 	struct dp_pdev *pdev;
1035 	int i;
1036 	uint8_t *rx_tlv_hdr_first;
1037 	uint8_t *rx_tlv_hdr_last;
1038 	uint32_t tid = DP_MAX_TIDS;
1039 	uint16_t peer_id;
1040 	struct dp_rx_desc *rx_desc;
1041 	struct rx_desc_pool *rx_desc_pool;
1042 	qdf_nbuf_t nbuf;
1043 	qdf_nbuf_t next_nbuf;
1044 	struct hal_buf_info buf_info;
1045 	struct hal_rx_msdu_list msdu_list;
1046 	uint16_t num_msdus;
1047 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
1048 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
1049 	/* First field in REO Dst ring Desc is buffer_addr_info */
1050 	void *buf_addr_info = ring_desc;
1051 	qdf_nbuf_t head_nbuf = NULL;
1052 	qdf_nbuf_t tail_nbuf = NULL;
1053 	uint16_t msdu_processed = 0;
1054 	QDF_STATUS status;
1055 	bool ret, is_pn_check_needed;
1056 	uint8_t rx_desc_pool_id;
1057 	struct dp_txrx_peer *txrx_peer = NULL;
1058 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1059 	hal_ring_handle_t hal_ring_hdl = soc->reo_exception_ring.hal_srng;
1060 	bool first_msdu_in_mpdu_processed = false;
1061 	bool msdu_dropped = false;
1062 	uint8_t link_id = 0;
1063 
1064 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1065 					mpdu_desc_info->peer_meta_data);
1066 	is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
1067 							  HAL_REO_ERROR_DETECTED,
1068 							  err_code);
1069 more_msdu_link_desc:
1070 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
1071 			     &num_msdus);
1072 	for (i = 0; i < num_msdus; i++) {
1073 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
1074 						soc,
1075 						msdu_list.sw_cookie[i]);
1076 
1077 		if (dp_assert_always_internal_stat(rx_desc, soc,
1078 						   rx.err.reo_err_rx_desc_null))
1079 			continue;
1080 
1081 		nbuf = rx_desc->nbuf;
1082 
1083 		/*
1084 		 * This is an unlikely scenario where the host is reaping
1085 		 * a descriptor which it already reaped just a while ago
1086 		 * but is yet to replenish it back to HW.
1087 		 * In this case host will dump the last 128 descriptors
1088 		 * including the software descriptor rx_desc and assert.
1089 		 */
1090 		if (qdf_unlikely(!rx_desc->in_use) ||
1091 		    qdf_unlikely(!nbuf)) {
1092 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1093 			dp_info_rl("Reaping rx_desc not in use!");
1094 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1095 						   ring_desc, rx_desc);
1096 			/* ignore duplicate RX desc and continue to process */
1097 			/* Pop out the descriptor */
1098 			msdu_dropped = true;
1099 			continue;
1100 		}
1101 
1102 		ret = dp_rx_desc_paddr_sanity_check(rx_desc,
1103 						    msdu_list.paddr[i]);
1104 		if (!ret) {
1105 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1106 			rx_desc->in_err_state = 1;
1107 			msdu_dropped = true;
1108 			continue;
1109 		}
1110 
1111 		rx_desc_pool_id = rx_desc->pool_id;
1112 		/* all buffers from a MSDU link belong to same pdev */
1113 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id);
1114 
1115 		rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id];
1116 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
1117 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
1118 		rx_desc->unmapped = 1;
1119 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1120 
1121 		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
1122 		rx_bufs_used++;
1123 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1124 					    &pdev->free_list_tail, rx_desc);
1125 
1126 		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);
1127 
1128 		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
1129 				 HAL_MSDU_F_MSDU_CONTINUATION)) {
1130 			qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
1131 			continue;
1132 		}
1133 
1134 		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
1135 					     rx_desc_pool_id)) {
1136 			/* MSDU queued back to the pool */
1137 			msdu_dropped = true;
1138 			head_nbuf = NULL;
1139 			goto process_next_msdu;
1140 		}
1141 
1142 		if (is_pn_check_needed) {
1143 			if (msdu_list.msdu_info[i].msdu_flags &
1144 			    HAL_MSDU_F_FIRST_MSDU_IN_MPDU) {
1145 				dp_rx_err_populate_mpdu_desc_info(soc, nbuf,
1146 						mpdu_desc_info,
1147 						first_msdu_in_mpdu_processed);
1148 				first_msdu_in_mpdu_processed = true;
1149 			} else {
1150 				if (!first_msdu_in_mpdu_processed) {
1151 					/*
1152 					 * If no msdu in this mpdu was dropped
1153 					 * due to failed sanity checks, then
1154 					 * its not expected to hit this
1155 					 * condition. Hence we assert here.
1156 					 */
1157 					if (!msdu_dropped)
1158 						qdf_assert_always(0);
1159 
1160 					/*
1161 					 * We do not have valid mpdu_desc_info
1162 					 * to process this nbuf, hence drop it.
1163 					 * TODO - Increment stats
1164 					 */
1165 					goto process_next_msdu;
1166 				}
1167 				/*
1168 				 * DO NOTHING -
1169 				 * Continue using the same mpdu_desc_info
1170 				 * details populated from the first msdu in
1171 				 * the mpdu.
1172 				 */
1173 			}
1174 
1175 			status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf);
1176 			if (QDF_IS_STATUS_ERROR(status)) {
1177 				DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail,
1178 					     1);
1179 				goto process_next_msdu;
1180 			}
1181 
1182 			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1183 					mpdu_desc_info->peer_meta_data);
1184 
1185 			if (mpdu_desc_info->bar_frame)
1186 				_dp_rx_bar_frame_handle(soc, nbuf,
1187 							mpdu_desc_info, tid,
1188 							HAL_REO_ERROR_DETECTED,
1189 							err_code);
1190 		}
1191 
1192 		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
1193 		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);
1194 
1195 		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
1196 			/*
1197 			 * For the SG case, only the length of the last skb is
1198 			 * valid, as HW populates the msdu_len only for the last
1199 			 * msdu in the rx link descriptor; use the length from
1200 			 * the last skb to overwrite the head skb for further
1201 			 * SG processing.
1202 			 */
1203 			QDF_NBUF_CB_RX_PKT_LEN(head_nbuf) =
1204 					QDF_NBUF_CB_RX_PKT_LEN(tail_nbuf);
1205 			nbuf = dp_rx_sg_create(soc, head_nbuf);
1206 			qdf_nbuf_set_is_frag(nbuf, 1);
1207 			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
1208 		}
1209 		head_nbuf = NULL;
1210 
1211 		dp_rx_nbuf_set_link_id_from_tlv(soc, qdf_nbuf_data(nbuf), nbuf);
1212 
1213 		if (pdev && pdev->link_peer_stats &&
1214 		    txrx_peer && txrx_peer->is_mld_peer) {
1215 			link_id = dp_rx_get_stats_arr_idx_from_link_id(
1216 								nbuf,
1217 								txrx_peer);
1218 		}
1219 
1220 		if (txrx_peer)
1221 			dp_rx_set_nbuf_band(nbuf, txrx_peer, link_id);
1222 
1223 		switch (err_code) {
1224 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
1225 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
1226 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
1227 			/*
1228 			 * The mpdu start description tlv is valid only for the
1229 			 * first msdu; reuse it for the following msdus.
1230 			 */
1231 			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1232 							   rx_tlv_hdr_last))
1233 				tid = hal_rx_mpdu_start_tid_get(
1234 							soc->hal_soc,
1235 							rx_tlv_hdr_first);
1236 
1237 			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
1238 					  peer_id, tid);
1239 			break;
1240 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
1241 		case HAL_REO_ERR_BAR_FRAME_OOR:
1242 			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
1243 			break;
1244 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
1245 			txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(
1246 							soc, peer_id,
1247 							&txrx_ref_handle,
1248 							DP_MOD_ID_RX_ERR);
1249 			if (!txrx_peer)
1250 				dp_info_rl("txrx_peer is null peer_id %u",
1251 					   peer_id);
1252 			soc->arch_ops.dp_rx_null_q_desc_handle(soc, nbuf,
1253 							       rx_tlv_hdr_last,
1254 							       rx_desc_pool_id,
1255 							       txrx_peer,
1256 							       TRUE,
1257 							       link_id);
1258 			if (txrx_peer)
1259 				dp_txrx_peer_unref_delete(txrx_ref_handle,
1260 							  DP_MOD_ID_RX_ERR);
1261 			break;
1262 		default:
1263 			dp_err_rl("Non-support error code %d", err_code);
1264 			dp_rx_nbuf_free(nbuf);
1265 		}
1266 
1267 process_next_msdu:
1268 		nbuf = head_nbuf;
1269 		while (nbuf) {
1270 			next_nbuf = qdf_nbuf_next(nbuf);
1271 			dp_rx_nbuf_free(nbuf);
1272 			nbuf = next_nbuf;
1273 		}
1274 		msdu_processed++;
1275 		head_nbuf = NULL;
1276 		tail_nbuf = NULL;
1277 	}
1278 
1279 	/*
1280 	 * If the msdus are spread across multiple link descriptors,
1281 	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
1282 	 * spread across multiple buffers). Hence, it is
1283 	 * necessary to check the next link descriptor and release
1284 	 * all the msdus that are part of it.
1285 	 */
1286 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
1287 			link_desc_va,
1288 			&next_link_desc_addr_info);
1289 
1290 	if (hal_rx_is_buf_addr_info_valid(
1291 				&next_link_desc_addr_info)) {
1292 		/* Clear the next link desc info for the current link_desc */
1293 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
1294 		dp_rx_link_desc_return_by_addr(
1295 				soc,
1296 				buf_addr_info,
1297 				HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1298 
1299 		hal_rx_buffer_addr_info_get_paddr(
1300 				&next_link_desc_addr_info,
1301 				&buf_info);
1302 		/* buffer_addr_info is the first element of ring_desc */
1303 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
1304 					  (uint32_t *)&next_link_desc_addr_info,
1305 					  &buf_info);
1306 		link_desc_va =
1307 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1308 		cur_link_desc_addr_info = next_link_desc_addr_info;
1309 		buf_addr_info = &cur_link_desc_addr_info;
1310 
1311 		goto more_msdu_link_desc;
1312 	}
1313 
1314 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
1315 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1316 	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
1317 		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);
1318 
1319 	return rx_bufs_used;
1320 }
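/*
 * Summary of the per-error-code dispatch performed above (control-flow
 * illustration only):
 *
 *	2K jump  (REGULAR/BAR/FLAG_SET)  -> dp_2k_jump_handle()
 *	OOR      (REGULAR/BAR)           -> dp_rx_oor_handle()
 *	QUEUE_DESC_ADDR_0 (NULL queue)   -> arch_ops.dp_rx_null_q_desc_handle()
 *	anything else                    -> dp_rx_nbuf_free()
 */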
1321 
1322 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1323 
1324 void
1325 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
1326 			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
1327 			uint8_t err_code, uint8_t mac_id, uint8_t link_id)
1328 {
1329 	uint32_t pkt_len, l2_hdr_offset;
1330 	uint16_t msdu_len;
1331 	struct dp_vdev *vdev;
1332 	qdf_ether_header_t *eh;
1333 	bool is_broadcast;
1334 
1335 	/*
1336 	 * Check if DMA completed -- msdu_done is the last bit
1337 	 * to be written
1338 	 */
1339 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1340 
1341 		dp_err_rl("MSDU DONE failure");
1342 
1343 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1344 				     QDF_TRACE_LEVEL_INFO);
1345 		qdf_assert(0);
1346 	}
1347 
1348 	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
1349 							   rx_tlv_hdr);
1350 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1351 	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
1352 
1353 	if (dp_rx_check_pkt_len(soc, pkt_len)) {
1354 		/* Drop & free packet */
1355 		dp_rx_nbuf_free(nbuf);
1356 		return;
1357 	}
1358 	/* Set length in nbuf */
1359 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
1360 
1361 	qdf_nbuf_set_next(nbuf, NULL);
1362 
1363 	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1364 	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
1365 
1366 	if (!txrx_peer) {
1367 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL");
1368 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1369 				qdf_nbuf_len(nbuf));
1370 		/* Trigger invalid peer handler wrapper */
1371 		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
1372 		return;
1373 	}
1374 
1375 	vdev = txrx_peer->vdev;
1376 	if (!vdev) {
1377 		dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
1378 				 vdev);
1379 		/* Drop & free packet */
1380 		dp_rx_nbuf_free(nbuf);
1381 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1382 		return;
1383 	}
1384 
1385 	/*
1386 	 * Advance the packet start pointer by total size of
1387 	 * pre-header TLVs
1388 	 */
1389 	dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset);
1390 
1391 	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
1392 		uint8_t *pkt_type;
1393 
1394 		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
1395 		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
1396 			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
1397 							htons(QDF_LLC_STP)) {
1398 				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
1399 				goto process_mesh;
1400 			} else {
1401 				goto process_rx;
1402 			}
1403 		}
1404 	}
1405 	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
1406 		goto process_mesh;
1407 
1408 	/*
1409 	 * WAPI cert AP sends rekey frames as unencrypted.
1410 	 * Thus RXDMA will report unencrypted frame error.
1411 	 * To pass WAPI cert case, SW needs to pass unencrypted
1412 	 * rekey frame to stack.
1413 	 */
1414 	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1415 		goto process_rx;
1416 	}
1417 	/*
1418 	 * In the dynamic WEP case, rekey frames are not encrypted,
1419 	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled and
1420 	 * key install is already done.
1421 	 */
1422 	if ((vdev->sec_type == cdp_sec_type_wep104) &&
1423 	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
1424 		goto process_rx;
1425 
1426 process_mesh:
1427 
1428 	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
1429 		dp_rx_nbuf_free(nbuf);
1430 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1431 		return;
1432 	}
1433 
1434 	if (vdev->mesh_vdev) {
1435 		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
1436 				      == QDF_STATUS_SUCCESS) {
1437 			dp_rx_err_info("%pK: mesh pkt filtered", soc);
1438 			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);
1439 
1440 			dp_rx_nbuf_free(nbuf);
1441 			return;
1442 		}
1443 		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
1444 	}
1445 process_rx:
1446 	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1447 							rx_tlv_hdr) &&
1448 		(vdev->rx_decap_type ==
1449 				htt_cmn_pkt_type_ethernet))) {
1450 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1451 		is_broadcast = (QDF_IS_ADDR_BROADCAST
1452 				(eh->ether_dhost)) ? 1 : 0;
1453 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
1454 					      qdf_nbuf_len(nbuf), link_id);
1455 		if (is_broadcast) {
1456 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1,
1457 						      qdf_nbuf_len(nbuf),
1458 						      link_id);
1459 		}
1460 	} else {
1461 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.unicast, 1,
1462 					      qdf_nbuf_len(nbuf),
1463 					      link_id);
1464 	}
1465 
1466 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1467 		dp_rx_deliver_raw(vdev, nbuf, txrx_peer, link_id);
1468 	} else {
1469 		/* Update the protocol tag in SKB based on CCE metadata */
1470 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1471 					  EXCEPTION_DEST_RING_ID, true, true);
1472 		/* Update the flow tag in SKB based on FSE metadata */
1473 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
1474 		DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
1475 		qdf_nbuf_set_exc_frame(nbuf, 1);
1476 		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
1477 					    qdf_nbuf_is_ipv4_eapol_pkt(nbuf));
1478 	}
1479 
1480 	return;
1481 }
1482 
1483 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
1484 			     uint8_t *rx_tlv_hdr,
1485 			     struct dp_txrx_peer *txrx_peer)
1486 {
1487 	struct dp_vdev *vdev = NULL;
1488 	struct dp_pdev *pdev = NULL;
1489 	struct ol_if_ops *tops = NULL;
1490 	uint16_t rx_seq, fragno;
1491 	uint8_t is_raw;
1492 	unsigned int tid;
1493 	QDF_STATUS status;
1494 	struct cdp_rx_mic_err_info mic_failure_info;
1495 
1496 	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1497 					    rx_tlv_hdr))
1498 		return;
1499 
1500 	if (!txrx_peer) {
1501 		dp_info_rl("txrx_peer not found");
1502 		goto fail;
1503 	}
1504 
1505 	vdev = txrx_peer->vdev;
1506 	if (!vdev) {
1507 		dp_info_rl("VDEV not found");
1508 		goto fail;
1509 	}
1510 
1511 	pdev = vdev->pdev;
1512 	if (!pdev) {
1513 		dp_info_rl("PDEV not found");
1514 		goto fail;
1515 	}
1516 
1517 	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
1518 	if (is_raw) {
1519 		fragno = dp_rx_frag_get_mpdu_frag_number(soc,
1520 							 qdf_nbuf_data(nbuf));
1521 		/* Can get only last fragment */
1522 		if (fragno) {
1523 			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
1524 							qdf_nbuf_data(nbuf));
1525 			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
1526 							qdf_nbuf_data(nbuf));
1527 
1528 			status = dp_rx_defrag_add_last_frag(soc, txrx_peer,
1529 							    tid, rx_seq, nbuf);
1530 			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
1531 				   "status %d !", rx_seq, fragno, status);
1532 			return;
1533 		}
1534 	}
1535 
1536 	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
1537 				  &mic_failure_info.da_mac_addr.bytes[0])) {
1538 		dp_err_rl("Failed to get da_mac_addr");
1539 		goto fail;
1540 	}
1541 
1542 	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
1543 				  &mic_failure_info.ta_mac_addr.bytes[0])) {
1544 		dp_err_rl("Failed to get ta_mac_addr");
1545 		goto fail;
1546 	}
1547 
1548 	mic_failure_info.key_id = 0;
1549 	mic_failure_info.multicast =
1550 		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
1551 	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
1552 	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
1553 	mic_failure_info.data = NULL;
1554 	mic_failure_info.vdev_id = vdev->vdev_id;
1555 
1556 	tops = pdev->soc->cdp_soc.ol_ops;
1557 	if (tops->rx_mic_error)
1558 		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
1559 				   &mic_failure_info);
1560 
1561 fail:
1562 	dp_rx_nbuf_free(nbuf);
1563 	return;
1564 }
1565 
1566 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
1567 static void dp_rx_peek_trapped_packet(struct dp_soc *soc,
1568 				      struct dp_vdev *vdev)
1569 {
1570 	if (soc->cdp_soc.ol_ops->send_wakeup_trigger)
1571 		soc->cdp_soc.ol_ops->send_wakeup_trigger(soc->ctrl_psoc,
1572 				vdev->vdev_id);
1573 }
1574 #else
1575 static void dp_rx_peek_trapped_packet(struct dp_soc *soc,
1576 				      struct dp_vdev *vdev)
1577 {
1578 	return;
1579 }
1580 #endif
1581 
1582 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1583 	defined(WLAN_MCAST_MLO)
1584 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1585 			       struct dp_vdev *vdev,
1586 			       struct dp_txrx_peer *peer,
1587 			       qdf_nbuf_t nbuf,
1588 			       uint8_t link_id)
1589 {
1590 	if (soc->arch_ops.dp_rx_mcast_handler) {
1591 		if (soc->arch_ops.dp_rx_mcast_handler(soc, vdev, peer,
1592 						      nbuf, link_id))
1593 			return true;
1594 	}
1595 	return false;
1596 }
1597 #else
1598 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1599 			       struct dp_vdev *vdev,
1600 			       struct dp_txrx_peer *peer,
1601 			       qdf_nbuf_t nbuf,
1602 			       uint8_t link_id)
1603 {
1604 	return false;
1605 }
1606 #endif
1607 
1608 /**
1609  * dp_rx_err_route_hdl() - Function to send EAPOL frames to the stack
1610  *                            and free any other packet which comes in
1611  *                            this path.
1612  *
1613  * @soc: core DP main context
1614  * @nbuf: buffer pointer
1615  * @txrx_peer: txrx peer handle
1616  * @rx_tlv_hdr: start of rx tlv header
1617  * @err_src: rxdma/reo
1618  * @link_id: link id on which the packet is received
1619  *
1620  * This function indicates EAPOL frame received in wbm error ring to stack.
1621  * Any other frame should be dropped.
1622  *
1623  * Return: None
1624  */
1625 static void
1626 dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
1627 		    struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr,
1628 		    enum hal_rx_wbm_error_source err_src,
1629 		    uint8_t link_id)
1630 {
1631 	uint32_t pkt_len;
1632 	uint16_t msdu_len;
1633 	struct dp_vdev *vdev;
1634 	struct hal_rx_msdu_metadata msdu_metadata;
1635 	bool is_eapol;
1636 
1637 	qdf_nbuf_set_rx_chfrag_start(
1638 				nbuf,
1639 				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1640 							       rx_tlv_hdr));
1641 	qdf_nbuf_set_rx_chfrag_end(nbuf,
1642 				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
1643 								 rx_tlv_hdr));
1644 	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1645 								  rx_tlv_hdr));
1646 	qdf_nbuf_set_da_valid(nbuf,
1647 			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
1648 							      rx_tlv_hdr));
1649 	qdf_nbuf_set_sa_valid(nbuf,
1650 			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
1651 							      rx_tlv_hdr));
1652 
1653 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
1654 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1655 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
1656 
1657 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
1658 		if (dp_rx_check_pkt_len(soc, pkt_len))
1659 			goto drop_nbuf;
1660 
1661 		/* Set length in nbuf */
1662 		qdf_nbuf_set_pktlen(
1663 			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
1664 	}
1665 
1666 	/*
1667 	 * Check if DMA completed -- msdu_done is the last bit
1668 	 * to be written
1669 	 */
1670 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1671 		dp_err_rl("MSDU DONE failure");
1672 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1673 				     QDF_TRACE_LEVEL_INFO);
1674 		qdf_assert(0);
1675 	}
1676 
1677 	if (!txrx_peer)
1678 		goto drop_nbuf;
1679 
1680 	vdev = txrx_peer->vdev;
1681 	if (!vdev) {
1682 		dp_err_rl("Null vdev!");
1683 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1684 		goto drop_nbuf;
1685 	}
1686 
1687 	/*
1688 	 * Advance the packet start pointer by total size of
1689 	 * pre-header TLVs
1690 	 */
1691 	if (qdf_nbuf_is_frag(nbuf))
1692 		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
1693 	else
1694 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
1695 				   soc->rx_pkt_tlv_size));
1696 
1697 	if (hal_rx_msdu_cce_metadata_get(soc->hal_soc, rx_tlv_hdr) ==
1698 			CDP_STANDBY_METADATA)
1699 		dp_rx_peek_trapped_packet(soc, vdev);
1700 
1701 	QDF_NBUF_CB_RX_PEER_ID(nbuf) = txrx_peer->peer_id;
1702 	if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf, link_id))
1703 		return;
1704 
1705 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
1706 
1707 	/*
1708 	 * Indicate EAPOL frame to stack only when vap mac address
1709 	 * matches the destination address.
1710 	 */
1711 	is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
1712 	if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1713 		qdf_ether_header_t *eh =
1714 			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1715 		if (dp_rx_err_match_dhost(eh, vdev)) {
1716 			DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1,
1717 					 qdf_nbuf_len(nbuf));
1718 
1719 			/*
1720 			 * Update the protocol tag in SKB based on
1721 			 * CCE metadata.
1722 			 */
1723 			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1724 						  EXCEPTION_DEST_RING_ID,
1725 						  true, true);
1726 			/* Update the flow tag in SKB based on FSE metadata */
1727 			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
1728 					      true);
1729 			DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
1730 						  qdf_nbuf_len(nbuf),
1731 						  vdev->pdev->enhanced_stats_en);
1732 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
1733 						      rx.rx_success, 1,
1734 						      qdf_nbuf_len(nbuf),
1735 						      link_id);
1736 			qdf_nbuf_set_exc_frame(nbuf, 1);
1737 			qdf_nbuf_set_next(nbuf, NULL);
1738 
1739 			dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf,
1740 						    NULL, is_eapol);
1741 
1742 			return;
1743 		}
1744 	}
1745 
1746 drop_nbuf:
1747 
1748 	DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
1749 		      err_src == HAL_RX_WBM_ERR_SRC_REO);
1750 	DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
1751 		      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);
1752 
1753 	dp_rx_nbuf_free(nbuf);
1754 }
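
/*
 * Summary of the routed-frame handling above (comment-only sketch; it
 * mirrors dp_rx_err_route_hdl() and is not compiled):
 *
 *   set chfrag/da/sa bits from the TLVs and validate pkt_len;
 *   if (!msdu_done || !txrx_peer || !vdev)
 *           drop and count per err_src (REO vs RXDMA);
 *   else if (IGMP frame)
 *           consumed by dp_rx_igmp_handler();
 *   else if ((EAPOL || WAPI) && dest mac matches the vdev)
 *           tag, count and hand off via dp_rx_deliver_to_osif_stack();
 *   else
 *           drop and count per err_src;
 */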
1755 
1756 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1757 
1758 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
1759 /**
1760  * dp_rx_link_cookie_check() - Validate link desc cookie
1761  * @ring_desc: ring descriptor
1762  *
1763  * Return: qdf status
1764  */
1765 static inline QDF_STATUS
1766 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1767 {
1768 	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
1769 		return QDF_STATUS_E_FAILURE;
1770 
1771 	return QDF_STATUS_SUCCESS;
1772 }
1773 
1774 /**
1775  * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
1776  * @ring_desc: ring descriptor
1777  *
1778  * Return: None
1779  */
1780 static inline void
1781 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1782 {
1783 	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
1784 }
1785 #else
1786 static inline QDF_STATUS
1787 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1788 {
1789 	return QDF_STATUS_SUCCESS;
1790 }
1791 
1792 static inline void
1793 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1794 {
1795 }
1796 #endif
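
/*
 * Usage note (illustrative only): the cookie helpers above are used
 * from the error-ring reap loop roughly as follows --
 *
 *   if (QDF_IS_STATUS_ERROR(dp_rx_link_cookie_check(ring_desc)))
 *           break;                           -- stale link cookie
 *   ... process the link descriptor ...
 *   dp_rx_link_cookie_invalidate(ring_desc); -- mark as consumed
 *
 * so a descriptor that is accidentally re-read is caught before it is
 * processed twice.
 */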
1797 
1798 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1799 /**
1800  * dp_rx_err_ring_record_entry() - Record rx err ring history
1801  * @soc: Datapath soc structure
1802  * @paddr: paddr of the buffer in RX err ring
1803  * @sw_cookie: SW cookie of the buffer in RX err ring
1804  * @rbm: Return buffer manager of the buffer in RX err ring
1805  *
1806  * Return: None
1807  */
1808 static inline void
1809 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1810 			    uint32_t sw_cookie, uint8_t rbm)
1811 {
1812 	struct dp_buf_info_record *record;
1813 	uint32_t idx;
1814 
1815 	if (qdf_unlikely(!soc->rx_err_ring_history))
1816 		return;
1817 
1818 	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
1819 					DP_RX_ERR_HIST_MAX);
1820 
1821 	/* No NULL check needed for record since it's an array */
1822 	record = &soc->rx_err_ring_history->entry[idx];
1823 
1824 	record->timestamp = qdf_get_log_timestamp();
1825 	record->hbi.paddr = paddr;
1826 	record->hbi.sw_cookie = sw_cookie;
1827 	record->hbi.rbm = rbm;
1828 }
1829 #else
1830 static inline void
1831 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1832 			    uint32_t sw_cookie, uint8_t rbm)
1833 {
1834 }
1835 #endif
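
/*
 * The rx_err_ring_history above is a fixed-size circular log. A
 * minimal sketch of the wrapping index update, assuming
 * dp_history_get_next_index() behaves as an atomically incremented
 * counter reduced modulo the history depth (illustrative only; the
 * real helper is shared with the other ring-history loggers):
 *
 *   idx = qdf_atomic_inc_return(index);
 *   return (idx - 1) % DP_RX_ERR_HIST_MAX;
 */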
1836 
1837 #if defined(HANDLE_RX_REROUTE_ERR) || defined(REO_EXCEPTION_MSDU_WAR)
1838 static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
1839 				     hal_ring_desc_t ring_desc)
1840 {
1841 	int lmac_id = DP_INVALID_LMAC_ID;
1842 	struct dp_rx_desc *rx_desc;
1843 	struct hal_buf_info hbi;
1844 	struct dp_pdev *pdev;
1845 	struct rx_desc_pool *rx_desc_pool;
1846 
1847 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
1848 
1849 	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie);
1850 
1851 	/* sanity */
1852 	if (!rx_desc) {
1853 		DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1);
1854 		goto assert_return;
1855 	}
1856 
1857 	if (!rx_desc->nbuf)
1858 		goto assert_return;
1859 
1860 	dp_rx_err_ring_record_entry(soc, hbi.paddr,
1861 				    hbi.sw_cookie,
1862 				    hal_rx_ret_buf_manager_get(soc->hal_soc,
1863 							       ring_desc));
1864 	if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) {
1865 		DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1866 		rx_desc->in_err_state = 1;
1867 		goto assert_return;
1868 	}
1869 
1870 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
1871 	/* After this point the rx_desc and nbuf are valid */
1872 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
1873 	qdf_assert_always(!rx_desc->unmapped);
1874 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
1875 	rx_desc->unmapped = 1;
1876 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1877 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
1878 				    rx_desc->pool_id);
1879 
1880 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
1881 	lmac_id = rx_desc->pool_id;
1882 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1883 				    &pdev->free_list_tail,
1884 				    rx_desc);
1885 	return lmac_id;
1886 
1887 assert_return:
1888 	qdf_assert(0);
1889 	return lmac_id;
1890 }
1891 #endif
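
/*
 * dp_rx_err_handle_msdu_buf() above resolves an unexpected MSDU_BUF
 * entry back to its software state in four steps (comment summary):
 *
 *   1. sw_cookie -> rx_desc via dp_rx_cookie_2_va_rxdma_buf();
 *   2. sanity: rx_desc, rx_desc->nbuf and the ring paddr must all
 *      match, otherwise the descriptor is marked in_err_state;
 *   3. unmap the nbuf (under the IPA SMMU lock) and return it to the
 *      buffer pool;
 *   4. queue the rx_desc on the pdev free list and report the pool id
 *      as the lmac id so the caller can account for replenishment.
 */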
1892 
1893 #ifdef HANDLE_RX_REROUTE_ERR
1894 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
1895 {
1896 	int ret;
1897 	uint64_t cur_time_stamp;
1898 
1899 	DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1);
1900 
1901 	/* Recover if overall error count exceeds threshold */
1902 	if (soc->stats.rx.err.reo_err_msdu_buf_rcved >
1903 	    DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) {
1904 		dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
1905 		       soc->stats.rx.err.reo_err_msdu_buf_rcved,
1906 		       soc->rx_route_err_start_pkt_ts);
1907 		qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR);
1908 	}
1909 
1910 	cur_time_stamp = qdf_get_log_timestamp_usecs();
1911 	if (!soc->rx_route_err_start_pkt_ts)
1912 		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
1913 
1914 	/* Recover if the error count within the timeout window exceeds threshold */
1915 	if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) >
1916 						DP_RX_ERR_ROUTE_TIMEOUT_US) {
1917 		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
1918 
1919 		if (soc->rx_route_err_in_window >
1920 		    DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) {
1921 			qdf_trigger_self_recovery(NULL,
1922 						  QDF_RX_REG_PKT_ROUTE_ERR);
1923 			dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
1924 			       soc->stats.rx.err.reo_err_msdu_buf_rcved,
1925 			       soc->rx_route_err_start_pkt_ts);
1926 		} else {
1927 			soc->rx_route_err_in_window = 1;
1928 		}
1929 	} else {
1930 		soc->rx_route_err_in_window++;
1931 	}
1932 
1933 	ret = dp_rx_err_handle_msdu_buf(soc, ring_desc);
1934 
1935 	return ret;
1936 }
1937 #else /* HANDLE_RX_REROUTE_ERR */
1938 #ifdef REO_EXCEPTION_MSDU_WAR
1939 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
1940 {
1941 	return dp_rx_err_handle_msdu_buf(soc, ring_desc);
1942 }
1943 #else	/* REO_EXCEPTION_MSDU_WAR */
1944 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
1945 {
1946 	qdf_assert_always(0);
1947 
1948 	return DP_INVALID_LMAC_ID;
1949 }
1950 #endif /* REO_EXCEPTION_MSDU_WAR */
1951 #endif /* HANDLE_RX_REROUTE_ERR */
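
/*
 * Recovery policy implemented by dp_rx_err_exception() when
 * HANDLE_RX_REROUTE_ERR is enabled (plain-C sketch, not compiled;
 * variable names are shorthand for the soc fields used above):
 *
 *   total++;
 *   if (total > DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD)
 *           qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR);
 *
 *   now = qdf_get_log_timestamp_usecs();
 *   if (now - window_start > DP_RX_ERR_ROUTE_TIMEOUT_US) {
 *           window_start = now;
 *           if (in_window > DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT)
 *                   qdf_trigger_self_recovery(NULL,
 *                                             QDF_RX_REG_PKT_ROUTE_ERR);
 *           else
 *                   in_window = 1;
 *   } else {
 *           in_window++;
 *   }
 */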
1952 
1953 #ifdef WLAN_MLO_MULTI_CHIP
1954 /**
1955  * dp_idle_link_bm_id_check() - war for HW issue
1956  *
1957  * @soc: DP SOC handle
1958  * @rbm: idle link RBM value
1959  * @ring_desc: reo error link descriptor
1960  *
1961  * This is a WAR for a HW issue where a link descriptor
1962  * belonging to a partner soc is received because packets are
1963  * wrongly interpreted as fragments
1964  *
1965  * Return: true in case link desc is consumed
1966  *	   false in other cases
1967  */
1968 static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
1969 				     void *ring_desc)
1970 {
1971 	struct dp_soc *replenish_soc = NULL;
1972 
1973 	/* return false in case the link desc belongs to the same soc */
1974 	if (rbm == soc->idle_link_bm_id)
1975 		return false;
1976 
1977 	if (soc->arch_ops.dp_soc_get_by_idle_bm_id)
1978 		replenish_soc =
1979 			soc->arch_ops.dp_soc_get_by_idle_bm_id(soc, rbm);
1980 
1981 	qdf_assert_always(replenish_soc);
1982 
1983 	/*
1984 	 * For the WIN use case only fragment packets are expected in
1985 	 * this ring, and since fragmentation is not supported for MLO
1986 	 * we should not see link descriptors from another soc.
1987 	 *
1988 	 * Drop all packets from partner soc and replenish the descriptors
1989 	 */
1990 	dp_handle_wbm_internal_error(replenish_soc, ring_desc,
1991 				     HAL_WBM_RELEASE_RING_2_DESC_TYPE);
1992 
1993 	return true;
1994 }
1995 #else
1996 static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
1997 				     void *ring_desc)
1998 {
1999 	return false;
2000 }
2001 #endif
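
/*
 * Note on the WAR above: for a single-chip soc the RBM in the link
 * descriptor always equals soc->idle_link_bm_id, so the check is a
 * no-op. With WLAN_MLO_MULTI_CHIP, a descriptor carrying a partner
 * soc's RBM is handed to that soc via dp_handle_wbm_internal_error(),
 * which frees the buffers and replenishes the partner's descriptors,
 * and the caller treats the entry as consumed (counted as
 * rx.err.invalid_link_cookie).
 */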
2002 
2003 static inline void
2004 dp_rx_err_dup_frame(struct dp_soc *soc,
2005 		    struct hal_rx_mpdu_desc_info *mpdu_desc_info)
2006 {
2007 	struct dp_txrx_peer *txrx_peer = NULL;
2008 	dp_txrx_ref_handle txrx_ref_handle = NULL;
2009 	uint16_t peer_id;
2010 
2011 	peer_id =
2012 		dp_rx_peer_metadata_peer_id_get(soc,
2013 						mpdu_desc_info->peer_meta_data);
2014 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
2015 						   &txrx_ref_handle,
2016 						   DP_MOD_ID_RX_ERR);
2017 	if (txrx_peer) {
2018 		DP_STATS_INC(txrx_peer->vdev, rx.duplicate_count, 1);
2019 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
2020 	}
2021 }
2022 
2023 uint32_t
2024 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2025 		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
2026 {
2027 	hal_ring_desc_t ring_desc;
2028 	hal_soc_handle_t hal_soc;
2029 	uint32_t count = 0;
2030 	uint32_t rx_bufs_used = 0;
2031 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
2032 	uint8_t mac_id = 0;
2033 	uint8_t buf_type;
2034 	uint8_t err_status;
2035 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
2036 	struct hal_buf_info hbi;
2037 	struct dp_pdev *dp_pdev;
2038 	struct dp_srng *dp_rxdma_srng;
2039 	struct rx_desc_pool *rx_desc_pool;
2040 	void *link_desc_va;
2041 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
2042 	uint16_t num_msdus;
2043 	struct dp_rx_desc *rx_desc = NULL;
2044 	QDF_STATUS status;
2045 	bool ret;
2046 	uint32_t error_code = 0;
2047 	bool sw_pn_check_needed;
2048 	int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
2049 	int i, rx_bufs_reaped_total;
2050 	uint16_t peer_id;
2051 	struct dp_txrx_peer *txrx_peer = NULL;
2052 	dp_txrx_ref_handle txrx_ref_handle = NULL;
2053 
2054 	/* Debug -- Remove later */
2055 	qdf_assert(soc && hal_ring_hdl);
2056 
2057 	hal_soc = soc->hal_soc;
2058 
2059 	/* Debug -- Remove later */
2060 	qdf_assert(hal_soc);
2061 
2062 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
2063 
2064 		/* TODO */
2065 		/*
2066 		 * Need API to convert from hal_ring pointer to
2067 		 * Ring Type / Ring Id combo
2068 		 */
2069 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
2070 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc,
2071 			      hal_ring_hdl);
2072 		goto done;
2073 	}
2074 
2075 	while (qdf_likely(quota-- && (ring_desc =
2076 				hal_srng_dst_peek(hal_soc,
2077 						  hal_ring_hdl)))) {
2078 
2079 		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
2080 		err_status = hal_rx_err_status_get(hal_soc, ring_desc);
2081 		buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc);
2082 
2083 		if (err_status == HAL_REO_ERROR_DETECTED)
2084 			error_code = hal_rx_get_reo_error_code(hal_soc,
2085 							       ring_desc);
2086 
2087 		qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0);
2088 		sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
2089 								  err_status,
2090 								  error_code);
2091 		if (!sw_pn_check_needed) {
2092 			/*
2093 			 * MPDU desc info will be present in the REO desc
2094 			 * only in the below scenarios
2095 			 * 1) pn_in_dest_disabled:  always
2096 			 * 2) pn_in_dest enabled: All cases except 2K-jump
2097 			 *			and OOR errors
2098 			 */
2099 			hal_rx_mpdu_desc_info_get(hal_soc, ring_desc,
2100 						  &mpdu_desc_info);
2101 		}
2102 
2103 		if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0)
2104 			goto next_entry;
2105 
2106 		/*
2107 		 * For REO error ring, only MSDU LINK DESC is expected.
2108 		 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
2109 		 */
2110 		if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) {
2111 			int lmac_id;
2112 
2113 			lmac_id = dp_rx_err_exception(soc, ring_desc);
2114 			if (lmac_id >= 0)
2115 				rx_bufs_reaped[lmac_id] += 1;
2116 			goto next_entry;
2117 		}
2118 
2119 		hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
2120 					  &hbi);
2121 		/*
2122 		 * check for the magic number in the sw cookie
2123 		 */
2124 		qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) &
2125 					soc->link_desc_id_start);
2126 
2127 		if (dp_idle_link_bm_id_check(soc, hbi.rbm, ring_desc)) {
2128 			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
2129 			goto next_entry;
2130 		}
2131 
2132 		status = dp_rx_link_cookie_check(ring_desc);
2133 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2134 			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
2135 			break;
2136 		}
2137 
2138 		hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2139 		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
2140 		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
2141 				     &num_msdus);
2142 		if (!num_msdus ||
2143 		    !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) {
2144 			dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x",
2145 					  num_msdus, msdu_list.sw_cookie[0]);
2146 			dp_rx_link_desc_return(soc, ring_desc,
2147 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
2148 			goto next_entry;
2149 		}
2150 
2151 		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
2152 					    msdu_list.sw_cookie[0],
2153 					    msdu_list.rbm[0]);
2154 		// TODO - BE- Check if the RBM is to be checked for all chips
2155 		if (qdf_unlikely((msdu_list.rbm[0] !=
2156 					dp_rx_get_rx_bm_id(soc)) &&
2157 				 (msdu_list.rbm[0] !=
2158 				  soc->idle_link_bm_id) &&
2159 				 (msdu_list.rbm[0] !=
2160 					dp_rx_get_defrag_bm_id(soc)))) {
2161 			/* TODO */
2162 			/* Call appropriate handler */
2163 			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2164 				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
2165 				dp_rx_err_err("%pK: Invalid RBM %d",
2166 					      soc, msdu_list.rbm[0]);
2167 			}
2168 
2169 			/* Return link descriptor through WBM ring (SW2WBM)*/
2170 			dp_rx_link_desc_return(soc, ring_desc,
2171 					HAL_BM_ACTION_RELEASE_MSDU_LIST);
2172 			goto next_entry;
2173 		}
2174 
2175 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
2176 						soc,
2177 						msdu_list.sw_cookie[0]);
2178 		qdf_assert_always(rx_desc);
2179 
2180 		mac_id = rx_desc->pool_id;
2181 
2182 		if (sw_pn_check_needed) {
2183 			goto process_reo_error_code;
2184 		}
2185 
2186 		if (mpdu_desc_info.bar_frame) {
2187 			qdf_assert_always(mpdu_desc_info.msdu_count == 1);
2188 
2189 			dp_rx_bar_frame_handle(soc, ring_desc, rx_desc,
2190 					       &mpdu_desc_info, err_status,
2191 					       error_code);
2192 
2193 			rx_bufs_reaped[mac_id] += 1;
2194 			goto next_entry;
2195 		}
2196 
2197 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
2198 			/*
2199 			 * Only one msdu per link desc is handled for the
2200 			 * fragmented case. Drop the msdus and release the link
2201 			 * desc back if the link desc holds more than one msdu.
2202 			 */
2203 			if (qdf_unlikely(num_msdus > 1)) {
2204 				count = dp_rx_msdus_drop(soc, ring_desc,
2205 							 &mpdu_desc_info,
2206 							 &mac_id, quota);
2207 				rx_bufs_reaped[mac_id] += count;
2208 				goto next_entry;
2209 			}
2210 
2211 			/*
2212 			 * This is an unlikely scenario where the host is reaping
2213 			 * a descriptor which it already reaped just a while ago
2214 			 * but is yet to replenish it back to HW.
2215 			 * In this case the host will dump the last 128 descriptors
2216 			 * including the software descriptor rx_desc and assert.
2217 			 */
2218 
2219 			if (qdf_unlikely(!rx_desc->in_use)) {
2220 				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
2221 				dp_info_rl("Reaping rx_desc not in use!");
2222 				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2223 							   ring_desc, rx_desc);
2224 				/* ignore duplicate RX desc and continue */
2225 				/* Pop out the descriptor */
2226 				goto next_entry;
2227 			}
2228 
2229 			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
2230 							    msdu_list.paddr[0]);
2231 			if (!ret) {
2232 				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2233 				rx_desc->in_err_state = 1;
2234 				goto next_entry;
2235 			}
2236 
2237 			count = dp_rx_frag_handle(soc,
2238 						  ring_desc, &mpdu_desc_info,
2239 						  rx_desc, &mac_id, quota);
2240 
2241 			rx_bufs_reaped[mac_id] += count;
2242 			DP_STATS_INC(soc, rx.rx_frags, 1);
2243 
2244 			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
2245 					mpdu_desc_info.peer_meta_data);
2246 			txrx_peer =
2247 				dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
2248 							       &txrx_ref_handle,
2249 							       DP_MOD_ID_RX_ERR);
2250 			if (txrx_peer) {
2251 				DP_STATS_INC(txrx_peer->vdev,
2252 					     rx.fragment_count, 1);
2253 				dp_txrx_peer_unref_delete(txrx_ref_handle,
2254 							  DP_MOD_ID_RX_ERR);
2255 			}
2256 			goto next_entry;
2257 		}
2258 
2259 process_reo_error_code:
2260 		/*
2261 		 * Expect REO errors to be handled after this point
2262 		 */
2263 		qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED);
2264 
2265 		dp_info_rl("Got pkt with REO ERROR: %d", error_code);
2266 
2267 		switch (error_code) {
2268 		case HAL_REO_ERR_PN_CHECK_FAILED:
2269 		case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
2270 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2271 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2272 			if (dp_pdev)
2273 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
2274 			count = dp_rx_pn_error_handle(soc,
2275 						      ring_desc,
2276 						      &mpdu_desc_info, &mac_id,
2277 						      quota);
2278 
2279 			rx_bufs_reaped[mac_id] += count;
2280 			break;
2281 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2282 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
2283 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2284 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
2285 		case HAL_REO_ERR_BAR_FRAME_OOR:
2286 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2287 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2288 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2289 			if (dp_pdev)
2290 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
2291 			count = dp_rx_reo_err_entry_process(
2292 					soc,
2293 					ring_desc,
2294 					&mpdu_desc_info,
2295 					link_desc_va,
2296 					error_code);
2297 
2298 			rx_bufs_reaped[mac_id] += count;
2299 			break;
2300 		case HAL_REO_ERR_NON_BA_DUPLICATE:
2301 			dp_rx_err_dup_frame(soc, &mpdu_desc_info);
2302 			fallthrough;
2303 		case HAL_REO_ERR_QUEUE_DESC_INVALID:
2304 		case HAL_REO_ERR_AMPDU_IN_NON_BA:
2305 		case HAL_REO_ERR_BA_DUPLICATE:
2306 		case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION:
2307 		case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN:
2308 		case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET:
2309 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2310 			count = dp_rx_msdus_drop(soc, ring_desc,
2311 						 &mpdu_desc_info,
2312 						 &mac_id, quota);
2313 			rx_bufs_reaped[mac_id] += count;
2314 			break;
2315 		default:
2316 			/* Assert if unexpected error type */
2317 			qdf_assert_always(0);
2318 		}
2319 next_entry:
2320 		dp_rx_link_cookie_invalidate(ring_desc);
2321 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2322 
2323 		rx_bufs_reaped_total = 0;
2324 		for (i = 0; i < MAX_PDEV_CNT; i++)
2325 			rx_bufs_reaped_total += rx_bufs_reaped[i];
2326 
2327 		if (dp_rx_reap_loop_pkt_limit_hit(soc, rx_bufs_reaped_total,
2328 						  max_reap_limit))
2329 			break;
2330 	}
2331 
2332 done:
2333 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2334 
2335 	if (soc->rx.flags.defrag_timeout_check) {
2336 		uint32_t now_ms =
2337 			qdf_system_ticks_to_msecs(qdf_system_ticks());
2338 
2339 		if (now_ms >= soc->rx.defrag.next_flush_ms)
2340 			dp_rx_defrag_waitlist_flush(soc);
2341 	}
2342 
2343 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2344 		if (rx_bufs_reaped[mac_id]) {
2345 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2346 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2347 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
2348 
2349 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2350 						rx_desc_pool,
2351 						rx_bufs_reaped[mac_id],
2352 						&dp_pdev->free_list_head,
2353 						&dp_pdev->free_list_tail,
2354 						false);
2355 			rx_bufs_used += rx_bufs_reaped[mac_id];
2356 		}
2357 	}
2358 
2359 	return rx_bufs_used; /* Assume no scale factor for now */
2360 }
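
/*
 * Replenish accounting in dp_rx_err_process() (comment summary): every
 * handler in the reap loop adds the number of buffers it freed to
 * rx_bufs_reaped[] indexed by the rx_desc pool id (== lmac id). Once
 * the loop exits, each non-zero entry drives one
 * dp_rx_buffers_replenish() call on that mac's refill ring, e.g. a
 * hypothetical run that frees 3 buffers from pool 0 and 1 from pool 1
 * results in two replenish calls with counts 3 and 1.
 */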
2361 
2362 #ifdef DROP_RXDMA_DECRYPT_ERR
2363 /**
2364  * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
2365  *
2366  * Return: true if rxdma decrypt err frames are handled and false otherwise
2367  */
2368 static inline bool dp_handle_rxdma_decrypt_err(void)
2369 {
2370 	return false;
2371 }
2372 #else
2373 static inline bool dp_handle_rxdma_decrypt_err(void)
2374 {
2375 	return true;
2376 }
2377 #endif
2378 
2379 void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
2380 {
2381 	if (soc->wbm_sg_last_msdu_war) {
2382 		uint32_t len;
2383 		qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail;
2384 
2385 		len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
2386 						     qdf_nbuf_data(temp));
2387 		temp = soc->wbm_sg_param.wbm_sg_nbuf_head;
2388 		while (temp) {
2389 			QDF_NBUF_CB_RX_PKT_LEN(temp) = len;
2390 			temp = temp->next;
2391 		}
2392 	}
2393 }
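
/*
 * WAR note (illustrative sizes): on affected targets only the last
 * buffer of an SG chain carries the full MSDU length in its TLVs, so
 * the loop above copies that length into QDF_NBUF_CB_RX_PKT_LEN of
 * every nbuf in wbm_sg_param. E.g. for a hypothetical 7000-byte MSDU
 * split across three buffers, all three nbufs end up reporting 7000,
 * so the downstream SG handling sees a consistent total length.
 */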
2394 
2395 #ifdef RX_DESC_DEBUG_CHECK
2396 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2397 					    hal_ring_handle_t hal_ring_hdl,
2398 					    hal_ring_desc_t ring_desc,
2399 					    struct dp_rx_desc *rx_desc)
2400 {
2401 	struct hal_buf_info hbi;
2402 
2403 	hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2404 	/* Sanity check for possible buffer paddr corruption */
2405 	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
2406 		return QDF_STATUS_SUCCESS;
2407 
2408 	hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc);
2409 
2410 	return QDF_STATUS_E_FAILURE;
2411 }
2412 
2413 #else
2414 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2415 					    hal_ring_handle_t hal_ring_hdl,
2416 					    hal_ring_desc_t ring_desc,
2417 					    struct dp_rx_desc *rx_desc)
2418 {
2419 	return QDF_STATUS_SUCCESS;
2420 }
2421 #endif
2422 bool
2423 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
2424 {
2425 	/*
2426 	 * Currently only the Null Queue and Unencrypted error handlers support
2427 	 * SG. The other error handlers do not deal with SG buffers.
2428 	 */
2429 	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
2430 	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
2431 	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
2432 	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
2433 		return true;
2434 
2435 	return false;
2436 }
2437 
2438 #ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
2439 void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2440 			      qdf_nbuf_t nbuf)
2441 {
2442 	/*
2443 	 * In the fast-recycle case the TX driver can skip invalidating
2444 	 * the buffer for SFE forwarding, so the TLV headers must be
2445 	 * invalidated here after writing to this location.
2446 	 */
2447 	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2448 				      (void *)(nbuf->data +
2449 					       soc->rx_pkt_tlv_size +
2450 					       L3_HEADER_PAD));
2451 }
2452 #else
2453 void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2454 			      qdf_nbuf_t nbuf)
2455 {
2456 }
2457 #endif
2458 
2459 #ifndef CONFIG_NBUF_AP_PLATFORM
2460 static inline uint16_t
2461 dp_rx_get_peer_id(struct dp_soc *soc,
2462 		  uint8_t *rx_tlv_hdr,
2463 		  qdf_nbuf_t nbuf)
2464 {
2465 	uint32_t peer_mdata = 0;
2466 
2467 	peer_mdata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
2468 						   rx_tlv_hdr);
2469 	return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
2470 }
2471 
2472 static inline void
2473 dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
2474 				 qdf_nbuf_t nbuf,
2475 				 uint8_t *rx_tlv_hdr,
2476 				 union hal_wbm_err_info_u *wbm_err)
2477 {
2478 	hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr,
2479 				      (uint8_t *)&wbm_err->info,
2480 				      sizeof(union hal_wbm_err_info_u));
2481 }
2482 
2483 void
2484 dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
2485 			       qdf_nbuf_t nbuf,
2486 			       union hal_wbm_err_info_u wbm_err)
2487 {
2488 	hal_rx_priv_info_set_in_tlv(soc->hal_soc,
2489 				    qdf_nbuf_data(nbuf),
2490 				    (uint8_t *)&wbm_err.info,
2491 				    sizeof(union hal_wbm_err_info_u));
2492 }
2493 #else
2494 static inline uint16_t
2495 dp_rx_get_peer_id(struct dp_soc *soc,
2496 		  uint8_t *rx_tlv_hdr,
2497 		  qdf_nbuf_t nbuf)
2498 {
2499 	uint32_t peer_mdata = QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf);
2500 
2501 	return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
2502 }
2503 
2504 static inline void
2505 dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
2506 				 qdf_nbuf_t nbuf,
2507 				 uint8_t *rx_tlv_hdr,
2508 				 union hal_wbm_err_info_u *wbm_err)
2509 {
2510 	wbm_err->info = QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf);
2511 }
2512 
2513 void
2514 dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
2515 			       qdf_nbuf_t nbuf,
2516 			       union hal_wbm_err_info_u wbm_err)
2517 {
2518 	QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf) = wbm_err.info;
2519 }
2520 #endif /* CONFIG_NBUF_AP_PLATFORM */
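
/*
 * The two variants above select where the per-nbuf WBM error info and
 * peer id travel between the reap and processing stages (comment-only
 * contrast):
 *
 *   default build:           wbm_err <-> hal_rx_priv_info_{set,get}_in_tlv()
 *                            peer id  <-  peer metadata in the RX TLVs
 *   CONFIG_NBUF_AP_PLATFORM: wbm_err <-> QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf)
 *                            peer id  <-  QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf)
 *
 * i.e. AP platforms keep the state in the nbuf control block and avoid
 * an extra read of the RX TLV area.
 */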
2521 
2522 uint32_t
2523 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2524 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
2525 {
2526 	hal_soc_handle_t hal_soc;
2527 	uint32_t rx_bufs_used = 0;
2528 	struct dp_pdev *dp_pdev;
2529 	uint8_t *rx_tlv_hdr;
2530 	bool is_tkip_mic_err;
2531 	qdf_nbuf_t nbuf_head = NULL;
2532 	qdf_nbuf_t nbuf, next;
2533 	union hal_wbm_err_info_u wbm_err = { 0 };
2534 	uint8_t pool_id;
2535 	uint8_t tid = 0;
2536 	uint8_t link_id = 0;
2537 
2538 	/* Debug -- Remove later */
2539 	qdf_assert(soc && hal_ring_hdl);
2540 
2541 	hal_soc = soc->hal_soc;
2542 
2543 	/* Debug -- Remove later */
2544 	qdf_assert(hal_soc);
2545 
2546 	nbuf_head = soc->arch_ops.dp_rx_wbm_err_reap_desc(int_ctx, soc,
2547 							  hal_ring_hdl,
2548 							  quota,
2549 							  &rx_bufs_used);
2550 	nbuf = nbuf_head;
2551 	while (nbuf) {
2552 		struct dp_txrx_peer *txrx_peer;
2553 		struct dp_peer *peer;
2554 		uint16_t peer_id;
2555 		uint8_t err_code;
2556 		uint8_t *tlv_hdr;
2557 		dp_txrx_ref_handle txrx_ref_handle = NULL;
2558 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
2559 
2560 		/*
2561 		 * retrieve the wbm desc info from nbuf CB/TLV, so we can
2562 		 * handle error cases appropriately
2563 		 */
2564 		dp_rx_get_wbm_err_info_from_nbuf(soc, nbuf,
2565 						 rx_tlv_hdr,
2566 						 &wbm_err);
2567 
2568 		peer_id = dp_rx_get_peer_id(soc,
2569 					    rx_tlv_hdr,
2570 					    nbuf);
2571 		txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
2572 							   &txrx_ref_handle,
2573 							   DP_MOD_ID_RX_ERR);
2574 
2575 		if (!txrx_peer)
2576 			dp_info_rl("peer is null peer_id %u err_src %u, "
2577 				   "REO: push_rsn %u err_code %u, "
2578 				   "RXDMA: push_rsn %u err_code %u",
2579 				   peer_id, wbm_err.info_bit.wbm_err_src,
2580 				   wbm_err.info_bit.reo_psh_rsn,
2581 				   wbm_err.info_bit.reo_err_code,
2582 				   wbm_err.info_bit.rxdma_psh_rsn,
2583 				   wbm_err.info_bit.rxdma_err_code);
2584 
2585 		/* Set queue_mapping in nbuf to 0 */
2586 		dp_set_rx_queue(nbuf, 0);
2587 
2588 		next = nbuf->next;
2589 		/*
2590 		 * Form the SG list for msdu-continuation buffers;
2591 		 * QCN9000 supports this.
2592 		 */
2593 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
2594 			nbuf = dp_rx_sg_create(soc, nbuf);
2595 			next = nbuf->next;
2596 			/*
2597 			 * SG error handling is not done correctly,
2598 			 * drop SG frames for now.
2599 			 */
2600 			dp_rx_nbuf_free(nbuf);
2601 			dp_info_rl("scattered msdu dropped");
2602 			nbuf = next;
2603 			if (txrx_peer)
2604 				dp_txrx_peer_unref_delete(txrx_ref_handle,
2605 							  DP_MOD_ID_RX_ERR);
2606 			continue;
2607 		}
2608 
2609 		dp_rx_nbuf_set_link_id_from_tlv(soc, rx_tlv_hdr, nbuf);
2610 
2611 		pool_id = wbm_err.info_bit.pool_id;
2612 		dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2613 
2614 		if (dp_pdev && dp_pdev->link_peer_stats &&
2615 		    txrx_peer && txrx_peer->is_mld_peer) {
2616 			link_id = dp_rx_get_stats_arr_idx_from_link_id(
2617 								nbuf,
2618 								txrx_peer);
2619 		} else {
2620 			link_id = 0;
2621 		}
2622 
2623 		if (wbm_err.info_bit.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
2624 			if (wbm_err.info_bit.reo_psh_rsn
2625 					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {
2626 
2627 				DP_STATS_INC(soc,
2628 					rx.err.reo_error
2629 					[wbm_err.info_bit.reo_err_code], 1);
2630 				/* increment @pdev level */
2631 				if (dp_pdev)
2632 					DP_STATS_INC(dp_pdev, err.reo_error,
2633 						     1);
2634 
2635 				switch (wbm_err.info_bit.reo_err_code) {
2636 				/*
2637 				 * Handling for packets which have NULL REO
2638 				 * queue descriptor
2639 				 */
2640 				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2641 					pool_id = wbm_err.info_bit.pool_id;
2642 					soc->arch_ops.dp_rx_null_q_desc_handle(
2643 								soc, nbuf,
2644 								rx_tlv_hdr,
2645 								pool_id,
2646 								txrx_peer,
2647 								FALSE,
2648 								link_id);
2649 					break;
2650 				/* TODO */
2651 				/* Add per error code accounting */
2652 				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2653 					if (txrx_peer)
2654 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2655 									  rx.err.jump_2k_err,
2656 									  1,
2657 									  link_id);
2658 
2659 					pool_id = wbm_err.info_bit.pool_id;
2660 
2661 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2662 									   rx_tlv_hdr)) {
2663 						tid =
2664 						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2665 					}
2666 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2667 					hal_rx_msdu_start_msdu_len_get(
2668 						soc->hal_soc, rx_tlv_hdr);
2669 					nbuf->next = NULL;
2670 					dp_2k_jump_handle(soc, nbuf,
2671 							  rx_tlv_hdr,
2672 							  peer_id, tid);
2673 					break;
2674 				case HAL_REO_ERR_REGULAR_FRAME_OOR:
2675 					if (txrx_peer)
2676 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2677 									  rx.err.oor_err,
2678 									  1,
2679 									  link_id);
2680 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2681 									   rx_tlv_hdr)) {
2682 						tid =
2683 							hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2684 					}
2685 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2686 						hal_rx_msdu_start_msdu_len_get(
2687 						soc->hal_soc, rx_tlv_hdr);
2688 					nbuf->next = NULL;
2689 					dp_rx_oor_handle(soc, nbuf,
2690 							 peer_id,
2691 							 rx_tlv_hdr);
2692 					break;
2693 				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2694 				case HAL_REO_ERR_BAR_FRAME_OOR:
2695 					peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
2696 					if (peer) {
2697 						dp_rx_err_handle_bar(soc, peer,
2698 								     nbuf);
2699 						dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
2700 					}
2701 					dp_rx_nbuf_free(nbuf);
2702 					break;
2703 
2704 				case HAL_REO_ERR_PN_CHECK_FAILED:
2705 				case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
2706 					if (txrx_peer)
2707 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2708 									  rx.err.pn_err,
2709 									  1,
2710 									  link_id);
2711 					dp_rx_nbuf_free(nbuf);
2712 					break;
2713 
2714 				default:
2715 					dp_info_rl("Got pkt with REO ERROR: %d",
2716 						   wbm_err.info_bit.
2717 						   reo_err_code);
2718 					dp_rx_nbuf_free(nbuf);
2719 				}
2720 			} else if (wbm_err.info_bit.reo_psh_rsn
2721 					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
2722 				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
2723 						    rx_tlv_hdr,
2724 						    HAL_RX_WBM_ERR_SRC_REO,
2725 						    link_id);
2726 			} else {
2727 				/* should not enter here */
2728 				dp_rx_err_alert("invalid reo push reason %u",
2729 						wbm_err.info_bit.reo_psh_rsn);
2730 				dp_rx_nbuf_free(nbuf);
2731 				dp_assert_always_internal(0);
2732 			}
2733 		} else if (wbm_err.info_bit.wbm_err_src ==
2734 					HAL_RX_WBM_ERR_SRC_RXDMA) {
2735 			if (wbm_err.info_bit.rxdma_psh_rsn
2736 					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2737 				DP_STATS_INC(soc,
2738 					rx.err.rxdma_error
2739 					[wbm_err.info_bit.rxdma_err_code], 1);
2740 				/* increment @pdev level */
2741 				if (dp_pdev)
2742 					DP_STATS_INC(dp_pdev,
2743 						     err.rxdma_error, 1);
2744 
2745 				switch (wbm_err.info_bit.rxdma_err_code) {
2746 				case HAL_RXDMA_ERR_UNENCRYPTED:
2747 
2748 				case HAL_RXDMA_ERR_WIFI_PARSE:
2749 					if (txrx_peer)
2750 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2751 									  rx.err.rxdma_wifi_parse_err,
2752 									  1,
2753 									  link_id);
2754 
2755 					pool_id = wbm_err.info_bit.pool_id;
2756 					dp_rx_process_rxdma_err(soc, nbuf,
2757 								rx_tlv_hdr,
2758 								txrx_peer,
2759 								wbm_err.
2760 								info_bit.
2761 								rxdma_err_code,
2762 								pool_id,
2763 								link_id);
2764 					break;
2765 
2766 				case HAL_RXDMA_ERR_TKIP_MIC:
2767 					dp_rx_process_mic_error(soc, nbuf,
2768 								rx_tlv_hdr,
2769 								txrx_peer);
2770 					if (txrx_peer)
2771 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2772 									  rx.err.mic_err,
2773 									  1,
2774 									  link_id);
2775 					break;
2776 
2777 				case HAL_RXDMA_ERR_DECRYPT:
2778 					/* All the TKIP-MIC failures are treated as Decrypt Errors
2779 					 * for QCN9224 Targets
2780 					 */
2781 					is_tkip_mic_err = hal_rx_msdu_end_is_tkip_mic_err(hal_soc, rx_tlv_hdr);
2782 
2783 					if (is_tkip_mic_err && txrx_peer) {
2784 						dp_rx_process_mic_error(soc, nbuf,
2785 									rx_tlv_hdr,
2786 									txrx_peer);
2787 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2788 									  rx.err.mic_err,
2789 									  1,
2790 									  link_id);
2791 						break;
2792 					}
2793 
2794 					if (txrx_peer) {
2795 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2796 									  rx.err.decrypt_err,
2797 									  1,
2798 									  link_id);
2799 						dp_rx_nbuf_free(nbuf);
2800 						break;
2801 					}
2802 
2803 					if (!dp_handle_rxdma_decrypt_err()) {
2804 						dp_rx_nbuf_free(nbuf);
2805 						break;
2806 					}
2807 
2808 					pool_id = wbm_err.info_bit.pool_id;
2809 					err_code = wbm_err.info_bit.rxdma_err_code;
2810 					tlv_hdr = rx_tlv_hdr;
2811 					dp_rx_process_rxdma_err(soc, nbuf,
2812 								tlv_hdr, NULL,
2813 								err_code,
2814 								pool_id,
2815 								link_id);
2816 					break;
2817 				case HAL_RXDMA_MULTICAST_ECHO:
2818 					if (txrx_peer)
2819 						DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
2820 									      rx.mec_drop, 1,
2821 									      qdf_nbuf_len(nbuf),
2822 									      link_id);
2823 					dp_rx_nbuf_free(nbuf);
2824 					break;
2825 				case HAL_RXDMA_UNAUTHORIZED_WDS:
2826 					pool_id = wbm_err.info_bit.pool_id;
2827 					err_code = wbm_err.info_bit.rxdma_err_code;
2828 					tlv_hdr = rx_tlv_hdr;
2829 					dp_rx_process_rxdma_err(soc, nbuf,
2830 								tlv_hdr,
2831 								txrx_peer,
2832 								err_code,
2833 								pool_id,
2834 								link_id);
2835 					break;
2836 				default:
2837 					dp_rx_nbuf_free(nbuf);
2838 					dp_err_rl("RXDMA error %d",
2839 						  wbm_err.info_bit.rxdma_err_code);
2840 				}
2841 			} else if (wbm_err.info_bit.rxdma_psh_rsn
2842 					== HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
2843 				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
2844 						    rx_tlv_hdr,
2845 						    HAL_RX_WBM_ERR_SRC_RXDMA,
2846 						    link_id);
2847 			} else if (wbm_err.info_bit.rxdma_psh_rsn
2848 					== HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
2849 				dp_rx_err_err("rxdma push reason %u",
2850 						wbm_err.info_bit.rxdma_psh_rsn);
2851 				DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
2852 				dp_rx_nbuf_free(nbuf);
2853 			} else {
2854 				/* should not enter here */
2855 				dp_rx_err_alert("invalid rxdma push reason %u",
2856 						wbm_err.info_bit.rxdma_psh_rsn);
2857 				dp_rx_nbuf_free(nbuf);
2858 				dp_assert_always_internal(0);
2859 			}
2860 		} else {
2861 			/* Should not come here */
2862 			qdf_assert(0);
2863 		}
2864 
2865 		if (txrx_peer)
2866 			dp_txrx_peer_unref_delete(txrx_ref_handle,
2867 						  DP_MOD_ID_RX_ERR);
2868 
2869 		nbuf = next;
2870 	}
2871 	return rx_bufs_used; /* Assume no scale factor for now */
2872 }
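
/*
 * Dispatch structure of dp_rx_wbm_err_process() (comment-only sketch):
 *
 *   for each nbuf reaped by dp_rx_wbm_err_reap_desc():
 *           read wbm_err + peer id (see helpers above);
 *           if (err_src == REO)
 *                   psh_rsn ERROR -> per-reo-error handlers
 *                   psh_rsn ROUTE -> dp_rx_err_route_hdl()
 *                   otherwise    -> drop + assert
 *           else if (err_src == RXDMA)
 *                   psh_rsn ERROR -> per-rxdma-error handlers
 *                   psh_rsn ROUTE -> dp_rx_err_route_hdl()
 *                   psh_rsn FLUSH -> drop (rx_flush_count)
 *                   otherwise    -> drop + assert
 */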
2873 
2874 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2875 
2876 /**
2877  * dup_desc_dbg() - dump and assert if duplicate rx desc found
2878  *
2879  * @soc: core DP main context
2880  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2881  * @rx_desc: void pointer to rx descriptor
2882  *
2883  * Return: void
2884  */
2885 static void dup_desc_dbg(struct dp_soc *soc,
2886 			 hal_rxdma_desc_t rxdma_dst_ring_desc,
2887 			 void *rx_desc)
2888 {
2889 	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
2890 	dp_rx_dump_info_and_assert(
2891 			soc,
2892 			soc->rx_rel_ring.hal_srng,
2893 			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
2894 			rx_desc);
2895 }
2896 
2897 /**
2898  * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
2899  *
2900  * @soc: core DP main context
2901  * @mac_id: mac id which is one of 3 mac_ids
2902  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2903  * @head: head of descs list to be freed
2904  * @tail: tail of descs list to be freed
2905  *
2906  * Return: number of msdu in MPDU to be popped
2907  */
2908 static inline uint32_t
2909 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
2910 	hal_rxdma_desc_t rxdma_dst_ring_desc,
2911 	union dp_rx_desc_list_elem_t **head,
2912 	union dp_rx_desc_list_elem_t **tail)
2913 {
2914 	void *rx_msdu_link_desc;
2915 	qdf_nbuf_t msdu;
2916 	qdf_nbuf_t last;
2917 	struct hal_rx_msdu_list msdu_list;
2918 	uint16_t num_msdus;
2919 	struct hal_buf_info buf_info;
2920 	uint32_t rx_bufs_used = 0;
2921 	uint32_t msdu_cnt;
2922 	uint32_t i;
2923 	uint8_t push_reason;
2924 	uint8_t rxdma_error_code = 0;
2925 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
2926 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2927 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
2928 	hal_rxdma_desc_t ring_desc;
2929 	struct rx_desc_pool *rx_desc_pool;
2930 
2931 	if (!pdev) {
2932 		dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
2933 				soc, mac_id);
2934 		return rx_bufs_used;
2935 	}
2936 
2937 	msdu = 0;
2938 
2939 	last = NULL;
2940 
2941 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
2942 				     &buf_info, &msdu_cnt);
2943 
2944 	push_reason =
2945 		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
2946 	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2947 		rxdma_error_code =
2948 			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
2949 	}
2950 
2951 	do {
2952 		rx_msdu_link_desc =
2953 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
2954 
2955 		qdf_assert_always(rx_msdu_link_desc);
2956 
2957 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
2958 				     &msdu_list, &num_msdus);
2959 
2960 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
2961 			/* If the msdus belong to an NSS offloaded radio and
2962 			 * the rbm is not the host SW BM, then return the
2963 			 * msdu_link descriptor without freeing the msdus
2964 			 * (nbufs); let these buffers be given to the NSS
2965 			 * completion ring for NSS to free them.
2966 			 * Else iterate through the msdu link desc list and
2967 			 * free each msdu in the list.
2968 			 */
2969 			if (msdu_list.rbm[0] !=
2970 				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) &&
2971 			    wlan_cfg_get_dp_pdev_nss_enabled(
2972 							pdev->wlan_cfg_ctx))
2973 				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
2974 			else {
2975 				for (i = 0; i < num_msdus; i++) {
2976 					struct dp_rx_desc *rx_desc =
2977 						soc->arch_ops.
2978 						dp_rx_desc_cookie_2_va(
2979 							soc,
2980 							msdu_list.sw_cookie[i]);
2981 					qdf_assert_always(rx_desc);
2982 					msdu = rx_desc->nbuf;
2983 					/*
2984 					 * This is an unlikely scenario
2985 					 * where the host is reaping
2986 					 * a descriptor which
2987 					 * it already reaped just a while ago
2988 					 * but is yet to replenish
2989 					 * it back to HW.
2990 					 * In this case the host will dump
2991 					 * the last 128 descriptors
2992 					 * including the software descriptor
2993 					 * rx_desc and assert.
2994 					 */
2995 					ring_desc = rxdma_dst_ring_desc;
2996 					if (qdf_unlikely(!rx_desc->in_use)) {
2997 						dup_desc_dbg(soc,
2998 							     ring_desc,
2999 							     rx_desc);
3000 						continue;
3001 					}
3002 
3003 					if (rx_desc->unmapped == 0) {
3004 						rx_desc_pool =
3005 							&soc->rx_desc_buf[rx_desc->pool_id];
3006 						dp_ipa_rx_buf_smmu_mapping_lock(soc);
3007 						dp_rx_nbuf_unmap_pool(soc,
3008 								      rx_desc_pool,
3009 								      msdu);
3010 						rx_desc->unmapped = 1;
3011 						dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3012 					}
3013 
3014 					dp_rx_err_debug("%pK: msdu_nbuf=%pK ",
3015 							soc, msdu);
3016 
3017 					dp_rx_buffer_pool_nbuf_free(soc, msdu,
3018 							rx_desc->pool_id);
3019 					rx_bufs_used++;
3020 					dp_rx_add_to_free_desc_list(head,
3021 						tail, rx_desc);
3022 				}
3023 			}
3024 		} else {
3025 			rxdma_error_code = HAL_RXDMA_ERR_WAR;
3026 		}
3027 
3028 		/*
3029 		 * Store the current link buffer into the local structure
3030 		 * to be used for release purposes.
3031 		 */
3032 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3033 					     buf_info.paddr, buf_info.sw_cookie,
3034 					     buf_info.rbm);
3035 
3036 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3037 					      &buf_info);
3038 		dp_rx_link_desc_return_by_addr(soc,
3039 					       (hal_buff_addrinfo_t)
3040 						rx_link_buf_info,
3041 						bm_action);
3042 	} while (buf_info.paddr);
3043 
3044 	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
3045 	if (pdev)
3046 		DP_STATS_INC(pdev, err.rxdma_error, 1);
3047 
3048 	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
3049 		dp_rx_err_err("%pK: Packet received with Decrypt error", soc);
3050 	}
3051 
3052 	return rx_bufs_used;
3053 }
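
/*
 * Link-descriptor walk in dp_rx_err_mpdu_pop() (comment-only sketch):
 *
 *   do {
 *           read the msdu list via hal_rx_msdu_list_get();
 *           free or return the msdus as decided by the sw cookie/rbm;
 *           save the current buf_info, then advance to the next link
 *           desc via hal_rx_mon_next_link_desc_get();
 *           return the previous link buffer with
 *           dp_rx_link_desc_return_by_addr();
 *   } while (buf_info.paddr);  -- a zero paddr terminates the chain
 */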
3054 
3055 uint32_t
3056 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3057 		     uint32_t mac_id, uint32_t quota)
3058 {
3059 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
3060 	hal_rxdma_desc_t rxdma_dst_ring_desc;
3061 	hal_soc_handle_t hal_soc;
3062 	void *err_dst_srng;
3063 	union dp_rx_desc_list_elem_t *head = NULL;
3064 	union dp_rx_desc_list_elem_t *tail = NULL;
3065 	struct dp_srng *dp_rxdma_srng;
3066 	struct rx_desc_pool *rx_desc_pool;
3067 	uint32_t work_done = 0;
3068 	uint32_t rx_bufs_used = 0;
3069 
3070 	if (!pdev)
3071 		return 0;
3072 
3073 	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;
3074 
3075 	if (!err_dst_srng) {
3076 		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
3077 			      soc, err_dst_srng);
3078 		return 0;
3079 	}
3080 
3081 	hal_soc = soc->hal_soc;
3082 
3083 	qdf_assert(hal_soc);
3084 
3085 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
3086 		dp_rx_err_err("%pK: HAL Monitor Destination Ring Access Failed -- %pK",
3087 			      soc, err_dst_srng);
3088 		return 0;
3089 	}
3090 
3091 	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
3092 		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
3093 
3094 			rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
3095 						rxdma_dst_ring_desc,
3096 						&head, &tail);
3097 	}
3098 
3099 	dp_srng_access_end(int_ctx, soc, err_dst_srng);
3100 
3101 	if (rx_bufs_used) {
3102 		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3103 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3104 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
3105 		} else {
3106 			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
3107 			rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
3108 		}
3109 
3110 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3111 			rx_desc_pool, rx_bufs_used, &head, &tail, false);
3112 
3113 		work_done += rx_bufs_used;
3114 	}
3115 
3116 	return work_done;
3117 }
3118 
3119 #ifndef QCA_HOST_MODE_WIFI_DISABLED
3120 
3121 static inline void
3122 dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
3123 			hal_rxdma_desc_t rxdma_dst_ring_desc,
3124 			union dp_rx_desc_list_elem_t **head,
3125 			union dp_rx_desc_list_elem_t **tail,
3126 			uint32_t *rx_bufs_used)
3127 {
3128 	void *rx_msdu_link_desc;
3129 	qdf_nbuf_t msdu;
3130 	qdf_nbuf_t last;
3131 	struct hal_rx_msdu_list msdu_list;
3132 	uint16_t num_msdus;
3133 	struct hal_buf_info buf_info;
3134 	uint32_t msdu_cnt, i;
3135 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
3136 	struct rx_desc_pool *rx_desc_pool;
3137 	struct dp_rx_desc *rx_desc;
3138 
3139 	msdu = 0;
3140 
3141 	last = NULL;
3142 
3143 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
3144 				     &buf_info, &msdu_cnt);
3145 
3146 	do {
3147 		rx_msdu_link_desc =
3148 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
3149 
3150 		if (!rx_msdu_link_desc) {
3151 			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
3152 			break;
3153 		}
3154 
3155 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
3156 				     &msdu_list, &num_msdus);
3157 
3158 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
3159 			for (i = 0; i < num_msdus; i++) {
3160 				if (!dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[i])) {
3161 					dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x",
3162 							  msdu_list.sw_cookie[i]);
3163 					continue;
3164 				}
3165 
3166 				rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3167 							soc,
3168 							msdu_list.sw_cookie[i]);
3169 				qdf_assert_always(rx_desc);
3170 				rx_desc_pool =
3171 					&soc->rx_desc_buf[rx_desc->pool_id];
3172 				msdu = rx_desc->nbuf;
3173 
3174 				/*
3175 				 * This is an unlikely scenario where the host is reaping
3176 				 * a descriptor which it already reaped just a while ago
3177 				 * but is yet to replenish it back to HW.
3178 				 */
3179 				if (qdf_unlikely(!rx_desc->in_use) ||
3180 				    qdf_unlikely(!msdu)) {
3181 					dp_rx_err_info_rl("Reaping rx_desc not in use!");
3182 					continue;
3183 				}
3184 
3185 				dp_ipa_rx_buf_smmu_mapping_lock(soc);
3186 				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
3187 				rx_desc->unmapped = 1;
3188 				dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3189 
3190 				dp_rx_buffer_pool_nbuf_free(soc, msdu,
3191 							    rx_desc->pool_id);
3192 				rx_bufs_used[rx_desc->pool_id]++;
3193 				dp_rx_add_to_free_desc_list(head,
3194 							    tail, rx_desc);
3195 			}
3196 		}
3197 
3198 		/*
3199 		 * Store the current link buffer into the local structure
3200 		 * to be used for release purposes.
3201 		 */
3202 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3203 					     buf_info.paddr, buf_info.sw_cookie,
3204 					     buf_info.rbm);
3205 
3206 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3207 					      &buf_info);
3208 		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
3209 					rx_link_buf_info,
3210 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
3211 	} while (buf_info.paddr);
3212 }
3213 
3214 void
3215 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
3216 			     uint32_t buf_type)
3217 {
3218 	struct hal_buf_info buf_info = {0};
3219 	struct dp_rx_desc *rx_desc = NULL;
3220 	struct rx_desc_pool *rx_desc_pool;
3221 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0};
3222 	union dp_rx_desc_list_elem_t *head = NULL;
3223 	union dp_rx_desc_list_elem_t *tail = NULL;
3224 	uint8_t pool_id;
3225 	uint8_t mac_id;
3226 
3227 	hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info);
3228 
3229 	if (!buf_info.paddr) {
3230 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
3231 		return;
3232 	}
3233 
3234 	/* buffer_addr_info is the first element of ring_desc */
3235 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc,
3236 				  &buf_info);
3237 
3238 	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
3239 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
3240 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3241 							soc,
3242 							buf_info.sw_cookie);
3243 
3244 		if (rx_desc && rx_desc->nbuf) {
3245 			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
3246 			dp_ipa_rx_buf_smmu_mapping_lock(soc);
3247 			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
3248 					      rx_desc->nbuf);
3249 			rx_desc->unmapped = 1;
3250 			dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3251 
3252 			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
3253 						    rx_desc->pool_id);
3254 			dp_rx_add_to_free_desc_list(&head,
3255 						    &tail,
3256 						    rx_desc);
3257 
3258 			rx_bufs_reaped[rx_desc->pool_id]++;
3259 		}
3260 	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
3261 		pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie);
3262 
3263 		dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc,
3264 					&head, &tail, rx_bufs_reaped);
3265 	}
3266 
3267 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
3268 		struct rx_desc_pool *rx_desc_pool;
3269 		struct dp_srng *dp_rxdma_srng;
3270 
3271 		if (!rx_bufs_reaped[mac_id])
3272 			continue;
3273 
3274 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
3275 		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3276 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
3277 
3278 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3279 					rx_desc_pool,
3280 					rx_bufs_reaped[mac_id],
3281 					&head, &tail, false);
3282 	}
3283 }
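
/*
 * dp_handle_wbm_internal_error() recap (comment summary): a BUFFER
 * type release points at a single rx_desc, which is unmapped, freed
 * to the buffer pool and queued for replenish; a DESC type release
 * points at an MSDU link descriptor chain, which is walked by
 * dp_wbm_int_err_mpdu_pop() with per-pool reap counters. In both
 * cases the reaped counts feed dp_rx_buffers_replenish() per mac id,
 * mirroring the accounting done in the error-ring handlers above.
 */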
3284 
3285 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3286