xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c (revision b62151f8dd0743da724a4533988c78d2c7385d4f)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #include "dp_types.h"
22 #include "dp_rx.h"
23 #include "dp_tx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 #include "hal_api.h"
27 #include "qdf_trace.h"
28 #include "qdf_nbuf.h"
29 #include "dp_rx_defrag.h"
30 #include "dp_ipa.h"
31 #ifdef WIFI_MONITOR_SUPPORT
32 #include "dp_htt.h"
33 #include <dp_mon.h>
34 #endif
35 #ifdef FEATURE_WDS
36 #include "dp_txrx_wds.h"
37 #endif
38 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
39 #include "qdf_net_types.h"
40 #include "dp_rx_buffer_pool.h"
41 
42 #define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
43 #define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
44 #define dp_rx_err_info(params...) \
45 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
46 #define dp_rx_err_info_rl(params...) \
47 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
48 #define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params)
49 
50 #ifndef QCA_HOST_MODE_WIFI_DISABLED
51 
52 
53 /* Max regular Rx packet routing error */
54 #define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20
55 #define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10
56 #define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* microseconds */
57 
58 #ifdef FEATURE_MEC
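/**
 * dp_rx_mcast_echo_check() - check if a multicast pkt received on a STA vdev
 *			      is a loopback (echo) of the device's own traffic
 * @soc: core txrx main context
 * @txrx_peer: txrx peer handle
 * @rx_tlv_hdr: start of rx tlv header
 * @nbuf: pkt buffer
 *
 * Return: true if the pkt is a looped-back multicast echo and should be
 *	   dropped, false otherwise
 */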
59 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
60 			    struct dp_txrx_peer *txrx_peer,
61 			    uint8_t *rx_tlv_hdr,
62 			    qdf_nbuf_t nbuf)
63 {
64 	struct dp_vdev *vdev = txrx_peer->vdev;
65 	struct dp_pdev *pdev = vdev->pdev;
66 	struct dp_mec_entry *mecentry = NULL;
67 	struct dp_ast_entry *ase = NULL;
68 	uint16_t sa_idx = 0;
69 	uint8_t *data;
70 	/*
71 	 * Multicast Echo Check is required only if the vdev is a STA and
72 	 * the received pkt is a multicast/broadcast pkt; otherwise
73 	 * skip the MEC check.
74 	 */
75 	if (vdev->opmode != wlan_op_mode_sta)
76 		return false;
77 	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
78 		return false;
79 
80 	data = qdf_nbuf_data(nbuf);
81 
82 	/*
83 	 * If the received pkt's src mac addr matches the vdev
84 	 * mac address, then drop the pkt as it is looped back.
85 	 */
86 	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
87 			  vdev->mac_addr.raw,
88 			  QDF_MAC_ADDR_SIZE)))
89 		return true;
90 
91 	/*
92 	 * In case of qwrap isolation mode, do not drop loopback packets.
93 	 * In isolation mode, all packets from the wired stations need to go
94 	 * to rootap and loop back to reach the wireless stations and
95 	 * vice-versa.
96 	 */
97 	if (qdf_unlikely(vdev->isolation_vdev))
98 		return false;
99 
100 	/*
101 	 * If the received pkt's src mac addr matches the MAC addr of a
102 	 * wired PC which is behind the STA, or the MAC addr of a
103 	 * wireless STA which is behind the repeater,
104 	 * then drop the pkt as it is looped back.
105 	 */
106 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
107 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
108 
109 		if ((sa_idx < 0) ||
110 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
111 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
112 				  "invalid sa_idx: %d", sa_idx);
113 			qdf_assert_always(0);
114 		}
115 
116 		qdf_spin_lock_bh(&soc->ast_lock);
117 		ase = soc->ast_table[sa_idx];
118 
119 		/*
120 		 * This check is not strictly needed since MEC is not dependent
121 		 * on AST, but without it SON has issues in the dual backhaul
122 		 * scenario. In APS SON mode, a client connected to the RE on
123 		 * 2G sends multicast packets. The RE sends them to the CAP
124 		 * over the 5G backhaul and the CAP loops them back on 2G to
125 		 * the RE. On receiving them on the 2G STA vap, we would assume
126 		 * that the client has roamed and kick out the client.
127 		 */
128 		if (ase && (ase->peer_id != txrx_peer->peer_id)) {
129 			qdf_spin_unlock_bh(&soc->ast_lock);
130 			goto drop;
131 		}
132 
133 		qdf_spin_unlock_bh(&soc->ast_lock);
134 	}
135 
136 	qdf_spin_lock_bh(&soc->mec_lock);
137 
138 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
139 						   &data[QDF_MAC_ADDR_SIZE]);
140 	if (!mecentry) {
141 		qdf_spin_unlock_bh(&soc->mec_lock);
142 		return false;
143 	}
144 
145 	qdf_spin_unlock_bh(&soc->mec_lock);
146 
147 drop:
148 	dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT,
149 		       soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));
150 
151 	return true;
152 }
153 #endif
154 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
155 
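/**
 * dp_rx_link_desc_refill_duplicate_check() - check whether the link desc
 *			being returned duplicates the previously recorded one
 * @soc: core txrx main context
 * @buf_info: last link desc buf info recorded by SW
 * @ring_buf_info: current link desc buf info from the ring descriptor
 *
 * Increments the dup_refill_link_desc statistic when the physical address of
 * the current link descriptor matches the previously recorded one, and then
 * updates @buf_info with the current descriptor info.
 *
 * Return: None
 */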
156 void dp_rx_link_desc_refill_duplicate_check(
157 				struct dp_soc *soc,
158 				struct hal_buf_info *buf_info,
159 				hal_buff_addrinfo_t ring_buf_info)
160 {
161 	struct hal_buf_info current_link_desc_buf_info = { 0 };
162 
163 	/* do duplicate link desc address check */
164 	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
165 					  &current_link_desc_buf_info);
166 
167 	/*
168 	 * TODO - Check if the hal soc api call can be removed
169 	 * since the cookie is just used for print.
170 	 * buffer_addr_info is the first element of ring_desc
171 	 */
172 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
173 				  (uint32_t *)ring_buf_info,
174 				  &current_link_desc_buf_info);
175 
176 	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
177 			 buf_info->paddr)) {
178 		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
179 			   current_link_desc_buf_info.paddr,
180 			   current_link_desc_buf_info.sw_cookie);
181 		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
182 	}
183 	*buf_info = current_link_desc_buf_info;
184 }
185 
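/**
 * dp_rx_link_desc_return_by_addr() - Return an MSDU link descriptor to the
 *			WBM release ring using its buffer address info
 * @soc: core txrx main context
 * @link_desc_addr: link descriptor buffer address info
 * @bm_action: buffer manager action (e.g. put in idle list)
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */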
186 QDF_STATUS
187 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
188 			       hal_buff_addrinfo_t link_desc_addr,
189 			       uint8_t bm_action)
190 {
191 	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
192 	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
193 	hal_soc_handle_t hal_soc = soc->hal_soc;
194 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
195 	void *src_srng_desc;
196 
197 	if (!wbm_rel_srng) {
198 		dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc);
199 		return status;
200 	}
201 
202 	/* do duplicate link desc address check */
203 	dp_rx_link_desc_refill_duplicate_check(
204 				soc,
205 				&soc->last_op_info.wbm_rel_link_desc,
206 				link_desc_addr);
207 
208 	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
209 
210 		/* TODO */
211 		/*
212 		 * Need API to convert from hal_ring pointer to
213 		 * Ring Type / Ring Id combo
214 		 */
215 		dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK",
216 			      soc, wbm_rel_srng);
217 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
218 		goto done;
219 	}
220 	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
221 	if (qdf_likely(src_srng_desc)) {
222 		/* Return link descriptor through WBM ring (SW2WBM)*/
223 		hal_rx_msdu_link_desc_set(hal_soc,
224 				src_srng_desc, link_desc_addr, bm_action);
225 		status = QDF_STATUS_SUCCESS;
226 	} else {
227 		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
228 
229 		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);
230 
231 		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
232 			   srng->ring_id,
233 			   soc->stats.rx.err.hal_ring_access_full_fail);
234 		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
235 			   *srng->u.src_ring.hp_addr,
236 			   srng->u.src_ring.reap_hp,
237 			   *srng->u.src_ring.tp_addr,
238 			   srng->u.src_ring.cached_tp);
239 		QDF_BUG(0);
240 	}
241 done:
242 	hal_srng_access_end(hal_soc, wbm_rel_srng);
243 	return status;
244 
245 }
246 
247 qdf_export_symbol(dp_rx_link_desc_return_by_addr);
248 
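/**
 * dp_rx_link_desc_return() - Return the MSDU link descriptor referenced by a
 *			REO destination ring descriptor to WBM
 * @soc: core txrx main context
 * @ring_desc: REO ring descriptor holding the buffer address info
 * @bm_action: buffer manager action
 *
 * Return: QDF_STATUS
 */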
249 QDF_STATUS
250 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
251 		       uint8_t bm_action)
252 {
253 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
254 
255 	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
256 }
257 
258 #ifndef QCA_HOST_MODE_WIFI_DISABLED
259 
260 /**
261  * dp_rx_msdus_drop() - Drops all MSDUs of an MPDU
262  *
263  * @soc: core txrx main context
264  * @ring_desc: opaque pointer to the REO error ring descriptor
265  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
266  * @mac_id: mac ID
267  * @quota: No. of units (packets) that can be serviced in one shot.
268  *
269  * This function is used to drop all MSDUs in an MPDU
270  *
271  * Return: uint32_t: No. of elements processed
272  */
273 static uint32_t
274 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
275 		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
276 		 uint8_t *mac_id,
277 		 uint32_t quota)
278 {
279 	uint32_t rx_bufs_used = 0;
280 	void *link_desc_va;
281 	struct hal_buf_info buf_info;
282 	struct dp_pdev *pdev;
283 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
284 	int i;
285 	uint8_t *rx_tlv_hdr;
286 	uint32_t tid;
287 	struct rx_desc_pool *rx_desc_pool;
288 	struct dp_rx_desc *rx_desc;
289 	/* First field in REO Dst ring Desc is buffer_addr_info */
290 	void *buf_addr_info = ring_desc;
291 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
292 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
293 
294 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info);
295 
296 	/* buffer_addr_info is the first element of ring_desc */
297 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
298 				  (uint32_t *)ring_desc,
299 				  &buf_info);
300 
301 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
302 	if (!link_desc_va) {
303 		dp_rx_err_debug("link desc va is null, soc %pk", soc);
304 		return rx_bufs_used;
305 	}
306 
307 more_msdu_link_desc:
308 	/* No UNMAP required -- this is "malloc_consistent" memory */
309 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
310 			     &mpdu_desc_info->msdu_count);
311 
312 	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
313 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
314 						soc, msdu_list.sw_cookie[i]);
315 
316 		qdf_assert_always(rx_desc);
317 
318 		/* all buffers from an MSDU link desc belong to the same pdev */
319 		*mac_id = rx_desc->pool_id;
320 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
321 		if (!pdev) {
322 			dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
323 					soc, rx_desc->pool_id);
324 			return rx_bufs_used;
325 		}
326 
327 		if (!dp_rx_desc_check_magic(rx_desc)) {
328 			dp_rx_err_err("%pK: Invalid rx_desc cookie=%d",
329 				      soc, msdu_list.sw_cookie[i]);
330 			return rx_bufs_used;
331 		}
332 
333 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
334 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
335 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
336 		rx_desc->unmapped = 1;
337 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
338 
339 		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);
340 
341 		rx_bufs_used++;
342 		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
343 						rx_desc->rx_buf_start);
344 		dp_rx_err_err("%pK: Packet received with PN error for tid :%d",
345 			      soc, tid);
346 
347 		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
348 		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
349 			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);
350 
351 		dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info,
352 				      rx_desc->nbuf,
353 				      QDF_TX_RX_STATUS_DROP, true);
354 		/* Just free the buffers */
355 		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);
356 
357 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
358 					    &pdev->free_list_tail, rx_desc);
359 	}
360 
361 	/*
362 	 * If the msdus are spread across multiple link descriptors,
363 	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
364 	 * spread across multiple buffers). Hence, it is
365 	 * necessary to check the next link descriptor and release
366 	 * all the msdus that are part of it.
367 	 */
368 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
369 			link_desc_va,
370 			&next_link_desc_addr_info);
371 
372 	if (hal_rx_is_buf_addr_info_valid(
373 				&next_link_desc_addr_info)) {
374 		/* Clear the next link desc info for the current link_desc */
375 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
376 
377 		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
378 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
379 		hal_rx_buffer_addr_info_get_paddr(
380 				&next_link_desc_addr_info,
381 				&buf_info);
382 		/* buffer_addr_info is the first element of ring_desc */
383 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
384 					  (uint32_t *)&next_link_desc_addr_info,
385 					  &buf_info);
386 		cur_link_desc_addr_info = next_link_desc_addr_info;
387 		buf_addr_info = &cur_link_desc_addr_info;
388 
389 		link_desc_va =
390 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
391 
392 		goto more_msdu_link_desc;
393 	}
394 	quota--;
395 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
396 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
397 	return rx_bufs_used;
398 }
399 
400 /**
401  * dp_rx_pn_error_handle() - Handles PN check errors
402  *
403  * @soc: core txrx main context
404  * @ring_desc: opaque pointer to the REO error ring descriptor
405  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
406  * @mac_id: mac ID
407  * @quota: No. of units (packets) that can be serviced in one shot.
408  *
409  * This function implements PN error handling
410  * If the peer is configured to ignore the PN check errors
411  * or if DP feels, that this frame is still OK, the frame can be
412  * re-injected back to REO to use some of the other features
413  * of REO e.g. duplicate detection/routing to other cores
414  *
415  * Return: uint32_t: No. of elements processed
416  */
417 static uint32_t
418 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
419 		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
420 		      uint8_t *mac_id,
421 		      uint32_t quota)
422 {
423 	uint16_t peer_id;
424 	uint32_t rx_bufs_used = 0;
425 	struct dp_txrx_peer *txrx_peer;
426 	bool peer_pn_policy = false;
427 	dp_txrx_ref_handle txrx_ref_handle = NULL;
428 
429 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
430 					       mpdu_desc_info->peer_meta_data);
431 
432 
433 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
434 						   &txrx_ref_handle,
435 						   DP_MOD_ID_RX_ERR);
436 
437 	if (qdf_likely(txrx_peer)) {
438 		/*
439 		 * TODO: Check for peer specific policies & set peer_pn_policy
440 		 */
441 		dp_err_rl("discard rx due to PN error for peer  %pK",
442 			  txrx_peer);
443 
444 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
445 	}
446 	dp_rx_err_err("%pK: Packet received with PN error", soc);
447 
448 	/* No peer PN policy -- definitely drop */
449 	if (!peer_pn_policy)
450 		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
451 						mpdu_desc_info,
452 						mac_id, quota);
453 
454 	return rx_bufs_used;
455 }
456 
457 #ifdef DP_RX_DELIVER_ALL_OOR_FRAMES
458 /**
459  * dp_rx_deliver_oor_frame() - deliver OOR frames to stack
460  * @soc: Datapath soc handler
461  * @txrx_peer: pointer to DP peer
462  * @nbuf: pointer to the skb of RX frame
463  * @frame_mask: the mask for special frame needed
464  * @rx_tlv_hdr: start of rx tlv header
465  *
466  * Note: msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
467  * a single nbuf is expected.
468  *
469  * Return: true - nbuf has been delivered to stack, false - not.
470  */
471 static bool
472 dp_rx_deliver_oor_frame(struct dp_soc *soc,
473 			struct dp_txrx_peer *txrx_peer,
474 			qdf_nbuf_t nbuf, uint32_t frame_mask,
475 			uint8_t *rx_tlv_hdr)
476 {
477 	uint32_t l2_hdr_offset = 0;
478 	uint16_t msdu_len = 0;
479 	uint32_t skip_len;
480 
481 	l2_hdr_offset =
482 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
483 
484 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
485 		skip_len = l2_hdr_offset;
486 	} else {
487 		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
488 		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
489 		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
490 	}
491 
492 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
493 	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
494 	qdf_nbuf_pull_head(nbuf, skip_len);
495 	qdf_nbuf_set_exc_frame(nbuf, 1);
496 
497 	dp_info_rl("OOR frame, mpdu sn 0x%x",
498 		   hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
499 	dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL);
500 	return true;
501 }
502 
503 #else
504 static bool
505 dp_rx_deliver_oor_frame(struct dp_soc *soc,
506 			struct dp_txrx_peer *txrx_peer,
507 			qdf_nbuf_t nbuf, uint32_t frame_mask,
508 			uint8_t *rx_tlv_hdr)
509 {
510 	return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
511 					   rx_tlv_hdr);
512 }
513 #endif
514 
515 /**
516  * dp_rx_oor_handle() - Handles the msdu which is OOR error
517  *
518  * @soc: core txrx main context
519  * @nbuf: pointer to msdu skb
520  * @peer_id: dp peer ID
521  * @rx_tlv_hdr: start of rx tlv header
522  *
523  * This function processes the msdu delivered from the REO2TCL
524  * ring with error type OOR
525  *
526  * Return: None
527  */
528 static void
529 dp_rx_oor_handle(struct dp_soc *soc,
530 		 qdf_nbuf_t nbuf,
531 		 uint16_t peer_id,
532 		 uint8_t *rx_tlv_hdr)
533 {
534 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
535 				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
536 	struct dp_txrx_peer *txrx_peer = NULL;
537 	dp_txrx_ref_handle txrx_ref_handle = NULL;
538 
539 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
540 						   &txrx_ref_handle,
541 						   DP_MOD_ID_RX_ERR);
542 	if (!txrx_peer) {
543 		dp_info_rl("peer not found");
544 		goto free_nbuf;
545 	}
546 
547 	if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask,
548 				    rx_tlv_hdr)) {
549 		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
550 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
551 		return;
552 	}
553 
554 free_nbuf:
555 	if (txrx_peer)
556 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
557 
558 	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
559 	dp_rx_nbuf_free(nbuf);
560 }
561 
562 /**
563  * dp_rx_err_nbuf_pn_check() - Check if the PN number of this current packet
564  *				is a monotonic increment of the packet number
565  *				from the previous successfully re-ordered
566  *				frame.
567  * @soc: Datapath SOC handle
568  * @ring_desc: REO ring descriptor
569  * @nbuf: Current packet
570  *
571  * Return: QDF_STATUS_SUCCESS, if the pn check passes, else QDF_STATUS_E_FAILURE
572  */
573 static inline QDF_STATUS
574 dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc,
575 			qdf_nbuf_t nbuf)
576 {
577 	uint64_t prev_pn, curr_pn[2];
578 
579 	if (!hal_rx_encryption_info_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
580 		return QDF_STATUS_SUCCESS;
581 
582 	hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn);
583 	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), curr_pn);
584 
585 	if (curr_pn[0] > prev_pn)
586 		return QDF_STATUS_SUCCESS;
587 
588 	return QDF_STATUS_E_FAILURE;
589 }
590 
591 #ifdef WLAN_SKIP_BAR_UPDATE
592 static
593 void dp_rx_err_handle_bar(struct dp_soc *soc,
594 			  struct dp_peer *peer,
595 			  qdf_nbuf_t nbuf)
596 {
597 	dp_info_rl("BAR update to H.W is skipped");
598 	DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
599 }
600 #else
601 static
602 void dp_rx_err_handle_bar(struct dp_soc *soc,
603 			  struct dp_peer *peer,
604 			  qdf_nbuf_t nbuf)
605 {
606 	uint8_t *rx_tlv_hdr;
607 	unsigned char type, subtype;
608 	uint16_t start_seq_num;
609 	uint32_t tid;
610 	QDF_STATUS status;
611 	struct ieee80211_frame_bar *bar;
612 
613 	/*
614 	 * 1. Is this a BAR frame? If not, discard it.
615 	 * 2. If it is, get the peer id, tid, ssn.
616 	 * 2a. Do a tid update.
617 	 */
618 
619 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
620 	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size);
621 
622 	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
623 	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
624 
625 	if (!(type == IEEE80211_FC0_TYPE_CTL &&
626 	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
627 		dp_err_rl("Not a BAR frame!");
628 		return;
629 	}
630 
631 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
632 	qdf_assert_always(tid < DP_MAX_TIDS);
633 
634 	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
635 
636 	dp_info_rl("tid %u window_size %u start_seq_num %u",
637 		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);
638 
639 	status = dp_rx_tid_update_wifi3(peer, tid,
640 					peer->rx_tid[tid].ba_win_size,
641 					start_seq_num,
642 					true);
643 	if (status != QDF_STATUS_SUCCESS) {
644 		dp_err_rl("failed to handle bar frame update rx tid");
645 		DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
646 	} else {
647 		DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
648 	}
649 }
650 #endif
651 
652 /**
653  * _dp_rx_bar_frame_handle(): Core of the BAR frame handling
654  * @soc: Datapath SoC handle
655  * @nbuf: packet being processed
656  * @mpdu_desc_info: mpdu desc info for the current packet
657  * @tid: tid on which the packet arrived
658  * @err_status: Flag to indicate if REO encountered an error while routing this
659  *		frame
660  * @error_code: REO error code
661  *
662  * Return: None
663  */
664 static void
665 _dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
666 			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
667 			uint32_t tid, uint8_t err_status, uint32_t error_code)
668 {
669 	uint16_t peer_id;
670 	struct dp_peer *peer;
671 
672 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
673 					       mpdu_desc_info->peer_meta_data);
674 	peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
675 	if (!peer)
676 		return;
677 
678 	dp_info_rl("BAR frame: "
679 		" peer_id = %d"
680 		" tid = %u"
681 		" SSN = %d"
682 		" error status = %d",
683 		peer->peer_id,
684 		tid,
685 		mpdu_desc_info->mpdu_seq,
686 		err_status);
687 
688 	if (err_status == HAL_REO_ERROR_DETECTED) {
689 		switch (error_code) {
690 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
691 		case HAL_REO_ERR_BAR_FRAME_OOR:
692 			dp_rx_err_handle_bar(soc, peer, nbuf);
693 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
694 			break;
695 		default:
696 			DP_STATS_INC(soc, rx.bar_frame, 1);
697 		}
698 	}
699 
700 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
701 }
702 
703 /**
704  * dp_rx_bar_frame_handle() - Function to handle err BAR frames
705  * @soc: core DP main context
706  * @ring_desc: Hal ring desc
707  * @rx_desc: dp rx desc
708  * @mpdu_desc_info: mpdu desc info
709  * @err_status: error status
710  * @err_code: error code
711  *
712  * Handle the error BAR frames received. Ensure the SOC level
713  * stats are updated based on the REO error code. The BAR frames
714  * are further processed by updating the Rx tids with the start
715  * sequence number (SSN) and BA window size. Desc is returned
716  * to the free desc list
717  *
718  * Return: none
719  */
720 static void
721 dp_rx_bar_frame_handle(struct dp_soc *soc,
722 		       hal_ring_desc_t ring_desc,
723 		       struct dp_rx_desc *rx_desc,
724 		       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
725 		       uint8_t err_status,
726 		       uint32_t err_code)
727 {
728 	qdf_nbuf_t nbuf;
729 	struct dp_pdev *pdev;
730 	struct rx_desc_pool *rx_desc_pool;
731 	uint8_t *rx_tlv_hdr;
732 	uint32_t tid;
733 
734 	nbuf = rx_desc->nbuf;
735 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
736 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
737 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
738 	rx_desc->unmapped = 1;
739 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
740 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
741 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
742 					rx_tlv_hdr);
743 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
744 
745 	if (!pdev) {
746 		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
747 				soc, rx_desc->pool_id);
748 		return;
749 	}
750 
751 	_dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status,
752 				err_code);
753 	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf,
754 			      QDF_TX_RX_STATUS_DROP, true);
755 	dp_rx_link_desc_return(soc, ring_desc,
756 			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
757 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
758 				    rx_desc->pool_id);
759 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
760 				    &pdev->free_list_tail,
761 				    rx_desc);
762 }
763 
764 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
765 
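/**
 * dp_2k_jump_handle() - Handle an msdu received with a 2K-jump REO error
 * @soc: core txrx main context
 * @nbuf: pointer to msdu skb
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: dp peer ID
 * @tid: tid on which the packet arrived
 *
 * Sends a DELBA to the peer when the BA session is active, delivers only the
 * configured special frames (ARP) to the stack, and drops all other frames
 * while updating the 2k-jump drop statistics.
 *
 * Return: None
 */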
766 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
767 		       uint16_t peer_id, uint8_t tid)
768 {
769 	struct dp_peer *peer = NULL;
770 	struct dp_rx_tid *rx_tid = NULL;
771 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;
772 
773 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
774 	if (!peer) {
775 		dp_rx_err_info_rl("%pK: peer not found", soc);
776 		goto free_nbuf;
777 	}
778 
779 	if (tid >= DP_MAX_TIDS) {
780 		dp_info_rl("invalid tid");
781 		goto nbuf_deliver;
782 	}
783 
784 	rx_tid = &peer->rx_tid[tid];
785 	qdf_spin_lock_bh(&rx_tid->tid_lock);
786 
787 	/* allow sending Delba only if the BA session is active */
788 	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
789 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
790 		goto nbuf_deliver;
791 	}
792 
793 	if (!rx_tid->delba_tx_status) {
794 		rx_tid->delba_tx_retry++;
795 		rx_tid->delba_tx_status = 1;
796 		rx_tid->delba_rcode =
797 			IEEE80211_REASON_QOS_SETUP_REQUIRED;
798 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
799 		if (soc->cdp_soc.ol_ops->send_delba) {
800 			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
801 				     1);
802 			soc->cdp_soc.ol_ops->send_delba(
803 					peer->vdev->pdev->soc->ctrl_psoc,
804 					peer->vdev->vdev_id,
805 					peer->mac_addr.raw,
806 					tid,
807 					rx_tid->delba_rcode,
808 					CDP_DELBA_2K_JUMP);
809 		}
810 	} else {
811 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
812 	}
813 
814 nbuf_deliver:
815 	if (dp_rx_deliver_special_frame(soc, peer->txrx_peer, nbuf, frame_mask,
816 					rx_tlv_hdr)) {
817 		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
818 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
819 		return;
820 	}
821 
822 free_nbuf:
823 	if (peer)
824 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
825 	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
826 	dp_rx_nbuf_free(nbuf);
827 }
828 
829 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
830     defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
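/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - WAR for null-queue frames
 *			carrying a corrupted sw_peer_id
 * @soc: Datapath soc handle
 * @pool_id: rx descriptor pool id / mac id
 * @rx_tlv_hdr: start of rx tlv header
 * @nbuf: pkt buffer
 *
 * Attempts a peer lookup using addr2 of the received MPDU; if a peer is
 * found, the frame is treated as having a corrupted peer id and is dropped.
 *
 * Return: true if the nbuf was consumed (dropped), false otherwise
 */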
831 bool
832 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
833 					      uint8_t pool_id,
834 					      uint8_t *rx_tlv_hdr,
835 					      qdf_nbuf_t nbuf)
836 {
837 	struct dp_peer *peer = NULL;
838 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
839 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
840 	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;
841 
842 	if (!pdev) {
843 		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
844 				soc, pool_id);
845 		return false;
846 	}
847 	/*
848 	 * WAR - In certain types of packets, if peer_id is not correct, the
849 	 * driver may not be able to find the peer. Try finding the peer by
850 	 * addr_2 of the received MPDU.
851 	 */
852 	if (wh)
853 		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
854 					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
855 	if (peer) {
856 		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
857 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
858 				     QDF_TRACE_LEVEL_DEBUG);
859 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
860 				 1, qdf_nbuf_len(nbuf));
861 		dp_rx_nbuf_free(nbuf);
862 
863 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
864 		return true;
865 	}
866 	return false;
867 }
868 #else
869 bool
870 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
871 					      uint8_t pool_id,
872 					      uint8_t *rx_tlv_hdr,
873 					      qdf_nbuf_t nbuf)
874 {
875 	return false;
876 }
877 #endif
878 
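/**
 * dp_rx_check_pkt_len() - Check if the pkt length exceeds the rx buffer size
 * @soc: Datapath soc handle
 * @pkt_len: computed packet length
 *
 * Return: true if the length is invalid (caller should drop the pkt),
 *	   false otherwise
 */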
879 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
880 {
881 	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
882 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
883 				 1, pkt_len);
884 		return true;
885 	} else {
886 		return false;
887 	}
888 }
889 
890 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
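/**
 * dp_rx_deliver_to_osif_stack() - deliver a frame to the OS interface stack
 * @soc: Datapath soc handle
 * @vdev: Datapath vdev handle
 * @txrx_peer: txrx peer handle
 * @nbuf: pkt buffer
 * @tail: tail of the nbuf list (unused, single nbuf delivery)
 * @is_eapol: true if the frame is an EAPOL frame
 *
 * EAPOL frames are routed over the control port when the soc is configured
 * for it; all other frames take the regular rx delivery path.
 *
 * Return: None
 */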
891 void
892 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
893 			    struct dp_vdev *vdev,
894 			    struct dp_txrx_peer *txrx_peer,
895 			    qdf_nbuf_t nbuf,
896 			    qdf_nbuf_t tail,
897 			    bool is_eapol)
898 {
899 	if (is_eapol && soc->eapol_over_control_port)
900 		dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
901 	else
902 		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
903 }
904 #else
905 void
906 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
907 			    struct dp_vdev *vdev,
908 			    struct dp_txrx_peer *txrx_peer,
909 			    qdf_nbuf_t nbuf,
910 			    qdf_nbuf_t tail,
911 			    bool is_eapol)
912 {
913 	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
914 }
915 #endif
916 
917 #ifdef WLAN_FEATURE_11BE_MLO
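/**
 * dp_rx_err_match_dhost() - Check if the ethernet destination address matches
 *			the vdev mac address (or the MLD mac address for MLO)
 * @eh: pointer to the ethernet header of the frame
 * @vdev: Datapath vdev handle
 *
 * Return: non-zero if the destination address matches, 0 otherwise
 */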
918 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
919 {
920 	return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
921 			     QDF_MAC_ADDR_SIZE) == 0) ||
922 		(qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
923 			     QDF_MAC_ADDR_SIZE) == 0));
924 }
925 
926 #else
927 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
928 {
929 	return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
930 			    QDF_MAC_ADDR_SIZE) == 0);
931 }
932 #endif
933 
934 #ifndef QCA_HOST_MODE_WIFI_DISABLED
935 
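/**
 * dp_rx_err_drop_3addr_mcast() - Check if a 3-address multicast frame
 *			received on a STA vdev should be dropped
 * @vdev: Datapath vdev handle
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: true if drop_3addr_mcast is enabled on a STA vdev and the frame is
 *	   multicast/broadcast, false otherwise
 */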
936 bool
937 dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
938 {
939 	struct dp_soc *soc = vdev->pdev->soc;
940 
941 	if (!vdev->drop_3addr_mcast)
942 		return false;
943 
944 	if (vdev->opmode != wlan_op_mode_sta)
945 		return false;
946 
947 	if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
948 		return true;
949 
950 	return false;
951 }
952 
953 /**
954  * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed
955  *				for this frame received in REO error ring.
956  * @soc: Datapath SOC handle
957  * @error: REO error detected or not
958  * @error_code: Error code in case of REO error
959  *
960  * Return: true if the pn check is needed in software,
961  *	false if the pn check is not needed.
962  */
963 static inline bool
964 dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
965 			     uint32_t error_code)
966 {
967 	return (soc->features.pn_in_reo_dest &&
968 		(error == HAL_REO_ERROR_DETECTED &&
969 		 (hal_rx_reo_is_2k_jump(error_code) ||
970 		  hal_rx_reo_is_oor_error(error_code) ||
971 		  hal_rx_reo_is_bar_oor_2k_jump(error_code))));
972 }
973 
974 #ifdef DP_WAR_INVALID_FIRST_MSDU_FLAG
975 static inline void
976 dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
977 				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
978 				  bool first_msdu_in_mpdu_processed)
979 {
980 	if (first_msdu_in_mpdu_processed) {
981 		/*
982 		 * This is the 2nd indication of first_msdu in the same mpdu.
983 		 * Skip re-parsing the mpdu_desc_info and use the cached one,
984 		 * since this msdu is most probably from the current mpdu
985 		 * which is being processed
986 		 */
987 	} else {
988 		hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc,
989 						   qdf_nbuf_data(nbuf),
990 						   mpdu_desc_info);
991 	}
992 }
993 #else
994 static inline void
995 dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
996 				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
997 				  bool first_msdu_in_mpdu_processed)
998 {
999 	hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, qdf_nbuf_data(nbuf),
1000 					   mpdu_desc_info);
1001 }
1002 #endif
1003 
1004 /**
1005  * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
1006  *
1007  * @soc: core txrx main context
1008  * @ring_desc: opaque pointer to the REO error ring descriptor
1009  * @mpdu_desc_info: pointer to mpdu level description info
1010  * @link_desc_va: pointer to msdu_link_desc virtual address
1011  * @err_code: reo error code fetched from ring entry
1012  *
1013  * Function to handle msdus fetched from msdu link desc, currently
1014  * supports REO error NULL queue, 2K jump, OOR.
1015  *
1016  * Return: msdu count processed
1017  */
1018 static uint32_t
1019 dp_rx_reo_err_entry_process(struct dp_soc *soc,
1020 			    void *ring_desc,
1021 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1022 			    void *link_desc_va,
1023 			    enum hal_reo_error_code err_code)
1024 {
1025 	uint32_t rx_bufs_used = 0;
1026 	struct dp_pdev *pdev;
1027 	int i;
1028 	uint8_t *rx_tlv_hdr_first;
1029 	uint8_t *rx_tlv_hdr_last;
1030 	uint32_t tid = DP_MAX_TIDS;
1031 	uint16_t peer_id;
1032 	struct dp_rx_desc *rx_desc;
1033 	struct rx_desc_pool *rx_desc_pool;
1034 	qdf_nbuf_t nbuf;
1035 	struct hal_buf_info buf_info;
1036 	struct hal_rx_msdu_list msdu_list;
1037 	uint16_t num_msdus;
1038 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
1039 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
1040 	/* First field in REO Dst ring Desc is buffer_addr_info */
1041 	void *buf_addr_info = ring_desc;
1042 	qdf_nbuf_t head_nbuf = NULL;
1043 	qdf_nbuf_t tail_nbuf = NULL;
1044 	uint16_t msdu_processed = 0;
1045 	QDF_STATUS status;
1046 	bool ret, is_pn_check_needed;
1047 	uint8_t rx_desc_pool_id;
1048 	struct dp_txrx_peer *txrx_peer = NULL;
1049 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1050 	hal_ring_handle_t hal_ring_hdl = soc->reo_exception_ring.hal_srng;
1051 	bool first_msdu_in_mpdu_processed = false;
1052 	bool msdu_dropped = false;
1053 	uint8_t link_id = 0;
1054 
1055 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1056 					mpdu_desc_info->peer_meta_data);
1057 	is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
1058 							  HAL_REO_ERROR_DETECTED,
1059 							  err_code);
1060 more_msdu_link_desc:
1061 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
1062 			     &num_msdus);
1063 	for (i = 0; i < num_msdus; i++) {
1064 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
1065 						soc,
1066 						msdu_list.sw_cookie[i]);
1067 
1068 		qdf_assert_always(rx_desc);
1069 		nbuf = rx_desc->nbuf;
1070 
1071 		/*
1072 		 * This is an unlikely scenario where the host is reaping
1073 		 * a descriptor which it already reaped just a while ago
1074 		 * but is yet to replenish it back to HW.
1075 		 * In this case host will dump the last 128 descriptors
1076 		 * including the software descriptor rx_desc and assert.
1077 		 */
1078 		if (qdf_unlikely(!rx_desc->in_use) ||
1079 		    qdf_unlikely(!nbuf)) {
1080 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1081 			dp_info_rl("Reaping rx_desc not in use!");
1082 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1083 						   ring_desc, rx_desc);
1084 			/* ignore duplicate RX desc and continue to process */
1085 			/* Pop out the descriptor */
1086 			msdu_dropped = true;
1087 			continue;
1088 		}
1089 
1090 		ret = dp_rx_desc_paddr_sanity_check(rx_desc,
1091 						    msdu_list.paddr[i]);
1092 		if (!ret) {
1093 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1094 			rx_desc->in_err_state = 1;
1095 			msdu_dropped = true;
1096 			continue;
1097 		}
1098 
1099 		rx_desc_pool_id = rx_desc->pool_id;
1100 		/* all buffers from a MSDU link belong to same pdev */
1101 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id);
1102 
1103 		rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id];
1104 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
1105 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
1106 		rx_desc->unmapped = 1;
1107 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1108 
1109 		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
1110 		rx_bufs_used++;
1111 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1112 					    &pdev->free_list_tail, rx_desc);
1113 
1114 		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);
1115 
1116 		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
1117 				 HAL_MSDU_F_MSDU_CONTINUATION))
1118 			continue;
1119 
1120 		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
1121 					     rx_desc_pool_id)) {
1122 			/* MSDU queued back to the pool */
1123 			msdu_dropped = true;
1124 			goto process_next_msdu;
1125 		}
1126 
1127 		if (is_pn_check_needed) {
1128 			if (msdu_list.msdu_info[i].msdu_flags &
1129 			    HAL_MSDU_F_FIRST_MSDU_IN_MPDU) {
1130 				dp_rx_err_populate_mpdu_desc_info(soc, nbuf,
1131 						mpdu_desc_info,
1132 						first_msdu_in_mpdu_processed);
1133 				first_msdu_in_mpdu_processed = true;
1134 			} else {
1135 				if (!first_msdu_in_mpdu_processed) {
1136 					/*
1137 					 * If no msdu in this mpdu was dropped
1138 					 * due to failed sanity checks, then
1139 					 * its not expected to hit this
1140 					 * condition. Hence we assert here.
1141 					 */
1142 					if (!msdu_dropped)
1143 						qdf_assert_always(0);
1144 
1145 					/*
1146 					 * We do not have valid mpdu_desc_info
1147 					 * to process this nbuf, hence drop it.
1148 					 */
1149 					dp_rx_nbuf_free(nbuf);
1150 					/* TODO - Increment stats */
1151 					goto process_next_msdu;
1152 				}
1153 				/*
1154 				 * DO NOTHING -
1155 				 * Continue using the same mpdu_desc_info
1156 				 * details populated from the first msdu in
1157 				 * the mpdu.
1158 				 */
1159 			}
1160 
1161 			status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf);
1162 			if (QDF_IS_STATUS_ERROR(status)) {
1163 				DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail,
1164 					     1);
1165 				dp_rx_nbuf_free(nbuf);
1166 				goto process_next_msdu;
1167 			}
1168 
1169 			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1170 					mpdu_desc_info->peer_meta_data);
1171 
1172 			if (mpdu_desc_info->bar_frame)
1173 				_dp_rx_bar_frame_handle(soc, nbuf,
1174 							mpdu_desc_info, tid,
1175 							HAL_REO_ERROR_DETECTED,
1176 							err_code);
1177 		}
1178 
1179 		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
1180 		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);
1181 
1182 		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
1183 			nbuf = dp_rx_sg_create(soc, head_nbuf);
1184 			qdf_nbuf_set_is_frag(nbuf, 1);
1185 			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
1186 		}
1187 
1188 		switch (err_code) {
1189 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
1190 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
1191 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
1192 			/*
1193 			 * The mpdu start description tlv is valid only for the
1194 			 * first msdu; use it for the following msdus.
1195 			 */
1196 			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1197 							   rx_tlv_hdr_last))
1198 				tid = hal_rx_mpdu_start_tid_get(
1199 							soc->hal_soc,
1200 							rx_tlv_hdr_first);
1201 
1202 			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
1203 					  peer_id, tid);
1204 			break;
1205 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
1206 		case HAL_REO_ERR_BAR_FRAME_OOR:
1207 			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
1208 			break;
1209 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
1210 			txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(
1211 							soc, peer_id,
1212 							&txrx_ref_handle,
1213 							DP_MOD_ID_RX_ERR);
1214 			if (!txrx_peer)
1215 				dp_info_rl("txrx_peer is null peer_id %u",
1216 					   peer_id);
1217 			soc->arch_ops.dp_rx_null_q_desc_handle(soc, nbuf,
1218 							       rx_tlv_hdr_last,
1219 							       rx_desc_pool_id,
1220 							       txrx_peer,
1221 							       TRUE,
1222 							       link_id);
1223 			if (txrx_peer)
1224 				dp_txrx_peer_unref_delete(txrx_ref_handle,
1225 							  DP_MOD_ID_RX_ERR);
1226 			break;
1227 		default:
1228 			dp_err_rl("Non-support error code %d", err_code);
1229 			dp_rx_nbuf_free(nbuf);
1230 		}
1231 
1232 process_next_msdu:
1233 		msdu_processed++;
1234 		head_nbuf = NULL;
1235 		tail_nbuf = NULL;
1236 	}
1237 
1238 	/*
1239 	 * If the msdus are spread across multiple link descriptors,
1240 	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
1241 	 * spread across multiple buffers). Hence, it is
1242 	 * necessary to check the next link descriptor and release
1243 	 * all the msdus that are part of it.
1244 	 */
1245 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
1246 			link_desc_va,
1247 			&next_link_desc_addr_info);
1248 
1249 	if (hal_rx_is_buf_addr_info_valid(
1250 				&next_link_desc_addr_info)) {
1251 		/* Clear the next link desc info for the current link_desc */
1252 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
1253 		dp_rx_link_desc_return_by_addr(
1254 				soc,
1255 				buf_addr_info,
1256 				HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1257 
1258 		hal_rx_buffer_addr_info_get_paddr(
1259 				&next_link_desc_addr_info,
1260 				&buf_info);
1261 		/* buffer_addr_info is the first element of ring_desc */
1262 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
1263 					  (uint32_t *)&next_link_desc_addr_info,
1264 					  &buf_info);
1265 		link_desc_va =
1266 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1267 		cur_link_desc_addr_info = next_link_desc_addr_info;
1268 		buf_addr_info = &cur_link_desc_addr_info;
1269 
1270 		goto more_msdu_link_desc;
1271 	}
1272 
1273 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
1274 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1275 	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
1276 		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);
1277 
1278 	return rx_bufs_used;
1279 }
1280 
1281 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1282 
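/**
 * dp_rx_process_rxdma_err() - Process a frame received with an RXDMA error
 * @soc: core txrx main context
 * @nbuf: pkt buffer
 * @rx_tlv_hdr: start of rx tlv header
 * @txrx_peer: txrx peer handle
 * @err_code: RXDMA error code
 * @mac_id: mac id on which the frame was received
 * @link_id: link id on which the frame was received
 *
 * Validates the msdu, handles the invalid peer/vdev cases, applies mesh
 * filtering and the WAPI/dynamic-WEP rekey exceptions, updates peer stats and
 * delivers the frame to the stack (or raw/mesh path) as an exception frame.
 *
 * Return: None
 */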
1283 void
1284 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
1285 			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
1286 			uint8_t err_code, uint8_t mac_id, uint8_t link_id)
1287 {
1288 	uint32_t pkt_len, l2_hdr_offset;
1289 	uint16_t msdu_len;
1290 	struct dp_vdev *vdev;
1291 	qdf_ether_header_t *eh;
1292 	bool is_broadcast;
1293 
1294 	/*
1295 	 * Check if DMA completed -- msdu_done is the last bit
1296 	 * to be written
1297 	 */
1298 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1299 
1300 		dp_err_rl("MSDU DONE failure");
1301 
1302 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1303 				     QDF_TRACE_LEVEL_INFO);
1304 		qdf_assert(0);
1305 	}
1306 
1307 	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
1308 							   rx_tlv_hdr);
1309 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1310 	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
1311 
1312 	if (dp_rx_check_pkt_len(soc, pkt_len)) {
1313 		/* Drop & free packet */
1314 		dp_rx_nbuf_free(nbuf);
1315 		return;
1316 	}
1317 	/* Set length in nbuf */
1318 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
1319 
1320 	qdf_nbuf_set_next(nbuf, NULL);
1321 
1322 	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1323 	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
1324 
1325 	if (!txrx_peer) {
1326 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL");
1327 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1328 				qdf_nbuf_len(nbuf));
1329 		/* Trigger invalid peer handler wrapper */
1330 		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
1331 		return;
1332 	}
1333 
1334 	vdev = txrx_peer->vdev;
1335 	if (!vdev) {
1336 		dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
1337 				 vdev);
1338 		/* Drop & free packet */
1339 		dp_rx_nbuf_free(nbuf);
1340 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1341 		return;
1342 	}
1343 
1344 	/*
1345 	 * Advance the packet start pointer by total size of
1346 	 * pre-header TLVs
1347 	 */
1348 	dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset);
1349 
1350 	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
1351 		uint8_t *pkt_type;
1352 
1353 		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
1354 		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
1355 			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
1356 							htons(QDF_LLC_STP)) {
1357 				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
1358 				goto process_mesh;
1359 			} else {
1360 				goto process_rx;
1361 			}
1362 		}
1363 	}
1364 	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
1365 		goto process_mesh;
1366 
1367 	/*
1368 	 * WAPI cert AP sends rekey frames as unencrypted.
1369 	 * Thus RXDMA will report unencrypted frame error.
1370 	 * To pass WAPI cert case, SW needs to pass unencrypted
1371 	 * rekey frame to stack.
1372 	 */
1373 	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1374 		goto process_rx;
1375 	}
1376 	/*
1377 	 * In the dynamic WEP case, rekey frames are not encrypted,
1378 	 * similar to WAPI. Allow EAPOL when 8021+wep is enabled and
1379 	 * key install is already done.
1380 	 */
1381 	if ((vdev->sec_type == cdp_sec_type_wep104) &&
1382 	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
1383 		goto process_rx;
1384 
1385 process_mesh:
1386 
1387 	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
1388 		dp_rx_nbuf_free(nbuf);
1389 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1390 		return;
1391 	}
1392 
1393 	if (vdev->mesh_vdev) {
1394 		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
1395 				      == QDF_STATUS_SUCCESS) {
1396 			dp_rx_err_info("%pK: mesh pkt filtered", soc);
1397 			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);
1398 
1399 			dp_rx_nbuf_free(nbuf);
1400 			return;
1401 		}
1402 		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
1403 	}
1404 process_rx:
1405 	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1406 							rx_tlv_hdr) &&
1407 		(vdev->rx_decap_type ==
1408 				htt_cmn_pkt_type_ethernet))) {
1409 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1410 		is_broadcast = (QDF_IS_ADDR_BROADCAST
1411 				(eh->ether_dhost)) ? 1 : 0 ;
1412 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
1413 					      qdf_nbuf_len(nbuf), link_id);
1414 		if (is_broadcast) {
1415 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1,
1416 						      qdf_nbuf_len(nbuf),
1417 						      link_id);
1418 		}
1419 	} else {
1420 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.unicast, 1,
1421 					      qdf_nbuf_len(nbuf),
1422 					      link_id);
1423 	}
1424 
1425 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1426 		dp_rx_deliver_raw(vdev, nbuf, txrx_peer, link_id);
1427 	} else {
1428 		/* Update the protocol tag in SKB based on CCE metadata */
1429 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1430 					  EXCEPTION_DEST_RING_ID, true, true);
1431 		/* Update the flow tag in SKB based on FSE metadata */
1432 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
1433 		DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
1434 		qdf_nbuf_set_exc_frame(nbuf, 1);
1435 		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
1436 					    qdf_nbuf_is_ipv4_eapol_pkt(nbuf));
1437 	}
1438 
1439 	return;
1440 }
1441 
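/**
 * dp_rx_process_mic_error() - Process a frame received with a MIC error
 * @soc: core txrx main context
 * @nbuf: pkt buffer
 * @rx_tlv_hdr: start of rx tlv header
 * @txrx_peer: txrx peer handle
 *
 * For raw-decap fragments, the last fragment is handed to the defrag path;
 * otherwise the MIC failure info (TA/DA, vdev) is reported to the control
 * path through the rx_mic_error callback and the buffer is freed.
 *
 * Return: None
 */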
1442 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
1443 			     uint8_t *rx_tlv_hdr,
1444 			     struct dp_txrx_peer *txrx_peer)
1445 {
1446 	struct dp_vdev *vdev = NULL;
1447 	struct dp_pdev *pdev = NULL;
1448 	struct ol_if_ops *tops = NULL;
1449 	uint16_t rx_seq, fragno;
1450 	uint8_t is_raw;
1451 	unsigned int tid;
1452 	QDF_STATUS status;
1453 	struct cdp_rx_mic_err_info mic_failure_info;
1454 
1455 	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1456 					    rx_tlv_hdr))
1457 		return;
1458 
1459 	if (!txrx_peer) {
1460 		dp_info_rl("txrx_peer not found");
1461 		goto fail;
1462 	}
1463 
1464 	vdev = txrx_peer->vdev;
1465 	if (!vdev) {
1466 		dp_info_rl("VDEV not found");
1467 		goto fail;
1468 	}
1469 
1470 	pdev = vdev->pdev;
1471 	if (!pdev) {
1472 		dp_info_rl("PDEV not found");
1473 		goto fail;
1474 	}
1475 
1476 	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
1477 	if (is_raw) {
1478 		fragno = dp_rx_frag_get_mpdu_frag_number(soc,
1479 							 qdf_nbuf_data(nbuf));
1480 		/* Can get only last fragment */
1481 		if (fragno) {
1482 			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
1483 							qdf_nbuf_data(nbuf));
1484 			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
1485 							qdf_nbuf_data(nbuf));
1486 
1487 			status = dp_rx_defrag_add_last_frag(soc, txrx_peer,
1488 							    tid, rx_seq, nbuf);
1489 			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
1490 				   "status %d !", rx_seq, fragno, status);
1491 			return;
1492 		}
1493 	}
1494 
1495 	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
1496 				  &mic_failure_info.da_mac_addr.bytes[0])) {
1497 		dp_err_rl("Failed to get da_mac_addr");
1498 		goto fail;
1499 	}
1500 
1501 	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
1502 				  &mic_failure_info.ta_mac_addr.bytes[0])) {
1503 		dp_err_rl("Failed to get ta_mac_addr");
1504 		goto fail;
1505 	}
1506 
1507 	mic_failure_info.key_id = 0;
1508 	mic_failure_info.multicast =
1509 		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
1510 	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
1511 	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
1512 	mic_failure_info.data = NULL;
1513 	mic_failure_info.vdev_id = vdev->vdev_id;
1514 
1515 	tops = pdev->soc->cdp_soc.ol_ops;
1516 	if (tops->rx_mic_error)
1517 		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
1518 				   &mic_failure_info);
1519 
1520 fail:
1521 	dp_rx_nbuf_free(nbuf);
1522 	return;
1523 }
1524 
1525 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1526 	defined(WLAN_MCAST_MLO) && !defined(CONFIG_MLO_SINGLE_DEV)
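/**
 * dp_rx_igmp_handler() - Pass a multicast frame to the arch-specific MLO
 *			multicast handler, if one is registered
 * @soc: core txrx main context
 * @vdev: Datapath vdev handle
 * @peer: txrx peer handle
 * @nbuf: pkt buffer
 * @link_id: link id on which the frame was received
 *
 * Return: true if the frame was consumed by the MLO multicast handler,
 *	   false otherwise
 */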
1527 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1528 			       struct dp_vdev *vdev,
1529 			       struct dp_txrx_peer *peer,
1530 			       qdf_nbuf_t nbuf,
1531 			       uint8_t link_id)
1532 {
1533 	if (soc->arch_ops.dp_rx_mcast_handler) {
1534 		if (soc->arch_ops.dp_rx_mcast_handler(soc, vdev, peer,
1535 						      nbuf, link_id))
1536 			return true;
1537 	}
1538 	return false;
1539 }
1540 #else
1541 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1542 			       struct dp_vdev *vdev,
1543 			       struct dp_txrx_peer *peer,
1544 			       qdf_nbuf_t nbuf,
1545 			       uint8_t link_id)
1546 {
1547 	return false;
1548 }
1549 #endif
1550 
1551 /**
1552  * dp_rx_err_route_hdl() - Function to send EAPOL frames to stack.
1553  *                            Free any other packet which comes in
1554  *                            this path.
1555  *
1556  * @soc: core DP main context
1557  * @nbuf: buffer pointer
1558  * @txrx_peer: txrx peer handle
1559  * @rx_tlv_hdr: start of rx tlv header
1560  * @err_src: rxdma/reo
1561  * @link_id: link id on which the packet is received
1562  *
1563  * This function indicates EAPOL frame received in wbm error ring to stack.
1564  * Any other frame should be dropped.
1565  *
1566  * Return: None
1567  */
1568 static void
1569 dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
1570 		    struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr,
1571 		    enum hal_rx_wbm_error_source err_src,
1572 		    uint8_t link_id)
1573 {
1574 	uint32_t pkt_len;
1575 	uint16_t msdu_len;
1576 	struct dp_vdev *vdev;
1577 	struct hal_rx_msdu_metadata msdu_metadata;
1578 	bool is_eapol;
1579 
1580 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
1581 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1582 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
1583 
1584 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
1585 		if (dp_rx_check_pkt_len(soc, pkt_len))
1586 			goto drop_nbuf;
1587 
1588 		/* Set length in nbuf */
1589 		qdf_nbuf_set_pktlen(
1590 			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
1591 		qdf_assert_always(nbuf->data == rx_tlv_hdr);
1592 	}
1593 
1594 	/*
1595 	 * Check if DMA completed -- msdu_done is the last bit
1596 	 * to be written
1597 	 */
1598 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1599 		dp_err_rl("MSDU DONE failure");
1600 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1601 				     QDF_TRACE_LEVEL_INFO);
1602 		qdf_assert(0);
1603 	}
1604 
1605 	if (!txrx_peer)
1606 		goto drop_nbuf;
1607 
1608 	vdev = txrx_peer->vdev;
1609 	if (!vdev) {
1610 		dp_err_rl("Null vdev!");
1611 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1612 		goto drop_nbuf;
1613 	}
1614 
1615 	/*
1616 	 * Advance the packet start pointer by total size of
1617 	 * pre-header TLVs
1618 	 */
1619 	if (qdf_nbuf_is_frag(nbuf))
1620 		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
1621 	else
1622 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
1623 				   soc->rx_pkt_tlv_size));
1624 
1625 	if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf, link_id))
1626 		return;
1627 
1628 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
1629 
1630 	/*
1631 	 * Indicate EAPOL frame to stack only when vap mac address
1632 	 * matches the destination address.
1633 	 */
1634 	is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
1635 	if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1636 		qdf_ether_header_t *eh =
1637 			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1638 		if (dp_rx_err_match_dhost(eh, vdev)) {
1639 			DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1,
1640 					 qdf_nbuf_len(nbuf));
1641 
1642 			/*
1643 			 * Update the protocol tag in SKB based on
1644 			 * CCE metadata.
1645 			 */
1646 			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1647 						  EXCEPTION_DEST_RING_ID,
1648 						  true, true);
1649 			/* Update the flow tag in SKB based on FSE metadata */
1650 			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
1651 					      true);
1652 			DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
1653 						  qdf_nbuf_len(nbuf),
1654 						  vdev->pdev->enhanced_stats_en);
1655 			qdf_nbuf_set_exc_frame(nbuf, 1);
1656 			qdf_nbuf_set_next(nbuf, NULL);
1657 
1658 			dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf,
1659 						    NULL, is_eapol);
1660 
1661 			return;
1662 		}
1663 	}
1664 
1665 drop_nbuf:
1666 
1667 	DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
1668 		      err_src == HAL_RX_WBM_ERR_SRC_REO);
1669 	DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
1670 		      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);
1671 
1672 	dp_rx_nbuf_free(nbuf);
1673 }
1674 
1675 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1676 
1677 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
1678 /**
1679  * dp_rx_link_cookie_check() - Validate link desc cookie
1680  * @ring_desc: ring descriptor
1681  *
1682  * Return: qdf status
1683  */
1684 static inline QDF_STATUS
1685 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1686 {
1687 	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
1688 		return QDF_STATUS_E_FAILURE;
1689 
1690 	return QDF_STATUS_SUCCESS;
1691 }
1692 
1693 /**
1694  * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
1695  * @ring_desc: ring descriptor
1696  *
1697  * Return: None
1698  */
1699 static inline void
1700 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1701 {
1702 	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
1703 }
1704 #else
1705 static inline QDF_STATUS
1706 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1707 {
1708 	return QDF_STATUS_SUCCESS;
1709 }
1710 
1711 static inline void
1712 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1713 {
1714 }
1715 #endif
1716 
1717 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1718 /**
1719  * dp_rx_err_ring_record_entry() - Record rx err ring history
1720  * @soc: Datapath soc structure
1721  * @paddr: paddr of the buffer in RX err ring
1722  * @sw_cookie: SW cookie of the buffer in RX err ring
1723  * @rbm: Return buffer manager of the buffer in RX err ring
1724  *
1725  * Return: None
1726  */
1727 static inline void
1728 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1729 			    uint32_t sw_cookie, uint8_t rbm)
1730 {
1731 	struct dp_buf_info_record *record;
1732 	uint32_t idx;
1733 
1734 	if (qdf_unlikely(!soc->rx_err_ring_history))
1735 		return;
1736 
1737 	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
1738 					DP_RX_ERR_HIST_MAX);
1739 
1740 	/* No NULL check needed for record since it's an array */
1741 	record = &soc->rx_err_ring_history->entry[idx];
1742 
1743 	record->timestamp = qdf_get_log_timestamp();
1744 	record->hbi.paddr = paddr;
1745 	record->hbi.sw_cookie = sw_cookie;
1746 	record->hbi.rbm = rbm;
1747 }
1748 #else
1749 static inline void
1750 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1751 			    uint32_t sw_cookie, uint8_t rbm)
1752 {
1753 }
1754 #endif
1755 
1756 #ifdef HANDLE_RX_REROUTE_ERR
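/**
 * dp_rx_err_handle_msdu_buf() - handle an MSDU buffer wrongly routed to
 *				 the REO error ring
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 *
 * Fetches the SW rx descriptor from the buffer cookie, records the entry
 * in the rx err ring history, then unmaps and frees the nbuf and returns
 * the descriptor to the free list so the buffer can be replenished.
 *
 * Return: lmac id of the pool the buffer was reaped from on success,
 *	   DP_INVALID_LMAC_ID otherwise
 */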
1757 static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
1758 				     hal_ring_desc_t ring_desc)
1759 {
1760 	int lmac_id = DP_INVALID_LMAC_ID;
1761 	struct dp_rx_desc *rx_desc;
1762 	struct hal_buf_info hbi;
1763 	struct dp_pdev *pdev;
1764 	struct rx_desc_pool *rx_desc_pool;
1765 
1766 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
1767 
1768 	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie);
1769 
1770 	/* sanity */
1771 	if (!rx_desc) {
1772 		DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1);
1773 		goto assert_return;
1774 	}
1775 
1776 	if (!rx_desc->nbuf)
1777 		goto assert_return;
1778 
1779 	dp_rx_err_ring_record_entry(soc, hbi.paddr,
1780 				    hbi.sw_cookie,
1781 				    hal_rx_ret_buf_manager_get(soc->hal_soc,
1782 							       ring_desc));
1783 	if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) {
1784 		DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1785 		rx_desc->in_err_state = 1;
1786 		goto assert_return;
1787 	}
1788 
1789 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
1790 	/* After this point the rx_desc and nbuf are valid */
1791 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
1792 	qdf_assert_always(!rx_desc->unmapped);
1793 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
1794 	rx_desc->unmapped = 1;
1795 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1796 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
1797 				    rx_desc->pool_id);
1798 
1799 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
1800 	lmac_id = rx_desc->pool_id;
1801 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1802 				    &pdev->free_list_tail,
1803 				    rx_desc);
1804 	return lmac_id;
1805 
1806 assert_return:
1807 	qdf_assert(0);
1808 	return lmac_id;
1809 }
1810 
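/**
 * dp_rx_err_exception() - handle MSDU buffers routed to the REO error ring
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 *
 * Counts these routing errors and triggers self recovery when either the
 * overall count or the count within the timeout window crosses its
 * threshold, then recovers the buffer via dp_rx_err_handle_msdu_buf().
 *
 * Return: lmac id of the reaped buffer on success,
 *	   DP_INVALID_LMAC_ID otherwise
 */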
1811 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
1812 {
1813 	int ret;
1814 	uint64_t cur_time_stamp;
1815 
1816 	DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1);
1817 
1818 	/* Recover if overall error count exceeds threshold */
1819 	if (soc->stats.rx.err.reo_err_msdu_buf_rcved >
1820 	    DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) {
1821 		dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
1822 		       soc->stats.rx.err.reo_err_msdu_buf_rcved,
1823 		       soc->rx_route_err_start_pkt_ts);
1824 		qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR);
1825 	}
1826 
1827 	cur_time_stamp = qdf_get_log_timestamp_usecs();
1828 	if (!soc->rx_route_err_start_pkt_ts)
1829 		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
1830 
1831 	/* Recover if threshold number of packets received in threshold time */
1832 	if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) >
1833 						DP_RX_ERR_ROUTE_TIMEOUT_US) {
1834 		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
1835 
1836 		if (soc->rx_route_err_in_window >
1837 		    DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) {
1838 			qdf_trigger_self_recovery(NULL,
1839 						  QDF_RX_REG_PKT_ROUTE_ERR);
1840 			dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
1841 			       soc->stats.rx.err.reo_err_msdu_buf_rcved,
1842 			       soc->rx_route_err_start_pkt_ts);
1843 		} else {
1844 			soc->rx_route_err_in_window = 1;
1845 		}
1846 	} else {
1847 		soc->rx_route_err_in_window++;
1848 	}
1849 
1850 	ret = dp_rx_err_handle_msdu_buf(soc, ring_desc);
1851 
1852 	return ret;
1853 }
1854 #else /* HANDLE_RX_REROUTE_ERR */
1855 
1856 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
1857 {
1858 	qdf_assert_always(0);
1859 
1860 	return DP_INVALID_LMAC_ID;
1861 }
1862 #endif /* HANDLE_RX_REROUTE_ERR */
1863 
1864 #ifdef WLAN_MLO_MULTI_CHIP
1865 /**
1866  * dp_idle_link_bm_id_check() - war for HW issue
1867  *
1868  * @soc: DP SOC handle
1869  * @rbm: idle link RBM value
1870  * @ring_desc: reo error link descriptor
1871  *
1872  * This is a WAR for a HW issue where a link descriptor
1873  * of a partner soc is received because packets are wrongly
1874  * interpreted as fragments
1875  *
1876  * Return: true in case link desc is consumed
1877  *	   false in other cases
1878  */
1879 static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
1880 				     void *ring_desc)
1881 {
1882 	struct dp_soc *replenish_soc = NULL;
1883 
1884 	/* return false (not consumed) in case the link desc belongs to the same soc */
1885 	if (rbm == soc->idle_link_bm_id)
1886 		return false;
1887 
1888 	if (soc->arch_ops.dp_soc_get_by_idle_bm_id)
1889 		replenish_soc =
1890 			soc->arch_ops.dp_soc_get_by_idle_bm_id(soc, rbm);
1891 
1892 	qdf_assert_always(replenish_soc);
1893 
1894 	/*
1895 	 * For the WIN use case we should only get fragment packets in
1896 	 * this ring; since fragmentation is not supported for MLO,
1897 	 * we should not see link descriptors from another soc.
1898 	 *
1899 	 * Drop all packets from the partner soc and replenish the descriptors.
1900 	 */
1901 	dp_handle_wbm_internal_error(replenish_soc, ring_desc,
1902 				     HAL_WBM_RELEASE_RING_2_DESC_TYPE);
1903 
1904 	return true;
1905 }
1906 #else
1907 static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
1908 				     void *ring_desc)
1909 {
1910 	return false;
1911 }
1912 #endif
1913 
1914 uint32_t
1915 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
1916 		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
1917 {
1918 	hal_ring_desc_t ring_desc;
1919 	hal_soc_handle_t hal_soc;
1920 	uint32_t count = 0;
1921 	uint32_t rx_bufs_used = 0;
1922 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
1923 	uint8_t mac_id = 0;
1924 	uint8_t buf_type;
1925 	uint8_t err_status;
1926 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
1927 	struct hal_buf_info hbi;
1928 	struct dp_pdev *dp_pdev;
1929 	struct dp_srng *dp_rxdma_srng;
1930 	struct rx_desc_pool *rx_desc_pool;
1931 	void *link_desc_va;
1932 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
1933 	uint16_t num_msdus;
1934 	struct dp_rx_desc *rx_desc = NULL;
1935 	QDF_STATUS status;
1936 	bool ret;
1937 	uint32_t error_code = 0;
1938 	bool sw_pn_check_needed;
1939 	int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
1940 	int i, rx_bufs_reaped_total;
1941 
1942 	/* Debug -- Remove later */
1943 	qdf_assert(soc && hal_ring_hdl);
1944 
1945 	hal_soc = soc->hal_soc;
1946 
1947 	/* Debug -- Remove later */
1948 	qdf_assert(hal_soc);
1949 
1950 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
1951 
1952 		/* TODO */
1953 		/*
1954 		 * Need API to convert from hal_ring pointer to
1955 		 * Ring Type / Ring Id combo
1956 		 */
1957 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
1958 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc,
1959 			      hal_ring_hdl);
1960 		goto done;
1961 	}
1962 
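	/*
	 * Reap entries from the REO error ring until the quota or the
	 * per-interrupt reap limit is hit. Each entry is expected to carry
	 * an MSDU link descriptor which is handled per REO error code.
	 */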
1963 	while (qdf_likely(quota-- && (ring_desc =
1964 				hal_srng_dst_peek(hal_soc,
1965 						  hal_ring_hdl)))) {
1966 
1967 		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
1968 		err_status = hal_rx_err_status_get(hal_soc, ring_desc);
1969 		buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc);
1970 
1971 		if (err_status == HAL_REO_ERROR_DETECTED)
1972 			error_code = hal_rx_get_reo_error_code(hal_soc,
1973 							       ring_desc);
1974 
1975 		qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0);
1976 		sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
1977 								  err_status,
1978 								  error_code);
1979 		if (!sw_pn_check_needed) {
1980 			/*
1981 			 * MPDU desc info will be present in the REO desc
1982 			 * only in the below scenarios
1983 			 * 1) pn_in_dest_disabled:  always
1984 			 * 2) pn_in_dest enabled: All cases except 2k-jup
1985 			 * 2) pn_in_dest enabled: All cases except 2K-jump
1986 			 */
1987 			hal_rx_mpdu_desc_info_get(hal_soc, ring_desc,
1988 						  &mpdu_desc_info);
1989 		}
1990 
1991 		if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0)
1992 			goto next_entry;
1993 
1994 		/*
1995 		 * For REO error ring, only MSDU LINK DESC is expected.
1996 		 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
1997 		 */
1998 		if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) {
1999 			int lmac_id;
2000 
2001 			lmac_id = dp_rx_err_exception(soc, ring_desc);
2002 			if (lmac_id >= 0)
2003 				rx_bufs_reaped[lmac_id] += 1;
2004 			goto next_entry;
2005 		}
2006 
2007 		hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
2008 					  &hbi);
2009 		/*
2010 		 * check for the magic number in the sw cookie
2011 		 */
2012 		qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) &
2013 					soc->link_desc_id_start);
2014 
2015 		if (dp_idle_link_bm_id_check(soc, hbi.rbm, ring_desc)) {
2016 			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
2017 			goto next_entry;
2018 		}
2019 
2020 		status = dp_rx_link_cookie_check(ring_desc);
2021 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2022 			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
2023 			break;
2024 		}
2025 
2026 		hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2027 		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
2028 		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
2029 				     &num_msdus);
2030 		if (!num_msdus ||
2031 		    !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) {
2032 			dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x",
2033 					  num_msdus, msdu_list.sw_cookie[0]);
2034 			dp_rx_link_desc_return(soc, ring_desc,
2035 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
2036 			goto next_entry;
2037 		}
2038 
2039 		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
2040 					    msdu_list.sw_cookie[0],
2041 					    msdu_list.rbm[0]);
2042 		// TODO - BE- Check if the RBM is to be checked for all chips
2043 		if (qdf_unlikely((msdu_list.rbm[0] !=
2044 					dp_rx_get_rx_bm_id(soc)) &&
2045 				 (msdu_list.rbm[0] !=
2046 				  soc->idle_link_bm_id) &&
2047 				 (msdu_list.rbm[0] !=
2048 					dp_rx_get_defrag_bm_id(soc)))) {
2049 			/* TODO */
2050 			/* Call appropriate handler */
2051 			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2052 				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
2053 				dp_rx_err_err("%pK: Invalid RBM %d",
2054 					      soc, msdu_list.rbm[0]);
2055 			}
2056 
2057 			/* Return link descriptor through WBM ring (SW2WBM) */
2058 			dp_rx_link_desc_return(soc, ring_desc,
2059 					HAL_BM_ACTION_RELEASE_MSDU_LIST);
2060 			goto next_entry;
2061 		}
2062 
2063 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
2064 						soc,
2065 						msdu_list.sw_cookie[0]);
2066 		qdf_assert_always(rx_desc);
2067 
2068 		mac_id = rx_desc->pool_id;
2069 
2070 		if (sw_pn_check_needed) {
2071 			goto process_reo_error_code;
2072 		}
2073 
2074 		if (mpdu_desc_info.bar_frame) {
2075 			qdf_assert_always(mpdu_desc_info.msdu_count == 1);
2076 
2077 			dp_rx_bar_frame_handle(soc, ring_desc, rx_desc,
2078 					       &mpdu_desc_info, err_status,
2079 					       error_code);
2080 
2081 			rx_bufs_reaped[mac_id] += 1;
2082 			goto next_entry;
2083 		}
2084 
2085 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
2086 			/*
2087 			 * We only handle one msdu per link desc for fragmented
2088 			 * case. We drop the msdus and release the link desc
2089 			 * back if there are more than one msdu in link desc.
2090 			 */
2091 			if (qdf_unlikely(num_msdus > 1)) {
2092 				count = dp_rx_msdus_drop(soc, ring_desc,
2093 							 &mpdu_desc_info,
2094 							 &mac_id, quota);
2095 				rx_bufs_reaped[mac_id] += count;
2096 				goto next_entry;
2097 			}
2098 
2099 			/*
2100 			 * This is an unlikely scenario where the host is reaping
2101 			 * a descriptor which it already reaped just a while ago
2102 			 * but is yet to replenish it back to HW.
2103 			 * In this case the host will dump the last 128 descriptors,
2104 			 * including the software descriptor rx_desc, and assert.
2105 			 */
2106 
2107 			if (qdf_unlikely(!rx_desc->in_use)) {
2108 				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
2109 				dp_info_rl("Reaping rx_desc not in use!");
2110 				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2111 							   ring_desc, rx_desc);
2112 				/* ignore duplicate RX desc and continue */
2113 				/* Pop out the descriptor */
2114 				goto next_entry;
2115 			}
2116 
2117 			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
2118 							    msdu_list.paddr[0]);
2119 			if (!ret) {
2120 				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2121 				rx_desc->in_err_state = 1;
2122 				goto next_entry;
2123 			}
2124 
2125 			count = dp_rx_frag_handle(soc,
2126 						  ring_desc, &mpdu_desc_info,
2127 						  rx_desc, &mac_id, quota);
2128 
2129 			rx_bufs_reaped[mac_id] += count;
2130 			DP_STATS_INC(soc, rx.rx_frags, 1);
2131 			goto next_entry;
2132 		}
2133 
2134 process_reo_error_code:
2135 		/*
2136 		 * Expect REO errors to be handled after this point
2137 		 */
2138 		qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED);
2139 
2140 		dp_info_rl("Got pkt with REO ERROR: %d", error_code);
2141 
2142 		switch (error_code) {
2143 		case HAL_REO_ERR_PN_CHECK_FAILED:
2144 		case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
2145 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2146 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2147 			if (dp_pdev)
2148 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
2149 			count = dp_rx_pn_error_handle(soc,
2150 						      ring_desc,
2151 						      &mpdu_desc_info, &mac_id,
2152 						      quota);
2153 
2154 			rx_bufs_reaped[mac_id] += count;
2155 			break;
2156 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2157 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
2158 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2159 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
2160 		case HAL_REO_ERR_BAR_FRAME_OOR:
2161 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2162 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2163 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2164 			if (dp_pdev)
2165 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
2166 			count = dp_rx_reo_err_entry_process(
2167 					soc,
2168 					ring_desc,
2169 					&mpdu_desc_info,
2170 					link_desc_va,
2171 					error_code);
2172 
2173 			rx_bufs_reaped[mac_id] += count;
2174 			break;
2175 		case HAL_REO_ERR_QUEUE_DESC_INVALID:
2176 		case HAL_REO_ERR_AMPDU_IN_NON_BA:
2177 		case HAL_REO_ERR_NON_BA_DUPLICATE:
2178 		case HAL_REO_ERR_BA_DUPLICATE:
2179 		case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION:
2180 		case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN:
2181 		case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET:
2182 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2183 			count = dp_rx_msdus_drop(soc, ring_desc,
2184 						 &mpdu_desc_info,
2185 						 &mac_id, quota);
2186 			rx_bufs_reaped[mac_id] += count;
2187 			break;
2188 		default:
2189 			/* Assert if unexpected error type */
2190 			qdf_assert_always(0);
2191 		}
2192 next_entry:
2193 		dp_rx_link_cookie_invalidate(ring_desc);
2194 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2195 
2196 		rx_bufs_reaped_total = 0;
2197 		for (i = 0; i < MAX_PDEV_CNT; i++)
2198 			rx_bufs_reaped_total += rx_bufs_reaped[i];
2199 
2200 		if (dp_rx_reap_loop_pkt_limit_hit(soc, rx_bufs_reaped_total,
2201 						  max_reap_limit))
2202 			break;
2203 	}
2204 
2205 done:
2206 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2207 
2208 	if (soc->rx.flags.defrag_timeout_check) {
2209 		uint32_t now_ms =
2210 			qdf_system_ticks_to_msecs(qdf_system_ticks());
2211 
2212 		if (now_ms >= soc->rx.defrag.next_flush_ms)
2213 			dp_rx_defrag_waitlist_flush(soc);
2214 	}
2215 
2216 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2217 		if (rx_bufs_reaped[mac_id]) {
2218 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2219 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2220 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
2221 
2222 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2223 						rx_desc_pool,
2224 						rx_bufs_reaped[mac_id],
2225 						&dp_pdev->free_list_head,
2226 						&dp_pdev->free_list_tail,
2227 						false);
2228 			rx_bufs_used += rx_bufs_reaped[mac_id];
2229 		}
2230 	}
2231 
2232 	return rx_bufs_used; /* Assume no scale factor for now */
2233 }
2234 
2235 #ifdef DROP_RXDMA_DECRYPT_ERR
2236 /**
2237  * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
2238  *
2239  * Return: true if rxdma decrypt err frames are handled and false otherwise
2240  */
2241 static inline bool dp_handle_rxdma_decrypt_err(void)
2242 {
2243 	return false;
2244 }
2245 #else
2246 static inline bool dp_handle_rxdma_decrypt_err(void)
2247 {
2248 	return true;
2249 }
2250 #endif
2251 
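/*
 * WAR (enabled via soc->wbm_sg_last_msdu_war): read the msdu length from
 * the TLV of the last nbuf in the WBM SG list and apply it to every nbuf
 * in the list.
 */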
2252 void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
2253 {
2254 	if (soc->wbm_sg_last_msdu_war) {
2255 		uint32_t len;
2256 		qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail;
2257 
2258 		len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
2259 						     qdf_nbuf_data(temp));
2260 		temp = soc->wbm_sg_param.wbm_sg_nbuf_head;
2261 		while (temp) {
2262 			QDF_NBUF_CB_RX_PKT_LEN(temp) = len;
2263 			temp = temp->next;
2264 		}
2265 	}
2266 }
2267 
2268 #ifdef RX_DESC_DEBUG_CHECK
2269 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2270 					    hal_ring_handle_t hal_ring_hdl,
2271 					    hal_ring_desc_t ring_desc,
2272 					    struct dp_rx_desc *rx_desc)
2273 {
2274 	struct hal_buf_info hbi;
2275 
2276 	hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2277 	/* Sanity check for possible buffer paddr corruption */
2278 	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
2279 		return QDF_STATUS_SUCCESS;
2280 
2281 	hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc);
2282 
2283 	return QDF_STATUS_E_FAILURE;
2284 }
2285 
2286 #else
2287 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2288 					    hal_ring_handle_t hal_ring_hdl,
2289 					    hal_ring_desc_t ring_desc,
2290 					    struct dp_rx_desc *rx_desc)
2291 {
2292 	return QDF_STATUS_SUCCESS;
2293 }
2294 #endif
2295 bool
2296 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
2297 {
2298 	/*
2299 	 * Currently only the Null Queue and Unencrypted error handlers support
2300 	 * SG. The other error handlers do not deal with SG buffers.
2301 	 */
2302 	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
2303 	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
2304 	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
2305 	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
2306 		return true;
2307 
2308 	return false;
2309 }
2310 
2311 #ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
2312 void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2313 			      qdf_nbuf_t nbuf)
2314 {
2315 	/*
2316 	 * In the fast recycle case the TX driver can skip invalidating
2317 	 * the buffer for SFE forwarding. We need to invalidate
2318 	 * the TLV headers after writing to this location.
2319 	 */
2320 	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2321 				      (void *)(nbuf->data +
2322 					       soc->rx_pkt_tlv_size +
2323 					       L3_HEADER_PAD));
2324 }
2325 #else
2326 void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2327 			      qdf_nbuf_t nbuf)
2328 {
2329 }
2330 #endif
2331 
2332 uint32_t
2333 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2334 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
2335 {
2336 	hal_soc_handle_t hal_soc;
2337 	uint32_t rx_bufs_used = 0;
2338 	struct dp_pdev *dp_pdev;
2339 	uint8_t *rx_tlv_hdr;
2340 	bool is_tkip_mic_err;
2341 	qdf_nbuf_t nbuf_head = NULL;
2342 	qdf_nbuf_t nbuf, next;
2343 	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
2344 	uint8_t pool_id;
2345 	uint8_t tid = 0;
2346 	uint8_t link_id = 0;
2347 
2348 	/* Debug -- Remove later */
2349 	qdf_assert(soc && hal_ring_hdl);
2350 
2351 	hal_soc = soc->hal_soc;
2352 
2353 	/* Debug -- Remove later */
2354 	qdf_assert(hal_soc);
2355 
2356 	nbuf_head = soc->arch_ops.dp_rx_wbm_err_reap_desc(int_ctx, soc,
2357 							  hal_ring_hdl,
2358 							  quota,
2359 							  &rx_bufs_used);
2360 	nbuf = nbuf_head;
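	/*
	 * Walk the list of error nbufs reaped above and dispatch each one
	 * based on the WBM error source (REO vs RXDMA), the push reason and
	 * the error code recorded in the nbuf TLV.
	 */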
2361 	while (nbuf) {
2362 		struct dp_txrx_peer *txrx_peer;
2363 		struct dp_peer *peer;
2364 		uint16_t peer_id;
2365 		uint8_t err_code;
2366 		uint8_t *tlv_hdr;
2367 		uint32_t peer_meta_data;
2368 		dp_txrx_ref_handle txrx_ref_handle = NULL;
2369 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
2370 
2371 		/*
2372 		 * retrieve the wbm desc info from nbuf TLV, so we can
2373 		 * handle error cases appropriately
2374 		 */
2375 		wbm_err_info = dp_rx_get_err_info(soc, nbuf);
2376 		peer_meta_data = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
2377 							       rx_tlv_hdr);
2378 		peer_id = dp_rx_peer_metadata_peer_id_get(soc, peer_meta_data);
2379 		txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
2380 							   &txrx_ref_handle,
2381 							   DP_MOD_ID_RX_ERR);
2382 
2383 		if (!txrx_peer)
2384 			dp_info_rl("peer is null peer_id %u err_src %u, "
2385 				   "REO: push_rsn %u err_code %u, "
2386 				   "RXDMA: push_rsn %u err_code %u",
2387 				   peer_id, wbm_err_info.wbm_err_src,
2388 				   wbm_err_info.reo_psh_rsn,
2389 				   wbm_err_info.reo_err_code,
2390 				   wbm_err_info.rxdma_psh_rsn,
2391 				   wbm_err_info.rxdma_err_code);
2392 
2393 		/* Set queue_mapping in nbuf to 0 */
2394 		dp_set_rx_queue(nbuf, 0);
2395 
2396 		next = nbuf->next;
2397 		/*
2398 		 * Form the SG list for msdu-continuation buffers;
2399 		 * QCN9000 has this support.
2400 		 */
2401 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
2402 			nbuf = dp_rx_sg_create(soc, nbuf);
2403 			next = nbuf->next;
2404 			/*
2405 			 * SG error handling is not done correctly,
2406 			 * drop SG frames for now.
2407 			 */
2408 			dp_rx_nbuf_free(nbuf);
2409 			dp_info_rl("scattered msdu dropped");
2410 			nbuf = next;
2411 			if (txrx_peer)
2412 				dp_txrx_peer_unref_delete(txrx_ref_handle,
2413 							  DP_MOD_ID_RX_ERR);
2414 			continue;
2415 		}
2416 
2417 		pool_id = wbm_err_info.pool_id;
2418 		dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2419 
2420 		if (dp_pdev && dp_pdev->link_peer_stats &&
2421 		    txrx_peer && txrx_peer->is_mld_peer) {
2422 			link_id = dp_rx_peer_mdata_link_id_get(
2423 							soc,
2424 							peer_meta_data);
2425 			if (!link_id) {
2426 				DP_PEER_PER_PKT_STATS_INC(
2427 					  txrx_peer,
2428 					  rx.inval_link_id_pkt_cnt,
2429 					  1, link_id);
2430 			}
2431 		} else {
2432 			link_id = 0;
2433 		}
2434 
2435 		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
2436 			if (wbm_err_info.reo_psh_rsn
2437 					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {
2438 
2439 				DP_STATS_INC(soc,
2440 					rx.err.reo_error
2441 					[wbm_err_info.reo_err_code], 1);
2442 				/* increment @pdev level */
2443 				if (dp_pdev)
2444 					DP_STATS_INC(dp_pdev, err.reo_error,
2445 						     1);
2446 
2447 				switch (wbm_err_info.reo_err_code) {
2448 				/*
2449 				 * Handling for packets which have NULL REO
2450 				 * queue descriptor
2451 				 */
2452 				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2453 					pool_id = wbm_err_info.pool_id;
2454 					soc->arch_ops.dp_rx_null_q_desc_handle(
2455 								soc, nbuf,
2456 								rx_tlv_hdr,
2457 								pool_id,
2458 								txrx_peer,
2459 								FALSE,
2460 								link_id);
2461 					break;
2462 				/* TODO */
2463 				/* Add per error code accounting */
2464 				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2465 					if (txrx_peer)
2466 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2467 									  rx.err.jump_2k_err,
2468 									  1,
2469 									  link_id);
2470 
2471 					pool_id = wbm_err_info.pool_id;
2472 
2473 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2474 									   rx_tlv_hdr)) {
2475 						tid =
2476 						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2477 					}
2478 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2479 					hal_rx_msdu_start_msdu_len_get(
2480 						soc->hal_soc, rx_tlv_hdr);
2481 					nbuf->next = NULL;
2482 					dp_2k_jump_handle(soc, nbuf,
2483 							  rx_tlv_hdr,
2484 							  peer_id, tid);
2485 					break;
2486 				case HAL_REO_ERR_REGULAR_FRAME_OOR:
2487 					if (txrx_peer)
2488 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2489 									  rx.err.oor_err,
2490 									  1,
2491 									  link_id);
2492 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2493 									   rx_tlv_hdr)) {
2494 						tid =
2495 							hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2496 					}
2497 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2498 						hal_rx_msdu_start_msdu_len_get(
2499 						soc->hal_soc, rx_tlv_hdr);
2500 					nbuf->next = NULL;
2501 					dp_rx_oor_handle(soc, nbuf,
2502 							 peer_id,
2503 							 rx_tlv_hdr);
2504 					break;
2505 				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2506 				case HAL_REO_ERR_BAR_FRAME_OOR:
2507 					peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
2508 					if (peer) {
2509 						dp_rx_err_handle_bar(soc, peer,
2510 								     nbuf);
2511 						dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
2512 					}
2513 					dp_rx_nbuf_free(nbuf);
2514 					break;
2515 
2516 				case HAL_REO_ERR_PN_CHECK_FAILED:
2517 				case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
2518 					if (txrx_peer)
2519 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2520 									  rx.err.pn_err,
2521 									  1,
2522 									  link_id);
2523 					dp_rx_nbuf_free(nbuf);
2524 					break;
2525 
2526 				default:
2527 					dp_info_rl("Got pkt with REO ERROR: %d",
2528 						   wbm_err_info.reo_err_code);
2529 					dp_rx_nbuf_free(nbuf);
2530 				}
2531 			} else if (wbm_err_info.reo_psh_rsn
2532 					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
2533 				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
2534 						    rx_tlv_hdr,
2535 						    HAL_RX_WBM_ERR_SRC_REO,
2536 						    link_id);
2537 			} else {
2538 				/* should not enter here */
2539 				dp_rx_err_alert("invalid reo push reason %u",
2540 						wbm_err_info.reo_psh_rsn);
2541 				dp_rx_nbuf_free(nbuf);
2542 				qdf_assert_always(0);
2543 			}
2544 		} else if (wbm_err_info.wbm_err_src ==
2545 					HAL_RX_WBM_ERR_SRC_RXDMA) {
2546 			if (wbm_err_info.rxdma_psh_rsn
2547 					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2548 				DP_STATS_INC(soc,
2549 					rx.err.rxdma_error
2550 					[wbm_err_info.rxdma_err_code], 1);
2551 				/* increment @pdev level */
2552 				if (dp_pdev)
2553 					DP_STATS_INC(dp_pdev,
2554 						     err.rxdma_error, 1);
2555 
2556 				switch (wbm_err_info.rxdma_err_code) {
2557 				case HAL_RXDMA_ERR_UNENCRYPTED:
2558 
2559 				case HAL_RXDMA_ERR_WIFI_PARSE:
2560 					if (txrx_peer)
2561 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2562 									  rx.err.rxdma_wifi_parse_err,
2563 									  1,
2564 									  link_id);
2565 
2566 					pool_id = wbm_err_info.pool_id;
2567 					dp_rx_process_rxdma_err(soc, nbuf,
2568 								rx_tlv_hdr,
2569 								txrx_peer,
2570 								wbm_err_info.
2571 								rxdma_err_code,
2572 								pool_id,
2573 								link_id);
2574 					break;
2575 
2576 				case HAL_RXDMA_ERR_TKIP_MIC:
2577 					dp_rx_process_mic_error(soc, nbuf,
2578 								rx_tlv_hdr,
2579 								txrx_peer);
2580 					if (txrx_peer)
2581 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2582 									  rx.err.mic_err,
2583 									  1,
2584 									  link_id);
2585 					break;
2586 
2587 				case HAL_RXDMA_ERR_DECRYPT:
2588 					/* All the TKIP-MIC failures are treated as Decrypt Errors
2589 					 * for QCN9224 Targets
2590 					 */
2591 					is_tkip_mic_err = hal_rx_msdu_end_is_tkip_mic_err(hal_soc, rx_tlv_hdr);
2592 
2593 					if (is_tkip_mic_err && txrx_peer) {
2594 						dp_rx_process_mic_error(soc, nbuf,
2595 									rx_tlv_hdr,
2596 									txrx_peer);
2597 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2598 									  rx.err.mic_err,
2599 									  1,
2600 									  link_id);
2601 						break;
2602 					}
2603 
2604 					if (txrx_peer) {
2605 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2606 									  rx.err.decrypt_err,
2607 									  1,
2608 									  link_id);
2609 						dp_rx_nbuf_free(nbuf);
2610 						break;
2611 					}
2612 
2613 					if (!dp_handle_rxdma_decrypt_err()) {
2614 						dp_rx_nbuf_free(nbuf);
2615 						break;
2616 					}
2617 
2618 					pool_id = wbm_err_info.pool_id;
2619 					err_code = wbm_err_info.rxdma_err_code;
2620 					tlv_hdr = rx_tlv_hdr;
2621 					dp_rx_process_rxdma_err(soc, nbuf,
2622 								tlv_hdr, NULL,
2623 								err_code,
2624 								pool_id,
2625 								link_id);
2626 					break;
2627 				case HAL_RXDMA_MULTICAST_ECHO:
2628 					if (txrx_peer)
2629 						DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
2630 									      rx.mec_drop, 1,
2631 									      qdf_nbuf_len(nbuf),
2632 									      link_id);
2633 					dp_rx_nbuf_free(nbuf);
2634 					break;
2635 				case HAL_RXDMA_UNAUTHORIZED_WDS:
2636 					pool_id = wbm_err_info.pool_id;
2637 					err_code = wbm_err_info.rxdma_err_code;
2638 					tlv_hdr = rx_tlv_hdr;
2639 					dp_rx_process_rxdma_err(soc, nbuf,
2640 								tlv_hdr,
2641 								txrx_peer,
2642 								err_code,
2643 								pool_id,
2644 								link_id);
2645 					break;
2646 				default:
2647 					dp_rx_nbuf_free(nbuf);
2648 					dp_err_rl("RXDMA error %d",
2649 						  wbm_err_info.rxdma_err_code);
2650 				}
2651 			} else if (wbm_err_info.rxdma_psh_rsn
2652 					== HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
2653 				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
2654 						    rx_tlv_hdr,
2655 						    HAL_RX_WBM_ERR_SRC_RXDMA,
2656 						    link_id);
2657 			} else if (wbm_err_info.rxdma_psh_rsn
2658 					== HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
2659 				dp_rx_err_err("rxdma push reason %u",
2660 						wbm_err_info.rxdma_psh_rsn);
2661 				DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
2662 				dp_rx_nbuf_free(nbuf);
2663 			} else {
2664 				/* should not enter here */
2665 				dp_rx_err_alert("invalid rxdma push reason %u",
2666 						wbm_err_info.rxdma_psh_rsn);
2667 				dp_rx_nbuf_free(nbuf);
2668 				qdf_assert_always(0);
2669 			}
2670 		} else {
2671 			/* Should not come here */
2672 			qdf_assert(0);
2673 		}
2674 
2675 		if (txrx_peer)
2676 			dp_txrx_peer_unref_delete(txrx_ref_handle,
2677 						  DP_MOD_ID_RX_ERR);
2678 
2679 		nbuf = next;
2680 	}
2681 	return rx_bufs_used; /* Assume no scale factor for now */
2682 }
2683 
2684 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2685 
2686 /**
2687  * dup_desc_dbg() - dump and assert if duplicate rx desc found
2688  *
2689  * @soc: core DP main context
2690  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2691  * @rx_desc: void pointer to rx descriptor
2692  *
2693  * Return: void
2694  */
2695 static void dup_desc_dbg(struct dp_soc *soc,
2696 			 hal_rxdma_desc_t rxdma_dst_ring_desc,
2697 			 void *rx_desc)
2698 {
2699 	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
2700 	dp_rx_dump_info_and_assert(
2701 			soc,
2702 			soc->rx_rel_ring.hal_srng,
2703 			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
2704 			rx_desc);
2705 }
2706 
2707 /**
2708  * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
2709  *
2710  * @soc: core DP main context
2711  * @mac_id: mac id which is one of 3 mac_ids
2712  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2713  * @head: head of descs list to be freed
2714  * @tail: tail of descs list to be freed
2715  *
2716  * Return: number of msdu in MPDU to be popped
2717  */
2718 static inline uint32_t
2719 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
2720 	hal_rxdma_desc_t rxdma_dst_ring_desc,
2721 	union dp_rx_desc_list_elem_t **head,
2722 	union dp_rx_desc_list_elem_t **tail)
2723 {
2724 	void *rx_msdu_link_desc;
2725 	qdf_nbuf_t msdu;
2726 	qdf_nbuf_t last;
2727 	struct hal_rx_msdu_list msdu_list;
2728 	uint16_t num_msdus;
2729 	struct hal_buf_info buf_info;
2730 	uint32_t rx_bufs_used = 0;
2731 	uint32_t msdu_cnt;
2732 	uint32_t i;
2733 	uint8_t push_reason;
2734 	uint8_t rxdma_error_code = 0;
2735 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
2736 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2737 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
2738 	hal_rxdma_desc_t ring_desc;
2739 	struct rx_desc_pool *rx_desc_pool;
2740 
2741 	if (!pdev) {
2742 		dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
2743 				soc, mac_id);
2744 		return rx_bufs_used;
2745 	}
2746 
2747 	msdu = 0;
2748 
2749 	last = NULL;
2750 
2751 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
2752 				     &buf_info, &msdu_cnt);
2753 
2754 	push_reason =
2755 		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
2756 	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2757 		rxdma_error_code =
2758 			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
2759 	}
2760 
2761 	do {
2762 		rx_msdu_link_desc =
2763 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
2764 
2765 		qdf_assert_always(rx_msdu_link_desc);
2766 
2767 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
2768 				     &msdu_list, &num_msdus);
2769 
2770 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
2771 			/* If the msdus belong to an NSS offloaded radio and
2772 			 * the rbm is not SW3_BM, then return the msdu_link
2773 			 * descriptor without freeing the msdus (nbufs). Let
2774 			 * these buffers be given to the NSS completion ring
2775 			 * for NSS to free them.
2776 			 * Else iterate through the msdu link desc list and
2777 			 * free each msdu in the list.
2778 			 */
2779 			if (msdu_list.rbm[0] !=
2780 				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) &&
2781 			    wlan_cfg_get_dp_pdev_nss_enabled(
2782 							pdev->wlan_cfg_ctx))
2783 				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
2784 			else {
2785 				for (i = 0; i < num_msdus; i++) {
2786 					struct dp_rx_desc *rx_desc =
2787 						soc->arch_ops.
2788 						dp_rx_desc_cookie_2_va(
2789 							soc,
2790 							msdu_list.sw_cookie[i]);
2791 					qdf_assert_always(rx_desc);
2792 					msdu = rx_desc->nbuf;
2793 					/*
2794 					 * This is an unlikely scenario
2795 					 * where the host is reaping
2796 					 * a descriptor which
2797 					 * it already reaped just a while ago
2798 					 * but is yet to replenish
2799 					 * it back to HW.
2800 					 * In this case the host will dump
2801 					 * the last 128 descriptors
2802 					 * including the software descriptor
2803 					 * rx_desc and assert.
2804 					 */
2805 					ring_desc = rxdma_dst_ring_desc;
2806 					if (qdf_unlikely(!rx_desc->in_use)) {
2807 						dup_desc_dbg(soc,
2808 							     ring_desc,
2809 							     rx_desc);
2810 						continue;
2811 					}
2812 
2813 					if (rx_desc->unmapped == 0) {
2814 						rx_desc_pool =
2815 							&soc->rx_desc_buf[rx_desc->pool_id];
2816 						dp_ipa_rx_buf_smmu_mapping_lock(soc);
2817 						dp_rx_nbuf_unmap_pool(soc,
2818 								      rx_desc_pool,
2819 								      msdu);
2820 						rx_desc->unmapped = 1;
2821 						dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2822 					}
2823 
2824 					dp_rx_err_debug("%pK: msdu_nbuf=%pK ",
2825 							soc, msdu);
2826 
2827 					dp_rx_buffer_pool_nbuf_free(soc, msdu,
2828 							rx_desc->pool_id);
2829 					rx_bufs_used++;
2830 					dp_rx_add_to_free_desc_list(head,
2831 						tail, rx_desc);
2832 				}
2833 			}
2834 		} else {
2835 			rxdma_error_code = HAL_RXDMA_ERR_WAR;
2836 		}
2837 
2838 		/*
2839 		 * Store the current link buffer into the local structure
2840 		 * to be used for release purposes.
2841 		 */
2842 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
2843 					     buf_info.paddr, buf_info.sw_cookie,
2844 					     buf_info.rbm);
2845 
2846 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
2847 					      &buf_info);
2848 		dp_rx_link_desc_return_by_addr(soc,
2849 					       (hal_buff_addrinfo_t)
2850 						rx_link_buf_info,
2851 						bm_action);
2852 	} while (buf_info.paddr);
2853 
2854 	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
2855 	if (pdev)
2856 		DP_STATS_INC(pdev, err.rxdma_error, 1);
2857 
2858 	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
2859 		dp_rx_err_err("%pK: Packet received with Decrypt error", soc);
2860 	}
2861 
2862 	return rx_bufs_used;
2863 }
2864 
2865 uint32_t
2866 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2867 		     uint32_t mac_id, uint32_t quota)
2868 {
2869 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2870 	hal_rxdma_desc_t rxdma_dst_ring_desc;
2871 	hal_soc_handle_t hal_soc;
2872 	void *err_dst_srng;
2873 	union dp_rx_desc_list_elem_t *head = NULL;
2874 	union dp_rx_desc_list_elem_t *tail = NULL;
2875 	struct dp_srng *dp_rxdma_srng;
2876 	struct rx_desc_pool *rx_desc_pool;
2877 	uint32_t work_done = 0;
2878 	uint32_t rx_bufs_used = 0;
2879 
2880 	if (!pdev)
2881 		return 0;
2882 
2883 	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;
2884 
2885 	if (!err_dst_srng) {
2886 		dp_rx_err_err("%pK: HAL RXDMA ERR DST Ring Init Failed -- %pK",
2887 			      soc, err_dst_srng);
2888 		return 0;
2889 	}
2890 
2891 	hal_soc = soc->hal_soc;
2892 
2893 	qdf_assert(hal_soc);
2894 
2895 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
2896 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
2897 			      soc, err_dst_srng);
2898 		return 0;
2899 	}
2900 
2901 	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
2902 		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
2903 
2904 		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
2905 						   rxdma_dst_ring_desc,
2906 						   &head, &tail);
2907 	}
2908 
2909 	dp_srng_access_end(int_ctx, soc, err_dst_srng);
2910 
2911 	if (rx_bufs_used) {
2912 		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2913 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2914 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
2915 		} else {
2916 			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
2917 			rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
2918 		}
2919 
2920 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2921 			rx_desc_pool, rx_bufs_used, &head, &tail, false);
2922 
2923 		work_done += rx_bufs_used;
2924 	}
2925 
2926 	return work_done;
2927 }
2928 
2929 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2930 
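/**
 * dp_wbm_int_err_mpdu_pop() - free the MSDUs attached to a link descriptor
 *			       reported through a WBM internal error
 * @soc: core DP main context
 * @mac_id: mac id of the pool owning the link descriptor
 * @rxdma_dst_ring_desc: buffer address info of the link descriptor
 * @head: head of the rx desc free list to be updated
 * @tail: tail of the rx desc free list to be updated
 * @rx_bufs_used: per-pool count of reaped buffers to be updated
 *
 * Walks the MSDU link descriptor chain, unmaps and frees each valid nbuf,
 * moves the rx descriptors to the free list and returns the link
 * descriptors to the WBM idle list.
 *
 * Return: None
 */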
2931 static inline void
2932 dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
2933 			hal_rxdma_desc_t rxdma_dst_ring_desc,
2934 			union dp_rx_desc_list_elem_t **head,
2935 			union dp_rx_desc_list_elem_t **tail,
2936 			uint32_t *rx_bufs_used)
2937 {
2938 	void *rx_msdu_link_desc;
2939 	qdf_nbuf_t msdu;
2940 	qdf_nbuf_t last;
2941 	struct hal_rx_msdu_list msdu_list;
2942 	uint16_t num_msdus;
2943 	struct hal_buf_info buf_info;
2944 	uint32_t msdu_cnt, i;
2945 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
2946 	struct rx_desc_pool *rx_desc_pool;
2947 	struct dp_rx_desc *rx_desc;
2948 
2949 	msdu = 0;
2950 
2951 	last = NULL;
2952 
2953 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
2954 				     &buf_info, &msdu_cnt);
2955 
2956 	do {
2957 		rx_msdu_link_desc =
2958 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
2959 
2960 		if (!rx_msdu_link_desc) {
2961 			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
2962 			break;
2963 		}
2964 
2965 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
2966 				     &msdu_list, &num_msdus);
2967 
2968 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
2969 			for (i = 0; i < num_msdus; i++) {
2970 				if (!dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[i])) {
2971 					dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x",
2972 							  msdu_list.sw_cookie[i]);
2973 					continue;
2974 				}
2975 
2976 				rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
2977 							soc,
2978 							msdu_list.sw_cookie[i]);
2979 				qdf_assert_always(rx_desc);
2980 				rx_desc_pool =
2981 					&soc->rx_desc_buf[rx_desc->pool_id];
2982 				msdu = rx_desc->nbuf;
2983 
2984 				/*
2985 				 * This is an unlikely scenario where the host is reaping
2986 				 * a descriptor which it already reaped just a while ago
2987 				 * but is yet to replenish it back to HW.
2988 				 */
2989 				if (qdf_unlikely(!rx_desc->in_use) ||
2990 				    qdf_unlikely(!msdu)) {
2991 					dp_rx_err_info_rl("Reaping rx_desc not in use!");
2992 					continue;
2993 				}
2994 
2995 				dp_ipa_rx_buf_smmu_mapping_lock(soc);
2996 				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
2997 				rx_desc->unmapped = 1;
2998 				dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2999 
3000 				dp_rx_buffer_pool_nbuf_free(soc, msdu,
3001 							    rx_desc->pool_id);
3002 				rx_bufs_used[rx_desc->pool_id]++;
3003 				dp_rx_add_to_free_desc_list(head,
3004 							    tail, rx_desc);
3005 			}
3006 		}
3007 
3008 		/*
3009 		 * Store the current link buffer into the local structure
3010 		 * to be used for release purposes.
3011 		 */
3012 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3013 					     buf_info.paddr, buf_info.sw_cookie,
3014 					     buf_info.rbm);
3015 
3016 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3017 					      &buf_info);
3018 		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
3019 					rx_link_buf_info,
3020 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
3021 	} while (buf_info.paddr);
3022 }
3023 
3024 void
3025 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
3026 			     uint32_t buf_type)
3027 {
3028 	struct hal_buf_info buf_info = {0};
3029 	struct dp_rx_desc *rx_desc = NULL;
3030 	struct rx_desc_pool *rx_desc_pool;
3031 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0};
3032 	union dp_rx_desc_list_elem_t *head = NULL;
3033 	union dp_rx_desc_list_elem_t *tail = NULL;
3034 	uint8_t pool_id;
3035 	uint8_t mac_id;
3036 
3037 	hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info);
3038 
3039 	if (!buf_info.paddr) {
3040 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
3041 		return;
3042 	}
3043 
3044 	/* buffer_addr_info is the first element of ring_desc */
3045 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc,
3046 				  &buf_info);
3047 
3048 	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
3049 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
3050 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3051 							soc,
3052 							buf_info.sw_cookie);
3053 
3054 		if (rx_desc && rx_desc->nbuf) {
3055 			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
3056 			dp_ipa_rx_buf_smmu_mapping_lock(soc);
3057 			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
3058 					      rx_desc->nbuf);
3059 			rx_desc->unmapped = 1;
3060 			dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3061 
3062 			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
3063 						    rx_desc->pool_id);
3064 			dp_rx_add_to_free_desc_list(&head,
3065 						    &tail,
3066 						    rx_desc);
3067 
3068 			rx_bufs_reaped[rx_desc->pool_id]++;
3069 		}
3070 	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
3071 		pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie);
3072 
3073 		dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc,
3074 					&head, &tail, rx_bufs_reaped);
3075 	}
3076 
3077 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
3078 		struct rx_desc_pool *rx_desc_pool;
3079 		struct dp_srng *dp_rxdma_srng;
3080 
3081 		if (!rx_bufs_reaped[mac_id])
3082 			continue;
3083 
3084 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
3085 		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3086 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
3087 
3088 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3089 					rx_desc_pool,
3090 					rx_bufs_reaped[mac_id],
3091 					&head, &tail, false);
3092 	}
3093 }
3094 
3095 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3096