xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #include "dp_types.h"
22 #include "dp_rx.h"
23 #include "dp_tx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 #include "hal_api.h"
27 #include "qdf_trace.h"
28 #include "qdf_nbuf.h"
29 #include "dp_rx_defrag.h"
30 #include "dp_ipa.h"
31 #ifdef WIFI_MONITOR_SUPPORT
32 #include "dp_htt.h"
33 #include <dp_mon.h>
34 #endif
35 #ifdef FEATURE_WDS
36 #include "dp_txrx_wds.h"
37 #endif
38 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
39 #include "qdf_net_types.h"
40 #include "dp_rx_buffer_pool.h"
41 
42 #define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
43 #define dp_rx_err_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
44 #define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
45 #define dp_rx_err_info(params...) \
46 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
47 #define dp_rx_err_info_rl(params...) \
48 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
49 #define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params)
50 
51 #ifndef QCA_HOST_MODE_WIFI_DISABLED
52 
53 /* Max buffers in invalid peer SG list */
54 #define DP_MAX_INVALID_BUFFERS 10
55 
56 /* Max regular Rx packet routing errors */
57 #define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20
58 #define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10
59 #define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* microseconds */
60 
61 #ifdef FEATURE_MEC
62 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
63 			    struct dp_txrx_peer *txrx_peer,
64 			    uint8_t *rx_tlv_hdr,
65 			    qdf_nbuf_t nbuf)
66 {
67 	struct dp_vdev *vdev = txrx_peer->vdev;
68 	struct dp_pdev *pdev = vdev->pdev;
69 	struct dp_mec_entry *mecentry = NULL;
70 	struct dp_ast_entry *ase = NULL;
71 	uint16_t sa_idx = 0;
72 	uint8_t *data;
73 	/*
74 	 * Multicast Echo Check is required only if vdev is STA and
75 	 * received pkt is a multicast/broadcast pkt; otherwise
76 	 * skip the MEC check.
77 	 */
78 	if (vdev->opmode != wlan_op_mode_sta)
79 		return false;
80 	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
81 		return false;
82 
83 	data = qdf_nbuf_data(nbuf);
84 
85 	/*
86 	 * if the received pkt's src mac addr matches the vdev
87 	 * mac address, then drop the pkt as it is looped back
88 	 */
89 	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
90 			  vdev->mac_addr.raw,
91 			  QDF_MAC_ADDR_SIZE)))
92 		return true;
93 
94 	/*
95 	 * In case of qwrap isolation mode, do not drop loopback packets.
96 	 * In isolation mode, all packets from the wired stations need to go
97 	 * to the root AP and loop back to reach the wireless stations and
98 	 * vice-versa.
99 	 */
100 	if (qdf_unlikely(vdev->isolation_vdev))
101 		return false;
102 
103 	/*
104 	 * if the received pkt's src mac addr matches the MAC addr
105 	 * of a wired PC behind the STA, or the MAC addr of a
106 	 * wireless STA behind the repeater, then drop the pkt
107 	 * as it is looped back
108 	 */
109 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
110 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
111 
112 		if ((sa_idx < 0) ||
113 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
114 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
115 				  "invalid sa_idx: %d", sa_idx);
116 			qdf_assert_always(0);
117 		}
118 
119 		qdf_spin_lock_bh(&soc->ast_lock);
120 		ase = soc->ast_table[sa_idx];
121 
122 		/*
123 		 * this check was not needed since MEC is not dependent on AST,
124 		 * but without it SON has some issues in the dual backhaul
125 		 * scenario. In APS SON mode, a client connected to the RE on
126 		 * 2G sends multicast packets; the RE sends them to the CAP
127 		 * over the 5G backhaul, and the CAP loops them back to the RE
128 		 * on 2G. On receiving them on the 2G STA vap, we assume the
129 		 * client has roamed and kick the client out.
130 		 */
131 		if (ase && (ase->peer_id != txrx_peer->peer_id)) {
132 			qdf_spin_unlock_bh(&soc->ast_lock);
133 			goto drop;
134 		}
135 
136 		qdf_spin_unlock_bh(&soc->ast_lock);
137 	}
138 
139 	qdf_spin_lock_bh(&soc->mec_lock);
140 
141 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
142 						   &data[QDF_MAC_ADDR_SIZE]);
143 	if (!mecentry) {
144 		qdf_spin_unlock_bh(&soc->mec_lock);
145 		return false;
146 	}
147 
148 	qdf_spin_unlock_bh(&soc->mec_lock);
149 
150 drop:
151 	dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT,
152 		       soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));
153 
154 	return true;
155 }
156 #endif
157 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
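
/*
 * Illustrative caller sketch for dp_rx_mcast_echo_check() (editorial
 * example, not driver code; it mirrors the real call site in
 * dp_rx_null_q_desc_handle() further below). When the check reports a
 * looped-back MCBC frame, the caller accounts a MEC drop and frees the
 * buffer:
 *
 *	if (!soc->mec_fw_offload &&
 *	    dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
 *		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
 *					      qdf_nbuf_len(nbuf));
 *		dp_rx_nbuf_free(nbuf);
 *		return;
 *	}
 */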
158 
159 void dp_rx_link_desc_refill_duplicate_check(
160 				struct dp_soc *soc,
161 				struct hal_buf_info *buf_info,
162 				hal_buff_addrinfo_t ring_buf_info)
163 {
164 	struct hal_buf_info current_link_desc_buf_info = { 0 };
165 
166 	/* do duplicate link desc address check */
167 	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
168 					  &current_link_desc_buf_info);
169 
170 	/*
171 	 * TODO - Check if the hal soc api call can be removed
172 	 * since the cookie is just used for print.
173 	 * buffer_addr_info is the first element of ring_desc
174 	 */
175 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
176 				  (uint32_t *)ring_buf_info,
177 				  &current_link_desc_buf_info);
178 
179 	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
180 			 buf_info->paddr)) {
181 		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
182 			   current_link_desc_buf_info.paddr,
183 			   current_link_desc_buf_info.sw_cookie);
184 		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
185 	}
186 	*buf_info = current_link_desc_buf_info;
187 }
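
/*
 * Worked example for the duplicate-refill check above (hypothetical
 * addresses): if the descriptor being returned has paddr 0x4abc000 and
 * buf_info (the last address recorded, e.g. in
 * soc->last_op_info.wbm_rel_link_desc) also holds 0x4abc000, the same
 * link descriptor is being refilled twice; rx.err.dup_refill_link_desc
 * is incremented and the rate-limited log fires. buf_info is then
 * overwritten so the next call compares against this descriptor.
 */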
188 
189 /**
190  * dp_rx_link_desc_return_by_addr - Return an MPDU link descriptor to HW
191  *					(WBM) by address
192  *
193  * @soc: core DP main context
194  * @link_desc_addr: link descriptor addr
195  * @bm_action: buffer manager action to perform on the link descriptor
196  * Return: QDF_STATUS
197  */
198 QDF_STATUS
199 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
200 			       hal_buff_addrinfo_t link_desc_addr,
201 			       uint8_t bm_action)
202 {
203 	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
204 	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
205 	hal_soc_handle_t hal_soc = soc->hal_soc;
206 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
207 	void *src_srng_desc;
208 
209 	if (!wbm_rel_srng) {
210 		dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc);
211 		return status;
212 	}
213 
214 	/* do duplicate link desc address check */
215 	dp_rx_link_desc_refill_duplicate_check(
216 				soc,
217 				&soc->last_op_info.wbm_rel_link_desc,
218 				link_desc_addr);
219 
220 	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
221 
222 		/* TODO */
223 		/*
224 		 * Need API to convert from hal_ring pointer to
225 		 * Ring Type / Ring Id combo
226 		 */
227 		dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK",
228 			      soc, wbm_rel_srng);
229 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
230 		goto done;
231 	}
232 	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
233 	if (qdf_likely(src_srng_desc)) {
234 		/* Return link descriptor through WBM ring (SW2WBM)*/
235 		hal_rx_msdu_link_desc_set(hal_soc,
236 				src_srng_desc, link_desc_addr, bm_action);
237 		status = QDF_STATUS_SUCCESS;
238 	} else {
239 		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
240 
241 		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);
242 
243 		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
244 			   srng->ring_id,
245 			   soc->stats.rx.err.hal_ring_access_full_fail);
246 		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
247 			   *srng->u.src_ring.hp_addr,
248 			   srng->u.src_ring.reap_hp,
249 			   *srng->u.src_ring.tp_addr,
250 			   srng->u.src_ring.cached_tp);
251 		QDF_BUG(0);
252 	}
253 done:
254 	hal_srng_access_end(hal_soc, wbm_rel_srng);
255 	return status;
256 
257 }
258 
259 qdf_export_symbol(dp_rx_link_desc_return_by_addr);
260 
261 /**
262  * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
263  *				(WBM), following error handling
264  *
265  * @soc: core DP main context
266  * @ring_desc: opaque pointer to the REO error ring descriptor
267  * @bm_action: buffer manager action to perform on the link descriptor
268  * Return: QDF_STATUS
269  */
270 QDF_STATUS
271 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
272 		       uint8_t bm_action)
273 {
274 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
275 
276 	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
277 }
278 
279 #ifndef QCA_HOST_MODE_WIFI_DISABLED
280 
281 /**
282  * dp_rx_msdus_drop() - Drops all MSDU's per MPDU
283  *
284  * @soc: core txrx main context
285  * @ring_desc: opaque pointer to the REO error ring descriptor
286  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
287  * @mac_id: mac id output parameter, set to the pool id of the
288  *	    dropped descriptors
289  * @quota: No. of units (packets) that can be serviced in one shot.
290  *
291  * This function is used to drop all MSDUs in an MPDU
292  *
293  * Return: uint32_t: No. of elements processed
294  */
295 static uint32_t
296 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
297 		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
298 		 uint8_t *mac_id,
299 		 uint32_t quota)
300 {
301 	uint32_t rx_bufs_used = 0;
302 	void *link_desc_va;
303 	struct hal_buf_info buf_info;
304 	struct dp_pdev *pdev;
305 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
306 	int i;
307 	uint8_t *rx_tlv_hdr;
308 	uint32_t tid;
309 	struct rx_desc_pool *rx_desc_pool;
310 	struct dp_rx_desc *rx_desc;
311 	/* First field in REO Dst ring Desc is buffer_addr_info */
312 	void *buf_addr_info = ring_desc;
313 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
314 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
315 
316 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info);
317 
318 	/* buffer_addr_info is the first element of ring_desc */
319 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
320 				  (uint32_t *)ring_desc,
321 				  &buf_info);
322 
323 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
324 	if (!link_desc_va) {
325 		dp_rx_err_debug("link desc va is null, soc %pK", soc);
326 		return rx_bufs_used;
327 	}
328 
329 more_msdu_link_desc:
330 	/* No UNMAP required -- this is "malloc_consistent" memory */
331 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
332 			     &mpdu_desc_info->msdu_count);
333 
334 	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
335 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
336 						soc, msdu_list.sw_cookie[i]);
337 
338 		qdf_assert_always(rx_desc);
339 
340 		/* all buffers from an MSDU link belong to the same pdev */
341 		*mac_id = rx_desc->pool_id;
342 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
343 		if (!pdev) {
344 			dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
345 					soc, rx_desc->pool_id);
346 			return rx_bufs_used;
347 		}
348 
349 		if (!dp_rx_desc_check_magic(rx_desc)) {
350 			dp_rx_err_err("%pK: Invalid rx_desc cookie=%d",
351 				      soc, msdu_list.sw_cookie[i]);
352 			return rx_bufs_used;
353 		}
354 
355 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
356 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
357 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
358 		rx_desc->unmapped = 1;
359 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
360 
361 		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);
362 
363 		rx_bufs_used++;
364 		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
365 						rx_desc->rx_buf_start);
366 		dp_rx_err_err("%pK: Packet received with PN error for tid :%d",
367 			      soc, tid);
368 
369 		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
370 		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
371 			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);
372 
373 		dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info,
374 				      rx_desc->nbuf,
375 				      QDF_TX_RX_STATUS_DROP, true);
376 		/* Just free the buffers */
377 		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);
378 
379 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
380 					    &pdev->free_list_tail, rx_desc);
381 	}
382 
383 	/*
384 	 * If the msdus are spread across multiple link descriptors,
385 	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
386 	 * spread across multiple buffers). Hence, it is
387 	 * necessary to check the next link_descriptor and release
388 	 * all the msdu's that are part of it.
389 	 */
390 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
391 			link_desc_va,
392 			&next_link_desc_addr_info);
393 
394 	if (hal_rx_is_buf_addr_info_valid(
395 				&next_link_desc_addr_info)) {
396 		/* Clear the next link desc info for the current link_desc */
397 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
398 
399 		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
400 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
401 		hal_rx_buffer_addr_info_get_paddr(
402 				&next_link_desc_addr_info,
403 				&buf_info);
404 		/* buffer_addr_info is the first element of ring_desc */
405 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
406 					  (uint32_t *)&next_link_desc_addr_info,
407 					  &buf_info);
408 		cur_link_desc_addr_info = next_link_desc_addr_info;
409 		buf_addr_info = &cur_link_desc_addr_info;
410 
411 		link_desc_va =
412 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
413 
414 		goto more_msdu_link_desc;
415 	}
416 	quota--;
417 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
418 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
419 	return rx_bufs_used;
420 }
421 
422 /**
423  * dp_rx_pn_error_handle() - Handles PN check errors
424  *
425  * @soc: core txrx main context
426  * @ring_desc: opaque pointer to the REO error ring descriptor
427  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
428  * @mac_id: mac id output parameter, set to the pool id of the
429  *	    dropped descriptors
430  * @quota: No. of units (packets) that can be serviced in one shot.
431  *
432  * This function implements PN error handling
433  * If the peer is configured to ignore the PN check errors
434  * or if DP feels that this frame is still OK, the frame can be
435  * re-injected back to REO to use some of the other features
436  * of REO e.g. duplicate detection/routing to other cores
437  *
438  * Return: uint32_t: No. of elements processed
439  */
440 static uint32_t
441 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
442 		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
443 		      uint8_t *mac_id,
444 		      uint32_t quota)
445 {
446 	uint16_t peer_id;
447 	uint32_t rx_bufs_used = 0;
448 	struct dp_txrx_peer *txrx_peer;
449 	bool peer_pn_policy = false;
450 	dp_txrx_ref_handle txrx_ref_handle = NULL;
451 
452 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
453 					       mpdu_desc_info->peer_meta_data);
454 
455 
456 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
457 						   &txrx_ref_handle,
458 						   DP_MOD_ID_RX_ERR);
459 
460 	if (qdf_likely(txrx_peer)) {
461 		/*
462 		 * TODO: Check for peer specific policies & set peer_pn_policy
463 		 */
464 		dp_err_rl("discard rx due to PN error for peer %pK",
465 			  txrx_peer);
466 
467 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
468 	}
469 	dp_rx_err_err("%pK: Packet received with PN error", soc);
470 
471 	/* No peer PN policy -- definitely drop */
472 	if (!peer_pn_policy)
473 		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
474 						mpdu_desc_info,
475 						mac_id, quota);
476 
477 	return rx_bufs_used;
478 }
479 
480 #ifdef DP_RX_DELIVER_ALL_OOR_FRAMES
481 /**
482  * dp_rx_deliver_oor_frame() - deliver OOR frames to stack
483  * @soc: Datapath soc handler
484  * @txrx_peer: pointer to DP txrx peer
485  * @nbuf: pointer to the skb of RX frame
486  * @frame_mask: the mask for the special frame needed
487  * @rx_tlv_hdr: start of rx tlv header
488  *
489  * Note: msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and a
490  * single nbuf is expected.
491  *
492  * Return: true - nbuf has been delivered to stack, false - not.
493  */
494 static bool
495 dp_rx_deliver_oor_frame(struct dp_soc *soc,
496 			struct dp_txrx_peer *txrx_peer,
497 			qdf_nbuf_t nbuf, uint32_t frame_mask,
498 			uint8_t *rx_tlv_hdr)
499 {
500 	uint32_t l2_hdr_offset = 0;
501 	uint16_t msdu_len = 0;
502 	uint32_t skip_len;
503 
504 	l2_hdr_offset =
505 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
506 
507 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
508 		skip_len = l2_hdr_offset;
509 	} else {
510 		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
511 		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
512 		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
513 	}
514 
515 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
516 	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
517 	qdf_nbuf_pull_head(nbuf, skip_len);
518 	qdf_nbuf_set_exc_frame(nbuf, 1);
519 
520 	dp_info_rl("OOR frame, mpdu sn 0x%x",
521 		   hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
522 	dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL);
523 	return true;
524 }
525 
526 #else
527 static bool
528 dp_rx_deliver_oor_frame(struct dp_soc *soc,
529 			struct dp_txrx_peer *txrx_peer,
530 			qdf_nbuf_t nbuf, uint32_t frame_mask,
531 			uint8_t *rx_tlv_hdr)
532 {
533 	return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
534 					   rx_tlv_hdr);
535 }
536 #endif
537 
538 /**
539  * dp_rx_oor_handle() - Handles the msdu which is OOR error
540  *
541  * @soc: core txrx main context
542  * @nbuf: pointer to msdu skb
543  * @peer_id: dp peer ID
544  * @rx_tlv_hdr: start of rx tlv header
545  *
546  * This function processes the msdu delivered from the REO2TCL
547  * ring with error type OOR
548  *
549  * Return: None
550  */
551 static void
552 dp_rx_oor_handle(struct dp_soc *soc,
553 		 qdf_nbuf_t nbuf,
554 		 uint16_t peer_id,
555 		 uint8_t *rx_tlv_hdr)
556 {
557 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
558 				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
559 	struct dp_txrx_peer *txrx_peer = NULL;
560 	dp_txrx_ref_handle txrx_ref_handle = NULL;
561 
562 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
563 						   &txrx_ref_handle,
564 						   DP_MOD_ID_RX_ERR);
565 	if (!txrx_peer) {
566 		dp_info_rl("peer not found");
567 		goto free_nbuf;
568 	}
569 
570 	if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask,
571 				    rx_tlv_hdr)) {
572 		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
573 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
574 		return;
575 	}
576 
577 free_nbuf:
578 	if (txrx_peer)
579 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
580 
581 	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
582 	dp_rx_nbuf_free(nbuf);
583 }
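
/*
 * Note on the frame_mask used above (illustrative, assuming
 * dp_rx_deliver_special_frame() matches the mask bits against the
 * parsed packet type): with DP_RX_DELIVER_ALL_OOR_FRAMES disabled,
 * only "special" frames survive an OOR error, e.g.
 *
 *	ARP request    -> FRAME_MASK_IPV4_ARP set  -> delivered to stack
 *	DHCP discover  -> FRAME_MASK_IPV4_DHCP set -> delivered to stack
 *	plain TCP data -> no mask bit set          -> freed and counted
 *	                                              in rx.err.reo_err_oor_drop
 */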
584 
585 /**
586  * dp_rx_err_nbuf_pn_check() - Check if the PN number of this current packet
587  *				is a monotonous increment of packet number
588  *				from the previous successfully re-ordered
589  *				frame.
590  * @soc: Datapath SOC handle
591  * @ring_desc: REO ring descriptor
592  * @nbuf: Current packet
593  *
594  * Return: QDF_STATUS_SUCCESS, if the pn check passes, else QDF_STATUS_E_FAILURE
595  */
596 static inline QDF_STATUS
597 dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc,
598 			qdf_nbuf_t nbuf)
599 {
600 	uint64_t prev_pn, curr_pn[2];
601 
602 	if (!hal_rx_encryption_info_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
603 		return QDF_STATUS_SUCCESS;
604 
605 	hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn);
606 	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), curr_pn);
607 
608 	if (curr_pn[0] > prev_pn)
609 		return QDF_STATUS_SUCCESS;
610 
611 	return QDF_STATUS_E_FAILURE;
612 }
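
/*
 * Worked example for the PN check above (hypothetical numbers): if the
 * REO descriptor reports prev_pn = 0x100 for the last successfully
 * re-ordered frame and the TLVs of the current nbuf carry
 * curr_pn[0] = 0x101, the PN has advanced and QDF_STATUS_SUCCESS is
 * returned. A replayed frame with curr_pn[0] = 0xff (or a duplicate
 * with 0x100) fails the strict "curr_pn[0] > prev_pn" comparison and
 * QDF_STATUS_E_FAILURE is returned, so the caller drops it. Note that
 * only the low 64 bits (curr_pn[0]) take part in the comparison.
 */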
613 
614 #ifdef WLAN_SKIP_BAR_UPDATE
615 static
616 void dp_rx_err_handle_bar(struct dp_soc *soc,
617 			  struct dp_peer *peer,
618 			  qdf_nbuf_t nbuf)
619 {
620 	dp_info_rl("BAR update to H.W is skipped");
621 	DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
622 }
623 #else
624 static
625 void dp_rx_err_handle_bar(struct dp_soc *soc,
626 			  struct dp_peer *peer,
627 			  qdf_nbuf_t nbuf)
628 {
629 	uint8_t *rx_tlv_hdr;
630 	unsigned char type, subtype;
631 	uint16_t start_seq_num;
632 	uint32_t tid;
633 	QDF_STATUS status;
634 	struct ieee80211_frame_bar *bar;
635 
636 	/*
637 	 * 1. Is this a BAR frame? If not, discard it.
638 	 * 2. If it is, get the peer id, tid, ssn
639 	 * 2a. Do a tid update
640 	 */
641 
642 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
643 	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size);
644 
645 	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
646 	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
647 
648 	if (!(type == IEEE80211_FC0_TYPE_CTL &&
649 	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
650 		dp_err_rl("Not a BAR frame!");
651 		return;
652 	}
653 
654 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
655 	qdf_assert_always(tid < DP_MAX_TIDS);
656 
657 	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
658 
659 	dp_info_rl("tid %u window_size %u start_seq_num %u",
660 		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);
661 
662 	status = dp_rx_tid_update_wifi3(peer, tid,
663 					peer->rx_tid[tid].ba_win_size,
664 					start_seq_num,
665 					true);
666 	if (status != QDF_STATUS_SUCCESS) {
667 		dp_err_rl("failed to handle bar frame update rx tid");
668 		DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
669 	} else {
670 		DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
671 	}
672 }
673 #endif
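
/*
 * Worked example for the SSN extraction in dp_rx_err_handle_bar()
 * (hypothetical value): the BAR's i_seq field packs the fragment
 * number in bits 0-3 and the sequence number in bits 4-15, so with
 * IEEE80211_SEQ_SEQ_SHIFT == 4:
 *
 *	le16toh(bar->i_seq)         == 0x0500
 *	start_seq_num = 0x0500 >> 4 == 0x50 (decimal 80)
 *
 * dp_rx_tid_update_wifi3() then moves the REO window of the tid to
 * start at sequence 80, keeping the negotiated BA window size.
 */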
674 
675 /**
676  * _dp_rx_bar_frame_handle(): Core of the BAR frame handling
677  * @soc: Datapath SoC handle
678  * @nbuf: packet being processed
679  * @mpdu_desc_info: mpdu desc info for the current packet
680  * @tid: tid on which the packet arrived
681  * @err_status: Flag to indicate if REO encountered an error while routing this
682  *		frame
683  * @error_code: REO error code
684  *
685  * Return: None
686  */
687 static void
688 _dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
689 			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
690 			uint32_t tid, uint8_t err_status, uint32_t error_code)
691 {
692 	uint16_t peer_id;
693 	struct dp_peer *peer;
694 
695 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
696 					       mpdu_desc_info->peer_meta_data);
697 	peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
698 	if (!peer)
699 		return;
700 
701 	dp_info("BAR frame: "
702 		" peer_id = %d"
703 		" tid = %u"
704 		" SSN = %d"
705 		" error status = %d",
706 		peer->peer_id,
707 		tid,
708 		mpdu_desc_info->mpdu_seq,
709 		err_status);
710 
711 	if (err_status == HAL_REO_ERROR_DETECTED) {
712 		switch (error_code) {
713 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
714 		case HAL_REO_ERR_BAR_FRAME_OOR:
715 			dp_rx_err_handle_bar(soc, peer, nbuf);
716 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
717 			break;
718 		default:
719 			DP_STATS_INC(soc, rx.bar_frame, 1);
720 		}
721 	}
722 
723 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
724 }
725 
726 #ifdef DP_INVALID_PEER_ASSERT
727 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
728 		do {                                \
729 			qdf_assert_always(!(head)); \
730 			qdf_assert_always(!(tail)); \
731 		} while (0)
732 #else
733 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
734 #endif
735 
736 /**
737  * dp_rx_chain_msdus() - Function to chain all msdus of an mpdu
738  *                       to pdev invalid peer list
739  *
740  * @soc: core DP main context
741  * @nbuf: Buffer pointer
742  * @rx_tlv_hdr: start of rx tlv header
743  * @mac_id: mac id
744  *
745  * Return: bool: true for last msdu of mpdu
746  */
747 static bool
748 dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf,
749 		  uint8_t *rx_tlv_hdr, uint8_t mac_id)
750 {
751 	bool mpdu_done = false;
752 	qdf_nbuf_t curr_nbuf = NULL;
753 	qdf_nbuf_t tmp_nbuf = NULL;
754 
755 	/* TODO: Currently only single radio is supported, hence
756 	 * pdev hard coded to '0' index
757 	 */
758 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
759 
760 	if (!dp_pdev) {
761 		dp_rx_err_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
762 		return mpdu_done;
763 	}
764 	/* if the invalid peer SG list has max values, free the buffers in the
765 	 * list and treat the current buffer as the start of the list
766 	 *
767 	 * the current logic to detect the last buffer from attn_tlv is not
768 	 * reliable in the OFDMA UL scenario, hence add a max buffers check to
769 	 * avoid list pile-up
770 	 */
771 	if (!dp_pdev->first_nbuf ||
772 	    (dp_pdev->invalid_peer_head_msdu &&
773 	    QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
774 	    (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
775 		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
776 		dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc,
777 						      rx_tlv_hdr);
778 		dp_pdev->first_nbuf = true;
779 
780 		/* If the new nbuf received is the first msdu of the
781 		 * amsdu and there are msdus in the invalid peer msdu
782 		 * list, then let us free all the msdus of the invalid
783 		 * peer msdu list.
784 		 * This scenario can happen when we start receiving a
785 		 * new a-msdu even before the previous a-msdu is completely
786 		 * received.
787 		 */
788 		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
789 		while (curr_nbuf) {
790 			tmp_nbuf = curr_nbuf->next;
791 			dp_rx_nbuf_free(curr_nbuf);
792 			curr_nbuf = tmp_nbuf;
793 		}
794 
795 		dp_pdev->invalid_peer_head_msdu = NULL;
796 		dp_pdev->invalid_peer_tail_msdu = NULL;
797 
798 		dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr);
799 	}
800 
801 	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(soc->hal_soc,
802 							    rx_tlv_hdr) &&
803 	    hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
804 		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
805 		qdf_assert_always(dp_pdev->first_nbuf == true);
806 		dp_pdev->first_nbuf = false;
807 		mpdu_done = true;
808 	}
809 
810 	/*
811 	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
812 	 * should be NULL here; add this check for debugging purposes
813 	 * in case of corner cases.
814 	 */
815 	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
816 					dp_pdev->invalid_peer_tail_msdu);
817 	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
818 				dp_pdev->invalid_peer_tail_msdu,
819 				nbuf);
820 
821 	return mpdu_done;
822 }
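
/*
 * Illustrative caller sketch for dp_rx_chain_msdus() (editorial
 * example; it mirrors the real call site in dp_rx_null_q_desc_handle()
 * below). The chained list is handed to the invalid-peer handler, and
 * the list heads are reset only once the mpdu is complete:
 *
 *	mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
 *	dp_rx_process_invalid_peer_wrapper(soc,
 *			pdev->invalid_peer_head_msdu,
 *			mpdu_done, pool_id);
 *	if (mpdu_done) {
 *		pdev->invalid_peer_head_msdu = NULL;
 *		pdev->invalid_peer_tail_msdu = NULL;
 *	}
 */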
823 
824 /**
825  * dp_rx_bar_frame_handle() - Function to handle err BAR frames
826  * @soc: core DP main context
827  * @ring_desc: Hal ring desc
828  * @rx_desc: dp rx desc
829  * @mpdu_desc_info: mpdu desc info
830  * @err_status: REO error status; @err_code: REO error code
831  * Handle the error BAR frames received. Ensure the SOC level
832  * stats are updated based on the REO error code. The BAR frames
833  * are further processed by updating the Rx tids with the start
834  * sequence number (SSN) and BA window size. Desc is returned
835  * to the free desc list
836  *
837  * Return: none
838  */
839 static void
840 dp_rx_bar_frame_handle(struct dp_soc *soc,
841 		       hal_ring_desc_t ring_desc,
842 		       struct dp_rx_desc *rx_desc,
843 		       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
844 		       uint8_t err_status,
845 		       uint32_t err_code)
846 {
847 	qdf_nbuf_t nbuf;
848 	struct dp_pdev *pdev;
849 	struct rx_desc_pool *rx_desc_pool;
850 	uint8_t *rx_tlv_hdr;
851 	uint32_t tid;
852 
853 	nbuf = rx_desc->nbuf;
854 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
855 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
856 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
857 	rx_desc->unmapped = 1;
858 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
859 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
860 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
861 					rx_tlv_hdr);
862 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
863 
864 	_dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status,
865 				err_code);
866 	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf,
867 			      QDF_TX_RX_STATUS_DROP, true);
868 	dp_rx_link_desc_return(soc, ring_desc,
869 			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
870 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
871 				    rx_desc->pool_id);
872 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
873 				    &pdev->free_list_tail,
874 				    rx_desc);
875 }
876 
877 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
878 
879 /**
880  * dp_2k_jump_handle() - Function to handle 2k jump exception
881  *                        on WBM ring
882  *
883  * @soc: core DP main context
884  * @nbuf: buffer pointer
885  * @rx_tlv_hdr: start of rx tlv header
886  * @peer_id: peer id of first msdu
887  * @tid: Tid for which exception occurred
888  *
889  * This function handles 2k jump violations arising out
890  * of receiving aggregates in non BA case. This typically
891  * may happen if aggregates are received on a QOS enabled TID
892  * while Rx window size is still initialized to value of 2. Or
893  * it may also happen if negotiated window size is 1 but peer
894  * sends aggregates.
895  * Return: None
896  */
897 
898 void
899 dp_2k_jump_handle(struct dp_soc *soc,
900 		  qdf_nbuf_t nbuf,
901 		  uint8_t *rx_tlv_hdr,
902 		  uint16_t peer_id,
903 		  uint8_t tid)
904 {
905 	struct dp_peer *peer = NULL;
906 	struct dp_rx_tid *rx_tid = NULL;
907 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;
908 
909 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
910 	if (!peer) {
911 		dp_rx_err_info_rl("%pK: peer not found", soc);
912 		goto free_nbuf;
913 	}
914 
915 	if (tid >= DP_MAX_TIDS) {
916 		dp_info_rl("invalid tid");
917 		goto nbuf_deliver;
918 	}
919 
920 	rx_tid = &peer->rx_tid[tid];
921 	qdf_spin_lock_bh(&rx_tid->tid_lock);
922 
923 	/* only if BA session is active, allow sending Delba */
924 	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
925 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
926 		goto nbuf_deliver;
927 	}
928 
929 	if (!rx_tid->delba_tx_status) {
930 		rx_tid->delba_tx_retry++;
931 		rx_tid->delba_tx_status = 1;
932 		rx_tid->delba_rcode =
933 			IEEE80211_REASON_QOS_SETUP_REQUIRED;
934 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
935 		if (soc->cdp_soc.ol_ops->send_delba) {
936 			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
937 				     1);
938 			soc->cdp_soc.ol_ops->send_delba(
939 					peer->vdev->pdev->soc->ctrl_psoc,
940 					peer->vdev->vdev_id,
941 					peer->mac_addr.raw,
942 					tid,
943 					rx_tid->delba_rcode,
944 					CDP_DELBA_2K_JUMP);
945 		}
946 	} else {
947 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
948 	}
949 
950 nbuf_deliver:
951 	if (dp_rx_deliver_special_frame(soc, peer->txrx_peer, nbuf, frame_mask,
952 					rx_tlv_hdr)) {
953 		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
954 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
955 		return;
956 	}
957 
958 free_nbuf:
959 	if (peer)
960 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
961 	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
962 	dp_rx_nbuf_free(nbuf);
963 }
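
/*
 * Illustrative sketch (editorial, not driver code): in the REO error
 * path below, dp_2k_jump_handle() is reached for the 2k-jump error
 * codes, with the tid recovered from the first msdu's TLVs, e.g.
 *
 *	case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
 *		dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
 *				  peer_id, tid);
 *		break;
 */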
964 
965 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
966     defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
967 /**
968  * dp_rx_null_q_handle_invalid_peer_id_exception() - handle invalid peer_id exception
969  * @soc: pointer to dp_soc struct
970  * @pool_id: Pool id to find dp_pdev
971  * @rx_tlv_hdr: TLV header of received packet
972  * @nbuf: SKB
973  *
974  * In certain types of packets, if the peer_id is not correct then the
975  * driver may not be able to find the peer. Try finding the peer by
976  * addr_2 of the received MPDU. If the peer is found, then most likely
977  * the sw_peer_id & ast_idx are corrupted.
978  *
979  * Return: True if you find the peer by addr_2 of received MPDU else false
980  */
981 static bool
982 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
983 					      uint8_t pool_id,
984 					      uint8_t *rx_tlv_hdr,
985 					      qdf_nbuf_t nbuf)
986 {
987 	struct dp_peer *peer = NULL;
988 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
989 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
990 	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;
991 
992 	if (!pdev) {
993 		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
994 				soc, pool_id);
995 		return false;
996 	}
997 	/*
998 	 * WAR - In certain types of packets, if the peer_id is not correct
999 	 * then the driver may not be able to find the peer. Try finding the
1000 	 * peer by addr_2 of the received MPDU
1001 	 */
1002 	if (wh)
1003 		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
1004 					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
1005 	if (peer) {
1006 		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
1007 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1008 				     QDF_TRACE_LEVEL_DEBUG);
1009 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
1010 				 1, qdf_nbuf_len(nbuf));
1011 		dp_rx_nbuf_free(nbuf);
1012 
1013 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
1014 		return true;
1015 	}
1016 	return false;
1017 }
1018 
1019 /**
1020  * dp_rx_check_pkt_len() - Check for pktlen validity
1021  * @soc: DP SOC context
1022  * @pkt_len: computed length of the pkt from caller in bytes
1023  *
1024  * Return: true if pkt_len > RX_DATA_BUFFER_SIZE, else return false
1025  *
1026  */
1027 static inline
1028 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
1029 {
1030 	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
1031 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
1032 				 1, pkt_len);
1033 		return true;
1034 	} else {
1035 		return false;
1036 	}
1037 }
1038 
1039 #else
1040 static inline bool
1041 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
1042 					      uint8_t pool_id,
1043 					      uint8_t *rx_tlv_hdr,
1044 					      qdf_nbuf_t nbuf)
1045 {
1046 	return false;
1047 }
1048 
1049 static inline
1050 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
1051 {
1052 	return false;
1053 }
1054 
1055 #endif
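
/*
 * Worked example for dp_rx_check_pkt_len() (hypothetical numbers):
 * callers pass the total length including the Rx TLVs, e.g.
 *
 *	pkt_len = msdu_len + l3_hdr_pad + soc->rx_pkt_tlv_size
 *	        = 1500     + 2          + 128 (chip dependent)
 *	        = 1630
 *
 * With an RX_DATA_BUFFER_SIZE of 2048 this returns false and
 * processing continues; a corrupted msdu_len that pushes pkt_len past
 * RX_DATA_BUFFER_SIZE returns true, the rx_invalid_pkt_len stat is
 * bumped, and the caller drops the nbuf.
 */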
1056 
1057 /*
1058  * dp_rx_deliver_to_osif_stack() - function to deliver rx pkts to stack
1059  * @soc: DP soc
1060  * @vdev: DP vdev handle
1061  * @txrx_peer: pointer to the txrx_peer object
1062  * @nbuf: skb list head
1063  * @tail: skb list tail
1064  * @is_eapol: eapol pkt check
1065  *
1066  * Return: None
1067  */
1068 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
1069 static inline void
1070 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
1071 			    struct dp_vdev *vdev,
1072 			    struct dp_txrx_peer *txrx_peer,
1073 			    qdf_nbuf_t nbuf,
1074 			    qdf_nbuf_t tail,
1075 			    bool is_eapol)
1076 {
1077 	if (is_eapol && soc->eapol_over_control_port)
1078 		dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
1079 	else
1080 		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
1081 }
1082 #else
1083 static inline void
1084 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
1085 			    struct dp_vdev *vdev,
1086 			    struct dp_txrx_peer *txrx_peer,
1087 			    qdf_nbuf_t nbuf,
1088 			    qdf_nbuf_t tail,
1089 			    bool is_eapol)
1090 {
1091 	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
1092 }
1093 #endif
1094 
1095 #ifdef WLAN_FEATURE_11BE_MLO
1096 /*
1097  * dp_rx_err_match_dhost() - function to check whether dest-mac is correct
1098  * @eh: Ethernet header of incoming packet
1099  * @vdev: dp_vdev object of the VAP on which this data packet is received
1100  *
1101  * Return: 1 if the destination mac is correct,
1102  *         0 if this frame is not correctly destined to this VAP/MLD
1103  */
1104 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
1105 {
1106 	return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
1107 			     QDF_MAC_ADDR_SIZE) == 0) ||
1108 		(qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
1109 			     QDF_MAC_ADDR_SIZE) == 0));
1110 }
1111 
1112 #else
1113 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
1114 {
1115 	return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
1116 			    QDF_MAC_ADDR_SIZE) == 0);
1117 }
1118 #endif
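
/*
 * Illustrative caller sketch for dp_rx_err_match_dhost() (editorial
 * example; it mirrors the unauthorized-peer handling in
 * dp_rx_null_q_desc_handle() below). EAPOL/WAPI frames from a
 * not-yet-authorized peer are delivered only when actually destined
 * to this VAP (or, for 11be, its MLD address):
 *
 *	if (!txrx_peer->authorize) {
 *		is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
 *			   qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
 *		if (!is_eapol || !dp_rx_err_match_dhost(eh, vdev))
 *			goto drop_nbuf;
 *	}
 */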
1119 
1120 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1121 
1122 /**
1123  * dp_rx_err_drop_3addr_mcast() - Check if feature drop_3addr_mcast is enabled
1124  *				  If so, drop the multicast frame.
1125  * @vdev: datapath vdev
1126  * @rx_tlv_hdr: TLV header
1127  *
1128  * Return: true if packet is to be dropped,
1129  *	false, if packet is not dropped.
1130  */
1131 static bool
1132 dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
1133 {
1134 	struct dp_soc *soc = vdev->pdev->soc;
1135 
1136 	if (!vdev->drop_3addr_mcast)
1137 		return false;
1138 
1139 	if (vdev->opmode != wlan_op_mode_sta)
1140 		return false;
1141 
1142 	if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
1143 		return true;
1144 
1145 	return false;
1146 }
1147 
1148 /**
1149  * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed
1150  *				for this frame received in REO error ring.
1151  * @soc: Datapath SOC handle
1152  * @error: REO error detected or not
1153  * @error_code: Error code in case of REO error
1154  *
1155  * Return: true if the pn check is needed in software,
1156  *	false if the pn check is not needed.
1157  */
1158 static inline bool
1159 dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
1160 			     uint32_t error_code)
1161 {
1162 	return (soc->features.pn_in_reo_dest &&
1163 		(error == HAL_REO_ERROR_DETECTED &&
1164 		 (hal_rx_reo_is_2k_jump(error_code) ||
1165 		  hal_rx_reo_is_oor_error(error_code) ||
1166 		  hal_rx_reo_is_bar_oor_2k_jump(error_code))));
1167 }
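
/*
 * Example outcomes for dp_rx_err_is_pn_check_needed(), assuming
 * soc->features.pn_in_reo_dest is set:
 *
 *	HAL_REO_ERR_REGULAR_FRAME_2K_JUMP -> true  (2k jump)
 *	HAL_REO_ERR_REGULAR_FRAME_OOR     -> true  (out of order)
 *	HAL_REO_ERR_QUEUE_DESC_ADDR_0     -> false (no SW PN check)
 *
 * On targets without pn_in_reo_dest the function always returns false
 * and no software PN check is performed in this path.
 */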
1168 
1169 /**
1170  * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
1171  *                              descriptor violation on either a
1172  *                              REO or WBM ring
1173  *
1174  * @soc: core DP main context
1175  * @nbuf: buffer pointer
1176  * @rx_tlv_hdr: start of rx tlv header
1177  * @pool_id: mac id
1178  * @txrx_peer: txrx peer handle
1179  *
1180  * This function handles NULL queue descriptor violations arising out of
1181  * a missing REO queue for a given peer or a given TID. This typically
1182  * may happen if a packet is received on a QOS enabled TID before the
1183  * ADDBA negotiation for that TID has set up the TID queue. Or
1184  * it may also happen for MC/BC frames if they are not routed to the
1185  * non-QOS TID queue, in the absence of any other default TID queue.
1186  * This error can show up both in a REO destination or WBM release ring.
1187  *
1188  * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
1189  *         if nbuf could not be handled or dropped.
1190  */
1191 static QDF_STATUS
1192 dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
1193 			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
1194 			 struct dp_txrx_peer *txrx_peer)
1195 {
1196 	uint32_t pkt_len;
1197 	uint16_t msdu_len;
1198 	struct dp_vdev *vdev;
1199 	uint8_t tid;
1200 	qdf_ether_header_t *eh;
1201 	struct hal_rx_msdu_metadata msdu_metadata;
1202 	uint16_t sa_idx = 0;
1203 	bool is_eapol = 0;
1204 	bool enh_flag;
1205 
1206 	qdf_nbuf_set_rx_chfrag_start(nbuf,
1207 				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1208 							       rx_tlv_hdr));
1209 	qdf_nbuf_set_rx_chfrag_end(nbuf,
1210 				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
1211 								 rx_tlv_hdr));
1212 	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1213 								  rx_tlv_hdr));
1214 	qdf_nbuf_set_da_valid(nbuf,
1215 			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
1216 							      rx_tlv_hdr));
1217 	qdf_nbuf_set_sa_valid(nbuf,
1218 			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
1219 							      rx_tlv_hdr));
1220 
1221 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
1222 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1223 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
1224 
1225 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
1226 		if (dp_rx_check_pkt_len(soc, pkt_len))
1227 			goto drop_nbuf;
1228 
1229 		/* Set length in nbuf */
1230 		qdf_nbuf_set_pktlen(
1231 			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
1232 		qdf_assert_always(nbuf->data == rx_tlv_hdr);
1233 	}
1234 
1235 	/*
1236 	 * Check if DMA completed -- msdu_done is the last bit
1237 	 * to be written
1238 	 */
1239 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1240 
1241 		dp_err_rl("MSDU DONE failure");
1242 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1243 				     QDF_TRACE_LEVEL_INFO);
1244 		qdf_assert(0);
1245 	}
1246 
1247 	if (!txrx_peer &&
1248 	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
1249 							  rx_tlv_hdr, nbuf))
1250 		return QDF_STATUS_E_FAILURE;
1251 
1252 	if (!txrx_peer) {
1253 		bool mpdu_done = false;
1254 		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
1255 
1256 		if (!pdev) {
1257 			dp_err_rl("pdev is null for pool_id = %d", pool_id);
1258 			return QDF_STATUS_E_FAILURE;
1259 		}
1260 
1261 		dp_err_rl("txrx_peer is NULL");
1262 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1263 				 qdf_nbuf_len(nbuf));
1264 
1265 		/* QCN9000 has the support enabled */
1266 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
1267 			mpdu_done = true;
1268 			nbuf->next = NULL;
1269 			/* Trigger invalid peer handler wrapper */
1270 			dp_rx_process_invalid_peer_wrapper(soc,
1271 					nbuf, mpdu_done, pool_id);
1272 		} else {
1273 			mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
1274 			/* Trigger invalid peer handler wrapper */
1275 			dp_rx_process_invalid_peer_wrapper(soc,
1276 					pdev->invalid_peer_head_msdu,
1277 					mpdu_done, pool_id);
1278 		}
1279 
1280 		if (mpdu_done) {
1281 			pdev->invalid_peer_head_msdu = NULL;
1282 			pdev->invalid_peer_tail_msdu = NULL;
1283 		}
1284 
1285 		return QDF_STATUS_E_FAILURE;
1286 	}
1287 
1288 	vdev = txrx_peer->vdev;
1289 	if (!vdev) {
1290 		dp_err_rl("Null vdev!");
1291 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1292 		goto drop_nbuf;
1293 	}
1294 
1295 	/*
1296 	 * Advance the packet start pointer by total size of
1297 	 * pre-header TLV's
1298 	 */
1299 	if (qdf_nbuf_is_frag(nbuf))
1300 		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
1301 	else
1302 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
1303 				   soc->rx_pkt_tlv_size));
1304 
1305 	DP_STATS_INC_PKT(vdev, rx_i.null_q_desc_pkt, 1, qdf_nbuf_len(nbuf));
1306 
1307 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
1308 
1309 	if (dp_rx_err_drop_3addr_mcast(vdev, rx_tlv_hdr)) {
1310 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.mcast_3addr_drop, 1);
1311 		goto drop_nbuf;
1312 	}
1313 
1314 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
1315 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
1316 
1317 		if ((sa_idx < 0) ||
1318 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
1319 			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
1320 			goto drop_nbuf;
1321 		}
1322 	}
1323 
1324 	if ((!soc->mec_fw_offload) &&
1325 	    dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
1326 		/* this is a looped back MCBC pkt, drop it */
1327 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
1328 					      qdf_nbuf_len(nbuf));
1329 		goto drop_nbuf;
1330 	}
1331 
1332 	/*
1333 	 * In qwrap mode if the received packet matches with any of the vdev
1334 	 * mac addresses, drop it. Do not receive multicast packets originating
1335 	 * from any proxysta.
1336 	 */
1337 	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
1338 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
1339 					      qdf_nbuf_len(nbuf));
1340 		goto drop_nbuf;
1341 	}
1342 
1343 	if (qdf_unlikely(txrx_peer->nawds_enabled &&
1344 			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1345 							rx_tlv_hdr))) {
1346 		dp_err_rl("free buffer for multicast packet");
1347 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1);
1348 		goto drop_nbuf;
1349 	}
1350 
1351 	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
1352 		dp_err_rl("mcast Policy Check Drop pkt");
1353 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1);
1354 		goto drop_nbuf;
1355 	}
1356 	/* WDS Source Port Learning */
1357 	if (!soc->ast_offload_support &&
1358 	    qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
1359 		vdev->wds_enabled))
1360 		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, txrx_peer, nbuf,
1361 					msdu_metadata);
1362 
1363 	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
1364 		struct dp_peer *peer;
1365 		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
1366 		peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
1367 					     DP_MOD_ID_RX_ERR);
1368 		if (peer) {
1369 			if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
1370 				dp_rx_tid_setup_wifi3(peer, tid, 1,
1371 						      IEEE80211_SEQ_MAX);
1372 			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
1373 			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
1374 		}
1375 	}
1376 
1377 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1378 
1379 	if (!txrx_peer->authorize) {
1380 		is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
1381 			   qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
1382 
1383 		if (is_eapol) {
1384 			if (!dp_rx_err_match_dhost(eh, vdev))
1385 				goto drop_nbuf;
1386 		} else {
1387 			goto drop_nbuf;
1388 		}
1389 	}
1390 
1391 	/*
1392 	 * Drop packets in this path if cce_match is found. Packets will come
1393 	 * in following path depending on whether tidQ is setup.
1394 	 * 1. If tidQ is setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and
1395 	 * cce_match = 1
1396 	 *    Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already
1397 	 *    dropped.
1398 	 * 2. If tidQ is not setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and
1399 	 * cce_match = 1
1400 	 *    These packets need to be dropped and should not get delivered
1401 	 *    to stack.
1402 	 */
1403 	if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr))) {
1404 		goto drop_nbuf;
1405 	}
1406 
1407 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1408 		qdf_nbuf_set_next(nbuf, NULL);
1409 		dp_rx_deliver_raw(vdev, nbuf, txrx_peer);
1410 	} else {
1411 		enh_flag = vdev->pdev->enhanced_stats_en;
1412 		qdf_nbuf_set_next(nbuf, NULL);
1413 		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
1414 					  enh_flag);
1415 		/*
1416 		 * Update the protocol tag in SKB based on
1417 		 * CCE metadata
1418 		 */
1419 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1420 					  EXCEPTION_DEST_RING_ID,
1421 					  true, true);
1422 
1423 		/* Update the flow tag in SKB based on FSE metadata */
1424 		dp_rx_update_flow_tag(soc, vdev, nbuf,
1425 				      rx_tlv_hdr, true);
1426 
1427 		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
1428 				 soc->hal_soc, rx_tlv_hdr) &&
1429 				 (vdev->rx_decap_type ==
1430 				  htt_cmn_pkt_type_ethernet))) {
1431 			DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
1432 					    enh_flag);
1433 
1434 			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
1435 				DP_PEER_BC_INCC_PKT(txrx_peer, 1,
1436 						    qdf_nbuf_len(nbuf),
1437 						    enh_flag);
1438 		}
1439 
1440 		qdf_nbuf_set_exc_frame(nbuf, 1);
1441 		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
1442 					    is_eapol);
1443 	}
1444 	return QDF_STATUS_SUCCESS;
1445 
1446 drop_nbuf:
1447 	dp_rx_nbuf_free(nbuf);
1448 	return QDF_STATUS_E_FAILURE;
1449 }
1450 
1451 /**
1452  * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
1453  *
1454  * @soc: core txrx main context
1455  * @ring_desc: opaque pointer to the REO error ring descriptor
1456  * @mpdu_desc_info: pointer to mpdu level description info
1457  * @link_desc_va: pointer to msdu_link_desc virtual address
1458  * @err_code: reo error code fetched from ring entry
1459  *
1460  * Function to handle msdus fetched from the msdu link desc; currently
1461  * supports the REO errors NULL queue, 2K jump, and OOR.
1462  *
1463  * Return: msdu count processed
1464  */
1465 static uint32_t
1466 dp_rx_reo_err_entry_process(struct dp_soc *soc,
1467 			    void *ring_desc,
1468 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1469 			    void *link_desc_va,
1470 			    enum hal_reo_error_code err_code)
1471 {
1472 	uint32_t rx_bufs_used = 0;
1473 	struct dp_pdev *pdev;
1474 	int i;
1475 	uint8_t *rx_tlv_hdr_first;
1476 	uint8_t *rx_tlv_hdr_last;
1477 	uint32_t tid = DP_MAX_TIDS;
1478 	uint16_t peer_id;
1479 	struct dp_rx_desc *rx_desc;
1480 	struct rx_desc_pool *rx_desc_pool;
1481 	qdf_nbuf_t nbuf;
1482 	struct hal_buf_info buf_info;
1483 	struct hal_rx_msdu_list msdu_list;
1484 	uint16_t num_msdus;
1485 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
1486 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
1487 	/* First field in REO Dst ring Desc is buffer_addr_info */
1488 	void *buf_addr_info = ring_desc;
1489 	qdf_nbuf_t head_nbuf = NULL;
1490 	qdf_nbuf_t tail_nbuf = NULL;
1491 	uint16_t msdu_processed = 0;
1492 	QDF_STATUS status;
1493 	bool ret, is_pn_check_needed;
1494 	uint8_t rx_desc_pool_id;
1495 	struct dp_txrx_peer *txrx_peer = NULL;
1496 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1497 	hal_ring_handle_t hal_ring_hdl = soc->reo_exception_ring.hal_srng;
1498 
1499 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1500 					mpdu_desc_info->peer_meta_data);
1501 	is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
1502 							  HAL_REO_ERROR_DETECTED,
1503 							  err_code);
1504 more_msdu_link_desc:
1505 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
1506 			     &num_msdus);
1507 	for (i = 0; i < num_msdus; i++) {
1508 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
1509 						soc,
1510 						msdu_list.sw_cookie[i]);
1511 
1512 		qdf_assert_always(rx_desc);
1513 		nbuf = rx_desc->nbuf;
1514 
1515 		/*
1516 		 * this is an unlikely scenario where the host is reaping
1517 		 * a descriptor which it already reaped just a while ago
1518 		 * but is yet to replenish it back to HW.
1519 		 * In this case host will dump the last 128 descriptors
1520 		 * including the software descriptor rx_desc and assert.
1521 		 */
1522 		if (qdf_unlikely(!rx_desc->in_use) ||
1523 		    qdf_unlikely(!nbuf)) {
1524 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1525 			dp_info_rl("Reaping rx_desc not in use!");
1526 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1527 						   ring_desc, rx_desc);
1528 			/* ignore duplicate RX desc and continue to process */
1529 			/* Pop out the descriptor */
1530 			continue;
1531 		}
1532 
1533 		ret = dp_rx_desc_paddr_sanity_check(rx_desc,
1534 						    msdu_list.paddr[i]);
1535 		if (!ret) {
1536 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1537 			rx_desc->in_err_state = 1;
1538 			continue;
1539 		}
1540 
1541 		rx_desc_pool_id = rx_desc->pool_id;
1542 		/* all buffers from a MSDU link belong to same pdev */
1543 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id);
1544 
1545 		rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id];
1546 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
1547 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
1548 		rx_desc->unmapped = 1;
1549 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1550 
1551 		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
1552 		rx_bufs_used++;
1553 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1554 					    &pdev->free_list_tail, rx_desc);
1555 
1556 		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);
1557 
1558 		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
1559 				 HAL_MSDU_F_MSDU_CONTINUATION))
1560 			continue;
1561 
1562 		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
1563 					     rx_desc_pool_id)) {
1564 			/* MSDU queued back to the pool */
1565 			goto process_next_msdu;
1566 		}
1567 
1568 		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
1569 		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);
1570 
1571 		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
1572 			nbuf = dp_rx_sg_create(soc, head_nbuf);
1573 			qdf_nbuf_set_is_frag(nbuf, 1);
1574 			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
1575 		}
1576 
1577 		if (is_pn_check_needed) {
1578 			status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf);
1579 			if (QDF_IS_STATUS_ERROR(status)) {
1580 				DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail,
1581 					     1);
1582 				dp_rx_nbuf_free(nbuf);
1583 				goto process_next_msdu;
1584 			}
1585 
1586 			hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc,
1587 							   qdf_nbuf_data(nbuf),
1588 							   mpdu_desc_info);
1589 			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1590 					mpdu_desc_info->peer_meta_data);
1591 
1592 			if (mpdu_desc_info->bar_frame)
1593 				_dp_rx_bar_frame_handle(soc, nbuf,
1594 							mpdu_desc_info, tid,
1595 							HAL_REO_ERROR_DETECTED,
1596 							err_code);
1597 		}
1598 
1599 		switch (err_code) {
1600 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
1601 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
1602 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
1603 			/*
1604 			 * mpdu start descriptor tlv is valid only for the first
1605 			 * msdu; use it for the following msdus.
1606 			 */
1607 			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1608 							   rx_tlv_hdr_last))
1609 				tid = hal_rx_mpdu_start_tid_get(
1610 							soc->hal_soc,
1611 							rx_tlv_hdr_first);
1612 
1613 			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
1614 					  peer_id, tid);
1615 			break;
1616 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
1617 		case HAL_REO_ERR_BAR_FRAME_OOR:
1618 			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
1619 			break;
1620 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
1621 			txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(
1622 							soc, peer_id,
1623 							&txrx_ref_handle,
1624 							DP_MOD_ID_RX_ERR);
1625 			if (!txrx_peer)
1626 				dp_info_rl("txrx_peer is null peer_id %u",
1627 					   peer_id);
1628 			dp_rx_null_q_desc_handle(soc, nbuf, rx_tlv_hdr_last,
1629 						 rx_desc_pool_id, txrx_peer);
1630 			if (txrx_peer)
1631 				dp_txrx_peer_unref_delete(txrx_ref_handle,
1632 							  DP_MOD_ID_RX_ERR);
1633 			break;
1634 		default:
1635 			dp_err_rl("Unsupported error code %d", err_code);
1636 			dp_rx_nbuf_free(nbuf);
1637 		}
1638 
1639 process_next_msdu:
1640 		msdu_processed++;
1641 		head_nbuf = NULL;
1642 		tail_nbuf = NULL;
1643 	}
1644 
1645 	/*
1646 	 * If the msdus are spread across multiple link descriptors,
1647 	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
1648 	 * spread across multiple buffers). Hence, it is
1649 	 * necessary to check the next link_descriptor and release
1650 	 * all the msdu's that are part of it.
1651 	 */
1652 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
1653 			link_desc_va,
1654 			&next_link_desc_addr_info);
1655 
1656 	if (hal_rx_is_buf_addr_info_valid(
1657 				&next_link_desc_addr_info)) {
1658 		/* Clear the next link desc info for the current link_desc */
1659 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
1660 		dp_rx_link_desc_return_by_addr(
1661 				soc,
1662 				buf_addr_info,
1663 				HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1664 
1665 		hal_rx_buffer_addr_info_get_paddr(
1666 				&next_link_desc_addr_info,
1667 				&buf_info);
1668 		/* buffer_addr_info is the first element of ring_desc */
1669 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
1670 					  (uint32_t *)&next_link_desc_addr_info,
1671 					  &buf_info);
1672 		link_desc_va =
1673 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1674 		cur_link_desc_addr_info = next_link_desc_addr_info;
1675 		buf_addr_info = &cur_link_desc_addr_info;
1676 
1677 		goto more_msdu_link_desc;
1678 	}
1679 
1680 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
1681 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1682 	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
1683 		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);
1684 
1685 	return rx_bufs_used;
1686 }
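
/*
 * The more_msdu_link_desc loop above is logically a singly linked list
 * walk over chained MSDU link descriptors. A minimal sketch of the same
 * traversal, with hypothetical process_msdus()/release_link_desc()/
 * link_desc_va_of() helpers standing in for the HAL sequence the driver
 * actually uses:
 *
 *	while (link_desc_va) {
 *		process_msdus(link_desc_va);			// reap msdus
 *		hal_rx_get_next_msdu_link_desc_buf_addr_info(
 *				link_desc_va, &next_info);	// next hop
 *		release_link_desc(soc, link_desc_va);		// free current
 *		link_desc_va = hal_rx_is_buf_addr_info_valid(&next_info) ?
 *				link_desc_va_of(&next_info) : NULL;
 *	}
 */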
1687 
1688 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1689 
1690 /**
1691  * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
1692  *			       or wifi parse error frames to the OS stack.
1693  * @soc: core DP main context
1694  * @nbuf: buffer pointer
1695  * @rx_tlv_hdr: start of rx tlv header
1696  * @txrx_peer: peer reference
1697  * @err_code: rxdma err code
1698  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1699  * pool_id have the same mapping)
1700  *
1701  * Return: None
1702  */
1703 void
1704 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
1705 			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
1706 			uint8_t err_code, uint8_t mac_id)
1707 {
1708 	uint32_t pkt_len, l2_hdr_offset;
1709 	uint16_t msdu_len;
1710 	struct dp_vdev *vdev;
1711 	qdf_ether_header_t *eh;
1712 	bool is_broadcast;
1713 
1714 	/*
1715 	 * Check if DMA completed -- msdu_done is the last bit
1716 	 * to be written
1717 	 */
1718 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1719 
1720 		dp_err_rl("MSDU DONE failure");
1721 
1722 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1723 				     QDF_TRACE_LEVEL_INFO);
1724 		qdf_assert(0);
1725 	}
1726 
1727 	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
1728 							   rx_tlv_hdr);
1729 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1730 	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
1731 
1732 	if (dp_rx_check_pkt_len(soc, pkt_len)) {
1733 		/* Drop & free packet */
1734 		dp_rx_nbuf_free(nbuf);
1735 		return;
1736 	}
1737 	/* Set length in nbuf */
1738 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
1739 
1740 	qdf_nbuf_set_next(nbuf, NULL);
1741 
1742 	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1743 	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
1744 
1745 	if (!txrx_peer) {
1746 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL");
1747 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1748 				qdf_nbuf_len(nbuf));
1749 		/* Trigger invalid peer handler wrapper */
1750 		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
1751 		return;
1752 	}
1753 
1754 	vdev = txrx_peer->vdev;
1755 	if (!vdev) {
1756 		dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
1757 				 vdev);
1758 		/* Drop & free packet */
1759 		dp_rx_nbuf_free(nbuf);
1760 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1761 		return;
1762 	}
1763 
1764 	/*
1765 	 * Advance the packet start pointer by total size of
1766 	 * pre-header TLVs
1767 	 */
1768 	dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset);
1769 
1770 	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
1771 		uint8_t *pkt_type;
1772 
1773 		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
1774 		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
1775 			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
1776 							htons(QDF_LLC_STP)) {
1777 				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
1778 				goto process_mesh;
1779 			} else {
1780 				goto process_rx;
1781 			}
1782 		}
1783 	}
1784 	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
1785 		goto process_mesh;
1786 
1787 	/*
1788 	 * WAPI cert AP sends rekey frames as unencrypted.
1789 	 * Thus RXDMA will report unencrypted frame error.
1790 	 * To pass WAPI cert case, SW needs to pass unencrypted
1791 	 * rekey frame to stack.
1792 	 */
1793 	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1794 		goto process_rx;
1795 	}
1796 	/*
1797 	 * In the dynamic WEP case rekey frames are not encrypted,
1798 	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled
1799 	 * and key install is already done.
1800 	 */
1801 	if ((vdev->sec_type == cdp_sec_type_wep104) &&
1802 	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
1803 		goto process_rx;
1804 
1805 process_mesh:
1806 
1807 	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
1808 		dp_rx_nbuf_free(nbuf);
1809 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1810 		return;
1811 	}
1812 
1813 	if (vdev->mesh_vdev) {
1814 		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
1815 				      == QDF_STATUS_SUCCESS) {
1816 			dp_rx_err_info("%pK: mesh pkt filtered", soc);
1817 			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);
1818 
1819 			dp_rx_nbuf_free(nbuf);
1820 			return;
1821 		}
1822 		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
1823 	}
1824 process_rx:
1825 	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1826 							rx_tlv_hdr) &&
1827 		(vdev->rx_decap_type ==
1828 				htt_cmn_pkt_type_ethernet))) {
1829 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1830 		is_broadcast = (QDF_IS_ADDR_BROADCAST
1831 				(eh->ether_dhost)) ? 1 : 0;
1832 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
1833 					      qdf_nbuf_len(nbuf));
1834 		if (is_broadcast) {
1835 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1,
1836 						      qdf_nbuf_len(nbuf));
1837 		}
1838 	}
1839 
1840 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1841 		dp_rx_deliver_raw(vdev, nbuf, txrx_peer);
1842 	} else {
1843 		/* Update the protocol tag in SKB based on CCE metadata */
1844 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1845 					  EXCEPTION_DEST_RING_ID, true, true);
1846 		/* Update the flow tag in SKB based on FSE metadata */
1847 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
1848 		DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
1849 		qdf_nbuf_set_exc_frame(nbuf, 1);
1850 		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
1851 	}
1852 
1853 	return;
1854 }
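
/*
 * The HAL_RXDMA_ERR_WIFI_PARSE branch above peeks at the ethertype that
 * sits right after the two MAC addresses of the decapped frame. A minimal
 * sketch of those offsets, assuming a plain 802.1Q-tagged ethernet layout:
 *
 *	uint8_t *data = qdf_nbuf_data(nbuf);
 *	uint16_t *ethertype = (uint16_t *)(data + 2 * QDF_MAC_ADDR_SIZE);
 *
 *	if (*ethertype == htons(QDF_ETH_TYPE_8021Q)) {
 *		// VLAN tag present; the encapsulated type/LLC field is
 *		// DP_SKIP_VLAN bytes further in
 *		uint16_t *inner = (uint16_t *)((uint8_t *)ethertype +
 *					       DP_SKIP_VLAN);
 *		if (*inner == htons(QDF_LLC_STP))
 *			; // STP over VLAN: count it and take the mesh path
 *	}
 */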
1855 
1856 /**
1857  * dp_rx_process_mic_error() - Function to pass mic error indication to umac
1858  * @soc: core DP main context
1859  * @nbuf: buffer pointer
1860  * @rx_tlv_hdr: start of rx tlv header
1861  * @txrx_peer: txrx peer handle
1862  *
1863  * return: void
1864  * Return: None
1865 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
1866 			     uint8_t *rx_tlv_hdr,
1867 			     struct dp_txrx_peer *txrx_peer)
1868 {
1869 	struct dp_vdev *vdev = NULL;
1870 	struct dp_pdev *pdev = NULL;
1871 	struct ol_if_ops *tops = NULL;
1872 	uint16_t rx_seq, fragno;
1873 	uint8_t is_raw;
1874 	unsigned int tid;
1875 	QDF_STATUS status;
1876 	struct cdp_rx_mic_err_info mic_failure_info;
1877 
1878 	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1879 					    rx_tlv_hdr))
1880 		return;
1881 
1882 	if (!txrx_peer) {
1883 		dp_info_rl("txrx_peer not found");
1884 		goto fail;
1885 	}
1886 
1887 	vdev = txrx_peer->vdev;
1888 	if (!vdev) {
1889 		dp_info_rl("VDEV not found");
1890 		goto fail;
1891 	}
1892 
1893 	pdev = vdev->pdev;
1894 	if (!pdev) {
1895 		dp_info_rl("PDEV not found");
1896 		goto fail;
1897 	}
1898 
1899 	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
1900 	if (is_raw) {
1901 		fragno = dp_rx_frag_get_mpdu_frag_number(soc,
1902 							 qdf_nbuf_data(nbuf));
1903 		/* Can get only last fragment */
1904 		if (fragno) {
1905 			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
1906 							qdf_nbuf_data(nbuf));
1907 			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
1908 							qdf_nbuf_data(nbuf));
1909 
1910 			status = dp_rx_defrag_add_last_frag(soc, txrx_peer,
1911 							    tid, rx_seq, nbuf);
1912 			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
1913 				   "status %d !", rx_seq, fragno, status);
1914 			return;
1915 		}
1916 	}
1917 
1918 	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
1919 				  &mic_failure_info.da_mac_addr.bytes[0])) {
1920 		dp_err_rl("Failed to get da_mac_addr");
1921 		goto fail;
1922 	}
1923 
1924 	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
1925 				  &mic_failure_info.ta_mac_addr.bytes[0])) {
1926 		dp_err_rl("Failed to get ta_mac_addr");
1927 		goto fail;
1928 	}
1929 
1930 	mic_failure_info.key_id = 0;
1931 	mic_failure_info.multicast =
1932 		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
1933 	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
1934 	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
1935 	mic_failure_info.data = NULL;
1936 	mic_failure_info.vdev_id = vdev->vdev_id;
1937 
1938 	tops = pdev->soc->cdp_soc.ol_ops;
1939 	if (tops->rx_mic_error)
1940 		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
1941 				   &mic_failure_info);
1942 
1943 fail:
1944 	dp_rx_nbuf_free(nbuf);
1945 	return;
1946 }
1947 
1948 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1949 	defined(WLAN_MCAST_MLO)
1950 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1951 			       struct dp_vdev *vdev,
1952 			       struct dp_txrx_peer *peer,
1953 			       qdf_nbuf_t nbuf)
1954 {
1955 	if (soc->arch_ops.dp_rx_mcast_handler) {
1956 		if (soc->arch_ops.dp_rx_mcast_handler(soc, vdev, peer, nbuf))
1957 			return true;
1958 	}
1959 	return false;
1960 }
1961 #else
1962 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1963 			       struct dp_vdev *vdev,
1964 			       struct dp_txrx_peer *peer,
1965 			       qdf_nbuf_t nbuf)
1966 {
1967 	return false;
1968 }
1969 #endif
1970 
1971 /**
1972  * dp_rx_err_route_hdl() - Function to send EAPOL frames to stack.
1973  *                            Any other packet which comes in
1974  *                            this path is freed.
1975  *
1976  * @soc: core DP main context
1977  * @nbuf: buffer pointer
1978  * @txrx_peer: txrx peer handle
1979  * @rx_tlv_hdr: start of rx tlv header
1980  * @err_src: rxdma/reo
1981  *
1982  * This function indicates EAPOL frame received in wbm error ring to stack.
1983  * Any other frame should be dropped.
1984  *
1985  * Return: None
1986  */
1987 static void
1988 dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
1989 		    struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr,
1990 		    enum hal_rx_wbm_error_source err_src)
1991 {
1992 	uint32_t pkt_len;
1993 	uint16_t msdu_len;
1994 	struct dp_vdev *vdev;
1995 	struct hal_rx_msdu_metadata msdu_metadata;
1996 	bool is_eapol;
1997 
1998 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
1999 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
2000 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
2001 
2002 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
2003 		if (dp_rx_check_pkt_len(soc, pkt_len))
2004 			goto drop_nbuf;
2005 
2006 		/* Set length in nbuf */
2007 		qdf_nbuf_set_pktlen(
2008 			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
2009 		qdf_assert_always(nbuf->data == rx_tlv_hdr);
2010 	}
2011 
2012 	/*
2013 	 * Check if DMA completed -- msdu_done is the last bit
2014 	 * to be written
2015 	 */
2016 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
2017 		dp_err_rl("MSDU DONE failure");
2018 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
2019 				     QDF_TRACE_LEVEL_INFO);
2020 		qdf_assert(0);
2021 	}
2022 
2023 	if (!txrx_peer)
2024 		goto drop_nbuf;
2025 
2026 	vdev = txrx_peer->vdev;
2027 	if (!vdev) {
2028 		dp_err_rl("Null vdev!");
2029 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
2030 		goto drop_nbuf;
2031 	}
2032 
2033 	/*
2034 	 * Advance the packet start pointer by total size of
2035 	 * pre-header TLVs
2036 	 */
2037 	if (qdf_nbuf_is_frag(nbuf))
2038 		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
2039 	else
2040 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
2041 				   soc->rx_pkt_tlv_size));
2042 
2043 	if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf))
2044 		return;
2045 
2046 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
2047 
2048 	/*
2049 	 * Indicate EAPOL frame to stack only when vap mac address
2050 	 * matches the destination address.
2051 	 */
2052 	is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
2053 	if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
2054 		qdf_ether_header_t *eh =
2055 			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2056 		if (dp_rx_err_match_dhost(eh, vdev)) {
2057 			DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1,
2058 					 qdf_nbuf_len(nbuf));
2059 
2060 			/*
2061 			 * Update the protocol tag in SKB based on
2062 			 * CCE metadata.
2063 			 */
2064 			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
2065 						  EXCEPTION_DEST_RING_ID,
2066 						  true, true);
2067 			/* Update the flow tag in SKB based on FSE metadata */
2068 			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
2069 					      true);
2070 			DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
2071 						  qdf_nbuf_len(nbuf),
2072 						  vdev->pdev->enhanced_stats_en);
2073 			qdf_nbuf_set_exc_frame(nbuf, 1);
2074 			qdf_nbuf_set_next(nbuf, NULL);
2075 
2076 			dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf,
2077 						    NULL, is_eapol);
2078 
2079 			return;
2080 		}
2081 	}
2082 
2083 drop_nbuf:
2084 
2085 	DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
2086 		      err_src == HAL_RX_WBM_ERR_SRC_REO);
2087 	DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
2088 		      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);
2089 
2090 	dp_rx_nbuf_free(nbuf);
2091 }
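
/*
 * dp_rx_err_route_hdl() above only hands an EAPOL/WAPI frame up when the
 * destination MAC matches the vdev. dp_rx_err_match_dhost() is expected
 * to reduce to roughly this comparison (a sketch, not the exact helper):
 *
 *	qdf_ether_header_t *eh =
 *		(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
 *
 *	if (!qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
 *			 QDF_MAC_ADDR_SIZE))
 *		return true;	// frame is addressed to this vap
 */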
2092 
2093 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2094 
2095 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
2096 /**
2097  * dp_rx_link_cookie_check() - Validate link desc cookie
2098  * @ring_desc: ring descriptor
2099  *
2100  * Return: qdf status
2101  */
2102 static inline QDF_STATUS
2103 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
2104 {
2105 	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
2106 		return QDF_STATUS_E_FAILURE;
2107 
2108 	return QDF_STATUS_SUCCESS;
2109 }
2110 
2111 /**
2112  * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
2113  * @ring_desc: ring descriptor
2114  *
2115  * Return: None
2116  */
2117 static inline void
2118 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
2119 {
2120 	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
2121 }
2122 #else
2123 static inline QDF_STATUS
2124 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
2125 {
2126 	return QDF_STATUS_SUCCESS;
2127 }
2128 
2129 static inline void
2130 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
2131 {
2132 }
2133 #endif
2134 
2135 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
2136 /**
2137  * dp_rx_err_ring_record_entry() - Record rx err ring history
2138  * @soc: Datapath soc structure
2139  * @paddr: paddr of the buffer in RX err ring
2140  * @sw_cookie: SW cookie of the buffer in RX err ring
2141  * @rbm: Return buffer manager of the buffer in RX err ring
2142  *
2143  * Return: None
2144  */
2145 static inline void
2146 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
2147 			    uint32_t sw_cookie, uint8_t rbm)
2148 {
2149 	struct dp_buf_info_record *record;
2150 	uint32_t idx;
2151 
2152 	if (qdf_unlikely(!soc->rx_err_ring_history))
2153 		return;
2154 
2155 	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
2156 					DP_RX_ERR_HIST_MAX);
2157 
2158 	/* No NULL check needed for record since it's an array */
2159 	record = &soc->rx_err_ring_history->entry[idx];
2160 
2161 	record->timestamp = qdf_get_log_timestamp();
2162 	record->hbi.paddr = paddr;
2163 	record->hbi.sw_cookie = sw_cookie;
2164 	record->hbi.rbm = rbm;
2165 }
2166 #else
2167 static inline void
2168 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
2169 			    uint32_t sw_cookie, uint8_t rbm)
2170 {
2171 }
2172 #endif
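
/*
 * The history above is a fixed-size circular buffer; each writer claims a
 * slot and the index wraps at DP_RX_ERR_HIST_MAX. A minimal sketch of the
 * claim, assuming the max is a power of two so the wrap can be a mask:
 *
 *	idx = qdf_atomic_inc_return(&hist->index) &
 *			(DP_RX_ERR_HIST_MAX - 1);
 *	hist->entry[idx].timestamp = qdf_get_log_timestamp();
 */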
2173 
2174 #ifdef HANDLE_RX_REROUTE_ERR
2175 static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
2176 				     hal_ring_desc_t ring_desc)
2177 {
2178 	int lmac_id = DP_INVALID_LMAC_ID;
2179 	struct dp_rx_desc *rx_desc;
2180 	struct hal_buf_info hbi;
2181 	struct dp_pdev *pdev;
2182 	struct rx_desc_pool *rx_desc_pool;
2183 
2184 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2185 
2186 	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie);
2187 
2188 	/* sanity */
2189 	if (!rx_desc) {
2190 		DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1);
2191 		goto assert_return;
2192 	}
2193 
2194 	if (!rx_desc->nbuf)
2195 		goto assert_return;
2196 
2197 	dp_rx_err_ring_record_entry(soc, hbi.paddr,
2198 				    hbi.sw_cookie,
2199 				    hal_rx_ret_buf_manager_get(soc->hal_soc,
2200 							       ring_desc));
2201 	if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) {
2202 		DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2203 		rx_desc->in_err_state = 1;
2204 		goto assert_return;
2205 	}
2206 
2207 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2208 	/* After this point the rx_desc and nbuf are valid */
2209 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
2210 	qdf_assert_always(!rx_desc->unmapped);
2211 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
2212 	rx_desc->unmapped = 1;
2213 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2214 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
2215 				    rx_desc->pool_id);
2216 
2217 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
2218 	lmac_id = rx_desc->pool_id;
2219 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
2220 				    &pdev->free_list_tail,
2221 				    rx_desc);
2222 	return lmac_id;
2223 
2224 assert_return:
2225 	qdf_assert(0);
2226 	return lmac_id;
2227 }
2228 
2229 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
2230 {
2231 	int ret;
2232 	uint64_t cur_time_stamp;
2233 
2234 	DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1);
2235 
2236 	/* Recover if overall error count exceeds threshold */
2237 	if (soc->stats.rx.err.reo_err_msdu_buf_rcved >
2238 	    DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) {
2239 		dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
2240 		       soc->stats.rx.err.reo_err_msdu_buf_rcved,
2241 		       soc->rx_route_err_start_pkt_ts);
2242 		qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR);
2243 	}
2244 
2245 	cur_time_stamp = qdf_get_log_timestamp_usecs();
2246 	if (!soc->rx_route_err_start_pkt_ts)
2247 		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
2248 
2249 	/* Recover if threshold number of packets received in threshold time */
2250 	if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) >
2251 						DP_RX_ERR_ROUTE_TIMEOUT_US) {
2252 		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
2253 
2254 		if (soc->rx_route_err_in_window >
2255 		    DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) {
2256 			qdf_trigger_self_recovery(NULL,
2257 						  QDF_RX_REG_PKT_ROUTE_ERR);
2258 			dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
2259 			       soc->stats.rx.err.reo_err_msdu_buf_rcved,
2260 			       soc->rx_route_err_start_pkt_ts);
2261 		} else {
2262 			soc->rx_route_err_in_window = 1;
2263 		}
2264 	} else {
2265 		soc->rx_route_err_in_window++;
2266 	}
2267 
2268 	ret = dp_rx_err_handle_msdu_buf(soc, ring_desc);
2269 
2270 	return ret;
2271 }
2272 #else /* HANDLE_RX_REROUTE_ERR */
2273 
2274 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
2275 {
2276 	qdf_assert_always(0);
2277 
2278 	return DP_INVALID_LMAC_ID;
2279 }
2280 #endif /* HANDLE_RX_REROUTE_ERR */
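
/*
 * dp_rx_err_exception() applies two recovery triggers: an absolute cap on
 * total reroute errors, and a rate cap within a DP_RX_ERR_ROUTE_TIMEOUT_US
 * window. The windowed check, sketched with hypothetical window_start /
 * errs_in_window counters (a simplification of the code above):
 *
 *	now = qdf_get_log_timestamp_usecs();
 *	if (now - window_start > DP_RX_ERR_ROUTE_TIMEOUT_US) {
 *		if (errs_in_window > DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT)
 *			qdf_trigger_self_recovery(NULL,
 *						  QDF_RX_REG_PKT_ROUTE_ERR);
 *		window_start = now;	// open a fresh window
 *		errs_in_window = 1;
 *	} else {
 *		errs_in_window++;
 *	}
 */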
2281 
2282 uint32_t
2283 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2284 		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
2285 {
2286 	hal_ring_desc_t ring_desc;
2287 	hal_soc_handle_t hal_soc;
2288 	uint32_t count = 0;
2289 	uint32_t rx_bufs_used = 0;
2290 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
2291 	uint8_t mac_id = 0;
2292 	uint8_t buf_type;
2293 	uint8_t err_status;
2294 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
2295 	struct hal_buf_info hbi;
2296 	struct dp_pdev *dp_pdev;
2297 	struct dp_srng *dp_rxdma_srng;
2298 	struct rx_desc_pool *rx_desc_pool;
2299 	void *link_desc_va;
2300 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
2301 	uint16_t num_msdus;
2302 	struct dp_rx_desc *rx_desc = NULL;
2303 	QDF_STATUS status;
2304 	bool ret;
2305 	uint32_t error_code = 0;
2306 	bool sw_pn_check_needed;
2307 
2308 	/* Debug -- Remove later */
2309 	qdf_assert(soc && hal_ring_hdl);
2310 
2311 	hal_soc = soc->hal_soc;
2312 
2313 	/* Debug -- Remove later */
2314 	qdf_assert(hal_soc);
2315 
2316 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
2317 
2318 		/* TODO */
2319 		/*
2320 		 * Need API to convert from hal_ring pointer to
2321 		 * Ring Type / Ring Id combo
2322 		 */
2323 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
2324 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc,
2325 			      hal_ring_hdl);
2326 		goto done;
2327 	}
2328 
2329 	while (qdf_likely(quota-- && (ring_desc =
2330 				hal_srng_dst_peek(hal_soc,
2331 						  hal_ring_hdl)))) {
2332 
2333 		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
2334 		err_status = hal_rx_err_status_get(hal_soc, ring_desc);
2335 		buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc);
2336 
2337 		if (err_status == HAL_REO_ERROR_DETECTED)
2338 			error_code = hal_rx_get_reo_error_code(hal_soc,
2339 							       ring_desc);
2340 
2341 		qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0);
2342 		sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
2343 								  err_status,
2344 								  error_code);
2345 		if (!sw_pn_check_needed) {
2346 			/*
2347 			 * MPDU desc info will be present in the REO desc
2348 			 * only in the below scenarios:
2349 			 * 1) pn_in_dest disabled: always
2350 			 * 2) pn_in_dest enabled: all cases except 2k-jump
2351 			 *			and OOR errors
2352 			 */
2353 			hal_rx_mpdu_desc_info_get(hal_soc, ring_desc,
2354 						  &mpdu_desc_info);
2355 		}
2356 
2357 		if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0)
2358 			goto next_entry;
2359 
2360 		/*
2361 		 * For REO error ring, only MSDU LINK DESC is expected.
2362 		 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
2363 		 */
2364 		if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) {
2365 			int lmac_id;
2366 
2367 			lmac_id = dp_rx_err_exception(soc, ring_desc);
2368 			if (lmac_id >= 0)
2369 				rx_bufs_reaped[lmac_id] += 1;
2370 			goto next_entry;
2371 		}
2372 
2373 		hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
2374 					  &hbi);
2375 		/*
2376 		 * check for the magic number in the sw cookie
2377 		 */
2378 		qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) &
2379 					soc->link_desc_id_start);
2380 
2381 		status = dp_rx_link_cookie_check(ring_desc);
2382 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2383 			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
2384 			break;
2385 		}
2386 
2387 		hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2388 		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
2389 		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
2390 				     &num_msdus);
2391 		if (!num_msdus ||
2392 		    !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) {
2393 			dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x",
2394 					  num_msdus, msdu_list.sw_cookie[0]);
2395 			dp_rx_link_desc_return(soc, ring_desc,
2396 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
2397 			goto next_entry;
2398 		}
2399 
2400 		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
2401 					    msdu_list.sw_cookie[0],
2402 					    msdu_list.rbm[0]);
2403 		// TODO - BE- Check if the RBM is to be checked for all chips
2404 		if (qdf_unlikely((msdu_list.rbm[0] !=
2405 					dp_rx_get_rx_bm_id(soc)) &&
2406 				 (msdu_list.rbm[0] !=
2407 				  soc->idle_link_bm_id) &&
2408 				 (msdu_list.rbm[0] !=
2409 					dp_rx_get_defrag_bm_id(soc)))) {
2410 			/* TODO */
2411 			/* Call appropriate handler */
2412 			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2413 				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
2414 				dp_rx_err_err("%pK: Invalid RBM %d",
2415 					      soc, msdu_list.rbm[0]);
2416 			}
2417 
2418 			/* Return link descriptor through WBM ring (SW2WBM)*/
2419 			dp_rx_link_desc_return(soc, ring_desc,
2420 					HAL_BM_ACTION_RELEASE_MSDU_LIST);
2421 			goto next_entry;
2422 		}
2423 
2424 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
2425 						soc,
2426 						msdu_list.sw_cookie[0]);
2427 		qdf_assert_always(rx_desc);
2428 
2429 		mac_id = rx_desc->pool_id;
2430 
2431 		if (sw_pn_check_needed) {
2432 			goto process_reo_error_code;
2433 		}
2434 
2435 		if (mpdu_desc_info.bar_frame) {
2436 			qdf_assert_always(mpdu_desc_info.msdu_count == 1);
2437 
2438 			dp_rx_bar_frame_handle(soc, ring_desc, rx_desc,
2439 					       &mpdu_desc_info, err_status,
2440 					       error_code);
2441 
2442 			rx_bufs_reaped[mac_id] += 1;
2443 			goto next_entry;
2444 		}
2445 
2446 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
2447 			/*
2448 			 * We only handle one msdu per link desc for fragmented
2449 			 * case. We drop the msdus and release the link desc
2450 			 * back if there is more than one msdu in the link desc.
2451 			 */
2452 			if (qdf_unlikely(num_msdus > 1)) {
2453 				count = dp_rx_msdus_drop(soc, ring_desc,
2454 							 &mpdu_desc_info,
2455 							 &mac_id, quota);
2456 				rx_bufs_reaped[mac_id] += count;
2457 				goto next_entry;
2458 			}
2459 
2460 			/*
2461 			 * This is an unlikely scenario where the host is reaping
2462 			 * a descriptor which it already reaped just a while ago
2463 			 * but is yet to replenish it back to HW.
2464 			 * In this case the host will dump the last 128 descriptors
2465 			 * including the software descriptor rx_desc and assert.
2466 			 */
2467 
2468 			if (qdf_unlikely(!rx_desc->in_use)) {
2469 				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
2470 				dp_info_rl("Reaping rx_desc not in use!");
2471 				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2472 							   ring_desc, rx_desc);
2473 				/* ignore duplicate RX desc and continue */
2474 				/* Pop out the descriptor */
2475 				goto next_entry;
2476 			}
2477 
2478 			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
2479 							    msdu_list.paddr[0]);
2480 			if (!ret) {
2481 				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2482 				rx_desc->in_err_state = 1;
2483 				goto next_entry;
2484 			}
2485 
2486 			count = dp_rx_frag_handle(soc,
2487 						  ring_desc, &mpdu_desc_info,
2488 						  rx_desc, &mac_id, quota);
2489 
2490 			rx_bufs_reaped[mac_id] += count;
2491 			DP_STATS_INC(soc, rx.rx_frags, 1);
2492 			goto next_entry;
2493 		}
2494 
2495 process_reo_error_code:
2496 		/*
2497 		 * Expect REO errors to be handled after this point
2498 		 */
2499 		qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED);
2500 
2501 		dp_info_rl("Got pkt with REO ERROR: %d", error_code);
2502 
2503 		switch (error_code) {
2504 		case HAL_REO_ERR_PN_CHECK_FAILED:
2505 		case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
2506 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2507 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2508 			if (dp_pdev)
2509 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
2510 			count = dp_rx_pn_error_handle(soc,
2511 						      ring_desc,
2512 						      &mpdu_desc_info, &mac_id,
2513 						      quota);
2514 
2515 			rx_bufs_reaped[mac_id] += count;
2516 			break;
2517 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2518 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
2519 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2520 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
2521 		case HAL_REO_ERR_BAR_FRAME_OOR:
2522 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2523 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2524 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2525 			if (dp_pdev)
2526 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
2527 			count = dp_rx_reo_err_entry_process(
2528 					soc,
2529 					ring_desc,
2530 					&mpdu_desc_info,
2531 					link_desc_va,
2532 					error_code);
2533 
2534 			rx_bufs_reaped[mac_id] += count;
2535 			break;
2536 		case HAL_REO_ERR_QUEUE_DESC_INVALID:
2537 		case HAL_REO_ERR_AMPDU_IN_NON_BA:
2538 		case HAL_REO_ERR_NON_BA_DUPLICATE:
2539 		case HAL_REO_ERR_BA_DUPLICATE:
2540 		case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION:
2541 		case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN:
2542 		case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET:
2543 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2544 			count = dp_rx_msdus_drop(soc, ring_desc,
2545 						 &mpdu_desc_info,
2546 						 &mac_id, quota);
2547 			rx_bufs_reaped[mac_id] += count;
2548 			break;
2549 		default:
2550 			/* Assert if unexpected error type */
2551 			qdf_assert_always(0);
2552 		}
2553 next_entry:
2554 		dp_rx_link_cookie_invalidate(ring_desc);
2555 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2556 	}
2557 
2558 done:
2559 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2560 
2561 	if (soc->rx.flags.defrag_timeout_check) {
2562 		uint32_t now_ms =
2563 			qdf_system_ticks_to_msecs(qdf_system_ticks());
2564 
2565 		if (now_ms >= soc->rx.defrag.next_flush_ms)
2566 			dp_rx_defrag_waitlist_flush(soc);
2567 	}
2568 
2569 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2570 		if (rx_bufs_reaped[mac_id]) {
2571 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2572 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2573 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
2574 
2575 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2576 						rx_desc_pool,
2577 						rx_bufs_reaped[mac_id],
2578 						&dp_pdev->free_list_head,
2579 						&dp_pdev->free_list_tail);
2580 			rx_bufs_used += rx_bufs_reaped[mac_id];
2581 		}
2582 	}
2583 
2584 	return rx_bufs_used; /* Assume no scale factor for now */
2585 }
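
/*
 * dp_rx_err_process() follows the standard SRNG reap pattern: take the
 * ring lock, peek/advance under a quota, classify each descriptor, then
 * replenish whatever was reaped. A stripped-down sketch of that skeleton
 * (handle_err_desc() and replenish_all() are hypothetical):
 *
 *	if (dp_srng_access_start(int_ctx, soc, hal_ring_hdl))
 *		return 0;
 *	while (quota-- &&
 *	       (desc = hal_srng_dst_peek(hal_soc, hal_ring_hdl))) {
 *		rx_bufs_reaped[mac_id] += handle_err_desc(soc, desc);
 *		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
 *	}
 *	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
 *	replenish_all(soc, rx_bufs_reaped);
 */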
2586 
2587 #ifdef DROP_RXDMA_DECRYPT_ERR
2588 /**
2589  * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
2590  *
2591  * Return: true if rxdma decrypt err frames are handled and false otherwise
2592  */
2593 static inline bool dp_handle_rxdma_decrypt_err(void)
2594 {
2595 	return false;
2596 }
2597 #else
2598 static inline bool dp_handle_rxdma_decrypt_err(void)
2599 {
2600 	return true;
2601 }
2602 #endif
2603 
2604 /**
2605  * dp_rx_wbm_sg_list_last_msdu_war() - WAR for HW issue
2606  *
2607  * This is a WAR for a HW issue where the length is only valid in the last msdu
2608  * @soc: DP SOC handle
2609  */
2610 static inline void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
2611 {
2612 	if (soc->wbm_sg_last_msdu_war) {
2613 		uint32_t len;
2614 		qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail;
2615 
2616 		len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
2617 						     qdf_nbuf_data(temp));
2618 		temp = soc->wbm_sg_param.wbm_sg_nbuf_head;
2619 		while (temp) {
2620 			QDF_NBUF_CB_RX_PKT_LEN(temp) = len;
2621 			temp = temp->next;
2622 		}
2623 	}
2624 }
2625 
2626 #ifdef RX_DESC_DEBUG_CHECK
2627  * dp_rx_wbm_desc_nbuf_sanity_check - Add sanity check for WBM rx_desc paddr
2628  * dp_rx_wbm_desc_nbuf_sanity_check - Add sanity check to for WBM rx_desc paddr
2629  *					corruption
2630  * @soc: core txrx main context
2631  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring
2632  * @ring_desc: REO ring descriptor
2633  * @rx_desc: Rx descriptor
2634  *
2635  * Return: QDF_STATUS_SUCCESS if paddr is sane, else QDF_STATUS_E_FAILURE
2636  */
2637 static
2638 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2639 					    hal_ring_handle_t hal_ring_hdl,
2640 					    hal_ring_desc_t ring_desc,
2641 					    struct dp_rx_desc *rx_desc)
2642 {
2643 	struct hal_buf_info hbi;
2644 
2645 	hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2646 	/* Sanity check for possible buffer paddr corruption */
2647 	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
2648 		return QDF_STATUS_SUCCESS;
2649 
2650 	hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc);
2651 
2652 	return QDF_STATUS_E_FAILURE;
2653 }
2654 
2655 #else
2656 static
2657 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2658 					    hal_ring_handle_t hal_ring_hdl,
2659 					    hal_ring_desc_t ring_desc,
2660 					    struct dp_rx_desc *rx_desc)
2661 {
2662 	return QDF_STATUS_SUCCESS;
2663 }
2664 #endif
2665 
2666 static inline bool
2667 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
2668 {
2669 	/*
2670 	 * Currently only the Null Queue and Unencrypted error handlers
2671 	 * support SG. The other error handlers do not deal with SG buffers.
2672 	 */
2673 	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
2674 	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
2675 	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
2676 	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
2677 		return true;
2678 
2679 	return false;
2680 }
2681 
2682 uint32_t
2683 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2684 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
2685 {
2686 	hal_ring_desc_t ring_desc;
2687 	hal_soc_handle_t hal_soc;
2688 	struct dp_rx_desc *rx_desc;
2689 	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
2690 	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
2691 	uint32_t rx_bufs_used = 0;
2692 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
2693 	uint8_t buf_type;
2694 	uint8_t mac_id;
2695 	struct dp_pdev *dp_pdev;
2696 	struct dp_srng *dp_rxdma_srng;
2697 	struct rx_desc_pool *rx_desc_pool;
2698 	uint8_t *rx_tlv_hdr;
2699 	bool is_tkip_mic_err;
2700 	qdf_nbuf_t nbuf_head = NULL;
2701 	qdf_nbuf_t nbuf_tail = NULL;
2702 	qdf_nbuf_t nbuf, next;
2703 	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
2704 	uint8_t pool_id;
2705 	uint8_t tid = 0;
2706 	uint8_t msdu_continuation = 0;
2707 	bool process_sg_buf = false;
2708 	uint32_t wbm_err_src;
2709 	QDF_STATUS status;
2710 
2711 	/* Debug -- Remove later */
2712 	qdf_assert(soc && hal_ring_hdl);
2713 
2714 	hal_soc = soc->hal_soc;
2715 
2716 	/* Debug -- Remove later */
2717 	qdf_assert(hal_soc);
2718 
2719 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
2720 
2721 		/* TODO */
2722 		/*
2723 		 * Need API to convert from hal_ring pointer to
2724 		 * Ring Type / Ring Id combo
2725 		 */
2726 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
2727 			      soc, hal_ring_hdl);
2728 		goto done;
2729 	}
2730 
2731 	while (qdf_likely(quota)) {
2732 		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2733 		if (qdf_unlikely(!ring_desc))
2734 			break;
2735 
2736 		/* XXX */
2737 		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);
2738 
2739 		/*
2740 		 * For WBM ring, expect only MSDU buffers
2741 		 */
2742 		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);
2743 
2744 		wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc);
2745 		qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
2746 			   (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));
2747 
2748 		if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc,
2749 								   ring_desc,
2750 								   &rx_desc)) {
2751 			dp_rx_err_err("get rx desc from hal_desc failed");
2752 			continue;
2753 		}
2754 
2755 		qdf_assert_always(rx_desc);
2756 
2757 		if (!dp_rx_desc_check_magic(rx_desc)) {
2758 			dp_rx_err_err("%pK: Invalid rx_desc %pK",
2759 				      soc, rx_desc);
2760 			continue;
2761 		}
2762 
2763 		/*
2764 		 * This is an unlikely scenario where the host is reaping
2765 		 * a descriptor which it already reaped just a while ago
2766 		 * but is yet to replenish it back to HW.
2767 		 * In this case the host will dump the last 128 descriptors
2768 		 * including the software descriptor rx_desc and assert.
2769 		 */
2770 		if (qdf_unlikely(!rx_desc->in_use)) {
2771 			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
2772 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2773 						   ring_desc, rx_desc);
2774 			continue;
2775 		}
2776 
2777 		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
2778 		nbuf = rx_desc->nbuf;
2779 
2780 		status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
2781 							  ring_desc, rx_desc);
2782 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2783 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2784 			dp_info_rl("Rx error Nbuf %pK sanity check failure!",
2785 				   nbuf);
2786 			rx_desc->in_err_state = 1;
2787 			rx_desc->unmapped = 1;
2788 			rx_bufs_reaped[rx_desc->pool_id]++;
2789 			dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
2790 						    &tail[rx_desc->pool_id],
2791 						    rx_desc);
2792 
2793 			continue;
2794 		}
2795 
2796 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2797 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
2798 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
2799 		rx_desc->unmapped = 1;
2800 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2801 
2802 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support &&
2803 				 dp_rx_is_sg_formation_required(&wbm_err_info))) {
2804 			/* SG is detected from continuation bit */
2805 			msdu_continuation =
2806 				hal_rx_wbm_err_msdu_continuation_get(hal_soc,
2807 								     ring_desc);
2808 			if (msdu_continuation &&
2809 			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
2810 				/* Update length from first buffer in SG */
2811 				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
2812 					hal_rx_msdu_start_msdu_len_get(
2813 						soc->hal_soc,
2814 						qdf_nbuf_data(nbuf));
2815 				soc->wbm_sg_param.wbm_is_first_msdu_in_sg = true;
2816 			}
2817 
2818 			if (msdu_continuation) {
2819 				/* MSDU continued packets */
2820 				qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
2821 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2822 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
2823 			} else {
2824 				/* This is the terminal packet in SG */
2825 				qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
2826 				qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
2827 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2828 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
2829 				process_sg_buf = true;
2830 			}
2831 		}
2832 
2833 		/*
2834 		 * save the wbm desc info in nbuf TLV. We will need this
2835 		 * info when we do the actual nbuf processing
2836 		 */
2837 		wbm_err_info.pool_id = rx_desc->pool_id;
2838 		hal_rx_priv_info_set_in_tlv(soc->hal_soc,
2839 					    qdf_nbuf_data(nbuf),
2840 					    (uint8_t *)&wbm_err_info,
2841 					    sizeof(wbm_err_info));
2842 
2843 		rx_bufs_reaped[rx_desc->pool_id]++;
2844 
2845 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
2846 			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
2847 					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
2848 					  nbuf);
2849 			if (process_sg_buf) {
2850 				if (!dp_rx_buffer_pool_refill(
2851 					soc,
2852 					soc->wbm_sg_param.wbm_sg_nbuf_head,
2853 					rx_desc->pool_id))
2854 					DP_RX_MERGE_TWO_LIST(
2855 						nbuf_head, nbuf_tail,
2856 						soc->wbm_sg_param.wbm_sg_nbuf_head,
2857 						soc->wbm_sg_param.wbm_sg_nbuf_tail);
2858 				dp_rx_wbm_sg_list_last_msdu_war(soc);
2859 				dp_rx_wbm_sg_list_reset(soc);
2860 				process_sg_buf = false;
2861 			}
2862 		} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
2863 						     rx_desc->pool_id)) {
2864 			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
2865 		}
2866 
2867 		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
2868 						&tail[rx_desc->pool_id],
2869 						rx_desc);
2870 
2871 		/*
2872 		 * If the continuation bit is set then we have an MSDU spread
2873 		 * across multiple buffers; let us not decrement quota
2874 		 * till we reap all buffers of that MSDU.
2875 		 */
2876 		if (qdf_likely(!msdu_continuation))
2877 			quota -= 1;
2878 	}
2879 done:
2880 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2881 
2882 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2883 		if (rx_bufs_reaped[mac_id]) {
2884 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2885 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
2886 
2887 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2888 					rx_desc_pool, rx_bufs_reaped[mac_id],
2889 					&head[mac_id], &tail[mac_id]);
2890 			rx_bufs_used += rx_bufs_reaped[mac_id];
2891 		}
2892 	}
2893 
2894 	nbuf = nbuf_head;
2895 	while (nbuf) {
2896 		struct dp_txrx_peer *txrx_peer;
2897 		struct dp_peer *peer;
2898 		uint16_t peer_id;
2899 		uint8_t err_code;
2900 		uint8_t *tlv_hdr;
2901 		uint32_t peer_meta_data;
2902 		dp_txrx_ref_handle txrx_ref_handle = NULL;
2903 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
2904 
2905 		/*
2906 		 * retrieve the wbm desc info from nbuf TLV, so we can
2907 		 * handle error cases appropriately
2908 		 */
2909 		hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr,
2910 					      (uint8_t *)&wbm_err_info,
2911 					      sizeof(wbm_err_info));
2912 
2913 		peer_meta_data = hal_rx_mpdu_peer_meta_data_get(soc->hal_soc,
2914 								rx_tlv_hdr);
2915 		peer_id = dp_rx_peer_metadata_peer_id_get(soc, peer_meta_data);
2916 		txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
2917 							   &txrx_ref_handle,
2918 							   DP_MOD_ID_RX_ERR);
2919 
2920 		if (!txrx_peer)
2921 			dp_info_rl("peer is null peer_id%u err_src%u err_rsn%u",
2922 				   peer_id, wbm_err_info.wbm_err_src,
2923 				   wbm_err_info.reo_psh_rsn);
2924 
2925 		/* Set queue_mapping in nbuf to 0 */
2926 		dp_set_rx_queue(nbuf, 0);
2927 
2928 		next = nbuf->next;
2929 
2930 		/*
2931 		 * Form the SG for msdu-continued buffers;
2932 		 * QCN9000 has this support.
2933 		 */
2934 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
2935 			nbuf = dp_rx_sg_create(soc, nbuf);
2936 			next = nbuf->next;
2937 			/*
2938 			 * SG error handling is not done correctly,
2939 			 * drop SG frames for now.
2940 			 */
2941 			dp_rx_nbuf_free(nbuf);
2942 			dp_info_rl("scattered msdu dropped");
2943 			nbuf = next;
2944 			if (txrx_peer)
2945 				dp_txrx_peer_unref_delete(txrx_ref_handle,
2946 							  DP_MOD_ID_RX_ERR);
2947 			continue;
2948 		}
2949 
2950 		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
2951 			if (wbm_err_info.reo_psh_rsn
2952 					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {
2953 
2954 				DP_STATS_INC(soc,
2955 					rx.err.reo_error
2956 					[wbm_err_info.reo_err_code], 1);
2957 				/* increment @pdev level */
2958 				pool_id = wbm_err_info.pool_id;
2959 				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2960 				if (dp_pdev)
2961 					DP_STATS_INC(dp_pdev, err.reo_error,
2962 						     1);
2963 
2964 				switch (wbm_err_info.reo_err_code) {
2965 				/*
2966 				 * Handling for packets which have NULL REO
2967 				 * queue descriptor
2968 				 */
2969 				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2970 					pool_id = wbm_err_info.pool_id;
2971 					dp_rx_null_q_desc_handle(soc, nbuf,
2972 								 rx_tlv_hdr,
2973 								 pool_id,
2974 								 txrx_peer);
2975 					break;
2976 				/* TODO */
2977 				/* Add per error code accounting */
2978 				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2979 					if (txrx_peer)
2980 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2981 									  rx.err.jump_2k_err,
2982 									  1);
2983 
2984 					pool_id = wbm_err_info.pool_id;
2985 
2986 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2987 									   rx_tlv_hdr)) {
2988 						tid =
2989 						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2990 					}
2991 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2992 					hal_rx_msdu_start_msdu_len_get(
2993 						soc->hal_soc, rx_tlv_hdr);
2994 					nbuf->next = NULL;
2995 					dp_2k_jump_handle(soc, nbuf,
2996 							  rx_tlv_hdr,
2997 							  peer_id, tid);
2998 					break;
2999 				case HAL_REO_ERR_REGULAR_FRAME_OOR:
3000 					if (txrx_peer)
3001 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3002 									  rx.err.oor_err,
3003 									  1);
3004 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
3005 									   rx_tlv_hdr)) {
3006 						tid =
3007 							hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
3008 					}
3009 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
3010 						hal_rx_msdu_start_msdu_len_get(
3011 						soc->hal_soc, rx_tlv_hdr);
3012 					nbuf->next = NULL;
3013 					dp_rx_oor_handle(soc, nbuf,
3014 							 peer_id,
3015 							 rx_tlv_hdr);
3016 					break;
3017 				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
3018 				case HAL_REO_ERR_BAR_FRAME_OOR:
3019 					peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
3020 					if (peer) {
3021 						dp_rx_err_handle_bar(soc, peer,
3022 								     nbuf);
3023 						dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
3024 					}
3025 					dp_rx_nbuf_free(nbuf);
3026 					break;
3027 
3028 				case HAL_REO_ERR_PN_CHECK_FAILED:
3029 				case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
3030 					if (txrx_peer)
3031 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3032 									  rx.err.pn_err,
3033 									  1);
3034 					dp_rx_nbuf_free(nbuf);
3035 					break;
3036 
3037 				default:
3038 					dp_info_rl("Got pkt with REO ERROR: %d",
3039 						   wbm_err_info.reo_err_code);
3040 					dp_rx_nbuf_free(nbuf);
3041 				}
3042 			} else if (wbm_err_info.reo_psh_rsn
3043 					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
3044 				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
3045 						    rx_tlv_hdr,
3046 						    HAL_RX_WBM_ERR_SRC_REO);
3047 			} else {
3048 				/* should not enter here */
3049 				dp_rx_err_alert("invalid reo push reason %u",
3050 						wbm_err_info.reo_psh_rsn);
3051 				dp_rx_nbuf_free(nbuf);
3052 				qdf_assert_always(0);
3053 			}
3054 		} else if (wbm_err_info.wbm_err_src ==
3055 					HAL_RX_WBM_ERR_SRC_RXDMA) {
3056 			if (wbm_err_info.rxdma_psh_rsn
3057 					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
3058 				DP_STATS_INC(soc,
3059 					rx.err.rxdma_error
3060 					[wbm_err_info.rxdma_err_code], 1);
3061 				/* increment @pdev level */
3062 				pool_id = wbm_err_info.pool_id;
3063 				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
3064 				if (dp_pdev)
3065 					DP_STATS_INC(dp_pdev,
3066 						     err.rxdma_error, 1);
3067 
3068 				switch (wbm_err_info.rxdma_err_code) {
3069 				case HAL_RXDMA_ERR_UNENCRYPTED:
3070 
3071 				case HAL_RXDMA_ERR_WIFI_PARSE:
3072 					if (txrx_peer)
3073 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3074 									  rx.err.rxdma_wifi_parse_err,
3075 									  1);
3076 
3077 					pool_id = wbm_err_info.pool_id;
3078 					dp_rx_process_rxdma_err(soc, nbuf,
3079 								rx_tlv_hdr,
3080 								txrx_peer,
3081 								wbm_err_info.
3082 								rxdma_err_code,
3083 								pool_id);
3084 					break;
3085 
3086 				case HAL_RXDMA_ERR_TKIP_MIC:
3087 					dp_rx_process_mic_error(soc, nbuf,
3088 								rx_tlv_hdr,
3089 								txrx_peer);
3090 					if (txrx_peer)
3091 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3092 									  rx.err.mic_err,
3093 									  1);
3094 					break;
3095 
3096 				case HAL_RXDMA_ERR_DECRYPT:
3097 					/* All the TKIP-MIC failures are treated as Decrypt Errors
3098 					 * for QCN9224 Targets
3099 					 */
3100 					is_tkip_mic_err = hal_rx_msdu_end_is_tkip_mic_err(hal_soc, rx_tlv_hdr);
3101 
3102 					if (is_tkip_mic_err && txrx_peer) {
3103 						dp_rx_process_mic_error(soc, nbuf,
3104 									rx_tlv_hdr,
3105 									txrx_peer);
3106 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3107 									  rx.err.mic_err,
3108 									  1);
3109 						break;
3110 					}
3111 
3112 					if (txrx_peer) {
3113 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3114 									  rx.err.decrypt_err,
3115 									  1);
3116 						dp_rx_nbuf_free(nbuf);
3117 						break;
3118 					}
3119 
3120 					if (!dp_handle_rxdma_decrypt_err()) {
3121 						dp_rx_nbuf_free(nbuf);
3122 						break;
3123 					}
3124 
3125 					pool_id = wbm_err_info.pool_id;
3126 					err_code = wbm_err_info.rxdma_err_code;
3127 					tlv_hdr = rx_tlv_hdr;
3128 					dp_rx_process_rxdma_err(soc, nbuf,
3129 								tlv_hdr, NULL,
3130 								err_code,
3131 								pool_id);
3132 					break;
3133 				case HAL_RXDMA_MULTICAST_ECHO:
3134 					if (txrx_peer)
3135 						DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
3136 									      rx.mec_drop, 1,
3137 									      qdf_nbuf_len(nbuf));
3138 					dp_rx_nbuf_free(nbuf);
3139 					break;
3140 				case HAL_RXDMA_UNAUTHORIZED_WDS:
3141 					pool_id = wbm_err_info.pool_id;
3142 					err_code = wbm_err_info.rxdma_err_code;
3143 					tlv_hdr = rx_tlv_hdr;
3144 					dp_rx_process_rxdma_err(soc, nbuf,
3145 								tlv_hdr, NULL,
3146 								err_code,
3147 								pool_id);
3148 					break;
3149 				default:
3150 					dp_rx_nbuf_free(nbuf);
3151 					dp_err_rl("RXDMA error %d",
3152 						  wbm_err_info.rxdma_err_code);
3153 				}
3154 			} else if (wbm_err_info.rxdma_psh_rsn
3155 					== HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
3156 				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
3157 						    rx_tlv_hdr,
3158 						    HAL_RX_WBM_ERR_SRC_RXDMA);
3159 			} else if (wbm_err_info.rxdma_psh_rsn
3160 					== HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
3161 				dp_rx_err_err("rxdma push reason %u",
3162 						wbm_err_info.rxdma_psh_rsn);
3163 				DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
3164 				dp_rx_nbuf_free(nbuf);
3165 			} else {
3166 				/* should not enter here */
3167 				dp_rx_err_alert("invalid rxdma push reason %u",
3168 						wbm_err_info.rxdma_psh_rsn);
3169 				dp_rx_nbuf_free(nbuf);
3170 				qdf_assert_always(0);
3171 			}
3172 		} else {
3173 			/* Should not come here */
3174 			qdf_assert(0);
3175 		}
3176 
3177 		if (txrx_peer)
3178 			dp_txrx_peer_unref_delete(txrx_ref_handle,
3179 						  DP_MOD_ID_RX_ERR);
3180 
3181 		nbuf = next;
3182 	}
3183 	return rx_bufs_used; /* Assume no scale factor for now */
3184 }
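
/*
 * The SG handling in dp_rx_wbm_err_process() accumulates buffers flagged
 * with the msdu-continuation bit and closes the scatter list on the first
 * buffer without it. A minimal sketch of that accumulation, assuming
 * hypothetical sg_append()/deliver() list helpers:
 *
 *	sg_append(&sg_head, &sg_tail, nbuf);
 *	if (!msdu_continuation) {
 *		// terminal buffer: full MSDU is now assembled
 *		deliver(sg_head);
 *		sg_head = sg_tail = NULL;
 *	}
 */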
3185 
3186 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3187 
3188 /**
3189  * dup_desc_dbg() - dump and assert if duplicate rx desc found
3190  *
3191  * @soc: core DP main context
3192  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
3193  * @rx_desc: void pointer to rx descriptor
3194  *
3195  * Return: void
3196  */
3197 static void dup_desc_dbg(struct dp_soc *soc,
3198 			 hal_rxdma_desc_t rxdma_dst_ring_desc,
3199 			 void *rx_desc)
3200 {
3201 	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
3202 	dp_rx_dump_info_and_assert(
3203 			soc,
3204 			soc->rx_rel_ring.hal_srng,
3205 			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
3206 			rx_desc);
3207 }
3208 
3209 /**
3210  * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
3211  *
3212  * @soc: core DP main context
3213  * @mac_id: mac id which is one of 3 mac_ids
3214  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
3215  * @head: head of descs list to be freed
3216  * @tail: tail of descs list to be freed
3217  *
3218  * Return: number of msdus in MPDU to be popped
3219  */
3220 static inline uint32_t
3221 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
3222 	hal_rxdma_desc_t rxdma_dst_ring_desc,
3223 	union dp_rx_desc_list_elem_t **head,
3224 	union dp_rx_desc_list_elem_t **tail)
3225 {
3226 	void *rx_msdu_link_desc;
3227 	qdf_nbuf_t msdu;
3228 	qdf_nbuf_t last;
3229 	struct hal_rx_msdu_list msdu_list;
3230 	uint16_t num_msdus;
3231 	struct hal_buf_info buf_info;
3232 	uint32_t rx_bufs_used = 0;
3233 	uint32_t msdu_cnt;
3234 	uint32_t i;
3235 	uint8_t push_reason;
3236 	uint8_t rxdma_error_code = 0;
3237 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
3238 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
3239 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
3240 	hal_rxdma_desc_t ring_desc;
3241 	struct rx_desc_pool *rx_desc_pool;
3242 
3243 	if (!pdev) {
3244 		dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
3245 				soc, mac_id);
3246 		return rx_bufs_used;
3247 	}
3248 
3249 	msdu = 0;
3250 
3251 	last = NULL;
3252 
3253 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
3254 				     &buf_info, &msdu_cnt);
3255 
3256 	push_reason =
3257 		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
3258 	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
3259 		rxdma_error_code =
3260 			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
3261 	}
3262 
3263 	do {
3264 		rx_msdu_link_desc =
3265 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
3266 
3267 		qdf_assert_always(rx_msdu_link_desc);
3268 
3269 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
3270 				     &msdu_list, &num_msdus);
3271 
3272 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
3273 			/* If the msdus belong to an NSS offloaded radio &&
3274 			 * the rbm is not SW3_BM, then return the msdu_link
3275 			 * descriptor without freeing the msdus (nbufs); let
3276 			 * these buffers be given to the NSS completion ring
3277 			 * for NSS to free them.
3278 			 * Else iterate through the msdu link desc list and
3279 			 * free each msdu in the list.
3280 			 */
3281 			if (msdu_list.rbm[0] !=
3282 				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) &&
3283 			    wlan_cfg_get_dp_pdev_nss_enabled(
3284 							pdev->wlan_cfg_ctx))
3285 				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
3286 			else {
3287 				for (i = 0; i < num_msdus; i++) {
3288 					struct dp_rx_desc *rx_desc =
3289 						soc->arch_ops.
3290 						dp_rx_desc_cookie_2_va(
3291 							soc,
3292 							msdu_list.sw_cookie[i]);
3293 					qdf_assert_always(rx_desc);
3294 					msdu = rx_desc->nbuf;
3295 					/*
3296 					 * This is an unlikely scenario
3297 					 * where the host is reaping
3298 					 * a descriptor which
3299 					 * it already reaped just a while ago
3300 					 * but is yet to replenish
3301 					 * it back to HW.
3302 					 * In this case the host will dump
3303 					 * the last 128 descriptors
3304 					 * including the software descriptor
3305 					 * rx_desc and assert.
3306 					 */
3307 					ring_desc = rxdma_dst_ring_desc;
3308 					if (qdf_unlikely(!rx_desc->in_use)) {
3309 						dup_desc_dbg(soc,
3310 							     ring_desc,
3311 							     rx_desc);
3312 						continue;
3313 					}
3314 
3315 					if (rx_desc->unmapped == 0) {
3316 						rx_desc_pool =
3317 							&soc->rx_desc_buf[rx_desc->pool_id];
3318 						dp_ipa_rx_buf_smmu_mapping_lock(soc);
3319 						dp_rx_nbuf_unmap_pool(soc,
3320 								      rx_desc_pool,
3321 								      msdu);
3322 						rx_desc->unmapped = 1;
3323 						dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3324 					}
3325 
3326 					dp_rx_err_debug("%pK: msdu_nbuf=%pK ",
3327 							soc, msdu);
3328 
3329 					dp_rx_buffer_pool_nbuf_free(soc, msdu,
3330 							rx_desc->pool_id);
3331 					rx_bufs_used++;
3332 					dp_rx_add_to_free_desc_list(head,
3333 						tail, rx_desc);
3334 				}
3335 			}
3336 		} else {
3337 			rxdma_error_code = HAL_RXDMA_ERR_WAR;
3338 		}
3339 
3340 		/*
3341 		 * Store the current link buffer into the local structure
3342 		 * to be used for release purposes.
3343 		 */
3344 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3345 					     buf_info.paddr, buf_info.sw_cookie,
3346 					     buf_info.rbm);
3347 
3348 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3349 					      &buf_info);
3350 		dp_rx_link_desc_return_by_addr(soc,
3351 					       (hal_buff_addrinfo_t)
3352 						rx_link_buf_info,
3353 						bm_action);
3354 	} while (buf_info.paddr);
3355 
3356 	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
3357 	if (pdev)
3358 		DP_STATS_INC(pdev, err.rxdma_error, 1);
3359 
3360 	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
3361 		dp_rx_err_err("%pK: Packet received with Decrypt error", soc);
3362 	}
3363 
3364 	return rx_bufs_used;
3365 }
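
/*
 * Note the release ordering in the do/while above: the current link
 * buffer's address info is snapshotted into rx_link_buf_info before
 * hal_rx_mon_next_link_desc_get() overwrites buf_info with the next hop,
 * so the return-by-addr releases the descriptor just walked. In pseudo
 * form, with hypothetical helpers:
 *
 *	cur = buf_info;			// snapshot the current hop
 *	advance_to_next(&buf_info);	// buf_info now points at next
 *	release_by_addr(soc, &cur);	// release the one just processed
 */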
3366 
3367 uint32_t
3368 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3369 		     uint32_t mac_id, uint32_t quota)
3370 {
3371 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
3372 	hal_rxdma_desc_t rxdma_dst_ring_desc;
3373 	hal_soc_handle_t hal_soc;
3374 	void *err_dst_srng;
3375 	union dp_rx_desc_list_elem_t *head = NULL;
3376 	union dp_rx_desc_list_elem_t *tail = NULL;
3377 	struct dp_srng *dp_rxdma_srng;
3378 	struct rx_desc_pool *rx_desc_pool;
3379 	uint32_t work_done = 0;
3380 	uint32_t rx_bufs_used = 0;
3381 
3382 	if (!pdev)
3383 		return 0;
3384 
3385 	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;
3386 
3387 	if (!err_dst_srng) {
3388 		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
3389 			      soc, err_dst_srng);
3390 		return 0;
3391 	}
3392 
3393 	hal_soc = soc->hal_soc;
3394 
3395 	qdf_assert(hal_soc);
3396 
3397 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
3398 		dp_rx_err_err("%pK: HAL Monitor Destination Ring access failed -- %pK",
3399 			      soc, err_dst_srng);
3400 		return 0;
3401 	}
3402 
3403 	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
3404 		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
3405 
3406 			rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
3407 						rxdma_dst_ring_desc,
3408 						&head, &tail);
3409 	}
3410 
3411 	dp_srng_access_end(int_ctx, soc, err_dst_srng);
3412 
3413 	if (rx_bufs_used) {
3414 		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3415 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3416 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
3417 		} else {
3418 			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
3419 			rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
3420 		}
3421 
3422 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3423 			rx_desc_pool, rx_bufs_used, &head, &tail);
3424 
3425 		work_done += rx_bufs_used;
3426 	}
3427 
3428 	return work_done;
3429 }
3430 
3431 #ifndef QCA_HOST_MODE_WIFI_DISABLED
3432 
static inline void
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
			hal_rxdma_desc_t rxdma_dst_ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail,
			uint32_t *rx_bufs_used)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t msdu_cnt, i;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct rx_desc_pool *rx_desc_pool;
	struct dp_rx_desc *rx_desc;

	msdu = NULL;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
				     &buf_info, &msdu_cnt);

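	/* Walk the chain of MSDU link descriptors for this MPDU */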
	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		if (!rx_msdu_link_desc) {
			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
			break;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

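		/*
		 * A link descriptor whose first cookie carries the
		 * special marker is assumed to reference no SW-owned
		 * MSDU buffers; in that case only the link descriptor
		 * itself is returned to HW below.
		 */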
		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			for (i = 0; i < num_msdus; i++) {
				if (!dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[i])) {
					dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x",
							  msdu_list.sw_cookie[i]);
					continue;
				}

				rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
							soc,
							msdu_list.sw_cookie[i]);
				qdf_assert_always(rx_desc);
				rx_desc_pool =
					&soc->rx_desc_buf[rx_desc->pool_id];
				msdu = rx_desc->nbuf;

				/*
				 * This is an unlikely scenario where the
				 * host is reaping a descriptor which it
				 * already reaped just a while ago, but is
				 * yet to replenish it back to HW.
				 */
				if (qdf_unlikely(!rx_desc->in_use) ||
				    qdf_unlikely(!msdu)) {
					dp_rx_err_info_rl("Reaping rx_desc not in use!");
					continue;
				}

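				/*
				 * Hold the IPA SMMU mapping lock so the
				 * unmap below does not race with IPA
				 * SMMU map/unmap operations on the same
				 * Rx buffers.
				 */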
				dp_ipa_rx_buf_smmu_mapping_lock(soc);
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
				rx_desc->unmapped = 1;
				dp_ipa_rx_buf_smmu_mapping_unlock(soc);

				dp_rx_buffer_pool_nbuf_free(soc, msdu,
							    rx_desc->pool_id);
				rx_bufs_used[rx_desc->pool_id]++;
				dp_rx_add_to_free_desc_list(head,
							    tail, rx_desc);
			}
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purposes.
		 */
		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
					     buf_info.paddr, buf_info.sw_cookie,
					     buf_info.rbm);

		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
					      &buf_info);
		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	} while (buf_info.paddr);
}

/*
 * dp_handle_wbm_internal_error() - handles wbm_internal_error case
 *
 * @soc: core DP main context
 * @hal_desc: hal descriptor
 * @buf_type: indicates if the buffer is of type link desc or msdu
 *
 * wbm_internal_error is seen in the following scenarios:
 *
 * 1. Null pointers detected in WBM_RELEASE_RING descriptors
 * 2. Null pointers detected during delinking process
 *
 * Some null pointer cases:
 *
 * a. MSDU buffer pointer is NULL
 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
 *
 * Return: None
 */
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type)
{
	struct hal_buf_info buf_info = {0};
	struct dp_rx_desc *rx_desc = NULL;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0};
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;
	uint8_t mac_id;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info);

	if (!buf_info.paddr) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
		return;
	}

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc,
				  &buf_info);
	pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie);

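	/*
	 * The release ring entry either references an MSDU buffer
	 * directly or an MSDU link descriptor; handle each case.
	 */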
	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
							soc,
							buf_info.sw_cookie);

		if (rx_desc && rx_desc->nbuf) {
			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
			dp_ipa_rx_buf_smmu_mapping_lock(soc);
			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
					      rx_desc->nbuf);
			rx_desc->unmapped = 1;
			dp_ipa_rx_buf_smmu_mapping_unlock(soc);

			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
						    rx_desc->pool_id);
			dp_rx_add_to_free_desc_list(&head,
						    &tail,
						    rx_desc);

			rx_bufs_reaped[rx_desc->pool_id]++;
		}
	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
		dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc,
					&head, &tail, rx_bufs_reaped);
	}

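	/* Replenish all the buffers reaped above back to the refill rings */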
	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		struct rx_desc_pool *rx_desc_pool;
		struct dp_srng *dp_rxdma_srng;

		if (!rx_bufs_reaped[mac_id])
			continue;

		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped[mac_id],
					&head, &tail);
	}
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */