xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #include "dp_types.h"
22 #include "dp_rx.h"
23 #include "dp_tx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 #include "hal_api.h"
27 #include "qdf_trace.h"
28 #include "qdf_nbuf.h"
29 #include "dp_rx_defrag.h"
30 #include "dp_ipa.h"
31 #ifdef WIFI_MONITOR_SUPPORT
32 #include "dp_htt.h"
33 #include <dp_mon.h>
34 #endif
35 #ifdef FEATURE_WDS
36 #include "dp_txrx_wds.h"
37 #endif
38 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
39 #include "qdf_net_types.h"
40 #include "dp_rx_buffer_pool.h"
41 
42 #define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
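/* dp_rx_err_err is used throughout this file; its definition is assumed here
 * to mirror the neighbouring severity macros.
 */
#define dp_rx_err_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)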
43 #define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
44 #define dp_rx_err_info(params...) \
45 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
46 #define dp_rx_err_info_rl(params...) \
47 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
48 #define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params)
49 
50 #ifndef QCA_HOST_MODE_WIFI_DISABLED
51 
52 
53 /* Max regular Rx packet routing error */
54 #define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20
55 #define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10
56 #define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* micro seconds */
57 
58 #ifdef FEATURE_MEC
59 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
60 			    struct dp_txrx_peer *txrx_peer,
61 			    uint8_t *rx_tlv_hdr,
62 			    qdf_nbuf_t nbuf)
63 {
64 	struct dp_vdev *vdev = txrx_peer->vdev;
65 	struct dp_pdev *pdev = vdev->pdev;
66 	struct dp_mec_entry *mecentry = NULL;
67 	struct dp_ast_entry *ase = NULL;
68 	uint16_t sa_idx = 0;
69 	uint8_t *data;
70 	/*
71 	 * Multicast Echo Check is required only if vdev is STA and
72 	 * received pkt is a multicast/broadcast pkt; otherwise
73 	 * skip the MEC check.
74 	 */
75 	if (vdev->opmode != wlan_op_mode_sta)
76 		return false;
77 	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
78 		return false;
79 
80 	data = qdf_nbuf_data(nbuf);
81 
82 	/*
83 	 * if the received pkt's src mac addr matches the vdev
84 	 * mac address then drop the pkt as it is looped back
85 	 */
86 	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
87 			  vdev->mac_addr.raw,
88 			  QDF_MAC_ADDR_SIZE)))
89 		return true;
90 
91 	/*
92 	 * In case of qwrap isolation mode, do not drop loopback packets.
93 	 * In isolation mode, all packets from the wired stations need to go
94 	 * to rootap and loop back to reach the wireless stations and
95 	 * vice-versa.
96 	 */
97 	if (qdf_unlikely(vdev->isolation_vdev))
98 		return false;
99 
100 	/*
101 	 * if the received pkt's src mac addr matches the MAC addr of a
102 	 * wired PC that sits behind the STA, or the MAC addr of a wireless
103 	 * STA that sits behind the repeater, then drop the pkt as it is
104 	 * looped back
105 	 */
106 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
107 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
108 
109 		if ((sa_idx < 0) ||
110 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
111 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
112 				  "invalid sa_idx: %d", sa_idx);
113 			qdf_assert_always(0);
114 		}
115 
116 		qdf_spin_lock_bh(&soc->ast_lock);
117 		ase = soc->ast_table[sa_idx];
118 
119 		/*
120 		 * this check was not needed since MEC is not dependent on AST,
121 		 * but if we don't have this check SON has some issues in the
122 		 * dual backhaul scenario. In APS SON mode, a client connected
123 		 * to the RE on 2G sends multicast packets. The RE sends them to
124 		 * the CAP over the 5G backhaul, and the CAP loops them back to
125 		 * the RE on 2G. On receiving them on the 2G STA vap, we assume
126 		 * that the client has roamed and kick out the client.
127 		 */
128 		if (ase && (ase->peer_id != txrx_peer->peer_id)) {
129 			qdf_spin_unlock_bh(&soc->ast_lock);
130 			goto drop;
131 		}
132 
133 		qdf_spin_unlock_bh(&soc->ast_lock);
134 	}
135 
136 	qdf_spin_lock_bh(&soc->mec_lock);
137 
138 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
139 						   &data[QDF_MAC_ADDR_SIZE]);
140 	if (!mecentry) {
141 		qdf_spin_unlock_bh(&soc->mec_lock);
142 		return false;
143 	}
144 
145 	qdf_spin_unlock_bh(&soc->mec_lock);
146 
147 drop:
148 	dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT,
149 		       soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));
150 
151 	return true;
152 }
153 #endif
154 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
155 
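/**
 * dp_rx_link_desc_refill_duplicate_check() - check whether the link desc
 *			address being refilled duplicates the last recorded
 *			one, and update the record
 * @soc: core DP main context
 * @buf_info: last recorded link desc buffer info, updated in place
 * @ring_buf_info: buffer addr info of the current ring descriptor
 */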
156 void dp_rx_link_desc_refill_duplicate_check(
157 				struct dp_soc *soc,
158 				struct hal_buf_info *buf_info,
159 				hal_buff_addrinfo_t ring_buf_info)
160 {
161 	struct hal_buf_info current_link_desc_buf_info = { 0 };
162 
163 	/* do duplicate link desc address check */
164 	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
165 					  &current_link_desc_buf_info);
166 
167 	/*
168 	 * TODO - Check if the hal soc api call can be removed
169 	 * since the cookie is just used for print.
170 	 * buffer_addr_info is the first element of ring_desc
171 	 */
172 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
173 				  (uint32_t *)ring_buf_info,
174 				  &current_link_desc_buf_info);
175 
176 	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
177 			 buf_info->paddr)) {
178 		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
179 			   current_link_desc_buf_info.paddr,
180 			   current_link_desc_buf_info.sw_cookie);
181 		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
182 	}
183 	*buf_info = current_link_desc_buf_info;
184 }
185 
186 /**
187  * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
188  *					HW (WBM) by address
189  *
190  * @soc: core DP main context
191  * @link_desc_addr: link descriptor addr
192  * @bm_action: buffer manager action to take on the link descriptor
193  * Return: QDF_STATUS
194  */
195 QDF_STATUS
196 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
197 			       hal_buff_addrinfo_t link_desc_addr,
198 			       uint8_t bm_action)
199 {
200 	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
201 	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
202 	hal_soc_handle_t hal_soc = soc->hal_soc;
203 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
204 	void *src_srng_desc;
205 
206 	if (!wbm_rel_srng) {
207 		dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc);
208 		return status;
209 	}
210 
211 	/* do duplicate link desc address check */
212 	dp_rx_link_desc_refill_duplicate_check(
213 				soc,
214 				&soc->last_op_info.wbm_rel_link_desc,
215 				link_desc_addr);
216 
217 	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
218 
219 		/* TODO */
220 		/*
221 		 * Need API to convert from hal_ring pointer to
222 		 * Ring Type / Ring Id combo
223 		 */
224 		dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK",
225 			      soc, wbm_rel_srng);
226 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
227 		goto done;
228 	}
229 	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
230 	if (qdf_likely(src_srng_desc)) {
231 		/* Return link descriptor through WBM ring (SW2WBM)*/
232 		hal_rx_msdu_link_desc_set(hal_soc,
233 				src_srng_desc, link_desc_addr, bm_action);
234 		status = QDF_STATUS_SUCCESS;
235 	} else {
236 		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
237 
238 		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);
239 
240 		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
241 			   srng->ring_id,
242 			   soc->stats.rx.err.hal_ring_access_full_fail);
243 		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
244 			   *srng->u.src_ring.hp_addr,
245 			   srng->u.src_ring.reap_hp,
246 			   *srng->u.src_ring.tp_addr,
247 			   srng->u.src_ring.cached_tp);
248 		QDF_BUG(0);
249 	}
250 done:
251 	hal_srng_access_end(hal_soc, wbm_rel_srng);
252 	return status;
253 
254 }
255 
256 qdf_export_symbol(dp_rx_link_desc_return_by_addr);
257 
258 /**
259  * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
260  *				(WBM), following error handling
261  *
262  * @soc: core DP main context
263  * @ring_desc: opaque pointer to the REO error ring descriptor
264  * @bm_action: buffer manager action to take on the link descriptor
265  * Return: QDF_STATUS
266  */
267 QDF_STATUS
268 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
269 		       uint8_t bm_action)
270 {
271 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
272 
273 	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
274 }
275 
276 #ifndef QCA_HOST_MODE_WIFI_DISABLED
277 
278 /**
279  * dp_rx_msdus_drop() - Drops all MSDUs in an MPDU
280  *
281  * @soc: core txrx main context
282  * @ring_desc: opaque pointer to the REO error ring descriptor
283  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
284  * @mac_id: pointer filled with the mac_id (pool_id) of the dropped
285  *	    buffers
286  * @quota: No. of units (packets) that can be serviced in one shot.
287  *
288  * This function is used to drop all MSDUs in an MPDU
289  *
290  * Return: uint32_t: No. of elements processed
291  */
292 static uint32_t
293 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
294 		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
295 		 uint8_t *mac_id,
296 		 uint32_t quota)
297 {
298 	uint32_t rx_bufs_used = 0;
299 	void *link_desc_va;
300 	struct hal_buf_info buf_info;
301 	struct dp_pdev *pdev;
302 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
303 	int i;
304 	uint8_t *rx_tlv_hdr;
305 	uint32_t tid;
306 	struct rx_desc_pool *rx_desc_pool;
307 	struct dp_rx_desc *rx_desc;
308 	/* First field in REO Dst ring Desc is buffer_addr_info */
309 	void *buf_addr_info = ring_desc;
310 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
311 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
312 
313 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info);
314 
315 	/* buffer_addr_info is the first element of ring_desc */
316 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
317 				  (uint32_t *)ring_desc,
318 				  &buf_info);
319 
320 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
321 	if (!link_desc_va) {
322 		dp_rx_err_debug("link desc va is null, soc %pK", soc);
323 		return rx_bufs_used;
324 	}
325 
326 more_msdu_link_desc:
327 	/* No UNMAP required -- this is "malloc_consistent" memory */
328 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
329 			     &mpdu_desc_info->msdu_count);
330 
331 	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
332 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
333 						soc, msdu_list.sw_cookie[i]);
334 
335 		qdf_assert_always(rx_desc);
336 
337 		/* all buffers from an MSDU link belong to the same pdev */
338 		*mac_id = rx_desc->pool_id;
339 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
340 		if (!pdev) {
341 			dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
342 					soc, rx_desc->pool_id);
343 			return rx_bufs_used;
344 		}
345 
346 		if (!dp_rx_desc_check_magic(rx_desc)) {
347 			dp_rx_err_err("%pK: Invalid rx_desc cookie=%d",
348 				      soc, msdu_list.sw_cookie[i]);
349 			return rx_bufs_used;
350 		}
351 
352 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
353 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
354 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
355 		rx_desc->unmapped = 1;
356 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
357 
358 		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);
359 
360 		rx_bufs_used++;
361 		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
362 						rx_desc->rx_buf_start);
363 		dp_rx_err_err("%pK: Packet received with PN error for tid :%d",
364 			      soc, tid);
365 
366 		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
367 		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
368 			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);
369 
370 		dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info,
371 				      rx_desc->nbuf,
372 				      QDF_TX_RX_STATUS_DROP, true);
373 		/* Just free the buffers */
374 		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);
375 
376 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
377 					    &pdev->free_list_tail, rx_desc);
378 	}
379 
380 	/*
381 	 * If the msdus are spread across multiple link-descriptors,
382 	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
383 	 * spread across multiple buffers). Hence, it is
384 	 * necessary to check the next link_descriptor and release
385 	 * all the msdus that are part of it.
386 	 */
387 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
388 			link_desc_va,
389 			&next_link_desc_addr_info);
390 
391 	if (hal_rx_is_buf_addr_info_valid(
392 				&next_link_desc_addr_info)) {
393 		/* Clear the next link desc info for the current link_desc */
394 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
395 
396 		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
397 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
398 		hal_rx_buffer_addr_info_get_paddr(
399 				&next_link_desc_addr_info,
400 				&buf_info);
401 		/* buffer_addr_info is the first element of ring_desc */
402 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
403 					  (uint32_t *)&next_link_desc_addr_info,
404 					  &buf_info);
405 		cur_link_desc_addr_info = next_link_desc_addr_info;
406 		buf_addr_info = &cur_link_desc_addr_info;
407 
408 		link_desc_va =
409 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
410 
411 		goto more_msdu_link_desc;
412 	}
413 	quota--;
414 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
415 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
416 	return rx_bufs_used;
417 }
418 
419 /**
420  * dp_rx_pn_error_handle() - Handles PN check errors
421  *
422  * @soc: core txrx main context
423  * @ring_desc: opaque pointer to the REO error ring descriptor
424  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
425  * @mac_id: pointer filled with the mac_id (pool_id) of the dropped
426  *	    buffers
427  * @quota: No. of units (packets) that can be serviced in one shot.
428  *
429  * This function implements PN error handling.
430  * If the peer is configured to ignore the PN check errors
431  * or if DP decides that this frame is still OK, the frame can be
432  * re-injected back to REO to use some of the other features
433  * of REO e.g. duplicate detection/routing to other cores
434  *
435  * Return: uint32_t: No. of elements processed
436  */
437 static uint32_t
438 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
439 		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
440 		      uint8_t *mac_id,
441 		      uint32_t quota)
442 {
443 	uint16_t peer_id;
444 	uint32_t rx_bufs_used = 0;
445 	struct dp_txrx_peer *txrx_peer;
446 	bool peer_pn_policy = false;
447 	dp_txrx_ref_handle txrx_ref_handle = NULL;
448 
449 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
450 					       mpdu_desc_info->peer_meta_data);
451 
452 
453 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
454 						   &txrx_ref_handle,
455 						   DP_MOD_ID_RX_ERR);
456 
457 	if (qdf_likely(txrx_peer)) {
458 		/*
459 		 * TODO: Check for peer specific policies & set peer_pn_policy
460 		 */
461 		dp_err_rl("discard rx due to PN error for peer  %pK",
462 			  txrx_peer);
463 
464 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
465 	}
466 	dp_rx_err_err("%pK: Packet received with PN error", soc);
467 
468 	/* No peer PN policy -- definitely drop */
469 	if (!peer_pn_policy)
470 		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
471 						mpdu_desc_info,
472 						mac_id, quota);
473 
474 	return rx_bufs_used;
475 }
476 
477 #ifdef DP_RX_DELIVER_ALL_OOR_FRAMES
478 /**
479  * dp_rx_deliver_oor_frame() - deliver OOR frames to stack
480  * @soc: Datapath soc handler
481  * @txrx_peer: pointer to DP txrx peer
482  * @nbuf: pointer to the skb of RX frame
483  * @frame_mask: the mask for special frame needed
484  * @rx_tlv_hdr: start of rx tlv header
485  *
486  * Note: msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
487  * a single nbuf is expected.
488  *
489  * Return: true - nbuf has been delivered to stack, false - not.
490  */
491 static bool
492 dp_rx_deliver_oor_frame(struct dp_soc *soc,
493 			struct dp_txrx_peer *txrx_peer,
494 			qdf_nbuf_t nbuf, uint32_t frame_mask,
495 			uint8_t *rx_tlv_hdr)
496 {
497 	uint32_t l2_hdr_offset = 0;
498 	uint16_t msdu_len = 0;
499 	uint32_t skip_len;
500 
501 	l2_hdr_offset =
502 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
503 
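	/*
	 * For frag (chained) nbufs only the L3 header padding is skipped;
	 * a linear nbuf additionally skips the Rx TLV header and has its
	 * length set from the stored msdu_len first.
	 */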
504 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
505 		skip_len = l2_hdr_offset;
506 	} else {
507 		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
508 		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
509 		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
510 	}
511 
512 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
513 	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
514 	qdf_nbuf_pull_head(nbuf, skip_len);
515 	qdf_nbuf_set_exc_frame(nbuf, 1);
516 
517 	dp_info_rl("OOR frame, mpdu sn 0x%x",
518 		   hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
519 	dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL);
520 	return true;
521 }
522 
523 #else
524 static bool
525 dp_rx_deliver_oor_frame(struct dp_soc *soc,
526 			struct dp_txrx_peer *txrx_peer,
527 			qdf_nbuf_t nbuf, uint32_t frame_mask,
528 			uint8_t *rx_tlv_hdr)
529 {
530 	return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
531 					   rx_tlv_hdr);
532 }
533 #endif
534 
535 /**
536  * dp_rx_oor_handle() - Handles the msdu which is OOR error
537  *
538  * @soc: core txrx main context
539  * @nbuf: pointer to msdu skb
540  * @peer_id: dp peer ID
541  * @rx_tlv_hdr: start of rx tlv header
542  *
543  * This function processes the msdu delivered from REO2TCL
544  * ring with error type OOR
545  *
546  * Return: None
547  */
548 static void
549 dp_rx_oor_handle(struct dp_soc *soc,
550 		 qdf_nbuf_t nbuf,
551 		 uint16_t peer_id,
552 		 uint8_t *rx_tlv_hdr)
553 {
554 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
555 				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
556 	struct dp_txrx_peer *txrx_peer = NULL;
557 	dp_txrx_ref_handle txrx_ref_handle = NULL;
558 
559 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
560 						   &txrx_ref_handle,
561 						   DP_MOD_ID_RX_ERR);
562 	if (!txrx_peer) {
563 		dp_info_rl("peer not found");
564 		goto free_nbuf;
565 	}
566 
567 	if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask,
568 				    rx_tlv_hdr)) {
569 		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
570 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
571 		return;
572 	}
573 
574 free_nbuf:
575 	if (txrx_peer)
576 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
577 
578 	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
579 	dp_rx_nbuf_free(nbuf);
580 }
581 
582 /**
583  * dp_rx_err_nbuf_pn_check() - Check if the PN number of this current packet
584  *				is a monotonic increment of the packet number
585  *				from the previous successfully re-ordered
586  *				frame.
587  * @soc: Datapath SOC handle
588  * @ring_desc: REO ring descriptor
589  * @nbuf: Current packet
590  *
591  * Return: QDF_STATUS_SUCCESS, if the pn check passes, else QDF_STATUS_E_FAILURE
592  */
593 static inline QDF_STATUS
594 dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc,
595 			qdf_nbuf_t nbuf)
596 {
597 	uint64_t prev_pn, curr_pn[2];
598 
599 	if (!hal_rx_encryption_info_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
600 		return QDF_STATUS_SUCCESS;
601 
602 	hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn);
603 	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), curr_pn);
604 
605 	if (curr_pn[0] > prev_pn)
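	/*
	 * curr_pn[] holds the PN from the Rx TLVs as two 64-bit words; only
	 * the lower word is compared against the previous PN reported in the
	 * REO descriptor.
	 */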
606 		return QDF_STATUS_SUCCESS;
607 
608 	return QDF_STATUS_E_FAILURE;
609 }
610 
611 #ifdef WLAN_SKIP_BAR_UPDATE
612 static
613 void dp_rx_err_handle_bar(struct dp_soc *soc,
614 			  struct dp_peer *peer,
615 			  qdf_nbuf_t nbuf)
616 {
617 	dp_info_rl("BAR update to H.W is skipped");
618 	DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
619 }
620 #else
621 static
622 void dp_rx_err_handle_bar(struct dp_soc *soc,
623 			  struct dp_peer *peer,
624 			  qdf_nbuf_t nbuf)
625 {
626 	uint8_t *rx_tlv_hdr;
627 	unsigned char type, subtype;
628 	uint16_t start_seq_num;
629 	uint32_t tid;
630 	QDF_STATUS status;
631 	struct ieee80211_frame_bar *bar;
632 
633 	/*
634 	 * 1. Is this a BAR frame? If not, discard it.
635 	 * 2. If it is, get the peer id, tid, ssn
636 	 * 2a. Do a tid update
637 	 */
638 
639 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
640 	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size);
641 
642 	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
643 	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
644 
645 	if (!(type == IEEE80211_FC0_TYPE_CTL &&
646 	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
647 		dp_err_rl("Not a BAR frame!");
648 		return;
649 	}
650 
651 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
652 	qdf_assert_always(tid < DP_MAX_TIDS);
653 
654 	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
655 
656 	dp_info_rl("tid %u window_size %u start_seq_num %u",
657 		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);
658 
659 	status = dp_rx_tid_update_wifi3(peer, tid,
660 					peer->rx_tid[tid].ba_win_size,
661 					start_seq_num,
662 					true);
663 	if (status != QDF_STATUS_SUCCESS) {
664 		dp_err_rl("failed to handle bar frame update rx tid");
665 		DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
666 	} else {
667 		DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
668 	}
669 }
670 #endif
671 
672 /**
673  * _dp_rx_bar_frame_handle(): Core of the BAR frame handling
674  * @soc: Datapath SoC handle
675  * @nbuf: packet being processed
676  * @mpdu_desc_info: mpdu desc info for the current packet
677  * @tid: tid on which the packet arrived
678  * @err_status: Flag to indicate if REO encountered an error while routing this
679  *		frame
680  * @error_code: REO error code
681  *
682  * Return: None
683  */
684 static void
685 _dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
686 			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
687 			uint32_t tid, uint8_t err_status, uint32_t error_code)
688 {
689 	uint16_t peer_id;
690 	struct dp_peer *peer;
691 
692 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
693 					       mpdu_desc_info->peer_meta_data);
694 	peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
695 	if (!peer)
696 		return;
697 
698 	dp_info_rl("BAR frame: "
699 		" peer_id = %d"
700 		" tid = %u"
701 		" SSN = %d"
702 		" error status = %d",
703 		peer->peer_id,
704 		tid,
705 		mpdu_desc_info->mpdu_seq,
706 		err_status);
707 
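	/*
	 * For BAR frames flagged with a 2K-jump or OOR REO error, hand the
	 * frame to dp_rx_err_handle_bar() so the Rx TID state (SSN/window)
	 * can be updated; any other BAR frame is only counted.
	 */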
708 	if (err_status == HAL_REO_ERROR_DETECTED) {
709 		switch (error_code) {
710 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
711 		case HAL_REO_ERR_BAR_FRAME_OOR:
712 			dp_rx_err_handle_bar(soc, peer, nbuf);
713 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
714 			break;
715 		default:
716 			DP_STATS_INC(soc, rx.bar_frame, 1);
717 		}
718 	}
719 
720 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
721 }
722 
723 /**
724  * dp_rx_bar_frame_handle() - Function to handle err BAR frames
725  * @soc: core DP main context
726  * @ring_desc: HAL ring desc
727  * @rx_desc: dp rx desc
728  * @mpdu_desc_info: mpdu desc info
729  * @err_status: error status reported by HW, @err_code: REO error code
730  * Handle the error BAR frames received. Ensure the SOC level
731  * stats are updated based on the REO error code. The BAR frames
732  * are further processed by updating the Rx tids with the start
733  * sequence number (SSN) and BA window size. Desc is returned
734  * to the free desc list
735  *
736  * Return: none
737  */
738 static void
739 dp_rx_bar_frame_handle(struct dp_soc *soc,
740 		       hal_ring_desc_t ring_desc,
741 		       struct dp_rx_desc *rx_desc,
742 		       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
743 		       uint8_t err_status,
744 		       uint32_t err_code)
745 {
746 	qdf_nbuf_t nbuf;
747 	struct dp_pdev *pdev;
748 	struct rx_desc_pool *rx_desc_pool;
749 	uint8_t *rx_tlv_hdr;
750 	uint32_t tid;
751 
752 	nbuf = rx_desc->nbuf;
753 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
754 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
755 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
756 	rx_desc->unmapped = 1;
757 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
758 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
759 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
760 					rx_tlv_hdr);
761 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
762 
763 	if (!pdev) {
764 		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
765 				soc, rx_desc->pool_id);
766 		return;
767 	}
768 
769 	_dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status,
770 				err_code);
771 	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf,
772 			      QDF_TX_RX_STATUS_DROP, true);
773 	dp_rx_link_desc_return(soc, ring_desc,
774 			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
775 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
776 				    rx_desc->pool_id);
777 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
778 				    &pdev->free_list_tail,
779 				    rx_desc);
780 }
781 
782 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
783 
784 /**
785  * dp_2k_jump_handle() - Function to handle 2k jump exception
786  *                        on WBM ring
787  *
788  * @soc: core DP main context
789  * @nbuf: buffer pointer
790  * @rx_tlv_hdr: start of rx tlv header
791  * @peer_id: peer id of first msdu
792  * @tid: Tid for which exception occurred
793  *
794  * This function handles 2k jump violations arising out
795  * of receiving aggregates in the non-BA case. This typically
796  * may happen if aggregates are received on a QoS-enabled TID
797  * while the Rx window size is still initialized to a value of 2. Or
798  * it may also happen if the negotiated window size is 1 but the peer
799  * sends aggregates.
800  *
801  */
802 
803 void
804 dp_2k_jump_handle(struct dp_soc *soc,
805 		  qdf_nbuf_t nbuf,
806 		  uint8_t *rx_tlv_hdr,
807 		  uint16_t peer_id,
808 		  uint8_t tid)
809 {
810 	struct dp_peer *peer = NULL;
811 	struct dp_rx_tid *rx_tid = NULL;
812 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;
813 
814 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
815 	if (!peer) {
816 		dp_rx_err_info_rl("%pK: peer not found", soc);
817 		goto free_nbuf;
818 	}
819 
820 	if (tid >= DP_MAX_TIDS) {
821 		dp_info_rl("invalid tid");
822 		goto nbuf_deliver;
823 	}
824 
825 	rx_tid = &peer->rx_tid[tid];
826 	qdf_spin_lock_bh(&rx_tid->tid_lock);
827 
828 	/* only if BA session is active, allow sending Delba */
829 	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
830 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
831 		goto nbuf_deliver;
832 	}
833 
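	/*
	 * Send a DELBA only if one is not already pending for this TID;
	 * delba_tx_status guards against requesting repeated DELBAs.
	 */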
834 	if (!rx_tid->delba_tx_status) {
835 		rx_tid->delba_tx_retry++;
836 		rx_tid->delba_tx_status = 1;
837 		rx_tid->delba_rcode =
838 			IEEE80211_REASON_QOS_SETUP_REQUIRED;
839 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
840 		if (soc->cdp_soc.ol_ops->send_delba) {
841 			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
842 				     1);
843 			soc->cdp_soc.ol_ops->send_delba(
844 					peer->vdev->pdev->soc->ctrl_psoc,
845 					peer->vdev->vdev_id,
846 					peer->mac_addr.raw,
847 					tid,
848 					rx_tid->delba_rcode,
849 					CDP_DELBA_2K_JUMP);
850 		}
851 	} else {
852 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
853 	}
854 
855 nbuf_deliver:
856 	if (dp_rx_deliver_special_frame(soc, peer->txrx_peer, nbuf, frame_mask,
857 					rx_tlv_hdr)) {
858 		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
859 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
860 		return;
861 	}
862 
863 free_nbuf:
864 	if (peer)
865 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
866 	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
867 	dp_rx_nbuf_free(nbuf);
868 }
869 
870 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
871     defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
872 bool
873 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
874 					      uint8_t pool_id,
875 					      uint8_t *rx_tlv_hdr,
876 					      qdf_nbuf_t nbuf)
877 {
878 	struct dp_peer *peer = NULL;
879 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
880 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
881 	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;
882 
883 	if (!pdev) {
884 		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
885 				soc, pool_id);
886 		return false;
887 	}
888 	/*
889 	 * WAR: for certain types of packets, if the peer_id is not correct the
890 	 * driver may not be able to find the peer. Try finding the peer by
891 	 * addr_2 of the received MPDU
892 	 */
893 	if (wh)
894 		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
895 					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
896 	if (peer) {
897 		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
898 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
899 				     QDF_TRACE_LEVEL_DEBUG);
900 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
901 				 1, qdf_nbuf_len(nbuf));
902 		dp_rx_nbuf_free(nbuf);
903 
904 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
905 		return true;
906 	}
907 	return false;
908 }
909 #else
910 bool
911 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
912 					      uint8_t pool_id,
913 					      uint8_t *rx_tlv_hdr,
914 					      qdf_nbuf_t nbuf)
915 {
916 	return false;
917 }
918 #endif
919 
920 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
921 {
922 	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
923 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
924 				 1, pkt_len);
925 		return true;
926 	} else {
927 		return false;
928 	}
929 }
930 
931 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
932 void
933 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
934 			    struct dp_vdev *vdev,
935 			    struct dp_txrx_peer *txrx_peer,
936 			    qdf_nbuf_t nbuf,
937 			    qdf_nbuf_t tail,
938 			    bool is_eapol)
939 {
940 	if (is_eapol && soc->eapol_over_control_port)
941 		dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
942 	else
943 		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
944 }
945 #else
946 void
947 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
948 			    struct dp_vdev *vdev,
949 			    struct dp_txrx_peer *txrx_peer,
950 			    qdf_nbuf_t nbuf,
951 			    qdf_nbuf_t tail,
952 			    bool is_eapol)
953 {
954 	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
955 }
956 #endif
957 
958 #ifdef WLAN_FEATURE_11BE_MLO
959 /**
960  * dp_rx_err_match_dhost() - function to check whether dest-mac is correct
961  * @eh: Ethernet header of incoming packet
962  * @vdev: dp_vdev object of the VAP on which this data packet is received
963  *
964  * Return: 1 if the destination mac is correct,
965  *         0 if this frame is not correctly destined to this VAP/MLD
966  */
967 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
968 {
969 	return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
970 			     QDF_MAC_ADDR_SIZE) == 0) ||
971 		(qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
972 			     QDF_MAC_ADDR_SIZE) == 0));
973 }
974 
975 #else
976 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
977 {
978 	return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
979 			    QDF_MAC_ADDR_SIZE) == 0);
980 }
981 #endif
982 
983 #ifndef QCA_HOST_MODE_WIFI_DISABLED
984 
985 bool
986 dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
987 {
988 	struct dp_soc *soc = vdev->pdev->soc;
989 
990 	if (!vdev->drop_3addr_mcast)
991 		return false;
992 
993 	if (vdev->opmode != wlan_op_mode_sta)
994 		return false;
995 
996 	if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
997 		return true;
998 
999 	return false;
1000 }
1001 
1002 /**
1003  * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed
1004  *				for this frame received in REO error ring.
1005  * @soc: Datapath SOC handle
1006  * @error: REO error detected or not
1007  * @error_code: Error code in case of REO error
1008  *
1009  * Return: true if the PN check is needed in software,
1010  *	false if the PN check is not needed.
1011  */
1012 static inline bool
1013 dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
1014 			     uint32_t error_code)
1015 {
1016 	return (soc->features.pn_in_reo_dest &&
1017 		(error == HAL_REO_ERROR_DETECTED &&
1018 		 (hal_rx_reo_is_2k_jump(error_code) ||
1019 		  hal_rx_reo_is_oor_error(error_code) ||
1020 		  hal_rx_reo_is_bar_oor_2k_jump(error_code))));
1021 }
1022 
1023 #ifdef DP_WAR_INVALID_FIRST_MSDU_FLAG
1024 static inline void
1025 dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
1026 				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1027 				  bool first_msdu_in_mpdu_processed)
1028 {
1029 	if (first_msdu_in_mpdu_processed) {
1030 		/*
1031 		 * This is the 2nd indication of first_msdu in the same mpdu.
1032 		 * Skip re-parsing the mpdu_desc_info and use the cached one,
1033 		 * since this msdu is most probably from the current mpdu
1034 		 * which is being processed
1035 		 */
1036 	} else {
1037 		hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc,
1038 						   qdf_nbuf_data(nbuf),
1039 						   mpdu_desc_info);
1040 	}
1041 }
1042 #else
1043 static inline void
1044 dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
1045 				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1046 				  bool first_msdu_in_mpdu_processed)
1047 {
1048 	hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, qdf_nbuf_data(nbuf),
1049 					   mpdu_desc_info);
1050 }
1051 #endif
1052 
1053 /**
1054  * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
1055  *
1056  * @soc: core txrx main context
1057  * @ring_desc: opaque pointer to the REO error ring descriptor
1058  * @mpdu_desc_info: pointer to mpdu level description info
1059  * @link_desc_va: pointer to msdu_link_desc virtual address
1060  * @err_code: reo error code fetched from ring entry
1061  *
1062  * Function to handle msdus fetched from msdu link desc, currently
1063  * supports REO error NULL queue, 2K jump, OOR.
1064  *
1065  * Return: msdu count processed
1066  */
1067 static uint32_t
1068 dp_rx_reo_err_entry_process(struct dp_soc *soc,
1069 			    void *ring_desc,
1070 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1071 			    void *link_desc_va,
1072 			    enum hal_reo_error_code err_code)
1073 {
1074 	uint32_t rx_bufs_used = 0;
1075 	struct dp_pdev *pdev;
1076 	int i;
1077 	uint8_t *rx_tlv_hdr_first;
1078 	uint8_t *rx_tlv_hdr_last;
1079 	uint32_t tid = DP_MAX_TIDS;
1080 	uint16_t peer_id;
1081 	struct dp_rx_desc *rx_desc;
1082 	struct rx_desc_pool *rx_desc_pool;
1083 	qdf_nbuf_t nbuf;
1084 	struct hal_buf_info buf_info;
1085 	struct hal_rx_msdu_list msdu_list;
1086 	uint16_t num_msdus;
1087 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
1088 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
1089 	/* First field in REO Dst ring Desc is buffer_addr_info */
1090 	void *buf_addr_info = ring_desc;
1091 	qdf_nbuf_t head_nbuf = NULL;
1092 	qdf_nbuf_t tail_nbuf = NULL;
1093 	uint16_t msdu_processed = 0;
1094 	QDF_STATUS status;
1095 	bool ret, is_pn_check_needed;
1096 	uint8_t rx_desc_pool_id;
1097 	struct dp_txrx_peer *txrx_peer = NULL;
1098 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1099 	hal_ring_handle_t hal_ring_hdl = soc->reo_exception_ring.hal_srng;
1100 	bool first_msdu_in_mpdu_processed = false;
1101 	bool msdu_dropped = false;
1102 
1103 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1104 					mpdu_desc_info->peer_meta_data);
1105 	is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
1106 							  HAL_REO_ERROR_DETECTED,
1107 							  err_code);
1108 more_msdu_link_desc:
1109 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
1110 			     &num_msdus);
1111 	for (i = 0; i < num_msdus; i++) {
1112 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
1113 						soc,
1114 						msdu_list.sw_cookie[i]);
1115 
1116 		qdf_assert_always(rx_desc);
1117 		nbuf = rx_desc->nbuf;
1118 
1119 		/*
1120 		 * this is an unlikely scenario where the host is reaping
1121 		 * a descriptor which it already reaped just a while ago
1122 		 * but is yet to replenish it back to HW.
1123 		 * In this case host will dump the last 128 descriptors
1124 		 * including the software descriptor rx_desc and assert.
1125 		 */
1126 		if (qdf_unlikely(!rx_desc->in_use) ||
1127 		    qdf_unlikely(!nbuf)) {
1128 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1129 			dp_info_rl("Reaping rx_desc not in use!");
1130 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1131 						   ring_desc, rx_desc);
1132 			/* ignore duplicate RX desc and continue to process */
1133 			/* Pop out the descriptor */
1134 			msdu_dropped = true;
1135 			continue;
1136 		}
1137 
1138 		ret = dp_rx_desc_paddr_sanity_check(rx_desc,
1139 						    msdu_list.paddr[i]);
1140 		if (!ret) {
1141 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1142 			rx_desc->in_err_state = 1;
1143 			msdu_dropped = true;
1144 			continue;
1145 		}
1146 
1147 		rx_desc_pool_id = rx_desc->pool_id;
1148 		/* all buffers from a MSDU link belong to same pdev */
1149 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id);
1150 
1151 		rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id];
1152 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
1153 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
1154 		rx_desc->unmapped = 1;
1155 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1156 
1157 		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
1158 		rx_bufs_used++;
1159 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1160 					    &pdev->free_list_tail, rx_desc);
1161 
1162 		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);
1163 
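		/*
		 * An MSDU spanning multiple Rx buffers carries the
		 * CONTINUATION flag; keep accumulating such buffers into the
		 * head/tail nbuf list and process the MSDU only once its last
		 * buffer has been reaped.
		 */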
1164 		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
1165 				 HAL_MSDU_F_MSDU_CONTINUATION))
1166 			continue;
1167 
1168 		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
1169 					     rx_desc_pool_id)) {
1170 			/* MSDU queued back to the pool */
1171 			msdu_dropped = true;
1172 			goto process_next_msdu;
1173 		}
1174 
1175 		if (is_pn_check_needed) {
1176 			if (msdu_list.msdu_info[i].msdu_flags &
1177 			    HAL_MSDU_F_FIRST_MSDU_IN_MPDU) {
1178 				dp_rx_err_populate_mpdu_desc_info(soc, nbuf,
1179 						mpdu_desc_info,
1180 						first_msdu_in_mpdu_processed);
1181 				first_msdu_in_mpdu_processed = true;
1182 			} else {
1183 				if (!first_msdu_in_mpdu_processed) {
1184 					/*
1185 					 * If no msdu in this mpdu was dropped
1186 					 * due to failed sanity checks, then
1187 					 * it's not expected to hit this
1188 					 * condition. Hence we assert here.
1189 					 */
1190 					if (!msdu_dropped)
1191 						qdf_assert_always(0);
1192 
1193 					/*
1194 					 * We do not have valid mpdu_desc_info
1195 					 * to process this nbuf, hence drop it.
1196 					 */
1197 					dp_rx_nbuf_free(nbuf);
1198 					/* TODO - Increment stats */
1199 					goto process_next_msdu;
1200 				}
1201 				/*
1202 				 * DO NOTHING -
1203 				 * Continue using the same mpdu_desc_info
1204 				 * details populated from the first msdu in
1205 				 * the mpdu.
1206 				 */
1207 			}
1208 
1209 			status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf);
1210 			if (QDF_IS_STATUS_ERROR(status)) {
1211 				DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail,
1212 					     1);
1213 				dp_rx_nbuf_free(nbuf);
1214 				goto process_next_msdu;
1215 			}
1216 
1217 			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1218 					mpdu_desc_info->peer_meta_data);
1219 
1220 			if (mpdu_desc_info->bar_frame)
1221 				_dp_rx_bar_frame_handle(soc, nbuf,
1222 							mpdu_desc_info, tid,
1223 							HAL_REO_ERROR_DETECTED,
1224 							err_code);
1225 		}
1226 
1227 		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
1228 		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);
1229 
1230 		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
1231 			nbuf = dp_rx_sg_create(soc, head_nbuf);
1232 			qdf_nbuf_set_is_frag(nbuf, 1);
1233 			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
1234 		}
1235 
1236 		switch (err_code) {
1237 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
1238 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
1239 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
1240 			/*
1241 			 * Only the first msdu carries a valid mpdu start
1242 			 * description TLV; use it for the following msdus.
1243 			 */
1244 			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1245 							   rx_tlv_hdr_last))
1246 				tid = hal_rx_mpdu_start_tid_get(
1247 							soc->hal_soc,
1248 							rx_tlv_hdr_first);
1249 
1250 			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
1251 					  peer_id, tid);
1252 			break;
1253 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
1254 		case HAL_REO_ERR_BAR_FRAME_OOR:
1255 			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
1256 			break;
1257 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
1258 			txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(
1259 							soc, peer_id,
1260 							&txrx_ref_handle,
1261 							DP_MOD_ID_RX_ERR);
1262 			if (!txrx_peer)
1263 				dp_info_rl("txrx_peer is null peer_id %u",
1264 					   peer_id);
1265 			soc->arch_ops.dp_rx_null_q_desc_handle(soc, nbuf,
1266 							       rx_tlv_hdr_last,
1267 							       rx_desc_pool_id,
1268 							       txrx_peer,
1269 							       TRUE);
1270 			if (txrx_peer)
1271 				dp_txrx_peer_unref_delete(txrx_ref_handle,
1272 							  DP_MOD_ID_RX_ERR);
1273 			break;
1274 		default:
1275 			dp_err_rl("Non-support error code %d", err_code);
1276 			dp_rx_nbuf_free(nbuf);
1277 		}
1278 
1279 process_next_msdu:
1280 		msdu_processed++;
1281 		head_nbuf = NULL;
1282 		tail_nbuf = NULL;
1283 	}
1284 
1285 	/*
1286 	 * If the msdus are spread across multiple link-descriptors,
1287 	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
1288 	 * spread across multiple buffers). Hence, it is
1289 	 * necessary to check the next link_descriptor and release
1290 	 * all the msdus that are part of it.
1291 	 */
1292 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
1293 			link_desc_va,
1294 			&next_link_desc_addr_info);
1295 
1296 	if (hal_rx_is_buf_addr_info_valid(
1297 				&next_link_desc_addr_info)) {
1298 		/* Clear the next link desc info for the current link_desc */
1299 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
1300 		dp_rx_link_desc_return_by_addr(
1301 				soc,
1302 				buf_addr_info,
1303 				HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1304 
1305 		hal_rx_buffer_addr_info_get_paddr(
1306 				&next_link_desc_addr_info,
1307 				&buf_info);
1308 		/* buffer_addr_info is the first element of ring_desc */
1309 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
1310 					  (uint32_t *)&next_link_desc_addr_info,
1311 					  &buf_info);
1312 		link_desc_va =
1313 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1314 		cur_link_desc_addr_info = next_link_desc_addr_info;
1315 		buf_addr_info = &cur_link_desc_addr_info;
1316 
1317 		goto more_msdu_link_desc;
1318 	}
1319 
1320 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
1321 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1322 	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
1323 		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);
1324 
1325 	return rx_bufs_used;
1326 }
1327 
1328 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1329 
1330 /**
1331  * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err or
1332  *			       wifi parse error frames to the OS.
1333  * @soc: core DP main context
1334  * @nbuf: buffer pointer
1335  * @rx_tlv_hdr: start of rx tlv header
1336  * @txrx_peer: peer reference
1337  * @err_code: rxdma err code
1338  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1339  * pool_id have the same mapping)
1340  *
1341  * Return: None
1342  */
1343 void
1344 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
1345 			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
1346 			uint8_t err_code, uint8_t mac_id)
1347 {
1348 	uint32_t pkt_len, l2_hdr_offset;
1349 	uint16_t msdu_len;
1350 	struct dp_vdev *vdev;
1351 	qdf_ether_header_t *eh;
1352 	bool is_broadcast;
1353 
1354 	/*
1355 	 * Check if DMA completed -- msdu_done is the last bit
1356 	 * to be written
1357 	 */
1358 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1359 
1360 		dp_err_rl("MSDU DONE failure");
1361 
1362 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1363 				     QDF_TRACE_LEVEL_INFO);
1364 		qdf_assert(0);
1365 	}
1366 
1367 	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
1368 							   rx_tlv_hdr);
1369 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1370 	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
1371 
1372 	if (dp_rx_check_pkt_len(soc, pkt_len)) {
1373 		/* Drop & free packet */
1374 		dp_rx_nbuf_free(nbuf);
1375 		return;
1376 	}
1377 	/* Set length in nbuf */
1378 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
1379 
1380 	qdf_nbuf_set_next(nbuf, NULL);
1381 
1382 	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1383 	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
1384 
1385 	if (!txrx_peer) {
1386 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL");
1387 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1388 				qdf_nbuf_len(nbuf));
1389 		/* Trigger invalid peer handler wrapper */
1390 		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
1391 		return;
1392 	}
1393 
1394 	vdev = txrx_peer->vdev;
1395 	if (!vdev) {
1396 		dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
1397 				 vdev);
1398 		/* Drop & free packet */
1399 		dp_rx_nbuf_free(nbuf);
1400 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1401 		return;
1402 	}
1403 
1404 	/*
1405 	 * Advance the packet start pointer by total size of
1406 	 * pre-header TLV's
1407 	 */
1408 	dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset);
1409 
1410 	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
1411 		uint8_t *pkt_type;
1412 
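		/*
		 * pkt_type points at the ethertype field right after the
		 * DA/SA MAC addresses. VLAN-tagged STP frames are counted and
		 * routed through the mesh/raw handling below; any other
		 * VLAN-tagged frame takes the regular Rx delivery path.
		 */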
1413 		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
1414 		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
1415 			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
1416 							htons(QDF_LLC_STP)) {
1417 				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
1418 				goto process_mesh;
1419 			} else {
1420 				goto process_rx;
1421 			}
1422 		}
1423 	}
1424 	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
1425 		goto process_mesh;
1426 
1427 	/*
1428 	 * WAPI cert AP sends rekey frames as unencrypted.
1429 	 * Thus RXDMA will report unencrypted frame error.
1430 	 * To pass WAPI cert case, SW needs to pass unencrypted
1431 	 * rekey frame to stack.
1432 	 */
1433 	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1434 		goto process_rx;
1435 	}
1436 	/*
1437 	 * In dynamic WEP case rekey frames are not encrypted
1438 	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled and
1439 	 * key install is already done
1440 	 */
1441 	if ((vdev->sec_type == cdp_sec_type_wep104) &&
1442 	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
1443 		goto process_rx;
1444 
1445 process_mesh:
1446 
1447 	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
1448 		dp_rx_nbuf_free(nbuf);
1449 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1450 		return;
1451 	}
1452 
1453 	if (vdev->mesh_vdev) {
1454 		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
1455 				      == QDF_STATUS_SUCCESS) {
1456 			dp_rx_err_info("%pK: mesh pkt filtered", soc);
1457 			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);
1458 
1459 			dp_rx_nbuf_free(nbuf);
1460 			return;
1461 		}
1462 		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
1463 	}
1464 process_rx:
1465 	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1466 							rx_tlv_hdr) &&
1467 		(vdev->rx_decap_type ==
1468 				htt_cmn_pkt_type_ethernet))) {
1469 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1470 		is_broadcast = (QDF_IS_ADDR_BROADCAST
1471 				(eh->ether_dhost)) ? 1 : 0 ;
1472 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
1473 					      qdf_nbuf_len(nbuf));
1474 		if (is_broadcast) {
1475 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1,
1476 						      qdf_nbuf_len(nbuf));
1477 		}
1478 	}
1479 
1480 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1481 		dp_rx_deliver_raw(vdev, nbuf, txrx_peer);
1482 	} else {
1483 		/* Update the protocol tag in SKB based on CCE metadata */
1484 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1485 					  EXCEPTION_DEST_RING_ID, true, true);
1486 		/* Update the flow tag in SKB based on FSE metadata */
1487 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
1488 		DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
1489 		qdf_nbuf_set_exc_frame(nbuf, 1);
1490 		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
1491 					    qdf_nbuf_is_ipv4_eapol_pkt(nbuf));
1492 	}
1493 
1494 	return;
1495 }
1496 
1497 /**
1498  * dp_rx_process_mic_error(): Function to pass mic error indication to umac
1499  * @soc: core DP main context
1500  * @nbuf: buffer pointer
1501  * @rx_tlv_hdr: start of rx tlv header
1502  * @txrx_peer: txrx peer handle
1503  *
1504  * return: void
1505  */
1506 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
1507 			     uint8_t *rx_tlv_hdr,
1508 			     struct dp_txrx_peer *txrx_peer)
1509 {
1510 	struct dp_vdev *vdev = NULL;
1511 	struct dp_pdev *pdev = NULL;
1512 	struct ol_if_ops *tops = NULL;
1513 	uint16_t rx_seq, fragno;
1514 	uint8_t is_raw;
1515 	unsigned int tid;
1516 	QDF_STATUS status;
1517 	struct cdp_rx_mic_err_info mic_failure_info;
1518 
1519 	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1520 					    rx_tlv_hdr))
1521 		return;
1522 
1523 	if (!txrx_peer) {
1524 		dp_info_rl("txrx_peer not found");
1525 		goto fail;
1526 	}
1527 
1528 	vdev = txrx_peer->vdev;
1529 	if (!vdev) {
1530 		dp_info_rl("VDEV not found");
1531 		goto fail;
1532 	}
1533 
1534 	pdev = vdev->pdev;
1535 	if (!pdev) {
1536 		dp_info_rl("PDEV not found");
1537 		goto fail;
1538 	}
1539 
1540 	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
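	/*
	 * For raw-decap frames a non-zero fragment number means this is a
	 * later fragment of an MPDU under defragmentation; hand it to the
	 * defrag path instead of indicating a MIC failure for it here.
	 */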
1541 	if (is_raw) {
1542 		fragno = dp_rx_frag_get_mpdu_frag_number(soc,
1543 							 qdf_nbuf_data(nbuf));
1544 		/* Can get only last fragment */
1545 		if (fragno) {
1546 			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
1547 							qdf_nbuf_data(nbuf));
1548 			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
1549 							qdf_nbuf_data(nbuf));
1550 
1551 			status = dp_rx_defrag_add_last_frag(soc, txrx_peer,
1552 							    tid, rx_seq, nbuf);
1553 			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
1554 				   "status %d !", rx_seq, fragno, status);
1555 			return;
1556 		}
1557 	}
1558 
1559 	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
1560 				  &mic_failure_info.da_mac_addr.bytes[0])) {
1561 		dp_err_rl("Failed to get da_mac_addr");
1562 		goto fail;
1563 	}
1564 
1565 	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
1566 				  &mic_failure_info.ta_mac_addr.bytes[0])) {
1567 		dp_err_rl("Failed to get ta_mac_addr");
1568 		goto fail;
1569 	}
1570 
1571 	mic_failure_info.key_id = 0;
1572 	mic_failure_info.multicast =
1573 		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
1574 	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
1575 	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
1576 	mic_failure_info.data = NULL;
1577 	mic_failure_info.vdev_id = vdev->vdev_id;
1578 
1579 	tops = pdev->soc->cdp_soc.ol_ops;
1580 	if (tops->rx_mic_error)
1581 		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
1582 				   &mic_failure_info);
1583 
1584 fail:
1585 	dp_rx_nbuf_free(nbuf);
1586 	return;
1587 }
1588 
1589 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1590 	defined(WLAN_MCAST_MLO) && !defined(CONFIG_MLO_SINGLE_DEV)
1591 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1592 			       struct dp_vdev *vdev,
1593 			       struct dp_txrx_peer *peer,
1594 			       qdf_nbuf_t nbuf)
1595 {
1596 	if (soc->arch_ops.dp_rx_mcast_handler) {
1597 		if (soc->arch_ops.dp_rx_mcast_handler(soc, vdev, peer, nbuf))
1598 			return true;
1599 	}
1600 	return false;
1601 }
1602 #else
1603 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1604 			       struct dp_vdev *vdev,
1605 			       struct dp_txrx_peer *peer,
1606 			       qdf_nbuf_t nbuf)
1607 {
1608 	return false;
1609 }
1610 #endif
1611 
1612 /**
1613  * dp_rx_err_route_hdl() - Function to send EAPOL frames to stack.
1614  *                            Free any other packet which comes in
1615  *                            this path.
1616  *
1617  * @soc: core DP main context
1618  * @nbuf: buffer pointer
1619  * @txrx_peer: txrx peer handle
1620  * @rx_tlv_hdr: start of rx tlv header
1621  * @err_src: rxdma/reo
1622  *
1623  * This function indicates EAPOL frame received in wbm error ring to stack.
1624  * Any other frame should be dropped.
1625  *
1626  * Return: None
1627  */
1628 static void
1629 dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
1630 		    struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr,
1631 		    enum hal_rx_wbm_error_source err_src)
1632 {
1633 	uint32_t pkt_len;
1634 	uint16_t msdu_len;
1635 	struct dp_vdev *vdev;
1636 	struct hal_rx_msdu_metadata msdu_metadata;
1637 	bool is_eapol;
1638 
1639 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
1640 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1641 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
1642 
1643 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
1644 		if (dp_rx_check_pkt_len(soc, pkt_len))
1645 			goto drop_nbuf;
1646 
1647 		/* Set length in nbuf */
1648 		qdf_nbuf_set_pktlen(
1649 			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
1650 		qdf_assert_always(nbuf->data == rx_tlv_hdr);
1651 	}
1652 
1653 	/*
1654 	 * Check if DMA completed -- msdu_done is the last bit
1655 	 * to be written
1656 	 */
1657 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1658 		dp_err_rl("MSDU DONE failure");
1659 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1660 				     QDF_TRACE_LEVEL_INFO);
1661 		qdf_assert(0);
1662 	}
1663 
1664 	if (!txrx_peer)
1665 		goto drop_nbuf;
1666 
1667 	vdev = txrx_peer->vdev;
1668 	if (!vdev) {
1669 		dp_err_rl("Null vdev!");
1670 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1671 		goto drop_nbuf;
1672 	}
1673 
1674 	/*
1675 	 * Advance the packet start pointer by total size of
1676 	 * pre-header TLV's
1677 	 */
1678 	if (qdf_nbuf_is_frag(nbuf))
1679 		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
1680 	else
1681 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
1682 				   soc->rx_pkt_tlv_size));
1683 
1684 	if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf))
1685 		return;
1686 
1687 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
1688 
1689 	/*
1690 	 * Indicate EAPOL frame to stack only when vap mac address
1691 	 * matches the destination address.
1692 	 */
1693 	is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
1694 	if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1695 		qdf_ether_header_t *eh =
1696 			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1697 		if (dp_rx_err_match_dhost(eh, vdev)) {
1698 			DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1,
1699 					 qdf_nbuf_len(nbuf));
1700 
1701 			/*
1702 			 * Update the protocol tag in SKB based on
1703 			 * CCE metadata.
1704 			 */
1705 			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1706 						  EXCEPTION_DEST_RING_ID,
1707 						  true, true);
1708 			/* Update the flow tag in SKB based on FSE metadata */
1709 			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
1710 					      true);
1711 			DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
1712 						  qdf_nbuf_len(nbuf),
1713 						  vdev->pdev->enhanced_stats_en);
1714 			qdf_nbuf_set_exc_frame(nbuf, 1);
1715 			qdf_nbuf_set_next(nbuf, NULL);
1716 
1717 			dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf,
1718 						    NULL, is_eapol);
1719 
1720 			return;
1721 		}
1722 	}
1723 
1724 drop_nbuf:
1725 
1726 	DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
1727 		      err_src == HAL_RX_WBM_ERR_SRC_REO);
1728 	DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
1729 		      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);
1730 
1731 	dp_rx_nbuf_free(nbuf);
1732 }
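/*
 * A minimal, standalone sketch of the delivery rule implemented by
 * dp_rx_err_route_hdl() above: an exception-routed frame is handed to the
 * stack only when it is an EAPOL (or WAPI) frame whose destination MAC
 * matches the vdev MAC; everything else on this path is dropped. The ex_*
 * names and structs below are hypothetical and exist only for illustration;
 * they are not part of this driver.
 */
#if 0	/* illustrative only, not compiled into the driver */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define EX_MAC_LEN     6
#define EX_ETH_P_EAPOL 0x888e	/* 802.1X EAPOL ethertype, host order here */

struct ex_eth_hdr { uint8_t dst[EX_MAC_LEN]; uint8_t src[EX_MAC_LEN]; uint16_t proto; };
struct ex_vdev    { uint8_t mac[EX_MAC_LEN]; };

/* Return true when the frame should be indicated to the stack. */
bool ex_route_to_stack(const struct ex_eth_hdr *eh, const struct ex_vdev *vdev)
{
	if (eh->proto != EX_ETH_P_EAPOL)
		return false;

	return memcmp(eh->dst, vdev->mac, EX_MAC_LEN) == 0;
}
#endif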
1733 
1734 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1735 
1736 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
1737 /**
1738  * dp_rx_link_cookie_check() - Validate link desc cookie
1739  * @ring_desc: ring descriptor
1740  *
1741  * Return: qdf status
1742  */
1743 static inline QDF_STATUS
1744 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1745 {
1746 	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
1747 		return QDF_STATUS_E_FAILURE;
1748 
1749 	return QDF_STATUS_SUCCESS;
1750 }
1751 
1752 /**
1753  * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
1754  * @ring_desc: ring descriptor
1755  *
1756  * Return: None
1757  */
1758 static inline void
1759 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1760 {
1761 	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
1762 }
1763 #else
1764 static inline QDF_STATUS
1765 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1766 {
1767 	return QDF_STATUS_SUCCESS;
1768 }
1769 
1770 static inline void
1771 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1772 {
1773 }
1774 #endif
1775 
1776 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1777 /**
1778  * dp_rx_err_ring_record_entry() - Record rx err ring history
1779  * @soc: Datapath soc structure
1780  * @paddr: paddr of the buffer in RX err ring
1781  * @sw_cookie: SW cookie of the buffer in RX err ring
1782  * @rbm: Return buffer manager of the buffer in RX err ring
1783  *
1784  * Returns: None
1785  */
1786 static inline void
1787 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1788 			    uint32_t sw_cookie, uint8_t rbm)
1789 {
1790 	struct dp_buf_info_record *record;
1791 	uint32_t idx;
1792 
1793 	if (qdf_unlikely(!soc->rx_err_ring_history))
1794 		return;
1795 
1796 	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
1797 					DP_RX_ERR_HIST_MAX);
1798 
1799 	/* No NULL check needed for record since it's an array */
1800 	record = &soc->rx_err_ring_history->entry[idx];
1801 
1802 	record->timestamp = qdf_get_log_timestamp();
1803 	record->hbi.paddr = paddr;
1804 	record->hbi.sw_cookie = sw_cookie;
1805 	record->hbi.rbm = rbm;
1806 }
1807 #else
1808 static inline void
1809 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1810 			    uint32_t sw_cookie, uint8_t rbm)
1811 {
1812 }
1813 #endif
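/*
 * A standalone sketch of the history-recording pattern used by
 * dp_rx_err_ring_record_entry() above: advance a wrapping write index and
 * fill the selected slot of a fixed-size array (no NULL check is needed
 * because the slot is array storage). The ex_* names are hypothetical; the
 * real code takes the next index via dp_history_get_next_index(), which may
 * be atomic, while this sketch is not.
 */
#if 0	/* illustrative only, not compiled into the driver */
#include <stdint.h>

#define EX_HIST_MAX 128		/* power-of-two depth, hypothetical */

struct ex_hist_entry { uint64_t ts; uint64_t paddr; uint32_t cookie; uint8_t rbm; };

struct ex_hist {
	uint32_t index;				/* monotonically increasing */
	struct ex_hist_entry entry[EX_HIST_MAX];
};

/* Record one event; once full, the oldest entry is silently overwritten. */
void ex_hist_record(struct ex_hist *h, uint64_t ts, uint64_t paddr,
		    uint32_t cookie, uint8_t rbm)
{
	struct ex_hist_entry *e = &h->entry[h->index++ & (EX_HIST_MAX - 1)];

	e->ts = ts;
	e->paddr = paddr;
	e->cookie = cookie;
	e->rbm = rbm;
}
#endif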
1814 
1815 #ifdef HANDLE_RX_REROUTE_ERR
1816 static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
1817 				     hal_ring_desc_t ring_desc)
1818 {
1819 	int lmac_id = DP_INVALID_LMAC_ID;
1820 	struct dp_rx_desc *rx_desc;
1821 	struct hal_buf_info hbi;
1822 	struct dp_pdev *pdev;
1823 	struct rx_desc_pool *rx_desc_pool;
1824 
1825 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
1826 
1827 	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie);
1828 
1829 	/* sanity */
1830 	if (!rx_desc) {
1831 		DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1);
1832 		goto assert_return;
1833 	}
1834 
1835 	if (!rx_desc->nbuf)
1836 		goto assert_return;
1837 
1838 	dp_rx_err_ring_record_entry(soc, hbi.paddr,
1839 				    hbi.sw_cookie,
1840 				    hal_rx_ret_buf_manager_get(soc->hal_soc,
1841 							       ring_desc));
1842 	if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) {
1843 		DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1844 		rx_desc->in_err_state = 1;
1845 		goto assert_return;
1846 	}
1847 
1848 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
1849 	/* After this point the rx_desc and nbuf are valid */
1850 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
1851 	qdf_assert_always(!rx_desc->unmapped);
1852 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
1853 	rx_desc->unmapped = 1;
1854 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1855 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
1856 				    rx_desc->pool_id);
1857 
1858 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
1859 	lmac_id = rx_desc->pool_id;
1860 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1861 				    &pdev->free_list_tail,
1862 				    rx_desc);
1863 	return lmac_id;
1864 
1865 assert_return:
1866 	qdf_assert(0);
1867 	return lmac_id;
1868 }
1869 
1870 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
1871 {
1872 	int ret;
1873 	uint64_t cur_time_stamp;
1874 
1875 	DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1);
1876 
1877 	/* Recover if overall error count exceeds threshold */
1878 	if (soc->stats.rx.err.reo_err_msdu_buf_rcved >
1879 	    DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) {
1880 		dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
1881 		       soc->stats.rx.err.reo_err_msdu_buf_rcved,
1882 		       soc->rx_route_err_start_pkt_ts);
1883 		qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR);
1884 	}
1885 
1886 	cur_time_stamp = qdf_get_log_timestamp_usecs();
1887 	if (!soc->rx_route_err_start_pkt_ts)
1888 		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
1889 
1890 	/* Recover if threshold number of packets received in threshold time */
1891 	if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) >
1892 						DP_RX_ERR_ROUTE_TIMEOUT_US) {
1893 		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
1894 
1895 		if (soc->rx_route_err_in_window >
1896 		    DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) {
1897 			qdf_trigger_self_recovery(NULL,
1898 						  QDF_RX_REG_PKT_ROUTE_ERR);
1899 			dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
1900 			       soc->stats.rx.err.reo_err_msdu_buf_rcved,
1901 			       soc->rx_route_err_start_pkt_ts);
1902 		} else {
1903 			soc->rx_route_err_in_window = 1;
1904 		}
1905 	} else {
1906 		soc->rx_route_err_in_window++;
1907 	}
1908 
1909 	ret = dp_rx_err_handle_msdu_buf(soc, ring_desc);
1910 
1911 	return ret;
1912 }
1913 #else /* HANDLE_RX_REROUTE_ERR */
1914 
1915 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
1916 {
1917 	qdf_assert_always(0);
1918 
1919 	return DP_INVALID_LMAC_ID;
1920 }
1921 #endif /* HANDLE_RX_REROUTE_ERR */
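/*
 * A standalone sketch of the two recovery triggers used in
 * dp_rx_err_exception() above: an absolute error-count threshold, and a
 * count-per-time-window threshold whose window restarts once the timeout
 * elapses. The ex_* names and constants are hypothetical, and the window
 * bookkeeping is slightly simplified (the in-window counter restarts on
 * every window expiry here).
 */
#if 0	/* illustrative only, not compiled into the driver */
#include <stdbool.h>
#include <stdint.h>

#define EX_ERRS_TOTAL_MAX  20			/* absolute count threshold */
#define EX_ERRS_PER_WINDOW 10			/* errors allowed per window */
#define EX_WINDOW_US       (5 * 1000 * 1000)	/* window length in usecs */

struct ex_err_rate {
	uint32_t total;		/* routing errors seen so far */
	uint32_t in_window;	/* errors in the current window */
	uint64_t window_start;	/* timestamp (us) of the window start */
};

/* Count one error; return true when the caller should trigger recovery. */
bool ex_err_rate_check(struct ex_err_rate *r, uint64_t now_us)
{
	bool breach = false;

	if (++r->total > EX_ERRS_TOTAL_MAX)
		return true;

	if (!r->window_start)
		r->window_start = now_us;

	if (now_us - r->window_start > EX_WINDOW_US) {
		/* Window expired: judge the last window, then start a new one. */
		breach = r->in_window > EX_ERRS_PER_WINDOW;
		r->window_start = now_us;
		r->in_window = 1;
	} else {
		r->in_window++;
	}

	return breach;
}
#endif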
1922 
1923 #ifdef WLAN_MLO_MULTI_CHIP
1924 /*
1925  * dp_idle_link_bm_id_check() - WAR for HW issue
1926  *
1927  * This is a WAR for a HW issue where a link descriptor
1928  * of a partner soc is received because packets are wrongly
1929  * interpreted as fragments
1930  *
1931  * @soc: DP SOC handle
1932  * @rbm: idle link RBM value
1933  * @ring_desc: reo error link descriptor
1934  *
1935  * returns: true in case link desc is consumed
1936  *	    false in other cases
1937  */
1938 static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
1939 				     void *ring_desc)
1940 {
1941 	struct dp_soc *replenish_soc = NULL;
1942 
1943 	/* nothing to do in case of a link desc from the same soc */
1944 	if (rbm == soc->idle_link_bm_id)
1945 		return false;
1946 
1947 	if (soc->arch_ops.dp_soc_get_by_idle_bm_id)
1948 		replenish_soc =
1949 			soc->arch_ops.dp_soc_get_by_idle_bm_id(soc, rbm);
1950 
1951 	qdf_assert_always(replenish_soc);
1952 
1953 	/*
1954 	 * For the WIN use case we should only get fragment packets in
1955 	 * this ring; since fragmentation is not supported for MLO,
1956 	 * we should not see link descriptors from another soc.
1957 	 *
1958 	 * Drop all packets from the partner soc and replenish the descriptors.
1959 	 */
1960 	dp_handle_wbm_internal_error(replenish_soc, ring_desc,
1961 				     HAL_WBM_RELEASE_RING_2_DESC_TYPE);
1962 
1963 	return true;
1964 }
1965 #else
1966 static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
1967 				     void *ring_desc)
1968 {
1969 	return false;
1970 }
1971 #endif
1972 
1973 uint32_t
1974 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
1975 		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
1976 {
1977 	hal_ring_desc_t ring_desc;
1978 	hal_soc_handle_t hal_soc;
1979 	uint32_t count = 0;
1980 	uint32_t rx_bufs_used = 0;
1981 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
1982 	uint8_t mac_id = 0;
1983 	uint8_t buf_type;
1984 	uint8_t err_status;
1985 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
1986 	struct hal_buf_info hbi;
1987 	struct dp_pdev *dp_pdev;
1988 	struct dp_srng *dp_rxdma_srng;
1989 	struct rx_desc_pool *rx_desc_pool;
1990 	void *link_desc_va;
1991 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
1992 	uint16_t num_msdus;
1993 	struct dp_rx_desc *rx_desc = NULL;
1994 	QDF_STATUS status;
1995 	bool ret;
1996 	uint32_t error_code = 0;
1997 	bool sw_pn_check_needed;
1998 	int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
1999 	int i, rx_bufs_reaped_total;
2000 
2001 	/* Debug -- Remove later */
2002 	qdf_assert(soc && hal_ring_hdl);
2003 
2004 	hal_soc = soc->hal_soc;
2005 
2006 	/* Debug -- Remove later */
2007 	qdf_assert(hal_soc);
2008 
2009 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
2010 
2011 		/* TODO */
2012 		/*
2013 		 * Need API to convert from hal_ring pointer to
2014 		 * Ring Type / Ring Id combo
2015 		 */
2016 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
2017 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc,
2018 			      hal_ring_hdl);
2019 		goto done;
2020 	}
2021 
2022 	while (qdf_likely(quota-- && (ring_desc =
2023 				hal_srng_dst_peek(hal_soc,
2024 						  hal_ring_hdl)))) {
2025 
2026 		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
2027 		err_status = hal_rx_err_status_get(hal_soc, ring_desc);
2028 		buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc);
2029 
2030 		if (err_status == HAL_REO_ERROR_DETECTED)
2031 			error_code = hal_rx_get_reo_error_code(hal_soc,
2032 							       ring_desc);
2033 
2034 		qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0);
2035 		sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
2036 								  err_status,
2037 								  error_code);
2038 		if (!sw_pn_check_needed) {
2039 			/*
2040 			 * MPDU desc info will be present in the REO desc
2041 			 * only in the below scenarios
2042 			 * 1) pn_in_dest_disabled:  always
2043 			 * 2) pn_in_dest enabled: all cases except 2K-jump
2044 			 *			and OOR errors
2045 			 */
2046 			hal_rx_mpdu_desc_info_get(hal_soc, ring_desc,
2047 						  &mpdu_desc_info);
2048 		}
2049 
2050 		if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0)
2051 			goto next_entry;
2052 
2053 		/*
2054 		 * For REO error ring, only MSDU LINK DESC is expected.
2055 		 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
2056 		 */
2057 		if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) {
2058 			int lmac_id;
2059 
2060 			lmac_id = dp_rx_err_exception(soc, ring_desc);
2061 			if (lmac_id >= 0)
2062 				rx_bufs_reaped[lmac_id] += 1;
2063 			goto next_entry;
2064 		}
2065 
2066 		hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
2067 					  &hbi);
2068 		/*
2069 		 * check for the magic number in the sw cookie
2070 		 */
2071 		qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) &
2072 					soc->link_desc_id_start);
2073 
2074 		if (dp_idle_link_bm_id_check(soc, hbi.rbm, ring_desc)) {
2075 			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
2076 			goto next_entry;
2077 		}
2078 
2079 		status = dp_rx_link_cookie_check(ring_desc);
2080 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2081 			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
2082 			break;
2083 		}
2084 
2085 		hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2086 		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
2087 		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
2088 				     &num_msdus);
2089 		if (!num_msdus ||
2090 		    !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) {
2091 			dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x",
2092 					  num_msdus, msdu_list.sw_cookie[0]);
2093 			dp_rx_link_desc_return(soc, ring_desc,
2094 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
2095 			goto next_entry;
2096 		}
2097 
2098 		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
2099 					    msdu_list.sw_cookie[0],
2100 					    msdu_list.rbm[0]);
2101 		/* TODO: BE - check if the RBM needs to be validated for all chips */
2102 		if (qdf_unlikely((msdu_list.rbm[0] !=
2103 					dp_rx_get_rx_bm_id(soc)) &&
2104 				 (msdu_list.rbm[0] !=
2105 				  soc->idle_link_bm_id) &&
2106 				 (msdu_list.rbm[0] !=
2107 					dp_rx_get_defrag_bm_id(soc)))) {
2108 			/* TODO */
2109 			/* Call appropriate handler */
2110 			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2111 				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
2112 				dp_rx_err_err("%pK: Invalid RBM %d",
2113 					      soc, msdu_list.rbm[0]);
2114 			}
2115 
2116 			/* Return link descriptor through WBM ring (SW2WBM)*/
2117 			dp_rx_link_desc_return(soc, ring_desc,
2118 					HAL_BM_ACTION_RELEASE_MSDU_LIST);
2119 			goto next_entry;
2120 		}
2121 
2122 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
2123 						soc,
2124 						msdu_list.sw_cookie[0]);
2125 		qdf_assert_always(rx_desc);
2126 
2127 		mac_id = rx_desc->pool_id;
2128 
2129 		if (sw_pn_check_needed) {
2130 			goto process_reo_error_code;
2131 		}
2132 
2133 		if (mpdu_desc_info.bar_frame) {
2134 			qdf_assert_always(mpdu_desc_info.msdu_count == 1);
2135 
2136 			dp_rx_bar_frame_handle(soc, ring_desc, rx_desc,
2137 					       &mpdu_desc_info, err_status,
2138 					       error_code);
2139 
2140 			rx_bufs_reaped[mac_id] += 1;
2141 			goto next_entry;
2142 		}
2143 
2144 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
2145 			/*
2146 			 * We only handle one msdu per link desc in the fragmented
2147 			 * case. We drop the msdus and release the link desc
2148 			 * back if there is more than one msdu in the link desc.
2149 			 */
2150 			if (qdf_unlikely(num_msdus > 1)) {
2151 				count = dp_rx_msdus_drop(soc, ring_desc,
2152 							 &mpdu_desc_info,
2153 							 &mac_id, quota);
2154 				rx_bufs_reaped[mac_id] += count;
2155 				goto next_entry;
2156 			}
2157 
2158 			/*
2159 			 * This is an unlikely scenario where the host is reaping
2160 			 * a descriptor which it already reaped just a while ago
2161 			 * but is yet to replenish it back to HW.
2162 			 * In this case the host will dump the last 128 descriptors,
2163 			 * including the software descriptor rx_desc, and assert.
2164 			 */
2165 
2166 			if (qdf_unlikely(!rx_desc->in_use)) {
2167 				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
2168 				dp_info_rl("Reaping rx_desc not in use!");
2169 				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2170 							   ring_desc, rx_desc);
2171 				/* ignore duplicate RX desc and continue */
2172 				/* Pop out the descriptor */
2173 				goto next_entry;
2174 			}
2175 
2176 			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
2177 							    msdu_list.paddr[0]);
2178 			if (!ret) {
2179 				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2180 				rx_desc->in_err_state = 1;
2181 				goto next_entry;
2182 			}
2183 
2184 			count = dp_rx_frag_handle(soc,
2185 						  ring_desc, &mpdu_desc_info,
2186 						  rx_desc, &mac_id, quota);
2187 
2188 			rx_bufs_reaped[mac_id] += count;
2189 			DP_STATS_INC(soc, rx.rx_frags, 1);
2190 			goto next_entry;
2191 		}
2192 
2193 process_reo_error_code:
2194 		/*
2195 		 * Expect REO errors to be handled after this point
2196 		 */
2197 		qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED);
2198 
2199 		dp_info_rl("Got pkt with REO ERROR: %d", error_code);
2200 
2201 		switch (error_code) {
2202 		case HAL_REO_ERR_PN_CHECK_FAILED:
2203 		case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
2204 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2205 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2206 			if (dp_pdev)
2207 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
2208 			count = dp_rx_pn_error_handle(soc,
2209 						      ring_desc,
2210 						      &mpdu_desc_info, &mac_id,
2211 						      quota);
2212 
2213 			rx_bufs_reaped[mac_id] += count;
2214 			break;
2215 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2216 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
2217 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2218 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
2219 		case HAL_REO_ERR_BAR_FRAME_OOR:
2220 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2221 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2222 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2223 			if (dp_pdev)
2224 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
2225 			count = dp_rx_reo_err_entry_process(
2226 					soc,
2227 					ring_desc,
2228 					&mpdu_desc_info,
2229 					link_desc_va,
2230 					error_code);
2231 
2232 			rx_bufs_reaped[mac_id] += count;
2233 			break;
2234 		case HAL_REO_ERR_QUEUE_DESC_INVALID:
2235 		case HAL_REO_ERR_AMPDU_IN_NON_BA:
2236 		case HAL_REO_ERR_NON_BA_DUPLICATE:
2237 		case HAL_REO_ERR_BA_DUPLICATE:
2238 		case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION:
2239 		case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN:
2240 		case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET:
2241 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2242 			count = dp_rx_msdus_drop(soc, ring_desc,
2243 						 &mpdu_desc_info,
2244 						 &mac_id, quota);
2245 			rx_bufs_reaped[mac_id] += count;
2246 			break;
2247 		default:
2248 			/* Assert if unexpected error type */
2249 			qdf_assert_always(0);
2250 		}
2251 next_entry:
2252 		dp_rx_link_cookie_invalidate(ring_desc);
2253 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2254 
2255 		rx_bufs_reaped_total = 0;
2256 		for (i = 0; i < MAX_PDEV_CNT; i++)
2257 			rx_bufs_reaped_total += rx_bufs_reaped[i];
2258 
2259 		if (dp_rx_reap_loop_pkt_limit_hit(soc, rx_bufs_reaped_total,
2260 						  max_reap_limit))
2261 			break;
2262 	}
2263 
2264 done:
2265 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2266 
2267 	if (soc->rx.flags.defrag_timeout_check) {
2268 		uint32_t now_ms =
2269 			qdf_system_ticks_to_msecs(qdf_system_ticks());
2270 
2271 		if (now_ms >= soc->rx.defrag.next_flush_ms)
2272 			dp_rx_defrag_waitlist_flush(soc);
2273 	}
2274 
2275 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2276 		if (rx_bufs_reaped[mac_id]) {
2277 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2278 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2279 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
2280 
2281 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2282 						rx_desc_pool,
2283 						rx_bufs_reaped[mac_id],
2284 						&dp_pdev->free_list_head,
2285 						&dp_pdev->free_list_tail,
2286 						false);
2287 			rx_bufs_used += rx_bufs_reaped[mac_id];
2288 		}
2289 	}
2290 
2291 	return rx_bufs_used; /* Assume no scale factor for now */
2292 }
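/*
 * A standalone sketch of the reap-limit check applied at the bottom of the
 * loop in dp_rx_err_process() above: per-pool reap counts are summed and the
 * loop stops early once a configured limit is reached. The ex_* names are
 * hypothetical; the driver does this via dp_rx_reap_loop_pkt_limit_hit().
 */
#if 0	/* illustrative only, not compiled into the driver */
#include <stdbool.h>
#include <stdint.h>

/* Sum per-pool reap counts and decide whether the reap loop should stop. */
bool ex_reap_limit_hit(const uint32_t reaped[], int num_pools, uint32_t limit)
{
	uint32_t total = 0;
	int i;

	for (i = 0; i < num_pools; i++)
		total += reaped[i];

	return limit && total >= limit;
}
#endif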
2293 
2294 #ifdef DROP_RXDMA_DECRYPT_ERR
2295 /**
2296  * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
2297  *
2298  * Return: true if rxdma decrypt err frames are handled and false otherwise
2299  */
2300 static inline bool dp_handle_rxdma_decrypt_err(void)
2301 {
2302 	return false;
2303 }
2304 #else
2305 static inline bool dp_handle_rxdma_decrypt_err(void)
2306 {
2307 	return true;
2308 }
2309 #endif
2310 
2311 void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
2312 {
2313 	if (soc->wbm_sg_last_msdu_war) {
2314 		uint32_t len;
2315 		qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail;
2316 
2317 		len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
2318 						     qdf_nbuf_data(temp));
2319 		temp = soc->wbm_sg_param.wbm_sg_nbuf_head;
2320 		while (temp) {
2321 			QDF_NBUF_CB_RX_PKT_LEN(temp) = len;
2322 			temp = temp->next;
2323 		}
2324 	}
2325 }
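/*
 * A standalone sketch of the workaround above: the MSDU length is taken from
 * the last buffer of the scatter-gather chain and then propagated to every
 * buffer in the chain. The ex_* names are hypothetical and use a plain
 * singly linked list in place of the nbuf chain.
 */
#if 0	/* illustrative only, not compiled into the driver */
#include <stdint.h>

struct ex_buf {
	uint32_t pkt_len;
	struct ex_buf *next;
};

/* Copy the length recorded on the tail buffer to every buffer in the list. */
void ex_sg_last_msdu_war(struct ex_buf *head, const struct ex_buf *tail)
{
	uint32_t len = tail ? tail->pkt_len : 0;
	struct ex_buf *cur;

	for (cur = head; cur; cur = cur->next)
		cur->pkt_len = len;
}
#endif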
2326 
2327 #ifdef RX_DESC_DEBUG_CHECK
2328 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2329 					    hal_ring_handle_t hal_ring_hdl,
2330 					    hal_ring_desc_t ring_desc,
2331 					    struct dp_rx_desc *rx_desc)
2332 {
2333 	struct hal_buf_info hbi;
2334 
2335 	hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2336 	/* Sanity check for possible buffer paddr corruption */
2337 	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
2338 		return QDF_STATUS_SUCCESS;
2339 
2340 	hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc);
2341 
2342 	return QDF_STATUS_E_FAILURE;
2343 }
2344 
2345 #else
2346 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2347 					    hal_ring_handle_t hal_ring_hdl,
2348 					    hal_ring_desc_t ring_desc,
2349 					    struct dp_rx_desc *rx_desc)
2350 {
2351 	return QDF_STATUS_SUCCESS;
2352 }
2353 #endif
2354 bool
2355 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
2356 {
2357 	/*
2358 	 * Currently only the NULL queue and unencrypted error handlers support
2359 	 * SG. The other error handlers do not deal with SG buffers.
2360 	 */
2361 	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
2362 	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
2363 	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
2364 	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
2365 		return true;
2366 
2367 	return false;
2368 }
2369 
2370 #ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
2371 void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2372 			      qdf_nbuf_t nbuf)
2373 {
2374 	/*
2375 	 * In the fast recycle case the TX driver can skip invalidating
2376 	 * the buffer for SFE forwarding. We need to invalidate
2377 	 * the TLV headers after writing to this location.
2378 	 */
2379 	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2380 				      (void *)(nbuf->data +
2381 					       soc->rx_pkt_tlv_size +
2382 					       L3_HEADER_PAD));
2383 }
2384 #else
2385 void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2386 			      qdf_nbuf_t nbuf)
2387 {
2388 }
2389 #endif
2390 
2391 uint32_t
2392 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2393 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
2394 {
2395 	hal_soc_handle_t hal_soc;
2396 	uint32_t rx_bufs_used = 0;
2397 	struct dp_pdev *dp_pdev;
2398 	uint8_t *rx_tlv_hdr;
2399 	bool is_tkip_mic_err;
2400 	qdf_nbuf_t nbuf_head = NULL;
2401 	qdf_nbuf_t nbuf, next;
2402 	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
2403 	uint8_t pool_id;
2404 	uint8_t tid = 0;
2405 
2406 	/* Debug -- Remove later */
2407 	qdf_assert(soc && hal_ring_hdl);
2408 
2409 	hal_soc = soc->hal_soc;
2410 
2411 	/* Debug -- Remove later */
2412 	qdf_assert(hal_soc);
2413 
2414 	nbuf_head = soc->arch_ops.dp_rx_wbm_err_reap_desc(int_ctx, soc,
2415 							  hal_ring_hdl,
2416 							  quota,
2417 							  &rx_bufs_used);
2418 	nbuf = nbuf_head;
2419 	while (nbuf) {
2420 		struct dp_txrx_peer *txrx_peer;
2421 		struct dp_peer *peer;
2422 		uint16_t peer_id;
2423 		uint8_t err_code;
2424 		uint8_t *tlv_hdr;
2425 		uint32_t peer_meta_data;
2426 		dp_txrx_ref_handle txrx_ref_handle = NULL;
2427 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
2428 
2429 		/*
2430 		 * retrieve the wbm desc info from nbuf TLV, so we can
2431 		 * handle error cases appropriately
2432 		 */
2433 		hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr,
2434 					      (uint8_t *)&wbm_err_info,
2435 					      sizeof(wbm_err_info));
2436 
2437 		peer_meta_data = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
2438 							       rx_tlv_hdr);
2439 		peer_id = dp_rx_peer_metadata_peer_id_get(soc, peer_meta_data);
2440 		txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
2441 							   &txrx_ref_handle,
2442 							   DP_MOD_ID_RX_ERR);
2443 
2444 		if (!txrx_peer)
2445 			dp_info_rl("peer is null peer_id %u err_src %u, "
2446 				   "REO: push_rsn %u err_code %u, "
2447 				   "RXDMA: push_rsn %u err_code %u",
2448 				   peer_id, wbm_err_info.wbm_err_src,
2449 				   wbm_err_info.reo_psh_rsn,
2450 				   wbm_err_info.reo_err_code,
2451 				   wbm_err_info.rxdma_psh_rsn,
2452 				   wbm_err_info.rxdma_err_code);
2453 
2454 		/* Set queue_mapping in nbuf to 0 */
2455 		dp_set_rx_queue(nbuf, 0);
2456 
2457 		next = nbuf->next;
2458 		/*
2459 		 * Form the SG for msdu continued buffers
2460 		 * QCN9000 has this support
2461 		 */
2462 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
2463 			nbuf = dp_rx_sg_create(soc, nbuf);
2464 			next = nbuf->next;
2465 			/*
2466 			 * SG error handling is not done correctly,
2467 			 * drop SG frames for now.
2468 			 */
2469 			dp_rx_nbuf_free(nbuf);
2470 			dp_info_rl("scattered msdu dropped");
2471 			nbuf = next;
2472 			if (txrx_peer)
2473 				dp_txrx_peer_unref_delete(txrx_ref_handle,
2474 							  DP_MOD_ID_RX_ERR);
2475 			continue;
2476 		}
2477 
2478 		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
2479 			if (wbm_err_info.reo_psh_rsn
2480 					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {
2481 
2482 				DP_STATS_INC(soc,
2483 					rx.err.reo_error
2484 					[wbm_err_info.reo_err_code], 1);
2485 				/* increment @pdev level */
2486 				pool_id = wbm_err_info.pool_id;
2487 				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2488 				if (dp_pdev)
2489 					DP_STATS_INC(dp_pdev, err.reo_error,
2490 						     1);
2491 
2492 				switch (wbm_err_info.reo_err_code) {
2493 				/*
2494 				 * Handling for packets which have NULL REO
2495 				 * queue descriptor
2496 				 */
2497 				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2498 					pool_id = wbm_err_info.pool_id;
2499 					soc->arch_ops.dp_rx_null_q_desc_handle(
2500 								soc, nbuf,
2501 								rx_tlv_hdr,
2502 								pool_id,
2503 								txrx_peer,
2504 								FALSE);
2505 					break;
2506 				/* TODO */
2507 				/* Add per error code accounting */
2508 				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2509 					if (txrx_peer)
2510 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2511 									  rx.err.jump_2k_err,
2512 									  1);
2513 
2514 					pool_id = wbm_err_info.pool_id;
2515 
2516 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2517 									   rx_tlv_hdr)) {
2518 						tid =
2519 						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2520 					}
2521 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2522 					hal_rx_msdu_start_msdu_len_get(
2523 						soc->hal_soc, rx_tlv_hdr);
2524 					nbuf->next = NULL;
2525 					dp_2k_jump_handle(soc, nbuf,
2526 							  rx_tlv_hdr,
2527 							  peer_id, tid);
2528 					break;
2529 				case HAL_REO_ERR_REGULAR_FRAME_OOR:
2530 					if (txrx_peer)
2531 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2532 									  rx.err.oor_err,
2533 									  1);
2534 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2535 									   rx_tlv_hdr)) {
2536 						tid =
2537 							hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2538 					}
2539 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2540 						hal_rx_msdu_start_msdu_len_get(
2541 						soc->hal_soc, rx_tlv_hdr);
2542 					nbuf->next = NULL;
2543 					dp_rx_oor_handle(soc, nbuf,
2544 							 peer_id,
2545 							 rx_tlv_hdr);
2546 					break;
2547 				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2548 				case HAL_REO_ERR_BAR_FRAME_OOR:
2549 					peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
2550 					if (peer) {
2551 						dp_rx_err_handle_bar(soc, peer,
2552 								     nbuf);
2553 						dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
2554 					}
2555 					dp_rx_nbuf_free(nbuf);
2556 					break;
2557 
2558 				case HAL_REO_ERR_PN_CHECK_FAILED:
2559 				case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
2560 					if (txrx_peer)
2561 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2562 									  rx.err.pn_err,
2563 									  1);
2564 					dp_rx_nbuf_free(nbuf);
2565 					break;
2566 
2567 				default:
2568 					dp_info_rl("Got pkt with REO ERROR: %d",
2569 						   wbm_err_info.reo_err_code);
2570 					dp_rx_nbuf_free(nbuf);
2571 				}
2572 			} else if (wbm_err_info.reo_psh_rsn
2573 					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
2574 				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
2575 						    rx_tlv_hdr,
2576 						    HAL_RX_WBM_ERR_SRC_REO);
2577 			} else {
2578 				/* should not enter here */
2579 				dp_rx_err_alert("invalid reo push reason %u",
2580 						wbm_err_info.reo_psh_rsn);
2581 				dp_rx_nbuf_free(nbuf);
2582 				qdf_assert_always(0);
2583 			}
2584 		} else if (wbm_err_info.wbm_err_src ==
2585 					HAL_RX_WBM_ERR_SRC_RXDMA) {
2586 			if (wbm_err_info.rxdma_psh_rsn
2587 					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2588 				DP_STATS_INC(soc,
2589 					rx.err.rxdma_error
2590 					[wbm_err_info.rxdma_err_code], 1);
2591 				/* increment @pdev level */
2592 				pool_id = wbm_err_info.pool_id;
2593 				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2594 				if (dp_pdev)
2595 					DP_STATS_INC(dp_pdev,
2596 						     err.rxdma_error, 1);
2597 
2598 				switch (wbm_err_info.rxdma_err_code) {
2599 				case HAL_RXDMA_ERR_UNENCRYPTED:
2600 
2601 				case HAL_RXDMA_ERR_WIFI_PARSE:
2602 					if (txrx_peer)
2603 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2604 									  rx.err.rxdma_wifi_parse_err,
2605 									  1);
2606 
2607 					pool_id = wbm_err_info.pool_id;
2608 					dp_rx_process_rxdma_err(soc, nbuf,
2609 								rx_tlv_hdr,
2610 								txrx_peer,
2611 								wbm_err_info.
2612 								rxdma_err_code,
2613 								pool_id);
2614 					break;
2615 
2616 				case HAL_RXDMA_ERR_TKIP_MIC:
2617 					dp_rx_process_mic_error(soc, nbuf,
2618 								rx_tlv_hdr,
2619 								txrx_peer);
2620 					if (txrx_peer)
2621 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2622 									  rx.err.mic_err,
2623 									  1);
2624 					break;
2625 
2626 				case HAL_RXDMA_ERR_DECRYPT:
2627 					/* All the TKIP-MIC failures are treated as Decrypt Errors
2628 					 * for QCN9224 Targets
2629 					 */
2630 					is_tkip_mic_err = hal_rx_msdu_end_is_tkip_mic_err(hal_soc, rx_tlv_hdr);
2631 
2632 					if (is_tkip_mic_err && txrx_peer) {
2633 						dp_rx_process_mic_error(soc, nbuf,
2634 									rx_tlv_hdr,
2635 									txrx_peer);
2636 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2637 									  rx.err.mic_err,
2638 									  1);
2639 						break;
2640 					}
2641 
2642 					if (txrx_peer) {
2643 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2644 									  rx.err.decrypt_err,
2645 									  1);
2646 						dp_rx_nbuf_free(nbuf);
2647 						break;
2648 					}
2649 
2650 					if (!dp_handle_rxdma_decrypt_err()) {
2651 						dp_rx_nbuf_free(nbuf);
2652 						break;
2653 					}
2654 
2655 					pool_id = wbm_err_info.pool_id;
2656 					err_code = wbm_err_info.rxdma_err_code;
2657 					tlv_hdr = rx_tlv_hdr;
2658 					dp_rx_process_rxdma_err(soc, nbuf,
2659 								tlv_hdr, NULL,
2660 								err_code,
2661 								pool_id);
2662 					break;
2663 				case HAL_RXDMA_MULTICAST_ECHO:
2664 					if (txrx_peer)
2665 						DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
2666 									      rx.mec_drop, 1,
2667 									      qdf_nbuf_len(nbuf));
2668 					dp_rx_nbuf_free(nbuf);
2669 					break;
2670 				case HAL_RXDMA_UNAUTHORIZED_WDS:
2671 					pool_id = wbm_err_info.pool_id;
2672 					err_code = wbm_err_info.rxdma_err_code;
2673 					tlv_hdr = rx_tlv_hdr;
2674 					dp_rx_process_rxdma_err(soc, nbuf,
2675 								tlv_hdr,
2676 								txrx_peer,
2677 								err_code,
2678 								pool_id);
2679 					break;
2680 				default:
2681 					dp_rx_nbuf_free(nbuf);
2682 					dp_err_rl("RXDMA error %d",
2683 						  wbm_err_info.rxdma_err_code);
2684 				}
2685 			} else if (wbm_err_info.rxdma_psh_rsn
2686 					== HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
2687 				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
2688 						    rx_tlv_hdr,
2689 						    HAL_RX_WBM_ERR_SRC_RXDMA);
2690 			} else if (wbm_err_info.rxdma_psh_rsn
2691 					== HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
2692 				dp_rx_err_err("rxdma push reason %u",
2693 						wbm_err_info.rxdma_psh_rsn);
2694 				DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
2695 				dp_rx_nbuf_free(nbuf);
2696 			} else {
2697 				/* should not enter here */
2698 				dp_rx_err_alert("invalid rxdma push reason %u",
2699 						wbm_err_info.rxdma_psh_rsn);
2700 				dp_rx_nbuf_free(nbuf);
2701 				qdf_assert_always(0);
2702 			}
2703 		} else {
2704 			/* Should not come here */
2705 			qdf_assert(0);
2706 		}
2707 
2708 		if (txrx_peer)
2709 			dp_txrx_peer_unref_delete(txrx_ref_handle,
2710 						  DP_MOD_ID_RX_ERR);
2711 
2712 		nbuf = next;
2713 	}
2714 	return rx_bufs_used; /* Assume no scale factor for now */
2715 }
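/*
 * A standalone sketch of the top-level dispatch performed per nbuf in
 * dp_rx_wbm_err_process() above: push reason ERROR leads to per-error-code
 * handling, push reason ROUTE leads to the EAPOL routing path, and anything
 * else (including the RXDMA FLUSH reason) is dropped. The ex_* enums are
 * hypothetical simplifications of the HAL definitions.
 */
#if 0	/* illustrative only, not compiled into the driver */
enum ex_push_rsn { EX_PSH_ERROR, EX_PSH_ROUTE, EX_PSH_FLUSH };
enum ex_action   { EX_HANDLE_ERR_CODE, EX_ROUTE_EAPOL, EX_DROP };

/* Map a WBM push reason to the action taken on the released frame. */
enum ex_action ex_classify(enum ex_push_rsn rsn)
{
	switch (rsn) {
	case EX_PSH_ERROR:
		return EX_HANDLE_ERR_CODE;
	case EX_PSH_ROUTE:
		return EX_ROUTE_EAPOL;
	default:
		return EX_DROP;
	}
}
#endif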
2716 
2717 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2718 
2719 /**
2720  * dup_desc_dbg() - dump and assert if duplicate rx desc found
2721  *
2722  * @soc: core DP main context
2723  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2724  * @rx_desc: void pointer to rx descriptor
2725  *
2726  * Return: void
2727  */
2728 static void dup_desc_dbg(struct dp_soc *soc,
2729 			 hal_rxdma_desc_t rxdma_dst_ring_desc,
2730 			 void *rx_desc)
2731 {
2732 	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
2733 	dp_rx_dump_info_and_assert(
2734 			soc,
2735 			soc->rx_rel_ring.hal_srng,
2736 			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
2737 			rx_desc);
2738 }
2739 
2740 /**
2741  * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
2742  *
2743  * @soc: core DP main context
2744  * @mac_id: mac id which is one of 3 mac_ids
2745  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2746  * @head: head of descs list to be freed
2747  * @tail: tail of descs list to be freed
2748  *
2749  * Return: number of msdus in the MPDU to be popped
2750  */
2751 static inline uint32_t
2752 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
2753 	hal_rxdma_desc_t rxdma_dst_ring_desc,
2754 	union dp_rx_desc_list_elem_t **head,
2755 	union dp_rx_desc_list_elem_t **tail)
2756 {
2757 	void *rx_msdu_link_desc;
2758 	qdf_nbuf_t msdu;
2759 	qdf_nbuf_t last;
2760 	struct hal_rx_msdu_list msdu_list;
2761 	uint16_t num_msdus;
2762 	struct hal_buf_info buf_info;
2763 	uint32_t rx_bufs_used = 0;
2764 	uint32_t msdu_cnt;
2765 	uint32_t i;
2766 	uint8_t push_reason;
2767 	uint8_t rxdma_error_code = 0;
2768 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
2769 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2770 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
2771 	hal_rxdma_desc_t ring_desc;
2772 	struct rx_desc_pool *rx_desc_pool;
2773 
2774 	if (!pdev) {
2775 		dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
2776 				soc, mac_id);
2777 		return rx_bufs_used;
2778 	}
2779 
2780 	msdu = 0;
2781 
2782 	last = NULL;
2783 
2784 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
2785 				     &buf_info, &msdu_cnt);
2786 
2787 	push_reason =
2788 		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
2789 	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2790 		rxdma_error_code =
2791 			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
2792 	}
2793 
2794 	do {
2795 		rx_msdu_link_desc =
2796 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
2797 
2798 		qdf_assert_always(rx_msdu_link_desc);
2799 
2800 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
2801 				     &msdu_list, &num_msdus);
2802 
2803 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
2804 			/* If the msdus belong to an NSS offloaded radio and
2805 			 * the rbm is not SW1_BM, then return the msdu_link
2806 			 * descriptor without freeing the msdus (nbufs); let
2807 			 * these buffers be given to the NSS completion ring
2808 			 * for NSS to free them.
2809 			 * Else iterate through the msdu link desc list and
2810 			 * free each msdu in the list.
2811 			 */
2812 			if (msdu_list.rbm[0] !=
2813 				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) &&
2814 			    wlan_cfg_get_dp_pdev_nss_enabled(
2815 							pdev->wlan_cfg_ctx))
2816 				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
2817 			else {
2818 				for (i = 0; i < num_msdus; i++) {
2819 					struct dp_rx_desc *rx_desc =
2820 						soc->arch_ops.
2821 						dp_rx_desc_cookie_2_va(
2822 							soc,
2823 							msdu_list.sw_cookie[i]);
2824 					qdf_assert_always(rx_desc);
2825 					msdu = rx_desc->nbuf;
2826 					/*
2827 					 * This is an unlikely scenario
2828 					 * where the host is reaping
2829 					 * a descriptor which
2830 					 * it already reaped just a while ago
2831 					 * but is yet to replenish
2832 					 * it back to HW.
2833 					 * In this case the host will dump
2834 					 * the last 128 descriptors,
2835 					 * including the software descriptor
2836 					 * rx_desc, and assert.
2837 					 */
2838 					ring_desc = rxdma_dst_ring_desc;
2839 					if (qdf_unlikely(!rx_desc->in_use)) {
2840 						dup_desc_dbg(soc,
2841 							     ring_desc,
2842 							     rx_desc);
2843 						continue;
2844 					}
2845 
2846 					if (rx_desc->unmapped == 0) {
2847 						rx_desc_pool =
2848 							&soc->rx_desc_buf[rx_desc->pool_id];
2849 						dp_ipa_rx_buf_smmu_mapping_lock(soc);
2850 						dp_rx_nbuf_unmap_pool(soc,
2851 								      rx_desc_pool,
2852 								      msdu);
2853 						rx_desc->unmapped = 1;
2854 						dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2855 					}
2856 
2857 					dp_rx_err_debug("%pK: msdu_nbuf=%pK ",
2858 							soc, msdu);
2859 
2860 					dp_rx_buffer_pool_nbuf_free(soc, msdu,
2861 							rx_desc->pool_id);
2862 					rx_bufs_used++;
2863 					dp_rx_add_to_free_desc_list(head,
2864 						tail, rx_desc);
2865 				}
2866 			}
2867 		} else {
2868 			rxdma_error_code = HAL_RXDMA_ERR_WAR;
2869 		}
2870 
2871 		/*
2872 		 * Store the current link buffer into the local structure
2873 		 * to be used for release purposes.
2874 		 */
2875 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
2876 					     buf_info.paddr, buf_info.sw_cookie,
2877 					     buf_info.rbm);
2878 
2879 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
2880 					      &buf_info);
2881 		dp_rx_link_desc_return_by_addr(soc,
2882 					       (hal_buff_addrinfo_t)
2883 						rx_link_buf_info,
2884 						bm_action);
2885 	} while (buf_info.paddr);
2886 
2887 	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
2888 	if (pdev)
2889 		DP_STATS_INC(pdev, err.rxdma_error, 1);
2890 
2891 	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
2892 		dp_rx_err_err("%pK: Packet received with Decrypt error", soc);
2893 	}
2894 
2895 	return rx_bufs_used;
2896 }
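/*
 * A standalone sketch of the link-descriptor walk used in
 * dp_rx_err_mpdu_pop() above: each link descriptor is processed and the
 * buffer-address info of the next one is fetched, until the chain ends
 * (the next physical address is zero). The ex_* names are hypothetical; the
 * per-descriptor work (freeing MSDUs, returning the link desc) is elided.
 */
#if 0	/* illustrative only, not compiled into the driver */
#include <stdint.h>

struct ex_link_desc {
	uint64_t next_paddr;		/* 0 terminates the chain */
	struct ex_link_desc *next_va;	/* resolved virtual address of next */
};

/* Walk the chain and return the number of link descriptors visited. */
uint32_t ex_walk_link_desc_chain(struct ex_link_desc *first)
{
	struct ex_link_desc *cur = first;
	uint32_t count = 0;

	while (cur) {
		count++;		/* per-descriptor processing goes here */

		if (!cur->next_paddr)
			break;
		cur = cur->next_va;
	}

	return count;
}
#endif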
2897 
2898 uint32_t
2899 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2900 		     uint32_t mac_id, uint32_t quota)
2901 {
2902 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2903 	hal_rxdma_desc_t rxdma_dst_ring_desc;
2904 	hal_soc_handle_t hal_soc;
2905 	void *err_dst_srng;
2906 	union dp_rx_desc_list_elem_t *head = NULL;
2907 	union dp_rx_desc_list_elem_t *tail = NULL;
2908 	struct dp_srng *dp_rxdma_srng;
2909 	struct rx_desc_pool *rx_desc_pool;
2910 	uint32_t work_done = 0;
2911 	uint32_t rx_bufs_used = 0;
2912 
2913 	if (!pdev)
2914 		return 0;
2915 
2916 	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;
2917 
2918 	if (!err_dst_srng) {
2919 		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
2920 			      soc, err_dst_srng);
2921 		return 0;
2922 	}
2923 
2924 	hal_soc = soc->hal_soc;
2925 
2926 	qdf_assert(hal_soc);
2927 
2928 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
2929 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
2930 			      soc, err_dst_srng);
2931 		return 0;
2932 	}
2933 
2934 	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
2935 		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
2936 
2937 			rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
2938 						rxdma_dst_ring_desc,
2939 						&head, &tail);
2940 	}
2941 
2942 	dp_srng_access_end(int_ctx, soc, err_dst_srng);
2943 
2944 	if (rx_bufs_used) {
2945 		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2946 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2947 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
2948 		} else {
2949 			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
2950 			rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
2951 		}
2952 
2953 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2954 			rx_desc_pool, rx_bufs_used, &head, &tail, false);
2955 
2956 		work_done += rx_bufs_used;
2957 	}
2958 
2959 	return work_done;
2960 }
2961 
2962 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2963 
2964 static inline void
2965 dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
2966 			hal_rxdma_desc_t rxdma_dst_ring_desc,
2967 			union dp_rx_desc_list_elem_t **head,
2968 			union dp_rx_desc_list_elem_t **tail,
2969 			uint32_t *rx_bufs_used)
2970 {
2971 	void *rx_msdu_link_desc;
2972 	qdf_nbuf_t msdu;
2973 	qdf_nbuf_t last;
2974 	struct hal_rx_msdu_list msdu_list;
2975 	uint16_t num_msdus;
2976 	struct hal_buf_info buf_info;
2977 	uint32_t msdu_cnt, i;
2978 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
2979 	struct rx_desc_pool *rx_desc_pool;
2980 	struct dp_rx_desc *rx_desc;
2981 
2982 	msdu = 0;
2983 
2984 	last = NULL;
2985 
2986 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
2987 				     &buf_info, &msdu_cnt);
2988 
2989 	do {
2990 		rx_msdu_link_desc =
2991 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
2992 
2993 		if (!rx_msdu_link_desc) {
2994 			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
2995 			break;
2996 		}
2997 
2998 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
2999 				     &msdu_list, &num_msdus);
3000 
3001 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
3002 			for (i = 0; i < num_msdus; i++) {
3003 				if (!dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[i])) {
3004 					dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x",
3005 							  msdu_list.sw_cookie[i]);
3006 					continue;
3007 				}
3008 
3009 				rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3010 							soc,
3011 							msdu_list.sw_cookie[i]);
3012 				qdf_assert_always(rx_desc);
3013 				rx_desc_pool =
3014 					&soc->rx_desc_buf[rx_desc->pool_id];
3015 				msdu = rx_desc->nbuf;
3016 
3017 				/*
3018 				 * This is an unlikely scenario where the host is reaping
3019 				 * a descriptor which it already reaped just a while ago
3020 				 * but is yet to replenish it back to HW.
3021 				 */
3022 				if (qdf_unlikely(!rx_desc->in_use) ||
3023 				    qdf_unlikely(!msdu)) {
3024 					dp_rx_err_info_rl("Reaping rx_desc not in use!");
3025 					continue;
3026 				}
3027 
3028 				dp_ipa_rx_buf_smmu_mapping_lock(soc);
3029 				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
3030 				rx_desc->unmapped = 1;
3031 				dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3032 
3033 				dp_rx_buffer_pool_nbuf_free(soc, msdu,
3034 							    rx_desc->pool_id);
3035 				rx_bufs_used[rx_desc->pool_id]++;
3036 				dp_rx_add_to_free_desc_list(head,
3037 							    tail, rx_desc);
3038 			}
3039 		}
3040 
3041 		/*
3042 		 * Store the current link buffer into the local structure
3043 		 * to be used for release purposes.
3044 		 */
3045 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3046 					     buf_info.paddr, buf_info.sw_cookie,
3047 					     buf_info.rbm);
3048 
3049 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3050 					      &buf_info);
3051 		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
3052 					rx_link_buf_info,
3053 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
3054 	} while (buf_info.paddr);
3055 }
3056 
3057 /**
3058  *
3059  * dp_handle_wbm_internal_error() - handles wbm_internal_error case
3060  *
3061  * @soc: core DP main context
3062  * @hal_desc: hal descriptor
3063  * @buf_type: indicates if the buffer is of type link desc or msdu
3064  * Return: None
3065  *
3066  * wbm_internal_error is seen in the following scenarios:
3067  *
3068  * 1.  Null pointers detected in WBM_RELEASE_RING descriptors
3069  * 2.  Null pointers detected during delinking process
3070  *
3071  * Some null pointer cases:
3072  *
3073  * a. MSDU buffer pointer is NULL
3074  * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
3075  * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
3076  */
3077 void
3078 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
3079 			     uint32_t buf_type)
3080 {
3081 	struct hal_buf_info buf_info = {0};
3082 	struct dp_rx_desc *rx_desc = NULL;
3083 	struct rx_desc_pool *rx_desc_pool;
3084 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0};
3085 	union dp_rx_desc_list_elem_t *head = NULL;
3086 	union dp_rx_desc_list_elem_t *tail = NULL;
3087 	uint8_t pool_id;
3088 	uint8_t mac_id;
3089 
3090 	hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info);
3091 
3092 	if (!buf_info.paddr) {
3093 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
3094 		return;
3095 	}
3096 
3097 	/* buffer_addr_info is the first element of ring_desc */
3098 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc,
3099 				  &buf_info);
3100 
3101 	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
3102 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
3103 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3104 							soc,
3105 							buf_info.sw_cookie);
3106 
3107 		if (rx_desc && rx_desc->nbuf) {
3108 			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
3109 			dp_ipa_rx_buf_smmu_mapping_lock(soc);
3110 			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
3111 					      rx_desc->nbuf);
3112 			rx_desc->unmapped = 1;
3113 			dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3114 
3115 			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
3116 						    rx_desc->pool_id);
3117 			dp_rx_add_to_free_desc_list(&head,
3118 						    &tail,
3119 						    rx_desc);
3120 
3121 			rx_bufs_reaped[rx_desc->pool_id]++;
3122 		}
3123 	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
3124 		pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie);
3125 
3126 		dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc,
3127 					&head, &tail, rx_bufs_reaped);
3128 	}
3129 
3130 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
3131 		struct rx_desc_pool *rx_desc_pool;
3132 		struct dp_srng *dp_rxdma_srng;
3133 
3134 		if (!rx_bufs_reaped[mac_id])
3135 			continue;
3136 
3137 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
3138 		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3139 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
3140 
3141 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3142 					rx_desc_pool,
3143 					rx_bufs_reaped[mac_id],
3144 					&head, &tail, false);
3145 	}
3146 }
3147 
3148 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3149