xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #include "dp_types.h"
22 #include "dp_rx.h"
23 #include "dp_tx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 #include "hal_api.h"
27 #include "qdf_trace.h"
28 #include "qdf_nbuf.h"
29 #include "dp_rx_defrag.h"
30 #include "dp_ipa.h"
31 #ifdef WIFI_MONITOR_SUPPORT
32 #include "dp_htt.h"
33 #include <dp_mon.h>
34 #endif
35 #ifdef FEATURE_WDS
36 #include "dp_txrx_wds.h"
37 #endif
38 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
39 #include "qdf_net_types.h"
40 #include "dp_rx_buffer_pool.h"
41 
42 #define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
43 #define dp_rx_err_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
44 #define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
45 #define dp_rx_err_info(params...) \
46 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
47 #define dp_rx_err_info_rl(params...) \
48 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
49 #define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params)
50 
51 #ifndef QCA_HOST_MODE_WIFI_DISABLED
52 
53 
54 /* Max regular Rx packet routing error */
55 #define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20
56 #define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10
57 #define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* micro seconds */
58 
59 #ifdef FEATURE_MEC
60 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
61 			    struct dp_txrx_peer *txrx_peer,
62 			    uint8_t *rx_tlv_hdr,
63 			    qdf_nbuf_t nbuf)
64 {
65 	struct dp_vdev *vdev = txrx_peer->vdev;
66 	struct dp_pdev *pdev = vdev->pdev;
67 	struct dp_mec_entry *mecentry = NULL;
68 	struct dp_ast_entry *ase = NULL;
69 	uint16_t sa_idx = 0;
70 	uint8_t *data;
71 	/*
72 	 * Multicast Echo Check is required only if vdev is STA and
73 	 * received pkt is a multicast/broadcast pkt. Otherwise,
74 	 * skip the MEC check.
75 	 */
76 	if (vdev->opmode != wlan_op_mode_sta)
77 		return false;
78 	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
79 		return false;
80 
81 	data = qdf_nbuf_data(nbuf);
82 
83 	/*
84 	 * If the received pkt's src mac addr matches the vdev
85 	 * mac address, then drop the pkt as it is looped back
86 	 */
87 	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
88 			  vdev->mac_addr.raw,
89 			  QDF_MAC_ADDR_SIZE)))
90 		return true;
91 
92 	/*
93 	 * In case of qwrap isolation mode, do not drop loopback packets.
94 	 * In isolation mode, all packets from the wired stations need to go
95 	 * to rootap and loop back to reach the wireless stations and
96 	 * vice-versa.
97 	 */
98 	if (qdf_unlikely(vdev->isolation_vdev))
99 		return false;
100 
101 	/*
102 	 * If the received pkt's src mac addr matches the MAC addr of a
103 	 * wired PC behind the STA, or the MAC addr of a wireless STA
104 	 * behind the repeater, then drop the pkt as it is looped back
106 	 */
107 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
108 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
109 
110 		if ((sa_idx < 0) ||
111 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
112 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
113 				  "invalid sa_idx: %d", sa_idx);
114 			qdf_assert_always(0);
115 		}
116 
117 		qdf_spin_lock_bh(&soc->ast_lock);
118 		ase = soc->ast_table[sa_idx];
119 
120 		/*
121 		 * This check is not strictly needed since MEC is not dependent
122 		 * on AST, but without it SON has issues in the dual backhaul
123 		 * scenario. In APS SON mode, a client connected to the RE on
124 		 * 2G sends multicast packets. The RE sends them to the CAP
125 		 * over the 5G backhaul, and the CAP loops them back on 2G to
126 		 * the RE. On receiving them on the 2G STA vap, we would assume
127 		 * the client has roamed and kick out the client.
128 		 */
129 		if (ase && (ase->peer_id != txrx_peer->peer_id)) {
130 			qdf_spin_unlock_bh(&soc->ast_lock);
131 			goto drop;
132 		}
133 
134 		qdf_spin_unlock_bh(&soc->ast_lock);
135 	}
136 
137 	qdf_spin_lock_bh(&soc->mec_lock);
138 
139 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
140 						   &data[QDF_MAC_ADDR_SIZE]);
141 	if (!mecentry) {
142 		qdf_spin_unlock_bh(&soc->mec_lock);
143 		return false;
144 	}
145 
146 	qdf_spin_unlock_bh(&soc->mec_lock);
147 
148 drop:
149 	dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT,
150 		       soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));
151 
152 	return true;
153 }
154 #endif
155 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
156 
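/**
 * dp_rx_link_desc_refill_duplicate_check() - check whether the link
 *			descriptor being refilled duplicates the one that
 *			was recorded on the previous call
 * @soc: core DP main context
 * @buf_info: last recorded link descriptor buffer info, updated on return
 * @ring_buf_info: buffer address info of the current ring descriptor
 *
 * Return: None
 */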
157 void dp_rx_link_desc_refill_duplicate_check(
158 				struct dp_soc *soc,
159 				struct hal_buf_info *buf_info,
160 				hal_buff_addrinfo_t ring_buf_info)
161 {
162 	struct hal_buf_info current_link_desc_buf_info = { 0 };
163 
164 	/* do duplicate link desc address check */
165 	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
166 					  &current_link_desc_buf_info);
167 
168 	/*
169 	 * TODO - Check if the hal soc api call can be removed
170 	 * since the cookie is just used for print.
171 	 * buffer_addr_info is the first element of ring_desc
172 	 */
173 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
174 				  (uint32_t *)ring_buf_info,
175 				  &current_link_desc_buf_info);
176 
177 	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
178 			 buf_info->paddr)) {
179 		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
180 			   current_link_desc_buf_info.paddr,
181 			   current_link_desc_buf_info.sw_cookie);
182 		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
183 	}
184 	*buf_info = current_link_desc_buf_info;
185 }
186 
187 /**
188  * dp_rx_link_desc_return_by_addr - Return an MPDU link descriptor to
189  *					HW (WBM) by address
190  *
191  * @soc: core DP main context
192  * @link_desc_addr: link descriptor addr
193  * @bm_action: buffer manager action to be taken on the link descriptor
193  *
194  * Return: QDF_STATUS
195  */
196 QDF_STATUS
197 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
198 			       hal_buff_addrinfo_t link_desc_addr,
199 			       uint8_t bm_action)
200 {
201 	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
202 	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
203 	hal_soc_handle_t hal_soc = soc->hal_soc;
204 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
205 	void *src_srng_desc;
206 
207 	if (!wbm_rel_srng) {
208 		dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc);
209 		return status;
210 	}
211 
212 	/* do duplicate link desc address check */
213 	dp_rx_link_desc_refill_duplicate_check(
214 				soc,
215 				&soc->last_op_info.wbm_rel_link_desc,
216 				link_desc_addr);
217 
218 	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
219 
220 		/* TODO */
221 		/*
222 		 * Need API to convert from hal_ring pointer to
223 		 * Ring Type / Ring Id combo
224 		 */
225 		dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK",
226 			      soc, wbm_rel_srng);
227 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
228 		goto done;
229 	}
230 	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
231 	if (qdf_likely(src_srng_desc)) {
232 		/* Return link descriptor through WBM ring (SW2WBM)*/
233 		hal_rx_msdu_link_desc_set(hal_soc,
234 				src_srng_desc, link_desc_addr, bm_action);
235 		status = QDF_STATUS_SUCCESS;
236 	} else {
237 		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
238 
239 		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);
240 
241 		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
242 			   srng->ring_id,
243 			   soc->stats.rx.err.hal_ring_access_full_fail);
244 		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
245 			   *srng->u.src_ring.hp_addr,
246 			   srng->u.src_ring.reap_hp,
247 			   *srng->u.src_ring.tp_addr,
248 			   srng->u.src_ring.cached_tp);
249 		QDF_BUG(0);
250 	}
251 done:
252 	hal_srng_access_end(hal_soc, wbm_rel_srng);
253 	return status;
254 
255 }
256 
257 qdf_export_symbol(dp_rx_link_desc_return_by_addr);
258 
259 /**
260  * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
261  *				(WBM), following error handling
262  *
263  * @soc: core DP main context
264  * @ring_desc: opaque pointer to the REO error ring descriptor
265  * @bm_action: buffer manager action to be taken on the link descriptor
265  *
266  * Return: QDF_STATUS
267  */
268 QDF_STATUS
269 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
270 		       uint8_t bm_action)
271 {
272 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
273 
274 	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
275 }
276 
277 #ifndef QCA_HOST_MODE_WIFI_DISABLED
278 
279 /**
280  * dp_rx_msdus_drop() - Drops all MSDU's per MPDU
281  *
282  * @soc: core txrx main context
283  * @ring_desc: opaque pointer to the REO error ring descriptor
284  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
285  * @head: head of the local descriptor free-list
286  * @mac_id: pointer filled with the mac id (pool id) of the dropped buffers
287  * @quota: No. of units (packets) that can be serviced in one shot.
288  *
289  * This function is used to drop all MSDUs in an MPDU
291  * Return: uint32_t: No. of elements processed
292  */
293 static uint32_t
294 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
295 		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
296 		 uint8_t *mac_id,
297 		 uint32_t quota)
298 {
299 	uint32_t rx_bufs_used = 0;
300 	void *link_desc_va;
301 	struct hal_buf_info buf_info;
302 	struct dp_pdev *pdev;
303 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
304 	int i;
305 	uint8_t *rx_tlv_hdr;
306 	uint32_t tid;
307 	struct rx_desc_pool *rx_desc_pool;
308 	struct dp_rx_desc *rx_desc;
309 	/* First field in REO Dst ring Desc is buffer_addr_info */
310 	void *buf_addr_info = ring_desc;
311 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
312 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
313 
314 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info);
315 
316 	/* buffer_addr_info is the first element of ring_desc */
317 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
318 				  (uint32_t *)ring_desc,
319 				  &buf_info);
320 
321 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
322 	if (!link_desc_va) {
323 		dp_rx_err_debug("link desc va is null, soc %pK", soc);
324 		return rx_bufs_used;
325 	}
326 
327 more_msdu_link_desc:
328 	/* No UNMAP required -- this is "malloc_consistent" memory */
329 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
330 			     &mpdu_desc_info->msdu_count);
331 
332 	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
333 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
334 						soc, msdu_list.sw_cookie[i]);
335 
336 		qdf_assert_always(rx_desc);
337 
338 		/* all buffers from an MSDU link desc belong to the same pdev */
339 		*mac_id = rx_desc->pool_id;
340 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
341 		if (!pdev) {
342 			dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
343 					soc, rx_desc->pool_id);
344 			return rx_bufs_used;
345 		}
346 
347 		if (!dp_rx_desc_check_magic(rx_desc)) {
348 			dp_rx_err_err("%pK: Invalid rx_desc cookie=%d",
349 				      soc, msdu_list.sw_cookie[i]);
350 			return rx_bufs_used;
351 		}
352 
353 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
354 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
355 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
356 		rx_desc->unmapped = 1;
357 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
358 
359 		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);
360 
361 		rx_bufs_used++;
362 		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
363 						rx_desc->rx_buf_start);
364 		dp_rx_err_err("%pK: Packet received with PN error for tid :%d",
365 			      soc, tid);
366 
367 		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
368 		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
369 			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);
370 
371 		dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info,
372 				      rx_desc->nbuf,
373 				      QDF_TX_RX_STATUS_DROP, true);
374 		/* Just free the buffers */
375 		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);
376 
377 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
378 					    &pdev->free_list_tail, rx_desc);
379 	}
380 
381 	/*
382 	 * If the msdus are spread across multiple link descriptors,
383 	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
384 	 * spread across multiple buffers). Hence, it is
385 	 * necessary to check the next link descriptor and release
386 	 * all the msdus that are part of it.
387 	 */
388 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
389 			link_desc_va,
390 			&next_link_desc_addr_info);
391 
392 	if (hal_rx_is_buf_addr_info_valid(
393 				&next_link_desc_addr_info)) {
394 		/* Clear the next link desc info for the current link_desc */
395 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
396 
397 		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
398 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
399 		hal_rx_buffer_addr_info_get_paddr(
400 				&next_link_desc_addr_info,
401 				&buf_info);
402 		/* buffer_addr_info is the first element of ring_desc */
403 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
404 					  (uint32_t *)&next_link_desc_addr_info,
405 					  &buf_info);
406 		cur_link_desc_addr_info = next_link_desc_addr_info;
407 		buf_addr_info = &cur_link_desc_addr_info;
408 
409 		link_desc_va =
410 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
411 
412 		goto more_msdu_link_desc;
413 	}
414 	quota--;
415 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
416 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
417 	return rx_bufs_used;
418 }
419 
420 /**
421  * dp_rx_pn_error_handle() - Handles PN check errors
422  *
423  * @soc: core txrx main context
424  * @ring_desc: opaque pointer to the REO error ring descriptor
425  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
426  * @mac_id: pointer filled with the mac id (pool id) of the dropped buffers
428  * @quota: No. of units (packets) that can be serviced in one shot.
429  *
430  * This function implements PN error handling
431  * If the peer is configured to ignore the PN check errors
432  * or if DP feels that this frame is still OK, the frame can be
433  * re-injected back to REO to use some of the other features
434  * of REO e.g. duplicate detection/routing to other cores
435  *
436  * Return: uint32_t: No. of elements processed
437  */
438 static uint32_t
439 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
440 		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
441 		      uint8_t *mac_id,
442 		      uint32_t quota)
443 {
444 	uint16_t peer_id;
445 	uint32_t rx_bufs_used = 0;
446 	struct dp_txrx_peer *txrx_peer;
447 	bool peer_pn_policy = false;
448 	dp_txrx_ref_handle txrx_ref_handle = NULL;
449 
450 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
451 					       mpdu_desc_info->peer_meta_data);
452 
453 
454 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
455 						   &txrx_ref_handle,
456 						   DP_MOD_ID_RX_ERR);
457 
458 	if (qdf_likely(txrx_peer)) {
459 		/*
460 		 * TODO: Check for peer specific policies & set peer_pn_policy
461 		 */
462 		dp_err_rl("discard rx due to PN error for peer  %pK",
463 			  txrx_peer);
464 
465 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
466 	}
467 	dp_rx_err_err("%pK: Packet received with PN error", soc);
468 
469 	/* No peer PN policy -- definitely drop */
470 	if (!peer_pn_policy)
471 		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
472 						mpdu_desc_info,
473 						mac_id, quota);
474 
475 	return rx_bufs_used;
476 }
477 
478 #ifdef DP_RX_DELIVER_ALL_OOR_FRAMES
479 /**
480  * dp_rx_deliver_oor_frame() - deliver OOR frames to stack
481  * @soc: Datapath soc handler
482  * @txrx_peer: pointer to the txrx peer object
483  * @nbuf: pointer to the skb of RX frame
484  * @frame_mask: the mask for special frame needed
485  * @rx_tlv_hdr: start of rx tlv header
486  *
487  * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
488  * single nbuf is expected.
489  *
490  * return: true - nbuf has been delivered to stack, false - not.
491  */
492 static bool
493 dp_rx_deliver_oor_frame(struct dp_soc *soc,
494 			struct dp_txrx_peer *txrx_peer,
495 			qdf_nbuf_t nbuf, uint32_t frame_mask,
496 			uint8_t *rx_tlv_hdr)
497 {
498 	uint32_t l2_hdr_offset = 0;
499 	uint16_t msdu_len = 0;
500 	uint32_t skip_len;
501 
502 	l2_hdr_offset =
503 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
504 
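	/*
	 * For a fragment nbuf only the L3 header padding is skipped here;
	 * for a regular nbuf the packet length is set first and the rx TLV
	 * header is skipped along with the padding.
	 */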
505 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
506 		skip_len = l2_hdr_offset;
507 	} else {
508 		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
509 		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
510 		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
511 	}
512 
513 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
514 	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
515 	qdf_nbuf_pull_head(nbuf, skip_len);
516 	qdf_nbuf_set_exc_frame(nbuf, 1);
517 
518 	dp_info_rl("OOR frame, mpdu sn 0x%x",
519 		   hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
520 	dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL);
521 	return true;
522 }
523 
524 #else
525 static bool
526 dp_rx_deliver_oor_frame(struct dp_soc *soc,
527 			struct dp_txrx_peer *txrx_peer,
528 			qdf_nbuf_t nbuf, uint32_t frame_mask,
529 			uint8_t *rx_tlv_hdr)
530 {
531 	return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
532 					   rx_tlv_hdr);
533 }
534 #endif
535 
536 /**
537  * dp_rx_oor_handle() - Handles the msdu which is OOR error
538  *
539  * @soc: core txrx main context
540  * @nbuf: pointer to msdu skb
541  * @peer_id: dp peer ID
542  * @rx_tlv_hdr: start of rx tlv header
543  *
544  * This function processes the msdu delivered from the REO2TCL
545  * ring with error type OOR
546  *
547  * Return: None
548  */
549 static void
550 dp_rx_oor_handle(struct dp_soc *soc,
551 		 qdf_nbuf_t nbuf,
552 		 uint16_t peer_id,
553 		 uint8_t *rx_tlv_hdr)
554 {
555 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
556 				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
557 	struct dp_txrx_peer *txrx_peer = NULL;
558 	dp_txrx_ref_handle txrx_ref_handle = NULL;
559 
560 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
561 						   &txrx_ref_handle,
562 						   DP_MOD_ID_RX_ERR);
563 	if (!txrx_peer) {
564 		dp_info_rl("peer not found");
565 		goto free_nbuf;
566 	}
567 
568 	if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask,
569 				    rx_tlv_hdr)) {
570 		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
571 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
572 		return;
573 	}
574 
575 free_nbuf:
576 	if (txrx_peer)
577 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
578 
579 	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
580 	dp_rx_nbuf_free(nbuf);
581 }
582 
583 /**
584  * dp_rx_err_nbuf_pn_check() - Check if the PN number of this current packet
585  *				is a monotonic increment of the packet number
586  *				from the previous successfully re-ordered
587  *				frame.
588  * @soc: Datapath SOC handle
589  * @ring_desc: REO ring descriptor
590  * @nbuf: Current packet
591  *
592  * Return: QDF_STATUS_SUCCESS, if the pn check passes, else QDF_STATUS_E_FAILURE
593  */
594 static inline QDF_STATUS
595 dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc,
596 			qdf_nbuf_t nbuf)
597 {
598 	uint64_t prev_pn, curr_pn[2];
599 
600 	if (!hal_rx_encryption_info_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
601 		return QDF_STATUS_SUCCESS;
602 
603 	hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn);
604 	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), curr_pn);
605 
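	/*
	 * curr_pn[] has room for a 128-bit PN split across two 64-bit
	 * words; only the lower 64 bits are compared against the PN of
	 * the previous re-ordered frame here.
	 */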
606 	if (curr_pn[0] > prev_pn)
607 		return QDF_STATUS_SUCCESS;
608 
609 	return QDF_STATUS_E_FAILURE;
610 }
611 
612 #ifdef WLAN_SKIP_BAR_UPDATE
613 static
614 void dp_rx_err_handle_bar(struct dp_soc *soc,
615 			  struct dp_peer *peer,
616 			  qdf_nbuf_t nbuf)
617 {
618 	dp_info_rl("BAR update to H.W is skipped");
619 	DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
620 }
621 #else
622 static
623 void dp_rx_err_handle_bar(struct dp_soc *soc,
624 			  struct dp_peer *peer,
625 			  qdf_nbuf_t nbuf)
626 {
627 	uint8_t *rx_tlv_hdr;
628 	unsigned char type, subtype;
629 	uint16_t start_seq_num;
630 	uint32_t tid;
631 	QDF_STATUS status;
632 	struct ieee80211_frame_bar *bar;
633 
634 	/*
635 	 * 1. Is this a BAR frame? If not, discard it.
636 	 * 2. If it is, get the peer id, tid, ssn
637 	 * 2a Do a tid update
638 	 */
639 
640 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
641 	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size);
642 
643 	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
644 	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
645 
646 	if (!(type == IEEE80211_FC0_TYPE_CTL &&
647 	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
648 		dp_err_rl("Not a BAR frame!");
649 		return;
650 	}
651 
652 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
653 	qdf_assert_always(tid < DP_MAX_TIDS);
654 
655 	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
656 
657 	dp_info_rl("tid %u window_size %u start_seq_num %u",
658 		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);
659 
660 	status = dp_rx_tid_update_wifi3(peer, tid,
661 					peer->rx_tid[tid].ba_win_size,
662 					start_seq_num,
663 					true);
664 	if (status != QDF_STATUS_SUCCESS) {
665 		dp_err_rl("failed to handle bar frame update rx tid");
666 		DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
667 	} else {
668 		DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
669 	}
670 }
671 #endif
672 
673 /**
674  * _dp_rx_bar_frame_handle(): Core of the BAR frame handling
675  * @soc: Datapath SoC handle
676  * @nbuf: packet being processed
677  * @mpdu_desc_info: mpdu desc info for the current packet
678  * @tid: tid on which the packet arrived
679  * @err_status: Flag to indicate if REO encountered an error while routing this
680  *		frame
681  * @error_code: REO error code
682  *
683  * Return: None
684  */
685 static void
686 _dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
687 			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
688 			uint32_t tid, uint8_t err_status, uint32_t error_code)
689 {
690 	uint16_t peer_id;
691 	struct dp_peer *peer;
692 
693 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
694 					       mpdu_desc_info->peer_meta_data);
695 	peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
696 	if (!peer)
697 		return;
698 
699 	dp_info("BAR frame: "
700 		" peer_id = %d"
701 		" tid = %u"
702 		" SSN = %d"
703 		" error status = %d",
704 		peer->peer_id,
705 		tid,
706 		mpdu_desc_info->mpdu_seq,
707 		err_status);
708 
709 	if (err_status == HAL_REO_ERROR_DETECTED) {
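	/*
	 * Only 2K-jump and OOR BAR errors are handed to
	 * dp_rx_err_handle_bar() (which updates the rx tid unless BAR
	 * updates are skipped); other error codes only bump the BAR
	 * frame counter.
	 */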
710 		switch (error_code) {
711 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
712 		case HAL_REO_ERR_BAR_FRAME_OOR:
713 			dp_rx_err_handle_bar(soc, peer, nbuf);
714 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
715 			break;
716 		default:
717 			DP_STATS_INC(soc, rx.bar_frame, 1);
718 		}
719 	}
720 
721 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
722 }
723 
724 /**
725  * dp_rx_bar_frame_handle() - Function to handle err BAR frames
726  * @soc: core DP main context
727  * @ring_desc: Hal ring desc
728  * @rx_desc: dp rx desc
729  * @mpdu_desc_info: mpdu desc info
730  *
731  * Handle the error BAR frames received. Ensure the SOC level
732  * stats are updated based on the REO error code. The BAR frames
733  * are further processed by updating the Rx tids with the start
734  * sequence number (SSN) and BA window size. Desc is returned
735  * to the free desc list
736  *
737  * Return: none
738  */
739 static void
740 dp_rx_bar_frame_handle(struct dp_soc *soc,
741 		       hal_ring_desc_t ring_desc,
742 		       struct dp_rx_desc *rx_desc,
743 		       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
744 		       uint8_t err_status,
745 		       uint32_t err_code)
746 {
747 	qdf_nbuf_t nbuf;
748 	struct dp_pdev *pdev;
749 	struct rx_desc_pool *rx_desc_pool;
750 	uint8_t *rx_tlv_hdr;
751 	uint32_t tid;
752 
753 	nbuf = rx_desc->nbuf;
754 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
755 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
756 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
757 	rx_desc->unmapped = 1;
758 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
759 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
760 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
761 					rx_tlv_hdr);
762 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
763 
764 	_dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status,
765 				err_code);
766 	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf,
767 			      QDF_TX_RX_STATUS_DROP, true);
768 	dp_rx_link_desc_return(soc, ring_desc,
769 			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
770 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
771 				    rx_desc->pool_id);
772 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
773 				    &pdev->free_list_tail,
774 				    rx_desc);
775 }
776 
777 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
778 
779 /**
780  * dp_2k_jump_handle() - Function to handle 2k jump exception
781  *                        on WBM ring
782  *
783  * @soc: core DP main context
784  * @nbuf: buffer pointer
785  * @rx_tlv_hdr: start of rx tlv header
786  * @peer_id: peer id of first msdu
787  * @tid: Tid for which exception occurred
788  *
789  * This function handles 2k jump violations arising out
790  * of receiving aggregates in non BA case. This typically
791  * may happen if aggregates are received on a QOS enabled TID
792  * while Rx window size is still initialized to value of 2. Or
793  * it may also happen if negotiated window size is 1 but peer
794  * sends aggregates.
795  *
796  */
797 
798 void
799 dp_2k_jump_handle(struct dp_soc *soc,
800 		  qdf_nbuf_t nbuf,
801 		  uint8_t *rx_tlv_hdr,
802 		  uint16_t peer_id,
803 		  uint8_t tid)
804 {
805 	struct dp_peer *peer = NULL;
806 	struct dp_rx_tid *rx_tid = NULL;
807 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;
808 
809 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
810 	if (!peer) {
811 		dp_rx_err_info_rl("%pK: peer not found", soc);
812 		goto free_nbuf;
813 	}
814 
815 	if (tid >= DP_MAX_TIDS) {
816 		dp_info_rl("invalid tid");
817 		goto nbuf_deliver;
818 	}
819 
820 	rx_tid = &peer->rx_tid[tid];
821 	qdf_spin_lock_bh(&rx_tid->tid_lock);
822 
823 	/* only if BA session is active, allow send Delba */
824 	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
825 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
826 		goto nbuf_deliver;
827 	}
828 
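	/*
	 * If a DELBA for this tid is already outstanding
	 * (delba_tx_status is set), do not send another one; otherwise
	 * mark it outstanding, bump the retry count and send a DELBA
	 * with the 2K-jump reason.
	 */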
829 	if (!rx_tid->delba_tx_status) {
830 		rx_tid->delba_tx_retry++;
831 		rx_tid->delba_tx_status = 1;
832 		rx_tid->delba_rcode =
833 			IEEE80211_REASON_QOS_SETUP_REQUIRED;
834 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
835 		if (soc->cdp_soc.ol_ops->send_delba) {
836 			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
837 				     1);
838 			soc->cdp_soc.ol_ops->send_delba(
839 					peer->vdev->pdev->soc->ctrl_psoc,
840 					peer->vdev->vdev_id,
841 					peer->mac_addr.raw,
842 					tid,
843 					rx_tid->delba_rcode,
844 					CDP_DELBA_2K_JUMP);
845 		}
846 	} else {
847 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
848 	}
849 
850 nbuf_deliver:
851 	if (dp_rx_deliver_special_frame(soc, peer->txrx_peer, nbuf, frame_mask,
852 					rx_tlv_hdr)) {
853 		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
854 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
855 		return;
856 	}
857 
858 free_nbuf:
859 	if (peer)
860 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
861 	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
862 	dp_rx_nbuf_free(nbuf);
863 }
864 
865 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
866     defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
867 /**
868  * dp_rx_null_q_handle_invalid_peer_id_exception() - handle an invalid peer id exception
869  * @soc: pointer to dp_soc struct
870  * @pool_id: Pool id to find dp_pdev
871  * @rx_tlv_hdr: TLV header of received packet
872  * @nbuf: SKB
873  *
874  * In certain types of packets, if the peer_id is not correct the
875  * driver may not be able to find the peer. Try finding the peer by
876  * addr_2 of the received MPDU. If the peer is found this way, the
877  * sw_peer_id & ast_idx are most likely corrupted.
878  *
879  * Return: true if the peer is found by addr_2 of the received MPDU, else false
880  */
881 static bool
882 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
883 					      uint8_t pool_id,
884 					      uint8_t *rx_tlv_hdr,
885 					      qdf_nbuf_t nbuf)
886 {
887 	struct dp_peer *peer = NULL;
888 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
889 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
890 	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;
891 
892 	if (!pdev) {
893 		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
894 				soc, pool_id);
895 		return false;
896 	}
897 	/*
898 	 * WAR: In certain types of packets, if the peer_id is not correct
899 	 * the driver may not be able to find the peer. Try finding the
900 	 * peer by addr_2 of the received MPDU.
901 	 */
902 	if (wh)
903 		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
904 					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
905 	if (peer) {
906 		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
907 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
908 				     QDF_TRACE_LEVEL_DEBUG);
909 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
910 				 1, qdf_nbuf_len(nbuf));
911 		dp_rx_nbuf_free(nbuf);
912 
913 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
914 		return true;
915 	}
916 	return false;
917 }
918 #else
919 static inline bool
920 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
921 					      uint8_t pool_id,
922 					      uint8_t *rx_tlv_hdr,
923 					      qdf_nbuf_t nbuf)
924 {
925 	return false;
926 }
927 #endif
928 
929 /**
930  * dp_rx_check_pkt_len() - Check for pktlen validity
931  * @soc: DP SOC context
932  * @pkt_len: computed length of the pkt from caller in bytes
933  *
934  * Return: true if pktlen > RX_DATA_BUFFER_SIZE, else false
935  *
936  */
937 static inline
938 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
939 {
940 	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
941 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
942 				 1, pkt_len);
943 		return true;
944 	} else {
945 		return false;
946 	}
947 }
948 
949 /*
950  * dp_rx_deliver_to_osif_stack() - function to deliver rx pkts to stack
951  * @soc: DP soc
952  * @vdev: DP vdev handle
953  * @txrx_peer: pointer to the txrx_peer object
954  * @nbuf: skb list head
955  * @tail: skb list tail
956  * @is_eapol: eapol pkt check
957  *
958  * Return: None
959  */
960 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
961 static inline void
962 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
963 			    struct dp_vdev *vdev,
964 			    struct dp_txrx_peer *txrx_peer,
965 			    qdf_nbuf_t nbuf,
966 			    qdf_nbuf_t tail,
967 			    bool is_eapol)
968 {
969 	if (is_eapol && soc->eapol_over_control_port)
970 		dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
971 	else
972 		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
973 }
974 #else
975 static inline void
976 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
977 			    struct dp_vdev *vdev,
978 			    struct dp_txrx_peer *txrx_peer,
979 			    qdf_nbuf_t nbuf,
980 			    qdf_nbuf_t tail,
981 			    bool is_eapol)
982 {
983 	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
984 }
985 #endif
986 
987 #ifdef WLAN_FEATURE_11BE_MLO
988 /*
989  * dp_rx_err_match_dhost() - function to check whether dest-mac is correct
990  * @eh: Ethernet header of incoming packet
991  * @vdev: dp_vdev object of the VAP on which this data packet is received
992  *
993  * Return: 1 if the destination mac is correct,
994  *         0 if this frame is not correctly destined to this VAP/MLD
995  */
996 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
997 {
998 	return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
999 			     QDF_MAC_ADDR_SIZE) == 0) ||
1000 		(qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
1001 			     QDF_MAC_ADDR_SIZE) == 0));
1002 }
1003 
1004 #else
1005 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
1006 {
1007 	return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
1008 			    QDF_MAC_ADDR_SIZE) == 0);
1009 }
1010 #endif
1011 
1012 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1013 
1014 /**
1015  * dp_rx_err_drop_3addr_mcast() - Check if the drop_3addr_mcast feature is enabled
1016  *				  If so, drop the multicast frame.
1017  * @vdev: datapath vdev
1018  * @rx_tlv_hdr: TLV header
1019  *
1020  * Return: true if packet is to be dropped,
1021  *	false, if packet is not dropped.
1022  */
1023 static bool
1024 dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
1025 {
1026 	struct dp_soc *soc = vdev->pdev->soc;
1027 
1028 	if (!vdev->drop_3addr_mcast)
1029 		return false;
1030 
1031 	if (vdev->opmode != wlan_op_mode_sta)
1032 		return false;
1033 
1034 	if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
1035 		return true;
1036 
1037 	return false;
1038 }
1039 
1040 /**
1041  * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed
1042  *				for this frame received in REO error ring.
1043  * @soc: Datapath SOC handle
1044  * @error: REO error detected or not
1045  * @error_code: Error code in case of REO error
1046  *
1047  * Return: true if the pn check is needed in software,
1048  *	false if the pn check is not needed.
1049  */
1050 static inline bool
1051 dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
1052 			     uint32_t error_code)
1053 {
1054 	return (soc->features.pn_in_reo_dest &&
1055 		(error == HAL_REO_ERROR_DETECTED &&
1056 		 (hal_rx_reo_is_2k_jump(error_code) ||
1057 		  hal_rx_reo_is_oor_error(error_code) ||
1058 		  hal_rx_reo_is_bar_oor_2k_jump(error_code))));
1059 }
1060 
1061 /**
1062  * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
1063  *                              descriptor violation on either a
1064  *                              REO or WBM ring
1065  *
1066  * @soc: core DP main context
1067  * @nbuf: buffer pointer
1068  * @rx_tlv_hdr: start of rx tlv header
1069  * @pool_id: mac id
1070  * @txrx_peer: txrx peer handle
1071  *
1072  * This function handles NULL queue descriptor violations arising out
1073  * This function handles NULL queue descriptor violations arising out of
1074  * may happen if a packet is received on a QOS enabled TID before the
1075  * ADDBA negotiation for that TID, when the TID queue is setup. Or
1076  * it may also happen for MC/BC frames if they are not routed to the
1077  * non-QOS TID queue, in the absence of any other default TID queue.
1078  * This error can show up in either a REO destination or a WBM release ring.
1079  *
1080  * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
1081  *         if nbuf could not be handled or dropped.
1082  */
1083 static QDF_STATUS
1084 dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
1085 			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
1086 			 struct dp_txrx_peer *txrx_peer)
1087 {
1088 	uint32_t pkt_len;
1089 	uint16_t msdu_len;
1090 	struct dp_vdev *vdev;
1091 	uint8_t tid;
1092 	qdf_ether_header_t *eh;
1093 	struct hal_rx_msdu_metadata msdu_metadata;
1094 	uint16_t sa_idx = 0;
1095 	bool is_eapol = 0;
1096 	bool enh_flag;
1097 
1098 	qdf_nbuf_set_rx_chfrag_start(nbuf,
1099 				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1100 							       rx_tlv_hdr));
1101 	qdf_nbuf_set_rx_chfrag_end(nbuf,
1102 				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
1103 								 rx_tlv_hdr));
1104 	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1105 								  rx_tlv_hdr));
1106 	qdf_nbuf_set_da_valid(nbuf,
1107 			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
1108 							      rx_tlv_hdr));
1109 	qdf_nbuf_set_sa_valid(nbuf,
1110 			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
1111 							      rx_tlv_hdr));
1112 
1113 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
1114 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1115 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
1116 
1117 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
1118 		if (dp_rx_check_pkt_len(soc, pkt_len))
1119 			goto drop_nbuf;
1120 
1121 		/* Set length in nbuf */
1122 		qdf_nbuf_set_pktlen(
1123 			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
1124 		qdf_assert_always(nbuf->data == rx_tlv_hdr);
1125 	}
1126 
1127 	/*
1128 	 * Check if DMA completed -- msdu_done is the last bit
1129 	 * to be written
1130 	 */
1131 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1132 
1133 		dp_err_rl("MSDU DONE failure");
1134 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1135 				     QDF_TRACE_LEVEL_INFO);
1136 		qdf_assert(0);
1137 	}
1138 
1139 	if (!txrx_peer &&
1140 	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
1141 							  rx_tlv_hdr, nbuf))
1142 		return QDF_STATUS_E_FAILURE;
1143 
1144 	if (!txrx_peer) {
1145 		bool mpdu_done = false;
1146 		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
1147 
1148 		if (!pdev) {
1149 			dp_err_rl("pdev is null for pool_id = %d", pool_id);
1150 			return QDF_STATUS_E_FAILURE;
1151 		}
1152 
1153 		dp_err_rl("txrx_peer is NULL");
1154 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1155 				 qdf_nbuf_len(nbuf));
1156 
1157 		/* QCN9000 has the support enabled */
1158 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
1159 			mpdu_done = true;
1160 			nbuf->next = NULL;
1161 			/* Trigger invalid peer handler wrapper */
1162 			dp_rx_process_invalid_peer_wrapper(soc,
1163 					nbuf, mpdu_done, pool_id);
1164 		} else {
1165 			mpdu_done = soc->arch_ops.dp_rx_chain_msdus(soc, nbuf,
1166 								    rx_tlv_hdr,
1167 								    pool_id);
1168 			/* Trigger invalid peer handler wrapper */
1169 			dp_rx_process_invalid_peer_wrapper(soc,
1170 					pdev->invalid_peer_head_msdu,
1171 					mpdu_done, pool_id);
1172 		}
1173 
1174 		if (mpdu_done) {
1175 			pdev->invalid_peer_head_msdu = NULL;
1176 			pdev->invalid_peer_tail_msdu = NULL;
1177 		}
1178 
1179 		return QDF_STATUS_E_FAILURE;
1180 	}
1181 
1182 	vdev = txrx_peer->vdev;
1183 	if (!vdev) {
1184 		dp_err_rl("Null vdev!");
1185 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1186 		goto drop_nbuf;
1187 	}
1188 
1189 	/*
1190 	 * Advance the packet start pointer by total size of
1191 	 * pre-header TLV's
1192 	 */
1193 	if (qdf_nbuf_is_frag(nbuf))
1194 		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
1195 	else
1196 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
1197 				   soc->rx_pkt_tlv_size));
1198 
1199 	DP_STATS_INC_PKT(vdev, rx_i.null_q_desc_pkt, 1, qdf_nbuf_len(nbuf));
1200 
1201 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
1202 
1203 	if (dp_rx_err_drop_3addr_mcast(vdev, rx_tlv_hdr)) {
1204 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.mcast_3addr_drop, 1);
1205 		goto drop_nbuf;
1206 	}
1207 
1208 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
1209 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
1210 
1211 		if ((sa_idx < 0) ||
1212 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
1213 			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
1214 			goto drop_nbuf;
1215 		}
1216 	}
1217 
1218 	if ((!soc->mec_fw_offload) &&
1219 	    dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
1220 		/* this is a looped back MCBC pkt, drop it */
1221 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
1222 					      qdf_nbuf_len(nbuf));
1223 		goto drop_nbuf;
1224 	}
1225 
1226 	/*
1227 	 * In qwrap mode if the received packet matches with any of the vdev
1228 	 * mac addresses, drop it. Do not receive multicast packets originated
1229 	 * from any proxysta.
1230 	 */
1231 	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
1232 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
1233 					      qdf_nbuf_len(nbuf));
1234 		goto drop_nbuf;
1235 	}
1236 
1237 	if (qdf_unlikely(txrx_peer->nawds_enabled &&
1238 			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1239 							rx_tlv_hdr))) {
1240 		dp_err_rl("free buffer for multicast packet");
1241 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1);
1242 		goto drop_nbuf;
1243 	}
1244 
1245 	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
1246 		dp_err_rl("mcast Policy Check Drop pkt");
1247 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1);
1248 		goto drop_nbuf;
1249 	}
1250 	/* WDS Source Port Learning */
1251 	if (!soc->ast_offload_support &&
1252 	    qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
1253 		vdev->wds_enabled))
1254 		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, txrx_peer, nbuf,
1255 					msdu_metadata);
1256 
1257 	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
1258 		struct dp_peer *peer;
1259 		struct dp_rx_tid *rx_tid;
1260 		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
1261 		peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
1262 					     DP_MOD_ID_RX_ERR);
1263 		if (peer) {
1264 			rx_tid = &peer->rx_tid[tid];
1265 			qdf_spin_lock_bh(&rx_tid->tid_lock);
1266 			if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
1267 				dp_rx_tid_setup_wifi3(peer, tid, 1,
1268 						      IEEE80211_SEQ_MAX);
1269 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1270 			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
1271 			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
1272 		}
1273 	}
1274 
1275 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1276 
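	/*
	 * For a peer that is not yet authorized, only EAPOL/WAPI frames
	 * destined to this VAP (or its MLD address, for 11BE MLO) are
	 * delivered; everything else is dropped.
	 */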
1277 	if (!txrx_peer->authorize) {
1278 		is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
1279 			   qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
1280 
1281 		if (is_eapol) {
1282 			if (!dp_rx_err_match_dhost(eh, vdev))
1283 				goto drop_nbuf;
1284 		} else {
1285 			goto drop_nbuf;
1286 		}
1287 	}
1288 
1289 	/*
1290 	 * Drop packets in this path if cce_match is found. Packets will come
1291 	 * in following path depending on whether tidQ is setup.
1292 	 * 1. If tidQ is setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and
1293 	 * cce_match = 1
1294 	 *    Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already
1295 	 *    dropped.
1296 	 * 2. If tidQ is not setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and
1297 	 * cce_match = 1
1298 	 *    These packets need to be dropped and should not get delivered
1299 	 *    to stack.
1300 	 */
1301 	if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr))) {
1302 		goto drop_nbuf;
1303 	}
1304 
1305 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1306 		qdf_nbuf_set_next(nbuf, NULL);
1307 		dp_rx_deliver_raw(vdev, nbuf, txrx_peer);
1308 	} else {
1309 		enh_flag = vdev->pdev->enhanced_stats_en;
1310 		qdf_nbuf_set_next(nbuf, NULL);
1311 		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
1312 					  enh_flag);
1313 		/*
1314 		 * Update the protocol tag in SKB based on
1315 		 * CCE metadata
1316 		 */
1317 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1318 					  EXCEPTION_DEST_RING_ID,
1319 					  true, true);
1320 
1321 		/* Update the flow tag in SKB based on FSE metadata */
1322 		dp_rx_update_flow_tag(soc, vdev, nbuf,
1323 				      rx_tlv_hdr, true);
1324 
1325 		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
1326 				 soc->hal_soc, rx_tlv_hdr) &&
1327 				 (vdev->rx_decap_type ==
1328 				  htt_cmn_pkt_type_ethernet))) {
1329 			DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
1330 					    enh_flag);
1331 
1332 			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
1333 				DP_PEER_BC_INCC_PKT(txrx_peer, 1,
1334 						    qdf_nbuf_len(nbuf),
1335 						    enh_flag);
1336 		}
1337 
1338 		qdf_nbuf_set_exc_frame(nbuf, 1);
1339 		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
1340 					    is_eapol);
1341 	}
1342 	return QDF_STATUS_SUCCESS;
1343 
1344 drop_nbuf:
1345 	dp_rx_nbuf_free(nbuf);
1346 	return QDF_STATUS_E_FAILURE;
1347 }
1348 
1349 /**
1350  * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
1351  *
1352  * @soc: core txrx main context
1353  * @ring_desc: opaque pointer to the REO error ring descriptor
1354  * @mpdu_desc_info: pointer to mpdu level description info
1355  * @link_desc_va: pointer to msdu_link_desc virtual address
1356  * @err_code: reo error code fetched from ring entry
1357  *
1358  * Function to handle msdus fetched from msdu link desc, currently
1359  * supports the REO errors NULL queue, 2K jump and OOR.
1360  *
1361  * Return: msdu count processed
1362  */
1363 static uint32_t
1364 dp_rx_reo_err_entry_process(struct dp_soc *soc,
1365 			    void *ring_desc,
1366 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1367 			    void *link_desc_va,
1368 			    enum hal_reo_error_code err_code)
1369 {
1370 	uint32_t rx_bufs_used = 0;
1371 	struct dp_pdev *pdev;
1372 	int i;
1373 	uint8_t *rx_tlv_hdr_first;
1374 	uint8_t *rx_tlv_hdr_last;
1375 	uint32_t tid = DP_MAX_TIDS;
1376 	uint16_t peer_id;
1377 	struct dp_rx_desc *rx_desc;
1378 	struct rx_desc_pool *rx_desc_pool;
1379 	qdf_nbuf_t nbuf;
1380 	struct hal_buf_info buf_info;
1381 	struct hal_rx_msdu_list msdu_list;
1382 	uint16_t num_msdus;
1383 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
1384 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
1385 	/* First field in REO Dst ring Desc is buffer_addr_info */
1386 	void *buf_addr_info = ring_desc;
1387 	qdf_nbuf_t head_nbuf = NULL;
1388 	qdf_nbuf_t tail_nbuf = NULL;
1389 	uint16_t msdu_processed = 0;
1390 	QDF_STATUS status;
1391 	bool ret, is_pn_check_needed;
1392 	uint8_t rx_desc_pool_id;
1393 	struct dp_txrx_peer *txrx_peer = NULL;
1394 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1395 	hal_ring_handle_t hal_ring_hdl = soc->reo_exception_ring.hal_srng;
1396 
1397 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1398 					mpdu_desc_info->peer_meta_data);
1399 	is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
1400 							  HAL_REO_ERROR_DETECTED,
1401 							  err_code);
1402 more_msdu_link_desc:
1403 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
1404 			     &num_msdus);
1405 	for (i = 0; i < num_msdus; i++) {
1406 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
1407 						soc,
1408 						msdu_list.sw_cookie[i]);
1409 
1410 		qdf_assert_always(rx_desc);
1411 		nbuf = rx_desc->nbuf;
1412 
1413 		/*
1414 		 * This is an unlikely scenario where the host is reaping
1415 		 * a descriptor which it already reaped just a while ago
1416 		 * but has not yet replenished back to HW.
1417 		 * In this case the host will dump the last 128 descriptors,
1418 		 * including the software descriptor rx_desc, and assert.
1419 		 */
1420 		if (qdf_unlikely(!rx_desc->in_use) ||
1421 		    qdf_unlikely(!nbuf)) {
1422 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1423 			dp_info_rl("Reaping rx_desc not in use!");
1424 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1425 						   ring_desc, rx_desc);
1426 			/* ignore duplicate RX desc and continue to process */
1427 			/* Pop out the descriptor */
1428 			continue;
1429 		}
1430 
1431 		ret = dp_rx_desc_paddr_sanity_check(rx_desc,
1432 						    msdu_list.paddr[i]);
1433 		if (!ret) {
1434 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1435 			rx_desc->in_err_state = 1;
1436 			continue;
1437 		}
1438 
1439 		rx_desc_pool_id = rx_desc->pool_id;
1440 		/* all buffers from a MSDU link belong to same pdev */
1441 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id);
1442 
1443 		rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id];
1444 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
1445 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
1446 		rx_desc->unmapped = 1;
1447 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1448 
1449 		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
1450 		rx_bufs_used++;
1451 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1452 					    &pdev->free_list_tail, rx_desc);
1453 
1454 		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);
1455 
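		/*
		 * If this buffer is a continuation of a larger MSDU, keep
		 * accumulating nbufs in the head/tail list and defer the
		 * error handling until the last buffer of the MSDU is seen.
		 */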
1456 		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
1457 				 HAL_MSDU_F_MSDU_CONTINUATION))
1458 			continue;
1459 
1460 		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
1461 					     rx_desc_pool_id)) {
1462 			/* MSDU queued back to the pool */
1463 			goto process_next_msdu;
1464 		}
1465 
1466 		if (is_pn_check_needed) {
1467 			if (msdu_list.msdu_info[i].msdu_flags &
1468 			    HAL_MSDU_F_FIRST_MSDU_IN_MPDU) {
1469 				hal_rx_tlv_populate_mpdu_desc_info(
1470 							soc->hal_soc,
1471 							qdf_nbuf_data(nbuf),
1472 							mpdu_desc_info);
1473 			} else {
1474 				/*
1475 				 * DO NOTHING -
1476 				 * Continue using the same mpdu_desc_info
1477 				 * details populated from the first msdu in
1478 				 * the mpdu.
1479 				 */
1480 			}
1481 
1482 			status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf);
1483 			if (QDF_IS_STATUS_ERROR(status)) {
1484 				DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail,
1485 					     1);
1486 				dp_rx_nbuf_free(nbuf);
1487 				goto process_next_msdu;
1488 			}
1489 
1490 			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1491 					mpdu_desc_info->peer_meta_data);
1492 
1493 			if (mpdu_desc_info->bar_frame)
1494 				_dp_rx_bar_frame_handle(soc, nbuf,
1495 							mpdu_desc_info, tid,
1496 							HAL_REO_ERROR_DETECTED,
1497 							err_code);
1498 		}
1499 
1500 		if (qdf_unlikely(mpdu_desc_info->mpdu_flags &
1501 				 HAL_MPDU_F_RAW_AMPDU)) {
1502 			dp_err_rl("RAW ampdu in REO error not expected");
1503 			DP_STATS_INC(soc, rx.err.reo_err_raw_mpdu_drop, 1);
1504 			qdf_nbuf_list_free(head_nbuf);
1505 			goto process_next_msdu;
1506 		}
1507 
1508 		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
1509 		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);
1510 
1511 		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
1512 			nbuf = dp_rx_sg_create(soc, head_nbuf);
1513 			qdf_nbuf_set_is_frag(nbuf, 1);
1514 			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
1515 		}
1516 
1517 		switch (err_code) {
1518 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
1519 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
1520 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
1521 			/*
1522 			 * Only the first msdu carries a valid mpdu start
1523 			 * descriptor tlv; use it for the following msdus.
1524 			 */
1525 			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1526 							   rx_tlv_hdr_last))
1527 				tid = hal_rx_mpdu_start_tid_get(
1528 							soc->hal_soc,
1529 							rx_tlv_hdr_first);
1530 
1531 			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
1532 					  peer_id, tid);
1533 			break;
1534 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
1535 		case HAL_REO_ERR_BAR_FRAME_OOR:
1536 			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
1537 			break;
1538 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
1539 			txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(
1540 							soc, peer_id,
1541 							&txrx_ref_handle,
1542 							DP_MOD_ID_RX_ERR);
1543 			if (!txrx_peer)
1544 				dp_info_rl("txrx_peer is null peer_id %u",
1545 					   peer_id);
1546 			dp_rx_null_q_desc_handle(soc, nbuf, rx_tlv_hdr_last,
1547 						 rx_desc_pool_id, txrx_peer);
1548 			if (txrx_peer)
1549 				dp_txrx_peer_unref_delete(txrx_ref_handle,
1550 							  DP_MOD_ID_RX_ERR);
1551 			break;
1552 		default:
1553 			dp_err_rl("Non-support error code %d", err_code);
1554 			dp_rx_nbuf_free(nbuf);
1555 		}
1556 
1557 process_next_msdu:
1558 		msdu_processed++;
1559 		head_nbuf = NULL;
1560 		tail_nbuf = NULL;
1561 	}
1562 
1563 	/*
1564 	 * If the msdus are spread across multiple link descriptors,
1565 	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
1566 	 * spread across multiple buffers). Hence, it is
1567 	 * necessary to check the next link descriptor and release
1568 	 * all the msdus that are part of it.
1569 	 */
1570 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
1571 			link_desc_va,
1572 			&next_link_desc_addr_info);
1573 
1574 	if (hal_rx_is_buf_addr_info_valid(
1575 				&next_link_desc_addr_info)) {
1576 		/* Clear the next link desc info for the current link_desc */
1577 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
1578 		dp_rx_link_desc_return_by_addr(
1579 				soc,
1580 				buf_addr_info,
1581 				HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1582 
1583 		hal_rx_buffer_addr_info_get_paddr(
1584 				&next_link_desc_addr_info,
1585 				&buf_info);
1586 		/* buffer_addr_info is the first element of ring_desc */
1587 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
1588 					  (uint32_t *)&next_link_desc_addr_info,
1589 					  &buf_info);
1590 		link_desc_va =
1591 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1592 		cur_link_desc_addr_info = next_link_desc_addr_info;
1593 		buf_addr_info = &cur_link_desc_addr_info;
1594 
1595 		goto more_msdu_link_desc;
1596 	}
1597 
1598 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
1599 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1600 	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
1601 		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);
1602 
1603 	return rx_bufs_used;
1604 }
1605 
1606 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1607 
1608 /**
1609  * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
1610  *			       frames to OS or wifi parse errors.
1611  * @soc: core DP main context
1612  * @nbuf: buffer pointer
1613  * @rx_tlv_hdr: start of rx tlv header
1614  * @txrx_peer: peer reference
1615  * @err_code: rxdma err code
1616  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1617  * pool_id have the same mapping)
1618  *
1619  * Return: None
1620  */
1621 void
1622 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
1623 			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
1624 			uint8_t err_code, uint8_t mac_id)
1625 {
1626 	uint32_t pkt_len, l2_hdr_offset;
1627 	uint16_t msdu_len;
1628 	struct dp_vdev *vdev;
1629 	qdf_ether_header_t *eh;
1630 	bool is_broadcast;
1631 
1632 	/*
1633 	 * Check if DMA completed -- msdu_done is the last bit
1634 	 * to be written
1635 	 */
1636 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1637 
1638 		dp_err_rl("MSDU DONE failure");
1639 
1640 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1641 				     QDF_TRACE_LEVEL_INFO);
1642 		qdf_assert(0);
1643 	}
1644 
1645 	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
1646 							   rx_tlv_hdr);
1647 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1648 	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
1649 
1650 	if (dp_rx_check_pkt_len(soc, pkt_len)) {
1651 		/* Drop & free packet */
1652 		dp_rx_nbuf_free(nbuf);
1653 		return;
1654 	}
1655 	/* Set length in nbuf */
1656 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
1657 
1658 	qdf_nbuf_set_next(nbuf, NULL);
1659 
1660 	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1661 	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
1662 
1663 	if (!txrx_peer) {
1664 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL");
1665 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1666 				qdf_nbuf_len(nbuf));
1667 		/* Trigger invalid peer handler wrapper */
1668 		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
1669 		return;
1670 	}
1671 
1672 	vdev = txrx_peer->vdev;
1673 	if (!vdev) {
1674 		dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
1675 				 vdev);
1676 		/* Drop & free packet */
1677 		dp_rx_nbuf_free(nbuf);
1678 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1679 		return;
1680 	}
1681 
1682 	/*
1683 	 * Advance the packet start pointer by total size of
1684 	 * pre-header TLV's
1685 	 */
1686 	dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset);
1687 
1688 	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
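	/*
	 * For WIFI_PARSE errors, peek at the ethertype: VLAN-tagged STP
	 * frames are counted and handed to the mesh/raw path, while any
	 * other VLAN-tagged frame continues through the regular rx path.
	 */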
1689 		uint8_t *pkt_type;
1690 
1691 		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
1692 		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
1693 			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
1694 							htons(QDF_LLC_STP)) {
1695 				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
1696 				goto process_mesh;
1697 			} else {
1698 				goto process_rx;
1699 			}
1700 		}
1701 	}
1702 	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
1703 		goto process_mesh;
1704 
1705 	/*
1706 	 * WAPI cert AP sends rekey frames as unencrypted.
1707 	 * Thus RXDMA will report unencrypted frame error.
1708 	 * To pass WAPI cert case, SW needs to pass unencrypted
1709 	 * rekey frame to stack.
1710 	 */
1711 	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1712 		goto process_rx;
1713 	}
1714 	/*
1715 	 * In the dynamic WEP case rekey frames are not encrypted,
1716 	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled
1717 	 * and the key install is already done.
1718 	 */
1719 	if ((vdev->sec_type == cdp_sec_type_wep104) &&
1720 	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
1721 		goto process_rx;
1722 
1723 process_mesh:
1724 
1725 	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
1726 		dp_rx_nbuf_free(nbuf);
1727 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1728 		return;
1729 	}
1730 
1731 	if (vdev->mesh_vdev) {
1732 		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
1733 				      == QDF_STATUS_SUCCESS) {
1734 			dp_rx_err_info("%pK: mesh pkt filtered", soc);
1735 			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);
1736 
1737 			dp_rx_nbuf_free(nbuf);
1738 			return;
1739 		}
1740 		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
1741 	}
1742 process_rx:
1743 	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1744 							rx_tlv_hdr) &&
1745 		(vdev->rx_decap_type ==
1746 				htt_cmn_pkt_type_ethernet))) {
1747 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1748 		is_broadcast = (QDF_IS_ADDR_BROADCAST
1749 				(eh->ether_dhost)) ? 1 : 0 ;
1750 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
1751 					      qdf_nbuf_len(nbuf));
1752 		if (is_broadcast) {
1753 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1,
1754 						      qdf_nbuf_len(nbuf));
1755 		}
1756 	}
1757 
1758 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1759 		dp_rx_deliver_raw(vdev, nbuf, txrx_peer);
1760 	} else {
1761 		/* Update the protocol tag in SKB based on CCE metadata */
1762 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1763 					  EXCEPTION_DEST_RING_ID, true, true);
1764 		/* Update the flow tag in SKB based on FSE metadata */
1765 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
1766 		DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
1767 		qdf_nbuf_set_exc_frame(nbuf, 1);
1768 		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
1769 	}
1770 
1771 	return;
1772 }
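
/*
 * Usage note (illustrative, based on the WBM error path later in this file):
 * for unencrypted, wifi-parse and unauthorized-WDS errors this handler is
 * invoked from dp_rx_wbm_err_process() roughly as:
 *
 *	dp_rx_process_rxdma_err(soc, nbuf, rx_tlv_hdr, txrx_peer,
 *				wbm_err_info.rxdma_err_code, pool_id);
 */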
1773 
1774 /**
1775  * dp_rx_process_mic_error(): Function to pass mic error indication to umac
1776  * @soc: core DP main context
1777  * @nbuf: buffer pointer
1778  * @rx_tlv_hdr: start of rx tlv header
1779  * @txrx_peer: txrx peer handle
1780  *
1781  * Return: void
1782  */
1783 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
1784 			     uint8_t *rx_tlv_hdr,
1785 			     struct dp_txrx_peer *txrx_peer)
1786 {
1787 	struct dp_vdev *vdev = NULL;
1788 	struct dp_pdev *pdev = NULL;
1789 	struct ol_if_ops *tops = NULL;
1790 	uint16_t rx_seq, fragno;
1791 	uint8_t is_raw;
1792 	unsigned int tid;
1793 	QDF_STATUS status;
1794 	struct cdp_rx_mic_err_info mic_failure_info;
1795 
1796 	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1797 					    rx_tlv_hdr))
1798 		return;
1799 
1800 	if (!txrx_peer) {
1801 		dp_info_rl("txrx_peer not found");
1802 		goto fail;
1803 	}
1804 
1805 	vdev = txrx_peer->vdev;
1806 	if (!vdev) {
1807 		dp_info_rl("VDEV not found");
1808 		goto fail;
1809 	}
1810 
1811 	pdev = vdev->pdev;
1812 	if (!pdev) {
1813 		dp_info_rl("PDEV not found");
1814 		goto fail;
1815 	}
1816 
1817 	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
1818 	if (is_raw) {
1819 		fragno = dp_rx_frag_get_mpdu_frag_number(soc,
1820 							 qdf_nbuf_data(nbuf));
1821 		/* Only the last fragment can be received on this path */
1822 		if (fragno) {
1823 			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
1824 							qdf_nbuf_data(nbuf));
1825 			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
1826 							qdf_nbuf_data(nbuf));
1827 
1828 			status = dp_rx_defrag_add_last_frag(soc, txrx_peer,
1829 							    tid, rx_seq, nbuf);
1830 			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
1831 				   "status %d !", rx_seq, fragno, status);
1832 			return;
1833 		}
1834 	}
1835 
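	/*
	 * Build the MIC failure indication for the control path: the
	 * destination (addr1) and transmitter (addr2) MAC addresses are read
	 * from the 802.11 header in the RX TLVs and handed to the
	 * ol_ops->rx_mic_error callback.
	 */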
1836 	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
1837 				  &mic_failure_info.da_mac_addr.bytes[0])) {
1838 		dp_err_rl("Failed to get da_mac_addr");
1839 		goto fail;
1840 	}
1841 
1842 	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
1843 				  &mic_failure_info.ta_mac_addr.bytes[0])) {
1844 		dp_err_rl("Failed to get ta_mac_addr");
1845 		goto fail;
1846 	}
1847 
1848 	mic_failure_info.key_id = 0;
1849 	mic_failure_info.multicast =
1850 		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
1851 	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
1852 	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
1853 	mic_failure_info.data = NULL;
1854 	mic_failure_info.vdev_id = vdev->vdev_id;
1855 
1856 	tops = pdev->soc->cdp_soc.ol_ops;
1857 	if (tops->rx_mic_error)
1858 		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
1859 				   &mic_failure_info);
1860 
1861 fail:
1862 	dp_rx_nbuf_free(nbuf);
1863 	return;
1864 }
1865 
1866 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1867 	defined(WLAN_MCAST_MLO)
1868 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1869 			       struct dp_vdev *vdev,
1870 			       struct dp_txrx_peer *peer,
1871 			       qdf_nbuf_t nbuf)
1872 {
1873 	if (soc->arch_ops.dp_rx_mcast_handler) {
1874 		if (soc->arch_ops.dp_rx_mcast_handler(soc, vdev, peer, nbuf))
1875 			return true;
1876 	}
1877 	return false;
1878 }
1879 #else
1880 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1881 			       struct dp_vdev *vdev,
1882 			       struct dp_txrx_peer *peer,
1883 			       qdf_nbuf_t nbuf)
1884 {
1885 	return false;
1886 }
1887 #endif
1888 
1889 /**
1890  * dp_rx_err_route_hdl() - Function to send EAPOL frames to stack.
1891  *                            Free any other packet which comes in
1892  *                            this path.
1893  *
1894  * @soc: core DP main context
1895  * @nbuf: buffer pointer
1896  * @txrx_peer: txrx peer handle
1897  * @rx_tlv_hdr: start of rx tlv header
1898  * @err_src: rxdma/reo
1899  *
1900  * This function indicates EAPOL frames received in the wbm error ring
1901  * to the stack. Any other frame is dropped.
1902  *
1903  * Return: None
1904  */
1905 static void
1906 dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
1907 		    struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr,
1908 		    enum hal_rx_wbm_error_source err_src)
1909 {
1910 	uint32_t pkt_len;
1911 	uint16_t msdu_len;
1912 	struct dp_vdev *vdev;
1913 	struct hal_rx_msdu_metadata msdu_metadata;
1914 	bool is_eapol;
1915 
1916 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
1917 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1918 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
1919 
1920 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
1921 		if (dp_rx_check_pkt_len(soc, pkt_len))
1922 			goto drop_nbuf;
1923 
1924 		/* Set length in nbuf */
1925 		qdf_nbuf_set_pktlen(
1926 			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
1927 		qdf_assert_always(nbuf->data == rx_tlv_hdr);
1928 	}
1929 
1930 	/*
1931 	 * Check if DMA completed -- msdu_done is the last bit
1932 	 * to be written
1933 	 */
1934 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1935 		dp_err_rl("MSDU DONE failure");
1936 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1937 				     QDF_TRACE_LEVEL_INFO);
1938 		qdf_assert(0);
1939 	}
1940 
1941 	if (!txrx_peer)
1942 		goto drop_nbuf;
1943 
1944 	vdev = txrx_peer->vdev;
1945 	if (!vdev) {
1946 		dp_err_rl("Null vdev!");
1947 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1948 		goto drop_nbuf;
1949 	}
1950 
1951 	/*
1952 	 * Advance the packet start pointer by total size of
1953 	 * pre-header TLV's
1954 	 */
1955 	if (qdf_nbuf_is_frag(nbuf))
1956 		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
1957 	else
1958 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
1959 				   soc->rx_pkt_tlv_size));
1960 
1961 	if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf))
1962 		return;
1963 
1964 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
1965 
1966 	/*
1967 	 * Indicate EAPOL frame to stack only when vap mac address
1968 	 * matches the destination address.
1969 	 */
1970 	is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
1971 	if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1972 		qdf_ether_header_t *eh =
1973 			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1974 		if (dp_rx_err_match_dhost(eh, vdev)) {
1975 			DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1,
1976 					 qdf_nbuf_len(nbuf));
1977 
1978 			/*
1979 			 * Update the protocol tag in SKB based on
1980 			 * CCE metadata.
1981 			 */
1982 			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1983 						  EXCEPTION_DEST_RING_ID,
1984 						  true, true);
1985 			/* Update the flow tag in SKB based on FSE metadata */
1986 			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
1987 					      true);
1988 			DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
1989 						  qdf_nbuf_len(nbuf),
1990 						  vdev->pdev->enhanced_stats_en);
1991 			qdf_nbuf_set_exc_frame(nbuf, 1);
1992 			qdf_nbuf_set_next(nbuf, NULL);
1993 
1994 			dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf,
1995 						    NULL, is_eapol);
1996 
1997 			return;
1998 		}
1999 	}
2000 
2001 drop_nbuf:
2002 
2003 	DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
2004 		      err_src == HAL_RX_WBM_ERR_SRC_REO);
2005 	DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
2006 		      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);
2007 
2008 	dp_rx_nbuf_free(nbuf);
2009 }
2010 
2011 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2012 
2013 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
2014 /**
2015  * dp_rx_link_cookie_check() - Validate link desc cookie
2016  * @ring_desc: ring descriptor
2017  *
2018  * Return: qdf status
2019  */
2020 static inline QDF_STATUS
2021 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
2022 {
2023 	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
2024 		return QDF_STATUS_E_FAILURE;
2025 
2026 	return QDF_STATUS_SUCCESS;
2027 }
2028 
2029 /**
2030  * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
2031  * @ring_desc: ring descriptor
2032  *
2033  * Return: None
2034  */
2035 static inline void
2036 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
2037 {
2038 	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
2039 }
2040 #else
2041 static inline QDF_STATUS
2042 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
2043 {
2044 	return QDF_STATUS_SUCCESS;
2045 }
2046 
2047 static inline void
2048 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
2049 {
2050 }
2051 #endif
2052 
2053 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
2054 /**
2055  * dp_rx_err_ring_record_entry() - Record rx err ring history
2056  * @soc: Datapath soc structure
2057  * @paddr: paddr of the buffer in RX err ring
2058  * @sw_cookie: SW cookie of the buffer in RX err ring
2059  * @rbm: Return buffer manager of the buffer in RX err ring
2060  *
2061  * Return: None
2062  */
2063 static inline void
2064 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
2065 			    uint32_t sw_cookie, uint8_t rbm)
2066 {
2067 	struct dp_buf_info_record *record;
2068 	uint32_t idx;
2069 
2070 	if (qdf_unlikely(!soc->rx_err_ring_history))
2071 		return;
2072 
2073 	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
2074 					DP_RX_ERR_HIST_MAX);
2075 
2076 	/* No NULL check needed for record since its an array */
2077 	record = &soc->rx_err_ring_history->entry[idx];
2078 
2079 	record->timestamp = qdf_get_log_timestamp();
2080 	record->hbi.paddr = paddr;
2081 	record->hbi.sw_cookie = sw_cookie;
2082 	record->hbi.rbm = rbm;
2083 }
2084 #else
2085 static inline void
2086 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
2087 			    uint32_t sw_cookie, uint8_t rbm)
2088 {
2089 }
2090 #endif
2091 
2092 #ifdef HANDLE_RX_REROUTE_ERR
2093 static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
2094 				     hal_ring_desc_t ring_desc)
2095 {
2096 	int lmac_id = DP_INVALID_LMAC_ID;
2097 	struct dp_rx_desc *rx_desc;
2098 	struct hal_buf_info hbi;
2099 	struct dp_pdev *pdev;
2100 	struct rx_desc_pool *rx_desc_pool;
2101 
2102 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2103 
2104 	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie);
2105 
2106 	/* sanity */
2107 	if (!rx_desc) {
2108 		DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1);
2109 		goto assert_return;
2110 	}
2111 
2112 	if (!rx_desc->nbuf)
2113 		goto assert_return;
2114 
2115 	dp_rx_err_ring_record_entry(soc, hbi.paddr,
2116 				    hbi.sw_cookie,
2117 				    hal_rx_ret_buf_manager_get(soc->hal_soc,
2118 							       ring_desc));
2119 	if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) {
2120 		DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2121 		rx_desc->in_err_state = 1;
2122 		goto assert_return;
2123 	}
2124 
2125 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2126 	/* After this point the rx_desc and nbuf are valid */
2127 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
2128 	qdf_assert_always(!rx_desc->unmapped);
2129 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
2130 	rx_desc->unmapped = 1;
2131 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2132 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
2133 				    rx_desc->pool_id);
2134 
2135 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
2136 	lmac_id = rx_desc->pool_id;
2137 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
2138 				    &pdev->free_list_tail,
2139 				    rx_desc);
2140 	return lmac_id;
2141 
2142 assert_return:
2143 	qdf_assert(0);
2144 	return lmac_id;
2145 }
2146 
2147 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
2148 {
2149 	int ret;
2150 	uint64_t cur_time_stamp;
2151 
2152 	DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1);
2153 
2154 	/* Recover if overall error count exceeds threshold */
2155 	if (soc->stats.rx.err.reo_err_msdu_buf_rcved >
2156 	    DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) {
2157 		dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
2158 		       soc->stats.rx.err.reo_err_msdu_buf_rcved,
2159 		       soc->rx_route_err_start_pkt_ts);
2160 		qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR);
2161 	}
2162 
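	/*
	 * Rate-based recovery: count the routed error packets seen within
	 * DP_RX_ERR_ROUTE_TIMEOUT_US of the first one; if that count exceeds
	 * DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT, trigger self recovery too.
	 */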
2163 	cur_time_stamp = qdf_get_log_timestamp_usecs();
2164 	if (!soc->rx_route_err_start_pkt_ts)
2165 		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
2166 
2167 	/* Recover if threshold number of packets received in threshold time */
2168 	if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) >
2169 						DP_RX_ERR_ROUTE_TIMEOUT_US) {
2170 		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
2171 
2172 		if (soc->rx_route_err_in_window >
2173 		    DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) {
2174 			qdf_trigger_self_recovery(NULL,
2175 						  QDF_RX_REG_PKT_ROUTE_ERR);
2176 			dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
2177 			       soc->stats.rx.err.reo_err_msdu_buf_rcved,
2178 			       soc->rx_route_err_start_pkt_ts);
2179 		} else {
2180 			soc->rx_route_err_in_window = 1;
2181 		}
2182 	} else {
2183 		soc->rx_route_err_in_window++;
2184 	}
2185 
2186 	ret = dp_rx_err_handle_msdu_buf(soc, ring_desc);
2187 
2188 	return ret;
2189 }
2190 #else /* HANDLE_RX_REROUTE_ERR */
2191 
2192 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
2193 {
2194 	qdf_assert_always(0);
2195 
2196 	return DP_INVALID_LMAC_ID;
2197 }
2198 #endif /* HANDLE_RX_REROUTE_ERR */
2199 
2200 #ifdef WLAN_MLO_MULTI_CHIP
2201 static void dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm)
2202 {
2203 	/*
2204 	 * For the WIN use case we should only get fragment packets in
2205 	 * this ring; since fragmentation is not supported for MLO,
2206 	 * we should not see link descriptors from another soc.
2207 	 *
2208 	 * Assert that link descriptors come from the local soc.
2209 	 */
2210 	qdf_assert_always(rbm == soc->idle_link_bm_id);
2211 }
2212 #else
2213 static void dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm)
2214 {
2215 }
2216 #endif
2217 
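/*
 * Overview: dp_rx_err_process() drains the REO exception ring. Each entry is
 * expected to reference an MSDU link descriptor; fragments are handed to
 * dp_rx_frag_handle(), BAR frames to dp_rx_bar_frame_handle(), PN failures to
 * dp_rx_pn_error_handle(), 2k-jump/OOR/NULL-queue errors to
 * dp_rx_reo_err_entry_process(), and the remaining error codes are dropped
 * via dp_rx_msdus_drop(). Reaped buffers are replenished before returning.
 */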
2218 uint32_t
2219 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2220 		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
2221 {
2222 	hal_ring_desc_t ring_desc;
2223 	hal_soc_handle_t hal_soc;
2224 	uint32_t count = 0;
2225 	uint32_t rx_bufs_used = 0;
2226 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
2227 	uint8_t mac_id = 0;
2228 	uint8_t buf_type;
2229 	uint8_t err_status;
2230 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
2231 	struct hal_buf_info hbi;
2232 	struct dp_pdev *dp_pdev;
2233 	struct dp_srng *dp_rxdma_srng;
2234 	struct rx_desc_pool *rx_desc_pool;
2235 	void *link_desc_va;
2236 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
2237 	uint16_t num_msdus;
2238 	struct dp_rx_desc *rx_desc = NULL;
2239 	QDF_STATUS status;
2240 	bool ret;
2241 	uint32_t error_code = 0;
2242 	bool sw_pn_check_needed;
2243 	int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
2244 	int i, rx_bufs_reaped_total;
2245 
2246 	/* Debug -- Remove later */
2247 	qdf_assert(soc && hal_ring_hdl);
2248 
2249 	hal_soc = soc->hal_soc;
2250 
2251 	/* Debug -- Remove later */
2252 	qdf_assert(hal_soc);
2253 
2254 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
2255 
2256 		/* TODO */
2257 		/*
2258 		 * Need API to convert from hal_ring pointer to
2259 		 * Ring Type / Ring Id combo
2260 		 */
2261 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
2262 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc,
2263 			      hal_ring_hdl);
2264 		goto done;
2265 	}
2266 
2267 	while (qdf_likely(quota-- && (ring_desc =
2268 				hal_srng_dst_peek(hal_soc,
2269 						  hal_ring_hdl)))) {
2270 
2271 		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
2272 		err_status = hal_rx_err_status_get(hal_soc, ring_desc);
2273 		buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc);
2274 
2275 		if (err_status == HAL_REO_ERROR_DETECTED)
2276 			error_code = hal_rx_get_reo_error_code(hal_soc,
2277 							       ring_desc);
2278 
2279 		qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0);
2280 		sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
2281 								  err_status,
2282 								  error_code);
2283 		if (!sw_pn_check_needed) {
2284 			/*
2285 			 * MPDU desc info will be present in the REO desc
2286 			 * only in the below scenarios
2287 			 * 1) pn_in_dest_disabled:  always
2288 			 * 2) pn_in_dest enabled: All cases except 2k-jump
2289 			 *			and OOR errors
2290 			 */
2291 			hal_rx_mpdu_desc_info_get(hal_soc, ring_desc,
2292 						  &mpdu_desc_info);
2293 		}
2294 
2295 		if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0)
2296 			goto next_entry;
2297 
2298 		/*
2299 		 * For REO error ring, only MSDU LINK DESC is expected.
2300 		 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
2301 		 */
2302 		if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) {
2303 			int lmac_id;
2304 
2305 			lmac_id = dp_rx_err_exception(soc, ring_desc);
2306 			if (lmac_id >= 0)
2307 				rx_bufs_reaped[lmac_id] += 1;
2308 			goto next_entry;
2309 		}
2310 
2311 		hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
2312 					  &hbi);
2313 		/*
2314 		 * check for the magic number in the sw cookie
2315 		 */
2316 		qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) &
2317 					soc->link_desc_id_start);
2318 
2319 		dp_idle_link_bm_id_check(soc, hbi.rbm);
2320 
2321 		status = dp_rx_link_cookie_check(ring_desc);
2322 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2323 			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
2324 			break;
2325 		}
2326 
2327 		hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2328 		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
2329 		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
2330 				     &num_msdus);
2331 		if (!num_msdus ||
2332 		    !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) {
2333 			dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x",
2334 					  num_msdus, msdu_list.sw_cookie[0]);
2335 			dp_rx_link_desc_return(soc, ring_desc,
2336 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
2337 			goto next_entry;
2338 		}
2339 
2340 		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
2341 					    msdu_list.sw_cookie[0],
2342 					    msdu_list.rbm[0]);
2343 		// TODO - BE- Check if the RBM is to be checked for all chips
2344 		if (qdf_unlikely((msdu_list.rbm[0] !=
2345 					dp_rx_get_rx_bm_id(soc)) &&
2346 				 (msdu_list.rbm[0] !=
2347 				  soc->idle_link_bm_id) &&
2348 				 (msdu_list.rbm[0] !=
2349 					dp_rx_get_defrag_bm_id(soc)))) {
2350 			/* TODO */
2351 			/* Call appropriate handler */
2352 			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2353 				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
2354 				dp_rx_err_err("%pK: Invalid RBM %d",
2355 					      soc, msdu_list.rbm[0]);
2356 			}
2357 
2358 			/* Return link descriptor through WBM ring (SW2WBM)*/
2359 			dp_rx_link_desc_return(soc, ring_desc,
2360 					HAL_BM_ACTION_RELEASE_MSDU_LIST);
2361 			goto next_entry;
2362 		}
2363 
2364 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
2365 						soc,
2366 						msdu_list.sw_cookie[0]);
2367 		qdf_assert_always(rx_desc);
2368 
2369 		mac_id = rx_desc->pool_id;
2370 
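		/*
		 * When a SW PN check is needed (2k-jump/OOR errors with PN
		 * check in destination enabled) the MPDU desc info is not
		 * available, so skip the BAR/fragment handling and go
		 * straight to the REO error code processing.
		 */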
2371 		if (sw_pn_check_needed) {
2372 			goto process_reo_error_code;
2373 		}
2374 
2375 		if (mpdu_desc_info.bar_frame) {
2376 			qdf_assert_always(mpdu_desc_info.msdu_count == 1);
2377 
2378 			dp_rx_bar_frame_handle(soc, ring_desc, rx_desc,
2379 					       &mpdu_desc_info, err_status,
2380 					       error_code);
2381 
2382 			rx_bufs_reaped[mac_id] += 1;
2383 			goto next_entry;
2384 		}
2385 
2386 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
2387 			/*
2388 			 * We only handle one msdu per link desc for fragmented
2389 			 * case. We drop the msdus and release the link desc
2390 			 * back if there are more than one msdu in link desc.
2391 			 */
2392 			if (qdf_unlikely(num_msdus > 1)) {
2393 				count = dp_rx_msdus_drop(soc, ring_desc,
2394 							 &mpdu_desc_info,
2395 							 &mac_id, quota);
2396 				rx_bufs_reaped[mac_id] += count;
2397 				goto next_entry;
2398 			}
2399 
2400 			/*
2401 			 * this is an unlikely scenario where the host is reaping
2402 			 * a descriptor which it already reaped just a while ago
2403 			 * but is yet to replenish it back to HW.
2404 			 * In this case host will dump the last 128 descriptors
2405 			 * including the software descriptor rx_desc and assert.
2406 			 */
2407 
2408 			if (qdf_unlikely(!rx_desc->in_use)) {
2409 				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
2410 				dp_info_rl("Reaping rx_desc not in use!");
2411 				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2412 							   ring_desc, rx_desc);
2413 				/* ignore duplicate RX desc and continue */
2414 				/* Pop out the descriptor */
2415 				goto next_entry;
2416 			}
2417 
2418 			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
2419 							    msdu_list.paddr[0]);
2420 			if (!ret) {
2421 				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2422 				rx_desc->in_err_state = 1;
2423 				goto next_entry;
2424 			}
2425 
2426 			count = dp_rx_frag_handle(soc,
2427 						  ring_desc, &mpdu_desc_info,
2428 						  rx_desc, &mac_id, quota);
2429 
2430 			rx_bufs_reaped[mac_id] += count;
2431 			DP_STATS_INC(soc, rx.rx_frags, 1);
2432 			goto next_entry;
2433 		}
2434 
2435 process_reo_error_code:
2436 		/*
2437 		 * Expect REO errors to be handled after this point
2438 		 */
2439 		qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED);
2440 
2441 		dp_info_rl("Got pkt with REO ERROR: %d", error_code);
2442 
2443 		switch (error_code) {
2444 		case HAL_REO_ERR_PN_CHECK_FAILED:
2445 		case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
2446 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2447 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2448 			if (dp_pdev)
2449 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
2450 			count = dp_rx_pn_error_handle(soc,
2451 						      ring_desc,
2452 						      &mpdu_desc_info, &mac_id,
2453 						      quota);
2454 
2455 			rx_bufs_reaped[mac_id] += count;
2456 			break;
2457 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2458 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
2459 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2460 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
2461 		case HAL_REO_ERR_BAR_FRAME_OOR:
2462 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2463 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2464 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2465 			if (dp_pdev)
2466 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
2467 			count = dp_rx_reo_err_entry_process(
2468 					soc,
2469 					ring_desc,
2470 					&mpdu_desc_info,
2471 					link_desc_va,
2472 					error_code);
2473 
2474 			rx_bufs_reaped[mac_id] += count;
2475 			break;
2476 		case HAL_REO_ERR_QUEUE_DESC_INVALID:
2477 		case HAL_REO_ERR_AMPDU_IN_NON_BA:
2478 		case HAL_REO_ERR_NON_BA_DUPLICATE:
2479 		case HAL_REO_ERR_BA_DUPLICATE:
2480 		case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION:
2481 		case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN:
2482 		case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET:
2483 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2484 			count = dp_rx_msdus_drop(soc, ring_desc,
2485 						 &mpdu_desc_info,
2486 						 &mac_id, quota);
2487 			rx_bufs_reaped[mac_id] += count;
2488 			break;
2489 		default:
2490 			/* Assert if unexpected error type */
2491 			qdf_assert_always(0);
2492 		}
2493 next_entry:
2494 		dp_rx_link_cookie_invalidate(ring_desc);
2495 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2496 
2497 		rx_bufs_reaped_total = 0;
2498 		for (i = 0; i < MAX_PDEV_CNT; i++)
2499 			rx_bufs_reaped_total += rx_bufs_reaped[i];
2500 
2501 		if (dp_rx_reap_loop_pkt_limit_hit(soc, rx_bufs_reaped_total,
2502 						  max_reap_limit))
2503 			break;
2504 	}
2505 
2506 done:
2507 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2508 
2509 	if (soc->rx.flags.defrag_timeout_check) {
2510 		uint32_t now_ms =
2511 			qdf_system_ticks_to_msecs(qdf_system_ticks());
2512 
2513 		if (now_ms >= soc->rx.defrag.next_flush_ms)
2514 			dp_rx_defrag_waitlist_flush(soc);
2515 	}
2516 
2517 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2518 		if (rx_bufs_reaped[mac_id]) {
2519 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2520 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2521 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
2522 
2523 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2524 						rx_desc_pool,
2525 						rx_bufs_reaped[mac_id],
2526 						&dp_pdev->free_list_head,
2527 						&dp_pdev->free_list_tail,
2528 						false);
2529 			rx_bufs_used += rx_bufs_reaped[mac_id];
2530 		}
2531 	}
2532 
2533 	return rx_bufs_used; /* Assume no scale factor for now */
2534 }
2535 
2536 #ifdef DROP_RXDMA_DECRYPT_ERR
2537 /**
2538  * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
2539  *
2540  * Return: true if rxdma decrypt err frames are handled and false otherwise
2541  */
2542 static inline bool dp_handle_rxdma_decrypt_err(void)
2543 {
2544 	return false;
2545 }
2546 #else
2547 static inline bool dp_handle_rxdma_decrypt_err(void)
2548 {
2549 	return true;
2550 }
2551 #endif
2552 
2553 /*
2554  * dp_rx_wbm_sg_list_last_msdu_war() - WAR for HW issue
2555  *
2556  * This is a WAR for a HW issue where the length is only valid in the last msdu
2557  * @soc: DP SOC handle
2558  */
2559 static inline void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
2560 {
2561 	if (soc->wbm_sg_last_msdu_war) {
2562 		uint32_t len;
2563 		qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail;
2564 
2565 		len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
2566 						     qdf_nbuf_data(temp));
2567 		temp = soc->wbm_sg_param.wbm_sg_nbuf_head;
2568 		while (temp) {
2569 			QDF_NBUF_CB_RX_PKT_LEN(temp) = len;
2570 			temp = temp->next;
2571 		}
2572 	}
2573 }
2574 
2575 #ifdef RX_DESC_DEBUG_CHECK
2576 /**
2577  * dp_rx_wbm_desc_nbuf_sanity_check - Sanity check for WBM rx_desc paddr
2578  *					corruption
2579  * @soc: core txrx main context
2580  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring
2581  * @ring_desc: REO ring descriptor
2582  * @rx_desc: Rx descriptor
2583  *
2584  * Return: QDF_STATUS_SUCCESS on match, QDF_STATUS_E_FAILURE otherwise
2585  */
2586 static
2587 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2588 					    hal_ring_handle_t hal_ring_hdl,
2589 					    hal_ring_desc_t ring_desc,
2590 					    struct dp_rx_desc *rx_desc)
2591 {
2592 	struct hal_buf_info hbi;
2593 
2594 	hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2595 	/* Sanity check for possible buffer paddr corruption */
2596 	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
2597 		return QDF_STATUS_SUCCESS;
2598 
2599 	hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc);
2600 
2601 	return QDF_STATUS_E_FAILURE;
2602 }
2603 
2604 #else
2605 static
2606 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2607 					    hal_ring_handle_t hal_ring_hdl,
2608 					    hal_ring_desc_t ring_desc,
2609 					    struct dp_rx_desc *rx_desc)
2610 {
2611 	return QDF_STATUS_SUCCESS;
2612 }
2613 #endif
2614 
2615 static inline bool
2616 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
2617 {
2618 	/*
2619 	 * Currently the Null Queue and Unencrypted error handlers have support
2620 	 * for SG. Other error handlers do not deal with SG buffers.
2621 	 */
2622 	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
2623 	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
2624 	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
2625 	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
2626 		return true;
2627 
2628 	return false;
2629 }
2630 
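/*
 * Overview: dp_rx_wbm_err_process() drains the WBM error release ring in two
 * passes. The reap pass unmaps each buffer and stashes the WBM error info in
 * the nbuf TLV area via hal_rx_priv_info_set_in_tlv(); the processing pass
 * below retrieves that info and dispatches on the error source (REO/RXDMA),
 * push reason and error code. Reaped buffers are replenished in between.
 */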
2631 uint32_t
2632 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2633 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
2634 {
2635 	hal_ring_desc_t ring_desc;
2636 	hal_soc_handle_t hal_soc;
2637 	struct dp_rx_desc *rx_desc;
2638 	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
2639 	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
2640 	uint32_t rx_bufs_used = 0;
2641 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
2642 	uint8_t buf_type;
2643 	uint8_t mac_id;
2644 	struct dp_pdev *dp_pdev;
2645 	struct dp_srng *dp_rxdma_srng;
2646 	struct rx_desc_pool *rx_desc_pool;
2647 	uint8_t *rx_tlv_hdr;
2648 	bool is_tkip_mic_err;
2649 	qdf_nbuf_t nbuf_head = NULL;
2650 	qdf_nbuf_t nbuf_tail = NULL;
2651 	qdf_nbuf_t nbuf, next;
2652 	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
2653 	uint8_t pool_id;
2654 	uint8_t tid = 0;
2655 	uint8_t msdu_continuation = 0;
2656 	bool process_sg_buf = false;
2657 	uint32_t wbm_err_src;
2658 	QDF_STATUS status;
2659 	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
2660 
2661 	/* Debug -- Remove later */
2662 	qdf_assert(soc && hal_ring_hdl);
2663 
2664 	hal_soc = soc->hal_soc;
2665 
2666 	/* Debug -- Remove later */
2667 	qdf_assert(hal_soc);
2668 
2669 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
2670 
2671 		/* TODO */
2672 		/*
2673 		 * Need API to convert from hal_ring pointer to
2674 		 * Ring Type / Ring Id combo
2675 		 */
2676 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
2677 			      soc, hal_ring_hdl);
2678 		goto done;
2679 	}
2680 
2681 	while (qdf_likely(quota)) {
2682 		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2683 		if (qdf_unlikely(!ring_desc))
2684 			break;
2685 
2686 		/* XXX */
2687 		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);
2688 
2689 		/*
2690 		 * For WBM ring, expect only MSDU buffers
2691 		 */
2692 		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);
2693 
2694 		wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc);
2695 		qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
2696 			   (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));
2697 
2698 		if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc,
2699 								   ring_desc,
2700 								   &rx_desc)) {
2701 			dp_rx_err_err("get rx desc from hal_desc failed");
2702 			continue;
2703 		}
2704 
2705 		qdf_assert_always(rx_desc);
2706 
2707 		if (!dp_rx_desc_check_magic(rx_desc)) {
2708 			dp_rx_err_err("%pK: Invalid rx_desc %pK",
2709 				      soc, rx_desc);
2710 			continue;
2711 		}
2712 
2713 		/*
2714 		 * this is an unlikely scenario where the host is reaping
2715 		 * a descriptor which it already reaped just a while ago
2716 		 * but is yet to replenish it back to HW.
2717 		 * In this case host will dump the last 128 descriptors
2718 		 * including the software descriptor rx_desc and assert.
2719 		 */
2720 		if (qdf_unlikely(!rx_desc->in_use)) {
2721 			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
2722 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2723 						   ring_desc, rx_desc);
2724 			continue;
2725 		}
2726 
2727 		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
2728 		nbuf = rx_desc->nbuf;
2729 
2730 		status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
2731 							  ring_desc, rx_desc);
2732 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2733 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2734 			dp_info_rl("Rx error Nbuf %pK sanity check failure!",
2735 				   nbuf);
2736 			rx_desc->in_err_state = 1;
2737 			rx_desc->unmapped = 1;
2738 			rx_bufs_reaped[rx_desc->pool_id]++;
2739 			dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
2740 						    &tail[rx_desc->pool_id],
2741 						    rx_desc);
2742 
2743 			continue;
2744 		}
2745 
2746 		/* Get MPDU DESC info */
2747 		hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, &mpdu_desc_info);
2748 
2749 		if (qdf_likely(mpdu_desc_info.mpdu_flags &
2750 			       HAL_MPDU_F_QOS_CONTROL_VALID))
2751 			qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);
2752 
2753 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2754 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
2755 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
2756 		rx_desc->unmapped = 1;
2757 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2758 
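		/*
		 * Scatter-gather handling: the first buffer of an SG MSDU
		 * provides the full MSDU length, continuation buffers carry
		 * the continuation bit, and the terminal buffer (continuation
		 * bit clear) marks chfrag start/end and triggers hand-off of
		 * the accumulated list further below.
		 */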
2759 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support &&
2760 				 dp_rx_is_sg_formation_required(&wbm_err_info))) {
2761 			/* SG is detected from continuation bit */
2762 			msdu_continuation =
2763 				hal_rx_wbm_err_msdu_continuation_get(hal_soc,
2764 								     ring_desc);
2765 			if (msdu_continuation &&
2766 			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
2767 				/* Update length from first buffer in SG */
2768 				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
2769 					hal_rx_msdu_start_msdu_len_get(
2770 						soc->hal_soc,
2771 						qdf_nbuf_data(nbuf));
2772 				soc->wbm_sg_param.wbm_is_first_msdu_in_sg = true;
2773 			}
2774 
2775 			if (msdu_continuation) {
2776 				/* MSDU continued packets */
2777 				qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
2778 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2779 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
2780 			} else {
2781 				/* This is the terminal packet in SG */
2782 				qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
2783 				qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
2784 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2785 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
2786 				process_sg_buf = true;
2787 			}
2788 		}
2789 
2790 		/*
2791 		 * save the wbm desc info in nbuf TLV. We will need this
2792 		 * info when we do the actual nbuf processing
2793 		 */
2794 		wbm_err_info.pool_id = rx_desc->pool_id;
2795 		hal_rx_priv_info_set_in_tlv(soc->hal_soc,
2796 					    qdf_nbuf_data(nbuf),
2797 					    (uint8_t *)&wbm_err_info,
2798 					    sizeof(wbm_err_info));
2799 
2800 		rx_bufs_reaped[rx_desc->pool_id]++;
2801 
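		/*
		 * Continuation buffers are appended to the per-soc SG list;
		 * once the terminal buffer arrives the whole list is spliced
		 * into the delivery list (unless the buffer pool reclaims it
		 * for refill). Standalone buffers are appended directly.
		 */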
2802 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
2803 			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
2804 					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
2805 					  nbuf);
2806 			if (process_sg_buf) {
2807 				if (!dp_rx_buffer_pool_refill(
2808 					soc,
2809 					soc->wbm_sg_param.wbm_sg_nbuf_head,
2810 					rx_desc->pool_id))
2811 					DP_RX_MERGE_TWO_LIST(
2812 						nbuf_head, nbuf_tail,
2813 						soc->wbm_sg_param.wbm_sg_nbuf_head,
2814 						soc->wbm_sg_param.wbm_sg_nbuf_tail);
2815 				dp_rx_wbm_sg_list_last_msdu_war(soc);
2816 				dp_rx_wbm_sg_list_reset(soc);
2817 				process_sg_buf = false;
2818 			}
2819 		} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
2820 						     rx_desc->pool_id)) {
2821 			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
2822 		}
2823 
2824 		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
2825 						&tail[rx_desc->pool_id],
2826 						rx_desc);
2827 
2828 		/*
2829 		 * if continuation bit is set then we have MSDU spread
2830 		 * across multiple buffers, let us not decrement quota
2831 		 * till we reap all buffers of that MSDU.
2832 		 */
2833 		if (qdf_likely(!msdu_continuation))
2834 			quota -= 1;
2835 	}
2836 done:
2837 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2838 
2839 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2840 		if (rx_bufs_reaped[mac_id]) {
2841 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2842 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
2843 
2844 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2845 					rx_desc_pool, rx_bufs_reaped[mac_id],
2846 					&head[mac_id], &tail[mac_id], false);
2847 			rx_bufs_used += rx_bufs_reaped[mac_id];
2848 		}
2849 	}
2850 
2851 	nbuf = nbuf_head;
2852 	while (nbuf) {
2853 		struct dp_txrx_peer *txrx_peer;
2854 		struct dp_peer *peer;
2855 		uint16_t peer_id;
2856 		uint8_t err_code;
2857 		uint8_t *tlv_hdr;
2858 		uint32_t peer_meta_data;
2859 		dp_txrx_ref_handle txrx_ref_handle = NULL;
2860 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
2861 
2862 		/*
2863 		 * retrieve the wbm desc info from nbuf TLV, so we can
2864 		 * handle error cases appropriately
2865 		 */
2866 		hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr,
2867 					      (uint8_t *)&wbm_err_info,
2868 					      sizeof(wbm_err_info));
2869 
2870 		peer_meta_data = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
2871 							       rx_tlv_hdr);
2872 		peer_id = dp_rx_peer_metadata_peer_id_get(soc, peer_meta_data);
2873 		txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
2874 							   &txrx_ref_handle,
2875 							   DP_MOD_ID_RX_ERR);
2876 
2877 		if (!txrx_peer)
2878 			dp_info_rl("peer is null peer_id%u err_src%u err_rsn%u",
2879 				   peer_id, wbm_err_info.wbm_err_src,
2880 				   wbm_err_info.reo_psh_rsn);
2881 
2882 		/* Set queue_mapping in nbuf to 0 */
2883 		dp_set_rx_queue(nbuf, 0);
2884 
2885 		next = nbuf->next;
2886 
2887 		/*
2888 		 * Form the SG for msdu continued buffers;
2889 		 * QCN9000 has this support.
2890 		 */
2891 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
2892 			nbuf = dp_rx_sg_create(soc, nbuf);
2893 			next = nbuf->next;
2894 			/*
2895 			 * SG error handling is not done correctly,
2896 			 * drop SG frames for now.
2897 			 */
2898 			dp_rx_nbuf_free(nbuf);
2899 			dp_info_rl("scattered msdu dropped");
2900 			nbuf = next;
2901 			if (txrx_peer)
2902 				dp_txrx_peer_unref_delete(txrx_ref_handle,
2903 							  DP_MOD_ID_RX_ERR);
2904 			continue;
2905 		}
2906 
2907 		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
2908 			if (wbm_err_info.reo_psh_rsn
2909 					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {
2910 
2911 				DP_STATS_INC(soc,
2912 					rx.err.reo_error
2913 					[wbm_err_info.reo_err_code], 1);
2914 				/* increment @pdev level */
2915 				pool_id = wbm_err_info.pool_id;
2916 				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2917 				if (dp_pdev)
2918 					DP_STATS_INC(dp_pdev, err.reo_error,
2919 						     1);
2920 
2921 				switch (wbm_err_info.reo_err_code) {
2922 				/*
2923 				 * Handling for packets which have NULL REO
2924 				 * queue descriptor
2925 				 */
2926 				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2927 					pool_id = wbm_err_info.pool_id;
2928 					dp_rx_null_q_desc_handle(soc, nbuf,
2929 								 rx_tlv_hdr,
2930 								 pool_id,
2931 								 txrx_peer);
2932 					break;
2933 				/* TODO */
2934 				/* Add per error code accounting */
2935 				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2936 					if (txrx_peer)
2937 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2938 									  rx.err.jump_2k_err,
2939 									  1);
2940 
2941 					pool_id = wbm_err_info.pool_id;
2942 
2943 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2944 									   rx_tlv_hdr)) {
2945 						tid =
2946 						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2947 					}
2948 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2949 					hal_rx_msdu_start_msdu_len_get(
2950 						soc->hal_soc, rx_tlv_hdr);
2951 					nbuf->next = NULL;
2952 					dp_2k_jump_handle(soc, nbuf,
2953 							  rx_tlv_hdr,
2954 							  peer_id, tid);
2955 					break;
2956 				case HAL_REO_ERR_REGULAR_FRAME_OOR:
2957 					if (txrx_peer)
2958 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2959 									  rx.err.oor_err,
2960 									  1);
2961 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2962 									   rx_tlv_hdr)) {
2963 						tid =
2964 							hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2965 					}
2966 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2967 						hal_rx_msdu_start_msdu_len_get(
2968 						soc->hal_soc, rx_tlv_hdr);
2969 					nbuf->next = NULL;
2970 					dp_rx_oor_handle(soc, nbuf,
2971 							 peer_id,
2972 							 rx_tlv_hdr);
2973 					break;
2974 				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2975 				case HAL_REO_ERR_BAR_FRAME_OOR:
2976 					peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
2977 					if (peer) {
2978 						dp_rx_err_handle_bar(soc, peer,
2979 								     nbuf);
2980 						dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
2981 					}
2982 					dp_rx_nbuf_free(nbuf);
2983 					break;
2984 
2985 				case HAL_REO_ERR_PN_CHECK_FAILED:
2986 				case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
2987 					if (txrx_peer)
2988 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2989 									  rx.err.pn_err,
2990 									  1);
2991 					dp_rx_nbuf_free(nbuf);
2992 					break;
2993 
2994 				default:
2995 					dp_info_rl("Got pkt with REO ERROR: %d",
2996 						   wbm_err_info.reo_err_code);
2997 					dp_rx_nbuf_free(nbuf);
2998 				}
2999 			} else if (wbm_err_info.reo_psh_rsn
3000 					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
3001 				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
3002 						    rx_tlv_hdr,
3003 						    HAL_RX_WBM_ERR_SRC_REO);
3004 			} else {
3005 				/* should not enter here */
3006 				dp_rx_err_alert("invalid reo push reason %u",
3007 						wbm_err_info.reo_psh_rsn);
3008 				dp_rx_nbuf_free(nbuf);
3009 				qdf_assert_always(0);
3010 			}
3011 		} else if (wbm_err_info.wbm_err_src ==
3012 					HAL_RX_WBM_ERR_SRC_RXDMA) {
3013 			if (wbm_err_info.rxdma_psh_rsn
3014 					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
3015 				DP_STATS_INC(soc,
3016 					rx.err.rxdma_error
3017 					[wbm_err_info.rxdma_err_code], 1);
3018 				/* increment @pdev level */
3019 				pool_id = wbm_err_info.pool_id;
3020 				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
3021 				if (dp_pdev)
3022 					DP_STATS_INC(dp_pdev,
3023 						     err.rxdma_error, 1);
3024 
3025 				switch (wbm_err_info.rxdma_err_code) {
3026 				case HAL_RXDMA_ERR_UNENCRYPTED:
3027 
3028 				case HAL_RXDMA_ERR_WIFI_PARSE:
3029 					if (txrx_peer)
3030 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3031 									  rx.err.rxdma_wifi_parse_err,
3032 									  1);
3033 
3034 					pool_id = wbm_err_info.pool_id;
3035 					dp_rx_process_rxdma_err(soc, nbuf,
3036 								rx_tlv_hdr,
3037 								txrx_peer,
3038 								wbm_err_info.
3039 								rxdma_err_code,
3040 								pool_id);
3041 					break;
3042 
3043 				case HAL_RXDMA_ERR_TKIP_MIC:
3044 					dp_rx_process_mic_error(soc, nbuf,
3045 								rx_tlv_hdr,
3046 								txrx_peer);
3047 					if (txrx_peer)
3048 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3049 									  rx.err.mic_err,
3050 									  1);
3051 					break;
3052 
3053 				case HAL_RXDMA_ERR_DECRYPT:
3054 					/* All the TKIP-MIC failures are treated as Decrypt Errors
3055 					 * for QCN9224 Targets
3056 					 */
3057 					is_tkip_mic_err = hal_rx_msdu_end_is_tkip_mic_err(hal_soc, rx_tlv_hdr);
3058 
3059 					if (is_tkip_mic_err && txrx_peer) {
3060 						dp_rx_process_mic_error(soc, nbuf,
3061 									rx_tlv_hdr,
3062 									txrx_peer);
3063 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3064 									  rx.err.mic_err,
3065 									  1);
3066 						break;
3067 					}
3068 
3069 					if (txrx_peer) {
3070 						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3071 									  rx.err.decrypt_err,
3072 									  1);
3073 						dp_rx_nbuf_free(nbuf);
3074 						break;
3075 					}
3076 
3077 					if (!dp_handle_rxdma_decrypt_err()) {
3078 						dp_rx_nbuf_free(nbuf);
3079 						break;
3080 					}
3081 
3082 					pool_id = wbm_err_info.pool_id;
3083 					err_code = wbm_err_info.rxdma_err_code;
3084 					tlv_hdr = rx_tlv_hdr;
3085 					dp_rx_process_rxdma_err(soc, nbuf,
3086 								tlv_hdr, NULL,
3087 								err_code,
3088 								pool_id);
3089 					break;
3090 				case HAL_RXDMA_MULTICAST_ECHO:
3091 					if (txrx_peer)
3092 						DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
3093 									      rx.mec_drop, 1,
3094 									      qdf_nbuf_len(nbuf));
3095 					dp_rx_nbuf_free(nbuf);
3096 					break;
3097 				case HAL_RXDMA_UNAUTHORIZED_WDS:
3098 					pool_id = wbm_err_info.pool_id;
3099 					err_code = wbm_err_info.rxdma_err_code;
3100 					tlv_hdr = rx_tlv_hdr;
3101 					dp_rx_process_rxdma_err(soc, nbuf,
3102 								tlv_hdr,
3103 								txrx_peer,
3104 								err_code,
3105 								pool_id);
3106 					break;
3107 				default:
3108 					dp_rx_nbuf_free(nbuf);
3109 					dp_err_rl("RXDMA error %d",
3110 						  wbm_err_info.rxdma_err_code);
3111 				}
3112 			} else if (wbm_err_info.rxdma_psh_rsn
3113 					== HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
3114 				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
3115 						    rx_tlv_hdr,
3116 						    HAL_RX_WBM_ERR_SRC_RXDMA);
3117 			} else if (wbm_err_info.rxdma_psh_rsn
3118 					== HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
3119 				dp_rx_err_err("rxdma push reason %u",
3120 						wbm_err_info.rxdma_psh_rsn);
3121 				DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
3122 				dp_rx_nbuf_free(nbuf);
3123 			} else {
3124 				/* should not enter here */
3125 				dp_rx_err_alert("invalid rxdma push reason %u",
3126 						wbm_err_info.rxdma_psh_rsn);
3127 				dp_rx_nbuf_free(nbuf);
3128 				qdf_assert_always(0);
3129 			}
3130 		} else {
3131 			/* Should not come here */
3132 			qdf_assert(0);
3133 		}
3134 
3135 		if (txrx_peer)
3136 			dp_txrx_peer_unref_delete(txrx_ref_handle,
3137 						  DP_MOD_ID_RX_ERR);
3138 
3139 		nbuf = next;
3140 	}
3141 	return rx_bufs_used; /* Assume no scale factor for now */
3142 }
3143 
3144 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3145 
3146 /**
3147  * dup_desc_dbg() - dump and assert if duplicate rx desc found
3148  *
3149  * @soc: core DP main context
3150  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
3151  * @rx_desc: void pointer to rx descriptor
3152  *
3153  * Return: void
3154  */
3155 static void dup_desc_dbg(struct dp_soc *soc,
3156 			 hal_rxdma_desc_t rxdma_dst_ring_desc,
3157 			 void *rx_desc)
3158 {
3159 	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
3160 	dp_rx_dump_info_and_assert(
3161 			soc,
3162 			soc->rx_rel_ring.hal_srng,
3163 			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
3164 			rx_desc);
3165 }
3166 
3167 /**
3168  * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
3169  *
3170  * @soc: core DP main context
3171  * @mac_id: mac id which is one of 3 mac_ids
3172  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
3173  * @head: head of descs list to be freed
3174  * @tail: tail of descs list to be freed
3175  *
3176  * Return: number of msdu in MPDU to be popped
3177  */
3178 static inline uint32_t
3179 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
3180 	hal_rxdma_desc_t rxdma_dst_ring_desc,
3181 	union dp_rx_desc_list_elem_t **head,
3182 	union dp_rx_desc_list_elem_t **tail)
3183 {
3184 	void *rx_msdu_link_desc;
3185 	qdf_nbuf_t msdu;
3186 	qdf_nbuf_t last;
3187 	struct hal_rx_msdu_list msdu_list;
3188 	uint16_t num_msdus;
3189 	struct hal_buf_info buf_info;
3190 	uint32_t rx_bufs_used = 0;
3191 	uint32_t msdu_cnt;
3192 	uint32_t i;
3193 	uint8_t push_reason;
3194 	uint8_t rxdma_error_code = 0;
3195 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
3196 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
3197 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
3198 	hal_rxdma_desc_t ring_desc;
3199 	struct rx_desc_pool *rx_desc_pool;
3200 
3201 	if (!pdev) {
3202 		dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
3203 				soc, mac_id);
3204 		return rx_bufs_used;
3205 	}
3206 
3207 	msdu = 0;
3208 
3209 	last = NULL;
3210 
3211 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
3212 				     &buf_info, &msdu_cnt);
3213 
3214 	push_reason =
3215 		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
3216 	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
3217 		rxdma_error_code =
3218 			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
3219 	}
3220 
3221 	do {
3222 		rx_msdu_link_desc =
3223 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
3224 
3225 		qdf_assert_always(rx_msdu_link_desc);
3226 
3227 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
3228 				     &msdu_list, &num_msdus);
3229 
3230 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
3231 			/* If the msdus belong to an NSS offloaded radio &&
3232 			 * the rbm is not SW1_BM, then return the msdu_link
3233 			 * descriptor without freeing the msdus (nbufs). Let
3234 			 * these buffers be given to the NSS completion ring
3235 			 * for NSS to free them.
3236 			 * Else iterate through the msdu link desc list and
3237 			 * free each msdu in the list.
3238 			 */
3239 			if (msdu_list.rbm[0] !=
3240 				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) &&
3241 			    wlan_cfg_get_dp_pdev_nss_enabled(
3242 							pdev->wlan_cfg_ctx))
3243 				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
3244 			else {
3245 				for (i = 0; i < num_msdus; i++) {
3246 					struct dp_rx_desc *rx_desc =
3247 						soc->arch_ops.
3248 						dp_rx_desc_cookie_2_va(
3249 							soc,
3250 							msdu_list.sw_cookie[i]);
3251 					qdf_assert_always(rx_desc);
3252 					msdu = rx_desc->nbuf;
3253 					/*
3254 					 * this is an unlikely scenario
3255 					 * where the host is reaping
3256 					 * a descriptor which
3257 					 * it already reaped just a while ago
3258 					 * but is yet to replenish
3259 					 * it back to HW.
3260 					 * In this case host will dump
3261 					 * the last 128 descriptors
3262 					 * including the software descriptor
3263 					 * rx_desc and assert.
3264 					 */
3265 					ring_desc = rxdma_dst_ring_desc;
3266 					if (qdf_unlikely(!rx_desc->in_use)) {
3267 						dup_desc_dbg(soc,
3268 							     ring_desc,
3269 							     rx_desc);
3270 						continue;
3271 					}
3272 
3273 					if (rx_desc->unmapped == 0) {
3274 						rx_desc_pool =
3275 							&soc->rx_desc_buf[rx_desc->pool_id];
3276 						dp_ipa_rx_buf_smmu_mapping_lock(soc);
3277 						dp_rx_nbuf_unmap_pool(soc,
3278 								      rx_desc_pool,
3279 								      msdu);
3280 						rx_desc->unmapped = 1;
3281 						dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3282 					}
3283 
3284 					dp_rx_err_debug("%pK: msdu_nbuf=%pK ",
3285 							soc, msdu);
3286 
3287 					dp_rx_buffer_pool_nbuf_free(soc, msdu,
3288 							rx_desc->pool_id);
3289 					rx_bufs_used++;
3290 					dp_rx_add_to_free_desc_list(head,
3291 						tail, rx_desc);
3292 				}
3293 			}
3294 		} else {
3295 			rxdma_error_code = HAL_RXDMA_ERR_WAR;
3296 		}
3297 
3298 		/*
3299 		 * Store the current link buffer into the local structure
3300 		 * to be used for release purposes.
3301 		 */
3302 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3303 					     buf_info.paddr, buf_info.sw_cookie,
3304 					     buf_info.rbm);
3305 
3306 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3307 					      &buf_info);
3308 		dp_rx_link_desc_return_by_addr(soc,
3309 					       (hal_buff_addrinfo_t)
3310 						rx_link_buf_info,
3311 						bm_action);
3312 	} while (buf_info.paddr);
3313 
3314 	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
3315 	if (pdev)
3316 		DP_STATS_INC(pdev, err.rxdma_error, 1);
3317 
3318 	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
3319 		dp_rx_err_err("%pK: Packet received with Decrypt error", soc);
3320 	}
3321 
3322 	return rx_bufs_used;
3323 }
3324 
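/*
 * Overview: dp_rxdma_err_process() drains the RXDMA error destination ring
 * for the given mac_id, popping the MSDUs of each link descriptor through
 * dp_rx_err_mpdu_pop() and replenishing the reaped buffers back to the
 * appropriate refill ring. Returns the number of buffers processed.
 */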
3325 uint32_t
3326 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3327 		     uint32_t mac_id, uint32_t quota)
3328 {
3329 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
3330 	hal_rxdma_desc_t rxdma_dst_ring_desc;
3331 	hal_soc_handle_t hal_soc;
3332 	void *err_dst_srng;
3333 	union dp_rx_desc_list_elem_t *head = NULL;
3334 	union dp_rx_desc_list_elem_t *tail = NULL;
3335 	struct dp_srng *dp_rxdma_srng;
3336 	struct rx_desc_pool *rx_desc_pool;
3337 	uint32_t work_done = 0;
3338 	uint32_t rx_bufs_used = 0;
3339 
3340 	if (!pdev)
3341 		return 0;
3342 
3343 	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;
3344 
3345 	if (!err_dst_srng) {
3346 		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
3347 			      soc, err_dst_srng);
3348 		return 0;
3349 	}
3350 
3351 	hal_soc = soc->hal_soc;
3352 
3353 	qdf_assert(hal_soc);
3354 
3355 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
3356 		dp_rx_err_err("%pK: HAL RXDMA err dest ring access failed -- %pK",
3357 			      soc, err_dst_srng);
3358 		return 0;
3359 	}
3360 
3361 	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
3362 		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
3363 
3364 			rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
3365 						rxdma_dst_ring_desc,
3366 						&head, &tail);
3367 	}
3368 
3369 	dp_srng_access_end(int_ctx, soc, err_dst_srng);
3370 
3371 	if (rx_bufs_used) {
3372 		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3373 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3374 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
3375 		} else {
3376 			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
3377 			rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
3378 		}
3379 
3380 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3381 			rx_desc_pool, rx_bufs_used, &head, &tail, false);
3382 
3383 		work_done += rx_bufs_used;
3384 	}
3385 
3386 	return work_done;
3387 }
3388 
3389 #ifndef QCA_HOST_MODE_WIFI_DISABLED
3390 
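/*
 * dp_wbm_int_err_mpdu_pop() - walk the MSDU link descriptor chain referenced
 * by rxdma_dst_ring_desc, unmap and free each valid MSDU nbuf, add the
 * corresponding rx descriptors to the free list, and return every link
 * descriptor to the idle list.
 */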
3391 static inline void
3392 dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
3393 			hal_rxdma_desc_t rxdma_dst_ring_desc,
3394 			union dp_rx_desc_list_elem_t **head,
3395 			union dp_rx_desc_list_elem_t **tail,
3396 			uint32_t *rx_bufs_used)
3397 {
3398 	void *rx_msdu_link_desc;
3399 	qdf_nbuf_t msdu;
3400 	qdf_nbuf_t last;
3401 	struct hal_rx_msdu_list msdu_list;
3402 	uint16_t num_msdus;
3403 	struct hal_buf_info buf_info;
3404 	uint32_t msdu_cnt, i;
3405 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
3406 	struct rx_desc_pool *rx_desc_pool;
3407 	struct dp_rx_desc *rx_desc;
3408 
3409 	msdu = 0;
3410 
3411 	last = NULL;
3412 
3413 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
3414 				     &buf_info, &msdu_cnt);
3415 
3416 	do {
3417 		rx_msdu_link_desc =
3418 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
3419 
3420 		if (!rx_msdu_link_desc) {
3421 			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
3422 			break;
3423 		}
3424 
3425 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
3426 				     &msdu_list, &num_msdus);
3427 
3428 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
3429 			for (i = 0; i < num_msdus; i++) {
3430 				if (!dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[i])) {
3431 					dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x",
3432 							  msdu_list.sw_cookie[i]);
3433 					continue;
3434 				}
3435 
3436 				rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3437 							soc,
3438 							msdu_list.sw_cookie[i]);
3439 				qdf_assert_always(rx_desc);
3440 				rx_desc_pool =
3441 					&soc->rx_desc_buf[rx_desc->pool_id];
3442 				msdu = rx_desc->nbuf;
3443 
3444 				/*
3445 				 * This is an unlikely scenario where the host is reaping
3446 				 * a descriptor which it already reaped just a while ago
3447 				 * but has not yet replenished back to HW.
3448 				 */
3449 				if (qdf_unlikely(!rx_desc->in_use) ||
3450 				    qdf_unlikely(!msdu)) {
3451 					dp_rx_err_info_rl("Reaping rx_desc not in use!");
3452 					continue;
3453 				}
3454 
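				/*
				 * Unmap the nbuf (under the IPA SMMU lock)
				 * before returning it to the buffer pool.
				 */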
3455 				dp_ipa_rx_buf_smmu_mapping_lock(soc);
3456 				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
3457 				rx_desc->unmapped = 1;
3458 				dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3459 
3460 				dp_rx_buffer_pool_nbuf_free(soc, msdu,
3461 							    rx_desc->pool_id);
3462 				rx_bufs_used[rx_desc->pool_id]++;
3463 				dp_rx_add_to_free_desc_list(head,
3464 							    tail, rx_desc);
3465 			}
3466 		}
3467 
3468 		/*
3469 		 * Store the current link buffer into the local structure
3470 		 * to be used when releasing it back to the idle list.
3471 		 */
3472 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3473 					     buf_info.paddr, buf_info.sw_cookie,
3474 					     buf_info.rbm);
3475 
3476 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3477 					      &buf_info);
3478 		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
3479 					rx_link_buf_info,
3480 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
3481 	} while (buf_info.paddr);
3482 }
3483 
3484 /**
3485  * dp_handle_wbm_internal_error() - handles the wbm_internal_error case
3486  *
3487  * @soc: core DP main context
3488  * @hal_desc: hal descriptor
3489  * @buf_type: indicates if the buffer is of type link desc or msdu
3490  *
3491  * wbm_internal_error is seen in the following scenarios:
3492  *
3493  * 1. Null pointers detected in WBM_RELEASE_RING descriptors
3494  * 2. Null pointers detected during delinking process
3495  *
3496  * Some null pointer cases:
3497  *
3498  * a. MSDU buffer pointer is NULL
3499  * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
3500  * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
3501  *
3502  * Return: None
3503  */
3504 void
3505 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
3506 			     uint32_t buf_type)
3507 {
3508 	struct hal_buf_info buf_info = {0};
3509 	struct dp_rx_desc *rx_desc = NULL;
3510 	struct rx_desc_pool *rx_desc_pool;
3511 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0};
3512 	union dp_rx_desc_list_elem_t *head = NULL;
3513 	union dp_rx_desc_list_elem_t *tail = NULL;
3514 	uint8_t pool_id;
3515 	uint8_t mac_id;
3516 
3517 	hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info);
3518 
3519 	if (!buf_info.paddr) {
3520 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
3521 		return;
3522 	}
3523 
3524 	/* buffer_addr_info is the first element of ring_desc */
3525 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc,
3526 				  &buf_info);
3527 	pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie);
3528 
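	/*
	 * BUFFER type: the release descriptor points directly at an MSDU
	 * buffer. DESC type: it points at an MSDU link descriptor whose
	 * MSDUs are popped and freed individually.
	 */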
3529 	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
3530 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
3531 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3532 							soc,
3533 							buf_info.sw_cookie);
3534 
3535 		if (rx_desc && rx_desc->nbuf) {
3536 			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
3537 			dp_ipa_rx_buf_smmu_mapping_lock(soc);
3538 			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
3539 					      rx_desc->nbuf);
3540 			rx_desc->unmapped = 1;
3541 			dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3542 
3543 			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
3544 						    rx_desc->pool_id);
3545 			dp_rx_add_to_free_desc_list(&head,
3546 						    &tail,
3547 						    rx_desc);
3548 
3549 			rx_bufs_reaped[rx_desc->pool_id]++;
3550 		}
3551 	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
3552 		dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc,
3553 					&head, &tail, rx_bufs_reaped);
3554 	}
3555 
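	/* Replenish the refill ring of every pool that had buffers reaped */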
3556 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
3557 		struct rx_desc_pool *rx_desc_pool;
3558 		struct dp_srng *dp_rxdma_srng;
3559 
3560 		if (!rx_bufs_reaped[mac_id])
3561 			continue;
3562 
3563 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
3564 		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3565 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
3566 
3567 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3568 					rx_desc_pool,
3569 					rx_bufs_reaped[mac_id],
3570 					&head, &tail, false);
3571 	}
3572 }
3573 
3574 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3575