xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c (revision 93830f424d9ddc2ed54b338975b4f4fb38ca46e6)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #include "dp_types.h"
22 #include "dp_rx.h"
23 #include "dp_tx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 #include "hal_api.h"
27 #include "qdf_trace.h"
28 #include "qdf_nbuf.h"
29 #include "dp_rx_defrag.h"
30 #include "dp_ipa.h"
31 #include "dp_internal.h"
32 #ifdef WIFI_MONITOR_SUPPORT
33 #include "dp_htt.h"
34 #include <dp_mon.h>
35 #endif
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
40 #include "qdf_net_types.h"
41 #include "dp_rx_buffer_pool.h"
42 
43 #define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
44 #define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
45 #define dp_rx_err_info(params...) \
46 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
47 #define dp_rx_err_info_rl(params...) \
48 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
49 #define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params)
50 
51 #ifndef QCA_HOST_MODE_WIFI_DISABLED
52 
53 
54 /* Max regular Rx packet routing error */
55 #define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20
56 #define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10
57 #define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* micro seconds */
58 
59 #ifdef FEATURE_MEC
60 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
61 			    struct dp_txrx_peer *txrx_peer,
62 			    uint8_t *rx_tlv_hdr,
63 			    qdf_nbuf_t nbuf)
64 {
65 	struct dp_vdev *vdev = txrx_peer->vdev;
66 	struct dp_pdev *pdev = vdev->pdev;
67 	struct dp_mec_entry *mecentry = NULL;
68 	struct dp_ast_entry *ase = NULL;
69 	uint16_t sa_idx = 0;
70 	uint8_t *data;
71 	/*
72 	 * Multicast Echo Check is required only if vdev is STA and
73 	 * received pkt is a multicast/broadcast pkt. otherwise
74 	 * skip the MEC check.
75 	 */
76 	if (vdev->opmode != wlan_op_mode_sta)
77 		return false;
78 	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
79 		return false;
80 
81 	data = qdf_nbuf_data(nbuf);
82 
83 	/*
84 	 * if the received pkts src mac addr matches with vdev
85 	 * mac address then drop the pkt as it is looped back
86 	 */
87 	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
88 			  vdev->mac_addr.raw,
89 			  QDF_MAC_ADDR_SIZE)))
90 		return true;
91 
92 	/*
93 	 * In case of qwrap isolation mode, donot drop loopback packets.
94 	 * In isolation mode, all packets from the wired stations need to go
95 	 * to rootap and loop back to reach the wireless stations and
96 	 * vice-versa.
97 	 */
98 	if (qdf_unlikely(vdev->isolation_vdev))
99 		return false;
100 
101 	/*
102 	 * if the received pkts src mac addr matches with the
103 	 * wired PCs MAC addr which is behind the STA or with
104 	 * wireless STAs MAC addr which are behind the Repeater,
105 	 * then drop the pkt as it is looped back
106 	 */
107 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
108 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
109 
110 		if ((sa_idx < 0) ||
111 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
112 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
113 				  "invalid sa_idx: %d", sa_idx);
114 			qdf_assert_always(0);
115 		}
116 
117 		qdf_spin_lock_bh(&soc->ast_lock);
118 		ase = soc->ast_table[sa_idx];
119 
120 		/*
121 		 * this check was not needed since MEC is not dependent on AST,
122 		 * but if we dont have this check SON has some issues in
123 		 * dual backhaul scenario. in APS SON mode, client connected
124 		 * to RE 2G and sends multicast packets. the RE sends it to CAP
125 		 * over 5G backhaul. the CAP loopback it on 2G to RE.
126 		 * On receiving in 2G STA vap, we assume that client has roamed
127 		 * and kickout the client.
128 		 */
129 		if (ase && (ase->peer_id != txrx_peer->peer_id)) {
130 			qdf_spin_unlock_bh(&soc->ast_lock);
131 			goto drop;
132 		}
133 
134 		qdf_spin_unlock_bh(&soc->ast_lock);
135 	}
136 
137 	qdf_spin_lock_bh(&soc->mec_lock);
138 
139 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
140 						   &data[QDF_MAC_ADDR_SIZE]);
141 	if (!mecentry) {
142 		qdf_spin_unlock_bh(&soc->mec_lock);
143 		return false;
144 	}
145 
146 	qdf_spin_unlock_bh(&soc->mec_lock);
147 
148 drop:
149 	dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT,
150 		       soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));
151 
152 	return true;
153 }
154 #endif
155 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
156 
157 void dp_rx_link_desc_refill_duplicate_check(
158 				struct dp_soc *soc,
159 				struct hal_buf_info *buf_info,
160 				hal_buff_addrinfo_t ring_buf_info)
161 {
162 	struct hal_buf_info current_link_desc_buf_info = { 0 };
163 
164 	/* do duplicate link desc address check */
165 	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
166 					  &current_link_desc_buf_info);
167 
168 	/*
169 	 * TODO - Check if the hal soc api call can be removed
170 	 * since the cookie is just used for print.
171 	 * buffer_addr_info is the first element of ring_desc
172 	 */
173 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
174 				  (uint32_t *)ring_buf_info,
175 				  &current_link_desc_buf_info);
176 
177 	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
178 			 buf_info->paddr)) {
179 		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
180 			   current_link_desc_buf_info.paddr,
181 			   current_link_desc_buf_info.sw_cookie);
182 		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
183 	}
184 	*buf_info = current_link_desc_buf_info;
185 }
186 
/*
 * Return a link descriptor to the buffer manager through the SW2WBM
 * release ring; @bm_action tells the BM what to do with it (e.g.
 * HAL_BM_ACTION_PUT_IN_IDLE_LIST).
 *
 * Return: QDF_STATUS_SUCCESS when a release-ring entry was programmed,
 * QDF_STATUS_E_FAILURE otherwise (ring not initialized, ring access
 * failure, or ring full).
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc);
		return status;
	}

	/* do duplicate link desc address check */
	dp_rx_link_desc_refill_duplicate_check(
				soc,
				&soc->last_op_info.wbm_rel_link_desc,
				link_desc_addr);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK",
			      soc, wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM)*/
		hal_rx_msdu_link_desc_set(hal_soc,
				src_srng_desc, link_desc_addr, bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		/*
		 * Release ring full: dump HP/TP state for debug and assert,
		 * since the link descriptor cannot be returned here.
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);

		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
			   srng->ring_id,
			   soc->stats.rx.err.hal_ring_access_full_fail);
		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			   *srng->u.src_ring.hp_addr,
			   srng->u.src_ring.reap_hp,
			   *srng->u.src_ring.tp_addr,
			   srng->u.src_ring.cached_tp);
		QDF_BUG(0);
	}
done:
	/* access_end must always pair with the successful access_start */
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;

}
247 
248 qdf_export_symbol(dp_rx_link_desc_return_by_addr);
249 
250 QDF_STATUS
251 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
252 		       uint8_t bm_action)
253 {
254 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
255 
256 	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
257 }
258 
259 #ifndef QCA_HOST_MODE_WIFI_DISABLED
260 
261 /**
262  * dp_rx_msdus_drop() - Drops all MSDU's per MPDU
263  *
264  * @soc: core txrx main context
265  * @ring_desc: opaque pointer to the REO error ring descriptor
266  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
267  * @mac_id: mac ID
268  * @quota: No. of units (packets) that can be serviced in one shot.
269  *
270  * This function is used to drop all MSDU in an MPDU
271  *
272  * Return: uint32_t: No. of elements processed
273  */
274 static uint32_t
275 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
276 		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
277 		 uint8_t *mac_id,
278 		 uint32_t quota)
279 {
280 	uint32_t rx_bufs_used = 0;
281 	void *link_desc_va;
282 	struct hal_buf_info buf_info;
283 	struct dp_pdev *pdev;
284 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
285 	int i;
286 	uint8_t *rx_tlv_hdr;
287 	uint32_t tid;
288 	struct rx_desc_pool *rx_desc_pool;
289 	struct dp_rx_desc *rx_desc;
290 	/* First field in REO Dst ring Desc is buffer_addr_info */
291 	void *buf_addr_info = ring_desc;
292 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
293 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
294 
295 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info);
296 
297 	/* buffer_addr_info is the first element of ring_desc */
298 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
299 				  (uint32_t *)ring_desc,
300 				  &buf_info);
301 
302 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
303 	if (!link_desc_va) {
304 		dp_rx_err_debug("link desc va is null, soc %pk", soc);
305 		return rx_bufs_used;
306 	}
307 
308 more_msdu_link_desc:
309 	/* No UNMAP required -- this is "malloc_consistent" memory */
310 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
311 			     &mpdu_desc_info->msdu_count);
312 
313 	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
314 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
315 						soc, msdu_list.sw_cookie[i]);
316 
317 		qdf_assert_always(rx_desc);
318 
319 		/* all buffers from a MSDU link link belong to same pdev */
320 		*mac_id = rx_desc->pool_id;
321 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
322 		if (!pdev) {
323 			dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
324 					soc, rx_desc->pool_id);
325 			return rx_bufs_used;
326 		}
327 
328 		if (!dp_rx_desc_check_magic(rx_desc)) {
329 			dp_rx_err_err("%pK: Invalid rx_desc cookie=%d",
330 				      soc, msdu_list.sw_cookie[i]);
331 			return rx_bufs_used;
332 		}
333 
334 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
335 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
336 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
337 		rx_desc->unmapped = 1;
338 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
339 
340 		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);
341 
342 		rx_bufs_used++;
343 		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
344 						rx_desc->rx_buf_start);
345 		dp_rx_err_err("%pK: Packet received with PN error for tid :%d",
346 			      soc, tid);
347 
348 		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
349 		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
350 			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);
351 
352 		dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info,
353 				      rx_desc->nbuf,
354 				      QDF_TX_RX_STATUS_DROP, true);
355 		/* Just free the buffers */
356 		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);
357 
358 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
359 					    &pdev->free_list_tail, rx_desc);
360 	}
361 
362 	/*
363 	 * If the msdu's are spread across multiple link-descriptors,
364 	 * we cannot depend solely on the msdu_count(e.g., if msdu is
365 	 * spread across multiple buffers).Hence, it is
366 	 * necessary to check the next link_descriptor and release
367 	 * all the msdu's that are part of it.
368 	 */
369 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
370 			link_desc_va,
371 			&next_link_desc_addr_info);
372 
373 	if (hal_rx_is_buf_addr_info_valid(
374 				&next_link_desc_addr_info)) {
375 		/* Clear the next link desc info for the current link_desc */
376 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
377 
378 		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
379 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
380 		hal_rx_buffer_addr_info_get_paddr(
381 				&next_link_desc_addr_info,
382 				&buf_info);
383 		/* buffer_addr_info is the first element of ring_desc */
384 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
385 					  (uint32_t *)&next_link_desc_addr_info,
386 					  &buf_info);
387 		cur_link_desc_addr_info = next_link_desc_addr_info;
388 		buf_addr_info = &cur_link_desc_addr_info;
389 
390 		link_desc_va =
391 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
392 
393 		goto more_msdu_link_desc;
394 	}
395 	quota--;
396 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
397 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
398 	return rx_bufs_used;
399 }
400 
401 /**
402  * dp_rx_pn_error_handle() - Handles PN check errors
403  *
404  * @soc: core txrx main context
405  * @ring_desc: opaque pointer to the REO error ring descriptor
406  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
407  * @mac_id: mac ID
408  * @quota: No. of units (packets) that can be serviced in one shot.
409  *
410  * This function implements PN error handling
411  * If the peer is configured to ignore the PN check errors
412  * or if DP feels, that this frame is still OK, the frame can be
413  * re-injected back to REO to use some of the other features
414  * of REO e.g. duplicate detection/routing to other cores
415  *
416  * Return: uint32_t: No. of elements processed
417  */
418 static uint32_t
419 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
420 		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
421 		      uint8_t *mac_id,
422 		      uint32_t quota)
423 {
424 	uint16_t peer_id;
425 	uint32_t rx_bufs_used = 0;
426 	struct dp_txrx_peer *txrx_peer;
427 	bool peer_pn_policy = false;
428 	dp_txrx_ref_handle txrx_ref_handle = NULL;
429 
430 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
431 					       mpdu_desc_info->peer_meta_data);
432 
433 
434 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
435 						   &txrx_ref_handle,
436 						   DP_MOD_ID_RX_ERR);
437 
438 	if (qdf_likely(txrx_peer)) {
439 		/*
440 		 * TODO: Check for peer specific policies & set peer_pn_policy
441 		 */
442 		dp_err_rl("discard rx due to PN error for peer  %pK",
443 			  txrx_peer);
444 
445 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
446 	}
447 	dp_rx_err_err("%pK: Packet received with PN error", soc);
448 
449 	/* No peer PN policy -- definitely drop */
450 	if (!peer_pn_policy)
451 		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
452 						mpdu_desc_info,
453 						mac_id, quota);
454 
455 	return rx_bufs_used;
456 }
457 
458 #ifdef DP_RX_DELIVER_ALL_OOR_FRAMES
459 /**
460  * dp_rx_deliver_oor_frame() - deliver OOR frames to stack
461  * @soc: Datapath soc handler
462  * @txrx_peer: pointer to DP peer
463  * @nbuf: pointer to the skb of RX frame
464  * @frame_mask: the mask for special frame needed
465  * @rx_tlv_hdr: start of rx tlv header
466  *
467  * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
468  * single nbuf is expected.
469  *
470  * return: true - nbuf has been delivered to stack, false - not.
471  */
472 static bool
473 dp_rx_deliver_oor_frame(struct dp_soc *soc,
474 			struct dp_txrx_peer *txrx_peer,
475 			qdf_nbuf_t nbuf, uint32_t frame_mask,
476 			uint8_t *rx_tlv_hdr)
477 {
478 	uint32_t l2_hdr_offset = 0;
479 	uint16_t msdu_len = 0;
480 	uint32_t skip_len;
481 
482 	l2_hdr_offset =
483 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
484 
485 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
486 		skip_len = l2_hdr_offset;
487 	} else {
488 		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
489 		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
490 		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
491 	}
492 
493 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
494 	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
495 	qdf_nbuf_pull_head(nbuf, skip_len);
496 	qdf_nbuf_set_exc_frame(nbuf, 1);
497 
498 	dp_info_rl("OOR frame, mpdu sn 0x%x",
499 		   hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
500 	dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL);
501 	return true;
502 }
503 
504 #else
505 static bool
506 dp_rx_deliver_oor_frame(struct dp_soc *soc,
507 			struct dp_txrx_peer *txrx_peer,
508 			qdf_nbuf_t nbuf, uint32_t frame_mask,
509 			uint8_t *rx_tlv_hdr)
510 {
511 	return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
512 					   rx_tlv_hdr);
513 }
514 #endif
515 
516 /**
517  * dp_rx_oor_handle() - Handles the msdu which is OOR error
518  *
519  * @soc: core txrx main context
520  * @nbuf: pointer to msdu skb
521  * @peer_id: dp peer ID
522  * @rx_tlv_hdr: start of rx tlv header
523  *
524  * This function process the msdu delivered from REO2TCL
525  * ring with error type OOR
526  *
527  * Return: None
528  */
529 static void
530 dp_rx_oor_handle(struct dp_soc *soc,
531 		 qdf_nbuf_t nbuf,
532 		 uint16_t peer_id,
533 		 uint8_t *rx_tlv_hdr)
534 {
535 	uint32_t frame_mask = wlan_cfg_get_special_frame_cfg(soc->wlan_cfg_ctx);
536 
537 	struct dp_txrx_peer *txrx_peer = NULL;
538 	dp_txrx_ref_handle txrx_ref_handle = NULL;
539 
540 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
541 						   &txrx_ref_handle,
542 						   DP_MOD_ID_RX_ERR);
543 	if (!txrx_peer) {
544 		dp_info_rl("peer not found");
545 		goto free_nbuf;
546 	}
547 
548 	if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask,
549 				    rx_tlv_hdr)) {
550 		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
551 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
552 		return;
553 	}
554 
555 free_nbuf:
556 	if (txrx_peer)
557 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
558 
559 	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
560 	dp_rx_nbuf_free(nbuf);
561 }
562 
563 /**
564  * dp_rx_err_nbuf_pn_check() - Check if the PN number of this current packet
565  *				is a monotonous increment of packet number
566  *				from the previous successfully re-ordered
567  *				frame.
568  * @soc: Datapath SOC handle
569  * @ring_desc: REO ring descriptor
570  * @nbuf: Current packet
571  *
572  * Return: QDF_STATUS_SUCCESS, if the pn check passes, else QDF_STATUS_E_FAILURE
573  */
574 static inline QDF_STATUS
575 dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc,
576 			qdf_nbuf_t nbuf)
577 {
578 	uint64_t prev_pn, curr_pn[2];
579 
580 	if (!hal_rx_encryption_info_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
581 		return QDF_STATUS_SUCCESS;
582 
583 	hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn);
584 	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), curr_pn);
585 
586 	if (curr_pn[0] > prev_pn)
587 		return QDF_STATUS_SUCCESS;
588 
589 	return QDF_STATUS_E_FAILURE;
590 }
591 
#ifdef WLAN_SKIP_BAR_UPDATE
/* Build-time knob: BAR-triggered rx-tid window updates are disabled */
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
			  struct dp_peer *peer,
			  qdf_nbuf_t nbuf)
{
	dp_info_rl("BAR update to H.W is skipped");
	DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
}
#else
/*
 * Parse a received BlockAckReq (BAR) frame and move the rx reorder
 * window of the matching tid to the BAR's starting sequence number.
 * Non-BAR frames are ignored.
 */
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
			  struct dp_peer *peer,
			  qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	unsigned char type, subtype;
	uint16_t start_seq_num;
	uint32_t tid;
	QDF_STATUS status;
	struct ieee80211_frame_bar *bar;

	/*
	 * 1. Is this a BAR frame. If not Discard it.
	 * 2. If it is, get the peer id, tid, ssn
	 * 2a Do a tid update
	 */

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	/* the 802.11 header starts right after the rx TLVs */
	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size);

	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (!(type == IEEE80211_FC0_TYPE_CTL &&
	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
		dp_err_rl("Not a BAR frame!");
		return;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	qdf_assert_always(tid < DP_MAX_TIDS);

	/* SSN lives in the upper 12 bits of the sequence-control field */
	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

	dp_info_rl("tid %u window_size %u start_seq_num %u",
		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

	/*
	 * NOTE(review): last argument true presumably flags a BAR-driven
	 * update -- confirm against dp_rx_tid_update_wifi3()'s signature.
	 */
	status = dp_rx_tid_update_wifi3(peer, tid,
					peer->rx_tid[tid].ba_win_size,
					start_seq_num,
					true);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err_rl("failed to handle bar frame update rx tid");
		DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
	} else {
		DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
	}
}
#endif
652 
653 /**
654  * _dp_rx_bar_frame_handle(): Core of the BAR frame handling
655  * @soc: Datapath SoC handle
656  * @nbuf: packet being processed
657  * @mpdu_desc_info: mpdu desc info for the current packet
658  * @tid: tid on which the packet arrived
659  * @err_status: Flag to indicate if REO encountered an error while routing this
660  *		frame
661  * @error_code: REO error code
662  *
663  * Return: None
664  */
665 static void
666 _dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
667 			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
668 			uint32_t tid, uint8_t err_status, uint32_t error_code)
669 {
670 	uint16_t peer_id;
671 	struct dp_peer *peer;
672 
673 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
674 					       mpdu_desc_info->peer_meta_data);
675 	peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
676 	if (!peer)
677 		return;
678 
679 	dp_info_rl("BAR frame: "
680 		" peer_id = %d"
681 		" tid = %u"
682 		" SSN = %d"
683 		" error status = %d",
684 		peer->peer_id,
685 		tid,
686 		mpdu_desc_info->mpdu_seq,
687 		err_status);
688 
689 	if (err_status == HAL_REO_ERROR_DETECTED) {
690 		switch (error_code) {
691 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
692 		case HAL_REO_ERR_BAR_FRAME_OOR:
693 			dp_rx_err_handle_bar(soc, peer, nbuf);
694 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
695 			break;
696 		default:
697 			DP_STATS_INC(soc, rx.bar_frame, 1);
698 		}
699 	}
700 
701 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
702 }
703 
/**
 * dp_rx_bar_frame_handle() - Function to handle err BAR frames
 * @soc: core DP main context
 * @ring_desc: Hal ring desc
 * @rx_desc: dp rx desc
 * @mpdu_desc_info: mpdu desc info
 * @err_status: error status
 * @err_code: error code
 *
 * Handle the error BAR frames received. Ensure the SOC level
 * stats are updated based on the REO error code. The BAR frames
 * are further processed by updating the Rx tids with the start
 * sequence number (SSN) and BA window size. Desc is returned
 * to the free desc list
 *
 * Return: none
 */
static void
dp_rx_bar_frame_handle(struct dp_soc *soc,
		       hal_ring_desc_t ring_desc,
		       struct dp_rx_desc *rx_desc,
		       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		       uint8_t err_status,
		       uint32_t err_code)
{
	qdf_nbuf_t nbuf;
	struct dp_pdev *pdev;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	nbuf = rx_desc->nbuf;
	/* Unmap the buffer before reading the rx TLVs from it */
	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
	rx_desc->unmapped = 1;
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
					rx_tlv_hdr);
	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);

	if (!pdev) {
		/*
		 * NOTE(review): this early return leaves the nbuf unmapped
		 * but neither freed nor its link desc returned -- looks like
		 * a leak on this (unexpected) error path; confirm upstream.
		 */
		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
				soc, rx_desc->pool_id);
		return;
	}

	/* Update the rx tid per the BAR, then drop the frame itself */
	_dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status,
				err_code);
	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf,
			      QDF_TX_RX_STATUS_DROP, true);
	dp_rx_link_desc_return(soc, ring_desc,
			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
				    rx_desc->pool_id);
	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
				    &pdev->free_list_tail,
				    rx_desc);
}
764 
765 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
766 
/*
 * Handle an msdu that hit a REO 2k-jump error: if a BA session is active
 * on the tid, send a DELBA to tear down the stale session, then try to
 * deliver the frame as a special frame (ARP); otherwise drop it.
 */
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid)
{
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;
	struct dp_txrx_peer *txrx_peer;
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
	if (!peer) {
		dp_rx_err_info_rl("%pK: peer not found", soc);
		goto free_nbuf;
	}

	txrx_peer = dp_get_txrx_peer(peer);
	if (!txrx_peer) {
		dp_rx_err_info_rl("%pK: txrx_peer not found", soc);
		goto free_nbuf;
	}

	/* invalid tid: skip the DELBA path but still try to deliver */
	if (tid >= DP_MAX_TIDS) {
		dp_info_rl("invalid tid");
		goto nbuf_deliver;
	}

	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);

	/* only if BA session is active, allow send Delba */
	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto nbuf_deliver;
	}

	if (!rx_tid->delba_tx_status) {
		/* mark DELBA in progress; state is updated under tid_lock
		 * and the lock is dropped before calling into the ctrl path
		 */
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		if (soc->cdp_soc.ol_ops->send_delba) {
			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
				     1);
			soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					tid,
					rx_tid->delba_rcode,
					CDP_DELBA_2K_JUMP);
		}
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

nbuf_deliver:
	if (dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
					rx_tlv_hdr)) {
		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		return;
	}

free_nbuf:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
	dp_rx_nbuf_free(nbuf);
}
836 
837 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
838     defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
839 bool
840 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
841 					      uint8_t pool_id,
842 					      uint8_t *rx_tlv_hdr,
843 					      qdf_nbuf_t nbuf)
844 {
845 	struct dp_peer *peer = NULL;
846 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
847 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
848 	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;
849 
850 	if (!pdev) {
851 		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
852 				soc, pool_id);
853 		return false;
854 	}
855 	/*
856 	 * WAR- In certain types of packets if peer_id is not correct then
857 	 * driver may not be able find. Try finding peer by addr_2 of
858 	 * received MPDU
859 	 */
860 	if (wh)
861 		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
862 					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
863 	if (peer) {
864 		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
865 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
866 				     QDF_TRACE_LEVEL_DEBUG);
867 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
868 				 1, qdf_nbuf_len(nbuf));
869 		dp_rx_nbuf_free(nbuf);
870 
871 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
872 		return true;
873 	}
874 	return false;
875 }
876 #else
877 bool
878 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
879 					      uint8_t pool_id,
880 					      uint8_t *rx_tlv_hdr,
881 					      qdf_nbuf_t nbuf)
882 {
883 	return false;
884 }
885 #endif
886 
887 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
888 {
889 	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
890 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
891 				 1, pkt_len);
892 		return true;
893 	} else {
894 		return false;
895 	}
896 }
897 
898 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
899 void
900 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
901 			    struct dp_vdev *vdev,
902 			    struct dp_txrx_peer *txrx_peer,
903 			    qdf_nbuf_t nbuf,
904 			    qdf_nbuf_t tail,
905 			    bool is_eapol)
906 {
907 	if (is_eapol && soc->eapol_over_control_port)
908 		dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
909 	else
910 		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
911 }
912 #else
913 void
914 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
915 			    struct dp_vdev *vdev,
916 			    struct dp_txrx_peer *txrx_peer,
917 			    qdf_nbuf_t nbuf,
918 			    qdf_nbuf_t tail,
919 			    bool is_eapol)
920 {
921 	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
922 }
923 #endif
924 
925 #ifdef WLAN_FEATURE_11BE_MLO
926 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
927 {
928 	return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
929 			     QDF_MAC_ADDR_SIZE) == 0) ||
930 		(qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
931 			     QDF_MAC_ADDR_SIZE) == 0));
932 }
933 
934 #else
935 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
936 {
937 	return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
938 			    QDF_MAC_ADDR_SIZE) == 0);
939 }
940 #endif
941 
942 #ifndef QCA_HOST_MODE_WIFI_DISABLED
943 
944 bool
945 dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
946 {
947 	struct dp_soc *soc = vdev->pdev->soc;
948 
949 	if (!vdev->drop_3addr_mcast)
950 		return false;
951 
952 	if (vdev->opmode != wlan_op_mode_sta)
953 		return false;
954 
955 	if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
956 		return true;
957 
958 	return false;
959 }
960 
961 /**
962  * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed
963  *				for this frame received in REO error ring.
964  * @soc: Datapath SOC handle
965  * @error: REO error detected or not
966  * @error_code: Error code in case of REO error
967  *
968  * Return: true if pn check if needed in software,
969  *	false, if pn check if not needed.
970  */
971 static inline bool
972 dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
973 			     uint32_t error_code)
974 {
975 	return (soc->features.pn_in_reo_dest &&
976 		(error == HAL_REO_ERROR_DETECTED &&
977 		 (hal_rx_reo_is_2k_jump(error_code) ||
978 		  hal_rx_reo_is_oor_error(error_code) ||
979 		  hal_rx_reo_is_bar_oor_2k_jump(error_code))));
980 }
981 
982 #ifdef DP_WAR_INVALID_FIRST_MSDU_FLAG
983 static inline void
984 dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
985 				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
986 				  bool first_msdu_in_mpdu_processed)
987 {
988 	if (first_msdu_in_mpdu_processed) {
989 		/*
990 		 * This is the 2nd indication of first_msdu in the same mpdu.
991 		 * Skip re-parsing the mdpu_desc_info and use the cached one,
992 		 * since this msdu is most probably from the current mpdu
993 		 * which is being processed
994 		 */
995 	} else {
996 		hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc,
997 						   qdf_nbuf_data(nbuf),
998 						   mpdu_desc_info);
999 	}
1000 }
1001 #else
/* No WAR build: always (re)parse the mpdu desc info from the msdu TLVs;
 * the first_msdu_in_mpdu_processed hint is ignored.
 */
static inline void
dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				  bool first_msdu_in_mpdu_processed)
{
	hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, qdf_nbuf_data(nbuf),
					   mpdu_desc_info);
}
1010 #endif
1011 
1012 /**
 * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
1014  *
1015  * @soc: core txrx main context
1016  * @ring_desc: opaque pointer to the REO error ring descriptor
1017  * @mpdu_desc_info: pointer to mpdu level description info
1018  * @link_desc_va: pointer to msdu_link_desc virtual address
1019  * @err_code: reo error code fetched from ring entry
1020  *
1021  * Function to handle msdus fetched from msdu link desc, currently
1022  * support REO error NULL queue, 2K jump, OOR.
1023  *
1024  * Return: msdu count processed
1025  */
1026 static uint32_t
1027 dp_rx_reo_err_entry_process(struct dp_soc *soc,
1028 			    void *ring_desc,
1029 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1030 			    void *link_desc_va,
1031 			    enum hal_reo_error_code err_code)
1032 {
1033 	uint32_t rx_bufs_used = 0;
1034 	struct dp_pdev *pdev;
1035 	int i;
1036 	uint8_t *rx_tlv_hdr_first;
1037 	uint8_t *rx_tlv_hdr_last;
1038 	uint32_t tid = DP_MAX_TIDS;
1039 	uint16_t peer_id;
1040 	struct dp_rx_desc *rx_desc;
1041 	struct rx_desc_pool *rx_desc_pool;
1042 	qdf_nbuf_t nbuf;
1043 	qdf_nbuf_t next_nbuf;
1044 	struct hal_buf_info buf_info;
1045 	struct hal_rx_msdu_list msdu_list;
1046 	uint16_t num_msdus;
1047 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
1048 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
1049 	/* First field in REO Dst ring Desc is buffer_addr_info */
1050 	void *buf_addr_info = ring_desc;
1051 	qdf_nbuf_t head_nbuf = NULL;
1052 	qdf_nbuf_t tail_nbuf = NULL;
1053 	uint16_t msdu_processed = 0;
1054 	QDF_STATUS status;
1055 	bool ret, is_pn_check_needed;
1056 	uint8_t rx_desc_pool_id;
1057 	struct dp_txrx_peer *txrx_peer = NULL;
1058 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1059 	hal_ring_handle_t hal_ring_hdl = soc->reo_exception_ring.hal_srng;
1060 	bool first_msdu_in_mpdu_processed = false;
1061 	bool msdu_dropped = false;
1062 	uint8_t link_id = 0;
1063 
1064 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1065 					mpdu_desc_info->peer_meta_data);
1066 	is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
1067 							  HAL_REO_ERROR_DETECTED,
1068 							  err_code);
1069 more_msdu_link_desc:
1070 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
1071 			     &num_msdus);
1072 	for (i = 0; i < num_msdus; i++) {
1073 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
1074 						soc,
1075 						msdu_list.sw_cookie[i]);
1076 
1077 		if (dp_assert_always_internal_stat(rx_desc, soc,
1078 						   rx.err.reo_err_rx_desc_null))
1079 			continue;
1080 
1081 		nbuf = rx_desc->nbuf;
1082 
1083 		/*
1084 		 * this is a unlikely scenario where the host is reaping
1085 		 * a descriptor which it already reaped just a while ago
1086 		 * but is yet to replenish it back to HW.
1087 		 * In this case host will dump the last 128 descriptors
1088 		 * including the software descriptor rx_desc and assert.
1089 		 */
1090 		if (qdf_unlikely(!rx_desc->in_use) ||
1091 		    qdf_unlikely(!nbuf)) {
1092 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1093 			dp_info_rl("Reaping rx_desc not in use!");
1094 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1095 						   ring_desc, rx_desc);
1096 			/* ignore duplicate RX desc and continue to process */
1097 			/* Pop out the descriptor */
1098 			msdu_dropped = true;
1099 			continue;
1100 		}
1101 
1102 		ret = dp_rx_desc_paddr_sanity_check(rx_desc,
1103 						    msdu_list.paddr[i]);
1104 		if (!ret) {
1105 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1106 			rx_desc->in_err_state = 1;
1107 			msdu_dropped = true;
1108 			continue;
1109 		}
1110 
1111 		rx_desc_pool_id = rx_desc->pool_id;
1112 		/* all buffers from a MSDU link belong to same pdev */
1113 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id);
1114 
1115 		rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id];
1116 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
1117 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
1118 		rx_desc->unmapped = 1;
1119 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1120 
1121 		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
1122 		rx_bufs_used++;
1123 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1124 					    &pdev->free_list_tail, rx_desc);
1125 
1126 		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);
1127 
1128 		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
1129 				 HAL_MSDU_F_MSDU_CONTINUATION)) {
1130 			qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
1131 			continue;
1132 		}
1133 
1134 		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
1135 					     rx_desc_pool_id)) {
1136 			/* MSDU queued back to the pool */
1137 			msdu_dropped = true;
1138 			head_nbuf = NULL;
1139 			goto process_next_msdu;
1140 		}
1141 
1142 		if (is_pn_check_needed) {
1143 			if (msdu_list.msdu_info[i].msdu_flags &
1144 			    HAL_MSDU_F_FIRST_MSDU_IN_MPDU) {
1145 				dp_rx_err_populate_mpdu_desc_info(soc, nbuf,
1146 						mpdu_desc_info,
1147 						first_msdu_in_mpdu_processed);
1148 				first_msdu_in_mpdu_processed = true;
1149 			} else {
1150 				if (!first_msdu_in_mpdu_processed) {
1151 					/*
1152 					 * If no msdu in this mpdu was dropped
1153 					 * due to failed sanity checks, then
1154 					 * its not expected to hit this
1155 					 * condition. Hence we assert here.
1156 					 */
1157 					if (!msdu_dropped)
1158 						qdf_assert_always(0);
1159 
1160 					/*
1161 					 * We do not have valid mpdu_desc_info
1162 					 * to process this nbuf, hence drop it.
1163 					 * TODO - Increment stats
1164 					 */
1165 					goto process_next_msdu;
1166 				}
1167 				/*
1168 				 * DO NOTHING -
1169 				 * Continue using the same mpdu_desc_info
1170 				 * details populated from the first msdu in
1171 				 * the mpdu.
1172 				 */
1173 			}
1174 
1175 			status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf);
1176 			if (QDF_IS_STATUS_ERROR(status)) {
1177 				DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail,
1178 					     1);
1179 				goto process_next_msdu;
1180 			}
1181 
1182 			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1183 					mpdu_desc_info->peer_meta_data);
1184 
1185 			if (mpdu_desc_info->bar_frame)
1186 				_dp_rx_bar_frame_handle(soc, nbuf,
1187 							mpdu_desc_info, tid,
1188 							HAL_REO_ERROR_DETECTED,
1189 							err_code);
1190 		}
1191 
1192 		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
1193 		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);
1194 
1195 		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
1196 			/*
1197 			 * For SG case, only the length of last skb is valid
1198 			 * as HW only populate the msdu_len for last msdu
1199 			 * in rx link descriptor, use the length from
1200 			 * last skb to overwrite the head skb for further
1201 			 * SG processing.
1202 			 */
1203 			QDF_NBUF_CB_RX_PKT_LEN(head_nbuf) =
1204 					QDF_NBUF_CB_RX_PKT_LEN(tail_nbuf);
1205 			nbuf = dp_rx_sg_create(soc, head_nbuf);
1206 			qdf_nbuf_set_is_frag(nbuf, 1);
1207 			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
1208 		}
1209 		head_nbuf = NULL;
1210 
1211 		dp_rx_nbuf_set_link_id_from_tlv(soc, qdf_nbuf_data(nbuf), nbuf);
1212 
1213 		if (pdev && pdev->link_peer_stats &&
1214 		    txrx_peer && txrx_peer->is_mld_peer) {
1215 			link_id = dp_rx_get_stats_arr_idx_from_link_id(
1216 								nbuf,
1217 								txrx_peer);
1218 		}
1219 
1220 		if (txrx_peer)
1221 			dp_rx_set_nbuf_band(nbuf, txrx_peer, link_id);
1222 
1223 		switch (err_code) {
1224 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
1225 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
1226 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
1227 			/*
1228 			 * only first msdu, mpdu start description tlv valid?
1229 			 * and use it for following msdu.
1230 			 */
1231 			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1232 							   rx_tlv_hdr_last))
1233 				tid = hal_rx_mpdu_start_tid_get(
1234 							soc->hal_soc,
1235 							rx_tlv_hdr_first);
1236 
1237 			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
1238 					  peer_id, tid);
1239 			break;
1240 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
1241 		case HAL_REO_ERR_BAR_FRAME_OOR:
1242 			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
1243 			break;
1244 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
1245 			txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(
1246 							soc, peer_id,
1247 							&txrx_ref_handle,
1248 							DP_MOD_ID_RX_ERR);
1249 			if (!txrx_peer)
1250 				dp_info_rl("txrx_peer is null peer_id %u",
1251 					   peer_id);
1252 			soc->arch_ops.dp_rx_null_q_desc_handle(soc, nbuf,
1253 							       rx_tlv_hdr_last,
1254 							       rx_desc_pool_id,
1255 							       txrx_peer,
1256 							       TRUE,
1257 							       link_id);
1258 			if (txrx_peer)
1259 				dp_txrx_peer_unref_delete(txrx_ref_handle,
1260 							  DP_MOD_ID_RX_ERR);
1261 			break;
1262 		default:
1263 			dp_err_rl("Non-support error code %d", err_code);
1264 			dp_rx_nbuf_free(nbuf);
1265 		}
1266 
1267 process_next_msdu:
1268 		nbuf = head_nbuf;
1269 		while (nbuf) {
1270 			next_nbuf = qdf_nbuf_next(nbuf);
1271 			dp_rx_nbuf_free(nbuf);
1272 			nbuf = next_nbuf;
1273 		}
1274 		msdu_processed++;
1275 		head_nbuf = NULL;
1276 		tail_nbuf = NULL;
1277 	}
1278 
1279 	/*
1280 	 * If the msdu's are spread across multiple link-descriptors,
1281 	 * we cannot depend solely on the msdu_count(e.g., if msdu is
1282 	 * spread across multiple buffers).Hence, it is
1283 	 * necessary to check the next link_descriptor and release
1284 	 * all the msdu's that are part of it.
1285 	 */
1286 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
1287 			link_desc_va,
1288 			&next_link_desc_addr_info);
1289 
1290 	if (hal_rx_is_buf_addr_info_valid(
1291 				&next_link_desc_addr_info)) {
1292 		/* Clear the next link desc info for the current link_desc */
1293 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
1294 		dp_rx_link_desc_return_by_addr(
1295 				soc,
1296 				buf_addr_info,
1297 				HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1298 
1299 		hal_rx_buffer_addr_info_get_paddr(
1300 				&next_link_desc_addr_info,
1301 				&buf_info);
1302 		/* buffer_addr_info is the first element of ring_desc */
1303 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
1304 					  (uint32_t *)&next_link_desc_addr_info,
1305 					  &buf_info);
1306 		link_desc_va =
1307 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1308 		cur_link_desc_addr_info = next_link_desc_addr_info;
1309 		buf_addr_info = &cur_link_desc_addr_info;
1310 
1311 		goto more_msdu_link_desc;
1312 	}
1313 
1314 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
1315 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1316 	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
1317 		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);
1318 
1319 	return rx_bufs_used;
1320 }
1321 
1322 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1323 
/*
 * dp_rx_process_rxdma_err() - handle one frame reaped from the RXDMA
 * error path: validate length/DMA completion, then free, route through
 * mesh filtering, or deliver it (raw or stack) depending on the vdev
 * decap type and the RXDMA error code. Ownership of @nbuf is taken
 * here: every exit path frees or delivers it.
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
			uint8_t err_code, uint8_t mac_id, uint8_t link_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {

		dp_err_rl("MSDU DONE failure");

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;

	/* Sanity check the total length before touching the payload */
	if (dp_rx_check_pkt_len(soc, pkt_len)) {
		/* Drop & free packet */
		dp_rx_nbuf_free(nbuf);
		return;
	}
	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!txrx_peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
		return;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
				 vdev);
		/* Drop & free packet */
		dp_rx_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		/* Ethertype sits right after the two MAC addresses */
		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
			/* VLAN-tagged STP frames go the mesh path, any
			 * other VLAN frame goes straight to the stack */
			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
							htons(QDF_LLC_STP)) {
				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
				goto process_mesh;
			} else {
				goto process_rx;
			}
		}
	}
	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		goto process_rx;
	}
	/*
	 * In dynamic WEP case rekey frames are not encrypted
	 * similar to WAPI. Allow EAPOL when 8021+wep is enabled and
	 * key install is already done
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	/* Unencrypted-frame errors are only tolerated on mesh vdevs */
	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		dp_rx_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				      == QDF_STATUS_SUCCESS) {
			dp_rx_err_info("%pK: mesh pkt filtered", soc);
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			dp_rx_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
	}
process_rx:
	/* Per-peer mcast/bcast/ucast accounting before delivery */
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr) &&
		(vdev->rx_decap_type ==
				htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0 ;
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
					      qdf_nbuf_len(nbuf), link_id);
		if (is_broadcast) {
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1,
						      qdf_nbuf_len(nbuf),
						      link_id);
		}
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.unicast, 1,
					      qdf_nbuf_len(nbuf),
					      link_id);
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, txrx_peer, link_id);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
		DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
					    qdf_nbuf_is_ipv4_eapol_pkt(nbuf));
	}

	return;
}
1482 
1483 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
1484 			     uint8_t *rx_tlv_hdr,
1485 			     struct dp_txrx_peer *txrx_peer)
1486 {
1487 	struct dp_vdev *vdev = NULL;
1488 	struct dp_pdev *pdev = NULL;
1489 	struct ol_if_ops *tops = NULL;
1490 	uint16_t rx_seq, fragno;
1491 	uint8_t is_raw;
1492 	unsigned int tid;
1493 	QDF_STATUS status;
1494 	struct cdp_rx_mic_err_info mic_failure_info;
1495 
1496 	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1497 					    rx_tlv_hdr))
1498 		return;
1499 
1500 	if (!txrx_peer) {
1501 		dp_info_rl("txrx_peer not found");
1502 		goto fail;
1503 	}
1504 
1505 	vdev = txrx_peer->vdev;
1506 	if (!vdev) {
1507 		dp_info_rl("VDEV not found");
1508 		goto fail;
1509 	}
1510 
1511 	pdev = vdev->pdev;
1512 	if (!pdev) {
1513 		dp_info_rl("PDEV not found");
1514 		goto fail;
1515 	}
1516 
1517 	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
1518 	if (is_raw) {
1519 		fragno = dp_rx_frag_get_mpdu_frag_number(soc,
1520 							 qdf_nbuf_data(nbuf));
1521 		/* Can get only last fragment */
1522 		if (fragno) {
1523 			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
1524 							qdf_nbuf_data(nbuf));
1525 			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
1526 							qdf_nbuf_data(nbuf));
1527 
1528 			status = dp_rx_defrag_add_last_frag(soc, txrx_peer,
1529 							    tid, rx_seq, nbuf);
1530 			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
1531 				   "status %d !", rx_seq, fragno, status);
1532 			return;
1533 		}
1534 	}
1535 
1536 	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
1537 				  &mic_failure_info.da_mac_addr.bytes[0])) {
1538 		dp_err_rl("Failed to get da_mac_addr");
1539 		goto fail;
1540 	}
1541 
1542 	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
1543 				  &mic_failure_info.ta_mac_addr.bytes[0])) {
1544 		dp_err_rl("Failed to get ta_mac_addr");
1545 		goto fail;
1546 	}
1547 
1548 	mic_failure_info.key_id = 0;
1549 	mic_failure_info.multicast =
1550 		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
1551 	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
1552 	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
1553 	mic_failure_info.data = NULL;
1554 	mic_failure_info.vdev_id = vdev->vdev_id;
1555 
1556 	tops = pdev->soc->cdp_soc.ol_ops;
1557 	if (tops->rx_mic_error)
1558 		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
1559 				   &mic_failure_info);
1560 
1561 fail:
1562 	dp_rx_nbuf_free(nbuf);
1563 	return;
1564 }
1565 
1566 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1567 	defined(WLAN_MCAST_MLO)
1568 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1569 			       struct dp_vdev *vdev,
1570 			       struct dp_txrx_peer *peer,
1571 			       qdf_nbuf_t nbuf,
1572 			       uint8_t link_id)
1573 {
1574 	if (soc->arch_ops.dp_rx_mcast_handler) {
1575 		if (soc->arch_ops.dp_rx_mcast_handler(soc, vdev, peer,
1576 						      nbuf, link_id))
1577 			return true;
1578 	}
1579 	return false;
1580 }
1581 #else
/* MLO multicast handling not compiled in: never consume the frame here */
static bool dp_rx_igmp_handler(struct dp_soc *soc,
			       struct dp_vdev *vdev,
			       struct dp_txrx_peer *peer,
			       qdf_nbuf_t nbuf,
			       uint8_t link_id)
{
	return false;
}
1590 #endif
1591 
1592 /**
1593  * dp_rx_err_route_hdl() - Function to send EAPOL frames to stack
1594  *                            Free any other packet which comes in
1595  *                            this path.
1596  *
1597  * @soc: core DP main context
1598  * @nbuf: buffer pointer
1599  * @txrx_peer: txrx peer handle
1600  * @rx_tlv_hdr: start of rx tlv header
1601  * @err_src: rxdma/reo
1602  * @link_id: link id on which the packet is received
1603  *
1604  * This function indicates EAPOL frame received in wbm error ring to stack.
1605  * Any other frame should be dropped.
1606  *
1607  * Return: SUCCESS if delivered to stack
1608  */
static void
dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
		    struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr,
		    enum hal_rx_wbm_error_source err_src,
		    uint8_t link_id)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	struct hal_rx_msdu_metadata msdu_metadata;
	bool is_eapol;

	/* Mirror the per-msdu TLV flags into the nbuf control block */
	qdf_nbuf_set_rx_chfrag_start(
				nbuf,
				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							       rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;

	/* For non-SG buffers, validate and clamp the frame length */
	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(
			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!txrx_peer)
		goto drop_nbuf;

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
				   soc->rx_pkt_tlv_size));

	QDF_NBUF_CB_RX_PEER_ID(nbuf) = txrx_peer->peer_id;
	/* A true return means the MLO mcast handler consumed the nbuf */
	if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf, link_id))
		return;

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	/*
	 * Indicate EAPOL frame to stack only when vap mac address
	 * matches the destination address.
	 */
	is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
	if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		qdf_ether_header_t *eh =
			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		if (dp_rx_err_match_dhost(eh, vdev)) {
			DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata.
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);
			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
					      true);
			DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
						  qdf_nbuf_len(nbuf),
						  vdev->pdev->enhanced_stats_en);
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
						      rx.rx_success, 1,
						      qdf_nbuf_len(nbuf),
						      link_id);
			qdf_nbuf_set_exc_frame(nbuf, 1);
			qdf_nbuf_set_next(nbuf, NULL);

			dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf,
						    NULL, is_eapol);

			return;
		}
	}

drop_nbuf:
	/* Attribute the drop to the ring that routed the frame here */
	DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
		      err_src == HAL_RX_WBM_ERR_SRC_REO);
	DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
		      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);

	dp_rx_nbuf_free(nbuf);
}
1735 
1736 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1737 
1738 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
1739 /**
1740  * dp_rx_link_cookie_check() - Validate link desc cookie
1741  * @ring_desc: ring descriptor
1742  *
1743  * Return: qdf status
1744  */
1745 static inline QDF_STATUS
1746 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1747 {
1748 	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
1749 		return QDF_STATUS_E_FAILURE;
1750 
1751 	return QDF_STATUS_SUCCESS;
1752 }
1753 
1754 /**
1755  * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
1756  * @ring_desc: ring descriptor
1757  *
1758  * Return: None
1759  */
static inline void
dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
{
	/* Mark the cookie so a duplicate reap of this entry is detectable */
	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
}
1765 #else
/* Cookie invalidation disabled: treat every link cookie as valid */
static inline QDF_STATUS
dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
{
	return QDF_STATUS_SUCCESS;
}
1771 
/* Cookie invalidation disabled: nothing to mark */
static inline void
dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
{
}
1776 #endif
1777 
1778 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1779 /**
1780  * dp_rx_err_ring_record_entry() - Record rx err ring history
1781  * @soc: Datapath soc structure
1782  * @paddr: paddr of the buffer in RX err ring
1783  * @sw_cookie: SW cookie of the buffer in RX err ring
1784  * @rbm: Return buffer manager of the buffer in RX err ring
1785  *
1786  * Return: None
1787  */
1788 static inline void
1789 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1790 			    uint32_t sw_cookie, uint8_t rbm)
1791 {
1792 	struct dp_buf_info_record *record;
1793 	uint32_t idx;
1794 
1795 	if (qdf_unlikely(!soc->rx_err_ring_history))
1796 		return;
1797 
1798 	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
1799 					DP_RX_ERR_HIST_MAX);
1800 
1801 	/* No NULL check needed for record since its an array */
1802 	record = &soc->rx_err_ring_history->entry[idx];
1803 
1804 	record->timestamp = qdf_get_log_timestamp();
1805 	record->hbi.paddr = paddr;
1806 	record->hbi.sw_cookie = sw_cookie;
1807 	record->hbi.rbm = rbm;
1808 }
1809 #else
/* Ring history disabled: recording is a no-op */
static inline void
dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
			    uint32_t sw_cookie, uint8_t rbm)
{
}
1815 #endif
1816 
1817 #ifdef HANDLE_RX_REROUTE_ERR
/*
 * dp_rx_err_handle_msdu_buf() - recover the rx descriptor of an MSDU
 * buffer wrongly routed to the REO error ring: unmap and free its nbuf,
 * then return the descriptor to the owning pdev's free list.
 *
 * Return: lmac id (== rx_desc pool id) on success,
 *	   DP_INVALID_LMAC_ID (after qdf_assert) on any sanity failure.
 */
static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
				     hal_ring_desc_t ring_desc)
{
	int lmac_id = DP_INVALID_LMAC_ID;
	struct dp_rx_desc *rx_desc;
	struct hal_buf_info hbi;
	struct dp_pdev *pdev;
	struct rx_desc_pool *rx_desc_pool;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);

	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie);

	/* sanity */
	if (!rx_desc) {
		DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1);
		goto assert_return;
	}

	if (!rx_desc->nbuf)
		goto assert_return;

	dp_rx_err_ring_record_entry(soc, hbi.paddr,
				    hbi.sw_cookie,
				    hal_rx_ret_buf_manager_get(soc->hal_soc,
							       ring_desc));
	/* paddr from the ring entry must match the nbuf's mapped address */
	if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) {
		DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
		rx_desc->in_err_state = 1;
		goto assert_return;
	}

	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	/* After this point the rx_desc and nbuf are valid */
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	qdf_assert_always(!rx_desc->unmapped);
	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
	rx_desc->unmapped = 1;
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
				    rx_desc->pool_id);

	/* Hand the sw descriptor back to its pdev's free list */
	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
	lmac_id = rx_desc->pool_id;
	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
				    &pdev->free_list_tail,
				    rx_desc);
	return lmac_id;

assert_return:
	qdf_assert(0);
	return lmac_id;
}
1871 
/*
 * dp_rx_err_exception() - account one REO-routed MSDU buffer error and
 * trigger self-recovery when either the overall error count or the rate
 * of errors within DP_RX_ERR_ROUTE_TIMEOUT_US breaches its threshold,
 * then recover the buffer via dp_rx_err_handle_msdu_buf().
 *
 * Return: lmac id from dp_rx_err_handle_msdu_buf()
 */
static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
{
	int ret;
	uint64_t cur_time_stamp;

	DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1);

	/* Recover if overall error count exceeds threshold */
	if (soc->stats.rx.err.reo_err_msdu_buf_rcved >
	    DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) {
		dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
		       soc->stats.rx.err.reo_err_msdu_buf_rcved,
		       soc->rx_route_err_start_pkt_ts);
		qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR);
	}

	/* First error ever seen starts the rate-measurement window */
	cur_time_stamp = qdf_get_log_timestamp_usecs();
	if (!soc->rx_route_err_start_pkt_ts)
		soc->rx_route_err_start_pkt_ts = cur_time_stamp;

	/* Recover if threshold number of packets received in threshold time */
	if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) >
						DP_RX_ERR_ROUTE_TIMEOUT_US) {
		/* Window expired: restart it from this packet */
		soc->rx_route_err_start_pkt_ts = cur_time_stamp;

		if (soc->rx_route_err_in_window >
		    DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) {
			qdf_trigger_self_recovery(NULL,
						  QDF_RX_REG_PKT_ROUTE_ERR);
			dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
			       soc->stats.rx.err.reo_err_msdu_buf_rcved,
			       soc->rx_route_err_start_pkt_ts);
		} else {
			soc->rx_route_err_in_window = 1;
		}
	} else {
		/* Still inside the window: just count the error */
		soc->rx_route_err_in_window++;
	}

	ret = dp_rx_err_handle_msdu_buf(soc, ring_desc);

	return ret;
}
1915 #else /* HANDLE_RX_REROUTE_ERR */
1916 
/* Reroute-error handling not compiled in: such entries are unexpected
 * and treated as fatal.
 */
static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
{
	qdf_assert_always(0);

	return DP_INVALID_LMAC_ID;
}
1923 #endif /* HANDLE_RX_REROUTE_ERR */
1924 
1925 #ifdef WLAN_MLO_MULTI_CHIP
1926 /**
1927  * dp_idle_link_bm_id_check() - war for HW issue
1928  *
1929  * @soc: DP SOC handle
1930  * @rbm: idle link RBM value
1931  * @ring_desc: reo error link descriptor
1932  *
1933  * This is a war for HW issue where link descriptor
1934  * of partner soc received due to packets wrongly
1935  * interpreted as fragments
1936  *
1937  * Return: true in case link desc is consumed
1938  *	   false in other cases
1939  */
1940 static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
1941 				     void *ring_desc)
1942 {
1943 	struct dp_soc *replenish_soc = NULL;
1944 
1945 	/* return ok incase of link desc of same soc */
1946 	if (rbm == soc->idle_link_bm_id)
1947 		return false;
1948 
1949 	if (soc->arch_ops.dp_soc_get_by_idle_bm_id)
1950 		replenish_soc =
1951 			soc->arch_ops.dp_soc_get_by_idle_bm_id(soc, rbm);
1952 
1953 	qdf_assert_always(replenish_soc);
1954 
1955 	/*
1956 	 * For WIN usecase we should only get fragment packets in
1957 	 * this ring as for MLO case fragmentation is not supported
1958 	 * we should not see links from other soc.
1959 	 *
1960 	 * Drop all packets from partner soc and replenish the descriptors
1961 	 */
1962 	dp_handle_wbm_internal_error(replenish_soc, ring_desc,
1963 				     HAL_WBM_RELEASE_RING_2_DESC_TYPE);
1964 
1965 	return true;
1966 }
1967 #else
static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
				     void *ring_desc)
{
	/*
	 * Without WLAN_MLO_MULTI_CHIP there is no partner soc, so a link
	 * descriptor can never belong to another chip: never consume it.
	 */
	return false;
}
1973 #endif
1974 
1975 static inline void
1976 dp_rx_err_dup_frame(struct dp_soc *soc,
1977 		    struct hal_rx_mpdu_desc_info *mpdu_desc_info)
1978 {
1979 	struct dp_txrx_peer *txrx_peer = NULL;
1980 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1981 	uint16_t peer_id;
1982 
1983 	peer_id =
1984 		dp_rx_peer_metadata_peer_id_get(soc,
1985 						mpdu_desc_info->peer_meta_data);
1986 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
1987 						   &txrx_ref_handle,
1988 						   DP_MOD_ID_RX_ERR);
1989 	if (txrx_peer) {
1990 		DP_STATS_INC(txrx_peer->vdev, rx.duplicate_count, 1);
1991 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
1992 	}
1993 }
1994 
/**
 * dp_rx_err_process() - Reap and process entries from the REO error ring.
 *
 * @int_ctx: interrupt context owning this ring
 * @soc: core DP soc context
 * @hal_ring_hdl: HAL handle of the REO error (exception) destination ring
 * @quota: maximum number of ring entries to reap in this invocation
 *
 * For each entry: validates the descriptor/cookie, dispatches fragments,
 * BAR frames and REO error codes to their handlers, then replenishes the
 * RX buffers consumed per pdev before returning.
 *
 * Return: number of RX buffers reaped (used as work done; no scale factor)
 */
uint32_t
dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t err_status;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;
	QDF_STATUS status;
	bool ret;
	uint32_t error_code = 0;
	bool sw_pn_check_needed;
	int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
	int i, rx_bufs_reaped_total;
	uint16_t peer_id;
	struct dp_txrx_peer *txrx_peer = NULL;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc,
			      hal_ring_hdl);
		goto done;
	}

	/* Peek (do not pop) so the entry can be re-examined on early exit;
	 * the entry is advanced at next_entry below.
	 */
	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_peek(hal_soc,
						  hal_ring_hdl)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
		err_status = hal_rx_err_status_get(hal_soc, ring_desc);
		buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc);

		if (err_status == HAL_REO_ERROR_DETECTED)
			error_code = hal_rx_get_reo_error_code(hal_soc,
							       ring_desc);

		qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0);
		sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
								  err_status,
								  error_code);
		if (!sw_pn_check_needed) {
			/*
			 * MPDU desc info will be present in the REO desc
			 * only in the below scenarios
			 * 1) pn_in_dest_disabled:  always
			 * 2) pn_in_dest enabled: All cases except 2k-jump
			 *			and OOR errors
			 */
			hal_rx_mpdu_desc_info_get(hal_soc, ring_desc,
						  &mpdu_desc_info);
		}

		if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0)
			goto next_entry;

		/*
		 * For REO error ring, only MSDU LINK DESC is expected.
		 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
		 */
		if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) {
			int lmac_id;

			lmac_id = dp_rx_err_exception(soc, ring_desc);
			if (lmac_id >= 0)
				rx_bufs_reaped[lmac_id] += 1;
			goto next_entry;
		}

		hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
					  &hbi);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) &
					soc->link_desc_id_start);

		/* WAR: link desc may belong to a partner soc (MLO) */
		if (dp_idle_link_bm_id_check(soc, hbi.rbm, ring_desc)) {
			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
			goto next_entry;
		}

		status = dp_rx_link_cookie_check(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
			break;
		}

		hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);
		if (!num_msdus ||
		    !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) {
			dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x",
					  num_msdus, msdu_list.sw_cookie[0]);
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
			goto next_entry;
		}

		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
					    msdu_list.sw_cookie[0],
					    msdu_list.rbm[0]);
		// TODO - BE- Check if the RBM is to be checked for all chips
		if (qdf_unlikely((msdu_list.rbm[0] !=
					dp_rx_get_rx_bm_id(soc)) &&
				 (msdu_list.rbm[0] !=
				  soc->idle_link_bm_id) &&
				 (msdu_list.rbm[0] !=
					dp_rx_get_defrag_bm_id(soc)))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				dp_rx_err_err("%pK: Invalid RBM %d",
					      soc, msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM)*/
			dp_rx_link_desc_return(soc, ring_desc,
					HAL_BM_ACTION_RELEASE_MSDU_LIST);
			goto next_entry;
		}

		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc,
						msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		/* 2k-jump/OOR with pn_in_dest: skip straight to REO
		 * error-code handling (mpdu_desc_info was not populated)
		 */
		if (sw_pn_check_needed) {
			goto process_reo_error_code;
		}

		if (mpdu_desc_info.bar_frame) {
			qdf_assert_always(mpdu_desc_info.msdu_count == 1);

			dp_rx_bar_frame_handle(soc, ring_desc, rx_desc,
					       &mpdu_desc_info, err_status,
					       error_code);

			rx_bufs_reaped[mac_id] += 1;
			goto next_entry;
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for fragmented
			 * case. We drop the msdus and release the link desc
			 * back if there are more than one msdu in link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				goto next_entry;
			}

			/*
			 * this is a unlikely scenario where the host is reaping
			 * a descriptor which it already reaped just a while ago
			 * but is yet to replenish it back to HW.
			 * In this case host will dump the last 128 descriptors
			 * including the software descriptor rx_desc and assert.
			 */

			if (qdf_unlikely(!rx_desc->in_use)) {
				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
				dp_info_rl("Reaping rx_desc not in use!");
				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
							   ring_desc, rx_desc);
				/* ignore duplicate RX desc and continue */
				/* Pop out the descriptor */
				goto next_entry;
			}

			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
							    msdu_list.paddr[0]);
			if (!ret) {
				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
				rx_desc->in_err_state = 1;
				goto next_entry;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);

			/* Per-vdev fragment accounting */
			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
					mpdu_desc_info.peer_meta_data);
			txrx_peer =
				dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
							       &txrx_ref_handle,
							       DP_MOD_ID_RX_ERR);
			if (txrx_peer) {
				DP_STATS_INC(txrx_peer->vdev,
					     rx.fragment_count, 1);
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_RX_ERR);
			}
			goto next_entry;
		}

process_reo_error_code:
		/*
		 * Expect REO errors to be handled after this point
		 */
		qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED);

		dp_info_rl("Got pkt with REO ERROR: %d", error_code);

		switch (error_code) {
		case HAL_REO_ERR_PN_CHECK_FAILED:
		case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);
			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			break;
		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_REGULAR_FRAME_OOR:
		case HAL_REO_ERR_BAR_FRAME_OOR:
		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);
			count = dp_rx_reo_err_entry_process(
					soc,
					ring_desc,
					&mpdu_desc_info,
					link_desc_va,
					error_code);

			rx_bufs_reaped[mac_id] += count;
			break;
		case HAL_REO_ERR_NON_BA_DUPLICATE:
			dp_rx_err_dup_frame(soc, &mpdu_desc_info);
			fallthrough;
		case HAL_REO_ERR_QUEUE_DESC_INVALID:
		case HAL_REO_ERR_AMPDU_IN_NON_BA:
		case HAL_REO_ERR_BA_DUPLICATE:
		case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION:
		case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN:
		case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			count = dp_rx_msdus_drop(soc, ring_desc,
						 &mpdu_desc_info,
						 &mac_id, quota);
			rx_bufs_reaped[mac_id] += count;
			break;
		default:
			/* Assert if unexpected error type */
			qdf_assert_always(0);
		}
next_entry:
		/* Pop the entry that was only peeked at loop top */
		dp_rx_link_cookie_invalidate(ring_desc);
		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);

		rx_bufs_reaped_total = 0;
		for (i = 0; i < MAX_PDEV_CNT; i++)
			rx_bufs_reaped_total += rx_bufs_reaped[i];

		if (dp_rx_reap_loop_pkt_limit_hit(soc, rx_bufs_reaped_total,
						  max_reap_limit))
			break;
	}

done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	/* Flush stale entries from the defrag waitlist if the timeout
	 * window has elapsed
	 */
	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	/* Replenish the buffers consumed on each mac/pool */
	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail,
						false);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
2333 
2334 #ifdef DROP_RXDMA_DECRYPT_ERR
2335 /**
2336  * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
2337  *
2338  * Return: true if rxdma decrypt err frames are handled and false otherwise
2339  */
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	/* DROP_RXDMA_DECRYPT_ERR: decrypt-error frames are dropped
	 * instead of being handed to the rxdma error handler
	 */
	return false;
}
2344 #else
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	/* Default: deliver rxdma decrypt-error frames to the error handler */
	return true;
}
2349 #endif
2350 
2351 void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
2352 {
2353 	if (soc->wbm_sg_last_msdu_war) {
2354 		uint32_t len;
2355 		qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail;
2356 
2357 		len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
2358 						     qdf_nbuf_data(temp));
2359 		temp = soc->wbm_sg_param.wbm_sg_nbuf_head;
2360 		while (temp) {
2361 			QDF_NBUF_CB_RX_PKT_LEN(temp) = len;
2362 			temp = temp->next;
2363 		}
2364 	}
2365 }
2366 
2367 #ifdef RX_DESC_DEBUG_CHECK
2368 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2369 					    hal_ring_handle_t hal_ring_hdl,
2370 					    hal_ring_desc_t ring_desc,
2371 					    struct dp_rx_desc *rx_desc)
2372 {
2373 	struct hal_buf_info hbi;
2374 
2375 	hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2376 	/* Sanity check for possible buffer paddr corruption */
2377 	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
2378 		return QDF_STATUS_SUCCESS;
2379 
2380 	hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc);
2381 
2382 	return QDF_STATUS_E_FAILURE;
2383 }
2384 
2385 #else
QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
					    hal_ring_handle_t hal_ring_hdl,
					    hal_ring_desc_t ring_desc,
					    struct dp_rx_desc *rx_desc)
{
	/* RX_DESC_DEBUG_CHECK disabled: skip the paddr sanity check */
	return QDF_STATUS_SUCCESS;
}
2393 #endif
2394 bool
2395 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
2396 {
2397 	/*
2398 	 * Currently Null Queue and Unencrypted error handlers has support for
2399 	 * SG. Other error handler do not deal with SG buffer.
2400 	 */
2401 	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
2402 	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
2403 	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
2404 	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
2405 		return true;
2406 
2407 	return false;
2408 }
2409 
2410 #ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
2411 void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2412 			      qdf_nbuf_t nbuf)
2413 {
2414 	/*
2415 	 * In case of fast recycle TX driver can avoid invalidate
2416 	 * of buffer in case of SFE forward. We need to invalidate
2417 	 * the TLV headers after writing to this location
2418 	 */
2419 	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2420 				      (void *)(nbuf->data +
2421 					       soc->rx_pkt_tlv_size +
2422 					       L3_HEADER_PAD));
2423 }
2424 #else
void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
			      qdf_nbuf_t nbuf)
{
	/* No-op when QCA_DP_NBUF_FAST_RECYCLE_CHECK is not enabled */
}
2429 #endif
2430 
2431 #ifndef CONFIG_NBUF_AP_PLATFORM
2432 static inline uint16_t
2433 dp_rx_get_peer_id(struct dp_soc *soc,
2434 		  uint8_t *rx_tlv_hdr,
2435 		  qdf_nbuf_t nbuf)
2436 {
2437 	uint32_t peer_mdata = 0;
2438 
2439 	peer_mdata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
2440 						   rx_tlv_hdr);
2441 	return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
2442 }
2443 
2444 static inline void
2445 dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
2446 				 qdf_nbuf_t nbuf,
2447 				 uint8_t *rx_tlv_hdr,
2448 				 union hal_wbm_err_info_u *wbm_err)
2449 {
2450 	hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr,
2451 				      (uint8_t *)&wbm_err->info,
2452 				      sizeof(union hal_wbm_err_info_u));
2453 }
2454 
2455 void
2456 dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
2457 			       qdf_nbuf_t nbuf,
2458 			       union hal_wbm_err_info_u wbm_err)
2459 {
2460 	hal_rx_priv_info_set_in_tlv(soc->hal_soc,
2461 				    qdf_nbuf_data(nbuf),
2462 				    (uint8_t *)&wbm_err.info,
2463 				    sizeof(union hal_wbm_err_info_u));
2464 }
2465 #else
2466 static inline uint16_t
2467 dp_rx_get_peer_id(struct dp_soc *soc,
2468 		  uint8_t *rx_tlv_hdr,
2469 		  qdf_nbuf_t nbuf)
2470 {
2471 	uint32_t peer_mdata = QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf);
2472 
2473 	return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
2474 }
2475 
static inline void
dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
				 qdf_nbuf_t nbuf,
				 uint8_t *rx_tlv_hdr,
				 union hal_wbm_err_info_u *wbm_err)
{
	/* AP platform: the WBM error word travels in the nbuf CB,
	 * not in the TLV private area
	 */
	wbm_err->info = QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf);
}
2484 
void
dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
			       qdf_nbuf_t nbuf,
			       union hal_wbm_err_info_u wbm_err)
{
	/* AP platform: stash the WBM error word in the nbuf CB */
	QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf) = wbm_err.info;
}
2492 #endif /* CONFIG_NBUF_AP_PLATFORM */
2493 
/**
 * dp_rx_wbm_err_process() - Process error frames from the WBM release ring.
 *
 * @int_ctx: interrupt context owning this ring
 * @soc: core DP soc context
 * @hal_ring_hdl: HAL handle of the WBM error release ring
 * @quota: maximum number of ring entries to reap in this invocation
 *
 * Reaps the ring via the arch-specific handler into an nbuf list, then
 * walks the list dispatching each nbuf by error source (REO vs RXDMA),
 * push reason and error code. Each branch either consumes the nbuf in a
 * handler or frees it here; the nbuf must not be touched after dispatch.
 *
 * Return: number of RX buffers reaped (used as work done; no scale factor)
 */
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_soc_handle_t hal_soc;
	uint32_t rx_bufs_used = 0;
	struct dp_pdev *dp_pdev;
	uint8_t *rx_tlv_hdr;
	bool is_tkip_mic_err;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf, next;
	union hal_wbm_err_info_u wbm_err = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;
	uint8_t link_id = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	nbuf_head = soc->arch_ops.dp_rx_wbm_err_reap_desc(int_ctx, soc,
							  hal_ring_hdl,
							  quota,
							  &rx_bufs_used);
	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_txrx_peer *txrx_peer;
		struct dp_peer *peer;
		uint16_t peer_id;
		uint8_t err_code;
		uint8_t *tlv_hdr;
		dp_txrx_ref_handle txrx_ref_handle = NULL;
		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * retrieve the wbm desc info from nbuf CB/TLV, so we can
		 * handle error cases appropriately
		 */
		dp_rx_get_wbm_err_info_from_nbuf(soc, nbuf,
						 rx_tlv_hdr,
						 &wbm_err);

		peer_id = dp_rx_get_peer_id(soc,
					    rx_tlv_hdr,
					    nbuf);
		txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
							   &txrx_ref_handle,
							   DP_MOD_ID_RX_ERR);

		/* Peer may already be gone; handlers below accept NULL */
		if (!txrx_peer)
			dp_info_rl("peer is null peer_id %u err_src %u, "
				   "REO: push_rsn %u err_code %u, "
				   "RXDMA: push_rsn %u err_code %u",
				   peer_id, wbm_err.info_bit.wbm_err_src,
				   wbm_err.info_bit.reo_psh_rsn,
				   wbm_err.info_bit.reo_err_code,
				   wbm_err.info_bit.rxdma_psh_rsn,
				   wbm_err.info_bit.rxdma_err_code);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;
		/*
		 * Form the SG for msdu continued buffers
		 * QCN9000 has this support
		 */
		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;
			/*
			 * SG error handling is not done correctly,
			 * drop SG frames for now.
			 */
			dp_rx_nbuf_free(nbuf);
			dp_info_rl("scattered msdu dropped");
			nbuf = next;
			if (txrx_peer)
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_RX_ERR);
			continue;
		}

		dp_rx_nbuf_set_link_id_from_tlv(soc, rx_tlv_hdr, nbuf);

		pool_id = wbm_err.info_bit.pool_id;
		dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

		/* Resolve the per-link stats index only for MLD peers
		 * when link-peer stats are enabled
		 */
		if (dp_pdev && dp_pdev->link_peer_stats &&
		    txrx_peer && txrx_peer->is_mld_peer) {
			link_id = dp_rx_get_stats_arr_idx_from_link_id(
								nbuf,
								txrx_peer);
		} else {
			link_id = 0;
		}

		if (wbm_err.info_bit.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err.info_bit.reo_psh_rsn
					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					rx.err.reo_error
					[wbm_err.info_bit.reo_err_code], 1);
				/* increment @pdev level */
				if (dp_pdev)
					DP_STATS_INC(dp_pdev, err.reo_error,
						     1);

				switch (wbm_err.info_bit.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err.info_bit.pool_id;
					soc->arch_ops.dp_rx_null_q_desc_handle(
								soc, nbuf,
								rx_tlv_hdr,
								pool_id,
								txrx_peer,
								FALSE,
								link_id);
					break;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.jump_2k_err,
									  1,
									  link_id);

					pool_id = wbm_err.info_bit.pool_id;

					/* tid is valid only on the first
					 * MSDU of an MPDU
					 */
					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					hal_rx_msdu_start_msdu_len_get(
						soc->hal_soc, rx_tlv_hdr);
					nbuf->next = NULL;
					dp_2k_jump_handle(soc, nbuf,
							  rx_tlv_hdr,
							  peer_id, tid);
					break;
				case HAL_REO_ERR_REGULAR_FRAME_OOR:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.oor_err,
									  1,
									  link_id);
					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						tid =
							hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
						hal_rx_msdu_start_msdu_len_get(
						soc->hal_soc, rx_tlv_hdr);
					nbuf->next = NULL;
					dp_rx_oor_handle(soc, nbuf,
							 peer_id,
							 rx_tlv_hdr);
					break;
				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
				case HAL_REO_ERR_BAR_FRAME_OOR:
					peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
					if (peer) {
						dp_rx_err_handle_bar(soc, peer,
								     nbuf);
						dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
					}
					dp_rx_nbuf_free(nbuf);
					break;

				case HAL_REO_ERR_PN_CHECK_FAILED:
				case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.pn_err,
									  1,
									  link_id);
					dp_rx_nbuf_free(nbuf);
					break;

				default:
					dp_info_rl("Got pkt with REO ERROR: %d",
						   wbm_err.info_bit.
						   reo_err_code);
					dp_rx_nbuf_free(nbuf);
				}
			} else if (wbm_err.info_bit.reo_psh_rsn
					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
						    rx_tlv_hdr,
						    HAL_RX_WBM_ERR_SRC_REO,
						    link_id);
			} else {
				/* should not enter here */
				dp_rx_err_alert("invalid reo push reason %u",
						wbm_err.info_bit.reo_psh_rsn);
				dp_rx_nbuf_free(nbuf);
				dp_assert_always_internal(0);
			}
		} else if (wbm_err.info_bit.wbm_err_src ==
					HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err.info_bit.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					rx.err.rxdma_error
					[wbm_err.info_bit.rxdma_err_code], 1);
				/* increment @pdev level */
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err.info_bit.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:

				case HAL_RXDMA_ERR_WIFI_PARSE:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.rxdma_wifi_parse_err,
									  1,
									  link_id);

					pool_id = wbm_err.info_bit.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								txrx_peer,
								wbm_err.
								info_bit.
								rxdma_err_code,
								pool_id,
								link_id);
					break;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								txrx_peer);
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.mic_err,
									  1,
									  link_id);
					break;

				case HAL_RXDMA_ERR_DECRYPT:
					/* All the TKIP-MIC failures are treated as Decrypt Errors
					 * for QCN9224 Targets
					 */
					is_tkip_mic_err = hal_rx_msdu_end_is_tkip_mic_err(hal_soc, rx_tlv_hdr);

					if (is_tkip_mic_err && txrx_peer) {
						dp_rx_process_mic_error(soc, nbuf,
									rx_tlv_hdr,
									txrx_peer);
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.mic_err,
									  1,
									  link_id);
						break;
					}

					/* Known peer: account and drop */
					if (txrx_peer) {
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.decrypt_err,
									  1,
									  link_id);
						dp_rx_nbuf_free(nbuf);
						break;
					}

					if (!dp_handle_rxdma_decrypt_err()) {
						dp_rx_nbuf_free(nbuf);
						break;
					}

					pool_id = wbm_err.info_bit.pool_id;
					err_code = wbm_err.info_bit.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr, NULL,
								err_code,
								pool_id,
								link_id);
					break;
				case HAL_RXDMA_MULTICAST_ECHO:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
									      rx.mec_drop, 1,
									      qdf_nbuf_len(nbuf),
									      link_id);
					dp_rx_nbuf_free(nbuf);
					break;
				case HAL_RXDMA_UNAUTHORIZED_WDS:
					pool_id = wbm_err.info_bit.pool_id;
					err_code = wbm_err.info_bit.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr,
								txrx_peer,
								err_code,
								pool_id,
								link_id);
					break;
				default:
					dp_rx_nbuf_free(nbuf);
					dp_err_rl("RXDMA error %d",
						  wbm_err.info_bit.rxdma_err_code);
				}
			} else if (wbm_err.info_bit.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
						    rx_tlv_hdr,
						    HAL_RX_WBM_ERR_SRC_RXDMA,
						    link_id);
			} else if (wbm_err.info_bit.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
				dp_rx_err_err("rxdma push reason %u",
						wbm_err.info_bit.rxdma_psh_rsn);
				DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
				dp_rx_nbuf_free(nbuf);
			} else {
				/* should not enter here */
				dp_rx_err_alert("invalid rxdma push reason %u",
						wbm_err.info_bit.rxdma_psh_rsn);
				dp_rx_nbuf_free(nbuf);
				dp_assert_always_internal(0);
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (txrx_peer)
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX_ERR);

		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}
2845 
2846 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2847 
2848 /**
2849  * dup_desc_dbg() - dump and assert if duplicate rx desc found
2850  *
2851  * @soc: core DP main context
2852  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2853  * @rx_desc: void pointer to rx descriptor
2854  *
2855  * Return: void
2856  */
2857 static void dup_desc_dbg(struct dp_soc *soc,
2858 			 hal_rxdma_desc_t rxdma_dst_ring_desc,
2859 			 void *rx_desc)
2860 {
2861 	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
2862 	dp_rx_dump_info_and_assert(
2863 			soc,
2864 			soc->rx_rel_ring.hal_srng,
2865 			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
2866 			rx_desc);
2867 }
2868 
2869 /**
2870  * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
2871  *
2872  * @soc: core DP main context
2873  * @mac_id: mac id which is one of 3 mac_ids
2874  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2875  * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
2877  *
2878  * Return: number of msdu in MPDU to be popped
2879  */
2880 static inline uint32_t
2881 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
2882 	hal_rxdma_desc_t rxdma_dst_ring_desc,
2883 	union dp_rx_desc_list_elem_t **head,
2884 	union dp_rx_desc_list_elem_t **tail)
2885 {
2886 	void *rx_msdu_link_desc;
2887 	qdf_nbuf_t msdu;
2888 	qdf_nbuf_t last;
2889 	struct hal_rx_msdu_list msdu_list;
2890 	uint16_t num_msdus;
2891 	struct hal_buf_info buf_info;
2892 	uint32_t rx_bufs_used = 0;
2893 	uint32_t msdu_cnt;
2894 	uint32_t i;
2895 	uint8_t push_reason;
2896 	uint8_t rxdma_error_code = 0;
2897 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
2898 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2899 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
2900 	hal_rxdma_desc_t ring_desc;
2901 	struct rx_desc_pool *rx_desc_pool;
2902 
2903 	if (!pdev) {
2904 		dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
2905 				soc, mac_id);
2906 		return rx_bufs_used;
2907 	}
2908 
2909 	msdu = 0;
2910 
2911 	last = NULL;
2912 
2913 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
2914 				     &buf_info, &msdu_cnt);
2915 
2916 	push_reason =
2917 		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
2918 	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2919 		rxdma_error_code =
2920 			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
2921 	}
2922 
2923 	do {
2924 		rx_msdu_link_desc =
2925 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
2926 
2927 		qdf_assert_always(rx_msdu_link_desc);
2928 
2929 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
2930 				     &msdu_list, &num_msdus);
2931 
2932 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
2933 			/* if the msdus belongs to NSS offloaded radio &&
2934 			 * the rbm is not SW1_BM then return the msdu_link
2935 			 * descriptor without freeing the msdus (nbufs). let
2936 			 * these buffers be given to NSS completion ring for
2937 			 * NSS to free them.
2938 			 * else iterate through the msdu link desc list and
2939 			 * free each msdu in the list.
2940 			 */
2941 			if (msdu_list.rbm[0] !=
2942 				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) &&
2943 			    wlan_cfg_get_dp_pdev_nss_enabled(
2944 							pdev->wlan_cfg_ctx))
2945 				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
2946 			else {
2947 				for (i = 0; i < num_msdus; i++) {
2948 					struct dp_rx_desc *rx_desc =
2949 						soc->arch_ops.
2950 						dp_rx_desc_cookie_2_va(
2951 							soc,
2952 							msdu_list.sw_cookie[i]);
2953 					qdf_assert_always(rx_desc);
2954 					msdu = rx_desc->nbuf;
2955 					/*
2956 					 * this is a unlikely scenario
2957 					 * where the host is reaping
2958 					 * a descriptor which
2959 					 * it already reaped just a while ago
2960 					 * but is yet to replenish
2961 					 * it back to HW.
2962 					 * In this case host will dump
2963 					 * the last 128 descriptors
2964 					 * including the software descriptor
2965 					 * rx_desc and assert.
2966 					 */
2967 					ring_desc = rxdma_dst_ring_desc;
2968 					if (qdf_unlikely(!rx_desc->in_use)) {
2969 						dup_desc_dbg(soc,
2970 							     ring_desc,
2971 							     rx_desc);
2972 						continue;
2973 					}
2974 
2975 					if (rx_desc->unmapped == 0) {
2976 						rx_desc_pool =
2977 							&soc->rx_desc_buf[rx_desc->pool_id];
2978 						dp_ipa_rx_buf_smmu_mapping_lock(soc);
2979 						dp_rx_nbuf_unmap_pool(soc,
2980 								      rx_desc_pool,
2981 								      msdu);
2982 						rx_desc->unmapped = 1;
2983 						dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2984 					}
2985 
2986 					dp_rx_err_debug("%pK: msdu_nbuf=%pK ",
2987 							soc, msdu);
2988 
2989 					dp_rx_buffer_pool_nbuf_free(soc, msdu,
2990 							rx_desc->pool_id);
2991 					rx_bufs_used++;
2992 					dp_rx_add_to_free_desc_list(head,
2993 						tail, rx_desc);
2994 				}
2995 			}
2996 		} else {
2997 			rxdma_error_code = HAL_RXDMA_ERR_WAR;
2998 		}
2999 
3000 		/*
3001 		 * Store the current link buffer into to the local structure
3002 		 * to be used for release purpose.
3003 		 */
3004 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3005 					     buf_info.paddr, buf_info.sw_cookie,
3006 					     buf_info.rbm);
3007 
3008 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3009 					      &buf_info);
3010 		dp_rx_link_desc_return_by_addr(soc,
3011 					       (hal_buff_addrinfo_t)
3012 						rx_link_buf_info,
3013 						bm_action);
3014 	} while (buf_info.paddr);
3015 
3016 	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
3017 	if (pdev)
3018 		DP_STATS_INC(pdev, err.rxdma_error, 1);
3019 
3020 	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
3021 		dp_rx_err_err("%pK: Packet received with Decrypt error", soc);
3022 	}
3023 
3024 	return rx_bufs_used;
3025 }
3026 
3027 uint32_t
3028 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3029 		     uint32_t mac_id, uint32_t quota)
3030 {
3031 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
3032 	hal_rxdma_desc_t rxdma_dst_ring_desc;
3033 	hal_soc_handle_t hal_soc;
3034 	void *err_dst_srng;
3035 	union dp_rx_desc_list_elem_t *head = NULL;
3036 	union dp_rx_desc_list_elem_t *tail = NULL;
3037 	struct dp_srng *dp_rxdma_srng;
3038 	struct rx_desc_pool *rx_desc_pool;
3039 	uint32_t work_done = 0;
3040 	uint32_t rx_bufs_used = 0;
3041 
3042 	if (!pdev)
3043 		return 0;
3044 
3045 	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;
3046 
3047 	if (!err_dst_srng) {
3048 		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
3049 			      soc, err_dst_srng);
3050 		return 0;
3051 	}
3052 
3053 	hal_soc = soc->hal_soc;
3054 
3055 	qdf_assert(hal_soc);
3056 
3057 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
3058 		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
3059 			      soc, err_dst_srng);
3060 		return 0;
3061 	}
3062 
3063 	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
3064 		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
3065 
3066 			rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
3067 						rxdma_dst_ring_desc,
3068 						&head, &tail);
3069 	}
3070 
3071 	dp_srng_access_end(int_ctx, soc, err_dst_srng);
3072 
3073 	if (rx_bufs_used) {
3074 		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3075 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3076 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
3077 		} else {
3078 			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
3079 			rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
3080 		}
3081 
3082 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3083 			rx_desc_pool, rx_bufs_used, &head, &tail, false);
3084 
3085 		work_done += rx_bufs_used;
3086 	}
3087 
3088 	return work_done;
3089 }
3090 
3091 #ifndef QCA_HOST_MODE_WIFI_DISABLED
3092 
3093 static inline void
3094 dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
3095 			hal_rxdma_desc_t rxdma_dst_ring_desc,
3096 			union dp_rx_desc_list_elem_t **head,
3097 			union dp_rx_desc_list_elem_t **tail,
3098 			uint32_t *rx_bufs_used)
3099 {
3100 	void *rx_msdu_link_desc;
3101 	qdf_nbuf_t msdu;
3102 	qdf_nbuf_t last;
3103 	struct hal_rx_msdu_list msdu_list;
3104 	uint16_t num_msdus;
3105 	struct hal_buf_info buf_info;
3106 	uint32_t msdu_cnt, i;
3107 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
3108 	struct rx_desc_pool *rx_desc_pool;
3109 	struct dp_rx_desc *rx_desc;
3110 
3111 	msdu = 0;
3112 
3113 	last = NULL;
3114 
3115 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
3116 				     &buf_info, &msdu_cnt);
3117 
3118 	do {
3119 		rx_msdu_link_desc =
3120 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
3121 
3122 		if (!rx_msdu_link_desc) {
3123 			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
3124 			break;
3125 		}
3126 
3127 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
3128 				     &msdu_list, &num_msdus);
3129 
3130 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
3131 			for (i = 0; i < num_msdus; i++) {
3132 				if (!dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[i])) {
3133 					dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x",
3134 							  msdu_list.sw_cookie[i]);
3135 					continue;
3136 				}
3137 
3138 				rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3139 							soc,
3140 							msdu_list.sw_cookie[i]);
3141 				qdf_assert_always(rx_desc);
3142 				rx_desc_pool =
3143 					&soc->rx_desc_buf[rx_desc->pool_id];
3144 				msdu = rx_desc->nbuf;
3145 
3146 				/*
3147 				 * this is a unlikely scenario where the host is reaping
3148 				 * a descriptor which it already reaped just a while ago
3149 				 * but is yet to replenish it back to HW.
3150 				 */
3151 				if (qdf_unlikely(!rx_desc->in_use) ||
3152 				    qdf_unlikely(!msdu)) {
3153 					dp_rx_err_info_rl("Reaping rx_desc not in use!");
3154 					continue;
3155 				}
3156 
3157 				dp_ipa_rx_buf_smmu_mapping_lock(soc);
3158 				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
3159 				rx_desc->unmapped = 1;
3160 				dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3161 
3162 				dp_rx_buffer_pool_nbuf_free(soc, msdu,
3163 							    rx_desc->pool_id);
3164 				rx_bufs_used[rx_desc->pool_id]++;
3165 				dp_rx_add_to_free_desc_list(head,
3166 							    tail, rx_desc);
3167 			}
3168 		}
3169 
3170 		/*
3171 		 * Store the current link buffer into to the local structure
3172 		 * to be used for release purpose.
3173 		 */
3174 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3175 					     buf_info.paddr, buf_info.sw_cookie,
3176 					     buf_info.rbm);
3177 
3178 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3179 					      &buf_info);
3180 		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
3181 					rx_link_buf_info,
3182 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
3183 	} while (buf_info.paddr);
3184 }
3185 
3186 void
3187 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
3188 			     uint32_t buf_type)
3189 {
3190 	struct hal_buf_info buf_info = {0};
3191 	struct dp_rx_desc *rx_desc = NULL;
3192 	struct rx_desc_pool *rx_desc_pool;
3193 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0};
3194 	union dp_rx_desc_list_elem_t *head = NULL;
3195 	union dp_rx_desc_list_elem_t *tail = NULL;
3196 	uint8_t pool_id;
3197 	uint8_t mac_id;
3198 
3199 	hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info);
3200 
3201 	if (!buf_info.paddr) {
3202 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
3203 		return;
3204 	}
3205 
3206 	/* buffer_addr_info is the first element of ring_desc */
3207 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc,
3208 				  &buf_info);
3209 
3210 	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
3211 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
3212 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3213 							soc,
3214 							buf_info.sw_cookie);
3215 
3216 		if (rx_desc && rx_desc->nbuf) {
3217 			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
3218 			dp_ipa_rx_buf_smmu_mapping_lock(soc);
3219 			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
3220 					      rx_desc->nbuf);
3221 			rx_desc->unmapped = 1;
3222 			dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3223 
3224 			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
3225 						    rx_desc->pool_id);
3226 			dp_rx_add_to_free_desc_list(&head,
3227 						    &tail,
3228 						    rx_desc);
3229 
3230 			rx_bufs_reaped[rx_desc->pool_id]++;
3231 		}
3232 	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
3233 		pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie);
3234 
3235 		dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc,
3236 					&head, &tail, rx_bufs_reaped);
3237 	}
3238 
3239 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
3240 		struct rx_desc_pool *rx_desc_pool;
3241 		struct dp_srng *dp_rxdma_srng;
3242 
3243 		if (!rx_bufs_reaped[mac_id])
3244 			continue;
3245 
3246 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
3247 		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3248 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
3249 
3250 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3251 					rx_desc_pool,
3252 					rx_bufs_reaped[mac_id],
3253 					&head, &tail, false);
3254 	}
3255 }
3256 
3257 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3258