xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_hw_headers.h"
20 #include "dp_types.h"
21 #include "dp_rx.h"
22 #include "dp_peer.h"
23 #include "dp_internal.h"
24 #include "hal_api.h"
25 #include "qdf_trace.h"
26 #include "qdf_nbuf.h"
27 #include "dp_rx_defrag.h"
28 #include "dp_ipa.h"
29 #ifdef FEATURE_WDS
30 #include "dp_txrx_wds.h"
31 #endif
32 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
33 #include "qdf_net_types.h"
34 #include "dp_rx_buffer_pool.h"
35 
36 /* Max buffers in invalid peer SG list */
37 #define DP_MAX_INVALID_BUFFERS 10
38 
39 /**
40  * dp_rx_mcast_echo_check() - check if the mcast pkt is looped
41  *			      back on the same vap or a different vap.
42  *
43  * @soc: core DP main context
44  * @peer: dp peer handler
45  * @rx_tlv_hdr: start of the rx TLV header
46  * @nbuf: pkt buffer
47  *
48  * Return: bool (true if it is a looped back pkt else false)
49  *
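 * A minimal caller-side sketch (it mirrors how the return value is
 * consumed by dp_rx_null_q_desc_handle() later in this file):
 *
 *	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
 *		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
 *		goto drop_nbuf;		// looped back MCBC pkt, drop it
 *	}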
50  */
51 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
52 					struct dp_peer *peer,
53 					uint8_t *rx_tlv_hdr,
54 					qdf_nbuf_t nbuf)
55 {
56 	struct dp_vdev *vdev = peer->vdev;
57 	struct dp_ast_entry *ase = NULL;
58 	uint16_t sa_idx = 0;
59 	uint8_t *data;
60 
61 	/*
62 	 * Multicast Echo Check is required only if vdev is STA and
63 	 * received pkt is a multicast/broadcast pkt; otherwise
64 	 * skip the MEC check.
65 	 */
66 	if (vdev->opmode != wlan_op_mode_sta)
67 		return false;
68 
69 	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
70 		return false;
71 
72 	data = qdf_nbuf_data(nbuf);
73 	/*
74 	 * If the received pkt's src MAC addr matches the vdev
75 	 * MAC address, drop the pkt as it is looped back
76 	 */
77 	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
78 			vdev->mac_addr.raw,
79 			QDF_MAC_ADDR_SIZE)))
80 		return true;
81 
82 	/*
83 	 * In case of qwrap isolation mode, do not drop loopback packets.
84 	 * In isolation mode, all packets from the wired stations need to go
85 	 * to rootap and loop back to reach the wireless stations and
86 	 * vice-versa.
87 	 */
88 	if (qdf_unlikely(vdev->isolation_vdev))
89 		return false;
90 
91 	/* If the received pkt's src MAC addr matches the MAC addr of
92 	 * a wired PC that sits behind the STA, or of a wireless STA
93 	 * that sits behind the repeater, then drop the pkt as it is
94 	 * looped back
95 	 */
96 	qdf_spin_lock_bh(&soc->ast_lock);
97 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
98 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
99 
100 		if ((sa_idx < 0) ||
101 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
102 			qdf_spin_unlock_bh(&soc->ast_lock);
103 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
104 					"invalid sa_idx: %d", sa_idx);
105 			qdf_assert_always(0);
106 		}
107 
108 		ase = soc->ast_table[sa_idx];
109 		if (!ase) {
110 			/* We do not get a peer map event for the STA, and
111 			 * without this event we don't know the STA's sa_idx.
112 			 * For this reason the AST is still not associated
113 			 * with any index position in ast_table.
114 			 * In this kind of scenario, where sa is valid but the
115 			 * AST is not in ast_table, we use the below API to get
116 			 * the AST entry for the STA's own mac_address.
117 			 */
118 			ase = dp_peer_ast_hash_find_by_vdevid
119 				(soc, &data[QDF_MAC_ADDR_SIZE],
120 				 peer->vdev->vdev_id);
121 			if (ase) {
122 				ase->ast_idx = sa_idx;
123 				soc->ast_table[sa_idx] = ase;
124 				ase->is_mapped = TRUE;
125 			}
126 		}
127 	} else {
128 		ase = dp_peer_ast_hash_find_by_pdevid(soc,
129 						      &data[QDF_MAC_ADDR_SIZE],
130 						      vdev->pdev->pdev_id);
131 	}
132 
133 	if (ase) {
134 
135 		if (ase->pdev_id != vdev->pdev->pdev_id) {
136 			qdf_spin_unlock_bh(&soc->ast_lock);
137 			QDF_TRACE(QDF_MODULE_ID_DP,
138 				QDF_TRACE_LEVEL_INFO,
139 				"Detected DBDC Root AP "QDF_MAC_ADDR_FMT", %d %d",
140 				QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]),
141 				vdev->pdev->pdev_id,
142 				ase->pdev_id);
143 			return false;
144 		}
145 
146 		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
147 				(ase->peer_id != peer->peer_id)) {
148 			qdf_spin_unlock_bh(&soc->ast_lock);
149 			QDF_TRACE(QDF_MODULE_ID_DP,
150 				QDF_TRACE_LEVEL_INFO,
151 				"received pkt with same src mac "QDF_MAC_ADDR_FMT,
152 				QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));
153 
154 			return true;
155 		}
156 	}
157 	qdf_spin_unlock_bh(&soc->ast_lock);
158 	return false;
159 }
160 
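/**
 * dp_rx_link_desc_refill_duplicate_check() - check if the link descriptor
 *			being refilled is a duplicate of the last one refilled
 *
 * @soc: core DP main context
 * @buf_info: last refilled link descriptor buffer info, updated in place
 *	      with the address info taken from @ring_buf_info
 * @ring_buf_info: buffer address info of the current ring entry
 *
 * A duplicate refill only bumps the rx.err.dup_refill_link_desc counter;
 * the entry itself is not rejected here.
 *
 * Return: None
 */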
161 void dp_rx_link_desc_refill_duplicate_check(
162 				struct dp_soc *soc,
163 				struct hal_buf_info *buf_info,
164 				hal_buff_addrinfo_t ring_buf_info)
165 {
166 	struct hal_buf_info current_link_desc_buf_info = { 0 };
167 
168 	/* do duplicate link desc address check */
169 	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
170 					  &current_link_desc_buf_info);
171 	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
172 			 buf_info->paddr)) {
173 		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
174 			   current_link_desc_buf_info.paddr,
175 			   current_link_desc_buf_info.sw_cookie);
176 		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
177 	}
178 	*buf_info = current_link_desc_buf_info;
179 }
180 
181 /**
182  * dp_rx_link_desc_return_by_addr - Return an MPDU link descriptor to
183  *					HW (WBM) by address
184  *
185  * @soc: core DP main context
186  * @link_desc_addr: link descriptor addr
 * @bm_action: put in idle list or release to msdu_list
187  *
188  * Return: QDF_STATUS
189  */
190 QDF_STATUS
191 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
192 			       hal_buff_addrinfo_t link_desc_addr,
193 			       uint8_t bm_action)
194 {
195 	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
196 	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
197 	hal_soc_handle_t hal_soc = soc->hal_soc;
198 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
199 	void *src_srng_desc;
200 
201 	if (!wbm_rel_srng) {
202 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
203 			"WBM RELEASE RING not initialized");
204 		return status;
205 	}
206 
207 	/* do duplicate link desc address check */
208 	dp_rx_link_desc_refill_duplicate_check(
209 				soc,
210 				&soc->last_op_info.wbm_rel_link_desc,
211 				link_desc_addr);
212 
213 	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
214 
215 		/* TODO */
216 		/*
217 		 * Need API to convert from hal_ring pointer to
218 		 * Ring Type / Ring Id combo
219 		 */
220 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
221 			FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
222 			wbm_rel_srng);
223 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
224 		goto done;
225 	}
226 	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
227 	if (qdf_likely(src_srng_desc)) {
228 		/* Return link descriptor through WBM ring (SW2WBM)*/
229 		hal_rx_msdu_link_desc_set(hal_soc,
230 				src_srng_desc, link_desc_addr, bm_action);
231 		status = QDF_STATUS_SUCCESS;
232 	} else {
233 		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
234 
235 		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);
236 
237 		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
238 			   srng->ring_id,
239 			   soc->stats.rx.err.hal_ring_access_full_fail);
240 		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
241 			   *srng->u.src_ring.hp_addr,
242 			   srng->u.src_ring.reap_hp,
243 			   *srng->u.src_ring.tp_addr,
244 			   srng->u.src_ring.cached_tp);
245 		QDF_BUG(0);
246 	}
247 done:
248 	hal_srng_access_end(hal_soc, wbm_rel_srng);
249 	return status;
250 
251 }
252 
253 /**
254  * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
255  *				(WBM), following error handling
256  *
257  * @soc: core DP main context
258  * @ring_desc: opaque pointer to the REO error ring descriptor
259  * @bm_action: put in idle list or release to msdu_list
 *
260  * Return: QDF_STATUS
261  */
262 QDF_STATUS
263 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
264 		       uint8_t bm_action)
265 {
266 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
267 
268 	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
269 }
270 
271 /**
272  * dp_rx_msdus_drop() - Drops all MSDUs of an MPDU
273  *
274  * @soc: core txrx main context
275  * @ring_desc: opaque pointer to the REO error ring descriptor
276  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
277  * @mac_id: pointer to mac id, set to the pool id of the dropped
278  *	    buffers
279  * @quota: No. of units (packets) that can be serviced in one shot.
280  *
281  * This function is used to drop all MSDUs in an MPDU
282  *
283  * Return: uint32_t: No. of elements processed
284  */
285 static uint32_t
286 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
287 		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
288 		 uint8_t *mac_id,
289 		 uint32_t quota)
290 {
291 	uint32_t rx_bufs_used = 0;
292 	void *link_desc_va;
293 	struct hal_buf_info buf_info;
294 	struct dp_pdev *pdev;
295 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
296 	int i;
297 	uint8_t *rx_tlv_hdr;
298 	uint32_t tid;
299 	struct rx_desc_pool *rx_desc_pool;
300 	struct dp_rx_desc *rx_desc;
301 	/* First field in REO Dst ring Desc is buffer_addr_info */
302 	void *buf_addr_info = ring_desc;
303 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
304 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
305 
306 	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);
307 
308 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
309 
310 more_msdu_link_desc:
311 	/* No UNMAP required -- this is "malloc_consistent" memory */
312 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
313 			     &mpdu_desc_info->msdu_count);
314 
315 	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
316 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
317 						      msdu_list.sw_cookie[i]);
318 
319 		qdf_assert_always(rx_desc);
320 
321 		/* all buffers from an MSDU link desc belong to the same pdev */
322 		*mac_id = rx_desc->pool_id;
323 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
324 		if (!pdev) {
325 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
326 				  "pdev is null for pool_id = %d",
327 				  rx_desc->pool_id);
328 			return rx_bufs_used;
329 		}
330 
331 		if (!dp_rx_desc_check_magic(rx_desc)) {
332 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
333 					FL("Invalid rx_desc cookie=%d"),
334 					msdu_list.sw_cookie[i]);
335 			return rx_bufs_used;
336 		}
337 
338 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
339 		dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
340 						  rx_desc_pool->buf_size,
341 						  false);
342 		qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
343 					     QDF_DMA_FROM_DEVICE,
344 					     rx_desc_pool->buf_size);
345 		rx_desc->unmapped = 1;
346 
347 		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);
348 
349 		rx_bufs_used++;
350 		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
351 						rx_desc->rx_buf_start);
352 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
353 			"Packet received with PN error for tid :%d", tid);
354 
355 		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
356 		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
357 			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);
358 
359 		/* Just free the buffers */
360 		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);
361 
362 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
363 					    &pdev->free_list_tail, rx_desc);
364 	}
365 
366 	/*
367 	 * If the msdus are spread across multiple link descriptors,
368 	 * we cannot depend solely on the msdu_count (e.g., if an msdu
369 	 * is spread across multiple buffers). Hence, it is necessary
370 	 * to check the next link descriptor and release all the
371 	 * msdus that are part of it.
372 	 */
373 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
374 			link_desc_va,
375 			&next_link_desc_addr_info);
376 
377 	if (hal_rx_is_buf_addr_info_valid(
378 				&next_link_desc_addr_info)) {
379 		/* Clear the next link desc info for the current link_desc */
380 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
381 
382 		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
383 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
384 		hal_rx_buffer_addr_info_get_paddr(
385 				&next_link_desc_addr_info,
386 				&buf_info);
387 		cur_link_desc_addr_info = next_link_desc_addr_info;
388 		buf_addr_info = &cur_link_desc_addr_info;
389 
390 		link_desc_va =
391 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
392 
393 		goto more_msdu_link_desc;
394 	}
395 	quota--;
396 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
397 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
398 	return rx_bufs_used;
399 }
400 
401 /**
402  * dp_rx_pn_error_handle() - Handles PN check errors
403  *
404  * @soc: core txrx main context
405  * @ring_desc: opaque pointer to the REO error ring descriptor
406  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
407  * @mac_id: pointer to mac id, set to the pool id of the dropped
408  *	    buffers
409  * @quota: No. of units (packets) that can be serviced in one shot.
410  *
411  * This function implements PN error handling
412  * If the peer is configured to ignore the PN check errors,
413  * or if DP feels that this frame is still OK, the frame can be
414  * re-injected back to REO to use some of the other features
415  * of REO e.g. duplicate detection/routing to other cores
416  *
417  * Return: uint32_t: No. of elements processed
418  */
419 static uint32_t
420 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
421 		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
422 		      uint8_t *mac_id,
423 		      uint32_t quota)
424 {
425 	uint16_t peer_id;
426 	uint32_t rx_bufs_used = 0;
427 	struct dp_peer *peer;
428 	bool peer_pn_policy = false;
429 
430 	peer_id = DP_PEER_METADATA_PEER_ID_GET(
431 				mpdu_desc_info->peer_meta_data);
432 
433 
434 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
435 
436 	if (qdf_likely(peer)) {
437 		/*
438 		 * TODO: Check for peer specific policies & set peer_pn_policy
439 		 */
440 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
441 			"discard rx due to PN error for peer  %pK  "QDF_MAC_ADDR_FMT,
442 			peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
443 
444 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
445 	}
446 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
447 		"Packet received with PN error");
448 
449 	/* No peer PN policy -- definitely drop */
450 	if (!peer_pn_policy)
451 		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
452 						mpdu_desc_info,
453 						mac_id, quota);
454 
455 	return rx_bufs_used;
456 }
457 
458 /**
459  * dp_rx_oor_handle() - Handles an msdu received with REO OOR error
460  *
461  * @soc: core txrx main context
462  * @nbuf: pointer to msdu skb
463  * @rx_tlv_hdr: start of rx tlv header
464  * @mpdu_desc_info: pointer to mpdu level description info
465  * @peer_id: dp peer ID
466  * @tid: dp tid
467  *
468  * This function processes the msdu delivered from REO2TCL
469  * ring with error type OOR
470  *
471  * Return: None
472  */
473 static void
474 dp_rx_oor_handle(struct dp_soc *soc,
475 		 qdf_nbuf_t nbuf,
476 		 uint8_t *rx_tlv_hdr,
477 		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
478 		 uint16_t peer_id,
479 		 uint8_t tid)
480 {
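	/*
	 * Note: only special frames (ARP/DHCP/EAPOL, per frame_mask below)
	 * are considered for delivery to the stack on an OOR error; frames
	 * that are not delivered get freed under free_nbuf.
	 */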
481 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
482 				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
483 	struct dp_peer *peer = NULL;
484 
485 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
486 	if (!peer || tid >= DP_MAX_TIDS) {
487 		dp_info_rl("peer or tid %d not valid", tid);
488 		goto free_nbuf;
489 	}
490 
491 	/*
492 	 * For REO error 7 OOR, if it is retry frame under BA session,
493 	 * then it is likely SN duplicated frame, do not deliver EAPOL
494 	 * to stack in this case since the connection might fail due to
495 	 * duplicated EAP response.
496 	 */
497 	if (mpdu_desc_info->mpdu_flags & HAL_MPDU_F_RETRY_BIT &&
498 	    peer->rx_tid[tid].ba_status == DP_RX_BA_ACTIVE) {
499 		frame_mask &= ~FRAME_MASK_IPV4_EAPOL;
500 		DP_STATS_INC(soc, rx.err.reo_err_oor_eapol_drop, 1);
501 	}
502 
503 	if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask,
504 					rx_tlv_hdr)) {
505 		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
506 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
507 		return;
508 	}
509 
510 free_nbuf:
511 	if (peer)
512 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
513 
514 	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
515 	qdf_nbuf_free(nbuf);
516 }
517 
518 /**
519  * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
520  *
521  * @soc: core txrx main context
522  * @ring_desc: opaque pointer to the REO error ring descriptor
523  * @mpdu_desc_info: pointer to mpdu level description info
524  * @link_desc_va: pointer to msdu_link_desc virtual address
525  * @err_code: reo error code fetched from ring entry
526  *
527  * Function to handle msdus fetched from the msdu link desc; currently
528  * only 2K jump and OOR errors are supported.
529  *
530  * Return: msdu count processed.
531  */
532 static uint32_t
533 dp_rx_reo_err_entry_process(struct dp_soc *soc,
534 			    void *ring_desc,
535 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
536 			    void *link_desc_va,
537 			    enum hal_reo_error_code err_code)
538 {
539 	uint32_t rx_bufs_used = 0;
540 	struct dp_pdev *pdev;
541 	int i;
542 	uint8_t *rx_tlv_hdr_first;
543 	uint8_t *rx_tlv_hdr_last;
544 	uint32_t tid = DP_MAX_TIDS;
545 	uint16_t peer_id;
546 	struct dp_rx_desc *rx_desc;
547 	struct rx_desc_pool *rx_desc_pool;
548 	qdf_nbuf_t nbuf;
549 	struct hal_buf_info buf_info;
550 	struct hal_rx_msdu_list msdu_list;
551 	uint16_t num_msdus;
552 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
553 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
554 	/* First field in REO Dst ring Desc is buffer_addr_info */
555 	void *buf_addr_info = ring_desc;
556 	qdf_nbuf_t head_nbuf = NULL;
557 	qdf_nbuf_t tail_nbuf = NULL;
558 	uint16_t msdu_processed = 0;
559 
560 	peer_id = DP_PEER_METADATA_PEER_ID_GET(
561 					mpdu_desc_info->peer_meta_data);
562 
563 more_msdu_link_desc:
564 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
565 			     &num_msdus);
566 	for (i = 0; i < num_msdus; i++) {
567 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(
568 					soc,
569 					msdu_list.sw_cookie[i]);
570 
571 		qdf_assert_always(rx_desc);
572 
573 		/* all buffers from an MSDU link belong to the same pdev */
574 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
575 
576 		nbuf = rx_desc->nbuf;
577 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
578 		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
579 						  rx_desc_pool->buf_size,
580 						  false);
581 		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
582 					     QDF_DMA_FROM_DEVICE,
583 					     rx_desc_pool->buf_size);
584 		rx_desc->unmapped = 1;
585 
586 		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
587 		rx_bufs_used++;
588 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
589 					    &pdev->free_list_tail, rx_desc);
590 
591 		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);
592 
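		/*
		 * Keep chaining buffers on head_nbuf/tail_nbuf while the
		 * msdu continuation bit is set; when the msdu spans more
		 * than one buffer, dp_rx_sg_create() below stitches the
		 * chain into a single SG frame.
		 */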
593 		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
594 				 HAL_MSDU_F_MSDU_CONTINUATION))
595 			continue;
596 
597 		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
598 					     rx_desc->pool_id)) {
599 			/* MSDU queued back to the pool */
600 			goto process_next_msdu;
601 		}
602 
603 		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
604 		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);
605 
606 		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
607 			nbuf = dp_rx_sg_create(soc, head_nbuf);
608 			qdf_nbuf_set_is_frag(nbuf, 1);
609 			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
610 		}
611 
612 		/*
613 		 * Only the first msdu is expected to have a valid mpdu
614 		 * start description tlv; use it for the following msdus.
615 		 */
616 		if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
617 						   rx_tlv_hdr_last))
618 			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
619 							rx_tlv_hdr_first);
620 
621 		switch (err_code) {
622 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
623 			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
624 					  peer_id, tid);
625 			break;
626 
627 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
628 			dp_rx_oor_handle(soc, nbuf, rx_tlv_hdr_last,
629 					 mpdu_desc_info, peer_id, tid);
630 			break;
631 		default:
632 			dp_err_rl("Non-support error code %d", err_code);
633 			qdf_nbuf_free(nbuf);
634 		}
635 
636 process_next_msdu:
637 		msdu_processed++;
638 		head_nbuf = NULL;
639 		tail_nbuf = NULL;
640 	}
641 
642 	/*
643 	 * If the msdus are spread across multiple link descriptors,
644 	 * we cannot depend solely on the msdu_count (e.g., if an msdu
645 	 * is spread across multiple buffers). Hence, it is necessary
646 	 * to check the next link descriptor and release all the
647 	 * msdus that are part of it.
648 	 */
649 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
650 			link_desc_va,
651 			&next_link_desc_addr_info);
652 
653 	if (hal_rx_is_buf_addr_info_valid(
654 				&next_link_desc_addr_info)) {
655 		/* Clear the next link desc info for the current link_desc */
656 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
657 		dp_rx_link_desc_return_by_addr(
658 				soc,
659 				buf_addr_info,
660 				HAL_BM_ACTION_PUT_IN_IDLE_LIST);
661 
662 		hal_rx_buffer_addr_info_get_paddr(
663 				&next_link_desc_addr_info,
664 				&buf_info);
665 		link_desc_va =
666 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
667 		cur_link_desc_addr_info = next_link_desc_addr_info;
668 		buf_addr_info = &cur_link_desc_addr_info;
669 
670 		goto more_msdu_link_desc;
671 	}
672 
673 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
674 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
675 	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
676 		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);
677 
678 	return rx_bufs_used;
679 }
680 
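/*
 * DP_PDEV_INVALID_PEER_MSDU_CHECK: when DP_INVALID_PEER_ASSERT is defined,
 * assert that the invalid peer msdu list head/tail are still empty;
 * otherwise the check compiles away to a no-op.
 */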
681 #ifdef DP_INVALID_PEER_ASSERT
682 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
683 		do {                                \
684 			qdf_assert_always(!(head)); \
685 			qdf_assert_always(!(tail)); \
686 		} while (0)
687 #else
688 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
689 #endif
690 
691 /**
692  * dp_rx_chain_msdus() - Function to chain all msdus of an mpdu
693  *                       to pdev invalid peer list
694  *
695  * @soc: core DP main context
696  * @nbuf: Buffer pointer
697  * @rx_tlv_hdr: start of rx tlv header
698  * @mac_id: mac id
699  *
700  *  Return: bool: true for last msdu of mpdu
701  */
702 static bool
703 dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf,
704 		  uint8_t *rx_tlv_hdr, uint8_t mac_id)
705 {
706 	bool mpdu_done = false;
707 	qdf_nbuf_t curr_nbuf = NULL;
708 	qdf_nbuf_t tmp_nbuf = NULL;
709 
710 	/* TODO: Currently only single radio is supported, hence
711 	 * pdev hard coded to '0' index
712 	 */
713 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
714 
715 	if (!dp_pdev) {
716 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
717 			  "pdev is null for mac_id = %d", mac_id);
718 		return mpdu_done;
719 	}
720 	/* If the invalid peer SG list already holds the max number of
721 	 * buffers, free the buffers in the list and treat the current
722 	 * buffer as the start of a new list.
723 	 *
724 	 * The current logic to detect the last buffer from attn_tlv is not
725 	 * reliable in the OFDMA UL scenario; check max buffers to avoid list pile-up
726 	 */
727 	if (!dp_pdev->first_nbuf ||
728 	    (dp_pdev->invalid_peer_head_msdu &&
729 	    QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
730 	    (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
731 		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
732 		dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc,
733 						      rx_tlv_hdr);
734 		dp_pdev->first_nbuf = true;
735 
736 		/* If the new nbuf received is the first msdu of the
737 		 * amsdu and there are msdus in the invalid peer msdu
738 		 * list, then let us free all the msdus of the invalid
739 		 * peer msdu list.
740 		 * This scenario can happen when we start receiving
741 		 * a new a-msdu even before the previous a-msdu is completely
742 		 * received.
743 		 */
744 		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
745 		while (curr_nbuf) {
746 			tmp_nbuf = curr_nbuf->next;
747 			qdf_nbuf_free(curr_nbuf);
748 			curr_nbuf = tmp_nbuf;
749 		}
750 
751 		dp_pdev->invalid_peer_head_msdu = NULL;
752 		dp_pdev->invalid_peer_tail_msdu = NULL;
753 		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
754 				&(dp_pdev->ppdu_info.rx_status));
755 
756 	}
757 
758 	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
759 	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
760 		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
761 		qdf_assert_always(dp_pdev->first_nbuf == true);
762 		dp_pdev->first_nbuf = false;
763 		mpdu_done = true;
764 	}
765 
766 	/*
767 	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
768 	 * should be NULL here; the check is added for debugging purposes
769 	 * to catch any corner case.
770 	 */
771 	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
772 					dp_pdev->invalid_peer_tail_msdu);
773 	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
774 				dp_pdev->invalid_peer_tail_msdu,
775 				nbuf);
776 
777 	return mpdu_done;
778 }
779 
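/**
 * dp_rx_err_handle_bar() - handle an erroneously received BAR frame by
 *			    moving the rx TID window to the BAR start
 *			    sequence number
 *
 * @soc: core DP main context
 * @peer: dp peer handle
 * @nbuf: buffer holding the BAR frame (rx TLVs followed by the 802.11 header)
 *
 * Return: None
 */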
780 static
781 void dp_rx_err_handle_bar(struct dp_soc *soc,
782 			  struct dp_peer *peer,
783 			  qdf_nbuf_t nbuf)
784 {
785 	uint8_t *rx_tlv_hdr;
786 	unsigned char type, subtype;
787 	uint16_t start_seq_num;
788 	uint32_t tid;
789 	struct ieee80211_frame_bar *bar;
790 
791 	/*
792 	 * 1. Is this a BAR frame? If not, discard it.
793 	 * 2. If it is, get the peer id, tid, ssn
794 	 * 2a Do a tid update
795 	 */
796 
797 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
798 	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + SIZE_OF_DATA_RX_TLV);
799 
800 	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
801 	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
802 
803 	if (!(type == IEEE80211_FC0_TYPE_CTL &&
804 	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
805 		dp_err_rl("Not a BAR frame!");
806 		return;
807 	}
808 
809 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
810 	qdf_assert_always(tid < DP_MAX_TIDS);
811 
812 	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
813 
814 	dp_info_rl("tid %u window_size %u start_seq_num %u",
815 		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);
816 
817 	dp_rx_tid_update_wifi3(peer, tid,
818 			       peer->rx_tid[tid].ba_win_size,
819 			       start_seq_num);
820 }
821 
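/**
 * dp_rx_bar_frame_handle() - handle a BAR frame delivered on the REO
 *			      error ring
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @rx_desc: host rx descriptor holding the BAR frame
 * @mpdu_desc_info: MPDU descriptor information from the ring descriptor
 * @error: REO error code fetched from the ring entry
 *
 * Unmaps the buffer, invokes dp_rx_err_handle_bar() for 2K-jump/OOR BAR
 * errors, then returns the link descriptor and frees the buffer.
 *
 * Return: None
 */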
822 static void
823 dp_rx_bar_frame_handle(struct dp_soc *soc,
824 		       hal_ring_desc_t ring_desc,
825 		       struct dp_rx_desc *rx_desc,
826 		       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
827 		       uint8_t error)
828 {
829 	qdf_nbuf_t nbuf;
830 	struct dp_pdev *pdev;
831 	struct dp_peer *peer;
832 	struct rx_desc_pool *rx_desc_pool;
833 	uint16_t peer_id;
834 	uint8_t *rx_tlv_hdr;
835 	uint32_t tid;
836 
837 	nbuf = rx_desc->nbuf;
838 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
839 	dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
840 					  rx_desc_pool->buf_size,
841 					  false);
842 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
843 				     QDF_DMA_FROM_DEVICE,
844 				     rx_desc_pool->buf_size);
845 	rx_desc->unmapped = 1;
846 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
847 	peer_id =
848 		hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
849 						rx_tlv_hdr);
850 	peer = dp_peer_get_ref_by_id(soc, peer_id,
851 				     DP_MOD_ID_RX_ERR);
852 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
853 					rx_tlv_hdr);
854 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
855 
856 	if (!peer)
857 		goto next;
858 
859 	dp_info("BAR frame: peer = "QDF_MAC_ADDR_FMT
860 		" peer_id = %d"
861 		" tid = %u"
862 		" SSN = %d"
863 		" error status = %d",
864 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
865 		peer->peer_id,
866 		tid,
867 		mpdu_desc_info->mpdu_seq,
868 		error);
869 
870 	switch (error) {
871 	case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
872 		/* fallthrough */
873 	case HAL_REO_ERR_BAR_FRAME_OOR:
874 		dp_rx_err_handle_bar(soc, peer, nbuf);
875 		DP_STATS_INC(soc,
876 			     rx.err.reo_error[error], 1);
877 		break;
878 	default:
879 		DP_STATS_INC(soc, rx.bar_frame, 1);
880 	}
881 
882 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
883 next:
884 	dp_rx_link_desc_return(soc, ring_desc,
885 			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
886 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
887 				    &pdev->free_list_tail,
888 				    rx_desc);
889 	qdf_nbuf_free(nbuf);
890 }
891 
892 /**
893  * dp_2k_jump_handle() - Function to handle 2k jump exception
894  *                        on WBM ring
895  *
896  * @soc: core DP main context
897  * @nbuf: buffer pointer
898  * @rx_tlv_hdr: start of rx tlv header
899  * @peer_id: peer id of first msdu
900  * @tid: Tid for which exception occurred
901  *
902  * This function handles 2k jump violations arising out
903  * of receiving aggregates in the non-BA case. This typically
904  * may happen if aggregates are received on a QoS-enabled TID
905  * while the Rx window size is still initialized to a value of 2.
906  * It may also happen if the negotiated window size is 1 but the
907  * peer sends aggregates.
908  *
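 * Return: None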
909  */
910 
911 void
912 dp_2k_jump_handle(struct dp_soc *soc,
913 		  qdf_nbuf_t nbuf,
914 		  uint8_t *rx_tlv_hdr,
915 		  uint16_t peer_id,
916 		  uint8_t tid)
917 {
918 	struct dp_peer *peer = NULL;
919 	struct dp_rx_tid *rx_tid = NULL;
920 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;
921 
922 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
923 	if (!peer) {
924 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
925 			  "peer not found");
926 		goto free_nbuf;
927 	}
928 
929 	if (tid >= DP_MAX_TIDS) {
930 		dp_info_rl("invalid tid");
931 		goto nbuf_deliver;
932 	}
933 
934 	rx_tid = &peer->rx_tid[tid];
935 	qdf_spin_lock_bh(&rx_tid->tid_lock);
936 
937 	/* only if BA session is active, allow send Delba */
938 	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
939 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
940 		goto nbuf_deliver;
941 	}
942 
943 	if (!rx_tid->delba_tx_status) {
944 		rx_tid->delba_tx_retry++;
945 		rx_tid->delba_tx_status = 1;
946 		rx_tid->delba_rcode =
947 			IEEE80211_REASON_QOS_SETUP_REQUIRED;
948 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
949 		if (soc->cdp_soc.ol_ops->send_delba) {
950 			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent, 1);
951 			soc->cdp_soc.ol_ops->send_delba(
952 					peer->vdev->pdev->soc->ctrl_psoc,
953 					peer->vdev->vdev_id,
954 					peer->mac_addr.raw,
955 					tid,
956 					rx_tid->delba_rcode);
957 		}
958 	} else {
959 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
960 	}
961 
962 nbuf_deliver:
963 	if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask,
964 					rx_tlv_hdr)) {
965 		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
966 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
967 		return;
968 	}
969 
970 free_nbuf:
971 	if (peer)
972 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
973 	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
974 	qdf_nbuf_free(nbuf);
975 }
976 
977 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
978     defined(QCA_WIFI_QCA6750)
979 /**
980  * dp_rx_null_q_handle_invalid_peer_id_exception() - handle an invalid peer id exception
981  * @soc: pointer to dp_soc struct
982  * @pool_id: Pool id to find dp_pdev
983  * @rx_tlv_hdr: TLV header of received packet
984  * @nbuf: SKB
985  *
986  * In certain types of packets, if the peer_id is not correct the
987  * driver may not be able to find the peer. Try finding the peer by
988  * addr_2 of the received MPDU. If the peer is found, then most
989  * likely the sw_peer_id & ast_idx are corrupted.
990  *
991  * Return: true if the peer is found by addr_2 of the received MPDU, else false
992  */
993 static bool
994 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
995 					      uint8_t pool_id,
996 					      uint8_t *rx_tlv_hdr,
997 					      qdf_nbuf_t nbuf)
998 {
999 	struct dp_peer *peer = NULL;
1000 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
1001 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
1002 	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;
1003 
1004 	if (!pdev) {
1005 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1006 			  "pdev is null for pool_id = %d", pool_id);
1007 		return false;
1008 	}
1009 	/*
1010 	 * WAR: In certain types of packets, if the peer_id is not correct
1011 	 * the driver may not be able to find the peer. Try finding the
1012 	 * peer by addr_2 of the received MPDU
1013 	 */
1014 	if (wh)
1015 		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
1016 					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
1017 	if (peer) {
1018 		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
1019 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1020 				     QDF_TRACE_LEVEL_DEBUG);
1021 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
1022 				 1, qdf_nbuf_len(nbuf));
1023 		qdf_nbuf_free(nbuf);
1024 
1025 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
1026 		return true;
1027 	}
1028 	return false;
1029 }
1030 
1031 /**
1032  * dp_rx_check_pkt_len() - Check for pktlen validity
1033  * @soc: DP SOC context
1034  * @pkt_len: computed length of the pkt from caller in bytes
1035  *
1036  * Return: true if pkt_len > RX_DATA_BUFFER_SIZE, else false
1037  *
1038  */
1039 static inline
1040 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
1041 {
1042 	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
1043 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
1044 				 1, pkt_len);
1045 		return true;
1046 	} else {
1047 		return false;
1048 	}
1049 }
1050 
1051 #else
1052 static inline bool
1053 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
1054 					      uint8_t pool_id,
1055 					      uint8_t *rx_tlv_hdr,
1056 					      qdf_nbuf_t nbuf)
1057 {
1058 	return false;
1059 }
1060 
1061 static inline
1062 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
1063 {
1064 	return false;
1065 }
1066 
1067 #endif
1068 
1069 /**
1070  * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
1071  *                              descriptor violation on either a
1072  *                              REO or WBM ring
1073  *
1074  * @soc: core DP main context
1075  * @nbuf: buffer pointer
1076  * @rx_tlv_hdr: start of rx tlv header
1077  * @pool_id: mac id
1078  * @peer: peer handle
1079  *
1080  * This function handles NULL queue descriptor violations arising out
1081  * This function handles NULL queue descriptor violations arising out
1082  * of a missing REO queue for a given peer or a given TID. This typically
1083  * ADDBA negotiation for that TID, when the TID queue is setup. Or
1084  * it may also happen for MC/BC frames if they are not routed to the
1085  * non-QOS TID queue, in the absence of any other default TID queue.
1086  * This error can show up both in a REO destination or WBM release ring.
1087  *
1088  * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
1089  *         if nbuf could not be handled or dropped.
1090  */
1091 static QDF_STATUS
1092 dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
1093 			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
1094 			 struct dp_peer *peer)
1095 {
1096 	uint32_t pkt_len;
1097 	uint16_t msdu_len;
1098 	struct dp_vdev *vdev;
1099 	uint8_t tid;
1100 	qdf_ether_header_t *eh;
1101 	struct hal_rx_msdu_metadata msdu_metadata;
1102 	uint16_t sa_idx = 0;
1103 
1104 	qdf_nbuf_set_rx_chfrag_start(nbuf,
1105 				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1106 							       rx_tlv_hdr));
1107 	qdf_nbuf_set_rx_chfrag_end(nbuf,
1108 				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
1109 								 rx_tlv_hdr));
1110 	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1111 								  rx_tlv_hdr));
1112 	qdf_nbuf_set_da_valid(nbuf,
1113 			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
1114 							      rx_tlv_hdr));
1115 	qdf_nbuf_set_sa_valid(nbuf,
1116 			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
1117 							      rx_tlv_hdr));
1118 
1119 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
1120 	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
1121 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + RX_PKT_TLVS_LEN;
1122 
1123 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
1124 		if (dp_rx_check_pkt_len(soc, pkt_len))
1125 			goto drop_nbuf;
1126 
1127 		/* Set length in nbuf */
1128 		qdf_nbuf_set_pktlen(
1129 			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
1130 		qdf_assert_always(nbuf->data == rx_tlv_hdr);
1131 	}
1132 
1133 	/*
1134 	 * Check if DMA completed -- msdu_done is the last bit
1135 	 * to be written
1136 	 */
1137 	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
1138 
1139 		dp_err_rl("MSDU DONE failure");
1140 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1141 				     QDF_TRACE_LEVEL_INFO);
1142 		qdf_assert(0);
1143 	}
1144 
1145 	if (!peer &&
1146 	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
1147 							  rx_tlv_hdr, nbuf))
1148 		return QDF_STATUS_E_FAILURE;
1149 
1150 	if (!peer) {
1151 		bool mpdu_done = false;
1152 		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
1153 
1154 		if (!pdev) {
1155 			dp_err_rl("pdev is null for pool_id = %d", pool_id);
1156 			return QDF_STATUS_E_FAILURE;
1157 		}
1158 
1159 		dp_err_rl("peer is NULL");
1160 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1161 				 qdf_nbuf_len(nbuf));
1162 
1163 		/* QCN9000 has the support enabled */
1164 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
1165 			mpdu_done = true;
1166 			nbuf->next = NULL;
1167 			/* Trigger invalid peer handler wrapper */
1168 			dp_rx_process_invalid_peer_wrapper(soc,
1169 					nbuf, mpdu_done, pool_id);
1170 		} else {
1171 			mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
1172 			/* Trigger invalid peer handler wrapper */
1173 			dp_rx_process_invalid_peer_wrapper(soc,
1174 					pdev->invalid_peer_head_msdu,
1175 					mpdu_done, pool_id);
1176 		}
1177 
1178 		if (mpdu_done) {
1179 			pdev->invalid_peer_head_msdu = NULL;
1180 			pdev->invalid_peer_tail_msdu = NULL;
1181 		}
1182 
1183 		return QDF_STATUS_E_FAILURE;
1184 	}
1185 
1186 	vdev = peer->vdev;
1187 	if (!vdev) {
1188 		dp_err_rl("Null vdev!");
1189 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1190 		goto drop_nbuf;
1191 	}
1192 
1193 	/*
1194 	 * Advance the packet start pointer by total size of
1195 	 * pre-header TLV's
1196 	 */
1197 	if (qdf_nbuf_is_frag(nbuf))
1198 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1199 	else
1200 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
1201 				   RX_PKT_TLVS_LEN));
1202 
1203 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
1204 
1205 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
1206 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
1207 
1208 		if ((sa_idx < 0) ||
1209 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
1210 			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
1211 			goto drop_nbuf;
1212 		}
1213 	}
1214 
1215 	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
1216 		/* this is a looped back MCBC pkt, drop it */
1217 		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
1218 		goto drop_nbuf;
1219 	}
1220 
1221 	/*
1222 	 * In qwrap mode if the received packet matches with any of the vdev
1223 	 * mac addresses, drop it. Do not receive multicast packets originating
1224 	 * from any proxysta.
1225 	 */
1226 	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
1227 		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
1228 		goto drop_nbuf;
1229 	}
1230 
1231 
1232 	if (qdf_unlikely((peer->nawds_enabled == true) &&
1233 			hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1234 						       rx_tlv_hdr))) {
1235 		dp_err_rl("free buffer for multicast packet");
1236 		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
1237 		goto drop_nbuf;
1238 	}
1239 
1240 	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
1241 		dp_err_rl("mcast Policy Check Drop pkt");
1242 		goto drop_nbuf;
1243 	}
1244 	/* WDS Source Port Learning */
1245 	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
1246 		vdev->wds_enabled))
1247 		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf,
1248 					msdu_metadata);
1249 
1250 	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
1251 		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
1252 		if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
1253 			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
1254 			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
1255 	}
1256 
1257 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1258 		qdf_nbuf_set_next(nbuf, NULL);
1259 		dp_rx_deliver_raw(vdev, nbuf, peer);
1260 	} else {
1261 		qdf_nbuf_set_next(nbuf, NULL);
1262 		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
1263 				 qdf_nbuf_len(nbuf));
1264 
1265 		/*
1266 		 * Update the protocol tag in SKB based on
1267 		 * CCE metadata
1268 		 */
1269 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1270 					  EXCEPTION_DEST_RING_ID,
1271 					  true, true);
1272 
1273 		/* Update the flow tag in SKB based on FSE metadata */
1274 		dp_rx_update_flow_tag(soc, vdev, nbuf,
1275 				      rx_tlv_hdr, true);
1276 
1277 		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
1278 				 soc->hal_soc, rx_tlv_hdr) &&
1279 				 (vdev->rx_decap_type ==
1280 				  htt_cmn_pkt_type_ethernet))) {
1281 			eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1282 			DP_STATS_INC_PKT(peer, rx.multicast, 1,
1283 					 qdf_nbuf_len(nbuf));
1284 
1285 			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
1286 				DP_STATS_INC_PKT(peer, rx.bcast, 1,
1287 						 qdf_nbuf_len(nbuf));
1288 		}
1289 
1290 		qdf_nbuf_set_exc_frame(nbuf, 1);
1291 		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
1292 	}
1293 	return QDF_STATUS_SUCCESS;
1294 
1295 drop_nbuf:
1296 	qdf_nbuf_free(nbuf);
1297 	return QDF_STATUS_E_FAILURE;
1298 }
1299 
1300 /**
1301  * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
1302  *			       or wifi parse error frames to OS
1303  * @soc: core DP main context
1304  * @nbuf: buffer pointer
1305  * @rx_tlv_hdr: start of rx tlv header
1306  * @peer: peer reference
1307  * @err_code: rxdma err code
1308  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1309  * pool_id have the same mapping)
1310  *
1311  * Return: None
1312  */
1313 void
1314 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
1315 			uint8_t *rx_tlv_hdr, struct dp_peer *peer,
1316 			uint8_t err_code, uint8_t mac_id)
1317 {
1318 	uint32_t pkt_len, l2_hdr_offset;
1319 	uint16_t msdu_len;
1320 	struct dp_vdev *vdev;
1321 	qdf_ether_header_t *eh;
1322 	bool is_broadcast;
1323 
1324 	/*
1325 	 * Check if DMA completed -- msdu_done is the last bit
1326 	 * to be written
1327 	 */
1328 	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
1329 
1330 		dp_err_rl("MSDU DONE failure");
1331 
1332 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1333 				     QDF_TRACE_LEVEL_INFO);
1334 		qdf_assert(0);
1335 	}
1336 
1337 	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
1338 							   rx_tlv_hdr);
1339 	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
1340 	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
1341 
1342 	if (dp_rx_check_pkt_len(soc, pkt_len)) {
1343 		/* Drop & free packet */
1344 		qdf_nbuf_free(nbuf);
1345 		return;
1346 	}
1347 	/* Set length in nbuf */
1348 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
1349 
1350 	qdf_nbuf_set_next(nbuf, NULL);
1351 
1352 	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1353 	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
1354 
1355 	if (!peer) {
1356 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
1357 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1358 				qdf_nbuf_len(nbuf));
1359 		/* Trigger invalid peer handler wrapper */
1360 		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
1361 		return;
1362 	}
1363 
1364 	vdev = peer->vdev;
1365 	if (!vdev) {
1366 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1367 				FL("INVALID vdev %pK OR osif_rx"), vdev);
1368 		/* Drop & free packet */
1369 		qdf_nbuf_free(nbuf);
1370 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1371 		return;
1372 	}
1373 
1374 	/*
1375 	 * Advance the packet start pointer by total size of
1376 	 * pre-header TLV's
1377 	 */
1378 	dp_rx_skip_tlvs(nbuf, l2_hdr_offset);
1379 
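	/*
	 * For WIFI_PARSE errors, peek at the ethertype right after the two
	 * MAC addresses: VLAN-tagged STP frames are counted and taken
	 * through the mesh handling path, any other VLAN-tagged frame is
	 * handed to the regular rx delivery path.
	 */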
1380 	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
1381 		uint8_t *pkt_type;
1382 
1383 		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
1384 		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
1385 			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
1386 							htons(QDF_LLC_STP)) {
1387 				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
1388 				goto process_mesh;
1389 			} else {
1390 				goto process_rx;
1391 			}
1392 		}
1393 	}
1394 	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
1395 		goto process_mesh;
1396 
1397 	/*
1398 	 * WAPI cert AP sends rekey frames as unencrypted.
1399 	 * Thus RXDMA will report unencrypted frame error.
1400 	 * To pass WAPI cert case, SW needs to pass unencrypted
1401 	 * rekey frame to stack.
1402 	 */
1403 	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1404 		goto process_rx;
1405 	}
1406 	/*
1407 	 * In the dynamic WEP case rekey frames are not encrypted,
1408 	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled and
1409 	 * the key install is already done
1410 	 */
1411 	if ((vdev->sec_type == cdp_sec_type_wep104) &&
1412 	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
1413 		goto process_rx;
1414 
1415 process_mesh:
1416 
1417 	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
1418 		qdf_nbuf_free(nbuf);
1419 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1420 		return;
1421 	}
1422 
1423 	if (vdev->mesh_vdev) {
1424 		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
1425 				      == QDF_STATUS_SUCCESS) {
1426 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
1427 				  FL("mesh pkt filtered"));
1428 			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);
1429 
1430 			qdf_nbuf_free(nbuf);
1431 			return;
1432 		}
1433 		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
1434 	}
1435 process_rx:
1436 	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1437 							rx_tlv_hdr) &&
1438 		(vdev->rx_decap_type ==
1439 				htt_cmn_pkt_type_ethernet))) {
1440 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1441 		is_broadcast = (QDF_IS_ADDR_BROADCAST
1442 				(eh->ether_dhost)) ? 1 : 0 ;
1443 		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
1444 		if (is_broadcast) {
1445 			DP_STATS_INC_PKT(peer, rx.bcast, 1,
1446 					qdf_nbuf_len(nbuf));
1447 		}
1448 	}
1449 
1450 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1451 		dp_rx_deliver_raw(vdev, nbuf, peer);
1452 	} else {
1453 		/* Update the protocol tag in SKB based on CCE metadata */
1454 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1455 					  EXCEPTION_DEST_RING_ID, true, true);
1456 		/* Update the flow tag in SKB based on FSE metadata */
1457 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
1458 		DP_STATS_INC(peer, rx.to_stack.num, 1);
1459 		qdf_nbuf_set_exc_frame(nbuf, 1);
1460 		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
1461 	}
1462 
1463 	return;
1464 }
1465 
1466 /**
1467  * dp_rx_process_mic_error(): Function to pass mic error indication to umac
1468  * @soc: core DP main context
1469  * @nbuf: buffer pointer
1470  * @rx_tlv_hdr: start of rx tlv header
1471  * @peer: peer handle
1472  *
1473  * Return: void
1474  */
1475 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
1476 			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
1477 {
1478 	struct dp_vdev *vdev = NULL;
1479 	struct dp_pdev *pdev = NULL;
1480 	struct ol_if_ops *tops = NULL;
1481 	uint16_t rx_seq, fragno;
1482 	uint8_t is_raw;
1483 	unsigned int tid;
1484 	QDF_STATUS status;
1485 	struct cdp_rx_mic_err_info mic_failure_info;
1486 
1487 	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1488 					    rx_tlv_hdr))
1489 		return;
1490 
1491 	if (!peer) {
1492 		dp_info_rl("peer not found");
1493 		goto fail;
1494 	}
1495 
1496 	vdev = peer->vdev;
1497 	if (!vdev) {
1498 		dp_info_rl("VDEV not found");
1499 		goto fail;
1500 	}
1501 
1502 	pdev = vdev->pdev;
1503 	if (!pdev) {
1504 		dp_info_rl("PDEV not found");
1505 		goto fail;
1506 	}
1507 
1508 	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
1509 	if (is_raw) {
1510 		fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
1511 		/* Can get only last fragment */
1512 		if (fragno) {
1513 			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
1514 							qdf_nbuf_data(nbuf));
1515 			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
1516 							qdf_nbuf_data(nbuf));
1517 
1518 			status = dp_rx_defrag_add_last_frag(soc, peer,
1519 							    tid, rx_seq, nbuf);
1520 			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
1521 				   "status %d !", rx_seq, fragno, status);
1522 			return;
1523 		}
1524 	}
1525 
1526 	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
1527 				  &mic_failure_info.da_mac_addr.bytes[0])) {
1528 		dp_err_rl("Failed to get da_mac_addr");
1529 		goto fail;
1530 	}
1531 
1532 	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
1533 				  &mic_failure_info.ta_mac_addr.bytes[0])) {
1534 		dp_err_rl("Failed to get ta_mac_addr");
1535 		goto fail;
1536 	}
1537 
1538 	mic_failure_info.key_id = 0;
1539 	mic_failure_info.multicast =
1540 		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
1541 	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
1542 	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
1543 	mic_failure_info.data = NULL;
1544 	mic_failure_info.vdev_id = vdev->vdev_id;
1545 
1546 	tops = pdev->soc->cdp_soc.ol_ops;
1547 	if (tops->rx_mic_error)
1548 		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
1549 				   &mic_failure_info);
1550 
1551 fail:
1552 	qdf_nbuf_free(nbuf);
1553 	return;
1554 }
1555 
1556 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
1557 /**
1558  * dp_rx_link_cookie_check() - Validate link desc cookie
1559  * @ring_desc: ring descriptor
1560  *
1561  * Return: qdf status
1562  */
1563 static inline QDF_STATUS
1564 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1565 {
1566 	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
1567 		return QDF_STATUS_E_FAILURE;
1568 
1569 	return QDF_STATUS_SUCCESS;
1570 }
1571 
1572 /**
1573  * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
1574  * @ring_desc: ring descriptor
1575  *
1576  * Return: None
1577  */
1578 static inline void
1579 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1580 {
1581 	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
1582 }
1583 #else
1584 static inline QDF_STATUS
1585 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1586 {
1587 	return QDF_STATUS_SUCCESS;
1588 }
1589 
1590 static inline void
1591 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1592 {
1593 }
1594 #endif
1595 
1596 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1597 /**
1598  * dp_rx_err_ring_record_entry() - Record rx err ring history
1599  * @soc: Datapath soc structure
1600  * @paddr: paddr of the buffer in RX err ring
1601  * @sw_cookie: SW cookie of the buffer in RX err ring
1602  * @rbm: Return buffer manager of the buffer in RX err ring
1603  *
1604  * Return: None
1605  */
1606 static inline void
1607 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1608 			    uint32_t sw_cookie, uint8_t rbm)
1609 {
1610 	struct dp_buf_info_record *record;
1611 	uint32_t idx;
1612 
1613 	if (qdf_unlikely(!soc->rx_err_ring_history))
1614 		return;
1615 
1616 	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
1617 					DP_RX_ERR_HIST_MAX);
1618 
1619 	/* No NULL check needed for record since it's an array */
1620 	record = &soc->rx_err_ring_history->entry[idx];
1621 
1622 	record->timestamp = qdf_get_log_timestamp();
1623 	record->hbi.paddr = paddr;
1624 	record->hbi.sw_cookie = sw_cookie;
1625 	record->hbi.rbm = rbm;
1626 }
1627 #else
1628 static inline void
1629 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1630 			    uint32_t sw_cookie, uint8_t rbm)
1631 {
1632 }
1633 #endif
1634 
1635 uint32_t
1636 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
1637 		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
1638 {
1639 	hal_ring_desc_t ring_desc;
1640 	hal_soc_handle_t hal_soc;
1641 	uint32_t count = 0;
1642 	uint32_t rx_bufs_used = 0;
1643 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
1644 	uint8_t mac_id = 0;
1645 	uint8_t buf_type;
1646 	uint8_t error, rbm;
1647 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
1648 	struct hal_buf_info hbi;
1649 	struct dp_pdev *dp_pdev;
1650 	struct dp_srng *dp_rxdma_srng;
1651 	struct rx_desc_pool *rx_desc_pool;
1652 	uint32_t cookie = 0;
1653 	void *link_desc_va;
1654 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
1655 	uint16_t num_msdus;
1656 	struct dp_rx_desc *rx_desc = NULL;
1657 	QDF_STATUS status;
1658 	bool ret;
1659 
1660 	/* Debug -- Remove later */
1661 	qdf_assert(soc && hal_ring_hdl);
1662 
1663 	hal_soc = soc->hal_soc;
1664 
1665 	/* Debug -- Remove later */
1666 	qdf_assert(hal_soc);
1667 
1668 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
1669 
1670 		/* TODO */
1671 		/*
1672 		 * Need API to convert from hal_ring pointer to
1673 		 * Ring Type / Ring Id combo
1674 		 */
1675 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
1676 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1677 			FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
1678 		goto done;
1679 	}
1680 
1681 	while (qdf_likely(quota-- && (ring_desc =
1682 				hal_srng_dst_peek(hal_soc,
1683 						  hal_ring_hdl)))) {
1684 
1685 		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
1686 
1687 		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
1688 
1689 		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
1690 
1691 		/* Get the MPDU DESC info */
1692 		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
1693 
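		/* Skip ring entries that do not carry any msdus */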
1694 		if (mpdu_desc_info.msdu_count == 0)
1695 			goto next_entry;
1696 
1697 		/*
1698 		 * For REO error ring, expect only MSDU LINK DESC
1699 		 */
1700 		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);
1701 
1702 		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
1703 		/*
1704 		 * check for the magic number in the sw cookie
1705 		 */
1706 		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
1707 							LINK_DESC_ID_START);
1708 
1709 		status = dp_rx_link_cookie_check(ring_desc);
1710 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
1711 			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
1712 			break;
1713 		}
1714 
1715 		/*
1716 		 * Check if the buffer is to be processed on this processor
1717 		 */
1718 		rbm = hal_rx_ret_buf_manager_get(ring_desc);
1719 
1720 		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
1721 		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
1722 		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
1723 				     &num_msdus);
1724 		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
1725 					    msdu_list.sw_cookie[0],
1726 					    msdu_list.rbm[0]);
1727 		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
1728 				(msdu_list.rbm[0] !=
1729 					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST) &&
1730 				(msdu_list.rbm[0] != DP_DEFRAG_RBM))) {
1731 			/* TODO */
1732 			/* Call appropriate handler */
1733 			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
1734 				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
1735 				QDF_TRACE(QDF_MODULE_ID_DP,
1736 					  QDF_TRACE_LEVEL_ERROR,
1737 					  FL("Invalid RBM %d"),
1738 					     msdu_list.rbm[0]);
1739 			}
1740 
1741 			/* Return link descriptor through WBM ring (SW2WBM)*/
1742 			dp_rx_link_desc_return(soc, ring_desc,
1743 					HAL_BM_ACTION_RELEASE_MSDU_LIST);
1744 			goto next_entry;
1745 		}
1746 
1747 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
1748 						      msdu_list.sw_cookie[0]);
1749 		qdf_assert_always(rx_desc);
1750 
1751 		mac_id = rx_desc->pool_id;
1752 
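		/*
		 * BAR frames are expected to carry exactly one MSDU
		 * (asserted below) and are handed to the dedicated BAR
		 * frame handler instead of the generic REO error handling
		 * further down.
		 */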
1753 		if (mpdu_desc_info.bar_frame) {
1754 			qdf_assert_always(mpdu_desc_info.msdu_count == 1);
1755 
1756 			dp_rx_bar_frame_handle(soc,
1757 					       ring_desc,
1758 					       rx_desc,
1759 					       &mpdu_desc_info,
1760 					       error);
1761 
1762 			rx_bufs_reaped[mac_id] += 1;
1763 			goto next_entry;
1764 		}
1765 
1766 		dp_info("Got pkt with REO ERROR: %d", error);
1767 
1768 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
1769 			/*
			 * We only handle one msdu per link desc for the
			 * fragmented case. We drop the msdus and release the
			 * link desc back if there is more than one msdu in
			 * the link desc.
1773 			 */
1774 			if (qdf_unlikely(num_msdus > 1)) {
1775 				count = dp_rx_msdus_drop(soc, ring_desc,
1776 							 &mpdu_desc_info,
1777 							 &mac_id, quota);
1778 				rx_bufs_reaped[mac_id] += count;
1779 				goto next_entry;
1780 			}
1781 
1782 			/*
			 * This is an unlikely scenario where the host is
			 * reaping a descriptor which it already reaped just a
			 * while ago but is yet to replenish it back to HW.
			 * In this case the host will dump the last 128
			 * descriptors, including the software descriptor
			 * rx_desc, and assert.
1788 			 */
1789 
1790 			if (qdf_unlikely(!rx_desc->in_use)) {
1791 				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1792 				dp_info_rl("Reaping rx_desc not in use!");
1793 				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1794 							   ring_desc, rx_desc);
1795 				/* ignore duplicate RX desc and continue */
1796 				/* Pop out the descriptor */
1797 				goto next_entry;
1798 			}
1799 
1800 			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
1801 							    msdu_list.paddr[0]);
1802 			if (!ret) {
1803 				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1804 				rx_desc->in_err_state = 1;
1805 				goto next_entry;
1806 			}
1807 
1808 			count = dp_rx_frag_handle(soc,
1809 						  ring_desc, &mpdu_desc_info,
1810 						  rx_desc, &mac_id, quota);
1811 
1812 			rx_bufs_reaped[mac_id] += count;
1813 			DP_STATS_INC(soc, rx.rx_frags, 1);
1814 			goto next_entry;
1815 		}
1816 
1817 		/*
1818 		 * Expect REO errors to be handled after this point
1819 		 */
1820 		qdf_assert_always(error == HAL_REO_ERROR_DETECTED);
1821 
1822 		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
1824 			DP_STATS_INC(soc,
1825 				rx.err.
1826 				reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
1827 				1);
1828 			/* increment @pdev level */
1829 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1830 			if (dp_pdev)
1831 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
1832 			count = dp_rx_pn_error_handle(soc,
1833 						      ring_desc,
1834 						      &mpdu_desc_info, &mac_id,
1835 						      quota);
1836 
1837 			rx_bufs_reaped[mac_id] += count;
1838 			goto next_entry;
1839 		}
1840 
1841 		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
1843 			DP_STATS_INC(soc,
1844 				rx.err.
1845 				reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
1846 				1);
1847 			/* increment @pdev level */
1848 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1849 			if (dp_pdev)
1850 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
1851 
1852 			count = dp_rx_reo_err_entry_process(
1853 					soc,
1854 					ring_desc,
1855 					&mpdu_desc_info,
1856 					link_desc_va,
1857 					HAL_REO_ERR_REGULAR_FRAME_2K_JUMP);
1858 
1859 			rx_bufs_reaped[mac_id] += count;
1860 			goto next_entry;
1861 		}
1862 
1863 		if (hal_rx_reo_is_oor_error(ring_desc)) {
1864 			DP_STATS_INC(
1865 				soc,
1866 				rx.err.
1867 				reo_error[HAL_REO_ERR_REGULAR_FRAME_OOR],
1868 				1);
1869 			/* increment @pdev level */
1870 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1871 			if (dp_pdev)
1872 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
1873 			count = dp_rx_reo_err_entry_process(
1874 					soc,
1875 					ring_desc,
1876 					&mpdu_desc_info,
1877 					link_desc_va,
1878 					HAL_REO_ERR_REGULAR_FRAME_OOR);
1879 
1880 			rx_bufs_reaped[mac_id] += count;
1881 			goto next_entry;
1882 		}
1883 		/* Assert if unexpected error type */
1884 		qdf_assert_always(0);
1885 next_entry:
1886 		dp_rx_link_cookie_invalidate(ring_desc);
1887 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1888 	}
1889 
1890 done:
1891 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
1892 
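	/*
	 * When defrag timeout checking is enabled, flush the rx defrag
	 * waitlist once the next scheduled flush time has passed.
	 */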
1893 	if (soc->rx.flags.defrag_timeout_check) {
1894 		uint32_t now_ms =
1895 			qdf_system_ticks_to_msecs(qdf_system_ticks());
1896 
1897 		if (now_ms >= soc->rx.defrag.next_flush_ms)
1898 			dp_rx_defrag_waitlist_flush(soc);
1899 	}
1900 
1901 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
1902 		if (rx_bufs_reaped[mac_id]) {
1903 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1904 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
1905 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
1906 
1907 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
1908 						rx_desc_pool,
1909 						rx_bufs_reaped[mac_id],
1910 						&dp_pdev->free_list_head,
1911 						&dp_pdev->free_list_tail);
1912 			rx_bufs_used += rx_bufs_reaped[mac_id];
1913 		}
1914 	}
1915 
1916 	return rx_bufs_used; /* Assume no scale factor for now */
1917 }
1918 
1919 #ifdef DROP_RXDMA_DECRYPT_ERR
1920 /**
1921  * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
1922  *
 * Return: true if rxdma decrypt err frames are handled and false otherwise
1924  */
1925 static inline bool dp_handle_rxdma_decrypt_err(void)
1926 {
1927 	return false;
1928 }
1929 #else
1930 static inline bool dp_handle_rxdma_decrypt_err(void)
1931 {
1932 	return true;
1933 }
1934 #endif
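
/*
 * dp_handle_rxdma_decrypt_err() is consumed in dp_rx_wbm_err_process():
 * when it returns false (DROP_RXDMA_DECRYPT_ERR defined), rxdma decrypt
 * error frames without a valid peer are freed instead of being handed to
 * dp_rx_process_rxdma_err().
 */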
1935 
1936 static inline bool
1937 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
1938 {
1939 	/*
	 * Currently only the NULL queue and unencrypted error handlers
	 * support SG. Other error handlers do not deal with SG buffers.
1942 	 */
1943 	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
1944 	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
1945 	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
1946 	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
1947 		return true;
1948 
1949 	return false;
1950 }
1951 
1952 uint32_t
1953 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
1954 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
1955 {
1956 	hal_ring_desc_t ring_desc;
1957 	hal_soc_handle_t hal_soc;
1958 	struct dp_rx_desc *rx_desc;
1959 	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
1960 	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
1961 	uint32_t rx_bufs_used = 0;
1962 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
1963 	uint8_t buf_type, rbm;
1964 	uint32_t rx_buf_cookie;
1965 	uint8_t mac_id;
1966 	struct dp_pdev *dp_pdev;
1967 	struct dp_srng *dp_rxdma_srng;
1968 	struct rx_desc_pool *rx_desc_pool;
1969 	uint8_t *rx_tlv_hdr;
1970 	qdf_nbuf_t nbuf_head = NULL;
1971 	qdf_nbuf_t nbuf_tail = NULL;
1972 	qdf_nbuf_t nbuf, next;
1973 	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
1974 	uint8_t pool_id;
1975 	uint8_t tid = 0;
1976 	uint8_t msdu_continuation = 0;
1977 	bool process_sg_buf = false;
1978 
1979 	/* Debug -- Remove later */
1980 	qdf_assert(soc && hal_ring_hdl);
1981 
1982 	hal_soc = soc->hal_soc;
1983 
1984 	/* Debug -- Remove later */
1985 	qdf_assert(hal_soc);
1986 
1987 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
1988 
1989 		/* TODO */
1990 		/*
1991 		 * Need API to convert from hal_ring pointer to
1992 		 * Ring Type / Ring Id combo
1993 		 */
1994 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1995 			FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
1996 		goto done;
1997 	}
1998 
1999 	while (qdf_likely(quota)) {
2000 		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2001 		if (qdf_unlikely(!ring_desc))
2002 			break;
2003 
2004 		/* XXX */
2005 		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);
2006 
2007 		/*
2008 		 * For WBM ring, expect only MSDU buffers
2009 		 */
2010 		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);
2011 
2012 		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
2013 				== HAL_RX_WBM_ERR_SRC_RXDMA) ||
2014 				(HAL_RX_WBM_ERR_SRC_GET(ring_desc)
2015 				== HAL_RX_WBM_ERR_SRC_REO));
2016 
2017 		/*
2018 		 * Check if the buffer is to be processed on this processor
2019 		 */
2020 		rbm = hal_rx_ret_buf_manager_get(ring_desc);
2021 
2022 		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
2023 			/* TODO */
2024 			/* Call appropriate handler */
2025 			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
2026 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2027 				FL("Invalid RBM %d"), rbm);
2028 			continue;
2029 		}
2030 
2031 		rx_buf_cookie =	HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);
2032 
2033 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
2034 		qdf_assert_always(rx_desc);
2035 
2036 		if (!dp_rx_desc_check_magic(rx_desc)) {
2037 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2038 					FL("Invalid rx_desc cookie=%d"),
2039 					rx_buf_cookie);
2040 			continue;
2041 		}
2042 
2043 		/*
		 * This is an unlikely scenario where the host is reaping a
		 * descriptor which it already reaped just a while ago but is
		 * yet to replenish it back to HW.
		 * In this case the host will dump the last 128 descriptors,
		 * including the software descriptor rx_desc, and assert.
2049 		 */
2050 		if (qdf_unlikely(!rx_desc->in_use)) {
2051 			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
2052 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2053 						   ring_desc, rx_desc);
2054 		}
2055 
2056 		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
2057 		nbuf = rx_desc->nbuf;
2058 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2059 		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
2060 						  rx_desc_pool->buf_size,
2061 						  false);
2062 		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
2063 					     QDF_DMA_FROM_DEVICE,
2064 					     rx_desc_pool->buf_size);
2065 		rx_desc->unmapped = 1;
2066 
2067 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support &&
2068 				 dp_rx_is_sg_formation_required(&wbm_err_info))) {
2069 			/* SG is detected from continuation bit */
2070 			msdu_continuation = hal_rx_wbm_err_msdu_continuation_get(hal_soc,
2071 					ring_desc);
2072 			if (msdu_continuation &&
2073 			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
2074 				/* Update length from first buffer in SG */
2075 				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
2076 					hal_rx_msdu_start_msdu_len_get(
2077 						qdf_nbuf_data(nbuf));
2078 				soc->wbm_sg_param.wbm_is_first_msdu_in_sg = true;
2079 			}
2080 
2081 			if (msdu_continuation) {
2082 				/* MSDU continued packets */
2083 				qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
2084 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2085 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
2086 			} else {
2087 				/* This is the terminal packet in SG */
2088 				qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
2089 				qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
2090 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2091 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
2092 				process_sg_buf = true;
2093 			}
2094 		}
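		/*
		 * At this point the non-terminal buffers of a multi-buffer
		 * MSDU carry the rx_chfrag_cont flag, while the terminal
		 * buffer carries the chfrag start/end flags and sets
		 * process_sg_buf; all of them use the MSDU length recorded
		 * from the first buffer of the SG.
		 */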
2095 
2096 		/*
2097 		 * save the wbm desc info in nbuf TLV. We will need this
2098 		 * info when we do the actual nbuf processing
2099 		 */
2100 		wbm_err_info.pool_id = rx_desc->pool_id;
2101 		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
2102 								&wbm_err_info);
2103 
2104 		rx_bufs_reaped[rx_desc->pool_id]++;
2105 
2106 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
2107 			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
2108 					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
2109 					  nbuf);
2110 			if (process_sg_buf) {
2111 				if (!dp_rx_buffer_pool_refill(
2112 					soc,
2113 					soc->wbm_sg_param.wbm_sg_nbuf_head,
2114 					rx_desc->pool_id))
2115 					DP_RX_MERGE_TWO_LIST(
2116 						nbuf_head, nbuf_tail,
2117 						soc->wbm_sg_param.wbm_sg_nbuf_head,
2118 						soc->wbm_sg_param.wbm_sg_nbuf_tail);
2119 				dp_rx_wbm_sg_list_reset(soc);
2120 				process_sg_buf = false;
2121 			}
2122 		} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
2123 						     rx_desc->pool_id)) {
2124 			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
2125 		}
2126 
2127 		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
2128 						&tail[rx_desc->pool_id],
2129 						rx_desc);
2130 
2131 		/*
		 * If the continuation bit is set then the MSDU is spread
		 * across multiple buffers; do not decrement the quota until
		 * all buffers of that MSDU have been reaped.
2135 		 */
2136 		if (qdf_likely(!msdu_continuation))
2137 			quota -= 1;
2138 	}
2139 done:
2140 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2141 
2142 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2143 		if (rx_bufs_reaped[mac_id]) {
2144 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2145 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
2146 
2147 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2148 					rx_desc_pool, rx_bufs_reaped[mac_id],
2149 					&head[mac_id], &tail[mac_id]);
2150 			rx_bufs_used += rx_bufs_reaped[mac_id];
2151 		}
2152 	}
2153 
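	/*
	 * Second pass: walk the error nbufs collected above and dispatch
	 * each one to the REO or RXDMA specific handler based on the wbm
	 * error info that was stashed in the nbuf TLV area.
	 */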
2154 	nbuf = nbuf_head;
2155 	while (nbuf) {
2156 		struct dp_peer *peer;
2157 		uint16_t peer_id;
2158 		uint8_t err_code;
2159 		uint8_t *tlv_hdr;
2160 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
2161 
2162 		/*
2163 		 * retrieve the wbm desc info from nbuf TLV, so we can
2164 		 * handle error cases appropriately
2165 		 */
2166 		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);
2167 
2168 		peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
2169 							   rx_tlv_hdr);
2170 		peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
2171 
2172 		if (!peer)
2173 			dp_info_rl("peer is null peer_id%u err_src%u err_rsn%u",
2174 				   peer_id, wbm_err_info.wbm_err_src,
2175 				   wbm_err_info.reo_psh_rsn);
2176 
2177 		/* Set queue_mapping in nbuf to 0 */
2178 		dp_set_rx_queue(nbuf, 0);
2179 
2180 		next = nbuf->next;
2181 
2182 		/*
		 * Form the SG for msdu-continued buffers.
		 * QCN9000 has support for this.
2185 		 */
2186 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
2187 			nbuf = dp_rx_sg_create(soc, nbuf);
2188 			next = nbuf->next;
2189 			/*
2190 			 * SG error handling is not done correctly,
2191 			 * drop SG frames for now.
2192 			 */
2193 			qdf_nbuf_free(nbuf);
2194 			dp_info_rl("scattered msdu dropped");
2195 			nbuf = next;
2196 			if (peer)
2197 				dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
2198 			continue;
2199 		}
2200 
2201 		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
2202 			if (wbm_err_info.reo_psh_rsn
2203 					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {
2204 
2205 				DP_STATS_INC(soc,
2206 					rx.err.reo_error
2207 					[wbm_err_info.reo_err_code], 1);
2208 				/* increment @pdev level */
2209 				pool_id = wbm_err_info.pool_id;
2210 				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2211 				if (dp_pdev)
2212 					DP_STATS_INC(dp_pdev, err.reo_error,
2213 						     1);
2214 
2215 				switch (wbm_err_info.reo_err_code) {
2216 				/*
2217 				 * Handling for packets which have NULL REO
2218 				 * queue descriptor
2219 				 */
2220 				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2221 					pool_id = wbm_err_info.pool_id;
2222 					dp_rx_null_q_desc_handle(soc, nbuf,
2223 								 rx_tlv_hdr,
2224 								 pool_id, peer);
2225 					break;
2226 				/* TODO */
2227 				/* Add per error code accounting */
2228 				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2229 					pool_id = wbm_err_info.pool_id;
2230 
2231 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2232 									   rx_tlv_hdr)) {
2233 						peer_id =
2234 						hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
2235 										 rx_tlv_hdr);
2236 						tid =
2237 						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2238 					}
2239 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2240 					hal_rx_msdu_start_msdu_len_get(
2241 								rx_tlv_hdr);
2242 					nbuf->next = NULL;
2243 					dp_2k_jump_handle(soc, nbuf,
2244 							  rx_tlv_hdr,
2245 							  peer_id, tid);
2246 					break;
2247 				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2248 				case HAL_REO_ERR_BAR_FRAME_OOR:
2249 					if (peer)
2250 						dp_rx_err_handle_bar(soc,
2251 								     peer,
2252 								     nbuf);
2253 					qdf_nbuf_free(nbuf);
2254 					break;
2255 
2256 				default:
2257 					dp_info_rl("Got pkt with REO ERROR: %d",
2258 						   wbm_err_info.reo_err_code);
2259 					qdf_nbuf_free(nbuf);
2260 				}
2261 			} else if (wbm_err_info.reo_psh_rsn
2262 				   == HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
2263 				DP_STATS_INC(soc, rx.reo2rel_route_drop, 1);
2264 				qdf_nbuf_free(nbuf);
2265 			}
2266 		} else if (wbm_err_info.wbm_err_src ==
2267 					HAL_RX_WBM_ERR_SRC_RXDMA) {
2268 			if (wbm_err_info.rxdma_psh_rsn
2269 					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2270 				DP_STATS_INC(soc,
2271 					rx.err.rxdma_error
2272 					[wbm_err_info.rxdma_err_code], 1);
2273 				/* increment @pdev level */
2274 				pool_id = wbm_err_info.pool_id;
2275 				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2276 				if (dp_pdev)
2277 					DP_STATS_INC(dp_pdev,
2278 						     err.rxdma_error, 1);
2279 
2280 				switch (wbm_err_info.rxdma_err_code) {
2281 				case HAL_RXDMA_ERR_UNENCRYPTED:
2282 
2283 				case HAL_RXDMA_ERR_WIFI_PARSE:
2284 					pool_id = wbm_err_info.pool_id;
2285 					dp_rx_process_rxdma_err(soc, nbuf,
2286 								rx_tlv_hdr,
2287 								peer,
2288 								wbm_err_info.
2289 								rxdma_err_code,
2290 								pool_id);
2291 					break;
2292 
2293 				case HAL_RXDMA_ERR_TKIP_MIC:
2294 					dp_rx_process_mic_error(soc, nbuf,
2295 								rx_tlv_hdr,
2296 								peer);
2297 					if (peer)
2298 						DP_STATS_INC(peer, rx.err.mic_err, 1);
2299 					break;
2300 
2301 				case HAL_RXDMA_ERR_DECRYPT:
2302 
2303 					if (peer) {
2304 						DP_STATS_INC(peer, rx.err.
2305 							     decrypt_err, 1);
2306 						qdf_nbuf_free(nbuf);
2307 						break;
2308 					}
2309 
2310 					if (!dp_handle_rxdma_decrypt_err()) {
2311 						qdf_nbuf_free(nbuf);
2312 						break;
2313 					}
2314 
2315 					pool_id = wbm_err_info.pool_id;
2316 					err_code = wbm_err_info.rxdma_err_code;
2317 					tlv_hdr = rx_tlv_hdr;
2318 					dp_rx_process_rxdma_err(soc, nbuf,
2319 								tlv_hdr, NULL,
2320 								err_code,
2321 								pool_id);
2322 					break;
2323 
2324 				default:
2325 					qdf_nbuf_free(nbuf);
2326 					dp_err_rl("RXDMA error %d",
2327 						  wbm_err_info.rxdma_err_code);
2328 				}
2329 			} else if (wbm_err_info.rxdma_psh_rsn
2330 				   == HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
2331 				DP_STATS_INC(soc, rx.rxdma2rel_route_drop, 1);
2332 				qdf_nbuf_free(nbuf);
2333 			}
2334 		} else {
2335 			/* Should not come here */
2336 			qdf_assert(0);
2337 		}
2338 
2339 		if (peer)
2340 			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
2341 
2342 		nbuf = next;
2343 	}
2344 	return rx_bufs_used; /* Assume no scale factor for now */
2345 }
2346 
2347 /**
2348  * dup_desc_dbg() - dump and assert if duplicate rx desc found
2349  *
2350  * @soc: core DP main context
2351  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2352  * @rx_desc: void pointer to rx descriptor
2353  *
2354  * Return: void
2355  */
2356 static void dup_desc_dbg(struct dp_soc *soc,
2357 			 hal_rxdma_desc_t rxdma_dst_ring_desc,
2358 			 void *rx_desc)
2359 {
2360 	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
2361 	dp_rx_dump_info_and_assert(
2362 			soc,
2363 			soc->rx_rel_ring.hal_srng,
2364 			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
2365 			rx_desc);
2366 }
2367 
2368 /**
2369  * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
2370  *
2371  * @soc: core DP main context
2372  * @mac_id: mac id which is one of 3 mac_ids
2373  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2374  * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
2377  * Return: number of msdu in MPDU to be popped
2378  */
2379 static inline uint32_t
2380 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
2381 	hal_rxdma_desc_t rxdma_dst_ring_desc,
2382 	union dp_rx_desc_list_elem_t **head,
2383 	union dp_rx_desc_list_elem_t **tail)
2384 {
2385 	void *rx_msdu_link_desc;
2386 	qdf_nbuf_t msdu;
2387 	qdf_nbuf_t last;
2388 	struct hal_rx_msdu_list msdu_list;
2389 	uint16_t num_msdus;
2390 	struct hal_buf_info buf_info;
2391 	uint32_t rx_bufs_used = 0;
2392 	uint32_t msdu_cnt;
2393 	uint32_t i;
2394 	uint8_t push_reason;
2395 	uint8_t rxdma_error_code = 0;
2396 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
2397 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2398 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
2399 	hal_rxdma_desc_t ring_desc;
2400 	struct rx_desc_pool *rx_desc_pool;
2401 
2402 	if (!pdev) {
2403 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2404 			  "pdev is null for mac_id = %d", mac_id);
2405 		return rx_bufs_used;
2406 	}
2407 
2408 	msdu = 0;
2409 
2410 	last = NULL;
2411 
2412 	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
2413 					&msdu_cnt);
2414 
2415 	push_reason =
2416 		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
2417 	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2418 		rxdma_error_code =
2419 			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
2420 	}
2421 
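	/*
	 * Walk the MSDU link descriptor chain of this MPDU: free the rx
	 * buffers referenced by each link descriptor (unless they are to be
	 * released to NSS), then return the link descriptor to WBM, following
	 * the next link pointer until buf_info.paddr is 0.
	 */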
2422 	do {
2423 		rx_msdu_link_desc =
2424 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
2425 
2426 		qdf_assert_always(rx_msdu_link_desc);
2427 
2428 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
2429 				     &msdu_list, &num_msdus);
2430 
2431 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
2432 			/* if the msdus belongs to NSS offloaded radio &&
			/* If the msdus belong to an NSS offloaded radio and
			 * the rbm is not HAL_RX_BUF_RBM_SW3_BM, return the
			 * msdu_link descriptor without freeing the msdus
			 * (nbufs); let these buffers be given to the NSS
			 * completion ring for NSS to free them.
			 * Otherwise iterate through the msdu link desc list
			 * and free each msdu in the list.
			 */
2441 				wlan_cfg_get_dp_pdev_nss_enabled(
2442 							  pdev->wlan_cfg_ctx))
2443 				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
2444 			else {
2445 				for (i = 0; i < num_msdus; i++) {
2446 					struct dp_rx_desc *rx_desc =
2447 						dp_rx_cookie_2_va_rxdma_buf(soc,
2448 							msdu_list.sw_cookie[i]);
2449 					qdf_assert_always(rx_desc);
2450 					msdu = rx_desc->nbuf;
2451 					/*
					 * This is an unlikely scenario where
					 * the host is reaping a descriptor
					 * which it already reaped just a
					 * while ago but is yet to replenish
					 * it back to HW.
					 * In this case the host will dump
					 * the last 128 descriptors, including
					 * the software descriptor rx_desc,
					 * and assert.
2462 					 */
2463 					ring_desc = rxdma_dst_ring_desc;
2464 					if (qdf_unlikely(!rx_desc->in_use)) {
2465 						dup_desc_dbg(soc,
2466 							     ring_desc,
2467 							     rx_desc);
2468 						continue;
2469 					}
2470 
2471 					rx_desc_pool = &soc->
2472 						rx_desc_buf[rx_desc->pool_id];
2473 					dp_ipa_handle_rx_buf_smmu_mapping(
2474 							soc, msdu,
2475 							rx_desc_pool->buf_size,
2476 							false);
2477 					qdf_nbuf_unmap_nbytes_single(
2478 						soc->osdev, msdu,
2479 						QDF_DMA_FROM_DEVICE,
2480 						rx_desc_pool->buf_size);
2481 					rx_desc->unmapped = 1;
2482 
2483 					QDF_TRACE(QDF_MODULE_ID_DP,
2484 						QDF_TRACE_LEVEL_DEBUG,
2485 						"[%s][%d] msdu_nbuf=%pK ",
2486 						__func__, __LINE__, msdu);
2487 
2488 					dp_rx_buffer_pool_nbuf_free(soc, msdu,
2489 							rx_desc->pool_id);
2490 					rx_bufs_used++;
2491 					dp_rx_add_to_free_desc_list(head,
2492 						tail, rx_desc);
2493 				}
2494 			}
2495 		} else {
2496 			rxdma_error_code = HAL_RXDMA_ERR_WAR;
2497 		}
2498 
2499 		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purposes.
2502 		 */
2503 		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
2504 					     buf_info.sw_cookie, buf_info.rbm);
2505 
2506 		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
2507 		dp_rx_link_desc_return_by_addr(soc,
2508 					       (hal_buff_addrinfo_t)
2509 						rx_link_buf_info,
2510 						bm_action);
2511 	} while (buf_info.paddr);
2512 
2513 	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
2514 	if (pdev)
2515 		DP_STATS_INC(pdev, err.rxdma_error, 1);
2516 
2517 	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
2518 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2519 			"Packet received with Decrypt error");
2520 	}
2521 
2522 	return rx_bufs_used;
2523 }
2524 
2525 uint32_t
2526 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2527 		     uint32_t mac_id, uint32_t quota)
2528 {
2529 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2530 	hal_rxdma_desc_t rxdma_dst_ring_desc;
2531 	hal_soc_handle_t hal_soc;
2532 	void *err_dst_srng;
2533 	union dp_rx_desc_list_elem_t *head = NULL;
2534 	union dp_rx_desc_list_elem_t *tail = NULL;
2535 	struct dp_srng *dp_rxdma_srng;
2536 	struct rx_desc_pool *rx_desc_pool;
2537 	uint32_t work_done = 0;
2538 	uint32_t rx_bufs_used = 0;
2539 
2540 	if (!pdev)
2541 		return 0;
2542 
2543 	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;
2544 
2545 	if (!err_dst_srng) {
2546 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s %d : HAL RXDMA ERR DST Ring Init Failed -- %pK",
2549 			__func__, __LINE__, err_dst_srng);
2550 		return 0;
2551 	}
2552 
2553 	hal_soc = soc->hal_soc;
2554 
2555 	qdf_assert(hal_soc);
2556 
2557 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
2558 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s %d : HAL RXDMA ERR DST Ring Access Failed -- %pK",
2561 			__func__, __LINE__, err_dst_srng);
2562 		return 0;
2563 	}
2564 
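	/*
	 * Reap entries from the RXDMA error destination ring until the quota
	 * is exhausted; dp_rx_err_mpdu_pop() frees the buffers and returns
	 * the number that need to be replenished.
	 */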
2565 	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
2566 		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
2567 
2568 			rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
2569 						rxdma_dst_ring_desc,
2570 						&head, &tail);
2571 	}
2572 
2573 	dp_srng_access_end(int_ctx, soc, err_dst_srng);
2574 
2575 	if (rx_bufs_used) {
2576 		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2577 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2578 		else
2579 			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
2580 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
2581 
2582 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2583 			rx_desc_pool, rx_bufs_used, &head, &tail);
2584 
2585 		work_done += rx_bufs_used;
2586 	}
2587 
2588 	return work_done;
2589 }
2590 
2591 static inline uint32_t
2592 dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
2593 			hal_rxdma_desc_t rxdma_dst_ring_desc,
2594 			union dp_rx_desc_list_elem_t **head,
2595 			union dp_rx_desc_list_elem_t **tail)
2596 {
2597 	void *rx_msdu_link_desc;
2598 	qdf_nbuf_t msdu;
2599 	qdf_nbuf_t last;
2600 	struct hal_rx_msdu_list msdu_list;
2601 	uint16_t num_msdus;
2602 	struct hal_buf_info buf_info;
2603 	uint32_t rx_bufs_used = 0, msdu_cnt, i;
2604 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
2605 
2606 	msdu = 0;
2607 
2608 	last = NULL;
2609 
2610 	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
2611 				     &msdu_cnt);
2612 
2613 	do {
2614 		rx_msdu_link_desc =
2615 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
2616 
2617 		if (!rx_msdu_link_desc) {
2618 			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
2619 			break;
2620 		}
2621 
2622 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
2623 				     &msdu_list, &num_msdus);
2624 
2625 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
2626 			for (i = 0; i < num_msdus; i++) {
2627 				struct dp_rx_desc *rx_desc =
2628 					dp_rx_cookie_2_va_rxdma_buf(
2629 							soc,
2630 							msdu_list.sw_cookie[i]);
2631 				qdf_assert_always(rx_desc);
2632 				msdu = rx_desc->nbuf;
2633 
2634 				qdf_nbuf_unmap_single(soc->osdev, msdu,
2635 						      QDF_DMA_FROM_DEVICE);
2636 
2637 				dp_rx_buffer_pool_nbuf_free(soc, msdu,
2638 							    rx_desc->pool_id);
2639 				rx_bufs_used++;
2640 				dp_rx_add_to_free_desc_list(head,
2641 							    tail, rx_desc);
2642 			}
2643 		}
2644 
2645 		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purposes.
2648 		 */
2649 		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
2650 					     buf_info.sw_cookie, buf_info.rbm);
2651 
2652 		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
2653 		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
2654 					rx_link_buf_info,
2655 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
2656 	} while (buf_info.paddr);
2657 
2658 	return rx_bufs_used;
2659 }
2660 
/**
 * dp_handle_wbm_internal_error() - handles wbm_internal_error case
 *
 * @soc: core DP main context
 * @hal_desc: hal descriptor
 * @buf_type: indicates if the buffer is of type link desc or msdu
 *
 * Return: None
2669  *
 * wbm_internal_error is seen in the following scenarios:
2671  *
2672  * 1.  Null pointers detected in WBM_RELEASE_RING descriptors
2673  * 2.  Null pointers detected during delinking process
2674  *
2675  * Some null pointer cases:
2676  *
2677  * a. MSDU buffer pointer is NULL
2678  * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
2679  * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
2680  */
2681 void
2682 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
2683 			     uint32_t buf_type)
2684 {
2685 	struct hal_buf_info buf_info = {0};
2686 	struct dp_rx_desc *rx_desc = NULL;
2687 	struct rx_desc_pool *rx_desc_pool;
2688 	uint32_t rx_buf_cookie;
2689 	uint32_t rx_bufs_reaped = 0;
2690 	union dp_rx_desc_list_elem_t *head = NULL;
2691 	union dp_rx_desc_list_elem_t *tail = NULL;
2692 	uint8_t pool_id;
2693 
2694 	hal_rx_reo_buf_paddr_get(hal_desc, &buf_info);
2695 
2696 	if (!buf_info.paddr) {
2697 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
2698 		return;
2699 	}
2700 
2701 	rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(hal_desc);
2702 	pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(rx_buf_cookie);
2703 
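	/*
	 * Two recovery paths follow: for a buffer type release the single rx
	 * buffer is unmapped and freed directly; for a link descriptor type
	 * release the MSDU list hanging off the link descriptor is popped and
	 * freed. Reaped buffers are replenished at the end.
	 */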
2704 	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
2705 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
2706 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
2707 
2708 		if (rx_desc && rx_desc->nbuf) {
2709 			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2710 			dp_ipa_handle_rx_buf_smmu_mapping(
2711 						soc, rx_desc->nbuf,
2712 						rx_desc_pool->buf_size,
2713 						false);
2714 			qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
2715 						     QDF_DMA_FROM_DEVICE,
2716 						     rx_desc_pool->buf_size);
2717 			rx_desc->unmapped = 1;
2718 
2719 			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
2720 						    rx_desc->pool_id);
2721 			dp_rx_add_to_free_desc_list(&head,
2722 						    &tail,
2723 						    rx_desc);
2724 
2725 			rx_bufs_reaped++;
2726 		}
2727 	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
2728 		rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id,
2729 							 hal_desc,
2730 							 &head, &tail);
2731 	}
2732 
2733 	if (rx_bufs_reaped) {
2734 		struct rx_desc_pool *rx_desc_pool;
2735 		struct dp_srng *dp_rxdma_srng;
2736 
2737 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
2738 		dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id];
2739 		rx_desc_pool = &soc->rx_desc_buf[pool_id];
2740 
2741 		dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng,
2742 					rx_desc_pool,
2743 					rx_bufs_reaped,
2744 					&head, &tail);
2745 	}
2746 }
2747