xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c (revision 45a38684b07295822dc8eba39e293408f203eec8)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_hw_headers.h"
20 #include "dp_types.h"
21 #include "dp_rx.h"
22 #include "dp_peer.h"
23 #include "dp_internal.h"
24 #include "hal_api.h"
25 #include "qdf_trace.h"
26 #include "qdf_nbuf.h"
27 #include "dp_rx_defrag.h"
28 #include "dp_ipa.h"
29 #ifdef FEATURE_WDS
30 #include "dp_txrx_wds.h"
31 #endif
32 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
33 #include "qdf_net_types.h"
34 #include "dp_rx_buffer_pool.h"
35 
36 /* Max buffer in invalid peer SG list*/
37 #define DP_MAX_INVALID_BUFFERS 10
38 
39 /**
40  * dp_rx_mcast_echo_check() - check if the mcast pkt is looped
41  *			      back on the same vap or a different vap.
42  *
43  * @soc: core DP main context
44  * @peer: dp peer handle
45  * @rx_tlv_hdr: start of the rx TLV header
46  * @nbuf: pkt buffer
47  *
48  * Return: bool (true if it is a looped back pkt else false)
49  *
50  */
51 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
52 					struct dp_peer *peer,
53 					uint8_t *rx_tlv_hdr,
54 					qdf_nbuf_t nbuf)
55 {
56 	struct dp_vdev *vdev = peer->vdev;
57 	struct dp_ast_entry *ase = NULL;
58 	uint16_t sa_idx = 0;
59 	uint8_t *data;
60 
61 	/*
62 	 * Multicast Echo Check is required only if vdev is STA and
63 	 * received pkt is a multicast/broadcast pkt. Otherwise,
64 	 * skip the MEC check.
65 	 */
66 	if (vdev->opmode != wlan_op_mode_sta)
67 		return false;
68 
69 	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
70 		return false;
71 
72 	data = qdf_nbuf_data(nbuf);
73 	/*
74 	 * if the received pkt's src mac addr matches the vdev
75 	 * mac address, then drop the pkt as it is looped back
76 	 */
77 	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
78 			vdev->mac_addr.raw,
79 			QDF_MAC_ADDR_SIZE)))
80 		return true;
81 
82 	/*
83 	 * In case of qwrap isolation mode, do not drop loopback packets.
84 	 * In isolation mode, all packets from the wired stations need to go
85 	 * to rootap and loop back to reach the wireless stations and
86 	 * vice-versa.
87 	 */
88 	if (qdf_unlikely(vdev->isolation_vdev))
89 		return false;
90 
91 	/* if the received pkt's src mac addr matches the
92 	 * MAC addr of a wired PC behind the STA, or the
93 	 * MAC addr of a wireless STA behind the repeater,
94 	 * then drop the pkt as it is looped back
95 	 */
96 	qdf_spin_lock_bh(&soc->ast_lock);
97 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
98 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
99 
100 		if ((sa_idx < 0) ||
101 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
102 			qdf_spin_unlock_bh(&soc->ast_lock);
103 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
104 					"invalid sa_idx: %d", sa_idx);
105 			qdf_assert_always(0);
106 		}
107 
108 		ase = soc->ast_table[sa_idx];
109 		if (!ase) {
110 			/* We do not get a peer map event for STA and without
111 			 * this event we don't know what the STA's sa_idx is.
112 			 * For this reason the AST is still not associated to
113 			 * any index position in ast_table.
114 			 * In these kinds of scenarios where sa is valid but
115 			 * ast is not in ast_table, we use the below API to get
116 			 * AST entry for STA's own mac_address.
117 			 */
118 			ase = dp_peer_ast_list_find(soc, peer,
119 						    &data[QDF_MAC_ADDR_SIZE]);
120 			if (ase) {
121 				ase->ast_idx = sa_idx;
122 				soc->ast_table[sa_idx] = ase;
123 				ase->is_mapped = TRUE;
124 			}
125 		}
126 	} else {
127 		ase = dp_peer_ast_hash_find_by_pdevid(soc,
128 						      &data[QDF_MAC_ADDR_SIZE],
129 						      vdev->pdev->pdev_id);
130 	}
131 
132 	if (ase) {
133 
134 		if (ase->pdev_id != vdev->pdev->pdev_id) {
135 			qdf_spin_unlock_bh(&soc->ast_lock);
136 			QDF_TRACE(QDF_MODULE_ID_DP,
137 				QDF_TRACE_LEVEL_INFO,
138 				"Detected DBDC Root AP %pM, %d %d",
139 				&data[QDF_MAC_ADDR_SIZE], vdev->pdev->pdev_id,
140 				ase->pdev_id);
141 			return false;
142 		}
143 
144 		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
145 				(ase->peer != peer)) {
146 			qdf_spin_unlock_bh(&soc->ast_lock);
147 			QDF_TRACE(QDF_MODULE_ID_DP,
148 				QDF_TRACE_LEVEL_INFO,
149 				"received pkt with same src mac %pM",
150 				&data[QDF_MAC_ADDR_SIZE]);
151 
152 			return true;
153 		}
154 	}
155 	qdf_spin_unlock_bh(&soc->ast_lock);
156 	return false;
157 }
158 
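/**
 * dp_rx_link_desc_refill_duplicate_check() - check if the link desc being
 *					       refilled duplicates the last one
 *
 * @soc: core DP main context
 * @buf_info: last recorded link desc buffer info, updated on return
 * @ring_buf_info: buffer address info taken from the current ring entry
 *
 * Compares the physical address of the current link descriptor with the
 * previously recorded one; on a match it logs the duplicate and increments
 * the rx.err.dup_refill_link_desc counter, then records the current info.
 *
 * Return: None
 */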
159 void dp_rx_link_desc_refill_duplicate_check(
160 				struct dp_soc *soc,
161 				struct hal_buf_info *buf_info,
162 				hal_buff_addrinfo_t ring_buf_info)
163 {
164 	struct hal_buf_info current_link_desc_buf_info = { 0 };
165 
166 	/* do duplicate link desc address check */
167 	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
168 					  &current_link_desc_buf_info);
169 	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
170 			 buf_info->paddr)) {
171 		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
172 			   current_link_desc_buf_info.paddr,
173 			   current_link_desc_buf_info.sw_cookie);
174 		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
175 	}
176 	*buf_info = current_link_desc_buf_info;
177 }
178 
179 /**
180  * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
181  *					HW (WBM) by address
182  *
183  * @soc: core DP main context
184  * @link_desc_addr: link descriptor addr
185  * @bm_action: buffer manager action (e.g. put in idle list)
186  * Return: QDF_STATUS
187  */
188 QDF_STATUS
189 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
190 			       hal_buff_addrinfo_t link_desc_addr,
191 			       uint8_t bm_action)
192 {
193 	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
194 	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
195 	hal_soc_handle_t hal_soc = soc->hal_soc;
196 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
197 	void *src_srng_desc;
198 
199 	if (!wbm_rel_srng) {
200 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
201 			"WBM RELEASE RING not initialized");
202 		return status;
203 	}
204 
205 	/* do duplicate link desc address check */
206 	dp_rx_link_desc_refill_duplicate_check(
207 				soc,
208 				&soc->last_op_info.wbm_rel_link_desc,
209 				link_desc_addr);
210 
211 	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
212 
213 		/* TODO */
214 		/*
215 		 * Need API to convert from hal_ring pointer to
216 		 * Ring Type / Ring Id combo
217 		 */
218 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
219 			FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
220 			wbm_rel_srng);
221 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
222 		goto done;
223 	}
224 	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
225 	if (qdf_likely(src_srng_desc)) {
226 		/* Return link descriptor through WBM ring (SW2WBM)*/
227 		hal_rx_msdu_link_desc_set(hal_soc,
228 				src_srng_desc, link_desc_addr, bm_action);
229 		status = QDF_STATUS_SUCCESS;
230 	} else {
231 		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
232 
233 		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);
234 
235 		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
236 			   srng->ring_id,
237 			   soc->stats.rx.err.hal_ring_access_full_fail);
238 		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
239 			   *srng->u.src_ring.hp_addr,
240 			   srng->u.src_ring.reap_hp,
241 			   *srng->u.src_ring.tp_addr,
242 			   srng->u.src_ring.cached_tp);
243 		QDF_BUG(0);
244 	}
245 done:
246 	hal_srng_access_end(hal_soc, wbm_rel_srng);
247 	return status;
248 
249 }
250 
251 /**
252  * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
253  *				(WBM), following error handling
254  *
255  * @soc: core DP main context
256  * @ring_desc: opaque pointer to the REO error ring descriptor
257  * @bm_action: buffer manager action (e.g. put in idle list)
258  * Return: QDF_STATUS
259  */
260 QDF_STATUS
261 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
262 		       uint8_t bm_action)
263 {
264 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
265 
266 	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
267 }
268 
269 /**
270  * dp_rx_msdus_drop() - Drops all MSDUs of an MPDU
271  *
272  * @soc: core txrx main context
273  * @ring_desc: opaque pointer to the REO error ring descriptor
274  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
275  * @mac_id: pointer updated with the mac id (pool id) of the dropped buffers
277  * @quota: No. of units (packets) that can be serviced in one shot.
278  *
279  * This function is used to drop all MSDU in an MPDU
280  *
281  * Return: uint32_t: No. of elements processed
282  */
283 static uint32_t
284 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
285 		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
286 		 uint8_t *mac_id,
287 		 uint32_t quota)
288 {
289 	uint32_t rx_bufs_used = 0;
290 	void *link_desc_va;
291 	struct hal_buf_info buf_info;
292 	struct dp_pdev *pdev;
293 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
294 	int i;
295 	uint8_t *rx_tlv_hdr;
296 	uint32_t tid;
297 	struct rx_desc_pool *rx_desc_pool;
298 
299 	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);
300 
301 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
302 
303 	/* No UNMAP required -- this is "malloc_consistent" memory */
304 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
305 			     &mpdu_desc_info->msdu_count);
306 
307 	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
308 		struct dp_rx_desc *rx_desc =
309 			dp_rx_cookie_2_va_rxdma_buf(soc,
310 			msdu_list.sw_cookie[i]);
311 
312 		qdf_assert_always(rx_desc);
313 
314 		/* all buffers from an MSDU link desc belong to the same pdev */
315 		*mac_id = rx_desc->pool_id;
316 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
317 		if (!pdev) {
318 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
319 				  "pdev is null for pool_id = %d",
320 				  rx_desc->pool_id);
321 			return rx_bufs_used;
322 		}
323 
324 		if (!dp_rx_desc_check_magic(rx_desc)) {
325 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
326 					FL("Invalid rx_desc cookie=%d"),
327 					msdu_list.sw_cookie[i]);
328 			return rx_bufs_used;
329 		}
330 
331 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
332 		dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
333 						  rx_desc_pool->buf_size,
334 						  false);
335 		qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
336 					     QDF_DMA_FROM_DEVICE,
337 					     rx_desc_pool->buf_size);
338 		rx_desc->unmapped = 1;
339 
340 		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);
341 
342 		rx_bufs_used++;
343 		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
344 						rx_desc->rx_buf_start);
345 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
346 			"Packet received with PN error for tid :%d", tid);
347 
348 		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
349 		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
350 			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);
351 
352 		/* Just free the buffers */
353 		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);
354 
355 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
356 					    &pdev->free_list_tail, rx_desc);
357 	}
358 
359 	/* Return link descriptor through WBM ring (SW2WBM)*/
360 	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);
361 
362 	return rx_bufs_used;
363 }
364 
365 /**
366  * dp_rx_pn_error_handle() - Handles PN check errors
367  *
368  * @soc: core txrx main context
369  * @ring_desc: opaque pointer to the REO error ring descriptor
370  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
371  * @mac_id: pointer updated with the mac id (pool id) of the dropped buffers
373  * @quota: No. of units (packets) that can be serviced in one shot.
374  *
375  * This function implements PN error handling.
376  * If the peer is configured to ignore the PN check errors,
377  * or if DP determines that this frame is still OK, the frame can be
378  * re-injected back to REO to use some of the other features
379  * of REO, e.g. duplicate detection/routing to other cores.
380  *
381  * Return: uint32_t: No. of elements processed
382  */
383 static uint32_t
384 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
385 		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
386 		      uint8_t *mac_id,
387 		      uint32_t quota)
388 {
389 	uint16_t peer_id;
390 	uint32_t rx_bufs_used = 0;
391 	struct dp_peer *peer;
392 	bool peer_pn_policy = false;
393 
394 	peer_id = DP_PEER_METADATA_PEER_ID_GET(
395 				mpdu_desc_info->peer_meta_data);
396 
397 
398 	peer = dp_peer_find_by_id(soc, peer_id);
399 
400 	if (qdf_likely(peer)) {
401 		/*
402 		 * TODO: Check for peer specific policies & set peer_pn_policy
403 		 */
404 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
405 			"discard rx due to PN error for peer  %pK  %pM",
406 			peer, peer->mac_addr.raw);
407 
408 		dp_peer_unref_del_find_by_id(peer);
409 	}
410 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
411 		"Packet received with PN error");
412 
413 	/* No peer PN policy -- definitely drop */
414 	if (!peer_pn_policy)
415 		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
416 						mpdu_desc_info,
417 						mac_id, quota);
418 
419 	return rx_bufs_used;
420 }
421 
422 /**
423  * dp_rx_oor_handle() - Handles an msdu received with the OOR error
424  *
425  * @soc: core txrx main context
426  * @nbuf: pointer to msdu skb
427  * @peer_id: dp peer ID
428  * @rx_tlv_hdr: start of rx tlv header
429  *
430  * This function processes the msdu delivered from the REO2TCL
431  * ring with error type OOR
432  *
433  * Return: None
434  */
435 static void
436 dp_rx_oor_handle(struct dp_soc *soc,
437 		 qdf_nbuf_t nbuf,
438 		 uint16_t peer_id,
439 		 uint8_t *rx_tlv_hdr)
440 {
441 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
442 				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
443 	struct dp_peer *peer = NULL;
444 
445 	peer = dp_peer_find_by_id(soc, peer_id);
446 	if (!peer) {
447 		dp_info_rl("peer not found");
448 		goto free_nbuf;
449 	}
450 
451 	if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask,
452 					rx_tlv_hdr)) {
453 		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
454 		dp_peer_unref_del_find_by_id(peer);
455 		return;
456 	}
457 
458 free_nbuf:
459 	if (peer)
460 		dp_peer_unref_del_find_by_id(peer);
461 
462 	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
463 	qdf_nbuf_free(nbuf);
464 }
465 
466 /**
467  * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
468  *
469  * @soc: core txrx main context
470  * @ring_desc: opaque pointer to the REO error ring descriptor
471  * @mpdu_desc_info: pointer to mpdu level description info
472  * @link_desc_va: pointer to msdu_link_desc virtual address
473  * @err_code: reo error code fetched from ring entry
474  *
475  * Function to handle msdus fetched from the msdu link desc; currently
476  * only the 2K jump and OOR errors are supported.
477  *
478  * Return: msdu count processed.
479  */
480 static uint32_t
481 dp_rx_reo_err_entry_process(struct dp_soc *soc,
482 			    void *ring_desc,
483 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
484 			    void *link_desc_va,
485 			    enum hal_reo_error_code err_code)
486 {
487 	uint32_t rx_bufs_used = 0;
488 	struct dp_pdev *pdev;
489 	int i;
490 	uint8_t *rx_tlv_hdr_first;
491 	uint8_t *rx_tlv_hdr_last;
492 	uint32_t tid = DP_MAX_TIDS;
493 	uint16_t peer_id;
494 	struct dp_rx_desc *rx_desc;
495 	struct rx_desc_pool *rx_desc_pool;
496 	qdf_nbuf_t nbuf;
497 	struct hal_buf_info buf_info;
498 	struct hal_rx_msdu_list msdu_list;
499 	uint16_t num_msdus;
500 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
501 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
502 	/* First field in REO Dst ring Desc is buffer_addr_info */
503 	void *buf_addr_info = ring_desc;
504 	qdf_nbuf_t head_nbuf = NULL;
505 	qdf_nbuf_t tail_nbuf = NULL;
506 	uint16_t msdu_processed = 0;
507 
508 	peer_id = DP_PEER_METADATA_PEER_ID_GET(
509 					mpdu_desc_info->peer_meta_data);
510 
511 more_msdu_link_desc:
512 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
513 			     &num_msdus);
514 	for (i = 0; i < num_msdus; i++) {
515 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(
516 					soc,
517 					msdu_list.sw_cookie[i]);
518 
519 		qdf_assert_always(rx_desc);
520 
521 		/* all buffers from a MSDU link belong to same pdev */
522 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
523 
524 		nbuf = rx_desc->nbuf;
525 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
526 		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
527 						  rx_desc_pool->buf_size,
528 						  false);
529 		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
530 					     QDF_DMA_FROM_DEVICE,
531 					     rx_desc_pool->buf_size);
532 		rx_desc->unmapped = 1;
533 
534 		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
535 		rx_bufs_used++;
536 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
537 					    &pdev->free_list_tail, rx_desc);
538 
539 		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);
540 
541 		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
542 				 HAL_MSDU_F_MSDU_CONTINUATION))
543 			continue;
544 
545 		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
546 					     rx_desc->pool_id)) {
547 			/* MSDU queued back to the pool */
548 			goto process_next_msdu;
549 		}
550 
551 		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
552 		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);
553 
554 		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
555 			nbuf = dp_rx_sg_create(head_nbuf);
556 			qdf_nbuf_set_is_frag(nbuf, 1);
557 			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
558 		}
559 
560 		switch (err_code) {
561 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
562 			/*
563 			 * Only the first msdu has a valid mpdu start
564 			 * description tlv; use it for the following msdus.
565 			 */
566 			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
567 							   rx_tlv_hdr_last))
568 				tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
569 							      rx_tlv_hdr_first);
570 
571 			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
572 					  peer_id, tid);
573 			break;
574 
575 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
576 			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
577 			break;
578 		default:
579 			dp_err_rl("Non-support error code %d", err_code);
580 			qdf_nbuf_free(nbuf);
581 		}
582 
583 process_next_msdu:
584 		msdu_processed++;
585 		head_nbuf = NULL;
586 		tail_nbuf = NULL;
587 	}
588 
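	/* if not all msdus of this mpdu were found in the current link desc,
	 * return it to the idle list and continue with the next chained
	 * msdu link desc
	 */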
589 	if (msdu_processed < mpdu_desc_info->msdu_count) {
590 		hal_rx_get_next_msdu_link_desc_buf_addr_info(
591 						link_desc_va,
592 						&next_link_desc_addr_info);
593 
594 		if (hal_rx_is_buf_addr_info_valid(
595 				&next_link_desc_addr_info)) {
596 			dp_rx_link_desc_return_by_addr(
597 					soc,
598 					buf_addr_info,
599 					HAL_BM_ACTION_PUT_IN_IDLE_LIST);
600 
601 			hal_rx_buffer_addr_info_get_paddr(
602 						&next_link_desc_addr_info,
603 						&buf_info);
604 			link_desc_va =
605 				dp_rx_cookie_2_link_desc_va(soc, &buf_info);
606 			cur_link_desc_addr_info = next_link_desc_addr_info;
607 			buf_addr_info = &cur_link_desc_addr_info;
608 
609 			goto more_msdu_link_desc;
610 		}
611 	}
612 
613 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
614 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
615 	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
616 		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);
617 
618 	return rx_bufs_used;
619 }
620 
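/*
 * With DP_INVALID_PEER_ASSERT defined, assert that the pdev invalid peer
 * msdu list is empty (head and tail are NULL); otherwise this is a no-op.
 */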
621 #ifdef DP_INVALID_PEER_ASSERT
622 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
623 		do {                                \
624 			qdf_assert_always(!(head)); \
625 			qdf_assert_always(!(tail)); \
626 		} while (0)
627 #else
628 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
629 #endif
630 
631 /**
632  * dp_rx_chain_msdus() - Function to chain all msdus of an mpdu
633  *                       to pdev invalid peer list
634  *
635  * @soc: core DP main context
636  * @nbuf: Buffer pointer
637  * @rx_tlv_hdr: start of rx tlv header
638  * @mac_id: mac id
639  *
640  *  Return: bool: true for last msdu of mpdu
641  */
642 static bool
643 dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf,
644 		  uint8_t *rx_tlv_hdr, uint8_t mac_id)
645 {
646 	bool mpdu_done = false;
647 	qdf_nbuf_t curr_nbuf = NULL;
648 	qdf_nbuf_t tmp_nbuf = NULL;
649 
650 	/* TODO: Currently only single radio is supported, hence
651 	 * pdev hard coded to '0' index
652 	 */
653 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
654 
655 	if (!dp_pdev) {
656 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
657 			  "pdev is null for mac_id = %d", mac_id);
658 		return mpdu_done;
659 	}
660 	/* if the invalid peer SG list has reached the max number of buffers,
661 	 * free the buffers in the list and treat the current buffer as the
662 	 * start of a new list.
663 	 *
664 	 * The current logic to detect the last buffer from attn_tlv is not
665 	 * reliable in OFDMA UL, so a max buffers check is added to avoid pile-up
666 	 */
667 	if (!dp_pdev->first_nbuf ||
668 	    (dp_pdev->invalid_peer_head_msdu &&
669 	    QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
670 	    (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
671 		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
672 		dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc,
673 						      rx_tlv_hdr);
674 		dp_pdev->first_nbuf = true;
675 
676 		/* If the new nbuf received is the first msdu of the
677 		 * amsdu and there are msdus in the invalid peer msdu
678 		 * list, then let us free all the msdus of the invalid
679 		 * peer msdu list.
680 		 * This scenario can happen when we start receiving
681 		 * a new a-msdu even before the previous a-msdu is completely
682 		 * received.
683 		 */
684 		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
685 		while (curr_nbuf) {
686 			tmp_nbuf = curr_nbuf->next;
687 			qdf_nbuf_free(curr_nbuf);
688 			curr_nbuf = tmp_nbuf;
689 		}
690 
691 		dp_pdev->invalid_peer_head_msdu = NULL;
692 		dp_pdev->invalid_peer_tail_msdu = NULL;
693 		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
694 				&(dp_pdev->ppdu_info.rx_status));
695 
696 	}
697 
698 	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
699 	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
700 		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
701 		qdf_assert_always(dp_pdev->first_nbuf == true);
702 		dp_pdev->first_nbuf = false;
703 		mpdu_done = true;
704 	}
705 
706 	/*
707 	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
708 	 * should be NULL here; the check is added for debugging purposes
709 	 * to catch corner cases.
710 	 */
711 	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
712 					dp_pdev->invalid_peer_tail_msdu);
713 	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
714 				dp_pdev->invalid_peer_tail_msdu,
715 				nbuf);
716 
717 	return mpdu_done;
718 }
719 
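/**
 * dp_rx_wbm_err_handle_bar() - Handle a BAR frame delivered on the WBM
 *				error release ring
 *
 * @soc: core DP main context
 * @peer: dp peer handle
 * @nbuf: buffer containing the BAR frame
 *
 * Verifies that the frame is a BAR, extracts the tid and start sequence
 * number, and updates the rx tid state using the existing BA window size.
 *
 * Return: None
 */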
720 static
721 void dp_rx_wbm_err_handle_bar(struct dp_soc *soc,
722 			      struct dp_peer *peer,
723 			      qdf_nbuf_t nbuf)
724 {
725 	uint8_t *rx_tlv_hdr;
726 	unsigned char type, subtype;
727 	uint16_t start_seq_num;
728 	uint32_t tid;
729 	struct ieee80211_frame_bar *bar;
730 
731 	/*
732 	 * 1. Is this a BAR frame? If not, discard it.
733 	 * 2. If it is, get the peer id, tid, ssn
734 	 * 2a. Do a tid update
735 	 */
736 
737 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
738 	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + SIZE_OF_DATA_RX_TLV);
739 
740 	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
741 	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
742 
743 	if (!(type == IEEE80211_FC0_TYPE_CTL &&
744 	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
745 		dp_err_rl("Not a BAR frame!");
746 		return;
747 	}
748 
749 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
750 	qdf_assert_always(tid < DP_MAX_TIDS);
751 
752 	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
753 
754 	dp_info_rl("tid %u window_size %u start_seq_num %u",
755 		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);
756 
757 	dp_rx_tid_update_wifi3(peer, tid,
758 			       peer->rx_tid[tid].ba_win_size,
759 			       start_seq_num);
760 }
761 
762 /**
763  * dp_2k_jump_handle() - Function to handle 2k jump exception
764  *                        on WBM ring
765  *
766  * @soc: core DP main context
767  * @nbuf: buffer pointer
768  * @rx_tlv_hdr: start of rx tlv header
769  * @peer_id: peer id of first msdu
770  * @tid: Tid for which exception occurred
771  *
772  * This function handles 2k jump violations arising out
773  * of receiving aggregates in the non-BA case. This typically
774  * may happen if aggregates are received on a QOS enabled TID
775  * while the Rx window size is still initialized to a value of 2. Or
776  * it may also happen if the negotiated window size is 1 but the peer
777  * sends aggregates.
778  *
779  */
780 
781 void
782 dp_2k_jump_handle(struct dp_soc *soc,
783 		  qdf_nbuf_t nbuf,
784 		  uint8_t *rx_tlv_hdr,
785 		  uint16_t peer_id,
786 		  uint8_t tid)
787 {
788 	struct dp_peer *peer = NULL;
789 	struct dp_rx_tid *rx_tid = NULL;
790 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;
791 
792 	peer = dp_peer_find_by_id(soc, peer_id);
793 	if (!peer) {
794 		dp_info_rl("peer not found");
795 		goto free_nbuf;
796 	}
797 
798 	if (tid >= DP_MAX_TIDS) {
799 		dp_info_rl("invalid tid");
800 		goto nbuf_deliver;
801 	}
802 
803 	rx_tid = &peer->rx_tid[tid];
804 	qdf_spin_lock_bh(&rx_tid->tid_lock);
805 
806 	/* allow sending a DELBA only if the BA session is active */
807 	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
808 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
809 		goto nbuf_deliver;
810 	}
811 
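	/* send a DELBA (reason: QOS setup required) only if one has not
	 * already been sent for this tid
	 */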
812 	if (!rx_tid->delba_tx_status) {
813 		rx_tid->delba_tx_retry++;
814 		rx_tid->delba_tx_status = 1;
815 		rx_tid->delba_rcode =
816 			IEEE80211_REASON_QOS_SETUP_REQUIRED;
817 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
818 		if (soc->cdp_soc.ol_ops->send_delba) {
819 			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent, 1);
820 			soc->cdp_soc.ol_ops->send_delba(
821 					peer->vdev->pdev->soc->ctrl_psoc,
822 					peer->vdev->vdev_id,
823 					peer->mac_addr.raw,
824 					tid,
825 					rx_tid->delba_rcode);
826 		}
827 	} else {
828 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
829 	}
830 
831 nbuf_deliver:
832 	if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask,
833 					rx_tlv_hdr)) {
834 		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
835 		dp_peer_unref_del_find_by_id(peer);
836 		return;
837 	}
838 
839 free_nbuf:
840 	if (peer)
841 		dp_peer_unref_del_find_by_id(peer);
842 
843 	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
844 	qdf_nbuf_free(nbuf);
845 }
846 
847 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
848     defined(QCA_WIFI_QCA6750)
849 /**
850  * dp_rx_null_q_handle_invalid_peer_id_exception() - handle invalid peer id exception
851  * @soc: pointer to dp_soc struct
852  * @pool_id: Pool id to find dp_pdev
853  * @rx_tlv_hdr: TLV header of received packet
854  * @nbuf: SKB
855  *
856  * In certain types of packets, if the peer_id is not correct, the
857  * driver may not be able to find the peer. Try finding the peer by
858  * addr_2 of the received MPDU. If the peer is found, then most likely
859  * sw_peer_id & ast_idx are corrupted.
860  *
861  * Return: true if the peer is found by addr_2 of the received MPDU, else false
862  */
863 static bool
864 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
865 					      uint8_t pool_id,
866 					      uint8_t *rx_tlv_hdr,
867 					      qdf_nbuf_t nbuf)
868 {
869 	struct dp_peer *peer = NULL;
870 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
871 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
872 	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;
873 
874 	if (!pdev) {
875 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
876 			  "pdev is null for pool_id = %d", pool_id);
877 		return false;
878 	}
879 	/*
880 	 * WAR - In certain types of packets, if the peer_id is not correct,
881 	 * the driver may not be able to find the peer. Try finding the peer
882 	 * by addr_2 of the received MPDU.
883 	 */
884 	if (wh)
885 		peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
886 					    wh->i_addr2);
887 	if (peer) {
888 		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
889 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
890 				     QDF_TRACE_LEVEL_DEBUG);
891 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
892 				 1, qdf_nbuf_len(nbuf));
893 		qdf_nbuf_free(nbuf);
894 
895 		return true;
896 	}
897 	return false;
898 }
899 
900 /**
901  * dp_rx_check_pkt_len() - Check for pktlen validity
902  * @soc: DP SOC context
903  * @pkt_len: computed length of the pkt from caller in bytes
904  *
905  * Return: true if pktlen > RX_DATA_BUFFER_SIZE, else return false
906  *
907  */
908 static inline
909 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
910 {
911 	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
912 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
913 				 1, pkt_len);
914 		return true;
915 	} else {
916 		return false;
917 	}
918 }
919 
920 #else
921 static inline bool
922 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
923 					      uint8_t pool_id,
924 					      uint8_t *rx_tlv_hdr,
925 					      qdf_nbuf_t nbuf)
926 {
927 	return false;
928 }
929 
930 static inline
931 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
932 {
933 	return false;
934 }
935 
936 #endif
937 
938 /**
939  * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
940  *                              descriptor violation on either a
941  *                              REO or WBM ring
942  *
943  * @soc: core DP main context
944  * @nbuf: buffer pointer
945  * @rx_tlv_hdr: start of rx tlv header
946  * @pool_id: mac id
947  * @peer: peer handle
948  *
949  * This function handles NULL queue descriptor violations arising out of
950  * a missing REO queue for a given peer or a given TID. This typically
951  * may happen if a packet is received on a QOS enabled TID before the
952  * ADDBA negotiation for that TID has set up the TID queue. Or
953  * it may also happen for MC/BC frames if they are not routed to the
954  * non-QOS TID queue, in the absence of any other default TID queue.
955  * This error can show up in either a REO destination or a WBM release ring.
956  *
957  * Return: QDF_STATUS_SUCCESS if the nbuf is handled successfully; an error
958  *         QDF status code if the nbuf could not be handled or was dropped.
959  */
960 static QDF_STATUS
961 dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
962 			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
963 			 struct dp_peer *peer)
964 {
965 	uint32_t pkt_len;
966 	uint16_t msdu_len;
967 	struct dp_vdev *vdev;
968 	uint8_t tid;
969 	qdf_ether_header_t *eh;
970 	struct hal_rx_msdu_metadata msdu_metadata;
971 	uint16_t sa_idx = 0;
972 
973 	qdf_nbuf_set_rx_chfrag_start(nbuf,
974 				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
975 							       rx_tlv_hdr));
976 	qdf_nbuf_set_rx_chfrag_end(nbuf,
977 				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
978 								 rx_tlv_hdr));
979 	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
980 								  rx_tlv_hdr));
981 	qdf_nbuf_set_da_valid(nbuf,
982 			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
983 							      rx_tlv_hdr));
984 	qdf_nbuf_set_sa_valid(nbuf,
985 			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
986 							      rx_tlv_hdr));
987 
988 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
989 	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
990 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + RX_PKT_TLVS_LEN;
991 
992 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
993 		if (dp_rx_check_pkt_len(soc, pkt_len))
994 			goto drop_nbuf;
995 
996 		/* Set length in nbuf */
997 		qdf_nbuf_set_pktlen(
998 			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
999 		qdf_assert_always(nbuf->data == rx_tlv_hdr);
1000 	}
1001 
1002 	/*
1003 	 * Check if DMA completed -- msdu_done is the last bit
1004 	 * to be written
1005 	 */
1006 	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
1007 
1008 		dp_err_rl("MSDU DONE failure");
1009 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1010 				     QDF_TRACE_LEVEL_INFO);
1011 		qdf_assert(0);
1012 	}
1013 
1014 	if (!peer &&
1015 	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
1016 							  rx_tlv_hdr, nbuf))
1017 		return QDF_STATUS_E_FAILURE;
1018 
1019 	if (!peer) {
1020 		bool mpdu_done = false;
1021 		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
1022 
1023 		if (!pdev) {
1024 			dp_err_rl("pdev is null for pool_id = %d", pool_id);
1025 			return QDF_STATUS_E_FAILURE;
1026 		}
1027 
1028 		dp_err_rl("peer is NULL");
1029 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1030 				 qdf_nbuf_len(nbuf));
1031 
1032 		/* QCN9000 has the support enabled */
1033 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
1034 			mpdu_done = true;
1035 			nbuf->next = NULL;
1036 			/* Trigger invalid peer handler wrapper */
1037 			dp_rx_process_invalid_peer_wrapper(soc,
1038 					nbuf, mpdu_done, pool_id);
1039 		} else {
1040 			mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
1041 			/* Trigger invalid peer handler wrapper */
1042 			dp_rx_process_invalid_peer_wrapper(soc,
1043 					pdev->invalid_peer_head_msdu,
1044 					mpdu_done, pool_id);
1045 		}
1046 
1047 		if (mpdu_done) {
1048 			pdev->invalid_peer_head_msdu = NULL;
1049 			pdev->invalid_peer_tail_msdu = NULL;
1050 		}
1051 
1052 		return QDF_STATUS_E_FAILURE;
1053 	}
1054 
1055 	vdev = peer->vdev;
1056 	if (!vdev) {
1057 		dp_err_rl("Null vdev!");
1058 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1059 		goto drop_nbuf;
1060 	}
1061 
1062 	/*
1063 	 * Advance the packet start pointer by total size of
1064 	 * pre-header TLV's
1065 	 */
1066 	if (qdf_nbuf_is_frag(nbuf))
1067 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1068 	else
1069 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
1070 				   RX_PKT_TLVS_LEN));
1071 
1072 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
1073 
1074 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
1075 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
1076 
1077 		if ((sa_idx < 0) ||
1078 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
1079 			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
1080 			goto drop_nbuf;
1081 		}
1082 	}
1083 
1084 	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
1085 		/* this is a looped back MCBC pkt, drop it */
1086 		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
1087 		goto drop_nbuf;
1088 	}
1089 
1090 	/*
1091 	 * In qwrap mode if the received packet matches with any of the vdev
1092 	 * mac addresses, drop it. Do not receive multicast packets originated
1093 	 * from any proxysta.
1094 	 */
1095 	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
1096 		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
1097 		goto drop_nbuf;
1098 	}
1099 
1100 
1101 	if (qdf_unlikely((peer->nawds_enabled == true) &&
1102 			hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1103 						       rx_tlv_hdr))) {
1104 		dp_err_rl("free buffer for multicast packet");
1105 		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
1106 		goto drop_nbuf;
1107 	}
1108 
1109 	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
1110 		dp_err_rl("mcast Policy Check Drop pkt");
1111 		goto drop_nbuf;
1112 	}
1113 	/* WDS Source Port Learning */
1114 	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
1115 		vdev->wds_enabled))
1116 		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf,
1117 					msdu_metadata);
1118 
1119 	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
1120 		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
1121 		if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
1122 			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
1123 			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
1124 	}
1125 
1126 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1127 		qdf_nbuf_set_next(nbuf, NULL);
1128 		dp_rx_deliver_raw(vdev, nbuf, peer);
1129 	} else {
1130 		qdf_nbuf_set_next(nbuf, NULL);
1131 		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
1132 				 qdf_nbuf_len(nbuf));
1133 
1134 		/*
1135 		 * Update the protocol tag in SKB based on
1136 		 * CCE metadata
1137 		 */
1138 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1139 					  EXCEPTION_DEST_RING_ID,
1140 					  true, true);
1141 
1142 		/* Update the flow tag in SKB based on FSE metadata */
1143 		dp_rx_update_flow_tag(soc, vdev, nbuf,
1144 				      rx_tlv_hdr, true);
1145 
1146 		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
1147 				 soc->hal_soc, rx_tlv_hdr) &&
1148 				 (vdev->rx_decap_type ==
1149 				  htt_cmn_pkt_type_ethernet))) {
1150 			eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1151 			DP_STATS_INC_PKT(peer, rx.multicast, 1,
1152 					 qdf_nbuf_len(nbuf));
1153 
1154 			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
1155 				DP_STATS_INC_PKT(peer, rx.bcast, 1,
1156 						 qdf_nbuf_len(nbuf));
1157 		}
1158 
1159 		qdf_nbuf_set_exc_frame(nbuf, 1);
1160 		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
1161 	}
1162 	return QDF_STATUS_SUCCESS;
1163 
1164 drop_nbuf:
1165 	qdf_nbuf_free(nbuf);
1166 	return QDF_STATUS_E_FAILURE;
1167 }
1168 
1169 /**
1170  * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
1171  *			       or wifi parse error frames to OS.
1172  * @soc: core DP main context
1173  * @nbuf: buffer pointer
1174  * @rx_tlv_hdr: start of rx tlv header
1175  * @peer: peer reference
1176  * @err_code: rxdma err code
1177  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1178  * pool_id have the same mapping)
1179  *
1180  * Return: None
1181  */
1182 void
1183 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
1184 			uint8_t *rx_tlv_hdr, struct dp_peer *peer,
1185 			uint8_t err_code, uint8_t mac_id)
1186 {
1187 	uint32_t pkt_len, l2_hdr_offset;
1188 	uint16_t msdu_len;
1189 	struct dp_vdev *vdev;
1190 	qdf_ether_header_t *eh;
1191 	bool is_broadcast;
1192 
1193 	/*
1194 	 * Check if DMA completed -- msdu_done is the last bit
1195 	 * to be written
1196 	 */
1197 	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
1198 
1199 		dp_err_rl("MSDU DONE failure");
1200 
1201 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1202 				     QDF_TRACE_LEVEL_INFO);
1203 		qdf_assert(0);
1204 	}
1205 
1206 	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
1207 							   rx_tlv_hdr);
1208 	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
1209 	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
1210 
1211 	if (dp_rx_check_pkt_len(soc, pkt_len)) {
1212 		/* Drop & free packet */
1213 		qdf_nbuf_free(nbuf);
1214 		return;
1215 	}
1216 	/* Set length in nbuf */
1217 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
1218 
1219 	qdf_nbuf_set_next(nbuf, NULL);
1220 
1221 	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1222 	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
1223 
1224 	if (!peer) {
1225 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
1226 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1227 				qdf_nbuf_len(nbuf));
1228 		/* Trigger invalid peer handler wrapper */
1229 		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
1230 		return;
1231 	}
1232 
1233 	vdev = peer->vdev;
1234 	if (!vdev) {
1235 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1236 				FL("INVALID vdev %pK OR osif_rx"), vdev);
1237 		/* Drop & free packet */
1238 		qdf_nbuf_free(nbuf);
1239 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1240 		return;
1241 	}
1242 
1243 	/*
1244 	 * Advance the packet start pointer by total size of
1245 	 * pre-header TLV's
1246 	 */
1247 	dp_rx_skip_tlvs(nbuf, l2_hdr_offset);
1248 
1249 	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
1250 		uint8_t *pkt_type;
1251 
1252 		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
1253 		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
1254 			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
1255 							htons(QDF_LLC_STP)) {
1256 				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
1257 				goto process_mesh;
1258 			} else {
1259 				goto process_rx;
1260 			}
1261 		}
1262 	}
1263 	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
1264 		goto process_mesh;
1265 
1266 	/*
1267 	 * WAPI cert AP sends rekey frames as unencrypted.
1268 	 * Thus RXDMA will report unencrypted frame error.
1269 	 * To pass WAPI cert case, SW needs to pass unencrypted
1270 	 * rekey frame to stack.
1271 	 */
1272 	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1273 		goto process_rx;
1274 	}
1275 	/*
1276 	 * In the dynamic WEP case rekey frames are not encrypted,
1277 	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled and
1278 	 * key install is already done.
1279 	 */
1280 	if ((vdev->sec_type == cdp_sec_type_wep104) &&
1281 	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
1282 		goto process_rx;
1283 
1284 process_mesh:
1285 
1286 	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
1287 		qdf_nbuf_free(nbuf);
1288 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1289 		return;
1290 	}
1291 
1292 	if (vdev->mesh_vdev) {
1293 		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
1294 				      == QDF_STATUS_SUCCESS) {
1295 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
1296 				  FL("mesh pkt filtered"));
1297 			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);
1298 
1299 			qdf_nbuf_free(nbuf);
1300 			return;
1301 		}
1302 		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
1303 	}
1304 process_rx:
1305 	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1306 							rx_tlv_hdr) &&
1307 		(vdev->rx_decap_type ==
1308 				htt_cmn_pkt_type_ethernet))) {
1309 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1310 		is_broadcast = (QDF_IS_ADDR_BROADCAST
1311 				(eh->ether_dhost)) ? 1 : 0 ;
1312 		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
1313 		if (is_broadcast) {
1314 			DP_STATS_INC_PKT(peer, rx.bcast, 1,
1315 					qdf_nbuf_len(nbuf));
1316 		}
1317 	}
1318 
1319 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1320 		dp_rx_deliver_raw(vdev, nbuf, peer);
1321 	} else {
1322 		/* Update the protocol tag in SKB based on CCE metadata */
1323 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1324 					  EXCEPTION_DEST_RING_ID, true, true);
1325 		/* Update the flow tag in SKB based on FSE metadata */
1326 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
1327 		DP_STATS_INC(peer, rx.to_stack.num, 1);
1328 		qdf_nbuf_set_exc_frame(nbuf, 1);
1329 		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
1330 	}
1331 
1332 	return;
1333 }
1334 
1335 /**
1336  * dp_rx_process_mic_error() - Function to pass mic error indication to umac
1337  * @soc: core DP main context
1338  * @nbuf: buffer pointer
1339  * @rx_tlv_hdr: start of rx tlv header
1340  * @peer: peer handle
1341  *
1342  * Return: void
1343  */
1344 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
1345 			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
1346 {
1347 	struct dp_vdev *vdev = NULL;
1348 	struct dp_pdev *pdev = NULL;
1349 	struct ol_if_ops *tops = NULL;
1350 	uint16_t rx_seq, fragno;
1351 	uint8_t is_raw;
1352 	unsigned int tid;
1353 	QDF_STATUS status;
1354 	struct cdp_rx_mic_err_info mic_failure_info;
1355 
1356 	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1357 					    rx_tlv_hdr))
1358 		return;
1359 
1360 	if (!peer) {
1361 		dp_info_rl("peer not found");
1362 		goto fail;
1363 	}
1364 
1365 	vdev = peer->vdev;
1366 	if (!vdev) {
1367 		dp_info_rl("VDEV not found");
1368 		goto fail;
1369 	}
1370 
1371 	pdev = vdev->pdev;
1372 	if (!pdev) {
1373 		dp_info_rl("PDEV not found");
1374 		goto fail;
1375 	}
1376 
1377 	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
1378 	if (is_raw) {
1379 		fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
1380 		/* Can get only last fragment */
1381 		if (fragno) {
1382 			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
1383 							qdf_nbuf_data(nbuf));
1384 			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
1385 							qdf_nbuf_data(nbuf));
1386 
1387 			status = dp_rx_defrag_add_last_frag(soc, peer,
1388 							    tid, rx_seq, nbuf);
1389 			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
1390 				   "status %d !", rx_seq, fragno, status);
1391 			return;
1392 		}
1393 	}
1394 
1395 	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
1396 				  &mic_failure_info.da_mac_addr.bytes[0])) {
1397 		dp_err_rl("Failed to get da_mac_addr");
1398 		goto fail;
1399 	}
1400 
1401 	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
1402 				  &mic_failure_info.ta_mac_addr.bytes[0])) {
1403 		dp_err_rl("Failed to get ta_mac_addr");
1404 		goto fail;
1405 	}
1406 
1407 	mic_failure_info.key_id = 0;
1408 	mic_failure_info.multicast =
1409 		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
1410 	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
1411 	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
1412 	mic_failure_info.data = NULL;
1413 	mic_failure_info.vdev_id = vdev->vdev_id;
1414 
1415 	tops = pdev->soc->cdp_soc.ol_ops;
1416 	if (tops->rx_mic_error)
1417 		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
1418 				   &mic_failure_info);
1419 
1420 fail:
1421 	qdf_nbuf_free(nbuf);
1422 	return;
1423 }
1424 
1425 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
1426 /**
1427  * dp_rx_link_cookie_check() - Validate link desc cookie
1428  * @ring_desc: ring descriptor
1429  *
1430  * Return: qdf status
1431  */
1432 static inline QDF_STATUS
1433 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1434 {
1435 	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
1436 		return QDF_STATUS_E_FAILURE;
1437 
1438 	return QDF_STATUS_SUCCESS;
1439 }
1440 
1441 /**
1442  * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
1443  * @ring_desc: ring descriptor
1444  *
1445  * Return: None
1446  */
1447 static inline void
1448 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1449 {
1450 	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
1451 }
1452 #else
1453 static inline QDF_STATUS
1454 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1455 {
1456 	return QDF_STATUS_SUCCESS;
1457 }
1458 
1459 static inline void
1460 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1461 {
1462 }
1463 #endif
1464 
1465 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1466 /**
1467  * dp_rx_err_ring_record_entry() - Record rx err ring history
1468  * @soc: Datapath soc structure
1469  * @paddr: paddr of the buffer in RX err ring
1470  * @sw_cookie: SW cookie of the buffer in RX err ring
1471  * @rbm: Return buffer manager of the buffer in RX err ring
1472  *
1473  * Return: None
1474  */
1475 static inline void
1476 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1477 			    uint32_t sw_cookie, uint8_t rbm)
1478 {
1479 	struct dp_buf_info_record *record;
1480 	uint32_t idx;
1481 
1482 	if (qdf_unlikely(!soc->rx_err_ring_history))
1483 		return;
1484 
1485 	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
1486 					DP_RX_ERR_HIST_MAX);
1487 
1488 	/* No NULL check needed for record since it's an array */
1489 	record = &soc->rx_err_ring_history->entry[idx];
1490 
1491 	record->timestamp = qdf_get_log_timestamp();
1492 	record->hbi.paddr = paddr;
1493 	record->hbi.sw_cookie = sw_cookie;
1494 	record->hbi.rbm = rbm;
1495 }
1496 #else
1497 static inline void
1498 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1499 			    uint32_t sw_cookie, uint8_t rbm)
1500 {
1501 }
1502 #endif
1503 
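/*
 * dp_rx_err_process() - REO error (exception) ring processing loop.
 *
 * Reaps entries from the REO error ring, validates the link descriptor
 * cookie, and dispatches fragmented MPDUs, PN errors, 2K jump and OOR
 * errors to their respective handlers. Buffers reaped per pool are
 * replenished back to the RXDMA refill ring before returning.
 */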
1504 uint32_t
1505 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
1506 		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
1507 {
1508 	hal_ring_desc_t ring_desc;
1509 	hal_soc_handle_t hal_soc;
1510 	uint32_t count = 0;
1511 	uint32_t rx_bufs_used = 0;
1512 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
1513 	uint8_t mac_id = 0;
1514 	uint8_t buf_type;
1515 	uint8_t error, rbm;
1516 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
1517 	struct hal_buf_info hbi;
1518 	struct dp_pdev *dp_pdev;
1519 	struct dp_srng *dp_rxdma_srng;
1520 	struct rx_desc_pool *rx_desc_pool;
1521 	uint32_t cookie = 0;
1522 	void *link_desc_va;
1523 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
1524 	uint16_t num_msdus;
1525 	struct dp_rx_desc *rx_desc = NULL;
1526 	QDF_STATUS status;
1527 	bool ret;
1528 
1529 	/* Debug -- Remove later */
1530 	qdf_assert(soc && hal_ring_hdl);
1531 
1532 	hal_soc = soc->hal_soc;
1533 
1534 	/* Debug -- Remove later */
1535 	qdf_assert(hal_soc);
1536 
1537 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
1538 
1539 		/* TODO */
1540 		/*
1541 		 * Need API to convert from hal_ring pointer to
1542 		 * Ring Type / Ring Id combo
1543 		 */
1544 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
1545 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1546 			FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
1547 		goto done;
1548 	}
1549 
1550 	while (qdf_likely(quota-- && (ring_desc =
1551 				hal_srng_dst_peek(hal_soc,
1552 						  hal_ring_hdl)))) {
1553 
1554 		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
1555 
1556 		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
1557 
1558 		qdf_assert(error == HAL_REO_ERROR_DETECTED);
1559 
1560 		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
1561 		/*
1562 		 * For REO error ring, expect only MSDU LINK DESC
1563 		 */
1564 		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);
1565 
1566 		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
1567 		/*
1568 		 * check for the magic number in the sw cookie
1569 		 */
1570 		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
1571 							LINK_DESC_ID_START);
1572 
1573 		status = dp_rx_link_cookie_check(ring_desc);
1574 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
1575 			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
1576 			break;
1577 		}
1578 
1579 		/*
1580 		 * Check if the buffer is to be processed on this processor
1581 		 */
1582 		rbm = hal_rx_ret_buf_manager_get(ring_desc);
1583 
1584 		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
1585 		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
1586 		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
1587 				     &num_msdus);
1588 		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
1589 					    msdu_list.sw_cookie[0],
1590 					    msdu_list.rbm[0]);
1591 		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
1592 				(msdu_list.rbm[0] !=
1593 					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST) &&
1594 				(msdu_list.rbm[0] != DP_DEFRAG_RBM))) {
1595 			/* TODO */
1596 			/* Call appropriate handler */
1597 			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
1598 				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
1599 				QDF_TRACE(QDF_MODULE_ID_DP,
1600 					  QDF_TRACE_LEVEL_ERROR,
1601 					  FL("Invalid RBM %d"),
1602 					     msdu_list.rbm[0]);
1603 			}
1604 
1605 			/* Return link descriptor through WBM ring (SW2WBM)*/
1606 			dp_rx_link_desc_return(soc, ring_desc,
1607 					HAL_BM_ACTION_RELEASE_MSDU_LIST);
1608 			goto next_entry;
1609 		}
1610 
1611 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
1612 						      msdu_list.sw_cookie[0]);
1613 		qdf_assert_always(rx_desc);
1614 
1615 		mac_id = rx_desc->pool_id;
1616 
1617 		/* Get the MPDU DESC info */
1618 		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
1619 
1620 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
1621 			/*
1622 			 * We only handle one msdu per link desc for fragmented
1623 			 * case. We drop the msdus and release the link desc
1624 			 * back if there are more than one msdu in link desc.
1625 			 */
1626 			if (qdf_unlikely(num_msdus > 1)) {
1627 				count = dp_rx_msdus_drop(soc, ring_desc,
1628 							 &mpdu_desc_info,
1629 							 &mac_id, quota);
1630 				rx_bufs_reaped[mac_id] += count;
1631 				goto next_entry;
1632 			}
1633 
1634 			/*
1635 			 * This is an unlikely scenario where the host is reaping
1636 			 * a descriptor which it already reaped just a while ago
1637 			 * but is yet to replenish it back to HW.
1638 			 * In this case the host will dump the last 128 descriptors
1639 			 * including the software descriptor rx_desc and assert.
1640 			 */
1641 
1642 			if (qdf_unlikely(!rx_desc->in_use)) {
1643 				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1644 				dp_info_rl("Reaping rx_desc not in use!");
1645 				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1646 							   ring_desc, rx_desc);
1647 				/* ignore duplicate RX desc and continue */
1648 				/* Pop out the descriptor */
1649 				goto next_entry;
1650 			}
1651 
1652 			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
1653 							    msdu_list.paddr[0]);
1654 			if (!ret) {
1655 				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1656 				rx_desc->in_err_state = 1;
1657 				goto next_entry;
1658 			}
1659 
1660 			count = dp_rx_frag_handle(soc,
1661 						  ring_desc, &mpdu_desc_info,
1662 						  rx_desc, &mac_id, quota);
1663 
1664 			rx_bufs_reaped[mac_id] += count;
1665 			DP_STATS_INC(soc, rx.rx_frags, 1);
1666 			goto next_entry;
1667 		}
1668 
1669 		if (hal_rx_reo_is_pn_error(ring_desc)) {
1670 			/* TODO */
1671 			DP_STATS_INC(soc,
1672 				rx.err.
1673 				reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
1674 				1);
1675 			/* increment @pdev level */
1676 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1677 			if (dp_pdev)
1678 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
1679 			count = dp_rx_pn_error_handle(soc,
1680 						      ring_desc,
1681 						      &mpdu_desc_info, &mac_id,
1682 						      quota);
1683 
1684 			rx_bufs_reaped[mac_id] += count;
1685 			goto next_entry;
1686 		}
1687 
1688 		if (hal_rx_reo_is_2k_jump(ring_desc)) {
1689 			/* TODO */
1690 			DP_STATS_INC(soc,
1691 				rx.err.
1692 				reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
1693 				1);
1694 			/* increment @pdev level */
1695 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1696 			if (dp_pdev)
1697 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
1698 
1699 			count = dp_rx_reo_err_entry_process(
1700 					soc,
1701 					ring_desc,
1702 					&mpdu_desc_info,
1703 					link_desc_va,
1704 					HAL_REO_ERR_REGULAR_FRAME_2K_JUMP);
1705 
1706 			rx_bufs_reaped[mac_id] += count;
1707 			goto next_entry;
1708 		}
1709 
1710 		if (hal_rx_reo_is_oor_error(ring_desc)) {
1711 			DP_STATS_INC(
1712 				soc,
1713 				rx.err.
1714 				reo_error[HAL_REO_ERR_REGULAR_FRAME_OOR],
1715 				1);
1716 			/* increment @pdev level */
1717 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1718 			if (dp_pdev)
1719 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
1720 			count = dp_rx_reo_err_entry_process(
1721 					soc,
1722 					ring_desc,
1723 					&mpdu_desc_info,
1724 					link_desc_va,
1725 					HAL_REO_ERR_REGULAR_FRAME_OOR);
1726 
1727 			rx_bufs_reaped[mac_id] += count;
1728 			goto next_entry;
1729 		}
1730 next_entry:
1731 		dp_rx_link_cookie_invalidate(ring_desc);
1732 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1733 	}
1734 
1735 done:
1736 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
1737 
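	/* flush the rx defrag waitlist once its flush deadline has passed */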
1738 	if (soc->rx.flags.defrag_timeout_check) {
1739 		uint32_t now_ms =
1740 			qdf_system_ticks_to_msecs(qdf_system_ticks());
1741 
1742 		if (now_ms >= soc->rx.defrag.next_flush_ms)
1743 			dp_rx_defrag_waitlist_flush(soc);
1744 	}
1745 
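	/* replenish the rx buffers reaped above back to the refill ring */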
1746 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
1747 		if (rx_bufs_reaped[mac_id]) {
1748 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1749 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
1750 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
1751 
1752 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
1753 						rx_desc_pool,
1754 						rx_bufs_reaped[mac_id],
1755 						&dp_pdev->free_list_head,
1756 						&dp_pdev->free_list_tail);
1757 			rx_bufs_used += rx_bufs_reaped[mac_id];
1758 		}
1759 	}
1760 
1761 	return rx_bufs_used; /* Assume no scale factor for now */
1762 }
1763 
1764 #ifdef DROP_RXDMA_DECRYPT_ERR
1765 /**
1766  * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
1767  *
1768  * Return: true if rxdma decrypt err frames are handled and false otherwise
1769  */
1770 static inline bool dp_handle_rxdma_decrypt_err(void)
1771 {
1772 	return false;
1773 }
1774 #else
1775 static inline bool dp_handle_rxdma_decrypt_err(void)
1776 {
1777 	return true;
1778 }
1779 #endif
1780 
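/**
 * dp_rx_is_sg_formation_required() - Check if this WBM error frame may be
 *				       part of a scatter-gather (SG) list
 * @info: WBM error descriptor info read from the release ring entry
 *
 * Return: true if the error code is handled by an SG-aware error handler
 */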
1781 static inline bool
1782 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
1783 {
1784 	/*
1785 	 * Currently the Null Queue and Unencrypted error handlers have support
1786 	 * for SG. Other error handlers do not deal with SG buffers.
1787 	 */
1788 	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
1789 	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
1790 	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
1791 	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
1792 		return true;
1793 
1794 	return false;
1795 }
1796 
1797 uint32_t
1798 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
1799 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
1800 {
1801 	hal_ring_desc_t ring_desc;
1802 	hal_soc_handle_t hal_soc;
1803 	struct dp_rx_desc *rx_desc;
1804 	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
1805 	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
1806 	uint32_t rx_bufs_used = 0;
1807 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
1808 	uint8_t buf_type, rbm;
1809 	uint32_t rx_buf_cookie;
1810 	uint8_t mac_id;
1811 	struct dp_pdev *dp_pdev;
1812 	struct dp_srng *dp_rxdma_srng;
1813 	struct rx_desc_pool *rx_desc_pool;
1814 	uint8_t *rx_tlv_hdr;
1815 	qdf_nbuf_t nbuf_head = NULL;
1816 	qdf_nbuf_t nbuf_tail = NULL;
1817 	qdf_nbuf_t nbuf, next;
1818 	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
1819 	uint8_t pool_id;
1820 	uint8_t tid = 0;
1821 	uint8_t msdu_continuation = 0;
1822 	bool process_sg_buf = false;
1823 
1824 	/* Debug -- Remove later */
1825 	qdf_assert(soc && hal_ring_hdl);
1826 
1827 	hal_soc = soc->hal_soc;
1828 
1829 	/* Debug -- Remove later */
1830 	qdf_assert(hal_soc);
1831 
1832 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
1833 
1834 		/* TODO */
1835 		/*
1836 		 * Need API to convert from hal_ring pointer to
1837 		 * Ring Type / Ring Id combo
1838 		 */
1839 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1840 			FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
1841 		goto done;
1842 	}
1843 
1844 	while (qdf_likely(quota)) {
1845 		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1846 		if (qdf_unlikely(!ring_desc))
1847 			break;
1848 
1849 		/* XXX */
1850 		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);
1851 
1852 		/*
1853 		 * For WBM ring, expect only MSDU buffers
1854 		 */
1855 		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);
1856 
1857 		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
1858 				== HAL_RX_WBM_ERR_SRC_RXDMA) ||
1859 				(HAL_RX_WBM_ERR_SRC_GET(ring_desc)
1860 				== HAL_RX_WBM_ERR_SRC_REO));
1861 
1862 		/*
1863 		 * Check if the buffer is to be processed on this processor
1864 		 */
1865 		rbm = hal_rx_ret_buf_manager_get(ring_desc);
1866 
1867 		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
1868 			/* TODO */
1869 			/* Call appropriate handler */
1870 			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
1871 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1872 				FL("Invalid RBM %d"), rbm);
1873 			continue;
1874 		}
1875 
1876 		rx_buf_cookie =	HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);
1877 
1878 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
1879 		qdf_assert_always(rx_desc);
1880 
1881 		if (!dp_rx_desc_check_magic(rx_desc)) {
1882 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1883 					FL("Invalid rx_desc cookie=%d"),
1884 					rx_buf_cookie);
1885 			continue;
1886 		}
1887 
1888 		/*
1889 		 * This is an unlikely scenario where the host is reaping
1890 		 * a descriptor which it already reaped just a while ago
1891 		 * but is yet to replenish it back to HW.
1892 		 * In this case the host will dump the last 128 descriptors
1893 		 * including the software descriptor rx_desc and assert.
1894 		 */
1895 		if (qdf_unlikely(!rx_desc->in_use)) {
1896 			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
1897 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1898 						   ring_desc, rx_desc);
1899 		}
1900 
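		/*
		 * Read the error source and reason codes recorded by HW in
		 * the WBM release ring descriptor.
		 */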
1901 		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
1902 
1903 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support &&
1904 				 dp_rx_is_sg_formation_required(&wbm_err_info))) {
1905 			/* SG is detected from continuation bit */
1906 			msdu_continuation = hal_rx_wbm_err_msdu_continuation_get(hal_soc,
1907 					ring_desc);
1908 			if (msdu_continuation &&
1909 			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
1910 				/* Update length from first buffer in SG */
1911 				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
1912 					hal_rx_msdu_start_msdu_len_get(
1913 						qdf_nbuf_data(rx_desc->nbuf));
1914 				soc->wbm_sg_param.wbm_is_first_msdu_in_sg = true;
1915 			}
1916 
1917 			if (msdu_continuation) {
1918 				/* MSDU continued packets */
1919 				qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
1920 				QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) =
1921 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
1922 			} else {
1923 				/* This is the terminal packet in SG */
1924 				qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
1925 				qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
1926 				QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) =
1927 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
1928 				process_sg_buf = true;
1929 			}
1930 		}
1931 
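		/*
		 * Unmap the buffer from the device (including the IPA/SMMU
		 * mapping, if enabled) before the host accesses the nbuf.
		 */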
1932 		nbuf = rx_desc->nbuf;
1933 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
1934 		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
1935 						  rx_desc_pool->buf_size,
1936 						  false);
1937 		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
1938 					     QDF_DMA_FROM_DEVICE,
1939 					     rx_desc_pool->buf_size);
1940 		rx_desc->unmapped = 1;
1941 
1942 		/*
1943 		 * save the wbm desc info in nbuf TLV. We will need this
1944 		 * info when we do the actual nbuf processing
1945 		 */
1946 		wbm_err_info.pool_id = rx_desc->pool_id;
1947 		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
1948 								&wbm_err_info);
1949 
1950 		rx_bufs_reaped[rx_desc->pool_id]++;
1951 
1952 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
1953 			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
1954 					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
1955 					  nbuf);
1956 			if (process_sg_buf) {
1957 				if (!dp_rx_buffer_pool_refill(
1958 					soc,
1959 					soc->wbm_sg_param.wbm_sg_nbuf_head,
1960 					rx_desc->pool_id))
1961 					DP_RX_MERGE_TWO_LIST(
1962 						nbuf_head, nbuf_tail,
1963 						soc->wbm_sg_param.wbm_sg_nbuf_head,
1964 						soc->wbm_sg_param.wbm_sg_nbuf_tail);
1965 				dp_rx_wbm_sg_list_reset(soc);
1966 				process_sg_buf = false;
1967 			}
1968 		} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
1969 						     rx_desc->pool_id)) {
1970 			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
1971 		}
1972 
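		/* Return the SW rx descriptor to the per-pool free list */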
1973 		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
1974 						&tail[rx_desc->pool_id],
1975 						rx_desc);
1976 
1977 		/*
1978 		 * If the continuation bit is set then the MSDU is spread
1979 		 * across multiple buffers; do not decrement the quota
1980 		 * till we reap all buffers of that MSDU.
1981 		 */
1982 		if (qdf_likely(!msdu_continuation))
1983 			quota -= 1;
1984 	}
1985 done:
1986 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
1987 
1988 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
1989 		if (rx_bufs_reaped[mac_id]) {
1990 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
1991 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
1992 
1993 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
1994 					rx_desc_pool, rx_bufs_reaped[mac_id],
1995 					&head[mac_id], &tail[mac_id]);
1996 			rx_bufs_used += rx_bufs_reaped[mac_id];
1997 		}
1998 	}
1999 
2000 	nbuf = nbuf_head;
2001 	while (nbuf) {
2002 		struct dp_peer *peer;
2003 		uint16_t peer_id;
2004 		uint8_t err_code;
2005 		uint8_t *tlv_hdr;
2006 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
2007 
2008 		/*
2009 		 * retrieve the wbm desc info from nbuf TLV, so we can
2010 		 * handle error cases appropriately
2011 		 */
2012 		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);
2013 
2014 		peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
2015 							   rx_tlv_hdr);
2016 		peer = dp_peer_find_by_id(soc, peer_id);
2017 
2018 		if (!peer)
2019 			dp_info_rl("peer is null peer_id%u err_src%u err_rsn%u",
2020 				   peer_id, wbm_err_info.wbm_err_src,
2021 				   wbm_err_info.reo_psh_rsn);
2022 
2023 		/* Set queue_mapping in nbuf to 0 */
2024 		dp_set_rx_queue(nbuf, 0);
2025 
2026 		next = nbuf->next;
2027 
2028 		/*
2029 		 * Form the SG for msdu-continued buffers.
2030 		 * QCN9000 has this support.
2031 		 */
2032 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
2033 			nbuf = dp_rx_sg_create(nbuf);
2034 			next = nbuf->next;
2035 			/*
2036 			 * SG error handling is not done correctly,
2037 			 * drop SG frames for now.
2038 			 */
2039 			qdf_nbuf_free(nbuf);
2040 			dp_info_rl("scattered msdu dropped");
2041 			nbuf = next;
2042 			continue;
2043 		}
2044 
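		/* Dispatch based on which block (REO or RXDMA) released the frame to WBM */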
2045 		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
2046 			if (wbm_err_info.reo_psh_rsn
2047 					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {
2048 
2049 				DP_STATS_INC(soc,
2050 					rx.err.reo_error
2051 					[wbm_err_info.reo_err_code], 1);
2052 				/* increment @pdev level */
2053 				pool_id = wbm_err_info.pool_id;
2054 				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2055 				if (dp_pdev)
2056 					DP_STATS_INC(dp_pdev, err.reo_error,
2057 						     1);
2058 
2059 				switch (wbm_err_info.reo_err_code) {
2060 				/*
2061 				 * Handling for packets which have NULL REO
2062 				 * queue descriptor
2063 				 */
2064 				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2065 					pool_id = wbm_err_info.pool_id;
2066 					dp_rx_null_q_desc_handle(soc, nbuf,
2067 								 rx_tlv_hdr,
2068 								 pool_id, peer);
2069 					nbuf = next;
2070 					if (peer)
2071 						dp_peer_unref_del_find_by_id(
2072 									peer);
2073 					continue;
2074 				/* TODO */
2075 				/* Add per error code accounting */
2076 				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2077 					pool_id = wbm_err_info.pool_id;
2078 
2079 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2080 									   rx_tlv_hdr)) {
2081 						peer_id =
2082 						hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
2083 										 rx_tlv_hdr);
2084 						tid =
2085 						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2086 					}
2087 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2088 					hal_rx_msdu_start_msdu_len_get(
2089 								rx_tlv_hdr);
2090 					nbuf->next = NULL;
2091 					dp_2k_jump_handle(soc, nbuf,
2092 							  rx_tlv_hdr,
2093 							  peer_id, tid);
2094 					nbuf = next;
2095 					if (peer)
2096 						dp_peer_unref_del_find_by_id(
2097 									peer);
2098 					continue;
2099 				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2100 				case HAL_REO_ERR_BAR_FRAME_OOR:
2101 					if (peer)
2102 						dp_rx_wbm_err_handle_bar(soc,
2103 									 peer,
2104 									 nbuf);
2105 					break;
2106 
2107 				default:
2108 					dp_info_rl("Got pkt with REO ERROR: %d",
2109 						   wbm_err_info.reo_err_code);
2110 					break;
2111 				}
2112 			}
2113 		} else if (wbm_err_info.wbm_err_src ==
2114 					HAL_RX_WBM_ERR_SRC_RXDMA) {
2115 			if (wbm_err_info.rxdma_psh_rsn
2116 					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2117 				DP_STATS_INC(soc,
2118 					rx.err.rxdma_error
2119 					[wbm_err_info.rxdma_err_code], 1);
2120 				/* increment @pdev level */
2121 				pool_id = wbm_err_info.pool_id;
2122 				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2123 				if (dp_pdev)
2124 					DP_STATS_INC(dp_pdev,
2125 						     err.rxdma_error, 1);
2126 
2127 				switch (wbm_err_info.rxdma_err_code) {
2128 				case HAL_RXDMA_ERR_UNENCRYPTED:
2129 
2130 				case HAL_RXDMA_ERR_WIFI_PARSE:
2131 					pool_id = wbm_err_info.pool_id;
2132 					dp_rx_process_rxdma_err(soc, nbuf,
2133 								rx_tlv_hdr,
2134 								peer,
2135 								wbm_err_info.
2136 								rxdma_err_code,
2137 								pool_id);
2138 					nbuf = next;
2139 					if (peer)
2140 						dp_peer_unref_del_find_by_id(peer);
2141 					continue;
2142 
2143 				case HAL_RXDMA_ERR_TKIP_MIC:
2144 					dp_rx_process_mic_error(soc, nbuf,
2145 								rx_tlv_hdr,
2146 								peer);
2147 					nbuf = next;
2148 					if (peer) {
2149 						DP_STATS_INC(peer, rx.err.mic_err, 1);
2150 						dp_peer_unref_del_find_by_id(
2151 									peer);
2152 					}
2153 					continue;
2154 
2155 				case HAL_RXDMA_ERR_DECRYPT:
2156 
2157 					if (peer) {
2158 						DP_STATS_INC(peer, rx.err.
2159 							     decrypt_err, 1);
2160 						break;
2161 					}
2162 
2163 					if (!dp_handle_rxdma_decrypt_err())
2164 						break;
2165 
2166 					pool_id = wbm_err_info.pool_id;
2167 					err_code = wbm_err_info.rxdma_err_code;
2168 					tlv_hdr = rx_tlv_hdr;
2169 					dp_rx_process_rxdma_err(soc, nbuf,
2170 								tlv_hdr, NULL,
2171 								err_code,
2172 								pool_id);
2173 					nbuf = next;
2174 					continue;
2175 
2176 				default:
2177 					dp_err_rl("RXDMA error %d",
2178 						  wbm_err_info.rxdma_err_code);
2179 				}
2180 			}
2181 		} else {
2182 			/* Should not come here */
2183 			qdf_assert(0);
2184 		}
2185 
2186 		if (peer)
2187 			dp_peer_unref_del_find_by_id(peer);
2188 
2189 		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
2190 				     QDF_TRACE_LEVEL_DEBUG);
2191 		qdf_nbuf_free(nbuf);
2192 		nbuf = next;
2193 	}
2194 	return rx_bufs_used; /* Assume no scale factor for now */
2195 }
2196 
2197 /**
2198  * dup_desc_dbg() - dump and assert if duplicate rx desc found
2199  *
2200  * @soc: core DP main context
2201  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2202  * @rx_desc: void pointer to rx descriptor
2203  *
2204  * Return: void
2205  */
2206 static void dup_desc_dbg(struct dp_soc *soc,
2207 			 hal_rxdma_desc_t rxdma_dst_ring_desc,
2208 			 void *rx_desc)
2209 {
2210 	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
2211 	dp_rx_dump_info_and_assert(
2212 			soc,
2213 			soc->rx_rel_ring.hal_srng,
2214 			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
2215 			rx_desc);
2216 }
2217 
2218 /**
2219  * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
2220  *
2221  * @soc: core DP main context
2222  * @mac_id: mac id which is one of 3 mac_ids
2223  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2224  * @head: head of descs list to be freed
2225  * @tail: tail of descs list to be freed
2226  *
2227  * Return: number of msdus in the MPDU to be popped
2228  */
2229 static inline uint32_t
2230 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
2231 	hal_rxdma_desc_t rxdma_dst_ring_desc,
2232 	union dp_rx_desc_list_elem_t **head,
2233 	union dp_rx_desc_list_elem_t **tail)
2234 {
2235 	void *rx_msdu_link_desc;
2236 	qdf_nbuf_t msdu;
2237 	qdf_nbuf_t last;
2238 	struct hal_rx_msdu_list msdu_list;
2239 	uint16_t num_msdus;
2240 	struct hal_buf_info buf_info;
2241 	uint32_t rx_bufs_used = 0;
2242 	uint32_t msdu_cnt;
2243 	uint32_t i;
2244 	uint8_t push_reason;
2245 	uint8_t rxdma_error_code = 0;
2246 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
2247 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2248 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
2249 	hal_rxdma_desc_t ring_desc;
2250 	struct rx_desc_pool *rx_desc_pool;
2251 
2252 	if (!pdev) {
2253 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2254 			  "pdev is null for mac_id = %d", mac_id);
2255 		return rx_bufs_used;
2256 	}
2257 
2258 	msdu = NULL;
2259 
2260 	last = NULL;
2261 
2262 	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
2263 					&msdu_cnt);
2264 
2265 	push_reason =
2266 		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
2267 	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2268 		rxdma_error_code =
2269 			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
2270 	}
2271 
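	/* Walk the MSDU link descriptor chain for this MPDU */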
2272 	do {
2273 		rx_msdu_link_desc =
2274 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
2275 
2276 		qdf_assert_always(rx_msdu_link_desc);
2277 
2278 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
2279 				     &msdu_list, &num_msdus);
2280 
2281 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
2282 			/* If the msdus belong to an NSS offloaded radio and
2283 			 * the rbm is not SW1_BM, then return the msdu_link
2284 			 * descriptor without freeing the msdus (nbufs). Let
2285 			 * these buffers be given to the NSS completion ring
2286 			 * for NSS to free them.
2287 			 * Else iterate through the msdu link desc list and
2288 			 * free each msdu in the list.
2289 			 */
2290 			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
2291 				wlan_cfg_get_dp_pdev_nss_enabled(
2292 							  pdev->wlan_cfg_ctx))
2293 				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
2294 			else {
2295 				for (i = 0; i < num_msdus; i++) {
2296 					struct dp_rx_desc *rx_desc =
2297 						dp_rx_cookie_2_va_rxdma_buf(soc,
2298 							msdu_list.sw_cookie[i]);
2299 					qdf_assert_always(rx_desc);
2300 					msdu = rx_desc->nbuf;
2301 					/*
2302 					 * This is an unlikely scenario
2303 					 * where the host is reaping
2304 					 * a descriptor which
2305 					 * it already reaped just a while ago
2306 					 * but is yet to replenish
2307 					 * it back to HW.
2308 					 * In this case the host will dump
2309 					 * the last 128 descriptors
2310 					 * including the software descriptor
2311 					 * rx_desc and assert.
2312 					 */
2313 					ring_desc = rxdma_dst_ring_desc;
2314 					if (qdf_unlikely(!rx_desc->in_use)) {
2315 						dup_desc_dbg(soc,
2316 							     ring_desc,
2317 							     rx_desc);
2318 						continue;
2319 					}
2320 
2321 					rx_desc_pool = &soc->
2322 						rx_desc_buf[rx_desc->pool_id];
2323 					dp_ipa_handle_rx_buf_smmu_mapping(
2324 							soc, msdu,
2325 							rx_desc_pool->buf_size,
2326 							false);
2327 					qdf_nbuf_unmap_nbytes_single(
2328 						soc->osdev, msdu,
2329 						QDF_DMA_FROM_DEVICE,
2330 						rx_desc_pool->buf_size);
2331 					rx_desc->unmapped = 1;
2332 
2333 					QDF_TRACE(QDF_MODULE_ID_DP,
2334 						QDF_TRACE_LEVEL_DEBUG,
2335 						"[%s][%d] msdu_nbuf=%pK ",
2336 						__func__, __LINE__, msdu);
2337 
2338 					dp_rx_buffer_pool_nbuf_free(soc, msdu,
2339 							rx_desc->pool_id);
2340 					rx_bufs_used++;
2341 					dp_rx_add_to_free_desc_list(head,
2342 						tail, rx_desc);
2343 				}
2344 			}
2345 		} else {
2346 			rxdma_error_code = HAL_RXDMA_ERR_WAR;
2347 		}
2348 
2349 		/*
2350 		 * Store the current link buffer into the local structure
2351 		 * to be used for release purpose.
2352 		 */
2353 		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
2354 					     buf_info.sw_cookie, buf_info.rbm);
2355 
2356 		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
2357 		dp_rx_link_desc_return_by_addr(soc,
2358 					       (hal_buff_addrinfo_t)
2359 						rx_link_buf_info,
2360 						bm_action);
2361 	} while (buf_info.paddr);
2362 
2363 	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
2364 	if (pdev)
2365 		DP_STATS_INC(pdev, err.rxdma_error, 1);
2366 
2367 	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
2368 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2369 			"Packet received with Decrypt error");
2370 	}
2371 
2372 	return rx_bufs_used;
2373 }
2374 
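/*
 * dp_rxdma_err_process() - Reap the RXDMA error destination ring for the
 * given lmac, drop the errored MSDU buffers and replenish what was reaped.
 */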
2375 uint32_t
2376 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2377 		     uint32_t mac_id, uint32_t quota)
2378 {
2379 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2380 	hal_rxdma_desc_t rxdma_dst_ring_desc;
2381 	hal_soc_handle_t hal_soc;
2382 	void *err_dst_srng;
2383 	union dp_rx_desc_list_elem_t *head = NULL;
2384 	union dp_rx_desc_list_elem_t *tail = NULL;
2385 	struct dp_srng *dp_rxdma_srng;
2386 	struct rx_desc_pool *rx_desc_pool;
2387 	uint32_t work_done = 0;
2388 	uint32_t rx_bufs_used = 0;
2389 
2390 	if (!pdev)
2391 		return 0;
2392 
2393 	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;
2394 
2395 	if (!err_dst_srng) {
2396 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2397 			  "%s %d : HAL RXDMA ERR DST Ring Init "
2398 			  "Failed -- %pK",
2399 			  __func__, __LINE__, err_dst_srng);
2400 		return 0;
2401 	}
2402 
2403 	hal_soc = soc->hal_soc;
2404 
2405 	qdf_assert(hal_soc);
2406 
2407 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
2408 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2409 			  "%s %d : HAL RXDMA ERR DST Ring Access "
2410 			  "Failed -- %pK",
2411 			  __func__, __LINE__, err_dst_srng);
2412 		return 0;
2413 	}
2414 
2415 	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
2416 		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
2417 
2418 		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
2419 						   rxdma_dst_ring_desc,
2420 						   &head, &tail);
2421 	}
2422 
2423 	dp_srng_access_end(int_ctx, soc, err_dst_srng);
2424 
2425 	if (rx_bufs_used) {
2426 		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2427 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2428 		else
2429 			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
2430 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
2431 
2432 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2433 			rx_desc_pool, rx_bufs_used, &head, &tail);
2434 
2435 		work_done += rx_bufs_used;
2436 	}
2437 
2438 	return work_done;
2439 }
2440 
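/**
 * dp_wbm_int_err_mpdu_pop() - Free the MSDUs hanging off the link descriptor
 *			       reported with a WBM internal error
 * @soc: core DP main context
 * @mac_id: mac id
 * @rxdma_dst_ring_desc: void pointer to the link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of rx buffers reaped
 */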
2441 static inline uint32_t
2442 dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
2443 			hal_rxdma_desc_t rxdma_dst_ring_desc,
2444 			union dp_rx_desc_list_elem_t **head,
2445 			union dp_rx_desc_list_elem_t **tail)
2446 {
2447 	void *rx_msdu_link_desc;
2448 	qdf_nbuf_t msdu;
2449 	qdf_nbuf_t last;
2450 	struct hal_rx_msdu_list msdu_list;
2451 	uint16_t num_msdus;
2452 	struct hal_buf_info buf_info;
2453 	uint32_t rx_bufs_used = 0, msdu_cnt, i;
2454 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
2455 
2456 	msdu = NULL;
2457 
2458 	last = NULL;
2459 
2460 	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
2461 				     &msdu_cnt);
2462 
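	/* Walk the MSDU link descriptor chain and free each MSDU buffer */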
2463 	do {
2464 		rx_msdu_link_desc =
2465 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
2466 
2467 		if (!rx_msdu_link_desc) {
2468 			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
2469 			break;
2470 		}
2471 
2472 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
2473 				     &msdu_list, &num_msdus);
2474 
2475 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
2476 			for (i = 0; i < num_msdus; i++) {
2477 				struct dp_rx_desc *rx_desc =
2478 					dp_rx_cookie_2_va_rxdma_buf(
2479 							soc,
2480 							msdu_list.sw_cookie[i]);
2481 				qdf_assert_always(rx_desc);
2482 				msdu = rx_desc->nbuf;
2483 
2484 				qdf_nbuf_unmap_single(soc->osdev, msdu,
2485 						      QDF_DMA_FROM_DEVICE);
2486 
2487 				dp_rx_buffer_pool_nbuf_free(soc, msdu,
2488 							    rx_desc->pool_id);
2489 				rx_bufs_used++;
2490 				dp_rx_add_to_free_desc_list(head,
2491 							    tail, rx_desc);
2492 			}
2493 		}
2494 
2495 		/*
2496 		 * Store the current link buffer into the local structure
2497 		 * to be used for release purpose.
2498 		 */
2499 		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
2500 					     buf_info.sw_cookie, buf_info.rbm);
2501 
2502 		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
2503 		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
2504 					rx_link_buf_info,
2505 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
2506 	} while (buf_info.paddr);
2507 
2508 	return rx_bufs_used;
2509 }
2510 
2511 /**
2513  * dp_handle_wbm_internal_error() - handles wbm_internal_error case
2514  *
2515  * @soc: core DP main context
2516  * @hal_desc: hal descriptor
2517  * @buf_type: indicates if the buffer is of type link disc or msdu
2518  * @buf_type: indicates if the buffer is of type link desc or msdu
2519  *
2520  * wbm_internal_error is seen in following scenarios :
2521  * wbm_internal_error is seen in the following scenarios:
2522  * 1.  Null pointers detected in WBM_RELEASE_RING descriptors
2523  * 2.  Null pointers detected during delinking process
2524  *
2525  * Some null pointer cases:
2526  *
2527  * a. MSDU buffer pointer is NULL
2528  * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
2529  * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
2530  */
2531 void
2532 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
2533 			     uint32_t buf_type)
2534 {
2535 	struct hal_buf_info buf_info = {0};
2536 	struct dp_rx_desc *rx_desc = NULL;
2537 	struct rx_desc_pool *rx_desc_pool;
2538 	uint32_t rx_buf_cookie;
2539 	uint32_t rx_bufs_reaped = 0;
2540 	union dp_rx_desc_list_elem_t *head = NULL;
2541 	union dp_rx_desc_list_elem_t *tail = NULL;
2542 	uint8_t pool_id;
2543 
2544 	hal_rx_reo_buf_paddr_get(hal_desc, &buf_info);
2545 
2546 	if (!buf_info.paddr) {
2547 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
2548 		return;
2549 	}
2550 
2551 	rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(hal_desc);
2552 	pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(rx_buf_cookie);
2553 
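	/*
	 * buf_type indicates whether the errored entry points at an MSDU
	 * buffer directly or at an MSDU link descriptor.
	 */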
2554 	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
2555 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
2556 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
2557 
2558 		if (rx_desc && rx_desc->nbuf) {
2559 			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2560 			dp_ipa_handle_rx_buf_smmu_mapping(
2561 						soc, rx_desc->nbuf,
2562 						rx_desc_pool->buf_size,
2563 						false);
2564 			qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
2565 						     QDF_DMA_FROM_DEVICE,
2566 						     rx_desc_pool->buf_size);
2567 			rx_desc->unmapped = 1;
2568 
2569 			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
2570 						    rx_desc->pool_id);
2571 			dp_rx_add_to_free_desc_list(&head,
2572 						    &tail,
2573 						    rx_desc);
2574 
2575 			rx_bufs_reaped++;
2576 		}
2577 	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
2578 		rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id,
2579 							 hal_desc,
2580 							 &head, &tail);
2581 	}
2582 
2583 	if (rx_bufs_reaped) {
2584 		struct rx_desc_pool *rx_desc_pool;
2585 		struct dp_srng *dp_rxdma_srng;
2586 
2587 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
2588 		dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id];
2589 		rx_desc_pool = &soc->rx_desc_buf[pool_id];
2590 
2591 		dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng,
2592 					rx_desc_pool,
2593 					rx_bufs_reaped,
2594 					&head, &tail);
2595 	}
2596 }
2597