xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c (revision d57e7836dc389f88871517cfeedfdd0f572e4b31)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_hw_headers.h"
20 #include "dp_types.h"
21 #include "dp_rx.h"
22 #include "dp_peer.h"
23 #include "dp_internal.h"
24 #include "hal_api.h"
25 #include "qdf_trace.h"
26 #include "qdf_nbuf.h"
27 #include "dp_rx_defrag.h"
28 #include "dp_ipa.h"
29 #ifdef FEATURE_WDS
30 #include "dp_txrx_wds.h"
31 #endif
32 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
33 #include "qdf_net_types.h"
34 #include "dp_rx_buffer_pool.h"
35 
36 /* Max buffers in the invalid peer SG list */
37 #define DP_MAX_INVALID_BUFFERS 10
38 
39 /**
40  * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
41  *			      back on same vap or a different vap.
42  *
43  * @soc: core DP main context
44  * @peer: dp peer handler
45  * @rx_tlv_hdr: start of the rx TLV header
46  * @nbuf: pkt buffer
47  *
48  * Return: bool (true if it is a looped back pkt else false)
49  *
50  */
51 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
52 					struct dp_peer *peer,
53 					uint8_t *rx_tlv_hdr,
54 					qdf_nbuf_t nbuf)
55 {
56 	struct dp_vdev *vdev = peer->vdev;
57 	struct dp_ast_entry *ase = NULL;
58 	uint16_t sa_idx = 0;
59 	uint8_t *data;
60 
61 	/*
62 	 * Multicast Echo Check is required only if vdev is STA and
63 	 * received pkt is a multicast/broadcast pkt. Otherwise,
64 	 * skip the MEC check.
65 	 */
66 	if (vdev->opmode != wlan_op_mode_sta)
67 		return false;
68 
69 	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
70 		return false;
71 
72 	data = qdf_nbuf_data(nbuf);
73 	/*
74 	 * if the received pkt's src mac addr matches the vdev
75 	 * mac address, then drop the pkt as it is looped back
76 	 */
77 	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
78 			vdev->mac_addr.raw,
79 			QDF_MAC_ADDR_SIZE)))
80 		return true;
81 
82 	/*
83 	 * In case of qwrap isolation mode, do not drop loopback packets.
84 	 * In isolation mode, all packets from the wired stations need to go
85 	 * to rootap and loop back to reach the wireless stations and
86 	 * vice-versa.
87 	 */
88 	if (qdf_unlikely(vdev->isolation_vdev))
89 		return false;
90 
91 	/* if the received pkt's src mac addr matches the MAC addr of a
92 	 * wired PC behind the STA, or of a wireless STA behind the
93 	 * repeater, then drop the pkt as it is looped back
94 	 */
96 	qdf_spin_lock_bh(&soc->ast_lock);
97 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
98 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
99 
100 		if ((sa_idx < 0) ||
101 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
102 			qdf_spin_unlock_bh(&soc->ast_lock);
103 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
104 					"invalid sa_idx: %d", sa_idx);
105 			qdf_assert_always(0);
106 		}
107 
108 		ase = soc->ast_table[sa_idx];
109 		if (!ase) {
110 			/* We do not get a peer map event for STA, and without
111 			 * this event we don't know what the STA's sa_idx is.
112 			 * For this reason the AST is still not associated with
113 			 * any index position in ast_table.
114 			 * In such scenarios, where sa is valid but the
115 			 * ast is not in ast_table, we use the below API to get the
116 			 * AST entry for the STA's own mac_address.
117 			 */
118 			ase = dp_peer_ast_hash_find_by_vdevid
119 				(soc, &data[QDF_MAC_ADDR_SIZE],
120 				 peer->vdev->vdev_id);
121 			if (ase) {
122 				ase->ast_idx = sa_idx;
123 				soc->ast_table[sa_idx] = ase;
124 				ase->is_mapped = TRUE;
125 			}
126 		}
127 	} else {
128 		ase = dp_peer_ast_hash_find_by_pdevid(soc,
129 						      &data[QDF_MAC_ADDR_SIZE],
130 						      vdev->pdev->pdev_id);
131 	}
132 
133 	if (ase) {
134 
135 		if (ase->pdev_id != vdev->pdev->pdev_id) {
136 			qdf_spin_unlock_bh(&soc->ast_lock);
137 			QDF_TRACE(QDF_MODULE_ID_DP,
138 				QDF_TRACE_LEVEL_INFO,
139 				"Detected DBDC Root AP "QDF_MAC_ADDR_FMT", %d %d",
140 				QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]),
141 				vdev->pdev->pdev_id,
142 				ase->pdev_id);
143 			return false;
144 		}
145 
146 		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
147 				(ase->peer_id != peer->peer_id)) {
148 			qdf_spin_unlock_bh(&soc->ast_lock);
149 			QDF_TRACE(QDF_MODULE_ID_DP,
150 				QDF_TRACE_LEVEL_INFO,
151 				"received pkt with same src mac "QDF_MAC_ADDR_FMT,
152 				QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));
153 
154 			return true;
155 		}
156 	}
157 	qdf_spin_unlock_bh(&soc->ast_lock);
158 	return false;
159 }
160 
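/**
 * dp_rx_link_desc_refill_duplicate_check() - check if the link descriptor
 *			being refilled duplicates the previously refilled one
 *
 * @soc: core DP main context
 * @buf_info: last refilled link descriptor buffer info, updated on return
 * @ring_buf_info: buffer addr info of the current ring descriptor
 *
 * Logs and counts a duplicate refill (dup_refill_link_desc) when the paddr
 * of the current link descriptor matches the last refilled one, then stores
 * the current buffer info for the next check.
 *
 * Return: None
 */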
161 void dp_rx_link_desc_refill_duplicate_check(
162 				struct dp_soc *soc,
163 				struct hal_buf_info *buf_info,
164 				hal_buff_addrinfo_t ring_buf_info)
165 {
166 	struct hal_buf_info current_link_desc_buf_info = { 0 };
167 
168 	/* do duplicate link desc address check */
169 	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
170 					  &current_link_desc_buf_info);
171 	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
172 			 buf_info->paddr)) {
173 		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
174 			   current_link_desc_buf_info.paddr,
175 			   current_link_desc_buf_info.sw_cookie);
176 		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
177 	}
178 	*buf_info = current_link_desc_buf_info;
179 }
180 
181 /**
182  * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
183  *					HW (WBM) by address
184  *
185  * @soc: core DP main context
186  * @link_desc_addr: link descriptor addr
187  * @bm_action: buffer manager action to apply to the link descriptor
188  * Return: QDF_STATUS
189  */
190 QDF_STATUS
191 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
192 			       hal_buff_addrinfo_t link_desc_addr,
193 			       uint8_t bm_action)
194 {
195 	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
196 	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
197 	hal_soc_handle_t hal_soc = soc->hal_soc;
198 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
199 	void *src_srng_desc;
200 
201 	if (!wbm_rel_srng) {
202 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
203 			"WBM RELEASE RING not initialized");
204 		return status;
205 	}
206 
207 	/* do duplicate link desc address check */
208 	dp_rx_link_desc_refill_duplicate_check(
209 				soc,
210 				&soc->last_op_info.wbm_rel_link_desc,
211 				link_desc_addr);
212 
213 	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
214 
215 		/* TODO */
216 		/*
217 		 * Need API to convert from hal_ring pointer to
218 		 * Ring Type / Ring Id combo
219 		 */
220 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
221 			FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
222 			wbm_rel_srng);
223 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
224 		goto done;
225 	}
226 	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
227 	if (qdf_likely(src_srng_desc)) {
228 		/* Return link descriptor through WBM ring (SW2WBM)*/
229 		hal_rx_msdu_link_desc_set(hal_soc,
230 				src_srng_desc, link_desc_addr, bm_action);
231 		status = QDF_STATUS_SUCCESS;
232 	} else {
233 		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
234 
235 		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);
236 
237 		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
238 			   srng->ring_id,
239 			   soc->stats.rx.err.hal_ring_access_full_fail);
240 		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
241 			   *srng->u.src_ring.hp_addr,
242 			   srng->u.src_ring.reap_hp,
243 			   *srng->u.src_ring.tp_addr,
244 			   srng->u.src_ring.cached_tp);
245 		QDF_BUG(0);
246 	}
247 done:
248 	hal_srng_access_end(hal_soc, wbm_rel_srng);
249 	return status;
250 
251 }
252 
253 /**
254  * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
255  *				(WBM), following error handling
256  *
257  * @soc: core DP main context
258  * @ring_desc: opaque pointer to the REO error ring descriptor
259  * @bm_action: buffer manager action to apply to the link descriptor
260  * Return: QDF_STATUS
261  */
262 QDF_STATUS
263 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
264 		       uint8_t bm_action)
265 {
266 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
267 
268 	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
269 }
270 
271 /**
272  * dp_rx_msdus_drop() - Drops all MSDUs of an MPDU
273  *
274  * @soc: core txrx main context
275  * @ring_desc: opaque pointer to the REO error ring descriptor
276  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
277  * @mac_id: set to the mac/pool id of the reaped rx buffers
279  * @quota: No. of units (packets) that can be serviced in one shot.
280  *
281  * This function is used to drop all MSDUs in an MPDU
282  *
283  * Return: uint32_t: No. of elements processed
284  */
285 static uint32_t
286 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
287 		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
288 		 uint8_t *mac_id,
289 		 uint32_t quota)
290 {
291 	uint32_t rx_bufs_used = 0;
292 	void *link_desc_va;
293 	struct hal_buf_info buf_info;
294 	struct dp_pdev *pdev;
295 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
296 	int i;
297 	uint8_t *rx_tlv_hdr;
298 	uint32_t tid;
299 	struct rx_desc_pool *rx_desc_pool;
300 	struct dp_rx_desc *rx_desc;
301 	/* First field in REO Dst ring Desc is buffer_addr_info */
302 	void *buf_addr_info = ring_desc;
303 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
304 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
305 
306 	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);
307 
308 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
309 
310 more_msdu_link_desc:
311 	/* No UNMAP required -- this is "malloc_consistent" memory */
312 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
313 			     &mpdu_desc_info->msdu_count);
314 
315 	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
316 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
317 						      msdu_list.sw_cookie[i]);
318 
319 		qdf_assert_always(rx_desc);
320 
321 		/* all buffers from an MSDU link desc belong to the same pdev */
322 		*mac_id = rx_desc->pool_id;
323 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
324 		if (!pdev) {
325 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
326 				  "pdev is null for pool_id = %d",
327 				  rx_desc->pool_id);
328 			return rx_bufs_used;
329 		}
330 
331 		if (!dp_rx_desc_check_magic(rx_desc)) {
332 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
333 					FL("Invalid rx_desc cookie=%d"),
334 					msdu_list.sw_cookie[i]);
335 			return rx_bufs_used;
336 		}
337 
338 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
339 		dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
340 						  rx_desc_pool->buf_size,
341 						  false);
342 		qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
343 					     QDF_DMA_FROM_DEVICE,
344 					     rx_desc_pool->buf_size);
345 		rx_desc->unmapped = 1;
346 
347 		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);
348 
349 		rx_bufs_used++;
350 		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
351 						rx_desc->rx_buf_start);
352 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
353 			"Packet received with PN error for tid :%d", tid);
354 
355 		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
356 		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
357 			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);
358 
359 		/* Just free the buffers */
360 		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);
361 
362 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
363 					    &pdev->free_list_tail, rx_desc);
364 	}
365 
366 	/*
367 	 * If the msdus are spread across multiple link descriptors,
368 	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
369 	 * spread across multiple buffers). Hence, it is
370 	 * necessary to check the next link descriptor and release
371 	 * all the msdus that are part of it.
372 	 */
373 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
374 			link_desc_va,
375 			&next_link_desc_addr_info);
376 
377 	if (hal_rx_is_buf_addr_info_valid(
378 				&next_link_desc_addr_info)) {
379 		/* Clear the next link desc info for the current link_desc */
380 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
381 
382 		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
383 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
384 		hal_rx_buffer_addr_info_get_paddr(
385 				&next_link_desc_addr_info,
386 				&buf_info);
387 		cur_link_desc_addr_info = next_link_desc_addr_info;
388 		buf_addr_info = &cur_link_desc_addr_info;
389 
390 		link_desc_va =
391 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
392 
393 		goto more_msdu_link_desc;
394 	}
395 	quota--;
396 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
397 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
398 	return rx_bufs_used;
399 }
400 
401 /**
402  * dp_rx_pn_error_handle() - Handles PN check errors
403  *
404  * @soc: core txrx main context
405  * @ring_desc: opaque pointer to the REO error ring descriptor
406  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
407  * @mac_id: set to the mac/pool id of the reaped rx buffers
409  * @quota: No. of units (packets) that can be serviced in one shot.
410  *
411  * This function implements PN error handling
412  * If the peer is configured to ignore the PN check errors
413  * or if DP feels that this frame is still OK, the frame can be
414  * re-injected back to REO to use some of the other features
415  * of REO e.g. duplicate detection/routing to other cores
416  *
417  * Return: uint32_t: No. of elements processed
418  */
419 static uint32_t
420 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
421 		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
422 		      uint8_t *mac_id,
423 		      uint32_t quota)
424 {
425 	uint16_t peer_id;
426 	uint32_t rx_bufs_used = 0;
427 	struct dp_peer *peer;
428 	bool peer_pn_policy = false;
429 
430 	peer_id = DP_PEER_METADATA_PEER_ID_GET(
431 				mpdu_desc_info->peer_meta_data);
432 
433 
434 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
435 
436 	if (qdf_likely(peer)) {
437 		/*
438 		 * TODO: Check for peer specific policies & set peer_pn_policy
439 		 */
440 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
441 			"discard rx due to PN error for peer  %pK  "QDF_MAC_ADDR_FMT,
442 			peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
443 
444 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
445 	}
446 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
447 		"Packet received with PN error");
448 
449 	/* No peer PN policy -- definitely drop */
450 	if (!peer_pn_policy)
451 		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
452 						mpdu_desc_info,
453 						mac_id, quota);
454 
455 	return rx_bufs_used;
456 }
457 
458 /**
459  * dp_rx_oor_handle() - Handles an msdu received with an OOR error
460  *
461  * @soc: core txrx main context
462  * @nbuf: pointer to msdu skb
463  * @peer_id: dp peer ID
464  * @rx_tlv_hdr: start of rx tlv header
465  *
466  * This function processes the msdu delivered from the REO2TCL
467  * ring with error type OOR
468  *
469  * Return: None
470  */
471 static void
472 dp_rx_oor_handle(struct dp_soc *soc,
473 		 qdf_nbuf_t nbuf,
474 		 uint16_t peer_id,
475 		 uint8_t *rx_tlv_hdr)
476 {
477 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
478 				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
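	/* Only these special frame types (ARP, DHCP, EAPOL) are eligible for
	 * delivery to the stack on an OOR error; any other frame is dropped
	 * further below.
	 */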
479 	struct dp_peer *peer = NULL;
480 
481 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
482 	if (!peer) {
483 		dp_info_rl("peer not found");
484 		goto free_nbuf;
485 	}
486 
487 	if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask,
488 					rx_tlv_hdr)) {
489 		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
490 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
491 		return;
492 	}
493 
494 free_nbuf:
495 	if (peer)
496 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
497 
498 	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
499 	qdf_nbuf_free(nbuf);
500 }
501 
502 /**
503  * dp_rx_reo_err_entry_process() - Handles REO error entry processing
504  *
505  * @soc: core txrx main context
506  * @ring_desc: opaque pointer to the REO error ring descriptor
507  * @mpdu_desc_info: pointer to mpdu level description info
508  * @link_desc_va: pointer to msdu_link_desc virtual address
509  * @err_code: reo error code fetched from ring entry
510  *
511  * Function to handle msdus fetched from an msdu link desc; currently
512  * only 2K jump and OOR errors are supported.
513  *
514  * Return: msdu count processed.
515  */
516 static uint32_t
517 dp_rx_reo_err_entry_process(struct dp_soc *soc,
518 			    void *ring_desc,
519 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
520 			    void *link_desc_va,
521 			    enum hal_reo_error_code err_code)
522 {
523 	uint32_t rx_bufs_used = 0;
524 	struct dp_pdev *pdev;
525 	int i;
526 	uint8_t *rx_tlv_hdr_first;
527 	uint8_t *rx_tlv_hdr_last;
528 	uint32_t tid = DP_MAX_TIDS;
529 	uint16_t peer_id;
530 	struct dp_rx_desc *rx_desc;
531 	struct rx_desc_pool *rx_desc_pool;
532 	qdf_nbuf_t nbuf;
533 	struct hal_buf_info buf_info;
534 	struct hal_rx_msdu_list msdu_list;
535 	uint16_t num_msdus;
536 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
537 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
538 	/* First field in REO Dst ring Desc is buffer_addr_info */
539 	void *buf_addr_info = ring_desc;
540 	qdf_nbuf_t head_nbuf = NULL;
541 	qdf_nbuf_t tail_nbuf = NULL;
542 	uint16_t msdu_processed = 0;
543 
544 	peer_id = DP_PEER_METADATA_PEER_ID_GET(
545 					mpdu_desc_info->peer_meta_data);
546 
547 more_msdu_link_desc:
548 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
549 			     &num_msdus);
550 	for (i = 0; i < num_msdus; i++) {
551 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(
552 					soc,
553 					msdu_list.sw_cookie[i]);
554 
555 		qdf_assert_always(rx_desc);
556 
557 		/* all buffers from an MSDU link desc belong to the same pdev */
558 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
559 
560 		nbuf = rx_desc->nbuf;
561 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
562 		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
563 						  rx_desc_pool->buf_size,
564 						  false);
565 		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
566 					     QDF_DMA_FROM_DEVICE,
567 					     rx_desc_pool->buf_size);
568 		rx_desc->unmapped = 1;
569 
570 		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
571 		rx_bufs_used++;
572 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
573 					    &pdev->free_list_tail, rx_desc);
574 
575 		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);
576 
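		/* An msdu can span multiple rx buffers; if the continuation
		 * flag is set keep chaining nbufs and defer processing until
		 * the last buffer of the msdu has been reaped.
		 */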
577 		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
578 				 HAL_MSDU_F_MSDU_CONTINUATION))
579 			continue;
580 
581 		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
582 					     rx_desc->pool_id)) {
583 			/* MSDU queued back to the pool */
584 			goto process_next_msdu;
585 		}
586 
587 		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
588 		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);
589 
590 		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
591 			nbuf = dp_rx_sg_create(head_nbuf);
592 			qdf_nbuf_set_is_frag(nbuf, 1);
593 			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
594 		}
595 
596 		switch (err_code) {
597 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
598 			/*
599 			 * Only the first msdu carries a valid mpdu start
600 			 * description tlv; use it for the following msdus.
601 			 */
602 			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
603 							   rx_tlv_hdr_last))
604 				tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
605 							      rx_tlv_hdr_first);
606 
607 			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
608 					  peer_id, tid);
609 			break;
610 
611 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
612 			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
613 			break;
614 		default:
615 			dp_err_rl("Non-support error code %d", err_code);
616 			qdf_nbuf_free(nbuf);
617 		}
618 
619 process_next_msdu:
620 		msdu_processed++;
621 		head_nbuf = NULL;
622 		tail_nbuf = NULL;
623 	}
624 
625 	/*
626 	 * If the msdus are spread across multiple link descriptors,
627 	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
628 	 * spread across multiple buffers). Hence, it is
629 	 * necessary to check the next link descriptor and release
630 	 * all the msdus that are part of it.
631 	 */
632 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
633 			link_desc_va,
634 			&next_link_desc_addr_info);
635 
636 	if (hal_rx_is_buf_addr_info_valid(
637 				&next_link_desc_addr_info)) {
638 		/* Clear the next link desc info for the current link_desc */
639 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
640 		dp_rx_link_desc_return_by_addr(
641 				soc,
642 				buf_addr_info,
643 				HAL_BM_ACTION_PUT_IN_IDLE_LIST);
644 
645 		hal_rx_buffer_addr_info_get_paddr(
646 				&next_link_desc_addr_info,
647 				&buf_info);
648 		link_desc_va =
649 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
650 		cur_link_desc_addr_info = next_link_desc_addr_info;
651 		buf_addr_info = &cur_link_desc_addr_info;
652 
653 		goto more_msdu_link_desc;
654 	}
655 
656 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
657 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
658 	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
659 		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);
660 
661 	return rx_bufs_used;
662 }
663 
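/*
 * When DP_INVALID_PEER_ASSERT is enabled, assert that the pdev invalid peer
 * msdu list is empty (head and tail both NULL); otherwise the check is a
 * no-op.
 */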
664 #ifdef DP_INVALID_PEER_ASSERT
665 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
666 		do {                                \
667 			qdf_assert_always(!(head)); \
668 			qdf_assert_always(!(tail)); \
669 		} while (0)
670 #else
671 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
672 #endif
673 
674 /**
675  * dp_rx_chain_msdus() - Function to chain all msdus of an mpdu
676  *                       to pdev invalid peer list
677  *
678  * @soc: core DP main context
679  * @nbuf: Buffer pointer
680  * @rx_tlv_hdr: start of rx tlv header
681  * @mac_id: mac id
682  *
683  *  Return: bool: true for last msdu of mpdu
684  */
685 static bool
686 dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf,
687 		  uint8_t *rx_tlv_hdr, uint8_t mac_id)
688 {
689 	bool mpdu_done = false;
690 	qdf_nbuf_t curr_nbuf = NULL;
691 	qdf_nbuf_t tmp_nbuf = NULL;
692 
693 	/* TODO: Currently only single radio is supported, hence
694 	 * pdev hard coded to '0' index
695 	 */
696 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
697 
698 	if (!dp_pdev) {
699 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
700 			  "pdev is null for mac_id = %d", mac_id);
701 		return mpdu_done;
702 	}
703 	/* if the invalid peer SG list has reached its max size, free the
704 	 * buffers in the list and treat the current buffer as the start
705 	 * of a new list.
706 	 * The current logic to detect the last buffer from the attn_tlv is
707 	 * not reliable in the OFDMA UL scenario, hence add a max buffers
708 	 * check to avoid the list piling up
709 	 */
710 	if (!dp_pdev->first_nbuf ||
711 	    (dp_pdev->invalid_peer_head_msdu &&
712 	    QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
713 	    (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
714 		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
715 		dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc,
716 						      rx_tlv_hdr);
717 		dp_pdev->first_nbuf = true;
718 
719 		/* If the new nbuf received is the first msdu of the
720 		 * amsdu and there are msdus in the invalid peer msdu
721 		 * list, then let us free all the msdus of the invalid
722 		 * peer msdu list.
723 		 * This scenario can happen when we start receiving
724 		 * a new a-msdu even before the previous a-msdu is completely
725 		 * received.
726 		 */
727 		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
728 		while (curr_nbuf) {
729 			tmp_nbuf = curr_nbuf->next;
730 			qdf_nbuf_free(curr_nbuf);
731 			curr_nbuf = tmp_nbuf;
732 		}
733 
734 		dp_pdev->invalid_peer_head_msdu = NULL;
735 		dp_pdev->invalid_peer_tail_msdu = NULL;
736 		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
737 				&(dp_pdev->ppdu_info.rx_status));
738 
739 	}
740 
741 	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
742 	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
743 		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
744 		qdf_assert_always(dp_pdev->first_nbuf == true);
745 		dp_pdev->first_nbuf = false;
746 		mpdu_done = true;
747 	}
748 
749 	/*
750 	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
751 	 * should be NULL here; add this check for debugging purposes
752 	 * to catch corner cases.
753 	 */
754 	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
755 					dp_pdev->invalid_peer_tail_msdu);
756 	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
757 				dp_pdev->invalid_peer_tail_msdu,
758 				nbuf);
759 
760 	return mpdu_done;
761 }
762 
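/**
 * dp_rx_wbm_err_handle_bar() - Handle a BAR frame delivered via the WBM
 *				error path
 * @soc: core DP main context
 * @peer: dp peer handle
 * @nbuf: buffer holding the BAR frame (starting with the rx TLV header)
 *
 * Validates that the frame is a BAR control frame, extracts the tid and the
 * start sequence number, and updates the rx tid state with the existing
 * BA window size.
 *
 * Return: None
 */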
763 static
764 void dp_rx_wbm_err_handle_bar(struct dp_soc *soc,
765 			      struct dp_peer *peer,
766 			      qdf_nbuf_t nbuf)
767 {
768 	uint8_t *rx_tlv_hdr;
769 	unsigned char type, subtype;
770 	uint16_t start_seq_num;
771 	uint32_t tid;
772 	struct ieee80211_frame_bar *bar;
773 
774 	/*
775 	 * 1. Is this a BAR frame? If not, discard it.
776 	 * 2. If it is, get the peer id, tid, ssn
777 	 * 2a Do a tid update
778 	 */
779 
780 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
781 	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + SIZE_OF_DATA_RX_TLV);
782 
783 	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
784 	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
785 
786 	if (!(type == IEEE80211_FC0_TYPE_CTL &&
787 	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
788 		dp_err_rl("Not a BAR frame!");
789 		return;
790 	}
791 
792 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
793 	qdf_assert_always(tid < DP_MAX_TIDS);
794 
795 	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
796 
797 	dp_info_rl("tid %u window_size %u start_seq_num %u",
798 		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);
799 
800 	dp_rx_tid_update_wifi3(peer, tid,
801 			       peer->rx_tid[tid].ba_win_size,
802 			       start_seq_num);
803 }
804 
805 /**
806  * dp_2k_jump_handle() - Function to handle 2k jump exception
807  *                        on WBM ring
808  *
809  * @soc: core DP main context
810  * @nbuf: buffer pointer
811  * @rx_tlv_hdr: start of rx tlv header
812  * @peer_id: peer id of first msdu
813  * @tid: Tid for which exception occurred
814  *
815  * This function handles 2k jump violations arising out
816  * of receiving aggregates in the non-BA case. This typically
817  * may happen if aggregates are received on a QOS enabled TID
818  * while the Rx window size is still initialized to a value of 2. Or
819  * it may also happen if the negotiated window size is 1 but the peer
820  * sends aggregates.
821  *
822  */
823 
824 void
825 dp_2k_jump_handle(struct dp_soc *soc,
826 		  qdf_nbuf_t nbuf,
827 		  uint8_t *rx_tlv_hdr,
828 		  uint16_t peer_id,
829 		  uint8_t tid)
830 {
831 	struct dp_peer *peer = NULL;
832 	struct dp_rx_tid *rx_tid = NULL;
833 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;
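	/* For 2k-jump errors only ARP frames are eligible for delivery to
	 * the stack; every other frame is dropped after the (optional)
	 * DELBA handling below.
	 */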
834 
835 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
836 	if (!peer) {
837 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
838 			  "peer not found");
839 		goto free_nbuf;
840 	}
841 
842 	if (tid >= DP_MAX_TIDS) {
843 		dp_info_rl("invalid tid");
844 		goto nbuf_deliver;
845 	}
846 
847 	rx_tid = &peer->rx_tid[tid];
848 	qdf_spin_lock_bh(&rx_tid->tid_lock);
849 
850 	/* only if BA session is active, allow send Delba */
851 	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
852 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
853 		goto nbuf_deliver;
854 	}
855 
856 	if (!rx_tid->delba_tx_status) {
857 		rx_tid->delba_tx_retry++;
858 		rx_tid->delba_tx_status = 1;
859 		rx_tid->delba_rcode =
860 			IEEE80211_REASON_QOS_SETUP_REQUIRED;
861 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
862 		if (soc->cdp_soc.ol_ops->send_delba) {
863 			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent, 1);
864 			soc->cdp_soc.ol_ops->send_delba(
865 					peer->vdev->pdev->soc->ctrl_psoc,
866 					peer->vdev->vdev_id,
867 					peer->mac_addr.raw,
868 					tid,
869 					rx_tid->delba_rcode);
870 		}
871 	} else {
872 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
873 	}
874 
875 nbuf_deliver:
876 	if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask,
877 					rx_tlv_hdr)) {
878 		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
879 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
880 		return;
881 	}
882 
883 free_nbuf:
884 	if (peer)
885 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
886 	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
887 	qdf_nbuf_free(nbuf);
888 }
889 
890 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
891     defined(QCA_WIFI_QCA6750)
892 /**
893  * dp_rx_null_q_handle_invalid_peer_id_exception() - handle invalid peer id exception
894  * @soc: pointer to dp_soc struct
895  * @pool_id: Pool id to find dp_pdev
896  * @rx_tlv_hdr: TLV header of received packet
897  * @nbuf: SKB
898  *
899  * In certain types of packets, if the peer_id is not correct, the
900  * driver may not be able to find the peer. Try finding the peer by
901  * addr_2 of the received MPDU. If the peer is found, then most likely
902  * sw_peer_id & ast_idx are corrupted.
903  *
904  * Return: true if the peer is found by addr_2 of the received MPDU, else false
905  */
906 static bool
907 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
908 					      uint8_t pool_id,
909 					      uint8_t *rx_tlv_hdr,
910 					      qdf_nbuf_t nbuf)
911 {
912 	struct dp_peer *peer = NULL;
913 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
914 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
915 	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;
916 
917 	if (!pdev) {
918 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
919 			  "pdev is null for pool_id = %d", pool_id);
920 		return false;
921 	}
922 	/*
923 	 * WAR - In certain types of packets, if the peer_id is not correct,
924 	 * the driver may not be able to find the peer. Try finding the peer
925 	 * by addr_2 of the received MPDU
926 	 */
927 	if (wh)
928 		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
929 					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
930 	if (peer) {
931 		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
932 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
933 				     QDF_TRACE_LEVEL_DEBUG);
934 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
935 				 1, qdf_nbuf_len(nbuf));
936 		qdf_nbuf_free(nbuf);
937 
938 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
939 		return true;
940 	}
941 	return false;
942 }
943 
944 /**
945  * dp_rx_check_pkt_len() - Check for pktlen validity
946  * @soc: DP SOC context
947  * @pkt_len: computed length of the pkt from caller in bytes
948  *
949  * Return: true if pkt_len > RX_DATA_BUFFER_SIZE, else false
950  *
951  */
952 static inline
953 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
954 {
955 	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
956 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
957 				 1, pkt_len);
958 		return true;
959 	} else {
960 		return false;
961 	}
962 }
963 
964 #else
965 static inline bool
966 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
967 					      uint8_t pool_id,
968 					      uint8_t *rx_tlv_hdr,
969 					      qdf_nbuf_t nbuf)
970 {
971 	return false;
972 }
973 
974 static inline
975 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
976 {
977 	return false;
978 }
979 
980 #endif
981 
982 /**
983  * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
984  *                              descriptor violation on either a
985  *                              REO or WBM ring
986  *
987  * @soc: core DP main context
988  * @nbuf: buffer pointer
989  * @rx_tlv_hdr: start of rx tlv header
990  * @pool_id: mac id
991  * @peer: peer handle
992  *
993  * This function handles NULL queue descriptor violations arising out
994  * of a missing REO queue for a given peer or a given TID. This typically
995  * may happen if a packet is received on a QOS enabled TID before the
996  * ADDBA negotiation for that TID has set up the TID queue. Or
997  * it may also happen for MC/BC frames if they are not routed to the
998  * non-QOS TID queue, in the absence of any other default TID queue.
999  * This error can show up in either a REO destination or a WBM release ring.
1000  *
1001  * Return: QDF_STATUS_SUCCESS if the nbuf was handled successfully, or a
1002  *         QDF error status code if the nbuf could not be handled or was dropped.
1003  */
1004 static QDF_STATUS
1005 dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
1006 			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
1007 			 struct dp_peer *peer)
1008 {
1009 	uint32_t pkt_len;
1010 	uint16_t msdu_len;
1011 	struct dp_vdev *vdev;
1012 	uint8_t tid;
1013 	qdf_ether_header_t *eh;
1014 	struct hal_rx_msdu_metadata msdu_metadata;
1015 	uint16_t sa_idx = 0;
1016 
1017 	qdf_nbuf_set_rx_chfrag_start(nbuf,
1018 				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1019 							       rx_tlv_hdr));
1020 	qdf_nbuf_set_rx_chfrag_end(nbuf,
1021 				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
1022 								 rx_tlv_hdr));
1023 	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1024 								  rx_tlv_hdr));
1025 	qdf_nbuf_set_da_valid(nbuf,
1026 			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
1027 							      rx_tlv_hdr));
1028 	qdf_nbuf_set_sa_valid(nbuf,
1029 			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
1030 							      rx_tlv_hdr));
1031 
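	/* Extract the msdu metadata and compute the full frame length:
	 * pre-header rx TLVs + L3 header padding + msdu payload.
	 */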
1032 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
1033 	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
1034 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + RX_PKT_TLVS_LEN;
1035 
1036 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
1037 		if (dp_rx_check_pkt_len(soc, pkt_len))
1038 			goto drop_nbuf;
1039 
1040 		/* Set length in nbuf */
1041 		qdf_nbuf_set_pktlen(
1042 			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
1043 		qdf_assert_always(nbuf->data == rx_tlv_hdr);
1044 	}
1045 
1046 	/*
1047 	 * Check if DMA completed -- msdu_done is the last bit
1048 	 * to be written
1049 	 */
1050 	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
1051 
1052 		dp_err_rl("MSDU DONE failure");
1053 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1054 				     QDF_TRACE_LEVEL_INFO);
1055 		qdf_assert(0);
1056 	}
1057 
1058 	if (!peer &&
1059 	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
1060 							  rx_tlv_hdr, nbuf))
1061 		return QDF_STATUS_E_FAILURE;
1062 
1063 	if (!peer) {
1064 		bool mpdu_done = false;
1065 		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
1066 
1067 		if (!pdev) {
1068 			dp_err_rl("pdev is null for pool_id = %d", pool_id);
1069 			return QDF_STATUS_E_FAILURE;
1070 		}
1071 
1072 		dp_err_rl("peer is NULL");
1073 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1074 				 qdf_nbuf_len(nbuf));
1075 
1076 		/* QCN9000 has the support enabled */
1077 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
1078 			mpdu_done = true;
1079 			nbuf->next = NULL;
1080 			/* Trigger invalid peer handler wrapper */
1081 			dp_rx_process_invalid_peer_wrapper(soc,
1082 					nbuf, mpdu_done, pool_id);
1083 		} else {
1084 			mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
1085 			/* Trigger invalid peer handler wrapper */
1086 			dp_rx_process_invalid_peer_wrapper(soc,
1087 					pdev->invalid_peer_head_msdu,
1088 					mpdu_done, pool_id);
1089 		}
1090 
1091 		if (mpdu_done) {
1092 			pdev->invalid_peer_head_msdu = NULL;
1093 			pdev->invalid_peer_tail_msdu = NULL;
1094 		}
1095 
1096 		return QDF_STATUS_E_FAILURE;
1097 	}
1098 
1099 	vdev = peer->vdev;
1100 	if (!vdev) {
1101 		dp_err_rl("Null vdev!");
1102 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1103 		goto drop_nbuf;
1104 	}
1105 
1106 	/*
1107 	 * Advance the packet start pointer by total size of
1108 	 * pre-header TLV's
1109 	 */
1110 	if (qdf_nbuf_is_frag(nbuf))
1111 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1112 	else
1113 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
1114 				   RX_PKT_TLVS_LEN));
1115 
1116 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
1117 
1118 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
1119 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
1120 
1121 		if ((sa_idx < 0) ||
1122 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
1123 			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
1124 			goto drop_nbuf;
1125 		}
1126 	}
1127 
1128 	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
1129 		/* this is a looped back MCBC pkt, drop it */
1130 		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
1131 		goto drop_nbuf;
1132 	}
1133 
1134 	/*
1135 	 * In qwrap mode if the received packet matches with any of the vdev
1136 	 * mac addresses, drop it. Do not receive multicast packets originating
1137 	 * from any proxysta.
1138 	 */
1139 	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
1140 		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
1141 		goto drop_nbuf;
1142 	}
1143 
1144 
1145 	if (qdf_unlikely((peer->nawds_enabled == true) &&
1146 			hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1147 						       rx_tlv_hdr))) {
1148 		dp_err_rl("free buffer for multicast packet");
1149 		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
1150 		goto drop_nbuf;
1151 	}
1152 
1153 	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
1154 		dp_err_rl("mcast Policy Check Drop pkt");
1155 		goto drop_nbuf;
1156 	}
1157 	/* WDS Source Port Learning */
1158 	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
1159 		vdev->wds_enabled))
1160 		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf,
1161 					msdu_metadata);
1162 
1163 	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
1164 		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
1165 		if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
1166 			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
1167 			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
1168 	}
1169 
1170 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1171 		qdf_nbuf_set_next(nbuf, NULL);
1172 		dp_rx_deliver_raw(vdev, nbuf, peer);
1173 	} else {
1174 		qdf_nbuf_set_next(nbuf, NULL);
1175 		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
1176 				 qdf_nbuf_len(nbuf));
1177 
1178 		/*
1179 		 * Update the protocol tag in SKB based on
1180 		 * CCE metadata
1181 		 */
1182 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1183 					  EXCEPTION_DEST_RING_ID,
1184 					  true, true);
1185 
1186 		/* Update the flow tag in SKB based on FSE metadata */
1187 		dp_rx_update_flow_tag(soc, vdev, nbuf,
1188 				      rx_tlv_hdr, true);
1189 
1190 		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
1191 				 soc->hal_soc, rx_tlv_hdr) &&
1192 				 (vdev->rx_decap_type ==
1193 				  htt_cmn_pkt_type_ethernet))) {
1194 			eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1195 			DP_STATS_INC_PKT(peer, rx.multicast, 1,
1196 					 qdf_nbuf_len(nbuf));
1197 
1198 			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
1199 				DP_STATS_INC_PKT(peer, rx.bcast, 1,
1200 						 qdf_nbuf_len(nbuf));
1201 		}
1202 
1203 		qdf_nbuf_set_exc_frame(nbuf, 1);
1204 		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
1205 	}
1206 	return QDF_STATUS_SUCCESS;
1207 
1208 drop_nbuf:
1209 	qdf_nbuf_free(nbuf);
1210 	return QDF_STATUS_E_FAILURE;
1211 }
1212 
1213 /**
1214  * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
1215  *			       or wifi parse error frames to the OS.
1216  * @soc: core DP main context
1217  * @nbuf: buffer pointer
1218  * @rx_tlv_hdr: start of rx tlv header
1219  * @peer: peer reference
1220  * @err_code: rxdma err code
1221  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1222  * pool_id have the same mapping)
1223  *
1224  * Return: None
1225  */
1226 void
1227 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
1228 			uint8_t *rx_tlv_hdr, struct dp_peer *peer,
1229 			uint8_t err_code, uint8_t mac_id)
1230 {
1231 	uint32_t pkt_len, l2_hdr_offset;
1232 	uint16_t msdu_len;
1233 	struct dp_vdev *vdev;
1234 	qdf_ether_header_t *eh;
1235 	bool is_broadcast;
1236 
1237 	/*
1238 	 * Check if DMA completed -- msdu_done is the last bit
1239 	 * to be written
1240 	 */
1241 	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
1242 
1243 		dp_err_rl("MSDU DONE failure");
1244 
1245 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1246 				     QDF_TRACE_LEVEL_INFO);
1247 		qdf_assert(0);
1248 	}
1249 
1250 	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
1251 							   rx_tlv_hdr);
1252 	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
1253 	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
1254 
1255 	if (dp_rx_check_pkt_len(soc, pkt_len)) {
1256 		/* Drop & free packet */
1257 		qdf_nbuf_free(nbuf);
1258 		return;
1259 	}
1260 	/* Set length in nbuf */
1261 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
1262 
1263 	qdf_nbuf_set_next(nbuf, NULL);
1264 
1265 	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1266 	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
1267 
1268 	if (!peer) {
1269 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
1270 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1271 				qdf_nbuf_len(nbuf));
1272 		/* Trigger invalid peer handler wrapper */
1273 		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
1274 		return;
1275 	}
1276 
1277 	vdev = peer->vdev;
1278 	if (!vdev) {
1279 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1280 				FL("INVALID vdev %pK OR osif_rx"), vdev);
1281 		/* Drop & free packet */
1282 		qdf_nbuf_free(nbuf);
1283 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1284 		return;
1285 	}
1286 
1287 	/*
1288 	 * Advance the packet start pointer by total size of
1289 	 * pre-header TLV's
1290 	 */
1291 	dp_rx_skip_tlvs(nbuf, l2_hdr_offset);
1292 
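	/* For wifi parse errors, 802.1Q tagged STP frames are counted and
	 * taken through the mesh path; other VLAN tagged frames are sent
	 * straight to the regular rx delivery path.
	 */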
1293 	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
1294 		uint8_t *pkt_type;
1295 
1296 		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
1297 		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
1298 			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
1299 							htons(QDF_LLC_STP)) {
1300 				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
1301 				goto process_mesh;
1302 			} else {
1303 				goto process_rx;
1304 			}
1305 		}
1306 	}
1307 	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
1308 		goto process_mesh;
1309 
1310 	/*
1311 	 * WAPI cert AP sends rekey frames as unencrypted.
1312 	 * Thus RXDMA will report unencrypted frame error.
1313 	 * To pass the WAPI cert case, SW needs to pass the unencrypted
1314 	 * rekey frame to the stack.
1315 	 */
1316 	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1317 		goto process_rx;
1318 	}
1319 	/*
1320 	 * In the dynamic WEP case rekey frames are not encrypted,
1321 	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled and
1322 	 * key install is already done
1323 	 */
1324 	if ((vdev->sec_type == cdp_sec_type_wep104) &&
1325 	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
1326 		goto process_rx;
1327 
1328 process_mesh:
1329 
1330 	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
1331 		qdf_nbuf_free(nbuf);
1332 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1333 		return;
1334 	}
1335 
1336 	if (vdev->mesh_vdev) {
1337 		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
1338 				      == QDF_STATUS_SUCCESS) {
1339 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
1340 				  FL("mesh pkt filtered"));
1341 			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);
1342 
1343 			qdf_nbuf_free(nbuf);
1344 			return;
1345 		}
1346 		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
1347 	}
1348 process_rx:
1349 	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1350 							rx_tlv_hdr) &&
1351 		(vdev->rx_decap_type ==
1352 				htt_cmn_pkt_type_ethernet))) {
1353 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1354 		is_broadcast = (QDF_IS_ADDR_BROADCAST
1355 				(eh->ether_dhost)) ? 1 : 0 ;
1356 		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
1357 		if (is_broadcast) {
1358 			DP_STATS_INC_PKT(peer, rx.bcast, 1,
1359 					qdf_nbuf_len(nbuf));
1360 		}
1361 	}
1362 
1363 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1364 		dp_rx_deliver_raw(vdev, nbuf, peer);
1365 	} else {
1366 		/* Update the protocol tag in SKB based on CCE metadata */
1367 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1368 					  EXCEPTION_DEST_RING_ID, true, true);
1369 		/* Update the flow tag in SKB based on FSE metadata */
1370 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
1371 		DP_STATS_INC(peer, rx.to_stack.num, 1);
1372 		qdf_nbuf_set_exc_frame(nbuf, 1);
1373 		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
1374 	}
1375 
1376 	return;
1377 }
1378 
1379 /**
1380  * dp_rx_process_mic_error(): Function to pass mic error indication to umac
1381  * @soc: core DP main context
1382  * @nbuf: buffer pointer
1383  * @rx_tlv_hdr: start of rx tlv header
1384  * @peer: peer handle
1385  *
1386  * Return: void
1387  */
1388 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
1389 			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
1390 {
1391 	struct dp_vdev *vdev = NULL;
1392 	struct dp_pdev *pdev = NULL;
1393 	struct ol_if_ops *tops = NULL;
1394 	uint16_t rx_seq, fragno;
1395 	uint8_t is_raw;
1396 	unsigned int tid;
1397 	QDF_STATUS status;
1398 	struct cdp_rx_mic_err_info mic_failure_info;
1399 
1400 	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1401 					    rx_tlv_hdr))
1402 		return;
1403 
1404 	if (!peer) {
1405 		dp_info_rl("peer not found");
1406 		goto fail;
1407 	}
1408 
1409 	vdev = peer->vdev;
1410 	if (!vdev) {
1411 		dp_info_rl("VDEV not found");
1412 		goto fail;
1413 	}
1414 
1415 	pdev = vdev->pdev;
1416 	if (!pdev) {
1417 		dp_info_rl("PDEV not found");
1418 		goto fail;
1419 	}
1420 
1421 	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
1422 	if (is_raw) {
1423 		fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
1424 		/* Can get only last fragment */
1425 		if (fragno) {
1426 			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
1427 							qdf_nbuf_data(nbuf));
1428 			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
1429 							qdf_nbuf_data(nbuf));
1430 
1431 			status = dp_rx_defrag_add_last_frag(soc, peer,
1432 							    tid, rx_seq, nbuf);
1433 			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
1434 				   "status %d !", rx_seq, fragno, status);
1435 			return;
1436 		}
1437 	}
1438 
1439 	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
1440 				  &mic_failure_info.da_mac_addr.bytes[0])) {
1441 		dp_err_rl("Failed to get da_mac_addr");
1442 		goto fail;
1443 	}
1444 
1445 	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
1446 				  &mic_failure_info.ta_mac_addr.bytes[0])) {
1447 		dp_err_rl("Failed to get ta_mac_addr");
1448 		goto fail;
1449 	}
1450 
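	/* Populate the MIC failure indication and hand it to the umac
	 * control path via the rx_mic_error ol_ops callback.
	 */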
1451 	mic_failure_info.key_id = 0;
1452 	mic_failure_info.multicast =
1453 		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
1454 	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
1455 	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
1456 	mic_failure_info.data = NULL;
1457 	mic_failure_info.vdev_id = vdev->vdev_id;
1458 
1459 	tops = pdev->soc->cdp_soc.ol_ops;
1460 	if (tops->rx_mic_error)
1461 		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
1462 				   &mic_failure_info);
1463 
1464 fail:
1465 	qdf_nbuf_free(nbuf);
1466 	return;
1467 }
1468 
1469 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
1470 /**
1471  * dp_rx_link_cookie_check() - Validate link desc cookie
1472  * @ring_desc: ring descriptor
1473  *
1474  * Return: qdf status
1475  */
1476 static inline QDF_STATUS
1477 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1478 {
1479 	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
1480 		return QDF_STATUS_E_FAILURE;
1481 
1482 	return QDF_STATUS_SUCCESS;
1483 }
1484 
1485 /**
1486  * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
1487  * @ring_desc: ring descriptor
1488  *
1489  * Return: None
1490  */
1491 static inline void
1492 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1493 {
1494 	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
1495 }
1496 #else
1497 static inline QDF_STATUS
1498 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1499 {
1500 	return QDF_STATUS_SUCCESS;
1501 }
1502 
1503 static inline void
1504 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1505 {
1506 }
1507 #endif
1508 
1509 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1510 /**
1511  * dp_rx_err_ring_record_entry() - Record rx err ring history
1512  * @soc: Datapath soc structure
1513  * @paddr: paddr of the buffer in RX err ring
1514  * @sw_cookie: SW cookie of the buffer in RX err ring
1515  * @rbm: Return buffer manager of the buffer in RX err ring
1516  *
1517  * Returns: None
1518  */
1519 static inline void
1520 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1521 			    uint32_t sw_cookie, uint8_t rbm)
1522 {
1523 	struct dp_buf_info_record *record;
1524 	uint32_t idx;
1525 
1526 	if (qdf_unlikely(!soc->rx_err_ring_history))
1527 		return;
1528 
1529 	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
1530 					DP_RX_ERR_HIST_MAX);
1531 
1532 	/* No NULL check needed for record since its an array */
1533 	record = &soc->rx_err_ring_history->entry[idx];
1534 
1535 	record->timestamp = qdf_get_log_timestamp();
1536 	record->hbi.paddr = paddr;
1537 	record->hbi.sw_cookie = sw_cookie;
1538 	record->hbi.rbm = rbm;
1539 }
1540 #else
1541 static inline void
1542 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1543 			    uint32_t sw_cookie, uint8_t rbm)
1544 {
1545 }
1546 #endif
1547 
1548 uint32_t
1549 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
1550 		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
1551 {
1552 	hal_ring_desc_t ring_desc;
1553 	hal_soc_handle_t hal_soc;
1554 	uint32_t count = 0;
1555 	uint32_t rx_bufs_used = 0;
1556 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
1557 	uint8_t mac_id = 0;
1558 	uint8_t buf_type;
1559 	uint8_t error, rbm;
1560 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
1561 	struct hal_buf_info hbi;
1562 	struct dp_pdev *dp_pdev;
1563 	struct dp_srng *dp_rxdma_srng;
1564 	struct rx_desc_pool *rx_desc_pool;
1565 	uint32_t cookie = 0;
1566 	void *link_desc_va;
1567 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
1568 	uint16_t num_msdus;
1569 	struct dp_rx_desc *rx_desc = NULL;
1570 	QDF_STATUS status;
1571 	bool ret;
1572 
1573 	/* Debug -- Remove later */
1574 	qdf_assert(soc && hal_ring_hdl);
1575 
1576 	hal_soc = soc->hal_soc;
1577 
1578 	/* Debug -- Remove later */
1579 	qdf_assert(hal_soc);
1580 
1581 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
1582 
1583 		/* TODO */
1584 		/*
1585 		 * Need API to convert from hal_ring pointer to
1586 		 * Ring Type / Ring Id combo
1587 		 */
1588 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
1589 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1590 			FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
1591 		goto done;
1592 	}
1593 
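	/* Reap entries from the REO exception ring until the quota is
	 * exhausted or the ring is empty; each entry references an MSDU
	 * link descriptor describing the erroneous MPDU.
	 */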
1594 	while (qdf_likely(quota-- && (ring_desc =
1595 				hal_srng_dst_peek(hal_soc,
1596 						  hal_ring_hdl)))) {
1597 
1598 		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
1599 
1600 		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
1601 
1602 		qdf_assert(error == HAL_REO_ERROR_DETECTED);
1603 
1604 		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
1605 		/*
1606 		 * For REO error ring, expect only MSDU LINK DESC
1607 		 */
1608 		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);
1609 
1610 		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
1611 		/*
1612 		 * check for the magic number in the sw cookie
1613 		 */
1614 		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
1615 							LINK_DESC_ID_START);
1616 
1617 		status = dp_rx_link_cookie_check(ring_desc);
1618 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
1619 			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
1620 			break;
1621 		}
1622 
1623 		/*
1624 		 * Check if the buffer is to be processed on this processor
1625 		 */
1626 		rbm = hal_rx_ret_buf_manager_get(ring_desc);
1627 
1628 		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
1629 		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
1630 		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
1631 				     &num_msdus);
1632 		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
1633 					    msdu_list.sw_cookie[0],
1634 					    msdu_list.rbm[0]);
1635 		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
1636 				(msdu_list.rbm[0] !=
1637 					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST) &&
1638 				(msdu_list.rbm[0] != DP_DEFRAG_RBM))) {
1639 			/* TODO */
1640 			/* Call appropriate handler */
1641 			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
1642 				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
1643 				QDF_TRACE(QDF_MODULE_ID_DP,
1644 					  QDF_TRACE_LEVEL_ERROR,
1645 					  FL("Invalid RBM %d"),
1646 					     msdu_list.rbm[0]);
1647 			}
1648 
1649 			/* Return link descriptor through WBM ring (SW2WBM)*/
1650 			dp_rx_link_desc_return(soc, ring_desc,
1651 					HAL_BM_ACTION_RELEASE_MSDU_LIST);
1652 			goto next_entry;
1653 		}
1654 
1655 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
1656 						      msdu_list.sw_cookie[0]);
1657 		qdf_assert_always(rx_desc);
1658 
1659 		mac_id = rx_desc->pool_id;
1660 
1661 		/* Get the MPDU DESC info */
1662 		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
1663 
1664 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
1665 			/*
1666 			 * We only handle one msdu per link desc for the fragmented
1667 			 * case. We drop the msdus and release the link desc
1668 			 * back if there is more than one msdu in the link desc.
1669 			 */
1670 			if (qdf_unlikely(num_msdus > 1)) {
1671 				count = dp_rx_msdus_drop(soc, ring_desc,
1672 							 &mpdu_desc_info,
1673 							 &mac_id, quota);
1674 				rx_bufs_reaped[mac_id] += count;
1675 				goto next_entry;
1676 			}
1677 
1678 			/*
1679 			 * this is an unlikely scenario where the host is reaping
1680 			 * a descriptor which it has already reaped just a while ago
1681 			 * but has yet to replenish back to HW.
1682 			 * In this case the host will dump the last 128 descriptors
1683 			 * including the software descriptor rx_desc and assert.
1684 			 */
1685 
1686 			if (qdf_unlikely(!rx_desc->in_use)) {
1687 				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1688 				dp_info_rl("Reaping rx_desc not in use!");
1689 				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1690 							   ring_desc, rx_desc);
1691 				/* ignore duplicate RX desc and continue */
1692 				/* Pop out the descriptor */
1693 				goto next_entry;
1694 			}
1695 
1696 			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
1697 							    msdu_list.paddr[0]);
1698 			if (!ret) {
1699 				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1700 				rx_desc->in_err_state = 1;
1701 				goto next_entry;
1702 			}
1703 
1704 			count = dp_rx_frag_handle(soc,
1705 						  ring_desc, &mpdu_desc_info,
1706 						  rx_desc, &mac_id, quota);
1707 
1708 			rx_bufs_reaped[mac_id] += count;
1709 			DP_STATS_INC(soc, rx.rx_frags, 1);
1710 			goto next_entry;
1711 		}
1712 
1713 		if (hal_rx_reo_is_pn_error(ring_desc)) {
1714 			/* TODO */
1715 			DP_STATS_INC(soc,
1716 				rx.err.
1717 				reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
1718 				1);
1719 			/* increment @pdev level */
1720 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1721 			if (dp_pdev)
1722 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
1723 			count = dp_rx_pn_error_handle(soc,
1724 						      ring_desc,
1725 						      &mpdu_desc_info, &mac_id,
1726 						      quota);
1727 
1728 			rx_bufs_reaped[mac_id] += count;
1729 			goto next_entry;
1730 		}
1731 
1732 		if (hal_rx_reo_is_2k_jump(ring_desc)) {
1733 			/* TODO */
1734 			DP_STATS_INC(soc,
1735 				rx.err.
1736 				reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
1737 				1);
1738 			/* increment @pdev level */
1739 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1740 			if (dp_pdev)
1741 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
1742 
1743 			count = dp_rx_reo_err_entry_process(
1744 					soc,
1745 					ring_desc,
1746 					&mpdu_desc_info,
1747 					link_desc_va,
1748 					HAL_REO_ERR_REGULAR_FRAME_2K_JUMP);
1749 
1750 			rx_bufs_reaped[mac_id] += count;
1751 			goto next_entry;
1752 		}
1753 
1754 		if (hal_rx_reo_is_oor_error(ring_desc)) {
1755 			DP_STATS_INC(
1756 				soc,
1757 				rx.err.
1758 				reo_error[HAL_REO_ERR_REGULAR_FRAME_OOR],
1759 				1);
1760 			/* increment @pdev level */
1761 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1762 			if (dp_pdev)
1763 				DP_STATS_INC(dp_pdev, err.reo_error, 1);
1764 			count = dp_rx_reo_err_entry_process(
1765 					soc,
1766 					ring_desc,
1767 					&mpdu_desc_info,
1768 					link_desc_va,
1769 					HAL_REO_ERR_REGULAR_FRAME_OOR);
1770 
1771 			rx_bufs_reaped[mac_id] += count;
1772 			goto next_entry;
1773 		}
1774 next_entry:
1775 		dp_rx_link_cookie_invalidate(ring_desc);
1776 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1777 	}
1778 
1779 done:
1780 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
1781 
1782 	if (soc->rx.flags.defrag_timeout_check) {
1783 		uint32_t now_ms =
1784 			qdf_system_ticks_to_msecs(qdf_system_ticks());
1785 
1786 		if (now_ms >= soc->rx.defrag.next_flush_ms)
1787 			dp_rx_defrag_waitlist_flush(soc);
1788 	}
1789 
1790 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
1791 		if (rx_bufs_reaped[mac_id]) {
1792 			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1793 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
1794 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
1795 
1796 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
1797 						rx_desc_pool,
1798 						rx_bufs_reaped[mac_id],
1799 						&dp_pdev->free_list_head,
1800 						&dp_pdev->free_list_tail);
1801 			rx_bufs_used += rx_bufs_reaped[mac_id];
1802 		}
1803 	}
1804 
1805 	return rx_bufs_used; /* Assume no scale factor for now */
1806 }
1807 
1808 #ifdef DROP_RXDMA_DECRYPT_ERR
1809 /**
1810  * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
1811  *
1812  * Return: true if rxdma decrypt err frames are handled and false otherwise
1813  */
1814 static inline bool dp_handle_rxdma_decrypt_err(void)
1815 {
1816 	return false;
1817 }
1818 #else
1819 static inline bool dp_handle_rxdma_decrypt_err(void)
1820 {
1821 	return true;
1822 }
1823 #endif
1824 
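/**
 * dp_rx_is_sg_formation_required() - check if SG handling is needed for
 *				       this WBM error descriptor
 * @info: WBM error descriptor info parsed from the ring entry
 *
 * Return: true for the error codes whose handlers support SG buffers
 *	   (NULL REO queue descriptor and RXDMA unencrypted error),
 *	   false otherwise
 */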
1825 static inline bool
1826 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
1827 {
1828 	/*
1829 	 * Currently the Null Queue and Unencrypted error handlers have support
1830 	 * for SG. Other error handlers do not deal with SG buffers.
1831 	 */
1832 	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
1833 	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
1834 	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
1835 	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
1836 		return true;
1837 
1838 	return false;
1839 }
1840 
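/**
 * dp_rx_wbm_err_process() - process frames routed to the WBM error
 *			     release ring by REO or RXDMA
 * @int_ctx: interrupt context
 * @soc: core DP main context
 * @hal_ring_hdl: HAL handle of the WBM error release ring
 * @quota: upper limit of descriptors to reap in one call
 *
 * Reaps the erroneous MSDU buffers from the ring, replenishes them back
 * to the RXDMA refill rings and then dispatches each nbuf to the handler
 * matching its REO/RXDMA error code.
 *
 * Return: number of rx buffers reaped
 */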
1841 uint32_t
1842 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
1843 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
1844 {
1845 	hal_ring_desc_t ring_desc;
1846 	hal_soc_handle_t hal_soc;
1847 	struct dp_rx_desc *rx_desc;
1848 	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
1849 	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
1850 	uint32_t rx_bufs_used = 0;
1851 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
1852 	uint8_t buf_type, rbm;
1853 	uint32_t rx_buf_cookie;
1854 	uint8_t mac_id;
1855 	struct dp_pdev *dp_pdev;
1856 	struct dp_srng *dp_rxdma_srng;
1857 	struct rx_desc_pool *rx_desc_pool;
1858 	uint8_t *rx_tlv_hdr;
1859 	qdf_nbuf_t nbuf_head = NULL;
1860 	qdf_nbuf_t nbuf_tail = NULL;
1861 	qdf_nbuf_t nbuf, next;
1862 	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
1863 	uint8_t pool_id;
1864 	uint8_t tid = 0;
1865 	uint8_t msdu_continuation = 0;
1866 	bool process_sg_buf = false;
1867 
1868 	/* Debug -- Remove later */
1869 	qdf_assert(soc && hal_ring_hdl);
1870 
1871 	hal_soc = soc->hal_soc;
1872 
1873 	/* Debug -- Remove later */
1874 	qdf_assert(hal_soc);
1875 
1876 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
1877 
1878 		/* TODO */
1879 		/*
1880 		 * Need API to convert from hal_ring pointer to
1881 		 * Ring Type / Ring Id combo
1882 		 */
1883 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1884 			FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
1885 		goto done;
1886 	}
1887 
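	/* First pass: reap ring entries and queue the unmapped nbufs on a
	 * local list; per-error-code handling is done after ring access ends.
	 */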
1888 	while (qdf_likely(quota)) {
1889 		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1890 		if (qdf_unlikely(!ring_desc))
1891 			break;
1892 
1893 		/* XXX */
1894 		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);
1895 
1896 		/*
1897 		 * For WBM ring, expect only MSDU buffers
1898 		 */
1899 		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);
1900 
1901 		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
1902 				== HAL_RX_WBM_ERR_SRC_RXDMA) ||
1903 				(HAL_RX_WBM_ERR_SRC_GET(ring_desc)
1904 				== HAL_RX_WBM_ERR_SRC_REO));
1905 
1906 		/*
1907 		 * Check if the buffer is to be processed on this processor
1908 		 */
1909 		rbm = hal_rx_ret_buf_manager_get(ring_desc);
1910 
1911 		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
1912 			/* TODO */
1913 			/* Call appropriate handler */
1914 			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
1915 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1916 				FL("Invalid RBM %d"), rbm);
1917 			continue;
1918 		}
1919 
1920 		rx_buf_cookie =	HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);
1921 
1922 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
1923 		qdf_assert_always(rx_desc);
1924 
1925 		if (!dp_rx_desc_check_magic(rx_desc)) {
1926 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1927 					FL("Invalid rx_desc cookie=%d"),
1928 					rx_buf_cookie);
1929 			continue;
1930 		}
1931 
1932 		/*
1933 		 * this is an unlikely scenario where the host is reaping
1934 		 * a descriptor which it already reaped just a while ago
1935 		 * but is yet to replenish it back to HW.
1936 		 * In this case host will dump the last 128 descriptors
1937 		 * including the software descriptor rx_desc and assert.
1938 		 */
1939 		if (qdf_unlikely(!rx_desc->in_use)) {
1940 			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
1941 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1942 						   ring_desc, rx_desc);
1943 		}
1944 
1945 		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
1946 
1947 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support &&
1948 				 dp_rx_is_sg_formation_required(&wbm_err_info))) {
1949 			/* SG is detected from continuation bit */
1950 			msdu_continuation = hal_rx_wbm_err_msdu_continuation_get(hal_soc,
1951 					ring_desc);
1952 			if (msdu_continuation &&
1953 			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
1954 				/* Update length from first buffer in SG */
1955 				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
1956 					hal_rx_msdu_start_msdu_len_get(
1957 						qdf_nbuf_data(rx_desc->nbuf));
1958 				soc->wbm_sg_param.wbm_is_first_msdu_in_sg = true;
1959 			}
1960 
1961 			if (msdu_continuation) {
1962 				/* MSDU continued packets */
1963 				qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
1964 				QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) =
1965 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
1966 			} else {
1967 				/* This is the terminal packet in SG */
1968 				qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
1969 				qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
1970 				QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) =
1971 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
1972 				process_sg_buf = true;
1973 			}
1974 		}
1975 
1976 		nbuf = rx_desc->nbuf;
1977 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
1978 		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
1979 						  rx_desc_pool->buf_size,
1980 						  false);
1981 		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
1982 					     QDF_DMA_FROM_DEVICE,
1983 					     rx_desc_pool->buf_size);
1984 		rx_desc->unmapped = 1;
1985 
1986 		/*
1987 		 * save the wbm desc info in nbuf TLV. We will need this
1988 		 * info when we do the actual nbuf processing
1989 		 */
1990 		wbm_err_info.pool_id = rx_desc->pool_id;
1991 		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
1992 								&wbm_err_info);
1993 
1994 		rx_bufs_reaped[rx_desc->pool_id]++;
1995 
1996 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
1997 			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
1998 					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
1999 					  nbuf);
2000 			if (process_sg_buf) {
2001 				if (!dp_rx_buffer_pool_refill(
2002 					soc,
2003 					soc->wbm_sg_param.wbm_sg_nbuf_head,
2004 					rx_desc->pool_id))
2005 					DP_RX_MERGE_TWO_LIST(
2006 						nbuf_head, nbuf_tail,
2007 						soc->wbm_sg_param.wbm_sg_nbuf_head,
2008 						soc->wbm_sg_param.wbm_sg_nbuf_tail);
2009 				dp_rx_wbm_sg_list_reset(soc);
2010 				process_sg_buf = false;
2011 			}
2012 		} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
2013 						     rx_desc->pool_id)) {
2014 			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
2015 		}
2016 
2017 		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
2018 						&tail[rx_desc->pool_id],
2019 						rx_desc);
2020 
2021 		/*
2022 		 * if continuation bit is set then we have MSDU spread
2023 		 * across multiple buffers, let us not decrement quota
2024 		 * till we reap all buffers of that MSDU.
2025 		 */
2026 		if (qdf_likely(!msdu_continuation))
2027 			quota -= 1;
2028 	}
2029 done:
2030 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2031 
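	/* Replenish the buffers reaped above back to the per-pool RXDMA
	 * refill rings before processing the error nbuf list.
	 */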
2032 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2033 		if (rx_bufs_reaped[mac_id]) {
2034 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2035 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
2036 
2037 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2038 					rx_desc_pool, rx_bufs_reaped[mac_id],
2039 					&head[mac_id], &tail[mac_id]);
2040 			rx_bufs_used += rx_bufs_reaped[mac_id];
2041 		}
2042 	}
2043 
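	/* Second pass: walk the reaped nbuf list; the WBM error info saved
	 * in each nbuf's TLV area selects the handler to invoke.
	 */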
2044 	nbuf = nbuf_head;
2045 	while (nbuf) {
2046 		struct dp_peer *peer;
2047 		uint16_t peer_id;
2048 		uint8_t err_code;
2049 		uint8_t *tlv_hdr;
2050 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
2051 
2052 		/*
2053 		 * retrieve the wbm desc info from nbuf TLV, so we can
2054 		 * handle error cases appropriately
2055 		 */
2056 		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);
2057 
2058 		peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
2059 							   rx_tlv_hdr);
2060 		peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
2061 
2062 		if (!peer)
2063 			dp_info_rl("peer is null peer_id%u err_src%u err_rsn%u",
2064 				   peer_id, wbm_err_info.wbm_err_src,
2065 				   wbm_err_info.reo_psh_rsn);
2066 
2067 		/* Set queue_mapping in nbuf to 0 */
2068 		dp_set_rx_queue(nbuf, 0);
2069 
2070 		next = nbuf->next;
2071 
2072 		/*
2073 		 * Form the SG for msdu continued buffers
2074 		 * QCN9000 has this support
2075 		 */
2076 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
2077 			nbuf = dp_rx_sg_create(nbuf);
2078 			next = nbuf->next;
2079 			/*
2080 			 * SG error handling is not done correctly,
2081 			 * drop SG frames for now.
2082 			 */
2083 			qdf_nbuf_free(nbuf);
2084 			dp_info_rl("scattered msdu dropped");
2085 			nbuf = next;
2086 			if (peer)
2087 				dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
2088 			continue;
2089 		}
2090 
2091 		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
2092 			if (wbm_err_info.reo_psh_rsn
2093 					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {
2094 
2095 				DP_STATS_INC(soc,
2096 					rx.err.reo_error
2097 					[wbm_err_info.reo_err_code], 1);
2098 				/* increment @pdev level */
2099 				pool_id = wbm_err_info.pool_id;
2100 				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2101 				if (dp_pdev)
2102 					DP_STATS_INC(dp_pdev, err.reo_error,
2103 						     1);
2104 
2105 				switch (wbm_err_info.reo_err_code) {
2106 				/*
2107 				 * Handling for packets which have NULL REO
2108 				 * queue descriptor
2109 				 */
2110 				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2111 					pool_id = wbm_err_info.pool_id;
2112 					dp_rx_null_q_desc_handle(soc, nbuf,
2113 								 rx_tlv_hdr,
2114 								 pool_id, peer);
2115 					break;
2116 				/* TODO */
2117 				/* Add per error code accounting */
2118 				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2119 					pool_id = wbm_err_info.pool_id;
2120 
2121 					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2122 									   rx_tlv_hdr)) {
2123 						peer_id =
2124 						hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
2125 										 rx_tlv_hdr);
2126 						tid =
2127 						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2128 					}
2129 					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2130 					hal_rx_msdu_start_msdu_len_get(
2131 								rx_tlv_hdr);
2132 					nbuf->next = NULL;
2133 					dp_2k_jump_handle(soc, nbuf,
2134 							  rx_tlv_hdr,
2135 							  peer_id, tid);
2136 					break;
2137 				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2138 				case HAL_REO_ERR_BAR_FRAME_OOR:
2139 					if (peer)
2140 						dp_rx_wbm_err_handle_bar(soc,
2141 									 peer,
2142 									 nbuf);
2143 					qdf_nbuf_free(nbuf);
2144 					break;
2145 
2146 				default:
2147 					dp_info_rl("Got pkt with REO ERROR: %d",
2148 						   wbm_err_info.reo_err_code);
2149 					qdf_nbuf_free(nbuf);
2150 				}
2151 			}
2152 		} else if (wbm_err_info.wbm_err_src ==
2153 					HAL_RX_WBM_ERR_SRC_RXDMA) {
2154 			if (wbm_err_info.rxdma_psh_rsn
2155 					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2156 				DP_STATS_INC(soc,
2157 					rx.err.rxdma_error
2158 					[wbm_err_info.rxdma_err_code], 1);
2159 				/* increment @pdev level */
2160 				pool_id = wbm_err_info.pool_id;
2161 				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2162 				if (dp_pdev)
2163 					DP_STATS_INC(dp_pdev,
2164 						     err.rxdma_error, 1);
2165 
2166 				switch (wbm_err_info.rxdma_err_code) {
2167 				case HAL_RXDMA_ERR_UNENCRYPTED:
2168 
2169 				case HAL_RXDMA_ERR_WIFI_PARSE:
2170 					pool_id = wbm_err_info.pool_id;
2171 					dp_rx_process_rxdma_err(soc, nbuf,
2172 								rx_tlv_hdr,
2173 								peer,
2174 								wbm_err_info.
2175 								rxdma_err_code,
2176 								pool_id);
2177 					break;
2178 
2179 				case HAL_RXDMA_ERR_TKIP_MIC:
2180 					dp_rx_process_mic_error(soc, nbuf,
2181 								rx_tlv_hdr,
2182 								peer);
2183 					if (peer)
2184 						DP_STATS_INC(peer, rx.err.mic_err, 1);
2185 					break;
2186 
2187 				case HAL_RXDMA_ERR_DECRYPT:
2188 
2189 					if (peer) {
2190 						DP_STATS_INC(peer, rx.err.
2191 							     decrypt_err, 1);
2192 						qdf_nbuf_free(nbuf);
2193 						break;
2194 					}
2195 
2196 					if (!dp_handle_rxdma_decrypt_err()) {
2197 						qdf_nbuf_free(nbuf);
2198 						break;
2199 					}
2200 
2201 					pool_id = wbm_err_info.pool_id;
2202 					err_code = wbm_err_info.rxdma_err_code;
2203 					tlv_hdr = rx_tlv_hdr;
2204 					dp_rx_process_rxdma_err(soc, nbuf,
2205 								tlv_hdr, NULL,
2206 								err_code,
2207 								pool_id);
2208 					break;
2209 
2210 				default:
2211 					qdf_nbuf_free(nbuf);
2212 					dp_err_rl("RXDMA error %d",
2213 						  wbm_err_info.rxdma_err_code);
2214 				}
2215 			}
2216 		} else {
2217 			/* Should not come here */
2218 			qdf_assert(0);
2219 		}
2220 
2221 		if (peer)
2222 			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
2223 
2224 		nbuf = next;
2225 	}
2226 	return rx_bufs_used; /* Assume no scale factor for now */
2227 }
2228 
2229 /**
2230  * dup_desc_dbg() - dump and assert if duplicate rx desc found
2231  *
2232  * @soc: core DP main context
2233  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2234  * @rx_desc: void pointer to rx descriptor
2235  *
2236  * Return: void
2237  */
2238 static void dup_desc_dbg(struct dp_soc *soc,
2239 			 hal_rxdma_desc_t rxdma_dst_ring_desc,
2240 			 void *rx_desc)
2241 {
2242 	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
2243 	dp_rx_dump_info_and_assert(
2244 			soc,
2245 			soc->rx_rel_ring.hal_srng,
2246 			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
2247 			rx_desc);
2248 }
2249 
2250 /**
2251  * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
2252  *
2253  * @soc: core DP main context
2254  * @mac_id: mac id which is one of 3 mac_ids
2255  * @rxdma_dst_ring_desc: void pointer to the dst ring link descriptor buf addr info
2256  * @head: head of descs list to be freed
2257  * @tail: tail of descs list to be freed
2258  *
2259  * Return: number of msdu in MPDU to be popped
2260  */
2261 static inline uint32_t
2262 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
2263 	hal_rxdma_desc_t rxdma_dst_ring_desc,
2264 	union dp_rx_desc_list_elem_t **head,
2265 	union dp_rx_desc_list_elem_t **tail)
2266 {
2267 	void *rx_msdu_link_desc;
2268 	qdf_nbuf_t msdu;
2269 	qdf_nbuf_t last;
2270 	struct hal_rx_msdu_list msdu_list;
2271 	uint16_t num_msdus;
2272 	struct hal_buf_info buf_info;
2273 	uint32_t rx_bufs_used = 0;
2274 	uint32_t msdu_cnt;
2275 	uint32_t i;
2276 	uint8_t push_reason;
2277 	uint8_t rxdma_error_code = 0;
2278 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
2279 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2280 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
2281 	hal_rxdma_desc_t ring_desc;
2282 	struct rx_desc_pool *rx_desc_pool;
2283 
2284 	if (!pdev) {
2285 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2286 			  "pdev is null for mac_id = %d", mac_id);
2287 		return rx_bufs_used;
2288 	}
2289 
2290 	msdu = 0;
2291 
2292 	last = NULL;
2293 
2294 	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
2295 					&msdu_cnt);
2296 
2297 	push_reason =
2298 		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
2299 	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2300 		rxdma_error_code =
2301 			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
2302 	}
2303 
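	/* Walk the MSDU link descriptor chain of this MPDU, freeing the MSDU
	 * buffers (unless they belong to an NSS offloaded radio) and
	 * returning each link descriptor to WBM.
	 */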
2304 	do {
2305 		rx_msdu_link_desc =
2306 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
2307 
2308 		qdf_assert_always(rx_msdu_link_desc);
2309 
2310 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
2311 				     &msdu_list, &num_msdus);
2312 
2313 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
2314 			/* if the msdus belong to an NSS offloaded radio and
2315 			 * the rbm is not SW3_BM, then return the msdu_link
2316 			 * descriptor without freeing the msdus (nbufs). Let
2317 			 * these buffers be given to the NSS completion ring
2318 			 * for NSS to free them.
2319 			 * Otherwise, iterate through the msdu link desc list
2320 			 * and free each msdu in the list.
2321 			 */
2322 			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
2323 				wlan_cfg_get_dp_pdev_nss_enabled(
2324 							  pdev->wlan_cfg_ctx))
2325 				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
2326 			else {
2327 				for (i = 0; i < num_msdus; i++) {
2328 					struct dp_rx_desc *rx_desc =
2329 						dp_rx_cookie_2_va_rxdma_buf(soc,
2330 							msdu_list.sw_cookie[i]);
2331 					qdf_assert_always(rx_desc);
2332 					msdu = rx_desc->nbuf;
2333 					/*
2334 					 * this is an unlikely scenario
2335 					 * where the host is reaping
2336 					 * a descriptor which
2337 					 * it already reaped just a while ago
2338 					 * but is yet to replenish
2339 					 * it back to HW.
2340 					 * In this case host will dump
2341 					 * the last 128 descriptors
2342 					 * including the software descriptor
2343 					 * rx_desc and assert.
2344 					 */
2345 					ring_desc = rxdma_dst_ring_desc;
2346 					if (qdf_unlikely(!rx_desc->in_use)) {
2347 						dup_desc_dbg(soc,
2348 							     ring_desc,
2349 							     rx_desc);
2350 						continue;
2351 					}
2352 
2353 					rx_desc_pool = &soc->
2354 						rx_desc_buf[rx_desc->pool_id];
2355 					dp_ipa_handle_rx_buf_smmu_mapping(
2356 							soc, msdu,
2357 							rx_desc_pool->buf_size,
2358 							false);
2359 					qdf_nbuf_unmap_nbytes_single(
2360 						soc->osdev, msdu,
2361 						QDF_DMA_FROM_DEVICE,
2362 						rx_desc_pool->buf_size);
2363 					rx_desc->unmapped = 1;
2364 
2365 					QDF_TRACE(QDF_MODULE_ID_DP,
2366 						QDF_TRACE_LEVEL_DEBUG,
2367 						"[%s][%d] msdu_nbuf=%pK ",
2368 						__func__, __LINE__, msdu);
2369 
2370 					dp_rx_buffer_pool_nbuf_free(soc, msdu,
2371 							rx_desc->pool_id);
2372 					rx_bufs_used++;
2373 					dp_rx_add_to_free_desc_list(head,
2374 						tail, rx_desc);
2375 				}
2376 			}
2377 		} else {
2378 			rxdma_error_code = HAL_RXDMA_ERR_WAR;
2379 		}
2380 
2381 		/*
2382 		 * Store the current link buffer into the local structure
2383 		 * to be used for release purpose.
2384 		 */
2385 		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
2386 					     buf_info.sw_cookie, buf_info.rbm);
2387 
2388 		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
2389 		dp_rx_link_desc_return_by_addr(soc,
2390 					       (hal_buff_addrinfo_t)
2391 						rx_link_buf_info,
2392 						bm_action);
2393 	} while (buf_info.paddr);
2394 
2395 	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
2396 	if (pdev)
2397 		DP_STATS_INC(pdev, err.rxdma_error, 1);
2398 
2399 	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
2400 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2401 			"Packet received with Decrypt error");
2402 	}
2403 
2404 	return rx_bufs_used;
2405 }
2406 
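/**
 * dp_rxdma_err_process() - RxDMA error destination ring handler
 * @int_ctx: interrupt context
 * @soc: core DP main context
 * @mac_id: lmac id which identifies the error destination ring
 * @quota: upper limit of descriptors to reap in one call
 *
 * Pops erroneous MPDUs from the RXDMA error destination ring, frees their
 * MSDU buffers and replenishes them back to the refill ring.
 *
 * Return: number of rx buffers processed
 */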
2407 uint32_t
2408 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2409 		     uint32_t mac_id, uint32_t quota)
2410 {
2411 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2412 	hal_rxdma_desc_t rxdma_dst_ring_desc;
2413 	hal_soc_handle_t hal_soc;
2414 	void *err_dst_srng;
2415 	union dp_rx_desc_list_elem_t *head = NULL;
2416 	union dp_rx_desc_list_elem_t *tail = NULL;
2417 	struct dp_srng *dp_rxdma_srng;
2418 	struct rx_desc_pool *rx_desc_pool;
2419 	uint32_t work_done = 0;
2420 	uint32_t rx_bufs_used = 0;
2421 
2422 	if (!pdev)
2423 		return 0;
2424 
2425 	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;
2426 
2427 	if (!err_dst_srng) {
2428 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2429 			"%s %d : HAL RXDMA ERR DST Ring Init Failed -- %pK",
2431 			__func__, __LINE__, err_dst_srng);
2432 		return 0;
2433 	}
2434 
2435 	hal_soc = soc->hal_soc;
2436 
2437 	qdf_assert(hal_soc);
2438 
2439 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
2440 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2441 			"%s %d : HAL RXDMA ERR DST Ring access Failed -- %pK",
2443 			__func__, __LINE__, err_dst_srng);
2444 		return 0;
2445 	}
2446 
2447 	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
2448 		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
2449 
2450 			rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
2451 						rxdma_dst_ring_desc,
2452 						&head, &tail);
2453 	}
2454 
2455 	dp_srng_access_end(int_ctx, soc, err_dst_srng);
2456 
2457 	if (rx_bufs_used) {
2458 		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2459 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2460 		else
2461 			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
2462 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
2463 
2464 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2465 			rx_desc_pool, rx_bufs_used, &head, &tail);
2466 
2467 		work_done += rx_bufs_used;
2468 	}
2469 
2470 	return work_done;
2471 }
2472 
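/**
 * dp_wbm_int_err_mpdu_pop() - pop and free the MSDUs of an MPDU whose
 *			       link descriptor hit a WBM internal error
 * @soc: core DP main context
 * @mac_id: mac id / pool id of the rx descriptors
 * @rxdma_dst_ring_desc: void pointer to the link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus popped and freed
 */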
2473 static inline uint32_t
2474 dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
2475 			hal_rxdma_desc_t rxdma_dst_ring_desc,
2476 			union dp_rx_desc_list_elem_t **head,
2477 			union dp_rx_desc_list_elem_t **tail)
2478 {
2479 	void *rx_msdu_link_desc;
2480 	qdf_nbuf_t msdu;
2481 	qdf_nbuf_t last;
2482 	struct hal_rx_msdu_list msdu_list;
2483 	uint16_t num_msdus;
2484 	struct hal_buf_info buf_info;
2485 	uint32_t rx_bufs_used = 0, msdu_cnt, i;
2486 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
2487 
2488 	msdu = 0;
2489 
2490 	last = NULL;
2491 
2492 	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
2493 				     &msdu_cnt);
2494 
2495 	do {
2496 		rx_msdu_link_desc =
2497 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
2498 
2499 		if (!rx_msdu_link_desc) {
2500 			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
2501 			break;
2502 		}
2503 
2504 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
2505 				     &msdu_list, &num_msdus);
2506 
2507 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
2508 			for (i = 0; i < num_msdus; i++) {
2509 				struct dp_rx_desc *rx_desc =
2510 					dp_rx_cookie_2_va_rxdma_buf(
2511 							soc,
2512 							msdu_list.sw_cookie[i]);
2513 				qdf_assert_always(rx_desc);
2514 				msdu = rx_desc->nbuf;
2515 
2516 				qdf_nbuf_unmap_single(soc->osdev, msdu,
2517 						      QDF_DMA_FROM_DEVICE);
2518 
2519 				dp_rx_buffer_pool_nbuf_free(soc, msdu,
2520 							    rx_desc->pool_id);
2521 				rx_bufs_used++;
2522 				dp_rx_add_to_free_desc_list(head,
2523 							    tail, rx_desc);
2524 			}
2525 		}
2526 
2527 		/*
2528 		 * Store the current link buffer into the local structure
2529 		 * to be used for release purpose.
2530 		 */
2531 		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
2532 					     buf_info.sw_cookie, buf_info.rbm);
2533 
2534 		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
2535 		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
2536 					rx_link_buf_info,
2537 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
2538 	} while (buf_info.paddr);
2539 
2540 	return rx_bufs_used;
2541 }
2542 
2543 /**
2544  * dp_handle_wbm_internal_error() - handles wbm_internal_error case
2546  *
2547  * @soc: core DP main context
2548  * @hal_desc: hal descriptor
2549  * @buf_type: indicates if the buffer is of type link desc or msdu
2550  * Return: None
2551  *
2552  * wbm_internal_error is seen in the following scenarios:
2553  *
2554  * 1.  Null pointers detected in WBM_RELEASE_RING descriptors
2555  * 2.  Null pointers detected during delinking process
2556  *
2557  * Some null pointer cases:
2558  *
2559  * a. MSDU buffer pointer is NULL
2560  * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
2561  * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
2562  */
2563 void
2564 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
2565 			     uint32_t buf_type)
2566 {
2567 	struct hal_buf_info buf_info = {0};
2568 	struct dp_rx_desc *rx_desc = NULL;
2569 	struct rx_desc_pool *rx_desc_pool;
2570 	uint32_t rx_buf_cookie;
2571 	uint32_t rx_bufs_reaped = 0;
2572 	union dp_rx_desc_list_elem_t *head = NULL;
2573 	union dp_rx_desc_list_elem_t *tail = NULL;
2574 	uint8_t pool_id;
2575 
2576 	hal_rx_reo_buf_paddr_get(hal_desc, &buf_info);
2577 
2578 	if (!buf_info.paddr) {
2579 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
2580 		return;
2581 	}
2582 
2583 	rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(hal_desc);
2584 	pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(rx_buf_cookie);
2585 
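	/* A buffer-type release carries a single MSDU buffer to free; a
	 * desc-type release points to a link descriptor whose MSDU list
	 * must be popped and freed.
	 */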
2586 	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
2587 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
2588 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
2589 
2590 		if (rx_desc && rx_desc->nbuf) {
2591 			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2592 			dp_ipa_handle_rx_buf_smmu_mapping(
2593 						soc, rx_desc->nbuf,
2594 						rx_desc_pool->buf_size,
2595 						false);
2596 			qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
2597 						     QDF_DMA_FROM_DEVICE,
2598 						     rx_desc_pool->buf_size);
2599 			rx_desc->unmapped = 1;
2600 
2601 			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
2602 						    rx_desc->pool_id);
2603 			dp_rx_add_to_free_desc_list(&head,
2604 						    &tail,
2605 						    rx_desc);
2606 
2607 			rx_bufs_reaped++;
2608 		}
2609 	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
2610 		rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id,
2611 							 hal_desc,
2612 							 &head, &tail);
2613 	}
2614 
2615 	if (rx_bufs_reaped) {
2616 		struct rx_desc_pool *rx_desc_pool;
2617 		struct dp_srng *dp_rxdma_srng;
2618 
2619 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
2620 		dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id];
2621 		rx_desc_pool = &soc->rx_desc_buf[pool_id];
2622 
2623 		dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng,
2624 					rx_desc_pool,
2625 					rx_bufs_reaped,
2626 					&head, &tail);
2627 	}
2628 }
2629