xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/2.0/dp_rx_mon_2.0.c (revision 2888b71da71bce103343119fa1b31f4a0cee07c8)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "hal_be_hw_headers.h"
19 #include "dp_types.h"
20 #include "hal_be_rx.h"
21 #include "hal_api.h"
22 #include "qdf_trace.h"
23 #include "hal_be_api_mon.h"
24 #include "dp_internal.h"
25 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
26 #include <qdf_flex_mem.h>
27 #include "qdf_nbuf_frag.h"
28 #include "dp_mon.h"
29 #include <dp_rx_mon.h>
30 #include <dp_mon_2.0.h>
31 #include <dp_rx_mon.h>
32 #include <dp_rx_mon_2.0.h>
33 #include <dp_rx.h>
34 #include <dp_be.h>
35 #include <hal_be_api_mon.h>
36 #ifdef QCA_SUPPORT_LITE_MONITOR
37 #include "dp_lite_mon.h"
38 #endif
39 
40 #define F_MASK 0xFFFF
41 #define TEST_MASK 0xCBF
42 
43 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
44 	    defined(WLAN_SUPPORT_RX_FLOW_TAG)
45 
46 #ifdef QCA_TEST_MON_PF_TAGS_STATS
47 
48 static
49 void dp_rx_mon_print_tag_buf(uint8_t *buf, uint16_t test, uint16_t room)
50 {
51 	if (test != TEST_MASK)
52 		return;
53 	print_hex_dump(KERN_ERR, "TLV BUFFER: ", DUMP_PREFIX_NONE,
54 		       32, 2, buf, room, false);
55 }
56 
/* Write the PF-tag test marker (TEST_MASK) at the start of the nbuf
 * headroom and advance the caller's cursor past the marker field.
 *
 * @nbuf: in/out cursor into the nbuf headroom
 *
 * NOTE(review): nbuf_head is uint16_t *, so "+= sizeof(uint16_t)"
 * advances 2 elements, i.e. 4 BYTES — the non-test variant below
 * (uint8_t *) advances only 2 bytes. The 4-byte advance does match the
 * reader in dp_rx_mon_shift_pf_tag_in_headroom(), which uses the same
 * uint16_t-pointer idiom; confirm which offset layout is intended.
 */
static
void dp_rx_mon_enable_pf_test(uint16_t **nbuf)
{
	uint16_t *nbuf_head = *nbuf;

	*((uint16_t *)nbuf_head) = TEST_MASK;
	nbuf_head += sizeof(uint16_t);

	*nbuf = nbuf_head;
}
67 
68 #else
/* No-op stub: PF tag hex dump is compiled out when
 * QCA_TEST_MON_PF_TAGS_STATS is not defined.
 */
static
void dp_rx_mon_print_tag_buf(uint8_t *buf, uint16_t test, uint16_t room)
{
}
73 
/* Non-test variant: skip over the (unwritten) marker field, leaving
 * the bytes untouched, and advance the caller's cursor.
 *
 * @nbuf: in/out cursor into the nbuf headroom
 *
 * NOTE(review): this advances sizeof(uint16_t) = 2 BYTES (nbuf_head is
 * uint8_t *), whereas the QCA_TEST_MON_PF_TAGS_STATS variant advances
 * 4 bytes (uint16_t-pointer arithmetic). The two builds therefore place
 * the subsequent msdu-count field at different offsets — verify against
 * the reader in dp_rx_mon_shift_pf_tag_in_headroom().
 */
static
void dp_rx_mon_enable_pf_test(uint8_t **nbuf)
{
	uint8_t *nbuf_head = *nbuf;

	nbuf_head += sizeof(uint16_t);
	*nbuf = nbuf_head;
}
82 #endif
83 
/* Zero the first DP_RX_MON_TLV_ROOM bytes of the nbuf headroom so the
 * PF tag area starts clean before tags are written into it.
 */
static
void dp_rx_mon_set_zero(qdf_nbuf_t nbuf)
{
	qdf_mem_zero(qdf_nbuf_head(nbuf), DP_RX_MON_TLV_ROOM);
}
89 
90 /**
91  * dp_rx_mon_get_ppdu_info() - Get PPDU info from freelist
92  *
93  * @mon_pdev: monitor pdev
94  *
95  * Return: ppdu_info
96  */
97 static inline struct hal_rx_ppdu_info*
98 dp_rx_mon_get_ppdu_info(struct dp_mon_pdev *mon_pdev)
99 {
100 	struct dp_mon_pdev_be *mon_pdev_be =
101 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
102 	struct hal_rx_ppdu_info *ppdu_info, *temp_ppdu_info;
103 
104 	qdf_spin_lock_bh(&mon_pdev_be->ppdu_info_lock);
105 	TAILQ_FOREACH_SAFE(ppdu_info,
106 			   &mon_pdev_be->rx_mon_free_queue,
107 			   ppdu_list_elem,
108 			   temp_ppdu_info) {
109 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_free_queue,
110 			     ppdu_info, ppdu_free_list_elem);
111 
112 		if (ppdu_info) {
113 			mon_pdev_be->total_free_elem--;
114 			break;
115 		}
116 	}
117 	qdf_spin_unlock_bh(&mon_pdev_be->ppdu_info_lock);
118 
119 	return ppdu_info;
120 }
121 
122 static inline void
123 __dp_rx_mon_free_ppdu_info(struct dp_mon_pdev *mon_pdev,
124 			   struct hal_rx_ppdu_info *ppdu_info)
125 {
126 	struct dp_mon_pdev_be *mon_pdev_be =
127 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
128 
129 	qdf_spin_lock_bh(&mon_pdev_be->ppdu_info_lock);
130 	if (ppdu_info) {
131 		TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_free_queue, ppdu_info,
132 				  ppdu_free_list_elem);
133 		mon_pdev_be->total_free_elem++;
134 	}
135 	qdf_spin_unlock_bh(&mon_pdev_be->ppdu_info_lock);
136 }
137 
138 /**
139  * dp_rx_mon_nbuf_add_rx_frag () -  Add frag to SKB
140  *
141  * @nbuf: SKB to which frag is going to be added
142  * @frag: frag to be added to SKB
143  * @frag_len: frag length
144  * @offset: frag offset
145  * @buf_size: buffer size
146  * @frag_ref: take frag ref
147  *
148  * Return: QDF_STATUS
149  */
150 static inline QDF_STATUS
151 dp_rx_mon_nbuf_add_rx_frag(qdf_nbuf_t nbuf, qdf_frag_t *frag,
152 			   uint16_t frag_len, uint16_t offset,
153 			   uint16_t buf_size, bool frag_ref)
154 {
155 	uint8_t num_frags;
156 
157 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
158 	if (num_frags < QDF_NBUF_MAX_FRAGS) {
159 		qdf_nbuf_add_rx_frag(frag, nbuf,
160 				     offset,
161 				     frag_len,
162 				     buf_size,
163 				     frag_ref);
164 		return QDF_STATUS_SUCCESS;
165 	}
166 	return QDF_STATUS_E_FAILURE;
167 }
168 
/**
 * dp_mon_free_parent_nbuf() - Free parent SKB
 *
 * Accounts the free in rx_mon_stats.parent_buf_free before releasing
 * the buffer.
 *
 * @mon_pdev: monitor pdev
 * @nbuf: SKB to be freed
 *
 * Return: void
 */
void
dp_mon_free_parent_nbuf(struct dp_mon_pdev *mon_pdev,
			qdf_nbuf_t nbuf)
{
	mon_pdev->rx_mon_stats.parent_buf_free++;
	qdf_nbuf_free(nbuf);
}
184 
/* Wrap the PF tag data accumulated in the nbuf headroom into a TLV
 * (marker + total length + TLV header) and push it in front of the
 * payload so the consumer finds a self-describing record.
 *
 * @nbuf: MPDU whose headroom carries per-MSDU PF tags
 * @soc: DP soc handle (asserted non-NULL)
 * @ppdu_info: PPDU info (unused here; kept for API symmetry)
 *
 * Layout read from headroom: [test marker (u16)][msdu_count (u16)]
 * followed by msdu_count per-frag tag entries.
 */
void
dp_rx_mon_shift_pf_tag_in_headroom(qdf_nbuf_t nbuf, struct dp_soc *soc,
				   struct hal_rx_ppdu_info *ppdu_info)
{
	uint32_t test = 0;
	uint32_t room = 0;
	uint16_t msdu_count = 0;
	uint16_t *dp = NULL;
	uint16_t *hp = NULL;
	uint16_t tlv_data_len, total_tlv_len;
	uint32_t bytes = 0;

	if (qdf_unlikely(!soc)) {
		dp_mon_err("Soc[%pK] Null. Can't update pftag to nbuf headroom",
			   soc);
		qdf_assert_always(0);
	}

	/* nothing to do unless protocol/flow tagging is enabled */
	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
		return;

	if (qdf_unlikely(!nbuf))
		return;

	/* Headroom must be have enough space for tlv to be added*/
	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < DP_RX_MON_TLV_ROOM)) {
		dp_mon_err("Headroom[%d] < DP_RX_MON_TLV_ROOM[%d]",
			   qdf_nbuf_headroom(nbuf), DP_RX_MON_TLV_ROOM);
		return;
	}

	hp = (uint16_t *)qdf_nbuf_head(nbuf);
	test = *hp & F_MASK;
	/* NOTE(review): hp is uint16_t *, so this advances 2 elements
	 * (4 bytes), not 2 bytes — matches the test-build writer but not
	 * the non-test writer in dp_rx_mon_pf_tag_to_buf_headroom_2_0();
	 * confirm the intended msdu_count offset.
	 */
	hp += sizeof(uint16_t);
	msdu_count = *hp;

	if (qdf_unlikely(!msdu_count))
		return;

	dp_mon_debug("msdu_count: %d", msdu_count);

	/* total tag bytes = per-frag tag size * number of MSDUs */
	room = DP_RX_MON_PF_TAG_LEN_PER_FRAG * msdu_count;
	tlv_data_len = DP_RX_MON_TLV_MSDU_CNT + (room);
	total_tlv_len = DP_RX_MON_TLV_HDR_LEN + tlv_data_len;

	//1. store space for MARKER
	dp = (uint16_t *)qdf_nbuf_push_head(nbuf, sizeof(uint16_t));
	if (qdf_likely(dp)) {
		*(uint16_t *)dp = DP_RX_MON_TLV_HDR_MARKER;
		bytes += sizeof(uint16_t);
	}

	//2. store space for total size
	dp = (uint16_t *)qdf_nbuf_push_head(nbuf, sizeof(uint16_t));
	if (qdf_likely(dp)) {
		*(uint16_t *)dp = total_tlv_len;
		bytes += sizeof(uint16_t);
	}

	//create TLV
	bytes += dp_mon_rx_add_tlv(DP_RX_MON_TLV_PF_ID, tlv_data_len, hp, nbuf);

	dp_rx_mon_print_tag_buf(qdf_nbuf_data(nbuf), test, total_tlv_len);

	/* restore data pointer; the TLV remains in headroom for consumers */
	qdf_nbuf_pull_head(nbuf, bytes);

}
252 
253 void
254 dp_rx_mon_pf_tag_to_buf_headroom_2_0(void *nbuf,
255 				     struct hal_rx_ppdu_info *ppdu_info,
256 				     struct dp_pdev *pdev, struct dp_soc *soc)
257 {
258 	uint8_t *nbuf_head = NULL;
259 	uint8_t user_id;
260 	struct hal_rx_mon_msdu_info *msdu_info;
261 	uint16_t flow_id;
262 	uint16_t cce_metadata;
263 	uint16_t protocol_tag = 0;
264 	uint32_t flow_tag;
265 	uint8_t invalid_cce = 0, invalid_fse = 0;
266 
267 	if (qdf_unlikely(!soc)) {
268 		dp_mon_err("Soc[%pK] Null. Can't update pftag to nbuf headroom",
269 			   soc);
270 		qdf_assert_always(0);
271 	}
272 
273 	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
274 		return;
275 
276 	if (qdf_unlikely(!nbuf))
277 		return;
278 
279 	/* Headroom must be have enough space for tlv to be added*/
280 	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < DP_RX_MON_TLV_ROOM)) {
281 		dp_mon_err("Headroom[%d] < DP_RX_MON_TLV_ROOM[%d]",
282 			   qdf_nbuf_headroom(nbuf), DP_RX_MON_TLV_ROOM);
283 		return;
284 	}
285 
286 	user_id = ppdu_info->user_id;
287 	if (qdf_unlikely(user_id > HAL_MAX_UL_MU_USERS)) {
288 		dp_mon_debug("Invalid user_id user_id: %d pdev: %pK", user_id, pdev);
289 		return;
290 	}
291 
292 	msdu_info = &ppdu_info->msdu[user_id];
293 	flow_id = ppdu_info->rx_msdu_info[user_id].flow_idx;
294 	cce_metadata = ppdu_info->rx_msdu_info[user_id].cce_metadata -
295 		       RX_PROTOCOL_TAG_START_OFFSET;
296 
297 	flow_tag = ppdu_info->rx_msdu_info[user_id].fse_metadata & F_MASK;
298 
299 	if (qdf_unlikely((cce_metadata > RX_PROTOCOL_TAG_MAX - 1) ||
300 			 (cce_metadata > 0 && cce_metadata < 4))) {
301 		dp_mon_debug("Invalid user_id cce_metadata: %d pdev: %pK", cce_metadata, pdev);
302 		invalid_cce = 1;
303 		protocol_tag = cce_metadata;
304 	} else {
305 		protocol_tag = pdev->rx_proto_tag_map[cce_metadata].tag;
306 		dp_mon_rx_update_rx_protocol_tag_stats(pdev, cce_metadata);
307 	}
308 
309 	if (flow_tag > 0) {
310 		dp_mon_rx_update_rx_flow_tag_stats(pdev, flow_id);
311 	} else {
312 		dp_mon_debug("Invalid flow_tag: %d pdev: %pK ", flow_tag, pdev);
313 		invalid_fse = 1;
314 	}
315 
316 	if (invalid_cce && invalid_fse)
317 		return;
318 
319 	if (msdu_info->msdu_index >= DP_RX_MON_MAX_MSDU) {
320 		dp_mon_err("msdu_index causes overflow in headroom");
321 		return;
322 	}
323 
324 	dp_mon_debug("protocol_tag: %d, cce_metadata: %d, flow_tag: %d",
325 		     protocol_tag, cce_metadata, flow_tag);
326 
327 	dp_mon_debug("msdu_index: %d", msdu_info->msdu_index);
328 
329 
330 	nbuf_head = qdf_nbuf_head(nbuf);
331 	dp_rx_mon_enable_pf_test(&nbuf_head);
332 
333 	*((uint16_t *)nbuf_head) = msdu_info->msdu_index + 1;
334 	nbuf_head += DP_RX_MON_TLV_MSDU_CNT;
335 
336 	nbuf_head += ((msdu_info->msdu_index) * DP_RX_MON_PF_TAG_SIZE);
337 	if (!invalid_cce)
338 		*((uint16_t *)nbuf_head) = protocol_tag;
339 	nbuf_head += sizeof(uint16_t);
340 	if (!invalid_fse)
341 		*((uint16_t *)nbuf_head) = flow_tag;
342 }
343 
344 #else
345 
/* No-op stub when protocol/flow tagging support is compiled out. */
static
void dp_rx_mon_set_zero(qdf_nbuf_t nbuf)
{
}
350 
/* No-op stub when protocol/flow tagging support is compiled out. */
static
void dp_rx_mon_shift_pf_tag_in_headroom(qdf_nbuf_t nbuf, struct dp_soc *soc,
					struct hal_rx_ppdu_info *ppdu_info)
{
}
356 
/* No-op stub when protocol/flow tagging support is compiled out. */
static
void dp_rx_mon_pf_tag_to_buf_headroom_2_0(void *nbuf,
					  struct hal_rx_ppdu_info *ppdu_info,
					  struct dp_pdev *pdev,
					  struct dp_soc *soc)
{
}
364 
365 #endif
366 
367 /**
368  * dp_rx_mon_free_ppdu_info () - Free PPDU info
369  * @pdev: DP pdev
370  * @ppdu_info: PPDU info
371  *
372  * Return: Void
373  */
374 static void
375 dp_rx_mon_free_ppdu_info(struct dp_pdev *pdev,
376 			 struct hal_rx_ppdu_info *ppdu_info)
377 {
378 	uint8_t user;
379 	struct dp_mon_pdev *mon_pdev;
380 
381 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
382 	for (user = 0; user < ppdu_info->com_info.num_users; user++) {
383 		uint16_t mpdu_count  = ppdu_info->mpdu_count[user];
384 		uint16_t mpdu_idx;
385 		qdf_nbuf_t mpdu;
386 
387 		for (mpdu_idx = 0; mpdu_idx < mpdu_count; mpdu_idx++) {
388 			mpdu = qdf_nbuf_queue_remove(&ppdu_info->mpdu_q[user]);
389 
390 			if (!mpdu)
391 				continue;
392 			dp_mon_free_parent_nbuf(mon_pdev, mpdu);
393 		}
394 	}
395 	__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
396 }
397 
398 void dp_rx_mon_drain_wq(struct dp_pdev *pdev)
399 {
400 	struct dp_mon_pdev *mon_pdev;
401 	struct hal_rx_ppdu_info *ppdu_info = NULL;
402 	struct hal_rx_ppdu_info *temp_ppdu_info = NULL;
403 	struct dp_mon_pdev_be *mon_pdev_be;
404 
405 	if (qdf_unlikely(!pdev)) {
406 		dp_mon_debug("Pdev is NULL");
407 		return;
408 	}
409 
410 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
411 	if (qdf_unlikely(!mon_pdev)) {
412 		dp_mon_debug("monitor pdev is NULL");
413 		return;
414 	}
415 
416 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
417 
418 	qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
419 	TAILQ_FOREACH_SAFE(ppdu_info,
420 			   &mon_pdev_be->rx_mon_queue,
421 			   ppdu_list_elem,
422 			   temp_ppdu_info) {
423 		mon_pdev_be->rx_mon_queue_depth--;
424 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_queue,
425 			     ppdu_info, ppdu_list_elem);
426 
427 		__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
428 	}
429 	qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
430 }
431 
432 /**
433  * dp_rx_mon_deliver_mpdu() - Deliver MPDU to osif layer
434  *
435  * @mon_pdev: monitor pdev
436  * @mpdu: MPDU nbuf
437  * @status: monitor status
438  *
439  * Return: QDF_STATUS
440  */
441 static QDF_STATUS
442 dp_rx_mon_deliver_mpdu(struct dp_mon_pdev *mon_pdev,
443 		       qdf_nbuf_t mpdu,
444 		       struct mon_rx_status *rx_status)
445 {
446 	qdf_nbuf_t nbuf;
447 
448 	if (mon_pdev->mvdev && mon_pdev->mvdev->monitor_vdev->osif_rx_mon) {
449 		mon_pdev->rx_mon_stats.mpdus_buf_to_stack++;
450 		nbuf = qdf_nbuf_get_ext_list(mpdu);
451 
452 		while (nbuf) {
453 			mon_pdev->rx_mon_stats.mpdus_buf_to_stack++;
454 			nbuf = nbuf->next;
455 		}
456 		mon_pdev->mvdev->monitor_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
457 							   mpdu,
458 							   rx_status);
459 	} else {
460 		return QDF_STATUS_E_FAILURE;
461 	}
462 
463 	return QDF_STATUS_SUCCESS;
464 }
465 
/**
 * dp_rx_mon_process_ppdu_info() - Process PPDU info
 * @pdev: DP pdev
 * @ppdu_info: PPDU info
 *
 * For every user and every queued MPDU: either hand the MPDU to the
 * lite-monitor path, or (full monitor) restitch it, apply the radiotap
 * header and PF tag TLV, and deliver to the osif layer.  Any MPDU that
 * fails a step is freed and accounted.
 *
 * Return: Void
 */
static void
dp_rx_mon_process_ppdu_info(struct dp_pdev *pdev,
			    struct hal_rx_ppdu_info *ppdu_info)
{
	struct dp_mon_pdev *mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
	uint8_t user;

	if (!ppdu_info)
		return;

	mon_pdev->ppdu_info.rx_status.chan_noise_floor = pdev->chan_noise_floor;

	for (user = 0; user < ppdu_info->com_info.num_users; user++) {
		uint16_t mpdu_count  = ppdu_info->mpdu_count[user];
		uint16_t mpdu_idx;
		qdf_nbuf_t mpdu;
		struct hal_rx_mon_mpdu_info *mpdu_meta;
		QDF_STATUS status;

		for (mpdu_idx = 0; mpdu_idx < mpdu_count; mpdu_idx++) {
			mpdu = qdf_nbuf_queue_remove(&ppdu_info->mpdu_q[user]);

			if (!mpdu)
				continue;

			/* MPDU metadata lives at the start of the nbuf data */
			mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(mpdu);

			if (dp_lite_mon_is_rx_enabled(mon_pdev)) {
				/* lite monitor owns filtering/delivery */
				status = dp_lite_mon_rx_mpdu_process(pdev, ppdu_info,
								     mpdu, mpdu_idx, user);
				if (status != QDF_STATUS_SUCCESS) {
					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
					continue;
				}
			} else {
				/* full monitor: only complete, untruncated
				 * packets are restitched and delivered
				 */
				if (mpdu_meta->full_pkt) {
					if (qdf_unlikely(mpdu_meta->truncated)) {
						dp_mon_free_parent_nbuf(mon_pdev, mpdu);
						continue;
					}

					status = dp_rx_mon_handle_full_mon(pdev,
									   ppdu_info, mpdu);
					if (status != QDF_STATUS_SUCCESS) {
						dp_mon_free_parent_nbuf(mon_pdev, mpdu);
						continue;
					}
				} else {
					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
					continue;
				}

				/* reset mpdu metadata and apply radiotap header over MPDU */
				qdf_mem_zero(mpdu_meta, sizeof(struct hal_rx_mon_mpdu_info));
				if (!qdf_nbuf_update_radiotap(&ppdu_info->rx_status,
							      mpdu,
							      qdf_nbuf_headroom(mpdu))) {
					dp_mon_err("failed to update radiotap pdev: %pK",
						   pdev);
				}

				dp_rx_mon_shift_pf_tag_in_headroom(mpdu,
								   pdev->soc,
								   ppdu_info);

				/* Deliver MPDU to osif layer */
				status = dp_rx_mon_deliver_mpdu(mon_pdev,
								mpdu,
								&ppdu_info->rx_status);
				if (status != QDF_STATUS_SUCCESS)
					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
			}
		}
	}
}
548 
549 /**
550  * dp_rx_mon_process_ppdu ()-  Deferred monitor processing
551  * This workqueue API handles:
552  * a. Full monitor
553  * b. Lite monitor
554  *
555  * @context: Opaque work context
556  *
557  * Return: none
558  */
559 void dp_rx_mon_process_ppdu(void *context)
560 {
561 	struct dp_pdev *pdev = (struct dp_pdev *)context;
562 	struct dp_mon_pdev *mon_pdev;
563 	struct hal_rx_ppdu_info *ppdu_info = NULL;
564 	struct hal_rx_ppdu_info *temp_ppdu_info = NULL;
565 	struct dp_mon_pdev_be *mon_pdev_be;
566 
567 	if (qdf_unlikely(!pdev)) {
568 		dp_mon_debug("Pdev is NULL");
569 		return;
570 	}
571 
572 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
573 	if (qdf_unlikely(!mon_pdev)) {
574 		dp_mon_debug("monitor pdev is NULL");
575 		return;
576 	}
577 
578 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
579 
580 	qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
581 	TAILQ_FOREACH_SAFE(ppdu_info,
582 			   &mon_pdev_be->rx_mon_queue,
583 			   ppdu_list_elem, temp_ppdu_info) {
584 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_queue,
585 			     ppdu_info, ppdu_list_elem);
586 
587 		mon_pdev_be->rx_mon_queue_depth--;
588 		dp_rx_mon_process_ppdu_info(pdev, ppdu_info);
589 		__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
590 	}
591 	qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
592 }
593 
594 /**
595  * dp_rx_mon_add_ppdu_info_to_wq () - Add PPDU info to workqueue
596  *
597  * @mon_pdev: monitor pdev
598  * @ppdu_info: ppdu info to be added to workqueue
599  *
600  * Return: SUCCESS or FAILIRE
601  */
602 
603 static QDF_STATUS
604 dp_rx_mon_add_ppdu_info_to_wq(struct dp_pdev *pdev,
605 			      struct hal_rx_ppdu_info *ppdu_info)
606 {
607 	struct dp_mon_pdev *mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
608 	struct dp_mon_pdev_be *mon_pdev_be =
609 		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
610 
611 	/* Full monitor or lite monitor mode is not enabled, return */
612 	if (!mon_pdev->monitor_configured &&
613 	    !dp_lite_mon_is_rx_enabled(mon_pdev))
614 		return QDF_STATUS_E_FAILURE;
615 
616 	if (qdf_likely(ppdu_info)) {
617 		if (mon_pdev_be->rx_mon_queue_depth < DP_RX_MON_WQ_THRESHOLD) {
618 			qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
619 			TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_queue,
620 					  ppdu_info, ppdu_list_elem);
621 			mon_pdev_be->rx_mon_queue_depth++;
622 			mon_pdev->rx_mon_stats.total_ppdu_info_enq++;
623 		} else {
624 			mon_pdev->rx_mon_stats.total_ppdu_info_drop++;
625 			dp_rx_mon_free_ppdu_info(pdev, ppdu_info);
626 		}
627 		qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
628 
629 		if (mon_pdev_be->rx_mon_queue_depth > DP_MON_QUEUE_DEPTH_MAX) {
630 			qdf_queue_work(0, mon_pdev_be->rx_mon_workqueue,
631 				       &mon_pdev_be->rx_mon_work);
632 		}
633 	}
634 	return QDF_STATUS_SUCCESS;
635 }
636 
/**
 * dp_rx_mon_handle_full_mon() - Restitch a monitor-destination MPDU
 * in place from its rx_hdr/rx_msdu frag pairs into a deliverable frame.
 *
 * @pdev: DP pdev
 * @ppdu_info: PPDU info (unused in the body; kept for API symmetry)
 * @mpdu: MPDU nbuf whose frags alternate hdr/payload (see diagram below)
 *
 * For raw decap only the FCS is trimmed.  For non-raw decap, frag
 * offsets/sizes are adjusted so that: the first frag exposes just the
 * 802.11 header + LLC/SNAP, each payload frag drops its decap header,
 * and 4-byte A-MSDU alignment padding is zero-filled where the buffer
 * has room.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_mon_handle_full_mon(struct dp_pdev *pdev,
			  struct hal_rx_ppdu_info *ppdu_info,
			  qdf_nbuf_t mpdu)
{
	uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len,
		 mpdu_buf_len, decap_hdr_pull_bytes, dir,
		 is_amsdu, amsdu_pad, frag_size, tot_msdu_len;
	struct hal_rx_mon_mpdu_info *mpdu_meta;
	struct hal_rx_mon_msdu_info *msdu_meta;
	char *hdr_desc;
	uint8_t num_frags, frag_iter, l2_hdr_offset;
	struct ieee80211_frame *wh;
	struct ieee80211_qoscntl *qos;
	void *hdr_frag_addr;
	uint32_t hdr_frag_size, frag_page_offset, pad_byte_pholder,
		 msdu_len;
	qdf_nbuf_t head_msdu, msdu_cur;
	void *frag_addr;
	bool prev_msdu_end_received = false;
	bool is_nbuf_head = true;

	/***************************************************************************
	 *********************** Non-raw packet ************************************
	 ---------------------------------------------------------------------------
	 |      | frag-0   | frag-1    | frag - 2 | frag - 3  | frag - 4 | frag - 5  |
	 | skb  | rx_hdr-1 | rx_msdu-1 | rx_hdr-2 | rx_msdu-2 | rx_hdr-3 | rx-msdu-3 |
	 ---------------------------------------------------------------------------
	 **************************************************************************/

	if (!mpdu) {
		dp_mon_debug("nbuf is NULL, return");
		return QDF_STATUS_E_FAILURE;
	}

	head_msdu = mpdu;

	mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(mpdu);

	/* Raw decap: frame already wire-format, just strip the FCS from
	 * the last frag and we are done.
	 */
	if (mpdu_meta->decap_type == HAL_HW_RX_DECAP_FORMAT_RAW) {
		qdf_nbuf_trim_add_frag_size(mpdu,
					    qdf_nbuf_get_nr_frags(mpdu) - 1,
					    -HAL_RX_FCS_LEN, 0);
		return QDF_STATUS_SUCCESS;
	}

	num_frags = qdf_nbuf_get_nr_frags(mpdu);
	if (qdf_unlikely(num_frags < DP_MON_MIN_FRAGS_FOR_RESTITCH)) {
		dp_mon_debug("not enough frags(%d) for restitch", num_frags);
		return QDF_STATUS_E_FAILURE;
	}

	l2_hdr_offset = DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;

	/* hdr_desc points to 80211 hdr */
	hdr_desc = qdf_nbuf_get_frag_addr(mpdu, 0);

	/* Calculate Base header size */
	wifi_hdr_len = sizeof(struct ieee80211_frame);
	wh = (struct ieee80211_frame *)hdr_desc;

	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;

	/* 4-address (WDS) frames carry an extra 6-byte address */
	if (dir == IEEE80211_FC1_DIR_DSTODS)
		wifi_hdr_len += 6;

	is_amsdu = 0;
	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
		qos = (struct ieee80211_qoscntl *)
			(hdr_desc + wifi_hdr_len);
		wifi_hdr_len += 2;

		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
	}

	/*Calculate security header length based on 'Protected'
	 * and 'EXT_IV' flag
	 */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		char *iv = (char *)wh + wifi_hdr_len;

		if (iv[3] & KEY_EXTIV)
			sec_hdr_len = 8;
		else
			sec_hdr_len = 4;
	} else {
		sec_hdr_len = 0;
	}
	wifi_hdr_len += sec_hdr_len;

	/* MSDU related stuff LLC - AMSDU subframe header etc */
	msdu_llc_len = is_amsdu ? (DP_RX_MON_DECAP_HDR_SIZE +
				   DP_RX_MON_LLC_SIZE +
				   DP_RX_MON_SNAP_SIZE) :
				   (DP_RX_MON_LLC_SIZE + DP_RX_MON_SNAP_SIZE);

	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;

	/* "Decap" header to remove from MSDU buffer */
	decap_hdr_pull_bytes = DP_RX_MON_DECAP_HDR_SIZE;

	amsdu_pad = 0;
	tot_msdu_len = 0;
	/* NOTE(review): duplicate assignment, harmless */
	tot_msdu_len = 0;

	/*
	 * Update protocol and flow tag for MSDU
	 * update frag index in ctx_idx field.
	 * Reset head pointer data of nbuf before updating.
	 */
	QDF_NBUF_CB_RX_CTX_ID(mpdu) = 0;

	/* Construct destination address */
	hdr_frag_addr = qdf_nbuf_get_frag_addr(mpdu, 0);
	hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(mpdu, 0);

	/* Adjust page frag offset to point to 802.11 header */
	qdf_nbuf_trim_add_frag_size(head_msdu, 0, -(hdr_frag_size - mpdu_buf_len), 0);

	/* per-MSDU metadata sits just before the packet data in frag 1 */
	msdu_meta = (struct hal_rx_mon_msdu_info *)(((void *)qdf_nbuf_get_frag_addr(mpdu, 1)) - (DP_RX_MON_PACKET_OFFSET + DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE));

	msdu_len = msdu_meta->msdu_len;

	/* Adjust page frag offset to appropriate after decap header */
	frag_page_offset =
		decap_hdr_pull_bytes;
	qdf_nbuf_move_frag_page_offset(head_msdu, 1, frag_page_offset);

	frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 1);
	pad_byte_pholder =
		RX_MONITOR_BUFFER_SIZE - (frag_size + DP_RX_MON_PACKET_OFFSET + DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);

	if (msdu_meta->first_buffer && msdu_meta->last_buffer) {
		/* MSDU with single bufffer: pad to 4-byte boundary if the
		 * buffer has spare room, zero-filling the pad bytes
		 */
		amsdu_pad = frag_size & 0x3;
		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
		if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
			char *frag_addr_temp;

			qdf_nbuf_trim_add_frag_size(mpdu, 1, amsdu_pad, 0);
			frag_addr_temp =
				(char *)qdf_nbuf_get_frag_addr(mpdu, 1);
			frag_addr_temp = (frag_addr_temp +
					  qdf_nbuf_get_frag_size_by_idx(mpdu, 1)) -
				amsdu_pad;
			qdf_mem_zero(frag_addr_temp, amsdu_pad);
			amsdu_pad = 0;
		}
	} else {
		/* MSDU continues into later frags: defer padding */
		tot_msdu_len = frag_size;
		amsdu_pad = 0;
	}

	pad_byte_pholder = 0;
	/* walk the remaining frags across the head nbuf and its chained
	 * ext-list/queued nbufs
	 */
	for (msdu_cur = mpdu; msdu_cur;) {
		/* frag_iter will start from 0 for second skb onwards */
		if (msdu_cur == mpdu)
			frag_iter = 2;
		else
			frag_iter = 0;

		num_frags = qdf_nbuf_get_nr_frags(msdu_cur);

		for (; frag_iter < num_frags; frag_iter++) {
			/* Construct destination address
			 *  ----------------------------------------------------------
			 * |            | L2_HDR_PAD   |   Decap HDR | Payload | Pad  |
			 * |            | (First buffer)             |         |      |
			 * |            |                            /        /       |
			 * |            >Frag address points here   /        /        |
			 * |            \                          /        /         |
			 * |             \ This bytes needs to    /        /          |
			 * |              \  removed to frame pkt/        /           |
			 * |               ----------------------        /            |
			 * |                                     |     /     Add      |
			 * |                                     |    /   amsdu pad   |
			 * |   LLC HDR will be added here      <-|    |   Byte for    |
			 * |        |                            |    |   last frame  |
			 * |         >Dest addr will point       |    |    if space   |
			 * |            somewhere in this area   |    |    available  |
			 * |  And amsdu_pad will be created if   |    |               |
			 * | dint get added in last buffer       |    |               |
			 * |       (First Buffer)                |    |               |
			 *  ----------------------------------------------------------
			 */
			/* If previous msdu end has received, modify next frag's offset to point to LLC */
			if (prev_msdu_end_received) {
				hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter);
				/* Adjust page frag offset to point to llc/snap header */
				qdf_nbuf_trim_add_frag_size(msdu_cur, frag_iter, -(hdr_frag_size - msdu_llc_len), 0);
				prev_msdu_end_received = false;
				continue;
			}

			/* metadata precedes the packet data in each buffer */
			frag_addr =
				qdf_nbuf_get_frag_addr(msdu_cur, frag_iter) -
						       (DP_RX_MON_PACKET_OFFSET +
						       DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);
			msdu_meta = (struct hal_rx_mon_msdu_info *)frag_addr;

			/*
			 * Update protocol and flow tag for MSDU
			 * update frag index in ctx_idx field
			 */
			QDF_NBUF_CB_RX_CTX_ID(msdu_cur) = frag_iter;

			frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur,
					frag_iter);

			/* If Middle buffer, dont add any header */
			if ((!msdu_meta->first_buffer) &&
					(!msdu_meta->last_buffer)) {
				tot_msdu_len += frag_size;
				amsdu_pad = 0;
				pad_byte_pholder = 0;
				continue;
			}

			/* Calculate if current buffer has placeholder
			 * to accommodate amsdu pad byte
			 */
			pad_byte_pholder =
				RX_MONITOR_BUFFER_SIZE - (frag_size + (DP_RX_MON_PACKET_OFFSET +
							  DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE));
			/*
			 * We will come here only only three condition:
			 * 1. Msdu with single Buffer
			 * 2. First buffer in case MSDU is spread in multiple
			 *    buffer
			 * 3. Last buffer in case MSDU is spread in multiple
			 *    buffer
			 *
			 *         First buffER | Last buffer
			 * Case 1:      1       |     1
			 * Case 2:      1       |     0
			 * Case 3:      0       |     1
			 *
			 * In 3rd case only l2_hdr_padding byte will be Zero and
			 * in other case, It will be 2 Bytes.
			 */
			if (msdu_meta->first_buffer)
				l2_hdr_offset =
					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
			else
				l2_hdr_offset = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;

			if (msdu_meta->first_buffer) {
				/* Adjust page frag offset to point to 802.11 header */
				hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter-1);
				qdf_nbuf_trim_add_frag_size(msdu_cur, frag_iter - 1, -(hdr_frag_size - (msdu_llc_len + amsdu_pad)), 0);

				/* Adjust page frag offset to appropriate after decap header */
				frag_page_offset =
					(decap_hdr_pull_bytes + l2_hdr_offset);
				if (frag_size > (decap_hdr_pull_bytes + l2_hdr_offset)) {
					qdf_nbuf_move_frag_page_offset(msdu_cur, frag_iter, frag_page_offset);
					frag_size = frag_size - (l2_hdr_offset + decap_hdr_pull_bytes);
				}


				/*
				 * Calculate new page offset and create hole
				 * if amsdu_pad required.
				 */
				tot_msdu_len = frag_size;
				/*
				 * No amsdu padding required for first frame of
				 * continuation buffer
				 */
				if (!msdu_meta->last_buffer) {
					amsdu_pad = 0;
					continue;
				}
			} else {
				tot_msdu_len += frag_size;
			}

			/* Will reach to this place in only two case:
			 * 1. Single buffer MSDU
			 * 2. Last buffer of MSDU in case of multiple buf MSDU
			 */

			/* This flag is used to identify msdu boundry */
			prev_msdu_end_received = true;
			/* Check size of buffer if amsdu padding required */
			amsdu_pad = tot_msdu_len & 0x3;
			amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;

			/* Create placeholder if current bufer can
			 * accommodate padding.
			 */
			if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
				char *frag_addr_temp;

				qdf_nbuf_trim_add_frag_size(msdu_cur,
						frag_iter,
						amsdu_pad, 0);
				frag_addr_temp = (char *)qdf_nbuf_get_frag_addr(msdu_cur,
						frag_iter);
				frag_addr_temp = (frag_addr_temp +
						qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter)) -
					amsdu_pad;
				qdf_mem_zero(frag_addr_temp, amsdu_pad);
				amsdu_pad = 0;
			}

			/* reset tot_msdu_len */
			tot_msdu_len = 0;
		}
		/* first hop uses the ext list; thereafter follow the queue */
		if (is_nbuf_head) {
			msdu_cur = qdf_nbuf_get_ext_list(msdu_cur);
			is_nbuf_head = false;
		} else {
			msdu_cur = qdf_nbuf_queue_next(msdu_cur);
		}
	}

	return QDF_STATUS_SUCCESS;
}
956 
957 /**
958  * dp_rx_mon_flush_status_buf_queue () - Flush status buffer queue
959  *
960  * @pdev: DP pdev handle
961  *
962  *Return: void
963  */
964 static inline void
965 dp_rx_mon_flush_status_buf_queue(struct dp_pdev *pdev)
966 {
967 	struct dp_soc *soc = pdev->soc;
968 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
969 	struct dp_mon_pdev_be *mon_pdev_be =
970 		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
971 	union dp_mon_desc_list_elem_t *desc_list = NULL;
972 	union dp_mon_desc_list_elem_t *tail = NULL;
973 	struct dp_mon_desc *mon_desc;
974 	uint8_t idx;
975 	void *buf;
976 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
977 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
978 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
979 	uint8_t work_done = 0;
980 	uint16_t status_buf_count;
981 
982 	if (!mon_pdev_be->desc_count) {
983 		dp_mon_info("no of status buffer count is zero: %pK", pdev);
984 		return;
985 	}
986 
987 	status_buf_count = mon_pdev_be->desc_count;
988 	for (idx = 0; idx < status_buf_count; idx++) {
989 		mon_desc = mon_pdev_be->status[idx];
990 		if (!mon_desc) {
991 			qdf_assert_always(0);
992 			return;
993 		}
994 
995 		buf = mon_desc->buf_addr;
996 
997 		dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
998 		work_done++;
999 
1000 		/* set status buffer pointer to NULL */
1001 		mon_pdev_be->status[idx] = NULL;
1002 		mon_pdev_be->desc_count--;
1003 
1004 		qdf_frag_free(buf);
1005 		DP_STATS_INC(mon_soc, frag_free, 1);
1006 	}
1007 
1008 	if (work_done) {
1009 		mon_pdev->rx_mon_stats.mon_rx_bufs_replenished_dest +=
1010 			work_done;
1011 		dp_mon_buffers_replenish(soc, &soc->rxdma_mon_buf_ring[0],
1012 					 rx_mon_desc_pool,
1013 					 work_done,
1014 					 &desc_list, &tail, NULL);
1015 	}
1016 }
1017 
1018 /**
1019  * dp_rx_mon_handle_flush_n_trucated_ppdu () - Handle flush and truncated ppdu
1020  *
1021  * @soc: DP soc handle
1022  * @pdev: pdev handle
1023  * @mon_desc: mon sw desc
1024  */
1025 static inline void
1026 dp_rx_mon_handle_flush_n_trucated_ppdu(struct dp_soc *soc,
1027 				       struct dp_pdev *pdev,
1028 				       struct dp_mon_desc *mon_desc)
1029 {
1030 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1031 	union dp_mon_desc_list_elem_t *tail = NULL;
1032 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1033 	struct dp_mon_soc_be *mon_soc_be =
1034 			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1035 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1036 	uint16_t work_done;
1037 
1038 	/* Flush status buffers in queue */
1039 	dp_rx_mon_flush_status_buf_queue(pdev);
1040 	qdf_frag_free(mon_desc->buf_addr);
1041 	DP_STATS_INC(mon_soc, frag_free, 1);
1042 	dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
1043 	work_done = 1;
1044 	dp_mon_buffers_replenish(soc, &soc->rxdma_mon_buf_ring[0],
1045 				 rx_mon_desc_pool,
1046 				 work_done,
1047 				 &desc_list, &tail, NULL);
1048 }
1049 
/**
 * dp_rx_mon_process_tlv_status() - Act on one parsed status TLV for a PPDU
 *
 * Builds up the per-user MPDU nbuf queue in @ppdu_info as TLVs arrive:
 * RX_HEADER TLVs attach header frags, MON_BUF_ADDR TLVs attach packet
 * payload frags, and the MSDU/MPDU start/end TLVs propagate metadata into
 * the frag headroom. Unrecognized @tlv_status values are ignored.
 *
 * @pdev: DP pdev handle
 * @ppdu_info: HAL PPDU info being populated for the PPDU in progress
 * @status_frag: status buffer fragment the TLV was parsed from
 * @tlv_status: TLV type returned by hal_rx_status_get_tlv_info()
 * @desc_list: [in/out] chain of mon descriptors freed here (for replenish)
 * @tail: [in/out] tail of @desc_list
 *
 * Return: number of packet buffers reaped by this call (0 or 1)
 */
uint8_t dp_rx_mon_process_tlv_status(struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info,
				     void *status_frag,
				     uint16_t tlv_status,
				     union dp_mon_desc_list_elem_t **desc_list,
				     union dp_mon_desc_list_elem_t **tail)
{
	struct dp_soc *soc  = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	qdf_nbuf_t nbuf, tmp_nbuf;
	qdf_frag_t addr;
	uint8_t user_id = ppdu_info->user_id;
	uint8_t mpdu_idx = ppdu_info->mpdu_count[user_id];	/* used only in debug logs */
	uint16_t num_frags;
	uint8_t num_buf_reaped = 0;
	QDF_STATUS status;

	/* Nothing to do unless full monitor or lite monitor RX is active */
	if (!mon_pdev->monitor_configured &&
	    !dp_lite_mon_is_rx_enabled(mon_pdev)) {
		return num_buf_reaped;
	}

	switch (tlv_status) {
	case HAL_TLV_STATUS_HEADER: {
		/* If this is first RX_HEADER for MPDU, allocate skb
		 * else add frag to already allocated skb
		 */

		if (!ppdu_info->mpdu_info[user_id].mpdu_start_received) {

			nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
					      DP_RX_MON_TLV_ROOM +
					      DP_RX_MON_MAX_RADIO_TAP_HDR,
					      DP_RX_MON_TLV_ROOM +
					      DP_RX_MON_MAX_RADIO_TAP_HDR,
					      4, FALSE);

			/* All msdus are mapped via nr_frags, so the parent
			 * nbuf carries no linear data of its own
			 */
			if (qdf_unlikely(!nbuf)) {
				dp_mon_err("malloc failed pdev: %pK ", pdev);
				return num_buf_reaped;
			}

			mon_pdev->rx_mon_stats.parent_buf_alloc++;

			dp_rx_mon_set_zero(nbuf);

			qdf_nbuf_set_next(nbuf, NULL);

			qdf_nbuf_queue_add(&ppdu_info->mpdu_q[user_id], nbuf);

			/* Attach the RX_HEADER content as the first frag.
			 * NOTE(review): the "+ 4" offset presumably skips the
			 * TLV header preceding ppdu_info->data — confirm
			 * against the status TLV layout.
			 */
			status = dp_rx_mon_nbuf_add_rx_frag(nbuf, status_frag,
							    ppdu_info->hdr_len - DP_RX_MON_RX_HDR_OFFSET,
							    ppdu_info->data - (unsigned char *)status_frag + 4,
							    DP_MON_DATA_BUFFER_SIZE, true);
			if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
				dp_mon_err("num_frags exceeding MAX frags");
				qdf_assert_always(0);
			}
			ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = true;
			ppdu_info->mpdu_info[user_id].first_rx_hdr_rcvd = true;
			/* initialize decap type to invalid, this will be set to appropriate
			 * value once the mpdu start tlv is received
			 */
			ppdu_info->mpdu_info[user_id].decap_type = DP_MON_DECAP_FORMAT_INVALID;
		} else {
			/* RAW frames carry headers in the packet buffer;
			 * no separate header frag is needed
			 */
			if (ppdu_info->mpdu_info[user_id].decap_type ==
					HAL_HW_RX_DECAP_FORMAT_RAW) {
				return num_buf_reaped;
			}

			if (dp_lite_mon_is_rx_enabled(mon_pdev) &&
			    !dp_lite_mon_is_level_msdu(mon_pdev))
				break;

			nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
			if (qdf_unlikely(!nbuf)) {
				dp_mon_debug("nbuf is NULL");
				return num_buf_reaped;
			}

			/* Reuse a chained nbuf that still has free frag
			 * slots, or allocate a new one and link it on the
			 * ext list
			 */
			tmp_nbuf = qdf_get_nbuf_valid_frag(nbuf);

			if (!tmp_nbuf) {
				tmp_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
							  DP_RX_MON_MAX_MONITOR_HEADER,
							  DP_RX_MON_MAX_MONITOR_HEADER,
							  4, FALSE);
				if (qdf_unlikely(!tmp_nbuf)) {
					dp_mon_err("nbuf is NULL");
					qdf_assert_always(0);
				}
				mon_pdev->rx_mon_stats.parent_buf_alloc++;
				/* add new skb to frag list */
				qdf_nbuf_append_ext_list(nbuf, tmp_nbuf,
							 qdf_nbuf_len(tmp_nbuf));
			}
			dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, status_frag,
						   ppdu_info->hdr_len - DP_RX_MON_RX_HDR_OFFSET,
						   ppdu_info->data - (unsigned char *)status_frag + 4,
						   DP_MON_DATA_BUFFER_SIZE,
						   true);
		}
		ppdu_info->rx_hdr_rcvd[user_id] = true;
	}
	break;
	case HAL_TLV_STATUS_MON_BUF_ADDR:
	{
		struct hal_rx_mon_msdu_info *buf_info;
		struct hal_mon_packet_info *packet_info = &ppdu_info->packet_info;
		struct dp_mon_desc *mon_desc = (struct dp_mon_desc *)(uintptr_t)ppdu_info->packet_info.sw_cookie;
		struct hal_rx_mon_mpdu_info *mpdu_info;
		uint16_t frag_idx = 0;

		qdf_assert_always(mon_desc);

		/* Sanity: sw_cookie must point at a live mon descriptor */
		if (mon_desc->magic != DP_MON_DESC_MAGIC)
			qdf_assert_always(0);

		addr = mon_desc->buf_addr;
		qdf_assert_always(addr);

		mpdu_info = &ppdu_info->mpdu_info[user_id];
		if (!mon_desc->unmapped) {
			qdf_mem_unmap_page(soc->osdev,
					   (qdf_dma_addr_t)mon_desc->paddr,
				   DP_MON_DATA_BUFFER_SIZE,
					   QDF_DMA_FROM_DEVICE);
			mon_desc->unmapped = 1;
		}
		/* Descriptor is consumed; hand it back for replenish */
		dp_mon_add_to_free_desc_list(desc_list, tail, mon_desc);
		num_buf_reaped++;

		mon_pdev->rx_mon_stats.pkt_buf_count++;

		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {

			/* WAR: RX_HDR is not received for this MPDU, drop this frame */
			mon_pdev->rx_mon_stats.rx_hdr_not_received++;
			DP_STATS_INC(mon_soc, frag_free, 1);
			qdf_frag_free(addr);
			return num_buf_reaped;
		}

		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);

		if (mpdu_info->decap_type == DP_MON_DECAP_FORMAT_INVALID) {
			/* decap type is invalid, drop the frame */
			mon_pdev->rx_mon_stats.mpdu_decap_type_invalid++;
			DP_STATS_INC(mon_soc, frag_free, 1);
			mon_pdev->rx_mon_stats.parent_buf_free++;
			qdf_frag_free(addr);
			qdf_nbuf_queue_remove_last(&ppdu_info->mpdu_q[user_id]);
			qdf_nbuf_free(nbuf);
			/* we have freed the nbuf mark the q entry null */
			return num_buf_reaped;
		}

		tmp_nbuf = qdf_get_nbuf_valid_frag(nbuf);

		if (!tmp_nbuf) {
			tmp_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
						  DP_RX_MON_MAX_MONITOR_HEADER,
						  DP_RX_MON_MAX_MONITOR_HEADER,
						  4, FALSE);
			if (qdf_unlikely(!tmp_nbuf)) {
				dp_mon_err("nbuf is NULL");
				DP_STATS_INC(mon_soc, frag_free, 1);
				mon_pdev->rx_mon_stats.parent_buf_free++;
				qdf_frag_free(addr);
				/* remove this nbuf from queue */
				qdf_nbuf_queue_remove_last(&ppdu_info->mpdu_q[user_id]);
				qdf_nbuf_free(nbuf);
				return num_buf_reaped;
			}
			mon_pdev->rx_mon_stats.parent_buf_alloc++;
			/* add new skb to frag list */
			qdf_nbuf_append_ext_list(nbuf, tmp_nbuf,
						 qdf_nbuf_len(tmp_nbuf));
		}
		mpdu_info->full_pkt = true;

		if (mpdu_info->decap_type == HAL_HW_RX_DECAP_FORMAT_RAW) {
			/* RAW: the packet buffer already holds the full
			 * 802.11 frame, so the header frag added for the
			 * first RX_HDR is replaced by the packet frag
			 */
			if (mpdu_info->first_rx_hdr_rcvd) {
				qdf_nbuf_remove_frag(nbuf, frag_idx, DP_MON_DATA_BUFFER_SIZE);
				dp_rx_mon_nbuf_add_rx_frag(nbuf, addr,
							   packet_info->dma_length,
							   DP_RX_MON_PACKET_OFFSET,
							   DP_MON_DATA_BUFFER_SIZE,
							   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
				mpdu_info->first_rx_hdr_rcvd = false;
			} else {
				dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, addr,
							   packet_info->dma_length,
							   DP_RX_MON_PACKET_OFFSET,
							   DP_MON_DATA_BUFFER_SIZE,
							   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
			}
		} else {
			dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, addr,
						   packet_info->dma_length,
						   DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE +
						   DP_RX_MON_PACKET_OFFSET,
						   DP_MON_DATA_BUFFER_SIZE,
						   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
			/* Record first/last buffer markers in the frag
			 * headroom for MSDU reassembly later
			 */
			buf_info = addr;

			if (!ppdu_info->msdu[user_id].first_buffer) {
				buf_info->first_buffer = true;
				ppdu_info->msdu[user_id].first_buffer = true;
			} else {
				buf_info->first_buffer = false;
			}

			if (packet_info->msdu_continuation)
				buf_info->last_buffer = false;
			else
				buf_info->last_buffer = true;

			buf_info->frag_len = packet_info->dma_length;
		}
		if (qdf_unlikely(packet_info->truncated))
			mpdu_info->truncated = true;
	}
	break;
	case HAL_TLV_STATUS_MSDU_END:
	{
		struct hal_rx_mon_msdu_info *msdu_info = &ppdu_info->msdu[user_id];
		struct hal_rx_mon_msdu_info *last_buf_info;
		/* update msdu metadata at last buffer of msdu in MPDU */
		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
			/* reset msdu info for next msdu for same user */
			qdf_mem_zero(msdu_info, sizeof(*msdu_info));
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
				     __LINE__, user_id, mpdu_idx);
			break;
		}
		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		num_frags = qdf_nbuf_get_nr_frags(nbuf);
		if (ppdu_info->mpdu_info[user_id].decap_type ==
				HAL_HW_RX_DECAP_FORMAT_RAW) {
			break;
		}
		/* This points to last buffer of MSDU . update metadata here */
		addr = qdf_nbuf_get_frag_addr(nbuf, num_frags - 1) -
					      (DP_RX_MON_PACKET_OFFSET +
					       DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);
		last_buf_info = addr;

		last_buf_info->first_msdu = msdu_info->first_msdu;
		last_buf_info->last_msdu = msdu_info->last_msdu;
		last_buf_info->decap_type = msdu_info->decap_type;
		last_buf_info->msdu_index = msdu_info->msdu_index;
		last_buf_info->user_rssi = msdu_info->user_rssi;
		last_buf_info->reception_type = msdu_info->reception_type;
		last_buf_info->msdu_len = msdu_info->msdu_len;

		dp_rx_mon_pf_tag_to_buf_headroom_2_0(nbuf, ppdu_info, pdev,
						     soc);
		/* reset msdu info for next msdu for same user */
		qdf_mem_zero(msdu_info, sizeof(*msdu_info));

		/* If flow classification is enabled,
		 * update cce_metadata and fse_metadata
		 */
	}
	break;
	case HAL_TLV_STATUS_MPDU_START:
	{
		struct hal_rx_mon_mpdu_info *mpdu_info, *mpdu_meta;

		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d", __LINE__, user_id, mpdu_idx);
			break;
		}
		/* Copy the decap type into the parent nbuf headroom meta */
		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(nbuf);
		mpdu_info = &ppdu_info->mpdu_info[user_id];
		mpdu_meta->decap_type = mpdu_info->decap_type;
		ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = true;
	break;
	}
	case HAL_TLV_STATUS_MPDU_END:
	{
		struct hal_rx_mon_mpdu_info *mpdu_info, *mpdu_meta;
		mpdu_info = &ppdu_info->mpdu_info[user_id];
		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
			/* reset mpdu info for next mpdu for same user */
			qdf_mem_zero(mpdu_info, sizeof(*mpdu_info));
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
				     __LINE__, user_id, mpdu_idx);
			break;
		}
		/* Propagate error/completion flags into nbuf headroom meta */
		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(nbuf);
		mpdu_meta->mpdu_length_err = mpdu_info->mpdu_length_err;
		mpdu_meta->fcs_err = mpdu_info->fcs_err;
		ppdu_info->rx_status.rs_fcs_err = mpdu_info->fcs_err;
		mpdu_meta->overflow_err = mpdu_info->overflow_err;
		mpdu_meta->decrypt_err = mpdu_info->decrypt_err;
		mpdu_meta->full_pkt = mpdu_info->full_pkt;
		mpdu_meta->truncated = mpdu_info->truncated;

		/* reset mpdu info for next mpdu for same user */
		qdf_mem_zero(mpdu_info, sizeof(*mpdu_info));
		ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = false;
		ppdu_info->mpdu_count[user_id]++;
		ppdu_info->rx_hdr_rcvd[user_id] = false;
	}
	break;
	}
	return num_buf_reaped;
}
1369 
1370 /**
1371  * dp_rx_mon_process_status_tlv () - Handle mon status process TLV
1372  *
1373  * @pdev: DP pdev handle
1374  *
1375  * Return
1376  */
1377 static inline struct hal_rx_ppdu_info *
1378 dp_rx_mon_process_status_tlv(struct dp_pdev *pdev)
1379 {
1380 	struct dp_soc *soc = pdev->soc;
1381 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1382 	struct dp_mon_pdev_be *mon_pdev_be =
1383 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1384 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1385 	union dp_mon_desc_list_elem_t *tail = NULL;
1386 	struct dp_mon_desc *mon_desc;
1387 	uint8_t idx, user;
1388 	void *buf;
1389 	struct hal_rx_ppdu_info *ppdu_info;
1390 	uint8_t *rx_tlv;
1391 	uint8_t *rx_tlv_start;
1392 	uint16_t end_offset = 0;
1393 	uint16_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
1394 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1395 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1396 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1397 	uint8_t work_done = 0;
1398 	uint16_t status_buf_count;
1399 
1400 	if (!mon_pdev_be->desc_count) {
1401 		dp_mon_err("no of status buffer count is zero: %pK", pdev);
1402 		return NULL;
1403 	}
1404 
1405 	ppdu_info = dp_rx_mon_get_ppdu_info(mon_pdev);
1406 
1407 	if (!ppdu_info) {
1408 		dp_mon_err("ppdu_info malloc failed pdev: %pK", pdev);
1409 		dp_rx_mon_flush_status_buf_queue(pdev);
1410 		return NULL;
1411 	}
1412 	mon_pdev->rx_mon_stats.total_ppdu_info_alloc++;
1413 
1414 	for (user = 0; user < HAL_MAX_UL_MU_USERS; user++)
1415 		qdf_nbuf_queue_init(&ppdu_info->mpdu_q[user]);
1416 
1417 	status_buf_count = mon_pdev_be->desc_count;
1418 	for (idx = 0; idx < status_buf_count; idx++) {
1419 		mon_desc = mon_pdev_be->status[idx];
1420 		if (!mon_desc) {
1421 			qdf_assert_always(0);
1422 			return NULL;
1423 		}
1424 
1425 		buf = mon_desc->buf_addr;
1426 		end_offset = mon_desc->end_offset;
1427 
1428 		dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
1429 		work_done++;
1430 
1431 		rx_tlv = buf;
1432 		rx_tlv_start = buf;
1433 
1434 		do {
1435 			tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
1436 								ppdu_info,
1437 								pdev->soc->hal_soc,
1438 								buf);
1439 
1440 			work_done += dp_rx_mon_process_tlv_status(pdev,
1441 								  ppdu_info,
1442 								  buf,
1443 								  tlv_status,
1444 								  &desc_list,
1445 								  &tail);
1446 			rx_tlv = hal_rx_status_get_next_tlv(rx_tlv, 1);
1447 
1448 			/* HW provides end_offset (how many bytes HW DMA'ed)
1449 			 * as part of descriptor, use this as delimiter for
1450 			 * status buffer
1451 			 */
1452 			if ((rx_tlv - rx_tlv_start) >= (end_offset + 1))
1453 				break;
1454 
1455 	} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
1456 			(tlv_status == HAL_TLV_STATUS_HEADER) ||
1457 			(tlv_status == HAL_TLV_STATUS_MPDU_END) ||
1458 			(tlv_status == HAL_TLV_STATUS_MSDU_END) ||
1459 			(tlv_status == HAL_TLV_STATUS_MON_BUF_ADDR) ||
1460 			(tlv_status == HAL_TLV_STATUS_MPDU_START));
1461 
1462 		/* set status buffer pointer to NULL */
1463 		mon_pdev_be->status[idx] = NULL;
1464 		mon_pdev_be->desc_count--;
1465 
1466 		qdf_frag_free(buf);
1467 		DP_STATS_INC(mon_soc, frag_free, 1);
1468 		mon_pdev->rx_mon_stats.status_buf_count++;
1469 	}
1470 
1471 	if (work_done) {
1472 		mon_pdev->rx_mon_stats.mon_rx_bufs_replenished_dest +=
1473 				work_done;
1474 		dp_mon_buffers_replenish(soc, &soc->rxdma_mon_buf_ring[0],
1475 					 rx_mon_desc_pool,
1476 					 work_done,
1477 					 &desc_list, &tail, NULL);
1478 	}
1479 
1480 	ppdu_info->rx_status.tsft = ppdu_info->rx_status.tsft +
1481 				    pdev->timestamp.mlo_offset_lo_us +
1482 				    ((uint64_t)pdev->timestamp.mlo_offset_hi_us
1483 				    << 32);
1484 
1485 	return ppdu_info;
1486 }
1487 
1488 /**
1489  * dp_rx_mon_update_peer_id() - Update sw_peer_id with link peer_id
1490  *
1491  * @pdev: DP pdev handle
1492  * @ppdu_info: HAL PPDU Info buffer
1493  *
1494  * Return: none
1495  */
1496 #ifdef WLAN_FEATURE_11BE_MLO
1497 #define DP_PEER_ID_MASK 0x3FFF
1498 static inline
1499 void dp_rx_mon_update_peer_id(struct dp_pdev *pdev,
1500 			      struct hal_rx_ppdu_info *ppdu_info)
1501 {
1502 	uint32_t i;
1503 	uint16_t peer_id;
1504 	struct dp_soc *soc = pdev->soc;
1505 	uint32_t num_users = ppdu_info->com_info.num_users;
1506 
1507 	for (i = 0; i < num_users; i++) {
1508 		peer_id = ppdu_info->rx_user_status[i].sw_peer_id;
1509 		if (peer_id == HTT_INVALID_PEER)
1510 			continue;
1511 		/*
1512 		+---------------------------------------------------------------------+
1513 		| 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
1514 		+---------------------------------------------------------------------+
1515 		| CHIP ID | ML |                     PEER ID                          |
1516 		+---------------------------------------------------------------------+
1517 		*/
1518 		peer_id &= DP_PEER_ID_MASK;
1519 		peer_id = dp_get_link_peer_id_by_lmac_id(soc, peer_id,
1520 							 pdev->lmac_id);
1521 		ppdu_info->rx_user_status[i].sw_peer_id = peer_id;
1522 	}
1523 }
1524 #else
1525 static inline
1526 void dp_rx_mon_update_peer_id(struct dp_pdev *pdev,
1527 			      struct hal_rx_ppdu_info *ppdu_info)
1528 {
1529 }
1530 #endif
1531 
1532 static inline uint32_t
1533 dp_rx_mon_srng_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
1534 			   uint32_t mac_id, uint32_t quota)
1535 {
1536 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1537 	struct dp_mon_pdev *mon_pdev;
1538 	struct dp_mon_pdev_be *mon_pdev_be;
1539 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1540 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1541 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1542 	hal_soc_handle_t hal_soc = soc->hal_soc;
1543 	void *rx_mon_dst_ring_desc;
1544 	void *mon_dst_srng;
1545 	uint32_t work_done = 0;
1546 	struct hal_rx_ppdu_info *ppdu_info = NULL;
1547 	QDF_STATUS status;
1548 
1549 	if (!pdev) {
1550 		dp_mon_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
1551 		return work_done;
1552 	}
1553 
1554 	mon_pdev = pdev->monitor_pdev;
1555 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1556 	mon_dst_srng = soc->rxdma_mon_dst_ring[mac_id].hal_srng;
1557 
1558 	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
1559 		dp_mon_err("%pK: : HAL Monitor Destination Ring Init Failed -- %pK",
1560 			   soc, mon_dst_srng);
1561 		return work_done;
1562 	}
1563 
1564 	hal_soc = soc->hal_soc;
1565 
1566 	qdf_assert((hal_soc && pdev));
1567 
1568 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
1569 
1570 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dst_srng))) {
1571 		dp_mon_err("%s %d : HAL Mon Dest Ring access Failed -- %pK",
1572 			   __func__, __LINE__, mon_dst_srng);
1573 		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
1574 		return work_done;
1575 	}
1576 
1577 	while (qdf_likely((rx_mon_dst_ring_desc =
1578 			  (void *)hal_srng_dst_peek(hal_soc, mon_dst_srng))
1579 				&& quota--)) {
1580 		struct hal_mon_desc hal_mon_rx_desc = {0};
1581 		struct dp_mon_desc *mon_desc;
1582 		hal_be_get_mon_dest_status(soc->hal_soc,
1583 					   rx_mon_dst_ring_desc,
1584 					   &hal_mon_rx_desc);
1585 		/* If it's empty descriptor, skip processing
1586 		 * and process next hW desc
1587 		 */
1588 		if (hal_mon_rx_desc.empty_descriptor == 1) {
1589 			dp_mon_debug("empty descriptor found mon_pdev: %pK",
1590 				     mon_pdev);
1591 			rx_mon_dst_ring_desc =
1592 				hal_srng_dst_get_next(hal_soc, mon_dst_srng);
1593 			mon_pdev->rx_mon_stats.empty_desc_ppdu++;
1594 			continue;
1595 		}
1596 		mon_desc = (struct dp_mon_desc *)(uintptr_t)(hal_mon_rx_desc.buf_addr);
1597 		qdf_assert_always(mon_desc);
1598 
1599 		if ((mon_desc == mon_pdev_be->prev_rxmon_desc) &&
1600 		    (mon_desc->cookie == mon_pdev_be->prev_rxmon_cookie)) {
1601 			dp_mon_err("duplicate descritout found mon_pdev: %pK mon_desc: %pK cookie: %d",
1602 				   mon_pdev, mon_desc, mon_desc->cookie);
1603 			mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
1604 			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
1605 			continue;
1606 		}
1607 		mon_pdev_be->prev_rxmon_desc = mon_desc;
1608 		mon_pdev_be->prev_rxmon_cookie = mon_desc->cookie;
1609 
1610 		if (!mon_desc->unmapped) {
1611 			qdf_mem_unmap_page(soc->osdev, mon_desc->paddr,
1612 					   rx_mon_desc_pool->buf_size,
1613 					   QDF_DMA_FROM_DEVICE);
1614 			mon_desc->unmapped = 1;
1615 		}
1616 		mon_desc->end_offset = hal_mon_rx_desc.end_offset;
1617 
1618 		/* Flush and truncated status buffers content
1619 		 * need to discarded
1620 		 */
1621 		if (hal_mon_rx_desc.end_reason == HAL_MON_FLUSH_DETECTED ||
1622 		    hal_mon_rx_desc.end_reason == HAL_MON_PPDU_TRUNCATED) {
1623 			dp_mon_debug("end_resaon: %d mon_pdev: %pK",
1624 				     hal_mon_rx_desc.end_reason, mon_pdev);
1625 			mon_pdev->rx_mon_stats.status_ppdu_drop++;
1626 			dp_rx_mon_handle_flush_n_trucated_ppdu(soc,
1627 							       pdev,
1628 							       mon_desc);
1629 			rx_mon_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
1630 							mon_dst_srng);
1631 			continue;
1632 		}
1633 		if (mon_pdev_be->desc_count >= DP_MON_MAX_STATUS_BUF)
1634 			qdf_assert_always(0);
1635 
1636 		mon_pdev_be->status[mon_pdev_be->desc_count++] = mon_desc;
1637 
1638 		rx_mon_dst_ring_desc = hal_srng_dst_get_next(hal_soc, mon_dst_srng);
1639 
1640 		status = dp_rx_process_pktlog_be(soc, pdev, ppdu_info,
1641 						 mon_desc->buf_addr,
1642 						 hal_mon_rx_desc.end_offset);
1643 
1644 		if (hal_mon_rx_desc.end_reason == HAL_MON_STATUS_BUFFER_FULL)
1645 			continue;
1646 
1647 		mon_pdev->rx_mon_stats.status_ppdu_done++;
1648 
1649 		ppdu_info = dp_rx_mon_process_status_tlv(pdev);
1650 
1651 		if (ppdu_info)
1652 			dp_rx_mon_update_peer_id(pdev, ppdu_info);
1653 
1654 		/* Call enhanced stats update API */
1655 		if (mon_pdev->enhanced_stats_en && ppdu_info)
1656 			dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);
1657 		else if (dp_cfr_rcc_mode_status(pdev) && ppdu_info)
1658 			dp_rx_handle_cfr(soc, pdev, ppdu_info);
1659 
1660 		status = dp_rx_mon_add_ppdu_info_to_wq(pdev, ppdu_info);
1661 		if (status != QDF_STATUS_SUCCESS) {
1662 			if (ppdu_info)
1663 				__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
1664 		}
1665 
1666 		work_done++;
1667 
1668 		/* desc_count should be zero  after PPDU status processing */
1669 		if (mon_pdev_be->desc_count > 0)
1670 			qdf_assert_always(0);
1671 
1672 		mon_pdev_be->desc_count = 0;
1673 	}
1674 	dp_srng_access_end(int_ctx, soc, mon_dst_srng);
1675 
1676 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
1677 	dp_mon_info("mac_id: %d, work_done:%d", mac_id, work_done);
1678 	return work_done;
1679 }
1680 
1681 uint32_t
1682 dp_rx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
1683 		      uint32_t mac_id, uint32_t quota)
1684 {
1685 	uint32_t work_done;
1686 
1687 	work_done = dp_rx_mon_srng_process_2_0(soc, int_ctx, mac_id, quota);
1688 
1689 	return work_done;
1690 }
1691 
1692 void
1693 dp_rx_mon_buf_desc_pool_deinit(struct dp_soc *soc)
1694 {
1695 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1696 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1697 
1698 	/* Drain page frag cachce before pool deinit */
1699 	qdf_frag_cache_drain(&mon_soc_be->rx_desc_mon.pf_cache);
1700 	dp_mon_desc_pool_deinit(&mon_soc_be->rx_desc_mon);
1701 }
1702 
1703 QDF_STATUS
1704 dp_rx_mon_buf_desc_pool_init(struct dp_soc *soc)
1705 {
1706 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1707 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1708 	uint32_t num_entries;
1709 
1710 	num_entries =
1711 		wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc->wlan_cfg_ctx);
1712 	return dp_mon_desc_pool_init(&mon_soc_be->rx_desc_mon, num_entries);
1713 }
1714 
1715 void dp_rx_mon_buf_desc_pool_free(struct dp_soc *soc)
1716 {
1717 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1718 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1719 
1720 	if (mon_soc)
1721 		dp_mon_desc_pool_free(&mon_soc_be->rx_desc_mon);
1722 }
1723 
1724 QDF_STATUS
1725 dp_rx_mon_buf_desc_pool_alloc(struct dp_soc *soc)
1726 {
1727 	struct dp_srng *mon_buf_ring;
1728 	struct dp_mon_desc_pool *rx_mon_desc_pool;
1729 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1730 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1731 	int entries;
1732 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1733 
1734 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1735 
1736 	entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
1737 	mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
1738 
1739 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1740 
1741 	qdf_print("%s:%d rx mon buf desc pool entries: %d", __func__, __LINE__, entries);
1742 	return dp_mon_desc_pool_alloc(entries, rx_mon_desc_pool);
1743 }
1744 
1745 void
1746 dp_rx_mon_buffers_free(struct dp_soc *soc)
1747 {
1748 	struct dp_mon_desc_pool *rx_mon_desc_pool;
1749 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1750 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1751 
1752 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1753 
1754 	dp_mon_pool_frag_unmap_and_free(soc, rx_mon_desc_pool);
1755 }
1756 
1757 QDF_STATUS
1758 dp_rx_mon_buffers_alloc(struct dp_soc *soc, uint32_t size)
1759 {
1760 	struct dp_srng *mon_buf_ring;
1761 	struct dp_mon_desc_pool *rx_mon_desc_pool;
1762 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1763 	union dp_mon_desc_list_elem_t *tail = NULL;
1764 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1765 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1766 
1767 	mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
1768 
1769 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1770 
1771 	return dp_mon_buffers_replenish(soc, mon_buf_ring,
1772 					rx_mon_desc_pool,
1773 					size,
1774 					&desc_list, &tail, NULL);
1775 }
1776 
1777 #ifdef QCA_ENHANCED_STATS_SUPPORT
/* Copy the per-user MPDU retry count from the HAL Rx user status into
 * the cdp per-user PPDU stats entry.
 */
void
dp_rx_mon_populate_ppdu_usr_info_2_0(struct mon_rx_user_status *rx_user_status,
				     struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	ppdu_user->mpdu_retries = rx_user_status->retry_mpdu;
}
1784 
1785 #ifdef WLAN_FEATURE_11BE
1786 void dp_rx_mon_stats_update_2_0(struct dp_mon_peer *mon_peer,
1787 				struct cdp_rx_indication_ppdu *ppdu,
1788 				struct cdp_rx_stats_ppdu_user *ppdu_user)
1789 {
1790 	uint8_t mcs, preamble, ppdu_type, punc_mode;
1791 	uint32_t num_msdu;
1792 
1793 	preamble = ppdu->u.preamble;
1794 	ppdu_type = ppdu->u.ppdu_type;
1795 	num_msdu = ppdu_user->num_msdu;
1796 	punc_mode = ppdu->punc_bw;
1797 
1798 	if (ppdu_type == HAL_RX_TYPE_SU)
1799 		mcs = ppdu->u.mcs;
1800 	else
1801 		mcs = ppdu_user->mcs;
1802 
1803 	DP_STATS_INC(mon_peer, rx.mpdu_retry_cnt, ppdu_user->mpdu_retries);
1804 	DP_STATS_INC(mon_peer, rx.punc_bw[punc_mode], num_msdu);
1805 	DP_STATS_INCC(mon_peer,
1806 		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
1807 		      ((mcs >= MAX_MCS_11BE) && (preamble == DOT11_BE)));
1808 	DP_STATS_INCC(mon_peer,
1809 		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
1810 		      ((mcs < MAX_MCS_11BE) && (preamble == DOT11_BE)));
1811 	DP_STATS_INCC(mon_peer,
1812 		      rx.su_be_ppdu_cnt.mcs_count[MAX_MCS - 1], 1,
1813 		      ((mcs >= (MAX_MCS_11BE)) && (preamble == DOT11_BE) &&
1814 		      (ppdu_type == HAL_RX_TYPE_SU)));
1815 	DP_STATS_INCC(mon_peer,
1816 		      rx.su_be_ppdu_cnt.mcs_count[mcs], 1,
1817 		      ((mcs < (MAX_MCS_11BE)) && (preamble == DOT11_BE) &&
1818 		      (ppdu_type == HAL_RX_TYPE_SU)));
1819 	DP_STATS_INCC(mon_peer,
1820 		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[MAX_MCS - 1],
1821 		      1, ((mcs >= (MAX_MCS_11BE)) &&
1822 		      (preamble == DOT11_BE) &&
1823 		      (ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
1824 	DP_STATS_INCC(mon_peer,
1825 		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[mcs],
1826 		      1, ((mcs < (MAX_MCS_11BE)) &&
1827 		      (preamble == DOT11_BE) &&
1828 		      (ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
1829 	DP_STATS_INCC(mon_peer,
1830 		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[MAX_MCS - 1],
1831 		      1, ((mcs >= (MAX_MCS_11BE)) &&
1832 		      (preamble == DOT11_BE) &&
1833 		      (ppdu_type == HAL_RX_TYPE_MU_MIMO)));
1834 	DP_STATS_INCC(mon_peer,
1835 		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[mcs],
1836 		      1, ((mcs < (MAX_MCS_11BE)) &&
1837 		      (preamble == DOT11_BE) &&
1838 		      (ppdu_type == HAL_RX_TYPE_MU_MIMO)));
1839 }
1840 
1841 void
1842 dp_rx_mon_populate_ppdu_info_2_0(struct hal_rx_ppdu_info *hal_ppdu_info,
1843 				 struct cdp_rx_indication_ppdu *ppdu)
1844 {
1845 	uint16_t puncture_pattern;
1846 	enum cdp_punctured_modes punc_mode;
1847 
1848 	/* Align bw value as per host data structures */
1849 	if (hal_ppdu_info->rx_status.bw == HAL_FULL_RX_BW_320)
1850 		ppdu->u.bw = CMN_BW_320MHZ;
1851 	else
1852 		ppdu->u.bw = hal_ppdu_info->rx_status.bw;
1853 	if (hal_ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11BE) {
1854 		/* Align preamble value as per host data structures */
1855 		ppdu->u.preamble = DOT11_BE;
1856 		ppdu->u.stbc = hal_ppdu_info->rx_status.is_stbc;
1857 		ppdu->u.dcm = hal_ppdu_info->rx_status.dcm;
1858 	} else {
1859 		ppdu->u.preamble = hal_ppdu_info->rx_status.preamble_type;
1860 	}
1861 
1862 	puncture_pattern = hal_ppdu_info->rx_status.punctured_pattern;
1863 	punc_mode = dp_mon_get_puncture_type(puncture_pattern,
1864 					     ppdu->u.bw);
1865 	ppdu->punc_bw = punc_mode;
1866 }
1867 #else
/* Stats update fallback when 11BE support is not compiled in: only the
 * per-user MPDU retry count is tracked.
 */
void dp_rx_mon_stats_update_2_0(struct dp_mon_peer *mon_peer,
				struct cdp_rx_indication_ppdu *ppdu,
				struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	DP_STATS_INC(mon_peer, rx.mpdu_retry_cnt, ppdu_user->mpdu_retries);
}
1874 
/* Non-11BE build: puncturing does not apply, always report NO_PUNCTURE */
void
dp_rx_mon_populate_ppdu_info_2_0(struct hal_rx_ppdu_info *hal_ppdu_info,
				 struct cdp_rx_indication_ppdu *ppdu)
{
	ppdu->punc_bw = NO_PUNCTURE;
}
1881 #endif
1882 void dp_mon_rx_print_advanced_stats_2_0(struct dp_soc *soc,
1883 					struct dp_pdev *pdev)
1884 {
1885 	struct cdp_pdev_mon_stats *rx_mon_stats;
1886 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1887 	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
1888 	struct dp_mon_pdev_be *mon_pdev_be =
1889 				dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1890 
1891 	rx_mon_stats = &mon_pdev->rx_mon_stats;
1892 
1893 	DP_PRINT_STATS("total_ppdu_info_alloc = %d",
1894 		       rx_mon_stats->total_ppdu_info_alloc);
1895 	DP_PRINT_STATS("total_ppdu_info_free = %d",
1896 		       rx_mon_stats->total_ppdu_info_free);
1897 	DP_PRINT_STATS("total_ppdu_info_enq = %d",
1898 		       rx_mon_stats->total_ppdu_info_enq);
1899 	DP_PRINT_STATS("total_ppdu_info_drop = %d",
1900 		       rx_mon_stats->total_ppdu_info_drop);
1901 	DP_PRINT_STATS("rx_hdr_not_received = %d",
1902 		       rx_mon_stats->rx_hdr_not_received);
1903 	DP_PRINT_STATS("parent_buf_alloc = %d",
1904 		       rx_mon_stats->parent_buf_alloc);
1905 	DP_PRINT_STATS("parent_buf_free = %d",
1906 		       rx_mon_stats->parent_buf_free);
1907 	DP_PRINT_STATS("mpdus_buf_to_stack = %d",
1908 		       rx_mon_stats->mpdus_buf_to_stack);
1909 	DP_PRINT_STATS("frag_alloc = %d",
1910 		       mon_soc->stats.frag_alloc);
1911 	DP_PRINT_STATS("frag_free = %d",
1912 		       mon_soc->stats.frag_free);
1913 	DP_PRINT_STATS("status_buf_count = %d",
1914 		       rx_mon_stats->status_buf_count);
1915 	DP_PRINT_STATS("pkt_buf_count = %d",
1916 		       rx_mon_stats->pkt_buf_count);
1917 	DP_PRINT_STATS("rx_mon_queue_depth= %d",
1918 		       mon_pdev_be->rx_mon_queue_depth);
1919 	DP_PRINT_STATS("empty_desc= %d",
1920 		       mon_pdev->rx_mon_stats.empty_desc_ppdu);
1921 	DP_PRINT_STATS("mpdu_dropped_due_invalid_decap= %d",
1922 		       mon_pdev->rx_mon_stats.mpdu_decap_type_invalid);
1923 	DP_PRINT_STATS("total_free_elem= %d",
1924 		       mon_pdev_be->total_free_elem);
1925 }
1926 #endif
1927