xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/2.0/dp_rx_mon_2.0.c (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "hal_be_hw_headers.h"
19 #include "dp_types.h"
20 #include "hal_be_rx.h"
21 #include "hal_api.h"
22 #include "qdf_trace.h"
23 #include "hal_be_api_mon.h"
24 #include "dp_internal.h"
25 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
26 #include <qdf_flex_mem.h>
27 #include "qdf_nbuf_frag.h"
28 #include "dp_mon.h"
29 #include <dp_rx_mon.h>
30 #include <dp_mon_2.0.h>
31 #include <dp_rx_mon.h>
32 #include <dp_rx_mon_2.0.h>
33 #include <dp_rx.h>
34 #include <dp_be.h>
35 #include <hal_be_api_mon.h>
36 #ifdef QCA_SUPPORT_LITE_MONITOR
37 #include "dp_lite_mon.h"
38 #endif
39 
40 #define F_MASK 0xFFFF
41 #define TEST_MASK 0xCBF
42 
43 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
44 	    defined(WLAN_SUPPORT_RX_FLOW_TAG)
45 
46 #ifdef QCA_TEST_MON_PF_TAGS_STATS
47 
/**
 * dp_rx_mon_print_tag_buf() - Hex-dump the protocol/flow tag TLV buffer
 * @buf: start of the TLV buffer to dump
 * @room: number of bytes to dump
 *
 * Debug-only helper, compiled in when QCA_TEST_MON_PF_TAGS_STATS is set.
 *
 * Return: void
 */
static
void dp_rx_mon_print_tag_buf(uint8_t *buf, uint16_t room)
{
	/* 32 bytes per row, grouped as 2-byte words, no ASCII column */
	print_hex_dump(KERN_ERR, "TLV BUFFER: ", DUMP_PREFIX_NONE,
		       32, 2, buf, room, false);
}
54 
55 #else
/* No-op stub when QCA_TEST_MON_PF_TAGS_STATS is compiled out */
static
void dp_rx_mon_print_tag_buf(uint8_t *buf, uint16_t room)
{
}
60 
61 #endif
62 
63 static
64 void dp_rx_mon_set_zero(qdf_nbuf_t nbuf)
65 {
66 	qdf_mem_zero(qdf_nbuf_head(nbuf), DP_RX_MON_TLV_ROOM);
67 }
68 
69 /**
70  * dp_rx_mon_get_ppdu_info() - Get PPDU info from freelist
71  *
72  * @mon_pdev: monitor pdev
73  *
74  * Return: ppdu_info
75  */
76 static inline struct hal_rx_ppdu_info*
77 dp_rx_mon_get_ppdu_info(struct dp_mon_pdev *mon_pdev)
78 {
79 	struct dp_mon_pdev_be *mon_pdev_be =
80 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
81 	struct hal_rx_ppdu_info *ppdu_info, *temp_ppdu_info;
82 
83 	qdf_spin_lock_bh(&mon_pdev_be->ppdu_info_lock);
84 	TAILQ_FOREACH_SAFE(ppdu_info,
85 			   &mon_pdev_be->rx_mon_free_queue,
86 			   ppdu_list_elem,
87 			   temp_ppdu_info) {
88 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_free_queue,
89 			     ppdu_info, ppdu_free_list_elem);
90 
91 		if (ppdu_info) {
92 			mon_pdev_be->total_free_elem--;
93 			break;
94 		}
95 	}
96 	qdf_spin_unlock_bh(&mon_pdev_be->ppdu_info_lock);
97 
98 	return ppdu_info;
99 }
100 
101 static inline void
102 __dp_rx_mon_free_ppdu_info(struct dp_mon_pdev *mon_pdev,
103 			   struct hal_rx_ppdu_info *ppdu_info)
104 {
105 	struct dp_mon_pdev_be *mon_pdev_be =
106 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
107 
108 	qdf_spin_lock_bh(&mon_pdev_be->ppdu_info_lock);
109 	if (ppdu_info) {
110 		TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_free_queue, ppdu_info,
111 				  ppdu_free_list_elem);
112 		mon_pdev_be->total_free_elem++;
113 	}
114 	qdf_spin_unlock_bh(&mon_pdev_be->ppdu_info_lock);
115 }
116 
117 /**
118  * dp_rx_mon_nbuf_add_rx_frag () -  Add frag to SKB
119  *
120  * @nbuf: SKB to which frag is going to be added
121  * @frag: frag to be added to SKB
122  * @frag_len: frag length
123  * @offset: frag offset
124  * @buf_size: buffer size
125  * @frag_ref: take frag ref
126  *
127  * Return: QDF_STATUS
128  */
129 static inline QDF_STATUS
130 dp_rx_mon_nbuf_add_rx_frag(qdf_nbuf_t nbuf, qdf_frag_t *frag,
131 			   uint16_t frag_len, uint16_t offset,
132 			   uint16_t buf_size, bool frag_ref)
133 {
134 	uint8_t num_frags;
135 
136 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
137 	if (num_frags < QDF_NBUF_MAX_FRAGS) {
138 		qdf_nbuf_add_rx_frag(frag, nbuf,
139 				     offset,
140 				     frag_len,
141 				     buf_size,
142 				     frag_ref);
143 		return QDF_STATUS_SUCCESS;
144 	}
145 	return QDF_STATUS_E_FAILURE;
146 }
147 
/**
 * dp_mon_free_parent_nbuf() - Free parent SKB
 *
 * @mon_pdev: monitor pdev
 * @nbuf: SKB to be freed
 *
 * Return: void
 */
void
dp_mon_free_parent_nbuf(struct dp_mon_pdev *mon_pdev,
			qdf_nbuf_t nbuf)
{
	/* Account the free in pdev stats before releasing the buffer */
	mon_pdev->rx_mon_stats.parent_buf_free++;
	qdf_nbuf_free(nbuf);
}
163 
/**
 * dp_rx_mon_shift_pf_tag_in_headroom() - Wrap the per-MSDU pf tags stored
 * in the nbuf headroom into a marker + length + TLV layout
 * @nbuf: MPDU nbuf whose headroom carries the tag area
 * @soc: DP soc handle (asserted non-NULL)
 * @ppdu_info: PPDU info (currently unused here; kept for API symmetry)
 *
 * The headroom starts with a 16-bit MSDU count followed by per-MSDU tag
 * entries. This routine pushes a marker word and a total-length word in
 * front, emits the TLV header via dp_mon_rx_add_tlv(), then pulls the
 * data pointer back by exactly the bytes pushed so the payload pointer is
 * restored.
 *
 * Return: void
 */
void
dp_rx_mon_shift_pf_tag_in_headroom(qdf_nbuf_t nbuf, struct dp_soc *soc,
				   struct hal_rx_ppdu_info *ppdu_info)
{
	uint32_t room = 0;
	uint16_t msdu_count = 0;
	uint16_t *dp = NULL;
	uint16_t *hp = NULL;
	uint16_t tlv_data_len, total_tlv_len;
	/* running count of bytes pushed, so the pull below is exact */
	uint32_t bytes = 0;

	if (qdf_unlikely(!soc)) {
		dp_mon_err("Soc[%pK] Null. Can't update pftag to nbuf headroom",
			   soc);
		qdf_assert_always(0);
	}

	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
		return;

	if (qdf_unlikely(!nbuf))
		return;

	/* Headroom must have enough space for the TLV to be added */
	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < DP_RX_MON_TLV_ROOM)) {
		dp_mon_err("Headroom[%d] < DP_RX_MON_TLV_ROOM[%d]",
			   qdf_nbuf_headroom(nbuf), DP_RX_MON_TLV_ROOM);
		return;
	}

	/* first 16-bit word of the headroom is the MSDU count */
	hp = (uint16_t *)qdf_nbuf_head(nbuf);
	msdu_count = *hp;

	if (qdf_unlikely(!msdu_count))
		return;

	dp_mon_debug("msdu_count: %d", msdu_count);

	room = DP_RX_MON_PF_TAG_LEN_PER_FRAG * msdu_count;
	tlv_data_len = DP_RX_MON_TLV_MSDU_CNT + (room);
	total_tlv_len = DP_RX_MON_TLV_HDR_LEN + tlv_data_len;

	//1. store space for MARKER
	dp = (uint16_t *)qdf_nbuf_push_head(nbuf, sizeof(uint16_t));
	if (qdf_likely(dp)) {
		*(uint16_t *)dp = DP_RX_MON_TLV_HDR_MARKER;
		bytes += sizeof(uint16_t);
	}

	//2. store space for total size
	dp = (uint16_t *)qdf_nbuf_push_head(nbuf, sizeof(uint16_t));
	if (qdf_likely(dp)) {
		*(uint16_t *)dp = total_tlv_len;
		bytes += sizeof(uint16_t);
	}

	//create TLV
	bytes += dp_mon_rx_add_tlv(DP_RX_MON_TLV_PF_ID, tlv_data_len, hp, nbuf);

	dp_rx_mon_print_tag_buf(qdf_nbuf_data(nbuf), total_tlv_len);

	/* restore data pointer: undo exactly what was pushed above */
	qdf_nbuf_pull_head(nbuf, bytes);

}
228 
229 void
230 dp_rx_mon_pf_tag_to_buf_headroom_2_0(void *nbuf,
231 				     struct hal_rx_ppdu_info *ppdu_info,
232 				     struct dp_pdev *pdev, struct dp_soc *soc)
233 {
234 	uint8_t *nbuf_head = NULL;
235 	uint8_t user_id;
236 	struct hal_rx_mon_msdu_info *msdu_info;
237 	uint16_t flow_id;
238 	uint16_t cce_metadata;
239 	uint16_t protocol_tag = 0;
240 	uint32_t flow_tag;
241 	uint8_t invalid_cce = 0, invalid_fse = 0;
242 
243 	if (qdf_unlikely(!soc)) {
244 		dp_mon_err("Soc[%pK] Null. Can't update pftag to nbuf headroom",
245 			   soc);
246 		qdf_assert_always(0);
247 	}
248 
249 	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
250 		return;
251 
252 	if (qdf_unlikely(!nbuf))
253 		return;
254 
255 	/* Headroom must be have enough space for tlv to be added*/
256 	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < DP_RX_MON_TLV_ROOM)) {
257 		dp_mon_err("Headroom[%d] < DP_RX_MON_TLV_ROOM[%d]",
258 			   qdf_nbuf_headroom(nbuf), DP_RX_MON_TLV_ROOM);
259 		return;
260 	}
261 
262 	user_id = ppdu_info->user_id;
263 	if (qdf_unlikely(user_id > HAL_MAX_UL_MU_USERS)) {
264 		dp_mon_debug("Invalid user_id user_id: %d pdev: %pK", user_id, pdev);
265 		return;
266 	}
267 
268 	msdu_info = &ppdu_info->msdu[user_id];
269 	flow_id = ppdu_info->rx_msdu_info[user_id].flow_idx;
270 	cce_metadata = ppdu_info->rx_msdu_info[user_id].cce_metadata -
271 		       RX_PROTOCOL_TAG_START_OFFSET;
272 
273 	flow_tag = ppdu_info->rx_msdu_info[user_id].fse_metadata & F_MASK;
274 
275 	if (qdf_unlikely((cce_metadata > RX_PROTOCOL_TAG_MAX - 1) ||
276 			 (cce_metadata > 0 && cce_metadata < 4))) {
277 		dp_mon_debug("Invalid user_id cce_metadata: %d pdev: %pK", cce_metadata, pdev);
278 		invalid_cce = 1;
279 		protocol_tag = cce_metadata;
280 	} else {
281 		protocol_tag = pdev->rx_proto_tag_map[cce_metadata].tag;
282 		dp_mon_rx_update_rx_protocol_tag_stats(pdev, cce_metadata);
283 	}
284 
285 	if (flow_tag > 0) {
286 		dp_mon_rx_update_rx_flow_tag_stats(pdev, flow_id);
287 	} else {
288 		dp_mon_debug("Invalid flow_tag: %d pdev: %pK ", flow_tag, pdev);
289 		invalid_fse = 1;
290 	}
291 
292 	if (invalid_cce && invalid_fse)
293 		return;
294 
295 	if (msdu_info->msdu_index >= DP_RX_MON_MAX_MSDU) {
296 		dp_mon_err("msdu_index causes overflow in headroom");
297 		return;
298 	}
299 
300 	dp_mon_debug("protocol_tag: %d, cce_metadata: %d, flow_tag: %d",
301 		     protocol_tag, cce_metadata, flow_tag);
302 
303 	dp_mon_debug("msdu_index: %d", msdu_info->msdu_index);
304 
305 
306 	nbuf_head = qdf_nbuf_head(nbuf);
307 
308 	*((uint16_t *)nbuf_head) = msdu_info->msdu_index + 1;
309 	nbuf_head += DP_RX_MON_TLV_MSDU_CNT;
310 
311 	nbuf_head += ((msdu_info->msdu_index) * DP_RX_MON_PF_TAG_SIZE);
312 	if (!invalid_cce)
313 		*((uint16_t *)nbuf_head) = protocol_tag;
314 	nbuf_head += sizeof(uint16_t);
315 	if (!invalid_fse)
316 		*((uint16_t *)nbuf_head) = flow_tag;
317 }
318 
319 #else
320 
/* No-op stub when protocol/flow tagging support is compiled out */
static
void dp_rx_mon_set_zero(qdf_nbuf_t nbuf)
{
}
325 
/* No-op stub when protocol/flow tagging support is compiled out */
static
void dp_rx_mon_shift_pf_tag_in_headroom(qdf_nbuf_t nbuf, struct dp_soc *soc,
					struct hal_rx_ppdu_info *ppdu_info)
{
}
331 
/* No-op stub when protocol/flow tagging support is compiled out */
static
void dp_rx_mon_pf_tag_to_buf_headroom_2_0(void *nbuf,
					  struct hal_rx_ppdu_info *ppdu_info,
					  struct dp_pdev *pdev,
					  struct dp_soc *soc)
{
}
339 
340 #endif
341 
342 /**
343  * dp_rx_mon_free_mpdu_queue() - Free MPDU queue
344  * @mon_pdev: monitor pdev
345  * @ppdu_info: PPDU info
346  *
347  * Return: Void
348  */
349 
350 static void dp_rx_mon_free_mpdu_queue(struct dp_mon_pdev *mon_pdev,
351 				      struct hal_rx_ppdu_info *ppdu_info)
352 {
353 	uint8_t user;
354 	qdf_nbuf_t mpdu;
355 
356 	for (user = 0; user < HAL_MAX_UL_MU_USERS; user++) {
357 		if (!qdf_nbuf_is_queue_empty(&ppdu_info->mpdu_q[user])) {
358 			while ((mpdu = qdf_nbuf_queue_remove(&ppdu_info->mpdu_q[user])) != NULL)
359 				dp_mon_free_parent_nbuf(mon_pdev, mpdu);
360 		}
361 	}
362 }
363 
364 /**
365  * dp_rx_mon_free_ppdu_info () - Free PPDU info
366  * @pdev: DP pdev
367  * @ppdu_info: PPDU info
368  *
369  * Return: Void
370  */
371 static void
372 dp_rx_mon_free_ppdu_info(struct dp_pdev *pdev,
373 			 struct hal_rx_ppdu_info *ppdu_info)
374 {
375 	struct dp_mon_pdev *mon_pdev;
376 
377 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
378 	dp_rx_mon_free_mpdu_queue(mon_pdev, ppdu_info);
379 	__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
380 }
381 
382 void dp_rx_mon_drain_wq(struct dp_pdev *pdev)
383 {
384 	struct dp_mon_pdev *mon_pdev;
385 	struct hal_rx_ppdu_info *ppdu_info = NULL;
386 	struct hal_rx_ppdu_info *temp_ppdu_info = NULL;
387 	struct dp_mon_pdev_be *mon_pdev_be;
388 
389 	if (qdf_unlikely(!pdev)) {
390 		dp_mon_debug("Pdev is NULL");
391 		return;
392 	}
393 
394 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
395 	if (qdf_unlikely(!mon_pdev)) {
396 		dp_mon_debug("monitor pdev is NULL");
397 		return;
398 	}
399 
400 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
401 
402 	qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
403 	TAILQ_FOREACH_SAFE(ppdu_info,
404 			   &mon_pdev_be->rx_mon_queue,
405 			   ppdu_list_elem,
406 			   temp_ppdu_info) {
407 		mon_pdev_be->rx_mon_queue_depth--;
408 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_queue,
409 			     ppdu_info, ppdu_list_elem);
410 
411 		dp_rx_mon_free_ppdu_info(pdev, ppdu_info);
412 	}
413 	qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
414 }
415 
416 /**
417  * dp_rx_mon_deliver_mpdu() - Deliver MPDU to osif layer
418  *
419  * @mon_pdev: monitor pdev
420  * @mpdu: MPDU nbuf
421  * @status: monitor status
422  *
423  * Return: QDF_STATUS
424  */
425 static QDF_STATUS
426 dp_rx_mon_deliver_mpdu(struct dp_mon_pdev *mon_pdev,
427 		       qdf_nbuf_t mpdu,
428 		       struct mon_rx_status *rx_status)
429 {
430 	qdf_nbuf_t nbuf;
431 
432 	if (mon_pdev->mvdev && mon_pdev->mvdev->monitor_vdev->osif_rx_mon) {
433 		mon_pdev->rx_mon_stats.mpdus_buf_to_stack++;
434 		nbuf = qdf_nbuf_get_ext_list(mpdu);
435 
436 		while (nbuf) {
437 			mon_pdev->rx_mon_stats.mpdus_buf_to_stack++;
438 			nbuf = nbuf->next;
439 		}
440 		mon_pdev->mvdev->monitor_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
441 							   mpdu,
442 							   rx_status);
443 	} else {
444 		return QDF_STATUS_E_FAILURE;
445 	}
446 
447 	return QDF_STATUS_SUCCESS;
448 }
449 
/**
 * dp_rx_mon_process_ppdu_info () - Process PPDU info
 * @pdev: DP pdev
 * @ppdu_info: PPDU info
 *
 * For every user and every counted MPDU: either hand the MPDU to lite
 * monitor processing, or (full monitor) restitch it, apply radiotap +
 * pf tags, and deliver it to the osif layer. Any MPDU that fails a step
 * is freed here; leftover queued MPDUs are drained at the end.
 *
 * Return: Void
 */
static void
dp_rx_mon_process_ppdu_info(struct dp_pdev *pdev,
			    struct hal_rx_ppdu_info *ppdu_info)
{
	struct dp_mon_pdev *mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
	uint8_t user;
	qdf_nbuf_t mpdu;

	if (!ppdu_info)
		return;

	mon_pdev->ppdu_info.rx_status.chan_noise_floor = pdev->chan_noise_floor;

	for (user = 0; user < ppdu_info->com_info.num_users; user++) {
		uint16_t mpdu_count  = ppdu_info->mpdu_count[user];
		uint16_t mpdu_idx;
		struct hal_rx_mon_mpdu_info *mpdu_meta;
		QDF_STATUS status;

		for (mpdu_idx = 0; mpdu_idx < mpdu_count; mpdu_idx++) {
			mpdu = qdf_nbuf_queue_remove(&ppdu_info->mpdu_q[user]);

			/* queue shorter than the advertised count */
			if (!mpdu)
				continue;

			/* MPDU metadata lives at the head of the nbuf data */
			mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(mpdu);

			if (dp_lite_mon_is_rx_enabled(mon_pdev)) {
				/* lite monitor owns delivery; on failure the
				 * buffer is ours to free
				 */
				status = dp_lite_mon_rx_mpdu_process(pdev, ppdu_info,
								     mpdu, mpdu_idx, user);
				if (status != QDF_STATUS_SUCCESS) {
					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
					continue;
				}
			} else {
				if (mpdu_meta->full_pkt) {
					if (qdf_unlikely(mpdu_meta->truncated)) {
						dp_mon_free_parent_nbuf(mon_pdev, mpdu);
						continue;
					}

					status = dp_rx_mon_handle_full_mon(pdev,
									   ppdu_info, mpdu);
					if (status != QDF_STATUS_SUCCESS) {
						dp_mon_free_parent_nbuf(mon_pdev, mpdu);
						continue;
					}
				} else {
					/* partial packets are not delivered */
					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
					continue;
				}

				/* reset mpdu metadata and apply radiotap header over MPDU */
				qdf_mem_zero(mpdu_meta, sizeof(struct hal_rx_mon_mpdu_info));
				if (!qdf_nbuf_update_radiotap(&ppdu_info->rx_status,
							      mpdu,
							      qdf_nbuf_headroom(mpdu))) {
					dp_mon_err("failed to update radiotap pdev: %pK",
						   pdev);
				}

				dp_rx_mon_shift_pf_tag_in_headroom(mpdu,
								   pdev->soc,
								   ppdu_info);

				/* Deliver MPDU to osif layer */
				status = dp_rx_mon_deliver_mpdu(mon_pdev,
								mpdu,
								&ppdu_info->rx_status);
				if (status != QDF_STATUS_SUCCESS)
					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
			}
		}
	}

	/* free anything left beyond the per-user mpdu_count */
	dp_rx_mon_free_mpdu_queue(mon_pdev, ppdu_info);
}
534 
535 /**
536  * dp_rx_mon_process_ppdu ()-  Deferred monitor processing
537  * This workqueue API handles:
538  * a. Full monitor
539  * b. Lite monitor
540  *
541  * @context: Opaque work context
542  *
543  * Return: none
544  */
545 void dp_rx_mon_process_ppdu(void *context)
546 {
547 	struct dp_pdev *pdev = (struct dp_pdev *)context;
548 	struct dp_mon_pdev *mon_pdev;
549 	struct hal_rx_ppdu_info *ppdu_info = NULL;
550 	struct hal_rx_ppdu_info *temp_ppdu_info = NULL;
551 	struct dp_mon_pdev_be *mon_pdev_be;
552 
553 	if (qdf_unlikely(!pdev)) {
554 		dp_mon_debug("Pdev is NULL");
555 		return;
556 	}
557 
558 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
559 	if (qdf_unlikely(!mon_pdev)) {
560 		dp_mon_debug("monitor pdev is NULL");
561 		return;
562 	}
563 
564 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
565 
566 	qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
567 	TAILQ_FOREACH_SAFE(ppdu_info,
568 			   &mon_pdev_be->rx_mon_queue,
569 			   ppdu_list_elem, temp_ppdu_info) {
570 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_queue,
571 			     ppdu_info, ppdu_list_elem);
572 
573 		mon_pdev_be->rx_mon_queue_depth--;
574 		dp_rx_mon_process_ppdu_info(pdev, ppdu_info);
575 		__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
576 	}
577 	qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
578 }
579 
580 /**
581  * dp_rx_mon_add_ppdu_info_to_wq () - Add PPDU info to workqueue
582  *
583  * @mon_pdev: monitor pdev
584  * @ppdu_info: ppdu info to be added to workqueue
585  *
586  * Return: SUCCESS or FAILIRE
587  */
588 
589 static QDF_STATUS
590 dp_rx_mon_add_ppdu_info_to_wq(struct dp_pdev *pdev,
591 			      struct hal_rx_ppdu_info *ppdu_info)
592 {
593 	struct dp_mon_pdev *mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
594 	struct dp_mon_pdev_be *mon_pdev_be =
595 		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
596 
597 	/* Full monitor or lite monitor mode is not enabled, return */
598 	if (!mon_pdev->monitor_configured &&
599 	    !dp_lite_mon_is_rx_enabled(mon_pdev))
600 		return QDF_STATUS_E_FAILURE;
601 
602 	if (qdf_likely(ppdu_info)) {
603 		if (mon_pdev_be->rx_mon_queue_depth < DP_RX_MON_WQ_THRESHOLD) {
604 			qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
605 			TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_queue,
606 					  ppdu_info, ppdu_list_elem);
607 			mon_pdev_be->rx_mon_queue_depth++;
608 			mon_pdev->rx_mon_stats.total_ppdu_info_enq++;
609 		} else {
610 			mon_pdev->rx_mon_stats.total_ppdu_info_drop++;
611 			dp_rx_mon_free_ppdu_info(pdev, ppdu_info);
612 		}
613 		qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
614 
615 		if (mon_pdev_be->rx_mon_queue_depth > DP_MON_QUEUE_DEPTH_MAX) {
616 			qdf_queue_work(0, mon_pdev_be->rx_mon_workqueue,
617 				       &mon_pdev_be->rx_mon_work);
618 		}
619 	}
620 	return QDF_STATUS_SUCCESS;
621 }
622 
/**
 * dp_rx_mon_handle_full_mon() - Restitch a full monitor MPDU in place
 * @pdev: DP pdev (unused in the body; kept in the public signature)
 * @ppdu_info: PPDU info (unused in the body; kept in the public signature)
 * @mpdu: MPDU nbuf whose frags alternate rx_hdr / rx_msdu buffers
 *
 * For raw decap, only the trailing FCS is trimmed. For non-raw decap,
 * each rx_hdr frag is shrunk to just the 802.11 (+LLC/SNAP) header, each
 * rx_msdu frag has its decap header skipped, and 4-byte A-MSDU padding
 * holes are zero-filled where the buffer has room, so the frag chain
 * reads as a contiguous 802.11 frame.
 *
 * Return: QDF_STATUS_SUCCESS on restitch, QDF_STATUS_E_FAILURE on a NULL
 *         mpdu or when too few frags are present
 */
QDF_STATUS
dp_rx_mon_handle_full_mon(struct dp_pdev *pdev,
			  struct hal_rx_ppdu_info *ppdu_info,
			  qdf_nbuf_t mpdu)
{
	uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len,
		 mpdu_buf_len, decap_hdr_pull_bytes, dir,
		 is_amsdu, amsdu_pad, frag_size, tot_msdu_len;
	struct hal_rx_mon_mpdu_info *mpdu_meta;
	struct hal_rx_mon_msdu_info *msdu_meta;
	char *hdr_desc;
	uint8_t num_frags, frag_iter, l2_hdr_offset;
	struct ieee80211_frame *wh;
	struct ieee80211_qoscntl *qos;
	void *hdr_frag_addr;
	uint32_t hdr_frag_size, frag_page_offset, pad_byte_pholder,
		 msdu_len;
	qdf_nbuf_t head_msdu, msdu_cur;
	void *frag_addr;
	bool prev_msdu_end_received = false;
	bool is_nbuf_head = true;

	/***************************************************************************
	 *********************** Non-raw packet ************************************
	 ---------------------------------------------------------------------------
	 |      | frag-0   | frag-1    | frag - 2 | frag - 3  | frag - 4 | frag - 5  |
	 | skb  | rx_hdr-1 | rx_msdu-1 | rx_hdr-2 | rx_msdu-2 | rx_hdr-3 | rx-msdu-3 |
	 ---------------------------------------------------------------------------
	 **************************************************************************/

	if (!mpdu) {
		dp_mon_debug("nbuf is NULL, return");
		return QDF_STATUS_E_FAILURE;
	}

	head_msdu = mpdu;

	mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(mpdu);

	/* Raw decap: only strip the trailing FCS from the last frag */
	if (mpdu_meta->decap_type == HAL_HW_RX_DECAP_FORMAT_RAW) {
		qdf_nbuf_trim_add_frag_size(mpdu,
					    qdf_nbuf_get_nr_frags(mpdu) - 1,
					    -HAL_RX_FCS_LEN, 0);
		return QDF_STATUS_SUCCESS;
	}

	num_frags = qdf_nbuf_get_nr_frags(mpdu);
	if (qdf_unlikely(num_frags < DP_MON_MIN_FRAGS_FOR_RESTITCH)) {
		dp_mon_debug("not enough frags(%d) for restitch", num_frags);
		return QDF_STATUS_E_FAILURE;
	}

	l2_hdr_offset = DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;

	/* hdr_desc points to 80211 hdr */
	hdr_desc = qdf_nbuf_get_frag_addr(mpdu, 0);

	/* Calculate Base header size */
	wifi_hdr_len = sizeof(struct ieee80211_frame);
	wh = (struct ieee80211_frame *)hdr_desc;

	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;

	/* 4-address (WDS) frames carry an extra 6-byte address */
	if (dir == IEEE80211_FC1_DIR_DSTODS)
		wifi_hdr_len += 6;

	is_amsdu = 0;
	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
		qos = (struct ieee80211_qoscntl *)
			(hdr_desc + wifi_hdr_len);
		wifi_hdr_len += 2;

		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
	}

	/*Calculate security header length based on 'Protected'
	 * and 'EXT_IV' flag
	 */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		char *iv = (char *)wh + wifi_hdr_len;

		if (iv[3] & KEY_EXTIV)
			sec_hdr_len = 8;
		else
			sec_hdr_len = 4;
	} else {
		sec_hdr_len = 0;
	}
	wifi_hdr_len += sec_hdr_len;

	/* MSDU related stuff LLC - AMSDU subframe header etc */
	msdu_llc_len = is_amsdu ? (DP_RX_MON_DECAP_HDR_SIZE +
				   DP_RX_MON_LLC_SIZE +
				   DP_RX_MON_SNAP_SIZE) :
				   (DP_RX_MON_LLC_SIZE + DP_RX_MON_SNAP_SIZE);

	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;

	/* "Decap" header to remove from MSDU buffer */
	decap_hdr_pull_bytes = DP_RX_MON_DECAP_HDR_SIZE;

	amsdu_pad = 0;
	tot_msdu_len = 0;
	/* NOTE(review): duplicated assignment; harmless, candidate for
	 * removal in a functional change.
	 */
	tot_msdu_len = 0;

	/*
	 * Update protocol and flow tag for MSDU
	 * update frag index in ctx_idx field.
	 * Reset head pointer data of nbuf before updating.
	 */
	QDF_NBUF_CB_RX_CTX_ID(mpdu) = 0;

	/* Construct destination address */
	hdr_frag_addr = qdf_nbuf_get_frag_addr(mpdu, 0);
	hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(mpdu, 0);

	/* Adjust page frag offset to point to 802.11 header */
	qdf_nbuf_trim_add_frag_size(head_msdu, 0, -(hdr_frag_size - mpdu_buf_len), 0);

	/* MSDU metadata precedes the packet in the rx_msdu buffer */
	msdu_meta = (struct hal_rx_mon_msdu_info *)(((void *)qdf_nbuf_get_frag_addr(mpdu, 1)) - (DP_RX_MON_PACKET_OFFSET + DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE));

	msdu_len = msdu_meta->msdu_len;

	/* Adjust page frag offset to appropriate after decap header */
	frag_page_offset =
		decap_hdr_pull_bytes;
	qdf_nbuf_move_frag_page_offset(head_msdu, 1, frag_page_offset);

	frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 1);
	pad_byte_pholder =
		RX_MONITOR_BUFFER_SIZE - (frag_size + DP_RX_MON_PACKET_OFFSET + DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);

	if (msdu_meta->first_buffer && msdu_meta->last_buffer) {
		/* MSDU with single buffer */
		amsdu_pad = frag_size & 0x3;
		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
		if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
			char *frag_addr_temp;

			/* grow the frag and zero the pad hole at its tail */
			qdf_nbuf_trim_add_frag_size(mpdu, 1, amsdu_pad, 0);
			frag_addr_temp =
				(char *)qdf_nbuf_get_frag_addr(mpdu, 1);
			frag_addr_temp = (frag_addr_temp +
					  qdf_nbuf_get_frag_size_by_idx(mpdu, 1)) -
				amsdu_pad;
			qdf_mem_zero(frag_addr_temp, amsdu_pad);
			amsdu_pad = 0;
		}
	} else {
		tot_msdu_len = frag_size;
		amsdu_pad = 0;
	}

	pad_byte_pholder = 0;
	for (msdu_cur = mpdu; msdu_cur;) {
		/* frag_iter will start from 0 for second skb onwards */
		if (msdu_cur == mpdu)
			frag_iter = 2;
		else
			frag_iter = 0;

		num_frags = qdf_nbuf_get_nr_frags(msdu_cur);

		for (; frag_iter < num_frags; frag_iter++) {
			/* Construct destination address
			 *  ----------------------------------------------------------
			 * |            | L2_HDR_PAD   |   Decap HDR | Payload | Pad  |
			 * |            | (First buffer)             |         |      |
			 * |            |                            /        /       |
			 * |            >Frag address points here   /        /        |
			 * |            \                          /        /         |
			 * |             \ This bytes needs to    /        /          |
			 * |              \  removed to frame pkt/        /           |
			 * |               ----------------------        /            |
			 * |                                     |     /     Add      |
			 * |                                     |    /   amsdu pad   |
			 * |   LLC HDR will be added here      <-|    |   Byte for    |
			 * |        |                            |    |   last frame  |
			 * |         >Dest addr will point       |    |    if space   |
			 * |            somewhere in this area   |    |    available  |
			 * |  And amsdu_pad will be created if   |    |               |
			 * | dint get added in last buffer       |    |               |
			 * |       (First Buffer)                |    |               |
			 *  ----------------------------------------------------------
			 */
			/* If previous msdu end has received, modify next frag's offset to point to LLC */
			if (prev_msdu_end_received) {
				hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter);
				/* Adjust page frag offset to point to llc/snap header */
				qdf_nbuf_trim_add_frag_size(msdu_cur, frag_iter, -(hdr_frag_size - msdu_llc_len), 0);
				prev_msdu_end_received = false;
				continue;
			}

			frag_addr =
				qdf_nbuf_get_frag_addr(msdu_cur, frag_iter) -
						       (DP_RX_MON_PACKET_OFFSET +
						       DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);
			msdu_meta = (struct hal_rx_mon_msdu_info *)frag_addr;

			/*
			 * Update protocol and flow tag for MSDU
			 * update frag index in ctx_idx field
			 */
			QDF_NBUF_CB_RX_CTX_ID(msdu_cur) = frag_iter;

			frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur,
					frag_iter);

			/* If Middle buffer, dont add any header */
			if ((!msdu_meta->first_buffer) &&
					(!msdu_meta->last_buffer)) {
				tot_msdu_len += frag_size;
				amsdu_pad = 0;
				pad_byte_pholder = 0;
				continue;
			}

			/* Calculate if current buffer has placeholder
			 * to accommodate amsdu pad byte
			 */
			pad_byte_pholder =
				RX_MONITOR_BUFFER_SIZE - (frag_size + (DP_RX_MON_PACKET_OFFSET +
							  DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE));
			/*
			 * We will come here only only three condition:
			 * 1. Msdu with single Buffer
			 * 2. First buffer in case MSDU is spread in multiple
			 *    buffer
			 * 3. Last buffer in case MSDU is spread in multiple
			 *    buffer
			 *
			 *         First buffER | Last buffer
			 * Case 1:      1       |     1
			 * Case 2:      1       |     0
			 * Case 3:      0       |     1
			 *
			 * In 3rd case only l2_hdr_padding byte will be Zero and
			 * in other case, It will be 2 Bytes.
			 */
			if (msdu_meta->first_buffer)
				l2_hdr_offset =
					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
			else
				l2_hdr_offset = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;

			if (msdu_meta->first_buffer) {
				/* Adjust page frag offset to point to 802.11 header */
				hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter-1);
				qdf_nbuf_trim_add_frag_size(msdu_cur, frag_iter - 1, -(hdr_frag_size - (msdu_llc_len + amsdu_pad)), 0);

				/* Adjust page frag offset to appropriate after decap header */
				frag_page_offset =
					(decap_hdr_pull_bytes + l2_hdr_offset);
				if (frag_size > (decap_hdr_pull_bytes + l2_hdr_offset)) {
					qdf_nbuf_move_frag_page_offset(msdu_cur, frag_iter, frag_page_offset);
					frag_size = frag_size - (l2_hdr_offset + decap_hdr_pull_bytes);
				}


				/*
				 * Calculate new page offset and create hole
				 * if amsdu_pad required.
				 */
				tot_msdu_len = frag_size;
				/*
				 * No amsdu padding required for first frame of
				 * continuation buffer
				 */
				if (!msdu_meta->last_buffer) {
					amsdu_pad = 0;
					continue;
				}
			} else {
				tot_msdu_len += frag_size;
			}

			/* Will reach to this place in only two case:
			 * 1. Single buffer MSDU
			 * 2. Last buffer of MSDU in case of multiple buf MSDU
			 */

			/* This flag is used to identify msdu boundary */
			prev_msdu_end_received = true;
			/* Check size of buffer if amsdu padding required */
			amsdu_pad = tot_msdu_len & 0x3;
			amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;

			/* Create placeholder if current buffer can
			 * accommodate padding.
			 */
			if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
				char *frag_addr_temp;

				qdf_nbuf_trim_add_frag_size(msdu_cur,
						frag_iter,
						amsdu_pad, 0);
				frag_addr_temp = (char *)qdf_nbuf_get_frag_addr(msdu_cur,
						frag_iter);
				frag_addr_temp = (frag_addr_temp +
						qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter)) -
					amsdu_pad;
				qdf_mem_zero(frag_addr_temp, amsdu_pad);
				amsdu_pad = 0;
			}

			/* reset tot_msdu_len */
			tot_msdu_len = 0;
		}
		/* advance: head nbuf chains via ext list, rest via queue */
		if (is_nbuf_head) {
			msdu_cur = qdf_nbuf_get_ext_list(msdu_cur);
			is_nbuf_head = false;
		} else {
			msdu_cur = qdf_nbuf_queue_next(msdu_cur);
		}
	}

	return QDF_STATUS_SUCCESS;
}
942 
943 /**
944  * dp_rx_mon_flush_status_buf_queue () - Flush status buffer queue
945  *
946  * @pdev: DP pdev handle
947  *
948  *Return: void
949  */
950 static inline void
951 dp_rx_mon_flush_status_buf_queue(struct dp_pdev *pdev)
952 {
953 	struct dp_soc *soc = pdev->soc;
954 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
955 	struct dp_mon_pdev_be *mon_pdev_be =
956 		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
957 	union dp_mon_desc_list_elem_t *desc_list = NULL;
958 	union dp_mon_desc_list_elem_t *tail = NULL;
959 	struct dp_mon_desc *mon_desc;
960 	uint8_t idx;
961 	void *buf;
962 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
963 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
964 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
965 	uint8_t work_done = 0;
966 	uint16_t status_buf_count;
967 
968 	if (!mon_pdev_be->desc_count) {
969 		dp_mon_info("no of status buffer count is zero: %pK", pdev);
970 		return;
971 	}
972 
973 	status_buf_count = mon_pdev_be->desc_count;
974 	for (idx = 0; idx < status_buf_count; idx++) {
975 		mon_desc = mon_pdev_be->status[idx];
976 		if (!mon_desc) {
977 			qdf_assert_always(0);
978 			return;
979 		}
980 
981 		buf = mon_desc->buf_addr;
982 
983 		dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
984 		work_done++;
985 
986 		/* set status buffer pointer to NULL */
987 		mon_pdev_be->status[idx] = NULL;
988 		mon_pdev_be->desc_count--;
989 
990 		qdf_frag_free(buf);
991 		DP_STATS_INC(mon_soc, frag_free, 1);
992 	}
993 
994 	if (work_done) {
995 		mon_pdev->rx_mon_stats.mon_rx_bufs_replenished_dest +=
996 			work_done;
997 		dp_mon_buffers_replenish(soc, &soc->rxdma_mon_buf_ring[0],
998 					 rx_mon_desc_pool,
999 					 work_done,
1000 					 &desc_list, &tail, NULL);
1001 	}
1002 }
1003 
1004 /**
1005  * dp_rx_mon_handle_flush_n_trucated_ppdu () - Handle flush and truncated ppdu
1006  *
1007  * @soc: DP soc handle
1008  * @pdev: pdev handle
1009  * @mon_desc: mon sw desc
1010  */
1011 static inline void
1012 dp_rx_mon_handle_flush_n_trucated_ppdu(struct dp_soc *soc,
1013 				       struct dp_pdev *pdev,
1014 				       struct dp_mon_desc *mon_desc)
1015 {
1016 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1017 	union dp_mon_desc_list_elem_t *tail = NULL;
1018 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1019 	struct dp_mon_soc_be *mon_soc_be =
1020 			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1021 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1022 	uint16_t work_done;
1023 
1024 	/* Flush status buffers in queue */
1025 	dp_rx_mon_flush_status_buf_queue(pdev);
1026 	qdf_frag_free(mon_desc->buf_addr);
1027 	DP_STATS_INC(mon_soc, frag_free, 1);
1028 	dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
1029 	work_done = 1;
1030 	dp_mon_buffers_replenish(soc, &soc->rxdma_mon_buf_ring[0],
1031 				 rx_mon_desc_pool,
1032 				 work_done,
1033 				 &desc_list, &tail, NULL);
1034 }
1035 
/**
 * dp_rx_mon_process_tlv_status() - Act on one parsed monitor status TLV
 * @pdev: DP pdev handle
 * @ppdu_info: HAL PPDU info carrying per-user parse state for this PPDU
 * @status_frag: status buffer fragment the TLV was parsed out of
 * @tlv_status: TLV type returned by hal_rx_status_get_tlv_info()
 * @desc_list: free mon desc list head; reaped descriptors are appended here
 * @tail: free mon desc list tail
 *
 * Builds the per-user MPDU nbuf queue (ppdu_info->mpdu_q[user]) as RX_HEADER,
 * MON_BUF_ADDR, MSDU_END, MPDU_START and MPDU_END TLVs arrive for a PPDU.
 *
 * Return: number of monitor packet buffers reaped for later replenish
 */
uint8_t dp_rx_mon_process_tlv_status(struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info,
				     void *status_frag,
				     uint16_t tlv_status,
				     union dp_mon_desc_list_elem_t **desc_list,
				     union dp_mon_desc_list_elem_t **tail)
{
	struct dp_soc *soc  = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	qdf_nbuf_t nbuf, tmp_nbuf;
	qdf_frag_t addr;
	uint8_t user_id = ppdu_info->user_id;
	uint8_t mpdu_idx = ppdu_info->mpdu_count[user_id];
	uint16_t num_frags;
	uint8_t num_buf_reaped = 0;
	QDF_STATUS status;

	/* Nothing to build if neither full monitor nor lite monitor RX
	 * is enabled on this pdev.
	 */
	if (!mon_pdev->monitor_configured &&
	    !dp_lite_mon_is_rx_enabled(mon_pdev)) {
		return num_buf_reaped;
	}

	switch (tlv_status) {
	case HAL_TLV_STATUS_HEADER: {
		/* If this is first RX_HEADER for MPDU, allocate skb
		 * else add frag to already allocated skb
		 */

		if (!ppdu_info->mpdu_info[user_id].mpdu_start_received) {

			nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
					      DP_RX_MON_TLV_ROOM +
					      DP_RX_MON_MAX_RADIO_TAP_HDR,
					      DP_RX_MON_TLV_ROOM +
					      DP_RX_MON_MAX_RADIO_TAP_HDR,
					      4, FALSE);

			/* Set *head_msdu->next as NULL as all msdus are
			 * mapped via nr frags
			 */
			if (qdf_unlikely(!nbuf)) {
				dp_mon_err("malloc failed pdev: %pK ", pdev);
				return num_buf_reaped;
			}

			mon_pdev->rx_mon_stats.parent_buf_alloc++;

			dp_rx_mon_set_zero(nbuf);

			qdf_nbuf_set_next(nbuf, NULL);

			/* New parent nbuf becomes the tail of this user's
			 * MPDU queue.
			 */
			qdf_nbuf_queue_add(&ppdu_info->mpdu_q[user_id], nbuf);

			status = dp_rx_mon_nbuf_add_rx_frag(nbuf, status_frag,
							    ppdu_info->hdr_len - DP_RX_MON_RX_HDR_OFFSET,
							    ppdu_info->data - (unsigned char *)status_frag + 4,
							    DP_MON_DATA_BUFFER_SIZE, true);
			if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
				dp_mon_err("num_frags exceeding MAX frags");
				qdf_assert_always(0);
			}
			ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = true;
			ppdu_info->mpdu_info[user_id].first_rx_hdr_rcvd = true;
			/* initialize decap type to invalid, this will be set to appropriate
			 * value once the mpdu start tlv is received
			 */
			ppdu_info->mpdu_info[user_id].decap_type = DP_MON_DECAP_FORMAT_INVALID;
		} else {
			/* Subsequent RX_HEADER for the same MPDU: RAW decap
			 * MPDUs carry no extra header frags.
			 */
			if (ppdu_info->mpdu_info[user_id].decap_type ==
					HAL_HW_RX_DECAP_FORMAT_RAW) {
				return num_buf_reaped;
			}

			if (dp_lite_mon_is_rx_enabled(mon_pdev) &&
			    !dp_lite_mon_is_level_msdu(mon_pdev))
				break;

			nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
			if (qdf_unlikely(!nbuf)) {
				dp_mon_debug("nbuf is NULL");
				return num_buf_reaped;
			}

			/* Find an nbuf in the ext list with a free frag slot */
			tmp_nbuf = qdf_get_nbuf_valid_frag(nbuf);

			if (!tmp_nbuf) {
				tmp_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
							  DP_RX_MON_MAX_MONITOR_HEADER,
							  DP_RX_MON_MAX_MONITOR_HEADER,
							  4, FALSE);
				if (qdf_unlikely(!tmp_nbuf)) {
					dp_mon_err("nbuf is NULL");
					qdf_assert_always(0);
				}
				mon_pdev->rx_mon_stats.parent_buf_alloc++;
				/* add new skb to frag list */
				qdf_nbuf_append_ext_list(nbuf, tmp_nbuf,
							 qdf_nbuf_len(tmp_nbuf));
			}
			dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, status_frag,
						   ppdu_info->hdr_len - DP_RX_MON_RX_HDR_OFFSET,
						   ppdu_info->data - (unsigned char *)status_frag + 4,
						   DP_MON_DATA_BUFFER_SIZE,
						   true);
		}
		ppdu_info->rx_hdr_rcvd[user_id] = true;
	}
	break;
	case HAL_TLV_STATUS_MON_BUF_ADDR:
	{
		struct hal_rx_mon_msdu_info *buf_info;
		struct hal_mon_packet_info *packet_info = &ppdu_info->packet_info;
		/* sw_cookie carries the SW descriptor address for this
		 * packet buffer.
		 */
		struct dp_mon_desc *mon_desc = (struct dp_mon_desc *)(uintptr_t)ppdu_info->packet_info.sw_cookie;
		struct hal_rx_mon_mpdu_info *mpdu_info;
		uint16_t frag_idx = 0;

		qdf_assert_always(mon_desc);

		if (mon_desc->magic != DP_MON_DESC_MAGIC)
			qdf_assert_always(0);

		addr = mon_desc->buf_addr;
		qdf_assert_always(addr);

		mpdu_info = &ppdu_info->mpdu_info[user_id];
		if (!mon_desc->unmapped) {
			qdf_mem_unmap_page(soc->osdev,
					   (qdf_dma_addr_t)mon_desc->paddr,
				   DP_MON_DATA_BUFFER_SIZE,
					   QDF_DMA_FROM_DEVICE);
			mon_desc->unmapped = 1;
		}
		/* The descriptor itself is reaped regardless of whether the
		 * frag below is kept or dropped.
		 */
		dp_mon_add_to_free_desc_list(desc_list, tail, mon_desc);
		num_buf_reaped++;

		mon_pdev->rx_mon_stats.pkt_buf_count++;

		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {

			/* WAR: RX_HDR is not received for this MPDU, drop this frame */
			mon_pdev->rx_mon_stats.rx_hdr_not_received++;
			DP_STATS_INC(mon_soc, frag_free, 1);
			qdf_frag_free(addr);
			return num_buf_reaped;
		}

		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);

		if (mpdu_info->decap_type == DP_MON_DECAP_FORMAT_INVALID) {
			/* decap type is invalid, drop the frame */
			mon_pdev->rx_mon_stats.mpdu_decap_type_invalid++;
			DP_STATS_INC(mon_soc, frag_free, 1);
			mon_pdev->rx_mon_stats.parent_buf_free++;
			qdf_frag_free(addr);
			qdf_nbuf_queue_remove_last(&ppdu_info->mpdu_q[user_id]);
			qdf_nbuf_free(nbuf);
			/* if invalid decap type handling is disabled, assert */
			if (soc->wlan_cfg_ctx->is_handle_invalid_decap_type_disabled) {
				dp_mon_err("Decap type invalid");
				qdf_assert_always(0);
			}
			return num_buf_reaped;
		}

		tmp_nbuf = qdf_get_nbuf_valid_frag(nbuf);

		if (!tmp_nbuf) {
			tmp_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
						  DP_RX_MON_MAX_MONITOR_HEADER,
						  DP_RX_MON_MAX_MONITOR_HEADER,
						  4, FALSE);
			if (qdf_unlikely(!tmp_nbuf)) {
				dp_mon_err("nbuf is NULL");
				DP_STATS_INC(mon_soc, frag_free, 1);
				mon_pdev->rx_mon_stats.parent_buf_free++;
				qdf_frag_free(addr);
				/* remove this nbuf from queue */
				qdf_nbuf_queue_remove_last(&ppdu_info->mpdu_q[user_id]);
				qdf_nbuf_free(nbuf);
				return num_buf_reaped;
			}
			mon_pdev->rx_mon_stats.parent_buf_alloc++;
			/* add new skb to frag list */
			qdf_nbuf_append_ext_list(nbuf, tmp_nbuf,
						 qdf_nbuf_len(tmp_nbuf));
		}
		mpdu_info->full_pkt = true;

		if (mpdu_info->decap_type == HAL_HW_RX_DECAP_FORMAT_RAW) {
			if (mpdu_info->first_rx_hdr_rcvd) {
				/* RAW: the earlier RX_HDR frag at index 0 is
				 * replaced by the packet buffer itself.
				 */
				qdf_nbuf_remove_frag(nbuf, frag_idx, DP_MON_DATA_BUFFER_SIZE);
				dp_rx_mon_nbuf_add_rx_frag(nbuf, addr,
							   packet_info->dma_length,
							   DP_RX_MON_PACKET_OFFSET,
							   DP_MON_DATA_BUFFER_SIZE,
							   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
				mpdu_info->first_rx_hdr_rcvd = false;
			} else {
				dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, addr,
							   packet_info->dma_length,
							   DP_RX_MON_PACKET_OFFSET,
							   DP_MON_DATA_BUFFER_SIZE,
							   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
			}
		} else {
			dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, addr,
						   packet_info->dma_length,
						   DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE +
						   DP_RX_MON_PACKET_OFFSET,
						   DP_MON_DATA_BUFFER_SIZE,
						   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
			/* MSDU metadata lives at the start of the buffer;
			 * overwritten/finalized in HAL_TLV_STATUS_MSDU_END.
			 */
			buf_info = addr;

			if (!ppdu_info->msdu[user_id].first_buffer) {
				buf_info->first_buffer = true;
				ppdu_info->msdu[user_id].first_buffer = true;
			} else {
				buf_info->first_buffer = false;
			}

			if (packet_info->msdu_continuation)
				buf_info->last_buffer = false;
			else
				buf_info->last_buffer = true;

			buf_info->frag_len = packet_info->dma_length;
		}
		if (qdf_unlikely(packet_info->truncated))
			mpdu_info->truncated = true;
	}
	break;
	case HAL_TLV_STATUS_MSDU_END:
	{
		struct hal_rx_mon_msdu_info *msdu_info = &ppdu_info->msdu[user_id];
		struct hal_rx_mon_msdu_info *last_buf_info;
		/* update msdu metadata at last buffer of msdu in MPDU */
		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
			/* reset msdu info for next msdu for same user */
			qdf_mem_zero(msdu_info, sizeof(*msdu_info));
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
				     __LINE__, user_id, mpdu_idx);
			break;
		}
		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		num_frags = qdf_nbuf_get_nr_frags(nbuf);
		if (ppdu_info->mpdu_info[user_id].decap_type ==
				HAL_HW_RX_DECAP_FORMAT_RAW) {
			break;
		}
		/* This points to last buffer of MSDU . update metadata here */
		addr = qdf_nbuf_get_frag_addr(nbuf, num_frags - 1) -
					      (DP_RX_MON_PACKET_OFFSET +
					       DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);
		last_buf_info = addr;

		last_buf_info->first_msdu = msdu_info->first_msdu;
		last_buf_info->last_msdu = msdu_info->last_msdu;
		last_buf_info->decap_type = msdu_info->decap_type;
		last_buf_info->msdu_index = msdu_info->msdu_index;
		last_buf_info->user_rssi = msdu_info->user_rssi;
		last_buf_info->reception_type = msdu_info->reception_type;
		last_buf_info->msdu_len = msdu_info->msdu_len;

		dp_rx_mon_pf_tag_to_buf_headroom_2_0(nbuf, ppdu_info, pdev,
						     soc);
		/* reset msdu info for next msdu for same user */
		qdf_mem_zero(msdu_info, sizeof(*msdu_info));

		/* If flow classification is enabled,
		 * update cce_metadata and fse_metadata
		 */
	}
	break;
	case HAL_TLV_STATUS_MPDU_START:
	{
		struct hal_rx_mon_mpdu_info *mpdu_info, *mpdu_meta;

		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d", __LINE__, user_id, mpdu_idx);
			break;
		}
		/* Propagate the decap type into the parent nbuf headroom */
		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(nbuf);
		mpdu_info = &ppdu_info->mpdu_info[user_id];
		mpdu_meta->decap_type = mpdu_info->decap_type;
		ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = true;
	break;
	}
	case HAL_TLV_STATUS_MPDU_END:
	{
		struct hal_rx_mon_mpdu_info *mpdu_info, *mpdu_meta;
		mpdu_info = &ppdu_info->mpdu_info[user_id];
		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
			/* reset mpdu info for next mpdu for same user */
			qdf_mem_zero(mpdu_info, sizeof(*mpdu_info));
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
				     __LINE__, user_id, mpdu_idx);
			break;
		}
		/* Finalize MPDU error/status flags in the nbuf headroom */
		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(nbuf);
		mpdu_meta->mpdu_length_err = mpdu_info->mpdu_length_err;
		mpdu_meta->fcs_err = mpdu_info->fcs_err;
		ppdu_info->rx_status.rs_fcs_err = mpdu_info->fcs_err;
		mpdu_meta->overflow_err = mpdu_info->overflow_err;
		mpdu_meta->decrypt_err = mpdu_info->decrypt_err;
		mpdu_meta->full_pkt = mpdu_info->full_pkt;
		mpdu_meta->truncated = mpdu_info->truncated;

		/* reset mpdu info for next mpdu for same user */
		qdf_mem_zero(mpdu_info, sizeof(*mpdu_info));
		ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = false;
		ppdu_info->mpdu_count[user_id]++;
		ppdu_info->rx_hdr_rcvd[user_id] = false;
	}
	break;
	}
	return num_buf_reaped;
}
1359 
1360 /**
1361  * dp_rx_mon_process_status_tlv () - Handle mon status process TLV
1362  *
1363  * @pdev: DP pdev handle
1364  *
1365  * Return
1366  */
1367 static inline struct hal_rx_ppdu_info *
1368 dp_rx_mon_process_status_tlv(struct dp_pdev *pdev)
1369 {
1370 	struct dp_soc *soc = pdev->soc;
1371 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1372 	struct dp_mon_pdev_be *mon_pdev_be =
1373 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1374 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1375 	union dp_mon_desc_list_elem_t *tail = NULL;
1376 	struct dp_mon_desc *mon_desc;
1377 	uint8_t idx, user;
1378 	void *buf;
1379 	struct hal_rx_ppdu_info *ppdu_info;
1380 	uint8_t *rx_tlv;
1381 	uint8_t *rx_tlv_start;
1382 	uint16_t end_offset = 0;
1383 	uint16_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
1384 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1385 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1386 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1387 	uint8_t work_done = 0;
1388 	uint16_t status_buf_count;
1389 
1390 	if (!mon_pdev_be->desc_count) {
1391 		dp_mon_err("no of status buffer count is zero: %pK", pdev);
1392 		return NULL;
1393 	}
1394 
1395 	ppdu_info = dp_rx_mon_get_ppdu_info(mon_pdev);
1396 
1397 	if (!ppdu_info) {
1398 		dp_mon_err("ppdu_info malloc failed pdev: %pK", pdev);
1399 		dp_rx_mon_flush_status_buf_queue(pdev);
1400 		return NULL;
1401 	}
1402 	mon_pdev->rx_mon_stats.total_ppdu_info_alloc++;
1403 
1404 	for (user = 0; user < HAL_MAX_UL_MU_USERS; user++)
1405 		qdf_nbuf_queue_init(&ppdu_info->mpdu_q[user]);
1406 
1407 	status_buf_count = mon_pdev_be->desc_count;
1408 	for (idx = 0; idx < status_buf_count; idx++) {
1409 		mon_desc = mon_pdev_be->status[idx];
1410 		if (!mon_desc) {
1411 			qdf_assert_always(0);
1412 			return NULL;
1413 		}
1414 
1415 		buf = mon_desc->buf_addr;
1416 		end_offset = mon_desc->end_offset;
1417 
1418 		dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
1419 		work_done++;
1420 
1421 		rx_tlv = buf;
1422 		rx_tlv_start = buf;
1423 
1424 		do {
1425 			tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
1426 								ppdu_info,
1427 								pdev->soc->hal_soc,
1428 								buf);
1429 
1430 			work_done += dp_rx_mon_process_tlv_status(pdev,
1431 								  ppdu_info,
1432 								  buf,
1433 								  tlv_status,
1434 								  &desc_list,
1435 								  &tail);
1436 			rx_tlv = hal_rx_status_get_next_tlv(rx_tlv, 1);
1437 
1438 			/* HW provides end_offset (how many bytes HW DMA'ed)
1439 			 * as part of descriptor, use this as delimiter for
1440 			 * status buffer
1441 			 */
1442 			if ((rx_tlv - rx_tlv_start) >= (end_offset + 1))
1443 				break;
1444 
1445 	} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
1446 			(tlv_status == HAL_TLV_STATUS_HEADER) ||
1447 			(tlv_status == HAL_TLV_STATUS_MPDU_END) ||
1448 			(tlv_status == HAL_TLV_STATUS_MSDU_END) ||
1449 			(tlv_status == HAL_TLV_STATUS_MON_BUF_ADDR) ||
1450 			(tlv_status == HAL_TLV_STATUS_MPDU_START));
1451 
1452 		/* set status buffer pointer to NULL */
1453 		mon_pdev_be->status[idx] = NULL;
1454 		mon_pdev_be->desc_count--;
1455 
1456 		qdf_frag_free(buf);
1457 		DP_STATS_INC(mon_soc, frag_free, 1);
1458 		mon_pdev->rx_mon_stats.status_buf_count++;
1459 	}
1460 
1461 	dp_mon_rx_stats_update_rssi_dbm_params(mon_pdev, ppdu_info);
1462 	if (work_done) {
1463 		mon_pdev->rx_mon_stats.mon_rx_bufs_replenished_dest +=
1464 				work_done;
1465 		dp_mon_buffers_replenish(soc, &soc->rxdma_mon_buf_ring[0],
1466 					 rx_mon_desc_pool,
1467 					 work_done,
1468 					 &desc_list, &tail, NULL);
1469 	}
1470 
1471 	ppdu_info->rx_status.tsft = ppdu_info->rx_status.tsft +
1472 				    pdev->timestamp.mlo_offset_lo_us +
1473 				    ((uint64_t)pdev->timestamp.mlo_offset_hi_us
1474 				    << 32);
1475 
1476 	return ppdu_info;
1477 }
1478 
1479 /**
1480  * dp_rx_mon_update_peer_id() - Update sw_peer_id with link peer_id
1481  *
1482  * @pdev: DP pdev handle
1483  * @ppdu_info: HAL PPDU Info buffer
1484  *
1485  * Return: none
1486  */
1487 #ifdef WLAN_FEATURE_11BE_MLO
1488 #define DP_PEER_ID_MASK 0x3FFF
1489 static inline
1490 void dp_rx_mon_update_peer_id(struct dp_pdev *pdev,
1491 			      struct hal_rx_ppdu_info *ppdu_info)
1492 {
1493 	uint32_t i;
1494 	uint16_t peer_id;
1495 	struct dp_soc *soc = pdev->soc;
1496 	uint32_t num_users = ppdu_info->com_info.num_users;
1497 
1498 	for (i = 0; i < num_users; i++) {
1499 		peer_id = ppdu_info->rx_user_status[i].sw_peer_id;
1500 		if (peer_id == HTT_INVALID_PEER)
1501 			continue;
1502 		/*
1503 		+---------------------------------------------------------------------+
1504 		| 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
1505 		+---------------------------------------------------------------------+
1506 		| CHIP ID | ML |                     PEER ID                          |
1507 		+---------------------------------------------------------------------+
1508 		*/
1509 		peer_id &= DP_PEER_ID_MASK;
1510 		peer_id = dp_get_link_peer_id_by_lmac_id(soc, peer_id,
1511 							 pdev->lmac_id);
1512 		ppdu_info->rx_user_status[i].sw_peer_id = peer_id;
1513 	}
1514 }
1515 #else
1516 static inline
1517 void dp_rx_mon_update_peer_id(struct dp_pdev *pdev,
1518 			      struct hal_rx_ppdu_info *ppdu_info)
1519 {
1520 }
1521 #endif
1522 
1523 static inline uint32_t
1524 dp_rx_mon_srng_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
1525 			   uint32_t mac_id, uint32_t quota)
1526 {
1527 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1528 	struct dp_mon_pdev *mon_pdev;
1529 	struct dp_mon_pdev_be *mon_pdev_be;
1530 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1531 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1532 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1533 	hal_soc_handle_t hal_soc = soc->hal_soc;
1534 	void *rx_mon_dst_ring_desc;
1535 	void *mon_dst_srng;
1536 	uint32_t work_done = 0;
1537 	struct hal_rx_ppdu_info *ppdu_info = NULL;
1538 	QDF_STATUS status;
1539 
1540 	if (!pdev) {
1541 		dp_mon_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
1542 		return work_done;
1543 	}
1544 
1545 	mon_pdev = pdev->monitor_pdev;
1546 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1547 	mon_dst_srng = soc->rxdma_mon_dst_ring[mac_id].hal_srng;
1548 
1549 	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
1550 		dp_mon_err("%pK: : HAL Monitor Destination Ring Init Failed -- %pK",
1551 			   soc, mon_dst_srng);
1552 		return work_done;
1553 	}
1554 
1555 	hal_soc = soc->hal_soc;
1556 
1557 	qdf_assert((hal_soc && pdev));
1558 
1559 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
1560 
1561 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dst_srng))) {
1562 		dp_mon_err("%s %d : HAL Mon Dest Ring access Failed -- %pK",
1563 			   __func__, __LINE__, mon_dst_srng);
1564 		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
1565 		return work_done;
1566 	}
1567 
1568 	while (qdf_likely((rx_mon_dst_ring_desc =
1569 			  (void *)hal_srng_dst_peek(hal_soc, mon_dst_srng))
1570 				&& quota--)) {
1571 		struct hal_mon_desc hal_mon_rx_desc = {0};
1572 		struct dp_mon_desc *mon_desc;
1573 		hal_be_get_mon_dest_status(soc->hal_soc,
1574 					   rx_mon_dst_ring_desc,
1575 					   &hal_mon_rx_desc);
1576 		/* If it's empty descriptor, skip processing
1577 		 * and process next hW desc
1578 		 */
1579 		if (hal_mon_rx_desc.empty_descriptor == 1) {
1580 			dp_mon_debug("empty descriptor found mon_pdev: %pK",
1581 				     mon_pdev);
1582 			rx_mon_dst_ring_desc =
1583 				hal_srng_dst_get_next(hal_soc, mon_dst_srng);
1584 			mon_pdev->rx_mon_stats.empty_desc_ppdu++;
1585 			continue;
1586 		}
1587 		mon_desc = (struct dp_mon_desc *)(uintptr_t)(hal_mon_rx_desc.buf_addr);
1588 		qdf_assert_always(mon_desc);
1589 
1590 		if ((mon_desc == mon_pdev_be->prev_rxmon_desc) &&
1591 		    (mon_desc->cookie == mon_pdev_be->prev_rxmon_cookie)) {
1592 			dp_mon_err("duplicate descritout found mon_pdev: %pK mon_desc: %pK cookie: %d",
1593 				   mon_pdev, mon_desc, mon_desc->cookie);
1594 			mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
1595 			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
1596 			continue;
1597 		}
1598 		mon_pdev_be->prev_rxmon_desc = mon_desc;
1599 		mon_pdev_be->prev_rxmon_cookie = mon_desc->cookie;
1600 
1601 		if (!mon_desc->unmapped) {
1602 			qdf_mem_unmap_page(soc->osdev, mon_desc->paddr,
1603 					   rx_mon_desc_pool->buf_size,
1604 					   QDF_DMA_FROM_DEVICE);
1605 			mon_desc->unmapped = 1;
1606 		}
1607 		mon_desc->end_offset = hal_mon_rx_desc.end_offset;
1608 
1609 		/* Flush and truncated status buffers content
1610 		 * need to discarded
1611 		 */
1612 		if (hal_mon_rx_desc.end_reason == HAL_MON_FLUSH_DETECTED ||
1613 		    hal_mon_rx_desc.end_reason == HAL_MON_PPDU_TRUNCATED) {
1614 			dp_mon_debug("end_resaon: %d mon_pdev: %pK",
1615 				     hal_mon_rx_desc.end_reason, mon_pdev);
1616 			mon_pdev->rx_mon_stats.status_ppdu_drop++;
1617 			dp_rx_mon_handle_flush_n_trucated_ppdu(soc,
1618 							       pdev,
1619 							       mon_desc);
1620 			rx_mon_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
1621 							mon_dst_srng);
1622 			continue;
1623 		}
1624 		if (mon_pdev_be->desc_count >= DP_MON_MAX_STATUS_BUF)
1625 			qdf_assert_always(0);
1626 
1627 		mon_pdev_be->status[mon_pdev_be->desc_count++] = mon_desc;
1628 
1629 		rx_mon_dst_ring_desc = hal_srng_dst_get_next(hal_soc, mon_dst_srng);
1630 
1631 		status = dp_rx_process_pktlog_be(soc, pdev, ppdu_info,
1632 						 mon_desc->buf_addr,
1633 						 hal_mon_rx_desc.end_offset);
1634 
1635 		if (hal_mon_rx_desc.end_reason == HAL_MON_STATUS_BUFFER_FULL)
1636 			continue;
1637 
1638 		mon_pdev->rx_mon_stats.status_ppdu_done++;
1639 
1640 		ppdu_info = dp_rx_mon_process_status_tlv(pdev);
1641 
1642 		if (ppdu_info)
1643 			dp_rx_mon_update_peer_id(pdev, ppdu_info);
1644 
1645 		/* Call enhanced stats update API */
1646 		if (mon_pdev->enhanced_stats_en && ppdu_info)
1647 			dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);
1648 		else if (dp_cfr_rcc_mode_status(pdev) && ppdu_info)
1649 			dp_rx_handle_cfr(soc, pdev, ppdu_info);
1650 
1651 		dp_rx_mon_update_user_ctrl_frame_stats(pdev, ppdu_info);
1652 
1653 		status = dp_rx_mon_add_ppdu_info_to_wq(pdev, ppdu_info);
1654 		if (status != QDF_STATUS_SUCCESS) {
1655 			if (ppdu_info)
1656 				__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
1657 		}
1658 
1659 		work_done++;
1660 
1661 		/* desc_count should be zero  after PPDU status processing */
1662 		if (mon_pdev_be->desc_count > 0)
1663 			qdf_assert_always(0);
1664 
1665 		mon_pdev_be->desc_count = 0;
1666 	}
1667 	dp_srng_access_end(int_ctx, soc, mon_dst_srng);
1668 
1669 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
1670 	dp_mon_info("mac_id: %d, work_done:%d", mac_id, work_done);
1671 	return work_done;
1672 }
1673 
1674 uint32_t
1675 dp_rx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
1676 		      uint32_t mac_id, uint32_t quota)
1677 {
1678 	uint32_t work_done;
1679 
1680 	work_done = dp_rx_mon_srng_process_2_0(soc, int_ctx, mac_id, quota);
1681 
1682 	return work_done;
1683 }
1684 
1685 void
1686 dp_rx_mon_buf_desc_pool_deinit(struct dp_soc *soc)
1687 {
1688 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1689 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1690 
1691 	/* Drain page frag cachce before pool deinit */
1692 	qdf_frag_cache_drain(&mon_soc_be->rx_desc_mon.pf_cache);
1693 	dp_mon_desc_pool_deinit(&mon_soc_be->rx_desc_mon);
1694 }
1695 
1696 QDF_STATUS
1697 dp_rx_mon_buf_desc_pool_init(struct dp_soc *soc)
1698 {
1699 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1700 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1701 	uint32_t num_entries;
1702 
1703 	num_entries =
1704 		wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc->wlan_cfg_ctx);
1705 	return dp_mon_desc_pool_init(&mon_soc_be->rx_desc_mon, num_entries);
1706 }
1707 
1708 void dp_rx_mon_buf_desc_pool_free(struct dp_soc *soc)
1709 {
1710 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1711 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1712 
1713 	if (mon_soc)
1714 		dp_mon_desc_pool_free(&mon_soc_be->rx_desc_mon);
1715 }
1716 
1717 QDF_STATUS
1718 dp_rx_mon_buf_desc_pool_alloc(struct dp_soc *soc)
1719 {
1720 	struct dp_srng *mon_buf_ring;
1721 	struct dp_mon_desc_pool *rx_mon_desc_pool;
1722 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1723 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1724 	int entries;
1725 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1726 
1727 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1728 
1729 	entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
1730 	mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
1731 
1732 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1733 
1734 	qdf_print("%s:%d rx mon buf desc pool entries: %d", __func__, __LINE__, entries);
1735 	return dp_mon_desc_pool_alloc(entries, rx_mon_desc_pool);
1736 }
1737 
1738 void
1739 dp_rx_mon_buffers_free(struct dp_soc *soc)
1740 {
1741 	struct dp_mon_desc_pool *rx_mon_desc_pool;
1742 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1743 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1744 
1745 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1746 
1747 	dp_mon_pool_frag_unmap_and_free(soc, rx_mon_desc_pool);
1748 }
1749 
1750 QDF_STATUS
1751 dp_rx_mon_buffers_alloc(struct dp_soc *soc, uint32_t size)
1752 {
1753 	struct dp_srng *mon_buf_ring;
1754 	struct dp_mon_desc_pool *rx_mon_desc_pool;
1755 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1756 	union dp_mon_desc_list_elem_t *tail = NULL;
1757 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1758 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1759 
1760 	mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
1761 
1762 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1763 
1764 	return dp_mon_buffers_replenish(soc, mon_buf_ring,
1765 					rx_mon_desc_pool,
1766 					size,
1767 					&desc_list, &tail, NULL);
1768 }
1769 
1770 #ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_rx_mon_populate_ppdu_usr_info_2_0() - Copy per-user RX status into
 * the per-user PPDU stats
 * @rx_user_status: per-user RX status parsed from the status ring
 * @ppdu_user: per-user PPDU stats structure to populate
 */
void
dp_rx_mon_populate_ppdu_usr_info_2_0(struct mon_rx_user_status *rx_user_status,
				     struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	ppdu_user->mpdu_retries = rx_user_status->retry_mpdu;
}
1777 
1778 #ifdef WLAN_FEATURE_11BE
/**
 * dp_rx_mon_stats_update_2_0() - Update 11BE monitor peer RX stats
 * @mon_peer: monitor peer whose stats are updated
 * @ppdu: PPDU-level indication (preamble, type, MCS, puncture mode)
 * @ppdu_user: per-user PPDU stats (num_msdu, per-user MCS, retries)
 *
 * Accumulates MPDU retries, punctured-BW counters and per-MCS counters
 * for SU / MU-OFDMA / MU-MIMO 11BE PPDUs. MCS values at or above
 * MAX_MCS_11BE are folded into the MAX_MCS - 1 bucket.
 */
void dp_rx_mon_stats_update_2_0(struct dp_mon_peer *mon_peer,
				struct cdp_rx_indication_ppdu *ppdu,
				struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	uint8_t mcs, preamble, ppdu_type, punc_mode;
	uint32_t num_msdu;

	preamble = ppdu->u.preamble;
	ppdu_type = ppdu->u.ppdu_type;
	num_msdu = ppdu_user->num_msdu;
	punc_mode = ppdu->punc_bw;

	/* SU PPDUs carry the MCS at PPDU level, MU at user level */
	if (ppdu_type == HAL_RX_TYPE_SU)
		mcs = ppdu->u.mcs;
	else
		mcs = ppdu_user->mcs;

	DP_STATS_INC(mon_peer, rx.mpdu_retry_cnt, ppdu_user->mpdu_retries);
	DP_STATS_INC(mon_peer, rx.punc_bw[punc_mode], num_msdu);
	DP_STATS_INCC(mon_peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11BE) && (preamble == DOT11_BE)));
	DP_STATS_INCC(mon_peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11BE) && (preamble == DOT11_BE)));
	DP_STATS_INCC(mon_peer,
		      rx.su_be_ppdu_cnt.mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= (MAX_MCS_11BE)) && (preamble == DOT11_BE) &&
		      (ppdu_type == HAL_RX_TYPE_SU)));
	DP_STATS_INCC(mon_peer,
		      rx.su_be_ppdu_cnt.mcs_count[mcs], 1,
		      ((mcs < (MAX_MCS_11BE)) && (preamble == DOT11_BE) &&
		      (ppdu_type == HAL_RX_TYPE_SU)));
	DP_STATS_INCC(mon_peer,
		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[MAX_MCS - 1],
		      1, ((mcs >= (MAX_MCS_11BE)) &&
		      (preamble == DOT11_BE) &&
		      (ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
	DP_STATS_INCC(mon_peer,
		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[mcs],
		      1, ((mcs < (MAX_MCS_11BE)) &&
		      (preamble == DOT11_BE) &&
		      (ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
	DP_STATS_INCC(mon_peer,
		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[MAX_MCS - 1],
		      1, ((mcs >= (MAX_MCS_11BE)) &&
		      (preamble == DOT11_BE) &&
		      (ppdu_type == HAL_RX_TYPE_MU_MIMO)));
	DP_STATS_INCC(mon_peer,
		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[mcs],
		      1, ((mcs < (MAX_MCS_11BE)) &&
		      (preamble == DOT11_BE) &&
		      (ppdu_type == HAL_RX_TYPE_MU_MIMO)));
}
1833 
1834 void
1835 dp_rx_mon_populate_ppdu_info_2_0(struct hal_rx_ppdu_info *hal_ppdu_info,
1836 				 struct cdp_rx_indication_ppdu *ppdu)
1837 {
1838 	uint16_t puncture_pattern;
1839 	enum cdp_punctured_modes punc_mode;
1840 
1841 	/* Align bw value as per host data structures */
1842 	if (hal_ppdu_info->rx_status.bw == HAL_FULL_RX_BW_320)
1843 		ppdu->u.bw = CMN_BW_320MHZ;
1844 	else
1845 		ppdu->u.bw = hal_ppdu_info->rx_status.bw;
1846 	if (hal_ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11BE) {
1847 		/* Align preamble value as per host data structures */
1848 		ppdu->u.preamble = DOT11_BE;
1849 		ppdu->u.stbc = hal_ppdu_info->rx_status.is_stbc;
1850 		ppdu->u.dcm = hal_ppdu_info->rx_status.dcm;
1851 	} else {
1852 		ppdu->u.preamble = hal_ppdu_info->rx_status.preamble_type;
1853 	}
1854 
1855 	puncture_pattern = hal_ppdu_info->rx_status.punctured_pattern;
1856 	punc_mode = dp_mon_get_puncture_type(puncture_pattern,
1857 					     ppdu->u.bw);
1858 	ppdu->punc_bw = punc_mode;
1859 }
1860 #else
/* 11BE disabled: only the common MPDU retry counter is accumulated */
void dp_rx_mon_stats_update_2_0(struct dp_mon_peer *mon_peer,
				struct cdp_rx_indication_ppdu *ppdu,
				struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	DP_STATS_INC(mon_peer, rx.mpdu_retry_cnt, ppdu_user->mpdu_retries);
}
1867 
/* 11BE disabled: puncturing does not apply, always report NO_PUNCTURE */
void
dp_rx_mon_populate_ppdu_info_2_0(struct hal_rx_ppdu_info *hal_ppdu_info,
				 struct cdp_rx_indication_ppdu *ppdu)
{
	ppdu->punc_bw = NO_PUNCTURE;
}
1874 #endif
1875 void dp_mon_rx_print_advanced_stats_2_0(struct dp_soc *soc,
1876 					struct dp_pdev *pdev)
1877 {
1878 	struct cdp_pdev_mon_stats *rx_mon_stats;
1879 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1880 	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
1881 	struct dp_mon_pdev_be *mon_pdev_be =
1882 				dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1883 
1884 	rx_mon_stats = &mon_pdev->rx_mon_stats;
1885 
1886 	DP_PRINT_STATS("total_ppdu_info_alloc = %d",
1887 		       rx_mon_stats->total_ppdu_info_alloc);
1888 	DP_PRINT_STATS("total_ppdu_info_free = %d",
1889 		       rx_mon_stats->total_ppdu_info_free);
1890 	DP_PRINT_STATS("total_ppdu_info_enq = %d",
1891 		       rx_mon_stats->total_ppdu_info_enq);
1892 	DP_PRINT_STATS("total_ppdu_info_drop = %d",
1893 		       rx_mon_stats->total_ppdu_info_drop);
1894 	DP_PRINT_STATS("rx_hdr_not_received = %d",
1895 		       rx_mon_stats->rx_hdr_not_received);
1896 	DP_PRINT_STATS("parent_buf_alloc = %d",
1897 		       rx_mon_stats->parent_buf_alloc);
1898 	DP_PRINT_STATS("parent_buf_free = %d",
1899 		       rx_mon_stats->parent_buf_free);
1900 	DP_PRINT_STATS("mpdus_buf_to_stack = %d",
1901 		       rx_mon_stats->mpdus_buf_to_stack);
1902 	DP_PRINT_STATS("frag_alloc = %d",
1903 		       mon_soc->stats.frag_alloc);
1904 	DP_PRINT_STATS("frag_free = %d",
1905 		       mon_soc->stats.frag_free);
1906 	DP_PRINT_STATS("status_buf_count = %d",
1907 		       rx_mon_stats->status_buf_count);
1908 	DP_PRINT_STATS("pkt_buf_count = %d",
1909 		       rx_mon_stats->pkt_buf_count);
1910 	DP_PRINT_STATS("rx_mon_queue_depth= %d",
1911 		       mon_pdev_be->rx_mon_queue_depth);
1912 	DP_PRINT_STATS("empty_desc= %d",
1913 		       mon_pdev->rx_mon_stats.empty_desc_ppdu);
1914 	DP_PRINT_STATS("mpdu_dropped_due_invalid_decap= %d",
1915 		       mon_pdev->rx_mon_stats.mpdu_decap_type_invalid);
1916 	DP_PRINT_STATS("total_free_elem= %d",
1917 		       mon_pdev_be->total_free_elem);
1918 }
1919 #endif
1920