xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/2.0/dp_rx_mon_2.0.c (revision 70a19e16789e308182f63b15c75decec7bf0b342)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "hal_be_hw_headers.h"
19 #include "dp_types.h"
20 #include "hal_be_rx.h"
21 #include "hal_api.h"
22 #include "qdf_trace.h"
23 #include "hal_be_api_mon.h"
24 #include "dp_internal.h"
25 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
26 #include <qdf_flex_mem.h>
27 #include "qdf_nbuf_frag.h"
28 #include "dp_mon.h"
29 #include <dp_rx_mon.h>
30 #include <dp_mon_2.0.h>
31 #include <dp_rx_mon.h>
32 #include <dp_rx_mon_2.0.h>
33 #include <dp_rx.h>
34 #include <dp_be.h>
35 #include <hal_be_api_mon.h>
36 #ifdef QCA_SUPPORT_LITE_MONITOR
37 #include "dp_lite_mon.h"
38 #endif
39 
40 #define F_MASK 0xFFFF
41 #define TEST_MASK 0xCBF
42 
43 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
44 	    defined(WLAN_SUPPORT_RX_FLOW_TAG)
45 
46 #ifdef QCA_TEST_MON_PF_TAGS_STATS
47 
/**
 * dp_rx_mon_print_tag_buf() - Hex dump the protocol/flow tag TLV buffer
 * @buf: buffer holding the TLV bytes to dump
 * @room: number of bytes of @buf to dump
 *
 * Debug-only variant (QCA_TEST_MON_PF_TAGS_STATS): dumps @room bytes to
 * the kernel log at KERN_ERR, 32 bytes per row in 2-byte groups, with no
 * address prefix and no ASCII column.
 *
 * Return: void
 */
static
void dp_rx_mon_print_tag_buf(uint8_t *buf, uint16_t room)
{
	print_hex_dump(KERN_ERR, "TLV BUFFER: ", DUMP_PREFIX_NONE,
		       32, 2, buf, room, false);
}
54 
55 #else
/* No-op stub when QCA_TEST_MON_PF_TAGS_STATS is disabled */
static
void dp_rx_mon_print_tag_buf(uint8_t *buf, uint16_t room)
{
}
60 
61 #endif
62 
63 /**
64  * dp_rx_mon_update_drop_cnt() - Update drop statistics
65  *
66  * @mon_pdev: monitor pdev
67  * @hal_mon_rx_desc: HAL monitor desc
68  *
69  * Return: void
70  */
71 static inline void
72 dp_rx_mon_update_drop_cnt(struct dp_mon_pdev *mon_pdev,
73 			  struct hal_mon_desc *hal_mon_rx_desc)
74 {
75 	mon_pdev->rx_mon_stats.empty_desc_ppdu++;
76 	mon_pdev->rx_mon_stats.ppdu_drop_cnt +=
77 		hal_mon_rx_desc->ppdu_drop_count;
78 	mon_pdev->rx_mon_stats.mpdu_drop_cnt +=
79 		hal_mon_rx_desc->mpdu_drop_count;
80 	if (hal_mon_rx_desc->end_of_ppdu_dropped)
81 		mon_pdev->rx_mon_stats.end_of_ppdu_drop_cnt++;
82 	mon_pdev->rx_mon_stats.tlv_drop_cnt +=
83 		hal_mon_rx_desc->tlv_drop_count;
84 }
85 
/* Zero the DP_RX_MON_TLV_ROOM bytes reserved at the start of the nbuf's
 * head buffer, clearing any stale protocol/flow tag TLV data.
 */
static
void dp_rx_mon_set_zero(qdf_nbuf_t nbuf)
{
	qdf_mem_zero(qdf_nbuf_head(nbuf), DP_RX_MON_TLV_ROOM);
}
91 
92 /**
93  * dp_rx_mon_get_ppdu_info() - Get PPDU info from freelist
94  *
95  * @mon_pdev: monitor pdev
96  *
97  * Return: ppdu_info
98  */
99 static inline struct hal_rx_ppdu_info*
100 dp_rx_mon_get_ppdu_info(struct dp_mon_pdev *mon_pdev)
101 {
102 	struct dp_mon_pdev_be *mon_pdev_be =
103 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
104 	struct hal_rx_ppdu_info *ppdu_info, *temp_ppdu_info;
105 
106 	qdf_spin_lock_bh(&mon_pdev_be->ppdu_info_lock);
107 	TAILQ_FOREACH_SAFE(ppdu_info,
108 			   &mon_pdev_be->rx_mon_free_queue,
109 			   ppdu_list_elem,
110 			   temp_ppdu_info) {
111 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_free_queue,
112 			     ppdu_info, ppdu_free_list_elem);
113 
114 		if (ppdu_info) {
115 			mon_pdev_be->total_free_elem--;
116 			break;
117 		}
118 	}
119 	qdf_spin_unlock_bh(&mon_pdev_be->ppdu_info_lock);
120 
121 	return ppdu_info;
122 }
123 
/**
 * __dp_rx_mon_free_ppdu_info() - Return a PPDU info element to the freelist
 *
 * @mon_pdev: monitor pdev
 * @ppdu_info: PPDU info to recycle; tolerated as NULL (no-op insert)
 *
 * Links @ppdu_info back onto rx_mon_free_queue via ppdu_free_list_elem
 * under ppdu_info_lock and bumps the freelist element count.
 *
 * Return: void
 */
static inline void
__dp_rx_mon_free_ppdu_info(struct dp_mon_pdev *mon_pdev,
			   struct hal_rx_ppdu_info *ppdu_info)
{
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

	qdf_spin_lock_bh(&mon_pdev_be->ppdu_info_lock);
	if (ppdu_info) {
		TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_free_queue, ppdu_info,
				  ppdu_free_list_elem);
		mon_pdev_be->total_free_elem++;
	}
	qdf_spin_unlock_bh(&mon_pdev_be->ppdu_info_lock);
}
139 
140 /**
141  * dp_rx_mon_nbuf_add_rx_frag () -  Add frag to SKB
142  *
143  * @nbuf: SKB to which frag is going to be added
144  * @frag: frag to be added to SKB
145  * @frag_len: frag length
146  * @offset: frag offset
147  * @buf_size: buffer size
148  * @frag_ref: take frag ref
149  *
150  * Return: QDF_STATUS
151  */
152 static inline QDF_STATUS
153 dp_rx_mon_nbuf_add_rx_frag(qdf_nbuf_t nbuf, qdf_frag_t *frag,
154 			   uint16_t frag_len, uint16_t offset,
155 			   uint16_t buf_size, bool frag_ref)
156 {
157 	uint8_t num_frags;
158 
159 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
160 	if (num_frags < QDF_NBUF_MAX_FRAGS) {
161 		qdf_nbuf_add_rx_frag(frag, nbuf,
162 				     offset,
163 				     frag_len,
164 				     buf_size,
165 				     frag_ref);
166 		return QDF_STATUS_SUCCESS;
167 	}
168 	return QDF_STATUS_E_FAILURE;
169 }
170 
171 /**
172  * dp_mon_free_parent_nbuf() - Free parent SKB
173  *
174  * @mon_pdev: monitor pdev
175  * @nbuf: SKB to be freed
176  *
177  * @Return: void
178  */
179 void
180 dp_mon_free_parent_nbuf(struct dp_mon_pdev *mon_pdev,
181 			qdf_nbuf_t nbuf)
182 {
183 	mon_pdev->rx_mon_stats.parent_buf_free++;
184 	qdf_nbuf_free(nbuf);
185 }
186 
/**
 * dp_rx_mon_shift_pf_tag_in_headroom() - Frame the protocol/flow tag TLV
 * in the nbuf headroom
 * @nbuf: MPDU nbuf whose head buffer holds the per-MSDU tag records
 * @soc: DP soc handle (asserted non-NULL)
 * @ppdu_info: PPDU info (unused in this body; kept for API symmetry)
 *
 * The head of the nbuf buffer is expected to start with a uint16_t MSDU
 * count followed by per-MSDU tag records (written earlier by
 * dp_rx_mon_pf_tag_to_buf_headroom_2_0()).  This function pushes a
 * marker word and a total-length word into the headroom, wraps the tag
 * data in a TLV via dp_mon_rx_add_tlv(), then pulls the head back so
 * the nbuf data pointer is restored to where it was before the pushes.
 *
 * Return: void
 */
void
dp_rx_mon_shift_pf_tag_in_headroom(qdf_nbuf_t nbuf, struct dp_soc *soc,
				   struct hal_rx_ppdu_info *ppdu_info)
{
	uint32_t room = 0;
	uint16_t msdu_count = 0;
	uint16_t *dp = NULL;
	uint16_t *hp = NULL;
	uint16_t tlv_data_len, total_tlv_len;
	uint32_t bytes = 0;	/* bytes pushed; pulled back at the end */

	if (qdf_unlikely(!soc)) {
		dp_mon_err("Soc[%pK] Null. Can't update pftag to nbuf headroom",
			   soc);
		qdf_assert_always(0);
	}

	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
		return;

	if (qdf_unlikely(!nbuf))
		return;

	/* Headroom must have enough space for the TLV to be added */
	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < DP_RX_MON_TLV_ROOM)) {
		dp_mon_err("Headroom[%d] < DP_RX_MON_TLV_ROOM[%d]",
			   qdf_nbuf_headroom(nbuf), DP_RX_MON_TLV_ROOM);
		return;
	}

	/* First uint16_t at the head buffer is the MSDU count */
	hp = (uint16_t *)qdf_nbuf_head(nbuf);
	msdu_count = *hp;

	if (qdf_unlikely(!msdu_count))
		return;

	dp_mon_debug("msdu_count: %d", msdu_count);

	room = DP_RX_MON_PF_TAG_LEN_PER_FRAG * msdu_count;
	tlv_data_len = DP_RX_MON_TLV_MSDU_CNT + (room);
	total_tlv_len = DP_RX_MON_TLV_HDR_LEN + tlv_data_len;

	//1. store space for MARKER
	dp = (uint16_t *)qdf_nbuf_push_head(nbuf, sizeof(uint16_t));
	if (qdf_likely(dp)) {
		*(uint16_t *)dp = DP_RX_MON_TLV_HDR_MARKER;
		bytes += sizeof(uint16_t);
	}

	//2. store space for total size
	dp = (uint16_t *)qdf_nbuf_push_head(nbuf, sizeof(uint16_t));
	if (qdf_likely(dp)) {
		*(uint16_t *)dp = total_tlv_len;
		bytes += sizeof(uint16_t);
	}

	//create TLV
	bytes += dp_mon_rx_add_tlv(DP_RX_MON_TLV_PF_ID, tlv_data_len, hp, nbuf);

	dp_rx_mon_print_tag_buf(qdf_nbuf_data(nbuf), total_tlv_len);

	/* restore the original data pointer */
	qdf_nbuf_pull_head(nbuf, bytes);

}
251 
252 void
253 dp_rx_mon_pf_tag_to_buf_headroom_2_0(void *nbuf,
254 				     struct hal_rx_ppdu_info *ppdu_info,
255 				     struct dp_pdev *pdev, struct dp_soc *soc)
256 {
257 	uint8_t *nbuf_head = NULL;
258 	uint8_t user_id;
259 	struct hal_rx_mon_msdu_info *msdu_info;
260 	uint16_t flow_id;
261 	uint16_t cce_metadata;
262 	uint16_t protocol_tag = 0;
263 	uint32_t flow_tag;
264 	uint8_t invalid_cce = 0, invalid_fse = 0;
265 
266 	if (qdf_unlikely(!soc)) {
267 		dp_mon_err("Soc[%pK] Null. Can't update pftag to nbuf headroom",
268 			   soc);
269 		qdf_assert_always(0);
270 	}
271 
272 	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
273 		return;
274 
275 	if (qdf_unlikely(!nbuf))
276 		return;
277 
278 	/* Headroom must be have enough space for tlv to be added*/
279 	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < DP_RX_MON_TLV_ROOM)) {
280 		dp_mon_err("Headroom[%d] < DP_RX_MON_TLV_ROOM[%d]",
281 			   qdf_nbuf_headroom(nbuf), DP_RX_MON_TLV_ROOM);
282 		return;
283 	}
284 
285 	user_id = ppdu_info->user_id;
286 	if (qdf_unlikely(user_id > HAL_MAX_UL_MU_USERS)) {
287 		dp_mon_debug("Invalid user_id user_id: %d pdev: %pK", user_id, pdev);
288 		return;
289 	}
290 
291 	msdu_info = &ppdu_info->msdu[user_id];
292 	flow_id = ppdu_info->rx_msdu_info[user_id].flow_idx;
293 	cce_metadata = ppdu_info->rx_msdu_info[user_id].cce_metadata -
294 		       RX_PROTOCOL_TAG_START_OFFSET;
295 
296 	flow_tag = ppdu_info->rx_msdu_info[user_id].fse_metadata & F_MASK;
297 
298 	if (qdf_unlikely((cce_metadata > RX_PROTOCOL_TAG_MAX - 1) ||
299 			 (cce_metadata > 0 && cce_metadata < 4))) {
300 		dp_mon_debug("Invalid user_id cce_metadata: %d pdev: %pK", cce_metadata, pdev);
301 		invalid_cce = 1;
302 		protocol_tag = cce_metadata;
303 	} else {
304 		protocol_tag = pdev->rx_proto_tag_map[cce_metadata].tag;
305 		dp_mon_rx_update_rx_protocol_tag_stats(pdev, cce_metadata);
306 	}
307 
308 	if (flow_tag > 0) {
309 		dp_mon_rx_update_rx_flow_tag_stats(pdev, flow_id);
310 	} else {
311 		dp_mon_debug("Invalid flow_tag: %d pdev: %pK ", flow_tag, pdev);
312 		invalid_fse = 1;
313 	}
314 
315 	if (invalid_cce && invalid_fse)
316 		return;
317 
318 	if (msdu_info->msdu_index >= DP_RX_MON_MAX_MSDU) {
319 		dp_mon_err("msdu_index causes overflow in headroom");
320 		return;
321 	}
322 
323 	dp_mon_debug("protocol_tag: %d, cce_metadata: %d, flow_tag: %d",
324 		     protocol_tag, cce_metadata, flow_tag);
325 
326 	dp_mon_debug("msdu_index: %d", msdu_info->msdu_index);
327 
328 
329 	nbuf_head = qdf_nbuf_head(nbuf);
330 
331 	*((uint16_t *)nbuf_head) = msdu_info->msdu_index + 1;
332 	nbuf_head += DP_RX_MON_TLV_MSDU_CNT;
333 
334 	nbuf_head += ((msdu_info->msdu_index) * DP_RX_MON_PF_TAG_SIZE);
335 	if (!invalid_cce)
336 		*((uint16_t *)nbuf_head) = protocol_tag;
337 	nbuf_head += sizeof(uint16_t);
338 	if (!invalid_fse)
339 		*((uint16_t *)nbuf_head) = flow_tag;
340 }
341 
342 #else
343 
/* No-op stub when neither protocol-type nor flow tagging is compiled in */
static
void dp_rx_mon_set_zero(qdf_nbuf_t nbuf)
{
}
348 
/* No-op stub when tagging is compiled out.
 * NOTE(review): declared static here while the tagging-enabled variant
 * above is non-static — confirm against the extern declaration in the
 * corresponding header.
 */
static
void dp_rx_mon_shift_pf_tag_in_headroom(qdf_nbuf_t nbuf, struct dp_soc *soc,
					struct hal_rx_ppdu_info *ppdu_info)
{
}
354 
/* No-op stub when tagging is compiled out.
 * NOTE(review): static linkage differs from the enabled variant — verify
 * against the header declaration.
 */
static
void dp_rx_mon_pf_tag_to_buf_headroom_2_0(void *nbuf,
					  struct hal_rx_ppdu_info *ppdu_info,
					  struct dp_pdev *pdev,
					  struct dp_soc *soc)
{
}
362 
363 #endif
364 
365 /**
366  * dp_rx_mon_free_mpdu_queue() - Free MPDU queue
367  * @mon_pdev: monitor pdev
368  * @ppdu_info: PPDU info
369  *
370  * Return: Void
371  */
372 
373 static void dp_rx_mon_free_mpdu_queue(struct dp_mon_pdev *mon_pdev,
374 				      struct hal_rx_ppdu_info *ppdu_info)
375 {
376 	uint8_t user;
377 	qdf_nbuf_t mpdu;
378 
379 	for (user = 0; user < HAL_MAX_UL_MU_USERS; user++) {
380 		if (!qdf_nbuf_is_queue_empty(&ppdu_info->mpdu_q[user])) {
381 			while ((mpdu = qdf_nbuf_queue_remove(&ppdu_info->mpdu_q[user])) != NULL)
382 				dp_mon_free_parent_nbuf(mon_pdev, mpdu);
383 		}
384 	}
385 }
386 
387 /**
388  * dp_rx_mon_free_ppdu_info () - Free PPDU info
389  * @pdev: DP pdev
390  * @ppdu_info: PPDU info
391  *
392  * Return: Void
393  */
394 static void
395 dp_rx_mon_free_ppdu_info(struct dp_pdev *pdev,
396 			 struct hal_rx_ppdu_info *ppdu_info)
397 {
398 	struct dp_mon_pdev *mon_pdev;
399 
400 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
401 	dp_rx_mon_free_mpdu_queue(mon_pdev, ppdu_info);
402 	__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
403 }
404 
405 void dp_rx_mon_drain_wq(struct dp_pdev *pdev)
406 {
407 	struct dp_mon_pdev *mon_pdev;
408 	struct hal_rx_ppdu_info *ppdu_info = NULL;
409 	struct hal_rx_ppdu_info *temp_ppdu_info = NULL;
410 	struct dp_mon_pdev_be *mon_pdev_be;
411 
412 	if (qdf_unlikely(!pdev)) {
413 		dp_mon_debug("Pdev is NULL");
414 		return;
415 	}
416 
417 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
418 	if (qdf_unlikely(!mon_pdev)) {
419 		dp_mon_debug("monitor pdev is NULL");
420 		return;
421 	}
422 
423 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
424 
425 	qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
426 	TAILQ_FOREACH_SAFE(ppdu_info,
427 			   &mon_pdev_be->rx_mon_queue,
428 			   ppdu_list_elem,
429 			   temp_ppdu_info) {
430 		mon_pdev_be->rx_mon_queue_depth--;
431 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_queue,
432 			     ppdu_info, ppdu_list_elem);
433 
434 		dp_rx_mon_free_ppdu_info(pdev, ppdu_info);
435 	}
436 	qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
437 }
438 
439 /**
440  * dp_rx_mon_deliver_mpdu() - Deliver MPDU to osif layer
441  *
442  * @mon_pdev: monitor pdev
443  * @mpdu: MPDU nbuf
444  * @status: monitor status
445  *
446  * Return: QDF_STATUS
447  */
448 static QDF_STATUS
449 dp_rx_mon_deliver_mpdu(struct dp_mon_pdev *mon_pdev,
450 		       qdf_nbuf_t mpdu,
451 		       struct mon_rx_status *rx_status)
452 {
453 	qdf_nbuf_t nbuf;
454 
455 	if (mon_pdev->mvdev && mon_pdev->mvdev->monitor_vdev->osif_rx_mon) {
456 		mon_pdev->rx_mon_stats.mpdus_buf_to_stack++;
457 		nbuf = qdf_nbuf_get_ext_list(mpdu);
458 
459 		while (nbuf) {
460 			mon_pdev->rx_mon_stats.mpdus_buf_to_stack++;
461 			nbuf = nbuf->next;
462 		}
463 		mon_pdev->mvdev->monitor_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
464 							   mpdu,
465 							   rx_status);
466 	} else {
467 		return QDF_STATUS_E_FAILURE;
468 	}
469 
470 	return QDF_STATUS_SUCCESS;
471 }
472 
473 /**
474  * dp_rx_mon_process_ppdu_info () - Process PPDU info
475  * @pdev: DP pdev
476  * @ppdu_info: PPDU info
477  *
478  * Return: Void
479  */
480 static void
481 dp_rx_mon_process_ppdu_info(struct dp_pdev *pdev,
482 			    struct hal_rx_ppdu_info *ppdu_info)
483 {
484 	struct dp_mon_pdev *mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
485 	uint8_t user;
486 	qdf_nbuf_t mpdu;
487 
488 	if (!ppdu_info)
489 		return;
490 
491 	for (user = 0; user < ppdu_info->com_info.num_users; user++) {
492 		uint16_t mpdu_count;
493 		uint16_t mpdu_idx;
494 		struct hal_rx_mon_mpdu_info *mpdu_meta;
495 		QDF_STATUS status;
496 
497 		if (user >= HAL_MAX_UL_MU_USERS) {
498 			dp_mon_err("num user exceeds max limit");
499 			return;
500 		}
501 
502 		mpdu_count  = ppdu_info->mpdu_count[user];
503 		ppdu_info->rx_status.rx_user_status =
504 					&ppdu_info->rx_user_status[user];
505 		for (mpdu_idx = 0; mpdu_idx < mpdu_count; mpdu_idx++) {
506 			mpdu = qdf_nbuf_queue_remove(&ppdu_info->mpdu_q[user]);
507 
508 			if (!mpdu)
509 				continue;
510 
511 			mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(mpdu);
512 
513 			if (dp_lite_mon_is_rx_enabled(mon_pdev)) {
514 				status = dp_lite_mon_rx_mpdu_process(pdev, ppdu_info,
515 								     mpdu, mpdu_idx, user);
516 				if (status != QDF_STATUS_SUCCESS) {
517 					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
518 					continue;
519 				}
520 			} else {
521 				if (mpdu_meta->full_pkt) {
522 					if (qdf_unlikely(mpdu_meta->truncated)) {
523 						dp_mon_free_parent_nbuf(mon_pdev, mpdu);
524 						continue;
525 					}
526 
527 					status = dp_rx_mon_handle_full_mon(pdev,
528 									   ppdu_info, mpdu);
529 					if (status != QDF_STATUS_SUCCESS) {
530 						dp_mon_free_parent_nbuf(mon_pdev, mpdu);
531 						continue;
532 					}
533 				} else {
534 					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
535 					continue;
536 				}
537 
538 				/* reset mpdu metadata and apply radiotap header over MPDU */
539 				qdf_mem_zero(mpdu_meta, sizeof(struct hal_rx_mon_mpdu_info));
540 				if (!qdf_nbuf_update_radiotap(&ppdu_info->rx_status,
541 							      mpdu,
542 							      qdf_nbuf_headroom(mpdu))) {
543 					dp_mon_err("failed to update radiotap pdev: %pK",
544 						   pdev);
545 				}
546 
547 				dp_rx_mon_shift_pf_tag_in_headroom(mpdu,
548 								   pdev->soc,
549 								   ppdu_info);
550 
551 				dp_rx_mon_process_dest_pktlog(pdev->soc,
552 							      pdev->pdev_id,
553 							      mpdu);
554 				/* Deliver MPDU to osif layer */
555 				status = dp_rx_mon_deliver_mpdu(mon_pdev,
556 								mpdu,
557 								&ppdu_info->rx_status);
558 				if (status != QDF_STATUS_SUCCESS)
559 					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
560 			}
561 		}
562 	}
563 
564 	dp_rx_mon_free_mpdu_queue(mon_pdev, ppdu_info);
565 }
566 
567 /**
568  * dp_rx_mon_process_ppdu ()-  Deferred monitor processing
569  * This workqueue API handles:
570  * a. Full monitor
571  * b. Lite monitor
572  *
573  * @context: Opaque work context
574  *
575  * Return: none
576  */
577 void dp_rx_mon_process_ppdu(void *context)
578 {
579 	struct dp_pdev *pdev = (struct dp_pdev *)context;
580 	struct dp_mon_pdev *mon_pdev;
581 	struct hal_rx_ppdu_info *ppdu_info = NULL;
582 	struct hal_rx_ppdu_info *temp_ppdu_info = NULL;
583 	struct dp_mon_pdev_be *mon_pdev_be;
584 
585 	if (qdf_unlikely(!pdev)) {
586 		dp_mon_debug("Pdev is NULL");
587 		return;
588 	}
589 
590 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
591 	if (qdf_unlikely(!mon_pdev)) {
592 		dp_mon_debug("monitor pdev is NULL");
593 		return;
594 	}
595 
596 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
597 
598 	qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
599 	TAILQ_FOREACH_SAFE(ppdu_info,
600 			   &mon_pdev_be->rx_mon_queue,
601 			   ppdu_list_elem, temp_ppdu_info) {
602 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_queue,
603 			     ppdu_info, ppdu_list_elem);
604 
605 		mon_pdev_be->rx_mon_queue_depth--;
606 		dp_rx_mon_process_ppdu_info(pdev, ppdu_info);
607 		__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
608 	}
609 	qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
610 }
611 
612 /**
613  * dp_rx_mon_add_ppdu_info_to_wq () - Add PPDU info to workqueue
614  *
615  * @mon_pdev: monitor pdev
616  * @ppdu_info: ppdu info to be added to workqueue
617  *
618  * Return: SUCCESS or FAILIRE
619  */
620 
621 static QDF_STATUS
622 dp_rx_mon_add_ppdu_info_to_wq(struct dp_pdev *pdev,
623 			      struct hal_rx_ppdu_info *ppdu_info)
624 {
625 	struct dp_mon_pdev *mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
626 	struct dp_mon_pdev_be *mon_pdev_be =
627 		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
628 
629 	/* Full monitor or lite monitor mode is not enabled, return */
630 	if (!mon_pdev->monitor_configured &&
631 	    !dp_lite_mon_is_rx_enabled(mon_pdev))
632 		return QDF_STATUS_E_FAILURE;
633 
634 	if (qdf_likely(ppdu_info)) {
635 		if (mon_pdev_be->rx_mon_queue_depth < DP_RX_MON_WQ_THRESHOLD) {
636 			qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
637 			TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_queue,
638 					  ppdu_info, ppdu_list_elem);
639 			mon_pdev_be->rx_mon_queue_depth++;
640 			mon_pdev->rx_mon_stats.total_ppdu_info_enq++;
641 		} else {
642 			mon_pdev->rx_mon_stats.total_ppdu_info_drop++;
643 			dp_rx_mon_free_ppdu_info(pdev, ppdu_info);
644 		}
645 		qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
646 
647 		if (mon_pdev_be->rx_mon_queue_depth > DP_MON_QUEUE_DEPTH_MAX) {
648 			qdf_queue_work(0, mon_pdev_be->rx_mon_workqueue,
649 				       &mon_pdev_be->rx_mon_work);
650 		}
651 	}
652 	return QDF_STATUS_SUCCESS;
653 }
654 
/**
 * dp_rx_mon_handle_full_mon() - Restitch an MPDU from its monitor frags
 * @pdev: DP pdev (unused in this body; kept for API symmetry)
 * @ppdu_info: PPDU info (unused in this body; kept for API symmetry)
 * @mpdu: parent nbuf whose page frags alternate rx-header TLV / rx-msdu
 *	  payload buffers as laid out by the monitor destination ring
 *
 * For raw decap the FCS is trimmed and the frame is returned as-is.
 * For non-raw decap, each frag is adjusted in place: the header frag is
 * shrunk to just the 802.11 header (first MSDU) or the LLC/SNAP header
 * (subsequent MSDUs), the decap header is pulled from each payload frag,
 * and 4-byte A-MSDU alignment padding is zero-filled into the tail of a
 * payload frag when the buffer has room.  Continuation nbufs are walked
 * via the ext list (first hop) and then the queue-next chain.
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_FAILURE for a NULL @mpdu or too few frags
 */
QDF_STATUS
dp_rx_mon_handle_full_mon(struct dp_pdev *pdev,
			  struct hal_rx_ppdu_info *ppdu_info,
			  qdf_nbuf_t mpdu)
{
	uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len,
		 mpdu_buf_len, decap_hdr_pull_bytes, dir,
		 is_amsdu, amsdu_pad, frag_size, tot_msdu_len;
	struct hal_rx_mon_mpdu_info *mpdu_meta;
	struct hal_rx_mon_msdu_info *msdu_meta;
	char *hdr_desc;
	uint8_t num_frags, frag_iter, l2_hdr_offset;
	struct ieee80211_frame *wh;
	struct ieee80211_qoscntl *qos;
	uint32_t hdr_frag_size, frag_page_offset, pad_byte_pholder;
	qdf_nbuf_t head_msdu, msdu_cur;
	void *frag_addr;
	bool prev_msdu_end_received = false;
	bool is_nbuf_head = true;

	/***************************************************************************
	 *********************** Non-raw packet ************************************
	 ---------------------------------------------------------------------------
	 |      | frag-0   | frag-1    | frag - 2 | frag - 3  | frag - 4 | frag - 5  |
	 | skb  | rx_hdr-1 | rx_msdu-1 | rx_hdr-2 | rx_msdu-2 | rx_hdr-3 | rx-msdu-3 |
	 ---------------------------------------------------------------------------
	 **************************************************************************/

	if (!mpdu) {
		dp_mon_debug("nbuf is NULL, return");
		return QDF_STATUS_E_FAILURE;
	}

	head_msdu = mpdu;

	mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(mpdu);

	/* Raw frames need no restitch; just trim the trailing FCS */
	if (mpdu_meta->decap_type == HAL_HW_RX_DECAP_FORMAT_RAW) {
		qdf_nbuf_trim_add_frag_size(mpdu,
					    qdf_nbuf_get_nr_frags(mpdu) - 1,
					    -HAL_RX_FCS_LEN, 0);
		return QDF_STATUS_SUCCESS;
	}

	num_frags = qdf_nbuf_get_nr_frags(mpdu);
	if (qdf_unlikely(num_frags < DP_MON_MIN_FRAGS_FOR_RESTITCH)) {
		dp_mon_debug("not enough frags(%d) for restitch", num_frags);
		return QDF_STATUS_E_FAILURE;
	}

	l2_hdr_offset = DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;

	/* hdr_desc points to 80211 hdr */
	hdr_desc = qdf_nbuf_get_frag_addr(mpdu, 0);

	/* Calculate Base header size */
	wifi_hdr_len = sizeof(struct ieee80211_frame);
	wh = (struct ieee80211_frame *)hdr_desc;

	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;

	/* 4-address (WDS) frames carry an extra 6-byte address */
	if (dir == IEEE80211_FC1_DIR_DSTODS)
		wifi_hdr_len += 6;

	is_amsdu = 0;
	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
		qos = (struct ieee80211_qoscntl *)
			(hdr_desc + wifi_hdr_len);
		wifi_hdr_len += 2;

		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
	}

	/*Calculate security header length based on 'Protected'
	 * and 'EXT_IV' flag
	 */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		char *iv = (char *)wh + wifi_hdr_len;

		if (iv[3] & KEY_EXTIV)
			sec_hdr_len = 8;
		else
			sec_hdr_len = 4;
	} else {
		sec_hdr_len = 0;
	}
	wifi_hdr_len += sec_hdr_len;

	/* MSDU related stuff LLC - AMSDU subframe header etc */
	msdu_llc_len = is_amsdu ? (DP_RX_MON_DECAP_HDR_SIZE +
				   DP_RX_MON_LLC_SIZE +
				   DP_RX_MON_SNAP_SIZE) :
				   (DP_RX_MON_LLC_SIZE + DP_RX_MON_SNAP_SIZE);

	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;

	/* "Decap" header to remove from MSDU buffer */
	decap_hdr_pull_bytes = DP_RX_MON_DECAP_HDR_SIZE;

	amsdu_pad = 0;
	tot_msdu_len = 0;
	/* NOTE(review): duplicated assignment carried over from original */
	tot_msdu_len = 0;

	/*
	 * Update protocol and flow tag for MSDU
	 * update frag index in ctx_idx field.
	 * Reset head pointer data of nbuf before updating.
	 */
	QDF_NBUF_CB_RX_CTX_ID(mpdu) = 0;

	/* Construct destination address */
	hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(mpdu, 0);

	/* Adjust page frag offset to point to 802.11 header */
	if (hdr_frag_size > mpdu_buf_len)
		qdf_nbuf_trim_add_frag_size(head_msdu, 0, -(hdr_frag_size - mpdu_buf_len), 0);

	/* MSDU metadata sits just before the payload frag's data, behind
	 * the packet offset and L2 header pad
	 */
	msdu_meta = (struct hal_rx_mon_msdu_info *)(((void *)qdf_nbuf_get_frag_addr(mpdu, 1)) - (DP_RX_MON_PACKET_OFFSET + DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE));


	/* Adjust page frag offset to appropriate after decap header */
	frag_page_offset =
		decap_hdr_pull_bytes;
	qdf_nbuf_move_frag_page_offset(head_msdu, 1, frag_page_offset);

	frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 1);
	pad_byte_pholder =
		RX_MONITOR_BUFFER_SIZE - (frag_size + DP_RX_MON_PACKET_OFFSET + DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);

	if (msdu_meta->first_buffer && msdu_meta->last_buffer) {
		/* MSDU with single buffer */
		amsdu_pad = frag_size & 0x3;
		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
		if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
			char *frag_addr_temp;

			qdf_nbuf_trim_add_frag_size(mpdu, 1, amsdu_pad, 0);
			frag_addr_temp =
				(char *)qdf_nbuf_get_frag_addr(mpdu, 1);
			frag_addr_temp = (frag_addr_temp +
					  qdf_nbuf_get_frag_size_by_idx(mpdu, 1)) -
				amsdu_pad;
			/* zero-fill the alignment hole just created */
			qdf_mem_zero(frag_addr_temp, amsdu_pad);
			amsdu_pad = 0;
		}
	} else {
		tot_msdu_len = frag_size;
		amsdu_pad = 0;
	}

	pad_byte_pholder = 0;
	for (msdu_cur = mpdu; msdu_cur;) {
		/* frag_iter will start from 0 for second skb onwards */
		if (msdu_cur == mpdu)
			frag_iter = 2;
		else
			frag_iter = 0;

		num_frags = qdf_nbuf_get_nr_frags(msdu_cur);

		for (; frag_iter < num_frags; frag_iter++) {
			/* Construct destination address
			 *  ----------------------------------------------------------
			 * |            | L2_HDR_PAD   |   Decap HDR | Payload | Pad  |
			 * |            | (First buffer)             |         |      |
			 * |            |                            /        /       |
			 * |            >Frag address points here   /        /        |
			 * |            \                          /        /         |
			 * |             \ This bytes needs to    /        /          |
			 * |              \  removed to frame pkt/        /           |
			 * |               ----------------------        /            |
			 * |                                     |     /     Add      |
			 * |                                     |    /   amsdu pad   |
			 * |   LLC HDR will be added here      <-|    |   Byte for    |
			 * |        |                            |    |   last frame  |
			 * |         >Dest addr will point       |    |    if space   |
			 * |            somewhere in this area   |    |    available  |
			 * |  And amsdu_pad will be created if   |    |               |
			 * | dint get added in last buffer       |    |               |
			 * |       (First Buffer)                |    |               |
			 *  ----------------------------------------------------------
			 */
			/* If previous msdu end has received, modify next frag's offset to point to LLC */
			if (prev_msdu_end_received) {
				hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter);
				/* Adjust page frag offset to point to llc/snap header */
				if (hdr_frag_size > msdu_llc_len)
					qdf_nbuf_trim_add_frag_size(msdu_cur, frag_iter, -(hdr_frag_size - msdu_llc_len), 0);
				prev_msdu_end_received = false;
				continue;
			}

			/* Per-MSDU metadata precedes the frag payload */
			frag_addr =
				qdf_nbuf_get_frag_addr(msdu_cur, frag_iter) -
						       (DP_RX_MON_PACKET_OFFSET +
						       DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);
			msdu_meta = (struct hal_rx_mon_msdu_info *)frag_addr;

			/*
			 * Update protocol and flow tag for MSDU
			 * update frag index in ctx_idx field
			 */
			QDF_NBUF_CB_RX_CTX_ID(msdu_cur) = frag_iter;

			frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur,
					frag_iter);

			/* If Middle buffer, dont add any header */
			if ((!msdu_meta->first_buffer) &&
					(!msdu_meta->last_buffer)) {
				tot_msdu_len += frag_size;
				amsdu_pad = 0;
				pad_byte_pholder = 0;
				continue;
			}

			/* Calculate if current buffer has placeholder
			 * to accommodate amsdu pad byte
			 */
			pad_byte_pholder =
				RX_MONITOR_BUFFER_SIZE - (frag_size + (DP_RX_MON_PACKET_OFFSET +
							  DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE));
			/*
			 * We will come here only in three conditions:
			 * 1. Msdu with single Buffer
			 * 2. First buffer in case MSDU is spread in multiple
			 *    buffer
			 * 3. Last buffer in case MSDU is spread in multiple
			 *    buffer
			 *
			 *         First buffer | Last buffer
			 * Case 1:      1       |     1
			 * Case 2:      1       |     0
			 * Case 3:      0       |     1
			 *
			 * In 3rd case only l2_hdr_padding byte will be Zero and
			 * in other case, It will be 2 Bytes.
			 */
			if (msdu_meta->first_buffer)
				l2_hdr_offset =
					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
			else
				l2_hdr_offset = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;

			if (msdu_meta->first_buffer) {
				/* Adjust page frag offset to point to 802.11 header */
				hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter-1);
				if (hdr_frag_size > (msdu_llc_len + amsdu_pad))
					qdf_nbuf_trim_add_frag_size(msdu_cur, frag_iter - 1, -(hdr_frag_size - (msdu_llc_len + amsdu_pad)), 0);

				/* Adjust page frag offset to appropriate after decap header */
				frag_page_offset =
					(decap_hdr_pull_bytes + l2_hdr_offset);
				if (frag_size > (decap_hdr_pull_bytes + l2_hdr_offset)) {
					qdf_nbuf_move_frag_page_offset(msdu_cur, frag_iter, frag_page_offset);
					frag_size = frag_size - (l2_hdr_offset + decap_hdr_pull_bytes);
				}


				/*
				 * Calculate new page offset and create hole
				 * if amsdu_pad required.
				 */
				tot_msdu_len = frag_size;
				/*
				 * No amsdu padding required for first frame of
				 * continuation buffer
				 */
				if (!msdu_meta->last_buffer) {
					amsdu_pad = 0;
					continue;
				}
			} else {
				tot_msdu_len += frag_size;
			}

			/* Will reach to this place in only two case:
			 * 1. Single buffer MSDU
			 * 2. Last buffer of MSDU in case of multiple buf MSDU
			 */

			/* This flag is used to identify msdu boundary */
			prev_msdu_end_received = true;
			/* Check size of buffer if amsdu padding required */
			amsdu_pad = tot_msdu_len & 0x3;
			amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;

			/* Create placeholder if current buffer can
			 * accommodate padding.
			 */
			if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
				char *frag_addr_temp;

				qdf_nbuf_trim_add_frag_size(msdu_cur,
						frag_iter,
						amsdu_pad, 0);
				frag_addr_temp = (char *)qdf_nbuf_get_frag_addr(msdu_cur,
						frag_iter);
				frag_addr_temp = (frag_addr_temp +
						qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter)) -
					amsdu_pad;
				qdf_mem_zero(frag_addr_temp, amsdu_pad);
				amsdu_pad = 0;
			}

			/* reset tot_msdu_len */
			tot_msdu_len = 0;
		}
		/* First hop is the ext list; afterwards follow queue-next */
		if (is_nbuf_head) {
			msdu_cur = qdf_nbuf_get_ext_list(msdu_cur);
			is_nbuf_head = false;
		} else {
			msdu_cur = qdf_nbuf_queue_next(msdu_cur);
		}
	}

	return QDF_STATUS_SUCCESS;
}
973 
/**
 * dp_rx_mon_flush_packet_tlv() - Walk a status buffer's TLVs and release
 * every packet buffer referenced by a MON_BUF_ADDR TLV
 * @pdev: DP pdev handle
 * @buf: status buffer to parse; NULL is a no-op
 * @end_offset: last valid byte offset within @buf
 * @desc_list: [out] list head collecting freed monitor descriptors
 * @tail: [out] list tail for the freed-descriptor list
 *
 * Parses TLVs into a scratch ppdu_info (zeroed first); for each packet
 * buffer TLV the DMA mapping is torn down, the frag is freed and the
 * monitor descriptor is moved to the caller's free list.
 *
 * Return: number of packet buffers released
 */
static inline int
dp_rx_mon_flush_packet_tlv(struct dp_pdev *pdev, void *buf, uint16_t end_offset,
			   union dp_mon_desc_list_elem_t **desc_list,
			   union dp_mon_desc_list_elem_t **tail)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	uint16_t work_done = 0;
	qdf_frag_t addr;
	uint8_t *rx_tlv;
	uint8_t *rx_tlv_start;
	uint16_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
	struct hal_rx_ppdu_info *ppdu_info;

	if (!buf)
		return work_done;

	ppdu_info = &mon_pdev->ppdu_info;
	/* NOTE(review): always false — ppdu_info is the address of an
	 * embedded struct, not an allocation; dead check kept as-is
	 */
	if (!ppdu_info) {
		dp_mon_err("ppdu_info malloc failed pdev: %pK", pdev);
		return work_done;
	}
	qdf_mem_zero(ppdu_info, sizeof(struct hal_rx_ppdu_info));
	rx_tlv = buf;
	rx_tlv_start = buf;

	do {
		tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
							ppdu_info,
							pdev->soc->hal_soc,
							buf);

		if (tlv_status == HAL_TLV_STATUS_MON_BUF_ADDR) {
			/* sw_cookie carries the monitor descriptor pointer */
			struct dp_mon_desc *mon_desc = (struct dp_mon_desc *)(uintptr_t)ppdu_info->packet_info.sw_cookie;

			qdf_assert_always(mon_desc);
			addr = mon_desc->buf_addr;

			/* Unmap before freeing the underlying frag */
			if (!mon_desc->unmapped) {
				qdf_mem_unmap_page(soc->osdev,
						   (qdf_dma_addr_t)mon_desc->paddr,
						   DP_MON_DATA_BUFFER_SIZE,
						   QDF_DMA_FROM_DEVICE);
				mon_desc->unmapped = 1;
			}
			dp_mon_add_to_free_desc_list(desc_list, tail, mon_desc);
			work_done++;

			if (addr) {
				qdf_frag_free(addr);
				DP_STATS_INC(mon_soc, frag_free, 1);
			}
		}

		rx_tlv = hal_rx_status_get_next_tlv(rx_tlv, 1);

		/* Stop once the walk passes the caller-provided end offset */
		if ((rx_tlv - rx_tlv_start) >= (end_offset + 1))
			break;

	} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
		 (tlv_status == HAL_TLV_STATUS_HEADER) ||
		 (tlv_status == HAL_TLV_STATUS_MPDU_END) ||
		 (tlv_status == HAL_TLV_STATUS_MSDU_END) ||
		 (tlv_status == HAL_TLV_STATUS_MON_BUF_ADDR) ||
		 (tlv_status == HAL_TLV_STATUS_MPDU_START));

	return work_done;
}
1043 
1044 /**
1045  * dp_rx_mon_flush_status_buf_queue () - Flush status buffer queue
1046  *
1047  * @pdev: DP pdev handle
1048  *
1049  *Return: void
1050  */
static inline void
dp_rx_mon_flush_status_buf_queue(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	union dp_mon_desc_list_elem_t *desc_list = NULL;
	union dp_mon_desc_list_elem_t *tail = NULL;
	struct dp_mon_desc *mon_desc;
	uint16_t idx;
	void *buf;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
	uint16_t work_done = 0;
	uint16_t status_buf_count;
	uint16_t end_offset = 0;

	/* Nothing queued; caller flushed an empty PPDU */
	if (!mon_pdev_be->desc_count) {
		dp_mon_info("no of status buffer count is zero: %pK", pdev);
		return;
	}

	/* Walk every queued status descriptor: reap any packet buffers it
	 * references, then release the status frag and its descriptor.
	 */
	status_buf_count = mon_pdev_be->desc_count;
	for (idx = 0; idx < status_buf_count; idx++) {
		mon_desc = mon_pdev_be->status[idx];
		if (!mon_desc) {
			qdf_assert_always(0);
			return;
		}

		buf = mon_desc->buf_addr;
		end_offset = mon_desc->end_offset;

		dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
		work_done++;

		/* Parse TLVs in buf and free referenced packet buffers
		 * BEFORE buf itself is released below.
		 */
		work_done += dp_rx_mon_flush_packet_tlv(pdev, buf, end_offset,
							&desc_list, &tail);

		/* set status buffer pointer to NULL */
		mon_pdev_be->status[idx] = NULL;
		mon_pdev_be->desc_count--;

		qdf_frag_free(buf);
		DP_STATS_INC(mon_soc, frag_free, 1);
	}

	/* Return all reaped descriptors to the pool in one shot */
	if (work_done) {
		mon_pdev->rx_mon_stats.mon_rx_bufs_replenished_dest +=
			work_done;
		if (desc_list)
			dp_mon_add_desc_list_to_free_list(soc,
							  &desc_list, &tail,
							  rx_mon_desc_pool);
	}
}
1109 
1110 /**
1111  * dp_rx_mon_handle_flush_n_trucated_ppdu () - Handle flush and truncated ppdu
1112  *
1113  * @soc: DP soc handle
1114  * @pdev: pdev handle
1115  * @mon_desc: mon sw desc
1116  */
1117 static inline void
1118 dp_rx_mon_handle_flush_n_trucated_ppdu(struct dp_soc *soc,
1119 				       struct dp_pdev *pdev,
1120 				       struct dp_mon_desc *mon_desc)
1121 {
1122 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1123 	union dp_mon_desc_list_elem_t *tail = NULL;
1124 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1125 	struct dp_mon_soc_be *mon_soc_be =
1126 			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1127 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1128 	uint16_t work_done;
1129 	void *buf;
1130 	uint16_t end_offset = 0;
1131 
1132 	/* Flush status buffers in queue */
1133 	dp_rx_mon_flush_status_buf_queue(pdev);
1134 	buf = mon_desc->buf_addr;
1135 	end_offset = mon_desc->end_offset;
1136 	qdf_frag_free(mon_desc->buf_addr);
1137 	DP_STATS_INC(mon_soc, frag_free, 1);
1138 	dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
1139 	work_done = 1;
1140 	work_done += dp_rx_mon_flush_packet_tlv(pdev, buf, end_offset,
1141 						&desc_list, &tail);
1142 	if (desc_list)
1143 		dp_mon_add_desc_list_to_free_list(soc, &desc_list, &tail,
1144 						  rx_mon_desc_pool);
1145 }
1146 
/**
 * dp_rx_mon_process_tlv_status() - Act on one parsed status TLV, building
 * the per-user MPDU nbuf queues for the PPDU under construction
 *
 * @pdev: DP pdev handle
 * @ppdu_info: HAL PPDU info state accumulated across TLVs of this PPDU
 * @status_frag: status buffer frag currently being parsed
 * @tlv_status: TLV type returned by hal_rx_status_get_tlv_info()
 * @desc_list: [out] list head collecting reaped mon descriptors
 * @tail: [out] list tail collecting reaped mon descriptors
 *
 * Return: number of packet buffer descriptors reaped while handling this TLV
 */
uint8_t dp_rx_mon_process_tlv_status(struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info,
				     void *status_frag,
				     uint16_t tlv_status,
				     union dp_mon_desc_list_elem_t **desc_list,
				     union dp_mon_desc_list_elem_t **tail)
{
	struct dp_soc *soc  = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	qdf_nbuf_t nbuf, tmp_nbuf;
	qdf_frag_t addr;
	uint8_t user_id = ppdu_info->user_id;
	uint8_t mpdu_idx = ppdu_info->mpdu_count[user_id];
	uint16_t num_frags;
	uint8_t num_buf_reaped = 0;
	QDF_STATUS status;

	/* Nothing to assemble unless some monitor consumer is active */
	if (!mon_pdev->monitor_configured &&
	    !dp_lite_mon_is_rx_enabled(mon_pdev)) {
		return num_buf_reaped;
	}

	switch (tlv_status) {
	case HAL_TLV_STATUS_HEADER: {
		/* If this is first RX_HEADER for MPDU, allocate skb
		 * else add frag to already allocated skb
		 */

		if (!ppdu_info->mpdu_info[user_id].mpdu_start_received) {

			nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
					      DP_RX_MON_TLV_ROOM +
					      DP_RX_MON_MAX_RADIO_TAP_HDR,
					      DP_RX_MON_TLV_ROOM +
					      DP_RX_MON_MAX_RADIO_TAP_HDR,
					      4, FALSE);

			/* All msdus are attached as nr_frags; nbuf->next
			 * stays NULL (set below).
			 */
			if (qdf_unlikely(!nbuf)) {
				dp_mon_err("malloc failed pdev: %pK ", pdev);
				return num_buf_reaped;
			}

			mon_pdev->rx_mon_stats.parent_buf_alloc++;

			dp_rx_mon_set_zero(nbuf);

			qdf_nbuf_set_next(nbuf, NULL);

			qdf_nbuf_queue_add(&ppdu_info->mpdu_q[user_id], nbuf);

			/* Attach the RX header portion of the status frag */
			status = dp_rx_mon_nbuf_add_rx_frag(nbuf, status_frag,
							    ppdu_info->hdr_len - DP_RX_MON_RX_HDR_OFFSET,
							    ppdu_info->data - (unsigned char *)status_frag + 4,
							    DP_MON_DATA_BUFFER_SIZE, true);
			if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
				dp_mon_err("num_frags exceeding MAX frags");
				qdf_assert_always(0);
			}
			ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = true;
			ppdu_info->mpdu_info[user_id].first_rx_hdr_rcvd = true;
			/* initialize decap type to invalid, this will be set to appropriate
			 * value once the mpdu start tlv is received
			 */
			ppdu_info->mpdu_info[user_id].decap_type = DP_MON_DECAP_FORMAT_INVALID;
		} else {
			/* Subsequent RX_HEADER within the same MPDU */
			if (ppdu_info->mpdu_info[user_id].decap_type ==
					HAL_HW_RX_DECAP_FORMAT_RAW) {
				return num_buf_reaped;
			}

			if (dp_lite_mon_is_rx_enabled(mon_pdev) &&
			    !dp_lite_mon_is_level_msdu(mon_pdev))
				break;

			nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
			if (qdf_unlikely(!nbuf)) {
				dp_mon_debug("nbuf is NULL");
				return num_buf_reaped;
			}

			/* Find an nbuf in the chain with a free frag slot,
			 * or grow the frag-list with a fresh nbuf.
			 */
			tmp_nbuf = qdf_get_nbuf_valid_frag(nbuf);

			if (!tmp_nbuf) {
				tmp_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
							  DP_RX_MON_MAX_MONITOR_HEADER,
							  DP_RX_MON_MAX_MONITOR_HEADER,
							  4, FALSE);
				if (qdf_unlikely(!tmp_nbuf)) {
					dp_mon_err("nbuf is NULL");
					qdf_assert_always(0);
				}
				mon_pdev->rx_mon_stats.parent_buf_alloc++;
				/* add new skb to frag list */
				qdf_nbuf_append_ext_list(nbuf, tmp_nbuf,
							 qdf_nbuf_len(tmp_nbuf));
			}
			dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, status_frag,
						   ppdu_info->hdr_len - DP_RX_MON_RX_HDR_OFFSET,
						   ppdu_info->data - (unsigned char *)status_frag + 4,
						   DP_MON_DATA_BUFFER_SIZE,
						   true);
		}
		ppdu_info->rx_hdr_rcvd[user_id] = true;
	}
	break;
	case HAL_TLV_STATUS_MON_BUF_ADDR:
	{
		/* A packet buffer reference: take ownership of the frag,
		 * unmap it, and attach it to the current MPDU nbuf.
		 */
		struct hal_rx_mon_msdu_info *buf_info;
		struct hal_mon_packet_info *packet_info = &ppdu_info->packet_info;
		struct dp_mon_desc *mon_desc = (struct dp_mon_desc *)(uintptr_t)ppdu_info->packet_info.sw_cookie;
		struct hal_rx_mon_mpdu_info *mpdu_info;
		uint16_t frag_idx = 0;

		qdf_assert_always(mon_desc);

		if (mon_desc->magic != DP_MON_DESC_MAGIC)
			qdf_assert_always(0);

		addr = mon_desc->buf_addr;
		qdf_assert_always(addr);

		mpdu_info = &ppdu_info->mpdu_info[user_id];
		if (!mon_desc->unmapped) {
			qdf_mem_unmap_page(soc->osdev,
					   (qdf_dma_addr_t)mon_desc->paddr,
				   DP_MON_DATA_BUFFER_SIZE,
					   QDF_DMA_FROM_DEVICE);
			mon_desc->unmapped = 1;
		}
		dp_mon_add_to_free_desc_list(desc_list, tail, mon_desc);
		num_buf_reaped++;

		mon_pdev->rx_mon_stats.pkt_buf_count++;

		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {

			/* WAR: RX_HDR is not received for this MPDU, drop this frame */
			mon_pdev->rx_mon_stats.rx_hdr_not_received++;
			DP_STATS_INC(mon_soc, frag_free, 1);
			qdf_frag_free(addr);
			return num_buf_reaped;
		}

		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		if (qdf_unlikely(!nbuf)) {
			dp_mon_debug("nbuf is NULL");
			return num_buf_reaped;
		}

		if (mpdu_info->decap_type == DP_MON_DECAP_FORMAT_INVALID) {
			/* decap type is invalid, drop the frame */
			mon_pdev->rx_mon_stats.mpdu_decap_type_invalid++;
			DP_STATS_INC(mon_soc, frag_free, 1);
			mon_pdev->rx_mon_stats.parent_buf_free++;
			qdf_frag_free(addr);
			qdf_nbuf_queue_remove_last(&ppdu_info->mpdu_q[user_id]);
			qdf_nbuf_free(nbuf);
			/* if invalid decap type handling is disabled, assert */
			if (soc->wlan_cfg_ctx->is_handle_invalid_decap_type_disabled) {
				dp_mon_err("Decap type invalid");
				qdf_assert_always(0);
			}
			return num_buf_reaped;
		}

		tmp_nbuf = qdf_get_nbuf_valid_frag(nbuf);

		if (!tmp_nbuf) {
			/* No free frag slot; extend the frag list */
			tmp_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
						  DP_RX_MON_MAX_MONITOR_HEADER,
						  DP_RX_MON_MAX_MONITOR_HEADER,
						  4, FALSE);
			if (qdf_unlikely(!tmp_nbuf)) {
				dp_mon_err("nbuf is NULL");
				DP_STATS_INC(mon_soc, frag_free, 1);
				mon_pdev->rx_mon_stats.parent_buf_free++;
				qdf_frag_free(addr);
				/* remove this nbuf from queue */
				qdf_nbuf_queue_remove_last(&ppdu_info->mpdu_q[user_id]);
				qdf_nbuf_free(nbuf);
				return num_buf_reaped;
			}
			mon_pdev->rx_mon_stats.parent_buf_alloc++;
			/* add new skb to frag list */
			qdf_nbuf_append_ext_list(nbuf, tmp_nbuf,
						 qdf_nbuf_len(tmp_nbuf));
		}
		mpdu_info->full_pkt = true;

		if (mpdu_info->decap_type == HAL_HW_RX_DECAP_FORMAT_RAW) {
			/* RAW decap: first packet buffer replaces the RX
			 * header frag attached earlier.
			 */
			if (mpdu_info->first_rx_hdr_rcvd) {
				qdf_nbuf_remove_frag(nbuf, frag_idx, DP_MON_DATA_BUFFER_SIZE);
				dp_rx_mon_nbuf_add_rx_frag(nbuf, addr,
							   packet_info->dma_length,
							   DP_RX_MON_PACKET_OFFSET,
							   DP_MON_DATA_BUFFER_SIZE,
							   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
				mpdu_info->first_rx_hdr_rcvd = false;
			} else {
				dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, addr,
							   packet_info->dma_length,
							   DP_RX_MON_PACKET_OFFSET,
							   DP_MON_DATA_BUFFER_SIZE,
							   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
			}
		} else {
			/* Non-RAW decap: attach payload and stash per-buffer
			 * msdu metadata at the start of the frag.
			 */
			dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, addr,
						   packet_info->dma_length,
						   DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE +
						   DP_RX_MON_PACKET_OFFSET,
						   DP_MON_DATA_BUFFER_SIZE,
						   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
			buf_info = addr;

			if (!ppdu_info->msdu[user_id].first_buffer) {
				buf_info->first_buffer = true;
				ppdu_info->msdu[user_id].first_buffer = true;
			} else {
				buf_info->first_buffer = false;
			}

			if (packet_info->msdu_continuation)
				buf_info->last_buffer = false;
			else
				buf_info->last_buffer = true;

			buf_info->frag_len = packet_info->dma_length;
		}
		if (qdf_unlikely(packet_info->truncated))
			mpdu_info->truncated = true;
	}
	break;
	case HAL_TLV_STATUS_MSDU_END:
	{
		struct hal_rx_mon_msdu_info *msdu_info = &ppdu_info->msdu[user_id];
		struct hal_rx_mon_msdu_info *last_buf_info;
		/* update msdu metadata at last buffer of msdu in MPDU */
		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
			/* reset msdu info for next msdu for same user */
			qdf_mem_zero(msdu_info, sizeof(*msdu_info));
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
				     __LINE__, user_id, mpdu_idx);
			break;
		}
		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		if (qdf_unlikely(!nbuf)) {
			dp_mon_debug("nbuf is NULL");
			break;
		}
		num_frags = qdf_nbuf_get_nr_frags(nbuf);
		if (ppdu_info->mpdu_info[user_id].decap_type ==
				HAL_HW_RX_DECAP_FORMAT_RAW) {
			break;
		}
		/* This points to last buffer of MSDU . update metadata here */
		addr = qdf_nbuf_get_frag_addr(nbuf, num_frags - 1) -
					      (DP_RX_MON_PACKET_OFFSET +
					       DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);
		last_buf_info = addr;

		last_buf_info->first_msdu = msdu_info->first_msdu;
		last_buf_info->last_msdu = msdu_info->last_msdu;
		last_buf_info->decap_type = msdu_info->decap_type;
		last_buf_info->msdu_index = msdu_info->msdu_index;
		last_buf_info->user_rssi = msdu_info->user_rssi;
		last_buf_info->reception_type = msdu_info->reception_type;
		last_buf_info->msdu_len = msdu_info->msdu_len;

		/* If flow classification is enabled,
		 * update protocol and flow tag to buf headroom
		 */
		dp_rx_mon_pf_tag_to_buf_headroom_2_0(nbuf, ppdu_info, pdev,
						     soc);

		/* reset msdu info for next msdu for same user */
		qdf_mem_zero(msdu_info, sizeof(*msdu_info));
	}
	break;
	case HAL_TLV_STATUS_MPDU_START:
	{
		/* Record the decap type in the MPDU metadata header */
		struct hal_rx_mon_mpdu_info *mpdu_info, *mpdu_meta;

		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d", __LINE__, user_id, mpdu_idx);
			break;
		}
		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		if (qdf_unlikely(!nbuf)) {
			dp_mon_debug("nbuf is NULL");
			break;
		}
		mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(nbuf);
		mpdu_info = &ppdu_info->mpdu_info[user_id];
		mpdu_meta->decap_type = mpdu_info->decap_type;
		ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = true;
	break;
	}
	case HAL_TLV_STATUS_MPDU_END:
	{
		/* Copy end-of-MPDU error flags into the MPDU metadata and
		 * reset per-MPDU state for this user.
		 */
		struct hal_rx_mon_mpdu_info *mpdu_info, *mpdu_meta;
		mpdu_info = &ppdu_info->mpdu_info[user_id];
		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
			/* reset mpdu info for next mpdu for same user */
			qdf_mem_zero(mpdu_info, sizeof(*mpdu_info));
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
				     __LINE__, user_id, mpdu_idx);
			break;
		}
		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		if (qdf_unlikely(!nbuf)) {
			dp_mon_debug("nbuf is NULL");
			break;
		}
		mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(nbuf);
		mpdu_meta->mpdu_length_err = mpdu_info->mpdu_length_err;
		mpdu_meta->fcs_err = mpdu_info->fcs_err;
		ppdu_info->rx_status.rs_fcs_err = mpdu_info->fcs_err;
		mpdu_meta->overflow_err = mpdu_info->overflow_err;
		mpdu_meta->decrypt_err = mpdu_info->decrypt_err;
		mpdu_meta->full_pkt = mpdu_info->full_pkt;
		mpdu_meta->truncated = mpdu_info->truncated;

		/* reset mpdu info for next mpdu for same user */
		qdf_mem_zero(mpdu_info, sizeof(*mpdu_info));
		ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = false;
		ppdu_info->mpdu_count[user_id]++;
		ppdu_info->rx_hdr_rcvd[user_id] = false;
	}
	break;
	case HAL_TLV_STATUS_MON_DROP:
	{
		/* Accumulate HW-reported drop counters */
		mon_pdev->rx_mon_stats.ppdu_drop_cnt +=
			ppdu_info->drop_cnt.ppdu_drop_cnt;
		mon_pdev->rx_mon_stats.mpdu_drop_cnt +=
			ppdu_info->drop_cnt.mpdu_drop_cnt;
		mon_pdev->rx_mon_stats.end_of_ppdu_drop_cnt +=
			ppdu_info->drop_cnt.end_of_ppdu_drop_cnt;
		mon_pdev->rx_mon_stats.tlv_drop_cnt +=
			ppdu_info->drop_cnt.tlv_drop_cnt;
	}
	break;
	}
	return num_buf_reaped;
}
1498 
1499 /**
1500  * dp_rx_mon_process_status_tlv () - Handle mon status process TLV
1501  *
1502  * @pdev: DP pdev handle
1503  *
1504  * Return
1505  */
1506 static inline struct hal_rx_ppdu_info *
1507 dp_rx_mon_process_status_tlv(struct dp_pdev *pdev)
1508 {
1509 	struct dp_soc *soc = pdev->soc;
1510 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1511 	struct dp_mon_pdev_be *mon_pdev_be =
1512 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1513 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1514 	union dp_mon_desc_list_elem_t *tail = NULL;
1515 	struct dp_mon_desc *mon_desc;
1516 	uint8_t user;
1517 	uint16_t idx;
1518 	void *buf;
1519 	struct hal_rx_ppdu_info *ppdu_info;
1520 	uint8_t *rx_tlv;
1521 	uint8_t *rx_tlv_start;
1522 	uint16_t end_offset = 0;
1523 	uint16_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
1524 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1525 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1526 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1527 	uint16_t work_done = 0;
1528 	uint16_t status_buf_count;
1529 
1530 	if (!mon_pdev_be->desc_count) {
1531 		dp_mon_err("no of status buffer count is zero: %pK", pdev);
1532 		return NULL;
1533 	}
1534 
1535 	ppdu_info = dp_rx_mon_get_ppdu_info(mon_pdev);
1536 
1537 	if (!ppdu_info) {
1538 		dp_mon_err("ppdu_info malloc failed pdev: %pK", pdev);
1539 		dp_rx_mon_flush_status_buf_queue(pdev);
1540 		return NULL;
1541 	}
1542 
1543 	qdf_mem_zero(ppdu_info, sizeof(struct hal_rx_ppdu_info));
1544 	mon_pdev->rx_mon_stats.total_ppdu_info_alloc++;
1545 
1546 	for (user = 0; user < HAL_MAX_UL_MU_USERS; user++)
1547 		qdf_nbuf_queue_init(&ppdu_info->mpdu_q[user]);
1548 
1549 	status_buf_count = mon_pdev_be->desc_count;
1550 	for (idx = 0; idx < status_buf_count; idx++) {
1551 		mon_desc = mon_pdev_be->status[idx];
1552 		if (!mon_desc) {
1553 			qdf_assert_always(0);
1554 			return NULL;
1555 		}
1556 
1557 		buf = mon_desc->buf_addr;
1558 		end_offset = mon_desc->end_offset;
1559 
1560 		dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
1561 		work_done++;
1562 
1563 		rx_tlv = buf;
1564 		rx_tlv_start = buf;
1565 
1566 		do {
1567 			tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
1568 								ppdu_info,
1569 								pdev->soc->hal_soc,
1570 								buf);
1571 
1572 			work_done += dp_rx_mon_process_tlv_status(pdev,
1573 								  ppdu_info,
1574 								  buf,
1575 								  tlv_status,
1576 								  &desc_list,
1577 								  &tail);
1578 			rx_tlv = hal_rx_status_get_next_tlv(rx_tlv, 1);
1579 
1580 			/* HW provides end_offset (how many bytes HW DMA'ed)
1581 			 * as part of descriptor, use this as delimiter for
1582 			 * status buffer
1583 			 */
1584 			if ((rx_tlv - rx_tlv_start) >= (end_offset + 1))
1585 				break;
1586 
1587 	} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
1588 			(tlv_status == HAL_TLV_STATUS_HEADER) ||
1589 			(tlv_status == HAL_TLV_STATUS_MPDU_END) ||
1590 			(tlv_status == HAL_TLV_STATUS_MSDU_END) ||
1591 			(tlv_status == HAL_TLV_STATUS_MON_BUF_ADDR) ||
1592 			(tlv_status == HAL_TLV_STATUS_MPDU_START));
1593 
1594 		/* set status buffer pointer to NULL */
1595 		mon_pdev_be->status[idx] = NULL;
1596 		mon_pdev_be->desc_count--;
1597 
1598 		qdf_frag_free(buf);
1599 		DP_STATS_INC(mon_soc, frag_free, 1);
1600 		mon_pdev->rx_mon_stats.status_buf_count++;
1601 	}
1602 
1603 	dp_mon_rx_stats_update_rssi_dbm_params(mon_pdev, ppdu_info);
1604 	if (work_done) {
1605 		mon_pdev->rx_mon_stats.mon_rx_bufs_replenished_dest +=
1606 			work_done;
1607 		if (desc_list)
1608 			dp_mon_add_desc_list_to_free_list(soc,
1609 							  &desc_list, &tail,
1610 							  rx_mon_desc_pool);
1611 	}
1612 
1613 	ppdu_info->rx_status.tsft = ppdu_info->rx_status.tsft +
1614 				    pdev->timestamp.mlo_offset_lo_us +
1615 				    ((uint64_t)pdev->timestamp.mlo_offset_hi_us
1616 				    << 32);
1617 
1618 	return ppdu_info;
1619 }
1620 
1621 /**
1622  * dp_rx_mon_update_peer_id() - Update sw_peer_id with link peer_id
1623  *
1624  * @pdev: DP pdev handle
1625  * @ppdu_info: HAL PPDU Info buffer
1626  *
1627  * Return: none
1628  */
#ifdef WLAN_FEATURE_11BE_MLO
#define DP_PEER_ID_MASK 0x3FFF
static inline
void dp_rx_mon_update_peer_id(struct dp_pdev *pdev,
			      struct hal_rx_ppdu_info *ppdu_info)
{
	uint32_t i;
	uint16_t peer_id;
	struct dp_soc *soc = pdev->soc;
	uint32_t num_users = ppdu_info->com_info.num_users;

	for (i = 0; i < num_users; i++) {
		peer_id = ppdu_info->rx_user_status[i].sw_peer_id;
		if (peer_id == HTT_INVALID_PEER)
			continue;
		/*
		 * sw_peer_id bit layout in MLO builds:
		+---------------------------------------------------------------------+
		| 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
		+---------------------------------------------------------------------+
		| CHIP ID | ML |                     PEER ID                          |
		+---------------------------------------------------------------------+
		 * Keep only the lower 14 bits, then resolve to the link
		 * peer id for this pdev's lmac.
		 */
		peer_id &= DP_PEER_ID_MASK;
		peer_id = dp_get_link_peer_id_by_lmac_id(soc, peer_id,
							 pdev->lmac_id);
		ppdu_info->rx_user_status[i].sw_peer_id = peer_id;
	}
}
#else
/* Non-MLO build: sw_peer_id needs no translation */
static inline
void dp_rx_mon_update_peer_id(struct dp_pdev *pdev,
			      struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif
1664 
1665 /*
1666  * HAL_RX_PKT_TYPE_11A     0 -> CDP_PKT_TYPE_OFDM
1667  * HAL_RX_PKT_TYPE_11B     1 -> CDP_PKT_TYPE_CCK
1668  * HAL_RX_PKT_TYPE_11N     2 -> CDP_PKT_TYPE_HT
1669  * HAL_RX_PKT_TYPE_11AC    3 -> CDP_PKT_TYPE_VHT
1670  * HAL_RX_PKT_TYPE_11AX    4 -> CDP_PKT_TYPE_HE
1671  * HAL_RX_PKT_TYPE_11BE    6 -> CDP_PKT_TYPE_EHT
1672  */
1673 
1674 static uint32_t const cdp_preamble_type_map[] = {
1675 	CDP_PKT_TYPE_OFDM,
1676 	CDP_PKT_TYPE_CCK,
1677 	CDP_PKT_TYPE_HT,
1678 	CDP_PKT_TYPE_VHT,
1679 	CDP_PKT_TYPE_HE,
1680 	CDP_PKT_TYPE_NO_SUP,
1681 #ifdef WLAN_FEATURE_11BE
1682 	CDP_PKT_TYPE_EHT,
1683 #endif
1684 	CDP_PKT_TYPE_MAX,
1685 };
1686 
1687 /*
1688  * HAL_RX_RECEPTION_TYPE_SU       -> CDP_RX_TYPE_SU
1689  * HAL_RX_RECEPTION_TYPE_MU_MIMO  -> CDP_RX_TYPE_MU_MIMO
1690  * HAL_RX_RECEPTION_TYPE_OFDMA    -> CDP_RX_TYPE_MU_OFDMA
1691  * HAL_RX_RECEPTION_TYPE_MU_OFDMA -> CDP_RX_TYPE_MU_OFDMA_MIMO
1692  */
1693 static uint32_t const cdp_reception_type_map[] = {
1694 	CDP_RX_TYPE_SU,
1695 	CDP_RX_TYPE_MU_MIMO,
1696 	CDP_RX_TYPE_MU_OFDMA,
1697 	CDP_RX_TYPE_MU_OFDMA_MIMO,
1698 };
1699 
1700 static uint32_t const cdp_mu_dl_up_map[] = {
1701 	CDP_MU_TYPE_DL,
1702 	CDP_MU_TYPE_UL,
1703 };
1704 
1705 static inline void
1706 dp_rx_mu_stats_update(
1707 	struct hal_rx_ppdu_info *ppdu_info,
1708 	struct cdp_pdev_mon_stats *rx_mon_sts,
1709 	uint32_t preamble_type,
1710 	uint32_t  recept_type,
1711 	uint32_t  mu_dl_ul,
1712 	uint32_t i
1713 )
1714 {
1715 	struct mon_rx_user_status *rx_user_status;
1716 
1717 	rx_user_status =  &ppdu_info->rx_user_status[i];
1718 	rx_mon_sts->mpdu_cnt_fcs_ok[preamble_type][recept_type][mu_dl_ul][i]
1719 			+= rx_user_status->mpdu_cnt_fcs_ok;
1720 	rx_mon_sts->mpdu_cnt_fcs_err[preamble_type][recept_type][mu_dl_ul][i]
1721 			+= rx_user_status->mpdu_cnt_fcs_err;
1722 }
1723 
1724 static inline void
1725 dp_rx_he_ppdu_stats_update(
1726 	struct cdp_pdev_mon_stats *stats,
1727 	struct hal_rx_u_sig_info *u_sig
1728 )
1729 {
1730 	stats->ppdu_eht_type_mode[u_sig->ppdu_type_comp_mode][u_sig->ul_dl]++;
1731 }
1732 
1733 static inline void
1734 dp_rx_he_ppdu_stats(struct dp_pdev *pdev, struct hal_rx_ppdu_info *ppdu_info)
1735 {
1736 	struct dp_mon_pdev *mon_pdev;
1737 	struct cdp_pdev_mon_stats *rx_mon_stats;
1738 
1739 	mon_pdev = pdev->monitor_pdev;
1740 	rx_mon_stats = &mon_pdev->rx_mon_stats;
1741 
1742 	if (ppdu_info->u_sig_info.ppdu_type_comp_mode < CDP_EHT_TYPE_MODE_MAX &&
1743 	    ppdu_info->u_sig_info.ul_dl < CDP_MU_TYPE_MAX)
1744 		dp_rx_he_ppdu_stats_update(
1745 			rx_mon_stats,
1746 			&ppdu_info->u_sig_info);
1747 		else
1748 			qdf_assert(0);
1749 }
1750 
1751 static inline void
1752 dp_rx_mu_stats(struct dp_pdev *pdev, struct hal_rx_ppdu_info *ppdu_info)
1753 {
1754 	struct dp_mon_pdev *mon_pdev;
1755 	struct cdp_pdev_mon_stats *rx_mon_stats;
1756 	struct mon_rx_status *rx_status;
1757 	uint32_t preamble_type, reception_type, mu_dl_ul, num_users, i;
1758 
1759 	mon_pdev = pdev->monitor_pdev;
1760 	rx_mon_stats = &mon_pdev->rx_mon_stats;
1761 	rx_status = &ppdu_info->rx_status;
1762 
1763 	num_users = ppdu_info->com_info.num_users;
1764 
1765 	if (rx_status->preamble_type < CDP_PKT_TYPE_MAX)
1766 		preamble_type = cdp_preamble_type_map[rx_status->preamble_type];
1767 	else
1768 		preamble_type = CDP_PKT_TYPE_NO_SUP;
1769 
1770 	reception_type = cdp_reception_type_map[rx_status->reception_type];
1771 	mu_dl_ul = cdp_mu_dl_up_map[rx_status->mu_dl_ul];
1772 
1773 	for (i = 0; i < num_users; i++) {
1774 		if (i >= CDP_MU_SNIF_USER_MAX)
1775 			return;
1776 
1777 		dp_rx_mu_stats_update(ppdu_info, rx_mon_stats, preamble_type,
1778 				      reception_type, mu_dl_ul, i);
1779 	}
1780 
1781 	if (rx_status->eht_flags)
1782 		dp_rx_he_ppdu_stats(pdev, ppdu_info);
1783 }
1784 
/**
 * dp_rx_mon_srng_process_2_0() - Reap the monitor destination ring, queue
 * status buffers per PPDU, and process each completed PPDU
 *
 * @soc: DP soc handle
 * @int_ctx: interrupt context
 * @mac_id: lmac id
 * @quota: max number of ring entries to process
 *
 * Return: number of PPDUs processed (work done)
 */
static inline uint32_t
dp_rx_mon_srng_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
			   uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	void *rx_mon_dst_ring_desc;
	void *mon_dst_srng;
	uint32_t work_done = 0;
	struct hal_rx_ppdu_info *ppdu_info = NULL;
	QDF_STATUS status;
	if (!pdev) {
		dp_mon_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return work_done;
	}

	mon_pdev = pdev->monitor_pdev;
	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	mon_dst_srng = soc->rxdma_mon_dst_ring[mac_id].hal_srng;

	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
		dp_mon_err("%pK: : HAL Monitor Destination Ring Init Failed -- %pK",
			   soc, mon_dst_srng);
		return work_done;
	}

	hal_soc = soc->hal_soc;

	qdf_assert((hal_soc && pdev));

	/* mon_lock serializes ring reap and PPDU state across contexts */
	qdf_spin_lock_bh(&mon_pdev->mon_lock);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dst_srng))) {
		dp_mon_err("%s %d : HAL Mon Dest Ring access Failed -- %pK",
			   __func__, __LINE__, mon_dst_srng);
		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
		return work_done;
	}

	/* Peek (not pop) each entry so partially handled descriptors stay
	 * on the ring; advance explicitly with hal_srng_dst_get_next().
	 */
	while (qdf_likely((rx_mon_dst_ring_desc =
			  (void *)hal_srng_dst_peek(hal_soc, mon_dst_srng))
				&& quota--)) {
		struct hal_mon_desc hal_mon_rx_desc = {0};
		struct dp_mon_desc *mon_desc;
		hal_be_get_mon_dest_status(soc->hal_soc,
					   rx_mon_dst_ring_desc,
					   &hal_mon_rx_desc);
		/* If it's empty descriptor, skip processing
		 * and process next hW desc
		 */
		if (hal_mon_rx_desc.empty_descriptor == 1) {
			dp_mon_debug("empty descriptor found mon_pdev: %pK",
				     mon_pdev);
			rx_mon_dst_ring_desc =
				hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			dp_rx_mon_update_drop_cnt(mon_pdev, &hal_mon_rx_desc);
			continue;
		}
		mon_desc = (struct dp_mon_desc *)(uintptr_t)(hal_mon_rx_desc.buf_addr);
		qdf_assert_always(mon_desc);

		/* HW can replay the same descriptor; detect via the
		 * previously seen (desc, cookie) pair and skip it.
		 */
		if ((mon_desc == mon_pdev_be->prev_rxmon_desc) &&
		    (mon_desc->cookie == mon_pdev_be->prev_rxmon_cookie)) {
			dp_mon_err("duplicate descritout found mon_pdev: %pK mon_desc: %pK cookie: %d",
				   mon_pdev, mon_desc, mon_desc->cookie);
			mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			continue;
		}
		mon_pdev_be->prev_rxmon_desc = mon_desc;
		mon_pdev_be->prev_rxmon_cookie = mon_desc->cookie;

		if (!mon_desc->unmapped) {
			qdf_mem_unmap_page(soc->osdev, mon_desc->paddr,
					   rx_mon_desc_pool->buf_size,
					   QDF_DMA_FROM_DEVICE);
			mon_desc->unmapped = 1;
		}
		mon_desc->end_offset = hal_mon_rx_desc.end_offset;

		/* Flush and truncated status buffers content
		 * need to discarded
		 */
		if (hal_mon_rx_desc.end_reason == HAL_MON_FLUSH_DETECTED ||
		    hal_mon_rx_desc.end_reason == HAL_MON_PPDU_TRUNCATED) {
			dp_mon_debug("end_resaon: %d mon_pdev: %pK",
				     hal_mon_rx_desc.end_reason, mon_pdev);
			mon_pdev->rx_mon_stats.status_ppdu_drop++;
			dp_rx_mon_handle_flush_n_trucated_ppdu(soc,
							       pdev,
							       mon_desc);
			rx_mon_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
							mon_dst_srng);
			continue;
		}
		if (mon_pdev_be->desc_count >= DP_MON_MAX_STATUS_BUF)
			qdf_assert_always(0);

		/* Queue this status buffer; the whole queue is parsed once
		 * the PPDU's last buffer (end-of-PPDU) arrives.
		 */
		mon_pdev_be->status[mon_pdev_be->desc_count++] = mon_desc;

		rx_mon_dst_ring_desc = hal_srng_dst_get_next(hal_soc, mon_dst_srng);

		dp_rx_process_pktlog_be(soc, pdev, ppdu_info,
					mon_desc->buf_addr,
					hal_mon_rx_desc.end_offset);

		/* More status buffers follow for this PPDU */
		if (hal_mon_rx_desc.end_reason == HAL_MON_STATUS_BUFFER_FULL)
			continue;

		mon_pdev->rx_mon_stats.status_ppdu_done++;

		ppdu_info = dp_rx_mon_process_status_tlv(pdev);

		if (ppdu_info) {
			mon_pdev->rx_mon_stats.start_user_info_cnt +=
				ppdu_info->start_user_info_cnt;
			ppdu_info->start_user_info_cnt = 0;

			mon_pdev->rx_mon_stats.end_user_stats_cnt +=
				ppdu_info->end_user_stats_cnt;
			ppdu_info->end_user_stats_cnt = 0;

			dp_rx_mon_update_peer_id(pdev, ppdu_info);
			dp_rx_mu_stats(pdev, ppdu_info);
		}

		/* Call enhanced stats update API */
		if (mon_pdev->enhanced_stats_en && ppdu_info)
			dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);
		else if (dp_cfr_rcc_mode_status(pdev) && ppdu_info)
			dp_rx_handle_cfr(soc, pdev, ppdu_info);

		dp_rx_mon_update_user_ctrl_frame_stats(pdev, ppdu_info);

		/* Hand the PPDU off to the deferred workqueue; on failure
		 * we own the ppdu_info and must free it here.
		 */
		status = dp_rx_mon_add_ppdu_info_to_wq(pdev, ppdu_info);
		if (status != QDF_STATUS_SUCCESS) {
			if (ppdu_info)
				__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
		}

		work_done++;

		/* desc_count should be zero  after PPDU status processing */
		if (mon_pdev_be->desc_count > 0)
			qdf_assert_always(0);

		mon_pdev_be->desc_count = 0;
	}
	dp_srng_access_end(int_ctx, soc, mon_dst_srng);

	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
	dp_mon_info("mac_id: %d, work_done:%d", mac_id, work_done);
	return work_done;
}
1944 
1945 uint32_t
1946 dp_rx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
1947 		      uint32_t mac_id, uint32_t quota)
1948 {
1949 	uint32_t work_done;
1950 
1951 	work_done = dp_rx_mon_srng_process_2_0(soc, int_ctx, mac_id, quota);
1952 
1953 	return work_done;
1954 }
1955 
1956 void
1957 dp_rx_mon_buf_desc_pool_deinit(struct dp_soc *soc)
1958 {
1959 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1960 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1961 
1962 	/* Drain page frag cachce before pool deinit */
1963 	qdf_frag_cache_drain(&mon_soc_be->rx_desc_mon.pf_cache);
1964 	dp_mon_desc_pool_deinit(&mon_soc_be->rx_desc_mon);
1965 }
1966 
1967 QDF_STATUS
1968 dp_rx_mon_buf_desc_pool_init(struct dp_soc *soc)
1969 {
1970 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1971 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1972 	uint32_t num_entries;
1973 
1974 	num_entries =
1975 		wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc->wlan_cfg_ctx);
1976 	return dp_mon_desc_pool_init(&mon_soc_be->rx_desc_mon, num_entries);
1977 }
1978 
1979 void dp_rx_mon_buf_desc_pool_free(struct dp_soc *soc)
1980 {
1981 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1982 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1983 
1984 	if (mon_soc)
1985 		dp_mon_desc_pool_free(&mon_soc_be->rx_desc_mon);
1986 }
1987 
1988 QDF_STATUS
1989 dp_rx_mon_buf_desc_pool_alloc(struct dp_soc *soc)
1990 {
1991 	struct dp_mon_desc_pool *rx_mon_desc_pool;
1992 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1993 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1994 	int entries;
1995 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1996 
1997 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1998 
1999 	entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
2000 
2001 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
2002 
2003 	qdf_print("%s:%d rx mon buf desc pool entries: %d", __func__, __LINE__, entries);
2004 	return dp_mon_desc_pool_alloc(entries, rx_mon_desc_pool);
2005 }
2006 
2007 void
2008 dp_rx_mon_buffers_free(struct dp_soc *soc)
2009 {
2010 	struct dp_mon_desc_pool *rx_mon_desc_pool;
2011 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2012 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2013 
2014 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
2015 
2016 	dp_mon_pool_frag_unmap_and_free(soc, rx_mon_desc_pool);
2017 }
2018 
2019 QDF_STATUS
2020 dp_rx_mon_buffers_alloc(struct dp_soc *soc, uint32_t size)
2021 {
2022 	struct dp_srng *mon_buf_ring;
2023 	struct dp_mon_desc_pool *rx_mon_desc_pool;
2024 	union dp_mon_desc_list_elem_t *desc_list = NULL;
2025 	union dp_mon_desc_list_elem_t *tail = NULL;
2026 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2027 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2028 
2029 	mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
2030 
2031 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
2032 
2033 	return dp_mon_buffers_replenish(soc, mon_buf_ring,
2034 					rx_mon_desc_pool,
2035 					size,
2036 					&desc_list, &tail, NULL);
2037 }
2038 
2039 #ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_rx_mon_populate_ppdu_usr_info_2_0() - Populate per-user ppdu stats
 * from the per-user HAL Rx status
 * @rx_user_status: HAL per-user Rx status for this ppdu
 * @ppdu_user: destination cdp per-user ppdu stats entry
 *
 * Return: void
 */
void
dp_rx_mon_populate_ppdu_usr_info_2_0(struct mon_rx_user_status *rx_user_status,
				     struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	/* Only the MPDU retry count is sourced from rx_user_status here */
	ppdu_user->mpdu_retries = rx_user_status->retry_mpdu;
}
2046 
2047 #ifdef WLAN_FEATURE_11BE
2048 void dp_rx_mon_stats_update_2_0(struct dp_mon_peer *mon_peer,
2049 				struct cdp_rx_indication_ppdu *ppdu,
2050 				struct cdp_rx_stats_ppdu_user *ppdu_user)
2051 {
2052 	uint8_t mcs, preamble, ppdu_type, punc_mode;
2053 	uint32_t num_msdu;
2054 
2055 	preamble = ppdu->u.preamble;
2056 	ppdu_type = ppdu->u.ppdu_type;
2057 	num_msdu = ppdu_user->num_msdu;
2058 	punc_mode = ppdu->punc_bw;
2059 
2060 	if (ppdu_type == HAL_RX_TYPE_SU)
2061 		mcs = ppdu->u.mcs;
2062 	else
2063 		mcs = ppdu_user->mcs;
2064 
2065 	DP_STATS_INC(mon_peer, rx.mpdu_retry_cnt, ppdu_user->mpdu_retries);
2066 	DP_STATS_INC(mon_peer, rx.punc_bw[punc_mode], num_msdu);
2067 	DP_STATS_INCC(mon_peer,
2068 		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
2069 		      ((mcs >= MAX_MCS_11BE) && (preamble == DOT11_BE)));
2070 	DP_STATS_INCC(mon_peer,
2071 		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
2072 		      ((mcs < MAX_MCS_11BE) && (preamble == DOT11_BE)));
2073 	DP_STATS_INCC(mon_peer,
2074 		      rx.su_be_ppdu_cnt.mcs_count[MAX_MCS - 1], 1,
2075 		      ((mcs >= (MAX_MCS_11BE)) && (preamble == DOT11_BE) &&
2076 		      (ppdu_type == HAL_RX_TYPE_SU)));
2077 	DP_STATS_INCC(mon_peer,
2078 		      rx.su_be_ppdu_cnt.mcs_count[mcs], 1,
2079 		      ((mcs < (MAX_MCS_11BE)) && (preamble == DOT11_BE) &&
2080 		      (ppdu_type == HAL_RX_TYPE_SU)));
2081 	DP_STATS_INCC(mon_peer,
2082 		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[MAX_MCS - 1],
2083 		      1, ((mcs >= (MAX_MCS_11BE)) &&
2084 		      (preamble == DOT11_BE) &&
2085 		      (ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
2086 	DP_STATS_INCC(mon_peer,
2087 		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[mcs],
2088 		      1, ((mcs < (MAX_MCS_11BE)) &&
2089 		      (preamble == DOT11_BE) &&
2090 		      (ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
2091 	DP_STATS_INCC(mon_peer,
2092 		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[MAX_MCS - 1],
2093 		      1, ((mcs >= (MAX_MCS_11BE)) &&
2094 		      (preamble == DOT11_BE) &&
2095 		      (ppdu_type == HAL_RX_TYPE_MU_MIMO)));
2096 	DP_STATS_INCC(mon_peer,
2097 		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[mcs],
2098 		      1, ((mcs < (MAX_MCS_11BE)) &&
2099 		      (preamble == DOT11_BE) &&
2100 		      (ppdu_type == HAL_RX_TYPE_MU_MIMO)));
2101 }
2102 
2103 void
2104 dp_rx_mon_populate_ppdu_info_2_0(struct hal_rx_ppdu_info *hal_ppdu_info,
2105 				 struct cdp_rx_indication_ppdu *ppdu)
2106 {
2107 	uint16_t puncture_pattern;
2108 	enum cdp_punctured_modes punc_mode;
2109 
2110 	/* Align bw value as per host data structures */
2111 	if (hal_ppdu_info->rx_status.bw == HAL_FULL_RX_BW_320)
2112 		ppdu->u.bw = CMN_BW_320MHZ;
2113 	else
2114 		ppdu->u.bw = hal_ppdu_info->rx_status.bw;
2115 	if (hal_ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11BE) {
2116 		/* Align preamble value as per host data structures */
2117 		ppdu->u.preamble = DOT11_BE;
2118 		ppdu->u.stbc = hal_ppdu_info->rx_status.is_stbc;
2119 		ppdu->u.dcm = hal_ppdu_info->rx_status.dcm;
2120 	} else {
2121 		ppdu->u.preamble = hal_ppdu_info->rx_status.preamble_type;
2122 	}
2123 
2124 	puncture_pattern = hal_ppdu_info->rx_status.punctured_pattern;
2125 	punc_mode = dp_mon_get_puncture_type(puncture_pattern,
2126 					     ppdu->u.bw);
2127 	ppdu->punc_bw = punc_mode;
2128 }
2129 #else
/* Non-11BE build variant: only the MPDU retry count is accumulated;
 * the 11BE per-MCS and punctured-bw counters are compiled out.
 */
void dp_rx_mon_stats_update_2_0(struct dp_mon_peer *mon_peer,
				struct cdp_rx_indication_ppdu *ppdu,
				struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	DP_STATS_INC(mon_peer, rx.mpdu_retry_cnt, ppdu_user->mpdu_retries);
}
2136 
/* Non-11BE build variant: puncturing is not applicable, so every ppdu
 * is reported as unpunctured.
 */
void
dp_rx_mon_populate_ppdu_info_2_0(struct hal_rx_ppdu_info *hal_ppdu_info,
				 struct cdp_rx_indication_ppdu *ppdu)
{
	ppdu->punc_bw = NO_PUNCTURE;
}
2143 #endif
2144 void dp_mon_rx_print_advanced_stats_2_0(struct dp_soc *soc,
2145 					struct dp_pdev *pdev)
2146 {
2147 	struct cdp_pdev_mon_stats *rx_mon_stats;
2148 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2149 	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
2150 	struct dp_mon_pdev_be *mon_pdev_be =
2151 				dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
2152 
2153 	rx_mon_stats = &mon_pdev->rx_mon_stats;
2154 
2155 	DP_PRINT_STATS("total_ppdu_info_alloc = %d",
2156 		       rx_mon_stats->total_ppdu_info_alloc);
2157 	DP_PRINT_STATS("total_ppdu_info_free = %d",
2158 		       rx_mon_stats->total_ppdu_info_free);
2159 	DP_PRINT_STATS("total_ppdu_info_enq = %d",
2160 		       rx_mon_stats->total_ppdu_info_enq);
2161 	DP_PRINT_STATS("total_ppdu_info_drop = %d",
2162 		       rx_mon_stats->total_ppdu_info_drop);
2163 	DP_PRINT_STATS("rx_hdr_not_received = %d",
2164 		       rx_mon_stats->rx_hdr_not_received);
2165 	DP_PRINT_STATS("parent_buf_alloc = %d",
2166 		       rx_mon_stats->parent_buf_alloc);
2167 	DP_PRINT_STATS("parent_buf_free = %d",
2168 		       rx_mon_stats->parent_buf_free);
2169 	DP_PRINT_STATS("mpdus_buf_to_stack = %d",
2170 		       rx_mon_stats->mpdus_buf_to_stack);
2171 	DP_PRINT_STATS("frag_alloc = %d",
2172 		       mon_soc->stats.frag_alloc);
2173 	DP_PRINT_STATS("frag_free = %d",
2174 		       mon_soc->stats.frag_free);
2175 	DP_PRINT_STATS("status_buf_count = %d",
2176 		       rx_mon_stats->status_buf_count);
2177 	DP_PRINT_STATS("pkt_buf_count = %d",
2178 		       rx_mon_stats->pkt_buf_count);
2179 	DP_PRINT_STATS("rx_mon_queue_depth= %d",
2180 		       mon_pdev_be->rx_mon_queue_depth);
2181 	DP_PRINT_STATS("empty_desc= %d",
2182 		       mon_pdev->rx_mon_stats.empty_desc_ppdu);
2183 	DP_PRINT_STATS("mpdu_dropped_due_invalid_decap= %d",
2184 		       mon_pdev->rx_mon_stats.mpdu_decap_type_invalid);
2185 	DP_PRINT_STATS("total_free_elem= %d",
2186 		       mon_pdev_be->total_free_elem);
2187 	DP_PRINT_STATS("ppdu_drop_cnt= %d",
2188 		       mon_pdev->rx_mon_stats.ppdu_drop_cnt);
2189 	DP_PRINT_STATS("mpdu_drop_cnt= %d",
2190 		       mon_pdev->rx_mon_stats.mpdu_drop_cnt);
2191 	DP_PRINT_STATS("end_of_ppdu_drop_cnt= %d",
2192 		       mon_pdev->rx_mon_stats.end_of_ppdu_drop_cnt);
2193 	DP_PRINT_STATS("tlv_drop_cnt= %d",
2194 		       mon_pdev->rx_mon_stats.tlv_drop_cnt);
2195 }
2196 #endif
2197