xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/dp_mon.c (revision d3be64a66deb873bac895fb0ecea12cbfca02017)
1 /*
2  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 #include <dp_types.h>
18 #include "dp_rx.h"
19 #include "dp_peer.h"
20 #include <dp_htt.h>
21 #include <dp_mon_filter.h>
23 #include <dp_mon.h>
24 #include <dp_rx_mon.h>
25 #include <dp_internal.h>
26 #include "htt_ppdu_stats.h"
27 #include "dp_cal_client_api.h"
28 #if defined(DP_CON_MON)
29 #ifndef REMOVE_PKT_LOG
30 #include <pktlog_ac_api.h>
31 #include <pktlog_ac.h>
32 #endif
33 #endif
34 #ifdef FEATURE_PERPKT_INFO
35 #include "dp_ratetable.h"
36 #endif
37 #ifdef QCA_SUPPORT_LITE_MONITOR
38 #include "dp_lite_mon.h"
39 #endif
40 #include "dp_mon_1.0.h"
41 #ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE
42 #include "dp_mon_2.0.h"
43 #include "dp_mon_filter_2.0.h"
44 #endif
45 
46 #define DP_INTR_POLL_TIMER_MS	5
47 #define INVALID_FREE_BUFF 0xffffffff
48 
49 #ifdef WLAN_RX_PKT_CAPTURE_ENH
50 #include "dp_rx_mon_feature.h"
51 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
52 
53 #ifdef QCA_UNDECODED_METADATA_SUPPORT
54 #define MAX_STRING_LEN_PER_FIELD 6
55 #define DP_UNDECODED_ERR_LENGTH (MAX_STRING_LEN_PER_FIELD * CDP_PHYRX_ERR_MAX)
56 #endif
57 
58 #ifdef QCA_MCOPY_SUPPORT
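/**
 * dp_pdev_disable_mcopy_code() - Clear M-copy state on the monitor pdev
 * @pdev: Datapath pdev handle
 *
 * Marks M-copy as disabled and drops the cached monitor vdev reference.
 *
 * Return: void
 */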
59 static inline void
60 dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
61 {
62 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
63 
64 	mon_pdev->mcopy_mode = M_COPY_DISABLED;
65 	mon_pdev->mvdev = NULL;
66 }
67 
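/**
 * dp_reset_mcopy_mode() - Disable M-copy mode and restore monitor filters
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */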
68 static inline void
69 dp_reset_mcopy_mode(struct dp_pdev *pdev)
70 {
71 	QDF_STATUS status = QDF_STATUS_SUCCESS;
72 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
73 	struct cdp_mon_ops *cdp_ops;
74 
75 	if (mon_pdev->mcopy_mode) {
76 		cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
		if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
78 			cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
79 							  DP_FULL_MON_ENABLE);
80 		dp_pdev_disable_mcopy_code(pdev);
81 		dp_mon_filter_reset_mcopy_mode(pdev);
82 		status = dp_mon_filter_update(pdev);
83 		if (status != QDF_STATUS_SUCCESS) {
84 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Failed to reset M_copy mode filters"));
86 		}
87 		mon_pdev->monitor_configured = false;
88 	}
89 }
90 
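/**
 * dp_config_mcopy_mode() - Enable M-copy mode and program its filters
 * @pdev: Datapath pdev handle
 * @val: M-copy mode to configure (2 or 4 from the debug sniffer config)
 *
 * Return: QDF_STATUS
 */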
91 static QDF_STATUS
92 dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
93 {
94 	QDF_STATUS status = QDF_STATUS_SUCCESS;
95 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
96 	struct dp_mon_ops *mon_ops;
97 	struct cdp_mon_ops *cdp_ops;
98 
99 	if (mon_pdev->mvdev)
100 		return QDF_STATUS_E_RESOURCES;
101 
102 	mon_pdev->mcopy_mode = val;
103 	mon_pdev->tx_sniffer_enable = 0;
104 	mon_pdev->monitor_configured = true;
105 
106 	mon_ops = dp_mon_ops_get(pdev->soc);
107 	if (!wlan_cfg_is_delay_mon_replenish(pdev->soc->wlan_cfg_ctx)) {
108 		if (mon_ops && mon_ops->mon_vdev_set_monitor_mode_rings)
109 			mon_ops->mon_vdev_set_monitor_mode_rings(pdev, true);
110 	}
111 
112 	/*
113 	 * Setup the M copy mode filter.
114 	 */
115 	cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
117 		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
118 						  DP_FULL_MON_ENABLE);
119 	dp_mon_filter_setup_mcopy_mode(pdev);
120 	status = dp_mon_filter_update(pdev);
121 	if (status != QDF_STATUS_SUCCESS) {
122 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
123 			  FL("Failed to set M_copy mode filters"));
124 		dp_mon_filter_reset_mcopy_mode(pdev);
125 		dp_pdev_disable_mcopy_code(pdev);
126 		return status;
127 	}
128 
129 	if (!mon_pdev->pktlog_ppdu_stats)
130 		dp_h2t_cfg_stats_msg_send(pdev,
131 					  DP_PPDU_STATS_CFG_SNIFFER,
132 					  pdev->pdev_id);
133 
134 	return status;
135 }
136 #else
137 static inline void
138 dp_reset_mcopy_mode(struct dp_pdev *pdev)
139 {
140 }
141 
142 static inline QDF_STATUS
143 dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
144 {
145 	return QDF_STATUS_E_INVAL;
146 }
147 #endif /* QCA_MCOPY_SUPPORT */
148 
149 #ifdef QCA_UNDECODED_METADATA_SUPPORT
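/**
 * dp_reset_undecoded_metadata_capture() - Disable undecoded metadata capture
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS
 */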
150 static QDF_STATUS
151 dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
152 {
153 	QDF_STATUS status = QDF_STATUS_SUCCESS;
154 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
155 
156 	if (mon_pdev->undecoded_metadata_capture) {
157 		dp_mon_filter_reset_undecoded_metadata_mode(pdev);
158 		status = dp_mon_filter_update(pdev);
159 		if (status != QDF_STATUS_SUCCESS) {
160 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
161 				  FL("Undecoded capture filter reset failed"));
162 		}
163 	}
164 	mon_pdev->undecoded_metadata_capture = 0;
165 	return status;
166 }
167 
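/**
 * dp_enable_undecoded_metadata_capture() - Enable undecoded metadata capture
 * @pdev: Datapath pdev handle
 * @val: capture configuration value (non-zero enables the capture)
 *
 * Return: QDF_STATUS
 */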
168 static QDF_STATUS
169 dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
170 {
171 	QDF_STATUS status = QDF_STATUS_SUCCESS;
172 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
173 
174 	if (!mon_pdev->mvdev) {
		qdf_err("monitor vdev is NULL");
176 		return QDF_STATUS_E_RESOURCES;
177 	}
178 
179 	mon_pdev->undecoded_metadata_capture = val;
180 	mon_pdev->monitor_configured = true;
181 
183 	/* Setup the undecoded metadata capture mode filter. */
184 	dp_mon_filter_setup_undecoded_metadata_mode(pdev);
185 	status = dp_mon_filter_update(pdev);
186 	if (status != QDF_STATUS_SUCCESS) {
187 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
188 			  FL("Failed to set Undecoded capture filters"));
189 		dp_mon_filter_reset_undecoded_metadata_mode(pdev);
190 		return status;
191 	}
192 
193 	return status;
194 }
195 #else
196 static inline QDF_STATUS
197 dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
198 {
199 	return QDF_STATUS_E_INVAL;
200 }
201 
202 static inline QDF_STATUS
203 dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
204 {
205 	return QDF_STATUS_E_INVAL;
206 }
207 #endif /* QCA_UNDECODED_METADATA_SUPPORT */
208 
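/**
 * dp_reset_monitor_mode() - Disable monitor mode and reset its filters
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @special_monitor: Flag to denote if it is smart monitor mode
 *
 * Return: QDF_STATUS
 */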
209 QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
210 				 uint8_t pdev_id,
211 				 uint8_t special_monitor)
212 {
213 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
214 	struct dp_pdev *pdev =
215 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
216 						   pdev_id);
217 	QDF_STATUS status = QDF_STATUS_SUCCESS;
218 	struct dp_mon_pdev *mon_pdev;
219 	struct cdp_mon_ops *cdp_ops;
220 
221 	if (!pdev)
222 		return QDF_STATUS_E_FAILURE;
223 
224 	mon_pdev = pdev->monitor_pdev;
225 
226 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
227 
228 	cdp_ops = dp_mon_cdp_ops_get(soc);
	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
230 		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
231 						  DP_FULL_MON_DISABLE);
232 	mon_pdev->mvdev = NULL;
233 
	/*
	 * Lite monitor mode, smart monitor mode and monitor
	 * mode use this API to reset filters and disable the mode.
	 */
238 	if (mon_pdev->mcopy_mode) {
239 #if defined(QCA_MCOPY_SUPPORT)
240 		dp_pdev_disable_mcopy_code(pdev);
241 		dp_mon_filter_reset_mcopy_mode(pdev);
242 #endif /* QCA_MCOPY_SUPPORT */
243 	} else if (special_monitor) {
244 #if defined(ATH_SUPPORT_NAC)
245 		dp_mon_filter_reset_smart_monitor(pdev);
246 #endif /* ATH_SUPPORT_NAC */
		/* For mon 2.0 we make use of lite mon to
		 * set filters for the smart monitor use case.
		 */
250 		dp_monitor_lite_mon_disable_rx(pdev);
251 	} else if (mon_pdev->undecoded_metadata_capture) {
252 #ifdef QCA_UNDECODED_METADATA_SUPPORT
253 		dp_reset_undecoded_metadata_capture(pdev);
254 #endif
255 	} else {
256 		dp_mon_filter_reset_mon_mode(pdev);
257 	}
258 	status = dp_mon_filter_update(pdev);
259 	if (status != QDF_STATUS_SUCCESS) {
260 		dp_rx_mon_dest_err("%pK: Failed to reset monitor filters",
261 				   soc);
262 	}
263 
264 	mon_pdev->monitor_configured = false;
265 
266 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
267 	return QDF_STATUS_SUCCESS;
268 }
269 
270 #ifdef QCA_ADVANCE_MON_FILTER_SUPPORT
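/**
 * dp_pdev_set_advance_monitor_filter() - Set the advanced monitor filter
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @filter_val: Filter mode and FP/MO subtype filter settings
 *
 * Return: 0 on success, not 0 on failure
 */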
271 QDF_STATUS
272 dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
273 				   struct cdp_monitor_filter *filter_val)
274 {
	/* Many monitor VAPs can exist in a system but only one can be up at
	 * any time
	 */
278 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
279 	struct dp_vdev *vdev;
280 	struct dp_pdev *pdev =
281 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
282 						   pdev_id);
283 	QDF_STATUS status = QDF_STATUS_SUCCESS;
284 	struct dp_mon_pdev *mon_pdev;
285 
286 	if (!pdev || !pdev->monitor_pdev)
287 		return QDF_STATUS_E_FAILURE;
288 
289 	mon_pdev = pdev->monitor_pdev;
290 	vdev = mon_pdev->mvdev;
291 
292 	if (!vdev)
293 		return QDF_STATUS_E_FAILURE;
294 
295 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
296 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
297 		  pdev, pdev_id, soc, vdev);
298 
	/* Check if current pdev's monitor_vdev exists */
300 	if (!mon_pdev->mvdev) {
301 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
302 			  "vdev=%pK", vdev);
303 		qdf_assert(vdev);
304 	}
305 
306 	/* update filter mode, type in pdev structure */
307 	mon_pdev->mon_filter_mode = filter_val->mode;
308 	mon_pdev->fp_mgmt_filter = filter_val->fp_mgmt;
309 	mon_pdev->fp_ctrl_filter = filter_val->fp_ctrl;
310 	mon_pdev->fp_data_filter = filter_val->fp_data;
311 	mon_pdev->mo_mgmt_filter = filter_val->mo_mgmt;
312 	mon_pdev->mo_ctrl_filter = filter_val->mo_ctrl;
313 	mon_pdev->mo_data_filter = filter_val->mo_data;
314 
315 	dp_mon_filter_setup_mon_mode(pdev);
316 	status = dp_mon_filter_update(pdev);
317 	if (status != QDF_STATUS_SUCCESS) {
318 		dp_rx_mon_dest_err("%pK: Failed to set filter for adv mon mode",
319 				   soc);
320 		dp_mon_filter_reset_mon_mode(pdev);
321 	}
322 
323 	return status;
324 }
325 #endif
326 
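/**
 * dp_deliver_tx_mgmt() - Deliver a Tx management frame to the monitor path
 * @cdp_soc: Datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @nbuf: Management frame buffer
 *
 * Return: QDF_STATUS
 */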
327 QDF_STATUS
328 dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
329 {
330 	struct dp_pdev *pdev =
331 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
332 						   pdev_id);
333 
334 	if (!pdev)
335 		return QDF_STATUS_E_FAILURE;
336 
337 	dp_deliver_mgmt_frm(pdev, nbuf);
338 
339 	return QDF_STATUS_SUCCESS;
340 }
341 
342 #ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
343 /**
344  * dp_scan_spcl_vap_stats_attach() - alloc spcl vap stats struct
345  * @mon_vdev: Datapath mon VDEV handle
346  *
347  * Return: 0 on success, not 0 on failure
348  */
349 static inline QDF_STATUS
350 dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
351 {
352 	mon_vdev->scan_spcl_vap_stats =
353 		qdf_mem_malloc(sizeof(struct cdp_scan_spcl_vap_stats));
354 
355 	if (!mon_vdev->scan_spcl_vap_stats) {
356 		dp_mon_err("scan spcl vap stats attach fail");
357 		return QDF_STATUS_E_NOMEM;
358 	}
359 
360 	return QDF_STATUS_SUCCESS;
361 }
362 
363 /**
364  * dp_scan_spcl_vap_stats_detach() - free spcl vap stats struct
365  * @mon_vdev: Datapath mon VDEV handle
366  *
367  * Return: void
368  */
369 static inline void
370 dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
371 {
372 	if (mon_vdev->scan_spcl_vap_stats) {
373 		qdf_mem_free(mon_vdev->scan_spcl_vap_stats);
374 		mon_vdev->scan_spcl_vap_stats = NULL;
375 	}
376 }
377 
378 /**
379  * dp_reset_scan_spcl_vap_stats() - reset spcl vap rx stats
380  * @vdev: Datapath VDEV handle
381  *
382  * Return: void
383  */
384 static inline void
385 dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
386 {
387 	struct dp_mon_vdev *mon_vdev;
388 	struct dp_mon_pdev *mon_pdev;
389 
390 	mon_pdev = vdev->pdev->monitor_pdev;
391 	if (!mon_pdev || !mon_pdev->reset_scan_spcl_vap_stats_enable)
392 		return;
393 
394 	mon_vdev = vdev->monitor_vdev;
395 	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats)
396 		return;
397 
398 	qdf_mem_zero(mon_vdev->scan_spcl_vap_stats,
399 		     sizeof(struct cdp_scan_spcl_vap_stats));
400 }
401 
402 /**
403  * dp_get_scan_spcl_vap_stats() - get spcl vap rx stats
404  * @soc_hdl: Datapath soc handle
405  * @vdev_id: vdev id
406  * @stats: structure to hold spcl vap stats
407  *
408  * Return: 0 on success, not 0 on failure
409  */
410 static QDF_STATUS
411 dp_get_scan_spcl_vap_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
412 			   struct cdp_scan_spcl_vap_stats *stats)
413 {
414 	struct dp_mon_vdev *mon_vdev = NULL;
415 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
416 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
417 						     DP_MOD_ID_CDP);
418 
419 	if (!vdev || !stats) {
420 		if (vdev)
421 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
422 		return QDF_STATUS_E_INVAL;
423 	}
424 
425 	mon_vdev = vdev->monitor_vdev;
426 	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats) {
427 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
428 		return QDF_STATUS_E_INVAL;
429 	}
430 
431 	qdf_mem_copy(stats, mon_vdev->scan_spcl_vap_stats,
432 		     sizeof(struct cdp_scan_spcl_vap_stats));
433 
434 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
435 	return QDF_STATUS_SUCCESS;
436 }
437 #else
438 static inline void
439 dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
440 {
441 }
442 
443 static inline QDF_STATUS
444 dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
445 {
446 	return QDF_STATUS_SUCCESS;
447 }
448 
449 static inline void
450 dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
451 {
452 }
453 #endif
454 
455 /**
456  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
457  * @dp_soc: DP soc context
458  * @vdev_id: vdev ID
 * @special_monitor: Flag to denote if it is smart monitor mode
460  *
461  * Return: 0 on success, not 0 on failure
462  */
463 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *dp_soc,
464 					   uint8_t vdev_id,
465 					   uint8_t special_monitor)
466 {
467 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
468 	struct dp_pdev *pdev;
469 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
470 						     DP_MOD_ID_CDP);
471 	QDF_STATUS status = QDF_STATUS_SUCCESS;
472 	struct dp_mon_pdev *mon_pdev;
473 	struct cdp_mon_ops *cdp_ops;
474 
475 	if (!vdev)
476 		return QDF_STATUS_E_FAILURE;
477 
478 	pdev = vdev->pdev;
479 
480 	if (!pdev || !pdev->monitor_pdev) {
481 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
482 		return QDF_STATUS_E_FAILURE;
483 	}
484 
485 	mon_pdev = pdev->monitor_pdev;
486 
487 	mon_pdev->mvdev = vdev;
488 
489 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
490 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
491 		  pdev, pdev->pdev_id, pdev->soc, vdev);
492 
	/*
	 * Do not configure the monitor buf ring and filter for smart and
	 * lite monitor:
	 * for smart monitor, filters are added along with the first NAC;
	 * for lite monitor, the required configuration is done through
	 * dp_set_pdev_param.
	 */
500 
501 	if (special_monitor) {
502 		status = QDF_STATUS_SUCCESS;
503 		goto fail;
504 	}
505 
506 	if (mon_pdev->scan_spcl_vap_configured)
507 		dp_reset_scan_spcl_vap_stats(vdev);
508 
	/* Check if current pdev's monitor_vdev exists */
510 	if (mon_pdev->monitor_configured) {
511 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
512 			  "monitor vap already created vdev=%pK\n", vdev);
513 		status = QDF_STATUS_E_RESOURCES;
514 		goto fail;
515 	}
516 
517 	mon_pdev->monitor_configured = true;
518 	mon_pdev->phy_ppdu_id_size = hal_rx_get_phy_ppdu_id_size(soc->hal_soc);
519 
	/* If the advanced monitor filter is applied using lite_mon
	 * via vap configuration, the required filters are already applied,
	 * hence return SUCCESS from here.
	 */
524 	if (dp_monitor_lite_mon_is_rx_adv_filter_enable(pdev)) {
525 		status = QDF_STATUS_SUCCESS;
526 		goto fail;
527 	}
	/* Disable lite mon if configured; the monitor vap takes
	 * priority over lite mon when it is created. Lite mon
	 * can be configured again later.
	 */
532 	dp_monitor_lite_mon_disable_rx(pdev);
533 
534 	cdp_ops = dp_mon_cdp_ops_get(soc);
	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
536 		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
537 						  DP_FULL_MON_ENABLE);
538 	dp_mon_filter_setup_mon_mode(pdev);
539 	status = dp_mon_filter_update(pdev);
540 	if (status != QDF_STATUS_SUCCESS) {
		dp_cdp_err("%pK: Failed to set monitor filters", soc);
542 		dp_mon_filter_reset_mon_mode(pdev);
543 		mon_pdev->monitor_configured = false;
544 		mon_pdev->mvdev = NULL;
545 	}
546 
547 fail:
548 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
549 	return status;
550 }
551 
552 #ifdef QCA_TX_CAPTURE_SUPPORT
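/**
 * dp_config_tx_capture_mode() - Enable Tx sniffer (tx capture) mode
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS
 */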
553 static QDF_STATUS
554 dp_config_tx_capture_mode(struct dp_pdev *pdev)
555 {
556 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
557 
558 	mon_pdev->tx_sniffer_enable = 1;
559 	mon_pdev->monitor_configured = false;
560 
561 	if (!mon_pdev->pktlog_ppdu_stats)
562 		dp_h2t_cfg_stats_msg_send(pdev,
563 					  DP_PPDU_STATS_CFG_SNIFFER,
564 					  pdev->pdev_id);
565 
566 	return QDF_STATUS_SUCCESS;
567 }
568 #else
569 #ifdef QCA_MCOPY_SUPPORT
570 static QDF_STATUS
571 dp_config_tx_capture_mode(struct dp_pdev *pdev)
572 {
573 	return QDF_STATUS_E_INVAL;
574 }
575 #endif
576 #endif
577 
578 #if defined(QCA_MCOPY_SUPPORT) || defined(QCA_TX_CAPTURE_SUPPORT)
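/**
 * dp_config_debug_sniffer() - Configure the debug sniffer mode
 * @pdev: Datapath pdev handle
 * @val: 0 - disable, 1 - Tx capture, 2/4 - M-copy modes
 *
 * Return: QDF_STATUS
 */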
579 QDF_STATUS
580 dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
581 {
582 	QDF_STATUS status = QDF_STATUS_SUCCESS;
583 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
584 
585 	/*
586 	 * Note: The mirror copy mode cannot co-exist with any other
587 	 * monitor modes. Hence disabling the filter for this mode will
588 	 * reset the monitor destination ring filters.
589 	 */
590 	dp_reset_mcopy_mode(pdev);
591 	switch (val) {
592 	case 0:
593 		mon_pdev->tx_sniffer_enable = 0;
594 		mon_pdev->monitor_configured = false;
595 
		/*
		 * We don't need to reset the Rx monitor status ring or call
		 * the API dp_ppdu_ring_reset() if all debug sniffer modes are
		 * disabled. The Rx monitor status ring will be disabled when
		 * the last mode using the monitor status ring gets disabled.
		 */
602 		if (!mon_pdev->pktlog_ppdu_stats &&
603 		    !mon_pdev->enhanced_stats_en &&
604 		    !mon_pdev->bpr_enable) {
605 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
606 		} else if (mon_pdev->enhanced_stats_en &&
607 			   !mon_pdev->bpr_enable) {
608 			dp_h2t_cfg_stats_msg_send(pdev,
609 						  DP_PPDU_STATS_CFG_ENH_STATS,
610 						  pdev->pdev_id);
611 		} else if (!mon_pdev->enhanced_stats_en &&
612 			   mon_pdev->bpr_enable) {
613 			dp_h2t_cfg_stats_msg_send(pdev,
614 						  DP_PPDU_STATS_CFG_BPR_ENH,
615 						  pdev->pdev_id);
616 		} else {
617 			dp_h2t_cfg_stats_msg_send(pdev,
618 						  DP_PPDU_STATS_CFG_BPR,
619 						  pdev->pdev_id);
620 		}
621 		break;
622 
623 	case 1:
624 		status = dp_config_tx_capture_mode(pdev);
625 		break;
626 	case 2:
627 	case 4:
628 		status = dp_config_mcopy_mode(pdev, val);
629 		break;
630 
631 	default:
632 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
633 			  "Invalid value, mode: %d not supported", val);
634 		status = QDF_STATUS_E_INVAL;
635 		break;
636 	}
637 	return status;
638 }
639 #endif
640 
641 #ifdef QCA_UNDECODED_METADATA_SUPPORT
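/**
 * dp_mon_config_undecoded_metadata_capture() - enable/disable undecoded
 * metadata capture on a pdev
 * @pdev: Datapath pdev handle
 * @val: non-zero to enable the capture, 0 to disable it
 *
 * Return: QDF_STATUS
 */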
642 QDF_STATUS
643 dp_mon_config_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
644 {
645 	QDF_STATUS status = QDF_STATUS_SUCCESS;
646 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
647 
648 	if (!mon_pdev->mvdev && !mon_pdev->scan_spcl_vap_configured) {
649 		qdf_err("No monitor or Special vap, undecoded capture not supported");
650 		return QDF_STATUS_E_RESOURCES;
651 	}
652 
653 	if (val)
654 		status = dp_enable_undecoded_metadata_capture(pdev, val);
655 	else
656 		status = dp_reset_undecoded_metadata_capture(pdev);
657 
658 	return status;
659 }
660 #endif
661 
662 /**
663  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
664  *                                 ring based on target
665  * @soc: soc handle
666  * @mac_for_pdev: WIN- pdev_id, MCL- mac id
667  * @pdev: physical device handle
668  * @ring_num: mac id
669  * @htt_tlv_filter: tlv filter
670  *
671  * Return: zero on success, non-zero on failure
672  */
673 static inline QDF_STATUS
674 dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
675 			    struct dp_pdev *pdev, uint8_t ring_num,
676 			    struct htt_rx_ring_tlv_filter htt_tlv_filter)
677 {
678 	QDF_STATUS status;
679 
680 	if (soc->wlan_cfg_ctx->rxdma1_enable)
681 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
682 					     soc->rxdma_mon_buf_ring[ring_num]
683 					     .hal_srng,
684 					     RXDMA_MONITOR_BUF,
685 					     RX_MONITOR_BUFFER_SIZE,
686 					     &htt_tlv_filter);
687 	else
688 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
689 					     pdev->rx_mac_buf_ring[ring_num]
690 					     .hal_srng,
691 					     RXDMA_BUF, RX_DATA_BUFFER_SIZE,
692 					     &htt_tlv_filter);
693 
694 	return status;
695 }
696 
697 /**
698  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
699  * @soc_hdl: datapath soc handle
700  * @pdev_id: physical device instance id
701  *
 * Return: vdev id of the monitor mode vdev on success; -EINVAL otherwise
703  */
704 static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
705 		uint8_t pdev_id)
706 {
707 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
708 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
709 
710 	if (qdf_unlikely(!pdev || !pdev->monitor_pdev ||
711 				!pdev->monitor_pdev->mvdev))
712 		return -EINVAL;
713 
714 	return pdev->monitor_pdev->mvdev->vdev_id;
715 }
716 
717 #if defined(QCA_TX_CAPTURE_SUPPORT) || defined(QCA_ENHANCED_STATS_SUPPORT)
718 #ifndef WLAN_TX_PKT_CAPTURE_ENH
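/**
 * dp_deliver_mgmt_frm() - Deliver a Tx management frame to the sniffer path
 * @pdev: Datapath pdev handle
 * @nbuf: Management frame buffer
 *
 * Hands the frame to the WDI_EVENT_TX_MGMT_CTRL subscribers when Tx sniffer
 * or M-copy mode is enabled; otherwise frees it unless BPR is enabled.
 *
 * Return: void
 */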
719 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
720 {
721 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
722 
723 	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) {
724 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
725 				     nbuf, HTT_INVALID_PEER,
726 				     WDI_NO_VAL, pdev->pdev_id);
727 	} else {
728 		if (!mon_pdev->bpr_enable)
729 			qdf_nbuf_free(nbuf);
730 	}
731 }
732 #endif
733 #endif
734 
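/**
 * dp_htt_ppdu_stats_attach() - allocate the HTT PPDU stats TLV buffer
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */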
735 QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
736 {
737 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
738 
739 	mon_pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
740 
741 	if (!mon_pdev->ppdu_tlv_buf) {
742 		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
743 		return QDF_STATUS_E_NOMEM;
744 	}
745 
746 	return QDF_STATUS_SUCCESS;
747 }
748 
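/**
 * dp_htt_ppdu_stats_detach() - free HTT PPDU stats resources
 * @pdev: Datapath pdev handle
 *
 * Frees pending ppdu_info entries on both lists and the TLV staging buffer.
 *
 * Return: void
 */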
749 void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
750 {
751 	struct ppdu_info *ppdu_info, *ppdu_info_next;
752 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
753 
754 
755 	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
756 			   ppdu_info_list_elem, ppdu_info_next) {
757 		if (!ppdu_info)
758 			break;
759 		TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
760 			     ppdu_info, ppdu_info_list_elem);
761 		mon_pdev->list_depth--;
762 		qdf_assert_always(ppdu_info->nbuf);
763 		qdf_nbuf_free(ppdu_info->nbuf);
764 		qdf_mem_free(ppdu_info);
765 	}
766 
767 	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->sched_comp_ppdu_list,
768 			   ppdu_info_list_elem, ppdu_info_next) {
769 		if (!ppdu_info)
770 			break;
771 		TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list,
772 			     ppdu_info, ppdu_info_list_elem);
773 		mon_pdev->sched_comp_list_depth--;
774 		qdf_assert_always(ppdu_info->nbuf);
775 		qdf_nbuf_free(ppdu_info->nbuf);
776 		qdf_mem_free(ppdu_info);
777 	}
778 
779 	if (mon_pdev->ppdu_tlv_buf)
780 		qdf_mem_free(mon_pdev->ppdu_tlv_buf);
781 }
782 
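/**
 * dp_pdev_get_rx_mon_stats() - Copy the pdev Rx monitor stats to the caller
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @stats: buffer that receives the monitor stats
 *
 * Return: QDF_STATUS
 */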
783 QDF_STATUS dp_pdev_get_rx_mon_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
784 				    struct cdp_pdev_mon_stats *stats)
785 {
786 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
787 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
788 	struct dp_mon_pdev *mon_pdev;
789 
790 	if (!pdev)
791 		return QDF_STATUS_E_FAILURE;
792 
793 	mon_pdev = pdev->monitor_pdev;
794 	if (!mon_pdev)
795 		return QDF_STATUS_E_FAILURE;
796 
797 	qdf_mem_copy(stats, &mon_pdev->rx_mon_stats,
798 		     sizeof(struct cdp_pdev_mon_stats));
799 
800 	return QDF_STATUS_SUCCESS;
801 }
802 
803 #ifdef QCA_UNDECODED_METADATA_SUPPORT
804 /**
805  * dp_pdev_get_undecoded_capture_stats() - Get undecoded metadata captured
806  * monitor pdev stats
807  * @mon_pdev: Monitor PDEV handle
808  * @rx_mon_stats: Monitor pdev status/destination ring stats
809  *
810  * Return: None
811  */
812 static inline void
813 dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev,
814 				    struct cdp_pdev_mon_stats *rx_mon_stats)
815 {
816 	char undecoded_error[DP_UNDECODED_ERR_LENGTH];
817 	uint8_t index = 0, i;
818 
819 	DP_PRINT_STATS("Rx Undecoded Frame count:%d",
820 		       rx_mon_stats->rx_undecoded_count);
821 	index = 0;
822 	for (i = 0; i < (CDP_PHYRX_ERR_MAX); i++) {
823 		index += qdf_snprint(&undecoded_error[index],
824 				DP_UNDECODED_ERR_LENGTH - index,
825 				" %d", rx_mon_stats->rx_undecoded_error[i]);
826 	}
827 	DP_PRINT_STATS("Undecoded Error (0-63):%s", undecoded_error);
828 }
829 #else
830 static inline void
831 dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev,
832 				    struct cdp_pdev_mon_stats *rx_mon_stats)
833 {
834 }
835 #endif
836 
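/* Human-readable labels indexed by the CDP preamble/reception/MU-type enums */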
837 static const char *
838 dp_preamble_type_str[] = {
839 	"preamble OFDMA     ",
840 	"preamble CCK       ",
841 	"preamble HT        ",
842 	"preamble VHT       ",
843 	"preamble HE        ",
844 	"preamble EHT       ",
845 	"preamble NO SUPPORT",
846 };
847 
848 static const char *
849 dp_reception_type_str[] = {
850 	"reception su        ",
851 	"reception mu_mimo   ",
852 	"reception ofdma     ",
853 	"reception ofdma mimo",
854 };
855 
856 static const char *
857 dp_mu_dl_ul_str[] = {
858 	"MU DL",
859 	"MU UL",
860 };
861 
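/*
 * The helpers below walk the mpdu_cnt_fcs_ok/mpdu_cnt_fcs_err counters over
 * preamble type, reception type, MU DL/UL direction and user index, and
 * print only the non-zero entries.
 */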
862 static inline void
863 dp_print_pdev_mpdu_fcs_ok_cnt(struct cdp_pdev_mon_stats *rx_mon_sts,
864 			      uint32_t pkt_t, uint32_t rx_t,
865 			      uint32_t dl_ul, uint32_t user)
866 {
867 	DP_PRINT_STATS("%s, %s, %s, user=%d, mpdu_fcs_ok=%d",
868 		       dp_preamble_type_str[pkt_t],
869 		       dp_reception_type_str[rx_t],
870 		       dp_mu_dl_ul_str[dl_ul],
871 		       user,
872 		       rx_mon_sts->mpdu_cnt_fcs_ok[pkt_t][rx_t][dl_ul][user]);
873 }
874 
875 static inline void
876 dp_print_pdev_mpdu_fcs_err_cnt(struct cdp_pdev_mon_stats *rx_mon_sts,
877 			       uint32_t pkt_t, uint32_t rx_t,
878 			       uint32_t dl_ul, uint32_t user)
879 {
880 	DP_PRINT_STATS("%s, %s, %s, user=%d, mpdu_fcs_err=%d",
881 		       dp_preamble_type_str[pkt_t],
882 		       dp_reception_type_str[rx_t],
883 		       dp_mu_dl_ul_str[dl_ul],
884 		       user,
885 		       rx_mon_sts->mpdu_cnt_fcs_err[pkt_t][rx_t][dl_ul][user]);
886 }
887 
888 static inline void
889 dp_print_pdev_mpdu_cnt(struct cdp_pdev_mon_stats *rx_mon_sts,
890 		       uint32_t pkt_t, uint32_t rx_t,
891 		       uint32_t dl_ul, uint32_t user)
892 {
893 	if (rx_mon_sts->mpdu_cnt_fcs_ok[pkt_t][rx_t][dl_ul][user])
894 		dp_print_pdev_mpdu_fcs_ok_cnt(rx_mon_sts, pkt_t, rx_t,
895 					      dl_ul, user);
896 
897 	if (rx_mon_sts->mpdu_cnt_fcs_err[pkt_t][rx_t][dl_ul][user])
898 		dp_print_pdev_mpdu_fcs_err_cnt(rx_mon_sts, pkt_t, rx_t,
899 					       dl_ul, user);
900 }
901 
902 static inline void
903 dp_print_pdev_mpdu_user(struct cdp_pdev_mon_stats *rx_mon_sts,
904 			uint32_t pkt_t, uint32_t rx_t,
905 			uint32_t dl_ul)
906 {
907 	uint32_t user;
908 
909 	for (user = 0; user < CDP_MU_SNIF_USER_MAX; user++)
910 		dp_print_pdev_mpdu_cnt(rx_mon_sts, pkt_t, rx_t,
911 				       dl_ul, user);
912 }
913 
914 static inline void
915 dp_print_pdev_mpdu_dl_ul(struct cdp_pdev_mon_stats *rx_mon_sts,
916 			 uint32_t pkt_t, uint32_t rx_t)
917 {
918 	uint32_t dl_ul;
919 
920 	for (dl_ul = CDP_MU_TYPE_DL; dl_ul < CDP_MU_TYPE_MAX; dl_ul++)
921 		dp_print_pdev_mpdu_user(rx_mon_sts, pkt_t, rx_t,
922 					dl_ul);
923 }
924 
925 static inline void
926 dp_print_pdev_mpdu_rx_type(struct cdp_pdev_mon_stats *rx_mon_sts,
927 			   uint32_t pkt_t)
928 {
929 	uint32_t rx_t;
930 
931 	for (rx_t = CDP_RX_TYPE_SU; rx_t < CDP_RX_TYPE_MAX; rx_t++)
932 		dp_print_pdev_mpdu_dl_ul(rx_mon_sts, pkt_t, rx_t);
933 }
934 
935 static inline void
936 dp_print_pdev_mpdu_pkt_type(struct cdp_pdev_mon_stats *rx_mon_sts)
937 {
938 	uint32_t pkt_t;
939 
940 	for (pkt_t = CDP_PKT_TYPE_OFDM; pkt_t < CDP_PKT_TYPE_MAX; pkt_t++)
941 		dp_print_pdev_mpdu_rx_type(rx_mon_sts, pkt_t);
942 }
943 
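/* Print a single non-zero EHT PPDU type/mode counter */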
944 static inline void
945 print_ppdu_eht_type_mode(
946 	struct cdp_pdev_mon_stats *rx_mon_stats,
947 	uint32_t ppdu_type_mode,
948 	uint32_t dl_ul)
949 {
950 	DP_PRINT_STATS("type_mode=%d, dl_ul=%d, cnt=%d",
951 		       ppdu_type_mode,
952 		       dl_ul,
953 		       rx_mon_stats->ppdu_eht_type_mode[ppdu_type_mode][dl_ul]);
954 }
955 
static inline void
print_ppdu_eht_type_mode_dl_ul(
	struct cdp_pdev_mon_stats *rx_mon_stats,
	uint32_t ppdu_type_mode
)
961 {
962 	uint32_t dl_ul;
963 
964 	for (dl_ul = 0; dl_ul < CDP_MU_TYPE_MAX; dl_ul++) {
965 		if (rx_mon_stats->ppdu_eht_type_mode[ppdu_type_mode][dl_ul])
966 			print_ppdu_eht_type_mode(rx_mon_stats,
967 						 ppdu_type_mode, dl_ul);
968 	}
969 }
970 
971 static inline void
972 dp_print_pdev_eht_ppdu_cnt(struct dp_pdev *pdev)
973 {
974 	struct cdp_pdev_mon_stats *rx_mon_stats;
975 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
976 	uint32_t ppdu_type_mode;
977 
978 	rx_mon_stats = &mon_pdev->rx_mon_stats;
	DP_PRINT_STATS("Monitor EHT PPDU Count");
	for (ppdu_type_mode = 0; ppdu_type_mode < CDP_EHT_TYPE_MODE_MAX;
	     ppdu_type_mode++) {
		print_ppdu_eht_type_mode_dl_ul(rx_mon_stats,
					       ppdu_type_mode);
984 	}
985 }
986 
987 static inline void
988 dp_print_pdev_mpdu_stats(struct dp_pdev *pdev)
989 {
990 	struct cdp_pdev_mon_stats *rx_mon_stats;
991 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
992 
993 	rx_mon_stats = &mon_pdev->rx_mon_stats;
994 	DP_PRINT_STATS("Monitor MPDU Count");
995 	dp_print_pdev_mpdu_pkt_type(rx_mon_stats);
996 }
997 
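/**
 * dp_print_pdev_rx_mon_stats() - Print the pdev Rx monitor ring statistics
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */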
998 void
999 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
1000 {
1001 	struct cdp_pdev_mon_stats *rx_mon_stats;
1002 	uint32_t *stat_ring_ppdu_ids;
1003 	uint32_t *dest_ring_ppdu_ids;
1004 	int i, idx;
1005 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1006 
1007 	rx_mon_stats = &mon_pdev->rx_mon_stats;
1008 
1009 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
1010 
1011 	DP_PRINT_STATS("status_ppdu_compl_cnt = %d",
1012 		       rx_mon_stats->status_ppdu_compl);
1013 	DP_PRINT_STATS("status_ppdu_start_cnt = %d",
1014 		       rx_mon_stats->status_ppdu_start);
1015 	DP_PRINT_STATS("status_ppdu_end_cnt = %d",
1016 		       rx_mon_stats->status_ppdu_end);
1017 	DP_PRINT_STATS("status_ppdu_start_mis_cnt = %d",
1018 		       rx_mon_stats->status_ppdu_start_mis);
1019 	DP_PRINT_STATS("status_ppdu_end_mis_cnt = %d",
1020 		       rx_mon_stats->status_ppdu_end_mis);
1021 
1022 	DP_PRINT_STATS("start_user_info_cnt = %d",
1023 		       rx_mon_stats->start_user_info_cnt);
1024 	DP_PRINT_STATS("end_user_stats_cnt = %d",
1025 		       rx_mon_stats->end_user_stats_cnt);
1026 
1027 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
1028 		       rx_mon_stats->status_ppdu_done);
1029 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
1030 		       rx_mon_stats->dest_ppdu_done);
1031 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
1032 		       rx_mon_stats->dest_mpdu_done);
1033 	DP_PRINT_STATS("tlv_tag_status_err_cnt = %u",
1034 		       rx_mon_stats->tlv_tag_status_err);
	DP_PRINT_STATS("mon status DMA not done WAR count = %u",
1036 		       rx_mon_stats->status_buf_done_war);
1037 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
1038 		       rx_mon_stats->dest_mpdu_drop);
1039 	DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
1040 		       rx_mon_stats->dup_mon_linkdesc_cnt);
1041 	DP_PRINT_STATS("dup_mon_buf_cnt = %d",
1042 		       rx_mon_stats->dup_mon_buf_cnt);
1043 	DP_PRINT_STATS("mon_rx_buf_reaped = %u",
1044 		       rx_mon_stats->mon_rx_bufs_reaped_dest);
1045 	DP_PRINT_STATS("mon_rx_buf_replenished = %u",
1046 		       rx_mon_stats->mon_rx_bufs_replenished_dest);
1047 	DP_PRINT_STATS("ppdu_id_mismatch = %u",
1048 		       rx_mon_stats->ppdu_id_mismatch);
1049 	DP_PRINT_STATS("mpdu_ppdu_id_match_cnt = %d",
1050 		       rx_mon_stats->ppdu_id_match);
	DP_PRINT_STATS("ppdus dropped from status ring = %d",
		       rx_mon_stats->status_ppdu_drop);
	DP_PRINT_STATS("ppdus dropped from dest ring = %d",
		       rx_mon_stats->dest_ppdu_drop);
1055 	DP_PRINT_STATS("mpdu_ppdu_id_mismatch_drop = %u",
1056 		       rx_mon_stats->mpdu_ppdu_id_mismatch_drop);
1057 	DP_PRINT_STATS("mpdu_decap_type_invalid = %u",
1058 		       rx_mon_stats->mpdu_decap_type_invalid);
1059 	stat_ring_ppdu_ids =
1060 		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);
1061 	dest_ring_ppdu_ids =
1062 		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	if (!stat_ring_ppdu_ids || !dest_ring_ppdu_ids) {
		DP_PRINT_STATS("Unable to allocate ppdu id hist mem\n");
	} else {
		/* Snapshot both histories under the lock, then print the
		 * snapshots so the two columns stay consistent.
		 */
		qdf_spin_lock_bh(&mon_pdev->mon_lock);
		idx = rx_mon_stats->ppdu_id_hist_idx;
		qdf_mem_copy(stat_ring_ppdu_ids,
			     rx_mon_stats->stat_ring_ppdu_id_hist,
			     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
		qdf_mem_copy(dest_ring_ppdu_ids,
			     rx_mon_stats->dest_ring_ppdu_id_hist,
			     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
		qdf_spin_unlock_bh(&mon_pdev->mon_lock);

		DP_PRINT_STATS("PPDU Id history:");
		DP_PRINT_STATS("stat_ring_ppdu_ids\t dest_ring_ppdu_ids");
		for (i = 0; i < MAX_PPDU_ID_HIST; i++) {
			idx = (idx + 1) & (MAX_PPDU_ID_HIST - 1);
			DP_PRINT_STATS("%*u\t%*u", 16,
				       stat_ring_ppdu_ids[idx], 16,
				       dest_ring_ppdu_ids[idx]);
		}
	}
	/* qdf_mem_free() is a no-op on NULL pointers */
	qdf_mem_free(stat_ring_ppdu_ids);
	qdf_mem_free(dest_ring_ppdu_ids);
1087 	DP_PRINT_STATS("mon_rx_dest_stuck = %d",
1088 		       rx_mon_stats->mon_rx_dest_stuck);
1089 
1090 	dp_pdev_get_undecoded_capture_stats(mon_pdev, rx_mon_stats);
1091 	dp_mon_rx_print_advanced_stats(pdev->soc, pdev);
1092 
1093 	dp_print_pdev_mpdu_stats(pdev);
1094 	dp_print_pdev_eht_ppdu_cnt(pdev);
1096 }
1097 
1098 #ifdef QCA_SUPPORT_BPR
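/**
 * dp_set_bpr_enable() - Enable/disable BPR stats collection on a pdev
 * @pdev: Datapath pdev handle
 * @val: BPR enable/disable value
 *
 * Return: QDF_STATUS
 */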
1099 QDF_STATUS
1100 dp_set_bpr_enable(struct dp_pdev *pdev, int val)
1101 {
1102 	struct dp_mon_ops *mon_ops;
1103 
1104 	mon_ops = dp_mon_ops_get(pdev->soc);
1105 	if (mon_ops && mon_ops->mon_set_bpr_enable)
1106 		return mon_ops->mon_set_bpr_enable(pdev, val);
1107 
1108 	return QDF_STATUS_E_FAILURE;
1109 }
1110 #endif
1111 
1112 #ifdef WDI_EVENT_ENABLE
1113 #ifdef BE_PKTLOG_SUPPORT
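/**
 * dp_set_hybrid_pktlog_enable() - Enable hybrid (Tx) pktlog mode
 * @pdev: Datapath pdev handle
 * @mon_pdev: Monitor pdev handle
 * @soc: Datapath soc handle
 *
 * Return: true on success, false on filter-update failure
 */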
1114 static bool
1115 dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev,
1116 			    struct dp_mon_pdev *mon_pdev,
1117 			    struct dp_soc *soc)
1118 {
1119 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1120 	struct dp_mon_ops *mon_ops = NULL;
1121 	uint16_t num_buffers;
1122 
1123 	if (mon_pdev->mvdev) {
1124 		/* Nothing needs to be done if monitor mode is
1125 		 * enabled
1126 		 */
1127 		mon_pdev->pktlog_hybrid_mode = true;
1128 		return false;
1129 	}
1130 
1131 	mon_ops = dp_mon_ops_get(pdev->soc);
1132 	if (!mon_ops) {
1133 		dp_mon_filter_err("Mon ops uninitialized");
		return false;
1135 	}
1136 
1137 	if (!mon_pdev->pktlog_hybrid_mode) {
1138 		mon_pdev->pktlog_hybrid_mode = true;
1139 		soc_cfg_ctx = soc->wlan_cfg_ctx;
1140 		num_buffers =
1141 			wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
1142 
1143 		if (mon_ops && mon_ops->set_mon_mode_buf_rings_tx)
1144 			mon_ops->set_mon_mode_buf_rings_tx(pdev, num_buffers);
1145 
1146 		dp_mon_filter_setup_pktlog_hybrid(pdev);
1147 		if (dp_tx_mon_filter_update(pdev) !=
1148 		    QDF_STATUS_SUCCESS) {
1149 			dp_cdp_err("Set hybrid filters failed");
1150 			dp_mon_filter_reset_pktlog_hybrid(pdev);
1151 			mon_pdev->rx_pktlog_mode =
1152 				DP_RX_PKTLOG_DISABLED;
1153 			return false;
1154 		}
1155 
1156 		dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_PKTLOG);
1157 	}
1158 
1159 	return true;
1160 }
1161 
1162 static void
1163 dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev)
1164 {
1165 	mon_pdev->pktlog_hybrid_mode = false;
1166 }
1167 #else
1168 static void
1169 dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev)
1170 {
1171 }
1172 
1173 static bool
1174 dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev,
1175 			    struct dp_mon_pdev *mon_pdev,
1176 			    struct dp_soc *soc)
1177 {
1178 	dp_cdp_err("Hybrid mode is supported only on beryllium");
1179 	return true;
1180 }
1181 #endif
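/**
 * dp_set_pktlog_wifi3() - attach/detach pktlog filters for a WDI event
 * @pdev: Datapath pdev handle
 * @event: WDI event id
 * @enable: true to set up the event's filters, false to tear them down
 *
 * Return: 0 in all cases
 */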
1182 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
			bool enable)
{
	struct dp_soc *soc = NULL;
	int max_mac_rings =
			wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
1188 	uint8_t mac_id = 0;
1189 	struct dp_mon_ops *mon_ops;
1190 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1191 
1192 	soc = pdev->soc;
1193 	mon_ops = dp_mon_ops_get(soc);
1194 
1195 	if (!mon_ops)
1196 		return 0;
1197 
1198 	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
1199 
1200 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1201 		  FL("Max_mac_rings %d "),
1202 		  max_mac_rings);
1203 
1204 	if (enable) {
1205 		switch (event) {
1206 		case WDI_EVENT_RX_DESC:
1207 			/* Nothing needs to be done if monitor mode is
1208 			 * enabled
1209 			 */
1210 			if (mon_pdev->mvdev)
1211 				return 0;
1212 
1213 			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL)
1214 				break;
1215 
1216 			mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
1217 			dp_mon_filter_setup_rx_pkt_log_full(pdev);
1218 			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
1219 				dp_cdp_err("%pK: Pktlog full filters set failed",
1220 					   soc);
1221 				dp_mon_filter_reset_rx_pkt_log_full(pdev);
1222 				mon_pdev->rx_pktlog_mode =
1223 					DP_RX_PKTLOG_DISABLED;
1224 				return 0;
1225 			}
1226 
1227 			dp_monitor_reap_timer_start(soc,
1228 						    CDP_MON_REAP_SOURCE_PKTLOG);
1229 			break;
1230 
1231 		case WDI_EVENT_LITE_RX:
1232 			/* Nothing needs to be done if monitor mode is
1233 			 * enabled
1234 			 */
1235 			if (mon_pdev->mvdev)
1236 				return 0;
1237 
1238 			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE)
1239 				break;
1240 
1241 			mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
1242 
1243 			/*
1244 			 * Set the packet log lite mode filter.
1245 			 */
1246 			dp_mon_filter_setup_rx_pkt_log_lite(pdev);
1247 			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
1248 				dp_cdp_err("%pK: Pktlog lite filters set failed",
1249 					   soc);
1250 				dp_mon_filter_reset_rx_pkt_log_lite(pdev);
1251 				mon_pdev->rx_pktlog_mode =
1252 					DP_RX_PKTLOG_DISABLED;
1253 				return 0;
1254 			}
1255 
1256 			dp_monitor_reap_timer_start(soc,
1257 						    CDP_MON_REAP_SOURCE_PKTLOG);
1258 			break;
1259 		case WDI_EVENT_LITE_T2H:
1260 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
1261 				int mac_for_pdev = dp_get_mac_id_for_pdev(
1262 							mac_id,	pdev->pdev_id);
1263 
1264 				mon_pdev->pktlog_ppdu_stats = true;
1265 				dp_h2t_cfg_stats_msg_send(pdev,
1266 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
1267 					mac_for_pdev);
1268 			}
1269 			break;
1270 
1271 		case WDI_EVENT_RX_CBF:
1272 			/* Nothing needs to be done if monitor mode is
1273 			 * enabled
1274 			 */
1275 			if (mon_pdev->mvdev)
1276 				return 0;
1277 
1278 			if (mon_pdev->rx_pktlog_cbf)
1279 				break;
1280 
1281 			mon_pdev->rx_pktlog_cbf = true;
1282 			mon_pdev->monitor_configured = true;
1283 			if (mon_ops->mon_vdev_set_monitor_mode_buf_rings)
1284 				mon_ops->mon_vdev_set_monitor_mode_buf_rings(
1285 					pdev);
1286 
1287 			/*
			 * Set the packet log CBF mode filter.
1289 			 */
1290 			qdf_info("Non mon mode: Enable destination ring");
1291 
1292 			dp_mon_filter_setup_rx_pkt_log_cbf(pdev);
1293 			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
1294 				dp_mon_err("Pktlog set CBF filters failed");
1295 				dp_mon_filter_reset_rx_pktlog_cbf(pdev);
1296 				mon_pdev->rx_pktlog_mode =
1297 					DP_RX_PKTLOG_DISABLED;
1298 				mon_pdev->monitor_configured = false;
1299 				return 0;
1300 			}
1301 
1302 			dp_monitor_reap_timer_start(soc,
1303 						    CDP_MON_REAP_SOURCE_PKTLOG);
1304 			break;
1305 		case WDI_EVENT_HYBRID_TX:
1306 			if (!dp_set_hybrid_pktlog_enable(pdev, mon_pdev, soc))
1307 				return 0;
1308 			break;
1309 
1310 		default:
1311 			/* Nothing needs to be done for other pktlog types */
1312 			break;
1313 		}
1314 	} else {
1315 		switch (event) {
1316 		case WDI_EVENT_RX_DESC:
1317 		case WDI_EVENT_LITE_RX:
1318 			/* Nothing needs to be done if monitor mode is
1319 			 * enabled
1320 			 */
1321 			if (mon_pdev->mvdev)
1322 				return 0;
1323 
1324 			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_DISABLED)
1325 				break;
1326 
1327 			mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
1328 			dp_mon_filter_reset_rx_pkt_log_full(pdev);
1329 			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
1330 				dp_cdp_err("%pK: Pktlog filters reset failed",
1331 					   soc);
1332 				return 0;
1333 			}
1334 
1335 			dp_mon_filter_reset_rx_pkt_log_lite(pdev);
1336 			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
1337 				dp_cdp_err("%pK: Pktlog filters reset failed",
1338 					   soc);
1339 				return 0;
1340 			}
1341 
1342 			dp_monitor_reap_timer_stop(soc,
1343 						   CDP_MON_REAP_SOURCE_PKTLOG);
1344 			break;
1345 		case WDI_EVENT_LITE_T2H:
			/*
			 * To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW,
			 * pass the value 0. Once the proper macros are
			 * defined in the htt header file, use them instead.
			 */
1351 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
1352 				int mac_for_pdev =
1353 						dp_get_mac_id_for_pdev(mac_id,
1354 								pdev->pdev_id);
1355 
1356 				mon_pdev->pktlog_ppdu_stats = false;
1357 				if (!mon_pdev->enhanced_stats_en &&
1358 				    !mon_pdev->tx_sniffer_enable &&
1359 				    !mon_pdev->mcopy_mode) {
1360 					dp_h2t_cfg_stats_msg_send(pdev, 0,
1361 								  mac_for_pdev);
1362 				} else if (mon_pdev->tx_sniffer_enable ||
1363 					   mon_pdev->mcopy_mode) {
1364 					dp_h2t_cfg_stats_msg_send(pdev,
1365 						DP_PPDU_STATS_CFG_SNIFFER,
1366 						mac_for_pdev);
1367 				} else if (mon_pdev->enhanced_stats_en) {
1368 					dp_h2t_cfg_stats_msg_send(pdev,
1369 						DP_PPDU_STATS_CFG_ENH_STATS,
1370 						mac_for_pdev);
1371 				}
1372 			}
1373 
1374 			break;
1375 		case WDI_EVENT_RX_CBF:
1376 			mon_pdev->rx_pktlog_cbf = false;
1377 			break;
1378 
1379 		case WDI_EVENT_HYBRID_TX:
1380 			dp_set_hybrid_pktlog_disable(mon_pdev);
1381 			break;
1382 
1383 		default:
1384 			/* Nothing needs to be done for other pktlog types */
1385 			break;
1386 		}
1387 	}
1388 	return 0;
1389 }
1390 #endif
1391 
1392 /* MCL specific functions */
1393 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
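/**
 * dp_pktlogmod_exit() - stop the pktlog reap timer and de-initialize pktlog
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */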
1394 void dp_pktlogmod_exit(struct dp_pdev *pdev)
1395 {
1396 	struct dp_soc *soc = pdev->soc;
1397 	struct hif_opaque_softc *scn = soc->hif_handle;
1398 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1399 
1400 	if (!scn) {
1401 		dp_mon_err("Invalid hif(scn) handle");
1402 		return;
1403 	}
1404 
1405 	dp_monitor_reap_timer_stop(soc, CDP_MON_REAP_SOURCE_PKTLOG);
1406 	pktlogmod_exit(scn);
1407 	mon_pdev->pkt_log_init = false;
1408 }
#endif /* DP_CON_MON */
1410 
1411 #if defined(WDI_EVENT_ENABLE) && defined(QCA_ENHANCED_STATS_SUPPORT)
1412 #ifdef IPA_OFFLOAD
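/**
 * dp_peer_get_tx_rx_stats() - Aggregate peer Tx/Rx counters for WDI delivery
 * @peer: Datapath peer handle
 * @peer_stats_intf: buffer that receives the aggregated counters
 *
 * Return: void
 */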
1413 void dp_peer_get_tx_rx_stats(struct dp_peer *peer,
1414 			     struct cdp_interface_peer_stats *peer_stats_intf)
1415 {
1416 	struct dp_rx_tid *rx_tid = NULL;
1417 	uint8_t i = 0;
1418 
1419 	for (i = 0; i < DP_MAX_TIDS; i++) {
1420 		rx_tid = &peer->rx_tid[i];
1421 		peer_stats_intf->rx_byte_count +=
1422 			rx_tid->rx_msdu_cnt.bytes;
1423 		peer_stats_intf->rx_packet_count +=
1424 			rx_tid->rx_msdu_cnt.num;
1425 	}
1426 	peer_stats_intf->tx_packet_count =
1427 		peer->monitor_peer->stats.tx.tx_ucast_success.num;
1428 	peer_stats_intf->tx_byte_count =
1429 		peer->monitor_peer->stats.tx.tx_ucast_success.bytes;
1430 }
1431 #else
1432 void dp_peer_get_tx_rx_stats(struct dp_peer *peer,
1433 			     struct cdp_interface_peer_stats *peer_stats_intf)
1434 {
1435 	struct dp_txrx_peer *txrx_peer = NULL;
1436 	struct dp_peer *tgt_peer = NULL;
1437 	uint8_t inx = 0;
1438 	uint8_t stats_arr_size;
1439 
1440 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
1441 	txrx_peer = tgt_peer->txrx_peer;
1442 	peer_stats_intf->rx_packet_count = txrx_peer->to_stack.num;
1443 	peer_stats_intf->rx_byte_count = txrx_peer->to_stack.bytes;
1444 	stats_arr_size = txrx_peer->stats_arr_size;
1445 
1446 	for (inx = 0; inx < stats_arr_size; inx++) {
1447 		peer_stats_intf->tx_packet_count +=
1448 			txrx_peer->stats[inx].per_pkt_stats.tx.ucast.num;
1449 		peer_stats_intf->tx_byte_count +=
1450 			txrx_peer->stats[inx].per_pkt_stats.tx.tx_success.bytes;
1451 	}
1452 }
1453 #endif
1454 
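/**
 * dp_peer_stats_notify() - Publish peer stats over WDI_EVENT_PEER_STATS when
 * the SNR or Tx rate has changed
 * @dp_pdev: Datapath pdev handle
 * @peer: Datapath peer handle
 *
 * Return: QDF_STATUS
 */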
1455 QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer)
1456 {
1457 	struct cdp_interface_peer_stats peer_stats_intf = {0};
1458 	struct dp_mon_peer_stats *mon_peer_stats = NULL;
1459 	struct dp_peer *tgt_peer = NULL;
1460 	struct dp_txrx_peer *txrx_peer = NULL;
1461 
1462 	if (qdf_unlikely(!peer || !peer->vdev || !peer->monitor_peer))
1463 		return QDF_STATUS_E_FAULT;
1464 
1465 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
1466 	if (qdf_unlikely(!tgt_peer))
1467 		return QDF_STATUS_E_FAULT;
1468 
1469 	txrx_peer = tgt_peer->txrx_peer;
	if (qdf_unlikely(!txrx_peer))
1471 		return QDF_STATUS_E_FAULT;
1472 
1473 	mon_peer_stats = &peer->monitor_peer->stats;
1474 
1475 	if (mon_peer_stats->rx.last_snr != mon_peer_stats->rx.snr)
1476 		peer_stats_intf.rssi_changed = true;
1477 
1478 	if ((mon_peer_stats->rx.snr && peer_stats_intf.rssi_changed) ||
1479 	    (mon_peer_stats->tx.tx_rate &&
1480 	     mon_peer_stats->tx.tx_rate != mon_peer_stats->tx.last_tx_rate)) {
1481 		qdf_mem_copy(peer_stats_intf.peer_mac, peer->mac_addr.raw,
1482 			     QDF_MAC_ADDR_SIZE);
1483 		peer_stats_intf.vdev_id = peer->vdev->vdev_id;
1484 		peer_stats_intf.last_peer_tx_rate =
1485 					mon_peer_stats->tx.last_tx_rate;
1486 		peer_stats_intf.peer_tx_rate = mon_peer_stats->tx.tx_rate;
1487 		peer_stats_intf.peer_rssi = mon_peer_stats->rx.snr;
1488 		peer_stats_intf.ack_rssi = mon_peer_stats->tx.last_ack_rssi;
1489 		dp_peer_get_tx_rx_stats(peer, &peer_stats_intf);
1490 		peer_stats_intf.per = tgt_peer->stats.tx.last_per;
1491 		peer_stats_intf.free_buff = INVALID_FREE_BUFF;
1492 		dp_wdi_event_handler(WDI_EVENT_PEER_STATS, dp_pdev->soc,
1493 				     (void *)&peer_stats_intf, 0,
1494 				     WDI_NO_VAL, dp_pdev->pdev_id);
1495 	}
1496 
1497 	return QDF_STATUS_SUCCESS;
1498 }
1499 #endif
1500 
1501 #ifdef FEATURE_NAC_RSSI
1502 /**
1503  * dp_rx_nac_filter() - Function to perform filtering of non-associated
1504  * clients
1505  * @pdev: DP pdev handle
1506  * @rx_pkt_hdr: Rx packet Header
1507  *
1508  * Return: dp_vdev*
1509  */
1510 static
1511 struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
1512 				 uint8_t *rx_pkt_hdr)
1513 {
1514 	struct ieee80211_frame *wh;
1515 	struct dp_neighbour_peer *peer = NULL;
1516 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1517 
1518 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
1519 
1520 	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
1521 		return NULL;
1522 
1523 	qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
1524 	TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
1525 		      neighbour_peer_list_elem) {
1526 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
1527 				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
			dp_rx_debug("%pK: NAC configuration matched for mac-%02x:%02x:%02x:%02x:%02x:%02x",
				    pdev->soc,
				    peer->neighbour_peers_macaddr.raw[0],
				    peer->neighbour_peers_macaddr.raw[1],
				    peer->neighbour_peers_macaddr.raw[2],
				    peer->neighbour_peers_macaddr.raw[3],
				    peer->neighbour_peers_macaddr.raw[4],
				    peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
1538 
1539 			return mon_pdev->mvdev;
1540 		}
1541 	}
1542 	qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
1543 
1544 	return NULL;
1545 }
1546 
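/**
 * dp_filter_neighbour_peer() - deliver Rx frames matching a configured NAC
 * @pdev: Datapath pdev handle
 * @rx_pkt_hdr: Rx packet header
 *
 * Return: QDF_STATUS_SUCCESS if the frame was delivered to the monitor vdev,
 *	   QDF_STATUS_E_FAILURE otherwise
 */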
1547 QDF_STATUS dp_filter_neighbour_peer(struct dp_pdev *pdev,
1548 				    uint8_t *rx_pkt_hdr)
1549 {
1550 	struct dp_vdev *vdev = NULL;
1551 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1552 
1553 	if (mon_pdev->filter_neighbour_peers) {
		/* Next Hop scenario not yet handled */
1555 		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
1556 		if (vdev) {
1557 			dp_rx_mon_deliver(pdev->soc, pdev->pdev_id,
1558 					  pdev->invalid_peer_head_msdu,
1559 					  pdev->invalid_peer_tail_msdu);
1560 
1561 			pdev->invalid_peer_head_msdu = NULL;
1562 			pdev->invalid_peer_tail_msdu = NULL;
1563 			return QDF_STATUS_SUCCESS;
1564 		}
1565 	}
1566 
1567 	return QDF_STATUS_E_FAILURE;
1568 }
1569 #endif
1570 
1571 /**
1572  * dp_update_mon_mac_filter() - Set/reset monitor mac filter
1573  * @soc_hdl: cdp soc handle
1574  * @vdev_id: id of virtual device object
1575  * @cmd: Add/Del command
1576  *
1577  * Return: 0 for success. nonzero for failure.
1578  */
1579 static QDF_STATUS dp_update_mon_mac_filter(struct cdp_soc_t *soc_hdl,
1580 					   uint8_t vdev_id, uint32_t cmd)
1581 {
1582 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
1583 	struct dp_pdev *pdev;
1584 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
1585 						     DP_MOD_ID_CDP);
1586 	struct dp_mon_pdev *mon_pdev;
1587 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
1588 
1589 	if (!vdev)
1590 		return status;
1591 
1592 	pdev = vdev->pdev;
1593 	if (!pdev) {
1594 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
1595 		return status;
1596 	}
1597 
1598 	mon_pdev = pdev->monitor_pdev;
1599 	if (cmd == DP_NAC_PARAM_ADD) {
1600 		/* first neighbour added */
1601 		dp_mon_filter_set_reset_mon_mac_filter(pdev, true);
1602 		status = dp_mon_filter_update(pdev);
1603 		if (status != QDF_STATUS_SUCCESS) {
1604 			dp_cdp_err("%pK: Mon mac filter set failed", soc);
1605 			dp_mon_filter_set_reset_mon_mac_filter(pdev, false);
1606 		}
1607 	} else if (cmd == DP_NAC_PARAM_DEL) {
1608 		/* last neighbour deleted */
1609 		dp_mon_filter_set_reset_mon_mac_filter(pdev, false);
1610 		status = dp_mon_filter_update(pdev);
1611 		if (status != QDF_STATUS_SUCCESS)
1612 			dp_cdp_err("%pK: Mon mac filter reset failed", soc);
1613 	}
1614 
1615 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
1616 	return status;
1617 }
1618 
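/**
 * dp_enable_mon_reap_timer() - start/stop the monitor reap timer
 * @soc_hdl: cdp soc handle
 * @source: monitor reap timer trigger source
 * @enable: true to start the timer, false to stop it
 *
 * Return: result of the start/stop operation
 */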
1620 bool
1621 dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl,
1622 			 enum cdp_mon_reap_source source,
1623 			 bool enable)
1624 {
1625 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1626 
1627 	if (enable)
1628 		return dp_monitor_reap_timer_start(soc, source);
1629 	else
1630 		return dp_monitor_reap_timer_stop(soc, source);
1631 }
1632 
1633 #if defined(DP_CON_MON)
1634 #ifndef REMOVE_PKT_LOG
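/**
 * dp_pkt_log_init() - initialize the packet log module for a pdev
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: HIF context
 *
 * Return: none
 */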
1635 void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
1636 {
1637 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1638 	struct dp_pdev *handle =
1639 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1640 	struct dp_mon_pdev *mon_pdev;
1641 
1642 	if (!handle) {
1643 		dp_mon_err("pdev handle is NULL");
1644 		return;
1645 	}
1646 
1647 	mon_pdev = handle->monitor_pdev;
1648 
1649 	if (mon_pdev->pkt_log_init) {
		dp_mon_err("%pK: Packet log already initialized", soc);
1651 		return;
1652 	}
1653 
1654 	pktlog_sethandle(&mon_pdev->pl_dev, scn);
1655 	pktlog_set_pdev_id(mon_pdev->pl_dev, pdev_id);
1656 	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
1657 
1658 	if (pktlogmod_init(scn)) {
1659 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1660 			  "%s: pktlogmod_init failed", __func__);
1661 		mon_pdev->pkt_log_init = false;
1662 	} else {
1663 		mon_pdev->pkt_log_init = true;
1664 	}
1665 }
1666 
1667 /**
1668  * dp_pkt_log_con_service() - connect packet log service
1669  * @soc_hdl: Datapath soc handle
1670  * @pdev_id: id of data path pdev handle
1671  * @scn: device context
1672  *
1673  * Return: none
1674  */
1675 static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
1676 				   uint8_t pdev_id, void *scn)
1677 {
1678 	dp_pkt_log_init(soc_hdl, pdev_id, scn);
1679 	pktlog_htc_attach();
1680 }
1681 
1682 /**
1683  * dp_pkt_log_exit() - Wrapper API to cleanup pktlog info
1684  * @soc_hdl: Datapath soc handle
1685  * @pdev_id: id of data path pdev handle
1686  *
1687  * Return: none
1688  */
1689 static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1690 {
1691 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1692 	struct dp_pdev *pdev =
1693 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1694 
1695 	if (!pdev) {
1696 		dp_err("pdev handle is NULL");
1697 		return;
1698 	}
1699 
1700 	dp_pktlogmod_exit(pdev);
1701 }
1702 
1703 #else
1704 static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
1705 				   uint8_t pdev_id, void *scn)
1706 {
1707 }
1708 
1709 static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1710 {
1711 }
1712 #endif
1713 #endif
1714 
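/**
 * dp_neighbour_peers_detach() - free the neighbour peer list and its lock
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */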
1715 void dp_neighbour_peers_detach(struct dp_pdev *pdev)
1716 {
1717 	struct dp_neighbour_peer *peer = NULL;
1718 	struct dp_neighbour_peer *temp_peer = NULL;
1719 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1720 
1721 	TAILQ_FOREACH_SAFE(peer, &mon_pdev->neighbour_peers_list,
1722 			   neighbour_peer_list_elem, temp_peer) {
1723 		/* delete this peer from the list */
1724 		TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
1725 			     peer, neighbour_peer_list_elem);
1726 		qdf_mem_free(peer);
1727 	}
1728 
1729 	qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
1730 }
1731 
1732 #ifdef QCA_ENHANCED_STATS_SUPPORT
1733 /**
1734  * dp_mon_tx_enable_enhanced_stats() - Enable enhanced Tx stats
1735  * @pdev: Datapath pdev handle
1736  *
1737  * Return: void
1738  */
1739 static void dp_mon_tx_enable_enhanced_stats(struct dp_pdev *pdev)
1740 {
1741 	struct dp_soc *soc = pdev->soc;
1742 	struct dp_mon_ops *mon_ops = NULL;
1743 
1744 	mon_ops = dp_mon_ops_get(soc);
1745 	if (mon_ops && mon_ops->mon_tx_enable_enhanced_stats)
1746 		mon_ops->mon_tx_enable_enhanced_stats(pdev);
1747 }
1748 
1749 /**
 * dp_enable_enhanced_stats() - API to enable enhanced statistics
1751  * @soc: DP_SOC handle
1752  * @pdev_id: id of DP_PDEV handle
1753  *
1754  * Return: QDF_STATUS
1755  */
1756 QDF_STATUS
1757 dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
1758 {
1759 	struct dp_pdev *pdev = NULL;
1760 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1761 	struct dp_mon_pdev *mon_pdev;
1762 	struct dp_soc *dp_soc = cdp_soc_t_to_dp_soc(soc);
1763 
1764 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
1765 						  pdev_id);
1766 
1767 	if (!pdev)
1768 		return QDF_STATUS_E_FAILURE;
1769 
1770 	mon_pdev = pdev->monitor_pdev;
1771 
1772 	if (!mon_pdev)
1773 		return QDF_STATUS_E_FAILURE;
1774 
1775 	if (mon_pdev->enhanced_stats_en == 0)
1776 		dp_cal_client_timer_start(mon_pdev->cal_client_ctx);
1777 
1778 	mon_pdev->enhanced_stats_en = 1;
1779 	pdev->enhanced_stats_en = 1;
1780 	pdev->link_peer_stats = wlan_cfg_is_peer_link_stats_enabled(
1781 							dp_soc->wlan_cfg_ctx);
1782 
1783 	dp_mon_filter_setup_enhanced_stats(pdev);
1784 	status = dp_mon_filter_update(pdev);
1785 	if (status != QDF_STATUS_SUCCESS) {
1786 		dp_cdp_err("%pK: Failed to set enhanced mode filters", soc);
1787 		dp_mon_filter_reset_enhanced_stats(pdev);
1788 		dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
1789 		mon_pdev->enhanced_stats_en = 0;
1790 		pdev->enhanced_stats_en = 0;
1791 		pdev->link_peer_stats = 0;
1792 		return QDF_STATUS_E_FAILURE;
1793 	}
1794 
1795 	dp_mon_tx_enable_enhanced_stats(pdev);
1796 
1797 	return QDF_STATUS_SUCCESS;
1798 }
1799 
1800 /**
1801  * dp_mon_tx_disable_enhanced_stats() - Disable enhanced Tx stats
1802  * @pdev: Datapath pdev handle
1803  *
1804  * Return: void
1805  */
1806 static void dp_mon_tx_disable_enhanced_stats(struct dp_pdev *pdev)
1807 {
1808 	struct dp_soc *soc = pdev->soc;
1809 	struct dp_mon_ops *mon_ops = NULL;
1810 
1811 	mon_ops = dp_mon_ops_get(soc);
1812 	if (mon_ops && mon_ops->mon_tx_disable_enhanced_stats)
1813 		mon_ops->mon_tx_disable_enhanced_stats(pdev);
1814 }
1815 
1816 /**
1817  * dp_disable_enhanced_stats() - API to disable enhanced statistics
1818  *
1819  * @soc: the soc handle
1820  * @pdev_id: pdev_id of pdev
1821  *
1822  * Return: QDF_STATUS
1823  */
1824 QDF_STATUS
1825 dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
1826 {
1827 	struct dp_pdev *pdev =
1828 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
1829 						   pdev_id);
1830 	struct dp_mon_pdev *mon_pdev;
1831 
1832 	if (!pdev || !pdev->monitor_pdev)
1833 		return QDF_STATUS_E_FAILURE;
1834 
1835 	mon_pdev = pdev->monitor_pdev;
1836 
1837 	if (mon_pdev->enhanced_stats_en == 1)
1838 		dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
1839 
1840 	mon_pdev->enhanced_stats_en = 0;
1841 	pdev->enhanced_stats_en = 0;
1842 	pdev->link_peer_stats = 0;
1843 
1844 	dp_mon_tx_disable_enhanced_stats(pdev);
1845 
1846 	dp_mon_filter_reset_enhanced_stats(pdev);
1847 	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
1848 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1849 			  FL("Failed to reset enhanced mode filters"));
1850 	}
1851 
1852 	return QDF_STATUS_SUCCESS;
1853 }
1854 
1855 #ifdef WDI_EVENT_ENABLE
1856 QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
1857 				    struct cdp_rx_stats_ppdu_user *ppdu_user)
1858 {
1859 	struct cdp_interface_peer_qos_stats qos_stats_intf = {0};
1860 
1861 	if (qdf_unlikely(ppdu_user->peer_id == HTT_INVALID_PEER)) {
1862 		dp_mon_warn("Invalid peer id: %u", ppdu_user->peer_id);
1863 		return QDF_STATUS_E_FAILURE;
1864 	}
1865 
1866 	qdf_mem_copy(qos_stats_intf.peer_mac, ppdu_user->mac_addr,
1867 		     QDF_MAC_ADDR_SIZE);
1868 	qos_stats_intf.frame_control = ppdu_user->frame_control;
1869 	qos_stats_intf.frame_control_info_valid =
1870 			ppdu_user->frame_control_info_valid;
1871 	qos_stats_intf.qos_control = ppdu_user->qos_control;
1872 	qos_stats_intf.qos_control_info_valid =
1873 			ppdu_user->qos_control_info_valid;
1874 	qos_stats_intf.vdev_id = ppdu_user->vdev_id;
1875 	dp_wdi_event_handler(WDI_EVENT_PEER_QOS_STATS, dp_pdev->soc,
1876 			     (void *)&qos_stats_intf, 0,
1877 			     WDI_NO_VAL, dp_pdev->pdev_id);
1878 
1879 	return QDF_STATUS_SUCCESS;
1880 }
1881 #else
1882 static inline QDF_STATUS
1883 dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
1884 			 struct cdp_rx_stats_ppdu_user *ppdu_user)
1885 {
1886 	return QDF_STATUS_SUCCESS;
1887 }
1888 #endif
1889 #endif /* QCA_ENHANCED_STATS_SUPPORT */
1890 
1891 /**
1892  * dp_enable_peer_based_pktlog() - Set flag for peer-based filtering
1893  * for pktlog
1894  * @soc: cdp_soc handle
1895  * @pdev_id: id of dp pdev handle
1896  * @mac_addr: Peer mac address
1897  * @enb_dsb: Enable or disable peer based filtering
1898  *
1899  * Return: QDF_STATUS
1900  */
1901 static int
1902 dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
1903 			    uint8_t *mac_addr, uint8_t enb_dsb)
1904 {
1905 	struct dp_peer *peer;
1906 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
1907 	struct dp_pdev *pdev =
1908 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
1909 						   pdev_id);
1910 	struct dp_mon_pdev *mon_pdev;
1911 
1912 	if (!pdev)
1913 		return QDF_STATUS_E_FAILURE;
1914 
1915 	mon_pdev = pdev->monitor_pdev;
1916 
1917 	peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr,
1918 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
1919 
1920 	if (!peer) {
1921 		dp_mon_err("Peer is NULL");
1922 		return QDF_STATUS_E_FAILURE;
1923 	}
1924 
1925 	if (!IS_MLO_DP_MLD_PEER(peer) && peer->monitor_peer) {
1926 		peer->monitor_peer->peer_based_pktlog_filter = enb_dsb;
1927 		mon_pdev->dp_peer_based_pktlog = enb_dsb;
1928 		status = QDF_STATUS_SUCCESS;
1929 	}
1930 
1931 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1932 
1933 	return status;
1934 }
1935 
1936 /**
1937  * dp_peer_update_pkt_capture_params() - Set Rx & Tx Capture flags for a peer
1938  * @soc: DP_SOC handle
1939  * @pdev_id: id of DP_PDEV handle
1940  * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
1941  * @is_tx_pkt_cap_enable: enable/disable/delete/print
1942  * Tx packet capture in monitor mode
1943  * @peer_mac: MAC address for which the above need to be enabled/disabled
1944  *
1945  * Return: QDF_STATUS_SUCCESS if Rx & Tx capture was set for the peer, else QDF_STATUS_E_FAILURE
1946  */
1947 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
1948 static QDF_STATUS
1949 dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
1950 				  uint8_t pdev_id,
1951 				  bool is_rx_pkt_cap_enable,
1952 				  uint8_t is_tx_pkt_cap_enable,
1953 				  uint8_t *peer_mac)
1954 {
1955 	struct dp_peer *peer;
1956 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
1957 	struct dp_pdev *pdev =
1958 			dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
1959 							   pdev_id);
1960 	if (!pdev)
1961 		return QDF_STATUS_E_FAILURE;
1962 
1963 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
1964 				      peer_mac, 0, DP_VDEV_ALL,
1965 				      DP_MOD_ID_CDP);
1966 	if (!peer)
1967 		return QDF_STATUS_E_FAILURE;
1968 
1969 	/* we need to set tx pkt capture even for a non-associated peer */
1970 	if (!IS_MLO_DP_MLD_PEER(peer)) {
1971 		status = dp_monitor_tx_peer_filter(pdev, peer,
1972 						   is_tx_pkt_cap_enable,
1973 						   peer_mac);
1974 
1975 		status = dp_peer_set_rx_capture_enabled(pdev, peer,
1976 							is_rx_pkt_cap_enable,
1977 							peer_mac);
1978 	}
1979 
1980 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1981 
1982 	return status;
1983 }
1984 #endif
1985 
1986 #ifdef QCA_MCOPY_SUPPORT
1987 QDF_STATUS dp_mcopy_check_deliver(struct dp_pdev *pdev,
1988 				  uint16_t peer_id,
1989 				  uint32_t ppdu_id,
1990 				  uint8_t first_msdu)
1991 {
1992 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1993 
1994 	if (mon_pdev->mcopy_mode) {
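		/*
		 * In M_COPY mode only one MSDU per (ppdu_id, peer_id) pair
		 * is delivered: a repeat of the last delivered pair is
		 * rejected as a duplicate below.
		 */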
1995 		if (mon_pdev->mcopy_mode == M_COPY) {
1996 			if ((mon_pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
1997 			    (mon_pdev->m_copy_id.tx_peer_id == peer_id)) {
1998 				return QDF_STATUS_E_INVAL;
1999 			}
2000 		}
2001 
2002 		if (!first_msdu)
2003 			return QDF_STATUS_E_INVAL;
2004 
2005 		mon_pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2006 		mon_pdev->m_copy_id.tx_peer_id = peer_id;
2007 	}
2008 
2009 	return QDF_STATUS_SUCCESS;
2010 }
2011 #endif
2012 
2013 #ifdef WDI_EVENT_ENABLE
2014 #ifndef REMOVE_PKT_LOG
2015 static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
2016 {
2017 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2018 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2019 
2020 	if (!pdev || !pdev->monitor_pdev)
2021 		return NULL;
2022 
2023 	return pdev->monitor_pdev->pl_dev;
2024 }
2025 #else
2026 static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
2027 {
2028 	return NULL;
2029 }
2030 #endif
2031 #endif
2032 
2033 QDF_STATUS dp_rx_populate_cbf_hdr(struct dp_soc *soc,
2034 				  uint32_t mac_id,
2035 				  uint32_t event,
2036 				  qdf_nbuf_t mpdu,
2037 				  uint32_t msdu_timestamp)
2038 {
2039 	uint32_t data_size, hdr_size, ppdu_id, align4byte;
2040 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2041 	uint32_t *msg_word;
2042 
2043 	if (!pdev)
2044 		return QDF_STATUS_E_INVAL;
2045 
2046 	ppdu_id = pdev->monitor_pdev->ppdu_info.com_info.ppdu_id;
2047 
2048 	hdr_size = HTT_T2H_PPDU_STATS_IND_HDR_SIZE
2049 		+ qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload);
2050 
2051 	data_size = qdf_nbuf_len(mpdu);
2052 
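	/*
	 * Prepend the PPDU stats indication header in the nbuf headroom;
	 * it is stripped again via qdf_nbuf_pull_head() after the WDI
	 * handler runs, so the caller sees the mpdu unchanged.
	 */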
2053 	qdf_nbuf_push_head(mpdu, hdr_size);
2054 
2055 	msg_word = (uint32_t *)qdf_nbuf_data(mpdu);
2056 	/*
2057 	 * Populate the PPDU Stats Indication header
2058 	 */
2059 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_T2H_MSG_TYPE_PPDU_STATS_IND);
2060 	HTT_T2H_PPDU_STATS_MAC_ID_SET(*msg_word, mac_id);
2061 	HTT_T2H_PPDU_STATS_PDEV_ID_SET(*msg_word, pdev->pdev_id);
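	/*
	 * Round (TLV payload offset + data size) up to a 4-byte boundary;
	 * "(x + 3) >> 2 << 2" is the usual ALIGN(x, 4) idiom.
	 */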
2062 	align4byte = ((data_size +
2063 		qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
2064 		+ 3) >> 2) << 2;
2065 	HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_SET(*msg_word, align4byte);
2066 	msg_word++;
2067 	HTT_T2H_PPDU_STATS_PPDU_ID_SET(*msg_word, ppdu_id);
2068 	msg_word++;
2069 
2070 	*msg_word = msdu_timestamp;
2071 	msg_word++;
2072 	/* Skip reserved field */
2073 	msg_word++;
2074 	/*
2075 	 * Populate MGMT_CTRL Payload TLV first
2076 	 */
2077 	HTT_STATS_TLV_TAG_SET(*msg_word,
2078 			      HTT_PPDU_STATS_RX_MGMTCTRL_PAYLOAD_TLV);
2079 
2080 	align4byte = ((data_size - sizeof(htt_tlv_hdr_t) +
2081 		qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
2082 		+ 3) >> 2) << 2;
2083 	HTT_STATS_TLV_LENGTH_SET(*msg_word, align4byte);
2084 	msg_word++;
2085 
2086 	HTT_PPDU_STATS_RX_MGMTCTRL_TLV_FRAME_LENGTH_SET(
2087 		*msg_word, data_size);
2088 	msg_word++;
2089 
2090 	dp_wdi_event_handler(event, soc, (void *)mpdu,
2091 			     HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id);
2092 
2093 	qdf_nbuf_pull_head(mpdu, hdr_size);
2094 
2095 	return QDF_STATUS_SUCCESS;
2096 }
2097 
2098 #ifdef ATH_SUPPORT_EXT_STAT
2099 #ifdef WLAN_CONFIG_TELEMETRY_AGENT
2100 /**
2101  * dp_pdev_clear_link_airtime_stats() - clear airtime stats for given pdev
2102  * @pdev: DP PDEV handle
2103  */
2104 static inline
2105 void dp_pdev_clear_link_airtime_stats(struct dp_pdev *pdev)
2106 {
2107 	uint8_t ac;
2108 
2109 	for (ac = 0; ac < WME_AC_MAX; ac++)
2110 		pdev->stats.telemetry_stats.link_airtime[ac] = 0;
2111 }
2112 
2113 /**
2114  * dp_peer_update_telemetry_stats() - update peer telemetry stats
2115  * @soc: Datapath soc
2116  * @peer: Datapath peer
2117  * @arg: argument to callback function
2118  */
2119 static inline
2120 void dp_peer_update_telemetry_stats(struct dp_soc *soc,
2121 				    struct dp_peer *peer,
2122 				    void *arg)
2123 {
2124 	struct dp_pdev *pdev;
2125 	struct dp_vdev *vdev;
2126 	struct dp_mon_peer *mon_peer = NULL;
2127 	uint8_t ac;
2128 	uint64_t current_time = qdf_get_log_timestamp();
2129 
2130 	vdev = peer->vdev;
2131 	if (!vdev)
2132 		return;
2133 
2134 	pdev = vdev->pdev;
2135 	if (!pdev)
2136 		return;
2137 
2138 	mon_peer = peer->monitor_peer;
2139 	if (qdf_likely(mon_peer)) {
2140 		for (ac = 0; ac < WME_AC_MAX; ac++) {
2141 			mon_peer->stats.airtime_stats.tx_airtime_consumption[ac].avg_consumption_per_sec =
2142 				(uint8_t)qdf_do_div((uint64_t)(mon_peer->stats.airtime_stats.tx_airtime_consumption[ac].consumption * 100),
2143 						    (uint32_t)(current_time - mon_peer->stats.airtime_stats.last_update_time));
2144 			mon_peer->stats.airtime_stats.rx_airtime_consumption[ac].avg_consumption_per_sec =
2145 				(uint8_t)qdf_do_div((uint64_t)(mon_peer->stats.airtime_stats.rx_airtime_consumption[ac].consumption * 100),
2146 						    (uint32_t)(current_time - mon_peer->stats.airtime_stats.last_update_time));
2147 			/* Store each peer airtime consumption in pdev
2148 			 * link_airtime to calculate pdev's total airtime
2149 			 * consumption
2150 			 */
2151 			DP_STATS_INC(
2152 				pdev,
2153 				telemetry_stats.link_airtime[ac],
2154 				mon_peer->stats.airtime_stats.tx_airtime_consumption[ac].consumption);
2155 			DP_STATS_INC(
2156 				pdev,
2157 				telemetry_stats.link_airtime[ac],
2158 				mon_peer->stats.airtime_stats.rx_airtime_consumption[ac].consumption);
2159 			mon_peer->stats.airtime_stats.tx_airtime_consumption[ac].consumption = 0;
2160 			mon_peer->stats.airtime_stats.rx_airtime_consumption[ac].consumption = 0;
2161 		}
2162 		mon_peer->stats.airtime_stats.last_update_time = current_time;
2163 	}
2164 }
2165 
2166 QDF_STATUS dp_pdev_update_telemetry_airtime_stats(struct cdp_soc_t *soc,
2167 						  uint8_t pdev_id)
2168 {
2169 	struct dp_pdev *pdev =
2170 		dp_get_pdev_from_soc_pdev_id_wifi3(cdp_soc_t_to_dp_soc(soc),
2171 						   pdev_id);
2172 	if (!pdev)
2173 		return QDF_STATUS_E_FAILURE;
2174 
2175 	/* Clear the current airtime stats, as the iteration below
2176 	 * increments them for all peers on top of the current value.
2177 	 */
2178 	dp_pdev_clear_link_airtime_stats(pdev);
2179 	dp_pdev_iterate_peer(pdev, dp_peer_update_telemetry_stats, NULL,
2180 			     DP_MOD_ID_CDP);
2181 
2182 	return QDF_STATUS_SUCCESS;
2183 }
2184 #endif
2185 
2186 /**
2187  * dp_peer_cal_clients_stats_update() - update peer stats on cal client timer
2188  * @soc: Datapath SOC
2189  * @peer: Datapath peer
2190  * @arg: argument to iter function
2191  */
2192 #ifdef IPA_OFFLOAD
2193 static void
2194 dp_peer_cal_clients_stats_update(struct dp_soc *soc,
2195 				 struct dp_peer *peer,
2196 				 void *arg)
2197 {
2198 	struct cdp_calibr_stats_intf peer_stats_intf = {0};
2199 	struct dp_peer *tgt_peer = NULL;
2200 	struct dp_txrx_peer *txrx_peer = NULL;
2201 
2202 	if (!dp_peer_is_primary_link_peer(peer))
2203 		return;
2204 
2205 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
2206 	if (!tgt_peer || !(tgt_peer->txrx_peer))
2207 		return;
2208 
2209 	txrx_peer = tgt_peer->txrx_peer;
2210 	peer_stats_intf.to_stack = txrx_peer->to_stack;
2211 	peer_stats_intf.tx_success =
2212 				peer->monitor_peer->stats.tx.tx_ucast_success;
2213 	peer_stats_intf.tx_ucast =
2214 				peer->monitor_peer->stats.tx.tx_ucast_total;
2215 
2216 	dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf,
2217 					      &tgt_peer->stats);
2218 	dp_peer_get_rxtid_stats_ipa(peer, dp_peer_update_tid_stats_from_reo);
2219 }
2220 #else
2221 static void
2222 dp_peer_cal_clients_stats_update(struct dp_soc *soc,
2223 				 struct dp_peer *peer,
2224 				 void *arg)
2225 {
2226 	struct cdp_calibr_stats_intf peer_stats_intf = {0};
2227 	struct dp_peer *tgt_peer = NULL;
2228 	struct dp_txrx_peer *txrx_peer = NULL;
2229 	uint8_t inx = 0;
2230 	uint8_t stats_arr_size;
2231 
2232 	if (!dp_peer_is_primary_link_peer(peer))
2233 		return;
2234 
2235 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
2236 	if (!tgt_peer || !(tgt_peer->txrx_peer))
2237 		return;
2238 
2239 	txrx_peer = tgt_peer->txrx_peer;
2240 	peer_stats_intf.to_stack = txrx_peer->to_stack;
2241 	stats_arr_size = txrx_peer->stats_arr_size;
2242 
2243 	for (inx = 0; inx < stats_arr_size; inx++) {
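	/*
	 * Tx counters are kept per link in stats[]; sum them across all
	 * entries so the calibration client sees peer-level totals.
	 */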
2244 		peer_stats_intf.tx_success.num +=
2245 			txrx_peer->stats[inx].per_pkt_stats.tx.tx_success.num;
2246 		peer_stats_intf.tx_success.bytes +=
2247 			txrx_peer->stats[inx].per_pkt_stats.tx.tx_success.bytes;
2248 		peer_stats_intf.tx_ucast.num +=
2249 			txrx_peer->stats[inx].per_pkt_stats.tx.ucast.num;
2250 		peer_stats_intf.tx_ucast.bytes +=
2251 			txrx_peer->stats[inx].per_pkt_stats.tx.ucast.bytes;
2252 	}
2253 
2254 	dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf,
2255 					      &tgt_peer->stats);
2256 }
2257 #endif
2258 
2259 /**
2260  * dp_iterate_update_peer_list() - update peer stats on cal client timer
2261  * @pdev_hdl: pdev handle
2262  */
2263 static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
2264 {
2265 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
2266 
2267 	dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL,
2268 			     DP_MOD_ID_CDP);
2269 }
2270 #else
2271 static void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
2272 {
2273 }
2274 #endif
2275 
2276 #ifdef ATH_SUPPORT_NAC
2277 int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
2278 			      bool val)
2279 {
2280 	/* Enable/Disable smart mesh filtering. This flag will be checked
2281 	 * during rx processing to check if packets are from NAC clients.
2282 	 */
2283 	pdev->monitor_pdev->filter_neighbour_peers = val;
2284 	return 0;
2285 }
2286 #endif /* ATH_SUPPORT_NAC */
2287 
2288 #ifdef WLAN_ATF_ENABLE
2289 void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
2290 {
2291 	if (!pdev) {
2292 		dp_cdp_err("pdev is NULL");
2293 		return;
2294 	}
2295 
2296 	pdev->monitor_pdev->dp_atf_stats_enable = value;
2297 }
2298 #endif
2299 
2300 #ifdef QCA_ENHANCED_STATS_SUPPORT
2301 /**
2302  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv() - Process
2303  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2304  * @pdev: DP PDEV handle
2305  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2306  * @ppdu_id: PPDU Id
2307  *
2308  * Return: QDF_STATUS_SUCCESS if nbuf has to be freed in caller
2309  */
2310 static QDF_STATUS
2311 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2312 					      qdf_nbuf_t tag_buf,
2313 					      uint32_t ppdu_id)
2314 {
2315 	uint32_t *nbuf_ptr;
2316 	uint8_t trim_size;
2317 	size_t head_size;
2318 	struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
2319 	uint32_t *msg_word;
2320 	uint32_t tsf_hdr;
2321 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2322 
2323 	if ((!mon_pdev->tx_sniffer_enable) && (!mon_pdev->mcopy_mode) &&
2324 	    (!mon_pdev->bpr_enable) && (!mon_pdev->tx_capture_enabled))
2325 		return QDF_STATUS_SUCCESS;
2326 
2327 	/*
2328 	 * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t
2329 	 */
2330 	msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
2331 	msg_word = msg_word + 2;
2332 	tsf_hdr = *msg_word;
2333 
2334 	trim_size = ((mon_pdev->mgmtctrl_frm_info.mgmt_buf +
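	/*
	 * Strip the HTT headers (plus reserved TLV header bytes) ahead of
	 * the mgmt payload, then trim the tail so the nbuf holds exactly
	 * mgmt_buf_len bytes of the frame.
	 */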
2335 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2336 		      qdf_nbuf_data(tag_buf));
2337 
2338 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2339 		return QDF_STATUS_SUCCESS;
2340 
2341 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2342 			    mon_pdev->mgmtctrl_frm_info.mgmt_buf_len);
2343 
2344 	if (mon_pdev->tx_capture_enabled) {
2345 		head_size = sizeof(struct cdp_tx_mgmt_comp_info);
2346 		if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
2347 			qdf_err("Fail to get headroom h_sz %zu h_avail %d\n",
2348 				head_size, qdf_nbuf_headroom(tag_buf));
2349 			qdf_assert_always(0);
2350 			return QDF_STATUS_E_NOMEM;
2351 		}
2352 		ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
2353 					qdf_nbuf_push_head(tag_buf, head_size);
2354 		qdf_assert_always(ptr_mgmt_comp_info);
2355 		ptr_mgmt_comp_info->ppdu_id = ppdu_id;
2356 		ptr_mgmt_comp_info->is_sgen_pkt = true;
2357 		ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
2358 	} else {
2359 		head_size = sizeof(ppdu_id);
2360 		nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
2361 		*nbuf_ptr = ppdu_id;
2362 	}
2363 	if (mon_pdev->bpr_enable) {
2364 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2365 				     tag_buf, HTT_INVALID_PEER,
2366 				     WDI_NO_VAL, pdev->pdev_id);
2367 	}
2368 
2369 	dp_deliver_mgmt_frm(pdev, tag_buf);
2370 
2371 	return QDF_STATUS_E_ALREADY;
2372 }
2373 
2374 int
2375 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
2376 {
2377 	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
2378 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
2379 	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
2380 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
2381 
2382 	return 0;
2383 }
2384 
2385 /**
2386  * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
2387  * @peer: Datapath peer handle
2388  * @ppdu: User PPDU Descriptor
2389  * @cur_ppdu_id: PPDU_ID
2390  *
2391  * Return: None
2392  *
2393  * On a Tx data frame we may find delayed_ba set in
2394  * htt_ppdu_stats_user_common_tlv, which means the Block Ack (BA) arrives
2395  * only after we send a Block Ack Request (BAR). A successful MSDU count is
2396  * known only once the BA is received, and peer stats need that count.
2397  * So we hold the Tx data stats in delayed_ba until the BA arrives.
2398  */
2399 static void
2400 dp_peer_copy_delay_stats(struct dp_peer *peer,
2401 			 struct cdp_tx_completion_ppdu_user *ppdu,
2402 			 uint32_t cur_ppdu_id)
2403 {
2404 	struct dp_pdev *pdev;
2405 	struct dp_vdev *vdev;
2406 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
2407 
2408 	if (mon_peer->last_delayed_ba) {
2409 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2410 			  "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]",
2411 			  mon_peer->last_delayed_ba_ppduid, cur_ppdu_id);
2412 		vdev = peer->vdev;
2413 		if (vdev) {
2414 			pdev = vdev->pdev;
2415 			pdev->stats.cdp_delayed_ba_not_recev++;
2416 		}
2417 	}
2418 
2419 	mon_peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
2420 	mon_peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
2421 	mon_peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
2422 	mon_peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
2423 	mon_peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
2424 	mon_peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
2425 	mon_peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
2426 	mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
2427 	mon_peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
2429 	mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast =
2430 					ppdu->mpdu_tried_ucast;
2431 	mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast =
2432 					ppdu->mpdu_tried_mcast;
2433 	mon_peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
2434 	mon_peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
2436 
2437 	mon_peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
2438 	mon_peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
2439 	mon_peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
2440 
2441 	mon_peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
2442 	mon_peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;
2443 
2444 	mon_peer->last_delayed_ba = true;
2445 
2446 	ppdu->debug_copied = true;
2447 }
2448 
2449 /**
2450  * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
2451  * @peer: Datapath peer handle
2452  * @ppdu: PPDU Descriptor
2453  *
2454  * Return: None
2455  *
2456  * For a Tx BAR, the PPDU stats TLV carries the Block Ack info. The PPDU
2457  * info from the BAR frame itself is not needed to populate peer stats,
2458  * but the successful MPDU/MSDU counts are needed to update the previously
2459  * transmitted Tx data frame. So overwrite the BAR's ppdu stats with the
2460  * previously stored ppdu stats.
2461  */
2462 static void
2463 dp_peer_copy_stats_to_bar(struct dp_peer *peer,
2464 			  struct cdp_tx_completion_ppdu_user *ppdu)
2465 {
2466 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
2467 
2468 	ppdu->ltf_size = mon_peer->delayed_ba_ppdu_stats.ltf_size;
2469 	ppdu->stbc = mon_peer->delayed_ba_ppdu_stats.stbc;
2470 	ppdu->he_re = mon_peer->delayed_ba_ppdu_stats.he_re;
2471 	ppdu->txbf = mon_peer->delayed_ba_ppdu_stats.txbf;
2472 	ppdu->bw = mon_peer->delayed_ba_ppdu_stats.bw;
2473 	ppdu->nss = mon_peer->delayed_ba_ppdu_stats.nss;
2474 	ppdu->gi = mon_peer->delayed_ba_ppdu_stats.gi;
2475 	ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm;
2476 	ppdu->ldpc = mon_peer->delayed_ba_ppdu_stats.ldpc;
2478 	ppdu->mpdu_tried_ucast =
2479 			mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
2480 	ppdu->mpdu_tried_mcast =
2481 			mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
2482 	ppdu->frame_ctrl = mon_peer->delayed_ba_ppdu_stats.frame_ctrl;
2483 	ppdu->qos_ctrl = mon_peer->delayed_ba_ppdu_stats.qos_ctrl;
2485 
2486 	ppdu->ru_start = mon_peer->delayed_ba_ppdu_stats.ru_start;
2487 	ppdu->ru_tones = mon_peer->delayed_ba_ppdu_stats.ru_tones;
2488 	ppdu->is_mcast = mon_peer->delayed_ba_ppdu_stats.is_mcast;
2489 
2490 	ppdu->user_pos = mon_peer->delayed_ba_ppdu_stats.user_pos;
2491 	ppdu->mu_group_id = mon_peer->delayed_ba_ppdu_stats.mu_group_id;
2492 
2493 	mon_peer->last_delayed_ba = false;
2494 
2495 	ppdu->debug_copied = true;
2496 }
2497 
2498 /**
2499  * dp_tx_rate_stats_update() - Update rate per-peer statistics
2500  * @peer: Datapath peer handle
2501  * @ppdu: PPDU Descriptor
2502  *
2503  * Return: None
2504  */
2505 static void
2506 dp_tx_rate_stats_update(struct dp_peer *peer,
2507 			struct cdp_tx_completion_ppdu_user *ppdu)
2508 {
2509 	uint32_t ratekbps = 0;
2510 	uint64_t ppdu_tx_rate = 0;
2511 	uint32_t rix;
2512 	uint16_t ratecode = 0;
2513 	struct dp_mon_peer *mon_peer = NULL;
2514 
2515 	if (!peer || !ppdu)
2516 		return;
2517 
2518 	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK)
2519 		return;
2520 
2521 	mon_peer = peer->monitor_peer;
2522 	if (!mon_peer)
2523 		return;
2524 
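	/* Rate-table lookup: converts (GI, MCS, NSS, preamble, BW,
	 * puncture mode) into a rate in kbps and fills the rate index
	 * and ratecode.
	 */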
2525 	ratekbps = dp_getrateindex(ppdu->gi,
2526 				   ppdu->mcs,
2527 				   ppdu->nss,
2528 				   ppdu->preamble,
2529 				   ppdu->bw,
2530 				   ppdu->punc_mode,
2531 				   &rix,
2532 				   &ratecode);
2533 
2534 	if (!ratekbps)
2535 		return;
2536 
2537 	/* Calculate goodput in the non-training period. During training,
2538 	 * do nothing, as the pending packet is sent as goodput.
2539 	 * Goodput = (ratekbps / CDP_NUM_KB_IN_MB) * (CDP_PERCENT_MACRO - PER).
2540 	 */
2541 	if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
2542 		ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
2543 				(CDP_PERCENT_MACRO - ppdu->current_rate_per));
2544 	}
2545 	ppdu->rix = rix;
2546 	ppdu->tx_ratekbps = ratekbps;
2547 	ppdu->tx_ratecode = ratecode;
2548 	DP_STATS_UPD(mon_peer, tx.tx_rate, ratekbps);
2549 	mon_peer->stats.tx.avg_tx_rate =
2550 		dp_ath_rate_lpf(mon_peer->stats.tx.avg_tx_rate, ratekbps);
2551 	ppdu_tx_rate = dp_ath_rate_out(mon_peer->stats.tx.avg_tx_rate);
2552 	DP_STATS_UPD(mon_peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
2553 
2554 	mon_peer->stats.tx.bw_info = ppdu->bw;
2555 	mon_peer->stats.tx.gi_info = ppdu->gi;
2556 	mon_peer->stats.tx.nss_info = ppdu->nss;
2557 	mon_peer->stats.tx.mcs_info = ppdu->mcs;
2558 	mon_peer->stats.tx.preamble_info = ppdu->preamble;
2559 	if (peer->vdev) {
2560 		/*
2561 		 * In STA mode:
2562 		 *	We get ucast stats as BSS peer stats.
2563 		 *
2564 		 * In AP mode:
2565 		 *	We get mcast stats as BSS peer stats.
2566 		 *	We get ucast stats as assoc peer stats.
2567 		 */
2568 		if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
2569 			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
2570 			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
2571 		} else {
2572 			peer->vdev->stats.tx.last_tx_rate = ratekbps;
2573 			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
2574 		}
2575 	}
2576 }
2577 
2578 #if defined(FEATURE_PERPKT_INFO) && defined(WDI_EVENT_ENABLE)
2579 void dp_send_stats_event(struct dp_pdev *pdev, struct dp_peer *peer,
2580 			 uint16_t peer_id)
2581 {
2582 	struct cdp_interface_peer_stats peer_stats_intf = {0};
2583 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
2584 	struct dp_txrx_peer *txrx_peer = NULL;
2585 	uint8_t inx = 0;
2586 	uint8_t stats_arr_size;
2587 
2588 	if (qdf_unlikely(!mon_peer))
2589 		return;
2590 
2591 	mon_peer->stats.rx.rx_snr_measured_time = qdf_system_ticks();
2592 	peer_stats_intf.rx_avg_snr = mon_peer->stats.rx.avg_snr;
2593 
2594 	txrx_peer = dp_get_txrx_peer(peer);
2595 	if (qdf_likely(txrx_peer)) {
2596 		stats_arr_size = txrx_peer->stats_arr_size;
2597 		peer_stats_intf.rx_byte_count = txrx_peer->to_stack.bytes;
2598 		for (inx = 0; inx < stats_arr_size; inx++)
2599 			peer_stats_intf.tx_byte_count +=
2600 			txrx_peer->stats[inx].per_pkt_stats.tx.tx_success.bytes;
2601 	}
2602 
2603 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
2604 			     &peer_stats_intf, peer_id,
2605 			     UPDATE_PEER_STATS, pdev->pdev_id);
2606 }
2607 #endif
2608 
2609 #ifdef WLAN_FEATURE_11BE
2610 /**
2611  * dp_get_ru_index_frm_ru_tones() - get ru index
2612  * @ru_tones: ru tones
2613  *
2614  * Return: ru index
2615  */
2616 static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones)
2617 {
2618 	enum cdp_ru_index ru_index;
2619 
2620 	switch (ru_tones) {
2621 	case RU_26:
2622 		ru_index = RU_26_INDEX;
2623 		break;
2624 	case RU_52:
2625 		ru_index = RU_52_INDEX;
2626 		break;
2627 	case RU_52_26:
2628 		ru_index = RU_52_26_INDEX;
2629 		break;
2630 	case RU_106:
2631 		ru_index = RU_106_INDEX;
2632 		break;
2633 	case RU_106_26:
2634 		ru_index = RU_106_26_INDEX;
2635 		break;
2636 	case RU_242:
2637 		ru_index = RU_242_INDEX;
2638 		break;
2639 	case RU_484:
2640 		ru_index = RU_484_INDEX;
2641 		break;
2642 	case RU_484_242:
2643 		ru_index = RU_484_242_INDEX;
2644 		break;
2645 	case RU_996:
2646 		ru_index = RU_996_INDEX;
2647 		break;
2648 	case RU_996_484:
2649 		ru_index = RU_996_484_INDEX;
2650 		break;
2651 	case RU_996_484_242:
2652 		ru_index = RU_996_484_242_INDEX;
2653 		break;
2654 	case RU_2X996:
2655 		ru_index = RU_2X996_INDEX;
2656 		break;
2657 	case RU_2X996_484:
2658 		ru_index = RU_2X996_484_INDEX;
2659 		break;
2660 	case RU_3X996:
2661 		ru_index = RU_3X996_INDEX;
2662 		break;
2663 	case RU_3X996_484:
2664 		ru_index = RU_3X996_484_INDEX;
2665 		break;
2666 	case RU_4X996:
2667 		ru_index = RU_4X996_INDEX;
2668 		break;
2669 	default:
2670 		ru_index = RU_INDEX_MAX;
2671 		break;
2672 	}
2673 
2674 	return ru_index;
2675 }
2676 
2677 /**
2678  * dp_mon_get_ru_width_from_ru_size() - get ru_width from ru_size enum
2679  * @ru_size: HTT ru_size enum
2680  *
2681  * Return: ru_width of uint32_t type
2682  */
2683 static uint32_t dp_mon_get_ru_width_from_ru_size(uint16_t ru_size)
2684 {
2685 	uint32_t width = 0;
2686 
2687 	switch (ru_size) {
2688 	case HTT_PPDU_STATS_RU_26:
2689 		width = RU_26;
2690 		break;
2691 	case HTT_PPDU_STATS_RU_52:
2692 		width = RU_52;
2693 		break;
2694 	case HTT_PPDU_STATS_RU_52_26:
2695 		width = RU_52_26;
2696 		break;
2697 	case HTT_PPDU_STATS_RU_106:
2698 		width = RU_106;
2699 		break;
2700 	case HTT_PPDU_STATS_RU_106_26:
2701 		width = RU_106_26;
2702 		break;
2703 	case HTT_PPDU_STATS_RU_242:
2704 		width = RU_242;
2705 		break;
2706 	case HTT_PPDU_STATS_RU_484:
2707 		width = RU_484;
2708 		break;
2709 	case HTT_PPDU_STATS_RU_484_242:
2710 		width = RU_484_242;
2711 		break;
2712 	case HTT_PPDU_STATS_RU_996:
2713 		width = RU_996;
2714 		break;
2715 	case HTT_PPDU_STATS_RU_996_484:
2716 		width = RU_996_484;
2717 		break;
2718 	case HTT_PPDU_STATS_RU_996_484_242:
2719 		width = RU_996_484_242;
2720 		break;
2721 	case HTT_PPDU_STATS_RU_996x2:
2722 		width = RU_2X996;
2723 		break;
2724 	case HTT_PPDU_STATS_RU_996x2_484:
2725 		width = RU_2X996_484;
2726 		break;
2727 	case HTT_PPDU_STATS_RU_996x3:
2728 		width = RU_3X996;
2729 		break;
2730 	case HTT_PPDU_STATS_RU_996x3_484:
2731 		width = RU_3X996_484;
2732 		break;
2733 	case HTT_PPDU_STATS_RU_996x4:
2734 		width = RU_4X996;
2735 		break;
2736 	default:
2737 		dp_mon_debug("Unsupported ru_size: %d rcvd", ru_size);
2738 	}
2739 
2740 	return width;
2741 }
2742 #else
2743 static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones)
2744 {
2745 	enum cdp_ru_index ru_index;
2746 
2747 	switch (ru_tones) {
2748 	case RU_26:
2749 		ru_index = RU_26_INDEX;
2750 		break;
2751 	case RU_52:
2752 		ru_index = RU_52_INDEX;
2753 		break;
2754 	case RU_106:
2755 		ru_index = RU_106_INDEX;
2756 		break;
2757 	case RU_242:
2758 		ru_index = RU_242_INDEX;
2759 		break;
2760 	case RU_484:
2761 		ru_index = RU_484_INDEX;
2762 		break;
2763 	case RU_996:
2764 		ru_index = RU_996_INDEX;
2765 		break;
2766 	default:
2767 		ru_index = RU_INDEX_MAX;
2768 		break;
2769 	}
2770 
2771 	return ru_index;
2772 }
2773 
2774 static uint32_t dp_mon_get_ru_width_from_ru_size(uint16_t ru_size)
2775 {
2776 	uint32_t width = 0;
2777 
2778 	switch (ru_size) {
2779 	case HTT_PPDU_STATS_RU_26:
2780 		width = RU_26;
2781 		break;
2782 	case HTT_PPDU_STATS_RU_52:
2783 		width = RU_52;
2784 		break;
2785 	case HTT_PPDU_STATS_RU_106:
2786 		width = RU_106;
2787 		break;
2788 	case HTT_PPDU_STATS_RU_242:
2789 		width = RU_242;
2790 		break;
2791 	case HTT_PPDU_STATS_RU_484:
2792 		width = RU_484;
2793 		break;
2794 	case HTT_PPDU_STATS_RU_996:
2795 		width = RU_996;
2796 		break;
2797 	default:
2798 		dp_mon_debug("Unsupported ru_size: %d rcvd", ru_size);
2799 	}
2800 
2801 	return width;
2802 }
2803 #endif
2804 
2805 #ifdef WLAN_CONFIG_TELEMETRY_AGENT
2806 /**
2807  * dp_pdev_telemetry_stats_update() - Update pdev telemetry stats
2808  * @pdev: Datapath pdev handle
2809  * @ppdu: PPDU Descriptor
2810  *
2811  * Return: None
2812  */
2813 static void
2814 dp_pdev_telemetry_stats_update(
2815 		struct dp_pdev *pdev,
2816 		struct cdp_tx_completion_ppdu_user *ppdu)
2817 {
2818 	uint16_t mpdu_tried;
2819 	uint16_t mpdu_failed;
2820 	uint16_t num_mpdu;
2821 	uint8_t ac = 0;
2822 
2823 	num_mpdu = ppdu->mpdu_success;
2824 	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
2825 	mpdu_failed = mpdu_tried - num_mpdu;
2826 
2827 	ac = TID_TO_WME_AC(ppdu->tid);
2828 
2829 	DP_STATS_INC(pdev, telemetry_stats.tx_mpdu_failed[ac],
2830 		     mpdu_failed);
2831 
2832 	DP_STATS_INC(pdev, telemetry_stats.tx_mpdu_total[ac],
2833 		     mpdu_tried);
2834 }
2835 
2836 /**
2837  * dp_ppdu_desc_get_txmode() - Get TX mode
2838  * @ppdu: PPDU Descriptor
2839  *
2840  * Return: None
2841  */
2842 static inline
2843 void dp_ppdu_desc_get_txmode(struct cdp_tx_completion_ppdu *ppdu)
2844 {
2845 	uint16_t frame_type = ppdu->htt_frame_type;
2846 
2847 	ppdu->txmode_type = TX_MODE_TYPE_UNKNOWN;
2848 
2849 	if (ppdu->frame_type == CDP_PPDU_FTYPE_CTRL &&
2850 	    (frame_type != HTT_STATS_FTYPE_SGEN_MU_TRIG &&
2851 	     frame_type != HTT_STATS_FTYPE_SGEN_BE_MU_TRIG))
2852 		return;
2853 
2854 	if (frame_type == HTT_STATS_FTYPE_SGEN_MU_BAR ||
2855 	    frame_type == HTT_STATS_FTYPE_SGEN_BE_MU_BAR) {
2856 		ppdu->txmode = TX_MODE_UL_OFDMA_MU_BAR_TRIGGER;
2857 		ppdu->txmode_type = TX_MODE_TYPE_UL;
2858 
2859 		return;
2860 	}
2861 
2862 	switch (ppdu->htt_seq_type) {
2863 	case HTT_SEQTYPE_SU:
2864 		if (frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
2865 			ppdu->txmode = TX_MODE_DL_SU_DATA;
2866 			ppdu->txmode_type = TX_MODE_TYPE_DL;
2867 		}
2868 		break;
2869 	case HTT_SEQTYPE_MU_OFDMA:
2870 	case HTT_SEQTYPE_BE_MU_OFDMA:
2871 		if (frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU) {
2872 			ppdu->txmode = TX_MODE_DL_OFDMA_DATA;
2873 			ppdu->txmode_type = TX_MODE_TYPE_DL;
2874 		}
2875 		break;
2876 	case HTT_SEQTYPE_AC_MU_MIMO:
2877 	case HTT_SEQTYPE_AX_MU_MIMO:
2878 	case HTT_SEQTYPE_BE_MU_MIMO:
2879 		if (frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU) {
2880 			ppdu->txmode = TX_MODE_DL_MUMIMO_DATA;
2881 			ppdu->txmode_type = TX_MODE_TYPE_DL;
2882 		}
2883 		break;
2884 	case HTT_SEQTYPE_UL_MU_OFDMA_TRIG:
2885 	case HTT_SEQTYPE_BE_UL_MU_OFDMA_TRIG:
2886 		if (frame_type == HTT_STATS_FTYPE_SGEN_MU_TRIG ||
2887 		    frame_type == HTT_STATS_FTYPE_SGEN_BE_MU_TRIG) {
2888 			ppdu->txmode = TX_MODE_UL_OFDMA_BASIC_TRIGGER_DATA;
2889 			ppdu->txmode_type = TX_MODE_TYPE_UL;
2890 		}
2891 		break;
2892 	case HTT_SEQTYPE_UL_MU_MIMO_TRIG:
2893 	case HTT_SEQTYPE_BE_UL_MU_MIMO_TRIG:
2894 		if (frame_type == HTT_STATS_FTYPE_SGEN_MU_TRIG ||
2895 		    frame_type == HTT_STATS_FTYPE_SGEN_BE_MU_TRIG) {
2896 			ppdu->txmode = TX_MODE_UL_MUMIMO_BASIC_TRIGGER_DATA;
2897 			ppdu->txmode_type = TX_MODE_TYPE_UL;
2898 		}
2899 		break;
2900 	default:
2901 		ppdu->txmode_type = TX_MODE_TYPE_UNKNOWN;
2902 		break;
2903 	}
2904 }
2905 
2906 /**
2907  * dp_pdev_update_deter_stats() - Update pdev deterministic stats
2908  * @pdev: Datapath pdev handle
2909  * @ppdu: PPDU Descriptor
2910  *
2911  * Return: None
2912  */
2913 static inline void
2914 dp_pdev_update_deter_stats(struct dp_pdev *pdev,
2915 			   struct cdp_tx_completion_ppdu *ppdu)
2916 {
2917 	uint32_t user_idx;
2918 
2919 	if (!pdev || !ppdu)
2920 		return;
2921 
2922 	if (ppdu->txmode_type == TX_MODE_TYPE_UNKNOWN)
2923 		return;
2924 
2925 	if (ppdu->backoff_ac_valid) {
2926 		if (ppdu->backoff_ac >= WME_AC_MAX) {
2927 			dp_mon_err("backoff_ac %d exceed max limit",
2928 				   ppdu->backoff_ac);
2929 			return;
2930 		}
2931 		DP_STATS_UPD(pdev,
2932 			     deter_stats.ch_access_delay[ppdu->backoff_ac],
2933 			     ppdu->ch_access_delay);
2934 	}
2935 
2936 	if (ppdu->txmode_type == TX_MODE_TYPE_DL) {
2937 		DP_STATS_INC(pdev,
2938 			     deter_stats.dl_mode_cnt[ppdu->txmode],
2939 			     1);
2940 		if (!ppdu->num_users) {
2941 			dp_mon_err("dl users is %d", ppdu->num_users);
2942 			return;
2943 		}
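		/* Stats are bucketed by user count: index N counts PPDUs
		 * with N + 1 users.
		 */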
2944 		user_idx = ppdu->num_users - 1;
2945 		switch (ppdu->txmode) {
2946 		case TX_MODE_DL_OFDMA_DATA:
2947 			DP_STATS_INC(pdev,
2948 				     deter_stats.dl_ofdma_usr[user_idx],
2949 				     1);
2950 			break;
2951 		case TX_MODE_DL_MUMIMO_DATA:
2952 			if (user_idx >= CDP_MU_MAX_MIMO_USERS) {
2953 				dp_mon_err("dl mimo users %d exceed max limit",
2954 					   ppdu->num_users);
2955 				return;
2956 			}
2957 			DP_STATS_INC(pdev,
2958 				     deter_stats.dl_mimo_usr[user_idx],
2959 				     1);
2960 			break;
2961 		}
2962 	} else {
2963 		DP_STATS_INC(pdev,
2964 			     deter_stats.ul_mode_cnt[ppdu->txmode],
2965 			     1);
2966 
2967 		if (!ppdu->num_ul_users) {
2968 			dp_mon_err("dl users is %d", ppdu->num_ul_users);
2969 			return;
2970 		}
2971 		user_idx = ppdu->num_ul_users - 1;
2972 		switch (ppdu->txmode) {
2973 		case TX_MODE_UL_OFDMA_BASIC_TRIGGER_DATA:
2974 			DP_STATS_INC(pdev,
2975 				     deter_stats.ul_ofdma_usr[user_idx],
2976 				     1);
2977 			break;
2978 		case TX_MODE_UL_MUMIMO_BASIC_TRIGGER_DATA:
2979 			if (user_idx >= CDP_MU_MAX_MIMO_USERS) {
2980 				dp_mon_err("ul mimo users %d exceed max limit",
2981 					   ppdu->num_ul_users);
2982 				return;
2983 			}
2984 			DP_STATS_INC(pdev,
2985 				     deter_stats.ul_mimo_usr[user_idx],
2986 				     1);
2987 			break;
2988 		}
2989 		if (ppdu->num_ul_user_resp_valid) {
2990 			if (ppdu->num_ul_user_resp) {
2991 				DP_STATS_INC(pdev,
2992 					     deter_stats.ts[ppdu->txmode].trigger_success,
2993 					     1);
2994 			} else {
2995 				DP_STATS_INC(pdev,
2996 					     deter_stats.ts[ppdu->txmode].trigger_fail,
2997 					     1);
2998 			}
2999 		}
3000 	}
3001 }
3002 
3003 /**
3004  * dp_ppdu_desc_get_msduq() - Get msduq index from bitmap
3005  * @msduq_bitmap: MSDUQ bitmap from the PPDU descriptor user
3006  * @msduq_index: pointer filled with the selected MSDUQ index
3007  *
3008  * Return: None
3009  */
3010 static inline void
3011 dp_ppdu_desc_get_msduq(uint32_t msduq_bitmap, uint32_t *msduq_index)
3012 {
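	/* Queues are checked from the default (UDP / non-UDP) MSDU queues
	 * down through the custom and custom-extension priority queues;
	 * the first set bit wins.
	 */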
3013 	if ((msduq_bitmap & BIT(HTT_MSDUQ_INDEX_NON_UDP)) ||
3014 	    (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_UDP))) {
3015 		*msduq_index = MSDUQ_INDEX_DEFAULT;
3016 	} else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_PRIO_0)) {
3017 		*msduq_index = MSDUQ_INDEX_CUSTOM_PRIO_0;
3018 	} else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_PRIO_1)) {
3019 		*msduq_index = MSDUQ_INDEX_CUSTOM_PRIO_1;
3020 	} else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_0)) {
3021 		*msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_0;
3022 	} else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_1)) {
3023 		*msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_1;
3024 	} else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_2)) {
3025 		*msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_2;
3026 	} else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_3)) {
3027 		*msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_3;
3028 	} else {
3029 		*msduq_index = MSDUQ_INDEX_MAX;
3030 	}
3031 }
3032 
3033 /**
3034  * dp_ppdu_desc_user_deter_stats_update() - Update per-peer deterministic stats
3035  * @pdev: Datapath pdev handle
3036  * @peer: Datapath peer handle
3037  * @ppdu_desc: PPDU Descriptor
3038  * @user: PPDU Descriptor per user
3039  *
3040  * Return: None
3041  */
3042 static void
3043 dp_ppdu_desc_user_deter_stats_update(struct dp_pdev *pdev,
3044 				     struct dp_peer *peer,
3045 				     struct cdp_tx_completion_ppdu *ppdu_desc,
3046 				     struct cdp_tx_completion_ppdu_user *user)
3047 {
3048 	struct dp_mon_peer *mon_peer = NULL;
3049 	uint64_t avg_tx_rate = 0;
3050 	uint32_t ratekbps = 0;
3051 	uint32_t rix;
3052 	uint32_t msduq;
3053 	uint16_t ratecode = 0;
3054 	uint8_t txmode;
3055 	uint8_t tid;
3056 
3057 	if (!pdev || !ppdu_desc || !user || !peer)
3058 		return;
3059 
3060 	mon_peer = peer->monitor_peer;
3061 	if (qdf_unlikely(!mon_peer))
3062 		return;
3063 
3064 	if (ppdu_desc->txmode_type == TX_MODE_TYPE_UNKNOWN)
3065 		return;
3066 
3067 	if (ppdu_desc->txmode_type == TX_MODE_TYPE_UL &&
3068 	    (ppdu_desc->txmode != TX_MODE_UL_OFDMA_MU_BAR_TRIGGER)) {
3069 		if (user->tid < CDP_UL_TRIG_BK_TID ||
3070 		    user->tid > CDP_UL_TRIG_VO_TID)
3071 			return;
3072 
3073 		user->tid = UL_TRIGGER_TID_TO_DATA_TID(user->tid);
3074 	}
3075 
3076 	if (user->tid >= CDP_DATA_TID_MAX)
3077 		return;
3078 
3079 	ratekbps = dp_getrateindex(user->gi,
3080 				   user->mcs,
3081 				   user->nss,
3082 				   user->preamble,
3083 				   user->bw,
3084 				   user->punc_mode,
3085 				   &rix,
3086 				   &ratecode);
3087 
3088 	if (!ratekbps)
3089 		return;
3090 
3091 	avg_tx_rate = mon_peer->stats.deter_stats.avg_tx_rate;
3092 	avg_tx_rate = dp_ath_rate_lpf(avg_tx_rate,
3093 				      ratekbps);
3094 	DP_STATS_UPD(mon_peer,
3095 		     deter_stats.avg_tx_rate,
3096 		     avg_tx_rate);
3097 
3098 	txmode = ppdu_desc->txmode;
3099 	tid = user->tid;
3100 
3101 	if (ppdu_desc->txmode_type == TX_MODE_TYPE_DL) {
3102 		dp_ppdu_desc_get_msduq(user->msduq_bitmap, &msduq);
3103 		if (msduq == MSDUQ_INDEX_MAX)
3104 			return;
3105 
3106 		DP_STATS_INC(mon_peer,
3107 			     deter_stats.deter[tid].dl_det[msduq][txmode].mode_cnt,
3108 			     1);
3109 
3110 		DP_STATS_UPD(mon_peer,
3111 			     deter_stats.deter[tid].dl_det[msduq][txmode].avg_rate,
3112 			     avg_tx_rate);
3113 	} else {
3114 		DP_STATS_INC(mon_peer,
3115 			     deter_stats.deter[tid].ul_det[txmode].mode_cnt,
3116 			     1);
3117 
3118 		DP_STATS_UPD(mon_peer,
3119 			     deter_stats.deter[tid].ul_det[txmode].avg_rate,
3120 			     avg_tx_rate);
3121 		if (!user->completion_status) {
3122 			DP_STATS_INC(mon_peer,
3123 				     deter_stats.deter[tid].ul_det[txmode].trigger_success,
3124 				     1);
3125 		} else {
3126 			DP_STATS_INC(mon_peer,
3127 				     deter_stats.deter[tid].ul_det[txmode].trigger_fail,
3128 				     1);
3129 		}
3130 	}
3131 }
3132 #else
3133 static inline
3134 void dp_ppdu_desc_get_txmode(struct cdp_tx_completion_ppdu *ppdu)
3135 {
3136 }
3137 
3138 static inline void
3139 dp_ppdu_desc_get_msduq(uint32_t msduq_bitmap, uint32_t *msduq_index)
3140 {
3141 }
3142 
3143 static void
3144 dp_ppdu_desc_user_deter_stats_update(struct dp_pdev *pdev,
3145 				     struct dp_peer *peer,
3146 				     struct cdp_tx_completion_ppdu *ppdu_desc,
3147 				     struct cdp_tx_completion_ppdu_user *user)
3148 {
3149 }
3150 
3151 static inline void
3152 dp_pdev_telemetry_stats_update(
3153 		struct dp_pdev *pdev,
3154 		struct cdp_tx_completion_ppdu_user *ppdu)
3155 { }
3156 
3157 static inline void
3158 dp_pdev_update_deter_stats(struct dp_pdev *pdev,
3159 			   struct cdp_tx_completion_ppdu *ppdu)
3160 { }
3161 #endif
3162 
3163 /**
3164  * dp_tx_stats_update() - Update per-peer statistics
3165  * @pdev: Datapath pdev handle
3166  * @peer: Datapath peer handle
3167  * @ppdu: PPDU Descriptor per user
3168  * @ppdu_desc: PPDU Descriptor
3169  *
3170  * Return: None
3171  */
3172 static void
3173 dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
3174 		   struct cdp_tx_completion_ppdu_user *ppdu,
3175 		   struct cdp_tx_completion_ppdu *ppdu_desc)
3176 {
3177 	uint8_t preamble, mcs, res_mcs = 0;
3178 	uint16_t num_msdu;
3179 	uint16_t num_mpdu;
3180 	uint16_t mpdu_tried;
3181 	uint16_t mpdu_failed;
3182 	struct dp_mon_ops *mon_ops;
3183 	enum cdp_ru_index ru_index;
3184 	struct dp_mon_peer *mon_peer = NULL;
3185 	uint32_t ratekbps = 0;
3186 	uint64_t tx_byte_count;
3187 	uint8_t idx = 0;
3188 	bool is_preamble_valid = true;
3189 
3190 	preamble = ppdu->preamble;
3191 	mcs = ppdu->mcs;
3192 	num_msdu = ppdu->num_msdu;
3193 	num_mpdu = ppdu->mpdu_success;
3194 	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
3195 	mpdu_failed = mpdu_tried - num_mpdu;
3196 	tx_byte_count = ppdu->success_bytes;
3197 
3198 	/* If the peer statistics are already processed as part of
3199 	 * per-MSDU completion handler, do not process these again in per-PPDU
3200 	 * indications
3201 	 */
3202 	if (pdev->soc->process_tx_status)
3203 		return;
3204 
3205 	mon_peer = peer->monitor_peer;
3206 	if (!mon_peer)
3207 		return;
3208 
3209 	if (!ppdu->is_mcast) {
3210 		DP_STATS_INC(mon_peer, tx.tx_ucast_total.num, num_msdu);
3211 		DP_STATS_INC(mon_peer, tx.tx_ucast_total.bytes,
3212 			     tx_byte_count);
3213 	}
3214 
3215 	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
3216 		/*
3217 		 * All failed mpdu will be retried, so incrementing
3218 		 * retries mpdu based on mpdu failed. Even for
3219 		 * ack failure i.e for long retries we get
3220 		 * mpdu failed equal mpdu tried.
3221 		 */
3222 		DP_STATS_INC(mon_peer, tx.retries, mpdu_failed);
3223 		dp_pdev_telemetry_stats_update(pdev, ppdu);
3224 		return;
3225 	}
3226 
3227 	if (ppdu->is_ppdu_cookie_valid)
3228 		DP_STATS_INC(mon_peer, tx.num_ppdu_cookie_valid, 1);
3229 
3230 	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
3231 	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
3232 		if (qdf_unlikely(ppdu->mu_group_id &&
3233 				 !(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
3234 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3235 				  "mu_group_id out of bounds!\n");
3236 		else
3237 			DP_STATS_UPD(mon_peer, tx.mu_group_id[ppdu->mu_group_id],
3238 				     (ppdu->user_pos + 1));
3239 	}
3240 
3241 	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
3242 	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
3243 		DP_STATS_UPD(mon_peer, tx.ru_tones, ppdu->ru_tones);
3244 		DP_STATS_UPD(mon_peer, tx.ru_start, ppdu->ru_start);
3245 		ru_index = dp_get_ru_index_frm_ru_tones(ppdu->ru_tones);
3246 		if (ru_index != RU_INDEX_MAX) {
3247 			DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_msdu,
3248 				     num_msdu);
3249 			DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_mpdu,
3250 				     num_mpdu);
3251 			DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].mpdu_tried,
3252 				     mpdu_tried);
3253 		}
3254 	}
3255 
3256 	/*
3257 	 * All failed mpdu will be retried, so incrementing
3258 	 * retries mpdu based on mpdu failed. Even for
3259 	 * ack failure i.e for long retries we get
3260 	 * mpdu failed equal mpdu tried.
3261 	 */
3262 	DP_STATS_INC(mon_peer, tx.retries, mpdu_failed);
3263 
3264 	DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
3265 		     num_msdu);
3266 	DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
3267 		     num_mpdu);
3268 	DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
3269 		     mpdu_tried);
3270 
3271 	DP_STATS_INC(mon_peer, tx.sgi_count[ppdu->gi], num_msdu);
3272 	DP_STATS_INC(mon_peer, tx.bw[ppdu->bw], num_msdu);
3273 	DP_STATS_INC(mon_peer, tx.nss[ppdu->nss], num_msdu);
3274 	if (ppdu->tid < CDP_DATA_TID_MAX) {
3275 		DP_STATS_INC(mon_peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
3276 			     num_msdu);
3277 		DP_STATS_INC(mon_peer,
3278 			     tx.wme_ac_type_bytes[TID_TO_WME_AC(ppdu->tid)],
3279 			     tx_byte_count);
3280 	}
3281 
3282 	DP_STATS_INCC(mon_peer, tx.stbc, num_msdu, ppdu->stbc);
3283 	DP_STATS_INCC(mon_peer, tx.ldpc, num_msdu, ppdu->ldpc);
3284 	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
3285 		DP_STATS_UPD(mon_peer, tx.last_ack_rssi, ppdu_desc->ack_rssi);
3286 
3287 	if (!ppdu->is_mcast) {
3288 		DP_STATS_INC(mon_peer, tx.tx_ucast_success.num, num_msdu);
3289 		DP_STATS_INC(mon_peer, tx.tx_ucast_success.bytes,
3290 			     tx_byte_count);
3291 	}
3292 
3293 	switch (preamble) {
3294 	case DOT11_A:
3295 		res_mcs = (mcs < MAX_MCS_11A) ? mcs : (MAX_MCS - 1);
3296 	break;
3297 	case DOT11_B:
3298 		res_mcs = (mcs < MAX_MCS_11B) ? mcs : (MAX_MCS - 1);
3299 	break;
3300 	case DOT11_N:
3301 		res_mcs = (mcs < MAX_MCS_11N) ? mcs : (MAX_MCS - 1);
3302 	break;
3303 	case DOT11_AC:
3304 		res_mcs = (mcs < MAX_MCS_11AC) ? mcs : (MAX_MCS - 1);
3305 	break;
3306 	case DOT11_AX:
3307 		res_mcs = (mcs < MAX_MCS_11AX) ? mcs : (MAX_MCS - 1);
3308 	break;
3309 	default:
3310 		is_preamble_valid = false;
3311 	}
3312 
3313 	DP_STATS_INCC(mon_peer,
3314 		      tx.pkt_type[preamble].mcs_count[res_mcs], num_msdu,
3315 		      is_preamble_valid);
3316 	DP_STATS_INCC(mon_peer, tx.ampdu_cnt, num_mpdu, ppdu->is_ampdu);
3317 	DP_STATS_INCC(mon_peer, tx.non_ampdu_cnt, num_mpdu, !(ppdu->is_ampdu));
3318 	DP_STATS_INCC(mon_peer, tx.pream_punct_cnt, 1, ppdu->pream_punct);
3319 	DP_STATS_INC(mon_peer, tx.tx_ppdus, 1);
3320 	DP_STATS_INC(mon_peer, tx.tx_mpdus_success, num_mpdu);
3321 	DP_STATS_INC(mon_peer, tx.tx_mpdus_tried, mpdu_tried);
3322 
3323 	for (idx = 0; idx < CDP_RSSI_CHAIN_LEN; idx++)
3324 		DP_STATS_UPD(mon_peer, tx.rssi_chain[idx], ppdu->rssi_chain[idx]);
3325 
3326 	mon_ops = dp_mon_ops_get(pdev->soc);
3327 	if (mon_ops && mon_ops->mon_tx_stats_update)
3328 		mon_ops->mon_tx_stats_update(mon_peer, ppdu);
3329 
3330 	dp_tx_rate_stats_update(peer, ppdu);
3331 	dp_pdev_telemetry_stats_update(pdev, ppdu);
3332 
3333 	dp_peer_stats_notify(pdev, peer);
3334 
3335 	ratekbps = mon_peer->stats.tx.tx_rate;
3336 	DP_STATS_UPD(mon_peer, tx.last_tx_rate, ratekbps);
3337 
3338 	dp_send_stats_event(pdev, peer, ppdu->peer_id);
3339 }
3340 
3341 /**
3342  * dp_get_ppdu_info_user_index() - Find and allocate a per-user
3343  * descriptor for a PPDU, if a new peer id arrives in a PPDU
3344  * @pdev: DP pdev handle
3345  * @peer_id: peer unique identifier
3346  * @ppdu_info: per ppdu tlv structure
3347  *
3348  * Return: user index to be populated
3349  */
3350 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
3351 					   uint16_t peer_id,
3352 					   struct ppdu_info *ppdu_info)
3353 {
3354 	uint8_t user_index = 0;
3355 	struct cdp_tx_completion_ppdu *ppdu_desc;
3356 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3357 
3358 	ppdu_desc =
3359 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3360 
3361 	while ((user_index + 1) <= ppdu_info->last_user) {
3362 		ppdu_user_desc = &ppdu_desc->user[user_index];
3363 		if (ppdu_user_desc->peer_id != peer_id) {
3364 			user_index++;
3365 			continue;
3366 		} else {
3367 			/* Max users possible is 8 so user array index should
3368 			 * not exceed 7
3369 			 */
3370 			qdf_assert_always(user_index <= (ppdu_desc->max_users - 1));
3371 			return user_index;
3372 		}
3373 	}
3374 
3375 	ppdu_info->last_user++;
3376 	/* Max users possible is 8 so last user should not exceed 8 */
3377 	qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users);
3378 	return ppdu_info->last_user - 1;
3379 }
3380 
3381 /**
3382  * dp_process_ppdu_stats_common_tlv() - Process htt_ppdu_stats_common_tlv
3383  * @pdev: DP pdev handle
3384  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
3385  * @ppdu_info: per ppdu tlv structure
3386  *
3387  * Return: void
3388  */
3389 static void
3390 dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
3391 				 uint32_t *tag_buf,
3392 				 struct ppdu_info *ppdu_info)
3393 {
3394 	uint16_t frame_type;
3395 	uint16_t frame_ctrl;
3396 	uint16_t freq;
3397 	struct dp_soc *soc = NULL;
3398 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3399 	uint64_t ppdu_start_timestamp;
3400 	uint32_t eval_start_timestamp;
3401 	uint32_t *start_tag_buf;
3402 	uint32_t *ts_tag_buf;
3403 
3404 	start_tag_buf = tag_buf;
3405 	ppdu_desc =
3406 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3407 
3408 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
3409 
3410 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
3411 	ppdu_info->sched_cmdid =
3412 		HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
3413 	ppdu_desc->num_users =
3414 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
3415 
3416 	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
3417 
3418 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
3419 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
3420 	ppdu_desc->htt_frame_type = frame_type;
3421 
3422 	ppdu_desc->htt_seq_type =
3423 			HTT_PPDU_STATS_COMMON_TLV_PPDU_SEQ_TYPE_GET(*tag_buf);
3424 
3425 	frame_ctrl = ppdu_desc->frame_ctrl;
3426 
3427 	ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
3428 
3429 	switch (frame_type) {
3430 	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
3431 	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
3432 	case HTT_STATS_FTYPE_SGEN_QOS_NULL:
3433 		/*
3434 		 * for management packets the frame type comes as DATA_SU,
3435 		 * so check frame_ctrl before setting frame_type
3436 		 */
3437 		if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
3438 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
3439 		else
3440 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
3441 	break;
3442 	case HTT_STATS_FTYPE_SGEN_MU_BAR:
3443 	case HTT_STATS_FTYPE_SGEN_BAR:
3444 	case HTT_STATS_FTYPE_SGEN_BE_MU_BAR:
3445 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
3446 	break;
3447 	default:
3448 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
3449 	break;
3450 	}
3451 
3452 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
3453 	ppdu_desc->tx_duration = *tag_buf;
3454 
3455 	tag_buf = start_tag_buf +
3456 			HTT_GET_STATS_CMN_INDEX(SCH_EVAL_START_TSTMP_L32_US);
3457 	eval_start_timestamp = *tag_buf;
3458 
3459 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
3460 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
3461 
3462 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
3463 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
3464 	if (freq != ppdu_desc->channel) {
3465 		soc = pdev->soc;
3466 		ppdu_desc->channel = freq;
3467 		pdev->operating_channel.freq = freq;
3468 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
3469 			pdev->operating_channel.num =
3470 			    soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
3471 								 pdev->pdev_id,
3472 								 freq);
3473 
3474 		if (soc && soc->cdp_soc.ol_ops->freq_to_band)
3475 			pdev->operating_channel.band =
3476 			       soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
3477 								 pdev->pdev_id,
3478 								 freq);
3479 	}
3480 
3481 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
3482 
3483 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
3484 	ppdu_desc->phy_ppdu_tx_time_us =
3485 		HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf);
3486 	ppdu_desc->beam_change =
3487 		HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
3488 	ppdu_desc->doppler =
3489 		HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf);
3490 	ppdu_desc->spatial_reuse =
3491 		HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf);
3492 	ppdu_desc->num_ul_users =
3493 		HTT_PPDU_STATS_COMMON_TLV_NUM_UL_EXPECTED_USERS_GET(*tag_buf);
3494 
3495 	dp_tx_capture_htt_frame_counter(pdev, frame_type);
3496 
3497 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
3498 	ppdu_start_timestamp = *tag_buf;
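	/* Merge the upper timestamp bits into the previously stored lower
	 * 32 bits to form the full 64-bit PPDU start timestamp.
	 */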
3499 	ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
3500 					     HTT_SHIFT_UPPER_TIMESTAMP) &
3501 					    HTT_MASK_UPPER_TIMESTAMP);
3502 
3503 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
3504 					ppdu_desc->tx_duration;
3505 	/* Ack time stamp is same as end time stamp */
3506 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
3507 
3508 	ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
3509 	ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
3510 	ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
3517 
3518 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(BSSCOLOR_OBSS_PSR);
3519 	ppdu_desc->bss_color =
3520 		HTT_PPDU_STATS_COMMON_TLV_BSS_COLOR_ID_GET(*tag_buf);
3521 
3522 	ppdu_desc->backoff_ac_valid =
3523 		HTT_PPDU_STATS_COMMON_TLV_BACKOFF_AC_VALID_GET(*tag_buf);
3524 	if (ppdu_desc->backoff_ac_valid) {
3525 		ppdu_desc->backoff_ac =
3526 			HTT_PPDU_STATS_COMMON_TLV_BACKOFF_AC_GET(*tag_buf);
3527 		ts_tag_buf = start_tag_buf +
3528 			HTT_GET_STATS_CMN_INDEX(SCH_EVAL_START_TSTMP_L32_US);
3529 		eval_start_timestamp = *ts_tag_buf;
3530 
3531 		ts_tag_buf = start_tag_buf +
3532 			HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
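		/* Channel access delay: time from scheduler evaluation
		 * start to the actual PPDU start.
		 */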
3533 		ppdu_desc->ch_access_delay =
3534 			*ts_tag_buf - eval_start_timestamp;
3535 	}
3536 	ppdu_desc->num_ul_user_resp_valid =
3537 		HTT_PPDU_STATS_COMMON_TLV_NUM_UL_USER_RESPONSES_VALID_GET(*tag_buf);
3538 	if (ppdu_desc->num_ul_user_resp_valid)
3539 		ppdu_desc->num_ul_user_resp =
3540 			HTT_PPDU_STATS_COMMON_TLV_NUM_UL_USER_RESPONSES_GET(*tag_buf);
3541 }
3542 
3543 /**
3544  * dp_process_ppdu_stats_user_common_tlv() - Process ppdu_stats_user_common
3545  * @pdev: DP PDEV handle
3546  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
3547  * @ppdu_info: per ppdu tlv structure
3548  *
3549  * Return: void
3550  */
3551 static void dp_process_ppdu_stats_user_common_tlv(
3552 		struct dp_pdev *pdev, uint32_t *tag_buf,
3553 		struct ppdu_info *ppdu_info)
3554 {
3555 	uint16_t peer_id;
3556 	struct cdp_tx_completion_ppdu *ppdu_desc;
3557 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3558 	uint8_t curr_user_index = 0;
3559 	struct dp_peer *peer;
3560 	struct dp_vdev *vdev;
3561 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3562 
3563 	ppdu_desc =
3564 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3565 
3566 	tag_buf++;
3567 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
3568 
3569 	curr_user_index =
3570 		dp_get_ppdu_info_user_index(pdev,
3571 					    peer_id, ppdu_info);
3572 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3573 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3574 
3575 	ppdu_desc->vdev_id =
3576 		HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
3577 
3578 	ppdu_user_desc->peer_id = peer_id;
3579 
3580 	tag_buf++;
3581 
3582 	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
3583 		ppdu_user_desc->delayed_ba = 1;
3584 		ppdu_desc->delayed_ba = 1;
3585 	}
3586 
3587 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
3588 		ppdu_user_desc->is_mcast = true;
3589 		ppdu_user_desc->mpdu_tried_mcast =
3590 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
3591 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
3592 	} else {
3593 		ppdu_user_desc->mpdu_tried_ucast =
3594 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
3595 	}
3596 
3597 	ppdu_user_desc->is_seq_num_valid =
3598 	HTT_PPDU_STATS_USER_COMMON_TLV_IS_SQNUM_VALID_IN_BUFFER_GET(*tag_buf);
3599 	tag_buf++;
3600 
3601 	ppdu_user_desc->qos_ctrl =
3602 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
3603 	ppdu_user_desc->frame_ctrl =
3604 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
3605 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
3606 
3607 	if (ppdu_user_desc->delayed_ba)
3608 		ppdu_user_desc->mpdu_success = 0;
3609 
3610 	tag_buf += 3;
3611 
3612 	if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
3613 		ppdu_user_desc->ppdu_cookie =
3614 			HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
3615 		ppdu_user_desc->is_ppdu_cookie_valid = 1;
3616 	}
3617 
	/* returning early would leave the other fields unpopulated */
3619 	if (peer_id == DP_SCAN_PEER_ID) {
3620 		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
3621 					     DP_MOD_ID_TX_PPDU_STATS);
3622 		if (!vdev)
3623 			return;
3624 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
3625 			     QDF_MAC_ADDR_SIZE);
3626 		dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS);
3627 	} else {
3628 		peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
3629 					     DP_MOD_ID_TX_PPDU_STATS);
3630 		if (!peer) {
3631 			/*
3632 			 * fw sends peer_id which is about to removed but
3633 			 * it was already removed in host.
3634 			 * eg: for disassoc, fw send ppdu stats
3635 			 * with peer id equal to previously associated
3636 			 * peer's peer_id but it was removed
3637 			 */
3638 			vdev = dp_vdev_get_ref_by_id(pdev->soc,
3639 						     ppdu_desc->vdev_id,
3640 						     DP_MOD_ID_TX_PPDU_STATS);
3641 			if (!vdev)
3642 				return;
3643 			qdf_mem_copy(ppdu_user_desc->mac_addr,
3644 				     vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
3645 			dp_vdev_unref_delete(pdev->soc, vdev,
3646 					     DP_MOD_ID_TX_PPDU_STATS);
3647 			return;
3648 		}
3649 		qdf_mem_copy(ppdu_user_desc->mac_addr,
3650 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
3651 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3652 	}
3653 
3654 	tag_buf += 10;
3655 	ppdu_user_desc->msduq_bitmap = *tag_buf;
3656 }
3657 
3658 /**
3659  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
3660  * @pdev: DP pdev handle
3661  * @tag_buf: T2H message buffer carrying the user rate TLV
3662  * @ppdu_info: per ppdu tlv structure
3663  *
3664  * Return: void
3665  */
3666 static void
3667 dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
3668 				    uint32_t *tag_buf,
3669 				    struct ppdu_info *ppdu_info)
3670 {
3671 	uint16_t peer_id;
3672 	struct cdp_tx_completion_ppdu *ppdu_desc;
3673 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3674 	uint8_t curr_user_index = 0;
3675 	struct dp_vdev *vdev;
3676 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3677 	uint8_t bw, ru_format;
3678 	uint16_t ru_size;
3679 
3680 	ppdu_desc =
3681 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3682 
3683 	tag_buf++;
3684 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
3685 
3686 	curr_user_index =
3687 		dp_get_ppdu_info_user_index(pdev,
3688 					    peer_id, ppdu_info);
3689 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3690 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3691 	if (peer_id == DP_SCAN_PEER_ID) {
3692 		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
3693 					     DP_MOD_ID_TX_PPDU_STATS);
3694 		if (!vdev)
3695 			return;
3696 		dp_vdev_unref_delete(pdev->soc, vdev,
3697 				     DP_MOD_ID_TX_PPDU_STATS);
3698 	}
3699 	ppdu_user_desc->peer_id = peer_id;
3700 
3701 	ppdu_user_desc->tid =
3702 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
3703 
3704 	tag_buf += 1;
3705 
3706 	ppdu_user_desc->user_pos =
3707 		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
3708 	ppdu_user_desc->mu_group_id =
3709 		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
3710 
3711 	ru_format = HTT_PPDU_STATS_USER_RATE_TLV_RU_FORMAT_GET(*tag_buf);
3712 
3713 	tag_buf += 1;
3714 
3715 	if (!ru_format) {
3716 		/* ru_format = 0: ru_end, ru_start */
3717 		ppdu_user_desc->ru_start =
3718 			HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
3719 		ppdu_user_desc->ru_tones =
3720 			(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
3721 			HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
3722 	} else if (ru_format == 1) {
3723 		/* ru_format = 1: ru_index, ru_size */
3724 		ru_size = HTT_PPDU_STATS_USER_RATE_TLV_RU_SIZE_GET(*tag_buf);
3725 		ppdu_user_desc->ru_tones =
3726 				dp_mon_get_ru_width_from_ru_size(ru_size);
3727 	} else {
3728 		dp_mon_debug("Unsupported ru_format: %d rcvd", ru_format);
3729 	}
3730 	ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones;
3731 
3732 	tag_buf += 2;
3733 
3734 	ppdu_user_desc->ppdu_type =
3735 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
3736 
3737 	tag_buf++;
3738 	ppdu_user_desc->tx_rate = *tag_buf;
3739 
3740 	ppdu_user_desc->ltf_size =
3741 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
3742 	ppdu_user_desc->stbc =
3743 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
3744 	ppdu_user_desc->he_re =
3745 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
3746 	ppdu_user_desc->txbf =
3747 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
3748 	bw = HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf);
3749 	/* Align bw value as per host data structures */
3750 	if (bw == HTT_PPDU_STATS_BANDWIDTH_320MHZ)
3751 		ppdu_user_desc->bw = bw - 3;
3752 	else
3753 		ppdu_user_desc->bw = bw - 2;
3754 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
3755 	ppdu_desc->usr_nss_sum += ppdu_user_desc->nss;
3756 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
3757 	ppdu_user_desc->preamble =
3758 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
3759 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
3760 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
3761 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
3762 
3763 	tag_buf += 2;
3764 	ppdu_user_desc->punc_pattern_bitmap =
3765 		HTT_PPDU_STATS_USER_RATE_TLV_PUNC_PATTERN_BITMAP_GET(*tag_buf);
3766 }
3767 
3768 /**
3769  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv() - Process
3770  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
3771  * @pdev: DP PDEV handle
3772  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
3773  * @ppdu_info: per ppdu tlv structure
3774  *
3775  * Return: void
3776  */
3777 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
3778 		struct dp_pdev *pdev, uint32_t *tag_buf,
3779 		struct ppdu_info *ppdu_info)
3780 {
3781 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
3782 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
3783 
3784 	struct cdp_tx_completion_ppdu *ppdu_desc;
3785 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3786 	uint8_t curr_user_index = 0;
3787 	uint16_t peer_id;
3788 	uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
3789 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3790 
3791 	ppdu_desc =
3792 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3793 
3794 	tag_buf++;
3795 
3796 	peer_id =
3797 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
3798 
3799 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
3800 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3801 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3802 	ppdu_user_desc->peer_id = peer_id;
3803 
3804 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
3805 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
3806 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
3807 
3808 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
3809 						   (void *)ppdu_user_desc,
3810 						   ppdu_info->ppdu_id,
3811 						   size);
3812 }
3813 
3814 /**
3815  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv() - Process
3816  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
3817  * @pdev: DP PDEV handle
3818  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
3819  * @ppdu_info: per ppdu tlv structure
3820  *
3821  * Return: void
3822  */
3823 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
3824 		struct dp_pdev *pdev, uint32_t *tag_buf,
3825 		struct ppdu_info *ppdu_info)
3826 {
3827 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
3828 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
3829 
3830 	struct cdp_tx_completion_ppdu *ppdu_desc;
3831 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3832 	uint8_t curr_user_index = 0;
3833 	uint16_t peer_id;
3834 	uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
3835 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3836 
3837 	ppdu_desc =
3838 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3839 
3840 	tag_buf++;
3841 
3842 	peer_id =
3843 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
3844 
3845 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
3846 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3847 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3848 	ppdu_user_desc->peer_id = peer_id;
3849 
3850 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
3851 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
3852 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
3853 
3854 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
3855 						   (void *)ppdu_user_desc,
3856 						   ppdu_info->ppdu_id,
3857 						   size);
3858 }
3859 
3860 /**
3861  * dp_process_ppdu_stats_user_cmpltn_common_tlv() - Process
3862  * htt_ppdu_stats_user_cmpltn_common_tlv
3863  * @pdev: DP PDEV handle
3864  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
3865  * @ppdu_info: per ppdu tlv structure
3866  *
3867  * Return: void
3868  */
3869 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
3870 		struct dp_pdev *pdev, uint32_t *tag_buf,
3871 		struct ppdu_info *ppdu_info)
3872 {
3873 	uint16_t peer_id;
3874 	struct cdp_tx_completion_ppdu *ppdu_desc;
3875 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3876 	uint8_t curr_user_index = 0;
3877 	uint8_t bw_iter;
3878 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
3879 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
3880 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3881 
3882 	ppdu_desc =
3883 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3884 
3885 	tag_buf++;
3886 	peer_id =
3887 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
3888 
3889 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
3890 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3891 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3892 	ppdu_user_desc->peer_id = peer_id;
3893 
3894 	ppdu_user_desc->completion_status =
3895 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
3896 				*tag_buf);
3897 
3898 	ppdu_user_desc->tid =
3899 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
3900 
3901 	tag_buf++;
3902 	if (qdf_likely(ppdu_user_desc->completion_status ==
3903 			HTT_PPDU_STATS_USER_STATUS_OK)) {
3904 		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
3905 		ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi;
3906 		ppdu_user_desc->ack_rssi_valid = 1;
3907 	} else {
3908 		ppdu_user_desc->ack_rssi_valid = 0;
3909 	}
3910 
3911 	tag_buf++;
3912 
3913 	ppdu_user_desc->mpdu_success =
3914 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
3915 
3916 	ppdu_user_desc->mpdu_failed =
3917 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
3918 						ppdu_user_desc->mpdu_success;
3919 
3920 	tag_buf++;
3921 
3922 	ppdu_user_desc->long_retries =
3923 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
3924 
3925 	ppdu_user_desc->short_retries =
3926 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
3927 	ppdu_user_desc->retry_mpdus =
3928 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
3929 
3930 	ppdu_user_desc->is_ampdu =
3931 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
3932 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
3933 
3934 	ppdu_desc->resp_type =
3935 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
3936 	ppdu_desc->mprot_type =
3937 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
3938 	ppdu_desc->rts_success =
3939 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
3940 	ppdu_desc->rts_failure =
3941 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);
3942 
3943 	ppdu_user_desc->mprot_type = ppdu_desc->mprot_type;
3944 	ppdu_user_desc->rts_success = ppdu_desc->rts_success;
3945 	ppdu_user_desc->rts_failure = ppdu_desc->rts_failure;
3946 
3947 	ppdu_user_desc->pream_punct =
3948 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf);
3949 
3950 	ppdu_info->compltn_common_tlv++;
3951 
3952 	/*
3953 	 * MU BAR may send request to n users but we may received ack only from
3954 	 * m users. To have count of number of users respond back, we have a
3955 	 * separate counter bar_num_users per PPDU that get increment for every
3956 	 * htt_ppdu_stats_user_cmpltn_common_tlv
3957 	 */
3958 	ppdu_desc->bar_num_users++;
3959 
3960 	tag_buf++;
3961 	for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
3962 		ppdu_user_desc->rssi_chain[bw_iter] =
3963 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
3964 		tag_buf++;
3965 	}
3966 
3967 	ppdu_user_desc->sa_tx_antenna =
3968 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);
3969 
3970 	tag_buf++;
3971 	ppdu_user_desc->sa_is_training =
3972 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
3973 	if (ppdu_user_desc->sa_is_training) {
3974 		ppdu_user_desc->sa_goodput =
3975 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
3976 	}
3977 
3978 	tag_buf++;
3979 	for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
3980 		ppdu_user_desc->sa_max_rates[bw_iter] =
3981 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
3982 	}
3983 
3984 	tag_buf += CDP_NUM_SA_BW;
3985 	ppdu_user_desc->current_rate_per =
3986 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
3987 
3988 	tag_buf++;
3989 	/* Skip SW RTS */
3990 
3991 	tag_buf++;
3992 	/* Extract 320MHz MAX PHY ratecode */
3993 	ppdu_user_desc->sa_max_rates[CDP_SA_BW320_INX] =
3994 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(*tag_buf);
3995 }
3996 
3997 /**
3998  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv() - Process
3999  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
4000  * @pdev: DP PDEV handle
4001  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
4002  * @ppdu_info: per ppdu tlv structure
4003  *
4004  * Return: void
4005  */
4006 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
4007 		struct dp_pdev *pdev, uint32_t *tag_buf,
4008 		struct ppdu_info *ppdu_info)
4009 {
4010 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
4011 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
4012 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
4013 	struct cdp_tx_completion_ppdu *ppdu_desc;
4014 	uint8_t curr_user_index = 0;
4015 	uint16_t peer_id;
4016 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
4017 
4018 	ppdu_desc =
4019 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
4020 
4021 	tag_buf++;
4022 
4023 	peer_id =
4024 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
4025 
4026 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
4027 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
4028 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
4029 	ppdu_user_desc->peer_id = peer_id;
4030 
4031 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
4032 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
4033 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
4034 	ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
4035 }
4036 
4037 /**
4038  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv() - Process
4039  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
4040  * @pdev: DP PDEV handle
4041  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
4042  * @ppdu_info: per ppdu tlv structure
4043  *
4044  * Return: void
4045  */
4046 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
4047 		struct dp_pdev *pdev, uint32_t *tag_buf,
4048 		struct ppdu_info *ppdu_info)
4049 {
4050 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
4051 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
4052 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
4053 	struct cdp_tx_completion_ppdu *ppdu_desc;
4054 	uint8_t curr_user_index = 0;
4055 	uint16_t peer_id;
4056 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
4057 
4058 	ppdu_desc =
4059 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
4060 
4061 	tag_buf++;
4062 
4063 	peer_id =
4064 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
4065 
4066 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
4067 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
4068 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
4069 	ppdu_user_desc->peer_id = peer_id;
4070 
4071 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
4072 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
4073 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
4074 	ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
4075 }
4076 
4077 /**
4078  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv() - Process
4079  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
4080  * @pdev: DP PDEV handle
4081  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
4082  * @ppdu_info: per ppdu tlv structure
4083  *
4084  * Return: void
4085  */
4086 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
4087 		struct dp_pdev *pdev, uint32_t *tag_buf,
4088 		struct ppdu_info *ppdu_info)
4089 {
4090 	uint16_t peer_id;
4091 	struct cdp_tx_completion_ppdu *ppdu_desc;
4092 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
4093 	uint8_t curr_user_index = 0;
4094 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
4095 
4096 	ppdu_desc =
4097 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
4098 
4099 	tag_buf += 2;
4100 	peer_id =
4101 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
4102 
4103 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
4104 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
4105 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
4106 	if (!ppdu_user_desc->ack_ba_tlv) {
4107 		ppdu_user_desc->ack_ba_tlv = 1;
4108 	} else {
4109 		pdev->stats.ack_ba_comes_twice++;
4110 		return;
4111 	}
4112 
4113 	ppdu_user_desc->peer_id = peer_id;
4114 
4115 	tag_buf++;
	/* Do not update ppdu_desc->tid from this TLV */
4117 	ppdu_user_desc->num_mpdu =
4118 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
4119 
4120 	ppdu_user_desc->num_msdu =
4121 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
4122 
4123 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
4124 
4125 	tag_buf++;
4126 	ppdu_user_desc->start_seq =
4127 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
4128 			*tag_buf);
4129 
4130 	tag_buf++;
4131 	ppdu_user_desc->success_bytes = *tag_buf;
4132 
4133 	/* increase ack ba tlv counter on successful mpdu */
4134 	if (ppdu_user_desc->num_mpdu)
4135 		ppdu_info->ack_ba_tlv++;
4136 
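	/*
	 * If no BA bitmap TLV was seen for this user (ba_size still 0,
	 * likely an ack response rather than a block ack), synthesize a
	 * minimal one-entry bitmap from start_seq so later processing
	 * sees a consistent BA window.
	 */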
4137 	if (ppdu_user_desc->ba_size == 0) {
4138 		ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
4139 		ppdu_user_desc->ba_bitmap[0] = 1;
4140 		ppdu_user_desc->ba_size = 1;
4141 	}
4142 }
4143 
4144 /**
4145  * dp_process_ppdu_stats_user_common_array_tlv() - Process
4146  * htt_ppdu_stats_user_common_array_tlv
4147  * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_usr_common_array_tlv_v
4149  * @ppdu_info: per ppdu tlv structure
4150  *
4151  * Return: void
4152  */
4153 static void dp_process_ppdu_stats_user_common_array_tlv(
4154 		struct dp_pdev *pdev, uint32_t *tag_buf,
4155 		struct ppdu_info *ppdu_info)
4156 {
4157 	uint32_t peer_id;
4158 	struct cdp_tx_completion_ppdu *ppdu_desc;
4159 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
4160 	uint8_t curr_user_index = 0;
4161 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
4162 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
4163 
4164 	ppdu_desc =
4165 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
4166 
4167 	tag_buf++;
4168 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
4169 	tag_buf += 3;
4170 	peer_id =
4171 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
4172 
4173 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
4174 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4175 			  "Peer with peer_id: %u not found", peer_id);
4176 		return;
4177 	}
4178 
4179 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
4180 
4181 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
4182 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
4183 
4184 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
4185 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
4186 
4187 	tag_buf++;
4188 
4189 	ppdu_user_desc->success_msdus =
4190 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
4191 	ppdu_user_desc->retry_msdus =
4192 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
4193 	tag_buf++;
4194 	ppdu_user_desc->failed_msdus =
4195 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
4196 }
4197 
4198 /**
4199  * dp_process_ppdu_stats_user_compltn_flush_tlv() - Process
4200  * htt_ppdu_stats_flush_tlv
4201  * @pdev: DP PDEV handle
4202  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
4203  * @ppdu_info: per ppdu tlv structure
4204  *
4205  * Return: void
4206  */
4207 static void
4208 dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
4209 					     uint32_t *tag_buf,
4210 					     struct ppdu_info *ppdu_info)
4211 {
4212 	struct cdp_tx_completion_ppdu *ppdu_desc;
4213 	uint32_t peer_id;
4214 	uint8_t tid;
4215 	struct dp_peer *peer;
4216 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4217 	struct dp_mon_peer *mon_peer = NULL;
4218 
4219 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
4220 				qdf_nbuf_data(ppdu_info->nbuf);
4221 	ppdu_desc->is_flush = 1;
4222 
4223 	tag_buf++;
4224 	ppdu_desc->drop_reason = *tag_buf;
4225 
4226 	tag_buf++;
4227 	ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
4228 	ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
4229 	ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);
4230 
4231 	tag_buf++;
4232 	peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
4233 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
4234 
4235 	ppdu_desc->num_users = 1;
4236 	ppdu_desc->user[0].peer_id = peer_id;
4237 	ppdu_desc->user[0].tid = tid;
4238 
4239 	ppdu_desc->queue_type =
4240 			HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);
4241 
4242 	peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
4243 				     DP_MOD_ID_TX_PPDU_STATS);
4244 	if (!peer)
4245 		goto add_ppdu_to_sched_list;
4246 
4247 	if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
4248 		mon_peer = peer->monitor_peer;
4249 		DP_STATS_INC(mon_peer,
4250 			     tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
4251 			     ppdu_desc->num_msdu);
4252 	}
4253 
4254 	dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
4255 
4256 add_ppdu_to_sched_list:
4257 	ppdu_info->done = 1;
4258 	TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
4259 	mon_pdev->list_depth--;
4260 	TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
4261 			  ppdu_info_list_elem);
4262 	mon_pdev->sched_comp_list_depth++;
4263 }
4264 
4265 /**
 * dp_process_ppdu_stats_sch_cmd_status_tlv() - Process the schedule command
 * status TLV. The TLV buffer itself is not parsed here.
4268  * @pdev: DP PDEV handle
4269  * @ppdu_info: per ppdu tlv structure
4270  *
4271  * Return: void
4272  */
4273 static void
4274 dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev,
4275 					 struct ppdu_info *ppdu_info)
4276 {
4277 	struct cdp_tx_completion_ppdu *ppdu_desc;
4278 	struct dp_peer *peer;
4279 	uint8_t num_users;
4280 	uint8_t i;
4281 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4282 
4283 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
4284 				qdf_nbuf_data(ppdu_info->nbuf);
4285 
4286 	num_users = ppdu_desc->bar_num_users;
4287 
4288 	for (i = 0; i < num_users; i++) {
4289 		if (ppdu_desc->user[i].user_pos == 0) {
4290 			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
4291 				/* update phy mode for bar frame */
4292 				ppdu_desc->phy_mode =
4293 					ppdu_desc->user[i].preamble;
4294 				ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs;
4295 				break;
4296 			}
4297 			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) {
4298 				ppdu_desc->frame_ctrl =
4299 					ppdu_desc->user[i].frame_ctrl;
4300 				break;
4301 			}
4302 		}
4303 	}
4304 
4305 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
4306 	    ppdu_desc->delayed_ba) {
4307 		qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
4308 
4309 		for (i = 0; i < ppdu_desc->num_users; i++) {
4310 			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
4311 			uint64_t start_tsf;
4312 			uint64_t end_tsf;
4313 			uint32_t ppdu_id;
4314 			struct dp_mon_peer *mon_peer;
4315 
4316 			ppdu_id = ppdu_desc->ppdu_id;
4317 			peer = dp_peer_get_ref_by_id
4318 				(pdev->soc, ppdu_desc->user[i].peer_id,
4319 				 DP_MOD_ID_TX_PPDU_STATS);
4320 			/*
4321 			 * This check is to make sure peer is not deleted
4322 			 * after processing the TLVs.
4323 			 */
4324 			if (!peer)
4325 				continue;
4326 
4327 			if (!peer->monitor_peer) {
4328 				dp_peer_unref_delete(peer,
4329 						     DP_MOD_ID_TX_PPDU_STATS);
4330 				continue;
4331 			}
4332 
4333 			mon_peer = peer->monitor_peer;
4334 			delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
4335 			start_tsf = ppdu_desc->ppdu_start_timestamp;
4336 			end_tsf = ppdu_desc->ppdu_end_timestamp;
4337 			/*
4338 			 * save delayed ba user info
4339 			 */
4340 			if (ppdu_desc->user[i].delayed_ba) {
4341 				dp_peer_copy_delay_stats(peer,
4342 							 &ppdu_desc->user[i],
4343 							 ppdu_id);
4344 				mon_peer->last_delayed_ba_ppduid = ppdu_id;
4345 				delay_ppdu->ppdu_start_timestamp = start_tsf;
4346 				delay_ppdu->ppdu_end_timestamp = end_tsf;
4347 			}
4348 			ppdu_desc->user[i].peer_last_delayed_ba =
4349 				mon_peer->last_delayed_ba;
4350 
4351 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
4352 
4353 			if (ppdu_desc->user[i].delayed_ba &&
4354 			    !ppdu_desc->user[i].debug_copied) {
4355 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4356 					  QDF_TRACE_LEVEL_INFO_MED,
4357 					  "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n",
4358 					  __func__, __LINE__,
4359 					  ppdu_desc->ppdu_id,
4360 					  ppdu_desc->bar_ppdu_id,
4361 					  ppdu_desc->num_users,
4362 					  i,
4363 					  ppdu_desc->htt_frame_type);
4364 			}
4365 		}
4366 	}
4367 
4368 	/*
4369 	 * when frame type is BAR and STATS_COMMON_TLV is set
4370 	 * copy the store peer delayed info to BAR status
4371 	 */
4372 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
4373 		for (i = 0; i < ppdu_desc->bar_num_users; i++) {
4374 			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
4375 			uint64_t start_tsf;
4376 			uint64_t end_tsf;
4377 			struct dp_mon_peer *mon_peer;
4378 
4379 			peer = dp_peer_get_ref_by_id
4380 				(pdev->soc,
4381 				 ppdu_desc->user[i].peer_id,
4382 				 DP_MOD_ID_TX_PPDU_STATS);
4383 			/*
4384 			 * This check is to make sure peer is not deleted
4385 			 * after processing the TLVs.
4386 			 */
4387 			if (!peer)
4388 				continue;
4389 
4390 			if (!peer->monitor_peer) {
4391 				dp_peer_unref_delete(peer,
4392 						     DP_MOD_ID_TX_PPDU_STATS);
4393 				continue;
4394 			}
4395 
4396 			mon_peer = peer->monitor_peer;
4397 			if (ppdu_desc->user[i].completion_status !=
4398 			    HTT_PPDU_STATS_USER_STATUS_OK) {
4399 				dp_peer_unref_delete(peer,
4400 						     DP_MOD_ID_TX_PPDU_STATS);
4401 				continue;
4402 			}
4403 
4404 			delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
4405 			start_tsf = delay_ppdu->ppdu_start_timestamp;
4406 			end_tsf = delay_ppdu->ppdu_end_timestamp;
4407 
4408 			if (mon_peer->last_delayed_ba) {
4409 				dp_peer_copy_stats_to_bar(peer,
4410 							  &ppdu_desc->user[i]);
4411 				ppdu_desc->ppdu_id =
4412 					mon_peer->last_delayed_ba_ppduid;
4413 				ppdu_desc->ppdu_start_timestamp = start_tsf;
4414 				ppdu_desc->ppdu_end_timestamp = end_tsf;
4415 			}
4416 			ppdu_desc->user[i].peer_last_delayed_ba =
4417 						mon_peer->last_delayed_ba;
4418 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
4419 		}
4420 	}
4421 
4422 	TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
4423 	mon_pdev->list_depth--;
4424 	TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
4425 			  ppdu_info_list_elem);
4426 	mon_pdev->sched_comp_list_depth++;
4427 }
4428 
4429 /**
4430  * dp_validate_fix_ppdu_tlv() - Function to validate the length of PPDU
4431  * @pdev: DP pdev handle
4432  * @tag_buf: TLV buffer
4433  * @tlv_expected_size: Expected size of Tag
4434  * @tlv_len: TLV length received from FW
4435  *
 * If the TLV length sent as part of the PPDU TLV is less than the expected
 * size, i.e., the size of the corresponding data structure, pad the
 * remaining bytes with zeros and continue processing the TLVs.
4439  *
4440  * Return: Pointer to updated TLV
4441  */
4442 static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
4443 						 uint32_t *tag_buf,
4444 						 uint16_t tlv_expected_size,
4445 						 uint16_t tlv_len)
4446 {
4447 	uint32_t *tlv_desc = tag_buf;
4448 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4449 
4450 	qdf_assert_always(tlv_len != 0);
4451 
4452 	if (tlv_len < tlv_expected_size) {
4453 		qdf_mem_zero(mon_pdev->ppdu_tlv_buf, tlv_expected_size);
4454 		qdf_mem_copy(mon_pdev->ppdu_tlv_buf, tag_buf, tlv_len);
4455 		tlv_desc = mon_pdev->ppdu_tlv_buf;
4456 	}
4457 
4458 	return tlv_desc;
4459 }
4460 
4461 /**
4462  * dp_process_ppdu_tag() - Function to process the PPDU TLVs
4463  * @pdev: DP pdev handle
4464  * @tag_buf: TLV buffer
4465  * @tlv_len: length of tlv
4466  * @ppdu_info: per ppdu tlv structure
4467  *
4468  * Return: void
4469  */
4470 static void dp_process_ppdu_tag(struct dp_pdev *pdev,
4471 				uint32_t *tag_buf,
4472 				uint32_t tlv_len,
4473 				struct ppdu_info *ppdu_info)
4474 {
4475 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
4476 	uint16_t tlv_expected_size;
4477 	uint32_t *tlv_desc;
4478 
4479 	switch (tlv_type) {
4480 	case HTT_PPDU_STATS_COMMON_TLV:
4481 		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
4482 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4483 						    tlv_expected_size, tlv_len);
4484 		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
4485 		break;
4486 	case HTT_PPDU_STATS_USR_COMMON_TLV:
4487 		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
4488 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4489 						    tlv_expected_size, tlv_len);
4490 		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
4491 						      ppdu_info);
4492 		break;
4493 	case HTT_PPDU_STATS_USR_RATE_TLV:
4494 		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
4495 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4496 						    tlv_expected_size, tlv_len);
4497 		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
4498 						    ppdu_info);
4499 		break;
4500 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
4501 		tlv_expected_size =
4502 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
4503 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4504 						    tlv_expected_size, tlv_len);
4505 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
4506 				pdev, tlv_desc, ppdu_info);
4507 		break;
4508 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
4509 		tlv_expected_size =
4510 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
4511 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4512 						    tlv_expected_size, tlv_len);
4513 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
4514 				pdev, tlv_desc, ppdu_info);
4515 		break;
4516 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
4517 		tlv_expected_size =
4518 			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
4519 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4520 						    tlv_expected_size, tlv_len);
4521 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
4522 				pdev, tlv_desc, ppdu_info);
4523 		break;
4524 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
4525 		tlv_expected_size =
4526 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
4527 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4528 						    tlv_expected_size, tlv_len);
4529 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
4530 				pdev, tlv_desc, ppdu_info);
4531 		break;
4532 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
4533 		tlv_expected_size =
4534 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
4535 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4536 						    tlv_expected_size, tlv_len);
4537 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
4538 				pdev, tlv_desc, ppdu_info);
4539 		break;
4540 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
4541 		tlv_expected_size =
4542 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
4543 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4544 						    tlv_expected_size, tlv_len);
4545 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
4546 				pdev, tlv_desc, ppdu_info);
4547 		break;
4548 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
4549 		tlv_expected_size =
4550 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
4551 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4552 						    tlv_expected_size, tlv_len);
4553 		dp_process_ppdu_stats_user_common_array_tlv(
4554 				pdev, tlv_desc, ppdu_info);
4555 		break;
4556 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
4557 		tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
4558 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4559 						    tlv_expected_size, tlv_len);
4560 		dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
4561 							     ppdu_info);
4562 		break;
4563 	case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV:
4564 		dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info);
4565 		break;
4566 	default:
4567 		break;
4568 	}
4569 }
4570 
4571 #ifdef WLAN_CONFIG_TELEMETRY_AGENT
4572 static inline
4573 void dp_ppdu_desc_user_airtime_consumption_update(
4574 			struct dp_peer *peer,
4575 			struct cdp_tx_completion_ppdu_user *user)
4576 {
4577 	struct dp_mon_peer *mon_peer = NULL;
4578 	uint8_t ac = 0;
4579 
4580 	mon_peer = peer->monitor_peer;
4581 	if (qdf_unlikely(!mon_peer))
4582 		return;
4583 
4584 	ac = TID_TO_WME_AC(user->tid);
4585 	DP_STATS_INC(mon_peer, airtime_stats.tx_airtime_consumption[ac].consumption,
4586 		     user->phy_tx_time_us);
4587 }
4588 #else
4589 static inline
4590 void dp_ppdu_desc_user_airtime_consumption_update(
4591 			struct dp_peer *peer,
4592 			struct cdp_tx_completion_ppdu_user *user)
4593 { }
4594 #endif
4595 
4596 #if defined(WLAN_ATF_ENABLE) || defined(WLAN_CONFIG_TELEMETRY_AGENT)
4597 static void
4598 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
4599 				     struct dp_peer *peer,
4600 				     struct cdp_tx_completion_ppdu *ppdu_desc,
4601 				     struct cdp_tx_completion_ppdu_user *user)
4602 {
4603 	uint32_t nss_ru_width_sum = 0;
4604 	struct dp_mon_peer *mon_peer = NULL;
4605 
4606 	if (!pdev || !ppdu_desc || !user || !peer)
4607 		return;
4608 
4609 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA)
4610 		return;
4611 
4612 	mon_peer = peer->monitor_peer;
4613 	if (qdf_unlikely(!mon_peer))
4614 		return;
4615 
4616 	nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
4617 	if (!nss_ru_width_sum)
4618 		nss_ru_width_sum = 1;
4619 
4620 	/*
4621 	 * For SU-MIMO PPDU phy Tx time is same for the single user.
4622 	 * For MU-MIMO phy Tx time is calculated per user as below
4623 	 *     user phy tx time =
4624 	 *           Entire PPDU duration * MU Ratio * OFDMA Ratio
4625 	 *     MU Ratio = usr_nss / Sum_of_nss_of_all_users
4626 	 *     OFDMA_ratio = usr_ru_width / Sum_of_ru_width_of_all_users
4627 	 *     usr_ru_widt = ru_end – ru_start + 1
4628 	 */
4629 	if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
4630 		user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us;
4631 	} else {
4632 		user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us *
4633 				user->nss * user->ru_tones) / nss_ru_width_sum;
4634 	}
4635 
4636 	dp_ppdu_desc_user_airtime_consumption_update(peer, user);
4637 }
4638 #else
4639 static void
4640 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
4641 				     struct dp_peer *peer,
4642 				     struct cdp_tx_completion_ppdu *ppdu_desc,
4643 				     struct cdp_tx_completion_ppdu_user *user)
4644 {
4645 }
4646 #endif
4647 
4648 #ifdef WLAN_SUPPORT_CTRL_FRAME_STATS
4649 static void
4650 dp_tx_ctrl_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
4651 			struct cdp_tx_completion_ppdu_user *user)
4652 {
4653 	struct dp_mon_peer *mon_peer = NULL;
4654 	uint16_t fc = 0;
4655 
4656 	if (!pdev || !peer || !user)
4657 		return;
4658 
4659 	mon_peer = peer->monitor_peer;
4660 	if (qdf_unlikely(!mon_peer))
4661 		return;
4662 
4663 	if (user->mprot_type) {
4664 		DP_STATS_INCC(mon_peer,
4665 			      tx.rts_success, 1, user->rts_success);
4666 		DP_STATS_INCC(mon_peer,
4667 			      tx.rts_failure, 1, user->rts_failure);
4668 	}
4669 	fc = user->frame_ctrl;
4670 	if ((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_TYPE_MASK) ==
4671 	    QDF_IEEE80211_FC0_TYPE_CTL) {
4672 		if ((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
4673 		    QDF_IEEE80211_FC0_SUBTYPE_VHT_NDP_AN)
4674 			DP_STATS_INC(mon_peer, tx.ndpa_cnt, 1);
4675 		if ((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
4676 		    QDF_IEEE80211_FC0_SUBTYPE_BAR)
4677 			DP_STATS_INC(mon_peer, tx.bar_cnt, 1);
4678 	}
4679 }
4680 #else
4681 static void
4682 dp_tx_ctrl_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
4683 			struct cdp_tx_completion_ppdu_user *user)
4684 {
4685 }
4686 #endif /* WLAN_SUPPORT_CTRL_FRAME_STATS */
4687 
4688 void
4689 dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
4690 			       struct ppdu_info *ppdu_info)
4691 {
4692 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
4693 	struct dp_peer *peer = NULL;
4694 	uint32_t tlv_bitmap_expected;
4695 	uint32_t tlv_bitmap_default;
4696 	uint16_t i;
4697 	uint32_t num_users;
4698 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4699 
4700 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
4701 		qdf_nbuf_data(ppdu_info->nbuf);
4702 
4703 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR)
4704 		ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
4705 
4706 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
4707 	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
4708 	    mon_pdev->tx_capture_enabled) {
4709 		if (ppdu_info->is_ampdu)
4710 			tlv_bitmap_expected =
4711 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
4712 					ppdu_info->tlv_bitmap);
4713 	}
4714 
4715 	tlv_bitmap_default = tlv_bitmap_expected;
4716 
4717 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
4718 		num_users = ppdu_desc->bar_num_users;
4719 		ppdu_desc->num_users = ppdu_desc->bar_num_users;
4720 	} else {
4721 		num_users = ppdu_desc->num_users;
4722 	}
4723 	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
4724 
4725 	if (wlan_cfg_get_sawf_stats_config(pdev->soc->wlan_cfg_ctx)) {
4726 		dp_ppdu_desc_get_txmode(ppdu_desc);
4727 		dp_pdev_update_deter_stats(pdev, ppdu_desc);
4728 	}
4729 
4730 	for (i = 0; i < num_users; i++) {
4731 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
4732 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
4733 
4734 		peer = dp_peer_get_ref_by_id(pdev->soc,
4735 					     ppdu_desc->user[i].peer_id,
4736 					     DP_MOD_ID_TX_PPDU_STATS);
4737 		/*
4738 		 * This check is to make sure peer is not deleted
4739 		 * after processing the TLVs.
4740 		 */
4741 		if (!peer)
4742 			continue;
4743 
4744 		ppdu_desc->user[i].is_bss_peer = peer->bss_peer;
4745 
4746 		dp_ppdu_desc_user_phy_tx_time_update(pdev, peer, ppdu_desc,
4747 						     &ppdu_desc->user[i]);
4748 
4749 		dp_tx_ctrl_stats_update(pdev, peer, &ppdu_desc->user[i]);
4750 
4751 		if (wlan_cfg_get_sawf_stats_config(pdev->soc->wlan_cfg_ctx)) {
4752 			dp_ppdu_desc_user_deter_stats_update(pdev,
4753 							     peer,
4754 							     ppdu_desc,
4755 							     &ppdu_desc->user[i]);
4756 		}
4757 
4758 		/*
4759 		 * different frame like DATA, BAR or CTRL has different
4760 		 * tlv bitmap expected. Apart from ACK_BA_STATUS TLV, we
4761 		 * receive other tlv in-order/sequential from fw.
4762 		 * Since ACK_BA_STATUS TLV come from Hardware it is
4763 		 * asynchronous So we need to depend on some tlv to confirm
4764 		 * all tlv is received for a ppdu.
4765 		 * So we depend on both SCHED_CMD_STATUS_TLV and
4766 		 * ACK_BA_STATUS_TLV. for failure packet we won't get
4767 		 * ACK_BA_STATUS_TLV.
4768 		 */
4769 		if (!(ppdu_info->tlv_bitmap &
4770 		      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) ||
4771 		    (!(ppdu_info->tlv_bitmap &
4772 		       (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
4773 		     (ppdu_desc->user[i].completion_status ==
4774 		      HTT_PPDU_STATS_USER_STATUS_OK))) {
4775 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
4776 			continue;
4777 		}
4778 
4779 		/*
		 * Update tx stats for data frames with QoS as well as
		 * non-QoS data TIDs
4782 		 */
4783 
4784 		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
4785 		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
4786 		     (ppdu_desc->htt_frame_type ==
4787 		      HTT_STATS_FTYPE_SGEN_QOS_NULL) ||
4788 		     ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) &&
4789 		      (ppdu_desc->num_mpdu > 1))) &&
4790 		      (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {
4791 			dp_tx_stats_update(pdev, peer,
4792 					   &ppdu_desc->user[i],
4793 					   ppdu_desc);
4794 		}
4795 
4796 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
4797 		tlv_bitmap_expected = tlv_bitmap_default;
4798 	}
4799 }
4800 
4801 #if !defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_PKT_CAPTURE_TX_2_0) || \
4802 	defined(WLAN_PKT_CAPTURE_RX_2_0)
4803 /**
 * dp_tx_ppdu_desc_notify() - Notify the upper layer about a PPDU via WDI
4805  *
4806  * @pdev: Datapath pdev handle
4807  * @nbuf: Buffer to be delivered to upper layer
4808  *
4809  * Return: void
4810  */
4811 static void dp_tx_ppdu_desc_notify(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
4812 {
4813 	struct dp_soc *soc = pdev->soc;
4814 	struct dp_mon_ops *mon_ops = NULL;
4815 
4816 	mon_ops = dp_mon_ops_get(soc);
4817 	if (mon_ops && mon_ops->mon_ppdu_desc_notify)
4818 		mon_ops->mon_ppdu_desc_notify(pdev, nbuf);
4819 	else
4820 		qdf_nbuf_free(nbuf);
4821 }
4822 
4823 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
4824 			  struct ppdu_info *ppdu_info)
4825 {
4826 	struct ppdu_info *s_ppdu_info = NULL;
4827 	struct ppdu_info *ppdu_info_next = NULL;
4828 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
4829 	qdf_nbuf_t nbuf;
4830 	uint32_t time_delta = 0;
4831 	bool starved = 0;
4832 	bool matched = 0;
4833 	bool recv_ack_ba_done = 0;
4834 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4835 
4836 	if (ppdu_info->tlv_bitmap &
4837 	    (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
4838 	    ppdu_info->done)
4839 		recv_ack_ba_done = 1;
4840 
4841 	mon_pdev->last_sched_cmdid = ppdu_info->sched_cmdid;
4842 
4843 	s_ppdu_info = TAILQ_FIRST(&mon_pdev->sched_comp_ppdu_list);
4844 
4845 	TAILQ_FOREACH_SAFE(s_ppdu_info, &mon_pdev->sched_comp_ppdu_list,
4846 			   ppdu_info_list_elem, ppdu_info_next) {
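		/*
		 * tsf_l32 is the lower 32 bits of the TSF; when it wraps,
		 * the stored value can exceed the new one, so compute the
		 * delta modulo 2^32.
		 */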
4847 		if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32)
4848 			time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) +
4849 					ppdu_info->tsf_l32;
4850 		else
4851 			time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32;
4852 
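		/*
		 * An older scheduled PPDU whose TLVs are still incomplete:
		 * wait up to MAX_SCHED_STARVE of TSF time before treating
		 * it as starved and delivering it anyway.
		 */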
4853 		if (!s_ppdu_info->done && !recv_ack_ba_done) {
4854 			if (time_delta < MAX_SCHED_STARVE) {
4855 				dp_mon_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]",
4856 					    pdev->pdev_id,
4857 					    s_ppdu_info->ppdu_id,
4858 					    s_ppdu_info->sched_cmdid,
4859 					    s_ppdu_info->tlv_bitmap,
4860 					    s_ppdu_info->tsf_l32,
4861 					    s_ppdu_info->done);
4862 				break;
4863 			}
4864 			starved = 1;
4865 		}
4866 
4867 		mon_pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid;
4868 		TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list, s_ppdu_info,
4869 			     ppdu_info_list_elem);
4870 		mon_pdev->sched_comp_list_depth--;
4871 
4872 		nbuf = s_ppdu_info->nbuf;
4873 		qdf_assert_always(nbuf);
4874 		ppdu_desc = (struct cdp_tx_completion_ppdu *)
4875 				qdf_nbuf_data(nbuf);
4876 		ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap;
4877 
4878 		if (starved) {
4879 			dp_mon_info("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n",
4880 				    ppdu_desc->frame_ctrl,
4881 				    ppdu_desc->htt_frame_type,
4882 				    ppdu_desc->tlv_bitmap,
4883 				    ppdu_desc->user[0].completion_status);
4884 			starved = 0;
4885 		}
4886 
4887 		if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id &&
4888 		    ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid)
4889 			matched = 1;
4890 
4891 		dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info);
4892 
4893 		qdf_mem_free(s_ppdu_info);
4894 
4895 		dp_tx_ppdu_desc_notify(pdev, nbuf);
4896 
4897 		if (matched)
4898 			break;
4899 	}
4900 }
4901 #endif
4902 
4903 /**
4904  * dp_tx_ppdu_desc_deliver() - Deliver PPDU desc to upper layer
4905  * @pdev: Datapath pdev handle
4906  * @ppdu_info: per PPDU TLV descriptor
4907  *
4908  * Return: void
4909  */
4910 static void dp_tx_ppdu_desc_deliver(struct dp_pdev *pdev,
4911 				    struct ppdu_info *ppdu_info)
4912 {
4913 	struct dp_soc *soc = pdev->soc;
4914 	struct dp_mon_ops *mon_ops = NULL;
4915 
4916 	mon_ops = dp_mon_ops_get(soc);
4917 
4918 	if (mon_ops && mon_ops->mon_ppdu_desc_deliver) {
4919 		mon_ops->mon_ppdu_desc_deliver(pdev, ppdu_info);
4920 	} else {
4921 		qdf_nbuf_free(ppdu_info->nbuf);
4922 		ppdu_info->nbuf = NULL;
4923 		qdf_mem_free(ppdu_info);
4924 	}
4925 }
4926 
4927 /**
4928  * dp_get_ppdu_desc() - Function to allocate new PPDU status
4929  * desc for new ppdu id
4930  * @pdev: DP pdev handle
4931  * @ppdu_id: PPDU unique identifier
4932  * @tlv_type: TLV type received
4933  * @tsf_l32: timestamp received along with ppdu stats indication header
4934  * @max_users: Maximum user for that particular ppdu
4935  *
4936  * Return: ppdu_info per ppdu tlv structure
4937  */
4938 static
4939 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
4940 				   uint8_t tlv_type, uint32_t tsf_l32,
4941 				   uint8_t max_users)
4942 {
4943 	struct ppdu_info *ppdu_info = NULL;
4944 	struct ppdu_info *s_ppdu_info = NULL;
4945 	struct ppdu_info *ppdu_info_next = NULL;
4946 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
4947 	uint32_t size = 0;
4948 	struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL;
4949 	struct cdp_tx_completion_ppdu_user *tmp_user;
4950 	uint32_t time_delta;
4951 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4952 
4953 	/*
	 * Check whether a node for this ppdu_id already exists
4955 	 */
4956 	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
4957 			   ppdu_info_list_elem, ppdu_info_next) {
4958 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
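			/*
			 * Same ppdu_id seen again after a large TSF gap:
			 * the masked ppdu_id has likely wrapped, so the
			 * stale descriptor is dropped below (counted in
			 * ppdu_wrap_drop).
			 */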
4959 			if (ppdu_info->tsf_l32 > tsf_l32)
4960 				time_delta  = (MAX_TSF_32 -
4961 					       ppdu_info->tsf_l32) + tsf_l32;
4962 			else
4963 				time_delta  = tsf_l32 - ppdu_info->tsf_l32;
4964 
4965 			if (time_delta > WRAP_DROP_TSF_DELTA) {
4966 				TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
4967 					     ppdu_info, ppdu_info_list_elem);
4968 				mon_pdev->list_depth--;
4969 				pdev->stats.ppdu_wrap_drop++;
4970 				tmp_ppdu_desc =
4971 					(struct cdp_tx_completion_ppdu *)
4972 					qdf_nbuf_data(ppdu_info->nbuf);
4973 				tmp_user = &tmp_ppdu_desc->user[0];
4974 				dp_htt_tx_stats_info("S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n",
4975 						     ppdu_info->ppdu_id,
4976 						     ppdu_info->tsf_l32,
4977 						     ppdu_info->tlv_bitmap,
4978 						     tmp_user->completion_status,
4979 						     ppdu_info->compltn_common_tlv,
4980 						     ppdu_info->ack_ba_tlv,
4981 						     ppdu_id, tsf_l32,
4982 						     tlv_type);
4983 				qdf_nbuf_free(ppdu_info->nbuf);
4984 				ppdu_info->nbuf = NULL;
4985 				qdf_mem_free(ppdu_info);
4986 			} else {
4987 				break;
4988 			}
4989 		}
4990 	}
4991 
4992 	/*
	 * If this is an ACK BA TLV and the ppdu is not in the ppdu info
	 * list, look for it in the sched completion ppdu list
4995 	 */
4996 	if (!ppdu_info &&
4997 	    tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) {
4998 		TAILQ_FOREACH(s_ppdu_info,
4999 			      &mon_pdev->sched_comp_ppdu_list,
5000 			      ppdu_info_list_elem) {
5001 			if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) {
5002 				if (s_ppdu_info->tsf_l32 > tsf_l32)
5003 					time_delta  = (MAX_TSF_32 -
5004 						       s_ppdu_info->tsf_l32) +
5005 							tsf_l32;
5006 				else
5007 					time_delta  = tsf_l32 -
5008 						s_ppdu_info->tsf_l32;
5009 				if (time_delta < WRAP_DROP_TSF_DELTA) {
5010 					ppdu_info = s_ppdu_info;
5011 					break;
5012 				}
5013 			} else {
5014 				/*
5015 				 * ACK BA STATUS TLV comes sequential order
5016 				 * if we received ack ba status tlv for second
5017 				 * ppdu and first ppdu is still waiting for
5018 				 * ACK BA STATUS TLV. Based on fw comment
5019 				 * we won't receive it tlv later. So we can
5020 				 * set ppdu info done.
5021 				 */
5022 				if (s_ppdu_info)
5023 					s_ppdu_info->done = 1;
5024 			}
5025 		}
5026 	}
5027 
5028 	if (ppdu_info) {
5029 		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
5030 			/*
5031 			 * if we get tlv_type that is already been processed
5032 			 * for ppdu, that means we got a new ppdu with same
5033 			 * ppdu id. Hence Flush the older ppdu
5034 			 * for MUMIMO and OFDMA, In a PPDU we have
5035 			 * multiple user with same tlv types. tlv bitmap is
5036 			 * used to check whether SU or MU_MIMO/OFDMA
5037 			 */
5038 			if (!(ppdu_info->tlv_bitmap &
5039 			    (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
5040 				return ppdu_info;
5041 
5042 			ppdu_desc = (struct cdp_tx_completion_ppdu *)
5043 				qdf_nbuf_data(ppdu_info->nbuf);
5044 
5045 			/*
5046 			 * apart from ACK BA STATUS TLV rest all comes in order
5047 			 * so if tlv type not ACK BA STATUS TLV we can deliver
5048 			 * ppdu_info
5049 			 */
5050 			if ((tlv_type ==
5051 			     HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
5052 			    ((ppdu_desc->htt_frame_type ==
5053 			     HTT_STATS_FTYPE_SGEN_MU_BAR) ||
5054 			    (ppdu_desc->htt_frame_type ==
5055 			     HTT_STATS_FTYPE_SGEN_BE_MU_BAR)))
5056 				return ppdu_info;
5057 
5058 			dp_tx_ppdu_desc_deliver(pdev, ppdu_info);
5059 		} else {
5060 			return ppdu_info;
5061 		}
5062 	}
5063 
5064 	/*
5065 	 * Flush the head ppdu descriptor if ppdu desc list reaches max
5066 	 * threshold
5067 	 */
5068 	if (mon_pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
5069 		ppdu_info = TAILQ_FIRST(&mon_pdev->ppdu_info_list);
5070 		TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
5071 			     ppdu_info, ppdu_info_list_elem);
5072 		mon_pdev->list_depth--;
5073 		pdev->stats.ppdu_drop++;
5074 		qdf_nbuf_free(ppdu_info->nbuf);
5075 		ppdu_info->nbuf = NULL;
5076 		qdf_mem_free(ppdu_info);
5077 	}
5078 
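	/*
	 * One nbuf holds the fixed PPDU completion descriptor followed
	 * by max_users per-user descriptors.
	 */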
5079 	size = sizeof(struct cdp_tx_completion_ppdu) +
5080 		(max_users * sizeof(struct cdp_tx_completion_ppdu_user));
5081 
5082 	/*
5083 	 * Allocate new ppdu_info node
5084 	 */
5085 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
5086 	if (!ppdu_info)
5087 		return NULL;
5088 
5089 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size,
5090 					 0, 4, TRUE);
5091 	if (!ppdu_info->nbuf) {
5092 		qdf_mem_free(ppdu_info);
5093 		return NULL;
5094 	}
5095 
5096 	ppdu_info->ppdu_desc =
5097 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
5098 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size);
5099 
5100 	if (!qdf_nbuf_put_tail(ppdu_info->nbuf, size)) {
5101 		dp_mon_err("No tailroom for HTT PPDU");
5102 		qdf_nbuf_free(ppdu_info->nbuf);
5103 		ppdu_info->nbuf = NULL;
5104 		ppdu_info->last_user = 0;
5105 		qdf_mem_free(ppdu_info);
5106 		return NULL;
5107 	}
5108 
5109 	ppdu_info->ppdu_desc->max_users = max_users;
5110 	ppdu_info->tsf_l32 = tsf_l32;
5111 	/*
	 * No lock is needed because all PPDU TLVs are processed, and this
	 * list is updated, in the same context
5114 	 */
5115 	TAILQ_INSERT_TAIL(&mon_pdev->ppdu_info_list, ppdu_info,
5116 			  ppdu_info_list_elem);
5117 	mon_pdev->list_depth++;
5118 	return ppdu_info;
5119 }
5120 
5121 #define DP_HTT_PPDU_ID_MASK 0x00FFFFFF
5122 /**
5123  * dp_htt_mask_ppdu_id() - Function to mask ppdu_id
5124  * @ppdu_id: PPDU ID
5125  *
5126  * Return: Masked ppdu_id
5127  */
5128 static inline uint32_t dp_htt_mask_ppdu_id(uint32_t ppdu_id)
5129 {
5130 	return (ppdu_id & DP_HTT_PPDU_ID_MASK);
5131 }
5132 
5133 /**
5134  * dp_htt_process_tlv() - Function to process each PPDU TLVs
5135  * @pdev: DP pdev handle
5136  * @htt_t2h_msg: HTT target to host message
5137  *
5138  * Return: ppdu_info per ppdu tlv structure
5139  */
5140 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
5141 					    qdf_nbuf_t htt_t2h_msg)
5142 {
5143 	uint32_t length;
5144 	uint32_t ppdu_id;
5145 	uint8_t tlv_type;
5146 	uint32_t tlv_length, tlv_bitmap_expected;
5147 	uint8_t *tlv_buf;
5148 	struct ppdu_info *ppdu_info = NULL;
5149 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
5150 	uint8_t max_users = CDP_MU_MAX_USERS;
5151 	uint32_t tsf_l32;
5152 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5153 
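	/*
	 * HTT PPDU stats indication header, as parsed below: word 0
	 * carries the payload size, word 1 the ppdu_id, word 2 the lower
	 * 32 bits of the TSF; the TLVs begin at word 4.
	 */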
5154 	uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
5155 
5156 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
5157 
5158 	msg_word = msg_word + 1;
5159 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
5160 	ppdu_id = dp_htt_mask_ppdu_id(ppdu_id);
5161 
5162 	msg_word = msg_word + 1;
5163 	tsf_l32 = (uint32_t)(*msg_word);
5164 
5165 	msg_word = msg_word + 2;
5166 	while (length > 0) {
5167 		tlv_buf = (uint8_t *)msg_word;
5168 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
5169 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
5170 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
5171 			pdev->stats.ppdu_stats_counter[tlv_type]++;
5172 
5173 		if (tlv_length == 0)
5174 			break;
5175 
5176 		tlv_length += HTT_TLV_HDR_LEN;
5177 
5178 		/*
5179 		 * No separate ppdu descriptor is allocated for the MGMT
5180 		 * payload TLV, as it is sent as a separate WDI indication
5181 		 * and does not contain any ppdu information.
5182 		 */
5183 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
5184 			mon_pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
5185 			mon_pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
5186 			mon_pdev->mgmtctrl_frm_info.mgmt_buf_len =
5187 				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
5188 						(*(msg_word + 1));
5189 			msg_word =
5190 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
5191 			length -= (tlv_length);
5192 			continue;
5193 		}
5194 
5195 		/*
5196 		 * Retrieve max_users from the USERS_INFO TLV; a
5197 		 * COMPLTN_FLUSH TLV carries a single user, and all other
5198 		 * TLV types default to CDP_MU_MAX_USERS.
5199 		 */
5200 		if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) {
5201 			max_users =
5202 				HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1));
5203 		} else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) {
5204 			max_users = 1;
5205 		}
5206 
5207 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type,
5208 					     tsf_l32, max_users);
5209 		if (!ppdu_info)
5210 			return NULL;
5211 
5212 		ppdu_info->ppdu_id = ppdu_id;
5213 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
5214 
5215 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
5216 
5217 		/*
5218 		 * Increment pdev level tlv count to monitor
5219 		 * missing TLVs
5220 		 */
5221 		mon_pdev->tlv_count++;
5222 		ppdu_info->last_tlv_cnt = mon_pdev->tlv_count;
5223 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
5224 		length -= (tlv_length);
5225 	}
5226 
5227 	if (!ppdu_info)
5228 		return NULL;
5229 
5230 	mon_pdev->last_ppdu_id = ppdu_id;
5231 
5232 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
5233 
5234 	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
5235 	    mon_pdev->tx_capture_enabled) {
5236 		if (ppdu_info->is_ampdu)
5237 			tlv_bitmap_expected =
5238 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
5239 					ppdu_info->tlv_bitmap);
5240 	}
5241 
5242 	ppdu_desc = ppdu_info->ppdu_desc;
5243 
5244 	if (!ppdu_desc)
5245 		return NULL;
5246 
5247 	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
5248 	    HTT_PPDU_STATS_USER_STATUS_OK) {
5249 		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
5250 	}
5251 
5252 	/*
5253 	 * For frame types DATA and BAR, stats are updated per MSDU:
5254 	 * successful MSDU and MPDU counts are populated from the ACK BA
5255 	 * STATUS TLV, which arrives out of order, while the successful MPDU
5256 	 * count is also populated from the COMPLTN COMMON TLV, which arrives
5257 	 * in order. For every ppdu_info the successful MPDU counts from both
5258 	 * TLVs are stored and compared before delivery to make sure the
5259 	 * ACK BA STATUS TLV was received. Some self-generated frames never
5260 	 * get an ACK BA STATUS TLV, so there is no need to wait for it.
5261 	 */
5262 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
5263 	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
5264 		/*
5265 		 * Most of the time a BAR frame will carry a duplicate ACK BA
5266 		 * status TLV.
5267 		 */
5268 		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
5269 		    (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))
5270 			return NULL;
5271 		/*
5272 		 * For data frames the COMPLTN COMMON TLV must match the ACK
5273 		 * BA STATUS TLV and the completion status. Only the first
5274 		 * user is checked because for OFDMA the completion is seen at
5275 		 * the next MU BAR frame, whereas for MIMO only the first
5276 		 * user's completion is immediate.
5276 		 */
5277 		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
5278 		    (ppdu_desc->user[0].completion_status == 0 &&
5279 		     (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)))
5280 			return NULL;
5281 	}
5282 
5283 	/*
5284 	 * Once all the TLVs for a given PPDU have been processed,
5285 	 * return the PPDU status to be delivered to the higher layer.
5286 	 * tlv_bitmap_expected is not known for every frame type, but the
5287 	 * SCHED CMD STATUS TLV is the last TLV the FW sends for a PPDU and,
5288 	 * apart from the ACK BA TLV, the FW sends TLVs in sequential order;
5289 	 * the flush TLV arrives separately.
5290 	 */
5291 	if ((ppdu_info->tlv_bitmap != 0 &&
5292 	     (ppdu_info->tlv_bitmap &
5293 	      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) ||
5294 	    (ppdu_info->tlv_bitmap &
5295 	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) {
5296 		ppdu_info->done = 1;
5297 		return ppdu_info;
5298 	}
5299 
5300 	return NULL;
5301 }
5302 #endif /* QCA_ENHANCED_STATS_SUPPORT */
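
/*
 * Reference sketch of the HTT T2H PPDU stats message as parsed by
 * dp_htt_process_tlv() above (field names are informal):
 *
 *	word 0:  msg type, pdev id, payload size
 *		 (HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET)
 *	word 1:  ppdu_id (HTT_T2H_PPDU_STATS_PPDU_ID_GET, then masked)
 *	word 2:  tsf_l32
 *	word 3:  (skipped)
 *	word 4+: TLVs, each led by a header word carrying
 *		 HTT_STATS_TLV_TAG_GET / HTT_STATS_TLV_LENGTH_GET, with the
 *		 next TLV at tlv_buf + tlv_length + HTT_TLV_HDR_LEN.
 */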
5303 
5304 #ifdef QCA_ENHANCED_STATS_SUPPORT
5305 /**
5306  * dp_tx_ppdu_stats_feat_enable_check() - Check if any feature is enabled
5307  *			to consume PPDU stats received from FW via HTT
5308  * @pdev: Datapath pdev handle
5309  *
5310  * Return: true if the PPDU stats are to be consumed, false otherwise
5311  */
5312 static bool dp_tx_ppdu_stats_feat_enable_check(struct dp_pdev *pdev)
5313 {
5314 	struct dp_soc *soc = pdev->soc;
5315 	struct dp_mon_ops *mon_ops = NULL;
5316 
5317 	mon_ops = dp_mon_ops_get(soc);
5318 	if (mon_ops && mon_ops->mon_ppdu_stats_feat_enable_check)
5319 		return mon_ops->mon_ppdu_stats_feat_enable_check(pdev);
5320 	else
5321 		return false;
5322 }
5323 #endif
5324 
5325 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
5326 static void dp_htt_process_smu_ppdu_stats_tlv(struct dp_soc *soc,
5327 					      qdf_nbuf_t htt_t2h_msg)
5328 {
5329 	uint32_t length;
5330 	uint8_t tlv_type;
5331 	uint32_t tlv_length, tlv_expected_size;
5332 	uint8_t *tlv_buf;
5333 
5334 	uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
5335 
5336 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
5337 
5338 	msg_word = msg_word + 4;
5339 
5340 	while (length > 0) {
5341 		tlv_buf = (uint8_t *)msg_word;
5342 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
5343 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
5344 
5345 		if (tlv_length == 0)
5346 			break;
5347 
5348 		tlv_length += HTT_TLV_HDR_LEN;
5349 
5350 		if (tlv_type == HTT_PPDU_STATS_FOR_SMU_TLV) {
5351 			tlv_expected_size = sizeof(htt_ppdu_stats_for_smu_tlv);
5352 
5353 			if (tlv_length >= tlv_expected_size)
5354 				dp_wdi_event_handler(
5355 					WDI_EVENT_PKT_CAPTURE_PPDU_STATS,
5356 					soc, msg_word, HTT_INVALID_VDEV,
5357 					WDI_NO_VAL, 0);
5358 		}
5359 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
5360 		length -= (tlv_length);
5361 	}
5362 }
5363 #endif
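
/*
 * The SMU path above skips the same four header words with a single
 * "msg_word + 4" and then reuses the TLV-walk idiom of dp_htt_process_tlv();
 * a minimal sketch of that idiom (illustrative only):
 *
 *	while (length > 0) {
 *		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
 *		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word) +
 *			     HTT_TLV_HDR_LEN;
 *		if (tlv_length == HTT_TLV_HDR_LEN)
 *			break;	(a zero-length TLV terminates the walk)
 *		...handle the TLV at msg_word...
 *		msg_word = (uint32_t *)((uint8_t *)msg_word + tlv_length);
 *		length -= tlv_length;
 *	}
 */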
5364 
5365 #if defined(WDI_EVENT_ENABLE)
5366 #ifdef QCA_ENHANCED_STATS_SUPPORT
5367 /**
5368  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
5369  * @soc: DP SOC handle
5370  * @pdev_id: pdev id
5371  * @htt_t2h_msg: HTT message nbuf
5372  *
5373  * Return: true if the caller should free htt_t2h_msg, false otherwise
5374  */
5375 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
5376 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
5377 {
5378 	struct dp_pdev *pdev;
5379 	struct ppdu_info *ppdu_info = NULL;
5380 	bool free_buf = true;
5381 	struct dp_mon_pdev *mon_pdev;
5382 
5383 	if (pdev_id >= MAX_PDEV_CNT)
5384 		return true;
5385 
5386 	pdev = soc->pdev_list[pdev_id];
5387 	if (!pdev)
5388 		return true;
5389 
5390 	mon_pdev = pdev->monitor_pdev;
5391 	if (!mon_pdev)
5392 		return true;
5393 
5394 	if (!dp_tx_ppdu_stats_feat_enable_check(pdev))
5395 		return free_buf;
5396 
5397 	qdf_spin_lock_bh(&mon_pdev->ppdu_stats_lock);
5398 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
5399 
5400 	if (mon_pdev->mgmtctrl_frm_info.mgmt_buf) {
5401 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
5402 		    (pdev, htt_t2h_msg, mon_pdev->mgmtctrl_frm_info.ppdu_id) !=
5403 		    QDF_STATUS_SUCCESS)
5404 			free_buf = false;
5405 	}
5406 
5407 	if (ppdu_info)
5408 		dp_tx_ppdu_desc_deliver(pdev, ppdu_info);
5409 
5410 	mon_pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
5411 	mon_pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
5412 	mon_pdev->mgmtctrl_frm_info.ppdu_id = 0;
5413 
5414 	qdf_spin_unlock_bh(&mon_pdev->ppdu_stats_lock);
5415 
5416 	return free_buf;
5417 }
5418 #elif defined(WLAN_FEATURE_PKT_CAPTURE_V2)
5419 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
5420 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
5421 {
5422 	if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
5423 		dp_htt_process_smu_ppdu_stats_tlv(soc, htt_t2h_msg);
5424 
5425 	return true;
5426 }
5427 #elif (!defined(REMOVE_PKT_LOG))
5428 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
5429 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
5430 {
5431 	return true;
5432 }
5433 #endif /* QCA_ENHANCED_STATS_SUPPORT */
5434 #endif
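
/*
 * Buffer-ownership contract (a sketch of the expected caller behavior; the
 * real caller lives elsewhere): dp_txrx_ppdu_stats_handler() returns true
 * when the caller still owns and must free htt_t2h_msg, and false when the
 * nbuf was retained, e.g. by the mgmt-ctrl payload path above:
 *
 *	if (dp_ppdu_stats_ind_handler(htt_soc, msg_word, htt_t2h_msg))
 *		qdf_nbuf_free(htt_t2h_msg);
 */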
5435 
5436 #if defined(WDI_EVENT_ENABLE) &&\
5437 	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG) || \
5438 	 defined(WLAN_FEATURE_PKT_CAPTURE_V2))
5439 bool
5440 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
5441 			  uint32_t *msg_word,
5442 			  qdf_nbuf_t htt_t2h_msg)
5443 {
5444 	u_int8_t pdev_id;
5445 	u_int8_t target_pdev_id;
5446 	bool free_buf;
5447 
5448 	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
5449 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
5450 							 target_pdev_id);
5451 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
5452 			     htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
5453 			     pdev_id);
5454 
5455 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
5456 					      htt_t2h_msg);
5457 
5458 	return free_buf;
5459 }
5460 #endif
5461 
5462 void
5463 dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
5464 {
5465 	pdev->monitor_pdev->rx_mon_recv_status.bsscolor = bsscolor;
5466 }
5467 
5468 bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
5469 {
5470 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5471 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5472 
5473 	if ((mon_pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5474 	    (mon_pdev->mo_data_filter & FILTER_DATA_UCAST))
5475 		return true;
5476 
5477 	return false;
5478 }
5479 
5480 bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
5481 {
5482 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5483 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5484 
5485 	if ((mon_pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5486 	    (mon_pdev->mo_data_filter & FILTER_DATA_MCAST))
5487 		return true;
5488 
5489 	return false;
5490 }
5491 
5492 bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
5493 {
5494 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5495 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5496 
5497 	if ((mon_pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5498 	    (mon_pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5499 		if ((mon_pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5500 		    (mon_pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5501 			return true;
5502 		}
5503 	}
5504 
5505 	return false;
5506 }
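
/*
 * Predicate semantics sketch (illustrative, not driver code): each getter
 * ORs the filter-pass (fp_*) and monitor-other (mo_*) settings, e.g. for
 * unicast data the check is equivalent to
 *
 *	bool pass = (mon_pdev->fp_data_filter & FILTER_DATA_UCAST) ||
 *		    (mon_pdev->mo_data_filter & FILTER_DATA_UCAST);
 *
 * while dp_pdev_get_filter_non_data() additionally requires a ctrl filter
 * on top of a mgmt filter before reporting true.
 */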
5507 
5508 QDF_STATUS dp_mon_soc_cfg_init(struct dp_soc *soc)
5509 {
5510 	int target_type;
5511 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
5512 	struct cdp_mon_ops *cdp_ops;
5513 
5514 	cdp_ops = dp_mon_cdp_ops_get(soc);
5515 	target_type = hal_get_target_type(soc->hal_soc);
5516 	switch (target_type) {
5517 	case TARGET_TYPE_QCA6290:
5518 	case TARGET_TYPE_QCA6390:
5519 	case TARGET_TYPE_QCA6490:
5520 	case TARGET_TYPE_QCA6750:
5521 	case TARGET_TYPE_KIWI:
5522 	case TARGET_TYPE_MANGO:
5523 	case TARGET_TYPE_PEACH:
5524 	case TARGET_TYPE_WCN6450:
5525 		/* do nothing */
5526 		break;
5527 	case TARGET_TYPE_QCA8074:
5528 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5529 							   MON_BUF_MIN_ENTRIES);
5530 		break;
5531 	case TARGET_TYPE_QCA8074V2:
5532 	case TARGET_TYPE_QCA6018:
5533 	case TARGET_TYPE_QCA9574:
5534 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5535 							   MON_BUF_MIN_ENTRIES);
5536 		mon_soc->hw_nac_monitor_support = 1;
5537 		break;
5538 	case TARGET_TYPE_QCN9000:
5539 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5540 							   MON_BUF_MIN_ENTRIES);
5541 		mon_soc->hw_nac_monitor_support = 1;
5542 		if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE)) {
5543 			if (cdp_ops  && cdp_ops->config_full_mon_mode)
5544 				cdp_ops->config_full_mon_mode((struct cdp_soc_t *)soc, 1);
5545 		}
5546 		break;
5547 	case TARGET_TYPE_QCA5018:
5548 	case TARGET_TYPE_QCN6122:
5549 	case TARGET_TYPE_QCN9160:
5550 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5551 							   MON_BUF_MIN_ENTRIES);
5552 		mon_soc->hw_nac_monitor_support = 1;
5553 		break;
5554 	case TARGET_TYPE_QCN9224:
5555 	case TARGET_TYPE_QCA5332:
5556 	case TARGET_TYPE_QCN6432:
5557 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5558 							   MON_BUF_MIN_ENTRIES);
5559 		mon_soc->hw_nac_monitor_support = 1;
5560 		mon_soc->monitor_mode_v2 = 1;
5561 		break;
5562 	default:
5563 		dp_mon_info("%s: Unknown tgt type %d", __func__, target_type);
5564 		qdf_assert_always(0);
5565 		break;
5566 	}
5567 
5568 	dp_mon_info("hw_nac_monitor_support = %d",
5569 		    mon_soc->hw_nac_monitor_support);
5570 
5571 	return QDF_STATUS_SUCCESS;
5572 }
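
/*
 * Wiring sketch for a hypothetical new target in the switch above
 * ("TARGET_TYPE_NEW" is a made-up name, shown only to illustrate the
 * pattern the existing cases follow):
 *
 *	case TARGET_TYPE_NEW:
 *		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
 *							   MON_BUF_MIN_ENTRIES);
 *		mon_soc->hw_nac_monitor_support = 1;
 *		mon_soc->monitor_mode_v2 = 1;	(mon 2.0 targets only)
 *		break;
 */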
5573 
5574 /**
5575  * dp_mon_pdev_per_target_config() - Target specific monitor pdev configuration
5576  * @pdev: PDEV handle [Should be valid]
5577  *
5578  * Return: None
5579  */
5580 static void dp_mon_pdev_per_target_config(struct dp_pdev *pdev)
5581 {
5582 	struct dp_soc *soc = pdev->soc;
5583 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5584 	int target_type;
5585 
5586 	target_type = hal_get_target_type(soc->hal_soc);
5587 	switch (target_type) {
5588 	case TARGET_TYPE_KIWI:
5589 	case TARGET_TYPE_QCN9224:
5590 	case TARGET_TYPE_MANGO:
5591 		mon_pdev->is_tlv_hdr_64_bit = true;
5592 		mon_pdev->tlv_hdr_size = HAL_RX_TLV64_HDR_SIZE;
5593 		break;
5594 	case TARGET_TYPE_PEACH:
5595 	default:
5596 		mon_pdev->is_tlv_hdr_64_bit = false;
5597 		mon_pdev->tlv_hdr_size = HAL_RX_TLV32_HDR_SIZE;
5598 		break;
5599 	}
5600 }
5601 
5602 static
5603 QDF_STATUS dp_mon_rings_alloc(struct dp_pdev *pdev)
5604 {
5605 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5606 	struct dp_mon_ops *mon_ops;
5607 
5608 	mon_ops = dp_mon_ops_get(pdev->soc);
5609 	if (!mon_ops) {
5610 		dp_mon_err("mon_ops is NULL");
5611 		return QDF_STATUS_E_FAILURE;
5612 	}
5613 
5614 	if (mon_ops->mon_rings_alloc[0]) {
5615 		status = mon_ops->mon_rings_alloc[0](pdev);
5616 		if (QDF_IS_STATUS_ERROR(status)) {
5617 			dp_mon_err("error: %d", status);
5618 			goto error;
5619 		}
5620 	}
5621 
5622 	if (mon_ops->mon_rings_alloc[1]) {
5623 		status = mon_ops->mon_rings_alloc[1](pdev);
5624 		if (QDF_IS_STATUS_ERROR(status)) {
5625 			dp_mon_err("error: %d", status);
5626 			goto error;
5627 		}
5628 	}
5629 
5630 error:
5631 	return status;
5632 }
5633 
5634 static
5635 void dp_mon_rings_free(struct dp_pdev *pdev)
5636 {
5637 	struct dp_mon_ops *mon_ops;
5638 
5639 	mon_ops = dp_mon_ops_get(pdev->soc);
5640 	if (!mon_ops) {
5641 		dp_mon_err("mon_ops is NULL");
5642 		return;
5643 	}
5644 
5645 	if (mon_ops->mon_rings_free[0])
5646 		mon_ops->mon_rings_free[0](pdev);
5647 
5648 	if (mon_ops->mon_rings_free[1])
5649 		mon_ops->mon_rings_free[1](pdev);
5650 }
5651 
5652 static
5653 QDF_STATUS dp_mon_rings_init(struct dp_pdev *pdev)
5654 {
5655 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5656 	struct dp_mon_ops *mon_ops;
5657 
5658 	mon_ops = dp_mon_ops_get(pdev->soc);
5659 	if (!mon_ops) {
5660 		dp_mon_err("mon_ops is NULL");
5661 		return QDF_STATUS_E_FAILURE;
5662 	}
5663 
5664 	if (mon_ops->mon_rings_init[0]) {
5665 		status = mon_ops->mon_rings_init[0](pdev);
5666 		if (QDF_IS_STATUS_ERROR(status)) {
5667 			dp_mon_err("error: %d", status);
5668 			goto error;
5669 		}
5670 	}
5671 
5672 	if (mon_ops->mon_rings_init[1]) {
5673 		status = mon_ops->mon_rings_init[1](pdev);
5674 		if (QDF_IS_STATUS_ERROR(status)) {
5675 			dp_mon_err("error: %d", status);
5676 			goto error;
5677 		}
5678 	}
5679 
5680 error:
5681 	return status;
5682 }
5683 
5684 static
5685 void dp_mon_rings_deinit(struct dp_pdev *pdev)
5686 {
5687 	struct dp_mon_ops *mon_ops;
5688 
5689 	mon_ops = dp_mon_ops_get(pdev->soc);
5690 	if (!mon_ops) {
5691 		dp_mon_err("mon_ops is NULL");
5692 		return;
5693 	}
5694 
5695 	if (mon_ops->mon_rings_deinit[0])
5696 		mon_ops->mon_rings_deinit[0](pdev);
5697 
5698 	if (mon_ops->mon_rings_deinit[1])
5699 		mon_ops->mon_rings_deinit[1](pdev);
5700 }
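
/*
 * Convention note (illustrative): the mon_rings_* op arrays are indexed by
 * monitor generation, index 0 for the legacy 1.0 path and index 1 for the
 * 2.0 path, as registered in dp_mon_register_lpc_ops_1_0() below, e.g.
 *
 *	mon_ops->mon_rings_alloc[0] = dp_mon_rings_alloc_1_0;
 *	mon_ops->mon_rings_alloc[1] = dp_pdev_mon_rings_alloc_2_0;
 *
 * The wrappers above simply invoke whichever slots are populated and stop
 * at the first error.
 */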
5701 
5702 QDF_STATUS dp_mon_pdev_attach(struct dp_pdev *pdev)
5703 {
5704 	struct dp_soc *soc;
5705 	struct dp_mon_pdev *mon_pdev;
5706 	struct dp_mon_ops *mon_ops;
5707 	qdf_size_t mon_pdev_context_size;
5708 
5709 	if (!pdev) {
5710 		dp_mon_err("pdev is NULL");
5711 		goto fail0;
5712 	}
5713 
5714 	soc = pdev->soc;
5715 
5716 	mon_pdev_context_size = soc->arch_ops.txrx_get_mon_context_size(DP_CONTEXT_TYPE_MON_PDEV);
5717 	mon_pdev = dp_context_alloc_mem(soc, DP_MON_PDEV_TYPE, mon_pdev_context_size);
5718 	if (!mon_pdev) {
5719 		dp_mon_err("%pK: MONITOR pdev allocation failed", pdev);
5720 		goto fail0;
5721 	}
5722 
5723 	pdev->monitor_pdev = mon_pdev;
5724 	mon_ops = dp_mon_ops_get(pdev->soc);
5725 	if (!mon_ops) {
5726 		dp_mon_err("%pK: Invalid monitor ops", pdev);
5727 		goto fail1;
5728 	}
5729 
5730 	if (mon_ops->mon_pdev_alloc) {
5731 		if (mon_ops->mon_pdev_alloc(pdev)) {
5732 			dp_mon_err("%pK: MONITOR pdev alloc failed", pdev);
5733 			goto fail1;
5734 		}
5735 	}
5736 
5737 	if (dp_mon_rings_alloc(pdev)) {
5738 		dp_mon_err("%pK: MONITOR rings setup failed", pdev);
5739 		goto fail2;
5740 	}
5741 
5742 	/* Rx monitor mode specific init */
5743 	if (mon_ops->rx_mon_desc_pool_alloc) {
5744 		if (mon_ops->rx_mon_desc_pool_alloc(pdev)) {
5745 			dp_mon_err("%pK: dp_rx_pdev_mon_attach failed", pdev);
5746 			goto fail3;
5747 		}
5748 	}
5749 
5750 	if (mon_ops->mon_rx_ppdu_info_cache_create) {
5751 		if (mon_ops->mon_rx_ppdu_info_cache_create(pdev)) {
5752 			dp_mon_err("%pK: dp_rx_pdev_mon_attach failed", pdev);
5753 			goto fail4;
5754 		}
5755 	}
5757 	dp_mon_pdev_per_target_config(pdev);
5758 
5759 	return QDF_STATUS_SUCCESS;
5760 fail4:
5761 	if (mon_ops->rx_mon_desc_pool_free)
5762 		mon_ops->rx_mon_desc_pool_free(pdev);
5763 fail3:
5764 	dp_mon_rings_free(pdev);
5765 fail2:
5766 	if (mon_ops->mon_pdev_free)
5767 		mon_ops->mon_pdev_free(pdev);
5768 fail1:
5769 	pdev->monitor_pdev = NULL;
5770 	dp_context_free_mem(soc, DP_MON_PDEV_TYPE, mon_pdev);
5771 fail0:
5772 	return QDF_STATUS_E_NOMEM;
5773 }
5774 
5775 QDF_STATUS dp_mon_pdev_detach(struct dp_pdev *pdev)
5776 {
5777 	struct dp_mon_pdev *mon_pdev;
5778 	struct dp_mon_ops *mon_ops = NULL;
5779 
5780 	if (!pdev) {
5781 		dp_mon_err("pdev is NULL");
5782 		return QDF_STATUS_E_FAILURE;
5783 	}
5784 
5785 	mon_pdev = pdev->monitor_pdev;
5786 	if (!mon_pdev) {
5787 		dp_mon_err("Monitor pdev is NULL");
5788 		return QDF_STATUS_E_FAILURE;
5789 	}
5790 
5791 	mon_ops = dp_mon_ops_get(pdev->soc);
5792 	if (!mon_ops) {
5793 		dp_mon_err("Monitor ops is NULL");
5794 		return QDF_STATUS_E_FAILURE;
5795 	}
5796 
5797 	if (mon_ops->mon_rx_ppdu_info_cache_destroy)
5798 		mon_ops->mon_rx_ppdu_info_cache_destroy(pdev);
5799 	if (mon_ops->rx_mon_desc_pool_free)
5800 		mon_ops->rx_mon_desc_pool_free(pdev);
5801 	dp_mon_rings_free(pdev);
5802 	if (mon_ops->mon_pdev_free)
5803 		mon_ops->mon_pdev_free(pdev);
5804 
5805 	dp_context_free_mem(pdev->soc, DP_MON_PDEV_TYPE, mon_pdev);
5806 	pdev->monitor_pdev = NULL;
5807 	return QDF_STATUS_SUCCESS;
5808 }
5809 
5810 static void dp_mon_pdev_filter_init(struct dp_mon_pdev *mon_pdev)
5811 {
5812 	if (!mon_pdev)
5813 		return;
5814 
5815 	mon_pdev->mon_filter_mode = MON_FILTER_ALL;
5816 	mon_pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
5817 	mon_pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
5818 	mon_pdev->fp_data_filter = FILTER_DATA_ALL;
5819 	mon_pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
5820 	mon_pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
5821 	mon_pdev->mo_data_filter = FILTER_DATA_ALL;
5822 }
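
/*
 * Example effect (illustrative): with the defaults installed above
 * (MON_FILTER_ALL plus FILTER_*_ALL on both the fp_* and mo_* paths),
 * the predicates earlier in this file all report pass, e.g.
 *
 *	dp_mon_pdev_filter_init(mon_pdev);
 *	dp_pdev_get_filter_ucast_data((struct cdp_pdev *)pdev);	(true)
 *	dp_pdev_get_filter_non_data((struct cdp_pdev *)pdev);	(true)
 */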
5823 
5824 #ifdef WLAN_TX_PKT_CAPTURE_ENH
5825 void dp_mon_register_tx_pkt_enh_ops_1_0(struct dp_mon_ops *mon_ops)
5826 {
5827 	mon_ops->mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach_1_0;
5828 	mon_ops->mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach_1_0;
5829 	mon_ops->mon_peer_tx_capture_filter_check =
5830 				dp_peer_tx_capture_filter_check_1_0;
5831 }
5832 #elif defined(WLAN_TX_PKT_CAPTURE_ENH_BE) && defined(WLAN_FEATURE_LOCAL_PKT_CAPTURE)
5833 void dp_mon_register_tx_pkt_enh_ops_1_0(struct dp_mon_ops *mon_ops)
5834 {
5835 	mon_ops->mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach_2_0;
5836 	mon_ops->mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach_2_0;
5837 	mon_ops->mon_peer_tx_capture_filter_check = NULL;
5838 }
5839 #elif (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH))
5840 void dp_mon_register_tx_pkt_enh_ops_1_0(struct dp_mon_ops *mon_ops)
5841 {
5842 	mon_ops->mon_tx_ppdu_stats_attach = NULL;
5843 	mon_ops->mon_tx_ppdu_stats_detach = NULL;
5844 	mon_ops->mon_peer_tx_capture_filter_check = NULL;
5845 }
5846 #endif
5847 
5848 #ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE
5849 #if !defined(DISABLE_MON_CONFIG)
5850 static inline void dp_mon_config_register_ops(struct dp_mon_ops *mon_ops)
5851 {
5852 	mon_ops->mon_pdev_htt_srng_setup[0] = dp_mon_htt_srng_setup_1_0;
5853 	mon_ops->mon_pdev_htt_srng_setup[1] = dp_mon_pdev_htt_srng_setup_2_0;
5854 	mon_ops->mon_soc_htt_srng_setup = dp_mon_soc_htt_srng_setup_2_0;
5855 }
5856 #else
5857 static inline void dp_mon_config_register_ops(struct dp_mon_ops *mon_ops)
5858 {
5859 }
5860 #endif
5861 
5862 void dp_mon_register_lpc_ops_1_0(struct dp_mon_ops *mon_ops)
5863 {
5864 	mon_ops->mon_soc_attach[0] = NULL;
5865 	mon_ops->mon_soc_detach[0] = NULL;
5866 	mon_ops->mon_soc_init[0] = NULL;
5867 	mon_ops->mon_soc_deinit[0] = NULL;
5868 	mon_ops->mon_soc_attach[1] = dp_mon_soc_attach_2_0;
5869 	mon_ops->mon_soc_detach[1] = dp_mon_soc_detach_2_0;
5870 	mon_ops->mon_soc_init[1] = dp_mon_soc_init_2_0;
5871 	mon_ops->mon_soc_deinit[1] = dp_mon_soc_deinit_2_0;
5872 
5873 	dp_mon_config_register_ops(mon_ops);
5874 
5875 	mon_ops->mon_rings_alloc[0] = dp_mon_rings_alloc_1_0;
5876 	mon_ops->mon_rings_free[0] = dp_mon_rings_free_1_0;
5877 	mon_ops->mon_rings_init[0] = dp_mon_rings_init_1_0;
5878 	mon_ops->mon_rings_deinit[0] = dp_mon_rings_deinit_1_0;
5879 	mon_ops->mon_rings_alloc[1] = dp_pdev_mon_rings_alloc_2_0;
5880 	mon_ops->mon_rings_free[1] = dp_pdev_mon_rings_free_2_0;
5881 	mon_ops->mon_rings_init[1] = dp_pdev_mon_rings_init_2_0;
5882 	mon_ops->mon_rings_deinit[1] = dp_pdev_mon_rings_deinit_2_0;
5883 
5884 	mon_ops->mon_filter_setup_tx_mon_mode =
5885 				dp_mon_filter_setup_local_pkt_capture_tx;
5886 	mon_ops->mon_filter_reset_tx_mon_mode =
5887 				dp_mon_filter_reset_local_pkt_capture_tx;
5888 	mon_ops->tx_mon_filter_update = dp_tx_mon_filter_update_2_0;
5889 
5890 	mon_ops->rx_hdr_length_set = dp_rx_mon_hdr_length_set;
5891 	dp_mon_register_tx_pkt_enh_ops_1_0(mon_ops);
5892 }
5893 #else
5894 #if !defined(DISABLE_MON_CONFIG)
5895 static inline void dp_mon_config_register_ops(struct dp_mon_ops *mon_ops)
5896 {
5897 	mon_ops->mon_pdev_htt_srng_setup[0] = dp_mon_htt_srng_setup_1_0;
5898 	mon_ops->mon_pdev_htt_srng_setup[1] = NULL;
5899 	mon_ops->mon_soc_htt_srng_setup = NULL;
5900 }
5901 #else
5902 static inline void dp_mon_config_register_ops(struct dp_mon_ops *mon_ops)
5903 {
5904 }
5905 #endif
5906 
5907 void dp_mon_register_lpc_ops_1_0(struct dp_mon_ops *mon_ops)
5908 {
5909 	mon_ops->mon_soc_attach[0] = NULL;
5910 	mon_ops->mon_soc_detach[0] = NULL;
5911 	mon_ops->mon_soc_init[0] = NULL;
5912 	mon_ops->mon_soc_deinit[0] = NULL;
5913 	mon_ops->mon_soc_attach[1] = NULL;
5914 	mon_ops->mon_soc_detach[1] = NULL;
5915 	mon_ops->mon_soc_init[1] = NULL;
5916 	mon_ops->mon_soc_deinit[1] = NULL;
5917 
5918 	dp_mon_config_register_ops(mon_ops);
5919 
5920 	mon_ops->mon_rings_alloc[0] = dp_mon_rings_alloc_1_0;
5921 	mon_ops->mon_rings_free[0] = dp_mon_rings_free_1_0;
5922 	mon_ops->mon_rings_init[0] = dp_mon_rings_init_1_0;
5923 	mon_ops->mon_rings_deinit[0] = dp_mon_rings_deinit_1_0;
5924 	mon_ops->mon_rings_alloc[1] = NULL;
5925 	mon_ops->mon_rings_free[1] = NULL;
5926 	mon_ops->mon_rings_init[1] = NULL;
5927 	mon_ops->mon_rings_deinit[1] = NULL;
5928 
5929 	mon_ops->mon_filter_setup_tx_mon_mode = NULL;
5930 	mon_ops->mon_filter_reset_tx_mon_mode = NULL;
5931 	mon_ops->tx_mon_filter_update = NULL;
5932 
5933 	mon_ops->rx_hdr_length_set = NULL;
5934 	dp_mon_register_tx_pkt_enh_ops_1_0(mon_ops);
5935 }
5936 #endif
5937 
5938 QDF_STATUS dp_mon_pdev_init(struct dp_pdev *pdev)
5939 {
5940 	struct dp_mon_pdev *mon_pdev;
5941 	struct dp_mon_ops *mon_ops = NULL;
5942 
5943 	if (!pdev) {
5944 		dp_mon_err("pdev is NULL");
5945 		return QDF_STATUS_E_FAILURE;
5946 	}
5947 
5948 	mon_pdev = pdev->monitor_pdev;
5949 
5950 	mon_pdev->invalid_mon_peer = qdf_mem_malloc(sizeof(struct dp_mon_peer));
5951 	if (!mon_pdev->invalid_mon_peer) {
5952 		dp_mon_err("%pK: Memory allocation failed for invalid "
5953 			   "monitor peer", pdev);
5954 		return QDF_STATUS_E_NOMEM;
5955 	}
5956 
5957 	mon_ops = dp_mon_ops_get(pdev->soc);
5958 	if (!mon_ops) {
5959 		dp_mon_err("Monitor ops is NULL");
5960 		goto fail0;
5961 	}
5962 
5963 	mon_pdev->filter = dp_mon_filter_alloc(mon_pdev);
5964 	if (!mon_pdev->filter) {
5965 		dp_mon_err("%pK: Memory allocation failed for monitor filter",
5966 			   pdev);
5967 		goto fail0;
5968 	}
5969 
5970 	if (mon_ops->tx_mon_filter_alloc) {
5971 		if (mon_ops->tx_mon_filter_alloc(pdev)) {
5972 			dp_mon_err("%pK: Memory allocation failed for tx monitor "
5973 				   "filter", pdev);
5974 			goto fail1;
5975 		}
5976 	}
5977 
5978 	qdf_spinlock_create(&mon_pdev->ppdu_stats_lock);
5979 	qdf_spinlock_create(&mon_pdev->neighbour_peer_mutex);
5980 	mon_pdev->monitor_configured = false;
5981 	mon_pdev->mon_chan_band = REG_BAND_UNKNOWN;
5982 
5983 	TAILQ_INIT(&mon_pdev->neighbour_peers_list);
5984 	mon_pdev->neighbour_peers_added = false;
5985 	mon_pdev->monitor_configured = false;
5986 
5987 	dp_mon_pdev_filter_init(mon_pdev);
5988 	/*
5989 	 * initialize ppdu tlv list
5990 	 */
5991 	TAILQ_INIT(&mon_pdev->ppdu_info_list);
5992 	TAILQ_INIT(&mon_pdev->sched_comp_ppdu_list);
5993 
5994 	mon_pdev->list_depth = 0;
5995 	mon_pdev->tlv_count = 0;
5996 	/* initialize cal client timer */
5997 	dp_cal_client_attach(&mon_pdev->cal_client_ctx,
5998 			     dp_pdev_to_cdp_pdev(pdev),
5999 			     pdev->soc->osdev,
6000 			     &dp_iterate_update_peer_list);
6001 	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
6002 		goto fail2;
6003 
6004 	if (mon_ops->mon_lite_mon_alloc) {
6005 		if (mon_ops->mon_lite_mon_alloc(pdev) != QDF_STATUS_SUCCESS) {
6006 			dp_mon_err("%pK: lite mon alloc failed", pdev);
6007 			goto fail3;
6008 		}
6009 	}
6010 
6011 	if (dp_mon_rings_init(pdev)) {
6012 		dp_mon_err("%pK: MONITOR rings setup failed", pdev);
6013 		goto fail4;
6014 	}
6015 
6016 	/* initialize sw monitor rx descriptors */
6017 	if (mon_ops->rx_mon_desc_pool_init)
6018 		mon_ops->rx_mon_desc_pool_init(pdev);
6019 
6020 	/* allocate buffers and replenish the monitor RxDMA ring */
6021 	if (mon_ops->rx_mon_buffers_alloc) {
6022 		if (mon_ops->rx_mon_buffers_alloc(pdev)) {
6023 			dp_mon_err("%pK: rx mon buffers alloc failed", pdev);
6024 			goto fail5;
6025 		}
6026 	}
6027 
6028 	/* attach monitor function */
6029 	dp_monitor_tx_ppdu_stats_attach(pdev);
6030 
6031 	/* mon pdev extended init */
6032 	if (mon_ops->mon_pdev_ext_init)
6033 		mon_ops->mon_pdev_ext_init(pdev);
6034 
6035 	if (mon_ops->mon_rx_pdev_tlv_logger_init)
6036 		mon_ops->mon_rx_pdev_tlv_logger_init(pdev);
6037 
6038 	mon_pdev->is_dp_mon_pdev_initialized = true;
6039 	dp_mon_set_local_pkt_capture_running(mon_pdev, false);
6040 
6041 	return QDF_STATUS_SUCCESS;
6042 
6043 fail5:
6044 	if (mon_ops->rx_mon_desc_pool_deinit)
6045 		mon_ops->rx_mon_desc_pool_deinit(pdev);
6046 
6047 	dp_mon_rings_deinit(pdev);
6048 fail4:
6049 	if (mon_ops->mon_lite_mon_dealloc)
6050 		mon_ops->mon_lite_mon_dealloc(pdev);
6051 fail3:
6052 	dp_htt_ppdu_stats_detach(pdev);
6053 fail2:
6054 	qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
6055 	qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
6056 	if (mon_ops->tx_mon_filter_dealloc)
6057 		mon_ops->tx_mon_filter_dealloc(pdev);
6058 fail1:
6059 	dp_mon_filter_dealloc(mon_pdev);
6060 fail0:
6061 	qdf_mem_free(mon_pdev->invalid_mon_peer);
6062 	return QDF_STATUS_E_FAILURE;
6063 }
6064 
6065 QDF_STATUS dp_mon_pdev_deinit(struct dp_pdev *pdev)
6066 {
6067 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
6068 	struct dp_mon_ops *mon_ops = NULL;
6069 
6070 	mon_ops = dp_mon_ops_get(pdev->soc);
6071 	if (!mon_ops) {
6072 		dp_mon_err("Monitor ops is NULL");
6073 		return QDF_STATUS_E_FAILURE;
6074 	}
6075 
6076 	if (!mon_pdev->is_dp_mon_pdev_initialized)
6077 		return QDF_STATUS_SUCCESS;
6078 
6079 	dp_mon_filters_reset(pdev);
6080 
6081 	/* mon pdev extended deinit */
6082 	if (mon_ops->mon_pdev_ext_deinit)
6083 		mon_ops->mon_pdev_ext_deinit(pdev);
6084 
6085 	if (mon_ops->mon_rx_pdev_tlv_logger_deinit)
6086 		mon_ops->mon_rx_pdev_tlv_logger_deinit(pdev);
6087 
6088 	/* detach monitor function */
6089 	dp_monitor_tx_ppdu_stats_detach(pdev);
6090 
6091 	if (mon_ops->rx_mon_buffers_free)
6092 		mon_ops->rx_mon_buffers_free(pdev);
6093 	if (mon_ops->rx_mon_desc_pool_deinit)
6094 		mon_ops->rx_mon_desc_pool_deinit(pdev);
6095 	dp_mon_rings_deinit(pdev);
6096 	dp_cal_client_detach(&mon_pdev->cal_client_ctx);
6097 	if (mon_ops->mon_lite_mon_dealloc)
6098 		mon_ops->mon_lite_mon_dealloc(pdev);
6099 	dp_htt_ppdu_stats_detach(pdev);
6100 	qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
6101 	dp_neighbour_peers_detach(pdev);
6102 	dp_pktlogmod_exit(pdev);
6103 	if (mon_ops->tx_mon_filter_dealloc)
6104 		mon_ops->tx_mon_filter_dealloc(pdev);
6105 	if (mon_pdev->filter)
6106 		dp_mon_filter_dealloc(mon_pdev);
6108 	if (mon_pdev->invalid_mon_peer)
6109 		qdf_mem_free(mon_pdev->invalid_mon_peer);
6110 	mon_pdev->is_dp_mon_pdev_initialized = false;
6111 	dp_mon_set_local_pkt_capture_running(mon_pdev, false);
6112 
6113 	return QDF_STATUS_SUCCESS;
6114 }
6115 
6116 QDF_STATUS dp_mon_vdev_attach(struct dp_vdev *vdev)
6117 {
6118 	struct dp_mon_vdev *mon_vdev;
6119 	struct dp_pdev *pdev = vdev->pdev;
6120 
6121 	mon_vdev = (struct dp_mon_vdev *)qdf_mem_malloc(sizeof(*mon_vdev));
6122 	if (!mon_vdev) {
6123 		dp_mon_err("%pK: Monitor vdev allocation failed", vdev);
6124 		return QDF_STATUS_E_NOMEM;
6125 	}
6126 
6127 	if (pdev && pdev->monitor_pdev &&
6128 	    pdev->monitor_pdev->scan_spcl_vap_configured)
6129 		dp_scan_spcl_vap_stats_attach(mon_vdev);
6130 
6131 	vdev->monitor_vdev = mon_vdev;
6132 
6133 	return QDF_STATUS_SUCCESS;
6134 }
6135 
6136 QDF_STATUS dp_mon_vdev_detach(struct dp_vdev *vdev)
6137 {
6138 	struct dp_mon_vdev *mon_vdev = vdev->monitor_vdev;
6139 	struct dp_pdev *pdev = vdev->pdev;
6140 	struct dp_mon_ops *mon_ops = dp_mon_ops_get(pdev->soc);
6141 
6142 	if (!mon_ops)
6143 		return QDF_STATUS_E_FAILURE;
6144 
6145 	if (!mon_vdev)
6146 		return QDF_STATUS_E_FAILURE;
6147 
6148 	if (pdev->monitor_pdev->scan_spcl_vap_configured)
6149 		dp_scan_spcl_vap_stats_detach(mon_vdev);
6150 
6151 	qdf_mem_free(mon_vdev);
6152 	vdev->monitor_vdev = NULL;
6153 	/*
6154 	 * Set mvdev to NULL only if detach is called for the monitor/special
6155 	 * vap.
6156 	 */
6155 	if (pdev->monitor_pdev->mvdev == vdev)
6156 		pdev->monitor_pdev->mvdev = NULL;
6157 
6158 	if (mon_ops->mon_lite_mon_vdev_delete)
6159 		mon_ops->mon_lite_mon_vdev_delete(pdev, vdev);
6160 
6161 	return QDF_STATUS_SUCCESS;
6162 }
6163 
6164 #if defined(FEATURE_PERPKT_INFO) && defined(WDI_EVENT_ENABLE)
6165 /**
6166  * dp_mon_peer_attach_notify() - Raise WDI event for peer create
6167  * @peer: DP Peer handle
6168  *
6169  * Return: none
6170  */
6171 static inline
6172 void dp_mon_peer_attach_notify(struct dp_peer *peer)
6173 {
6174 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
6175 	struct dp_pdev *pdev;
6176 	struct dp_soc *soc;
6177 	struct cdp_peer_cookie peer_cookie;
6178 
6179 	pdev = peer->vdev->pdev;
6180 	soc = pdev->soc;
6181 
6182 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
6183 		     QDF_MAC_ADDR_SIZE);
6184 
6185 	peer_cookie.ctx = NULL;
6186 	peer_cookie.pdev_id = pdev->pdev_id;
6187 	peer_cookie.cookie = pdev->next_peer_cookie++;
6188 
6189 	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, soc,
6190 			     (void *)&peer_cookie,
6191 			     peer->peer_id, WDI_NO_VAL, pdev->pdev_id);
6192 
6193 	if (soc->peerstats_enabled) {
6194 		if (!peer_cookie.ctx) {
6195 			pdev->next_peer_cookie--;
6196 			qdf_err("Failed to initialize peer rate stats");
6197 			mon_peer->peerstats_ctx = NULL;
6198 		} else {
6199 			mon_peer->peerstats_ctx =
6200 				(struct cdp_peer_rate_stats_ctx *)
6201 				 peer_cookie.ctx;
6202 		}
6203 	}
6204 }
6205 
6206 /**
6207  * dp_mon_peer_detach_notify() - Raise WDI event for peer destroy
6208  * @peer: DP Peer handle
6209  *
6210  * Return: none
6211  */
6212 static inline
6213 void dp_mon_peer_detach_notify(struct dp_peer *peer)
6214 {
6215 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
6216 	struct dp_pdev *pdev;
6217 	struct dp_soc *soc;
6218 	struct cdp_peer_cookie peer_cookie;
6219 
6220 	pdev = peer->vdev->pdev;
6221 	soc = pdev->soc;
6222 	/* send peer destroy event to upper layer */
6223 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
6224 		     QDF_MAC_ADDR_SIZE);
6226 	peer_cookie.ctx = (struct cdp_stats_cookie *)mon_peer->peerstats_ctx;
6227 
6228 	dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
6229 			     soc,
6230 			     (void *)&peer_cookie,
6231 			     peer->peer_id,
6232 			     WDI_NO_VAL,
6233 			     pdev->pdev_id);
6234 
6235 	mon_peer->peerstats_ctx = NULL;
6236 }
6237 #else
6238 static inline
6239 void dp_mon_peer_attach_notify(struct dp_peer *peer)
6240 {
6241 	peer->monitor_peer->peerstats_ctx = NULL;
6242 }
6243 
6244 static inline
6245 void dp_mon_peer_detach_notify(struct dp_peer *peer)
6246 {
6247 	peer->monitor_peer->peerstats_ctx = NULL;
6248 }
6249 #endif
6250 
6251 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
6252 QDF_STATUS dp_mon_peer_attach(struct dp_peer *peer)
6253 {
6254 	struct dp_mon_peer *mon_peer;
6255 	struct dp_pdev *pdev;
6256 
6257 	mon_peer = (struct dp_mon_peer *)qdf_mem_malloc(sizeof(*mon_peer));
6258 	if (!mon_peer) {
6259 		dp_mon_err("%pK: MONITOR peer allocation failed", peer);
6260 		return QDF_STATUS_E_NOMEM;
6261 	}
6262 
6263 	peer->monitor_peer = mon_peer;
6264 	pdev = peer->vdev->pdev;
6265 	/*
6266 	 * In tx_monitor mode a filter may be set for an unassociated peer;
6267 	 * when that peer becomes associated, the tx_cap_enabled flag needs
6268 	 * to be updated to support peer filtering.
6269 	 */
6270 	dp_monitor_peer_tx_capture_filter_check(pdev, peer);
6271 
6272 	DP_STATS_INIT(mon_peer);
6273 	DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);
6274 
6275 	dp_mon_peer_attach_notify(peer);
6276 
6277 	return QDF_STATUS_SUCCESS;
6278 }
6279 #endif
6280 
6281 QDF_STATUS dp_mon_peer_detach(struct dp_peer *peer)
6282 {
6283 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
6284 
6285 	if (!mon_peer)
6286 		return QDF_STATUS_SUCCESS;
6287 
6288 	dp_mon_peer_detach_notify(peer);
6289 
6290 	qdf_mem_free(mon_peer);
6291 	peer->monitor_peer = NULL;
6292 
6293 	return QDF_STATUS_SUCCESS;
6294 }
6295 
6296 #ifndef DISABLE_MON_CONFIG
6297 void dp_mon_register_intr_ops(struct dp_soc *soc)
6298 {
6299 	struct dp_mon_ops *mon_ops = NULL;
6300 
6301 	mon_ops = dp_mon_ops_get(soc);
6302 	if (!mon_ops) {
6303 		dp_mon_err("Monitor ops is NULL");
6304 		return;
6305 	}
6306 	if (mon_ops->mon_register_intr_ops)
6307 		mon_ops->mon_register_intr_ops(soc);
6308 }
6309 #endif
6310 
6311 struct cdp_peer_rate_stats_ctx *dp_mon_peer_get_peerstats_ctx(struct
6312 							      dp_peer *peer)
6313 {
6314 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
6315 
6316 	if (mon_peer)
6317 		return mon_peer->peerstats_ctx;
6318 	else
6319 		return NULL;
6320 }
6321 
6322 #ifdef QCA_ENHANCED_STATS_SUPPORT
6323 void dp_mon_peer_reset_stats(struct dp_peer *peer)
6324 {
6325 	struct dp_mon_peer *mon_peer = NULL;
6326 
6327 	mon_peer = peer->monitor_peer;
6328 	if (!mon_peer)
6329 		return;
6330 
6331 	DP_STATS_CLR(mon_peer);
6332 	DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);
6333 }
6334 
6335 void dp_mon_peer_get_stats(struct dp_peer *peer, void *arg,
6336 			   enum cdp_stat_update_type type)
6337 {
6338 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
6339 	struct dp_mon_peer_stats *mon_peer_stats;
6340 
6341 	if (!mon_peer || !arg)
6342 		return;
6343 
6344 	mon_peer_stats = &mon_peer->stats;
6345 
6346 	switch (type) {
6347 	case UPDATE_PEER_STATS:
6348 	{
6349 		struct cdp_peer_stats *peer_stats =
6350 						(struct cdp_peer_stats *)arg;
6351 		DP_UPDATE_MON_STATS(peer_stats, mon_peer_stats);
6352 		break;
6353 	}
6354 	case UPDATE_VDEV_STATS:
6355 	{
6356 		struct cdp_vdev_stats *vdev_stats =
6357 						(struct cdp_vdev_stats *)arg;
6358 		DP_UPDATE_MON_STATS(vdev_stats, mon_peer_stats);
6359 		break;
6360 	}
6361 	default:
6362 		dp_mon_err("Invalid stats_update_type: %u", type);
6363 	}
6364 }
6365 
6366 void dp_mon_invalid_peer_update_pdev_stats(struct dp_pdev *pdev)
6367 {
6368 	struct dp_mon_peer *mon_peer;
6369 	struct dp_mon_peer_stats *mon_peer_stats;
6370 	struct cdp_pdev_stats *pdev_stats;
6371 
6372 	if (!pdev || !pdev->monitor_pdev)
6373 		return;
6374 
6375 	mon_peer = pdev->monitor_pdev->invalid_mon_peer;
6376 	if (!mon_peer)
6377 		return;
6378 
6379 	mon_peer_stats = &mon_peer->stats;
6380 	pdev_stats = &pdev->stats;
6381 	DP_UPDATE_MON_STATS(pdev_stats, mon_peer_stats);
6382 }
6383 
6384 QDF_STATUS
6385 dp_mon_peer_get_stats_param(struct dp_peer *peer, enum cdp_peer_stats_type type,
6386 			    cdp_peer_stats_param_t *buf)
6387 {
6388 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
6389 	struct dp_mon_peer *mon_peer;
6390 
6391 	mon_peer = peer->monitor_peer;
6392 	if (!mon_peer)
6393 		return QDF_STATUS_E_FAILURE;
6394 
6395 	switch (type) {
6396 	case cdp_peer_tx_rate:
6397 		buf->tx_rate = mon_peer->stats.tx.tx_rate;
6398 		break;
6399 	case cdp_peer_tx_last_tx_rate:
6400 		buf->last_tx_rate = mon_peer->stats.tx.last_tx_rate;
6401 		break;
6402 	case cdp_peer_tx_ratecode:
6403 		buf->tx_ratecode = mon_peer->stats.tx.tx_ratecode;
6404 		break;
6405 	case cdp_peer_rx_rate:
6406 		buf->rx_rate = mon_peer->stats.rx.rx_rate;
6407 		break;
6408 	case cdp_peer_rx_last_rx_rate:
6409 		buf->last_rx_rate = mon_peer->stats.rx.last_rx_rate;
6410 		break;
6411 	case cdp_peer_rx_ratecode:
6412 		buf->rx_ratecode = mon_peer->stats.rx.rx_ratecode;
6413 		break;
6414 	case cdp_peer_rx_avg_snr:
6415 		buf->rx_avg_snr = mon_peer->stats.rx.avg_snr;
6416 		break;
6417 	case cdp_peer_rx_snr:
6418 		buf->rx_snr = mon_peer->stats.rx.snr;
6419 		break;
6420 	default:
6421 		dp_err("Invalid stats type: %u requested", type);
6422 		ret = QDF_STATUS_E_FAILURE;
6423 	}
6424 
6425 	return ret;
6426 }
6427 #endif
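
/*
 * Usage sketch for dp_mon_peer_get_stats_param() (illustrative caller, not
 * driver code; "use" is a placeholder):
 *
 *	cdp_peer_stats_param_t buf = {0};
 *
 *	if (dp_mon_peer_get_stats_param(peer, cdp_peer_rx_avg_snr, &buf) ==
 *	    QDF_STATUS_SUCCESS)
 *		use(buf.rx_avg_snr);
 *
 * The union member read back must match the requested enum, as in the
 * switch above.
 */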
6428 
6429 void dp_mon_ops_register(struct dp_soc *soc)
6430 {
6431 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
6432 	uint32_t target_type;
6433 
6434 	target_type = hal_get_target_type(soc->hal_soc);
6435 	switch (target_type) {
6436 	case TARGET_TYPE_QCA6290:
6437 	case TARGET_TYPE_QCA6390:
6438 	case TARGET_TYPE_QCA6490:
6439 	case TARGET_TYPE_QCA6750:
6440 	case TARGET_TYPE_KIWI:
6441 	case TARGET_TYPE_MANGO:
6442 	case TARGET_TYPE_PEACH:
6443 	case TARGET_TYPE_QCA8074:
6444 	case TARGET_TYPE_QCA8074V2:
6445 	case TARGET_TYPE_QCA6018:
6446 	case TARGET_TYPE_QCA9574:
6447 	case TARGET_TYPE_QCN9160:
6448 	case TARGET_TYPE_QCN9000:
6449 	case TARGET_TYPE_QCA5018:
6450 	case TARGET_TYPE_QCN6122:
6451 	case TARGET_TYPE_WCN6450:
6452 		dp_mon_ops_register_1_0(mon_soc);
6453 		dp_mon_ops_register_tx_2_0(mon_soc);
6454 		break;
6455 	case TARGET_TYPE_QCN9224:
6456 	case TARGET_TYPE_QCA5332:
6457 	case TARGET_TYPE_QCN6432:
6458 #if defined(WLAN_PKT_CAPTURE_TX_2_0) || defined(WLAN_PKT_CAPTURE_RX_2_0)
6459 		dp_mon_ops_register_2_0(mon_soc);
6460 #endif
6461 		break;
6462 	default:
6463 		dp_mon_err("%s: Unknown tgt type %d", __func__, target_type);
6464 		qdf_assert_always(0);
6465 		break;
6466 	}
6467 }
6468 
6469 #ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
6470 void dp_mon_ops_free(struct dp_soc *soc)
6471 {
6472 	struct cdp_ops *ops = soc->cdp_soc.ops;
6473 	struct cdp_mon_ops *cdp_mon_ops = ops->mon_ops;
6474 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
6475 	struct dp_mon_ops *mon_ops = mon_soc->mon_ops;
6476 
6477 	if (cdp_mon_ops)
6478 		qdf_mem_free(cdp_mon_ops);
6479 
6480 	if (mon_ops)
6481 		qdf_mem_free(mon_ops);
6482 }
6483 #else
6484 void dp_mon_ops_free(struct dp_soc *soc)
6485 {
6486 }
6487 #endif
6488 
6489 void dp_mon_cdp_ops_register(struct dp_soc *soc)
6490 {
6491 	struct cdp_ops *ops = soc->cdp_soc.ops;
6492 	uint32_t target_type;
6493 
6494 	if (!ops) {
6495 		dp_mon_err("cdp_ops is NULL");
6496 		return;
6497 	}
6498 
6499 	target_type = hal_get_target_type(soc->hal_soc);
6500 	switch (target_type) {
6501 	case TARGET_TYPE_QCA6290:
6502 	case TARGET_TYPE_QCA6390:
6503 	case TARGET_TYPE_QCA6490:
6504 	case TARGET_TYPE_QCA6750:
6505 	case TARGET_TYPE_KIWI:
6506 	case TARGET_TYPE_MANGO:
6507 	case TARGET_TYPE_PEACH:
6508 	case TARGET_TYPE_QCA8074:
6509 	case TARGET_TYPE_QCA8074V2:
6510 	case TARGET_TYPE_QCA6018:
6511 	case TARGET_TYPE_QCA9574:
6512 	case TARGET_TYPE_QCN9160:
6513 	case TARGET_TYPE_QCN9000:
6514 	case TARGET_TYPE_QCA5018:
6515 	case TARGET_TYPE_QCN6122:
6516 	case TARGET_TYPE_WCN6450:
6517 		dp_mon_cdp_ops_register_1_0(ops);
6518 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
6519 		dp_cfr_filter_register_1_0(ops);
6520 #endif
6521 		if (target_type == TARGET_TYPE_QCN9000 ||
6522 		    target_type == TARGET_TYPE_QCN9160)
6523 			ops->mon_ops->txrx_update_mon_mac_filter =
6524 					dp_update_mon_mac_filter;
6525 		break;
6526 	case TARGET_TYPE_QCN9224:
6527 	case TARGET_TYPE_QCA5332:
6528 	case TARGET_TYPE_QCN6432:
6529 #if defined(WLAN_PKT_CAPTURE_TX_2_0) || defined(WLAN_PKT_CAPTURE_RX_2_0)
6530 		dp_mon_cdp_ops_register_2_0(ops);
6531 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
6532 		dp_cfr_filter_register_2_0(ops);
6533 #endif
6534 #endif /* WLAN_PKT_CAPTURE_TX_2_0 && WLAN_PKT_CAPTURE_RX_2_0 */
6535 		break;
6536 	default:
6537 		dp_mon_err("%s: Unknown tgt type %d", __func__, target_type);
6538 		qdf_assert_always(0);
6539 		break;
6540 	}
6541 
6542 	ops->cmn_drv_ops->txrx_set_monitor_mode = dp_vdev_set_monitor_mode;
6543 	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev =
6544 				dp_get_mon_vdev_from_pdev_wifi3;
6545 #ifdef DP_PEER_EXTENDED_API
6546 	ops->misc_ops->pkt_log_init = dp_pkt_log_init;
6547 	ops->misc_ops->pkt_log_con_service = dp_pkt_log_con_service;
6548 	ops->misc_ops->pkt_log_exit = dp_pkt_log_exit;
6549 #endif
6550 	ops->ctrl_ops->enable_peer_based_pktlog =
6551 				dp_enable_peer_based_pktlog;
6552 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
6553 	ops->ctrl_ops->txrx_update_peer_pkt_capture_params =
6554 				 dp_peer_update_pkt_capture_params;
6555 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
6556 #ifdef WDI_EVENT_ENABLE
6557 	ops->ctrl_ops->txrx_get_pldev = dp_get_pldev;
6558 #endif
6559 #ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
6560 	ops->host_stats_ops->txrx_get_scan_spcl_vap_stats =
6561 					dp_get_scan_spcl_vap_stats;
6562 #endif
6564 }
6565 
6566 #ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
6567 static inline void
6568 dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops)
6569 {
6570 	if (ops->mon_ops) {
6571 		qdf_mem_free(ops->mon_ops);
6572 		ops->mon_ops = NULL;
6573 	}
6574 }
6575 #else
6576 static inline void
6577 dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops)
6578 {
6579 	ops->mon_ops = NULL;
6580 }
6581 #endif
6582 
6583 void dp_mon_cdp_ops_deregister(struct dp_soc *soc)
6584 {
6585 	struct cdp_ops *ops = soc->cdp_soc.ops;
6586 
6587 	if (!ops) {
6588 		dp_mon_err("cdp_ops is NULL");
6589 		return;
6590 	}
6591 
6592 	dp_mon_cdp_mon_ops_deregister(ops);
6593 
6594 	ops->cmn_drv_ops->txrx_set_monitor_mode = NULL;
6595 	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev = NULL;
6596 #ifdef DP_PEER_EXTENDED_API
6597 	ops->misc_ops->pkt_log_init = NULL;
6598 	ops->misc_ops->pkt_log_con_service = NULL;
6599 	ops->misc_ops->pkt_log_exit = NULL;
6600 #endif
6601 	ops->ctrl_ops->enable_peer_based_pktlog = NULL;
6602 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
6603 	ops->ctrl_ops->txrx_update_peer_pkt_capture_params = NULL;
6604 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
6605 #ifdef WDI_EVENT_ENABLE
6606 	ops->ctrl_ops->txrx_get_pldev = NULL;
6607 #endif
6609 }
6610 
6611 #if defined(WDI_EVENT_ENABLE) &&\
6612 	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
6613 static inline
6614 void dp_mon_ppdu_stats_handler_deregister(struct dp_mon_soc *mon_soc)
6615 {
6616 	mon_soc->mon_ops->mon_ppdu_stats_ind_handler = NULL;
6617 }
6618 #else
6619 static inline
6620 void dp_mon_ppdu_stats_handler_deregister(struct dp_mon_soc *mon_soc)
6621 {
6622 }
6623 #endif
6624 
6625 #ifdef QCA_RSSI_DB2DBM
6626 /**
6627  * dp_mon_compute_min_nf() - calculate the minimum NF value across an
6628  *                      active chain's 20 MHz sub-bands
6629  * @conv_params: cdp_rssi_dbm_conv_param_dp structure value
6630  * @min_nf: location to store the minimum NF value
6631  * @chain_idx: active chain index in the nfHwDbm array
6632  *
6633  * Computation: nfInDbm[][] is set to A_MIN(nfHwDbm[][]), where the row
6634  *              index is the active chain and the column index is the
6635  *              20 MHz sub-band within that chain.
6636  * Example: chain_mask = 0x07 (three active chains at indices 0, 1, 2),
6637  *          bandwidth = 40 MHz (40 MHz spans two 20 MHz sub-bands, so
6638  *          sub-band indices 0 and 1 are considered for min_nf).
6639  *
6640  * Return: QDF_STATUS_SUCCESS if the value is set successfully,
6641  *         QDF_STATUS_E_INVAL on error
6642  */
6643 static QDF_STATUS
6644 dp_mon_compute_min_nf(struct cdp_rssi_dbm_conv_param_dp *conv_params,
6645 		      int8_t *min_nf, int chain_idx)
6646 {
6647 	int j;
6648 	*min_nf = conv_params->nf_hw_dbm[chain_idx][0];
6649 
6650 	switch (conv_params->curr_bw) {
6651 	case CHAN_WIDTH_20:
6652 	case CHAN_WIDTH_5:
6653 	case CHAN_WIDTH_10:
6654 		break;
6655 	case CHAN_WIDTH_40:
6656 		for (j = 1; j < SUB40BW; j++) {
6657 			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
6658 				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
6659 		}
6660 		break;
6661 	case CHAN_WIDTH_80:
6662 		for (j = 1; j < SUB80BW; j++) {
6663 			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
6664 				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
6665 		}
6666 		break;
6667 	case CHAN_WIDTH_160:
6668 	case CHAN_WIDTH_80P80:
6669 	case CHAN_WIDTH_165:
6670 		for (j = 1; j < SUB160BW; j++) {
6671 			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
6672 				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
6673 		}
6674 		break;
6675 	case CHAN_WIDTH_160P160:
6676 	case CHAN_WIDTH_320:
6677 		for (j = 1; j < SUB320BW; j++) {
6678 			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
6679 				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
6680 		}
6681 		break;
6682 	default:
6683 		dp_cdp_err("Invalid bandwidth %u", conv_params->curr_bw);
6684 		return QDF_STATUS_E_INVAL;
6685 	}
6686 	return QDF_STATUS_SUCCESS;
6687 }
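
/*
 * Worked example (illustrative numbers): for curr_bw = CHAN_WIDTH_40 and
 * nf_hw_dbm[chain_idx][0..1] = {-92, -95}, the loop over the SUB40BW
 * sub-bands leaves *min_nf = -95; for a 20 MHz channel only sub-band 0 is
 * read, so *min_nf = -92.
 */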
6688 
6689 /**
6690  * dp_mon_pdev_params_rssi_dbm_conv() - set the RSSI-to-dBm conversion
6691  *                                      params into the monitor pdev
6692  * @cdp_soc: dp soc handle.
6693  * @params: cdp_rssi_db2dbm_param_dp structure value.
6694  *
6695  * Return: QDF_STATUS_SUCCESS if the values are set successfully,
6696  *         QDF_STATUS_E_INVAL on error
6697  */
6698 QDF_STATUS
6699 dp_mon_pdev_params_rssi_dbm_conv(struct cdp_soc_t *cdp_soc,
6700 				 struct cdp_rssi_db2dbm_param_dp *params)
6701 {
6702 	struct cdp_rssi_db2dbm_param_dp *dp_rssi_params = params;
6703 	uint8_t pdev_id = params->pdev_id;
6704 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6705 	struct dp_pdev *pdev =
6706 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
6707 	struct dp_mon_pdev *mon_pdev;
6708 	struct cdp_rssi_temp_off_param_dp temp_off_param;
6709 	struct cdp_rssi_dbm_conv_param_dp conv_params;
6710 	int8_t min_nf = 0;
6711 	int i;
6712 
6713 	if (!soc->features.rssi_dbm_conv_support) {
6714 		dp_cdp_err("rssi dbm conversion support is false");
6715 		return QDF_STATUS_E_INVAL;
6716 	}
6717 	if (!pdev || !pdev->monitor_pdev) {
6718 		dp_cdp_err("Invalid pdev_id %u", pdev_id);
6719 		return QDF_STATUS_E_FAILURE;
6720 	}
6721 
6722 	mon_pdev = pdev->monitor_pdev;
6723 	mon_pdev->rssi_dbm_conv_support =
6724 				soc->features.rssi_dbm_conv_support;
6725 
6726 	if (dp_rssi_params->rssi_temp_off_present) {
6727 		temp_off_param = dp_rssi_params->temp_off_param;
6728 		mon_pdev->rssi_offsets.rssi_temp_offset =
6729 				temp_off_param.rssi_temp_offset;
6730 	}
6731 	if (dp_rssi_params->rssi_dbm_info_present) {
6732 		conv_params = dp_rssi_params->rssi_dbm_param;
6733 		for (i = 0; i < CDP_MAX_NUM_ANTENNA; i++) {
6734 			if (!(conv_params.curr_rx_chainmask & (0x01 << i)))
6735 				continue;
6736 
6737 			if (QDF_STATUS_E_INVAL == dp_mon_compute_min_nf
6738 					(&conv_params, &min_nf, i))
6739 				return QDF_STATUS_E_INVAL;
6741 		}
6742 		mon_pdev->rssi_offsets.xlna_bypass_offset =
6743 					conv_params.xlna_bypass_offset;
6744 		mon_pdev->rssi_offsets.xlna_bypass_threshold =
6745 					conv_params.xlna_bypass_threshold;
6746 		mon_pdev->rssi_offsets.xbar_config = conv_params.xbar_config;
6747 		mon_pdev->rssi_offsets.min_nf_dbm = min_nf;
6748 		mon_pdev->rssi_offsets.rssi_offset =
6749 					mon_pdev->rssi_offsets.min_nf_dbm +
6750 				     mon_pdev->rssi_offsets.rssi_temp_offset;
6751 	}
6752 	return QDF_STATUS_SUCCESS;
6753 }
6754 #endif
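
/*
 * Offset composition sketch (illustrative): after
 * dp_mon_pdev_params_rssi_dbm_conv() succeeds, the pdev holds
 *
 *	rssi_offset = min_nf_dbm + rssi_temp_offset;
 *
 * Consumers elsewhere in the monitor path are expected to add this offset
 * (together with the xLNA bypass adjustment, when the threshold applies)
 * to a raw RSSI to obtain dBm; the exact consumption point is outside
 * this file.
 */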
6755 
6756 void dp_mon_intr_ops_deregister(struct dp_soc *soc)
6757 {
6758 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
6759 
6760 	mon_soc->mon_rx_process = NULL;
6761 	dp_mon_ppdu_stats_handler_deregister(mon_soc);
6762 }
6763 
6764 void dp_mon_feature_ops_deregister(struct dp_soc *soc)
6765 {
6766 	struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);
6767 
6768 	if (!mon_ops) {
6769 		dp_err("mon_ops is NULL");
6770 		return;
6771 	}
6772 
6773 	mon_ops->mon_config_debug_sniffer = NULL;
6774 	mon_ops->mon_peer_tx_init = NULL;
6775 	mon_ops->mon_peer_tx_cleanup = NULL;
6776 	mon_ops->mon_htt_ppdu_stats_attach = NULL;
6777 	mon_ops->mon_htt_ppdu_stats_detach = NULL;
6778 	mon_ops->mon_print_pdev_rx_mon_stats = NULL;
6779 	mon_ops->mon_set_bsscolor = NULL;
6780 	mon_ops->mon_pdev_get_filter_ucast_data = NULL;
6781 	mon_ops->mon_pdev_get_filter_mcast_data = NULL;
6782 	mon_ops->mon_pdev_get_filter_non_data = NULL;
6783 	mon_ops->mon_neighbour_peer_add_ast = NULL;
6784 #ifdef WLAN_TX_PKT_CAPTURE_ENH
6785 	mon_ops->mon_peer_tid_peer_id_update = NULL;
6786 	mon_ops->mon_tx_ppdu_stats_attach = NULL;
6787 	mon_ops->mon_tx_ppdu_stats_detach = NULL;
6788 	mon_ops->mon_tx_capture_debugfs_init = NULL;
6789 	mon_ops->mon_tx_add_to_comp_queue = NULL;
6790 	mon_ops->mon_peer_tx_capture_filter_check = NULL;
6791 	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
6792 	mon_ops->mon_config_enh_tx_capture = NULL;
6793 #endif
6794 #ifdef WLAN_RX_PKT_CAPTURE_ENH
6795 	mon_ops->mon_config_enh_rx_capture = NULL;
6796 #endif
6797 #ifdef QCA_SUPPORT_BPR
6798 	mon_ops->mon_set_bpr_enable = NULL;
6799 #endif
6800 #ifdef ATH_SUPPORT_NAC
6801 	mon_ops->mon_set_filter_neigh_peers = NULL;
6802 #endif
6803 #ifdef WLAN_ATF_ENABLE
6804 	mon_ops->mon_set_atf_stats_enable = NULL;
6805 #endif
6806 #ifdef FEATURE_NAC_RSSI
6807 	mon_ops->mon_filter_neighbour_peer = NULL;
6808 #endif
6809 #ifdef QCA_MCOPY_SUPPORT
6810 	mon_ops->mon_filter_setup_mcopy_mode = NULL;
6811 	mon_ops->mon_filter_reset_mcopy_mode = NULL;
6812 	mon_ops->mon_mcopy_check_deliver = NULL;
6813 #endif
6814 #ifdef QCA_ENHANCED_STATS_SUPPORT
6815 	mon_ops->mon_filter_setup_enhanced_stats = NULL;
6816 	mon_ops->mon_tx_enable_enhanced_stats = NULL;
6817 	mon_ops->mon_tx_disable_enhanced_stats = NULL;
6818 	mon_ops->mon_ppdu_desc_deliver = NULL;
6819 	mon_ops->mon_ppdu_desc_notify = NULL;
6820 	mon_ops->mon_ppdu_stats_feat_enable_check = NULL;
6821 #ifdef WLAN_FEATURE_11BE
6822 	mon_ops->mon_tx_stats_update = NULL;
6823 #endif
6824 #endif
6825 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
6826 	mon_ops->mon_filter_setup_smart_monitor = NULL;
6827 #endif
6828 	mon_ops->mon_filter_set_reset_mon_mac_filter = NULL;
6829 #ifdef WLAN_RX_PKT_CAPTURE_ENH
6830 	mon_ops->mon_filter_setup_rx_enh_capture = NULL;
6831 #endif
6832 #ifdef WDI_EVENT_ENABLE
6833 	mon_ops->mon_set_pktlog_wifi3 = NULL;
6834 	mon_ops->mon_filter_setup_rx_pkt_log_full = NULL;
6835 	mon_ops->mon_filter_reset_rx_pkt_log_full = NULL;
6836 	mon_ops->mon_filter_setup_rx_pkt_log_lite = NULL;
6837 	mon_ops->mon_filter_reset_rx_pkt_log_lite = NULL;
6838 	mon_ops->mon_filter_setup_rx_pkt_log_cbf = NULL;
6839 	mon_ops->mon_filter_reset_rx_pkt_log_cbf = NULL;
6840 #ifdef BE_PKTLOG_SUPPORT
6841 	mon_ops->mon_filter_setup_pktlog_hybrid = NULL;
6842 	mon_ops->mon_filter_reset_pktlog_hybrid = NULL;
6843 #endif
6844 #endif
6845 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
6846 	mon_ops->mon_pktlogmod_exit = NULL;
6847 #endif
6848 	mon_ops->rx_hdr_length_set = NULL;
6849 	mon_ops->rx_packet_length_set = NULL;
6850 	mon_ops->rx_wmask_subscribe = NULL;
6851 	mon_ops->rx_pkt_tlv_offset = NULL;
6852 	mon_ops->rx_enable_mpdu_logging = NULL;
6853 	mon_ops->rx_enable_fpmo = NULL;
6854 	mon_ops->mon_neighbour_peers_detach = NULL;
6855 	mon_ops->mon_vdev_set_monitor_mode_buf_rings = NULL;
6856 	mon_ops->mon_vdev_set_monitor_mode_rings = NULL;
6857 #ifdef QCA_ENHANCED_STATS_SUPPORT
6858 	mon_ops->mon_rx_stats_update = NULL;
6859 	mon_ops->mon_rx_populate_ppdu_usr_info = NULL;
6860 	mon_ops->mon_rx_populate_ppdu_info = NULL;
6861 #endif
6862 }
6863 
6864 QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc)
6865 {
6866 	struct dp_mon_soc *mon_soc;
6867 	qdf_size_t soc_context_size;
6868 
6869 	if (!soc) {
6870 		dp_mon_err("dp_soc is NULL");
6871 		return QDF_STATUS_E_FAILURE;
6872 	}
6873 
6874 	if (soc->arch_ops.txrx_get_mon_context_size) {
6875 		soc_context_size = soc->arch_ops.txrx_get_mon_context_size(DP_CONTEXT_TYPE_MON_SOC);
6876 		mon_soc = dp_context_alloc_mem(soc, DP_MON_SOC_TYPE,
6877 					       soc_context_size);
6878 	} else {
6879 		mon_soc = (struct dp_mon_soc *)qdf_mem_malloc(sizeof(*mon_soc));
6880 	}
6881 	if (!mon_soc) {
6882 		dp_mon_err("%pK: mem allocation failed", soc);
6883 		return QDF_STATUS_E_NOMEM;
6884 	}
6885 	/* register monitor ops */
6886 	soc->monitor_soc = mon_soc;
6887 	dp_mon_ops_register(soc);
6888 	dp_mon_register_intr_ops(soc);
6889 
6890 	dp_mon_cdp_ops_register(soc);
6891 	dp_monitor_soc_attach(soc);
6892 	dp_mon_register_feature_ops(soc);
6893 	return QDF_STATUS_SUCCESS;
6894 }
6895 
6896 QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc)
6897 {
6898 	struct dp_mon_soc *mon_soc;
6899 
6900 	if (!soc) {
6901 		dp_mon_err("dp_soc is NULL");
6902 		return QDF_STATUS_E_FAILURE;
6903 	}
6904 
6905 	mon_soc = soc->monitor_soc;
6906 	dp_monitor_vdev_timer_deinit(soc);
6907 	dp_mon_cdp_ops_deregister(soc);
6908 	dp_monitor_soc_detach(soc);
6909 	soc->monitor_soc = NULL;
6910 	qdf_mem_free(mon_soc);
6911 	return QDF_STATUS_SUCCESS;
6912 }
6913