1 /*
2  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 #include <dp_types.h>
18 #include "dp_rx.h"
19 #include "dp_peer.h"
20 #include <dp_htt.h>
21 #include <dp_mon_filter.h>
23 #include <dp_mon.h>
24 #include <dp_rx_mon.h>
25 #include <dp_internal.h>
26 #include "htt_ppdu_stats.h"
27 #include "dp_cal_client_api.h"
28 #if defined(DP_CON_MON)
29 #ifndef REMOVE_PKT_LOG
30 #include <pktlog_ac_api.h>
31 #include <pktlog_ac.h>
32 #endif
33 #endif
34 #ifdef FEATURE_PERPKT_INFO
35 #include "dp_ratetable.h"
36 #endif
37 #ifdef QCA_SUPPORT_LITE_MONITOR
38 #include "dp_lite_mon.h"
39 #endif
40 
41 #define DP_INTR_POLL_TIMER_MS	5
42 #define INVALID_FREE_BUFF 0xffffffff
43 
44 #ifdef WLAN_RX_PKT_CAPTURE_ENH
45 #include "dp_rx_mon_feature.h"
46 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
47 
48 #ifdef QCA_UNDECODED_METADATA_SUPPORT
49 #define MAX_STRING_LEN_PER_FIELD 6
50 #define DP_UNDECODED_ERR_LENGTH (MAX_STRING_LEN_PER_FIELD * CDP_PHYRX_ERR_MAX)
51 #endif
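
/*
 * Sizing note (illustrative): dp_pdev_get_undecoded_capture_stats() below
 * prints each undecoded error counter with the format " %d", so
 * MAX_STRING_LEN_PER_FIELD (6) budgets a leading space plus up to five
 * digits per field, e.g.:
 *
 *   char buf[DP_UNDECODED_ERR_LENGTH];
 *   int idx = 0;
 *
 *   idx += qdf_snprint(&buf[idx], DP_UNDECODED_ERR_LENGTH - idx, " %d", 42);
 *
 * Larger counters are truncated by qdf_snprint's bound rather than
 * overflowing the buffer.
 */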
52 
53 #ifdef QCA_MCOPY_SUPPORT
54 static inline void
55 dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
56 {
57 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
58 
59 	mon_pdev->mcopy_mode = M_COPY_DISABLED;
60 	mon_pdev->mvdev = NULL;
61 }
62 
63 static inline void
64 dp_reset_mcopy_mode(struct dp_pdev *pdev)
65 {
66 	QDF_STATUS status = QDF_STATUS_SUCCESS;
67 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
68 	struct cdp_mon_ops *cdp_ops;
69 
70 	if (mon_pdev->mcopy_mode) {
71 		cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
72 		if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
73 			cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
74 							  DP_FULL_MON_ENABLE);
75 		dp_pdev_disable_mcopy_code(pdev);
76 		dp_mon_filter_reset_mcopy_mode(pdev);
77 		status = dp_mon_filter_update(pdev);
78 		if (status != QDF_STATUS_SUCCESS) {
79 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
80 				  FL("Failed to reset AM copy mode filters"));
81 		}
82 		mon_pdev->monitor_configured = false;
83 	}
84 }
85 
86 static QDF_STATUS
87 dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
88 {
89 	QDF_STATUS status = QDF_STATUS_SUCCESS;
90 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
91 	struct dp_mon_ops *mon_ops;
92 	struct cdp_mon_ops *cdp_ops;
93 
94 	if (mon_pdev->mvdev)
95 		return QDF_STATUS_E_RESOURCES;
96 
97 	mon_pdev->mcopy_mode = val;
98 	mon_pdev->tx_sniffer_enable = 0;
99 	mon_pdev->monitor_configured = true;
100 
101 	mon_ops = dp_mon_ops_get(pdev->soc);
102 	if (!wlan_cfg_is_delay_mon_replenish(pdev->soc->wlan_cfg_ctx)) {
103 		if (mon_ops && mon_ops->mon_vdev_set_monitor_mode_rings)
104 			mon_ops->mon_vdev_set_monitor_mode_rings(pdev, true);
105 	}
106 
107 	/*
108 	 * Setup the M copy mode filter.
109 	 */
110 	cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
111 	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
112 		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
113 						  DP_FULL_MON_ENABLE);
114 	dp_mon_filter_setup_mcopy_mode(pdev);
115 	status = dp_mon_filter_update(pdev);
116 	if (status != QDF_STATUS_SUCCESS) {
117 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
118 			  FL("Failed to set M_copy mode filters"));
119 		dp_mon_filter_reset_mcopy_mode(pdev);
120 		dp_pdev_disable_mcopy_code(pdev);
121 		return status;
122 	}
123 
124 	if (!mon_pdev->pktlog_ppdu_stats)
125 		dp_h2t_cfg_stats_msg_send(pdev,
126 					  DP_PPDU_STATS_CFG_SNIFFER,
127 					  pdev->pdev_id);
128 
129 	return status;
130 }
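
/*
 * Usage sketch (illustrative, not part of the driver): M-copy is normally
 * reached through dp_config_debug_sniffer() rather than called directly;
 * both val 2 and val 4 select M-copy there:
 *
 *   if (dp_config_debug_sniffer(pdev, 2) != QDF_STATUS_SUCCESS)
 *       dp_mon_err("mcopy enable failed");
 *
 * On failure dp_config_mcopy_mode() has already unwound the filter setup,
 * so no extra cleanup is expected from the caller.
 */
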
131 #else
132 static inline void
133 dp_reset_mcopy_mode(struct dp_pdev *pdev)
134 {
135 }
136 
137 static inline QDF_STATUS
138 dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
139 {
140 	return QDF_STATUS_E_INVAL;
141 }
142 #endif /* QCA_MCOPY_SUPPORT */
143 
144 #ifdef QCA_UNDECODED_METADATA_SUPPORT
145 static QDF_STATUS
146 dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
147 {
148 	QDF_STATUS status = QDF_STATUS_SUCCESS;
149 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
150 
151 	if (mon_pdev->undecoded_metadata_capture) {
152 		dp_mon_filter_reset_undecoded_metadata_mode(pdev);
153 		status = dp_mon_filter_update(pdev);
154 		if (status != QDF_STATUS_SUCCESS) {
155 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
156 				  FL("Undecoded capture filter reset failed"));
157 		}
158 	}
159 	mon_pdev->undecoded_metadata_capture = 0;
160 	return status;
161 }
162 
163 static QDF_STATUS
164 dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
165 {
166 	QDF_STATUS status = QDF_STATUS_SUCCESS;
167 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
168 
169 	if (!mon_pdev->mvdev) {
170 		qdf_err("monitor vdev is NULL");
171 		return QDF_STATUS_E_RESOURCES;
172 	}
173 
174 	mon_pdev->undecoded_metadata_capture = val;
175 	mon_pdev->monitor_configured = true;
176 
178 	/* Setup the undecoded metadata capture mode filter. */
179 	dp_mon_filter_setup_undecoded_metadata_mode(pdev);
180 	status = dp_mon_filter_update(pdev);
181 	if (status != QDF_STATUS_SUCCESS) {
182 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
183 			  FL("Failed to set Undecoded capture filters"));
184 		dp_mon_filter_reset_undecoded_metadata_mode(pdev);
185 		return status;
186 	}
187 
188 	return status;
189 }
190 #else
191 static inline QDF_STATUS
192 dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
193 {
194 	return QDF_STATUS_E_INVAL;
195 }
196 
197 static inline QDF_STATUS
198 dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
199 {
200 	return QDF_STATUS_E_INVAL;
201 }
202 #endif /* QCA_UNDECODED_METADATA_SUPPORT */
203 
204 QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
205 				 uint8_t pdev_id,
206 				 uint8_t special_monitor)
207 {
208 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
209 	struct dp_pdev *pdev =
210 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
211 						   pdev_id);
212 	QDF_STATUS status = QDF_STATUS_SUCCESS;
213 	struct dp_mon_pdev *mon_pdev;
214 	struct cdp_mon_ops *cdp_ops;
215 
216 	if (!pdev)
217 		return QDF_STATUS_E_FAILURE;
218 
219 	mon_pdev = pdev->monitor_pdev;
220 
221 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
222 
223 	cdp_ops = dp_mon_cdp_ops_get(soc);
224 	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
225 		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
226 						  DP_FULL_MON_DISABLE);
227 	mon_pdev->mvdev = NULL;
228 
229 	/*
230 	 * Lite monitor mode, smart monitor mode, and monitor
231 	 * mode use this API to reset filters and disable the mode.
232 	 */
233 	if (mon_pdev->mcopy_mode) {
234 #if defined(QCA_MCOPY_SUPPORT)
235 		dp_pdev_disable_mcopy_code(pdev);
236 		dp_mon_filter_reset_mcopy_mode(pdev);
237 #endif /* QCA_MCOPY_SUPPORT */
238 	} else if (special_monitor) {
239 #if defined(ATH_SUPPORT_NAC)
240 		dp_mon_filter_reset_smart_monitor(pdev);
241 #endif /* ATH_SUPPORT_NAC */
242 		/* For mon 2.0 we make use of lite mon to
243 		 * set filters for the smart monitor use case.
244 		 */
245 		dp_monitor_lite_mon_disable_rx(pdev);
246 	} else if (mon_pdev->undecoded_metadata_capture) {
247 #ifdef QCA_UNDECODED_METADATA_SUPPORT
248 		dp_reset_undecoded_metadata_capture(pdev);
249 #endif
250 	} else {
251 		dp_mon_filter_reset_mon_mode(pdev);
252 	}
253 	status = dp_mon_filter_update(pdev);
254 	if (status != QDF_STATUS_SUCCESS) {
255 		dp_rx_mon_dest_err("%pK: Failed to reset monitor filters",
256 				   soc);
257 	}
258 
259 	mon_pdev->monitor_configured = false;
260 
261 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
262 	return QDF_STATUS_SUCCESS;
263 }
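
/*
 * Usage sketch (illustrative): this op is typically reached through the cdp
 * monitor ops table on monitor VAP teardown; a hypothetical caller:
 *
 *   (void)dp_reset_monitor_mode(soc_hdl, pdev_id, 0);
 *
 * special_monitor = 1 skips the plain monitor-mode filter reset and instead
 * tears down the smart-monitor/lite-monitor filters, matching the branches
 * above. Filter-update failures are only logged; the function still returns
 * QDF_STATUS_SUCCESS once a valid pdev is found.
 */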
264 
265 #ifdef QCA_ADVANCE_MON_FILTER_SUPPORT
266 QDF_STATUS
267 dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
268 				   struct cdp_monitor_filter *filter_val)
269 {
270 	/* Many monitor VAPs can exist in a system, but only one can be up
271 	 * at any time.
272 	 */
273 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
274 	struct dp_vdev *vdev;
275 	struct dp_pdev *pdev =
276 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
277 						   pdev_id);
278 	QDF_STATUS status = QDF_STATUS_SUCCESS;
279 	struct dp_mon_pdev *mon_pdev;
280 
281 	if (!pdev || !pdev->monitor_pdev)
282 		return QDF_STATUS_E_FAILURE;
283 
284 	mon_pdev = pdev->monitor_pdev;
285 	vdev = mon_pdev->mvdev;
286 
287 	if (!vdev)
288 		return QDF_STATUS_E_FAILURE;
289 
290 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
291 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
292 		  pdev, pdev_id, soc, vdev);
293 
294 	/* Check if the current pdev's monitor_vdev exists */
295 	if (!mon_pdev->mvdev) {
296 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
297 			  "vdev=%pK", vdev);
298 		qdf_assert(vdev);
299 	}
300 
301 	/* update filter mode, type in pdev structure */
302 	mon_pdev->mon_filter_mode = filter_val->mode;
303 	mon_pdev->fp_mgmt_filter = filter_val->fp_mgmt;
304 	mon_pdev->fp_ctrl_filter = filter_val->fp_ctrl;
305 	mon_pdev->fp_data_filter = filter_val->fp_data;
306 	mon_pdev->mo_mgmt_filter = filter_val->mo_mgmt;
307 	mon_pdev->mo_ctrl_filter = filter_val->mo_ctrl;
308 	mon_pdev->mo_data_filter = filter_val->mo_data;
309 
310 	dp_mon_filter_setup_mon_mode(pdev);
311 	status = dp_mon_filter_update(pdev);
312 	if (status != QDF_STATUS_SUCCESS) {
313 		dp_rx_mon_dest_err("%pK: Failed to set filter for adv mon mode",
314 				   soc);
315 		dp_mon_filter_reset_mon_mode(pdev);
316 	}
317 
318 	return status;
319 }
320 #endif
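
/*
 * Example (illustrative): a caller would fill struct cdp_monitor_filter
 * before invoking the op; the field names follow the assignments above,
 * while the mode/mask values here are hypothetical placeholders:
 *
 *   struct cdp_monitor_filter filter_val = {0};
 *
 *   filter_val.mode = MON_FILTER_PASS;       // hypothetical mode value
 *   filter_val.fp_mgmt = FILTER_MGMT_ALL;    // hypothetical filter masks
 *   filter_val.fp_ctrl = FILTER_CTRL_ALL;
 *   filter_val.fp_data = FILTER_DATA_ALL;
 *
 *   dp_pdev_set_advance_monitor_filter(soc_hdl, pdev_id, &filter_val);
 */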
321 
322 QDF_STATUS
323 dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
324 {
325 	struct dp_pdev *pdev =
326 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
327 						   pdev_id);
328 
329 	if (!pdev)
330 		return QDF_STATUS_E_FAILURE;
331 
332 	dp_deliver_mgmt_frm(pdev, nbuf);
333 
334 	return QDF_STATUS_SUCCESS;
335 }
336 
337 #ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
338 /**
339  * dp_scan_spcl_vap_stats_attach() - alloc spcl vap stats struct
340  * @mon_vdev: Datapath mon VDEV handle
341  *
342  * Return: 0 on success, not 0 on failure
343  */
344 static inline QDF_STATUS
345 dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
346 {
347 	mon_vdev->scan_spcl_vap_stats =
348 		qdf_mem_malloc(sizeof(struct cdp_scan_spcl_vap_stats));
349 
350 	if (!mon_vdev->scan_spcl_vap_stats) {
351 		dp_mon_err("scan spcl vap stats attach fail");
352 		return QDF_STATUS_E_NOMEM;
353 	}
354 
355 	return QDF_STATUS_SUCCESS;
356 }
357 
358 /**
359  * dp_scan_spcl_vap_stats_detach() - free spcl vap stats struct
360  * @mon_vdev: Datapath mon VDEV handle
361  *
362  * Return: void
363  */
364 static inline void
365 dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
366 {
367 	if (mon_vdev->scan_spcl_vap_stats) {
368 		qdf_mem_free(mon_vdev->scan_spcl_vap_stats);
369 		mon_vdev->scan_spcl_vap_stats = NULL;
370 	}
371 }
372 
373 /**
374  * dp_reset_scan_spcl_vap_stats() - reset spcl vap rx stats
375  * @vdev: Datapath VDEV handle
376  *
377  * Return: void
378  */
379 static inline void
380 dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
381 {
382 	struct dp_mon_vdev *mon_vdev;
383 	struct dp_mon_pdev *mon_pdev;
384 
385 	mon_pdev = vdev->pdev->monitor_pdev;
386 	if (!mon_pdev || !mon_pdev->reset_scan_spcl_vap_stats_enable)
387 		return;
388 
389 	mon_vdev = vdev->monitor_vdev;
390 	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats)
391 		return;
392 
393 	qdf_mem_zero(mon_vdev->scan_spcl_vap_stats,
394 		     sizeof(struct cdp_scan_spcl_vap_stats));
395 }
396 
397 /**
398  * dp_get_scan_spcl_vap_stats() - get spcl vap rx stats
399  * @soc_hdl: Datapath soc handle
400  * @vdev_id: vdev id
401  * @stats: structure to hold spcl vap stats
402  *
403  * Return: 0 on success, not 0 on failure
404  */
405 static QDF_STATUS
406 dp_get_scan_spcl_vap_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
407 			   struct cdp_scan_spcl_vap_stats *stats)
408 {
409 	struct dp_mon_vdev *mon_vdev = NULL;
410 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
411 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
412 						     DP_MOD_ID_CDP);
413 
414 	if (!vdev || !stats) {
415 		if (vdev)
416 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
417 		return QDF_STATUS_E_INVAL;
418 	}
419 
420 	mon_vdev = vdev->monitor_vdev;
421 	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats) {
422 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
423 		return QDF_STATUS_E_INVAL;
424 	}
425 
426 	qdf_mem_copy(stats, mon_vdev->scan_spcl_vap_stats,
427 		     sizeof(struct cdp_scan_spcl_vap_stats));
428 
429 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
430 	return QDF_STATUS_SUCCESS;
431 }
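
/*
 * Usage sketch (illustrative): fetching the special vap stats through the
 * op above:
 *
 *   struct cdp_scan_spcl_vap_stats stats = {0};
 *
 *   if (dp_get_scan_spcl_vap_stats(soc_hdl, vdev_id, &stats) !=
 *       QDF_STATUS_SUCCESS)
 *       dp_mon_err("spcl vap stats fetch failed");
 *
 * The vdev reference taken via dp_vdev_get_ref_by_id() is dropped on every
 * exit path, so callers need no extra cleanup.
 */
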
432 #else
433 static inline void
434 dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
435 {
436 }
437 
438 static inline QDF_STATUS
439 dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
440 {
441 	return QDF_STATUS_SUCCESS;
442 }
443 
444 static inline void
445 dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
446 {
447 }
448 #endif
449 
450 /**
451  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
452  * @dp_soc: DP soc context
453  * @vdev_id: vdev ID
454  * @special_monitor: Flag to denote if its smart monitor mode
455  *
456  * Return: 0 on success, not 0 on failure
457  */
458 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *dp_soc,
459 					   uint8_t vdev_id,
460 					   uint8_t special_monitor)
461 {
462 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
463 	struct dp_pdev *pdev;
464 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
465 						     DP_MOD_ID_CDP);
466 	QDF_STATUS status = QDF_STATUS_SUCCESS;
467 	struct dp_mon_pdev *mon_pdev;
468 	struct cdp_mon_ops *cdp_ops;
469 
470 	if (!vdev)
471 		return QDF_STATUS_E_FAILURE;
472 
473 	pdev = vdev->pdev;
474 
475 	if (!pdev || !pdev->monitor_pdev)
476 		return QDF_STATUS_E_FAILURE;
477 
478 	mon_pdev = pdev->monitor_pdev;
479 
480 	mon_pdev->mvdev = vdev;
481 
482 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
483 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
484 		  pdev, pdev->pdev_id, pdev->soc, vdev);
485 
486 	/*
487 	 * Do not configure the monitor buf ring and filter for smart and
488 	 * lite monitor.
489 	 * For smart monitor, filters are added along with the first NAC;
490 	 * for lite monitor, the required configuration is done through
491 	 * dp_set_pdev_param.
492 	 */
493 
494 	if (special_monitor) {
495 		status = QDF_STATUS_SUCCESS;
496 		goto fail;
497 	}
498 
499 	if (mon_pdev->scan_spcl_vap_configured)
500 		dp_reset_scan_spcl_vap_stats(vdev);
501 
502 	/* Check if monitor mode is already configured on this pdev */
503 	if (mon_pdev->monitor_configured) {
504 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
505 			  "monitor vap already created vdev=%pK\n", vdev);
506 		status = QDF_STATUS_E_RESOURCES;
507 		goto fail;
508 	}
509 
510 	mon_pdev->monitor_configured = true;
511 
512 	/* If the advance monitor filter is applied using lite_mon
513 	 * via vap configuration, the required filters are already
514 	 * applied; hence return SUCCESS from here.
515 	 */
516 	if (dp_monitor_lite_mon_is_rx_adv_filter_enable(pdev)) {
517 		status = QDF_STATUS_SUCCESS;
518 		goto fail;
519 	}
520 	/* Disable lite mon if configured; the monitor vap takes
521 	 * priority over lite mon when it is created. Lite mon
522 	 * can be configured again later.
523 	 */
524 	dp_monitor_lite_mon_disable_rx(pdev);
525 
526 	cdp_ops = dp_mon_cdp_ops_get(soc);
527 	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
528 		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
529 						  DP_FULL_MON_ENABLE);
530 	dp_mon_filter_setup_mon_mode(pdev);
531 	status = dp_mon_filter_update(pdev);
532 	if (status != QDF_STATUS_SUCCESS) {
533 		dp_cdp_err("%pK: Failed to set monitor filters", soc);
534 		dp_mon_filter_reset_mon_mode(pdev);
535 		mon_pdev->monitor_configured = false;
536 		mon_pdev->mvdev = NULL;
537 	}
538 
539 fail:
540 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
541 	return status;
542 }
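
/*
 * Usage sketch (illustrative): the cdp layer invokes this op when a monitor
 * VAP comes up:
 *
 *   // special_monitor = 0: full monitor mode, filters configured here
 *   // special_monitor = 1: smart/lite monitor, filters configured elsewhere
 *   status = dp_vdev_set_monitor_mode(dp_soc, vdev_id, 0);
 *
 * Note that the "fail" label is shared by success paths too; it only drops
 * the vdev reference taken at entry.
 */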
543 
544 #ifdef QCA_TX_CAPTURE_SUPPORT
545 static QDF_STATUS
546 dp_config_tx_capture_mode(struct dp_pdev *pdev)
547 {
548 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
549 
550 	mon_pdev->tx_sniffer_enable = 1;
551 	mon_pdev->monitor_configured = false;
552 
553 	if (!mon_pdev->pktlog_ppdu_stats)
554 		dp_h2t_cfg_stats_msg_send(pdev,
555 					  DP_PPDU_STATS_CFG_SNIFFER,
556 					  pdev->pdev_id);
557 
558 	return QDF_STATUS_SUCCESS;
559 }
560 #else
561 #ifdef QCA_MCOPY_SUPPORT
562 static QDF_STATUS
563 dp_config_tx_capture_mode(struct dp_pdev *pdev)
564 {
565 	return QDF_STATUS_E_INVAL;
566 }
567 #endif
568 #endif
569 
570 #if defined(QCA_MCOPY_SUPPORT) || defined(QCA_TX_CAPTURE_SUPPORT)
571 QDF_STATUS
572 dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
573 {
574 	QDF_STATUS status = QDF_STATUS_SUCCESS;
575 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
576 
577 	/*
578 	 * Note: The mirror copy mode cannot co-exist with any other
579 	 * monitor modes. Hence disabling the filter for this mode will
580 	 * reset the monitor destination ring filters.
581 	 */
582 	dp_reset_mcopy_mode(pdev);
583 	switch (val) {
584 	case 0:
585 		mon_pdev->tx_sniffer_enable = 0;
586 		mon_pdev->monitor_configured = false;
587 
588 		/*
589 		 * We don't need to reset the Rx monitor status ring or call
590 		 * the API dp_ppdu_ring_reset() when all debug sniffer modes are
591 		 * disabled. The Rx monitor status ring will be disabled when
592 		 * the last mode using the monitor status ring gets disabled.
593 		 */
594 		if (!mon_pdev->pktlog_ppdu_stats &&
595 		    !mon_pdev->enhanced_stats_en &&
596 		    !mon_pdev->bpr_enable) {
597 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
598 		} else if (mon_pdev->enhanced_stats_en &&
599 			   !mon_pdev->bpr_enable) {
600 			dp_h2t_cfg_stats_msg_send(pdev,
601 						  DP_PPDU_STATS_CFG_ENH_STATS,
602 						  pdev->pdev_id);
603 		} else if (!mon_pdev->enhanced_stats_en &&
604 			   mon_pdev->bpr_enable) {
605 			dp_h2t_cfg_stats_msg_send(pdev,
606 						  DP_PPDU_STATS_CFG_BPR,
607 						  pdev->pdev_id);
608 		} else {
609 			dp_h2t_cfg_stats_msg_send(pdev,
610 						  DP_PPDU_STATS_CFG_BPR_ENH,
611 						  pdev->pdev_id);
612 		}
613 		break;
614 
615 	case 1:
616 		status = dp_config_tx_capture_mode(pdev);
617 		break;
618 	case 2:
619 	case 4:
620 		status = dp_config_mcopy_mode(pdev, val);
621 		break;
622 
623 	default:
624 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
625 			  "Invalid value, mode not supported");
626 		status = QDF_STATUS_E_INVAL;
627 		break;
628 	}
629 	return status;
630 }
631 #endif
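
/*
 * Mode map for dp_config_debug_sniffer() above, derived from the switch:
 *
 *   val 0    - disable tx sniffer and M-copy, restore ppdu stats config
 *   val 1    - enable tx capture mode
 *   val 2, 4 - enable M-copy mode
 *   others   - rejected with QDF_STATUS_E_INVAL
 *
 * A minimal caller sketch, assuming a valid pdev is already held:
 *
 *   if (dp_config_debug_sniffer(pdev, 1) != QDF_STATUS_SUCCESS)
 *       dp_mon_err("tx capture enable failed");
 */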
632 
633 #ifdef QCA_UNDECODED_METADATA_SUPPORT
634 QDF_STATUS
635 dp_mon_config_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
636 {
637 	QDF_STATUS status = QDF_STATUS_SUCCESS;
638 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
639 
640 	if (!mon_pdev->mvdev && !mon_pdev->scan_spcl_vap_configured) {
641 		qdf_err("No monitor or Special vap, undecoded capture not supported");
642 		return QDF_STATUS_E_RESOURCES;
643 	}
644 
645 	if (val)
646 		status = dp_enable_undecoded_metadata_capture(pdev, val);
647 	else
648 		status = dp_reset_undecoded_metadata_capture(pdev);
649 
650 	return status;
651 }
652 #endif
653 
654 /**
655  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
656  *                                 ring based on target
657  * @soc: soc handle
658  * @mac_for_pdev: WIN- pdev_id, MCL- mac id
659  * @pdev: physical device handle
660  * @ring_num: mac id
661  * @htt_tlv_filter: tlv filter
662  *
663  * Return: zero on success, non-zero on failure
664  */
665 static inline QDF_STATUS
666 dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
667 			    struct dp_pdev *pdev, uint8_t ring_num,
668 			    struct htt_rx_ring_tlv_filter htt_tlv_filter)
669 {
670 	QDF_STATUS status;
671 
672 	if (soc->wlan_cfg_ctx->rxdma1_enable)
673 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
674 					     soc->rxdma_mon_buf_ring[ring_num]
675 					     .hal_srng,
676 					     RXDMA_MONITOR_BUF,
677 					     RX_MONITOR_BUFFER_SIZE,
678 					     &htt_tlv_filter);
679 	else
680 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
681 					     pdev->rx_mac_buf_ring[ring_num]
682 					     .hal_srng,
683 					     RXDMA_BUF, RX_DATA_BUFFER_SIZE,
684 					     &htt_tlv_filter);
685 
686 	return status;
687 }
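
/*
 * Illustrative filter setup for the helper above; the tlv filter fields
 * used here are assumptions based on the common htt_rx_ring_tlv_filter
 * layout and may not apply to every target:
 *
 *   struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
 *
 *   htt_tlv_filter.mpdu_start = 1;  // subscribe to MPDU start TLVs
 *   htt_tlv_filter.msdu_start = 1;
 *   htt_tlv_filter.packet = 1;      // deliver packet payload TLVs
 *   htt_tlv_filter.enable_fp = 1;   // filter-pass frames only
 *
 *   dp_monitor_mode_ring_config(soc, mac_for_pdev, pdev, 0, htt_tlv_filter);
 */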
688 
689 /**
690  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
691  * @soc_hdl: datapath soc handle
692  * @pdev_id: physical device instance id
693  *
694  * Return: virtual interface id
695  */
696 static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
697 		uint8_t pdev_id)
698 {
699 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
700 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
701 
702 	if (qdf_unlikely(!pdev || !pdev->monitor_pdev ||
703 				!pdev->monitor_pdev->mvdev))
704 		return -EINVAL;
705 
706 	return pdev->monitor_pdev->mvdev->vdev_id;
707 }
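
/*
 * Note (reviewer observation): the function returns uint8_t, so the -EINVAL
 * above is truncated to 0xEA; callers should range-check the returned id
 * rather than compare against -EINVAL. A defensive caller sketch, where the
 * bound macro is hypothetical:
 *
 *   uint8_t mon_vdev_id = dp_get_mon_vdev_from_pdev_wifi3(soc_hdl, pdev_id);
 *
 *   if (mon_vdev_id >= WLAN_UMAC_PSOC_MAX_VDEVS)  // hypothetical bound
 *       return QDF_STATUS_E_INVAL;
 */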
708 
709 #if defined(QCA_TX_CAPTURE_SUPPORT) || defined(QCA_ENHANCED_STATS_SUPPORT)
710 #ifndef WLAN_TX_PKT_CAPTURE_ENH
711 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
712 {
713 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
714 
715 	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) {
716 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
717 				     nbuf, HTT_INVALID_PEER,
718 				     WDI_NO_VAL, pdev->pdev_id);
719 	} else {
720 		if (!mon_pdev->bpr_enable)
721 			qdf_nbuf_free(nbuf);
722 	}
723 }
724 #endif
725 #endif
726 
727 QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
728 {
729 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
730 
731 	mon_pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
732 
733 	if (!mon_pdev->ppdu_tlv_buf) {
734 		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
735 		return QDF_STATUS_E_NOMEM;
736 	}
737 
738 	return QDF_STATUS_SUCCESS;
739 }
740 
741 void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
742 {
743 	struct ppdu_info *ppdu_info, *ppdu_info_next;
744 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
745 
746 
747 	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
748 			   ppdu_info_list_elem, ppdu_info_next) {
749 		if (!ppdu_info)
750 			break;
751 		TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
752 			     ppdu_info, ppdu_info_list_elem);
753 		mon_pdev->list_depth--;
754 		qdf_assert_always(ppdu_info->nbuf);
755 		qdf_nbuf_free(ppdu_info->nbuf);
756 		qdf_mem_free(ppdu_info);
757 	}
758 
759 	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->sched_comp_ppdu_list,
760 			   ppdu_info_list_elem, ppdu_info_next) {
761 		if (!ppdu_info)
762 			break;
763 		TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list,
764 			     ppdu_info, ppdu_info_list_elem);
765 		mon_pdev->sched_comp_list_depth--;
766 		qdf_assert_always(ppdu_info->nbuf);
767 		qdf_nbuf_free(ppdu_info->nbuf);
768 		qdf_mem_free(ppdu_info);
769 	}
770 
771 	if (mon_pdev->ppdu_tlv_buf)
772 		qdf_mem_free(mon_pdev->ppdu_tlv_buf);
773 }
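
/*
 * Lifecycle sketch (illustrative): the attach/detach pair above is driven
 * from monitor pdev init/deinit:
 *
 *   if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
 *       return QDF_STATUS_E_NOMEM;
 *   ...
 *   dp_htt_ppdu_stats_detach(pdev);  // also frees queued ppdu_info nodes
 */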
774 
775 QDF_STATUS dp_pdev_get_rx_mon_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
776 				    struct cdp_pdev_mon_stats *stats)
777 {
778 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
779 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
780 	struct dp_mon_pdev *mon_pdev;
781 
782 	if (!pdev)
783 		return QDF_STATUS_E_FAILURE;
784 
785 	mon_pdev = pdev->monitor_pdev;
786 	if (!mon_pdev)
787 		return QDF_STATUS_E_FAILURE;
788 
789 	qdf_mem_copy(stats, &mon_pdev->rx_mon_stats,
790 		     sizeof(struct cdp_pdev_mon_stats));
791 
792 	return QDF_STATUS_SUCCESS;
793 }
794 
795 #ifdef QCA_UNDECODED_METADATA_SUPPORT
796 /**
797  * dp_pdev_get_undecoded_capture_stats() - Get undecoded metadata captured
798  * monitor pdev stats
799  * @mon_pdev: Monitor PDEV handle
800  * @rx_mon_stats: Monitor pdev status/destination ring stats
801  *
802  * Return: None
803  */
804 static inline void
805 dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev,
806 				    struct cdp_pdev_mon_stats *rx_mon_stats)
807 {
808 	char undecoded_error[DP_UNDECODED_ERR_LENGTH];
809 	uint8_t index = 0, i;
810 
811 	DP_PRINT_STATS("Rx Undecoded Frame count:%d",
812 		       rx_mon_stats->rx_undecoded_count);
813 	index = 0;
814 	for (i = 0; i < (CDP_PHYRX_ERR_MAX); i++) {
815 		index += qdf_snprint(&undecoded_error[index],
816 				DP_UNDECODED_ERR_LENGTH - index,
817 				" %d", rx_mon_stats->rx_undecoded_error[i]);
818 	}
819 	DP_PRINT_STATS("Undecoded Error (0-63):%s", undecoded_error);
820 }
821 #else
822 static inline void
823 dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev,
824 				    struct cdp_pdev_mon_stats *rx_mon_stats)
825 {
826 }
827 #endif
828 
829 static const char *
830 dp_preamble_type_str[] = {
831 	"preamble OFDMA     ",
832 	"preamble CCK       ",
833 	"preamble HT        ",
834 	"preamble VHT       ",
835 	"preamble HE        ",
836 	"preamble EHT       ",
837 	"preamble NO SUPPORT",
838 };
839 
840 static const char *
841 dp_reception_type_str[] = {
842 	"reception su        ",
843 	"reception mu_mimo   ",
844 	"reception ofdma     ",
845 	"reception ofdma mimo",
846 };
847 
848 static const char *
849 dp_mu_dl_ul_str[] = {
850 	"MU DL",
851 	"MU UL",
852 };
853 
854 static inline void
855 dp_print_pdev_mpdu_fcs_ok_cnt(struct cdp_pdev_mon_stats *rx_mon_sts,
856 			      uint32_t pkt_t, uint32_t rx_t,
857 			      uint32_t dl_ul, uint32_t user)
858 {
859 	DP_PRINT_STATS("%s, %s, %s, user=%d, mpdu_fcs_ok=%d",
860 		       dp_preamble_type_str[pkt_t],
861 		       dp_reception_type_str[rx_t],
862 		       dp_mu_dl_ul_str[dl_ul],
863 		       user,
864 		       rx_mon_sts->mpdu_cnt_fcs_ok[pkt_t][rx_t][dl_ul][user]);
865 }
866 
867 static inline void
868 dp_print_pdev_mpdu_fcs_err_cnt(struct cdp_pdev_mon_stats *rx_mon_sts,
869 			       uint32_t pkt_t, uint32_t rx_t,
870 			       uint32_t dl_ul, uint32_t user)
871 {
872 	DP_PRINT_STATS("%s, %s, %s, user=%d, mpdu_fcs_err=%d",
873 		       dp_preamble_type_str[pkt_t],
874 		       dp_reception_type_str[rx_t],
875 		       dp_mu_dl_ul_str[dl_ul],
876 		       user,
877 		       rx_mon_sts->mpdu_cnt_fcs_err[pkt_t][rx_t][dl_ul][user]);
878 }
879 
880 static inline void
881 dp_print_pdev_mpdu_cnt(struct cdp_pdev_mon_stats *rx_mon_sts,
882 		       uint32_t pkt_t, uint32_t rx_t,
883 		       uint32_t dl_ul, uint32_t user)
884 {
885 	if (rx_mon_sts->mpdu_cnt_fcs_ok[pkt_t][rx_t][dl_ul][user])
886 		dp_print_pdev_mpdu_fcs_ok_cnt(rx_mon_sts, pkt_t, rx_t,
887 					      dl_ul, user);
888 
889 	if (rx_mon_sts->mpdu_cnt_fcs_err[pkt_t][rx_t][dl_ul][user])
890 		dp_print_pdev_mpdu_fcs_err_cnt(rx_mon_sts, pkt_t, rx_t,
891 					       dl_ul, user);
892 }
893 
894 static inline void
895 dp_print_pdev_mpdu_user(struct cdp_pdev_mon_stats *rx_mon_sts,
896 			uint32_t pkt_t, uint32_t rx_t,
897 			uint32_t dl_ul)
898 {
899 	uint32_t user;
900 
901 	for (user = 0; user < CDP_MU_SNIF_USER_MAX; user++)
902 		dp_print_pdev_mpdu_cnt(rx_mon_sts, pkt_t, rx_t,
903 				       dl_ul, user);
904 }
905 
906 static inline void
907 dp_print_pdev_mpdu_dl_ul(struct cdp_pdev_mon_stats *rx_mon_sts,
908 			 uint32_t pkt_t, uint32_t rx_t)
909 {
910 	uint32_t dl_ul;
911 
912 	for (dl_ul = CDP_MU_TYPE_DL; dl_ul < CDP_MU_TYPE_MAX; dl_ul++)
913 		dp_print_pdev_mpdu_user(rx_mon_sts, pkt_t, rx_t,
914 					dl_ul);
915 }
916 
917 static inline void
918 dp_print_pdev_mpdu_rx_type(struct cdp_pdev_mon_stats *rx_mon_sts,
919 			   uint32_t pkt_t)
920 {
921 	uint32_t rx_t;
922 
923 	for (rx_t = CDP_RX_TYPE_SU; rx_t < CDP_RX_TYPE_MAX; rx_t++)
924 		dp_print_pdev_mpdu_dl_ul(rx_mon_sts, pkt_t, rx_t);
925 }
926 
927 static inline void
928 dp_print_pdev_mpdu_pkt_type(struct cdp_pdev_mon_stats *rx_mon_sts)
929 {
930 	uint32_t pkt_t;
931 
932 	for (pkt_t = CDP_PKT_TYPE_OFDM; pkt_t < CDP_PKT_TYPE_MAX; pkt_t++)
933 		dp_print_pdev_mpdu_rx_type(rx_mon_sts, pkt_t);
934 }
935 
936 static inline void
937 print_ppdu_eht_type_mode(
938 	struct cdp_pdev_mon_stats *rx_mon_stats,
939 	uint32_t ppdu_type_mode,
940 	uint32_t dl_ul)
941 {
942 	DP_PRINT_STATS("type_mode=%d, dl_ul=%d, cnt=%d",
943 		       ppdu_type_mode,
944 		       dl_ul,
945 		       rx_mon_stats->ppdu_eht_type_mode[ppdu_type_mode][dl_ul]);
946 }
947 
948 static inline void
949 print_ppdu_eht_type_mode_dl_ul(
950 	struct cdp_pdev_mon_stats *rx_mon_stats,
951 	uint32_t ppdu_type_mode
952 )
953 {
954 	uint32_t dl_ul;
955 
956 	for (dl_ul = 0; dl_ul < CDP_MU_TYPE_MAX; dl_ul++) {
957 		if (rx_mon_stats->ppdu_eht_type_mode[ppdu_type_mode][dl_ul])
958 			print_ppdu_eht_type_mode(rx_mon_stats,
959 						 ppdu_type_mode, dl_ul);
960 	}
961 }
962 
963 static inline void
964 dp_print_pdev_eht_ppdu_cnt(struct dp_pdev *pdev)
965 {
966 	struct cdp_pdev_mon_stats *rx_mon_stats;
967 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
968 	uint32_t ppdu_type_mode;
969 
970 	rx_mon_stats = &mon_pdev->rx_mon_stats;
971 	DP_PRINT_STATS("Monitor EHT PPDU  Count");
972 	for (ppdu_type_mode = 0; ppdu_type_mode < CDP_EHT_TYPE_MODE_MAX;
973 	     ppdu_type_mode++) {
974 		print_ppdu_eht_type_mode_dl_ul(rx_mon_stats,
975 					       ppdu_type_mode);
976 	}
977 }
978 
979 static inline void
980 dp_print_pdev_mpdu_stats(struct dp_pdev *pdev)
981 {
982 	struct cdp_pdev_mon_stats *rx_mon_stats;
983 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
984 
985 	rx_mon_stats = &mon_pdev->rx_mon_stats;
986 	DP_PRINT_STATS("Monitor MPDU Count");
987 	dp_print_pdev_mpdu_pkt_type(rx_mon_stats);
988 }
989 
990 void
991 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
992 {
993 	struct cdp_pdev_mon_stats *rx_mon_stats;
994 	uint32_t *stat_ring_ppdu_ids;
995 	uint32_t *dest_ring_ppdu_ids;
996 	int i, idx;
997 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
998 
999 	rx_mon_stats = &mon_pdev->rx_mon_stats;
1000 
1001 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
1002 
1003 	DP_PRINT_STATS("status_ppdu_compl_cnt = %d",
1004 		       rx_mon_stats->status_ppdu_compl);
1005 	DP_PRINT_STATS("status_ppdu_start_cnt = %d",
1006 		       rx_mon_stats->status_ppdu_start);
1007 	DP_PRINT_STATS("status_ppdu_end_cnt = %d",
1008 		       rx_mon_stats->status_ppdu_end);
1009 	DP_PRINT_STATS("status_ppdu_start_mis_cnt = %d",
1010 		       rx_mon_stats->status_ppdu_start_mis);
1011 	DP_PRINT_STATS("status_ppdu_end_mis_cnt = %d",
1012 		       rx_mon_stats->status_ppdu_end_mis);
1013 
1014 	DP_PRINT_STATS("start_user_info_cnt = %d",
1015 		       rx_mon_stats->start_user_info_cnt);
1016 	DP_PRINT_STATS("end_user_stats_cnt = %d",
1017 		       rx_mon_stats->end_user_stats_cnt);
1018 
1019 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
1020 		       rx_mon_stats->status_ppdu_done);
1021 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
1022 		       rx_mon_stats->dest_ppdu_done);
1023 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
1024 		       rx_mon_stats->dest_mpdu_done);
1025 	DP_PRINT_STATS("tlv_tag_status_err_cnt = %u",
1026 		       rx_mon_stats->tlv_tag_status_err);
1027 	DP_PRINT_STATS("mon status DMA not done WAR count= %u",
1028 		       rx_mon_stats->status_buf_done_war);
1029 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
1030 		       rx_mon_stats->dest_mpdu_drop);
1031 	DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
1032 		       rx_mon_stats->dup_mon_linkdesc_cnt);
1033 	DP_PRINT_STATS("dup_mon_buf_cnt = %d",
1034 		       rx_mon_stats->dup_mon_buf_cnt);
1035 	DP_PRINT_STATS("mon_rx_buf_reaped = %u",
1036 		       rx_mon_stats->mon_rx_bufs_reaped_dest);
1037 	DP_PRINT_STATS("mon_rx_buf_replenished = %u",
1038 		       rx_mon_stats->mon_rx_bufs_replenished_dest);
1039 	DP_PRINT_STATS("ppdu_id_mismatch = %u",
1040 		       rx_mon_stats->ppdu_id_mismatch);
1041 	DP_PRINT_STATS("mpdu_ppdu_id_match_cnt = %d",
1042 		       rx_mon_stats->ppdu_id_match);
1043 	DP_PRINT_STATS("ppdus dropped frm status ring = %d",
1044 		       rx_mon_stats->status_ppdu_drop);
1045 	DP_PRINT_STATS("ppdus dropped frm dest ring = %d",
1046 		       rx_mon_stats->dest_ppdu_drop);
1047 	DP_PRINT_STATS("mpdu_ppdu_id_mismatch_drop = %u",
1048 		       rx_mon_stats->mpdu_ppdu_id_mismatch_drop);
1049 	DP_PRINT_STATS("mpdu_decap_type_invalid = %u",
1050 		       rx_mon_stats->mpdu_decap_type_invalid);
1051 	stat_ring_ppdu_ids =
1052 		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);
1053 	dest_ring_ppdu_ids =
1054 		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);
1055 
1056 	if (!stat_ring_ppdu_ids || !dest_ring_ppdu_ids) {
1057 		DP_PRINT_STATS("Unable to allocate ppdu id hist mem\n");
		/* bail out; the copies below would dereference NULL */
		qdf_mem_free(stat_ring_ppdu_ids);
		qdf_mem_free(dest_ring_ppdu_ids);
		return;
	}
1058 
1059 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
1060 	idx = rx_mon_stats->ppdu_id_hist_idx;
1061 	qdf_mem_copy(stat_ring_ppdu_ids,
1062 		     rx_mon_stats->stat_ring_ppdu_id_hist,
1063 		     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
1064 	qdf_mem_copy(dest_ring_ppdu_ids,
1065 		     rx_mon_stats->dest_ring_ppdu_id_hist,
1066 		     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
1067 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
1068 
1069 	DP_PRINT_STATS("PPDU Id history:");
1070 	DP_PRINT_STATS("stat_ring_ppdu_ids\t dest_ring_ppdu_ids");
1071 	for (i = 0; i < MAX_PPDU_ID_HIST; i++) {
1072 		idx = (idx + 1) & (MAX_PPDU_ID_HIST - 1);
1073 		DP_PRINT_STATS("%*u\t%*u", 16,
1074 			       rx_mon_stats->stat_ring_ppdu_id_hist[idx], 16,
1075 			       rx_mon_stats->dest_ring_ppdu_id_hist[idx]);
1076 	}
1077 	qdf_mem_free(stat_ring_ppdu_ids);
1078 	qdf_mem_free(dest_ring_ppdu_ids);
1079 	DP_PRINT_STATS("mon_rx_dest_stuck = %d",
1080 		       rx_mon_stats->mon_rx_dest_stuck);
1081 
1082 	dp_pdev_get_undecoded_capture_stats(mon_pdev, rx_mon_stats);
1083 	dp_mon_rx_print_advanced_stats(pdev->soc, pdev);
1084 
1085 	dp_print_pdev_mpdu_stats(pdev);
1086 	dp_print_pdev_eht_ppdu_cnt(pdev);
1087 
1088 }
1089 
1090 #ifdef QCA_SUPPORT_BPR
1091 QDF_STATUS
1092 dp_set_bpr_enable(struct dp_pdev *pdev, int val)
1093 {
1094 	struct dp_mon_ops *mon_ops;
1095 
1096 	mon_ops = dp_mon_ops_get(pdev->soc);
1097 	if (mon_ops && mon_ops->mon_set_bpr_enable)
1098 		return mon_ops->mon_set_bpr_enable(pdev, val);
1099 
1100 	return QDF_STATUS_E_FAILURE;
1101 }
1102 #endif
1103 
1104 #ifdef WDI_EVENT_ENABLE
1105 #ifdef BE_PKTLOG_SUPPORT
1106 static bool
1107 dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev,
1108 			    struct dp_mon_pdev *mon_pdev,
1109 			    struct dp_soc *soc)
1110 {
1111 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1112 	struct dp_mon_ops *mon_ops = NULL;
1113 	uint16_t num_buffers;
1114 
1115 	if (mon_pdev->mvdev) {
1116 		/* Nothing needs to be done if monitor mode is
1117 		 * enabled
1118 		 */
1119 		mon_pdev->pktlog_hybrid_mode = true;
1120 		return false;
1121 	}
1122 
1123 	mon_ops = dp_mon_ops_get(pdev->soc);
1124 	if (!mon_ops) {
1125 		dp_mon_filter_err("Mon ops uninitialized");
1126 		return false;
1127 	}
1128 
1129 	if (!mon_pdev->pktlog_hybrid_mode) {
1130 		mon_pdev->pktlog_hybrid_mode = true;
1131 		soc_cfg_ctx = soc->wlan_cfg_ctx;
1132 		num_buffers =
1133 			wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
1134 
1135 		if (mon_ops && mon_ops->set_mon_mode_buf_rings_tx)
1136 			mon_ops->set_mon_mode_buf_rings_tx(pdev, num_buffers);
1137 
1138 		dp_mon_filter_setup_pktlog_hybrid(pdev);
1139 		if (dp_tx_mon_filter_update(pdev) !=
1140 		    QDF_STATUS_SUCCESS) {
1141 			dp_cdp_err("Set hybrid filters failed");
1142 			dp_mon_filter_reset_pktlog_hybrid(pdev);
1143 			mon_pdev->rx_pktlog_mode =
1144 				DP_RX_PKTLOG_DISABLED;
1145 			return false;
1146 		}
1147 
1148 		dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_PKTLOG);
1149 	}
1150 
1151 	return true;
1152 }
1153 
1154 static void
1155 dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev)
1156 {
1157 	mon_pdev->pktlog_hybrid_mode = false;
1158 }
1159 #else
1160 static void
1161 dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev)
1162 {
1163 }
1164 
1165 static bool
1166 dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev,
1167 			    struct dp_mon_pdev *mon_pdev,
1168 			    struct dp_soc *soc)
1169 {
1170 	dp_cdp_err("Hybrid mode is supported only on beryllium");
1171 	return true;
1172 }
1173 #endif
1174 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
1175 			bool enable)
1176 {
1177 	struct dp_soc *soc = NULL;
1178 	int max_mac_rings = wlan_cfg_get_num_mac_rings
1179 					(pdev->wlan_cfg_ctx);
1180 	uint8_t mac_id = 0;
1181 	struct dp_mon_ops *mon_ops;
1182 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1183 
1184 	soc = pdev->soc;
1185 	mon_ops = dp_mon_ops_get(soc);
1186 
1187 	if (!mon_ops)
1188 		return 0;
1189 
1190 	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
1191 
1192 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1193 		  FL("Max_mac_rings %d "),
1194 		  max_mac_rings);
1195 
1196 	if (enable) {
1197 		switch (event) {
1198 		case WDI_EVENT_RX_DESC:
1199 			/* Nothing needs to be done if monitor mode is
1200 			 * enabled
1201 			 */
1202 			if (mon_pdev->mvdev)
1203 				return 0;
1204 
1205 			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL)
1206 				break;
1207 
1208 			mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
1209 			dp_mon_filter_setup_rx_pkt_log_full(pdev);
1210 			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
1211 				dp_cdp_err("%pK: Pktlog full filters set failed",
1212 					   soc);
1213 				dp_mon_filter_reset_rx_pkt_log_full(pdev);
1214 				mon_pdev->rx_pktlog_mode =
1215 					DP_RX_PKTLOG_DISABLED;
1216 				return 0;
1217 			}
1218 
1219 			dp_monitor_reap_timer_start(soc,
1220 						    CDP_MON_REAP_SOURCE_PKTLOG);
1221 			break;
1222 
1223 		case WDI_EVENT_LITE_RX:
1224 			/* Nothing needs to be done if monitor mode is
1225 			 * enabled
1226 			 */
1227 			if (mon_pdev->mvdev)
1228 				return 0;
1229 
1230 			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE)
1231 				break;
1232 
1233 			mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
1234 
1235 			/*
1236 			 * Set the packet log lite mode filter.
1237 			 */
1238 			dp_mon_filter_setup_rx_pkt_log_lite(pdev);
1239 			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
1240 				dp_cdp_err("%pK: Pktlog lite filters set failed",
1241 					   soc);
1242 				dp_mon_filter_reset_rx_pkt_log_lite(pdev);
1243 				mon_pdev->rx_pktlog_mode =
1244 					DP_RX_PKTLOG_DISABLED;
1245 				return 0;
1246 			}
1247 
1248 			dp_monitor_reap_timer_start(soc,
1249 						    CDP_MON_REAP_SOURCE_PKTLOG);
1250 			break;
1251 		case WDI_EVENT_LITE_T2H:
1252 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
1253 				int mac_for_pdev = dp_get_mac_id_for_pdev(
1254 							mac_id,	pdev->pdev_id);
1255 
1256 				mon_pdev->pktlog_ppdu_stats = true;
1257 				dp_h2t_cfg_stats_msg_send(pdev,
1258 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
1259 					mac_for_pdev);
1260 			}
1261 			break;
1262 
1263 		case WDI_EVENT_RX_CBF:
1264 			/* Nothing needs to be done if monitor mode is
1265 			 * enabled
1266 			 */
1267 			if (mon_pdev->mvdev)
1268 				return 0;
1269 
1270 			if (mon_pdev->rx_pktlog_cbf)
1271 				break;
1272 
1273 			mon_pdev->rx_pktlog_cbf = true;
1274 			mon_pdev->monitor_configured = true;
1275 			if (mon_ops->mon_vdev_set_monitor_mode_buf_rings)
1276 				mon_ops->mon_vdev_set_monitor_mode_buf_rings(
1277 					pdev);
1278 
1279 			/*
1280 			 * Set the packet log lite mode filter.
1281 			 */
1282 			qdf_info("Non mon mode: Enable destination ring");
1283 
1284 			dp_mon_filter_setup_rx_pkt_log_cbf(pdev);
1285 			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
1286 				dp_mon_err("Pktlog set CBF filters failed");
1287 				dp_mon_filter_reset_rx_pktlog_cbf(pdev);
1288 				mon_pdev->rx_pktlog_mode =
1289 					DP_RX_PKTLOG_DISABLED;
1290 				mon_pdev->monitor_configured = false;
1291 				return 0;
1292 			}
1293 
1294 			dp_monitor_reap_timer_start(soc,
1295 						    CDP_MON_REAP_SOURCE_PKTLOG);
1296 			break;
1297 		case WDI_EVENT_HYBRID_TX:
1298 			if (!dp_set_hybrid_pktlog_enable(pdev, mon_pdev, soc))
1299 				return 0;
1300 			break;
1301 
1302 		default:
1303 			/* Nothing needs to be done for other pktlog types */
1304 			break;
1305 		}
1306 	} else {
1307 		switch (event) {
1308 		case WDI_EVENT_RX_DESC:
1309 		case WDI_EVENT_LITE_RX:
1310 			/* Nothing needs to be done if monitor mode is
1311 			 * enabled
1312 			 */
1313 			if (mon_pdev->mvdev)
1314 				return 0;
1315 
1316 			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_DISABLED)
1317 				break;
1318 
1319 			mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
1320 			dp_mon_filter_reset_rx_pkt_log_full(pdev);
1321 			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
1322 				dp_cdp_err("%pK: Pktlog filters reset failed",
1323 					   soc);
1324 				return 0;
1325 			}
1326 
1327 			dp_mon_filter_reset_rx_pkt_log_lite(pdev);
1328 			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
1329 				dp_cdp_err("%pK: Pktlog filters reset failed",
1330 					   soc);
1331 				return 0;
1332 			}
1333 
1334 			dp_monitor_reap_timer_stop(soc,
1335 						   CDP_MON_REAP_SOURCE_PKTLOG);
1336 			break;
1337 		case WDI_EVENT_LITE_T2H:
1338 			/*
1339 			 * To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW,
1340 			 * pass the value 0. Once these macros are defined in
1341 			 * the htt header file, the proper macros will be used.
1342 			 */
1343 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
1344 				int mac_for_pdev =
1345 						dp_get_mac_id_for_pdev(mac_id,
1346 								pdev->pdev_id);
1347 
1348 				mon_pdev->pktlog_ppdu_stats = false;
1349 				if (!mon_pdev->enhanced_stats_en &&
1350 				    !mon_pdev->tx_sniffer_enable &&
1351 				    !mon_pdev->mcopy_mode) {
1352 					dp_h2t_cfg_stats_msg_send(pdev, 0,
1353 								  mac_for_pdev);
1354 				} else if (mon_pdev->tx_sniffer_enable ||
1355 					   mon_pdev->mcopy_mode) {
1356 					dp_h2t_cfg_stats_msg_send(pdev,
1357 						DP_PPDU_STATS_CFG_SNIFFER,
1358 						mac_for_pdev);
1359 				} else if (mon_pdev->enhanced_stats_en) {
1360 					dp_h2t_cfg_stats_msg_send(pdev,
1361 						DP_PPDU_STATS_CFG_ENH_STATS,
1362 						mac_for_pdev);
1363 				}
1364 			}
1365 
1366 			break;
1367 		case WDI_EVENT_RX_CBF:
1368 			mon_pdev->rx_pktlog_cbf = false;
1369 			break;
1370 
1371 		case WDI_EVENT_HYBRID_TX:
1372 			dp_set_hybrid_pktlog_disable(mon_pdev);
1373 			break;
1374 
1375 		default:
1376 			/* Nothing needs to be done for other pktlog types */
1377 			break;
1378 		}
1379 	}
1380 	return 0;
1381 }
1382 #endif
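
/*
 * Usage sketch (illustrative): pktlog control flows through
 * dp_set_pktlog_wifi3() with a WDI event id:
 *
 *   dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true);   // enable lite rx
 *   ...
 *   dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);  // disable again
 *
 * The function returns 0 on success and on filter failures alike; errors
 * are reported through dp_cdp_err()/dp_mon_err() logs instead.
 */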
1383 
1384 /* MCL specific functions */
1385 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
1386 void dp_pktlogmod_exit(struct dp_pdev *pdev)
1387 {
1388 	struct dp_soc *soc = pdev->soc;
1389 	struct hif_opaque_softc *scn = soc->hif_handle;
1390 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1391 
1392 	if (!scn) {
1393 		dp_mon_err("Invalid hif(scn) handle");
1394 		return;
1395 	}
1396 
1397 	dp_monitor_reap_timer_stop(soc, CDP_MON_REAP_SOURCE_PKTLOG);
1398 	pktlogmod_exit(scn);
1399 	mon_pdev->pkt_log_init = false;
1400 }
1401 #endif /*DP_CON_MON*/
1402 
1403 #if defined(WDI_EVENT_ENABLE) && defined(QCA_ENHANCED_STATS_SUPPORT)
1404 #ifdef IPA_OFFLOAD
1405 void dp_peer_get_tx_rx_stats(struct dp_peer *peer,
1406 			     struct cdp_interface_peer_stats *peer_stats_intf)
1407 {
1408 	struct dp_rx_tid *rx_tid = NULL;
1409 	uint8_t i = 0;
1410 
1411 	for (i = 0; i < DP_MAX_TIDS; i++) {
1412 		rx_tid = &peer->rx_tid[i];
1413 		peer_stats_intf->rx_byte_count +=
1414 			rx_tid->rx_msdu_cnt.bytes;
1415 		peer_stats_intf->rx_packet_count +=
1416 			rx_tid->rx_msdu_cnt.num;
1417 	}
1418 	peer_stats_intf->tx_packet_count =
1419 		peer->monitor_peer->stats.tx.tx_ucast_success.num;
1420 	peer_stats_intf->tx_byte_count =
1421 		peer->monitor_peer->stats.tx.tx_ucast_success.bytes;
1422 }
1423 #else
1424 void dp_peer_get_tx_rx_stats(struct dp_peer *peer,
1425 			     struct cdp_interface_peer_stats *peer_stats_intf)
1426 {
1427 	struct dp_txrx_peer *txrx_peer = NULL;
1428 	struct dp_peer *tgt_peer = NULL;
1429 
1430 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
1431 	txrx_peer = tgt_peer->txrx_peer;
1432 	peer_stats_intf->rx_packet_count = txrx_peer->to_stack.num;
1433 	peer_stats_intf->rx_byte_count = txrx_peer->to_stack.bytes;
1434 	peer_stats_intf->tx_packet_count =
1435 			txrx_peer->stats.per_pkt_stats.tx.ucast.num;
1436 	peer_stats_intf->tx_byte_count =
1437 			txrx_peer->stats.per_pkt_stats.tx.tx_success.bytes;
1438 }
1439 #endif
1440 
1441 QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer)
1442 {
1443 	struct cdp_interface_peer_stats peer_stats_intf = {0};
1444 	struct dp_mon_peer_stats *mon_peer_stats = NULL;
1445 	struct dp_peer *tgt_peer = NULL;
1446 	struct dp_txrx_peer *txrx_peer = NULL;
1447 
1448 	if (qdf_unlikely(!peer || !peer->vdev || !peer->monitor_peer))
1449 		return QDF_STATUS_E_FAULT;
1450 
1451 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
1452 	if (qdf_unlikely(!tgt_peer))
1453 		return QDF_STATUS_E_FAULT;
1454 
1455 	txrx_peer = tgt_peer->txrx_peer;
1456 	if (qdf_unlikely(!txrx_peer))
1457 		return QDF_STATUS_E_FAULT;
1458 
1459 	mon_peer_stats = &peer->monitor_peer->stats;
1460 
1461 	if (mon_peer_stats->rx.last_snr != mon_peer_stats->rx.snr)
1462 		peer_stats_intf.rssi_changed = true;
1463 
1464 	if ((mon_peer_stats->rx.snr && peer_stats_intf.rssi_changed) ||
1465 	    (mon_peer_stats->tx.tx_rate &&
1466 	     mon_peer_stats->tx.tx_rate != mon_peer_stats->tx.last_tx_rate)) {
1467 		qdf_mem_copy(peer_stats_intf.peer_mac, peer->mac_addr.raw,
1468 			     QDF_MAC_ADDR_SIZE);
1469 		peer_stats_intf.vdev_id = peer->vdev->vdev_id;
1470 		peer_stats_intf.last_peer_tx_rate =
1471 					mon_peer_stats->tx.last_tx_rate;
1472 		peer_stats_intf.peer_tx_rate = mon_peer_stats->tx.tx_rate;
1473 		peer_stats_intf.peer_rssi = mon_peer_stats->rx.snr;
1474 		peer_stats_intf.ack_rssi = mon_peer_stats->tx.last_ack_rssi;
1475 		dp_peer_get_tx_rx_stats(peer, &peer_stats_intf);
1476 		peer_stats_intf.per = tgt_peer->stats.tx.last_per;
1477 		peer_stats_intf.free_buff = INVALID_FREE_BUFF;
1478 		dp_wdi_event_handler(WDI_EVENT_PEER_STATS, dp_pdev->soc,
1479 				     (void *)&peer_stats_intf, 0,
1480 				     WDI_NO_VAL, dp_pdev->pdev_id);
1481 	}
1482 
1483 	return QDF_STATUS_SUCCESS;
1484 }
1485 #endif
1486 
1487 #ifdef FEATURE_NAC_RSSI
1488 /**
1489  * dp_rx_nac_filter() - Function to perform filtering of non-associated
1490  * clients
1491  * @pdev: DP pdev handle
1492  * @rx_pkt_hdr: Rx packet Header
1493  *
1494  * Return: dp_vdev*
1495  */
1496 static
1497 struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
1498 				 uint8_t *rx_pkt_hdr)
1499 {
1500 	struct ieee80211_frame *wh;
1501 	struct dp_neighbour_peer *peer = NULL;
1502 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1503 
1504 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
1505 
1506 	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
1507 		return NULL;
1508 
1509 	qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
1510 	TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
1511 		      neighbour_peer_list_elem) {
1512 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
1513 				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
1514 			dp_rx_debug("%pK: NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x",
1515 				    pdev->soc,
1516 				    peer->neighbour_peers_macaddr.raw[0],
1517 				    peer->neighbour_peers_macaddr.raw[1],
1518 				    peer->neighbour_peers_macaddr.raw[2],
1519 				    peer->neighbour_peers_macaddr.raw[3],
1520 				    peer->neighbour_peers_macaddr.raw[4],
1521 				    peer->neighbour_peers_macaddr.raw[5]);
1522 
1523 			qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
1524 
1525 			return mon_pdev->mvdev;
1526 		}
1527 	}
1528 	qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
1529 
1530 	return NULL;
1531 }
1532 
1533 QDF_STATUS dp_filter_neighbour_peer(struct dp_pdev *pdev,
1534 				    uint8_t *rx_pkt_hdr)
1535 {
1536 	struct dp_vdev *vdev = NULL;
1537 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1538 
1539 	if (mon_pdev->filter_neighbour_peers) {
1540 		/* Next Hop scenario not yet handled */
1541 		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
1542 		if (vdev) {
1543 			dp_rx_mon_deliver(pdev->soc, pdev->pdev_id,
1544 					  pdev->invalid_peer_head_msdu,
1545 					  pdev->invalid_peer_tail_msdu);
1546 
1547 			pdev->invalid_peer_head_msdu = NULL;
1548 			pdev->invalid_peer_tail_msdu = NULL;
1549 			return QDF_STATUS_SUCCESS;
1550 		}
1551 	}
1552 
1553 	return QDF_STATUS_E_FAILURE;
1554 }
1555 #endif
1556 
1557 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
1558 /**
1559  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
1560  * address for smart mesh filtering
1561  * @soc_hdl: cdp soc handle
1562  * @vdev_id: id of virtual device object
1563  * @cmd: Add/Del command
1564  * @macaddr: nac client mac address
1565  *
1566  * Return: success/failure
1567  */
1568 static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc_hdl,
1569 					    uint8_t vdev_id,
1570 					    uint32_t cmd, uint8_t *macaddr)
1571 {
1572 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
1573 	struct dp_pdev *pdev;
1574 	struct dp_neighbour_peer *peer = NULL;
1575 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
1576 						     DP_MOD_ID_CDP);
1577 	struct dp_mon_pdev *mon_pdev;
1578 
1579 	if (!vdev || !macaddr)
1580 		goto fail0;
1581 
1582 	pdev = vdev->pdev;
1583 
1584 	if (!pdev)
1585 		goto fail0;
1586 
1587 	mon_pdev = pdev->monitor_pdev;
1588 
1589 	/* Store address of NAC (neighbour peer) which will be checked
1590 	 * against TA of received packets.
1591 	 */
1592 	if (cmd == DP_NAC_PARAM_ADD) {
1593 		peer = (struct dp_neighbour_peer *)qdf_mem_malloc(
1594 				sizeof(*peer));
1595 
1596 		if (!peer) {
1597 			dp_cdp_err("%pK: DP neighbour peer node memory allocation failed"
1598 				   , soc);
1599 			goto fail0;
1600 		}
1601 
1602 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
1603 			     macaddr, QDF_MAC_ADDR_SIZE);
1604 		peer->vdev = vdev;
1605 
1606 		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
1607 
1608 		/* add this neighbour peer into the list */
1609 		TAILQ_INSERT_TAIL(&mon_pdev->neighbour_peers_list, peer,
1610 				  neighbour_peer_list_elem);
1611 		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
1612 
1613 		/* first neighbour */
1614 		if (!mon_pdev->neighbour_peers_added) {
1615 			QDF_STATUS status = QDF_STATUS_SUCCESS;
1616 
1617 			mon_pdev->neighbour_peers_added = true;
1618 			dp_mon_filter_setup_smart_monitor(pdev);
1619 			status = dp_mon_filter_update(pdev);
1620 			if (status != QDF_STATUS_SUCCESS) {
1621 				dp_cdp_err("%pK: smart mon filter setup failed",
1622 					   soc);
1623 				dp_mon_filter_reset_smart_monitor(pdev);
1624 				mon_pdev->neighbour_peers_added = false;
1625 			}
1626 		}
1627 
1628 	} else if (cmd == DP_NAC_PARAM_DEL) {
1629 		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
1630 		TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
1631 			      neighbour_peer_list_elem) {
1632 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
1633 					 macaddr, QDF_MAC_ADDR_SIZE)) {
1634 				/* delete this peer from the list */
1635 				TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
1636 					     peer, neighbour_peer_list_elem);
1637 				qdf_mem_free(peer);
1638 				break;
1639 			}
1640 		}
1641 		/* last neighbour deleted */
1642 		if (TAILQ_EMPTY(&mon_pdev->neighbour_peers_list)) {
1643 			QDF_STATUS status = QDF_STATUS_SUCCESS;
1644 
1645 			dp_mon_filter_reset_smart_monitor(pdev);
1646 			status = dp_mon_filter_update(pdev);
1647 			if (status != QDF_STATUS_SUCCESS) {
1648 				dp_cdp_err("%pK: smart mon filter clear failed",
1649 					   soc);
1650 			}
1651 			mon_pdev->neighbour_peers_added = false;
1652 		}
1653 		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
1654 	}
1655 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
1656 	return 1;
1657 
1658 fail0:
1659 	if (vdev)
1660 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
1661 	return 0;
1662 }
1663 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
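
/*
 * Usage sketch (illustrative): adding and removing a NAC entry through the
 * op above, which returns 1 on success and 0 on failure:
 *
 *   uint8_t nac_mac[QDF_MAC_ADDR_SIZE] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
 *
 *   dp_update_filter_neighbour_peers(soc_hdl, vdev_id, DP_NAC_PARAM_ADD,
 *                                    nac_mac);
 *   ...
 *   dp_update_filter_neighbour_peers(soc_hdl, vdev_id, DP_NAC_PARAM_DEL,
 *                                    nac_mac);
 *
 * The smart-monitor filter is installed when the first entry is added and
 * reset once the list becomes empty again.
 */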
1664 
1665 /**
1666  * dp_update_mon_mac_filter() - Set/reset monitor mac filter
1667  * @soc_hdl: cdp soc handle
1668  * @vdev_id: id of virtual device object
1669  * @cmd: Add/Del command
1670  *
1671  * Return: 0 for success. nonzero for failure.
1672  */
1673 static QDF_STATUS dp_update_mon_mac_filter(struct cdp_soc_t *soc_hdl,
1674 					   uint8_t vdev_id, uint32_t cmd)
1675 {
1676 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
1677 	struct dp_pdev *pdev;
1678 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
1679 						     DP_MOD_ID_CDP);
1680 	struct dp_mon_pdev *mon_pdev;
1681 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
1682 
1683 	if (!vdev)
1684 		return status;
1685 
1686 	pdev = vdev->pdev;
1687 	if (!pdev) {
1688 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
1689 		return status;
1690 	}
1691 
1692 	mon_pdev = pdev->monitor_pdev;
1693 	if (cmd == DP_NAC_PARAM_ADD) {
1694 		/* first neighbour added */
1695 		dp_mon_filter_set_reset_mon_mac_filter(pdev, true);
1696 		status = dp_mon_filter_update(pdev);
1697 		if (status != QDF_STATUS_SUCCESS) {
1698 			dp_cdp_err("%pK: Mon mac filter set failed", soc);
1699 			dp_mon_filter_set_reset_mon_mac_filter(pdev, false);
1700 		}
1701 	} else if (cmd == DP_NAC_PARAM_DEL) {
1702 		/* last neighbour deleted */
1703 		dp_mon_filter_set_reset_mon_mac_filter(pdev, false);
1704 		status = dp_mon_filter_update(pdev);
1705 		if (status != QDF_STATUS_SUCCESS)
1706 			dp_cdp_err("%pK: Mon mac filter reset failed", soc);
1707 	}
1708 
1709 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
1710 	return status;
1711 }
1712 
1713 #ifdef ATH_SUPPORT_NAC_RSSI
1714 /**
1715  * dp_vdev_get_neighbour_rssi() - Store RSSI for configured NAC
1716  * @soc_hdl: DP soc handle
1717  * @vdev_id: id of DP vdev handle
1718  * @mac_addr: neighbour mac
1719  * @rssi: rssi value
1720  *
1721  * Return: 0 for success. nonzero for failure.
1722  */
1723 static QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc_hdl,
1724 					      uint8_t vdev_id,
1725 					      char *mac_addr,
1726 					      uint8_t *rssi)
1727 {
1728 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1729 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
1730 						     DP_MOD_ID_CDP);
1731 	struct dp_pdev *pdev;
1732 	struct dp_neighbour_peer *peer = NULL;
1733 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
1734 	struct dp_mon_pdev *mon_pdev;
1735 
1736 	if (!vdev)
1737 		return status;
1738 
1739 	pdev = vdev->pdev;
1740 	mon_pdev = pdev->monitor_pdev;
1741 
1742 	*rssi = 0;
1743 	qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
1744 	TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
1745 		      neighbour_peer_list_elem) {
1746 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
1747 				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
1748 			*rssi = peer->rssi;
1749 			status = QDF_STATUS_SUCCESS;
1750 			break;
1751 		}
1752 	}
1753 	qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
1754 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
1755 	return status;
1756 }
1757 
1758 static QDF_STATUS
1759 dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc,
1760 		       uint8_t vdev_id,
1761 		       enum cdp_nac_param_cmd cmd, char *bssid,
1762 		       char *client_macaddr,
1763 		       uint8_t chan_num)
1764 {
1765 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
1766 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
1767 						     DP_MOD_ID_CDP);
1768 	struct dp_pdev *pdev;
1769 	struct dp_mon_pdev *mon_pdev;
1770 
1771 	if (!vdev)
1772 		return QDF_STATUS_E_FAILURE;
1773 
1774 	pdev = (struct dp_pdev *)vdev->pdev;
1775 
1776 	mon_pdev = pdev->monitor_pdev;
1777 	mon_pdev->nac_rssi_filtering = 1;
1778 	/* Store address of NAC (neighbour peer) which will be checked
1779 	 * against TA of received packets.
1780 	 */
1781 
1782 	if (cmd == CDP_NAC_PARAM_ADD) {
1783 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
1784 						 DP_NAC_PARAM_ADD,
1785 						 (uint8_t *)client_macaddr);
1786 	} else if (cmd == CDP_NAC_PARAM_DEL) {
1787 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
1788 						 DP_NAC_PARAM_DEL,
1789 						 (uint8_t *)client_macaddr);
1790 	}
1791 
1792 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
1793 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
1794 			(soc->ctrl_psoc, pdev->pdev_id,
1795 			 vdev->vdev_id, cmd, bssid, client_macaddr);
1796 
1797 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
1798 	return QDF_STATUS_SUCCESS;
1799 }
1800 #endif
1801 
1802 bool
1803 dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl,
1804 			 enum cdp_mon_reap_source source,
1805 			 bool enable)
1806 {
1807 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1808 
1809 	if (enable)
1810 		return dp_monitor_reap_timer_start(soc, source);
1811 	else
1812 		return dp_monitor_reap_timer_stop(soc, source);
1813 }
1814 
1815 #if defined(DP_CON_MON)
1816 #ifndef REMOVE_PKT_LOG
1817 void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
1818 {
1819 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1820 	struct dp_pdev *handle =
1821 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1822 	struct dp_mon_pdev *mon_pdev;
1823 
1824 	if (!handle) {
1825 		dp_mon_err("pdev handle is NULL");
1826 		return;
1827 	}
1828 
1829 	mon_pdev = handle->monitor_pdev;
1830 
1831 	if (mon_pdev->pkt_log_init) {
1832 		dp_mon_err("%pK: Packet log already initialized", soc);
1833 		return;
1834 	}
1835 
1836 	pktlog_sethandle(&mon_pdev->pl_dev, scn);
1837 	pktlog_set_pdev_id(mon_pdev->pl_dev, pdev_id);
1838 	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
1839 
1840 	if (pktlogmod_init(scn)) {
1841 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1842 			  "%s: pktlogmod_init failed", __func__);
1843 		mon_pdev->pkt_log_init = false;
1844 	} else {
1845 		mon_pdev->pkt_log_init = true;
1846 	}
1847 }
1848 
1849 /**
1850  * dp_pkt_log_con_service() - connect packet log service
1851  * @soc_hdl: Datapath soc handle
1852  * @pdev_id: id of data path pdev handle
1853  * @scn: device context
1854  *
1855  * Return: none
1856  */
1857 static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
1858 				   uint8_t pdev_id, void *scn)
1859 {
1860 	dp_pkt_log_init(soc_hdl, pdev_id, scn);
1861 	pktlog_htc_attach();
1862 }
1863 
1864 /**
1865  * dp_pkt_log_exit() - Wrapper API to cleanup pktlog info
1866  * @soc_hdl: Datapath soc handle
1867  * @pdev_id: id of data path pdev handle
1868  *
1869  * Return: none
1870  */
1871 static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1872 {
1873 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1874 	struct dp_pdev *pdev =
1875 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1876 
1877 	if (!pdev) {
1878 		dp_err("pdev handle is NULL");
1879 		return;
1880 	}
1881 
1882 	dp_pktlogmod_exit(pdev);
1883 }
1884 
1885 #else
1886 static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
1887 				   uint8_t pdev_id, void *scn)
1888 {
1889 }
1890 
1891 static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1892 {
1893 }
1894 #endif
1895 #endif
1896 
1897 void dp_neighbour_peers_detach(struct dp_pdev *pdev)
1898 {
1899 	struct dp_neighbour_peer *peer = NULL;
1900 	struct dp_neighbour_peer *temp_peer = NULL;
1901 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1902 
1903 	TAILQ_FOREACH_SAFE(peer, &mon_pdev->neighbour_peers_list,
1904 			   neighbour_peer_list_elem, temp_peer) {
1905 		/* delete this peer from the list */
1906 		TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
1907 			     peer, neighbour_peer_list_elem);
1908 		qdf_mem_free(peer);
1909 	}
1910 
1911 	qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
1912 }
1913 
1914 #ifdef QCA_ENHANCED_STATS_SUPPORT
1915 /**
1916  * dp_mon_tx_enable_enhanced_stats() - Enable enhanced Tx stats
1917  * @pdev: Datapath pdev handle
1918  *
1919  * Return: void
1920  */
1921 static void dp_mon_tx_enable_enhanced_stats(struct dp_pdev *pdev)
1922 {
1923 	struct dp_soc *soc = pdev->soc;
1924 	struct dp_mon_ops *mon_ops = NULL;
1925 
1926 	mon_ops = dp_mon_ops_get(soc);
1927 	if (mon_ops && mon_ops->mon_tx_enable_enhanced_stats)
1928 		mon_ops->mon_tx_enable_enhanced_stats(pdev);
1929 }
1930 
1931 /**
1932  * dp_enable_enhanced_stats() - API to enable enhanced statistics
1933  * @soc: DP_SOC handle
1934  * @pdev_id: id of DP_PDEV handle
1935  *
1936  * Return: QDF_STATUS
1937  */
1938 static QDF_STATUS
1939 dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
1940 {
1941 	struct dp_pdev *pdev = NULL;
1942 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1943 	struct dp_mon_pdev *mon_pdev;
1944 
1945 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
1946 						  pdev_id);
1947 
1948 	if (!pdev)
1949 		return QDF_STATUS_E_FAILURE;
1950 
1951 	mon_pdev = pdev->monitor_pdev;
1952 
1953 	if (!mon_pdev)
1954 		return QDF_STATUS_E_FAILURE;
1955 
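	/* Start the periodic cal client timer only on the 0 -> 1 transition,
	 * so a repeated enable call does not start an already-running timer.
	 */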
1956 	if (mon_pdev->enhanced_stats_en == 0)
1957 		dp_cal_client_timer_start(mon_pdev->cal_client_ctx);
1958 
1959 	mon_pdev->enhanced_stats_en = 1;
1960 	pdev->enhanced_stats_en = true;
1961 
1962 	dp_mon_filter_setup_enhanced_stats(pdev);
1963 	status = dp_mon_filter_update(pdev);
1964 	if (status != QDF_STATUS_SUCCESS) {
1965 		dp_cdp_err("%pK: Failed to set enhanced mode filters", soc);
1966 		dp_mon_filter_reset_enhanced_stats(pdev);
1967 		dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
1968 		mon_pdev->enhanced_stats_en = 0;
1969 		pdev->enhanced_stats_en = false;
1970 		return QDF_STATUS_E_FAILURE;
1971 	}
1972 
1973 	dp_mon_tx_enable_enhanced_stats(pdev);
1974 
1975 	return QDF_STATUS_SUCCESS;
1976 }
1977 
1978 /**
1979  * dp_mon_tx_disable_enhanced_stats() - Disable enhanced Tx stats
1980  * @pdev: Datapath pdev handle
1981  *
1982  * Return: void
1983  */
1984 static void dp_mon_tx_disable_enhanced_stats(struct dp_pdev *pdev)
1985 {
1986 	struct dp_soc *soc = pdev->soc;
1987 	struct dp_mon_ops *mon_ops = NULL;
1988 
1989 	mon_ops = dp_mon_ops_get(soc);
1990 	if (mon_ops && mon_ops->mon_tx_disable_enhanced_stats)
1991 		mon_ops->mon_tx_disable_enhanced_stats(pdev);
1992 }
1993 
1994 /**
1995  * dp_disable_enhanced_stats() - API to disable enhanced statistics
1996  *
1997  * @soc: the soc handle
1998  * @pdev_id: pdev_id of pdev
1999  *
2000  * Return: QDF_STATUS
2001  */
2002 static QDF_STATUS
2003 dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
2004 {
2005 	struct dp_pdev *pdev =
2006 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
2007 						   pdev_id);
2008 	struct dp_mon_pdev *mon_pdev;
2009 
2011 	if (!pdev || !pdev->monitor_pdev)
2012 		return QDF_STATUS_E_FAILURE;
2013 
2014 	mon_pdev = pdev->monitor_pdev;
2015 
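	/* Mirror the enable path: stop the cal client timer only if
	 * enhanced stats are currently enabled.
	 */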
2016 	if (mon_pdev->enhanced_stats_en == 1)
2017 		dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
2018 
2019 	mon_pdev->enhanced_stats_en = 0;
2020 	pdev->enhanced_stats_en = false;
2021 
2022 	dp_mon_tx_disable_enhanced_stats(pdev);
2023 
2024 	dp_mon_filter_reset_enhanced_stats(pdev);
2025 	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
2026 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2027 			  FL("Failed to reset enhanced mode filters"));
2028 	}
2029 
2030 	return QDF_STATUS_SUCCESS;
2031 }
2032 
2033 #ifdef WDI_EVENT_ENABLE
2034 QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
2035 				    struct cdp_rx_stats_ppdu_user *ppdu_user)
2036 {
2037 	struct cdp_interface_peer_qos_stats qos_stats_intf = {0};
2038 
2039 	if (qdf_unlikely(ppdu_user->peer_id == HTT_INVALID_PEER)) {
2040 		dp_mon_warn("Invalid peer id");
2041 		return QDF_STATUS_E_FAILURE;
2042 	}
2043 
2044 	qdf_mem_copy(qos_stats_intf.peer_mac, ppdu_user->mac_addr,
2045 		     QDF_MAC_ADDR_SIZE);
2046 	qos_stats_intf.frame_control = ppdu_user->frame_control;
2047 	qos_stats_intf.frame_control_info_valid =
2048 			ppdu_user->frame_control_info_valid;
2049 	qos_stats_intf.qos_control = ppdu_user->qos_control;
2050 	qos_stats_intf.qos_control_info_valid =
2051 			ppdu_user->qos_control_info_valid;
2052 	qos_stats_intf.vdev_id = ppdu_user->vdev_id;
2053 	dp_wdi_event_handler(WDI_EVENT_PEER_QOS_STATS, dp_pdev->soc,
2054 			     (void *)&qos_stats_intf, 0,
2055 			     WDI_NO_VAL, dp_pdev->pdev_id);
2056 
2057 	return QDF_STATUS_SUCCESS;
2058 }
2059 #else
2060 static inline QDF_STATUS
2061 dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
2062 			 struct cdp_rx_stats_ppdu_user *ppdu_user)
2063 {
2064 	return QDF_STATUS_SUCCESS;
2065 }
2066 #endif
2067 #endif /* QCA_ENHANCED_STATS_SUPPORT */
2068 
2069 /**
2070  * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
2071  * for pktlog
2072  * @soc: cdp_soc handle
2073  * @pdev_id: id of dp pdev handle
2074  * @mac_addr: Peer mac address
2075  * @enb_dsb: Enable or disable peer based filtering
2076  *
2077  * Return: QDF_STATUS
2078  */
2079 static int
2080 dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
2081 			    uint8_t *mac_addr, uint8_t enb_dsb)
2082 {
2083 	struct dp_peer *peer;
2084 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
2085 	struct dp_pdev *pdev =
2086 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
2087 						   pdev_id);
2088 	struct dp_mon_pdev *mon_pdev;
2089 
2090 	if (!pdev)
2091 		return QDF_STATUS_E_FAILURE;
2092 
2093 	mon_pdev = pdev->monitor_pdev;
2094 
2095 	peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr,
2096 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
2097 
2098 	if (!peer) {
2099 		dp_mon_err("Invalid Peer");
2100 		return QDF_STATUS_E_FAILURE;
2101 	}
2102 
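	/* Peer-based pktlog filtering is applied on link peers only: MLD
	 * peers are skipped, and the peer must have a monitor context.
	 */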
2103 	if (!IS_MLO_DP_MLD_PEER(peer) && peer->monitor_peer) {
2104 		peer->monitor_peer->peer_based_pktlog_filter = enb_dsb;
2105 		mon_pdev->dp_peer_based_pktlog = enb_dsb;
2106 		status = QDF_STATUS_SUCCESS;
2107 	}
2108 
2109 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
2110 
2111 	return status;
2112 }
2113 
2114 /**
2115  * dp_peer_update_pkt_capture_params() - Set Rx & Tx Capture flags for a peer
2116  * @soc: DP_SOC handle
2117  * @pdev_id: id of DP_PDEV handle
2118  * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
2119  * @is_tx_pkt_cap_enable: enable/disable/delete/print
2120  * Tx packet capture in monitor mode
2121  * @peer_mac: MAC address for which the above need to be enabled/disabled
2122  *
2123  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
2124  */
2125 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
2126 static QDF_STATUS
2127 dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
2128 				  uint8_t pdev_id,
2129 				  bool is_rx_pkt_cap_enable,
2130 				  uint8_t is_tx_pkt_cap_enable,
2131 				  uint8_t *peer_mac)
2132 {
2133 	struct dp_peer *peer;
2134 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
2135 	struct dp_pdev *pdev =
2136 			dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
2137 							   pdev_id);
2138 	if (!pdev)
2139 		return QDF_STATUS_E_FAILURE;
2140 
2141 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
2142 				      peer_mac, 0, DP_VDEV_ALL,
2143 				      DP_MOD_ID_CDP);
2144 	if (!peer)
2145 		return QDF_STATUS_E_FAILURE;
2146 
2147 	/* We need to set Tx pkt capture even for a non-associated peer */
2148 	if (!IS_MLO_DP_MLD_PEER(peer)) {
2149 		status = dp_monitor_tx_peer_filter(pdev, peer,
2150 						   is_tx_pkt_cap_enable,
2151 						   peer_mac);
2152 
2153 		status = dp_peer_set_rx_capture_enabled(pdev, peer,
2154 							is_rx_pkt_cap_enable,
2155 							peer_mac);
2156 	}
2157 
2158 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
2159 
2160 	return status;
2161 }
2162 #endif
2163 
2164 #ifdef QCA_MCOPY_SUPPORT
2165 QDF_STATUS dp_mcopy_check_deliver(struct dp_pdev *pdev,
2166 				  uint16_t peer_id,
2167 				  uint32_t ppdu_id,
2168 				  uint8_t first_msdu)
2169 {
2170 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2171 
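	/* In M_COPY mode only one MSDU per (peer, PPDU) pair is delivered:
	 * drop a frame whose ppdu_id/peer_id matches the last delivered
	 * pair, and drop any MSDU that is not the first of its PPDU.
	 */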
2172 	if (mon_pdev->mcopy_mode) {
2173 		if (mon_pdev->mcopy_mode == M_COPY) {
2174 			if ((mon_pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2175 			    (mon_pdev->m_copy_id.tx_peer_id == peer_id)) {
2176 				return QDF_STATUS_E_INVAL;
2177 			}
2178 		}
2179 
2180 		if (!first_msdu)
2181 			return QDF_STATUS_E_INVAL;
2182 
2183 		mon_pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2184 		mon_pdev->m_copy_id.tx_peer_id = peer_id;
2185 	}
2186 
2187 	return QDF_STATUS_SUCCESS;
2188 }
2189 #endif
2190 
2191 #ifdef WDI_EVENT_ENABLE
2192 #ifndef REMOVE_PKT_LOG
2193 static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
2194 {
2195 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2196 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2197 
2198 	if (!pdev || !pdev->monitor_pdev)
2199 		return NULL;
2200 
2201 	return pdev->monitor_pdev->pl_dev;
2202 }
2203 #else
2204 static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
2205 {
2206 	return NULL;
2207 }
2208 #endif
2209 #endif
2210 
2211 QDF_STATUS dp_rx_populate_cbf_hdr(struct dp_soc *soc,
2212 				  uint32_t mac_id,
2213 				  uint32_t event,
2214 				  qdf_nbuf_t mpdu,
2215 				  uint32_t msdu_timestamp)
2216 {
2217 	uint32_t data_size, hdr_size, ppdu_id, align4byte;
2218 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2219 	uint32_t *msg_word;
2220 
2221 	if (!pdev)
2222 		return QDF_STATUS_E_INVAL;
2223 
2224 	ppdu_id = pdev->monitor_pdev->ppdu_info.com_info.ppdu_id;
2225 
2226 	hdr_size = HTT_T2H_PPDU_STATS_IND_HDR_SIZE
2227 		+ qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload);
2228 
2229 	data_size = qdf_nbuf_len(mpdu);
2230 
2231 	qdf_nbuf_push_head(mpdu, hdr_size);
2232 
2233 	msg_word = (uint32_t *)qdf_nbuf_data(mpdu);
2234 	/*
2235 	 * Populate the PPDU Stats Indication header
2236 	 */
2237 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_T2H_MSG_TYPE_PPDU_STATS_IND);
2238 	HTT_T2H_PPDU_STATS_MAC_ID_SET(*msg_word, mac_id);
2239 	HTT_T2H_PPDU_STATS_PDEV_ID_SET(*msg_word, pdev->pdev_id);
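	/* Round (payload size + TLV payload offset) up to the next 4-byte
	 * boundary; the HTT payload size field is word aligned.
	 */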
2240 	align4byte = ((data_size +
2241 		qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
2242 		+ 3) >> 2) << 2;
2243 	HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_SET(*msg_word, align4byte);
2244 	msg_word++;
2245 	HTT_T2H_PPDU_STATS_PPDU_ID_SET(*msg_word, ppdu_id);
2246 	msg_word++;
2247 
2248 	*msg_word = msdu_timestamp;
2249 	msg_word++;
2250 	/* Skip reserved field */
2251 	msg_word++;
2252 	/*
2253 	 * Populate MGMT_CTRL Payload TLV first
2254 	 */
2255 	HTT_STATS_TLV_TAG_SET(*msg_word,
2256 			      HTT_PPDU_STATS_RX_MGMTCTRL_PAYLOAD_TLV);
2257 
2258 	align4byte = ((data_size - sizeof(htt_tlv_hdr_t) +
2259 		qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
2260 		+ 3) >> 2) << 2;
2261 	HTT_STATS_TLV_LENGTH_SET(*msg_word, align4byte);
2262 	msg_word++;
2263 
2264 	HTT_PPDU_STATS_RX_MGMTCTRL_TLV_FRAME_LENGTH_SET(
2265 		*msg_word, data_size);
2266 	msg_word++;
2267 
2268 	dp_wdi_event_handler(event, soc, (void *)mpdu,
2269 			     HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id);
2270 
2271 	qdf_nbuf_pull_head(mpdu, hdr_size);
2272 
2273 	return QDF_STATUS_SUCCESS;
2274 }
2275 
2276 #ifdef ATH_SUPPORT_EXT_STAT
2277 #ifdef WLAN_TELEMETRY_STATS_SUPPORT
2278 /**
2279  * dp_pdev_clear_link_airtime_stats() - clear airtime stats for given pdev
2280  * @pdev: DP PDEV handle
2281  */
2282 static inline
2283 void dp_pdev_clear_link_airtime_stats(struct dp_pdev *pdev)
2284 {
2285 	uint8_t ac;
2286 
2287 	for (ac = 0; ac < WME_AC_MAX; ac++)
2288 		pdev->stats.telemetry_stats.link_airtime[ac] = 0;
2289 }
2290 
2291 /**
2292  * dp_peer_update_telemetry_stats() - update peer telemetry stats
2293  * @soc: Datapath soc
2294  * @peer: Datapath peer
2295  * @arg: argument to callback function
2296  */
2297 static inline
2298 void dp_peer_update_telemetry_stats(struct dp_soc *soc,
2299 				    struct dp_peer *peer,
2300 				    void *arg)
2301 {
2302 	struct dp_pdev *pdev;
2303 	struct dp_vdev *vdev;
2304 	struct dp_mon_peer *mon_peer = NULL;
2305 	uint8_t ac;
2306 	uint64_t current_time = qdf_get_log_timestamp();
2307 
2308 	vdev = peer->vdev;
2309 	if (!vdev)
2310 		return;
2311 
2312 	pdev = vdev->pdev;
2313 	if (!pdev)
2314 		return;
2315 
2316 	mon_peer = peer->monitor_peer;
2317 	if (qdf_likely(mon_peer)) {
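		/* Average airtime consumption per second, in percent:
		 * scale the accumulated consumption by 100 and divide by
		 * the time elapsed since the last update.
		 */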
2318 		for (ac = 0; ac < WME_AC_MAX; ac++) {
2319 			mon_peer->stats.airtime_stats.tx_airtime_consumption[ac].avg_consumption_per_sec =
2320 				(uint8_t)qdf_do_div((uint64_t)(mon_peer->stats.airtime_stats.tx_airtime_consumption[ac].consumption * 100),
2321 						    (uint32_t)(current_time - mon_peer->stats.airtime_stats.last_update_time));
2322 			mon_peer->stats.airtime_stats.rx_airtime_consumption[ac].avg_consumption_per_sec =
2323 				(uint8_t)qdf_do_div((uint64_t)(mon_peer->stats.airtime_stats.rx_airtime_consumption[ac].consumption * 100),
2324 						    (uint32_t)(current_time - mon_peer->stats.airtime_stats.last_update_time));
2325 			mon_peer->stats.airtime_stats.last_update_time = current_time;
2326 			/* Store each peer airtime consumption in pdev
2327 			 * link_airtime to calculate pdev's total airtime
2328 			 * consumption
2329 			 */
2330 			DP_STATS_INC(
2331 				pdev,
2332 				telemetry_stats.link_airtime[ac],
2333 				mon_peer->stats.airtime_stats.tx_airtime_consumption[ac].consumption);
2334 			DP_STATS_INC(
2335 				pdev,
2336 				telemetry_stats.link_airtime[ac],
2337 				mon_peer->stats.airtime_stats.rx_airtime_consumption[ac].consumption);
2338 			mon_peer->stats.airtime_stats.tx_airtime_consumption[ac].consumption = 0;
2339 			mon_peer->stats.airtime_stats.rx_airtime_consumption[ac].consumption = 0;
2340 		}
2341 	}
2342 }
2343 
2344 QDF_STATUS dp_pdev_update_telemetry_airtime_stats(struct cdp_soc_t *soc,
2345 						  uint8_t pdev_id)
2346 {
2347 	struct dp_pdev *pdev =
2348 		dp_get_pdev_from_soc_pdev_id_wifi3(cdp_soc_t_to_dp_soc(soc),
2349 						   pdev_id);
2350 	if (!pdev)
2351 		return QDF_STATUS_E_FAILURE;
2352 
2353 	/* Clear current airtime stats as the below API will increment the stats
2354 	 * for all peers on top of current value
2355 	 */
2356 	dp_pdev_clear_link_airtime_stats(pdev);
2357 	dp_pdev_iterate_peer(pdev, dp_peer_update_telemetry_stats, NULL,
2358 			     DP_MOD_ID_CDP);
2359 
2360 	return QDF_STATUS_SUCCESS;
2361 }
2362 #endif
2363 
2364 /**
2365  * dp_peer_cal_clients_stats_update() - update peer stats on cal client timer
2366  * @soc: Datapath SOC
2367  * @peer: Datapath peer
2368  * @arg: argument to iter function
2369  */
2370 #ifdef IPA_OFFLOAD
2371 static void
2372 dp_peer_cal_clients_stats_update(struct dp_soc *soc,
2373 				 struct dp_peer *peer,
2374 				 void *arg)
2375 {
2376 	struct cdp_calibr_stats_intf peer_stats_intf = {0};
2377 	struct dp_peer *tgt_peer = NULL;
2378 	struct dp_txrx_peer *txrx_peer = NULL;
2379 
2380 	if (!dp_peer_is_primary_link_peer(peer))
2381 		return;
2382 
2383 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
2384 	if (!tgt_peer || !(tgt_peer->txrx_peer))
2385 		return;
2386 
2387 	txrx_peer = tgt_peer->txrx_peer;
2388 	peer_stats_intf.to_stack = txrx_peer->to_stack;
2389 	peer_stats_intf.tx_success =
2390 				peer->monitor_peer->stats.tx.tx_ucast_success;
2391 	peer_stats_intf.tx_ucast =
2392 				peer->monitor_peer->stats.tx.tx_ucast_total;
2393 
2394 	dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf,
2395 					      &tgt_peer->stats);
2396 	dp_peer_get_rxtid_stats_ipa(peer, dp_peer_update_tid_stats_from_reo);
2397 }
2398 #else
2399 static void
2400 dp_peer_cal_clients_stats_update(struct dp_soc *soc,
2401 				 struct dp_peer *peer,
2402 				 void *arg)
2403 {
2404 	struct cdp_calibr_stats_intf peer_stats_intf = {0};
2405 	struct dp_peer *tgt_peer = NULL;
2406 	struct dp_txrx_peer *txrx_peer = NULL;
2407 
2408 	if (!dp_peer_is_primary_link_peer(peer))
2409 		return;
2410 
2411 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
2412 	if (!tgt_peer || !(tgt_peer->txrx_peer))
2413 		return;
2414 
2415 	txrx_peer = tgt_peer->txrx_peer;
2416 	peer_stats_intf.to_stack = txrx_peer->to_stack;
2417 	peer_stats_intf.tx_success =
2418 				txrx_peer->stats.per_pkt_stats.tx.tx_success;
2419 	peer_stats_intf.tx_ucast =
2420 				txrx_peer->stats.per_pkt_stats.tx.ucast;
2421 
2422 	dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf,
2423 					      &tgt_peer->stats);
2424 }
2425 #endif
2426 
2427 /**
2428  * dp_iterate_update_peer_list() - update peer stats on cal client timer
2429  * @pdev_hdl: pdev handle
2430  */
2431 static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
2432 {
2433 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
2434 
2435 	dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL,
2436 			     DP_MOD_ID_CDP);
2437 }
2438 #else
2439 static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
2440 {
2441 }
2442 #endif
2443 
2444 #ifdef ATH_SUPPORT_NAC
2445 int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
2446 			      bool val)
2447 {
2448 	/* Enable/Disable smart mesh filtering. This flag will be checked
2449 	 * during rx processing to check if packets are from NAC clients.
2450 	 */
2451 	pdev->monitor_pdev->filter_neighbour_peers = val;
2452 	return 0;
2453 }
2454 #endif /* ATH_SUPPORT_NAC */
2455 
2456 #ifdef WLAN_ATF_ENABLE
2457 void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
2458 {
2459 	if (!pdev) {
2460 		dp_cdp_err("Invalid pdev");
2461 		return;
2462 	}
2463 
2464 	pdev->monitor_pdev->dp_atf_stats_enable = value;
2465 }
2466 #endif
2467 
2468 #ifdef QCA_ENHANCED_STATS_SUPPORT
2469 /**
2470  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv() - Process
2471  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2472  * @pdev: DP PDEV handle
2473  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2474  * @ppdu_id: PPDU Id
2475  *
2476  * Return: QDF_STATUS_SUCCESS if nbuf has to be freed in caller
2477  */
2478 static QDF_STATUS
2479 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2480 					      qdf_nbuf_t tag_buf,
2481 					      uint32_t ppdu_id)
2482 {
2483 	uint32_t *nbuf_ptr;
2484 	uint8_t trim_size;
2485 	size_t head_size;
2486 	struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
2487 	uint32_t *msg_word;
2488 	uint32_t tsf_hdr;
2489 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2490 
2491 	if ((!mon_pdev->tx_sniffer_enable) && (!mon_pdev->mcopy_mode) &&
2492 	    (!mon_pdev->bpr_enable) && (!mon_pdev->tx_capture_enabled))
2493 		return QDF_STATUS_SUCCESS;
2494 
2495 	/*
2496 	 * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t
2497 	 */
2498 	msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
2499 	msg_word = msg_word + 2;
2500 	tsf_hdr = *msg_word;
2501 
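	/* Strip the HTT indication header so the nbuf starts at the mgmt
	 * payload: pull the head up to mgmt_buf (including the reserved
	 * TLV header bytes), then trim the tail to mgmt_buf_len.
	 */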
2502 	trim_size = ((mon_pdev->mgmtctrl_frm_info.mgmt_buf +
2503 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2504 		      qdf_nbuf_data(tag_buf));
2505 
2506 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2507 		return QDF_STATUS_SUCCESS;
2508 
2509 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2510 			    mon_pdev->mgmtctrl_frm_info.mgmt_buf_len);
2511 
2512 	if (mon_pdev->tx_capture_enabled) {
2513 		head_size = sizeof(struct cdp_tx_mgmt_comp_info);
2514 		if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
2515 			qdf_err("Failed to get headroom h_sz %zu h_avail %d\n",
2516 				head_size, qdf_nbuf_headroom(tag_buf));
2517 			qdf_assert_always(0);
2518 			return QDF_STATUS_E_NOMEM;
2519 		}
2520 		ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
2521 					qdf_nbuf_push_head(tag_buf, head_size);
2522 		qdf_assert_always(ptr_mgmt_comp_info);
2523 		ptr_mgmt_comp_info->ppdu_id = ppdu_id;
2524 		ptr_mgmt_comp_info->is_sgen_pkt = true;
2525 		ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
2526 	} else {
2527 		head_size = sizeof(ppdu_id);
2528 		nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
2529 		*nbuf_ptr = ppdu_id;
2530 	}
2531 	if (mon_pdev->bpr_enable) {
2532 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2533 				     tag_buf, HTT_INVALID_PEER,
2534 				     WDI_NO_VAL, pdev->pdev_id);
2535 	}
2536 
2537 	dp_deliver_mgmt_frm(pdev, tag_buf);
2538 
2539 	return QDF_STATUS_E_ALREADY;
2540 }
2541 
2542 int
2543 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
2544 {
2545 	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
2546 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
2547 	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
2548 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
2549 
2550 	return 0;
2551 }
2552 
2553 /**
2554  * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
2555  * @peer: Datapath peer handle
2556  * @ppdu: User PPDU Descriptor
2557  * @cur_ppdu_id: PPDU_ID
2558  *
2559  * Return: None
2560  *
2561  * On a Tx data frame we may get the delayed BA flag set in
2562  * htt_ppdu_stats_user_common_tlv, which means the Block Ack (BA) arrives
2563  * after we send a Block Ack Request (BAR). Successful msdus are received
2564  * only after the Block Ack, and populating peer stats needs them, so we
2565  * hold the Tx data stats in delayed_ba until the stats update.
2566  */
2567 static void
2568 dp_peer_copy_delay_stats(struct dp_peer *peer,
2569 			 struct cdp_tx_completion_ppdu_user *ppdu,
2570 			 uint32_t cur_ppdu_id)
2571 {
2572 	struct dp_pdev *pdev;
2573 	struct dp_vdev *vdev;
2574 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
2575 
2576 	if (mon_peer->last_delayed_ba) {
2577 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2578 			  "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]",
2579 			  mon_peer->last_delayed_ba_ppduid, cur_ppdu_id);
2580 		vdev = peer->vdev;
2581 		if (vdev) {
2582 			pdev = vdev->pdev;
2583 			pdev->stats.cdp_delayed_ba_not_recev++;
2584 		}
2585 	}
2586 
2587 	mon_peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
2588 	mon_peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
2589 	mon_peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
2590 	mon_peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
2591 	mon_peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
2592 	mon_peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
2593 	mon_peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
2594 	mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
2595 	mon_peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
2597 	mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast =
2598 					ppdu->mpdu_tried_ucast;
2599 	mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast =
2600 					ppdu->mpdu_tried_mcast;
2601 	mon_peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
2602 	mon_peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
2604 
2605 	mon_peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
2606 	mon_peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
2607 	mon_peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
2608 
2609 	mon_peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
2610 	mon_peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;
2611 
2612 	mon_peer->last_delayed_ba = true;
2613 
2614 	ppdu->debug_copied = true;
2615 }
2616 
2617 /**
2618  * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
2619  * @peer: Datapath peer handle
2620  * @ppdu: PPDU Descriptor
2621  *
2622  * Return: None
2623  *
2624  * For a Tx BAR, the PPDU stats TLV includes Block Ack info; the PPDU
2625  * info from the Tx BAR frame is not required to populate peer stats.
2626  * But the successful MPDU and MSDU counts are needed to update the
2627  * previously transmitted Tx data frame, so overwrite the ppdu stats
2628  * with the previously stored ppdu stats.
2629  */
2630 static void
2631 dp_peer_copy_stats_to_bar(struct dp_peer *peer,
2632 			  struct cdp_tx_completion_ppdu_user *ppdu)
2633 {
2634 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
2635 
2636 	ppdu->ltf_size = mon_peer->delayed_ba_ppdu_stats.ltf_size;
2637 	ppdu->stbc = mon_peer->delayed_ba_ppdu_stats.stbc;
2638 	ppdu->he_re = mon_peer->delayed_ba_ppdu_stats.he_re;
2639 	ppdu->txbf = mon_peer->delayed_ba_ppdu_stats.txbf;
2640 	ppdu->bw = mon_peer->delayed_ba_ppdu_stats.bw;
2641 	ppdu->nss = mon_peer->delayed_ba_ppdu_stats.nss;
2642 	ppdu->gi = mon_peer->delayed_ba_ppdu_stats.gi;
2643 	ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm;
2644 	ppdu->ldpc = mon_peer->delayed_ba_ppdu_stats.ldpc;
2646 	ppdu->mpdu_tried_ucast =
2647 			mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
2648 	ppdu->mpdu_tried_mcast =
2649 			mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
2650 	ppdu->frame_ctrl = mon_peer->delayed_ba_ppdu_stats.frame_ctrl;
2651 	ppdu->qos_ctrl = mon_peer->delayed_ba_ppdu_stats.qos_ctrl;
2653 
2654 	ppdu->ru_start = mon_peer->delayed_ba_ppdu_stats.ru_start;
2655 	ppdu->ru_tones = mon_peer->delayed_ba_ppdu_stats.ru_tones;
2656 	ppdu->is_mcast = mon_peer->delayed_ba_ppdu_stats.is_mcast;
2657 
2658 	ppdu->user_pos = mon_peer->delayed_ba_ppdu_stats.user_pos;
2659 	ppdu->mu_group_id = mon_peer->delayed_ba_ppdu_stats.mu_group_id;
2660 
2661 	mon_peer->last_delayed_ba = false;
2662 
2663 	ppdu->debug_copied = true;
2664 }
2665 
2666 /**
2667  * dp_tx_rate_stats_update() - Update rate per-peer statistics
2668  * @peer: Datapath peer handle
2669  * @ppdu: PPDU Descriptor
2670  *
2671  * Return: None
2672  */
2673 static void
2674 dp_tx_rate_stats_update(struct dp_peer *peer,
2675 			struct cdp_tx_completion_ppdu_user *ppdu)
2676 {
2677 	uint32_t ratekbps = 0;
2678 	uint64_t ppdu_tx_rate = 0;
2679 	uint32_t rix;
2680 	uint16_t ratecode = 0;
2681 	struct dp_mon_peer *mon_peer = NULL;
2682 
2683 	if (!peer || !ppdu)
2684 		return;
2685 
2686 	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK)
2687 		return;
2688 
2689 	mon_peer = peer->monitor_peer;
2690 	if (!mon_peer)
2691 		return;
2692 
2693 	ratekbps = dp_getrateindex(ppdu->gi,
2694 				   ppdu->mcs,
2695 				   ppdu->nss,
2696 				   ppdu->preamble,
2697 				   ppdu->bw,
2698 				   ppdu->punc_mode,
2699 				   &rix,
2700 				   &ratecode);
2701 
2702 	if (!ratekbps)
2703 		return;
2704 
2705 	/* Calculate goodput in the non-training period.
2706 	 * In the training period, do nothing, as the
2707 	 * pending packet is sent as goodput.
2708 	 */
2709 	if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
2710 		ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
2711 				(CDP_PERCENT_MACRO - ppdu->current_rate_per));
2712 	}
2713 	ppdu->rix = rix;
2714 	ppdu->tx_ratekbps = ratekbps;
2715 	ppdu->tx_ratecode = ratecode;
2716 	DP_STATS_UPD(mon_peer, tx.tx_rate, ratekbps);
2717 	mon_peer->stats.tx.avg_tx_rate =
2718 		dp_ath_rate_lpf(mon_peer->stats.tx.avg_tx_rate, ratekbps);
2719 	ppdu_tx_rate = dp_ath_rate_out(mon_peer->stats.tx.avg_tx_rate);
2720 	DP_STATS_UPD(mon_peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
2721 
2722 	mon_peer->stats.tx.bw_info = ppdu->bw;
2723 	mon_peer->stats.tx.gi_info = ppdu->gi;
2724 	mon_peer->stats.tx.nss_info = ppdu->nss;
2725 	mon_peer->stats.tx.mcs_info = ppdu->mcs;
2726 	mon_peer->stats.tx.preamble_info = ppdu->preamble;
2727 	if (peer->vdev) {
2728 		/*
2729 		 * In STA mode:
2730 		 *	We get ucast stats as BSS peer stats.
2731 		 *
2732 		 * In AP mode:
2733 		 *	We get mcast stats as BSS peer stats.
2734 		 *	We get ucast stats as assoc peer stats.
2735 		 */
2736 		if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
2737 			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
2738 			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
2739 		} else {
2740 			peer->vdev->stats.tx.last_tx_rate = ratekbps;
2741 			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
2742 		}
2743 	}
2744 }
2745 
2746 #if defined(FEATURE_PERPKT_INFO) && defined(WDI_EVENT_ENABLE)
2747 void dp_send_stats_event(struct dp_pdev *pdev, struct dp_peer *peer,
2748 			 uint16_t peer_id)
2749 {
2750 	struct cdp_interface_peer_stats peer_stats_intf = {0};
2751 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
2752 	struct dp_txrx_peer *txrx_peer = NULL;
2753 
2754 	if (qdf_unlikely(!mon_peer))
2755 		return;
2756 
2757 	mon_peer->stats.rx.rx_snr_measured_time = qdf_system_ticks();
2758 	peer_stats_intf.rx_avg_snr = mon_peer->stats.rx.avg_snr;
2759 
2760 	txrx_peer = dp_get_txrx_peer(peer);
2761 	if (qdf_likely(txrx_peer)) {
2762 		peer_stats_intf.rx_byte_count = txrx_peer->to_stack.bytes;
2763 		peer_stats_intf.tx_byte_count =
2764 			txrx_peer->stats.per_pkt_stats.tx.tx_success.bytes;
2765 	}
2766 
2767 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
2768 			     &peer_stats_intf, peer_id,
2769 			     UPDATE_PEER_STATS, pdev->pdev_id);
2770 }
2771 #endif
2772 
2773 #ifdef WLAN_FEATURE_11BE
2774 /**
2775  * dp_get_ru_index_frm_ru_tones() - get ru index
2776  * @ru_tones: ru tones
2777  *
2778  * Return: ru index
2779  */
2780 static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones)
2781 {
2782 	enum cdp_ru_index ru_index;
2783 
2784 	switch (ru_tones) {
2785 	case RU_26:
2786 		ru_index = RU_26_INDEX;
2787 		break;
2788 	case RU_52:
2789 		ru_index = RU_52_INDEX;
2790 		break;
2791 	case RU_52_26:
2792 		ru_index = RU_52_26_INDEX;
2793 		break;
2794 	case RU_106:
2795 		ru_index = RU_106_INDEX;
2796 		break;
2797 	case RU_106_26:
2798 		ru_index = RU_106_26_INDEX;
2799 		break;
2800 	case RU_242:
2801 		ru_index = RU_242_INDEX;
2802 		break;
2803 	case RU_484:
2804 		ru_index = RU_484_INDEX;
2805 		break;
2806 	case RU_484_242:
2807 		ru_index = RU_484_242_INDEX;
2808 		break;
2809 	case RU_996:
2810 		ru_index = RU_996_INDEX;
2811 		break;
2812 	case RU_996_484:
2813 		ru_index = RU_996_484_INDEX;
2814 		break;
2815 	case RU_996_484_242:
2816 		ru_index = RU_996_484_242_INDEX;
2817 		break;
2818 	case RU_2X996:
2819 		ru_index = RU_2X996_INDEX;
2820 		break;
2821 	case RU_2X996_484:
2822 		ru_index = RU_2X996_484_INDEX;
2823 		break;
2824 	case RU_3X996:
2825 		ru_index = RU_3X996_INDEX;
2826 		break;
2827 	case RU_3X996_484:
2828 		ru_index = RU_3X996_484_INDEX;
2829 		break;
2830 	case RU_4X996:
2831 		ru_index = RU_4X996_INDEX;
2832 		break;
2833 	default:
2834 		ru_index = RU_INDEX_MAX;
2835 		break;
2836 	}
2837 
2838 	return ru_index;
2839 }
2840 
2841 /**
2842  * dp_mon_get_ru_width_from_ru_size() - get ru_width from ru_size enum
2843  * @ru_size: HTT ru_size enum
2844  *
2845  * Return: ru_width of uint32_t type
2846  */
2847 static uint32_t dp_mon_get_ru_width_from_ru_size(uint16_t ru_size)
2848 {
2849 	uint32_t width = 0;
2850 
2851 	switch (ru_size) {
2852 	case HTT_PPDU_STATS_RU_26:
2853 		width = RU_26;
2854 		break;
2855 	case HTT_PPDU_STATS_RU_52:
2856 		width = RU_52;
2857 		break;
2858 	case HTT_PPDU_STATS_RU_52_26:
2859 		width = RU_52_26;
2860 		break;
2861 	case HTT_PPDU_STATS_RU_106:
2862 		width = RU_106;
2863 		break;
2864 	case HTT_PPDU_STATS_RU_106_26:
2865 		width = RU_106_26;
2866 		break;
2867 	case HTT_PPDU_STATS_RU_242:
2868 		width = RU_242;
2869 		break;
2870 	case HTT_PPDU_STATS_RU_484:
2871 		width = RU_484;
2872 		break;
2873 	case HTT_PPDU_STATS_RU_484_242:
2874 		width = RU_484_242;
2875 		break;
2876 	case HTT_PPDU_STATS_RU_996:
2877 		width = RU_996;
2878 		break;
2879 	case HTT_PPDU_STATS_RU_996_484:
2880 		width = RU_996_484;
2881 		break;
2882 	case HTT_PPDU_STATS_RU_996_484_242:
2883 		width = RU_996_484_242;
2884 		break;
2885 	case HTT_PPDU_STATS_RU_996x2:
2886 		width = RU_2X996;
2887 		break;
2888 	case HTT_PPDU_STATS_RU_996x2_484:
2889 		width = RU_2X996_484;
2890 		break;
2891 	case HTT_PPDU_STATS_RU_996x3:
2892 		width = RU_3X996;
2893 		break;
2894 	case HTT_PPDU_STATS_RU_996x3_484:
2895 		width = RU_3X996_484;
2896 		break;
2897 	case HTT_PPDU_STATS_RU_996x4:
2898 		width = RU_4X996;
2899 		break;
2900 	default:
2901 		dp_mon_debug("Unsupported ru_size: %d rcvd", ru_size);
2902 	}
2903 
2904 	return width;
2905 }
2906 #else
2907 static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones)
2908 {
2909 	enum cdp_ru_index ru_index;
2910 
2911 	switch (ru_tones) {
2912 	case RU_26:
2913 		ru_index = RU_26_INDEX;
2914 		break;
2915 	case RU_52:
2916 		ru_index = RU_52_INDEX;
2917 		break;
2918 	case RU_106:
2919 		ru_index = RU_106_INDEX;
2920 		break;
2921 	case RU_242:
2922 		ru_index = RU_242_INDEX;
2923 		break;
2924 	case RU_484:
2925 		ru_index = RU_484_INDEX;
2926 		break;
2927 	case RU_996:
2928 		ru_index = RU_996_INDEX;
2929 		break;
2930 	default:
2931 		ru_index = RU_INDEX_MAX;
2932 		break;
2933 	}
2934 
2935 	return ru_index;
2936 }
2937 
2938 static uint32_t dp_mon_get_ru_width_from_ru_size(uint16_t ru_size)
2939 {
2940 	uint32_t width = 0;
2941 
2942 	switch (ru_size) {
2943 	case HTT_PPDU_STATS_RU_26:
2944 		width = RU_26;
2945 		break;
2946 	case HTT_PPDU_STATS_RU_52:
2947 		width = RU_52;
2948 		break;
2949 	case HTT_PPDU_STATS_RU_106:
2950 		width = RU_106;
2951 		break;
2952 	case HTT_PPDU_STATS_RU_242:
2953 		width = RU_242;
2954 		break;
2955 	case HTT_PPDU_STATS_RU_484:
2956 		width = RU_484;
2957 		break;
2958 	case HTT_PPDU_STATS_RU_996:
2959 		width = RU_996;
2960 		break;
2961 	default:
2962 		dp_mon_debug("Unsupported ru_size: %d rcvd", ru_size);
2963 	}
2964 
2965 	return width;
2966 }
2967 #endif
2968 
2969 #ifdef WLAN_TELEMETRY_STATS_SUPPORT
2970 /**
2971  * dp_pdev_telemetry_stats_update() - Update pdev telemetry stats
2972  * @pdev: Datapath pdev handle
2973  * @ppdu: PPDU Descriptor
2974  *
2975  * Return: None
2976  */
2977 static void
2978 dp_pdev_telemetry_stats_update(
2979 		struct dp_pdev *pdev,
2980 		struct cdp_tx_completion_ppdu_user *ppdu)
2981 {
2982 	uint16_t mpdu_tried;
2983 	uint16_t mpdu_failed;
2984 	uint16_t num_mpdu;
2985 	uint8_t ac = 0;
2986 
2987 	num_mpdu = ppdu->mpdu_success;
2988 	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
2989 	mpdu_failed = mpdu_tried - num_mpdu;
2990 
2991 	ac = TID_TO_WME_AC(ppdu->tid);
2992 
2993 	DP_STATS_INC(pdev, telemetry_stats.tx_mpdu_failed[ac],
2994 		     mpdu_failed);
2995 
2996 	DP_STATS_INC(pdev, telemetry_stats.tx_mpdu_total[ac],
2997 		     mpdu_tried);
2998 }
2999 
3000 /**
3001  * dp_ppdu_desc_get_txmode() - Get TX mode
3002  * @ppdu: PPDU Descriptor
3003  *
3004  * Return: None
3005  */
3006 static inline
3007 void dp_ppdu_desc_get_txmode(struct cdp_tx_completion_ppdu *ppdu)
3008 {
3009 	uint16_t frame_type = ppdu->htt_frame_type;
3010 
3011 	if (ppdu->frame_type != CDP_PPDU_FTYPE_DATA)
3012 		return;
3013 
3014 	ppdu->txmode_type = TX_MODE_TYPE_UNKNOWN;
3015 
3016 	if (frame_type == HTT_STATS_FTYPE_SGEN_MU_BAR ||
3017 	    frame_type == HTT_STATS_FTYPE_SGEN_BE_MU_BAR) {
3018 		ppdu->txmode = TX_MODE_UL_OFDMA_MU_BAR_TRIGGER;
3019 		ppdu->txmode_type = TX_MODE_TYPE_UL;
3020 
3021 		return;
3022 	}
3023 
3024 	switch (ppdu->htt_seq_type) {
3025 	case HTT_SEQTYPE_SU:
3026 		if (frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
3027 			ppdu->txmode = TX_MODE_DL_SU_DATA;
3028 			ppdu->txmode_type = TX_MODE_TYPE_DL;
3029 		}
3030 		break;
3031 	case HTT_SEQTYPE_MU_OFDMA:
3032 	case HTT_SEQTYPE_BE_MU_OFDMA:
3033 		if (frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU) {
3034 			ppdu->txmode = TX_MODE_DL_OFDMA_DATA;
3035 			ppdu->txmode_type = TX_MODE_TYPE_DL;
3036 		}
3037 		break;
3038 	case HTT_SEQTYPE_AC_MU_MIMO:
3039 	case HTT_SEQTYPE_AX_MU_MIMO:
3040 	case HTT_SEQTYPE_BE_MU_MIMO:
3041 		if (frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU) {
3042 			ppdu->txmode = TX_MODE_DL_MUMIMO_DATA;
3043 			ppdu->txmode_type = TX_MODE_TYPE_DL;
3044 		}
3045 		break;
3046 	case HTT_SEQTYPE_UL_MU_OFDMA_TRIG:
3047 	case HTT_SEQTYPE_BE_UL_MU_OFDMA_TRIG:
3048 		if (frame_type == HTT_STATS_FTYPE_SGEN_MU_TRIG ||
3049 		    frame_type == HTT_STATS_FTYPE_SGEN_BE_MU_TRIG) {
3050 			ppdu->txmode = TX_MODE_UL_OFDMA_BASIC_TRIGGER_DATA;
3051 			ppdu->txmode_type = TX_MODE_TYPE_UL;
3052 		}
3053 		break;
3054 	case HTT_SEQTYPE_UL_MU_MIMO_TRIG:
3055 	case HTT_SEQTYPE_BE_UL_MU_MIMO_TRIG:
3056 		if (frame_type == HTT_STATS_FTYPE_SGEN_MU_TRIG ||
3057 		    frame_type == HTT_STATS_FTYPE_SGEN_BE_MU_TRIG) {
3058 			ppdu->txmode = TX_MODE_UL_MUMIMO_BASIC_TRIGGER_DATA;
3059 			ppdu->txmode_type = TX_MODE_TYPE_UL;
3060 		}
3061 		break;
3062 	default:
3063 		ppdu->txmode_type = TX_MODE_TYPE_UNKNOWN;
3064 		break;
3065 	}
3066 }
3067 
3068 /**
3069  * dp_pdev_update_deter_stats() - Update pdev deterministic stats
3070  * @pdev: Datapath pdev handle
3071  * @ppdu: PPDU Descriptor
3072  *
3073  * Return: None
3074  */
3075 static inline void
3076 dp_pdev_update_deter_stats(struct dp_pdev *pdev,
3077 			   struct cdp_tx_completion_ppdu *ppdu)
3078 {
3079 	if (!pdev || !ppdu)
3080 		return;
3081 
3082 	if (ppdu->frame_type != CDP_PPDU_FTYPE_DATA)
3083 		return;
3084 
3085 	if (ppdu->txmode_type == TX_MODE_TYPE_UNKNOWN)
3086 		return;
3087 
3088 	if (ppdu->backoff_ac_valid)
3089 		DP_STATS_UPD(pdev,
3090 			     deter_stats.ch_access_delay[ppdu->backoff_ac],
3091 			     ppdu->ch_access_delay);
3092 
3093 	if (ppdu->num_ul_user_resp_valid &&
3094 	    (ppdu->txmode_type == TX_MODE_TYPE_UL)) {
3095 		if (ppdu->num_ul_user_resp) {
3096 			DP_STATS_INC(pdev,
3097 				     deter_stats.trigger_success,
3098 				     1);
3099 		} else {
3100 			DP_STATS_INC(pdev,
3101 				     deter_stats.trigger_fail,
3102 				     1);
3103 		}
3104 	}
3105 
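	/* Bucket the PPDU by transmit mode: count DL vs UL mode usage and,
	 * for MU data modes, histogram the number of users served.
	 */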
3106 	if (ppdu->txmode_type == TX_MODE_TYPE_DL) {
3107 		DP_STATS_INC(pdev,
3108 			     deter_stats.dl_mode_cnt[ppdu->txmode],
3109 			     1);
3110 		switch (ppdu->txmode) {
3111 		case TX_MODE_DL_OFDMA_DATA:
3112 			DP_STATS_INC(pdev,
3113 				     deter_stats.dl_ofdma_usr[ppdu->num_users],
3114 				     1);
3115 			break;
3116 		case TX_MODE_DL_MUMIMO_DATA:
3117 			DP_STATS_INC(pdev,
3118 				     deter_stats.dl_mimo_usr[ppdu->num_users],
3119 				     1);
3120 			break;
3121 		}
3122 	} else {
3123 		DP_STATS_INC(pdev,
3124 			     deter_stats.ul_mode_cnt[ppdu->txmode],
3125 			     1);
3126 		switch (ppdu->txmode) {
3127 		case TX_MODE_UL_OFDMA_BASIC_TRIGGER_DATA:
3128 			DP_STATS_INC(pdev,
3129 				     deter_stats.ul_ofdma_usr[ppdu->num_ul_users],
3130 				     1);
3131 			break;
3132 		case TX_MODE_UL_MUMIMO_BASIC_TRIGGER_DATA:
3133 			DP_STATS_INC(pdev,
3134 				     deter_stats.ul_mimo_usr[ppdu->num_ul_users],
3135 				     1);
3136 			break;
3137 		}
3138 	}
3139 }
3140 
3141 /**
3142  * dp_ppdu_desc_get_msduq() - Get msduq index from bitmap
3143  * @msduq_bitmap: HTT MSDUQ bitmap from the PPDU descriptor
3144  * @msduq_index: pointer to the MSDUQ index to be populated
3145  *
3146  * Return: None
3147  */
3148 static inline void
3149 dp_ppdu_desc_get_msduq(uint32_t msduq_bitmap, uint32_t *msduq_index)
3150 {
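	/* Walk the bitmap in a fixed order: the default (non-UDP/UDP)
	 * queues first, then the custom and custom-extended priority
	 * queues; the first set bit selects the reported MSDUQ index.
	 */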
3151 	if ((msduq_bitmap & BIT(HTT_MSDUQ_INDEX_NON_UDP)) ||
3152 	    (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_UDP))) {
3153 		*msduq_index = MSDUQ_INDEX_DEFAULT;
3154 	} else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_PRIO_0)) {
3155 		*msduq_index = MSDUQ_INDEX_CUSTOM_PRIO_0;
3156 	} else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_PRIO_1)) {
3157 		*msduq_index = MSDUQ_INDEX_CUSTOM_PRIO_1;
3158 	} else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_0)) {
3159 		*msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_0;
3160 	} else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_1)) {
3161 		*msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_1;
3162 	} else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_2)) {
3163 		*msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_2;
3164 	} else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_3)) {
3165 		*msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_3;
3166 	} else {
3167 		*msduq_index = MSDUQ_INDEX_MAX;
3168 	}
3169 }
3170 
3171 /**
3172  * dp_ppdu_desc_user_deter_stats_update() - Update per-peer deterministic stats
3173  * @pdev: Datapath pdev handle
3174  * @peer: Datapath peer handle
3175  * @ppdu_desc: PPDU Descriptor
3176  * @user: PPDU Descriptor per user
3177  *
3178  * Return: None
3179  */
3180 static void
3181 dp_ppdu_desc_user_deter_stats_update(struct dp_pdev *pdev,
3182 				     struct dp_peer *peer,
3183 				     struct cdp_tx_completion_ppdu *ppdu_desc,
3184 				     struct cdp_tx_completion_ppdu_user *user)
3185 {
3186 	struct dp_mon_peer *mon_peer = NULL;
3187 	uint32_t msduq;
3188 	uint8_t txmode;
3189 	uint8_t tid;
3190 
3191 	if (!pdev || !ppdu_desc || !user || !peer)
3192 		return;
3193 
3194 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA)
3195 		return;
3196 
3197 	if (user->tid >= CDP_DATA_TID_MAX)
3198 		return;
3199 
3200 	mon_peer = peer->monitor_peer;
3201 	if (qdf_unlikely(!mon_peer))
3202 		return;
3203 
3204 	if (ppdu_desc->txmode_type == TX_MODE_TYPE_UNKNOWN)
3205 		return;
3206 
3207 	txmode = ppdu_desc->txmode;
3208 	tid = user->tid;
3209 	if (ppdu_desc->txmode_type == TX_MODE_TYPE_DL) {
3210 		dp_ppdu_desc_get_msduq(user->msduq_bitmap, &msduq);
3211 		if (msduq == MSDUQ_INDEX_MAX)
3212 			return;
3213 
3214 		DP_STATS_INC(mon_peer,
3215 			     deter_stats[tid].dl_det[msduq][txmode].mode_cnt,
3216 			     1);
3217 		DP_STATS_UPD(mon_peer,
3218 			     deter_stats[tid].dl_det[msduq][txmode].avg_rate,
3219 			     mon_peer->stats.tx.avg_tx_rate);
3220 	} else {
3221 		DP_STATS_INC(mon_peer,
3222 			     deter_stats[tid].ul_det[txmode].mode_cnt,
3223 			     1);
3224 		DP_STATS_UPD(mon_peer,
3225 			     deter_stats[tid].ul_det[txmode].avg_rate,
3226 			     mon_peer->stats.tx.avg_tx_rate);
3227 		if (!user->completion_status) {
3228 			DP_STATS_INC(mon_peer,
3229 				     deter_stats[tid].ul_det[txmode].trigger_success,
3230 				     1);
3231 		} else {
3232 			DP_STATS_INC(mon_peer,
3233 				     deter_stats[tid].ul_det[txmode].trigger_fail,
3234 				     1);
3235 		}
3236 	}
3237 }
3238 #else
3239 static inline
3240 void dp_ppdu_desc_get_txmode(struct cdp_tx_completion_ppdu *ppdu)
3241 {
3242 }
3243 
3244 static inline void
3245 dp_ppdu_desc_get_msduq(uint32_t msduq_bitmap, uint32_t *msduq_index)
3246 {
3247 }
3248 
3249 static void
3250 dp_ppdu_desc_user_deter_stats_update(struct dp_pdev *pdev,
3251 				     struct dp_peer *peer,
3252 				     struct cdp_tx_completion_ppdu *ppdu_desc,
3253 				     struct cdp_tx_completion_ppdu_user *user)
3254 {
3255 }
3256 
3257 static inline void
3258 dp_pdev_telemetry_stats_update(
3259 		struct dp_pdev *pdev,
3260 		struct cdp_tx_completion_ppdu_user *ppdu)
3261 { }
3262 
3263 static inline void
3264 dp_pdev_update_deter_stats(struct dp_pdev *pdev,
3265 			   struct cdp_tx_completion_ppdu *ppdu)
3266 { }
3267 #endif
3268 
3269 /**
3270  * dp_tx_stats_update() - Update per-peer statistics
3271  * @pdev: Datapath pdev handle
3272  * @peer: Datapath peer handle
3273  * @ppdu: PPDU Descriptor per user
3274  * @ppdu_desc: PPDU Descriptor
3275  *
3276  * Return: None
3277  */
3278 static void
3279 dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
3280 		   struct cdp_tx_completion_ppdu_user *ppdu,
3281 		   struct cdp_tx_completion_ppdu *ppdu_desc)
3282 {
3283 	uint8_t preamble, mcs;
3284 	uint16_t num_msdu;
3285 	uint16_t num_mpdu;
3286 	uint16_t mpdu_tried;
3287 	uint16_t mpdu_failed;
3288 	struct dp_mon_ops *mon_ops;
3289 	enum cdp_ru_index ru_index;
3290 	struct dp_mon_peer *mon_peer = NULL;
3291 	uint32_t ratekbps = 0;
3292 	uint64_t tx_byte_count;
3293 
3294 	preamble = ppdu->preamble;
3295 	mcs = ppdu->mcs;
3296 	num_msdu = ppdu->num_msdu;
3297 	num_mpdu = ppdu->mpdu_success;
3298 	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
3299 	mpdu_failed = mpdu_tried - num_mpdu;
3300 	tx_byte_count = ppdu->success_bytes;
3301 
3302 	/* If the peer statistics are already processed as part of
3303 	 * per-MSDU completion handler, do not process these again in per-PPDU
3304 	 * indications
3305 	 */
3306 	if (pdev->soc->process_tx_status)
3307 		return;
3308 
3309 	mon_peer = peer->monitor_peer;
3310 	if (!mon_peer)
3311 		return;
3312 
3313 	if (!ppdu->is_mcast) {
3314 		DP_STATS_INC(mon_peer, tx.tx_ucast_total.num, num_msdu);
3315 		DP_STATS_INC(mon_peer, tx.tx_ucast_total.bytes,
3316 			     tx_byte_count);
3317 	}
3318 
3319 	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
3320 		/*
3321 		 * All failed MPDUs will be retried, so increment the retry
3322 		 * count by the number of failed MPDUs. Even for ack failure,
3323 		 * i.e. for long retries, mpdu_failed equals mpdu_tried.
3325 		 */
3326 		DP_STATS_INC(mon_peer, tx.retries, mpdu_failed);
3327 		dp_pdev_telemetry_stats_update(pdev, ppdu);
3328 		return;
3329 	}
3330 
3331 	if (ppdu->is_ppdu_cookie_valid)
3332 		DP_STATS_INC(mon_peer, tx.num_ppdu_cookie_valid, 1);
3333 
3334 	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
3335 	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
3336 		if (qdf_unlikely(ppdu->mu_group_id &&
3337 				 !(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
3338 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3339 				  "mu_group_id out of bound!!\n");
3340 		else
3341 			DP_STATS_UPD(mon_peer, tx.mu_group_id[ppdu->mu_group_id],
3342 				     (ppdu->user_pos + 1));
3343 	}
3344 
3345 	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
3346 	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
3347 		DP_STATS_UPD(mon_peer, tx.ru_tones, ppdu->ru_tones);
3348 		DP_STATS_UPD(mon_peer, tx.ru_start, ppdu->ru_start);
3349 		ru_index = dp_get_ru_index_frm_ru_tones(ppdu->ru_tones);
3350 		if (ru_index != RU_INDEX_MAX) {
3351 			DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_msdu,
3352 				     num_msdu);
3353 			DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_mpdu,
3354 				     num_mpdu);
3355 			DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].mpdu_tried,
3356 				     mpdu_tried);
3357 		}
3358 	}
3359 
3360 	/*
3361 	 * All failed MPDUs will be retried, so increment the retry
3362 	 * count by the number of failed MPDUs. Even for ack failure,
3363 	 * i.e. for long retries, mpdu_failed equals mpdu_tried.
3365 	 */
3366 	DP_STATS_INC(mon_peer, tx.retries, mpdu_failed);
3367 
3368 	DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
3369 		     num_msdu);
3370 	DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
3371 		     num_mpdu);
3372 	DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
3373 		     mpdu_tried);
3374 
3375 	DP_STATS_INC(mon_peer, tx.sgi_count[ppdu->gi], num_msdu);
3376 	DP_STATS_INC(mon_peer, tx.bw[ppdu->bw], num_msdu);
3377 	DP_STATS_INC(mon_peer, tx.nss[ppdu->nss], num_msdu);
3378 	if (ppdu->tid < CDP_DATA_TID_MAX) {
3379 		DP_STATS_INC(mon_peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
3380 			     num_msdu);
3381 		DP_STATS_INC(mon_peer,
3382 			     tx.wme_ac_type_bytes[TID_TO_WME_AC(ppdu->tid)],
3383 			     tx_byte_count);
3384 	}
3385 
3386 	DP_STATS_INCC(mon_peer, tx.stbc, num_msdu, ppdu->stbc);
3387 	DP_STATS_INCC(mon_peer, tx.ldpc, num_msdu, ppdu->ldpc);
3388 	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
3389 		DP_STATS_UPD(mon_peer, tx.last_ack_rssi, ppdu_desc->ack_rssi);
3390 
3391 	if (!ppdu->is_mcast) {
3392 		DP_STATS_INC(mon_peer, tx.tx_ucast_success.num, num_msdu);
3393 		DP_STATS_INC(mon_peer, tx.tx_ucast_success.bytes,
3394 			     tx_byte_count);
3395 	}
3396 
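	/* Per-preamble MCS histogram: MCS values at or beyond the
	 * preamble's maximum are folded into the last bucket (MAX_MCS - 1).
	 */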
3397 	DP_STATS_INCC(mon_peer,
3398 		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
3399 		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
3400 	DP_STATS_INCC(mon_peer,
3401 		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
3402 		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
3403 	DP_STATS_INCC(mon_peer,
3404 		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
3405 		      ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
3406 	DP_STATS_INCC(mon_peer,
3407 		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
3408 		      ((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
3409 	DP_STATS_INCC(mon_peer,
3410 		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
3411 		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
3412 	DP_STATS_INCC(mon_peer,
3413 		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
3414 		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
3415 	DP_STATS_INCC(mon_peer,
3416 		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
3417 		      ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
3418 	DP_STATS_INCC(mon_peer,
3419 		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
3420 		      ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
3421 	DP_STATS_INCC(mon_peer,
3422 		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
3423 		      ((mcs >= MAX_MCS_11AX) && (preamble == DOT11_AX)));
3424 	DP_STATS_INCC(mon_peer,
3425 		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
3426 		      ((mcs < MAX_MCS_11AX) && (preamble == DOT11_AX)));
3427 	DP_STATS_INCC(mon_peer, tx.ampdu_cnt, num_mpdu, ppdu->is_ampdu);
3428 	DP_STATS_INCC(mon_peer, tx.non_ampdu_cnt, num_mpdu, !(ppdu->is_ampdu));
3429 	DP_STATS_INCC(mon_peer, tx.pream_punct_cnt, 1, ppdu->pream_punct);
3430 	DP_STATS_INC(mon_peer, tx.tx_ppdus, 1);
3431 	DP_STATS_INC(mon_peer, tx.tx_mpdus_success, num_mpdu);
3432 	DP_STATS_INC(mon_peer, tx.tx_mpdus_tried, mpdu_tried);
3433 
3434 	mon_ops = dp_mon_ops_get(pdev->soc);
3435 	if (mon_ops && mon_ops->mon_tx_stats_update)
3436 		mon_ops->mon_tx_stats_update(mon_peer, ppdu);
3437 
3438 	dp_tx_rate_stats_update(peer, ppdu);
3439 	dp_pdev_telemetry_stats_update(pdev, ppdu);
3440 
3441 	dp_ppdu_desc_user_deter_stats_update(pdev, peer, ppdu_desc,
3442 					     ppdu);
3443 
3444 	dp_peer_stats_notify(pdev, peer);
3445 
3446 	ratekbps = mon_peer->stats.tx.tx_rate;
3447 	DP_STATS_UPD(mon_peer, tx.last_tx_rate, ratekbps);
3448 
3449 	dp_send_stats_event(pdev, peer, ppdu->peer_id);
3450 }
3451 
3452 /**
3453  * dp_get_ppdu_info_user_index() - Find and allocate a per-user
3454  * descriptor for a PPDU, if a new peer id arrives in a PPDU
3455  * @pdev: DP pdev handle
3456  * @peer_id: peer unique identifier
3457  * @ppdu_info: per ppdu tlv structure
3458  *
3459  * Return: user index to be populated
3460  */
3461 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
3462 					   uint16_t peer_id,
3463 					   struct ppdu_info *ppdu_info)
3464 {
3465 	uint8_t user_index = 0;
3466 	struct cdp_tx_completion_ppdu *ppdu_desc;
3467 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3468 
3469 	ppdu_desc =
3470 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3471 
3472 	while ((user_index + 1) <= ppdu_info->last_user) {
3473 		ppdu_user_desc = &ppdu_desc->user[user_index];
3474 		if (ppdu_user_desc->peer_id != peer_id) {
3475 			user_index++;
3476 			continue;
3477 		} else {
3478 			/* Max users possible is 8 so user array index should
3479 			 * not exceed 7
3480 			 */
3481 			qdf_assert_always(user_index <= (ppdu_desc->max_users - 1));
3482 			return user_index;
3483 		}
3484 	}
3485 
3486 	ppdu_info->last_user++;
3487 	/* Max users possible is 8 so last user should not exceed 8 */
3488 	qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users);
3489 	return ppdu_info->last_user - 1;
3490 }
3491 
3492 /**
3493  * dp_process_ppdu_stats_common_tlv() - Process htt_ppdu_stats_common_tlv
3494  * @pdev: DP pdev handle
3495  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
3496  * @ppdu_info: per ppdu tlv structure
3497  *
3498  * Return: void
3499  */
3500 static void
3501 dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
3502 				 uint32_t *tag_buf,
3503 				 struct ppdu_info *ppdu_info)
3504 {
3505 	uint16_t frame_type;
3506 	uint16_t frame_ctrl;
3507 	uint16_t freq;
3508 	struct dp_soc *soc = NULL;
3509 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3510 	uint64_t ppdu_start_timestamp;
3511 	uint32_t eval_start_timestamp;
3512 	uint32_t *start_tag_buf;
3513 	uint32_t *ts_tag_buf;
3514 
3515 	start_tag_buf = tag_buf;
3516 	ppdu_desc =
3517 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3518 
3519 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
3520 
3521 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
3522 	ppdu_info->sched_cmdid =
3523 		HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
3524 	ppdu_desc->num_users =
3525 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
3526 
3527 	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
3528 
3529 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
3530 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
3531 	ppdu_desc->htt_frame_type = frame_type;
3532 
3533 	ppdu_desc->htt_seq_type =
3534 			HTT_PPDU_STATS_COMMON_TLV_PPDU_SEQ_TYPE_GET(*tag_buf);
3535 
3536 	frame_ctrl = ppdu_desc->frame_ctrl;
3537 
3538 	ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
3539 
3540 	switch (frame_type) {
3541 	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
3542 	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
3543 	case HTT_STATS_FTYPE_SGEN_QOS_NULL:
3544 		/*
3545 		 * For management packets the frame type comes as DATA_SU,
3546 		 * so check frame_ctrl before setting frame_type.
3547 		 */
3548 		if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
3549 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
3550 		else
3551 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
3552 	break;
3553 	case HTT_STATS_FTYPE_SGEN_MU_BAR:
3554 	case HTT_STATS_FTYPE_SGEN_BAR:
3555 	case HTT_STATS_FTYPE_SGEN_BE_MU_BAR:
3556 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
3557 	break;
3558 	default:
3559 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
3560 	break;
3561 	}
3562 
3563 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
3564 	ppdu_desc->tx_duration = *tag_buf;
3565 
3566 	tag_buf = start_tag_buf +
3567 			HTT_GET_STATS_CMN_INDEX(SCH_EVAL_START_TSTMP_L32_US);
3568 	eval_start_timestamp = *tag_buf;
3569 
3570 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
3571 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
3572 
3573 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
3574 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
3575 	if (freq != ppdu_desc->channel) {
3576 		soc = pdev->soc;
3577 		ppdu_desc->channel = freq;
3578 		pdev->operating_channel.freq = freq;
3579 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
3580 			pdev->operating_channel.num =
3581 			    soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
3582 								 pdev->pdev_id,
3583 								 freq);
3584 
3585 		if (soc && soc->cdp_soc.ol_ops->freq_to_band)
3586 			pdev->operating_channel.band =
3587 			       soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
3588 								 pdev->pdev_id,
3589 								 freq);
3590 	}
3591 
3592 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
3593 
3594 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
3595 	ppdu_desc->phy_ppdu_tx_time_us =
3596 		HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf);
3597 	ppdu_desc->beam_change =
3598 		HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
3599 	ppdu_desc->doppler =
3600 		HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf);
3601 	ppdu_desc->spatial_reuse =
3602 		HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf);
3603 	ppdu_desc->num_ul_users =
3604 		HTT_PPDU_STATS_COMMON_TLV_NUM_UL_EXPECTED_USERS_GET(*tag_buf);
3605 
3606 	dp_tx_capture_htt_frame_counter(pdev, frame_type);
3607 
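	/*
	 * The PPDU start TSF arrives split across two TLV words: the
	 * lower 32 bits were read above, the upper 32 bits are merged
	 * in here to form the full 64-bit timestamp.
	 */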
3608 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
3609 	ppdu_start_timestamp = *tag_buf;
3610 	ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
3611 					     HTT_SHIFT_UPPER_TIMESTAMP) &
3612 					    HTT_MASK_UPPER_TIMESTAMP);
3613 
3614 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
3615 					ppdu_desc->tx_duration;
	/* The ack timestamp is the same as the end timestamp */
3617 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;

	ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
	ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
	ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
3628 
3629 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(BSSCOLOR_OBSS_PSR);
3630 	ppdu_desc->bss_color =
3631 		HTT_PPDU_STATS_COMMON_TLV_BSS_COLOR_ID_GET(*tag_buf);
3632 
3633 	ppdu_desc->backoff_ac_valid =
3634 		HTT_PPDU_STATS_COMMON_TLV_BACKOFF_AC_VALID_GET(*tag_buf);
3635 	if (ppdu_desc->backoff_ac_valid) {
3636 		ppdu_desc->backoff_ac =
3637 			HTT_PPDU_STATS_COMMON_TLV_BACKOFF_AC_GET(*tag_buf);
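		/*
		 * Channel access delay: time from scheduler evaluation
		 * start to the actual PPDU start, computed from the
		 * lower 32 bits of the two TSF samples (microseconds).
		 */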
3638 		ts_tag_buf = start_tag_buf +
3639 			HTT_GET_STATS_CMN_INDEX(SCH_EVAL_START_TSTMP_L32_US);
3640 		eval_start_timestamp = *ts_tag_buf;
3641 
3642 		ts_tag_buf = start_tag_buf +
3643 			HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
3644 		ppdu_desc->ch_access_delay =
3645 			*ts_tag_buf - eval_start_timestamp;
3646 	}
3647 	ppdu_desc->num_ul_user_resp_valid =
3648 		HTT_PPDU_STATS_COMMON_TLV_NUM_UL_USER_RESPONSES_VALID_GET(*tag_buf);
3649 	if (ppdu_desc->num_ul_user_resp_valid)
3650 		ppdu_desc->num_ul_user_resp =
3651 			HTT_PPDU_STATS_COMMON_TLV_NUM_UL_USER_RESPONSES_GET(*tag_buf);
3652 }
3653 
3654 /**
 * dp_process_ppdu_stats_user_common_tlv() - Process htt_ppdu_stats_user_common_tlv
3656  * @pdev: DP PDEV handle
3657  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
3658  * @ppdu_info: per ppdu tlv structure
3659  *
3660  * Return: void
3661  */
3662 static void dp_process_ppdu_stats_user_common_tlv(
3663 		struct dp_pdev *pdev, uint32_t *tag_buf,
3664 		struct ppdu_info *ppdu_info)
3665 {
3666 	uint16_t peer_id;
3667 	struct cdp_tx_completion_ppdu *ppdu_desc;
3668 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3669 	uint8_t curr_user_index = 0;
3670 	struct dp_peer *peer;
3671 	struct dp_vdev *vdev;
3672 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3673 
3674 	ppdu_desc =
3675 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3676 
3677 	tag_buf++;
3678 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
3679 
3680 	curr_user_index =
3681 		dp_get_ppdu_info_user_index(pdev,
3682 					    peer_id, ppdu_info);
3683 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3684 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3685 
3686 	ppdu_desc->vdev_id =
3687 		HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
3688 
3689 	ppdu_user_desc->peer_id = peer_id;
3690 
3691 	tag_buf++;
3692 
3693 	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
3694 		ppdu_user_desc->delayed_ba = 1;
3695 		ppdu_desc->delayed_ba = 1;
3696 	}
3697 
3698 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
3699 		ppdu_user_desc->is_mcast = true;
3700 		ppdu_user_desc->mpdu_tried_mcast =
3701 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
3702 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
3703 	} else {
3704 		ppdu_user_desc->mpdu_tried_ucast =
3705 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
3706 	}
3707 
3708 	ppdu_user_desc->is_seq_num_valid =
3709 	HTT_PPDU_STATS_USER_COMMON_TLV_IS_SQNUM_VALID_IN_BUFFER_GET(*tag_buf);
3710 	tag_buf++;
3711 
3712 	ppdu_user_desc->qos_ctrl =
3713 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
3714 	ppdu_user_desc->frame_ctrl =
3715 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
3716 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
3717 
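	/*
	 * With delayed BA the per-user success count is not known yet;
	 * clear it here and let the BA status reported in a later PPDU
	 * fill it in.
	 */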
3718 	if (ppdu_user_desc->delayed_ba)
3719 		ppdu_user_desc->mpdu_success = 0;
3720 
3721 	tag_buf += 3;
3722 
3723 	if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
3724 		ppdu_user_desc->ppdu_cookie =
3725 			HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
3726 		ppdu_user_desc->is_ppdu_cookie_valid = 1;
3727 	}
3728 
	/* Returning earlier would leave the remaining fields unpopulated */
3730 	if (peer_id == DP_SCAN_PEER_ID) {
3731 		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
3732 					     DP_MOD_ID_TX_PPDU_STATS);
3733 		if (!vdev)
3734 			return;
3735 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
3736 			     QDF_MAC_ADDR_SIZE);
3737 		dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS);
3738 	} else {
3739 		peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
3740 					     DP_MOD_ID_TX_PPDU_STATS);
3741 		if (!peer) {
			/*
			 * The FW may send a peer_id that the host has
			 * already removed, e.g. on disassoc the FW can
			 * still report PPDU stats against the
			 * previously associated peer's peer_id.
			 */
3749 			vdev = dp_vdev_get_ref_by_id(pdev->soc,
3750 						     ppdu_desc->vdev_id,
3751 						     DP_MOD_ID_TX_PPDU_STATS);
3752 			if (!vdev)
3753 				return;
3754 			qdf_mem_copy(ppdu_user_desc->mac_addr,
3755 				     vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
3756 			dp_vdev_unref_delete(pdev->soc, vdev,
3757 					     DP_MOD_ID_TX_PPDU_STATS);
3758 			return;
3759 		}
3760 		qdf_mem_copy(ppdu_user_desc->mac_addr,
3761 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
3762 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3763 	}
3764 
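	/* Skip ahead to the MSDU queue bitmap word of this TLV */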
3765 	tag_buf += 10;
3766 	ppdu_user_desc->msduq_bitmap = *tag_buf;
3767 }
3768 
3769 /**
3770  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
3771  * @pdev: DP pdev handle
3772  * @tag_buf: T2H message buffer carrying the user rate TLV
3773  * @ppdu_info: per ppdu tlv structure
3774  *
3775  * Return: void
3776  */
3777 static void
3778 dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
3779 				    uint32_t *tag_buf,
3780 				    struct ppdu_info *ppdu_info)
3781 {
3782 	uint16_t peer_id;
3783 	struct cdp_tx_completion_ppdu *ppdu_desc;
3784 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3785 	uint8_t curr_user_index = 0;
3786 	struct dp_vdev *vdev;
3787 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3788 	uint8_t bw, ru_format;
3789 	uint16_t ru_size;
3790 
3791 	ppdu_desc =
3792 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3793 
3794 	tag_buf++;
3795 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
3796 
3797 	curr_user_index =
3798 		dp_get_ppdu_info_user_index(pdev,
3799 					    peer_id, ppdu_info);
3800 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3801 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
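	/*
	 * For the scan peer only validate that the vdev still exists;
	 * the reference is dropped again immediately.
	 */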
3802 	if (peer_id == DP_SCAN_PEER_ID) {
3803 		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
3804 					     DP_MOD_ID_TX_PPDU_STATS);
3805 		if (!vdev)
3806 			return;
3807 		dp_vdev_unref_delete(pdev->soc, vdev,
3808 				     DP_MOD_ID_TX_PPDU_STATS);
3809 	}
3810 	ppdu_user_desc->peer_id = peer_id;
3811 
3812 	ppdu_user_desc->tid =
3813 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
3814 
3815 	tag_buf += 1;
3816 
3817 	ppdu_user_desc->user_pos =
3818 		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
3819 	ppdu_user_desc->mu_group_id =
3820 		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
3821 
3822 	ru_format = HTT_PPDU_STATS_USER_RATE_TLV_RU_FORMAT_GET(*tag_buf);
3823 
3824 	tag_buf += 1;
3825 
3826 	if (!ru_format) {
3827 		/* ru_format = 0: ru_end, ru_start */
3828 		ppdu_user_desc->ru_start =
3829 			HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
3830 		ppdu_user_desc->ru_tones =
3831 			(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
3832 			HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
3833 	} else if (ru_format == 1) {
3834 		/* ru_format = 1: ru_index, ru_size */
3835 		ru_size = HTT_PPDU_STATS_USER_RATE_TLV_RU_SIZE_GET(*tag_buf);
3836 		ppdu_user_desc->ru_tones =
3837 				dp_mon_get_ru_width_from_ru_size(ru_size);
3838 	} else {
3839 		dp_mon_debug("Unsupported ru_format: %d rcvd", ru_format);
3840 	}
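	/*
	 * usr_ru_tones_sum (and usr_nss_sum below) accumulate across
	 * all users of the PPDU; they are consumed later to apportion
	 * PPDU airtime per user in dp_ppdu_desc_user_phy_tx_time_update().
	 */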
3841 	ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones;
3842 
3843 	tag_buf += 2;
3844 
3845 	ppdu_user_desc->ppdu_type =
3846 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
3847 
3848 	tag_buf++;
3849 	ppdu_user_desc->tx_rate = *tag_buf;
3850 
3851 	ppdu_user_desc->ltf_size =
3852 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
3853 	ppdu_user_desc->stbc =
3854 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
3855 	ppdu_user_desc->he_re =
3856 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
3857 	ppdu_user_desc->txbf =
3858 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
3859 	bw = HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf);
	/*
	 * Align the bw value to the host (CDP) bandwidth enum; the HTT
	 * enum uses a different base, and 320MHz sits at yet another
	 * offset, hence the two different adjustments below.
	 */
3861 	if (bw == HTT_PPDU_STATS_BANDWIDTH_320MHZ)
3862 		ppdu_user_desc->bw = bw - 3;
3863 	else
3864 		ppdu_user_desc->bw = bw - 2;
3865 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
3866 	ppdu_desc->usr_nss_sum += ppdu_user_desc->nss;
3867 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
3868 	ppdu_user_desc->preamble =
3869 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
3870 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
3871 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
3872 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
3873 
3874 	tag_buf += 2;
3875 	ppdu_user_desc->punc_pattern_bitmap =
3876 		HTT_PPDU_STATS_USER_RATE_TLV_PUNC_PATTERN_BITMAP_GET(*tag_buf);
3877 }
3878 
3879 /**
3880  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv() - Process
3881  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
3882  * @pdev: DP PDEV handle
3883  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
3884  * @ppdu_info: per ppdu tlv structure
3885  *
3886  * Return: void
3887  */
3888 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
3889 		struct dp_pdev *pdev, uint32_t *tag_buf,
3890 		struct ppdu_info *ppdu_info)
3891 {
3892 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
3893 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
3894 
3895 	struct cdp_tx_completion_ppdu *ppdu_desc;
3896 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3897 	uint8_t curr_user_index = 0;
3898 	uint16_t peer_id;
3899 	uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
3900 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3901 
3902 	ppdu_desc =
3903 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3904 
3905 	tag_buf++;
3906 
3907 	peer_id =
3908 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
3909 
3910 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
3911 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3912 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3913 	ppdu_user_desc->peer_id = peer_id;
3914 
3915 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
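	/* 64-MPDU BA window: the enqueue bitmap spans 64/32 = 2 dwords */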
3916 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
3917 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
3918 
3919 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
3920 						   (void *)ppdu_user_desc,
3921 						   ppdu_info->ppdu_id,
3922 						   size);
3923 }
3924 
3925 /**
3926  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv() - Process
3927  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
3928  * @pdev: DP PDEV handle
3929  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
3930  * @ppdu_info: per ppdu tlv structure
3931  *
3932  * Return: void
3933  */
3934 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
3935 		struct dp_pdev *pdev, uint32_t *tag_buf,
3936 		struct ppdu_info *ppdu_info)
3937 {
3938 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
3939 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
3940 
3941 	struct cdp_tx_completion_ppdu *ppdu_desc;
3942 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3943 	uint8_t curr_user_index = 0;
3944 	uint16_t peer_id;
3945 	uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
3946 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3947 
3948 	ppdu_desc =
3949 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3950 
3951 	tag_buf++;
3952 
3953 	peer_id =
3954 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
3955 
3956 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
3957 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3958 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3959 	ppdu_user_desc->peer_id = peer_id;
3960 
3961 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
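	/* 256-MPDU BA window: the enqueue bitmap spans 256/32 = 8 dwords */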
3962 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
3963 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
3964 
3965 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
3966 						   (void *)ppdu_user_desc,
3967 						   ppdu_info->ppdu_id,
3968 						   size);
3969 }
3970 
3971 /**
3972  * dp_process_ppdu_stats_user_cmpltn_common_tlv() - Process
3973  * htt_ppdu_stats_user_cmpltn_common_tlv
3974  * @pdev: DP PDEV handle
3975  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
3976  * @ppdu_info: per ppdu tlv structure
3977  *
3978  * Return: void
3979  */
3980 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
3981 		struct dp_pdev *pdev, uint32_t *tag_buf,
3982 		struct ppdu_info *ppdu_info)
3983 {
3984 	uint16_t peer_id;
3985 	struct cdp_tx_completion_ppdu *ppdu_desc;
3986 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3987 	uint8_t curr_user_index = 0;
3988 	uint8_t bw_iter;
3989 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
3990 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
3991 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3992 
3993 	ppdu_desc =
3994 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3995 
3996 	tag_buf++;
3997 	peer_id =
3998 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
3999 
4000 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
4001 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
4002 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
4003 	ppdu_user_desc->peer_id = peer_id;
4004 
4005 	ppdu_user_desc->completion_status =
4006 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
4007 				*tag_buf);
4008 
4009 	ppdu_user_desc->tid =
4010 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
4011 
4012 	tag_buf++;
4013 	if (qdf_likely(ppdu_user_desc->completion_status ==
4014 			HTT_PPDU_STATS_USER_STATUS_OK)) {
4015 		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
4016 		ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi;
4017 		ppdu_user_desc->ack_rssi_valid = 1;
4018 	} else {
4019 		ppdu_user_desc->ack_rssi_valid = 0;
4020 	}
4021 
4022 	tag_buf++;
4023 
4024 	ppdu_user_desc->mpdu_success =
4025 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
4026 
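	/* Failed MPDUs = MPDUs tried minus MPDUs that succeeded */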
4027 	ppdu_user_desc->mpdu_failed =
4028 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
4029 						ppdu_user_desc->mpdu_success;
4030 
4031 	tag_buf++;
4032 
4033 	ppdu_user_desc->long_retries =
4034 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
4035 
4036 	ppdu_user_desc->short_retries =
4037 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
4038 	ppdu_user_desc->retry_mpdus =
4039 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
4040 
4041 	ppdu_user_desc->is_ampdu =
4042 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
4043 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
4044 
4045 	ppdu_desc->resp_type =
4046 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
4047 	ppdu_desc->mprot_type =
4048 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
4049 	ppdu_desc->rts_success =
4050 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
4051 	ppdu_desc->rts_failure =
4052 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);
4053 
4054 	ppdu_user_desc->mprot_type = ppdu_desc->mprot_type;
4055 	ppdu_user_desc->rts_success = ppdu_desc->rts_success;
4056 	ppdu_user_desc->rts_failure = ppdu_desc->rts_failure;
4057 
4058 	ppdu_user_desc->pream_punct =
4059 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf);
4060 
4061 	ppdu_info->compltn_common_tlv++;
4062 
	/*
	 * A MU BAR may solicit n users while acks are received from only
	 * m of them. To count how many users actually responded, the
	 * per-PPDU counter bar_num_users is incremented for every
	 * htt_ppdu_stats_user_cmpltn_common_tlv received.
	 */
4069 	ppdu_desc->bar_num_users++;
4070 
4071 	tag_buf++;
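	/* One RSSI dword per chain follows */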
4072 	for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
4073 		ppdu_user_desc->rssi_chain[bw_iter] =
4074 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
4075 		tag_buf++;
4076 	}
4077 
4078 	ppdu_user_desc->sa_tx_antenna =
4079 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);
4080 
4081 	tag_buf++;
4082 	ppdu_user_desc->sa_is_training =
4083 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
4084 	if (ppdu_user_desc->sa_is_training) {
4085 		ppdu_user_desc->sa_goodput =
4086 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
4087 	}
4088 
4089 	tag_buf++;
4090 	for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
4091 		ppdu_user_desc->sa_max_rates[bw_iter] =
4092 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
4093 	}
4094 
4095 	tag_buf += CDP_NUM_SA_BW;
4096 	ppdu_user_desc->current_rate_per =
4097 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
4098 }
4099 
4100 /**
4101  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv() - Process
4102  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
4103  * @pdev: DP PDEV handle
4104  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
4105  * @ppdu_info: per ppdu tlv structure
4106  *
4107  * Return: void
4108  */
4109 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
4110 		struct dp_pdev *pdev, uint32_t *tag_buf,
4111 		struct ppdu_info *ppdu_info)
4112 {
4113 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
4114 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
4115 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
4116 	struct cdp_tx_completion_ppdu *ppdu_desc;
4117 	uint8_t curr_user_index = 0;
4118 	uint16_t peer_id;
4119 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
4120 
4121 	ppdu_desc =
4122 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
4123 
4124 	tag_buf++;
4125 
4126 	peer_id =
4127 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
4128 
4129 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
4130 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
4131 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
4132 	ppdu_user_desc->peer_id = peer_id;
4133 
4134 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
4135 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
4136 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
4137 	ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
4138 }
4139 
4140 /**
4141  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv() - Process
4142  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
4143  * @pdev: DP PDEV handle
4144  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
4145  * @ppdu_info: per ppdu tlv structure
4146  *
4147  * Return: void
4148  */
4149 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
4150 		struct dp_pdev *pdev, uint32_t *tag_buf,
4151 		struct ppdu_info *ppdu_info)
4152 {
4153 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
4154 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
4155 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
4156 	struct cdp_tx_completion_ppdu *ppdu_desc;
4157 	uint8_t curr_user_index = 0;
4158 	uint16_t peer_id;
4159 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
4160 
4161 	ppdu_desc =
4162 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
4163 
4164 	tag_buf++;
4165 
4166 	peer_id =
4167 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
4168 
4169 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
4170 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
4171 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
4172 	ppdu_user_desc->peer_id = peer_id;
4173 
4174 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
4175 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
4176 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
4177 	ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
4178 }
4179 
4180 /**
4181  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv() - Process
4182  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
4183  * @pdev: DP PDEV handle
4184  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
4185  * @ppdu_info: per ppdu tlv structure
4186  *
4187  * Return: void
4188  */
4189 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
4190 		struct dp_pdev *pdev, uint32_t *tag_buf,
4191 		struct ppdu_info *ppdu_info)
4192 {
4193 	uint16_t peer_id;
4194 	struct cdp_tx_completion_ppdu *ppdu_desc;
4195 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
4196 	uint8_t curr_user_index = 0;
4197 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
4198 
4199 	ppdu_desc =
4200 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
4201 
4202 	tag_buf += 2;
4203 	peer_id =
4204 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
4205 
4206 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
4207 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
4208 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
4209 	if (!ppdu_user_desc->ack_ba_tlv) {
4210 		ppdu_user_desc->ack_ba_tlv = 1;
4211 	} else {
4212 		pdev->stats.ack_ba_comes_twice++;
4213 		return;
4214 	}
4215 
4216 	ppdu_user_desc->peer_id = peer_id;
4217 
4218 	tag_buf++;
	/* Deliberately do not update ppdu_desc->tid from this TLV */
4220 	ppdu_user_desc->num_mpdu =
4221 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
4222 
4223 	ppdu_user_desc->num_msdu =
4224 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
4225 
4226 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
4227 
4228 	tag_buf++;
4229 	ppdu_user_desc->start_seq =
4230 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
4231 			*tag_buf);
4232 
4233 	tag_buf++;
4234 	ppdu_user_desc->success_bytes = *tag_buf;
4235 
4236 	/* increase ack ba tlv counter on successful mpdu */
4237 	if (ppdu_user_desc->num_mpdu)
4238 		ppdu_info->ack_ba_tlv++;
4239 
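	/*
	 * If no BA bitmap TLV was seen for this user (e.g. a plain ack
	 * rather than a block ack), synthesize a one-entry bitmap from
	 * start_seq so later MPDU accounting still has a window.
	 */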
4240 	if (ppdu_user_desc->ba_size == 0) {
4241 		ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
4242 		ppdu_user_desc->ba_bitmap[0] = 1;
4243 		ppdu_user_desc->ba_size = 1;
4244 	}
4245 }
4246 
4247 /**
4248  * dp_process_ppdu_stats_user_common_array_tlv() - Process
4249  * htt_ppdu_stats_user_common_array_tlv
4250  * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_usr_common_array_tlv_v
4252  * @ppdu_info: per ppdu tlv structure
4253  *
4254  * Return: void
4255  */
4256 static void dp_process_ppdu_stats_user_common_array_tlv(
4257 		struct dp_pdev *pdev, uint32_t *tag_buf,
4258 		struct ppdu_info *ppdu_info)
4259 {
4260 	uint32_t peer_id;
4261 	struct cdp_tx_completion_ppdu *ppdu_desc;
4262 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
4263 	uint8_t curr_user_index = 0;
4264 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
4265 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
4266 
4267 	ppdu_desc =
4268 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
4269 
4270 	tag_buf++;
4271 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
4272 	tag_buf += 3;
4273 	peer_id =
4274 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
4275 
4276 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
4277 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4278 			  "Invalid peer");
4279 		return;
4280 	}
4281 
4282 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
4283 
4284 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
4285 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
4286 
4287 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
4288 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
4289 
4290 	tag_buf++;
4291 
4292 	ppdu_user_desc->success_msdus =
4293 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
4294 	ppdu_user_desc->retry_msdus =
4295 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
4296 	tag_buf++;
4297 	ppdu_user_desc->failed_msdus =
4298 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
4299 }
4300 
4301 /**
4302  * dp_process_ppdu_stats_user_compltn_flush_tlv() - Process
4303  * htt_ppdu_stats_flush_tlv
4304  * @pdev: DP PDEV handle
4305  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
4306  * @ppdu_info: per ppdu tlv structure
4307  *
4308  * Return: void
4309  */
4310 static void
4311 dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
4312 					     uint32_t *tag_buf,
4313 					     struct ppdu_info *ppdu_info)
4314 {
4315 	struct cdp_tx_completion_ppdu *ppdu_desc;
4316 	uint32_t peer_id;
4317 	uint8_t tid;
4318 	struct dp_peer *peer;
4319 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4320 	struct dp_mon_peer *mon_peer = NULL;
4321 
4322 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
4323 				qdf_nbuf_data(ppdu_info->nbuf);
4324 	ppdu_desc->is_flush = 1;
4325 
4326 	tag_buf++;
4327 	ppdu_desc->drop_reason = *tag_buf;
4328 
4329 	tag_buf++;
4330 	ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
4331 	ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
4332 	ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);
4333 
4334 	tag_buf++;
4335 	peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
4336 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
4337 
4338 	ppdu_desc->num_users = 1;
4339 	ppdu_desc->user[0].peer_id = peer_id;
4340 	ppdu_desc->user[0].tid = tid;
4341 
4342 	ppdu_desc->queue_type =
4343 			HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);
4344 
4345 	peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
4346 				     DP_MOD_ID_TX_PPDU_STATS);
4347 	if (!peer)
4348 		goto add_ppdu_to_sched_list;
4349 
4350 	if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
4351 		mon_peer = peer->monitor_peer;
4352 		DP_STATS_INC(mon_peer,
4353 			     tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
4354 			     ppdu_desc->num_msdu);
4355 	}
4356 
4357 	dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
4358 
4359 add_ppdu_to_sched_list:
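	/*
	 * A flushed PPDU is complete as-is: move the descriptor from
	 * the pending list to the sched-completion list for delivery.
	 */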
4360 	ppdu_info->done = 1;
4361 	TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
4362 	mon_pdev->list_depth--;
4363 	TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
4364 			  ppdu_info_list_elem);
4365 	mon_pdev->sched_comp_list_depth++;
4366 }
4367 
4368 /**
 * dp_process_ppdu_stats_sch_cmd_status_tlv() - Process the schedule
 * command status TLV; the TLV payload itself is not parsed here, it
 * only marks scheduler completion for the PPDU
4371  * @pdev: DP PDEV handle
4372  * @ppdu_info: per ppdu tlv structure
4373  *
4374  * Return: void
4375  */
4376 static void
4377 dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev,
4378 					 struct ppdu_info *ppdu_info)
4379 {
4380 	struct cdp_tx_completion_ppdu *ppdu_desc;
4381 	struct dp_peer *peer;
4382 	uint8_t num_users;
4383 	uint8_t i;
4384 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4385 
4386 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
4387 				qdf_nbuf_data(ppdu_info->nbuf);
4388 
4389 	num_users = ppdu_desc->bar_num_users;
4390 
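	/*
	 * For BAR/CTRL PPDUs, lift the PHY mode or frame control of
	 * the first user found at position 0 into the PPDU-level
	 * descriptor.
	 */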
4391 	for (i = 0; i < num_users; i++) {
4392 		if (ppdu_desc->user[i].user_pos == 0) {
4393 			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
4394 				/* update phy mode for bar frame */
4395 				ppdu_desc->phy_mode =
4396 					ppdu_desc->user[i].preamble;
4397 				ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs;
4398 				break;
4399 			}
4400 			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) {
4401 				ppdu_desc->frame_ctrl =
4402 					ppdu_desc->user[i].frame_ctrl;
4403 				break;
4404 			}
4405 		}
4406 	}
4407 
4408 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
4409 	    ppdu_desc->delayed_ba) {
4410 		qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
4411 
4412 		for (i = 0; i < ppdu_desc->num_users; i++) {
4413 			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
4414 			uint64_t start_tsf;
4415 			uint64_t end_tsf;
4416 			uint32_t ppdu_id;
4417 			struct dp_mon_peer *mon_peer;
4418 
4419 			ppdu_id = ppdu_desc->ppdu_id;
4420 			peer = dp_peer_get_ref_by_id
4421 				(pdev->soc, ppdu_desc->user[i].peer_id,
4422 				 DP_MOD_ID_TX_PPDU_STATS);
4423 			/*
4424 			 * This check is to make sure peer is not deleted
4425 			 * after processing the TLVs.
4426 			 */
4427 			if (!peer)
4428 				continue;
4429 
4430 			if (!peer->monitor_peer) {
4431 				dp_peer_unref_delete(peer,
4432 						     DP_MOD_ID_TX_PPDU_STATS);
4433 				continue;
4434 			}
4435 
4436 			mon_peer = peer->monitor_peer;
4437 			delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
4438 			start_tsf = ppdu_desc->ppdu_start_timestamp;
4439 			end_tsf = ppdu_desc->ppdu_end_timestamp;
4440 			/*
4441 			 * save delayed ba user info
4442 			 */
4443 			if (ppdu_desc->user[i].delayed_ba) {
4444 				dp_peer_copy_delay_stats(peer,
4445 							 &ppdu_desc->user[i],
4446 							 ppdu_id);
4447 				mon_peer->last_delayed_ba_ppduid = ppdu_id;
4448 				delay_ppdu->ppdu_start_timestamp = start_tsf;
4449 				delay_ppdu->ppdu_end_timestamp = end_tsf;
4450 			}
4451 			ppdu_desc->user[i].peer_last_delayed_ba =
4452 				mon_peer->last_delayed_ba;
4453 
4454 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
4455 
4456 			if (ppdu_desc->user[i].delayed_ba &&
4457 			    !ppdu_desc->user[i].debug_copied) {
4458 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4459 					  QDF_TRACE_LEVEL_INFO_MED,
4460 					  "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n",
4461 					  __func__, __LINE__,
4462 					  ppdu_desc->ppdu_id,
4463 					  ppdu_desc->bar_ppdu_id,
4464 					  ppdu_desc->num_users,
4465 					  i,
4466 					  ppdu_desc->htt_frame_type);
4467 			}
4468 		}
4469 	}
4470 
	/*
	 * When the frame type is BAR and the STATS_COMMON_TLV has been
	 * received, copy the stored per-peer delayed-BA info into the
	 * BAR status.
	 */
4475 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
4476 		for (i = 0; i < ppdu_desc->bar_num_users; i++) {
4477 			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
4478 			uint64_t start_tsf;
4479 			uint64_t end_tsf;
4480 			struct dp_mon_peer *mon_peer;
4481 
4482 			peer = dp_peer_get_ref_by_id
4483 				(pdev->soc,
4484 				 ppdu_desc->user[i].peer_id,
4485 				 DP_MOD_ID_TX_PPDU_STATS);
4486 			/*
4487 			 * This check is to make sure peer is not deleted
4488 			 * after processing the TLVs.
4489 			 */
4490 			if (!peer)
4491 				continue;
4492 
4493 			if (!peer->monitor_peer) {
4494 				dp_peer_unref_delete(peer,
4495 						     DP_MOD_ID_TX_PPDU_STATS);
4496 				continue;
4497 			}
4498 
4499 			mon_peer = peer->monitor_peer;
4500 			if (ppdu_desc->user[i].completion_status !=
4501 			    HTT_PPDU_STATS_USER_STATUS_OK) {
4502 				dp_peer_unref_delete(peer,
4503 						     DP_MOD_ID_TX_PPDU_STATS);
4504 				continue;
4505 			}
4506 
4507 			delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
4508 			start_tsf = delay_ppdu->ppdu_start_timestamp;
4509 			end_tsf = delay_ppdu->ppdu_end_timestamp;
4510 
4511 			if (mon_peer->last_delayed_ba) {
4512 				dp_peer_copy_stats_to_bar(peer,
4513 							  &ppdu_desc->user[i]);
4514 				ppdu_desc->ppdu_id =
4515 					mon_peer->last_delayed_ba_ppduid;
4516 				ppdu_desc->ppdu_start_timestamp = start_tsf;
4517 				ppdu_desc->ppdu_end_timestamp = end_tsf;
4518 			}
4519 			ppdu_desc->user[i].peer_last_delayed_ba =
4520 						mon_peer->last_delayed_ba;
4521 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
4522 		}
4523 	}
4524 
4525 	TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
4526 	mon_pdev->list_depth--;
4527 	TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
4528 			  ppdu_info_list_elem);
4529 	mon_pdev->sched_comp_list_depth++;
4530 }
4531 
4532 /**
4533  * dp_validate_fix_ppdu_tlv() - Function to validate the length of PPDU
4534  * @pdev: DP pdev handle
4535  * @tag_buf: TLV buffer
4536  * @tlv_expected_size: Expected size of Tag
4537  * @tlv_len: TLV length received from FW
4538  *
 * If the TLV length received as part of the PPDU TLV is less than the
 * expected size (i.e. the size of the corresponding data structure),
 * pad the remaining bytes with zeros and continue processing the TLVs.
4542  *
4543  * Return: Pointer to updated TLV
4544  */
4545 static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
4546 						 uint32_t *tag_buf,
4547 						 uint16_t tlv_expected_size,
4548 						 uint16_t tlv_len)
4549 {
4550 	uint32_t *tlv_desc = tag_buf;
4551 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4552 
4553 	qdf_assert_always(tlv_len != 0);
4554 
4555 	if (tlv_len < tlv_expected_size) {
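		/*
		 * Stage the short TLV in the pre-allocated, zeroed
		 * scratch buffer so the caller reads zero-padded fields.
		 */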
4556 		qdf_mem_zero(mon_pdev->ppdu_tlv_buf, tlv_expected_size);
4557 		qdf_mem_copy(mon_pdev->ppdu_tlv_buf, tag_buf, tlv_len);
4558 		tlv_desc = mon_pdev->ppdu_tlv_buf;
4559 	}
4560 
4561 	return tlv_desc;
4562 }
4563 
4564 /**
4565  * dp_process_ppdu_tag() - Function to process the PPDU TLVs
4566  * @pdev: DP pdev handle
4567  * @tag_buf: TLV buffer
4568  * @tlv_len: length of tlv
4569  * @ppdu_info: per ppdu tlv structure
4570  *
4571  * Return: void
4572  */
4573 static void dp_process_ppdu_tag(struct dp_pdev *pdev,
4574 				uint32_t *tag_buf,
4575 				uint32_t tlv_len,
4576 				struct ppdu_info *ppdu_info)
4577 {
4578 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
4579 	uint16_t tlv_expected_size;
4580 	uint32_t *tlv_desc;
4581 
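	/*
	 * Each fixed-size TLV is length-validated (and zero-padded if
	 * short) before its dedicated handler parses it.
	 */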
4582 	switch (tlv_type) {
4583 	case HTT_PPDU_STATS_COMMON_TLV:
4584 		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
4585 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4586 						    tlv_expected_size, tlv_len);
4587 		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
4588 		break;
4589 	case HTT_PPDU_STATS_USR_COMMON_TLV:
4590 		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
4591 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4592 						    tlv_expected_size, tlv_len);
4593 		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
4594 						      ppdu_info);
4595 		break;
4596 	case HTT_PPDU_STATS_USR_RATE_TLV:
4597 		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
4598 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4599 						    tlv_expected_size, tlv_len);
4600 		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
4601 						    ppdu_info);
4602 		break;
4603 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
4604 		tlv_expected_size =
4605 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
4606 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4607 						    tlv_expected_size, tlv_len);
4608 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
4609 				pdev, tlv_desc, ppdu_info);
4610 		break;
4611 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
4612 		tlv_expected_size =
4613 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
4614 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4615 						    tlv_expected_size, tlv_len);
4616 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
4617 				pdev, tlv_desc, ppdu_info);
4618 		break;
4619 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
4620 		tlv_expected_size =
4621 			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
4622 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4623 						    tlv_expected_size, tlv_len);
4624 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
4625 				pdev, tlv_desc, ppdu_info);
4626 		break;
4627 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
4628 		tlv_expected_size =
4629 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
4630 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4631 						    tlv_expected_size, tlv_len);
4632 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
4633 				pdev, tlv_desc, ppdu_info);
4634 		break;
4635 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
4636 		tlv_expected_size =
4637 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
4638 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4639 						    tlv_expected_size, tlv_len);
4640 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
4641 				pdev, tlv_desc, ppdu_info);
4642 		break;
4643 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
4644 		tlv_expected_size =
4645 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
4646 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4647 						    tlv_expected_size, tlv_len);
4648 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
4649 				pdev, tlv_desc, ppdu_info);
4650 		break;
4651 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
4652 		tlv_expected_size =
4653 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
4654 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4655 						    tlv_expected_size, tlv_len);
4656 		dp_process_ppdu_stats_user_common_array_tlv(
4657 				pdev, tlv_desc, ppdu_info);
4658 		break;
4659 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
4660 		tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
4661 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4662 						    tlv_expected_size, tlv_len);
4663 		dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
4664 							     ppdu_info);
4665 		break;
4666 	case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV:
4667 		dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info);
4668 		break;
4669 	default:
4670 		break;
4671 	}
4672 }
4673 
4674 #ifdef WLAN_TELEMETRY_STATS_SUPPORT
4675 static inline
4676 void dp_ppdu_desc_user_airtime_consumption_update(
4677 			struct dp_peer *peer,
4678 			struct cdp_tx_completion_ppdu_user *user)
4679 {
4680 	struct dp_mon_peer *mon_peer = NULL;
4681 	uint8_t ac = 0;
4682 
4683 	mon_peer = peer->monitor_peer;
4684 	if (qdf_unlikely(!mon_peer))
4685 		return;
4686 
4687 	ac = TID_TO_WME_AC(user->tid);
4688 	DP_STATS_INC(mon_peer, airtime_stats.tx_airtime_consumption[ac].consumption,
4689 		     user->phy_tx_time_us);
4690 }
4691 #else
4692 static inline
4693 void dp_ppdu_desc_user_airtime_consumption_update(
4694 			struct dp_peer *peer,
4695 			struct cdp_tx_completion_ppdu_user *user)
4696 { }
4697 #endif
4698 
4699 #if defined(WLAN_ATF_ENABLE) || defined(WLAN_TELEMETRY_STATS_SUPPORT)
4700 static void
4701 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
4702 				     struct dp_peer *peer,
4703 				     struct cdp_tx_completion_ppdu *ppdu_desc,
4704 				     struct cdp_tx_completion_ppdu_user *user)
4705 {
4706 	uint32_t nss_ru_width_sum = 0;
4707 	struct dp_mon_peer *mon_peer = NULL;
4708 
4709 	if (!pdev || !ppdu_desc || !user || !peer)
4710 		return;
4711 
4712 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA)
4713 		return;
4714 
4715 	mon_peer = peer->monitor_peer;
4716 	if (qdf_unlikely(!mon_peer))
4717 		return;
4718 
4719 	nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
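	/* Avoid a divide-by-zero below when no user reported nss/RU width */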
4720 	if (!nss_ru_width_sum)
4721 		nss_ru_width_sum = 1;
4722 
	/*
	 * For a SU PPDU the PHY Tx time is that of the single user.
	 * For MU-MIMO/OFDMA it is apportioned per user as:
	 *     user phy tx time =
	 *           entire PPDU duration * MU ratio * OFDMA ratio
	 *     MU ratio    = usr_nss / sum of nss over all users
	 *     OFDMA ratio = usr_ru_width / sum of ru_width over all users
	 *     usr_ru_width = ru_end - ru_start + 1
	 */
4732 	if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
4733 		user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us;
4734 	} else {
4735 		user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us *
4736 				user->nss * user->ru_tones) / nss_ru_width_sum;
4737 	}
4738 
4739 	dp_ppdu_desc_user_airtime_consumption_update(peer, user);
4740 }
4741 #else
4742 static void
4743 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
4744 				     struct dp_peer *peer,
4745 				     struct cdp_tx_completion_ppdu *ppdu_desc,
4746 				     struct cdp_tx_completion_ppdu_user *user)
4747 {
4748 }
4749 #endif
4750 
4751 #ifdef WLAN_SUPPORT_CTRL_FRAME_STATS
4752 static void
4753 dp_tx_ctrl_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
4754 			struct cdp_tx_completion_ppdu_user *user)
4755 {
4756 	struct dp_mon_peer *mon_peer = NULL;
4757 	uint16_t fc = 0;
4758 
4759 	if (!pdev || !peer || !user)
4760 		return;
4761 
4762 	mon_peer = peer->monitor_peer;
4763 	if (qdf_unlikely(!mon_peer))
4764 		return;
4765 
4766 	if (user->mprot_type) {
4767 		DP_STATS_INCC(mon_peer,
4768 			      tx.rts_success, 1, user->rts_success);
4769 		DP_STATS_INCC(mon_peer,
4770 			      tx.rts_failure, 1, user->rts_failure);
4771 	}
4772 	fc = user->frame_ctrl;
4773 	if ((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_TYPE_MASK) ==
4774 	    QDF_IEEE80211_FC0_TYPE_CTL) {
4775 		if ((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
4776 		    QDF_IEEE80211_FC0_SUBTYPE_VHT_NDP_AN)
4777 			DP_STATS_INC(mon_peer, tx.ndpa_cnt, 1);
4778 		if ((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
4779 		    QDF_IEEE80211_FC0_SUBTYPE_BAR)
4780 			DP_STATS_INC(mon_peer, tx.bar_cnt, 1);
4781 	}
4782 }
4783 #else
4784 static void
4785 dp_tx_ctrl_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
4786 			struct cdp_tx_completion_ppdu_user *user)
4787 {
4788 }
4789 #endif /* WLAN_SUPPORT_CTRL_FRAME_STATS */
4790 
4791 void
4792 dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
4793 			       struct ppdu_info *ppdu_info)
4794 {
4795 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
4796 	struct dp_peer *peer = NULL;
4797 	uint32_t tlv_bitmap_expected;
4798 	uint32_t tlv_bitmap_default;
4799 	uint16_t i;
4800 	uint32_t num_users;
4801 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4802 
4803 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
4804 		qdf_nbuf_data(ppdu_info->nbuf);
4805 
4806 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR)
4807 		ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
4808 
4809 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
4810 	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
4811 	    mon_pdev->tx_capture_enabled) {
4812 		if (ppdu_info->is_ampdu)
4813 			tlv_bitmap_expected =
4814 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
4815 					ppdu_info->tlv_bitmap);
4816 	}
4817 
4818 	tlv_bitmap_default = tlv_bitmap_expected;
4819 
4820 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
4821 		num_users = ppdu_desc->bar_num_users;
4822 		ppdu_desc->num_users = ppdu_desc->bar_num_users;
4823 	} else {
4824 		num_users = ppdu_desc->num_users;
4825 	}
4826 	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
4827 
4828 	dp_ppdu_desc_get_txmode(ppdu_desc);
4829 	dp_pdev_update_deter_stats(pdev, ppdu_desc);
4830 
4831 	for (i = 0; i < num_users; i++) {
4832 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
4833 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
4834 
4835 		peer = dp_peer_get_ref_by_id(pdev->soc,
4836 					     ppdu_desc->user[i].peer_id,
4837 					     DP_MOD_ID_TX_PPDU_STATS);
4838 		/*
4839 		 * This check is to make sure peer is not deleted
4840 		 * after processing the TLVs.
4841 		 */
4842 		if (!peer)
4843 			continue;
4844 
4845 		ppdu_desc->user[i].is_bss_peer = peer->bss_peer;
4846 
4847 		dp_ppdu_desc_user_phy_tx_time_update(pdev, peer, ppdu_desc,
4848 						     &ppdu_desc->user[i]);
4849 
4850 		dp_tx_ctrl_stats_update(pdev, peer, &ppdu_desc->user[i]);
4851 
		/*
		 * Different frame types (DATA, BAR, CTRL) expect
		 * different TLV bitmaps. Apart from ACK_BA_STATUS, all
		 * TLVs arrive in order from the FW; ACK_BA_STATUS comes
		 * from hardware and is asynchronous, so some TLV is
		 * needed to confirm that all TLVs for a PPDU have been
		 * received. Hence both SCHED_CMD_STATUS_TLV and
		 * ACK_BA_STATUS_TLV are relied on; for failed packets
		 * no ACK_BA_STATUS_TLV arrives at all.
		 */
4863 		if (!(ppdu_info->tlv_bitmap &
4864 		      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) ||
4865 		    (!(ppdu_info->tlv_bitmap &
4866 		       (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
4867 		     (ppdu_desc->user[i].completion_status ==
4868 		      HTT_PPDU_STATS_USER_STATUS_OK))) {
4869 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
4870 			continue;
4871 		}
4872 
		/*
		 * Update tx stats for data frames on QoS as well as
		 * non-QoS data TIDs
		 */
4877 
4878 		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
4879 		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
4880 		     (ppdu_desc->htt_frame_type ==
4881 		      HTT_STATS_FTYPE_SGEN_QOS_NULL) ||
4882 		     ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) &&
4883 		      (ppdu_desc->num_mpdu > 1))) &&
4884 		      (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {
4885 			dp_tx_stats_update(pdev, peer,
4886 					   &ppdu_desc->user[i],
4887 					   ppdu_desc);
4888 		}
4889 
4890 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
4891 		tlv_bitmap_expected = tlv_bitmap_default;
4892 	}
4893 }
4894 
4895 #if !defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(QCA_MONITOR_2_0_SUPPORT)
4896 /**
 * dp_tx_ppdu_desc_notify() - Notify the upper layer about the PPDU via WDI
4898  *
4899  * @pdev: Datapath pdev handle
4900  * @nbuf: Buffer to be delivered to upper layer
4901  *
4902  * Return: void
4903  */
4904 static void dp_tx_ppdu_desc_notify(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
4905 {
4906 	struct dp_soc *soc = pdev->soc;
4907 	struct dp_mon_ops *mon_ops = NULL;
4908 
4909 	mon_ops = dp_mon_ops_get(soc);
4910 	if (mon_ops && mon_ops->mon_ppdu_desc_notify)
4911 		mon_ops->mon_ppdu_desc_notify(pdev, nbuf);
4912 	else
4913 		qdf_nbuf_free(nbuf);
4914 }
4915 
4916 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
4917 			  struct ppdu_info *ppdu_info)
4918 {
4919 	struct ppdu_info *s_ppdu_info = NULL;
4920 	struct ppdu_info *ppdu_info_next = NULL;
4921 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
4922 	qdf_nbuf_t nbuf;
4923 	uint32_t time_delta = 0;
4924 	bool starved = 0;
4925 	bool matched = 0;
4926 	bool recv_ack_ba_done = 0;
4927 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4928 
4929 	if (ppdu_info->tlv_bitmap &
4930 	    (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
4931 	    ppdu_info->done)
4932 		recv_ack_ba_done = 1;
4933 
4934 	mon_pdev->last_sched_cmdid = ppdu_info->sched_cmdid;
4935 
4936 	s_ppdu_info = TAILQ_FIRST(&mon_pdev->sched_comp_ppdu_list);
4937 
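	/*
	 * Deliver completed descriptors in scheduler order; stop at the
	 * first entry still waiting for its ACK/BA status unless that
	 * entry has been starved for longer than MAX_SCHED_STARVE.
	 */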
4938 	TAILQ_FOREACH_SAFE(s_ppdu_info, &mon_pdev->sched_comp_ppdu_list,
4939 			   ppdu_info_list_elem, ppdu_info_next) {
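		/* 32-bit TSF delta, handling a single wrap between samples */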
4940 		if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32)
4941 			time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) +
4942 					ppdu_info->tsf_l32;
4943 		else
4944 			time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32;
4945 
4946 		if (!s_ppdu_info->done && !recv_ack_ba_done) {
4947 			if (time_delta < MAX_SCHED_STARVE) {
4948 				dp_mon_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]",
4949 					    pdev->pdev_id,
4950 					    s_ppdu_info->ppdu_id,
4951 					    s_ppdu_info->sched_cmdid,
4952 					    s_ppdu_info->tlv_bitmap,
4953 					    s_ppdu_info->tsf_l32,
4954 					    s_ppdu_info->done);
4955 				break;
4956 			}
4957 			starved = 1;
4958 		}
4959 
4960 		mon_pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid;
4961 		TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list, s_ppdu_info,
4962 			     ppdu_info_list_elem);
4963 		mon_pdev->sched_comp_list_depth--;
4964 
4965 		nbuf = s_ppdu_info->nbuf;
4966 		qdf_assert_always(nbuf);
4967 		ppdu_desc = (struct cdp_tx_completion_ppdu *)
4968 				qdf_nbuf_data(nbuf);
4969 		ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap;
4970 
4971 		if (starved) {
4972 			dp_mon_info("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n",
4973 				    ppdu_desc->frame_ctrl,
4974 				    ppdu_desc->htt_frame_type,
4975 				    ppdu_desc->tlv_bitmap,
4976 				    ppdu_desc->user[0].completion_status);
4977 			starved = 0;
4978 		}
4979 
4980 		if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id &&
4981 		    ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid)
4982 			matched = 1;
4983 
4984 		dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info);
4985 
4986 		qdf_mem_free(s_ppdu_info);
4987 
4988 		dp_tx_ppdu_desc_notify(pdev, nbuf);
4989 
4990 		if (matched)
4991 			break;
4992 	}
4993 }
4994 #endif
4995 
4996 /**
4997  * dp_tx_ppdu_desc_deliver() - Deliver PPDU desc to upper layer
4998  * @pdev: Datapath pdev handle
4999  * @ppdu_info: per PPDU TLV descriptor
5000  *
5001  * Return: void
5002  */
5003 static void dp_tx_ppdu_desc_deliver(struct dp_pdev *pdev,
5004 				    struct ppdu_info *ppdu_info)
5005 {
5006 	struct dp_soc *soc = pdev->soc;
5007 	struct dp_mon_ops *mon_ops = NULL;
5008 
5009 	mon_ops = dp_mon_ops_get(soc);
5010 
5011 	if (mon_ops && mon_ops->mon_ppdu_desc_deliver) {
5012 		mon_ops->mon_ppdu_desc_deliver(pdev, ppdu_info);
5013 	} else {
5014 		qdf_nbuf_free(ppdu_info->nbuf);
5015 		ppdu_info->nbuf = NULL;
5016 		qdf_mem_free(ppdu_info);
5017 	}
5018 }
5019 
5020 /**
 * dp_get_ppdu_desc() - Find the PPDU status descriptor for a ppdu id,
 * allocating a new one when none exists
5023  * @pdev: DP pdev handle
5024  * @ppdu_id: PPDU unique identifier
5025  * @tlv_type: TLV type received
5026  * @tsf_l32: timestamp received along with ppdu stats indication header
5027  * @max_users: Maximum user for that particular ppdu
5028  *
5029  * Return: ppdu_info per ppdu tlv structure
5030  */
5031 static
5032 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
5033 				   uint8_t tlv_type, uint32_t tsf_l32,
5034 				   uint8_t max_users)
5035 {
5036 	struct ppdu_info *ppdu_info = NULL;
5037 	struct ppdu_info *s_ppdu_info = NULL;
5038 	struct ppdu_info *ppdu_info_next = NULL;
5039 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
5040 	uint32_t size = 0;
5041 	struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL;
5042 	struct cdp_tx_completion_ppdu_user *tmp_user;
5043 	uint32_t time_delta;
5044 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5045 
	/*
	 * Check whether a node for this ppdu_id already exists
	 */
5049 	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
5050 			   ppdu_info_list_elem, ppdu_info_next) {
5051 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
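			/*
			 * tsf_l32 is a free-running 32-bit counter:
			 * compute the delta modulo 2^32 so a single
			 * wrap between the two samples is handled.
			 */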
5052 			if (ppdu_info->tsf_l32 > tsf_l32)
5053 				time_delta  = (MAX_TSF_32 -
5054 					       ppdu_info->tsf_l32) + tsf_l32;
5055 			else
5056 				time_delta  = tsf_l32 - ppdu_info->tsf_l32;
5057 
5058 			if (time_delta > WRAP_DROP_TSF_DELTA) {
5059 				TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
5060 					     ppdu_info, ppdu_info_list_elem);
5061 				mon_pdev->list_depth--;
5062 				pdev->stats.ppdu_wrap_drop++;
5063 				tmp_ppdu_desc =
5064 					(struct cdp_tx_completion_ppdu *)
5065 					qdf_nbuf_data(ppdu_info->nbuf);
5066 				tmp_user = &tmp_ppdu_desc->user[0];
5067 				dp_htt_tx_stats_info("S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n",
5068 						     ppdu_info->ppdu_id,
5069 						     ppdu_info->tsf_l32,
5070 						     ppdu_info->tlv_bitmap,
5071 						     tmp_user->completion_status,
5072 						     ppdu_info->compltn_common_tlv,
5073 						     ppdu_info->ack_ba_tlv,
5074 						     ppdu_id, tsf_l32,
5075 						     tlv_type);
5076 				qdf_nbuf_free(ppdu_info->nbuf);
5077 				ppdu_info->nbuf = NULL;
5078 				qdf_mem_free(ppdu_info);
5079 			} else {
5080 				break;
5081 			}
5082 		}
5083 	}
5084 
	/*
	 * If this is an ACK BA TLV and no match was found in the ppdu
	 * info list, check the sched completion ppdu list as well.
	 */
5089 	if (!ppdu_info &&
5090 	    tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) {
5091 		TAILQ_FOREACH(s_ppdu_info,
5092 			      &mon_pdev->sched_comp_ppdu_list,
5093 			      ppdu_info_list_elem) {
5094 			if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) {
5095 				if (s_ppdu_info->tsf_l32 > tsf_l32)
5096 					time_delta  = (MAX_TSF_32 -
5097 						       s_ppdu_info->tsf_l32) +
5098 							tsf_l32;
5099 				else
5100 					time_delta  = tsf_l32 -
5101 						s_ppdu_info->tsf_l32;
5102 				if (time_delta < WRAP_DROP_TSF_DELTA) {
5103 					ppdu_info = s_ppdu_info;
5104 					break;
5105 				}
5106 			} else {
				/*
				 * ACK BA STATUS TLVs arrive in order;
				 * if one is received for a later PPDU
				 * while an earlier PPDU is still
				 * waiting for its own, per FW guidance
				 * that TLV will never arrive, so the
				 * earlier ppdu_info can be marked done.
				 */
5115 				if (s_ppdu_info)
5116 					s_ppdu_info->done = 1;
5117 			}
5118 		}
5119 	}
5120 
5121 	if (ppdu_info) {
5122 		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
			/*
			 * A tlv_type that has already been processed for
			 * this PPDU means a new PPDU arrived with the
			 * same ppdu id, so flush the older one. For
			 * MU-MIMO and OFDMA a PPDU carries the same TLV
			 * types for multiple users; the tlv bitmap is
			 * used to distinguish SU from MU-MIMO/OFDMA.
			 */
5131 			if (!(ppdu_info->tlv_bitmap &
5132 			    (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
5133 				return ppdu_info;
5134 
5135 			ppdu_desc = (struct cdp_tx_completion_ppdu *)
5136 				qdf_nbuf_data(ppdu_info->nbuf);
5137 
			/*
			 * Apart from the ACK BA STATUS TLV everything
			 * arrives in order, so for any other tlv type
			 * the ppdu_info can be delivered right away.
			 */
5143 			if ((tlv_type ==
5144 			     HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
5145 			    ((ppdu_desc->htt_frame_type ==
5146 			     HTT_STATS_FTYPE_SGEN_MU_BAR) ||
5147 			    (ppdu_desc->htt_frame_type ==
5148 			     HTT_STATS_FTYPE_SGEN_BE_MU_BAR)))
5149 				return ppdu_info;
5150 
5151 			dp_tx_ppdu_desc_deliver(pdev, ppdu_info);
5152 		} else {
5153 			return ppdu_info;
5154 		}
5155 	}
5156 
5157 	/*
5158 	 * Flush the head ppdu descriptor if the ppdu desc list exceeds
5159 	 * the max depth threshold
5160 	 */
5161 	if (mon_pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
5162 		ppdu_info = TAILQ_FIRST(&mon_pdev->ppdu_info_list);
5163 		TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
5164 			     ppdu_info, ppdu_info_list_elem);
5165 		mon_pdev->list_depth--;
5166 		pdev->stats.ppdu_drop++;
5167 		qdf_nbuf_free(ppdu_info->nbuf);
5168 		ppdu_info->nbuf = NULL;
5169 		qdf_mem_free(ppdu_info);
5170 	}
5171 
5172 	size = sizeof(struct cdp_tx_completion_ppdu) +
5173 		(max_users * sizeof(struct cdp_tx_completion_ppdu_user));
5174 
5175 	/*
5176 	 * Allocate new ppdu_info node
5177 	 */
5178 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
5179 	if (!ppdu_info)
5180 		return NULL;
5181 
5182 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size,
5183 					 0, 4, TRUE);
5184 	if (!ppdu_info->nbuf) {
5185 		qdf_mem_free(ppdu_info);
5186 		return NULL;
5187 	}
5188 
5189 	ppdu_info->ppdu_desc =
5190 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
5191 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size);
5192 
5193 	if (!qdf_nbuf_put_tail(ppdu_info->nbuf, size)) {
5194 		dp_mon_err("No tailroom for HTT PPDU");
5195 		qdf_nbuf_free(ppdu_info->nbuf);
5196 		ppdu_info->nbuf = NULL;
5197 		ppdu_info->last_user = 0;
5198 		qdf_mem_free(ppdu_info);
5199 		return NULL;
5200 	}
5201 
5202 	ppdu_info->ppdu_desc->max_users = max_users;
5203 	ppdu_info->tsf_l32 = tsf_l32;
5204 	/*
5205 	 * No lock is needed because all PPDU TLVs are processed in the
5206 	 * same context and this list is updated in that same context
5207 	 */
5208 	TAILQ_INSERT_TAIL(&mon_pdev->ppdu_info_list, ppdu_info,
5209 			  ppdu_info_list_elem);
5210 	mon_pdev->list_depth++;
5211 	return ppdu_info;
5212 }
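/*
 * Lifecycle sketch for the routine above (dp_get_ppdu_desc(), invoked
 * from dp_htt_process_tlv() below): it returns an existing ppdu_info
 * matching ppdu_id, drops queued entries that a large TSF delta marks
 * as stale wrapped duplicates, delivers an older ppdu whose id is being
 * reused, or allocates a fresh node sized for max_users and queues it
 * on ppdu_info_list.
 */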
5213 
5214 /**
5215  * dp_htt_process_tlv() - Process each PPDU TLV in an HTT message
5216  * @pdev: DP pdev handle
5217  * @htt_t2h_msg: HTT target to host message
5218  *
5219  * Return: ppdu_info structure once the PPDU is complete, else NULL
5220  */
5221 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
5222 					    qdf_nbuf_t htt_t2h_msg)
5223 {
5224 	uint32_t length;
5225 	uint32_t ppdu_id;
5226 	uint8_t tlv_type;
5227 	uint32_t tlv_length, tlv_bitmap_expected;
5228 	uint8_t *tlv_buf;
5229 	struct ppdu_info *ppdu_info = NULL;
5230 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
5231 	uint8_t max_users = CDP_MU_MAX_USERS;
5232 	uint32_t tsf_l32;
5233 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5234 
5235 	uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
5236 
5237 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
5238 
5239 	msg_word = msg_word + 1;
5240 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
5241 
5242 	msg_word = msg_word + 1;
5243 	tsf_l32 = (uint32_t)(*msg_word);
5244 
5245 	msg_word = msg_word + 2;
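	/*
	 * The TLV stream starts at the fifth word of the message: word 0
	 * carries the payload size, word 1 the PPDU id and word 2 the
	 * lower 32 bits of the TSF; the final two-word advance above also
	 * skips word 3, which is not consumed here.
	 */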
5246 	while (length > 0) {
5247 		tlv_buf = (uint8_t *)msg_word;
5248 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
5249 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
5250 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
5251 			pdev->stats.ppdu_stats_counter[tlv_type]++;
5252 
5253 		if (tlv_length == 0)
5254 			break;
5255 
5256 		tlv_length += HTT_TLV_HDR_LEN;
5257 
5258 		/*
5259 		 * No separate ppdu descriptor is allocated for the MGMT
5260 		 * payload TLV, as it is sent as a separate WDI indication
5261 		 * and doesn't contain any ppdu information
5262 		 */
5263 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
5264 			mon_pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
5265 			mon_pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
5266 			mon_pdev->mgmtctrl_frm_info.mgmt_buf_len =
5267 				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
5268 						(*(msg_word + 1));
5269 			msg_word =
5270 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
5271 			length -= (tlv_length);
5272 			continue;
5273 		}
5274 
5275 		/*
5276 		 * Retrieve max_users from the TLV if it's USERS_INFO,
5277 		 * use 1 for COMPLTN_FLUSH,
5278 		 * and default to CDP_MU_MAX_USERS otherwise
5279 		 */
5280 		if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) {
5281 			max_users =
5282 				HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1));
5283 		} else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) {
5284 			max_users = 1;
5285 		}
5286 
5287 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type,
5288 					     tsf_l32, max_users);
5289 		if (!ppdu_info)
5290 			return NULL;
5291 
5292 		ppdu_info->ppdu_id = ppdu_id;
5293 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
5294 
5295 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
5296 
5297 		/*
5298 		 * Increment pdev level tlv count to monitor
5299 		 * missing TLVs
5300 		 */
5301 		mon_pdev->tlv_count++;
5302 		ppdu_info->last_tlv_cnt = mon_pdev->tlv_count;
5303 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
5304 		length -= (tlv_length);
5305 	}
5306 
5307 	if (!ppdu_info)
5308 		return NULL;
5309 
5310 	mon_pdev->last_ppdu_id = ppdu_id;
5311 
5312 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
5313 
5314 	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
5315 	    mon_pdev->tx_capture_enabled) {
5316 		if (ppdu_info->is_ampdu)
5317 			tlv_bitmap_expected =
5318 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
5319 					ppdu_info->tlv_bitmap);
5320 	}
5321 
5322 	ppdu_desc = ppdu_info->ppdu_desc;
5323 
5324 	if (!ppdu_desc)
5325 		return NULL;
5326 
5327 	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
5328 	    HTT_PPDU_STATS_USER_STATUS_OK) {
5329 		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
5330 	}
5331 
5332 	/*
5333 	 * For frame types DATA and BAR, stats are updated per MSDU.
5334 	 * Successful msdu and mpdu counts come from the ACK BA STATUS
5335 	 * TLV, which arrives out of order; the successful mpdu count is
5336 	 * also populated from the COMPLTN COMMON TLV, which arrives in
5337 	 * order. For every ppdu_info both counts are stored and compared
5338 	 * before delivery to ensure the ACK BA STATUS TLV was received.
5339 	 * Some self-generated frames never get an ACK BA STATUS TLV, so
5340 	 * there is no need to wait for it.
5341 	 */
5342 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
5343 	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
5344 		/*
5345 		 * Most of the time a BAR frame will carry a duplicate ACK
5346 		 * BA STATUS TLV
5347 		 */
5348 		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
5349 		    (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))
5350 			return NULL;
5351 		/*
5352 		 * For data frames, the COMPLTN COMMON TLV must match the ACK BA
5353 		 * STATUS TLV and the completion status. The first user is checked
5354 		 * because for OFDMA completion is seen at the next MU BAR frame,
5355 		 * while for MIMO only the first user's completion is immediate.
5356 		 */
5357 		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
5358 		    (ppdu_desc->user[0].completion_status == 0 &&
5359 		     (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)))
5360 			return NULL;
5361 	}
5362 
5363 	/*
5364 	 * Once all the TLVs for a given PPDU have been processed,
5365 	 * return the PPDU status for delivery to the higher layer.
5366 	 * tlv_bitmap_expected is not available for every frame type,
5367 	 * but the SCHED CMD STATUS TLV is the last TLV the FW sends for
5368 	 * a ppdu and, apart from the ACK BA TLV, the FW sends TLVs in
5369 	 * sequential order. The flush TLV arrives separately.
5370 	 */
5371 	if ((ppdu_info->tlv_bitmap != 0 &&
5372 	     (ppdu_info->tlv_bitmap &
5373 	      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) ||
5374 	    (ppdu_info->tlv_bitmap &
5375 	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) {
5376 		ppdu_info->done = 1;
5377 		return ppdu_info;
5378 	}
5379 
5380 	return NULL;
5381 }
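/*
 * Summary of the delivery contract implemented above: dp_htt_process_tlv()
 * returns a ppdu_info (marked done) only once the SCHED CMD STATUS TLV or
 * the USR COMPLTN FLUSH TLV has been seen and, for DATA and BAR frames,
 * the COMPLTN COMMON and ACK BA TLV counts agree; otherwise it returns
 * NULL and the PPDU keeps accumulating TLVs on the pdev list.
 */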
5382 #endif /* QCA_ENHANCED_STATS_SUPPORT */
5383 
5384 #ifdef QCA_ENHANCED_STATS_SUPPORT
5385 /**
5386  * dp_tx_ppdu_stats_feat_enable_check() - Check if any feature that
5387  *			consumes stats received from FW via HTT is enabled
5388  * @pdev: Datapath pdev handle
5389  *
5390  * Return: true if at least one consuming feature is enabled, else false
5391  */
5392 static bool dp_tx_ppdu_stats_feat_enable_check(struct dp_pdev *pdev)
5393 {
5394 	struct dp_soc *soc = pdev->soc;
5395 	struct dp_mon_ops *mon_ops = NULL;
5396 
5397 	mon_ops = dp_mon_ops_get(soc);
5398 	if (mon_ops && mon_ops->mon_ppdu_stats_feat_enable_check)
5399 		return mon_ops->mon_ppdu_stats_feat_enable_check(pdev);
5400 	else
5401 		return false;
5402 }
5403 #endif
5404 
5405 #if defined(WDI_EVENT_ENABLE)
5406 #ifdef QCA_ENHANCED_STATS_SUPPORT
5407 /**
5408  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
5409  * @soc: DP SOC handle
5410  * @pdev_id: pdev id
5411  * @htt_t2h_msg: HTT message nbuf
5412  *
5413  * Return: true if the caller may free htt_t2h_msg, else false
5414  */
5415 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
5416 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
5417 {
5418 	struct dp_pdev *pdev;
5419 	struct ppdu_info *ppdu_info = NULL;
5420 	bool free_buf = true;
5421 	struct dp_mon_pdev *mon_pdev;
5422 
5423 	if (pdev_id >= MAX_PDEV_CNT)
5424 		return true;
5425 
5426 	pdev = soc->pdev_list[pdev_id];
5427 	if (!pdev)
5428 		return true;
5429 
5430 	mon_pdev = pdev->monitor_pdev;
5431 	if (!mon_pdev)
5432 		return true;
5433 
5434 	if (!dp_tx_ppdu_stats_feat_enable_check(pdev))
5435 		return free_buf;
5436 
5437 	qdf_spin_lock_bh(&mon_pdev->ppdu_stats_lock);
5438 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
5439 
5440 	if (mon_pdev->mgmtctrl_frm_info.mgmt_buf) {
5441 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
5442 		    (pdev, htt_t2h_msg, mon_pdev->mgmtctrl_frm_info.ppdu_id) !=
5443 		    QDF_STATUS_SUCCESS)
5444 			free_buf = false;
5445 	}
5446 
5447 	if (ppdu_info)
5448 		dp_tx_ppdu_desc_deliver(pdev, ppdu_info);
5449 
5450 	mon_pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
5451 	mon_pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
5452 	mon_pdev->mgmtctrl_frm_info.ppdu_id = 0;
5453 
5454 	qdf_spin_unlock_bh(&mon_pdev->ppdu_stats_lock);
5455 
5456 	return free_buf;
5457 }
5458 #elif (!defined(REMOVE_PKT_LOG))
5459 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
5460 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
5461 {
5462 	return true;
5463 }
5464 #endif /* QCA_ENHANCED_STATS_SUPPORT */
5465 #endif
5466 
5467 #if defined(WDI_EVENT_ENABLE) &&\
5468 	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
5469 bool
5470 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
5471 			  uint32_t *msg_word,
5472 			  qdf_nbuf_t htt_t2h_msg)
5473 {
5474 	u_int8_t pdev_id;
5475 	u_int8_t target_pdev_id;
5476 	bool free_buf;
5477 
5478 	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
5479 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
5480 							 target_pdev_id);
5481 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
5482 			     htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
5483 			     pdev_id);
5484 
5485 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
5486 					      htt_t2h_msg);
5487 
5488 	return free_buf;
5489 }
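/*
 * The return value propagates buffer ownership back to the HTT layer:
 * when false is returned the nbuf is presumed retained by the PPDU
 * stats path (e.g. the mgmt payload handling above), so the caller
 * must not free htt_t2h_msg.
 */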
5490 #endif
5491 
5492 void
5493 dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
5494 {
5495 	pdev->monitor_pdev->rx_mon_recv_status.bsscolor = bsscolor;
5496 }
5497 
5498 bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
5499 {
5500 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5501 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5502 
5503 	if ((mon_pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5504 	    (mon_pdev->mo_data_filter & FILTER_DATA_UCAST))
5505 		return true;
5506 
5507 	return false;
5508 }
5509 
5510 bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
5511 {
5512 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5513 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5514 
5515 	if ((mon_pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5516 	    (mon_pdev->mo_data_filter & FILTER_DATA_MCAST))
5517 		return true;
5518 
5519 	return false;
5520 }
5521 
5522 bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
5523 {
5524 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5525 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5526 
5527 	if ((mon_pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5528 	    (mon_pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5529 		if ((mon_pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5530 		    (mon_pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5531 			return true;
5532 		}
5533 	}
5534 
5535 	return false;
5536 }
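/*
 * Illustration of the three predicates above (hypothetical filter
 * configuration): with fp_data_filter = FILTER_DATA_UCAST and every
 * other filter field zero, only dp_pdev_get_filter_ucast_data()
 * returns true; dp_pdev_get_filter_non_data() reports true only when
 * both a mgmt and a ctrl filter bit are set.
 */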
5537 
5538 QDF_STATUS dp_mon_soc_cfg_init(struct dp_soc *soc)
5539 {
5540 	int target_type;
5541 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
5542 	struct cdp_mon_ops *cdp_ops;
5543 
5544 	cdp_ops = dp_mon_cdp_ops_get(soc);
5545 	target_type = hal_get_target_type(soc->hal_soc);
5546 	switch (target_type) {
5547 	case TARGET_TYPE_QCA6290:
5548 	case TARGET_TYPE_QCA6390:
5549 	case TARGET_TYPE_QCA6490:
5550 	case TARGET_TYPE_QCA6750:
5551 	case TARGET_TYPE_KIWI:
5552 	case TARGET_TYPE_MANGO:
5553 	case TARGET_TYPE_PEACH:
5554 		/* do nothing */
5555 		break;
5556 	case TARGET_TYPE_QCA8074:
5557 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5558 							   MON_BUF_MIN_ENTRIES);
5559 		break;
5560 	case TARGET_TYPE_QCA8074V2:
5561 	case TARGET_TYPE_QCA6018:
5562 	case TARGET_TYPE_QCA9574:
5563 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5564 							   MON_BUF_MIN_ENTRIES);
5565 		mon_soc->hw_nac_monitor_support = 1;
5566 		break;
5567 	case TARGET_TYPE_QCN9000:
5568 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5569 							   MON_BUF_MIN_ENTRIES);
5570 		mon_soc->hw_nac_monitor_support = 1;
5571 		if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE)) {
5572 			if (cdp_ops  && cdp_ops->config_full_mon_mode)
5573 				cdp_ops->config_full_mon_mode((struct cdp_soc_t *)soc, 1);
5574 		}
5575 		break;
5576 	case TARGET_TYPE_QCA5018:
5577 	case TARGET_TYPE_QCN6122:
5578 	case TARGET_TYPE_QCN9160:
5579 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5580 							   MON_BUF_MIN_ENTRIES);
5581 		mon_soc->hw_nac_monitor_support = 1;
5582 		break;
5583 	case TARGET_TYPE_QCN9224:
5584 	case TARGET_TYPE_QCA5332:
5585 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5586 							   MON_BUF_MIN_ENTRIES);
5587 		mon_soc->hw_nac_monitor_support = 1;
5588 		mon_soc->monitor_mode_v2 = 1;
5589 		break;
5590 	default:
5591 		dp_mon_info("%s: Unknown tgt type %d\n", __func__, target_type);
5592 		qdf_assert_always(0);
5593 		break;
5594 	}
5595 
5596 	dp_mon_info("hw_nac_monitor_support = %d",
5597 		    mon_soc->hw_nac_monitor_support);
5598 
5599 	return QDF_STATUS_SUCCESS;
5600 }
5601 
5602 /**
5603  * dp_mon_pdev_per_target_config() - Target specific monitor pdev configuration
5604  * @pdev: PDEV handle [Should be valid]
5605  *
5606  * Return: None
5607  */
5608 static void dp_mon_pdev_per_target_config(struct dp_pdev *pdev)
5609 {
5610 	struct dp_soc *soc = pdev->soc;
5611 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5612 	int target_type;
5613 
5614 	target_type = hal_get_target_type(soc->hal_soc);
5615 	switch (target_type) {
5616 	case TARGET_TYPE_KIWI:
5617 	case TARGET_TYPE_MANGO:
5618 	case TARGET_TYPE_PEACH:
5619 		mon_pdev->is_tlv_hdr_64_bit = true;
5620 		break;
5621 	default:
5622 		mon_pdev->is_tlv_hdr_64_bit = false;
5623 		break;
5624 	}
5625 }
5626 
5627 QDF_STATUS dp_mon_pdev_attach(struct dp_pdev *pdev)
5628 {
5629 	struct dp_soc *soc;
5630 	struct dp_mon_pdev *mon_pdev;
5631 	struct dp_mon_ops *mon_ops;
5632 	qdf_size_t mon_pdev_context_size;
5633 
5634 	if (!pdev) {
5635 		dp_mon_err("pdev is NULL");
5636 		goto fail0;
5637 	}
5638 
5639 	soc = pdev->soc;
5640 
5641 	mon_pdev_context_size = soc->arch_ops.txrx_get_mon_context_size(DP_CONTEXT_TYPE_MON_PDEV);
5642 	mon_pdev = dp_context_alloc_mem(soc, DP_MON_PDEV_TYPE, mon_pdev_context_size);
5643 	if (!mon_pdev) {
5644 		dp_mon_err("%pK: MONITOR pdev allocation failed", pdev);
5645 		goto fail0;
5646 	}
5647 
5648 	pdev->monitor_pdev = mon_pdev;
5649 	mon_ops = dp_mon_ops_get(pdev->soc);
5650 	if (!mon_ops) {
5651 		dp_mon_err("%pK: Invalid monitor ops", pdev);
5652 		goto fail1;
5653 	}
5654 
5655 	if (mon_ops->mon_pdev_alloc) {
5656 		if (mon_ops->mon_pdev_alloc(pdev)) {
5657 			dp_mon_err("%pK: MONITOR pdev alloc failed", pdev);
5658 			goto fail1;
5659 		}
5660 	}
5661 
5662 	if (mon_ops->mon_rings_alloc) {
5663 		if (mon_ops->mon_rings_alloc(pdev)) {
5664 			dp_mon_err("%pK: MONITOR rings setup failed", pdev);
5665 			goto fail2;
5666 		}
5667 	}
5668 
5669 	/* Rx monitor mode specific init */
5670 	if (mon_ops->rx_mon_desc_pool_alloc) {
5671 		if (mon_ops->rx_mon_desc_pool_alloc(pdev)) {
5672 			dp_mon_err("%pK: dp_rx_pdev_mon_attach failed", pdev);
5673 			goto fail3;
5674 		}
5675 	}
5676 
5677 	if (mon_ops->mon_rx_ppdu_info_cache_create) {
5678 		if (mon_ops->mon_rx_ppdu_info_cache_create(pdev)) {
5679 			dp_mon_err("%pK: dp_rx_pdev_mon_attach failed", pdev);
5680 			goto fail4;
5681 		}
5682 	}
5683 
5684 	dp_mon_pdev_per_target_config(pdev);
5685 
5686 	return QDF_STATUS_SUCCESS;
5687 fail4:
5688 	if (mon_ops->rx_mon_desc_pool_free)
5689 		mon_ops->rx_mon_desc_pool_free(pdev);
5690 fail3:
5691 	if (mon_ops->mon_rings_free)
5692 		mon_ops->mon_rings_free(pdev);
5693 fail2:
5694 	if (mon_ops->mon_pdev_free)
5695 		mon_ops->mon_pdev_free(pdev);
5696 fail1:
5697 	pdev->monitor_pdev = NULL;
5698 	dp_context_free_mem(soc, DP_MON_PDEV_TYPE, mon_pdev);
5699 fail0:
5700 	return QDF_STATUS_E_NOMEM;
5701 }
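/*
 * Note on the attach/init split used for the monitor pdev:
 * dp_mon_pdev_attach() and its fail0..fail4 unwind ladder deal purely
 * with memory (context, rings, descriptor pools), while locks, filters,
 * timers and buffer replenish are set up later in dp_mon_pdev_init().
 * Teardown mirrors this: dp_mon_pdev_deinit(), then dp_mon_pdev_detach().
 */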
5702 
5703 QDF_STATUS dp_mon_pdev_detach(struct dp_pdev *pdev)
5704 {
5705 	struct dp_mon_pdev *mon_pdev;
5706 	struct dp_mon_ops *mon_ops = NULL;
5707 
5708 	if (!pdev) {
5709 		dp_mon_err("pdev is NULL");
5710 		return QDF_STATUS_E_FAILURE;
5711 	}
5712 
5713 	mon_pdev = pdev->monitor_pdev;
5714 	if (!mon_pdev) {
5715 		dp_mon_err("Monitor pdev is NULL");
5716 		return QDF_STATUS_E_FAILURE;
5717 	}
5718 
5719 	mon_ops = dp_mon_ops_get(pdev->soc);
5720 	if (!mon_ops) {
5721 		dp_mon_err("Monitor ops is NULL");
5722 		return QDF_STATUS_E_FAILURE;
5723 	}
5724 
5725 	if (mon_ops->mon_rx_ppdu_info_cache_destroy)
5726 		mon_ops->mon_rx_ppdu_info_cache_destroy(pdev);
5727 	if (mon_ops->rx_mon_desc_pool_free)
5728 		mon_ops->rx_mon_desc_pool_free(pdev);
5729 	if (mon_ops->mon_rings_free)
5730 		mon_ops->mon_rings_free(pdev);
5731 	if (mon_ops->mon_pdev_free)
5732 		mon_ops->mon_pdev_free(pdev);
5733 
5734 	dp_context_free_mem(pdev->soc, DP_MON_PDEV_TYPE, mon_pdev);
5735 	pdev->monitor_pdev = NULL;
5736 	return QDF_STATUS_SUCCESS;
5737 }
5738 
5739 QDF_STATUS dp_mon_pdev_init(struct dp_pdev *pdev)
5740 {
5741 	struct dp_mon_pdev *mon_pdev;
5742 	struct dp_mon_ops *mon_ops = NULL;
5743 
5744 	if (!pdev) {
5745 		dp_mon_err("pdev is NULL");
5746 		return QDF_STATUS_E_FAILURE;
5747 	}
5748 
5749 	mon_pdev = pdev->monitor_pdev;
5750 
5751 	mon_pdev->invalid_mon_peer = qdf_mem_malloc(sizeof(struct dp_mon_peer));
5752 	if (!mon_pdev->invalid_mon_peer) {
5753 		dp_mon_err("%pK: Memory allocation failed for invalid "
5754 			   "monitor peer", pdev);
5755 		return QDF_STATUS_E_NOMEM;
5756 	}
5757 
5758 	mon_ops = dp_mon_ops_get(pdev->soc);
5759 	if (!mon_ops) {
5760 		dp_mon_err("Monitor ops is NULL");
5761 		goto fail0;
5762 	}
5763 
5764 	mon_pdev->filter = dp_mon_filter_alloc(mon_pdev);
5765 	if (!mon_pdev->filter) {
5766 		dp_mon_err("%pK: Memory allocation failed for monitor filter",
5767 			   pdev);
5768 		goto fail0;
5769 	}
5770 
5771 	if (mon_ops->tx_mon_filter_alloc) {
5772 		if (mon_ops->tx_mon_filter_alloc(pdev)) {
5773 			dp_mon_err("%pK: Memory allocation failed for tx monitor "
5774 				   "filter", pdev);
5775 			goto fail1;
5776 		}
5777 	}
5778 
5779 	qdf_spinlock_create(&mon_pdev->ppdu_stats_lock);
5780 	qdf_spinlock_create(&mon_pdev->neighbour_peer_mutex);
5781 	mon_pdev->monitor_configured = false;
5782 	mon_pdev->mon_chan_band = REG_BAND_UNKNOWN;
5783 
5784 	TAILQ_INIT(&mon_pdev->neighbour_peers_list);
5785 	mon_pdev->neighbour_peers_added = false;
5786 
5787 	/* Monitor filter init */
5788 	mon_pdev->mon_filter_mode = MON_FILTER_ALL;
5789 	mon_pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
5790 	mon_pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
5791 	mon_pdev->fp_data_filter = FILTER_DATA_ALL;
5792 	mon_pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
5793 	mon_pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
5794 	mon_pdev->mo_data_filter = FILTER_DATA_ALL;
5795 
5796 	/*
5797 	 * initialize ppdu tlv list
5798 	 */
5799 	TAILQ_INIT(&mon_pdev->ppdu_info_list);
5800 	TAILQ_INIT(&mon_pdev->sched_comp_ppdu_list);
5801 
5802 	mon_pdev->list_depth = 0;
5803 	mon_pdev->tlv_count = 0;
5804 	/* initialize cal client timer */
5805 	dp_cal_client_attach(&mon_pdev->cal_client_ctx,
5806 			     dp_pdev_to_cdp_pdev(pdev),
5807 			     pdev->soc->osdev,
5808 			     &dp_iterate_update_peer_list);
5809 	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
5810 		goto fail2;
5811 
5812 	if (mon_ops->mon_lite_mon_alloc) {
5813 		if (mon_ops->mon_lite_mon_alloc(pdev) != QDF_STATUS_SUCCESS) {
5814 			dp_mon_err("%pK: lite mon alloc failed", pdev);
5815 			goto fail3;
5816 		}
5817 	}
5818 
5819 	if (mon_ops->mon_rings_init) {
5820 		if (mon_ops->mon_rings_init(pdev)) {
5821 			dp_mon_err("%pK: MONITOR rings setup failed", pdev);
5822 			goto fail4;
5823 		}
5824 	}
5825 
5826 	/* initialize sw monitor rx descriptors */
5827 	if (mon_ops->rx_mon_desc_pool_init)
5828 		mon_ops->rx_mon_desc_pool_init(pdev);
5829 
5830 	/* allocate buffers and replenish the monitor RxDMA ring */
5831 	if (mon_ops->rx_mon_buffers_alloc) {
5832 		if (mon_ops->rx_mon_buffers_alloc(pdev)) {
5833 			dp_mon_err("%pK: rx mon buffers alloc failed", pdev);
5834 			goto fail5;
5835 		}
5836 	}
5837 
5838 	/* attach monitor function */
5839 	dp_monitor_tx_ppdu_stats_attach(pdev);
5840 
5841 	/* mon pdev extended init */
5842 	if (mon_ops->mon_pdev_ext_init)
5843 		mon_ops->mon_pdev_ext_init(pdev);
5844 
5845 	mon_pdev->is_dp_mon_pdev_initialized = true;
5846 
5847 	return QDF_STATUS_SUCCESS;
5848 
5849 fail5:
5850 	if (mon_ops->rx_mon_desc_pool_deinit)
5851 		mon_ops->rx_mon_desc_pool_deinit(pdev);
5852 
5853 	if (mon_ops->mon_rings_deinit)
5854 		mon_ops->mon_rings_deinit(pdev);
5855 fail4:
5856 	if (mon_ops->mon_lite_mon_dealloc)
5857 		mon_ops->mon_lite_mon_dealloc(pdev);
5858 fail3:
5859 	dp_htt_ppdu_stats_detach(pdev);
5860 fail2:
5861 	qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
5862 	qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
5863 	if (mon_ops->tx_mon_filter_dealloc)
5864 		mon_ops->tx_mon_filter_dealloc(pdev);
5865 fail1:
5866 	dp_mon_filter_dealloc(mon_pdev);
5867 fail0:
5868 	qdf_mem_free(mon_pdev->invalid_mon_peer);
5869 	return QDF_STATUS_E_FAILURE;
5870 }
5871 
5872 QDF_STATUS dp_mon_pdev_deinit(struct dp_pdev *pdev)
5873 {
5874 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5875 	struct dp_mon_ops *mon_ops = NULL;
5876 
5877 	mon_ops = dp_mon_ops_get(pdev->soc);
5878 	if (!mon_ops) {
5879 		dp_mon_err("Monitor ops is NULL");
5880 		return QDF_STATUS_E_FAILURE;
5881 	}
5882 
5883 	if (!mon_pdev->is_dp_mon_pdev_initialized)
5884 		return QDF_STATUS_SUCCESS;
5885 
5886 	dp_mon_filters_reset(pdev);
5887 
5888 	/* mon pdev extended deinit */
5889 	if (mon_ops->mon_pdev_ext_deinit)
5890 		mon_ops->mon_pdev_ext_deinit(pdev);
5891 
5892 	/* detach monitor function */
5893 	dp_monitor_tx_ppdu_stats_detach(pdev);
5894 
5895 	if (mon_ops->rx_mon_buffers_free)
5896 		mon_ops->rx_mon_buffers_free(pdev);
5897 	if (mon_ops->rx_mon_desc_pool_deinit)
5898 		mon_ops->rx_mon_desc_pool_deinit(pdev);
5899 	if (mon_ops->mon_rings_deinit)
5900 		mon_ops->mon_rings_deinit(pdev);
5901 	dp_cal_client_detach(&mon_pdev->cal_client_ctx);
5902 	if (mon_ops->mon_lite_mon_dealloc)
5903 		mon_ops->mon_lite_mon_dealloc(pdev);
5904 	dp_htt_ppdu_stats_detach(pdev);
5905 	qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
5906 	dp_neighbour_peers_detach(pdev);
5907 	dp_pktlogmod_exit(pdev);
5908 	if (mon_ops->tx_mon_filter_dealloc)
5909 		mon_ops->tx_mon_filter_dealloc(pdev);
5910 	if (mon_pdev->filter)
5911 		dp_mon_filter_dealloc(mon_pdev);
5914 	if (mon_pdev->invalid_mon_peer)
5915 		qdf_mem_free(mon_pdev->invalid_mon_peer);
5916 	mon_pdev->is_dp_mon_pdev_initialized = false;
5917 
5918 	return QDF_STATUS_SUCCESS;
5919 }
5920 
5921 QDF_STATUS dp_mon_vdev_attach(struct dp_vdev *vdev)
5922 {
5923 	struct dp_mon_vdev *mon_vdev;
5924 	struct dp_pdev *pdev = vdev->pdev;
5925 
5926 	mon_vdev = (struct dp_mon_vdev *)qdf_mem_malloc(sizeof(*mon_vdev));
5927 	if (!mon_vdev) {
5928 		dp_mon_err("%pK: Monitor vdev allocation failed", vdev);
5929 		return QDF_STATUS_E_NOMEM;
5930 	}
5931 
5932 	if (pdev && pdev->monitor_pdev &&
5933 	    pdev->monitor_pdev->scan_spcl_vap_configured)
5934 		dp_scan_spcl_vap_stats_attach(mon_vdev);
5935 
5936 	vdev->monitor_vdev = mon_vdev;
5937 
5938 	return QDF_STATUS_SUCCESS;
5939 }
5940 
5941 QDF_STATUS dp_mon_vdev_detach(struct dp_vdev *vdev)
5942 {
5943 	struct dp_mon_vdev *mon_vdev = vdev->monitor_vdev;
5944 	struct dp_pdev *pdev = vdev->pdev;
5945 	struct dp_mon_ops *mon_ops = dp_mon_ops_get(pdev->soc);
5946 
5947 	if (!mon_ops)
5948 		return QDF_STATUS_E_FAILURE;
5949 
5950 	if (!mon_vdev)
5951 		return QDF_STATUS_E_FAILURE;
5952 
5953 	if (pdev->monitor_pdev->scan_spcl_vap_configured)
5954 		dp_scan_spcl_vap_stats_detach(mon_vdev);
5955 
5956 	qdf_mem_free(mon_vdev);
5957 	vdev->monitor_vdev = NULL;
5958 	/* set mvdev to NULL only if detach is called for monitor/special vap
5959 	 */
5960 	if (pdev->monitor_pdev->mvdev == vdev)
5961 		pdev->monitor_pdev->mvdev = NULL;
5962 
5963 	if (mon_ops->mon_lite_mon_vdev_delete)
5964 		mon_ops->mon_lite_mon_vdev_delete(pdev, vdev);
5965 
5966 	return QDF_STATUS_SUCCESS;
5967 }
5968 
5969 #if defined(FEATURE_PERPKT_INFO) && defined(WDI_EVENT_ENABLE)
5970 /**
5971  * dp_mon_peer_attach_notify() - Raise WDI event for peer create
5972  * @peer: DP Peer handle
5973  *
5974  * Return: none
5975  */
5976 static inline
5977 void dp_mon_peer_attach_notify(struct dp_peer *peer)
5978 {
5979 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
5980 	struct dp_pdev *pdev;
5981 	struct dp_soc *soc;
5982 	struct cdp_peer_cookie peer_cookie;
5983 
5984 	pdev = peer->vdev->pdev;
5985 	soc = pdev->soc;
5986 
5987 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
5988 		     QDF_MAC_ADDR_SIZE);
5989 
5990 	peer_cookie.ctx = NULL;
5991 	peer_cookie.pdev_id = pdev->pdev_id;
5992 	peer_cookie.cookie = pdev->next_peer_cookie++;
5993 
5994 	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, soc,
5995 			     (void *)&peer_cookie,
5996 			     peer->peer_id, WDI_NO_VAL, pdev->pdev_id);
5997 
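	/*
	 * A registered WDI consumer is expected to fill peer_cookie.ctx
	 * with its rate stats context from the event callback; a NULL ctx
	 * at this point means registration failed, so the cookie counter
	 * is rolled back below.
	 */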
5998 	if (soc->peerstats_enabled) {
5999 		if (!peer_cookie.ctx) {
6000 			pdev->next_peer_cookie--;
6001 			qdf_err("Failed to initialize peer rate stats");
6002 			mon_peer->peerstats_ctx = NULL;
6003 		} else {
6004 			mon_peer->peerstats_ctx =
6005 				(struct cdp_peer_rate_stats_ctx *)
6006 				 peer_cookie.ctx;
6007 		}
6008 	}
6009 }
6010 
6011 /**
6012  * dp_mon_peer_detach_notify() - Raise WDI event for peer destroy
6013  * @peer: DP Peer handle
6014  *
6015  * Return: none
6016  */
6017 static inline
6018 void dp_mon_peer_detach_notify(struct dp_peer *peer)
6019 {
6020 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
6021 	struct dp_pdev *pdev;
6022 	struct dp_soc *soc;
6023 	struct cdp_peer_cookie peer_cookie;
6024 
6025 	pdev = peer->vdev->pdev;
6026 	soc = pdev->soc;
6027 	/* send peer destroy event to upper layer */
6028 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
6029 		     QDF_MAC_ADDR_SIZE);
6030 	peer_cookie.ctx =
6031 		(struct cdp_stats_cookie *)mon_peer->peerstats_ctx;
6032 
6033 	dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
6034 			     soc,
6035 			     (void *)&peer_cookie,
6036 			     peer->peer_id,
6037 			     WDI_NO_VAL,
6038 			     pdev->pdev_id);
6039 
6040 	mon_peer->peerstats_ctx = NULL;
6041 }
6042 #else
6043 static inline
6044 void dp_mon_peer_attach_notify(struct dp_peer *peer)
6045 {
6046 	peer->monitor_peer->peerstats_ctx = NULL;
6047 }
6048 
6049 static inline
6050 void dp_mon_peer_detach_notify(struct dp_peer *peer)
6051 {
6052 	peer->monitor_peer->peerstats_ctx = NULL;
6053 }
6054 #endif
6055 
6056 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
6057 QDF_STATUS dp_mon_peer_attach(struct dp_peer *peer)
6058 {
6059 	struct dp_mon_peer *mon_peer;
6060 	struct dp_pdev *pdev;
6061 
6062 	mon_peer = (struct dp_mon_peer *)qdf_mem_malloc(sizeof(*mon_peer));
6063 	if (!mon_peer) {
6064 		dp_mon_err("%pK: MONITOR peer allocation failed", peer);
6065 		return QDF_STATUS_E_NOMEM;
6066 	}
6067 
6068 	peer->monitor_peer = mon_peer;
6069 	pdev = peer->vdev->pdev;
6070 	/*
6071 	 * In tx_monitor mode, a filter may be set for a peer while it is
6072 	 * still unassociated; once that peer associates, the
6073 	 * tx_cap_enabled flag must be updated to honor the peer filter.
6074 	 */
6075 	dp_monitor_peer_tx_capture_filter_check(pdev, peer);
6076 
6077 	DP_STATS_INIT(mon_peer);
6078 	DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);
6079 
6080 	dp_mon_peer_attach_notify(peer);
6081 
6082 	return QDF_STATUS_SUCCESS;
6083 }
6084 #endif
6085 
6086 QDF_STATUS dp_mon_peer_detach(struct dp_peer *peer)
6087 {
6088 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
6089 
6090 	if (!mon_peer)
6091 		return QDF_STATUS_SUCCESS;
6092 
6093 	dp_mon_peer_detach_notify(peer);
6094 
6095 	qdf_mem_free(mon_peer);
6096 	peer->monitor_peer = NULL;
6097 
6098 	return QDF_STATUS_SUCCESS;
6099 }
6100 
6101 #ifndef DISABLE_MON_CONFIG
6102 void dp_mon_register_intr_ops(struct dp_soc *soc)
6103 {
6104 	struct dp_mon_ops *mon_ops = NULL;
6105 
6106 	mon_ops = dp_mon_ops_get(soc);
6107 	if (!mon_ops) {
6108 		dp_mon_err("Monitor ops is NULL");
6109 		return;
6110 	}
6111 	if (mon_ops->mon_register_intr_ops)
6112 		mon_ops->mon_register_intr_ops(soc);
6113 }
6114 #endif
6115 
6116 struct cdp_peer_rate_stats_ctx *
6117 dp_mon_peer_get_peerstats_ctx(struct dp_peer *peer)
6118 {
6119 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
6120 
6121 	if (mon_peer)
6122 		return mon_peer->peerstats_ctx;
6123 	else
6124 		return NULL;
6125 }
6126 
6127 #ifdef QCA_ENHANCED_STATS_SUPPORT
6128 void dp_mon_peer_reset_stats(struct dp_peer *peer)
6129 {
6130 	struct dp_mon_peer *mon_peer = NULL;
6131 
6132 	mon_peer = peer->monitor_peer;
6133 	if (!mon_peer)
6134 		return;
6135 
6136 	DP_STATS_CLR(mon_peer);
6137 	DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);
6138 }
6139 
6140 void dp_mon_peer_get_stats(struct dp_peer *peer, void *arg,
6141 			   enum cdp_stat_update_type type)
6142 {
6143 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
6144 	struct dp_mon_peer_stats *mon_peer_stats;
6145 
6146 	if (!mon_peer || !arg)
6147 		return;
6148 
6149 	mon_peer_stats = &mon_peer->stats;
6150 
6151 	switch (type) {
6152 	case UPDATE_PEER_STATS:
6153 	{
6154 		struct cdp_peer_stats *peer_stats =
6155 						(struct cdp_peer_stats *)arg;
6156 		DP_UPDATE_MON_STATS(peer_stats, mon_peer_stats);
6157 		break;
6158 	}
6159 	case UPDATE_VDEV_STATS:
6160 	{
6161 		struct cdp_vdev_stats *vdev_stats =
6162 						(struct cdp_vdev_stats *)arg;
6163 		DP_UPDATE_MON_STATS(vdev_stats, mon_peer_stats);
6164 		break;
6165 	}
6166 	default:
6167 		dp_mon_err("Invalid stats_update_type");
6168 	}
6169 }
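/*
 * Hypothetical usage sketch (not driver code): callers pass a stats
 * buffer that matches the update type, e.g.
 *
 *	struct cdp_peer_stats stats = { 0 };
 *
 *	dp_mon_peer_get_stats(peer, &stats, UPDATE_PEER_STATS);
 *
 * which folds the monitor peer counters into the caller's aggregate.
 */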
6170 
6171 void dp_mon_invalid_peer_update_pdev_stats(struct dp_pdev *pdev)
6172 {
6173 	struct dp_mon_peer *mon_peer;
6174 	struct dp_mon_peer_stats *mon_peer_stats;
6175 	struct cdp_pdev_stats *pdev_stats;
6176 
6177 	if (!pdev || !pdev->monitor_pdev)
6178 		return;
6179 
6180 	mon_peer = pdev->monitor_pdev->invalid_mon_peer;
6181 	if (!mon_peer)
6182 		return;
6183 
6184 	mon_peer_stats = &mon_peer->stats;
6185 	pdev_stats = &pdev->stats;
6186 	DP_UPDATE_MON_STATS(pdev_stats, mon_peer_stats);
6187 }
6188 
6189 QDF_STATUS
6190 dp_mon_peer_get_stats_param(struct dp_peer *peer, enum cdp_peer_stats_type type,
6191 			    cdp_peer_stats_param_t *buf)
6192 {
6193 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
6194 	struct dp_mon_peer *mon_peer;
6195 
6196 	mon_peer = peer->monitor_peer;
6197 	if (!mon_peer)
6198 		return QDF_STATUS_E_FAILURE;
6199 
6200 	switch (type) {
6201 	case cdp_peer_tx_rate:
6202 		buf->tx_rate = mon_peer->stats.tx.tx_rate;
6203 		break;
6204 	case cdp_peer_tx_last_tx_rate:
6205 		buf->last_tx_rate = mon_peer->stats.tx.last_tx_rate;
6206 		break;
6207 	case cdp_peer_tx_ratecode:
6208 		buf->tx_ratecode = mon_peer->stats.tx.tx_ratecode;
6209 		break;
6210 	case cdp_peer_rx_rate:
6211 		buf->rx_rate = mon_peer->stats.rx.rx_rate;
6212 		break;
6213 	case cdp_peer_rx_last_rx_rate:
6214 		buf->last_rx_rate = mon_peer->stats.rx.last_rx_rate;
6215 		break;
6216 	case cdp_peer_rx_ratecode:
6217 		buf->rx_ratecode = mon_peer->stats.rx.rx_ratecode;
6218 		break;
6219 	case cdp_peer_rx_avg_snr:
6220 		buf->rx_avg_snr = mon_peer->stats.rx.avg_snr;
6221 		break;
6222 	case cdp_peer_rx_snr:
6223 		buf->rx_snr = mon_peer->stats.rx.snr;
6224 		break;
6225 	default:
6226 		dp_err("Invalid stats type requested");
6227 		ret = QDF_STATUS_E_FAILURE;
6228 	}
6229 
6230 	return ret;
6231 }
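/*
 * Illustrative call (hypothetical caller) fetching one field without
 * exposing the whole monitor stats structure:
 *
 *	cdp_peer_stats_param_t buf;
 *
 *	if (dp_mon_peer_get_stats_param(peer, cdp_peer_rx_avg_snr, &buf) ==
 *	    QDF_STATUS_SUCCESS)
 *		avg_snr = buf.rx_avg_snr;
 */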
6232 #endif
6233 
6234 void dp_mon_ops_register(struct dp_soc *soc)
6235 {
6236 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
6237 	uint32_t target_type;
6238 
6239 	target_type = hal_get_target_type(soc->hal_soc);
6240 	switch (target_type) {
6241 	case TARGET_TYPE_QCA6290:
6242 	case TARGET_TYPE_QCA6390:
6243 	case TARGET_TYPE_QCA6490:
6244 	case TARGET_TYPE_QCA6750:
6245 	case TARGET_TYPE_KIWI:
6246 	case TARGET_TYPE_MANGO:
6247 	case TARGET_TYPE_PEACH:
6248 	case TARGET_TYPE_QCA8074:
6249 	case TARGET_TYPE_QCA8074V2:
6250 	case TARGET_TYPE_QCA6018:
6251 	case TARGET_TYPE_QCA9574:
6252 	case TARGET_TYPE_QCN9160:
6253 	case TARGET_TYPE_QCN9000:
6254 	case TARGET_TYPE_QCA5018:
6255 	case TARGET_TYPE_QCN6122:
6256 		dp_mon_ops_register_1_0(mon_soc);
6257 		break;
6258 	case TARGET_TYPE_QCN9224:
6259 	case TARGET_TYPE_QCA5332:
6260 #ifdef QCA_MONITOR_2_0_SUPPORT
6261 		dp_mon_ops_register_2_0(mon_soc);
6262 #endif
6263 		break;
6264 	default:
6265 		dp_mon_err("%s: Unknown tgt type %d", __func__, target_type);
6266 		qdf_assert_always(0);
6267 		break;
6268 	}
6269 }
6270 
6271 #ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
6272 void dp_mon_ops_free(struct dp_soc *soc)
6273 {
6274 	struct cdp_ops *ops = soc->cdp_soc.ops;
6275 	struct cdp_mon_ops *cdp_mon_ops = ops->mon_ops;
6276 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
6277 	struct dp_mon_ops *mon_ops = mon_soc->mon_ops;
6278 
6279 	if (cdp_mon_ops)
6280 		qdf_mem_free(cdp_mon_ops);
6281 
6282 	if (mon_ops)
6283 		qdf_mem_free(mon_ops);
6284 }
6285 #else
6286 void dp_mon_ops_free(struct dp_soc *soc)
6287 {
6288 }
6289 #endif
6290 
6291 void dp_mon_cdp_ops_register(struct dp_soc *soc)
6292 {
6293 	struct cdp_ops *ops = soc->cdp_soc.ops;
6294 	uint32_t target_type;
6295 
6296 	if (!ops) {
6297 		dp_mon_err("cdp_ops is NULL");
6298 		return;
6299 	}
6300 
6301 	target_type = hal_get_target_type(soc->hal_soc);
6302 	switch (target_type) {
6303 	case TARGET_TYPE_QCA6290:
6304 	case TARGET_TYPE_QCA6390:
6305 	case TARGET_TYPE_QCA6490:
6306 	case TARGET_TYPE_QCA6750:
6307 	case TARGET_TYPE_KIWI:
6308 	case TARGET_TYPE_MANGO:
6309 	case TARGET_TYPE_PEACH:
6310 	case TARGET_TYPE_QCA8074:
6311 	case TARGET_TYPE_QCA8074V2:
6312 	case TARGET_TYPE_QCA6018:
6313 	case TARGET_TYPE_QCA9574:
6314 	case TARGET_TYPE_QCN9160:
6315 	case TARGET_TYPE_QCN9000:
6316 	case TARGET_TYPE_QCA5018:
6317 	case TARGET_TYPE_QCN6122:
6318 		dp_mon_cdp_ops_register_1_0(ops);
6319 #ifdef ATH_SUPPORT_NAC_RSSI
6320 		ops->ctrl_ops->txrx_vdev_config_for_nac_rssi =
6321 					dp_config_for_nac_rssi;
6322 		ops->ctrl_ops->txrx_vdev_get_neighbour_rssi =
6323 					dp_vdev_get_neighbour_rssi;
6324 #endif
6325 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
6326 		ops->ctrl_ops->txrx_update_filter_neighbour_peers =
6327 					dp_update_filter_neighbour_peers;
6328 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
6329 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
6330 		dp_cfr_filter_register_1_0(ops);
6331 #endif
6332 		if (target_type == TARGET_TYPE_QCN9000)
6333 			ops->ctrl_ops->txrx_update_mon_mac_filter =
6334 					dp_update_mon_mac_filter;
6335 		break;
6336 	case TARGET_TYPE_QCN9224:
6337 	case TARGET_TYPE_QCA5332:
6338 #ifdef QCA_MONITOR_2_0_SUPPORT
6339 		dp_mon_cdp_ops_register_2_0(ops);
6340 #ifdef ATH_SUPPORT_NAC_RSSI
6341 		ops->ctrl_ops->txrx_vdev_config_for_nac_rssi =
6342 				dp_lite_mon_config_nac_rssi_peer;
6343 		ops->ctrl_ops->txrx_vdev_get_neighbour_rssi =
6344 				dp_lite_mon_get_nac_peer_rssi;
6345 #endif
6346 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
6347 		ops->ctrl_ops->txrx_update_filter_neighbour_peers =
6348 					dp_lite_mon_config_nac_peer;
6349 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
6350 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
6351 		dp_cfr_filter_register_2_0(ops);
6352 #endif
6353 #endif /* QCA_MONITOR_2_0_SUPPORT */
6354 		break;
6355 	default:
6356 		dp_mon_err("%s: Unknown tgt type %d", __func__, target_type);
6357 		qdf_assert_always(0);
6358 		break;
6359 	}
6360 
6361 	ops->cmn_drv_ops->txrx_set_monitor_mode = dp_vdev_set_monitor_mode;
6362 	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev =
6363 				dp_get_mon_vdev_from_pdev_wifi3;
6364 #ifdef DP_PEER_EXTENDED_API
6365 	ops->misc_ops->pkt_log_init = dp_pkt_log_init;
6366 	ops->misc_ops->pkt_log_con_service = dp_pkt_log_con_service;
6367 	ops->misc_ops->pkt_log_exit = dp_pkt_log_exit;
6368 #endif
6369 	ops->ctrl_ops->enable_peer_based_pktlog =
6370 				dp_enable_peer_based_pktlog;
6371 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
6372 	ops->ctrl_ops->txrx_update_peer_pkt_capture_params =
6373 				 dp_peer_update_pkt_capture_params;
6374 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
6375 #ifdef QCA_ENHANCED_STATS_SUPPORT
6376 	ops->host_stats_ops->txrx_enable_enhanced_stats =
6377 					dp_enable_enhanced_stats;
6378 	ops->host_stats_ops->txrx_disable_enhanced_stats =
6379 					dp_disable_enhanced_stats;
6380 #endif /* QCA_ENHANCED_STATS_SUPPORT */
6381 #ifdef WDI_EVENT_ENABLE
6382 	ops->ctrl_ops->txrx_get_pldev = dp_get_pldev;
6383 #endif
6384 #ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
6385 	ops->host_stats_ops->txrx_get_scan_spcl_vap_stats =
6386 					dp_get_scan_spcl_vap_stats;
6387 #endif
6388 	return;
6389 }
6390 
6391 #ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
6392 static inline void
6393 dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops)
6394 {
6395 	if (ops->mon_ops) {
6396 		qdf_mem_free(ops->mon_ops);
6397 		ops->mon_ops = NULL;
6398 	}
6399 }
6400 #else
6401 static inline void
6402 dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops)
6403 {
6404 	ops->mon_ops = NULL;
6405 }
6406 #endif
6407 
6408 void dp_mon_cdp_ops_deregister(struct dp_soc *soc)
6409 {
6410 	struct cdp_ops *ops = soc->cdp_soc.ops;
6411 
6412 	if (!ops) {
6413 		dp_mon_err("cdp_ops is NULL");
6414 		return;
6415 	}
6416 
6417 	dp_mon_cdp_mon_ops_deregister(ops);
6418 
6419 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
6420 	ops->cfr_ops->txrx_cfr_filter = NULL;
6421 #endif
6422 	ops->cmn_drv_ops->txrx_set_monitor_mode = NULL;
6423 	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev = NULL;
6424 #ifdef DP_PEER_EXTENDED_API
6425 	ops->misc_ops->pkt_log_init = NULL;
6426 	ops->misc_ops->pkt_log_con_service = NULL;
6427 	ops->misc_ops->pkt_log_exit = NULL;
6428 #endif
6429 #ifdef ATH_SUPPORT_NAC_RSSI
6430 	ops->ctrl_ops->txrx_vdev_config_for_nac_rssi = NULL;
6431 	ops->ctrl_ops->txrx_vdev_get_neighbour_rssi = NULL;
6432 #endif
6433 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
6434 	ops->ctrl_ops->txrx_update_filter_neighbour_peers = NULL;
6435 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
6436 	ops->ctrl_ops->enable_peer_based_pktlog = NULL;
6437 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
6438 	ops->ctrl_ops->txrx_update_peer_pkt_capture_params = NULL;
6439 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
6440 #ifdef FEATURE_PERPKT_INFO
6441 	ops->host_stats_ops->txrx_enable_enhanced_stats = NULL;
6442 	ops->host_stats_ops->txrx_disable_enhanced_stats = NULL;
6443 #endif /* FEATURE_PERPKT_INFO */
6444 #ifdef WDI_EVENT_ENABLE
6445 	ops->ctrl_ops->txrx_get_pldev = NULL;
6446 #endif
6447 	return;
6448 }
6449 
6450 #if defined(WDI_EVENT_ENABLE) &&\
6451 	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
6452 static inline
6453 void dp_mon_ppdu_stats_handler_deregister(struct dp_mon_soc *mon_soc)
6454 {
6455 	mon_soc->mon_ops->mon_ppdu_stats_ind_handler = NULL;
6456 }
6457 #else
6458 static inline
6459 void dp_mon_ppdu_stats_handler_deregister(struct dp_mon_soc *mon_soc)
6460 {
6461 }
6462 #endif
6463 
6464 #ifdef QCA_RSSI_DB2DBM
6465 /**
6466  * dp_mon_compute_min_nf() - calculate the min NF value across the
6467  *                      20 MHz subbands of an active chain.
6468  * @conv_params: cdp_rssi_dbm_conv_param_dp structure value
6469  * @min_nf: location to store min NF value
6470  * @chain_idx: active chain index in nfHwdbm array
6471  *
6472  * computation: nfInDbm[][] is computed as A_MIN(nfHwDbm[][]) with the
6473  *              row index running over active chains and the column
6474  *              index over the 20 MHz subbands of each chain.
6475  * example: chain_mask = 0x07 (3 active chains at indices 0, 1, 2);
6476  *          bandwidth = 40 MHz (two 20 MHz subbands, so subband
6477  *                      indices 0 and 1 feed the min_nf computation)
6478  *
6479  * Return: QDF_STATUS_SUCCESS if value set successfully
6480  *         QDF_STATUS_E_INVAL on error
6481  */
6482 static QDF_STATUS
6483 dp_mon_compute_min_nf(struct cdp_rssi_dbm_conv_param_dp *conv_params,
6484 		      int8_t *min_nf, int chain_idx)
6485 {
6486 	int j;
6487 	*min_nf = conv_params->nf_hw_dbm[chain_idx][0];
6488 
6489 	switch (conv_params->curr_bw) {
6490 	case CHAN_WIDTH_20:
6491 	case CHAN_WIDTH_5:
6492 	case CHAN_WIDTH_10:
6493 		break;
6494 	case CHAN_WIDTH_40:
6495 		for (j = 1; j < SUB40BW; j++) {
6496 			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
6497 				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
6498 		}
6499 		break;
6500 	case CHAN_WIDTH_80:
6501 		for (j = 1; j < SUB80BW; j++) {
6502 			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
6503 				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
6504 		}
6505 		break;
6506 	case CHAN_WIDTH_160:
6507 	case CHAN_WIDTH_80P80:
6508 	case CHAN_WIDTH_165:
6509 		for (j = 1; j < SUB160BW; j++) {
6510 			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
6511 				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
6512 		}
6513 		break;
6514 	case CHAN_WIDTH_160P160:
6515 	case CHAN_WIDTH_320:
6516 		for (j = 1; j < SUB320BW; j++) {
6517 			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
6518 				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
6519 		}
6520 		break;
6521 	default:
6522 		dp_cdp_err("Invalid bandwidth %u", conv_params->curr_bw);
6523 		return QDF_STATUS_E_INVAL;
6524 	}
6525 	return QDF_STATUS_SUCCESS;
6526 }
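/*
 * Worked example (illustrative numbers): for CHAN_WIDTH_40, where
 * SUB40BW is presumably 2 for the two 20 MHz subbands,
 * nf_hw_dbm[chain][0] = -88 and nf_hw_dbm[chain][1] = -92 leave
 * *min_nf = -92 after the loop.
 */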
6527 
6528 /**
6529  * dp_mon_pdev_params_rssi_dbm_conv() - set RSSI to dBm conversion
6530  *                                      params in the monitor pdev.
6531  * @cdp_soc: dp soc handle.
6532  * @params: cdp_rssi_db2dbm_param_dp structure value.
6533  *
6534  * Return: QDF_STATUS_SUCCESS if value set successfully
6535  *         QDF_STATUS_E_INVAL on error
6536  */
6537 QDF_STATUS
6538 dp_mon_pdev_params_rssi_dbm_conv(struct cdp_soc_t *cdp_soc,
6539 				 struct cdp_rssi_db2dbm_param_dp *params)
6540 {
6541 	struct cdp_rssi_db2dbm_param_dp *dp_rssi_params = params;
6542 	uint8_t pdev_id = params->pdev_id;
6543 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6544 	struct dp_pdev *pdev =
6545 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
6546 	struct dp_mon_pdev *mon_pdev;
6547 	struct cdp_rssi_temp_off_param_dp temp_off_param;
6548 	struct cdp_rssi_dbm_conv_param_dp conv_params;
6549 	int8_t min_nf = 0;
6550 	int i;
6551 
6552 	if (!soc->features.rssi_dbm_conv_support) {
6553 		dp_cdp_err("rssi dbm conversion support is false");
6554 		return QDF_STATUS_E_INVAL;
6555 	}
6556 	if (!pdev || !pdev->monitor_pdev) {
6557 		dp_cdp_err("Invalid pdev_id %u", pdev_id);
6558 		return QDF_STATUS_E_FAILURE;
6559 	}
6560 
6561 	mon_pdev = pdev->monitor_pdev;
6562 	mon_pdev->rssi_dbm_conv_support =
6563 				soc->features.rssi_dbm_conv_support;
6564 
6565 	if (dp_rssi_params->rssi_temp_off_present) {
6566 		temp_off_param = dp_rssi_params->temp_off_param;
6567 		mon_pdev->rssi_offsets.rssi_temp_offset =
6568 				temp_off_param.rssi_temp_offset;
6569 	}
6570 	if (dp_rssi_params->rssi_dbm_info_present) {
6571 		conv_params = dp_rssi_params->rssi_dbm_param;
6572 		for (i = 0; i < CDP_MAX_NUM_ANTENNA; i++) {
6573 			if (!(conv_params.curr_rx_chainmask & (0x01 << i)))
6574 				continue;
6575 
6576 			if (QDF_STATUS_E_INVAL ==
6577 			    dp_mon_compute_min_nf(&conv_params,
6578 						  &min_nf, i))
6579 				return QDF_STATUS_E_INVAL;
6580 		}
6581 		mon_pdev->rssi_offsets.xlna_bypass_offset =
6582 					conv_params.xlna_bypass_offset;
6583 		mon_pdev->rssi_offsets.xlna_bypass_threshold =
6584 					conv_params.xlna_bypass_threshold;
6585 		mon_pdev->rssi_offsets.xbar_config = conv_params.xbar_config;
6586 		mon_pdev->rssi_offsets.min_nf_dbm = min_nf;
6587 		mon_pdev->rssi_offsets.rssi_offset =
6588 					mon_pdev->rssi_offsets.min_nf_dbm +
6589 				     mon_pdev->rssi_offsets.rssi_temp_offset;
6590 	}
6591 	return QDF_STATUS_SUCCESS;
6592 }
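/*
 * Example of the resulting offset (illustrative values): a computed
 * min_nf of -92 dBm with an rssi_temp_offset of 2 yields
 * rssi_offsets.rssi_offset = -90, which the RX monitor status path can
 * then apply when converting reported RSSI from dB to dBm.
 */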
6593 #endif
6594 
6595 void dp_mon_intr_ops_deregister(struct dp_soc *soc)
6596 {
6597 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
6598 
6599 	mon_soc->mon_rx_process = NULL;
6600 	dp_mon_ppdu_stats_handler_deregister(mon_soc);
6601 }
6602 
6603 void dp_mon_feature_ops_deregister(struct dp_soc *soc)
6604 {
6605 	struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);
6606 
6607 	if (!mon_ops) {
6608 		dp_err("mon_ops is NULL");
6609 		return;
6610 	}
6611 
6612 	mon_ops->mon_config_debug_sniffer = NULL;
6613 	mon_ops->mon_peer_tx_init = NULL;
6614 	mon_ops->mon_peer_tx_cleanup = NULL;
6615 	mon_ops->mon_htt_ppdu_stats_attach = NULL;
6616 	mon_ops->mon_htt_ppdu_stats_detach = NULL;
6617 	mon_ops->mon_print_pdev_rx_mon_stats = NULL;
6618 	mon_ops->mon_set_bsscolor = NULL;
6619 	mon_ops->mon_pdev_get_filter_ucast_data = NULL;
6620 	mon_ops->mon_pdev_get_filter_mcast_data = NULL;
6621 	mon_ops->mon_pdev_get_filter_non_data = NULL;
6622 	mon_ops->mon_neighbour_peer_add_ast = NULL;
6623 #ifdef WLAN_TX_PKT_CAPTURE_ENH
6624 	mon_ops->mon_peer_tid_peer_id_update = NULL;
6625 	mon_ops->mon_tx_ppdu_stats_attach = NULL;
6626 	mon_ops->mon_tx_ppdu_stats_detach = NULL;
6627 	mon_ops->mon_tx_capture_debugfs_init = NULL;
6628 	mon_ops->mon_tx_add_to_comp_queue = NULL;
6629 	mon_ops->mon_peer_tx_capture_filter_check = NULL;
6630 	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
6631 	mon_ops->mon_config_enh_tx_capture = NULL;
6632 #endif
6633 #ifdef WLAN_RX_PKT_CAPTURE_ENH
6634 	mon_ops->mon_config_enh_rx_capture = NULL;
6635 #endif
6636 #ifdef QCA_SUPPORT_BPR
6637 	mon_ops->mon_set_bpr_enable = NULL;
6638 #endif
6639 #ifdef ATH_SUPPORT_NAC
6640 	mon_ops->mon_set_filter_neigh_peers = NULL;
6641 #endif
6642 #ifdef WLAN_ATF_ENABLE
6643 	mon_ops->mon_set_atf_stats_enable = NULL;
6644 #endif
6645 #ifdef FEATURE_NAC_RSSI
6646 	mon_ops->mon_filter_neighbour_peer = NULL;
6647 #endif
6648 #ifdef QCA_MCOPY_SUPPORT
6649 	mon_ops->mon_filter_setup_mcopy_mode = NULL;
6650 	mon_ops->mon_filter_reset_mcopy_mode = NULL;
6651 	mon_ops->mon_mcopy_check_deliver = NULL;
6652 #endif
6653 #ifdef QCA_ENHANCED_STATS_SUPPORT
6654 	mon_ops->mon_filter_setup_enhanced_stats = NULL;
6655 	mon_ops->mon_tx_enable_enhanced_stats = NULL;
6656 	mon_ops->mon_tx_disable_enhanced_stats = NULL;
6657 	mon_ops->mon_ppdu_desc_deliver = NULL;
6658 	mon_ops->mon_ppdu_desc_notify = NULL;
6659 	mon_ops->mon_ppdu_stats_feat_enable_check = NULL;
6660 #ifdef WLAN_FEATURE_11BE
6661 	mon_ops->mon_tx_stats_update = NULL;
6662 #endif
6663 #endif
6664 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
6665 	mon_ops->mon_filter_setup_smart_monitor = NULL;
6666 #endif
6667 	mon_ops->mon_filter_set_reset_mon_mac_filter = NULL;
6668 #ifdef WLAN_RX_PKT_CAPTURE_ENH
6669 	mon_ops->mon_filter_setup_rx_enh_capture = NULL;
6670 #endif
6671 #ifdef WDI_EVENT_ENABLE
6672 	mon_ops->mon_set_pktlog_wifi3 = NULL;
6673 	mon_ops->mon_filter_setup_rx_pkt_log_full = NULL;
6674 	mon_ops->mon_filter_reset_rx_pkt_log_full = NULL;
6675 	mon_ops->mon_filter_setup_rx_pkt_log_lite = NULL;
6676 	mon_ops->mon_filter_reset_rx_pkt_log_lite = NULL;
6677 	mon_ops->mon_filter_setup_rx_pkt_log_cbf = NULL;
6678 	mon_ops->mon_filter_reset_rx_pkt_log_cbf = NULL;
6679 #ifdef BE_PKTLOG_SUPPORT
6680 	mon_ops->mon_filter_setup_pktlog_hybrid = NULL;
6681 	mon_ops->mon_filter_reset_pktlog_hybrid = NULL;
6682 #endif
6683 #endif
6684 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
6685 	mon_ops->mon_pktlogmod_exit = NULL;
6686 #endif
6687 	mon_ops->rx_hdr_length_set = NULL;
6688 	mon_ops->rx_packet_length_set = NULL;
6689 	mon_ops->rx_wmask_subscribe = NULL;
6690 	mon_ops->rx_pkt_tlv_offset = NULL;
6691 	mon_ops->rx_enable_mpdu_logging = NULL;
6692 	mon_ops->rx_enable_fpmo = NULL;
6693 	mon_ops->mon_neighbour_peers_detach = NULL;
6694 	mon_ops->mon_vdev_set_monitor_mode_buf_rings = NULL;
6695 	mon_ops->mon_vdev_set_monitor_mode_rings = NULL;
6696 #ifdef QCA_ENHANCED_STATS_SUPPORT
6697 	mon_ops->mon_rx_stats_update = NULL;
6698 	mon_ops->mon_rx_populate_ppdu_usr_info = NULL;
6699 	mon_ops->mon_rx_populate_ppdu_info = NULL;
6700 #endif
6701 }
6702 
6703 QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc)
6704 {
6705 	struct dp_mon_soc *mon_soc;
6706 
6707 	if (!soc) {
6708 		dp_mon_err("dp_soc is NULL");
6709 		return QDF_STATUS_E_FAILURE;
6710 	}
6711 
6712 	mon_soc = (struct dp_mon_soc *)qdf_mem_malloc(sizeof(*mon_soc));
6713 	if (!mon_soc) {
6714 		dp_mon_err("%pK: mem allocation failed", soc);
6715 		return QDF_STATUS_E_NOMEM;
6716 	}
6717 	/* register monitor ops */
6718 	soc->monitor_soc = mon_soc;
6719 	dp_mon_ops_register(soc);
6720 	dp_mon_register_intr_ops(soc);
6721 
6722 	dp_mon_cdp_ops_register(soc);
6723 	dp_mon_register_feature_ops(soc);
6724 	return QDF_STATUS_SUCCESS;
6725 }
6726 
6727 QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc)
6728 {
6729 	struct dp_mon_soc *mon_soc;
6730 
6731 	if (!soc) {
6732 		dp_mon_err("dp_soc is NULL");
6733 		return QDF_STATUS_E_FAILURE;
6734 	}
6735 
6736 	mon_soc = soc->monitor_soc;
6737 	dp_monitor_vdev_timer_deinit(soc);
6738 	dp_mon_cdp_ops_deregister(soc);
6739 	soc->monitor_soc = NULL;
6740 	qdf_mem_free(mon_soc);
6741 	return QDF_STATUS_SUCCESS;
6742 }
6743