1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 #include <dp_types.h>
17 #include "dp_rx.h"
18 #include "dp_peer.h"
19 #include <dp_htt.h>
20 #include <dp_mon_filter.h>
21 #include <dp_mon.h>
22 #include <dp_rx_mon.h>
23 #include "htt_ppdu_stats.h"
24 #include "dp_cal_client_api.h"
25 #if defined(DP_CON_MON)
26 #ifndef REMOVE_PKT_LOG
27 #include <pktlog_ac_api.h>
28 #include <pktlog_ac.h>
29 #endif
30 #endif
31 #ifdef FEATURE_PERPKT_INFO
32 #include "dp_ratetable.h"
33 #endif
34 
35 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
36 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
37 #define HTT_SHIFT_UPPER_TIMESTAMP 32
38 #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
39 #define DP_INTR_POLL_TIMER_MS	5
40 #define INVALID_FREE_BUFF 0xffffffff
41 
42 #ifdef WLAN_RX_PKT_CAPTURE_ENH
43 #include "dp_rx_mon_feature.h"
44 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
45 
46 #ifdef WLAN_TX_PKT_CAPTURE_ENH
47 #include "dp_tx_capture.h"
48 #endif
49 
50 #if defined(QCA_MONITOR_PKT_SUPPORT) || defined(QCA_MCOPY_SUPPORT)
51 static QDF_STATUS dp_vdev_set_monitor_mode_rings(struct dp_pdev *pdev,
52 						 uint8_t delayed_replenish);
53 #endif
54 
55 #ifndef WLAN_TX_PKT_CAPTURE_ENH
56 static inline void
57 dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
58 					   void *data,
59 					   uint32_t ppdu_id,
60 					   uint32_t size)
61 {
62 }
63 #endif
64 
65 #if !defined(DISABLE_MON_CONFIG)
66 
67 #ifdef QCA_MONITOR_PKT_SUPPORT
68 static void dp_mon_dest_rings_deinit(struct dp_pdev *pdev, int lmac_id)
69 {
70 	struct dp_soc *soc = pdev->soc;
71 
72 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
73 		dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id],
74 			       RXDMA_MONITOR_BUF, 0);
75 		dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
76 			       RXDMA_MONITOR_DST, 0);
77 		dp_srng_deinit(soc, &soc->rxdma_mon_desc_ring[lmac_id],
78 			       RXDMA_MONITOR_DESC, 0);
79 	}
80 }
81 #else
82 static void dp_mon_dest_rings_deinit(struct dp_pdev *pdev, int lmac_id)
83 {
84 }
85 #endif
86 
87 /**
88  * dp_mon_rings_deinit() - Deinitialize monitor rings
89  * @pdev: DP pdev handle
90  *
91  * Return: None
92  *
93  */
94 static void dp_mon_rings_deinit(struct dp_pdev *pdev)
95 {
96 	int mac_id = 0;
97 	struct dp_soc *soc = pdev->soc;
98 	struct dp_mon_soc *mon_soc;
99 
100 	mon_soc = soc->monitor_soc;
101 
102 	if (!mon_soc) {
103 		dp_mon_err("%pK: monitor SOC not initialized",
104 			   soc);
105 		return;
106 	}
107 
108 	if (mon_soc->monitor_mode_v2)
109 		return;
110 
111 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
112 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
113 							 pdev->pdev_id);
114 
115 		dp_srng_deinit(soc, &soc->rxdma_mon_status_ring[lmac_id],
116 			       RXDMA_MONITOR_STATUS, 0);
117 
118 		dp_mon_dest_rings_deinit(pdev, lmac_id);
119 	}
120 }
121 
122 #ifdef QCA_MONITOR_PKT_SUPPORT
123 static void dp_mon_dest_rings_free(struct dp_pdev *pdev, int lmac_id)
124 {
125 	struct dp_soc *soc = pdev->soc;
126 
127 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
128 		dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]);
129 		dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
130 		dp_srng_free(soc, &soc->rxdma_mon_desc_ring[lmac_id]);
131 	}
132 }
133 #else
134 static void dp_mon_dest_rings_free(struct dp_pdev *pdev, int lmac_id)
135 {
136 }
137 #endif
138 
139 /**
140  * dp_mon_rings_free() - free monitor rings
141  * @pdev: Datapath pdev handle
142  *
143  * Return: None
144  *
145  */
146 static void dp_mon_rings_free(struct dp_pdev *pdev)
147 {
148 	int mac_id = 0;
149 	struct dp_soc *soc = pdev->soc;
150 	struct dp_mon_soc *mon_soc;
151 
152 	mon_soc = soc->monitor_soc;
153 
154 	if (!mon_soc) {
155 		dp_mon_err("%pK: monitor SOC not initialized",
156 			   soc);
157 		return;
158 	}
159 
160 	if (mon_soc->monitor_mode_v2)
161 		return;
162 
163 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
164 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
165 							 pdev->pdev_id);
166 
167 		dp_srng_free(soc, &soc->rxdma_mon_status_ring[lmac_id]);
168 
169 		dp_mon_dest_rings_free(pdev, lmac_id);
170 	}
171 }
172 
173 #ifdef QCA_MONITOR_PKT_SUPPORT
174 static
175 QDF_STATUS dp_mon_dest_rings_init(struct dp_pdev *pdev, int lmac_id)
176 {
177 	struct dp_soc *soc = pdev->soc;
178 
179 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
180 		if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[lmac_id],
181 				 RXDMA_MONITOR_BUF, 0, lmac_id)) {
182 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_buf_ring ",
183 				   soc);
184 			goto fail1;
185 		}
186 
187 		if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
188 				 RXDMA_MONITOR_DST, 0, lmac_id)) {
189 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
190 			goto fail1;
191 		}
192 
193 		if (dp_srng_init(soc, &soc->rxdma_mon_desc_ring[lmac_id],
194 				 RXDMA_MONITOR_DESC, 0, lmac_id)) {
195 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_desc_ring",
196 				   soc);
197 			goto fail1;
198 		}
199 	}
200 	return QDF_STATUS_SUCCESS;
201 
202 fail1:
203 	return QDF_STATUS_E_NOMEM;
204 }
205 #else
206 static
207 QDF_STATUS dp_mon_dest_rings_init(struct dp_pdev *pdev, int lmac_id)
208 {
209 	return QDF_STATUS_SUCCESS;
210 }
211 #endif
212 
213 /**
214  * dp_mon_rings_init() - Initialize monitor srng rings
215  * @soc: Datapath soc handle
216  * @pdev: Datapath pdev handle
217  *
218  * return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
219  */
220 static
221 QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
222 {
223 	int mac_id = 0;
224 	struct dp_mon_soc *mon_soc;
225 
226 	mon_soc = soc->monitor_soc;
227 
228 	if (!mon_soc) {
229 		dp_mon_err("%pK: monitor SOC not initialized",
230 			   soc);
231 		return QDF_STATUS_SUCCESS;
232 	}
233 
234 	if (mon_soc->monitor_mode_v2)
235 		return QDF_STATUS_SUCCESS;
236 
237 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
238 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
239 							 pdev->pdev_id);
240 
241 		if (dp_srng_init(soc, &soc->rxdma_mon_status_ring[lmac_id],
242 				 RXDMA_MONITOR_STATUS, 0, lmac_id)) {
243 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_status_ring",
244 				   soc);
245 			goto fail1;
246 		}
247 
248 		if (dp_mon_dest_rings_init(pdev, lmac_id))
249 			goto fail1;
250 	}
251 	return QDF_STATUS_SUCCESS;
252 
253 fail1:
254 	dp_mon_rings_deinit(pdev);
255 	return QDF_STATUS_E_NOMEM;
256 }
257 
258 #ifdef QCA_MONITOR_PKT_SUPPORT
259 static
260 QDF_STATUS dp_mon_dest_rings_alloc(struct dp_pdev *pdev, int lmac_id)
261 {
262 	int entries;
263 	struct dp_soc *soc = pdev->soc;
264 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx = pdev->wlan_cfg_ctx;
265 
266 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
267 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
268 		if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id],
269 				  RXDMA_MONITOR_BUF, entries, 0)) {
270 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_buf_ring ",
271 				   soc);
272 			goto fail1;
273 		}
274 		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
275 		if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
276 				  RXDMA_MONITOR_DST, entries, 0)) {
277 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
278 			goto fail1;
279 		}
280 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
281 		if (dp_srng_alloc(soc, &soc->rxdma_mon_desc_ring[lmac_id],
282 				  RXDMA_MONITOR_DESC, entries, 0)) {
283 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_desc_ring",
284 				   soc);
285 			goto fail1;
286 		}
287 	}
288 	return QDF_STATUS_SUCCESS;
289 
290 fail1:
291 	return QDF_STATUS_E_NOMEM;
292 }
293 #else
294 static
295 QDF_STATUS dp_mon_dest_rings_alloc(struct dp_pdev *pdev, int lmac_id)
296 {
297 	return QDF_STATUS_SUCCESS;
298 }
299 #endif
300 
301 /**
302  * dp_mon_rings_alloc() - Allocate memory for monitor srng rings
303  * @soc: Datapath soc handle
304  * @pdev: Datapath pdev handle
305  *
306  * return: QDF_STATUS_SUCCESS on success
307  *	   QDF_STATUS_E_NOMEM on failure
308  */
309 static
310 QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
311 {
312 	int mac_id = 0;
313 	int entries;
314 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
315 	struct dp_mon_soc *mon_soc;
316 
317 	mon_soc = soc->monitor_soc;
318 
319 	if (!mon_soc) {
320 		dp_mon_err("%pK: monitor SOC not initialized",
321 			   soc);
322 		return QDF_STATUS_SUCCESS;
323 	}
324 
325 	if (mon_soc->monitor_mode_v2)
326 		return QDF_STATUS_SUCCESS;
327 
328 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
329 
330 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
331 		int lmac_id =
332 		dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
333 		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
334 		if (dp_srng_alloc(soc, &soc->rxdma_mon_status_ring[lmac_id],
335 				  RXDMA_MONITOR_STATUS, entries, 0)) {
336 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_status_ring",
337 				   soc);
338 			goto fail1;
339 		}
340 
341 		if (dp_mon_dest_rings_alloc(pdev, lmac_id))
342 			goto fail1;
343 	}
344 	return QDF_STATUS_SUCCESS;
345 
346 fail1:
347 	dp_mon_rings_free(pdev);
348 	return QDF_STATUS_E_NOMEM;
349 }
350 #else
351 static void dp_mon_rings_free(struct dp_pdev *pdev)
352 {
353 }
354 
355 static void dp_mon_rings_deinit(struct dp_pdev *pdev)
356 {
357 }
358 
359 static
360 QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
361 {
362 	return QDF_STATUS_SUCCESS;
363 }
364 
365 static
366 QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
367 {
368 	return QDF_STATUS_SUCCESS;
369 }
370 #endif
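/*
 * Usage sketch (illustrative, not part of the driver): the monitor ring
 * lifecycle pairs dp_mon_rings_alloc()/dp_mon_rings_init() on attach
 * with dp_mon_rings_deinit()/dp_mon_rings_free() on detach. A minimal
 * sketch, assuming a valid soc/pdev pair:
 *
 *	if (dp_mon_rings_alloc(soc, pdev))
 *		return QDF_STATUS_E_NOMEM;
 *	if (dp_mon_rings_init(soc, pdev)) {
 *		dp_mon_rings_free(pdev);
 *		return QDF_STATUS_E_NOMEM;
 *	}
 *	...
 *	dp_mon_rings_deinit(pdev);
 *	dp_mon_rings_free(pdev);
 */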
371 #ifdef QCA_SUPPORT_FULL_MON
372 static inline QDF_STATUS
373 dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
374 			uint8_t val)
375 {
376 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
377 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
378 
379 	mon_soc->full_mon_mode = val;
380 	dp_cdp_err("Configure full monitor mode val: %d ", val);
381 
382 	return QDF_STATUS_SUCCESS;
383 }
384 #else
385 static inline QDF_STATUS
386 dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
387 			uint8_t val)
388 {
389 	return 0;
390 }
391 #endif
392 
393 #ifdef QCA_SUPPORT_FULL_MON
394 static inline QDF_STATUS
395 dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
396 {
397 	struct dp_soc *soc = pdev->soc;
398 	QDF_STATUS status = QDF_STATUS_SUCCESS;
399 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
400 
401 	if (!mon_soc->full_mon_mode)
402 		return QDF_STATUS_SUCCESS;
403 
404 	if ((htt_h2t_full_mon_cfg(soc->htt_handle,
405 				  pdev->pdev_id,
406 				  val)) != QDF_STATUS_SUCCESS) {
407 		status = QDF_STATUS_E_FAILURE;
408 	}
409 
410 	return status;
411 }
412 #else
413 static inline QDF_STATUS
414 dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
415 {
416 	return QDF_STATUS_SUCCESS;
417 }
418 #endif
419 
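/*
 * Illustrative flow (hedged): full monitor mode is configured in two
 * steps. dp_config_full_mon_mode() latches the soc-level flag from the
 * control path, and dp_soc_config_full_mon_mode() pushes the per-pdev
 * enable/disable to firmware over HTT. A sketch, assuming valid handles:
 *
 *	dp_config_full_mon_mode((struct cdp_soc_t *)soc, 1);
 *	if (dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE) !=
 *	    QDF_STATUS_SUCCESS)
 *		dp_mon_err("full monitor HTT config failed");
 */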
420 #ifdef QCA_MCOPY_SUPPORT
421 static inline void
422 dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
423 {
424 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
425 
426 	mon_pdev->mcopy_mode = M_COPY_DISABLED;
427 	mon_pdev->monitor_configured = false;
428 	mon_pdev->mvdev = NULL;
429 }
430 
431 static inline void
432 dp_reset_mcopy_mode(struct dp_pdev *pdev)
433 {
434 	QDF_STATUS status = QDF_STATUS_SUCCESS;
435 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
436 
437 	if (mon_pdev->mcopy_mode) {
438 		dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_DISABLE);
439 		dp_pdev_disable_mcopy_code(pdev);
440 		dp_mon_filter_reset_mcopy_mode(pdev);
441 		status = dp_mon_filter_update(pdev);
442 		if (status != QDF_STATUS_SUCCESS) {
443 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
444 				  FL("Failed to reset M copy mode filters"));
445 		}
446 		mon_pdev->monitor_configured = false;
447 	}
448 }
449 
450 static QDF_STATUS
451 dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
452 {
453 	QDF_STATUS status = QDF_STATUS_SUCCESS;
454 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
455 
456 	if (mon_pdev->mvdev)
457 		return QDF_STATUS_E_RESOURCES;
458 
459 	mon_pdev->mcopy_mode = val;
460 	mon_pdev->tx_sniffer_enable = 0;
461 	mon_pdev->monitor_configured = true;
462 
463 	if (!wlan_cfg_is_delay_mon_replenish(pdev->soc->wlan_cfg_ctx))
464 		dp_vdev_set_monitor_mode_rings(pdev, true);
465 
466 	/*
467 	 * Setup the M copy mode filter.
468 	 */
469 	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
470 	dp_mon_filter_setup_mcopy_mode(pdev);
471 	status = dp_mon_filter_update(pdev);
472 	if (status != QDF_STATUS_SUCCESS) {
473 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
474 			  FL("Failed to set M_copy mode filters"));
475 		dp_mon_filter_reset_mcopy_mode(pdev);
476 		dp_pdev_disable_mcopy_code(pdev);
477 		return status;
478 	}
479 
480 	if (!mon_pdev->pktlog_ppdu_stats)
481 		dp_h2t_cfg_stats_msg_send(pdev,
482 					  DP_PPDU_STATS_CFG_SNIFFER,
483 					  pdev->pdev_id);
484 
485 	return status;
486 }
487 #else
488 static inline void
489 dp_reset_mcopy_mode(struct dp_pdev *pdev)
490 {
491 }
492 
493 static inline QDF_STATUS
494 dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
495 {
496 	return QDF_STATUS_E_INVAL;
497 }
498 #endif /* QCA_MCOPY_SUPPORT */
499 
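/*
 * Usage sketch (illustrative): M-copy cannot coexist with a monitor
 * vdev, so dp_config_mcopy_mode() fails with QDF_STATUS_E_RESOURCES
 * while mvdev is set. A minimal enable/disable flow, assuming a valid
 * pdev (2 and 4 are the M-copy variants accepted by
 * dp_config_debug_sniffer() below):
 *
 *	if (dp_config_mcopy_mode(pdev, 2) != QDF_STATUS_SUCCESS)
 *		dp_mon_err("m-copy enable failed");
 *	...
 *	dp_reset_mcopy_mode(pdev);
 */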
500 /**
501  * dp_reset_monitor_mode() - Disable monitor mode
502  * @soc_hdl: Datapath soc handle
503  * @pdev_id: id of datapath PDEV handle
504  * @special_monitor: Flag to denote special (smart/lite) monitor mode
505  * Return: QDF_STATUS
506  */
507 QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
508 				 uint8_t pdev_id,
509 				 uint8_t special_monitor)
510 {
511 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
512 	struct dp_pdev *pdev =
513 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
514 						   pdev_id);
515 	QDF_STATUS status = QDF_STATUS_SUCCESS;
516 	struct dp_mon_pdev *mon_pdev;
517 
518 	if (!pdev)
519 		return QDF_STATUS_E_FAILURE;
520 
521 	mon_pdev = pdev->monitor_pdev;
522 
523 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
524 
525 	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_DISABLE);
526 	mon_pdev->mvdev = NULL;
527 	mon_pdev->monitor_configured = false;
528 
529 	/*
530 	 * Lite monitor mode, smart monitor mode and monitor
531 	 * mode uses this APIs to filter reset and mode disable
532 	 */
533 	if (mon_pdev->mcopy_mode) {
534 #if defined(QCA_MCOPY_SUPPORT)
535 		dp_pdev_disable_mcopy_code(pdev);
536 		dp_mon_filter_reset_mcopy_mode(pdev);
537 #endif /* QCA_MCOPY_SUPPORT */
538 	} else if (special_monitor) {
539 #if defined(ATH_SUPPORT_NAC)
540 		dp_mon_filter_reset_smart_monitor(pdev);
541 #endif /* ATH_SUPPORT_NAC */
542 	} else {
543 		dp_mon_filter_reset_mon_mode(pdev);
544 	}
545 
546 	status = dp_mon_filter_update(pdev);
547 	if (status != QDF_STATUS_SUCCESS) {
548 		dp_rx_mon_dest_err("%pK: Failed to reset monitor filters",
549 				   soc);
550 	}
551 
552 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
553 	return QDF_STATUS_SUCCESS;
554 }
555 
556 /**
557  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
557  * @soc_hdl: soc handle
559  * @pdev_id: id of Datapath PDEV handle
560  * @filter_val: Flag to select Filter for monitor mode
561  * Return: 0 on success, not 0 on failure
562  */
563 #ifdef QCA_ADVANCE_MON_FILTER_SUPPORT
564 static QDF_STATUS
565 dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
566 				   struct cdp_monitor_filter *filter_val)
567 {
568 	/* Many monitor VAPs can exist in a system but only one can be up at
569 	 * any time
570 	 */
571 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
572 	struct dp_vdev *vdev;
573 	struct dp_pdev *pdev =
574 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
575 						   pdev_id);
576 	QDF_STATUS status = QDF_STATUS_SUCCESS;
577 	struct dp_mon_pdev *mon_pdev;
578 
579 	if (!pdev || !pdev->monitor_pdev)
580 		return QDF_STATUS_E_FAILURE;
581 
582 	mon_pdev = pdev->monitor_pdev;
583 	vdev = mon_pdev->mvdev;
584 
585 	if (!vdev)
586 		return QDF_STATUS_E_FAILURE;
587 
588 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
589 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
590 		  pdev, pdev_id, soc, vdev);
591 
592 	/* Check if current pdev's monitor_vdev exists */
593 	if (!mon_pdev->mvdev) {
594 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
595 			  "vdev=%pK", vdev);
596 		qdf_assert(vdev);
597 	}
598 
599 	/* update filter mode, type in pdev structure */
600 	mon_pdev->mon_filter_mode = filter_val->mode;
601 	mon_pdev->fp_mgmt_filter = filter_val->fp_mgmt;
602 	mon_pdev->fp_ctrl_filter = filter_val->fp_ctrl;
603 	mon_pdev->fp_data_filter = filter_val->fp_data;
604 	mon_pdev->mo_mgmt_filter = filter_val->mo_mgmt;
605 	mon_pdev->mo_ctrl_filter = filter_val->mo_ctrl;
606 	mon_pdev->mo_data_filter = filter_val->mo_data;
607 
608 	dp_mon_filter_setup_mon_mode(pdev);
609 	status = dp_mon_filter_update(pdev);
610 	if (status != QDF_STATUS_SUCCESS) {
611 		dp_rx_mon_dest_err("%pK: Failed to set filter for adv mon mode",
612 				   soc);
613 		dp_mon_filter_reset_mon_mode(pdev);
614 	}
615 
616 	return status;
617 }
618 #else
619 static QDF_STATUS
620 dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
621 				   struct cdp_monitor_filter *filter_val)
622 {
623 	return QDF_STATUS_E_INVAL;
624 }
625 #endif
626 
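/*
 * Example (hedged): callers program the advanced filter by filling
 * struct cdp_monitor_filter before the call. The field names follow the
 * usage in dp_pdev_set_advance_monitor_filter() above; the values here
 * are arbitrary placeholders, not recommended settings:
 *
 *	struct cdp_monitor_filter filter = {0};
 *
 *	filter.mode = 1;
 *	filter.fp_mgmt = 1;
 *	filter.mo_data = 1;
 *	dp_pdev_set_advance_monitor_filter(soc_hdl, pdev_id, &filter);
 */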
627 /**
628  * dp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture
629  * @cdp_soc: data path soc handle
630  * @pdev_id: pdev_id
631  * @nbuf: Management frame buffer
632  */
633 static QDF_STATUS
634 dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
635 {
636 	struct dp_pdev *pdev =
637 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
638 						   pdev_id);
639 
640 	if (!pdev)
641 		return QDF_STATUS_E_FAILURE;
642 
643 	dp_deliver_mgmt_frm(pdev, nbuf);
644 
645 	return QDF_STATUS_SUCCESS;
646 }
647 
648 #ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
649 /**
650  * dp_scan_spcl_vap_stats_attach() - alloc spcl vap stats struct
651  * @mon_vdev: Datapath mon VDEV handle
652  *
653  * Return: 0 on success, not 0 on failure
654  */
655 static inline QDF_STATUS
656 dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
657 {
658 	mon_vdev->scan_spcl_vap_stats =
659 		qdf_mem_malloc(sizeof(struct cdp_scan_spcl_vap_stats));
660 
661 	if (!mon_vdev->scan_spcl_vap_stats) {
662 		dp_mon_err("scan spcl vap stats attach fail");
663 		return QDF_STATUS_E_NOMEM;
664 	}
665 
666 	return QDF_STATUS_SUCCESS;
667 }
668 
669 /**
670  * dp_scan_spcl_vap_stats_detach() - free spcl vap stats struct
671  * @mon_vdev: Datapath mon VDEV handle
672  *
673  * Return: void
674  */
675 static inline void
676 dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
677 {
678 	if (mon_vdev->scan_spcl_vap_stats) {
679 		qdf_mem_free(mon_vdev->scan_spcl_vap_stats);
680 		mon_vdev->scan_spcl_vap_stats = NULL;
681 	}
682 }
683 
684 /**
685  * dp_reset_scan_spcl_vap_stats() - reset spcl vap rx stats
686  * @vdev: Datapath VDEV handle
687  *
688  * Return: void
689  */
690 static inline void
691 dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
692 {
693 	struct dp_mon_vdev *mon_vdev;
694 	struct dp_mon_pdev *mon_pdev;
695 
696 	mon_pdev = vdev->pdev->monitor_pdev;
697 	if (!mon_pdev || !mon_pdev->reset_scan_spcl_vap_stats_enable)
698 		return;
699 
700 	mon_vdev = vdev->monitor_vdev;
701 	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats)
702 		return;
703 
704 	qdf_mem_zero(mon_vdev->scan_spcl_vap_stats,
705 		     sizeof(struct cdp_scan_spcl_vap_stats));
706 }
707 
708 /**
709  * dp_get_scan_spcl_vap_stats() - get spcl vap rx stats
710  * @soc_hdl: Datapath soc handle
711  * @vdev_id: vdev id
712  * @stats: structure to hold spcl vap stats
713  *
714  * Return: 0 on success, not 0 on failure
715  */
716 static QDF_STATUS
717 dp_get_scan_spcl_vap_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
718 			   struct cdp_scan_spcl_vap_stats *stats)
719 {
720 	struct dp_mon_vdev *mon_vdev = NULL;
721 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
722 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
723 						     DP_MOD_ID_CDP);
724 
725 	if (!vdev || !stats) {
726 		if (vdev)
727 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
728 		return QDF_STATUS_E_INVAL;
729 	}
730 
731 	mon_vdev = vdev->monitor_vdev;
732 	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats) {
733 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
734 		return QDF_STATUS_E_INVAL;
735 	}
736 
737 	qdf_mem_copy(stats, mon_vdev->scan_spcl_vap_stats,
738 		     sizeof(struct cdp_scan_spcl_vap_stats));
739 
740 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
741 	return QDF_STATUS_SUCCESS;
742 }
743 #else
744 static inline void
745 dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
746 {
747 }
748 
749 static inline QDF_STATUS
750 dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
751 {
752 	return QDF_STATUS_SUCCESS;
753 }
754 
755 static inline void
756 dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
757 {
758 }
759 #endif
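/*
 * Example (hedged): fetching the special-vap scan stats through the cdp
 * layer; the caller owns the output buffer. A sketch, assuming a valid
 * soc_hdl/vdev_id:
 *
 *	struct cdp_scan_spcl_vap_stats stats = {0};
 *
 *	if (dp_get_scan_spcl_vap_stats(soc_hdl, vdev_id, &stats) ==
 *	    QDF_STATUS_SUCCESS)
 *		dp_mon_info("spcl vap stats fetched");
 */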
760 
761 /**
762  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
763  * @dp_soc: Datapath soc handle
764  * @vdev_id: id of Datapath VDEV handle
765  * @special_monitor: Flag to denote smart/lite monitor mode
766  * Return: 0 on success, not 0 on failure
767  */
768 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *dp_soc,
769 					   uint8_t vdev_id,
770 					   uint8_t special_monitor)
771 {
772 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
773 	struct dp_pdev *pdev;
774 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
775 						     DP_MOD_ID_CDP);
776 	QDF_STATUS status = QDF_STATUS_SUCCESS;
777 	struct dp_mon_pdev *mon_pdev;
778 
779 	if (!vdev)
780 		return QDF_STATUS_E_FAILURE;
781 
782 	pdev = vdev->pdev;
783 
784 	if (!pdev || !pdev->monitor_pdev)
785 		return QDF_STATUS_E_FAILURE;
786 
787 	mon_pdev = pdev->monitor_pdev;
788 
789 	mon_pdev->mvdev = vdev;
790 
791 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
792 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
793 		  pdev, pdev->pdev_id, pdev->soc, vdev);
794 
795 	/*
796 	 * do not configure monitor buf ring and filter for smart and
797 	 * lite monitor
798 	 * for smart monitor filters are added along with first NAC
799 	 * for lite monitor required configuration done through
800 	 * dp_set_pdev_param
801 	 */
802 
803 	if (special_monitor) {
804 		status = QDF_STATUS_SUCCESS;
805 		goto fail;
806 	}
807 
808 	if (mon_pdev->scan_spcl_vap_configured)
809 		dp_reset_scan_spcl_vap_stats(vdev);
810 
811 	/* Check if current pdev's monitor_vdev exists */
812 	if (mon_pdev->monitor_configured) {
813 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
814 			  "monitor vap already created vdev=%pK\n", vdev);
815 		status = QDF_STATUS_E_RESOURCES;
816 		goto fail;
817 	}
818 
819 	mon_pdev->monitor_configured = true;
820 
821 	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
822 	dp_mon_filter_setup_mon_mode(pdev);
823 	status = dp_mon_filter_update(pdev);
824 	if (status != QDF_STATUS_SUCCESS) {
825 		dp_cdp_err("%pK: Failed to reset monitor filters", soc);
826 		dp_mon_filter_reset_mon_mode(pdev);
827 		mon_pdev->monitor_configured = false;
828 		mon_pdev->mvdev = NULL;
829 	}
830 
831 fail:
832 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
833 	return status;
834 }
835 
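/*
 * Illustrative call flow (hedged): classic monitor-mode bring-up walks
 * dp_vdev_set_monitor_mode() -> dp_mon_filter_setup_mon_mode() ->
 * dp_mon_filter_update(), rolling back the filter and mvdev on failure.
 * A sketch, assuming a valid vdev_id (special_monitor = 0 selects the
 * full bring-up path):
 *
 *	if (dp_vdev_set_monitor_mode(dp_soc, vdev_id, 0) !=
 *	    QDF_STATUS_SUCCESS)
 *		dp_mon_err("monitor vdev bring-up failed");
 */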
836 #ifdef QCA_TX_CAPTURE_SUPPORT
837 static QDF_STATUS
838 dp_config_tx_capture_mode(struct dp_pdev *pdev)
839 {
840 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
841 
842 	mon_pdev->tx_sniffer_enable = 1;
843 	mon_pdev->monitor_configured = false;
844 
845 	if (!mon_pdev->pktlog_ppdu_stats)
846 		dp_h2t_cfg_stats_msg_send(pdev,
847 					  DP_PPDU_STATS_CFG_SNIFFER,
848 					  pdev->pdev_id);
849 
850 	return QDF_STATUS_SUCCESS;
851 }
852 #else
853 #ifdef QCA_MCOPY_SUPPORT
854 static QDF_STATUS
855 dp_config_tx_capture_mode(struct dp_pdev *pdev)
856 {
857 	return QDF_STATUS_E_INVAL;
858 }
859 #endif
860 #endif
861 
862 /*
863  * dp_config_debug_sniffer() - API to enable/disable debug sniffer
864  * @pdev: DP_PDEV handle
865  * @val: user provided value
866  *
867  * Return: 0 for success. nonzero for failure.
868  */
869 #if defined(QCA_MCOPY_SUPPORT) || defined(QCA_TX_CAPTURE_SUPPORT)
870 static QDF_STATUS
871 dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
872 {
873 	QDF_STATUS status = QDF_STATUS_SUCCESS;
874 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
875 
876 	/*
877 	 * Note: The mirror copy mode cannot co-exist with any other
878 	 * monitor modes. Hence disabling the filter for this mode will
879 	 * reset the monitor destination ring filters.
880 	 */
881 	dp_reset_mcopy_mode(pdev);
882 	switch (val) {
883 	case 0:
884 		mon_pdev->tx_sniffer_enable = 0;
885 		mon_pdev->monitor_configured = false;
886 
887 		/*
888 		 * We don't need to reset the Rx monitor status ring or call
889 		 * the API dp_ppdu_ring_reset() if all debug sniffer modes are
890 		 * disabled. The Rx monitor status ring will be disabled when
891 		 * the last mode using the monitor status ring gets disabled.
892 		 */
893 		if (!mon_pdev->pktlog_ppdu_stats &&
894 		    !mon_pdev->enhanced_stats_en &&
895 		    !mon_pdev->bpr_enable) {
896 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
897 		} else if (mon_pdev->enhanced_stats_en &&
898 			   !mon_pdev->bpr_enable) {
899 			dp_h2t_cfg_stats_msg_send(pdev,
900 						  DP_PPDU_STATS_CFG_ENH_STATS,
901 						  pdev->pdev_id);
902 		} else if (!mon_pdev->enhanced_stats_en &&
903 			   mon_pdev->bpr_enable) {
904 			dp_h2t_cfg_stats_msg_send(pdev,
905 						  DP_PPDU_STATS_CFG_BPR_ENH,
906 						  pdev->pdev_id);
907 		} else {
908 			dp_h2t_cfg_stats_msg_send(pdev,
909 						  DP_PPDU_STATS_CFG_BPR,
910 						  pdev->pdev_id);
911 		}
912 		break;
913 
914 	case 1:
915 		status = dp_config_tx_capture_mode(pdev);
916 		break;
917 	case 2:
918 	case 4:
919 		status = dp_config_mcopy_mode(pdev, val);
920 		break;
921 
922 	default:
923 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
924 			  "Invalid value, mode not supported");
925 		status = QDF_STATUS_E_INVAL;
926 		break;
927 	}
928 	return status;
929 }
930 #else
931 static QDF_STATUS
932 dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
933 {
934 	return QDF_STATUS_E_INVAL;
935 }
936 #endif
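/*
 * Value mapping implemented above: 0 disables tx sniffer/M-copy and
 * re-sends the applicable stats config, 1 enables tx capture, 2 and 4
 * select the M-copy variants. A hedged sketch of the usual entry point:
 *
 *	status = dp_config_debug_sniffer(pdev, 1);
 *	if (status != QDF_STATUS_SUCCESS)
 *		dp_mon_err("debug sniffer config failed");
 */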
937 
938 static void dp_flush_monitor_rings(struct dp_soc *soc)
939 {
940 	struct dp_pdev *pdev = soc->pdev_list[0];
941 	hal_soc_handle_t hal_soc = soc->hal_soc;
942 	uint32_t lmac_id;
943 	uint32_t hp, tp;
944 	int dp_intr_id;
945 	int budget;
946 	void *mon_dst_srng;
947 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
948 
949 	/* Reset monitor filters before reaping the ring*/
950 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
951 	dp_mon_filter_reset_mon_mode(pdev);
952 	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS)
953 		dp_mon_info("failed to reset monitor filters");
954 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
955 
956 	if (mon_pdev->mon_chan_band == REG_BAND_UNKNOWN)
957 		return;
958 
959 	lmac_id = pdev->ch_band_lmac_id_mapping[mon_pdev->mon_chan_band];
960 	if (qdf_unlikely(lmac_id == DP_MON_INVALID_LMAC_ID))
961 		return;
962 
963 	dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
964 	if (qdf_unlikely(dp_intr_id == DP_MON_INVALID_LMAC_ID))
965 		return;
966 
967 	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, lmac_id);
968 
969 	/* reap full ring */
970 	budget = wlan_cfg_get_dma_mon_stat_ring_size(pdev->wlan_cfg_ctx);
971 
972 	hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp);
973 	dp_mon_info("Before reap: Monitor DST ring HP %u TP %u", hp, tp);
974 
975 	dp_mon_process(soc, &soc->intr_ctx[dp_intr_id], lmac_id, budget);
976 
977 	hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp);
978 	dp_mon_info("After reap: Monitor DST ring HP %u TP %u", hp, tp);
979 }
980 
981 #if !defined(DISABLE_MON_CONFIG)
982 
983 #ifdef QCA_MONITOR_PKT_SUPPORT
984 static
985 QDF_STATUS dp_mon_htt_dest_srng_setup(struct dp_soc *soc,
986 				      struct dp_pdev *pdev,
987 				      int mac_id,
988 				      int mac_for_pdev)
989 {
990 	QDF_STATUS status = QDF_STATUS_SUCCESS;
991 
992 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
993 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
994 					soc->rxdma_mon_buf_ring[mac_id]
995 					.hal_srng,
996 					RXDMA_MONITOR_BUF);
997 
998 		if (status != QDF_STATUS_SUCCESS) {
999 			dp_mon_err("Failed to send htt srng setup message for Rxdma mon buf ring");
1000 			return status;
1001 		}
1002 
1003 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
1004 					soc->rxdma_mon_dst_ring[mac_id]
1005 					.hal_srng,
1006 					RXDMA_MONITOR_DST);
1007 
1008 		if (status != QDF_STATUS_SUCCESS) {
1009 			dp_mon_err("Failed to send htt srng setup message for Rxdma mon dst ring");
1010 			return status;
1011 		}
1012 
1013 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
1014 					soc->rxdma_mon_desc_ring[mac_id]
1015 					.hal_srng,
1016 					RXDMA_MONITOR_DESC);
1017 
1018 		if (status != QDF_STATUS_SUCCESS) {
1019 			dp_mon_err("Failed to send htt srng message for Rxdma mon desc ring");
1020 			return status;
1021 		}
1022 	}
1023 
1024 	return status;
1025 }
1026 #else
1027 static
1028 QDF_STATUS dp_mon_htt_dest_srng_setup(struct dp_soc *soc,
1029 				      struct dp_pdev *pdev,
1030 				      int mac_id,
1031 				      int mac_for_pdev)
1032 {
1033 	return QDF_STATUS_SUCCESS;
1034 }
1035 #endif
1036 
1037 /**
1038  * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
1039  * @soc: soc handle
1040  * @pdev: physical device handle
1041  * @mac_id: ring number
1042  * @mac_for_pdev: mac_id
1043  *
1044  * Return: non-zero for failure, zero for success
1045  */
1046 #ifdef QCA_HOST2FW_RXBUF_RING
1047 QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
1048 				 struct dp_pdev *pdev,
1049 				 int mac_id,
1050 				 int mac_for_pdev)
1051 {
1052 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1053 
1054 	status = dp_mon_htt_dest_srng_setup(soc, pdev, mac_id, mac_for_pdev);
1055 	if (status != QDF_STATUS_SUCCESS)
1056 		return status;
1057 
1058 	status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
1059 				soc->rxdma_mon_status_ring[mac_id]
1060 				.hal_srng,
1061 				RXDMA_MONITOR_STATUS);
1062 
1063 	if (status != QDF_STATUS_SUCCESS) {
1064 		dp_mon_err("Failed to send htt srng setup message for Rxdma mon status ring");
1065 		return status;
1066 	}
1067 
1068 	return status;
1069 }
1070 #else
1071 /* This is only for WIN */
1072 QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
1073 				 struct dp_pdev *pdev,
1074 				 int mac_id,
1075 				 int mac_for_pdev)
1076 {
1077 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1078 	struct dp_mon_soc *mon_soc;
1079 
1080 	mon_soc = soc->monitor_soc;
1081 	if (!mon_soc) {
1082 		dp_mon_err("%pK: monitor SOC not initialized",
1083 			   soc);
1084 		return status;
1085 	}
1086 
1087 	if (mon_soc->monitor_mode_v2)
1088 		return status;
1089 
1090 	if (wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) {
1091 		status = dp_mon_htt_dest_srng_setup(soc, pdev,
1092 						    mac_id, mac_for_pdev);
1093 		if (status != QDF_STATUS_SUCCESS)
1094 			return status;
1095 	}
1096 
1097 	status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
1098 				soc->rxdma_mon_status_ring[mac_id]
1099 				.hal_srng,
1100 				RXDMA_MONITOR_STATUS);
1101 
1102 	if (status != QDF_STATUS_SUCCESS) {
1103 		dp_mon_err("Failed to send htt srng setup msg for Rxdma mon status ring");
1104 		return status;
1105 	}
1106 
1107 	return status;
1108 }
1109 #endif
1110 #else
1111 QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
1112 				 struct dp_pdev *pdev,
1113 				 int mac_id,
1114 				 int mac_for_pdev)
1115 {
1116 	return QDF_STATUS_SUCCESS;
1117 }
1118 #endif
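/*
 * Usage sketch (illustrative): dp_mon_htt_srng_setup() is expected to be
 * invoked once per mac during pdev HTT ring configuration; lmac_id and
 * mac_for_pdev here are assumed to come from the caller's mac mapping:
 *
 *	if (dp_mon_htt_srng_setup(soc, pdev, lmac_id, mac_for_pdev) !=
 *	    QDF_STATUS_SUCCESS)
 *		dp_mon_err("monitor HTT srng setup failed");
 */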
1119 
1120 /* MCL specific functions */
1121 #if defined(DP_CON_MON)
1122 
1123 /*
1124  * dp_service_mon_rings() - service monitor rings
1125  * @soc: soc dp handle
1126  * @quota: number of ring entry that can be serviced
1127  *
1128  * Return: None
1129  *
1130  */
1131 static void dp_service_mon_rings(struct  dp_soc *soc, uint32_t quota)
1132 {
1133 	int ring = 0, work_done;
1134 	struct dp_pdev *pdev = NULL;
1135 
1136 	for (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
1137 		pdev = dp_get_pdev_for_lmac_id(soc, ring);
1138 		if (!pdev)
1139 			continue;
1140 		work_done = dp_mon_process(soc, NULL, ring, quota);
1141 
1142 		dp_rx_mon_dest_debug("Reaped %d descs from Monitor rings",
1143 				     work_done);
1144 	}
1145 }
1146 #endif
1147 
1148 /**
1149  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
1150  *                                 ring based on target
1151  * @soc: soc handle
1152  * @mac_for_pdev: WIN- pdev_id, MCL- mac id
1153  * @pdev: physical device handle
1154  * @ring_num: mac id
1155  * @htt_tlv_filter: tlv filter
1156  *
1157  * Return: zero on success, non-zero on failure
1158  */
1159 static inline QDF_STATUS
1160 dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
1161 			    struct dp_pdev *pdev, uint8_t ring_num,
1162 			    struct htt_rx_ring_tlv_filter htt_tlv_filter)
1163 {
1164 	QDF_STATUS status;
1165 
1166 	if (soc->wlan_cfg_ctx->rxdma1_enable)
1167 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
1168 					     soc->rxdma_mon_buf_ring[ring_num]
1169 					     .hal_srng,
1170 					     RXDMA_MONITOR_BUF,
1171 					     RX_MONITOR_BUFFER_SIZE,
1172 					     &htt_tlv_filter);
1173 	else
1174 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
1175 					     pdev->rx_mac_buf_ring[ring_num]
1176 					     .hal_srng,
1177 					     RXDMA_BUF, RX_DATA_BUFFER_SIZE,
1178 					     &htt_tlv_filter);
1179 
1180 	return status;
1181 }
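/*
 * Example (hedged): a caller typically zeroes the TLV filter and enables
 * only the TLVs it needs before invoking dp_monitor_mode_ring_config().
 * The exact selection below is illustrative, not a recommended filter:
 *
 *	struct htt_rx_ring_tlv_filter tlv_filter = {0};
 *
 *	tlv_filter.mpdu_start = 1;
 *	tlv_filter.msdu_start = 1;
 *	tlv_filter.packet = 1;
 *	tlv_filter.msdu_end = 1;
 *	tlv_filter.mpdu_end = 1;
 *	tlv_filter.attention = 1;
 *	status = dp_monitor_mode_ring_config(soc, mac_for_pdev, pdev,
 *					     mac_id, tlv_filter);
 */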
1182 
1183 /*
1184  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
1185  * @soc_hdl: datapath soc handle
1186  * @pdev_id: physical device instance id
1187  *
1188  * Return: monitor vdev id on success, -EINVAL if no monitor vdev exists
1189  */
1190 static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
1191 					       uint8_t pdev_id)
1192 {
1193 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1194 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1195 
1196 	if (qdf_unlikely(!pdev || !pdev->monitor_pdev ||
1197 			 !pdev->monitor_pdev->mvdev))
1198 		return -EINVAL;
1199 
1200 	return pdev->monitor_pdev->mvdev->vdev_id;
1201 }
1202 
1203 /*
1204  * dp_peer_tx_init() - Initialize Tx TID state
1205  * @pdev: Datapath pdev
1206  * @peer: Datapath peer
1207  *
1208  */
1209 static void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
1210 {
1211 	dp_peer_tid_queue_init(peer);
1212 	dp_peer_update_80211_hdr(peer->vdev, peer);
1213 }
1214 
1215 /*
1216  * dp_peer_tx_cleanup() - Deinitialize Tx TID state
1217  * @vdev: Datapath vdev
1218  * @peer: Datapath peer
1219  *
1220  */
1221 static inline void
1222 dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1223 {
1224 	dp_peer_tid_queue_cleanup(peer);
1225 }
1226 
1227 #if defined(QCA_TX_CAPTURE_SUPPORT) || defined(QCA_ENHANCED_STATS_SUPPORT)
1228 #ifndef WLAN_TX_PKT_CAPTURE_ENH
1229 /*
1230  * dp_deliver_mgmt_frm: Deliver mgmt frame to the upper layer or free it
1231  * @pdev: DP PDEV handle
1232  * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
1233  *
1234  * return: void
1235  */
1236 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
1237 {
1238 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1239 
1240 	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) {
1241 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
1242 				     nbuf, HTT_INVALID_PEER,
1243 				     WDI_NO_VAL, pdev->pdev_id);
1244 	} else {
1245 		if (!mon_pdev->bpr_enable)
1246 			qdf_nbuf_free(nbuf);
1247 	}
1248 }
1249 #endif
1250 #endif
1251 
1252 #ifdef QCA_ENHANCED_STATS_SUPPORT
1253 /*
1254  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
1255  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
1256  * @pdev: DP PDEV handle
1257  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
1258  * @length: tlv_length
1259  *
1260  * return: QDF_STATUS_SUCCESS if nbuf has to be freed in caller
1261  */
1262 QDF_STATUS
1263 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
1264 					      qdf_nbuf_t tag_buf,
1265 					      uint32_t ppdu_id)
1266 {
1267 	uint32_t *nbuf_ptr;
1268 	uint8_t trim_size;
1269 	size_t head_size;
1270 	struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
1271 	uint32_t *msg_word;
1272 	uint32_t tsf_hdr;
1273 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1274 
1275 	if ((!mon_pdev->tx_sniffer_enable) && (!mon_pdev->mcopy_mode) &&
1276 	    (!mon_pdev->bpr_enable) && (!mon_pdev->tx_capture_enabled))
1277 		return QDF_STATUS_SUCCESS;
1278 
1279 	/*
1280 	 * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t
1281 	 */
1282 	msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
1283 	msg_word = msg_word + 2;
1284 	tsf_hdr = *msg_word;
1285 
1286 	trim_size = ((mon_pdev->mgmtctrl_frm_info.mgmt_buf +
1287 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
1288 		      qdf_nbuf_data(tag_buf));
1289 
1290 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
1291 		return QDF_STATUS_SUCCESS;
1292 
1293 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
1294 			    mon_pdev->mgmtctrl_frm_info.mgmt_buf_len);
1295 
1296 	if (mon_pdev->tx_capture_enabled) {
1297 		head_size = sizeof(struct cdp_tx_mgmt_comp_info);
1298 		if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
1299 			qdf_err("Failed to get headroom h_sz %zu h_avail %d\n",
1300 				head_size, qdf_nbuf_headroom(tag_buf));
1301 			qdf_assert_always(0);
1302 			return QDF_STATUS_E_NOMEM;
1303 		}
1304 		ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
1305 					qdf_nbuf_push_head(tag_buf, head_size);
1306 		qdf_assert_always(ptr_mgmt_comp_info);
1307 		ptr_mgmt_comp_info->ppdu_id = ppdu_id;
1308 		ptr_mgmt_comp_info->is_sgen_pkt = true;
1309 		ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
1310 	} else {
1311 		head_size = sizeof(ppdu_id);
1312 		nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
1313 		*nbuf_ptr = ppdu_id;
1314 	}
1315 	if (mon_pdev->bpr_enable) {
1316 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
1317 				     tag_buf, HTT_INVALID_PEER,
1318 				     WDI_NO_VAL, pdev->pdev_id);
1319 	}
1320 
1321 	dp_deliver_mgmt_frm(pdev, tag_buf);
1322 
1323 	return QDF_STATUS_E_ALREADY;
1324 }
1325 
1326 /*
1327  * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
1328  * bitmap for sniffer mode
1329  * @bitmap: received bitmap
1330  *
1331  * Return: expected bitmap value; returns zero if it matches neither the
1332  * 64-bit Tx window nor the 256-bit window tlv bitmap
1333  */
1334 int
1335 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
1336 {
1337 	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
1338 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
1339 	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
1340 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
1341 
1342 	return 0;
1343 }
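/*
 * Example (hedged): TLV-processing code can use the helper above to
 * decide whether a sniffer PPDU carried a complete 64- or 256-bit Tx BA
 * window; handle_sniffer_ppdu() is a hypothetical consumer:
 *
 *	if (dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(ppdu_info->tlv_bitmap))
 *		handle_sniffer_ppdu(pdev, ppdu_info);
 */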
1344 
1345 /*
1346  * dp_peer_find_by_id_valid - check if peer exists for given id
1347  * @soc: core DP soc context
1348  * @peer_id: peer id from peer object can be retrieved
1349  *
1350  * Return: true if peer exists, false otherwise
1351  */
1352 
1353 static
1354 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
1355 {
1356 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
1357 						     DP_MOD_ID_HTT);
1358 
1359 	if (peer) {
1360 		/*
1361 		 * Decrement the peer ref which is taken as part of
1362 		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
1363 		 */
1364 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
1365 
1366 		return true;
1367 	}
1368 
1369 	return false;
1370 }
1371 
1372 /*
1373  * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
1374  * @peer: Datapath peer handle
1375  * @ppdu: User PPDU Descriptor
1376  * @cur_ppdu_id: PPDU_ID
1377  *
1378  * Return: None
1379  *
1380  * On a Tx data frame we may get the delayed ba flag set in
1381  * htt_ppdu_stats_user_common_tlv, which means the Block Ack (BA) arrives
1382  * only after we send a Block Ack Request (BAR). A successful msdu is known
1383  * only after the Block Ack; to populate peer stats we need successful
1384  * msdus (data frames), so we hold the Tx data stats on delayed_ba for stats update.
1385  */
1386 static void
1387 dp_peer_copy_delay_stats(struct dp_peer *peer,
1388 			 struct cdp_tx_completion_ppdu_user *ppdu,
1389 			 uint32_t cur_ppdu_id)
1390 {
1391 	struct dp_pdev *pdev;
1392 	struct dp_vdev *vdev;
1393 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
1394 
1395 	if (mon_peer->last_delayed_ba) {
1396 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1397 			  "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]",
1398 			  mon_peer->last_delayed_ba_ppduid, cur_ppdu_id);
1399 		vdev = peer->vdev;
1400 		if (vdev) {
1401 			pdev = vdev->pdev;
1402 			pdev->stats.cdp_delayed_ba_not_recev++;
1403 		}
1404 	}
1405 
1406 	mon_peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
1407 	mon_peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
1408 	mon_peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
1409 	mon_peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
1410 	mon_peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
1411 	mon_peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
1412 	mon_peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
1413 	mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
1414 	mon_peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
1416 	mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast =
1417 					ppdu->mpdu_tried_ucast;
1418 	mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast =
1419 					ppdu->mpdu_tried_mcast;
1420 	mon_peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
1421 	mon_peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
1423 
1424 	mon_peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
1425 	mon_peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
1426 	mon_peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
1427 
1428 	mon_peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
1429 	mon_peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;
1430 
1431 	mon_peer->last_delayed_ba = true;
1432 
1433 	ppdu->debug_copied = true;
1434 }
1435 
1436 /*
1437  * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
1438  * @peer: Datapath peer handle
1439  * @ppdu: PPDU Descriptor
1440  *
1441  * Return: None
1442  *
1443  * For a Tx BAR, the PPDU stats TLV includes the Block Ack info; the PPDU
1444  * info from the Tx BAR frame itself is not required to populate peer
1445  * stats. But we need the successful MPDU and MSDU counts to update the
1446  * previously transmitted Tx data frame, so we overwrite the BAR's ppdu
1447  * stats with the previously stored ppdu stats.
1448  */
1449 static void
1450 dp_peer_copy_stats_to_bar(struct dp_peer *peer,
1451 			  struct cdp_tx_completion_ppdu_user *ppdu)
1452 {
1453 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
1454 
1455 	ppdu->ltf_size = mon_peer->delayed_ba_ppdu_stats.ltf_size;
1456 	ppdu->stbc = mon_peer->delayed_ba_ppdu_stats.stbc;
1457 	ppdu->he_re = mon_peer->delayed_ba_ppdu_stats.he_re;
1458 	ppdu->txbf = mon_peer->delayed_ba_ppdu_stats.txbf;
1459 	ppdu->bw = mon_peer->delayed_ba_ppdu_stats.bw;
1460 	ppdu->nss = mon_peer->delayed_ba_ppdu_stats.nss;
1461 	ppdu->gi = mon_peer->delayed_ba_ppdu_stats.gi;
1462 	ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm;
1463 	ppdu->ldpc = mon_peer->delayed_ba_ppdu_stats.ldpc;
1465 	ppdu->mpdu_tried_ucast =
1466 			mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
1467 	ppdu->mpdu_tried_mcast =
1468 			mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
1469 	ppdu->frame_ctrl = mon_peer->delayed_ba_ppdu_stats.frame_ctrl;
1470 	ppdu->qos_ctrl = mon_peer->delayed_ba_ppdu_stats.qos_ctrl;
1472 
1473 	ppdu->ru_start = mon_peer->delayed_ba_ppdu_stats.ru_start;
1474 	ppdu->ru_tones = mon_peer->delayed_ba_ppdu_stats.ru_tones;
1475 	ppdu->is_mcast = mon_peer->delayed_ba_ppdu_stats.is_mcast;
1476 
1477 	ppdu->user_pos = mon_peer->delayed_ba_ppdu_stats.user_pos;
1478 	ppdu->mu_group_id = mon_peer->delayed_ba_ppdu_stats.mu_group_id;
1479 
1480 	mon_peer->last_delayed_ba = false;
1481 
1482 	ppdu->debug_copied = true;
1483 }
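/*
 * Illustrative pairing (hedged): on a data PPDU with the delayed_ba flag
 * the TLV handler stashes the stats via dp_peer_copy_delay_stats(); when
 * the subsequent BAR/BA PPDU arrives, dp_peer_copy_stats_to_bar()
 * restores them so peer stats are rated with the real Block Ack result:
 *
 *	if (ppdu->delayed_ba)
 *		dp_peer_copy_delay_stats(peer, ppdu, cur_ppdu_id);
 *	...
 *	if (mon_peer->last_delayed_ba)
 *		dp_peer_copy_stats_to_bar(peer, ppdu);
 */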
1484 
1485 /*
1486  * dp_tx_rate_stats_update() - Update rate per-peer statistics
1487  * @peer: Datapath peer handle
1488  * @ppdu: PPDU Descriptor
1489  *
1490  * Return: None
1491  */
1492 static void
1493 dp_tx_rate_stats_update(struct dp_peer *peer,
1494 			struct cdp_tx_completion_ppdu_user *ppdu)
1495 {
1496 	uint32_t ratekbps = 0;
1497 	uint64_t ppdu_tx_rate = 0;
1498 	uint32_t rix;
1499 	uint16_t ratecode = 0;
1500 
1501 	if (!peer || !ppdu)
1502 		return;
1503 
1504 	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK)
1505 		return;
1506 
1507 	ratekbps = dp_getrateindex(ppdu->gi,
1508 				   ppdu->mcs,
1509 				   ppdu->nss,
1510 				   ppdu->preamble,
1511 				   ppdu->bw,
1512 				   &rix,
1513 				   &ratecode);
1514 
1515 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
1516 
1517 	if (!ratekbps)
1518 		return;
1519 
1520 	/* Calculate goodput in non-training period
1521 	 * In training period, don't do anything as
1522 	 * pending pkt is sent as goodput.
1523 	 */
1524 	if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
1525 		ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
1526 				(CDP_PERCENT_MACRO - ppdu->current_rate_per));
1527 	}
1528 	ppdu->rix = rix;
1529 	ppdu->tx_ratekbps = ratekbps;
1530 	ppdu->tx_ratecode = ratecode;
1531 	peer->stats.tx.avg_tx_rate =
1532 		dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
1533 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
1534 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
1535 
1536 	peer->stats.tx.bw_info = ppdu->bw;
1537 	peer->stats.tx.gi_info = ppdu->gi;
1538 	peer->stats.tx.nss_info = ppdu->nss;
1539 	peer->stats.tx.mcs_info = ppdu->mcs;
1540 	peer->stats.tx.preamble_info = ppdu->preamble;
1541 	if (peer->vdev) {
1542 		/*
1543 		 * In STA mode:
1544 		 *	We get ucast stats as BSS peer stats.
1545 		 *
1546 		 * In AP mode:
1547 		 *	We get mcast stats as BSS peer stats.
1548 		 *	We get ucast stats as assoc peer stats.
1549 		 */
1550 		if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
1551 			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
1552 			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
1553 		} else {
1554 			peer->vdev->stats.tx.last_tx_rate = ratekbps;
1555 			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
1556 		}
1557 	}
1558 }
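/*
 * Worked example (hedged interpretation): with ratekbps = 144000
 * (144 Mbps) and current_rate_per = 10, and assuming
 * CDP_NUM_KB_IN_MB = 1000 and CDP_PERCENT_MACRO = 100,
 * sa_goodput = (144000 / 1000) * (100 - 10) = 12960, i.e. the goodput
 * appears to be stored in hundredths of Mbps (129.60 Mbps here).
 */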
1559 
1560 /*
1561  * dp_tx_stats_update() - Update per-peer statistics
1562  * @pdev: Datapath pdev handle
1563  * @peer: Datapath peer handle
1564  * @ppdu: PPDU Descriptor
1565  * @ack_rssi: RSSI of last ack received
1566  *
1567  * Return: None
1568  */
1569 static void
1570 dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
1571 		   struct cdp_tx_completion_ppdu_user *ppdu,
1572 		   uint32_t ack_rssi)
1573 {
1574 	uint8_t preamble, mcs;
1575 	uint16_t num_msdu;
1576 	uint16_t num_mpdu;
1577 	uint16_t mpdu_tried;
1578 	uint16_t mpdu_failed;
1579 
1580 	preamble = ppdu->preamble;
1581 	mcs = ppdu->mcs;
1582 	num_msdu = ppdu->num_msdu;
1583 	num_mpdu = ppdu->mpdu_success;
1584 	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
1585 	mpdu_failed = mpdu_tried - num_mpdu;
1586 
1587 	/* If the peer statistics are already processed as part of
1588 	 * per-MSDU completion handler, do not process these again in per-PPDU
1589 	 * indications
1590 	 */
1591 	if (pdev->soc->process_tx_status)
1592 		return;
1593 
1594 	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
1595 		/*
1596 		 * All failed mpdu will be retried, so incrementing
1597 		 * retries mpdu based on mpdu failed. Even for
1598 		 * ack failure i.e for long retries we get
1599 		 * mpdu failed equal mpdu tried.
1600 		 */
1601 		DP_STATS_INC(peer, tx.retries, mpdu_failed);
1602 		DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
1603 		return;
1604 	}
1605 
1606 	if (ppdu->is_ppdu_cookie_valid)
1607 		DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1);
1608 
1609 	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
1610 	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
1611 		if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
1612 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1613 				  "mu_group_id out of bound!!\n");
1614 		else
1615 			DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
1616 				     (ppdu->user_pos + 1));
1617 	}
1618 
1619 	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
1620 	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
1621 		DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
1622 		DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
1623 		switch (ppdu->ru_tones) {
1624 		case RU_26:
1625 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu,
1626 				     num_msdu);
1627 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu,
1628 				     num_mpdu);
1629 			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried,
1630 				     mpdu_tried);
1631 		break;
1632 		case RU_52:
1633 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu,
1634 				     num_msdu);
1635 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu,
1636 				     num_mpdu);
1637 			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried,
1638 				     mpdu_tried);
1639 		break;
1640 		case RU_106:
1641 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu,
1642 				     num_msdu);
1643 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu,
1644 				     num_mpdu);
1645 			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried,
1646 				     mpdu_tried);
1647 		break;
1648 		case RU_242:
1649 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu,
1650 				     num_msdu);
1651 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu,
1652 				     num_mpdu);
1653 			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried,
1654 				     mpdu_tried);
1655 		break;
1656 		case RU_484:
1657 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu,
1658 				     num_msdu);
1659 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu,
1660 				     num_mpdu);
1661 			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried,
1662 				     mpdu_tried);
1663 		break;
1664 		case RU_996:
1665 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu,
1666 				     num_msdu);
1667 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu,
1668 				     num_mpdu);
1669 			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried,
1670 				     mpdu_tried);
1671 		break;
1672 		}
1673 	}
1674 
1675 	/*
1676 	 * All failed mpdu will be retried, so incrementing
1677 	 * retries mpdu based on mpdu failed. Even for
1678 	 * ack failure i.e for long retries we get
1679 	 * mpdu failed equal mpdu tried.
1680 	 */
1681 	DP_STATS_INC(peer, tx.retries, mpdu_failed);
1682 	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
1683 
1684 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
1685 		     num_msdu);
1686 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
1687 		     num_mpdu);
1688 	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
1689 		     mpdu_tried);
1690 
1691 	DP_STATS_INC_PKT(peer, tx.comp_pkt,
1692 			 num_msdu, (ppdu->success_bytes +
1693 				    ppdu->retry_bytes + ppdu->failed_bytes));
1694 	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
1695 	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
1696 	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
1697 	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
1698 	if (ppdu->tid < CDP_DATA_TID_MAX)
1699 		DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
1700 			     num_msdu);
1701 	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
1702 	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
1703 	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
1704 		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);
1705 
1706 	DP_STATS_INCC(peer,
1707 		      tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
1708 		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
1709 	DP_STATS_INCC(peer,
1710 		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
1711 		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
1712 	DP_STATS_INCC(peer,
1713 		      tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
1714 		      ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
1715 	DP_STATS_INCC(peer,
1716 		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
1717 		      ((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
1718 	DP_STATS_INCC(peer,
1719 		      tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
1720 		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
1721 	DP_STATS_INCC(peer,
1722 		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
1723 		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
1724 	DP_STATS_INCC(peer,
1725 		      tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
1726 		      ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
1727 	DP_STATS_INCC(peer,
1728 		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
1729 		      ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
1730 	DP_STATS_INCC(peer,
1731 		      tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
1732 		      ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
1733 	DP_STATS_INCC(peer,
1734 		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
1735 		      ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
1736 	DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
1737 	DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));
1738 	DP_STATS_INCC(peer, tx.pream_punct_cnt, 1, ppdu->pream_punct);
1739 
1740 	dp_peer_stats_notify(pdev, peer);
1741 
1742 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
1743 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
1744 			     &peer->stats, ppdu->peer_id,
1745 			     UPDATE_PEER_STATS, pdev->pdev_id);
1746 #endif
1747 }
1748 
1749 /*
1750  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1751  * if a new peer id arrives in a PPDU
1752  * @pdev: DP pdev handle
1753  * @peer_id : peer unique identifier
1754  * @ppdu_info: per ppdu tlv structure
1755  *
1756  * return:user index to be populated
1757  */
1758 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
1759 					   uint16_t peer_id,
1760 					   struct ppdu_info *ppdu_info)
1761 {
1762 	uint8_t user_index = 0;
1763 	struct cdp_tx_completion_ppdu *ppdu_desc;
1764 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1765 
1766 	ppdu_desc =
1767 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1768 
1769 	while ((user_index + 1) <= ppdu_info->last_user) {
1770 		ppdu_user_desc = &ppdu_desc->user[user_index];
1771 		if (ppdu_user_desc->peer_id != peer_id) {
1772 			user_index++;
1773 			continue;
1774 		} else {
1775 			/* Max users possible is 8 so user array index should
1776 			 * not exceed 7
1777 			 */
1778 			qdf_assert_always(user_index <= (ppdu_desc->max_users - 1));
1779 			return user_index;
1780 		}
1781 	}
1782 
1783 	ppdu_info->last_user++;
1784 	/* Max users possible is 8 so last user should not exceed 8 */
1785 	qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users);
1786 	return ppdu_info->last_user - 1;
1787 }
1788 
1789 /*
1790  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
1791  * @pdev: DP pdev handle
1792  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
1793  * @ppdu_info: per ppdu tlv structure
1794  *
1795  * return:void
1796  */
1797 static void
1798 dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
1799 				 uint32_t *tag_buf,
1800 				 struct ppdu_info *ppdu_info)
1801 {
1802 	uint16_t frame_type;
1803 	uint16_t frame_ctrl;
1804 	uint16_t freq;
1805 	struct dp_soc *soc = NULL;
1806 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
1807 	uint64_t ppdu_start_timestamp;
1808 	uint32_t *start_tag_buf;
1809 
1810 	start_tag_buf = tag_buf;
1811 	ppdu_desc =
1812 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1813 
1814 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
1815 
1816 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
1817 	ppdu_info->sched_cmdid =
1818 		HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
1819 	ppdu_desc->num_users =
1820 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
1821 
1822 	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
1823 
1824 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
1825 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
1826 	ppdu_desc->htt_frame_type = frame_type;
1827 
1828 	frame_ctrl = ppdu_desc->frame_ctrl;
1829 
1830 	ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
1831 
1832 	switch (frame_type) {
1833 	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
1834 	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
1835 	case HTT_STATS_FTYPE_SGEN_QOS_NULL:
		/*
		 * For management packets the frame type comes in as DATA_SU,
		 * so check frame_ctrl before setting frame_type.
		 */
1840 		if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
1841 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
1842 		else
1843 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
1844 	break;
1845 	case HTT_STATS_FTYPE_SGEN_MU_BAR:
1846 	case HTT_STATS_FTYPE_SGEN_BAR:
1847 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
1848 	break;
1849 	default:
1850 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
1851 	break;
1852 	}
1853 
1854 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
1855 	ppdu_desc->tx_duration = *tag_buf;
1856 
1857 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
1858 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
1859 
1860 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
1861 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
1862 	if (freq != ppdu_desc->channel) {
1863 		soc = pdev->soc;
1864 		ppdu_desc->channel = freq;
1865 		pdev->operating_channel.freq = freq;
1866 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
1867 			pdev->operating_channel.num =
1868 			    soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
1869 								 pdev->pdev_id,
1870 								 freq);
1871 
1872 		if (soc && soc->cdp_soc.ol_ops->freq_to_band)
1873 			pdev->operating_channel.band =
1874 			       soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
1875 								 pdev->pdev_id,
1876 								 freq);
1877 	}
1878 
1879 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
1880 
1881 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
1882 	ppdu_desc->phy_ppdu_tx_time_us =
1883 		HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf);
1884 	ppdu_desc->beam_change =
1885 		HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
1886 	ppdu_desc->doppler =
1887 		HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf);
1888 	ppdu_desc->spatial_reuse =
1889 		HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf);
1890 
1891 	dp_tx_capture_htt_frame_counter(pdev, frame_type);
1892 
1893 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
1894 	ppdu_start_timestamp = *tag_buf;
1895 	ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
1896 					     HTT_SHIFT_UPPER_TIMESTAMP) &
1897 					    HTT_MASK_UPPER_TIMESTAMP);
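	/*
	 * The 64-bit start timestamp is assembled from two 32-bit TLV words:
	 * START_TSTMP_L32_US supplies the low half and START_TSTMP_U32_US
	 * the high half. Illustrative example: L32 = 0x89ABCDEF and
	 * U32 = 0x12 yield ppdu_start_timestamp = 0x0000001289ABCDEF.
	 */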
1898 
	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
					ppdu_desc->tx_duration;
	/* Ack timestamp is the same as the end timestamp */
	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;

	ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
	ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
	ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
1913 
1914 	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(BSSCOLOR_OBSS_PSR);
1915 	ppdu_desc->bss_color =
1916 		HTT_PPDU_STATS_COMMON_TLV_BSS_COLOR_ID_GET(*tag_buf);
1917 }
1918 
/*
 * dp_process_ppdu_stats_user_common_tlv: Process htt_ppdu_stats_user_common_tlv
 * @pdev: DP pdev handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
 */
1926 static void dp_process_ppdu_stats_user_common_tlv(
1927 		struct dp_pdev *pdev, uint32_t *tag_buf,
1928 		struct ppdu_info *ppdu_info)
1929 {
1930 	uint16_t peer_id;
1931 	struct cdp_tx_completion_ppdu *ppdu_desc;
1932 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1933 	uint8_t curr_user_index = 0;
1934 	struct dp_peer *peer;
1935 	struct dp_vdev *vdev;
1936 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
1937 
1938 	ppdu_desc =
1939 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1940 
1941 	tag_buf++;
1942 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
1943 
1944 	curr_user_index =
1945 		dp_get_ppdu_info_user_index(pdev,
1946 					    peer_id, ppdu_info);
1947 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
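	/*
	 * tlv_bitmap records which TLV types have been seen for this user;
	 * the bit index is the HTT TLV tag value, so completeness of the
	 * PPDU can later be checked against an expected bitmap.
	 */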
1948 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
1949 
1950 	ppdu_desc->vdev_id =
1951 		HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
1952 
1953 	ppdu_user_desc->peer_id = peer_id;
1954 
1955 	tag_buf++;
1956 
1957 	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
1958 		ppdu_user_desc->delayed_ba = 1;
1959 		ppdu_desc->delayed_ba = 1;
1960 	}
1961 
1962 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
1963 		ppdu_user_desc->is_mcast = true;
1964 		ppdu_user_desc->mpdu_tried_mcast =
1965 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1966 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
1967 	} else {
1968 		ppdu_user_desc->mpdu_tried_ucast =
1969 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1970 	}
1971 
1972 	ppdu_user_desc->is_seq_num_valid =
1973 	HTT_PPDU_STATS_USER_COMMON_TLV_IS_SQNUM_VALID_IN_BUFFER_GET(*tag_buf);
1974 	tag_buf++;
1975 
1976 	ppdu_user_desc->qos_ctrl =
1977 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
1978 	ppdu_user_desc->frame_ctrl =
1979 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
1980 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
1981 
1982 	if (ppdu_user_desc->delayed_ba)
1983 		ppdu_user_desc->mpdu_success = 0;
1984 
1985 	tag_buf += 3;
1986 
1987 	if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
1988 		ppdu_user_desc->ppdu_cookie =
1989 			HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
1990 		ppdu_user_desc->is_ppdu_cookie_valid = 1;
1991 	}
1992 
	/* Returning early here would leave the remaining fields unpopulated */
1994 	if (peer_id == DP_SCAN_PEER_ID) {
1995 		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
1996 					     DP_MOD_ID_TX_PPDU_STATS);
1997 		if (!vdev)
1998 			return;
1999 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
2000 			     QDF_MAC_ADDR_SIZE);
2001 		dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS);
2002 	} else {
2003 		peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
2004 					     DP_MOD_ID_TX_PPDU_STATS);
2005 		if (!peer) {
			/*
			 * FW may send a peer_id that has already been removed
			 * on the host. E.g. on disassoc, FW sends ppdu stats
			 * carrying the previously associated peer's peer_id
			 * even though that peer has been deleted.
			 */
2013 			vdev = dp_vdev_get_ref_by_id(pdev->soc,
2014 						     ppdu_desc->vdev_id,
2015 						     DP_MOD_ID_TX_PPDU_STATS);
2016 			if (!vdev)
2017 				return;
2018 			qdf_mem_copy(ppdu_user_desc->mac_addr,
2019 				     vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2020 			dp_vdev_unref_delete(pdev->soc, vdev,
2021 					     DP_MOD_ID_TX_PPDU_STATS);
2022 			return;
2023 		}
2024 		qdf_mem_copy(ppdu_user_desc->mac_addr,
2025 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2026 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
2027 	}
2028 }
2029 
2030 /**
2031  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
2032  * @pdev: DP pdev handle
2033  * @tag_buf: T2H message buffer carrying the user rate TLV
2034  * @ppdu_info: per ppdu tlv structure
2035  *
2036  * return:void
2037  */
2038 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
2039 		uint32_t *tag_buf,
2040 		struct ppdu_info *ppdu_info)
2041 {
2042 	uint16_t peer_id;
2043 	struct cdp_tx_completion_ppdu *ppdu_desc;
2044 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2045 	uint8_t curr_user_index = 0;
2046 	struct dp_vdev *vdev;
2047 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2048 
2049 	ppdu_desc =
2050 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2051 
2052 	tag_buf++;
2053 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
2054 
2055 	curr_user_index =
2056 		dp_get_ppdu_info_user_index(pdev,
2057 					    peer_id, ppdu_info);
2058 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2059 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2060 	if (peer_id == DP_SCAN_PEER_ID) {
2061 		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
2062 					     DP_MOD_ID_TX_PPDU_STATS);
2063 		if (!vdev)
2064 			return;
2065 		dp_vdev_unref_delete(pdev->soc, vdev,
2066 				     DP_MOD_ID_TX_PPDU_STATS);
2067 	}
2068 	ppdu_user_desc->peer_id = peer_id;
2069 
2070 	ppdu_user_desc->tid =
2071 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
2072 
2073 	tag_buf += 1;
2074 
2075 	ppdu_user_desc->user_pos =
2076 		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
2077 	ppdu_user_desc->mu_group_id =
2078 		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
2079 
2080 	tag_buf += 1;
2081 
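	/*
	 * The RU allocation is reported as an inclusive [ru_start, ru_end]
	 * index range, so ru_tones below is (ru_end - ru_start) + 1.
	 * Illustrative example: ru_start = 0 and ru_end = 25 gives
	 * ru_tones = 26, i.e. a 26-tone RU.
	 */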
2082 	ppdu_user_desc->ru_start =
2083 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
2084 	ppdu_user_desc->ru_tones =
2085 		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
2086 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
2087 	ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones;
2088 
2089 	tag_buf += 2;
2090 
2091 	ppdu_user_desc->ppdu_type =
2092 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
2093 
2094 	tag_buf++;
2095 	ppdu_user_desc->tx_rate = *tag_buf;
2096 
2097 	ppdu_user_desc->ltf_size =
2098 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
2099 	ppdu_user_desc->stbc =
2100 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
2101 	ppdu_user_desc->he_re =
2102 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
2103 	ppdu_user_desc->txbf =
2104 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
2105 	ppdu_user_desc->bw =
2106 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
2107 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
2108 	ppdu_desc->usr_nss_sum += ppdu_user_desc->nss;
2109 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
2110 	ppdu_user_desc->preamble =
2111 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
2112 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
2113 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
2114 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
2115 }
2116 
/*
 * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
 * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
 */
2126 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2127 		struct dp_pdev *pdev, uint32_t *tag_buf,
2128 		struct ppdu_info *ppdu_info)
2129 {
2130 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
2131 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
2132 
2133 	struct cdp_tx_completion_ppdu *ppdu_desc;
2134 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2135 	uint8_t curr_user_index = 0;
2136 	uint16_t peer_id;
2137 	uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
2138 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2139 
2140 	ppdu_desc =
2141 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2142 
2143 	tag_buf++;
2144 
2145 	peer_id =
2146 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2147 
2148 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2149 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2150 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2151 	ppdu_user_desc->peer_id = peer_id;
2152 
2153 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2154 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2155 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2156 
2157 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2158 						   (void *)ppdu_user_desc,
2159 						   ppdu_info->ppdu_id,
2160 						   size);
2161 }
2162 
/*
 * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
 * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
 */
2172 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2173 		struct dp_pdev *pdev, uint32_t *tag_buf,
2174 		struct ppdu_info *ppdu_info)
2175 {
2176 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
2177 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
2178 
2179 	struct cdp_tx_completion_ppdu *ppdu_desc;
2180 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2181 	uint8_t curr_user_index = 0;
2182 	uint16_t peer_id;
2183 	uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
2184 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2185 
2186 	ppdu_desc =
2187 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2188 
2189 	tag_buf++;
2190 
2191 	peer_id =
2192 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2193 
2194 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2195 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2196 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2197 	ppdu_user_desc->peer_id = peer_id;
2198 
2199 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2200 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2201 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2202 
2203 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2204 						   (void *)ppdu_user_desc,
2205 						   ppdu_info->ppdu_id,
2206 						   size);
2207 }
2208 
/*
 * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
 * htt_ppdu_stats_user_cmpltn_common_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
 */
2218 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
2219 		struct dp_pdev *pdev, uint32_t *tag_buf,
2220 		struct ppdu_info *ppdu_info)
2221 {
2222 	uint16_t peer_id;
2223 	struct cdp_tx_completion_ppdu *ppdu_desc;
2224 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2225 	uint8_t curr_user_index = 0;
2226 	uint8_t bw_iter;
2227 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
2228 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
2229 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2230 
2231 	ppdu_desc =
2232 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2233 
2234 	tag_buf++;
2235 	peer_id =
2236 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
2237 
2238 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2239 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2240 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2241 	ppdu_user_desc->peer_id = peer_id;
2242 
2243 	ppdu_user_desc->completion_status =
2244 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
2245 				*tag_buf);
2246 
2247 	ppdu_user_desc->tid =
2248 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
2249 
2250 	tag_buf++;
2251 	if (qdf_likely(ppdu_user_desc->completion_status ==
2252 			HTT_PPDU_STATS_USER_STATUS_OK)) {
2253 		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
2254 		ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi;
2255 		ppdu_user_desc->ack_rssi_valid = 1;
2256 	} else {
2257 		ppdu_user_desc->ack_rssi_valid = 0;
2258 	}
2259 
2260 	tag_buf++;
2261 
2262 	ppdu_user_desc->mpdu_success =
2263 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
2264 
2265 	ppdu_user_desc->mpdu_failed =
2266 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
2267 						ppdu_user_desc->mpdu_success;
2268 
2269 	tag_buf++;
2270 
2271 	ppdu_user_desc->long_retries =
2272 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
2273 
2274 	ppdu_user_desc->short_retries =
2275 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
2276 	ppdu_user_desc->retry_msdus =
2277 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
2278 
2279 	ppdu_user_desc->is_ampdu =
2280 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
2281 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
2282 
2283 	ppdu_desc->resp_type =
2284 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
2285 	ppdu_desc->mprot_type =
2286 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
2287 	ppdu_desc->rts_success =
2288 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
2289 	ppdu_desc->rts_failure =
2290 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);
2291 	ppdu_user_desc->pream_punct =
2292 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf);
2293 
2294 	ppdu_info->compltn_common_tlv++;
2295 
	/*
	 * An MU BAR may request acks from n users while acks are received
	 * from only m of them. To count how many users responded, a separate
	 * per-PPDU counter, bar_num_users, is incremented for every
	 * htt_ppdu_stats_user_cmpltn_common_tlv received.
	 */
2302 	ppdu_desc->bar_num_users++;
2303 
2304 	tag_buf++;
2305 	for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
2306 		ppdu_user_desc->rssi_chain[bw_iter] =
2307 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
2308 		tag_buf++;
2309 	}
2310 
2311 	ppdu_user_desc->sa_tx_antenna =
2312 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);
2313 
2314 	tag_buf++;
2315 	ppdu_user_desc->sa_is_training =
2316 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
2317 	if (ppdu_user_desc->sa_is_training) {
2318 		ppdu_user_desc->sa_goodput =
2319 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
2320 	}
2321 
2322 	tag_buf++;
2323 	for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
2324 		ppdu_user_desc->sa_max_rates[bw_iter] =
2325 			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
2326 	}
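	/*
	 * One MAX_RATES word is consumed per smart-antenna bandwidth bucket;
	 * CDP_NUM_SA_BW presumably enumerates the supported channel widths
	 * (e.g. 20/40/80/160 MHz), one sa_max_rates entry each.
	 */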
2327 
2328 	tag_buf += CDP_NUM_SA_BW;
2329 	ppdu_user_desc->current_rate_per =
2330 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
2331 }
2332 
/*
 * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
 * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
 */
2342 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2343 		struct dp_pdev *pdev, uint32_t *tag_buf,
2344 		struct ppdu_info *ppdu_info)
2345 {
2346 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2347 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2348 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2349 	struct cdp_tx_completion_ppdu *ppdu_desc;
2350 	uint8_t curr_user_index = 0;
2351 	uint16_t peer_id;
2352 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2353 
2354 	ppdu_desc =
2355 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2356 
2357 	tag_buf++;
2358 
2359 	peer_id =
2360 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2361 
2362 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2363 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2364 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2365 	ppdu_user_desc->peer_id = peer_id;
2366 
2367 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2368 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2369 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
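	/*
	 * ba_size is expressed in bits: assuming CDP_BA_64_BIT_MAP_SIZE_DWORDS
	 * is 2, 2 dwords * 32 bits/dword gives the 64-bit BA window below.
	 */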
2370 	ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
2371 }
2372 
/*
 * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
 * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
 */
2382 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2383 		struct dp_pdev *pdev, uint32_t *tag_buf,
2384 		struct ppdu_info *ppdu_info)
2385 {
2386 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2387 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2388 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2389 	struct cdp_tx_completion_ppdu *ppdu_desc;
2390 	uint8_t curr_user_index = 0;
2391 	uint16_t peer_id;
2392 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2393 
2394 	ppdu_desc =
2395 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2396 
2397 	tag_buf++;
2398 
2399 	peer_id =
2400 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2401 
2402 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2403 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2404 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2405 	ppdu_user_desc->peer_id = peer_id;
2406 
2407 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2408 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2409 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2410 	ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
2411 }
2412 
/*
 * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
 * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
 */
2422 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2423 		struct dp_pdev *pdev, uint32_t *tag_buf,
2424 		struct ppdu_info *ppdu_info)
2425 {
2426 	uint16_t peer_id;
2427 	struct cdp_tx_completion_ppdu *ppdu_desc;
2428 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2429 	uint8_t curr_user_index = 0;
2430 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2431 
2432 	ppdu_desc =
2433 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2434 
2435 	tag_buf += 2;
2436 	peer_id =
2437 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2438 
2439 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2440 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2441 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2442 	if (!ppdu_user_desc->ack_ba_tlv) {
2443 		ppdu_user_desc->ack_ba_tlv = 1;
2444 	} else {
2445 		pdev->stats.ack_ba_comes_twice++;
2446 		return;
2447 	}
2448 
2449 	ppdu_user_desc->peer_id = peer_id;
2450 
2451 	tag_buf++;
	/* Do not update ppdu_desc->tid from this TLV */
2453 	ppdu_user_desc->num_mpdu =
2454 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2455 
2456 	ppdu_user_desc->num_msdu =
2457 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2458 
2459 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2460 
2461 	tag_buf++;
2462 	ppdu_user_desc->start_seq =
2463 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
2464 			*tag_buf);
2465 
2466 	tag_buf++;
2467 	ppdu_user_desc->success_bytes = *tag_buf;
2468 
2469 	/* increase ack ba tlv counter on successful mpdu */
2470 	if (ppdu_user_desc->num_mpdu)
2471 		ppdu_info->ack_ba_tlv++;
2472 
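	/*
	 * If no BA bitmap TLV populated a window for this user, synthesize a
	 * minimal single-frame ack record: BA sequence number = start_seq,
	 * bit 0 of the bitmap set, window size 1.
	 */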
2473 	if (ppdu_user_desc->ba_size == 0) {
2474 		ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
2475 		ppdu_user_desc->ba_bitmap[0] = 1;
2476 		ppdu_user_desc->ba_size = 1;
2477 	}
2478 }
2479 
/*
 * dp_process_ppdu_stats_user_common_array_tlv: Process
 * htt_ppdu_stats_usr_common_array_tlv_v
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_usr_common_array_tlv_v
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
 */
2489 static void dp_process_ppdu_stats_user_common_array_tlv(
2490 		struct dp_pdev *pdev, uint32_t *tag_buf,
2491 		struct ppdu_info *ppdu_info)
2492 {
2493 	uint32_t peer_id;
2494 	struct cdp_tx_completion_ppdu *ppdu_desc;
2495 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2496 	uint8_t curr_user_index = 0;
2497 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2498 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2499 
2500 	ppdu_desc =
2501 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2502 
2503 	tag_buf++;
2504 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2505 	tag_buf += 3;
2506 	peer_id =
2507 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2508 
2509 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
2510 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2511 			  "Invalid peer");
2512 		return;
2513 	}
2514 
2515 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2516 
2517 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2518 	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
2519 
2520 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2521 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2522 
2523 	tag_buf++;
2524 
2525 	ppdu_user_desc->success_msdus =
2526 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
	ppdu_user_desc->retry_msdus =
		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2529 	tag_buf++;
2530 	ppdu_user_desc->failed_msdus =
2531 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2532 }
2533 
/*
 * dp_process_ppdu_stats_user_compltn_flush_tlv: Process
 * htt_ppdu_stats_flush_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
 */
2543 static void
2544 dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2545 					     uint32_t *tag_buf,
2546 					     struct ppdu_info *ppdu_info)
2547 {
2548 	struct cdp_tx_completion_ppdu *ppdu_desc;
2549 	uint32_t peer_id;
2550 	uint8_t tid;
2551 	struct dp_peer *peer;
2552 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2553 
2554 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2555 				qdf_nbuf_data(ppdu_info->nbuf);
2556 	ppdu_desc->is_flush = 1;
2557 
2558 	tag_buf++;
2559 	ppdu_desc->drop_reason = *tag_buf;
2560 
2561 	tag_buf++;
2562 	ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2563 	ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
2564 	ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);
2565 
2566 	tag_buf++;
2567 	peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2568 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2569 
2570 	ppdu_desc->num_users = 1;
2571 	ppdu_desc->user[0].peer_id = peer_id;
2572 	ppdu_desc->user[0].tid = tid;
2573 
2574 	ppdu_desc->queue_type =
2575 			HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);
2576 
2577 	peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
2578 				     DP_MOD_ID_TX_PPDU_STATS);
2579 	if (!peer)
2580 		goto add_ppdu_to_sched_list;
2581 
2582 	if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2583 		DP_STATS_INC(peer,
2584 			     tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2585 			     ppdu_desc->num_msdu);
2586 	}
2587 
2588 	dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
2589 
2590 add_ppdu_to_sched_list:
2591 	ppdu_info->done = 1;
2592 	TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
2593 	mon_pdev->list_depth--;
2594 	TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
2595 			  ppdu_info_list_elem);
2596 	mon_pdev->sched_comp_list_depth++;
2597 }
2598 
/**
 * dp_process_ppdu_stats_sch_cmd_status_tlv: Process schedule command status tlv
 * The TLV buffer itself is not parsed here; this handler only finalizes the
 * PPDU on scheduler completion.
 * @pdev: DP PDEV handle
 * @ppdu_info: per ppdu tlv structure
 *
 * return: void
 */
2607 static void
2608 dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev,
2609 					 struct ppdu_info *ppdu_info)
2610 {
2611 	struct cdp_tx_completion_ppdu *ppdu_desc;
2612 	struct dp_peer *peer;
2613 	uint8_t num_users;
2614 	uint8_t i;
2615 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2616 
2617 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2618 				qdf_nbuf_data(ppdu_info->nbuf);
2619 
2620 	num_users = ppdu_desc->bar_num_users;
2621 
2622 	for (i = 0; i < num_users; i++) {
2623 		if (ppdu_desc->user[i].user_pos == 0) {
2624 			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
2625 				/* update phy mode for bar frame */
2626 				ppdu_desc->phy_mode =
2627 					ppdu_desc->user[i].preamble;
2628 				ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs;
2629 				break;
2630 			}
2631 			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) {
2632 				ppdu_desc->frame_ctrl =
2633 					ppdu_desc->user[i].frame_ctrl;
2634 				break;
2635 			}
2636 		}
2637 	}
2638 
2639 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
2640 	    ppdu_desc->delayed_ba) {
2641 		qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
2642 
2643 		for (i = 0; i < ppdu_desc->num_users; i++) {
2644 			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
2645 			uint64_t start_tsf;
2646 			uint64_t end_tsf;
2647 			uint32_t ppdu_id;
2648 			struct dp_mon_peer *mon_peer;
2649 
2650 			ppdu_id = ppdu_desc->ppdu_id;
2651 			peer = dp_peer_get_ref_by_id
2652 				(pdev->soc, ppdu_desc->user[i].peer_id,
2653 				 DP_MOD_ID_TX_PPDU_STATS);
			/*
			 * Skip this user if its peer was deleted while the
			 * TLVs were being processed.
			 */
2658 			if (!peer)
2659 				continue;
2660 
2661 			mon_peer = peer->monitor_peer;
2662 			delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
2663 			start_tsf = ppdu_desc->ppdu_start_timestamp;
2664 			end_tsf = ppdu_desc->ppdu_end_timestamp;
			/* Save delayed-BA user info */
2668 			if (ppdu_desc->user[i].delayed_ba) {
2669 				dp_peer_copy_delay_stats(peer,
2670 							 &ppdu_desc->user[i],
2671 							 ppdu_id);
2672 				mon_peer->last_delayed_ba_ppduid = ppdu_id;
2673 				delay_ppdu->ppdu_start_timestamp = start_tsf;
2674 				delay_ppdu->ppdu_end_timestamp = end_tsf;
2675 			}
2676 			ppdu_desc->user[i].peer_last_delayed_ba =
2677 				mon_peer->last_delayed_ba;
2678 
2679 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
2680 
2681 			if (ppdu_desc->user[i].delayed_ba &&
2682 			    !ppdu_desc->user[i].debug_copied) {
2683 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2684 					  QDF_TRACE_LEVEL_INFO_MED,
2685 					  "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n",
2686 					  __func__, __LINE__,
2687 					  ppdu_desc->ppdu_id,
2688 					  ppdu_desc->bar_ppdu_id,
2689 					  ppdu_desc->num_users,
2690 					  i,
2691 					  ppdu_desc->htt_frame_type);
2692 			}
2693 		}
2694 	}
2695 
	/*
	 * When the frame type is BAR and STATS_COMMON_TLV is set, copy the
	 * stored per-peer delayed-BA info into the BAR status.
	 */
2700 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
2701 		for (i = 0; i < ppdu_desc->bar_num_users; i++) {
2702 			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
2703 			uint64_t start_tsf;
2704 			uint64_t end_tsf;
2705 			struct dp_mon_peer *mon_peer;
2706 
2707 			peer = dp_peer_get_ref_by_id
2708 				(pdev->soc,
2709 				 ppdu_desc->user[i].peer_id,
2710 				 DP_MOD_ID_TX_PPDU_STATS);
			/*
			 * Skip this user if its peer was deleted while the
			 * TLVs were being processed.
			 */
2715 			if (!peer)
2716 				continue;
2717 
2718 			mon_peer = peer->monitor_peer;
2719 			if (ppdu_desc->user[i].completion_status !=
2720 			    HTT_PPDU_STATS_USER_STATUS_OK) {
2721 				dp_peer_unref_delete(peer,
2722 						     DP_MOD_ID_TX_PPDU_STATS);
2723 				continue;
2724 			}
2725 
2726 			delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
2727 			start_tsf = delay_ppdu->ppdu_start_timestamp;
2728 			end_tsf = delay_ppdu->ppdu_end_timestamp;
2729 
2730 			if (mon_peer->last_delayed_ba) {
2731 				dp_peer_copy_stats_to_bar(peer,
2732 							  &ppdu_desc->user[i]);
2733 				ppdu_desc->ppdu_id =
2734 					mon_peer->last_delayed_ba_ppduid;
2735 				ppdu_desc->ppdu_start_timestamp = start_tsf;
2736 				ppdu_desc->ppdu_end_timestamp = end_tsf;
2737 			}
2738 			ppdu_desc->user[i].peer_last_delayed_ba =
2739 						mon_peer->last_delayed_ba;
2740 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
2741 		}
2742 	}
2743 
2744 	TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
2745 	mon_pdev->list_depth--;
2746 	TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
2747 			  ppdu_info_list_elem);
2748 	mon_pdev->sched_comp_list_depth++;
2749 }
2750 
2751 /**
 * dp_validate_fix_ppdu_tlv(): Function to validate the length of a PPDU TLV
 *
 * If the TLV length sent as part of the PPDU TLV is less than the expected
 * size, i.e. the size of the corresponding data structure, pad the remaining
 * bytes with zeros and continue processing the TLVs.
2757  *
2758  * @pdev: DP pdev handle
2759  * @tag_buf: TLV buffer
2760  * @tlv_expected_size: Expected size of Tag
2761  * @tlv_len: TLV length received from FW
2762  *
2763  * Return: Pointer to updated TLV
2764  */
2765 static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
2766 						 uint32_t *tag_buf,
2767 						 uint16_t tlv_expected_size,
2768 						 uint16_t tlv_len)
2769 {
2770 	uint32_t *tlv_desc = tag_buf;
2771 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2772 
2773 	qdf_assert_always(tlv_len != 0);
2774 
2775 	if (tlv_len < tlv_expected_size) {
2776 		qdf_mem_zero(mon_pdev->ppdu_tlv_buf, tlv_expected_size);
2777 		qdf_mem_copy(mon_pdev->ppdu_tlv_buf, tag_buf, tlv_len);
2778 		tlv_desc = mon_pdev->ppdu_tlv_buf;
2779 	}
2780 
2781 	return tlv_desc;
2782 }
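/*
 * Illustrative example: if the host expects a 40-byte TLV but FW reports a
 * 32-byte one, the 32 received bytes are copied into the zeroed scratch
 * buffer mon_pdev->ppdu_tlv_buf and parsing continues on that padded copy,
 * so the trailing fields read back as zero instead of garbage.
 */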
2783 
2784 /**
2785  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
2786  * @pdev: DP pdev handle
2787  * @tag_buf: TLV buffer
2788  * @tlv_len: length of tlv
2789  * @ppdu_info: per ppdu tlv structure
2790  *
2791  * return: void
2792  */
2793 static void dp_process_ppdu_tag(struct dp_pdev *pdev,
2794 				uint32_t *tag_buf,
2795 				uint32_t tlv_len,
2796 				struct ppdu_info *ppdu_info)
2797 {
2798 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2799 	uint16_t tlv_expected_size;
2800 	uint32_t *tlv_desc;
2801 
2802 	switch (tlv_type) {
2803 	case HTT_PPDU_STATS_COMMON_TLV:
2804 		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
2805 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2806 						    tlv_expected_size, tlv_len);
2807 		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
2808 		break;
2809 	case HTT_PPDU_STATS_USR_COMMON_TLV:
2810 		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
2811 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2812 						    tlv_expected_size, tlv_len);
2813 		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
2814 						      ppdu_info);
2815 		break;
2816 	case HTT_PPDU_STATS_USR_RATE_TLV:
2817 		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
2818 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2819 						    tlv_expected_size, tlv_len);
2820 		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
2821 						    ppdu_info);
2822 		break;
2823 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
2824 		tlv_expected_size =
2825 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
2826 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2827 						    tlv_expected_size, tlv_len);
2828 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2829 				pdev, tlv_desc, ppdu_info);
2830 		break;
2831 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
2832 		tlv_expected_size =
2833 			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
2834 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2835 						    tlv_expected_size, tlv_len);
2836 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2837 				pdev, tlv_desc, ppdu_info);
2838 		break;
2839 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
2840 		tlv_expected_size =
2841 			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
2842 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2843 						    tlv_expected_size, tlv_len);
2844 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
2845 				pdev, tlv_desc, ppdu_info);
2846 		break;
2847 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
2848 		tlv_expected_size =
2849 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
2850 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2851 						    tlv_expected_size, tlv_len);
2852 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2853 				pdev, tlv_desc, ppdu_info);
2854 		break;
2855 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
2856 		tlv_expected_size =
2857 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
2858 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2859 						    tlv_expected_size, tlv_len);
2860 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2861 				pdev, tlv_desc, ppdu_info);
2862 		break;
2863 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
2864 		tlv_expected_size =
2865 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
2866 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2867 						    tlv_expected_size, tlv_len);
2868 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2869 				pdev, tlv_desc, ppdu_info);
2870 		break;
2871 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
2872 		tlv_expected_size =
2873 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
2874 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2875 						    tlv_expected_size, tlv_len);
2876 		dp_process_ppdu_stats_user_common_array_tlv(
2877 				pdev, tlv_desc, ppdu_info);
2878 		break;
2879 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
2880 		tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
2881 		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2882 						    tlv_expected_size, tlv_len);
2883 		dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
2884 							     ppdu_info);
2885 		break;
2886 	case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV:
2887 		dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info);
2888 		break;
2889 	default:
2890 		break;
2891 	}
2892 }
2893 
2894 #ifdef WLAN_ATF_ENABLE
2895 static void
2896 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
2897 				     struct cdp_tx_completion_ppdu *ppdu_desc,
2898 				     struct cdp_tx_completion_ppdu_user *user)
2899 {
	uint32_t nss_ru_width_sum = 0;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev || !ppdu_desc || !user)
		return;

	/* Dereference pdev only after the NULL check above */
	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev->dp_atf_stats_enable)
		return;
2908 
2909 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA)
2910 		return;
2911 
2912 	nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
2913 	if (!nss_ru_width_sum)
2914 		nss_ru_width_sum = 1;
2915 
	/*
	 * For a SU-MIMO PPDU the phy Tx time is the same for the single user.
	 * For MU-MIMO the phy Tx time is calculated per user as below:
	 *     user phy tx time =
	 *           entire PPDU duration * MU ratio * OFDMA ratio
	 *     MU ratio = usr_nss / sum_of_nss_of_all_users
	 *     OFDMA ratio = usr_ru_width / sum_of_ru_width_of_all_users
	 *     usr_ru_width = ru_end - ru_start + 1
	 */
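	/*
	 * Worked example (illustrative numbers): a 1000 us MU PPDU with two
	 * users, each with nss = 1 and ru_tones = 106, gives
	 * nss_ru_width_sum = (1 + 1) * (106 + 106) = 424, so each user is
	 * charged 1000 * (1 * 106) / 424 = 250 us, i.e. MU ratio 1/2 times
	 * OFDMA ratio 1/2.
	 */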
2925 	if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
2926 		user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us;
2927 	} else {
2928 		user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us *
2929 				user->nss * user->ru_tones) / nss_ru_width_sum;
2930 	}
2931 }
2932 #else
2933 static void
2934 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
2935 				     struct cdp_tx_completion_ppdu *ppdu_desc,
2936 				     struct cdp_tx_completion_ppdu_user *user)
2937 {
2938 }
2939 #endif
2940 
2941 /**
2942  * dp_ppdu_desc_user_stats_update(): Function to update TX user stats
2943  * @pdev: DP pdev handle
2944  * @ppdu_info: per PPDU TLV descriptor
2945  *
2946  * return: void
2947  */
2948 void
2949 dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
2950 			       struct ppdu_info *ppdu_info)
2951 {
2952 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2953 	struct dp_peer *peer = NULL;
2954 	uint32_t tlv_bitmap_expected;
2955 	uint32_t tlv_bitmap_default;
2956 	uint16_t i;
2957 	uint32_t num_users;
2958 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2959 
2960 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2961 		qdf_nbuf_data(ppdu_info->nbuf);
2962 
2963 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR)
2964 		ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2965 
2966 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
2967 	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
2968 	    mon_pdev->tx_capture_enabled) {
2969 		if (ppdu_info->is_ampdu)
2970 			tlv_bitmap_expected =
2971 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
2972 					ppdu_info->tlv_bitmap);
2973 	}
2974 
2975 	tlv_bitmap_default = tlv_bitmap_expected;
2976 
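	/*
	 * For BAR frames the per-user loop below runs over bar_num_users
	 * (one increment per completion TLV received, i.e. per responding
	 * user) rather than the common TLV's num_users.
	 */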
2977 	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
2978 		num_users = ppdu_desc->bar_num_users;
2979 		ppdu_desc->num_users = ppdu_desc->bar_num_users;
2980 	} else {
2981 		num_users = ppdu_desc->num_users;
2982 	}
2983 	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
2984 
2985 	for (i = 0; i < num_users; i++) {
2986 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
2987 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
2988 
2989 		peer = dp_peer_get_ref_by_id(pdev->soc,
2990 					     ppdu_desc->user[i].peer_id,
2991 					     DP_MOD_ID_TX_PPDU_STATS);
2992 		/**
2993 		 * This check is to make sure peer is not deleted
2994 		 * after processing the TLVs.
2995 		 */
2996 		if (!peer)
2997 			continue;
2998 
2999 		ppdu_desc->user[i].is_bss_peer = peer->bss_peer;
		/*
		 * Different frame types (DATA, BAR, CTRL) expect different
		 * TLV bitmaps. Apart from the ACK_BA_STATUS TLV, all TLVs
		 * arrive in order from FW. The ACK_BA_STATUS TLV comes from
		 * hardware and is asynchronous, so some TLV is needed to
		 * confirm that all TLVs for a PPDU have been received; both
		 * SCHED_CMD_STATUS_TLV and ACK_BA_STATUS_TLV are used for
		 * this. For failed packets no ACK_BA_STATUS_TLV is received.
		 */
3011 		if (!(ppdu_info->tlv_bitmap &
3012 		      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) ||
3013 		    (!(ppdu_info->tlv_bitmap &
3014 		       (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
3015 		     (ppdu_desc->user[i].completion_status ==
3016 		      HTT_PPDU_STATS_USER_STATUS_OK))) {
3017 			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3018 			continue;
3019 		}
3020 
		/*
		 * Update tx stats for data frames with QoS as well as
		 * non-QoS data TIDs.
		 */
3025 
3026 		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
3027 		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
3028 		     (ppdu_desc->htt_frame_type ==
3029 		      HTT_STATS_FTYPE_SGEN_QOS_NULL) ||
3030 		     ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) &&
3031 		      (ppdu_desc->num_mpdu > 1))) &&
3032 		      (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {
3033 			dp_tx_stats_update(pdev, peer,
3034 					   &ppdu_desc->user[i],
3035 					   ppdu_desc->ack_rssi);
3036 			dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
3037 		}
3038 
3039 		dp_ppdu_desc_user_phy_tx_time_update(pdev, ppdu_desc,
3040 						     &ppdu_desc->user[i]);
3041 
3042 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3043 		tlv_bitmap_expected = tlv_bitmap_default;
3044 	}
3045 }
3046 
3047 #ifndef WLAN_TX_PKT_CAPTURE_ENH
3048 
3049 /**
3050  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
3051  * to upper layer
3052  * @pdev: DP pdev handle
3053  * @ppdu_info: per PPDU TLV descriptor
3054  *
3055  * return: void
3056  */
3057 static
3058 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
3059 			  struct ppdu_info *ppdu_info)
3060 {
3061 	struct ppdu_info *s_ppdu_info = NULL;
3062 	struct ppdu_info *ppdu_info_next = NULL;
3063 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3064 	qdf_nbuf_t nbuf;
3065 	uint32_t time_delta = 0;
3066 	bool starved = 0;
3067 	bool matched = 0;
3068 	bool recv_ack_ba_done = 0;
3069 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
3070 
3071 	if (ppdu_info->tlv_bitmap &
3072 	    (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
3073 	    ppdu_info->done)
3074 		recv_ack_ba_done = 1;
3075 
3076 	mon_pdev->last_sched_cmdid = ppdu_info->sched_cmdid;
3077 
3078 	s_ppdu_info = TAILQ_FIRST(&mon_pdev->sched_comp_ppdu_list);
3079 
3080 	TAILQ_FOREACH_SAFE(s_ppdu_info, &mon_pdev->sched_comp_ppdu_list,
3081 			   ppdu_info_list_elem, ppdu_info_next) {
3082 		if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32)
3083 			time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) +
3084 					ppdu_info->tsf_l32;
3085 		else
3086 			time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32;
3087 
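		/*
		 * tsf_l32 is a 32-bit timestamp, so a smaller current value
		 * means the counter wrapped and the delta is computed across
		 * the wrap point. Illustrative example: s_tsf_l32 =
		 * 0xFFFFFF00 with tsf_l32 = 0x100 gives a delta of roughly
		 * 0x200 instead of a huge wrapped difference.
		 */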
3088 		if (!s_ppdu_info->done && !recv_ack_ba_done) {
3089 			if (time_delta < MAX_SCHED_STARVE) {
3090 				dp_mon_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]",
3091 					    pdev->pdev_id,
3092 					    s_ppdu_info->ppdu_id,
3093 					    s_ppdu_info->sched_cmdid,
3094 					    s_ppdu_info->tlv_bitmap,
3095 					    s_ppdu_info->tsf_l32,
3096 					    s_ppdu_info->done);
3097 				break;
3098 			}
3099 			starved = 1;
3100 		}
3101 
3102 		mon_pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid;
3103 		TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list, s_ppdu_info,
3104 			     ppdu_info_list_elem);
3105 		mon_pdev->sched_comp_list_depth--;
3106 
3107 		nbuf = s_ppdu_info->nbuf;
3108 		qdf_assert_always(nbuf);
3109 		ppdu_desc = (struct cdp_tx_completion_ppdu *)
3110 				qdf_nbuf_data(nbuf);
3111 		ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap;
3112 
3113 		if (starved) {
3114 			dp_mon_info("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n",
3115 				    ppdu_desc->frame_ctrl,
3116 				    ppdu_desc->htt_frame_type,
3117 				    ppdu_desc->tlv_bitmap,
3118 				    ppdu_desc->user[0].completion_status);
3119 			starved = 0;
3120 		}
3121 
3122 		if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id &&
3123 		    ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid)
3124 			matched = 1;
3125 
3126 		dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info);
3127 
3128 		qdf_mem_free(s_ppdu_info);
3129 
		/*
		 * Deliver PPDU stats only for valid (acked) data frames if
		 * sniffer mode is not enabled. If sniffer mode is enabled,
		 * PPDU stats for all frames including mgmt/control frames
		 * should be delivered to the upper layer.
		 */
3137 		if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) {
3138 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3139 					     pdev->soc,
3140 					     nbuf, HTT_INVALID_PEER,
3141 					     WDI_NO_VAL,
3142 					     pdev->pdev_id);
3143 		} else {
3144 			if ((ppdu_desc->num_mpdu != 0 ||
3145 			     ppdu_desc->delayed_ba) &&
3146 			    ppdu_desc->num_users != 0 &&
3147 			    ((ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) ||
3148 			     ((ppdu_desc->htt_frame_type ==
3149 			       HTT_STATS_FTYPE_SGEN_MU_BAR) ||
3150 			      (ppdu_desc->htt_frame_type ==
3151 			       HTT_STATS_FTYPE_SGEN_BAR)))) {
3152 				dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3153 						     pdev->soc,
3154 						     nbuf, HTT_INVALID_PEER,
3155 						     WDI_NO_VAL,
3156 						     pdev->pdev_id);
3157 			} else {
3158 				qdf_nbuf_free(nbuf);
3159 			}
3160 		}
3161 
3162 		if (matched)
3163 			break;
3164 	}
3165 }
3166 
3167 #endif
3168 
3169 /**
3170  * dp_get_ppdu_desc(): Function to allocate new PPDU status
3171  * desc for new ppdu id
3172  * @pdev: DP pdev handle
3173  * @ppdu_id: PPDU unique identifier
3174  * @tlv_type: TLV type received
3175  * @tsf_l32: timestamp received along with ppdu stats indication header
3176  * @max_users: Maximum user for that particular ppdu
3177  *
3178  * return: ppdu_info per ppdu tlv structure
3179  */
3180 static
3181 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
3182 				   uint8_t tlv_type, uint32_t tsf_l32,
3183 				   uint8_t max_users)
3184 {
3185 	struct ppdu_info *ppdu_info = NULL;
3186 	struct ppdu_info *s_ppdu_info = NULL;
3187 	struct ppdu_info *ppdu_info_next = NULL;
3188 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3189 	uint32_t size = 0;
3190 	struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL;
3191 	struct cdp_tx_completion_ppdu_user *tmp_user;
3192 	uint32_t time_delta;
3193 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
3194 
	/*
	 * Check whether a node for this ppdu_id already exists
	 */
3198 	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
3199 			   ppdu_info_list_elem, ppdu_info_next) {
3200 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
3201 			if (ppdu_info->tsf_l32 > tsf_l32)
3202 				time_delta  = (MAX_TSF_32 -
3203 					       ppdu_info->tsf_l32) + tsf_l32;
3204 			else
3205 				time_delta  = tsf_l32 - ppdu_info->tsf_l32;
3206 
3207 			if (time_delta > WRAP_DROP_TSF_DELTA) {
3208 				TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
3209 					     ppdu_info, ppdu_info_list_elem);
3210 				mon_pdev->list_depth--;
3211 				pdev->stats.ppdu_wrap_drop++;
3212 				tmp_ppdu_desc =
3213 					(struct cdp_tx_completion_ppdu *)
3214 					qdf_nbuf_data(ppdu_info->nbuf);
3215 				tmp_user = &tmp_ppdu_desc->user[0];
3216 				dp_htt_tx_stats_info("S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n",
3217 						     ppdu_info->ppdu_id,
3218 						     ppdu_info->tsf_l32,
3219 						     ppdu_info->tlv_bitmap,
3220 						     tmp_user->completion_status,
3221 						     ppdu_info->compltn_common_tlv,
3222 						     ppdu_info->ack_ba_tlv,
3223 						     ppdu_id, tsf_l32,
3224 						     tlv_type);
3225 				qdf_nbuf_free(ppdu_info->nbuf);
3226 				ppdu_info->nbuf = NULL;
3227 				qdf_mem_free(ppdu_info);
3228 			} else {
3229 				break;
3230 			}
3231 		}
3232 	}
3233 
	/*
	 * If this is an ack ba tlv and no match was found in the ppdu info
	 * list, check the sched completion ppdu list as well.
	 */
3238 	if (!ppdu_info &&
3239 	    tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) {
3240 		TAILQ_FOREACH(s_ppdu_info,
3241 			      &mon_pdev->sched_comp_ppdu_list,
3242 			      ppdu_info_list_elem) {
3243 			if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) {
3244 				if (s_ppdu_info->tsf_l32 > tsf_l32)
3245 					time_delta  = (MAX_TSF_32 -
3246 						       s_ppdu_info->tsf_l32) +
3247 							tsf_l32;
3248 				else
3249 					time_delta  = tsf_l32 -
3250 						s_ppdu_info->tsf_l32;
3251 				if (time_delta < WRAP_DROP_TSF_DELTA) {
3252 					ppdu_info = s_ppdu_info;
3253 					break;
3254 				}
3255 			} else {
				/*
				 * ACK BA STATUS TLVs arrive in sequential
				 * order: if one is received for a later ppdu
				 * while an earlier ppdu is still waiting for
				 * its own, then (per FW) the earlier TLV will
				 * never arrive, so that ppdu info can be
				 * marked done.
				 */
3264 				if (s_ppdu_info)
3265 					s_ppdu_info->done = 1;
3266 			}
3267 		}
3268 	}
3269 
3270 	if (ppdu_info) {
3271 		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
			/*
			 * If a tlv_type that has already been processed for
			 * this ppdu arrives again, a new ppdu with the same
			 * ppdu id has started, so flush the older one.
			 * Exception: for MU-MIMO and OFDMA a single PPDU has
			 * multiple users with the same tlv types, and the
			 * tlv bitmap is used to tell SU from MU_MIMO/OFDMA.
			 */
3280 			if (!(ppdu_info->tlv_bitmap &
3281 			    (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
3282 				return ppdu_info;
3283 
3284 			ppdu_desc = (struct cdp_tx_completion_ppdu *)
3285 				qdf_nbuf_data(ppdu_info->nbuf);
3286 
			/*
			 * Apart from the ACK BA STATUS TLV everything arrives
			 * in order, so for any other tlv type the ppdu_info
			 * can be delivered now.
			 */
3292 			if ((tlv_type ==
3293 			     HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
3294 			    (ppdu_desc->htt_frame_type ==
3295 			     HTT_STATS_FTYPE_SGEN_MU_BAR ||
3296 			     ppdu_desc->htt_frame_type ==
3297 			     HTT_STATS_FTYPE_SGEN_BAR))
3298 				return ppdu_info;
3299 
3300 			dp_ppdu_desc_deliver(pdev, ppdu_info);
3301 		} else {
3302 			return ppdu_info;
3303 		}
3304 	}
3305 
	/*
	 * Flush the head ppdu descriptor if the ppdu desc list reaches the
	 * max threshold
	 */
3310 	if (mon_pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
3311 		ppdu_info = TAILQ_FIRST(&mon_pdev->ppdu_info_list);
3312 		TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
3313 			     ppdu_info, ppdu_info_list_elem);
3314 		mon_pdev->list_depth--;
3315 		pdev->stats.ppdu_drop++;
3316 		qdf_nbuf_free(ppdu_info->nbuf);
3317 		ppdu_info->nbuf = NULL;
3318 		qdf_mem_free(ppdu_info);
3319 	}
3320 
3321 	size = sizeof(struct cdp_tx_completion_ppdu) +
3322 		(max_users * sizeof(struct cdp_tx_completion_ppdu_user));
3323 
3324 	/*
3325 	 * Allocate new ppdu_info node
3326 	 */
3327 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
3328 	if (!ppdu_info)
3329 		return NULL;
3330 
3331 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size,
3332 					 0, 4, TRUE);
3333 	if (!ppdu_info->nbuf) {
3334 		qdf_mem_free(ppdu_info);
3335 		return NULL;
3336 	}
3337 
3338 	ppdu_info->ppdu_desc =
3339 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3340 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size);
3341 
3342 	if (qdf_nbuf_put_tail(ppdu_info->nbuf, size) == NULL) {
3343 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3344 			  "No tailroom for HTT PPDU");
3345 		qdf_nbuf_free(ppdu_info->nbuf);
3346 		ppdu_info->nbuf = NULL;
3347 		ppdu_info->last_user = 0;
3348 		qdf_mem_free(ppdu_info);
3349 		return NULL;
3350 	}
3351 
3352 	ppdu_info->ppdu_desc->max_users = max_users;
3353 	ppdu_info->tsf_l32 = tsf_l32;
	/*
	 * No lock is needed because all PPDU TLVs are processed in the same
	 * context and this list is updated in that same context.
	 */
3358 	TAILQ_INSERT_TAIL(&mon_pdev->ppdu_info_list, ppdu_info,
3359 			  ppdu_info_list_elem);
3360 	mon_pdev->list_depth++;
3361 	return ppdu_info;
3362 }
3363 
3364 /**
3365  * dp_htt_process_tlv(): Function to process each PPDU TLVs
3366  * @pdev: DP pdev handle
3367  * @htt_t2h_msg: HTT target to host message
3368  *
3369  * return: ppdu_info per ppdu tlv structure
3370  */
3371 
3372 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
3373 					    qdf_nbuf_t htt_t2h_msg)
3374 {
3375 	uint32_t length;
3376 	uint32_t ppdu_id;
3377 	uint8_t tlv_type;
3378 	uint32_t tlv_length, tlv_bitmap_expected;
3379 	uint8_t *tlv_buf;
3380 	struct ppdu_info *ppdu_info = NULL;
3381 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3382 	uint8_t max_users = CDP_MU_MAX_USERS;
3383 	uint32_t tsf_l32;
3384 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
3385 
3386 	uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
3387 
3388 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
3389 
3390 	msg_word = msg_word + 1;
3391 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
3392 
3393 	msg_word = msg_word + 1;
3394 	tsf_l32 = (uint32_t)(*msg_word);
3395 
3396 	msg_word = msg_word + 2;
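	/*
	 * Per the GET macros above, the stats indication is laid out as:
	 * word 0 carries the payload size, word 1 the ppdu_id, word 2 the
	 * lower 32 bits of the TSF; the TLV stream starts at word 4. Each
	 * TLV opens with a header word holding its tag and length, which is
	 * why tlv_length below is padded by HTT_TLV_HDR_LEN before stepping
	 * to the next TLV.
	 */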
3397 	while (length > 0) {
3398 		tlv_buf = (uint8_t *)msg_word;
3399 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
3400 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
3401 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
3402 			pdev->stats.ppdu_stats_counter[tlv_type]++;
3403 
3404 		if (tlv_length == 0)
3405 			break;
3406 
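		/*
		 * The length field in a TLV header excludes the header
		 * itself; add it back so the buffer pointer advances past
		 * header plus payload.
		 */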
3407 		tlv_length += HTT_TLV_HDR_LEN;
3408 
		/*
		 * Not allocating a separate ppdu descriptor for the MGMT
		 * payload TLV, as it is sent as a separate WDI indication
		 * and does not contain any ppdu information.
		 */
3414 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
3415 			mon_pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
3416 			mon_pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
3417 			mon_pdev->mgmtctrl_frm_info.mgmt_buf_len =
3418 				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
3419 						(*(msg_word + 1));
3420 			msg_word =
3421 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
3422 			length -= (tlv_length);
3423 			continue;
3424 		}
3425 
		/*
		 * Retrieve max_users from the USERS_INFO TLV; for a
		 * COMPLTN_FLUSH TLV it is 1; otherwise default to
		 * CDP_MU_MAX_USERS.
		 */
3431 		if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) {
3432 			max_users =
3433 				HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1));
3434 		} else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) {
3435 			max_users = 1;
3436 		}
3437 
3438 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type,
3439 					     tsf_l32, max_users);
3440 		if (!ppdu_info)
3441 			return NULL;
3442 
3443 		ppdu_info->ppdu_id = ppdu_id;
3444 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
3445 
3446 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
3447 
		/*
		 * Increment the pdev-level TLV count to monitor
		 * missing TLVs.
		 */
3452 		mon_pdev->tlv_count++;
3453 		ppdu_info->last_tlv_cnt = mon_pdev->tlv_count;
3454 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
3455 		length -= (tlv_length);
3456 	}
3457 
3458 	if (!ppdu_info)
3459 		return NULL;
3460 
3461 	mon_pdev->last_ppdu_id = ppdu_id;
3462 
3463 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
3464 
3465 	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
3466 	    mon_pdev->tx_capture_enabled) {
3467 		if (ppdu_info->is_ampdu)
3468 			tlv_bitmap_expected =
3469 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
3470 					ppdu_info->tlv_bitmap);
3471 	}
3472 
3473 	ppdu_desc = ppdu_info->ppdu_desc;
3474 
3475 	if (!ppdu_desc)
3476 		return NULL;
3477 
3478 	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
3479 	    HTT_PPDU_STATS_USER_STATUS_OK) {
3480 		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
3481 	}
3482 
	/*
	 * For frame types DATA and BAR we update stats based on MSDUs.
	 * Successful msdu and mpdu counts are populated from the ACK BA
	 * STATUS TLV, which arrives out of order; the successful mpdu count
	 * is also populated from the COMPLTN COMMON TLV, which arrives in
	 * order. For every ppdu_info we store the successful mpdu count from
	 * both TLVs and compare them before delivering, to make sure the ACK
	 * BA STATUS TLV was received. For some self-generated frames no ACK
	 * BA STATUS TLV is sent, so there is no need to wait for it.
	 */
3493 	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
3494 	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
		/*
		 * Most of the time a BAR frame will have a duplicate
		 * ACK BA STATUS TLV.
		 */
3499 		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
3500 		    (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))
3501 			return NULL;
		/*
		 * For data frames, the COMPLTN COMMON TLV must match the
		 * ACK BA STATUS TLV and the completion status. We check only
		 * the first user because, for OFDMA, completion is seen at
		 * the next MU BAR frame, while for MIMO only the first
		 * user's completion is immediate.
		 */
3508 		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
3509 		    (ppdu_desc->user[0].completion_status == 0 &&
3510 		     (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)))
3511 			return NULL;
3512 	}
3513 
	/*
	 * Once all the TLVs for a given PPDU have been processed,
	 * return the PPDU status to be delivered to the higher layer.
	 * tlv_bitmap_expected cannot be relied on across frame types,
	 * but the SCHED CMD STATUS TLV is the last TLV the FW sends for a
	 * ppdu; apart from the ACK BA TLV, the FW sends the other TLVs in
	 * sequential order. The flush TLV arrives separately.
	 */
3522 	if ((ppdu_info->tlv_bitmap != 0 &&
3523 	     (ppdu_info->tlv_bitmap &
3524 	      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) ||
3525 	    (ppdu_info->tlv_bitmap &
3526 	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) {
3527 		ppdu_info->done = 1;
3528 		return ppdu_info;
3529 	}
3530 
3531 	return NULL;
3532 }
3533 #else
3534 void
3535 dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
3536 			       struct ppdu_info *ppdu_info)
3537 {
3538 }
3539 #endif /* QCA_ENHANCED_STATS_SUPPORT */
3540 
3541 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
3542 static void dp_htt_process_smu_ppdu_stats_tlv(struct dp_soc *soc,
3543 					      qdf_nbuf_t htt_t2h_msg)
3544 {
3545 	uint32_t length;
3546 	uint8_t tlv_type;
3547 	uint32_t tlv_length, tlv_expected_size;
3548 	uint8_t *tlv_buf;
3549 
3550 	uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
3551 
3552 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
3553 
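	/*
	 * Skip the 4-word HTT PPDU stats message header (msg info, ppdu_id
	 * and timestamp), mirroring the header parsing in
	 * dp_htt_process_tlv().
	 */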
3554 	msg_word = msg_word + 4;
3555 
3556 	while (length > 0) {
3557 		tlv_buf = (uint8_t *)msg_word;
3558 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
3559 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
3560 
3561 		if (tlv_length == 0)
3562 			break;
3563 
3564 		tlv_length += HTT_TLV_HDR_LEN;
3565 
3566 		if (tlv_type == HTT_PPDU_STATS_FOR_SMU_TLV) {
3567 			tlv_expected_size = sizeof(htt_ppdu_stats_for_smu_tlv);
3568 
3569 			if (tlv_length >= tlv_expected_size)
3570 				dp_wdi_event_handler(
3571 					WDI_EVENT_PKT_CAPTURE_PPDU_STATS,
3572 					soc, msg_word, HTT_INVALID_VDEV,
3573 					WDI_NO_VAL, 0);
3574 		}
3575 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
3576 		length -= (tlv_length);
3577 	}
3578 }
3579 #endif
3580 
/**
 * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
 * @soc: DP SOC handle
 * @pdev_id: pdev id
 * @htt_t2h_msg: HTT message nbuf
 *
 * Return: true if the buffer should be freed by the caller
 */
3589 #if defined(WDI_EVENT_ENABLE)
3590 #ifdef QCA_ENHANCED_STATS_SUPPORT
3591 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
3592 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
3593 {
	struct dp_pdev *pdev;
	struct ppdu_info *ppdu_info = NULL;
	bool free_buf = true;
	struct dp_mon_pdev *mon_pdev;

	/* Validate pdev_id before indexing pdev_list, and only dereference
	 * pdev for the monitor context after the NULL check.
	 */
	if (pdev_id >= MAX_PDEV_CNT)
		return true;

	pdev = soc->pdev_list[pdev_id];
	if (!pdev)
		return true;

	mon_pdev = pdev->monitor_pdev;

3606 	if (!mon_pdev->enhanced_stats_en && !mon_pdev->tx_sniffer_enable &&
3607 	    !mon_pdev->mcopy_mode && !mon_pdev->bpr_enable)
3608 		return free_buf;
3609 
3610 	qdf_spin_lock_bh(&mon_pdev->ppdu_stats_lock);
3611 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
3612 
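	/*
	 * If a TX_MGMTCTRL_PAYLOAD TLV was cached during TLV processing,
	 * hand it off now; if the handler consumes the nbuf, the caller
	 * must not free it.
	 */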
3613 	if (mon_pdev->mgmtctrl_frm_info.mgmt_buf) {
3614 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
3615 		    (pdev, htt_t2h_msg, mon_pdev->mgmtctrl_frm_info.ppdu_id) !=
3616 		    QDF_STATUS_SUCCESS)
3617 			free_buf = false;
3618 	}
3619 
3620 	if (ppdu_info)
3621 		dp_ppdu_desc_deliver(pdev, ppdu_info);
3622 
3623 	mon_pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
3624 	mon_pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
3625 	mon_pdev->mgmtctrl_frm_info.ppdu_id = 0;
3626 
3627 	qdf_spin_unlock_bh(&mon_pdev->ppdu_stats_lock);
3628 
3629 	return free_buf;
3630 }
3631 #elif defined(WLAN_FEATURE_PKT_CAPTURE_V2)
3632 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
3633 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
3634 {
3635 	if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
3636 		dp_htt_process_smu_ppdu_stats_tlv(soc, htt_t2h_msg);
3637 
3638 	return true;
3639 }
3640 #else
3641 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
3642 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
3643 {
3644 	return true;
3645 }
#endif /* QCA_ENHANCED_STATS_SUPPORT */
3647 #endif
3648 
3649 #if defined(WDI_EVENT_ENABLE) &&\
3650 	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
/**
 * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
 * @soc:         HTT SOC handle
 * @msg_word:    Pointer to payload
 * @htt_t2h_msg: HTT msg nbuf
 *
 * Return: True if buffer should be freed by caller.
 */
3659 static bool
3660 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
3661 			  uint32_t *msg_word,
3662 			  qdf_nbuf_t htt_t2h_msg)
3663 {
3664 	u_int8_t pdev_id;
3665 	u_int8_t target_pdev_id;
3666 	bool free_buf;
3667 
3668 	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
3669 	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
3670 							 target_pdev_id);
3671 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
3672 			     htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
3673 			     pdev_id);
3674 
3675 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
3676 					      htt_t2h_msg);
3677 
3678 	return free_buf;
3679 }
3680 #endif
3681 
/**
 * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing
 * @pdev: Datapath PDEV handle
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *         QDF_STATUS_E_NOMEM: Error
 */
3689 static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
3690 {
3691 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
3692 
3693 	mon_pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
3694 
3695 	if (!mon_pdev->ppdu_tlv_buf) {
3696 		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
3697 		return QDF_STATUS_E_NOMEM;
3698 	}
3699 
3700 	return QDF_STATUS_SUCCESS;
3701 }
3702 
/**
 * dp_htt_ppdu_stats_detach() - detach stats resources
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */
3709 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3710 {
3711 	struct ppdu_info *ppdu_info, *ppdu_info_next;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

3715 	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
3716 			   ppdu_info_list_elem, ppdu_info_next) {
3717 		if (!ppdu_info)
3718 			break;
3719 		TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
3720 			     ppdu_info, ppdu_info_list_elem);
3721 		mon_pdev->list_depth--;
3722 		qdf_assert_always(ppdu_info->nbuf);
3723 		qdf_nbuf_free(ppdu_info->nbuf);
3724 		qdf_mem_free(ppdu_info);
3725 	}
3726 
3727 	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->sched_comp_ppdu_list,
3728 			   ppdu_info_list_elem, ppdu_info_next) {
3729 		if (!ppdu_info)
3730 			break;
3731 		TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list,
3732 			     ppdu_info, ppdu_info_list_elem);
3733 		mon_pdev->sched_comp_list_depth--;
3734 		qdf_assert_always(ppdu_info->nbuf);
3735 		qdf_nbuf_free(ppdu_info->nbuf);
3736 		qdf_mem_free(ppdu_info);
3737 	}
3738 
3739 	if (mon_pdev->ppdu_tlv_buf)
3740 		qdf_mem_free(mon_pdev->ppdu_tlv_buf);
3741 }
3742 
3743 static void
3744 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
3745 {
3746 	struct cdp_pdev_mon_stats *rx_mon_stats;
3747 	uint32_t *stat_ring_ppdu_ids;
3748 	uint32_t *dest_ring_ppdu_ids;
3749 	int i, idx;
3750 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
3751 
3752 	rx_mon_stats = &mon_pdev->rx_mon_stats;
3753 
3754 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
3755 
3756 	DP_PRINT_STATS("status_ppdu_compl_cnt = %d",
3757 		       rx_mon_stats->status_ppdu_compl);
3758 	DP_PRINT_STATS("status_ppdu_start_cnt = %d",
3759 		       rx_mon_stats->status_ppdu_start);
3760 	DP_PRINT_STATS("status_ppdu_end_cnt = %d",
3761 		       rx_mon_stats->status_ppdu_end);
3762 	DP_PRINT_STATS("status_ppdu_start_mis_cnt = %d",
3763 		       rx_mon_stats->status_ppdu_start_mis);
3764 	DP_PRINT_STATS("status_ppdu_end_mis_cnt = %d",
3765 		       rx_mon_stats->status_ppdu_end_mis);
3766 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
3767 		       rx_mon_stats->status_ppdu_done);
3768 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
3769 		       rx_mon_stats->dest_ppdu_done);
3770 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
3771 		       rx_mon_stats->dest_mpdu_done);
3772 	DP_PRINT_STATS("tlv_tag_status_err_cnt = %u",
3773 		       rx_mon_stats->tlv_tag_status_err);
3774 	DP_PRINT_STATS("mon status DMA not done WAR count= %u",
3775 		       rx_mon_stats->status_buf_done_war);
3776 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
3777 		       rx_mon_stats->dest_mpdu_drop);
3778 	DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
3779 		       rx_mon_stats->dup_mon_linkdesc_cnt);
3780 	DP_PRINT_STATS("dup_mon_buf_cnt = %d",
3781 		       rx_mon_stats->dup_mon_buf_cnt);
3782 	DP_PRINT_STATS("mon_rx_buf_reaped = %u",
3783 		       rx_mon_stats->mon_rx_bufs_reaped_dest);
3784 	DP_PRINT_STATS("mon_rx_buf_replenished = %u",
3785 		       rx_mon_stats->mon_rx_bufs_replenished_dest);
3786 	DP_PRINT_STATS("ppdu_id_mismatch = %u",
3787 		       rx_mon_stats->ppdu_id_mismatch);
3788 	DP_PRINT_STATS("mpdu_ppdu_id_match_cnt = %d",
3789 		       rx_mon_stats->ppdu_id_match);
3790 	DP_PRINT_STATS("ppdus dropped frm status ring = %d",
3791 		       rx_mon_stats->status_ppdu_drop);
3792 	DP_PRINT_STATS("ppdus dropped frm dest ring = %d",
3793 		       rx_mon_stats->dest_ppdu_drop);
3794 	stat_ring_ppdu_ids =
3795 		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);
3796 	dest_ring_ppdu_ids =
3797 		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);
3798 
	if (!stat_ring_ppdu_ids || !dest_ring_ppdu_ids) {
		DP_PRINT_STATS("Unable to allocate ppdu id hist mem\n");
		/* qdf_mem_free() is a no-op on NULL, so both frees are safe */
		qdf_mem_free(stat_ring_ppdu_ids);
		qdf_mem_free(dest_ring_ppdu_ids);
		return;
	}
3801 
3802 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
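	/*
	 * Snapshot the PPDU ID history under mon_lock so the prints below
	 * do not race with the monitor path updating the ring.
	 */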
3803 	idx = rx_mon_stats->ppdu_id_hist_idx;
3804 	qdf_mem_copy(stat_ring_ppdu_ids,
3805 		     rx_mon_stats->stat_ring_ppdu_id_hist,
3806 		     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
3807 	qdf_mem_copy(dest_ring_ppdu_ids,
3808 		     rx_mon_stats->dest_ring_ppdu_id_hist,
3809 		     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
3810 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
3811 
3812 	DP_PRINT_STATS("PPDU Id history:");
3813 	DP_PRINT_STATS("stat_ring_ppdu_ids\t dest_ring_ppdu_ids");
3814 	for (i = 0; i < MAX_PPDU_ID_HIST; i++) {
3815 		idx = (idx + 1) & (MAX_PPDU_ID_HIST - 1);
3816 		DP_PRINT_STATS("%*u\t%*u", 16,
3817 			       rx_mon_stats->stat_ring_ppdu_id_hist[idx], 16,
3818 			       rx_mon_stats->dest_ring_ppdu_id_hist[idx]);
3819 	}
3820 	qdf_mem_free(stat_ring_ppdu_ids);
3821 	qdf_mem_free(dest_ring_ppdu_ids);
3822 	DP_PRINT_STATS("mon_rx_dest_stuck = %d",
3823 		       rx_mon_stats->mon_rx_dest_stuck);
3824 }
3825 
/**
 * dp_set_bpr_enable() - API to enable/disable bpr feature
 * @pdev: DP_PDEV handle.
 * @val: Provided value.
 *
 * Return: 0 for success. nonzero for failure.
 */
3833 #ifdef QCA_SUPPORT_BPR
3834 static QDF_STATUS
3835 dp_set_bpr_enable(struct dp_pdev *pdev, int val)
3836 {
3837 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
3838 
3839 	switch (val) {
3840 	case CDP_BPR_DISABLE:
3841 		mon_pdev->bpr_enable = CDP_BPR_DISABLE;
3842 		if (!mon_pdev->pktlog_ppdu_stats &&
3843 		    !mon_pdev->enhanced_stats_en &&
3844 		    !mon_pdev->tx_sniffer_enable && !mon_pdev->mcopy_mode) {
3845 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
3846 		} else if (mon_pdev->enhanced_stats_en &&
3847 			   !mon_pdev->tx_sniffer_enable &&
3848 			   !mon_pdev->mcopy_mode &&
3849 			   !mon_pdev->pktlog_ppdu_stats) {
3850 			dp_h2t_cfg_stats_msg_send(pdev,
3851 						  DP_PPDU_STATS_CFG_ENH_STATS,
3852 						  pdev->pdev_id);
3853 		}
3854 		break;
3855 	case CDP_BPR_ENABLE:
3856 		mon_pdev->bpr_enable = CDP_BPR_ENABLE;
3857 		if (!mon_pdev->enhanced_stats_en &&
3858 		    !mon_pdev->tx_sniffer_enable &&
3859 		    !mon_pdev->mcopy_mode && !mon_pdev->pktlog_ppdu_stats) {
3860 			dp_h2t_cfg_stats_msg_send(pdev,
3861 						  DP_PPDU_STATS_CFG_BPR,
3862 						  pdev->pdev_id);
3863 		} else if (mon_pdev->enhanced_stats_en &&
3864 			   !mon_pdev->tx_sniffer_enable &&
3865 			   !mon_pdev->mcopy_mode &&
3866 			   !mon_pdev->pktlog_ppdu_stats) {
3867 			dp_h2t_cfg_stats_msg_send(pdev,
3868 						  DP_PPDU_STATS_CFG_BPR_ENH,
3869 						  pdev->pdev_id);
3870 		} else if (mon_pdev->pktlog_ppdu_stats) {
3871 			dp_h2t_cfg_stats_msg_send(pdev,
3872 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
3873 						  pdev->pdev_id);
3874 		}
3875 		break;
3876 	default:
3877 		break;
3878 	}
3879 
3880 	return QDF_STATUS_SUCCESS;
3881 }
3882 #endif
3883 
3884 #ifdef ATH_SUPPORT_NAC
/*
 * dp_set_filter_neigh_peers() - set filter neighbour peers for smart mesh
 * @pdev: Datapath PDEV handle
 * @val: value to be set
 *
 * Return: 0 on success
 */
3892 static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
3893 				     bool val)
3894 {
3895 	/* Enable/Disable smart mesh filtering. This flag will be checked
3896 	 * during rx processing to check if packets are from NAC clients.
3897 	 */
3898 	pdev->monitor_pdev->filter_neighbour_peers = val;
3899 	return 0;
3900 }
3901 #endif /* ATH_SUPPORT_NAC */
3902 
3903 #ifdef WLAN_ATF_ENABLE
3904 static void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
3905 {
3906 	if (!pdev) {
3907 		dp_cdp_err("Invalid pdev");
3908 		return;
3909 	}
3910 
3911 	pdev->monitor_pdev->dp_atf_stats_enable = value;
3912 }
3913 #endif
3914 
3915 /**
 * dp_mon_set_bsscolor() - sets bsscolor for tx capture
3917  * @pdev: Datapath PDEV handle
3918  * @bsscolor: new bsscolor
3919  */
3920 static void
3921 dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
3922 {
3923 	pdev->monitor_pdev->rx_mon_recv_status.bsscolor = bsscolor;
3924 }
3925 
3926 /**
3927  * dp_pdev_get_filter_ucast_data() - get DP PDEV monitor ucast filter
 * @pdev_handle: Datapath PDEV handle
 *
 * Return: true on ucast filter flag set
3931  */
3932 static bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
3933 {
3934 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3935 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
3936 
3937 	if ((mon_pdev->fp_data_filter & FILTER_DATA_UCAST) ||
3938 	    (mon_pdev->mo_data_filter & FILTER_DATA_UCAST))
3939 		return true;
3940 
3941 	return false;
3942 }
3943 
3944 /**
3945  * dp_pdev_get_filter_mcast_data() - get DP PDEV monitor mcast filter
3946  * @pdev_handle: Datapath PDEV handle
3947  * Return: true on mcast filter flag set
3948  */
3949 static bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
3950 {
3951 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3952 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
3953 
3954 	if ((mon_pdev->fp_data_filter & FILTER_DATA_MCAST) ||
3955 	    (mon_pdev->mo_data_filter & FILTER_DATA_MCAST))
3956 		return true;
3957 
3958 	return false;
3959 }
3960 
3961 /**
3962  * dp_pdev_get_filter_non_data() - get DP PDEV monitor non_data filter
3963  * @pdev_handle: Datapath PDEV handle
3964  * Return: true on non data filter flag set
3965  */
3966 static bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
3967 {
3968 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3969 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
3970 
3971 	if ((mon_pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
3972 	    (mon_pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
3973 		if ((mon_pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
3974 		    (mon_pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
3975 			return true;
3976 		}
3977 	}
3978 
3979 	return false;
3980 }
3981 
3982 #ifdef QCA_MONITOR_PKT_SUPPORT
3983 /**
 * dp_vdev_set_monitor_mode_buf_rings() - set monitor mode buf rings
3985  *
3986  * Allocate SW descriptor pool, buffers, link descriptor memory
3987  * Initialize monitor related SRNGs
3988  *
3989  * @pdev: DP pdev object
3990  *
3991  * Return: void
3992  */
3993 static void dp_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev)
3994 {
3995 	uint32_t mac_id;
3996 	uint32_t mac_for_pdev;
3997 	struct dp_srng *mon_buf_ring;
3998 	uint32_t num_entries;
3999 	struct dp_soc *soc = pdev->soc;
4000 
	/* If delayed monitor replenish is disabled, allocate the monitor
	 * ring buffers for the full ring size up front.
	 */
4004 	if (!wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) {
4005 		dp_vdev_set_monitor_mode_rings(pdev, false);
4006 	} else {
4007 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4008 			mac_for_pdev =
4009 				dp_get_lmac_id_for_pdev_id(pdev->soc,
4010 							   mac_id,
4011 							   pdev->pdev_id);
4012 
4013 			dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
4014 							 FALSE);
4015 			mon_buf_ring =
4016 				&pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
			/*
			 * Configure a low interrupt threshold when monitor
			 * mode is configured.
			 */
4021 			if (mon_buf_ring->hal_srng) {
4022 				num_entries = mon_buf_ring->num_entries;
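				/*
				 * Assumed SRNG semantics: the low-threshold
				 * interrupt fires once free entries drop
				 * below 1/8th of the ring size.
				 */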
4023 				hal_set_low_threshold(mon_buf_ring->hal_srng,
4024 						      num_entries >> 3);
4025 				htt_srng_setup(pdev->soc->htt_handle,
4026 					       pdev->pdev_id,
4027 					       mon_buf_ring->hal_srng,
4028 					       RXDMA_MONITOR_BUF);
4029 			}
4030 		}
4031 	}
4032 }
4033 #else
4034 static void dp_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev)
4035 {
4036 }
4037 #endif
4038 
/*
 * dp_set_pktlog_wifi3() - enable/disable pktlog event subscriptions
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: WDI event subscribe or not. (True or False)
 *
 * Return: 0 on success
 */
4047 #ifdef WDI_EVENT_ENABLE
4048 static int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
4049 			       bool enable)
4050 {
4051 	struct dp_soc *soc = NULL;
4052 	int max_mac_rings = wlan_cfg_get_num_mac_rings
4053 					(pdev->wlan_cfg_ctx);
4054 	uint8_t mac_id = 0;
4055 	struct dp_mon_soc *mon_soc;
4056 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4057 
4058 	soc = pdev->soc;
4059 	mon_soc = soc->monitor_soc;
4060 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
4061 
4062 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
4063 		  FL("Max_mac_rings %d "),
4064 		  max_mac_rings);
4065 
4066 	if (enable) {
4067 		switch (event) {
4068 		case WDI_EVENT_RX_DESC:
4069 			if (mon_pdev->mvdev) {
4070 				/* Nothing needs to be done if monitor mode is
4071 				 * enabled
4072 				 */
4073 				mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
4074 				return 0;
4075 			}
4076 
4077 			if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
4078 				mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
4079 				dp_mon_filter_setup_rx_pkt_log_full(pdev);
4080 				if (dp_mon_filter_update(pdev) !=
4081 						QDF_STATUS_SUCCESS) {
4082 					dp_cdp_err("%pK: Pktlog full filters set failed", soc);
4083 					dp_mon_filter_reset_rx_pkt_log_full(pdev);
4084 					mon_pdev->rx_pktlog_mode =
4085 							DP_RX_PKTLOG_DISABLED;
4086 					return 0;
4087 				}
4088 
4089 				if (mon_soc->reap_timer_init &&
4090 				    (!dp_mon_is_enable_reap_timer_non_pkt(pdev)))
4091 					qdf_timer_mod(&mon_soc->mon_reap_timer,
4092 						      DP_INTR_POLL_TIMER_MS);
4093 			}
4094 			break;
4095 
4096 		case WDI_EVENT_LITE_RX:
4097 			if (mon_pdev->mvdev) {
4098 				/* Nothing needs to be done if monitor mode is
4099 				 * enabled
4100 				 */
4101 				mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
4102 				return 0;
4103 			}
4104 			if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
4105 				mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
4106 
4107 				/*
4108 				 * Set the packet log lite mode filter.
4109 				 */
4110 				dp_mon_filter_setup_rx_pkt_log_lite(pdev);
4111 				if (dp_mon_filter_update(pdev) !=
4112 				    QDF_STATUS_SUCCESS) {
4113 					dp_cdp_err("%pK: Pktlog lite filters set failed", soc);
4114 					dp_mon_filter_reset_rx_pkt_log_lite(pdev);
4115 					mon_pdev->rx_pktlog_mode =
4116 						DP_RX_PKTLOG_DISABLED;
4117 					return 0;
4118 				}
4119 
4120 				if (mon_soc->reap_timer_init &&
4121 				    (!dp_mon_is_enable_reap_timer_non_pkt(pdev)))
4122 					qdf_timer_mod(&mon_soc->mon_reap_timer,
4123 						      DP_INTR_POLL_TIMER_MS);
4124 			}
4125 			break;
4126 
4127 		case WDI_EVENT_LITE_T2H:
4128 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4129 				int mac_for_pdev = dp_get_mac_id_for_pdev(
4130 							mac_id,	pdev->pdev_id);
4131 
4132 				mon_pdev->pktlog_ppdu_stats = true;
4133 				dp_h2t_cfg_stats_msg_send(pdev,
4134 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
4135 					mac_for_pdev);
4136 			}
4137 			break;
4138 
4139 		case WDI_EVENT_RX_CBF:
4140 			if (mon_pdev->mvdev) {
4141 				/* Nothing needs to be done if monitor mode is
4142 				 * enabled
4143 				 */
4144 				dp_mon_info("Mon mode, CBF setting filters");
4145 				mon_pdev->rx_pktlog_cbf = true;
4146 				return 0;
4147 			}
4148 			if (!mon_pdev->rx_pktlog_cbf) {
4149 				mon_pdev->rx_pktlog_cbf = true;
4150 				mon_pdev->monitor_configured = true;
4151 				dp_vdev_set_monitor_mode_buf_rings(pdev);
4152 				/*
4153 				 * Set the packet log lite mode filter.
4154 				 */
4155 				qdf_info("Non mon mode: Enable destination ring");
4156 
4157 				dp_mon_filter_setup_rx_pkt_log_cbf(pdev);
4158 				if (dp_mon_filter_update(pdev) !=
4159 				    QDF_STATUS_SUCCESS) {
4160 					dp_mon_err("Pktlog set CBF filters failed");
4161 					dp_mon_filter_reset_rx_pktlog_cbf(pdev);
4162 					mon_pdev->rx_pktlog_mode =
4163 						DP_RX_PKTLOG_DISABLED;
4164 					mon_pdev->monitor_configured = false;
4165 					return 0;
4166 				}
4167 
4168 				if (mon_soc->reap_timer_init &&
4169 				    !dp_mon_is_enable_reap_timer_non_pkt(pdev))
4170 					qdf_timer_mod(&mon_soc->mon_reap_timer,
4171 						      DP_INTR_POLL_TIMER_MS);
4172 			}
4173 			break;
4174 
4175 		default:
4176 			/* Nothing needs to be done for other pktlog types */
4177 			break;
4178 		}
4179 	} else {
4180 		switch (event) {
4181 		case WDI_EVENT_RX_DESC:
4182 		case WDI_EVENT_LITE_RX:
4183 			if (mon_pdev->mvdev) {
4184 				/* Nothing needs to be done if monitor mode is
4185 				 * enabled
4186 				 */
4187 				mon_pdev->rx_pktlog_mode =
4188 						DP_RX_PKTLOG_DISABLED;
4189 				return 0;
4190 			}
4191 			if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
4192 				mon_pdev->rx_pktlog_mode =
4193 						DP_RX_PKTLOG_DISABLED;
4194 				dp_mon_filter_reset_rx_pkt_log_full(pdev);
4195 				if (dp_mon_filter_update(pdev) !=
4196 						QDF_STATUS_SUCCESS) {
4197 					dp_cdp_err("%pK: Pktlog filters reset failed", soc);
4198 					return 0;
4199 				}
4200 
4201 				dp_mon_filter_reset_rx_pkt_log_lite(pdev);
4202 				if (dp_mon_filter_update(pdev) !=
4203 						QDF_STATUS_SUCCESS) {
4204 					dp_cdp_err("%pK: Pktlog filters reset failed", soc);
4205 					return 0;
4206 				}
4207 
4208 				if (mon_soc->reap_timer_init &&
4209 				    (!dp_mon_is_enable_reap_timer_non_pkt(pdev)))
4210 					qdf_timer_stop(&mon_soc->mon_reap_timer);
4211 			}
4212 			break;
4213 		case WDI_EVENT_LITE_T2H:
			/*
			 * Pass value 0 to disable
			 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the FW. Once
			 * proper macros are defined in the htt header file,
			 * they will be used instead.
			 */
4219 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4220 				int mac_for_pdev =
4221 						dp_get_mac_id_for_pdev(mac_id,
4222 								pdev->pdev_id);
4223 
4224 				mon_pdev->pktlog_ppdu_stats = false;
4225 				if (!mon_pdev->enhanced_stats_en &&
4226 				    !mon_pdev->tx_sniffer_enable &&
4227 				    !mon_pdev->mcopy_mode) {
4228 					dp_h2t_cfg_stats_msg_send(pdev, 0,
4229 								  mac_for_pdev);
4230 				} else if (mon_pdev->tx_sniffer_enable ||
4231 					   mon_pdev->mcopy_mode) {
4232 					dp_h2t_cfg_stats_msg_send(pdev,
4233 						DP_PPDU_STATS_CFG_SNIFFER,
4234 						mac_for_pdev);
4235 				} else if (mon_pdev->enhanced_stats_en) {
4236 					dp_h2t_cfg_stats_msg_send(pdev,
4237 						DP_PPDU_STATS_CFG_ENH_STATS,
4238 						mac_for_pdev);
4239 				}
4240 			}
4241 
4242 			break;
4243 		case WDI_EVENT_RX_CBF:
4244 			mon_pdev->rx_pktlog_cbf = false;
4245 			break;
4246 
4247 		default:
4248 			/* Nothing needs to be done for other pktlog types */
4249 			break;
4250 		}
4251 	}
4252 	return 0;
4253 }
4254 #endif
4255 
4256 /* MCL specific functions */
4257 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
4258 /**
4259  * dp_pktlogmod_exit() - API to cleanup pktlog info
4260  * @pdev: Pdev handle
4261  *
4262  * Return: none
4263  */
4264 static void dp_pktlogmod_exit(struct dp_pdev *pdev)
4265 {
4266 	struct dp_soc *soc = pdev->soc;
4267 	struct hif_opaque_softc *scn = soc->hif_handle;
4268 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
4269 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4270 
4271 	if (!scn) {
4272 		dp_mon_err("Invalid hif(scn) handle");
4273 		return;
4274 	}
4275 
4276 	/* stop mon_reap_timer if it has been started */
4277 	if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
4278 	    mon_soc->reap_timer_init &&
4279 	    (!dp_mon_is_enable_reap_timer_non_pkt(pdev)))
4280 		qdf_timer_sync_cancel(&mon_soc->mon_reap_timer);
4281 
4282 	pktlogmod_exit(scn);
4283 	mon_pdev->pkt_log_init = false;
4284 }
4285 #else
4286 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
4287 #endif /*DP_CON_MON*/
4288 
4289 #ifdef WDI_EVENT_ENABLE
4290 QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer)
4291 {
4292 	struct cdp_interface_peer_stats peer_stats_intf;
4293 	struct cdp_peer_stats *peer_stats = &peer->stats;
4294 
4295 	if (!peer->vdev)
4296 		return QDF_STATUS_E_FAULT;
4297 
4298 	qdf_mem_zero(&peer_stats_intf, sizeof(peer_stats_intf));
4299 	if (peer_stats->rx.last_snr != peer_stats->rx.snr)
4300 		peer_stats_intf.rssi_changed = true;
4301 
4302 	if ((peer_stats->rx.snr && peer_stats_intf.rssi_changed) ||
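	/* Raise the WDI peer-stats event only when the SNR or the TX rate
	 * changed since the last report.
	 */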
4303 	    (peer_stats->tx.tx_rate &&
4304 	     peer_stats->tx.tx_rate != peer_stats->tx.last_tx_rate)) {
4305 		qdf_mem_copy(peer_stats_intf.peer_mac, peer->mac_addr.raw,
4306 			     QDF_MAC_ADDR_SIZE);
4307 		peer_stats_intf.vdev_id = peer->vdev->vdev_id;
4308 		peer_stats_intf.last_peer_tx_rate = peer_stats->tx.last_tx_rate;
4309 		peer_stats_intf.peer_tx_rate = peer_stats->tx.tx_rate;
4310 		peer_stats_intf.peer_rssi = peer_stats->rx.snr;
4311 		peer_stats_intf.tx_packet_count = peer_stats->tx.ucast.num;
4312 		peer_stats_intf.rx_packet_count = peer_stats->rx.to_stack.num;
4313 		peer_stats_intf.tx_byte_count = peer_stats->tx.tx_success.bytes;
4314 		peer_stats_intf.rx_byte_count = peer_stats->rx.to_stack.bytes;
4315 		peer_stats_intf.per = peer_stats->tx.last_per;
4316 		peer_stats_intf.ack_rssi = peer_stats->tx.last_ack_rssi;
4317 		peer_stats_intf.free_buff = INVALID_FREE_BUFF;
4318 		dp_wdi_event_handler(WDI_EVENT_PEER_STATS, dp_pdev->soc,
4319 				     (void *)&peer_stats_intf, 0,
4320 				     WDI_NO_VAL, dp_pdev->pdev_id);
4321 	}
4322 
4323 	return QDF_STATUS_SUCCESS;
4324 }
4325 #endif
4326 
4327 #ifdef FEATURE_NAC_RSSI
4328 /**
4329  * dp_rx_nac_filter(): Function to perform filtering of non-associated
4330  * clients
4331  * @pdev: DP pdev handle
4332  * @rx_pkt_hdr: Rx packet Header
4333  *
4334  * return: dp_vdev*
4335  */
4336 static
4337 struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
4338 				 uint8_t *rx_pkt_hdr)
4339 {
4340 	struct ieee80211_frame *wh;
4341 	struct dp_neighbour_peer *peer = NULL;
4342 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4343 
4344 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
4345 
4346 	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
4347 		return NULL;
4348 
4349 	qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
4350 	TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
4351 		      neighbour_peer_list_elem) {
4352 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
4353 				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
4354 			dp_rx_debug("%pK: NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x",
4355 				    pdev->soc,
4356 				    peer->neighbour_peers_macaddr.raw[0],
4357 				    peer->neighbour_peers_macaddr.raw[1],
4358 				    peer->neighbour_peers_macaddr.raw[2],
4359 				    peer->neighbour_peers_macaddr.raw[3],
4360 				    peer->neighbour_peers_macaddr.raw[4],
4361 				    peer->neighbour_peers_macaddr.raw[5]);
			qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
4364 
4365 			return mon_pdev->mvdev;
4366 		}
4367 	}
4368 	qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
4369 
4370 	return NULL;
4371 }
4372 
4373 static QDF_STATUS dp_filter_neighbour_peer(struct dp_pdev *pdev,
4374 					   uint8_t *rx_pkt_hdr)
4375 {
4376 	struct dp_vdev *vdev = NULL;
4377 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4378 
4379 	if (mon_pdev->filter_neighbour_peers) {
4380 		/* Next Hop scenario not yet handle */
4381 		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
4382 		if (vdev) {
4383 			dp_rx_mon_deliver(pdev->soc, pdev->pdev_id,
4384 					  pdev->invalid_peer_head_msdu,
4385 					  pdev->invalid_peer_tail_msdu);
4386 
4387 			pdev->invalid_peer_head_msdu = NULL;
4388 			pdev->invalid_peer_tail_msdu = NULL;
4389 			return QDF_STATUS_SUCCESS;
4390 		}
4391 	}
4392 
4393 	return QDF_STATUS_E_FAILURE;
4394 }
4395 #endif
4396 
4397 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
4398 /*
4399  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
4400  * address for smart mesh filtering
 * @soc_hdl: cdp soc handle
4402  * @vdev_id: id of virtual device object
4403  * @cmd: Add/Del command
4404  * @macaddr: nac client mac address
4405  *
4406  * Return: success/failure
4407  */
4408 static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc_hdl,
4409 					    uint8_t vdev_id,
4410 					    uint32_t cmd, uint8_t *macaddr)
4411 {
4412 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
4413 	struct dp_pdev *pdev;
4414 	struct dp_neighbour_peer *peer = NULL;
4415 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4416 						     DP_MOD_ID_CDP);
4417 	struct dp_mon_pdev *mon_pdev;
4418 
4419 	if (!vdev || !macaddr)
4420 		goto fail0;
4421 
4422 	pdev = vdev->pdev;
4423 
4424 	if (!pdev)
4425 		goto fail0;
4426 
4427 	mon_pdev = pdev->monitor_pdev;
4428 
4429 	/* Store address of NAC (neighbour peer) which will be checked
4430 	 * against TA of received packets.
4431 	 */
4432 	if (cmd == DP_NAC_PARAM_ADD) {
4433 		peer = (struct dp_neighbour_peer *)qdf_mem_malloc(
4434 				sizeof(*peer));
4435 
4436 		if (!peer) {
			dp_cdp_err("%pK: DP neighbour peer node memory allocation failed",
				   soc);
4439 			goto fail0;
4440 		}
4441 
4442 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
4443 			     macaddr, QDF_MAC_ADDR_SIZE);
4444 		peer->vdev = vdev;
4445 
4446 		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
4447 
4448 		/* add this neighbour peer into the list */
4449 		TAILQ_INSERT_TAIL(&mon_pdev->neighbour_peers_list, peer,
4450 				  neighbour_peer_list_elem);
4451 		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
4452 
4453 		/* first neighbour */
4454 		if (!mon_pdev->neighbour_peers_added) {
4455 			QDF_STATUS status = QDF_STATUS_SUCCESS;
4456 
4457 			mon_pdev->neighbour_peers_added = true;
4458 			dp_mon_filter_setup_smart_monitor(pdev);
4459 			status = dp_mon_filter_update(pdev);
4460 			if (status != QDF_STATUS_SUCCESS) {
4461 				dp_cdp_err("%pK: smart mon filter setup failed",
4462 					   soc);
4463 				dp_mon_filter_reset_smart_monitor(pdev);
4464 				mon_pdev->neighbour_peers_added = false;
4465 			}
4466 		}
4467 
4468 	} else if (cmd == DP_NAC_PARAM_DEL) {
4469 		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
4470 		TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
4471 			      neighbour_peer_list_elem) {
4472 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
4473 					 macaddr, QDF_MAC_ADDR_SIZE)) {
4474 				/* delete this peer from the list */
4475 				TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
4476 					     peer, neighbour_peer_list_elem);
4477 				qdf_mem_free(peer);
4478 				break;
4479 			}
4480 		}
4481 		/* last neighbour deleted */
4482 		if (TAILQ_EMPTY(&mon_pdev->neighbour_peers_list)) {
4483 			QDF_STATUS status = QDF_STATUS_SUCCESS;
4484 
4485 			dp_mon_filter_reset_smart_monitor(pdev);
4486 			status = dp_mon_filter_update(pdev);
4487 			if (status != QDF_STATUS_SUCCESS) {
4488 				dp_cdp_err("%pK: smart mon filter clear failed",
4489 					   soc);
4490 			}
4491 			mon_pdev->neighbour_peers_added = false;
4492 		}
4493 		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
4494 	}
4495 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4496 	return 1;
4497 
4498 fail0:
4499 	if (vdev)
4500 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4501 	return 0;
4502 }
4503 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
4504 
4505 #ifdef ATH_SUPPORT_NAC_RSSI
4506 /**
 * dp_vdev_get_neighbour_rssi() - Retrieve stored RSSI for a configured NAC
4508  * @soc_hdl: DP soc handle
4509  * @vdev_id: id of DP vdev handle
4510  * @mac_addr: neighbour mac
4511  * @rssi: rssi value
4512  *
4513  * Return: 0 for success. nonzero for failure.
4514  */
static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc_hdl,
4516 					      uint8_t vdev_id,
4517 					      char *mac_addr,
4518 					      uint8_t *rssi)
4519 {
4520 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4521 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4522 						     DP_MOD_ID_CDP);
4523 	struct dp_pdev *pdev;
4524 	struct dp_neighbour_peer *peer = NULL;
4525 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
4526 	struct dp_mon_pdev *mon_pdev;
4527 
4528 	if (!vdev)
4529 		return status;
4530 
4531 	pdev = vdev->pdev;
4532 	mon_pdev = pdev->monitor_pdev;
4533 
4534 	*rssi = 0;
4535 	qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
4536 	TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
4537 		      neighbour_peer_list_elem) {
4538 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
4539 				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
4540 			*rssi = peer->rssi;
4541 			status = QDF_STATUS_SUCCESS;
4542 			break;
4543 		}
4544 	}
4545 	qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
4546 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4547 	return status;
4548 }
4549 
4550 static QDF_STATUS
4551 dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc,
4552 		       uint8_t vdev_id,
4553 		       enum cdp_nac_param_cmd cmd, char *bssid,
4554 		       char *client_macaddr,
4555 		       uint8_t chan_num)
4556 {
4557 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4558 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4559 						     DP_MOD_ID_CDP);
4560 	struct dp_pdev *pdev;
4561 	struct dp_mon_pdev *mon_pdev;
4562 
4563 	if (!vdev)
4564 		return QDF_STATUS_E_FAILURE;
4565 
4566 	pdev = (struct dp_pdev *)vdev->pdev;
4567 
4568 	mon_pdev = pdev->monitor_pdev;
4569 	mon_pdev->nac_rssi_filtering = 1;
4570 	/* Store address of NAC (neighbour peer) which will be checked
4571 	 * against TA of received packets.
4572 	 */
4573 
4574 	if (cmd == CDP_NAC_PARAM_ADD) {
4575 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
4576 						 DP_NAC_PARAM_ADD,
4577 						 (uint8_t *)client_macaddr);
4578 	} else if (cmd == CDP_NAC_PARAM_DEL) {
4579 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
4580 						 DP_NAC_PARAM_DEL,
4581 						 (uint8_t *)client_macaddr);
4582 	}
4583 
4584 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
4585 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
4586 			(soc->ctrl_psoc, pdev->pdev_id,
4587 			 vdev->vdev_id, cmd, bssid, client_macaddr);
4588 
4589 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4590 	return QDF_STATUS_SUCCESS;
4591 }
4592 #endif
4593 
4594 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/*
 * dp_cfr_filter() - Configure HOST RX monitor status ring for CFR
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @enable: Enable/Disable CFR
 * @filter_val: Flag to select Filter for monitor mode
 *
 * Return: none
 */
4602 static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
4603 			  uint8_t pdev_id,
4604 			  bool enable,
4605 			  struct cdp_monitor_filter *filter_val)
4606 {
4607 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4608 	struct dp_pdev *pdev = NULL;
4609 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
4610 	int max_mac_rings;
4611 	uint8_t mac_id = 0;
4612 	struct dp_mon_pdev *mon_pdev;
4613 
4614 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
4615 	if (!pdev) {
4616 		dp_mon_err("pdev is NULL");
4617 		return;
4618 	}
4619 
4620 	mon_pdev = pdev->monitor_pdev;
4621 
4622 	if (mon_pdev->mvdev) {
4623 		dp_mon_info("No action is needed since mon mode is enabled\n");
4624 		return;
4625 	}
4626 	soc = pdev->soc;
4627 	pdev->cfr_rcc_mode = false;
4628 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
4629 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
4630 
4631 	dp_mon_debug("Max_mac_rings %d", max_mac_rings);
4632 	dp_mon_info("enable : %d, mode: 0x%x", enable, filter_val->mode);
4633 
4634 	if (enable) {
4635 		pdev->cfr_rcc_mode = true;
4636 
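		/* Subscribe to the PPDU status TLVs needed to reconstruct
		 * per-PPDU metadata for CFR capture.
		 */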
4637 		htt_tlv_filter.ppdu_start = 1;
4638 		htt_tlv_filter.ppdu_end = 1;
4639 		htt_tlv_filter.ppdu_end_user_stats = 1;
4640 		htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4641 		htt_tlv_filter.ppdu_end_status_done = 1;
4642 		htt_tlv_filter.mpdu_start = 1;
4643 		htt_tlv_filter.offset_valid = false;
4644 
4645 		htt_tlv_filter.enable_fp =
4646 			(filter_val->mode & MON_FILTER_PASS) ? 1 : 0;
4647 		htt_tlv_filter.enable_md = 0;
4648 		htt_tlv_filter.enable_mo =
4649 			(filter_val->mode & MON_FILTER_OTHER) ? 1 : 0;
4650 		htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt;
4651 		htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl;
4652 		htt_tlv_filter.fp_data_filter = filter_val->fp_data;
4653 		htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt;
4654 		htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl;
4655 		htt_tlv_filter.mo_data_filter = filter_val->mo_data;
4656 	}
4657 
4658 	for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4659 		int mac_for_pdev =
4660 			dp_get_mac_id_for_pdev(mac_id,
4661 					       pdev->pdev_id);
4662 
4663 		htt_h2t_rx_ring_cfg(soc->htt_handle,
4664 				    mac_for_pdev,
4665 				    soc->rxdma_mon_status_ring[mac_id]
4666 				    .hal_srng,
4667 				    RXDMA_MONITOR_STATUS,
4668 				    RX_MON_STATUS_BUF_SIZE,
4669 				    &htt_tlv_filter);
4670 	}
4671 }
4672 
4673 /*
4674  * dp_enable_mon_reap_timer() - enable/disable reap timer
4675  * @soc_hdl: Datapath soc handle
4676  * @pdev_id: id of objmgr pdev
4677  * @enable: Enable/Disable reap timer of monitor status ring
4678  *
4679  * Return: none
4680  */
4681 static void
4682 dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4683 			 bool enable)
4684 {
4685 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4686 	struct dp_pdev *pdev = NULL;
4687 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
4688 	struct dp_mon_pdev *mon_pdev;
4689 
4690 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
4691 	if (!pdev) {
4692 		dp_mon_err("pdev is NULL");
4693 		return;
4694 	}
4695 
4696 	mon_pdev = pdev->monitor_pdev;
4697 	mon_pdev->enable_reap_timer_non_pkt = enable;
4698 	if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
4699 		dp_mon_debug("pktlog enabled %d", mon_pdev->rx_pktlog_mode);
4700 		return;
4701 	}
4702 
4703 	if (!mon_soc->reap_timer_init) {
4704 		dp_mon_err("reap timer not init");
4705 		return;
4706 	}
4707 
4708 	if (enable)
4709 		qdf_timer_mod(&mon_soc->mon_reap_timer,
4710 			      DP_INTR_POLL_TIMER_MS);
4711 	else
4712 		qdf_timer_sync_cancel(&mon_soc->mon_reap_timer);
4713 }
4714 #endif
4715 
4716 #if defined(DP_CON_MON)
4717 #ifndef REMOVE_PKT_LOG
4718 /**
4719  * dp_pkt_log_init() - API to initialize packet log
4720  * @soc_hdl: Datapath soc handle
4721  * @pdev_id: id of data path pdev handle
4722  * @scn: HIF context
4723  *
4724  * Return: none
4725  */
4726 void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
4727 {
4728 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4729 	struct dp_pdev *handle =
4730 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
4731 	struct dp_mon_pdev *mon_pdev;
4732 
4733 	if (!handle) {
4734 		dp_mon_err("pdev handle is NULL");
4735 		return;
4736 	}
4737 
4738 	mon_pdev = handle->monitor_pdev;
4739 
4740 	if (mon_pdev->pkt_log_init) {
		dp_mon_err("%pK: Packet log already initialized", soc);
4742 		return;
4743 	}
4744 
4745 	pktlog_sethandle(&mon_pdev->pl_dev, scn);
4746 	pktlog_set_pdev_id(mon_pdev->pl_dev, pdev_id);
4747 	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
4748 
4749 	if (pktlogmod_init(scn)) {
4750 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4751 			  "%s: pktlogmod_init failed", __func__);
4752 		mon_pdev->pkt_log_init = false;
4753 	} else {
4754 		mon_pdev->pkt_log_init = true;
4755 	}
4756 }
4757 
4758 /**
4759  * dp_pkt_log_con_service() - connect packet log service
4760  * @soc_hdl: Datapath soc handle
4761  * @pdev_id: id of data path pdev handle
4762  * @scn: device context
4763  *
4764  * Return: none
4765  */
4766 static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
4767 				   uint8_t pdev_id, void *scn)
4768 {
4769 	dp_pkt_log_init(soc_hdl, pdev_id, scn);
4770 	pktlog_htc_attach();
4771 }
4772 
4773 /**
4774  * dp_pkt_log_exit() - Wrapper API to cleanup pktlog info
4775  * @soc_hdl: Datapath soc handle
4776  * @pdev_id: id of data path pdev handle
4777  *
4778  * Return: none
4779  */
4780 static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
4781 {
4782 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4783 	struct dp_pdev *pdev =
4784 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
4785 
4786 	if (!pdev) {
4787 		dp_err("pdev handle is NULL");
4788 		return;
4789 	}
4790 
4791 	dp_pktlogmod_exit(pdev);
4792 }
4793 
4794 #else
4795 static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
4796 				   uint8_t pdev_id, void *scn)
4797 {
4798 }
4799 
4800 static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
4801 {
4802 }
4803 #endif
4804 #endif
4805 
4806 /*
4807  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
4808  * @pdev: device object
4809  *
4810  * Return: void
4811  */
4812 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
4813 {
4814 	struct dp_neighbour_peer *peer = NULL;
4815 	struct dp_neighbour_peer *temp_peer = NULL;
4816 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4817 
4818 	TAILQ_FOREACH_SAFE(peer, &mon_pdev->neighbour_peers_list,
4819 			   neighbour_peer_list_elem, temp_peer) {
4820 		/* delete this peer from the list */
4821 		TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
4822 			     peer, neighbour_peer_list_elem);
4823 		qdf_mem_free(peer);
4824 	}
4825 
4826 	qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
4827 }
4828 
/*
 * is_ppdu_txrx_capture_enabled() - API to check whether both pktlog and
 *                              debug_sniffer modes are disabled.
 * @pdev: dp pdev handle.
 *
 * Return: true if neither pktlog nor any debug-sniffer/mcopy mode is
 *         enabled, false otherwise.
 */
4836 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
4837 {
4838 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4839 
4840 	if (!mon_pdev->pktlog_ppdu_stats && !mon_pdev->tx_sniffer_enable &&
4841 	    !mon_pdev->mcopy_mode)
4842 		return true;
4843 	else
4844 		return false;
4845 }
4846 
4847 #ifdef QCA_ENHANCED_STATS_SUPPORT
4848 /*
 * dp_enable_enhanced_stats() - API to enable enhanced statistics
4850  * @soc_handle: DP_SOC handle
4851  * @pdev_id: id of DP_PDEV handle
4852  *
4853  * Return: QDF_STATUS
4854  */
4855 static QDF_STATUS
4856 dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
4857 {
4858 	struct dp_pdev *pdev = NULL;
4859 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4860 	struct dp_mon_pdev *mon_pdev;
4861 
4862 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
4863 						  pdev_id);
4864 
4865 	if (!pdev)
4866 		return QDF_STATUS_E_FAILURE;
4867 
4868 	mon_pdev = pdev->monitor_pdev;
4869 
4870 	if (mon_pdev->enhanced_stats_en == 0)
4871 		dp_cal_client_timer_start(mon_pdev->cal_client_ctx);
4872 
4873 	mon_pdev->enhanced_stats_en = 1;
4874 
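	/* Push the enhanced-stats filter configuration; on failure roll
	 * back the filter, the cal client timer and the enable flag.
	 */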
4875 	dp_mon_filter_setup_enhanced_stats(mon_pdev);
4876 	status = dp_mon_filter_update(pdev);
4877 	if (status != QDF_STATUS_SUCCESS) {
4878 		dp_cdp_err("%pK: Failed to set enhanced mode filters", soc);
4879 		dp_mon_filter_reset_enhanced_stats(mon_pdev);
4880 		dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
4881 		mon_pdev->enhanced_stats_en = 0;
4882 		return QDF_STATUS_E_FAILURE;
4883 	}
4884 
4885 	pdev->enhanced_stats_en = true;
4886 	if (is_ppdu_txrx_capture_enabled(pdev) && !mon_pdev->bpr_enable) {
4887 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
4888 					  pdev->pdev_id);
4889 	} else if (is_ppdu_txrx_capture_enabled(pdev) &&
4890 		   mon_pdev->bpr_enable) {
4891 		dp_h2t_cfg_stats_msg_send(pdev,
4892 					  DP_PPDU_STATS_CFG_BPR_ENH,
4893 					  pdev->pdev_id);
4894 	}
4895 
4896 	return QDF_STATUS_SUCCESS;
4897 }
4898 
/*
 * dp_disable_enhanced_stats() - API to disable enhanced statistics
 * @soc: the soc handle
 * @pdev_id: pdev_id of pdev
 *
 * Return: QDF_STATUS
 */
4906 static QDF_STATUS
4907 dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
4908 {
4909 	struct dp_pdev *pdev =
4910 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
4911 						   pdev_id);
	struct dp_mon_pdev *mon_pdev;

4915 	if (!pdev)
4916 		return QDF_STATUS_E_FAILURE;
4917 
4918 	mon_pdev = pdev->monitor_pdev;
4919 
4920 	if (mon_pdev->enhanced_stats_en == 1)
4921 		dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
4922 
4923 	mon_pdev->enhanced_stats_en = 0;
4924 	pdev->enhanced_stats_en = false;
4925 
4926 	if (is_ppdu_txrx_capture_enabled(pdev) && !mon_pdev->bpr_enable) {
4927 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
4928 	} else if (is_ppdu_txrx_capture_enabled(pdev) && mon_pdev->bpr_enable) {
4929 		dp_h2t_cfg_stats_msg_send(pdev,
4930 					  DP_PPDU_STATS_CFG_BPR,
4931 					  pdev->pdev_id);
4932 	}
4933 
4934 	dp_mon_filter_reset_enhanced_stats(mon_pdev);
4935 	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
4936 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4937 			  FL("Failed to reset enhanced mode filters"));
4938 	}
4939 
4940 	return QDF_STATUS_SUCCESS;
4941 }
4942 
4943 #ifdef WDI_EVENT_ENABLE
4944 QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
4945 				    struct cdp_rx_stats_ppdu_user *ppdu_user)
4946 {
4947 	struct cdp_interface_peer_qos_stats qos_stats_intf;
4948 
4949 	if (ppdu_user->peer_id == HTT_INVALID_PEER) {
4950 		dp_mon_warn("Invalid peer id");
4951 		return QDF_STATUS_E_FAILURE;
4952 	}
4953 	qdf_mem_zero(&qos_stats_intf, sizeof(qos_stats_intf));
4954 
4955 	qdf_mem_copy(qos_stats_intf.peer_mac, ppdu_user->mac_addr,
4956 		     QDF_MAC_ADDR_SIZE);
4957 	qos_stats_intf.frame_control = ppdu_user->frame_control;
4958 	qos_stats_intf.frame_control_info_valid =
4959 			ppdu_user->frame_control_info_valid;
4960 	qos_stats_intf.qos_control = ppdu_user->qos_control;
4961 	qos_stats_intf.qos_control_info_valid =
4962 			ppdu_user->qos_control_info_valid;
4963 	qos_stats_intf.vdev_id = ppdu_user->vdev_id;
4964 	dp_wdi_event_handler(WDI_EVENT_PEER_QOS_STATS, dp_pdev->soc,
4965 			     (void *)&qos_stats_intf, 0,
4966 			     WDI_NO_VAL, dp_pdev->pdev_id);
4967 
4968 	return QDF_STATUS_SUCCESS;
4969 }
4970 #else
4971 static inline QDF_STATUS
4972 dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
4973 			 struct cdp_rx_stats_ppdu_user *ppdu_user)
4974 {
4975 	return QDF_STATUS_SUCCESS;
4976 }
4977 #endif
4978 #endif /* QCA_ENHANCED_STATS_SUPPORT */
4979 
4980 /**
4981  * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
4982  * for pktlog
4983  * @soc: cdp_soc handle
4984  * @pdev_id: id of dp pdev handle
4985  * @mac_addr: Peer mac address
4986  * @enb_dsb: Enable or disable peer based filtering
4987  *
4988  * Return: QDF_STATUS
4989  */
4990 static int
4991 dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
4992 			    uint8_t *mac_addr, uint8_t enb_dsb)
4993 {
4994 	struct dp_peer *peer;
4995 	struct dp_pdev *pdev =
4996 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
4997 						   pdev_id);
4998 	struct dp_mon_pdev *mon_pdev;
4999 
5000 	if (!pdev)
5001 		return QDF_STATUS_E_FAILURE;
5002 
5003 	mon_pdev = pdev->monitor_pdev;
5004 
5005 	peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr,
5006 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
5007 
5008 	if (!peer) {
5009 		dp_mon_err("Invalid Peer");
5010 		return QDF_STATUS_E_FAILURE;
5011 	}
5012 
5013 	peer->peer_based_pktlog_filter = enb_dsb;
5014 	mon_pdev->dp_peer_based_pktlog = enb_dsb;
5015 
5016 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5017 
5018 	return QDF_STATUS_SUCCESS;
5019 }
5020 
5021 /**
5022  * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
5023  * @soc: DP_SOC handle
5024  * @pdev_id: id of DP_PDEV handle
5025  * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
5026  * @is_tx_pkt_cap_enable: enable/disable/delete/print
5027  * Tx packet capture in monitor mode
5028  * @peer_mac: MAC address for which the above need to be enabled/disabled
5029  *
 * Return: QDF_STATUS_SUCCESS if Rx & Tx capture is set for the peer
5031  */
5032 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
5033 static QDF_STATUS
5034 dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
5035 				  uint8_t pdev_id,
5036 				  bool is_rx_pkt_cap_enable,
5037 				  uint8_t is_tx_pkt_cap_enable,
5038 				  uint8_t *peer_mac)
5039 {
5040 	struct dp_peer *peer;
5041 	QDF_STATUS status;
5042 	struct dp_pdev *pdev =
5043 			dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
5044 							   pdev_id);
5045 	if (!pdev)
5046 		return QDF_STATUS_E_FAILURE;
5047 
5048 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
5049 				      peer_mac, 0, DP_VDEV_ALL,
5050 				      DP_MOD_ID_CDP);
5051 	if (!peer)
5052 		return QDF_STATUS_E_FAILURE;
5053 
5054 	/* we need to set tx pkt capture for non associated peer */
	status = dp_peer_set_tx_capture_enabled(pdev, peer,
						is_tx_pkt_cap_enable,
						peer_mac);
	if (status != QDF_STATUS_SUCCESS) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return status;
	}

5059 	status = dp_peer_set_rx_capture_enabled(pdev, peer,
5060 						is_rx_pkt_cap_enable,
5061 						peer_mac);
5062 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5063 
5064 	return status;
5065 }
5066 #endif
5067 
5068 #if defined(QCA_MONITOR_PKT_SUPPORT) || defined(QCA_MCOPY_SUPPORT)
5069 /**
5070  * dp_vdev_set_monitor_mode_rings () - set monitor mode rings
5071  *
5072  * Allocate SW descriptor pool, buffers, link descriptor memory
5073  * Initialize monitor related SRNGs
5074  *
5075  * @pdev: DP pdev object
5076  *
5077  * Return: QDF_STATUS
5078  */
5079 static QDF_STATUS dp_vdev_set_monitor_mode_rings(struct dp_pdev *pdev,
5080 						 uint8_t delayed_replenish)
5081 {
5082 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
5083 	uint32_t mac_id;
5084 	uint32_t mac_for_pdev;
5085 	struct dp_soc *soc = pdev->soc;
5086 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5087 	struct dp_srng *mon_buf_ring;
5088 	uint32_t num_entries;
5089 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5090 
5091 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
5092 
	/* If monitor rings are already initialized, return from here */
5094 	if (mon_pdev->pdev_mon_init)
5095 		return QDF_STATUS_SUCCESS;
5096 
5097 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5098 		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
5099 							  pdev->pdev_id);
5100 
5101 		/* Allocate sw rx descriptor pool for mon RxDMA buffer ring */
5102 		status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev);
5103 		if (!QDF_IS_STATUS_SUCCESS(status)) {
5104 			dp_mon_err("%s: dp_rx_pdev_mon_buf_desc_pool_alloc() failed\n",
5105 				   __func__);
5106 			goto fail0;
5107 		}
5108 
5109 		dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev);
5110 
5111 		/* If monitor buffers are already allocated,
5112 		 * do not allocate.
5113 		 */
5114 		status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
5115 							  delayed_replenish);
5116 
5117 		mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
		/*
		 * Configure a low interrupt threshold when monitor mode is
		 * configured.
		 */
5122 		if (mon_buf_ring->hal_srng) {
5123 			num_entries = mon_buf_ring->num_entries;
5124 			hal_set_low_threshold(mon_buf_ring->hal_srng,
5125 					      num_entries >> 3);
5126 			htt_srng_setup(pdev->soc->htt_handle,
5127 				       pdev->pdev_id,
5128 				       mon_buf_ring->hal_srng,
5129 				       RXDMA_MONITOR_BUF);
5130 		}
5131 
5132 		/* Allocate link descriptors for the mon link descriptor ring */
5133 		status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev);
5134 		if (!QDF_IS_STATUS_SUCCESS(status)) {
5135 			dp_mon_err("%s: dp_hw_link_desc_pool_banks_alloc() failed",
5136 				   __func__);
5137 			goto fail0;
5138 		}
5139 		dp_link_desc_ring_replenish(soc, mac_for_pdev);
5140 
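		/* Register the monitor descriptor and destination rings
		 * with the target firmware via HTT.
		 */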
5141 		htt_srng_setup(soc->htt_handle, pdev->pdev_id,
5142 			       soc->rxdma_mon_desc_ring[mac_for_pdev].hal_srng,
5143 			       RXDMA_MONITOR_DESC);
5144 		htt_srng_setup(soc->htt_handle, pdev->pdev_id,
5145 			       soc->rxdma_mon_dst_ring[mac_for_pdev].hal_srng,
5146 			       RXDMA_MONITOR_DST);
5147 	}
5148 	mon_pdev->pdev_mon_init = 1;
5149 
5150 	return QDF_STATUS_SUCCESS;
5151 
5152 fail0:
5153 	return QDF_STATUS_E_FAILURE;
5154 }
5155 #endif
5156 
/* dp_mon_vdev_timer() - timer-based poll of monitor rings in place of
 * interrupts
 *
 * @arg: SoC Handle
 *
 * Return: None
 */
5164 static void dp_mon_vdev_timer(void *arg)
5165 {
5166 	struct dp_soc *soc = (struct dp_soc *)arg;
5167 	struct dp_pdev *pdev = soc->pdev_list[0];
5168 	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
5169 	uint32_t work_done  = 0, total_work_done = 0;
5170 	int budget = 0xffff;
5171 	uint32_t remaining_quota = budget;
5172 	uint64_t start_time;
5173 	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
5174 	uint32_t lmac_iter;
5175 	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
5176 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
5177 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5178 
5179 	if (!qdf_atomic_read(&soc->cmn_init_done))
5180 		return;
5181 
5182 	if (mon_pdev->mon_chan_band != REG_BAND_UNKNOWN)
5183 		lmac_id =
5184 			pdev->ch_band_lmac_id_mapping[mon_pdev->mon_chan_band];
5185 
5186 	start_time = qdf_get_log_timestamp();
5187 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
5188 
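	/* Service the monitor destination ring of the lmac serving the
	 * configured channel band; rings of the other macs are only drained
	 * (packets dropped) so they do not back up.
	 */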
5189 	while (yield == DP_TIMER_NO_YIELD) {
5190 		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
5191 			if (lmac_iter == lmac_id)
5192 				work_done = dp_mon_process(soc, NULL,
5193 							   lmac_iter,
5194 							   remaining_quota);
5195 			else
5196 				work_done =
5197 					dp_mon_drop_packets_for_mac(pdev,
5198 								    lmac_iter,
5199 								    remaining_quota);
5200 			if (work_done) {
5201 				budget -=  work_done;
5202 				if (budget <= 0) {
5203 					yield = DP_TIMER_WORK_EXHAUST;
5204 					goto budget_done;
5205 				}
5206 				remaining_quota = budget;
5207 				total_work_done += work_done;
5208 			}
5209 		}
5210 
5211 		yield = dp_should_timer_irq_yield(soc, total_work_done,
5212 						  start_time);
5213 		total_work_done = 0;
5214 	}
5215 
5216 budget_done:
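	/* Re-arm almost immediately (1 ms) when work remains, otherwise at
	 * the normal poll interval.
	 */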
5217 	if (yield == DP_TIMER_WORK_EXHAUST ||
5218 	    yield == DP_TIMER_TIME_EXHAUST)
5219 		qdf_timer_mod(&mon_soc->mon_vdev_timer, 1);
5220 	else
5221 		qdf_timer_mod(&mon_soc->mon_vdev_timer, DP_INTR_POLL_TIMER_MS);
5222 }
5223 
5224 /* MCL specific functions */
5225 #if defined(DP_CON_MON)
/*
 * dp_mon_reap_timer_handler() - timer to reap monitor rings,
 * required as we are not getting ppdu end interrupts
 * @arg: SoC Handle
 *
 * Return: None
 */
5234 static void dp_mon_reap_timer_handler(void *arg)
5235 {
5236 	struct dp_soc *soc = (struct dp_soc *)arg;
5237 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
5238 
5239 	dp_service_mon_rings(soc, QCA_NAPI_BUDGET);
5240 	qdf_timer_mod(&mon_soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
5241 }
5242 #endif
5243 
5244 #ifdef QCA_HOST2FW_RXBUF_RING
5245 static void dp_mon_reap_timer_init(struct dp_soc *soc)
5246 {
5247 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
5248 
5249 	qdf_timer_init(soc->osdev, &mon_soc->mon_reap_timer,
5250 		       dp_mon_reap_timer_handler, (void *)soc,
5251 		       QDF_TIMER_TYPE_WAKE_APPS);
5252 	mon_soc->reap_timer_init = 1;
5253 }
5254 #else
5255 static void dp_mon_reap_timer_init(struct dp_soc *soc)
5256 {
5257 }
5258 #endif
5259 
5260 static void dp_mon_reap_timer_deinit(struct dp_soc *soc)
5261 {
5262 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
5263 
5264 	if (mon_soc->reap_timer_init) {
5265 		qdf_timer_free(&mon_soc->mon_reap_timer);
5266 		mon_soc->reap_timer_init = 0;
5267 	}
5268 }
5269 
5270 static void dp_mon_reap_timer_start(struct dp_soc *soc)
5271 {
5272 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
5273 
5274 	if (mon_soc->reap_timer_init)
5275 		qdf_timer_mod(&mon_soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
5276 }
5277 
5278 static bool dp_mon_reap_timer_stop(struct dp_soc *soc)
5279 {
5280 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
5281 
5282 	if (mon_soc->reap_timer_init) {
5283 		qdf_timer_sync_cancel(&mon_soc->mon_reap_timer);
5284 		return true;
5285 	}
5286 
5287 	return false;
5288 }
5289 
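/* mon_vdev_timer_state tracks the monitor vdev timer lifecycle as a bitmask
 * of MON_VDEV_TIMER_INIT and MON_VDEV_TIMER_RUNNING, consulted by the
 * init/start/stop/deinit helpers below.
 */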
5290 static void dp_mon_vdev_timer_init(struct dp_soc *soc)
5291 {
5292 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
5293 
5294 	qdf_timer_init(soc->osdev, &mon_soc->mon_vdev_timer,
5295 		       dp_mon_vdev_timer, (void *)soc,
5296 		       QDF_TIMER_TYPE_WAKE_APPS);
5297 	mon_soc->mon_vdev_timer_state |= MON_VDEV_TIMER_INIT;
5298 }
5299 
5300 static void dp_mon_vdev_timer_deinit(struct dp_soc *soc)
5301 {
5302 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
5303 
5304 	if (mon_soc->mon_vdev_timer_state & MON_VDEV_TIMER_INIT) {
5305 		qdf_timer_free(&mon_soc->mon_vdev_timer);
5306 		mon_soc->mon_vdev_timer_state = 0;
5307 	}
5308 }
5309 
5310 static void dp_mon_vdev_timer_start(struct dp_soc *soc)
5311 {
5312 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
5313 
5314 	if (mon_soc->mon_vdev_timer_state & MON_VDEV_TIMER_INIT) {
5315 		qdf_timer_mod(&mon_soc->mon_vdev_timer, DP_INTR_POLL_TIMER_MS);
5316 		mon_soc->mon_vdev_timer_state |= MON_VDEV_TIMER_RUNNING;
5317 	}
5318 }
5319 
5320 static bool dp_mon_vdev_timer_stop(struct dp_soc *soc)
5321 {
5322 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
5323 
5324 	if (mon_soc->mon_vdev_timer_state & MON_VDEV_TIMER_RUNNING) {
5325 		qdf_timer_sync_cancel(&mon_soc->mon_vdev_timer);
5326 		mon_soc->mon_vdev_timer_state &= ~MON_VDEV_TIMER_RUNNING;
5327 		return true;
5328 	}
5329 
5330 	return false;
5331 }
5332 
5333 #ifdef QCA_MCOPY_SUPPORT
5334 static QDF_STATUS dp_mcopy_check_deliver(struct dp_pdev *pdev,
5335 					 uint16_t peer_id,
5336 					 uint32_t ppdu_id,
5337 					 uint8_t first_msdu)
5338 {
5339 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5340 
5341 	if (mon_pdev->mcopy_mode) {
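		/* M_COPY delivers at most one MSDU per PPDU per peer: drop
		 * frames that repeat the last seen (ppdu_id, peer_id) pair.
		 */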
5342 		if (mon_pdev->mcopy_mode == M_COPY) {
5343 			if ((mon_pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
5344 			    (mon_pdev->m_copy_id.tx_peer_id == peer_id)) {
5345 				return QDF_STATUS_E_INVAL;
5346 			}
5347 		}
5348 
5349 		if (!first_msdu)
5350 			return QDF_STATUS_E_INVAL;
5351 
5352 		mon_pdev->m_copy_id.tx_ppdu_id = ppdu_id;
5353 		mon_pdev->m_copy_id.tx_peer_id = peer_id;
5354 	}
5355 
5356 	return QDF_STATUS_SUCCESS;
5357 }
5358 #endif
5359 
5360 static void dp_mon_neighbour_peer_add_ast(struct dp_pdev *pdev,
5361 					  struct dp_peer *ta_peer,
5362 					  uint8_t *mac_addr,
5363 					  qdf_nbuf_t nbuf,
5364 					  uint32_t flags)
5365 {
5366 	struct dp_neighbour_peer *neighbour_peer = NULL;
5367 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5368 	struct dp_soc *soc = pdev->soc;
5370 
5371 	if (mon_pdev->neighbour_peers_added) {
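		/* Copy out the source MAC address (the second address in
		 * the frame header); if it matches a configured neighbour
		 * (NAC) entry, add a WDS AST entry for the roamed client.
		 */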
5372 		qdf_mem_copy(mac_addr,
5373 			     (qdf_nbuf_data(nbuf) +
5374 			      QDF_MAC_ADDR_SIZE),
5375 			     QDF_MAC_ADDR_SIZE);
5376 
5377 		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
5378 		TAILQ_FOREACH(neighbour_peer,
5379 			      &mon_pdev->neighbour_peers_list,
5380 			      neighbour_peer_list_elem) {
5381 			if (!qdf_mem_cmp(&neighbour_peer->neighbour_peers_macaddr,
5382 					 mac_addr,
5383 					 QDF_MAC_ADDR_SIZE)) {
				dp_peer_add_ast(soc,
						ta_peer,
						mac_addr,
						CDP_TXRX_AST_TYPE_WDS,
						flags);
5389 				QDF_TRACE(QDF_MODULE_ID_DP,
5390 					  QDF_TRACE_LEVEL_INFO,
5391 					  "sa valid and nac roamed to wds");
5392 				break;
5393 			}
5394 		}
5395 		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
5396 	}
5397 }
5398 
5399 #ifdef WDI_EVENT_ENABLE
5400 #ifndef REMOVE_PKT_LOG
5401 static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
5402 {
5403 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5404 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
5405 
5406 	if (!pdev || !pdev->monitor_pdev)
5407 		return NULL;
5408 
5409 	return pdev->monitor_pdev->pl_dev;
5410 }
5411 #else
5412 static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
5413 {
5414 	return NULL;
5415 }
5416 #endif
5417 #endif
5418 
5419 QDF_STATUS dp_rx_populate_cbf_hdr(struct dp_soc *soc,
5420 				  uint32_t mac_id,
5421 				  uint32_t event,
5422 				  qdf_nbuf_t mpdu,
5423 				  uint32_t msdu_timestamp)
5424 {
5425 	uint32_t data_size, hdr_size, ppdu_id, align4byte;
5426 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
5427 	uint32_t *msg_word;
5428 
5429 	if (!pdev)
5430 		return QDF_STATUS_E_INVAL;
5431 
5432 	ppdu_id = pdev->monitor_pdev->ppdu_info.com_info.ppdu_id;
5433 
5434 	hdr_size = HTT_T2H_PPDU_STATS_IND_HDR_SIZE
5435 		+ qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload);
5436 
5437 	data_size = qdf_nbuf_len(mpdu);
5438 
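	/* Build an HTT PPDU stats indication header plus an
	 * RX_MGMTCTRL_PAYLOAD TLV in the nbuf headroom so the CBF frame
	 * can be delivered through the WDI event path below.
	 */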
5439 	qdf_nbuf_push_head(mpdu, hdr_size);
5440 
5441 	msg_word = (uint32_t *)qdf_nbuf_data(mpdu);
5442 	/*
5443 	 * Populate the PPDU Stats Indication header
5444 	 */
5445 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_T2H_MSG_TYPE_PPDU_STATS_IND);
5446 	HTT_T2H_PPDU_STATS_MAC_ID_SET(*msg_word, mac_id);
5447 	HTT_T2H_PPDU_STATS_PDEV_ID_SET(*msg_word, pdev->pdev_id);
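	/* Round the payload size up to the next 4-byte boundary:
	 * ((x + 3) >> 2) << 2 clears the two low bits after adding 3.
	 */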
5448 	align4byte = ((data_size +
5449 		qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
5450 		+ 3) >> 2) << 2;
5451 	HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_SET(*msg_word, align4byte);
5452 	msg_word++;
5453 	HTT_T2H_PPDU_STATS_PPDU_ID_SET(*msg_word, ppdu_id);
5454 	msg_word++;
5455 
5456 	*msg_word = msdu_timestamp;
5457 	msg_word++;
5458 	/* Skip reserved field */
5459 	msg_word++;
5460 	/*
5461 	 * Populate MGMT_CTRL Payload TLV first
5462 	 */
5463 	HTT_STATS_TLV_TAG_SET(*msg_word,
5464 			      HTT_PPDU_STATS_RX_MGMTCTRL_PAYLOAD_TLV);
5465 
5466 	align4byte = ((data_size - sizeof(htt_tlv_hdr_t) +
5467 		qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
5468 		+ 3) >> 2) << 2;
5469 	HTT_STATS_TLV_LENGTH_SET(*msg_word, align4byte);
5470 	msg_word++;
5471 
5472 	HTT_PPDU_STATS_RX_MGMTCTRL_TLV_FRAME_LENGTH_SET(
5473 		*msg_word, data_size);
5474 	msg_word++;
5475 
5476 	dp_wdi_event_handler(event, soc, (void *)mpdu,
5477 			     HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id);
5478 
5479 	qdf_nbuf_pull_head(mpdu, hdr_size);
5480 
5481 	return QDF_STATUS_SUCCESS;
5482 }
5483 
5484 #ifdef ATH_SUPPORT_EXT_STAT
/* dp_peer_cal_clients_stats_update() - update peer stats on cal client timer
 * @soc: Datapath SOC
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: None
 */
5490 static void
5491 dp_peer_cal_clients_stats_update(struct dp_soc *soc,
5492 				 struct dp_peer *peer,
5493 				 void *arg)
5494 {
5495 	dp_cal_client_update_peer_stats(&peer->stats);
5496 }
5497 
/* dp_iterate_update_peer_list() - update peer stats on cal client timer
 * @pdev_hdl: pdev handle
 *
 * Return: None
 */
5501 static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
5502 {
5503 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
5504 
5505 	dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL,
5506 			     DP_MOD_ID_CDP);
5507 }
5508 #else
static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
5510 {
5511 }
5512 #endif
5513 
5514 QDF_STATUS dp_mon_soc_cfg_init(struct dp_soc *soc)
5515 {
5516 	int target_type;
5517 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
5518 
5519 	target_type = hal_get_target_type(soc->hal_soc);
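	/* Per-target monitor tuning: discrete targets need no changes;
	 * integrated targets use a minimal delayed-replenish entry count,
	 * most of them support HW NAC monitoring, and QCN9224 additionally
	 * selects monitor mode v2.
	 */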
5520 	switch (target_type) {
5521 	case TARGET_TYPE_QCA6290:
5522 	case TARGET_TYPE_QCA6390:
5523 	case TARGET_TYPE_QCA6490:
5524 	case TARGET_TYPE_QCA6750:
5525 	case TARGET_TYPE_WCN7850:
5526 		/* do nothing */
5527 		break;
5528 	case TARGET_TYPE_QCA8074:
5529 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5530 							   MON_BUF_MIN_ENTRIES);
5531 		break;
5532 	case TARGET_TYPE_QCA8074V2:
5533 	case TARGET_TYPE_QCA6018:
5534 	case TARGET_TYPE_QCA9574:
5535 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5536 							   MON_BUF_MIN_ENTRIES);
5537 		mon_soc->hw_nac_monitor_support = 1;
5538 		break;
5539 	case TARGET_TYPE_QCN9000:
5540 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5541 							   MON_BUF_MIN_ENTRIES);
5542 		mon_soc->hw_nac_monitor_support = 1;
5543 		if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE))
5544 			dp_config_full_mon_mode((struct cdp_soc_t *)soc, 1);
5545 		break;
5546 	case TARGET_TYPE_QCA5018:
5547 	case TARGET_TYPE_QCN6122:
5548 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5549 							   MON_BUF_MIN_ENTRIES);
5550 		mon_soc->hw_nac_monitor_support = 1;
5551 		break;
5552 	case TARGET_TYPE_QCN9224:
5553 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5554 							   MON_BUF_MIN_ENTRIES);
5555 		mon_soc->hw_nac_monitor_support = 1;
5556 		mon_soc->monitor_mode_v2 = 1;
5557 		break;
5558 	default:
		dp_mon_info("%s: Unknown target type %d\n", __func__, target_type);
5560 		qdf_assert_always(0);
5561 		break;
5562 	}
5563 
5564 	dp_mon_info("hw_nac_monitor_support = %d",
5565 		    mon_soc->hw_nac_monitor_support);
5566 
5567 	return QDF_STATUS_SUCCESS;
5568 }
5569 
5570 QDF_STATUS dp_mon_pdev_attach(struct dp_pdev *pdev)
5571 {
5572 	struct dp_soc *soc;
5573 	struct dp_mon_pdev *mon_pdev;
5574 
5575 	if (!pdev) {
5576 		dp_mon_err("pdev is NULL");
5577 		goto fail0;
5578 	}
5579 
5580 	soc = pdev->soc;
5581 
5582 	mon_pdev = (struct dp_mon_pdev *)qdf_mem_malloc(sizeof(*mon_pdev));
5583 	if (!mon_pdev) {
5584 		dp_mon_err("%pK: MONITOR pdev allocation failed", pdev);
5585 		goto fail0;
5586 	}
5587 
5588 	if (dp_mon_rings_alloc(soc, pdev)) {
5589 		dp_mon_err("%pK: MONITOR rings setup failed", pdev);
5590 		goto fail1;
5591 	}
5592 
5593 	/* Rx monitor mode specific init */
5594 	if (dp_rx_pdev_mon_desc_pool_alloc(pdev)) {
5595 		dp_mon_err("%pK: dp_rx_pdev_mon_desc_pool_alloc failed",
5596 			   pdev);
5597 		goto fail2;
5598 	}
5599 
5600 	pdev->monitor_pdev = mon_pdev;
5601 
5602 	return QDF_STATUS_SUCCESS;
5603 fail2:
5604 	dp_mon_rings_free(pdev);
5605 fail1:
5606 	pdev->monitor_pdev = NULL;
5607 	qdf_mem_free(mon_pdev);
5608 fail0:
5609 	return QDF_STATUS_E_NOMEM;
5610 }
5611 
5612 QDF_STATUS dp_mon_pdev_detach(struct dp_pdev *pdev)
5613 {
5614 	struct dp_mon_pdev *mon_pdev;
5615 
5616 	if (!pdev) {
5617 		dp_mon_err("pdev is NULL");
5618 		return QDF_STATUS_E_FAILURE;
5619 	}
5620 	mon_pdev = pdev->monitor_pdev;
5621 	dp_rx_pdev_mon_desc_pool_free(pdev);
5622 	dp_mon_rings_free(pdev);
5623 	pdev->monitor_pdev = NULL;
5624 	qdf_mem_free(mon_pdev);
5625 	return QDF_STATUS_SUCCESS;
5626 }
5627 
5628 QDF_STATUS dp_mon_pdev_init(struct dp_pdev *pdev)
5629 {
5630 	struct dp_soc *soc;
5631 	struct dp_mon_pdev *mon_pdev;
5632 
5633 	if (!pdev) {
5634 		dp_mon_err("pdev is NULL");
5635 		return QDF_STATUS_E_FAILURE;
5636 	}
5637 
5638 	soc = pdev->soc;
5639 	mon_pdev = pdev->monitor_pdev;
5640 
5641 	mon_pdev->filter = dp_mon_filter_alloc(mon_pdev);
5642 	if (!mon_pdev->filter) {
5643 		dp_mon_err("%pK: Memory allocation failed for monitor filter",
5644 			   pdev);
5645 		return QDF_STATUS_E_NOMEM;
5646 	}
5647 
5648 	qdf_spinlock_create(&mon_pdev->ppdu_stats_lock);
5649 	qdf_spinlock_create(&mon_pdev->neighbour_peer_mutex);
5650 	mon_pdev->monitor_configured = false;
5651 	mon_pdev->mon_chan_band = REG_BAND_UNKNOWN;
5652 
5653 	TAILQ_INIT(&mon_pdev->neighbour_peers_list);
5654 	mon_pdev->neighbour_peers_added = false;
5656 	/* Monitor filter init */
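	/* Default both FP (filter pass) and MO (monitor other) categories
	 * to accept all mgmt/ctrl/data frame subtypes.
	 */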
5657 	mon_pdev->mon_filter_mode = MON_FILTER_ALL;
5658 	mon_pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
5659 	mon_pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
5660 	mon_pdev->fp_data_filter = FILTER_DATA_ALL;
5661 	mon_pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
5662 	mon_pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
5663 	mon_pdev->mo_data_filter = FILTER_DATA_ALL;
5664 
5665 	/*
5666 	 * initialize ppdu tlv list
5667 	 */
5668 	TAILQ_INIT(&mon_pdev->ppdu_info_list);
5669 	TAILQ_INIT(&mon_pdev->sched_comp_ppdu_list);
5670 
5671 	mon_pdev->list_depth = 0;
5672 	mon_pdev->tlv_count = 0;
	/* initialize cal client timer */
5674 	dp_cal_client_attach(&mon_pdev->cal_client_ctx,
5675 			     dp_pdev_to_cdp_pdev(pdev),
5676 			     pdev->soc->osdev,
5677 			     &dp_iterate_update_peer_list);
5678 	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
5679 		goto fail0;
5680 
5681 	if (dp_mon_rings_init(soc, pdev)) {
5682 		dp_mon_err("%pK: MONITOR rings setup failed", pdev);
5683 		goto fail1;
5684 	}
5685 	/* initialize sw monitor rx descriptors */
5686 	dp_rx_pdev_mon_desc_pool_init(pdev);
5687 	/* allocate buffers and replenish the monitor RxDMA ring */
5688 	dp_rx_pdev_mon_buffers_alloc(pdev);
5689 	dp_tx_ppdu_stats_attach(pdev);
5690 	mon_pdev->is_dp_mon_pdev_initialized = true;
5691 
5692 	return QDF_STATUS_SUCCESS;
5693 fail1:
5694 	dp_htt_ppdu_stats_detach(pdev);
5695 fail0:
5696 	qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
5697 	qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
5698 	dp_mon_filter_dealloc(mon_pdev);
5699 	return QDF_STATUS_E_FAILURE;
5700 }
5701 
5702 QDF_STATUS dp_mon_pdev_deinit(struct dp_pdev *pdev)
5703 {
5704 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5705 
5706 	if (!mon_pdev->is_dp_mon_pdev_initialized)
5707 		return QDF_STATUS_SUCCESS;
5708 
5709 	dp_tx_ppdu_stats_detach(pdev);
5710 	dp_rx_pdev_mon_buffers_free(pdev);
5711 	dp_rx_pdev_mon_desc_pool_deinit(pdev);
5712 	dp_mon_rings_deinit(pdev);
5713 	dp_cal_client_detach(&mon_pdev->cal_client_ctx);
5714 	dp_htt_ppdu_stats_detach(pdev);
5715 	qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
5716 	dp_neighbour_peers_detach(pdev);
5717 	dp_pktlogmod_exit(pdev);
5718 	if (mon_pdev->filter)
5719 		dp_mon_filter_dealloc(mon_pdev);
5721 	mon_pdev->is_dp_mon_pdev_initialized = false;
5722 
5723 	return QDF_STATUS_SUCCESS;
5724 }
5725 
5726 static QDF_STATUS dp_mon_vdev_attach(struct dp_vdev *vdev)
5727 {
5728 	struct dp_mon_vdev *mon_vdev;
5729 	struct dp_pdev *pdev = vdev->pdev;
5730 
5731 	mon_vdev = (struct dp_mon_vdev *)qdf_mem_malloc(sizeof(*mon_vdev));
5732 	if (!mon_vdev) {
5733 		dp_mon_err("%pK: Monitor vdev allocation failed", vdev);
5734 		return QDF_STATUS_E_NOMEM;
5735 	}
5736 
5737 	if (pdev->monitor_pdev->scan_spcl_vap_configured)
5738 		dp_scan_spcl_vap_stats_attach(mon_vdev);
5739 
5740 	vdev->monitor_vdev = mon_vdev;
5741 
5742 	return QDF_STATUS_SUCCESS;
5743 }
5744 
5745 static QDF_STATUS dp_mon_vdev_detach(struct dp_vdev *vdev)
5746 {
5747 	struct dp_mon_vdev *mon_vdev = vdev->monitor_vdev;
5748 	struct dp_pdev *pdev = vdev->pdev;
5749 
5750 	if (!mon_vdev)
5751 		return QDF_STATUS_E_FAILURE;
5752 
5753 	if (pdev->monitor_pdev->scan_spcl_vap_configured)
5754 		dp_scan_spcl_vap_stats_detach(mon_vdev);
5755 
5756 	qdf_mem_free(mon_vdev);
5757 	vdev->monitor_vdev = NULL;
5758 	/* set mvdev to NULL only if detach is called for monitor/special vap
5759 	 */
5760 	if (pdev->monitor_pdev->mvdev == vdev)
5761 		pdev->monitor_pdev->mvdev = NULL;
5762 
5763 	return QDF_STATUS_SUCCESS;
5764 }
5765 
5766 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
5767 static QDF_STATUS dp_mon_peer_attach(struct dp_peer *peer)
5768 {
5769 	struct dp_mon_peer *mon_peer;
5770 	struct dp_pdev *pdev;
5771 
5772 	mon_peer = (struct dp_mon_peer *)qdf_mem_malloc(sizeof(*mon_peer));
5773 	if (!mon_peer) {
5774 		dp_mon_err("%pK: MONITOR peer allocation failed", peer);
5775 		return QDF_STATUS_E_NOMEM;
5776 	}
5777 
5778 	peer->monitor_peer = mon_peer;
5779 	pdev = peer->vdev->pdev;
	/*
	 * In tx_monitor mode, a filter may be set for a peer that is not
	 * yet associated; once that peer associates, the tx_cap_enabled
	 * flag needs to be updated to support peer filtering.
	 */
5785 	dp_peer_tx_capture_filter_check(pdev, peer);
5786 
5787 	return QDF_STATUS_SUCCESS;
5788 }
5789 #else
5790 static QDF_STATUS dp_mon_peer_attach(struct dp_peer *peer)
5791 {
5792 	return QDF_STATUS_SUCCESS;
5793 }
5794 #endif
5795 
5796 static QDF_STATUS dp_mon_peer_detach(struct dp_peer *peer)
5797 {
5798 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
5799 
5800 	qdf_mem_free(mon_peer);
5801 	peer->monitor_peer = NULL;
5802 
5803 	return QDF_STATUS_SUCCESS;
5804 }
5805 
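/* Monitor operations table plugged into the SoC by dp_mon_ops_register();
 * feature-specific entries are compiled in only when the corresponding
 * build flag is enabled.
 */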
5806 static struct dp_mon_ops monitor_ops = {
5807 	.mon_soc_cfg_init = dp_mon_soc_cfg_init,
5808 	.mon_pdev_attach = dp_mon_pdev_attach,
5809 	.mon_pdev_detach = dp_mon_pdev_detach,
5810 	.mon_pdev_init = dp_mon_pdev_init,
5811 	.mon_pdev_deinit = dp_mon_pdev_deinit,
5812 	.mon_vdev_attach = dp_mon_vdev_attach,
5813 	.mon_vdev_detach = dp_mon_vdev_detach,
5814 	.mon_peer_attach = dp_mon_peer_attach,
5815 	.mon_peer_detach = dp_mon_peer_detach,
5816 	.mon_config_debug_sniffer = dp_config_debug_sniffer,
5817 	.mon_flush_rings = dp_flush_monitor_rings,
5818 #if !defined(DISABLE_MON_CONFIG)
5819 	.mon_htt_srng_setup = dp_mon_htt_srng_setup,
5820 #endif
5821 #if defined(DP_CON_MON)
5822 	.mon_service_rings = dp_service_mon_rings,
5823 #endif
5824 #ifndef DISABLE_MON_CONFIG
5825 	.mon_process = dp_mon_process,
5826 #endif
5827 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
5828 	.mon_drop_packets_for_mac = dp_mon_drop_packets_for_mac,
5829 #endif
5830 	.mon_peer_tx_init = dp_peer_tx_init,
5831 	.mon_peer_tx_cleanup = dp_peer_tx_cleanup,
5832 #ifdef WLAN_TX_PKT_CAPTURE_ENH
5833 	.mon_peer_tid_peer_id_update = dp_peer_tid_peer_id_update,
5834 	.mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach,
5835 	.mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach,
5836 	.mon_tx_capture_debugfs_init = dp_tx_capture_debugfs_init,
5837 	.mon_tx_add_to_comp_queue = dp_tx_add_to_comp_queue,
5838 	.mon_peer_tx_capture_filter_check = dp_peer_tx_capture_filter_check,
5839 	.mon_update_msdu_to_list = dp_update_msdu_to_list,
5840 #endif
5841 #if defined(WDI_EVENT_ENABLE) &&\
5842 	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
5843 	.mon_ppdu_stats_ind_handler = dp_ppdu_stats_ind_handler,
5844 #endif
5845 	.mon_htt_ppdu_stats_attach = dp_htt_ppdu_stats_attach,
5846 	.mon_htt_ppdu_stats_detach = dp_htt_ppdu_stats_detach,
5847 	.mon_print_pdev_rx_mon_stats = dp_print_pdev_rx_mon_stats,
5848 #ifdef WLAN_TX_PKT_CAPTURE_ENH
5849 	.mon_print_pdev_tx_capture_stats = dp_print_pdev_tx_capture_stats,
5850 	.mon_config_enh_tx_capture = dp_config_enh_tx_capture,
5851 #endif
5852 #ifdef WLAN_RX_PKT_CAPTURE_ENH
5853 	.mon_config_enh_rx_capture = dp_config_enh_rx_capture,
5854 #endif
5855 #ifdef QCA_SUPPORT_BPR
5856 	.mon_set_bpr_enable = dp_set_bpr_enable,
5857 #endif
5858 
5859 #ifdef ATH_SUPPORT_NAC
5860 	.mon_set_filter_neigh_peers = dp_set_filter_neigh_peers,
5861 #endif
5862 #ifdef WLAN_ATF_ENABLE
5863 	.mon_set_atf_stats_enable = dp_set_atf_stats_enable,
5864 #endif
5865 	.mon_set_bsscolor = dp_mon_set_bsscolor,
5866 	.mon_pdev_get_filter_ucast_data = dp_pdev_get_filter_ucast_data,
5867 	.mon_pdev_get_filter_mcast_data = dp_pdev_get_filter_mcast_data,
5868 	.mon_pdev_get_filter_non_data = dp_pdev_get_filter_non_data,
5869 #ifdef WDI_EVENT_ENABLE
5870 	.mon_set_pktlog_wifi3 = dp_set_pktlog_wifi3,
5871 #endif
5872 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
5873 	.mon_pktlogmod_exit = dp_pktlogmod_exit,
5874 #endif
5875 	.mon_vdev_set_monitor_mode_buf_rings =
5876 			dp_vdev_set_monitor_mode_buf_rings,
5877 	.mon_neighbour_peers_detach = dp_neighbour_peers_detach,
5878 #ifdef FEATURE_NAC_RSSI
5879 	.mon_filter_neighbour_peer = dp_filter_neighbour_peer,
5880 #endif
5881 	.mon_vdev_timer_init = dp_mon_vdev_timer_init,
5882 	.mon_vdev_timer_start = dp_mon_vdev_timer_start,
5883 	.mon_vdev_timer_stop = dp_mon_vdev_timer_stop,
5884 	.mon_vdev_timer_deinit = dp_mon_vdev_timer_deinit,
5885 	.mon_reap_timer_init = dp_mon_reap_timer_init,
5886 	.mon_reap_timer_start = dp_mon_reap_timer_start,
5887 	.mon_reap_timer_stop = dp_mon_reap_timer_stop,
5888 	.mon_reap_timer_deinit = dp_mon_reap_timer_deinit,
5889 #ifdef QCA_MCOPY_SUPPORT
5890 	.mon_mcopy_check_deliver = dp_mcopy_check_deliver,
5891 #endif
5892 	.mon_neighbour_peer_add_ast = dp_mon_neighbour_peer_add_ast,
5893 };
5894 
5895 static struct cdp_mon_ops dp_ops_mon = {
5896 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
5897 	/* Added support for HK advance filter */
5898 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
5899 	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
5900 	.config_full_mon_mode = dp_config_full_mon_mode,
5901 };
5902 
5903 void dp_mon_ops_register(struct dp_mon_soc *mon_soc)
5904 {
5905 	mon_soc->mon_ops = &monitor_ops;
5906 }
5907 
5908 void dp_mon_cdp_ops_register(struct dp_soc *soc)
5909 {
5910 	struct cdp_ops *ops = soc->cdp_soc.ops;
5911 
5912 	if (!ops) {
5913 		dp_mon_err("cdp_ops is NULL");
5914 		return;
5915 	}
5916 
5917 	ops->mon_ops = &dp_ops_mon;
5918 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
5919 	ops->cfr_ops->txrx_cfr_filter = dp_cfr_filter;
5920 	ops->cfr_ops->txrx_enable_mon_reap_timer = dp_enable_mon_reap_timer;
5921 #endif
5922 	ops->cmn_drv_ops->txrx_set_monitor_mode = dp_vdev_set_monitor_mode;
5923 	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev =
5924 				dp_get_mon_vdev_from_pdev_wifi3;
5925 #ifdef DP_PEER_EXTENDED_API
5926 	ops->misc_ops->pkt_log_init = dp_pkt_log_init;
5927 	ops->misc_ops->pkt_log_con_service = dp_pkt_log_con_service;
5928 	ops->misc_ops->pkt_log_exit = dp_pkt_log_exit;
5929 #endif
5930 #ifdef ATH_SUPPORT_NAC_RSSI
5931 	ops->ctrl_ops->txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi;
5932 	ops->ctrl_ops->txrx_vdev_get_neighbour_rssi =
5933 					dp_vdev_get_neighbour_rssi;
5934 #endif
5935 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
5936 	ops->ctrl_ops->txrx_update_filter_neighbour_peers =
5937 		dp_update_filter_neighbour_peers;
5938 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
5939 	ops->ctrl_ops->enable_peer_based_pktlog =
5940 				dp_enable_peer_based_pktlog;
5941 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
5942 	ops->ctrl_ops->txrx_update_peer_pkt_capture_params =
5943 				 dp_peer_update_pkt_capture_params;
5944 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
5945 #ifdef QCA_ENHANCED_STATS_SUPPORT
5946 	ops->host_stats_ops->txrx_enable_enhanced_stats =
5947 					dp_enable_enhanced_stats;
5948 	ops->host_stats_ops->txrx_disable_enhanced_stats =
5949 					dp_disable_enhanced_stats;
5950 #endif /* QCA_ENHANCED_STATS_SUPPORT */
5951 #ifdef WDI_EVENT_ENABLE
5952 	ops->ctrl_ops->txrx_get_pldev = dp_get_pldev;
5953 #endif
5954 #ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
5955 	ops->host_stats_ops->txrx_get_scan_spcl_vap_stats =
5956 					dp_get_scan_spcl_vap_stats;
5957 #endif
5959 }
5960 
5961 void dp_mon_cdp_ops_deregister(struct dp_soc *soc)
5962 {
5963 	struct cdp_ops *ops = soc->cdp_soc.ops;
5964 
5965 	if (!ops) {
5966 		dp_mon_err("cdp_ops is NULL");
5967 		return;
5968 	}
5969 
5970 	ops->mon_ops = NULL;
5971 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
5972 	ops->cfr_ops->txrx_cfr_filter = NULL;
5973 	ops->cfr_ops->txrx_enable_mon_reap_timer = NULL;
5974 #endif
5975 	ops->cmn_drv_ops->txrx_set_monitor_mode = NULL;
5976 	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev = NULL;
5977 #ifdef DP_PEER_EXTENDED_API
5978 	ops->misc_ops->pkt_log_init = NULL;
5979 	ops->misc_ops->pkt_log_con_service = NULL;
5980 	ops->misc_ops->pkt_log_exit = NULL;
5981 #endif
5982 #ifdef ATH_SUPPORT_NAC_RSSI
5983 	ops->ctrl_ops->txrx_vdev_config_for_nac_rssi = NULL;
5984 	ops->ctrl_ops->txrx_vdev_get_neighbour_rssi = NULL;
5985 #endif
5986 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
5987 	ops->ctrl_ops->txrx_update_filter_neighbour_peers = NULL;
5988 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
5989 	ops->ctrl_ops->enable_peer_based_pktlog = NULL;
5990 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
5991 	ops->ctrl_ops->txrx_update_peer_pkt_capture_params = NULL;
5992 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
#ifdef QCA_ENHANCED_STATS_SUPPORT
	ops->host_stats_ops->txrx_enable_enhanced_stats = NULL;
	ops->host_stats_ops->txrx_disable_enhanced_stats = NULL;
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#ifdef WDI_EVENT_ENABLE
	ops->ctrl_ops->txrx_get_pldev = NULL;
#endif
#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
	ops->host_stats_ops->txrx_get_scan_spcl_vap_stats = NULL;
#endif
6001 }
6002 
6003 QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc)
6004 {
6005 	struct dp_mon_soc *mon_soc;
6006 
6007 	if (!soc) {
6008 		dp_mon_err("dp_soc is NULL");
6009 		return QDF_STATUS_E_FAILURE;
6010 	}
6011 
6012 	mon_soc = (struct dp_mon_soc *)qdf_mem_malloc(sizeof(*mon_soc));
6013 	if (!mon_soc) {
6014 		dp_mon_err("%pK: mem allocation failed", soc);
6015 		return QDF_STATUS_E_NOMEM;
6016 	}
6017 	/* register monitor ops */
6018 	dp_mon_ops_register(mon_soc);
6019 	soc->monitor_soc = mon_soc;
6020 
6021 	dp_mon_cdp_ops_register(soc);
6022 	return QDF_STATUS_SUCCESS;
6023 }
6024 
6025 QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc)
6026 {
6027 	struct dp_mon_soc *mon_soc;
6028 
6029 	if (!soc) {
6030 		dp_mon_err("dp_soc is NULL");
6031 		return QDF_STATUS_E_FAILURE;
6032 	}
6033 
	mon_soc = soc->monitor_soc;
	if (!mon_soc) {
		dp_mon_err("%pK: monitor SOC not initialized", soc);
		return QDF_STATUS_E_FAILURE;
	}

	dp_mon_vdev_timer_deinit(soc);
6036 	dp_mon_cdp_ops_deregister(soc);
6037 	soc->monitor_soc = NULL;
6038 	qdf_mem_free(mon_soc);
6039 	return QDF_STATUS_SUCCESS;
6040 }
6041