xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/1.0/dp_mon_1.0.c (revision 2888b71da71bce103343119fa1b31f4a0cee07c8)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 #include <dp_types.h>
18 #include "dp_rx.h"
19 #include "dp_peer.h"
20 #include <dp_htt.h>
21 #include <dp_mon_filter.h>
22 #include <dp_mon.h>
23 #include <dp_rx_mon.h>
24 #include <dp_rx_mon_1.0.h>
25 #include <dp_mon_1.0.h>
26 #include <dp_mon_filter_1.0.h>
27 
28 #include "htt_ppdu_stats.h"
29 #if defined(DP_CON_MON)
30 #ifndef REMOVE_PKT_LOG
31 #include <pktlog_ac_api.h>
32 #include <pktlog_ac.h>
33 #endif
34 #endif
35 #ifdef FEATURE_PERPKT_INFO
36 #include "dp_ratetable.h"
37 #endif
38 
39 #ifdef WLAN_TX_PKT_CAPTURE_ENH
40 #include "dp_tx_capture.h"
41 #endif
42 
43 extern QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
44 				int ring_type, uint32_t num_entries,
45 				bool cached);
46 extern void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng);
47 extern QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
48 			       int ring_type, int ring_num, int mac_id);
49 extern void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
50 			   int ring_type, int ring_num);
51 
52 extern enum timer_yield_status
53 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
54 			  uint64_t start_time);
55 
56 #ifdef QCA_ENHANCED_STATS_SUPPORT
57 void
58 dp_mon_populate_ppdu_info_1_0(struct hal_rx_ppdu_info *hal_ppdu_info,
59 			      struct cdp_rx_indication_ppdu *ppdu)
60 {
61 	ppdu->u.preamble = hal_ppdu_info->rx_status.preamble_type;
62 	ppdu->u.bw = hal_ppdu_info->rx_status.bw;
63 	ppdu->punc_bw = 0;
64 }
65 
66 /*
67  * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer
68  *                              modes are enabled or not.
69  * @dp_pdev: dp pdev handle.
70  *
71  * Return: bool
72  */
73 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
74 {
75 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
76 
77 	if (!mon_pdev->pktlog_ppdu_stats && !mon_pdev->tx_sniffer_enable &&
78 	    !mon_pdev->mcopy_mode)
79 		return true;
80 	else
81 		return false;
82 }
83 
84 /**
85  * dp_mon_tx_enable_enhanced_stats_1_0() - Send HTT cmd to FW to enable stats
86  * @pdev: Datapath pdev handle
87  *
88  * Return: none
89  */
90 static void dp_mon_tx_enable_enhanced_stats_1_0(struct dp_pdev *pdev)
91 {
92 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
93 
94 	if (is_ppdu_txrx_capture_enabled(pdev) && !mon_pdev->bpr_enable) {
95 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
96 					  pdev->pdev_id);
97 	} else if (is_ppdu_txrx_capture_enabled(pdev) &&
98 		   mon_pdev->bpr_enable) {
99 		dp_h2t_cfg_stats_msg_send(pdev,
100 					  DP_PPDU_STATS_CFG_BPR_ENH,
101 					  pdev->pdev_id);
102 	}
103 }
104 
105 /**
106  * dp_mon_tx_disable_enhanced_stats_1_0() - Send HTT cmd to FW to disable stats
107  * @pdev: Datapath pdev handle
108  *
109  * Return: none
110  */
111 static void dp_mon_tx_disable_enhanced_stats_1_0(struct dp_pdev *pdev)
112 {
113 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
114 
115 	if (is_ppdu_txrx_capture_enabled(pdev) && !mon_pdev->bpr_enable) {
116 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
117 	} else if (is_ppdu_txrx_capture_enabled(pdev) && mon_pdev->bpr_enable) {
118 		dp_h2t_cfg_stats_msg_send(pdev,
119 					  DP_PPDU_STATS_CFG_BPR,
120 					  pdev->pdev_id);
121 	}
122 }
123 #endif
124 
125 #ifdef QCA_SUPPORT_FULL_MON
126 static QDF_STATUS
127 dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
128 			uint8_t val)
129 {
130 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
131 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
132 
133 	mon_soc->full_mon_mode = val;
134 	dp_cdp_err("Configure full monitor mode val: %d ", val);
135 
136 	return QDF_STATUS_SUCCESS;
137 }
138 
139 static QDF_STATUS
140 dp_soc_config_full_mon_mode(struct cdp_pdev *cdp_pdev, uint8_t val)
141 {
142 	struct dp_pdev *pdev = (struct dp_pdev *)cdp_pdev;
143 	struct dp_soc *soc = pdev->soc;
144 	QDF_STATUS status = QDF_STATUS_SUCCESS;
145 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
146 
147 	if (!mon_soc->full_mon_mode)
148 		return QDF_STATUS_SUCCESS;
149 
150 	if ((htt_h2t_full_mon_cfg(soc->htt_handle,
151 				  pdev->pdev_id,
152 				  val)) != QDF_STATUS_SUCCESS) {
153 		status = QDF_STATUS_E_FAILURE;
154 	}
155 
156 	return status;
157 }
158 #else
159 static inline QDF_STATUS
160 dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
161 			uint8_t val)
162 {
163 	return 0;
164 }
165 
166 static inline QDF_STATUS
167 dp_soc_config_full_mon_mode(struct cdp_pdev *cdp_pdev,
168 			    uint8_t val)
169 {
170 	return 0;
171 }
172 #endif
173 
174 #if !defined(DISABLE_MON_CONFIG)
/**
 * dp_flush_monitor_rings() - reset monitor filters and drain the monitor
 * destination ring so no stale entries remain
 * @soc: DP soc handle
 *
 * Operates on pdev_list[0]; presumably only the primary pdev runs
 * monitor mode here — TODO confirm with callers.
 */
void dp_flush_monitor_rings(struct dp_soc *soc)
{
	struct dp_pdev *pdev = soc->pdev_list[0];
	hal_soc_handle_t hal_soc = soc->hal_soc;
	uint32_t lmac_id;
	uint32_t hp, tp;
	int dp_intr_id;
	int budget;
	void *mon_dst_srng;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	/* Reset monitor filters before reaping the ring*/
	qdf_spin_lock_bh(&mon_pdev->mon_lock);
	dp_mon_filter_reset_mon_mode(pdev);
	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS)
		dp_info("failed to reset monitor filters");
	qdf_spin_unlock_bh(&mon_pdev->mon_lock);

	/* No monitor channel programmed: nothing to reap */
	if (mon_pdev->mon_chan_band == REG_BAND_UNKNOWN)
		return;

	lmac_id = pdev->ch_band_lmac_id_mapping[mon_pdev->mon_chan_band];
	if (qdf_unlikely(lmac_id == DP_MON_INVALID_LMAC_ID))
		return;

	dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
	if (qdf_unlikely(dp_intr_id == DP_MON_INVALID_LMAC_ID))
		return;

	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, lmac_id);

	/* reap full ring */
	budget = wlan_cfg_get_dma_mon_stat_ring_size(pdev->wlan_cfg_ctx);

	/* Log HP/TP around the reap so a stuck ring is visible in logs */
	hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp);
	dp_info("Before reap: Monitor DST ring HP %u TP %u", hp, tp);

	dp_mon_process(soc, &soc->intr_ctx[dp_intr_id], lmac_id, budget);

	hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp);
	dp_info("After reap: Monitor DST ring HP %u TP %u", hp, tp);
}
217 
218 static
219 void dp_mon_rings_deinit_1_0(struct dp_pdev *pdev)
220 {
221 	int mac_id = 0;
222 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
223 	struct dp_soc *soc = pdev->soc;
224 
225 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
226 
227 	for (mac_id = 0;
228 	     mac_id  < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
229 	     mac_id++) {
230 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
231 							 pdev->pdev_id);
232 
233 		dp_srng_deinit(soc, &soc->rxdma_mon_status_ring[lmac_id],
234 			       RXDMA_MONITOR_STATUS, 0);
235 
236 		dp_mon_dest_rings_deinit(pdev, lmac_id);
237 	}
238 }
239 
240 static
241 void dp_mon_rings_free_1_0(struct dp_pdev *pdev)
242 {
243 	int mac_id = 0;
244 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
245 	struct dp_soc *soc = pdev->soc;
246 
247 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
248 
249 	for (mac_id = 0;
250 	     mac_id  < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
251 	     mac_id++) {
252 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
253 							 pdev->pdev_id);
254 
255 		dp_srng_free(soc, &soc->rxdma_mon_status_ring[lmac_id]);
256 
257 		dp_mon_dest_rings_free(pdev, lmac_id);
258 	}
259 }
260 
261 static
262 QDF_STATUS dp_mon_rings_init_1_0(struct dp_pdev *pdev)
263 {
264 	struct dp_soc *soc = pdev->soc;
265 	int mac_id = 0;
266 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
267 
268 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
269 
270 	for (mac_id = 0;
271 	     mac_id  < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
272 	     mac_id++) {
273 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
274 							 pdev->pdev_id);
275 
276 		if (dp_srng_init(soc, &soc->rxdma_mon_status_ring[lmac_id],
277 				 RXDMA_MONITOR_STATUS, 0, lmac_id)) {
278 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_status_ring",
279 				   soc);
280 			goto fail1;
281 		}
282 
283 		if (dp_mon_dest_rings_init(pdev, lmac_id))
284 			goto fail1;
285 	}
286 	return QDF_STATUS_SUCCESS;
287 
288 fail1:
289 	dp_mon_rings_deinit_1_0(pdev);
290 	return QDF_STATUS_E_NOMEM;
291 }
292 
293 static
294 QDF_STATUS dp_mon_rings_alloc_1_0(struct dp_pdev *pdev)
295 {
296 	struct dp_soc *soc = pdev->soc;
297 	int mac_id = 0;
298 	int entries;
299 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
300 
301 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
302 
303 	for (mac_id = 0;
304 	     mac_id  < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
305 	     mac_id++) {
306 		int lmac_id =
307 		dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
308 		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
309 		if (dp_srng_alloc(soc, &soc->rxdma_mon_status_ring[lmac_id],
310 				  RXDMA_MONITOR_STATUS, entries, 0)) {
311 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_status_ring",
312 				   soc);
313 			goto fail1;
314 		}
315 
316 		if (dp_mon_dest_rings_alloc(pdev, lmac_id))
317 			goto fail1;
318 	}
319 	return QDF_STATUS_SUCCESS;
320 
321 fail1:
322 	dp_mon_rings_free_1_0(pdev);
323 	return QDF_STATUS_E_NOMEM;
324 }
325 #else
/* No-op stub: monitor ring config is compiled out (DISABLE_MON_CONFIG) */
inline
void dp_flush_monitor_rings(struct dp_soc *soc)
{
}
330 
/* No-op stub: monitor rings are not built (DISABLE_MON_CONFIG) */
static inline
void dp_mon_rings_deinit_1_0(struct dp_pdev *pdev)
{
}
335 
/* No-op stub: monitor rings are not built (DISABLE_MON_CONFIG) */
static inline
void dp_mon_rings_free_1_0(struct dp_pdev *pdev)
{
}
340 
/* Stub: nothing to initialize when monitor config is disabled */
static inline
QDF_STATUS dp_mon_rings_init_1_0(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
346 
/* Stub: nothing to allocate when monitor config is disabled */
static inline
QDF_STATUS dp_mon_rings_alloc_1_0(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
352 
353 #endif
354 
355 #ifdef QCA_MONITOR_PKT_SUPPORT
/**
 * dp_vdev_set_monitor_mode_buf_rings() - set up monitor RX buffer rings
 * for a pdev entering monitor mode
 * @pdev: Datapath pdev handle
 *
 * When delayed replenish is disabled, full ring setup is delegated to
 * dp_vdev_set_monitor_mode_rings(); otherwise only the buffers are
 * replenished here and each buffer ring is re-announced to the target.
 *
 * Return: QDF_STATUS_SUCCESS (always)
 */
QDF_STATUS dp_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev)
{
	uint32_t mac_id;
	uint32_t mac_for_pdev;
	struct dp_srng *mon_buf_ring;
	uint32_t num_entries;
	struct dp_soc *soc = pdev->soc;

	/* If delay monitor replenish is disabled, allocate link descriptor
	 * monitor ring buffers of ring size.
	 */
	if (!wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) {
		dp_vdev_set_monitor_mode_rings(pdev, false);
	} else {
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			mac_for_pdev =
				dp_get_lmac_id_for_pdev_id(pdev->soc,
							   mac_id,
							   pdev->pdev_id);

			/* NOTE(review): alloc status ignored — presumably
			 * best-effort replenish; confirm.
			 */
			dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
							 FALSE);
			mon_buf_ring =
				&pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
			/*
			 * Configure low interrupt threshold when monitor mode
			 * is configured.
			 */
			if (mon_buf_ring->hal_srng) {
				num_entries = mon_buf_ring->num_entries;
				hal_set_low_threshold(mon_buf_ring->hal_srng,
						      num_entries >> 3);
				htt_srng_setup(pdev->soc->htt_handle,
					       pdev->pdev_id,
					       mon_buf_ring->hal_srng,
					       RXDMA_MONITOR_BUF);
			}
		}
	}
	return QDF_STATUS_SUCCESS;
}
397 #endif
398 
399 #ifdef QCA_MONITOR_PKT_SUPPORT
/**
 * dp_vdev_set_monitor_mode_rings() - set up monitor RX descriptor pools,
 * buffers and link descriptors for every RXDMA mac of the pdev, and
 * announce the rings to the target via HTT
 * @pdev: Datapath pdev handle
 * @delayed_replenish: passed through to the buffer allocation; when set,
 *                     replenish is deferred
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE on allocation
 * failure (see NOTE at fail0)
 */
QDF_STATUS dp_vdev_set_monitor_mode_rings(struct dp_pdev *pdev,
					  uint8_t delayed_replenish)
{
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
	uint32_t mac_id;
	uint32_t mac_for_pdev;
	struct dp_soc *soc = pdev->soc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_srng *mon_buf_ring;
	uint32_t num_entries;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;

	/* If monitor rings are already initialized, return from here */
	if (mon_pdev->pdev_mon_init)
		return QDF_STATUS_SUCCESS;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
							  pdev->pdev_id);

		/* Allocate sw rx descriptor pool for mon RxDMA buffer ring */
		status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev);
		if (!QDF_IS_STATUS_SUCCESS(status)) {
			dp_err("%s: dp_rx_pdev_mon_buf_desc_pool_alloc() failed\n",
			       __func__);
			goto fail0;
		}

		dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev);

		/* If monitor buffers are already allocated,
		 * do not allocate.
		 * NOTE(review): this status is not checked — presumably a
		 * failed replenish is tolerated; confirm.
		 */
		status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
							  delayed_replenish);

		mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
		/*
		 * Configure low interrupt threshold when monitor mode is
		 * configured.
		 */
		if (mon_buf_ring->hal_srng) {
			num_entries = mon_buf_ring->num_entries;
			hal_set_low_threshold(mon_buf_ring->hal_srng,
					      num_entries >> 3);
			htt_srng_setup(pdev->soc->htt_handle,
				       pdev->pdev_id,
				       mon_buf_ring->hal_srng,
				       RXDMA_MONITOR_BUF);
		}

		/* Allocate link descriptors for the mon link descriptor ring */
		status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev);
		if (!QDF_IS_STATUS_SUCCESS(status)) {
			dp_err("%s: dp_hw_link_desc_pool_banks_alloc() failed",
			       __func__);
			goto fail0;
		}
		dp_link_desc_ring_replenish(soc, mac_for_pdev);

		htt_srng_setup(soc->htt_handle, pdev->pdev_id,
			       soc->rxdma_mon_desc_ring[mac_for_pdev].hal_srng,
			       RXDMA_MONITOR_DESC);
		htt_srng_setup(soc->htt_handle, pdev->pdev_id,
			       soc->rxdma_mon_dst_ring[mac_for_pdev].hal_srng,
			       RXDMA_MONITOR_DST);
	}
	mon_pdev->pdev_mon_init = 1;

	return QDF_STATUS_SUCCESS;

fail0:
	/* NOTE(review): pools/banks allocated in earlier iterations are
	 * not unwound here — TODO confirm callers handle partial init.
	 */
	return QDF_STATUS_E_FAILURE;
}
476 #endif
477 
/* dp_mon_vdev_timer() - timer poll for interrupts
 *
 * @arg: SoC Handle (void * as required by the qdf timer API)
 *
 * Services the monitor ring of the lmac mapped to the current monitor
 * channel band and drops packets on all other macs, until the work
 * budget or time slice is exhausted; then re-arms itself (1 ms when
 * exhausted, else the normal poll interval).
 *
 * Return:
 *
 */
static void dp_mon_vdev_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	struct dp_pdev *pdev = soc->pdev_list[0];
	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
	uint32_t work_done  = 0, total_work_done = 0;
	int budget = 0xffff;
	uint32_t remaining_quota = budget;
	uint64_t start_time;
	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
	uint32_t lmac_iter;
	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	/* Bail out (without re-arming) until common init has completed */
	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	/* Only the lmac serving the monitor channel band gets processed */
	if (mon_pdev->mon_chan_band != REG_BAND_UNKNOWN)
		lmac_id = pdev->ch_band_lmac_id_mapping[mon_pdev->mon_chan_band];

	start_time = qdf_get_log_timestamp();
	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);

	while (yield == DP_TIMER_NO_YIELD) {
		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
			if (lmac_iter == lmac_id)
				work_done = dp_monitor_process(
						    soc, NULL,
						    lmac_iter, remaining_quota);
			else
				work_done =
					dp_monitor_drop_packets_for_mac(pdev,
								     lmac_iter,
								     remaining_quota);
			if (work_done) {
				budget -=  work_done;
				if (budget <= 0) {
					yield = DP_TIMER_WORK_EXHAUST;
					goto budget_done;
				}
				remaining_quota = budget;
				total_work_done += work_done;
			}
		}

		/* Yield on elapsed time even if budget remains */
		yield = dp_should_timer_irq_yield(soc, total_work_done,
						  start_time);
		total_work_done = 0;
	}

budget_done:
	/* Re-arm quickly (1 ms) when there is likely more work pending */
	if (yield == DP_TIMER_WORK_EXHAUST ||
	    yield == DP_TIMER_TIME_EXHAUST)
		qdf_timer_mod(&mon_soc->mon_vdev_timer, 1);
	else
		qdf_timer_mod(&mon_soc->mon_vdev_timer, DP_INTR_POLL_TIMER_MS);
}
543 
544 /* MCL specific functions */
545 #if defined(DP_CON_MON)
546 /*
547  * dp_mon_reap_timer_handler()- timer to reap monitor rings
548  * reqd as we are not getting ppdu end interrupts
549  * @arg: SoC Handle
550  *
551  * Return:
552  *
553  */
554 static void dp_mon_reap_timer_handler(void *arg)
555 {
556 	struct dp_soc *soc = (struct dp_soc *)arg;
557 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
558 
559 	dp_service_mon_rings(soc, QCA_NAPI_BUDGET);
560 
561 	qdf_timer_mod(&mon_soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
562 }
563 
564 static void dp_mon_reap_timer_init(struct dp_soc *soc)
565 {
566 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
567 
568 	qdf_spinlock_create(&mon_soc->reap_timer_lock);
569 	qdf_timer_init(soc->osdev, &mon_soc->mon_reap_timer,
570 		       dp_mon_reap_timer_handler, (void *)soc,
571 		       QDF_TIMER_TYPE_WAKE_APPS);
572 	qdf_mem_zero(mon_soc->mon_reap_src_bitmap,
573 		     sizeof(mon_soc->mon_reap_src_bitmap));
574 	mon_soc->reap_timer_init = 1;
575 }
576 #else
/* No-op stub: the reap timer exists only for DP_CON_MON builds */
static void dp_mon_reap_timer_init(struct dp_soc *soc)
{
}
580 #endif
581 
582 static void dp_mon_reap_timer_deinit(struct dp_soc *soc)
583 {
584 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
585         if (mon_soc->reap_timer_init) {
586 		mon_soc->reap_timer_init = 0;
587 		qdf_timer_free(&mon_soc->mon_reap_timer);
588 		qdf_spinlock_destroy(&mon_soc->reap_timer_lock);
589         }
590 }
591 
592 /**
593  * dp_mon_reap_timer_start() - start reap timer of monitor status ring
594  * @soc: point to soc
595  * @source: trigger source
596  *
597  * If the source is CDP_MON_REAP_SOURCE_ANY, skip bit set, and start timer
598  * if any bit has been set in the bitmap; while for the other sources, set
599  * the bit and start timer if the bitmap is empty before that.
600  *
601  * Return: true if timer-start is performed, false otherwise.
602  */
603 static bool
604 dp_mon_reap_timer_start(struct dp_soc *soc, enum cdp_mon_reap_source source)
605 {
606 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
607 	bool do_start;
608 
609 	if (!mon_soc->reap_timer_init)
610 		return false;
611 
612 	qdf_spin_lock_bh(&mon_soc->reap_timer_lock);
613 	do_start = qdf_bitmap_empty(mon_soc->mon_reap_src_bitmap,
614 				    CDP_MON_REAP_SOURCE_NUM);
615 	if (source == CDP_MON_REAP_SOURCE_ANY)
616 		do_start = !do_start;
617 	else
618 		qdf_set_bit(source, mon_soc->mon_reap_src_bitmap);
619 	qdf_spin_unlock_bh(&mon_soc->reap_timer_lock);
620 
621 	if (do_start)
622 		qdf_timer_mod(&mon_soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
623 
624 	return do_start;
625 }
626 
627 /**
628  * dp_mon_reap_timer_stop() - stop reap timer of monitor status ring
629  * @soc: point to soc
630  * @source: trigger source
631  *
632  * If the source is CDP_MON_REAP_SOURCE_ANY, skip bit clear, and stop timer
633  * if any bit has been set in the bitmap; while for the other sources, clear
634  * the bit and stop the timer if the bitmap is empty after that.
635  *
636  * Return: true if timer-stop is performed, false otherwise.
637  */
638 static bool
639 dp_mon_reap_timer_stop(struct dp_soc *soc, enum cdp_mon_reap_source source)
640 {
641 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
642 	bool do_stop;
643 
644 	if (!mon_soc->reap_timer_init)
645 		return false;
646 
647 	qdf_spin_lock_bh(&mon_soc->reap_timer_lock);
648 	if (source != CDP_MON_REAP_SOURCE_ANY)
649 		qdf_clear_bit(source, mon_soc->mon_reap_src_bitmap);
650 
651 	do_stop = qdf_bitmap_empty(mon_soc->mon_reap_src_bitmap,
652 				   CDP_MON_REAP_SOURCE_NUM);
653 	if (source == CDP_MON_REAP_SOURCE_ANY)
654 		do_stop = !do_stop;
655 	qdf_spin_unlock_bh(&mon_soc->reap_timer_lock);
656 
657 	if (do_stop)
658 		qdf_timer_sync_cancel(&mon_soc->mon_reap_timer);
659 
660 	return do_stop;
661 }
662 
663 static void dp_mon_vdev_timer_init(struct dp_soc *soc)
664 {
665 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
666 
667         qdf_timer_init(soc->osdev, &mon_soc->mon_vdev_timer,
668                        dp_mon_vdev_timer, (void *)soc,
669                        QDF_TIMER_TYPE_WAKE_APPS);
670         mon_soc->mon_vdev_timer_state |= MON_VDEV_TIMER_INIT;
671 }
672 
673 static void dp_mon_vdev_timer_deinit(struct dp_soc *soc)
674 {
675 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
676         if (mon_soc->mon_vdev_timer_state & MON_VDEV_TIMER_INIT) {
677                 qdf_timer_free(&mon_soc->mon_vdev_timer);
678                 mon_soc->mon_vdev_timer_state = 0;
679         }
680 }
681 
682 static void dp_mon_vdev_timer_start(struct dp_soc *soc)
683 {
684 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
685         if (mon_soc->mon_vdev_timer_state & MON_VDEV_TIMER_INIT) {
686                 qdf_timer_mod(&mon_soc->mon_vdev_timer, DP_INTR_POLL_TIMER_MS);
687                 mon_soc->mon_vdev_timer_state |= MON_VDEV_TIMER_RUNNING;
688         }
689 }
690 
691 static bool dp_mon_vdev_timer_stop(struct dp_soc *soc)
692 {
693 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
694         if (mon_soc->mon_vdev_timer_state & MON_VDEV_TIMER_RUNNING) {
695                 qdf_timer_sync_cancel(&mon_soc->mon_vdev_timer);
696                 mon_soc->mon_vdev_timer_state &= ~MON_VDEV_TIMER_RUNNING;
697 		return true;
698         }
699 
700 	return false;
701 }
702 
703 static void dp_mon_neighbour_peer_add_ast(struct dp_pdev *pdev,
704 					  struct dp_peer *ta_peer,
705 					  uint8_t *mac_addr,
706 					  qdf_nbuf_t nbuf,
707 					  uint32_t flags)
708 {
709 	struct dp_neighbour_peer *neighbour_peer = NULL;
710 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
711 	struct dp_soc *soc = pdev->soc;
712 	uint32_t ret = 0;
713 
714 	if (mon_pdev->neighbour_peers_added) {
715 		qdf_mem_copy(mac_addr,
716 			     (qdf_nbuf_data(nbuf) +
717 			      QDF_MAC_ADDR_SIZE),
718 			      QDF_MAC_ADDR_SIZE);
719 
720 		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
721 		TAILQ_FOREACH(neighbour_peer,
722 			      &mon_pdev->neighbour_peers_list,
723 			      neighbour_peer_list_elem) {
724 			if (!qdf_mem_cmp(&neighbour_peer->neighbour_peers_macaddr,
725 					 mac_addr,
726 					 QDF_MAC_ADDR_SIZE)) {
727 				ret = dp_peer_add_ast(soc,
728 						      ta_peer,
729 						      mac_addr,
730 						      CDP_TXRX_AST_TYPE_WDS,
731 						      flags);
732 				QDF_TRACE(QDF_MODULE_ID_DP,
733 					  QDF_TRACE_LEVEL_INFO,
734 					  "sa valid and nac roamed to wds");
735 				break;
736 			}
737 		}
738 		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
739 	}
740 }
741 
742 #if !defined(DISABLE_MON_CONFIG)
743 
744 /**
745  * dp_mon_htt_srng_setup_1_0() - Prepare HTT messages for Monitor rings
746  * @soc: soc handle
747  * @pdev: physical device handle
748  * @mac_id: ring number
749  * @mac_for_pdev: mac_id
750  *
751  * Return: non-zero for failure, zero for success
752  */
753 #if defined(DP_CON_MON)
754 static
755 QDF_STATUS dp_mon_htt_srng_setup_1_0(struct dp_soc *soc,
756 				     struct dp_pdev *pdev,
757 				     int mac_id,
758 				     int mac_for_pdev)
759 {
760 	QDF_STATUS status = QDF_STATUS_SUCCESS;
761 
762 	status = dp_mon_htt_dest_srng_setup(soc, pdev, mac_id, mac_for_pdev);
763 	if (status != QDF_STATUS_SUCCESS)
764 		return status;
765 
766 	if (!soc->rxdma_mon_status_ring[mac_id].hal_srng)
767 		return QDF_STATUS_SUCCESS;
768 
769 	status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
770 				soc->rxdma_mon_status_ring[mac_id]
771 				.hal_srng,
772 				RXDMA_MONITOR_STATUS);
773 
774 	if (status != QDF_STATUS_SUCCESS) {
775 		dp_mon_err("Failed to send htt srng setup message for Rxdma mon status ring");
776 		return status;
777 	}
778 
779 	return status;
780 }
781 #else
782 /* This is only for WIN */
783 static
784 QDF_STATUS dp_mon_htt_srng_setup_1_0(struct dp_soc *soc,
785 				     struct dp_pdev *pdev,
786 				     int mac_id,
787 				     int mac_for_pdev)
788 {
789 	QDF_STATUS status = QDF_STATUS_SUCCESS;
790 	struct dp_mon_soc *mon_soc;
791 
792 	mon_soc = soc->monitor_soc;
793 	if(!mon_soc) {
794 		dp_mon_err("%pK: monitor SOC not initialized", soc);
795 		return status;
796 	}
797 
798 	if (mon_soc->monitor_mode_v2)
799 		return status;
800 
801 	if (wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) {
802 		status = dp_mon_htt_dest_srng_setup(soc, pdev,
803 						    mac_id, mac_for_pdev);
804 		if (status != QDF_STATUS_SUCCESS)
805 			return status;
806 	}
807 
808 	if (!soc->rxdma_mon_status_ring[mac_id].hal_srng)
809 		return QDF_STATUS_SUCCESS;
810 
811 	status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
812 				soc->rxdma_mon_status_ring[mac_id]
813 				.hal_srng,
814 				RXDMA_MONITOR_STATUS);
815 
816 	if (status != QDF_STATUS_SUCCESS) {
817 		dp_mon_err("Failed to send htt srng setup msg for Rxdma mon status ring");
818 		return status;
819 	}
820 
821 	return status;
822 }
823 #endif
824 #endif
825 
826 /* MCL specific functions */
827 #if defined(DP_CON_MON)
828 
829 /*
830  * dp_service_mon_rings()- service monitor rings
831  * @soc: soc dp handle
832  * @quota: number of ring entry that can be serviced
833  *
834  * Return: None
835  *
836  */
837 void dp_service_mon_rings(struct  dp_soc *soc, uint32_t quota)
838 {
839 	int ring = 0, work_done;
840 	struct dp_pdev *pdev = NULL;
841 
842 	for (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
843 		pdev = dp_get_pdev_for_lmac_id(soc, ring);
844 		if (!pdev)
845 			continue;
846 		work_done = dp_mon_process(soc, NULL, ring, quota);
847 
848 		dp_rx_mon_dest_debug("Reaped %d descs from Monitor rings",
849 				     work_done);
850 	}
851 }
852 #endif
853 
854 /*
855  * dp_mon_peer_tx_init() – Initialize receive TID state in monitor peer
856  * @pdev: Datapath pdev
857  * @peer: Datapath peer
858  *
859  */
860 static void
861 dp_mon_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
862 {
863 	if (!peer->monitor_peer)
864 		return;
865 
866 	dp_peer_tid_queue_init(peer);
867 	dp_peer_update_80211_hdr(peer->vdev, peer);
868 }
869 
870 /*
871  * dp_mon_peer_tx_cleanup() – Deinitialize receive TID state in monitor peer
872  * @vdev: Datapath vdev
873  * @peer: Datapath peer
874  *
875  */
876 static void
877 dp_mon_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
878 {
879 	if (!peer->monitor_peer)
880 		return;
881 
882 	dp_peer_tid_queue_cleanup(peer);
883 }
884 
885 #ifdef QCA_SUPPORT_BPR
/**
 * dp_set_bpr_enable_1_0() - enable/disable BPR and reconcile the FW PPDU
 * stats configuration with the other currently-active stats consumers
 * @pdev: Datapath pdev handle
 * @val: CDP_BPR_ENABLE or CDP_BPR_DISABLE
 *
 * Return: QDF_STATUS_SUCCESS (unknown @val values are silently ignored)
 */
static QDF_STATUS
dp_set_bpr_enable_1_0(struct dp_pdev *pdev, int val)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	switch (val) {
	case CDP_BPR_DISABLE:
		mon_pdev->bpr_enable = CDP_BPR_DISABLE;
		/* No consumer left at all: turn FW PPDU stats off */
		if (!mon_pdev->pktlog_ppdu_stats &&
		    !mon_pdev->enhanced_stats_en &&
		    !mon_pdev->tx_sniffer_enable && !mon_pdev->mcopy_mode) {
			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
		} else if (mon_pdev->enhanced_stats_en &&
			   !mon_pdev->tx_sniffer_enable &&
			   !mon_pdev->mcopy_mode &&
			   !mon_pdev->pktlog_ppdu_stats) {
			/* Only enhanced stats remains active */
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_ENH_STATS,
						  pdev->pdev_id);
		}
		break;
	case CDP_BPR_ENABLE:
		mon_pdev->bpr_enable = CDP_BPR_ENABLE;
		/* Pick the FW config matching the feature combination:
		 * BPR alone, BPR + enhanced stats, or BPR + pktlog.
		 */
		if (!mon_pdev->enhanced_stats_en &&
		    !mon_pdev->tx_sniffer_enable &&
		    !mon_pdev->mcopy_mode && !mon_pdev->pktlog_ppdu_stats) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR,
						  pdev->pdev_id);
		} else if (mon_pdev->enhanced_stats_en &&
			   !mon_pdev->tx_sniffer_enable &&
			   !mon_pdev->mcopy_mode &&
			   !mon_pdev->pktlog_ppdu_stats) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR_ENH,
						  pdev->pdev_id);
		} else if (mon_pdev->pktlog_ppdu_stats) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
						  pdev->pdev_id);
		}
		break;
	default:
		break;
	}

	return QDF_STATUS_SUCCESS;
}
934 #endif
935 
936 #ifdef QCA_ENHANCED_STATS_SUPPORT
937 #if defined(WDI_EVENT_ENABLE) && !defined(WLAN_TX_PKT_CAPTURE_ENH)
938 /**
939  * dp_ppdu_desc_notify_1_0 - Notify upper layer for PPDU indication via WDI
940  *
941  * @pdev: Datapath pdev handle
942  * @nbuf: Buffer to be shipped
943  *
944  * Return: void
945  */
946 static void dp_ppdu_desc_notify_1_0(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
947 {
948 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
949 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
950 
951 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(nbuf);
952 
953 	/**
954 	 * Deliver PPDU stats only for valid (acked) data
955 	 * frames if sniffer mode is not enabled.
956 	 * If sniffer mode is enabled, PPDU stats
957 	 * for all frames including mgmt/control
958 	 * frames should be delivered to upper layer
959 	 */
960 	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) {
961 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
962 				     pdev->soc,
963 				     nbuf, HTT_INVALID_PEER,
964 				     WDI_NO_VAL,
965 				     pdev->pdev_id);
966 	} else {
967 		if (ppdu_desc->num_mpdu != 0 &&
968 		    ppdu_desc->num_users != 0 &&
969 		    ppdu_desc->frame_ctrl &
970 		    HTT_FRAMECTRL_DATATYPE) {
971 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
972 					     pdev->soc,
973 					     nbuf, HTT_INVALID_PEER,
974 					     WDI_NO_VAL,
975 					     pdev->pdev_id);
976 		} else {
977 			qdf_nbuf_free(nbuf);
978 		}
979 	}
980 }
981 #endif
982 
983 /**
984  * dp_ppdu_stats_feat_enable_check_1_0 - Check if feature(s) is enabled to
985  *				consume ppdu stats from FW
986  *
987  * @pdev: Datapath pdev handle
988  *
989  * Return: true if enabled, else return false
990  */
991 static bool dp_ppdu_stats_feat_enable_check_1_0(struct dp_pdev *pdev)
992 {
993 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
994 
995 	if (!mon_pdev->enhanced_stats_en && !mon_pdev->tx_sniffer_enable &&
996 	    !mon_pdev->mcopy_mode && !mon_pdev->bpr_enable)
997 		return false;
998 	else
999 		return true;
1000 }
1001 
/**
 * dp_mon_tx_stats_update_1_0 - Update Tx stats from HTT PPDU completion path
 *
 * @mon_peer: Monitor peer (unused on 1.0 targets)
 * @ppdu: Tx PPDU user completion info
 *
 * 1.0 targets always report the completion as not punctured.
 */
void
dp_mon_tx_stats_update_1_0(struct dp_mon_peer *mon_peer,
			   struct cdp_tx_completion_ppdu_user *ppdu)
{
	ppdu->punc_mode = NO_PUNCTURE;
}
1014 #endif
1015 
1016 #ifndef QCA_SUPPORT_FULL_MON
/**
 * dp_rx_mon_process () - Core brain processing for monitor mode
 *
 * Stub used when QCA_SUPPORT_FULL_MON is not compiled in: the monitor
 * destination ring is not serviced and the quota is returned unchanged.
 *
 * @soc: datapath soc context
 * @int_ctx: interrupt context
 * @mac_id: mac_id on which interrupt is received
 * @quota: Number of status ring entry that can be serviced in one shot.
 *
 * @Return: quota, unmodified
 */
static inline uint32_t
dp_rx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
		  uint32_t mac_id, uint32_t quota)
{
	return quota;
}
1036 #endif
1037 
1038 #ifndef DISABLE_MON_CONFIG
1039 static uint32_t
1040 dp_rx_mon_process_1_0(struct dp_soc *soc, struct dp_intr *int_ctx,
1041 	              uint32_t mac_id, uint32_t quota)
1042 {
1043 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1044 
1045 	if (qdf_unlikely(mon_soc->full_mon_mode))
1046 		return dp_rx_mon_process(soc, int_ctx, mac_id, quota);
1047 
1048 	return dp_rx_mon_status_process(soc, int_ctx, mac_id, quota);
1049 }
1050 
#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
/**
 * dp_mon_ppdu_stats_handler_register() - Hook the PPDU stats indication
 *				handler into the monitor soc ops
 *
 * @mon_soc: monitor soc handle
 *
 * Return: void
 */
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops->mon_ppdu_stats_ind_handler =
					dp_ppdu_stats_ind_handler;
}
#else
/**
 * dp_mon_ppdu_stats_handler_register() - No-op variant used when no
 *				compiled-in feature consumes PPDU stats
 *
 * @mon_soc: monitor soc handle
 *
 * Return: void
 */
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
}
#endif
1065 
/**
 * dp_mon_register_intr_ops_1_0() - Register 1.0 monitor interrupt ops
 *
 * Installs the Rx monitor process entry point on the monitor soc and
 * registers the PPDU stats indication handler.
 *
 * @soc: dp soc context
 *
 * Return: void
 */
static void dp_mon_register_intr_ops_1_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	mon_soc->mon_rx_process = dp_rx_mon_process_1_0;
	dp_mon_ppdu_stats_handler_register(mon_soc);
}
1073 #endif
1074 
1075 /**
1076  * dp_mon_register_feature_ops_1_0() - register feature ops
1077  *
1078  * @soc: dp soc context
1079  *
1080  * @return: void
1081  */
1082 static void
1083 dp_mon_register_feature_ops_1_0(struct dp_soc *soc)
1084 {
1085 	struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);
1086 
1087 	if (!mon_ops) {
1088 		dp_err("mon_ops is NULL, feature ops registration failed");
1089 		return;
1090 	}
1091 
1092 	mon_ops->mon_config_debug_sniffer = dp_config_debug_sniffer;
1093 	mon_ops->mon_peer_tx_init = dp_mon_peer_tx_init;
1094 	mon_ops->mon_peer_tx_cleanup = dp_mon_peer_tx_cleanup;
1095 	mon_ops->mon_htt_ppdu_stats_attach = dp_htt_ppdu_stats_attach;
1096 	mon_ops->mon_htt_ppdu_stats_detach = dp_htt_ppdu_stats_detach;
1097 	mon_ops->mon_print_pdev_rx_mon_stats = dp_print_pdev_rx_mon_stats;
1098 	mon_ops->mon_set_bsscolor = dp_mon_set_bsscolor;
1099 	mon_ops->mon_pdev_get_filter_ucast_data =
1100 				dp_pdev_get_filter_ucast_data;
1101 	mon_ops->mon_pdev_get_filter_mcast_data =
1102 				dp_pdev_get_filter_mcast_data;
1103 	mon_ops->mon_pdev_get_filter_non_data = dp_pdev_get_filter_non_data;
1104 	mon_ops->mon_neighbour_peer_add_ast = dp_mon_neighbour_peer_add_ast;
1105 #ifdef WLAN_TX_PKT_CAPTURE_ENH
1106 	mon_ops->mon_peer_tid_peer_id_update = dp_peer_tid_peer_id_update_1_0;
1107 	mon_ops->mon_tx_capture_debugfs_init = dp_tx_capture_debugfs_init_1_0;
1108 	mon_ops->mon_tx_add_to_comp_queue = dp_tx_add_to_comp_queue_1_0;
1109 	mon_ops->mon_print_pdev_tx_capture_stats =
1110 				dp_print_pdev_tx_capture_stats_1_0;
1111 	mon_ops->mon_config_enh_tx_capture = dp_config_enh_tx_capture_1_0;
1112 	mon_ops->mon_tx_peer_filter = dp_peer_set_tx_capture_enabled_1_0;
1113 	mon_ops->mon_peer_tx_capture_get_stats = dp_get_peer_tx_capture_stats;
1114 	mon_ops->mon_pdev_tx_capture_get_stats = dp_get_pdev_tx_capture_stats;
1115 #endif
1116 #if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH))
1117 	mon_ops->mon_peer_tid_peer_id_update = NULL;
1118 	mon_ops->mon_tx_capture_debugfs_init = NULL;
1119 	mon_ops->mon_tx_add_to_comp_queue = NULL;
1120 	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
1121 	mon_ops->mon_config_enh_tx_capture = NULL;
1122 	mon_ops->mon_tx_peer_filter = NULL;
1123 #endif
1124 #ifdef WLAN_RX_PKT_CAPTURE_ENH
1125 	mon_ops->mon_config_enh_rx_capture = dp_config_enh_rx_capture;
1126 #endif
1127 #ifdef QCA_SUPPORT_BPR
1128 	mon_ops->mon_set_bpr_enable = dp_set_bpr_enable_1_0;
1129 #endif
1130 #ifdef ATH_SUPPORT_NAC
1131 	mon_ops->mon_set_filter_neigh_peers = dp_set_filter_neigh_peers;
1132 #endif
1133 #ifdef WLAN_ATF_ENABLE
1134 	mon_ops->mon_set_atf_stats_enable = dp_set_atf_stats_enable;
1135 #endif
1136 #ifdef FEATURE_NAC_RSSI
1137 	mon_ops->mon_filter_neighbour_peer = dp_filter_neighbour_peer;
1138 #endif
1139 #ifdef QCA_MCOPY_SUPPORT
1140 	mon_ops->mon_filter_setup_mcopy_mode =
1141 				dp_mon_filter_setup_mcopy_mode_1_0;
1142 	mon_ops->mon_filter_reset_mcopy_mode =
1143 				dp_mon_filter_reset_mcopy_mode_1_0;
1144 	mon_ops->mon_mcopy_check_deliver = dp_mcopy_check_deliver;
1145 #endif
1146 #ifdef QCA_ENHANCED_STATS_SUPPORT
1147 	mon_ops->mon_filter_setup_enhanced_stats =
1148 				dp_mon_filter_setup_enhanced_stats_1_0;
1149 	mon_ops->mon_filter_reset_enhanced_stats =
1150 				dp_mon_filter_reset_enhanced_stats_1_0;
1151 	mon_ops->mon_tx_enable_enhanced_stats =
1152 				dp_mon_tx_enable_enhanced_stats_1_0;
1153 	mon_ops->mon_tx_disable_enhanced_stats =
1154 				dp_mon_tx_disable_enhanced_stats_1_0;
1155 	mon_ops->mon_ppdu_stats_feat_enable_check =
1156 				dp_ppdu_stats_feat_enable_check_1_0;
1157 #ifndef WLAN_TX_PKT_CAPTURE_ENH
1158 	mon_ops->mon_ppdu_desc_deliver = dp_ppdu_desc_deliver;
1159 #ifdef WDI_EVENT_ENABLE
1160 	mon_ops->mon_ppdu_desc_notify = dp_ppdu_desc_notify_1_0;
1161 #endif
1162 #else
1163 	mon_ops->mon_ppdu_desc_deliver = dp_ppdu_desc_deliver_1_0;
1164 #endif
1165 	mon_ops->mon_tx_stats_update = dp_mon_tx_stats_update_1_0;
1166 #endif
1167 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
1168 	mon_ops->mon_filter_setup_smart_monitor =
1169 				dp_mon_filter_setup_smart_monitor_1_0;
1170 	mon_ops->mon_filter_reset_smart_monitor =
1171 				dp_mon_filter_reset_smart_monitor_1_0;
1172 #endif
1173 #ifdef WLAN_RX_PKT_CAPTURE_ENH
1174 	mon_ops->mon_filter_setup_rx_enh_capture =
1175 				dp_mon_filter_setup_rx_enh_capture_1_0;
1176 #endif
1177 #ifdef WDI_EVENT_ENABLE
1178 	mon_ops->mon_set_pktlog_wifi3 = dp_set_pktlog_wifi3;
1179 	mon_ops->mon_filter_setup_rx_pkt_log_full =
1180 				dp_mon_filter_setup_rx_pkt_log_full_1_0;
1181 	mon_ops->mon_filter_reset_rx_pkt_log_full =
1182 				dp_mon_filter_reset_rx_pkt_log_full_1_0;
1183 	mon_ops->mon_filter_setup_rx_pkt_log_lite =
1184 				dp_mon_filter_setup_rx_pkt_log_lite_1_0;
1185 	mon_ops->mon_filter_reset_rx_pkt_log_lite =
1186 				dp_mon_filter_reset_rx_pkt_log_lite_1_0;
1187 	mon_ops->mon_filter_setup_rx_pkt_log_cbf =
1188 				dp_mon_filter_setup_rx_pkt_log_cbf_1_0;
1189 	mon_ops->mon_filter_reset_rx_pkt_log_cbf =
1190 				dp_mon_filter_reset_rx_pktlog_cbf_1_0;
1191 #ifdef BE_PKTLOG_SUPPORT
1192 	mon_ops->mon_filter_setup_pktlog_hybrid = NULL;
1193 	mon_ops->mon_filter_reset_pktlog_hybrid = NULL;
1194 #endif
1195 #endif
1196 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
1197 	mon_ops->mon_pktlogmod_exit = dp_pktlogmod_exit;
1198 #endif
1199 	mon_ops->rx_hdr_length_set = NULL;
1200 	mon_ops->rx_packet_length_set = NULL;
1201 	mon_ops->rx_mon_enable = NULL;
1202 	mon_ops->rx_wmask_subscribe = NULL;
1203 	mon_ops->rx_enable_mpdu_logging = NULL;
1204 	mon_ops->rx_enable_fpmo = NULL;
1205 	mon_ops->mon_neighbour_peers_detach = dp_neighbour_peers_detach;
1206 	mon_ops->mon_vdev_set_monitor_mode_buf_rings =
1207 				dp_vdev_set_monitor_mode_buf_rings;
1208 	mon_ops->mon_vdev_set_monitor_mode_rings =
1209 				dp_vdev_set_monitor_mode_rings;
1210 #ifdef QCA_ENHANCED_STATS_SUPPORT
1211 	mon_ops->mon_rx_stats_update = NULL;
1212 	mon_ops->mon_rx_populate_ppdu_usr_info = NULL;
1213 	mon_ops->mon_rx_populate_ppdu_info = dp_mon_populate_ppdu_info_1_0;
1214 #endif
1215 #ifdef QCA_UNDECODED_METADATA_SUPPORT
1216 	mon_ops->mon_config_undecoded_metadata_capture =
1217 		dp_mon_config_undecoded_metadata_capture;
1218 	mon_ops->mon_filter_setup_undecoded_metadata_capture =
1219 		dp_mon_filter_setup_undecoded_metadata_capture_1_0;
1220 	mon_ops->mon_filter_reset_undecoded_metadata_capture =
1221 		dp_mon_filter_reset_undecoded_metadata_capture_1_0;
1222 #endif
1223 	mon_ops->mon_rx_print_advanced_stats = NULL;
1224 }
1225 
/* Monitor ops table for 1.0 targets. Entries set to NULL are not
 * implemented for this generation (e.g. Tx monitor and lite monitor).
 */
struct dp_mon_ops monitor_ops_1_0 = {
	.mon_soc_cfg_init = dp_mon_soc_cfg_init,
	.mon_pdev_alloc = NULL,
	.mon_pdev_free = NULL,
	.mon_pdev_attach = dp_mon_pdev_attach,
	.mon_pdev_detach = dp_mon_pdev_detach,
	.mon_pdev_init = dp_mon_pdev_init,
	.mon_pdev_deinit = dp_mon_pdev_deinit,
	.mon_vdev_attach = dp_mon_vdev_attach,
	.mon_vdev_detach = dp_mon_vdev_detach,
	.mon_peer_attach = dp_mon_peer_attach,
	.mon_peer_detach = dp_mon_peer_detach,
	.mon_peer_get_peerstats_ctx = dp_mon_peer_get_peerstats_ctx,
	.mon_peer_reset_stats = dp_mon_peer_reset_stats,
	.mon_peer_get_stats = dp_mon_peer_get_stats,
	.mon_invalid_peer_update_pdev_stats =
				dp_mon_invalid_peer_update_pdev_stats,
	.mon_peer_get_stats_param = dp_mon_peer_get_stats_param,
	.mon_flush_rings = dp_flush_monitor_rings,
#if !defined(DISABLE_MON_CONFIG)
	.mon_pdev_htt_srng_setup = dp_mon_htt_srng_setup_1_0,
#endif
#if defined(DP_CON_MON)
	.mon_service_rings = dp_service_mon_rings,
#endif
#ifndef DISABLE_MON_CONFIG
	/* Filled in at runtime by dp_mon_register_intr_ops_1_0() */
	.mon_rx_process = NULL,
#endif
#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
	.mon_drop_packets_for_mac = dp_mon_drop_packets_for_mac,
#endif
	.mon_vdev_timer_init = dp_mon_vdev_timer_init,
	.mon_vdev_timer_start = dp_mon_vdev_timer_start,
	.mon_vdev_timer_stop = dp_mon_vdev_timer_stop,
	.mon_vdev_timer_deinit = dp_mon_vdev_timer_deinit,
	.mon_reap_timer_init = dp_mon_reap_timer_init,
	.mon_reap_timer_start = dp_mon_reap_timer_start,
	.mon_reap_timer_stop = dp_mon_reap_timer_stop,
	.mon_reap_timer_deinit = dp_mon_reap_timer_deinit,
	.mon_filter_setup_rx_mon_mode = dp_mon_filter_setup_mon_mode_1_0,
	.mon_filter_reset_rx_mon_mode = dp_mon_filter_reset_mon_mode_1_0,
	.mon_filter_setup_tx_mon_mode = NULL,
	.mon_filter_reset_tx_mon_mode = NULL,
	.rx_mon_filter_update = dp_mon_filter_update_1_0,
	.tx_mon_filter_update = NULL,
	.rx_mon_desc_pool_init = dp_rx_pdev_mon_desc_pool_init,
	.rx_mon_desc_pool_deinit = dp_rx_pdev_mon_desc_pool_deinit,
	.rx_mon_desc_pool_alloc = dp_rx_pdev_mon_desc_pool_alloc,
	.rx_mon_desc_pool_free = dp_rx_pdev_mon_desc_pool_free,
	.rx_mon_buffers_alloc = dp_rx_pdev_mon_buffers_alloc,
	.rx_mon_buffers_free = dp_rx_pdev_mon_buffers_free,
	.tx_mon_desc_pool_init = NULL,
	.tx_mon_desc_pool_deinit = NULL,
	.tx_mon_desc_pool_alloc = NULL,
	.tx_mon_desc_pool_free = NULL,
	.tx_mon_filter_alloc = NULL,
	.mon_rings_alloc = dp_mon_rings_alloc_1_0,
	.mon_rings_free = dp_mon_rings_free_1_0,
	.mon_rings_init = dp_mon_rings_init_1_0,
	.mon_rings_deinit = dp_mon_rings_deinit_1_0,
#if !defined(DISABLE_MON_CONFIG)
	.mon_register_intr_ops = dp_mon_register_intr_ops_1_0,
#endif
	.mon_register_feature_ops = dp_mon_register_feature_ops_1_0,
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	.mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach_1_0,
	.mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach_1_0,
	.mon_peer_tx_capture_filter_check = dp_peer_tx_capture_filter_check_1_0,
#endif
#if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH))
	.mon_tx_ppdu_stats_attach = NULL,
	.mon_tx_ppdu_stats_detach = NULL,
	.mon_peer_tx_capture_filter_check = NULL,
#endif
	.mon_lite_mon_alloc = NULL,
	.mon_lite_mon_dealloc = NULL,
	.mon_lite_mon_vdev_delete = NULL,
	.mon_lite_mon_disable_rx = NULL,
};
1305 
/* CDP (control-path) monitor ops exported for 1.0 targets. Lite monitor
 * config hooks are left NULL as lite monitor is not implemented here.
 */
struct cdp_mon_ops dp_ops_mon_1_0 = {
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
	.config_full_mon_mode = dp_config_full_mon_mode,
	.soc_config_full_mon_mode = dp_soc_config_full_mon_mode,
	.get_mon_pdev_rx_stats = dp_pdev_get_rx_mon_stats,
	.txrx_enable_mon_reap_timer = dp_enable_mon_reap_timer,
#ifdef QCA_SUPPORT_LITE_MONITOR
	.txrx_set_lite_mon_config = NULL,
	.txrx_get_lite_mon_config = NULL,
	.txrx_set_lite_mon_peer_config = NULL,
	.txrx_get_lite_mon_peer_config = NULL,
	.txrx_is_lite_mon_enabled = NULL,
#endif
	.txrx_set_mon_pdev_params_rssi_dbm_conv =
				dp_mon_pdev_params_rssi_dbm_conv,
};
1325 
1326 #ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
1327 void dp_mon_ops_register_1_0(struct dp_mon_soc *mon_soc)
1328 {
1329 	struct dp_mon_ops *mon_ops = NULL;
1330 
1331 	if (mon_soc->mon_ops) {
1332 		dp_mon_err("monitor ops is allocated");
1333 		return;
1334 	}
1335 
1336 	mon_ops = qdf_mem_malloc(sizeof(struct dp_mon_ops));
1337 	if (!mon_ops) {
1338 		dp_mon_err("Failed to allocate memory for mon ops");
1339 		return;
1340 	}
1341 
1342 	qdf_mem_copy(mon_ops, &monitor_ops_1_0, sizeof(struct dp_mon_ops));
1343 	mon_soc->mon_ops = mon_ops;
1344 }
1345 
1346 void dp_mon_cdp_ops_register_1_0(struct cdp_ops *ops)
1347 {
1348 	struct cdp_mon_ops *mon_ops = NULL;
1349 
1350 	if (ops->mon_ops) {
1351 		dp_mon_err("cdp monitor ops is allocated");
1352 		return;
1353 	}
1354 
1355 	mon_ops = qdf_mem_malloc(sizeof(struct cdp_mon_ops));
1356 	if (!mon_ops) {
1357 		dp_mon_err("Failed to allocate memory for cdp mon ops");
1358 		return;
1359 	}
1360 
1361 	qdf_mem_copy(mon_ops, &dp_ops_mon_1_0, sizeof(struct cdp_mon_ops));
1362 	ops->mon_ops = mon_ops;
1363 }
1364 #else
/**
 * dp_mon_ops_register_1_0() - Point the monitor soc at the shared static
 *				1.0 monitor ops table
 *
 * @mon_soc: monitor soc handle
 *
 * Return: void
 */
void dp_mon_ops_register_1_0(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops = &monitor_ops_1_0;
}
1369 
/**
 * dp_mon_cdp_ops_register_1_0() - Point the cdp ops table at the shared
 *				static 1.0 CDP monitor ops
 *
 * @ops: cdp ops handle
 *
 * Return: void
 */
void dp_mon_cdp_ops_register_1_0(struct cdp_ops *ops)
{
	ops->mon_ops = &dp_ops_mon_1_0;
}
1374 #endif
1375