/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <dp_types.h>
#include "dp_rx.h"
#include "dp_peer.h"
#include <dp_htt.h>
#include <dp_mon_filter.h>
#include <dp_mon.h>
#include <dp_rx_mon.h>
#include <dp_rx_mon_1.0.h>
#include <dp_mon_1.0.h>
#include <dp_mon_filter_1.0.h>

#include "htt_ppdu_stats.h"
#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif
#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#endif

extern QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
				int ring_type, uint32_t num_entries,
				bool cached);
extern void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng);
extern QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
			       int ring_type, int ring_num, int mac_id);
extern void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
			   int ring_type, int ring_num);

extern enum timer_yield_status
dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
			  uint64_t start_time);

#ifdef QCA_ENHANCED_STATS_SUPPORT
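/**
 * dp_mon_populate_ppdu_info_1_0() - Populate the preamble and bandwidth
 *                              fields of the rx PPDU indication from the
 *                              HAL PPDU info
 * @hal_ppdu_info: HAL rx PPDU info
 * @ppdu: rx PPDU indication to be filled
 *
 * Puncturing is not supported on 1.0 targets, hence punc_bw is set to 0.
 *
 * Return: none
 */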
void
dp_mon_populate_ppdu_info_1_0(struct hal_rx_ppdu_info *hal_ppdu_info,
			      struct cdp_rx_indication_ppdu *ppdu)
{
	ppdu->u.preamble = hal_ppdu_info->rx_status.preamble_type;
	ppdu->u.bw = hal_ppdu_info->rx_status.bw;
	ppdu->punc_bw = 0;
}

/**
 * is_ppdu_txrx_capture_enabled() - Check whether pktlog and the
 *                              debug_sniffer (tx sniffer/m_copy) modes
 *                              are all disabled
 * @pdev: dp pdev handle
 *
 * Return: true if none of these modes is enabled, false otherwise
 */
static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	return !mon_pdev->pktlog_ppdu_stats && !mon_pdev->tx_sniffer_enable &&
	       !mon_pdev->mcopy_mode;
}

/**
 * dp_mon_tx_enable_enhanced_stats_1_0() - Send HTT cmd to FW to enable stats
 * @pdev: Datapath pdev handle
 *
 * Return: none
 */
static void dp_mon_tx_enable_enhanced_stats_1_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (is_ppdu_txrx_capture_enabled(pdev) && !mon_pdev->bpr_enable) {
		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
					  pdev->pdev_id);
	} else if (is_ppdu_txrx_capture_enabled(pdev) &&
		   mon_pdev->bpr_enable) {
		dp_h2t_cfg_stats_msg_send(pdev,
					  DP_PPDU_STATS_CFG_BPR_ENH,
					  pdev->pdev_id);
	}
}

/**
 * dp_mon_tx_disable_enhanced_stats_1_0() - Send HTT cmd to FW to disable stats
 * @pdev: Datapath pdev handle
 *
 * Return: none
 */
static void dp_mon_tx_disable_enhanced_stats_1_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (is_ppdu_txrx_capture_enabled(pdev) && !mon_pdev->bpr_enable) {
		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
	} else if (is_ppdu_txrx_capture_enabled(pdev) && mon_pdev->bpr_enable) {
		dp_h2t_cfg_stats_msg_send(pdev,
					  DP_PPDU_STATS_CFG_BPR,
					  pdev->pdev_id);
	}
}
#endif

#ifdef QCA_SUPPORT_FULL_MON
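/**
 * dp_config_full_mon_mode() - Enable/disable full monitor mode on the soc
 * @soc_handle: cdp soc handle
 * @val: 1 to enable full monitor mode, 0 to disable it
 *
 * Return: QDF_STATUS_SUCCESS
 */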
static QDF_STATUS
dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
			uint8_t val)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	mon_soc->full_mon_mode = val;
	dp_cdp_err("Configure full monitor mode val: %d ", val);

	return QDF_STATUS_SUCCESS;
}

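/**
 * dp_soc_config_full_mon_mode() - Send the full monitor mode configuration
 *                              to the FW via HTT, if the mode is enabled
 * @cdp_pdev: cdp pdev handle
 * @val: configuration value to be sent
 *
 * Return: QDF_STATUS
 */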
static QDF_STATUS
dp_soc_config_full_mon_mode(struct cdp_pdev *cdp_pdev, uint8_t val)
{
	struct dp_pdev *pdev = (struct dp_pdev *)cdp_pdev;
	struct dp_soc *soc = pdev->soc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (!mon_soc->full_mon_mode)
		return QDF_STATUS_SUCCESS;

	if ((htt_h2t_full_mon_cfg(soc->htt_handle,
				  pdev->pdev_id,
				  val)) != QDF_STATUS_SUCCESS) {
		status = QDF_STATUS_E_FAILURE;
	}

	return status;
}
#else
static inline QDF_STATUS
dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
			uint8_t val)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_soc_config_full_mon_mode(struct cdp_pdev *cdp_pdev,
			    uint8_t val)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#if !defined(DISABLE_MON_CONFIG)
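/**
 * dp_flush_monitor_rings() - Reset the monitor filters and reap the monitor
 *                         destination ring so that no stale entries are left
 * @soc: Datapath soc handle
 *
 * Return: none
 */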
void dp_flush_monitor_rings(struct dp_soc *soc)
{
	struct dp_pdev *pdev = soc->pdev_list[0];
	hal_soc_handle_t hal_soc = soc->hal_soc;
	uint32_t lmac_id;
	uint32_t hp, tp;
	int budget;
	void *mon_dst_srng;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (qdf_unlikely(mon_soc->full_mon_mode))
		return;

	/* Reset monitor filters before reaping the ring */
	qdf_spin_lock_bh(&mon_pdev->mon_lock);
	dp_mon_filter_reset_mon_mode(pdev);
	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS)
		dp_info("failed to reset monitor filters");
	qdf_spin_unlock_bh(&mon_pdev->mon_lock);

	lmac_id = pdev->ch_band_lmac_id_mapping[mon_pdev->mon_chan_band];
	if (qdf_unlikely(lmac_id == DP_MON_INVALID_LMAC_ID))
		return;

	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, lmac_id);

	/* reap the full ring */
	budget = wlan_cfg_get_dma_mon_stat_ring_size(pdev->wlan_cfg_ctx);

	hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp);
	dp_info("Before flush: Monitor DST ring HP %u TP %u", hp, tp);

	dp_mon_drop_packets_for_mac(pdev, lmac_id, budget, true);

	hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp);
	dp_info("After flush: Monitor DST ring HP %u TP %u", hp, tp);
}

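/**
 * dp_mon_rings_deinit_1_0() - Deinitialize the monitor status and
 *                          destination rings of all LMACs of the pdev
 * @pdev: Datapath pdev handle
 *
 * Return: none
 */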
void dp_mon_rings_deinit_1_0(struct dp_pdev *pdev)
{
	int mac_id = 0;
	struct dp_soc *soc = pdev->soc;

	for (mac_id = 0;
	     mac_id < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
	     mac_id++) {
		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
							 pdev->pdev_id);

		dp_srng_deinit(soc, &soc->rxdma_mon_status_ring[lmac_id],
			       RXDMA_MONITOR_STATUS, 0);

		dp_mon_dest_rings_deinit(pdev, lmac_id);
	}
}

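/**
 * dp_mon_rings_free_1_0() - Free the monitor status and destination rings
 *                        of all LMACs of the pdev
 * @pdev: Datapath pdev handle
 *
 * Return: none
 */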
void dp_mon_rings_free_1_0(struct dp_pdev *pdev)
{
	int mac_id = 0;
	struct dp_soc *soc = pdev->soc;

	for (mac_id = 0;
	     mac_id < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
	     mac_id++) {
		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
							 pdev->pdev_id);

		dp_srng_free(soc, &soc->rxdma_mon_status_ring[lmac_id]);

		dp_mon_dest_rings_free(pdev, lmac_id);
	}
}

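/**
 * dp_mon_rings_init_1_0() - Initialize the monitor status and destination
 *                        rings of all LMACs of the pdev
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */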
QDF_STATUS dp_mon_rings_init_1_0(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	int mac_id = 0;

	for (mac_id = 0;
	     mac_id < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
	     mac_id++) {
		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
							 pdev->pdev_id);

		if (dp_srng_init(soc, &soc->rxdma_mon_status_ring[lmac_id],
				 RXDMA_MONITOR_STATUS, 0, lmac_id)) {
			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_status_ring",
				   soc);
			goto fail1;
		}

		if (dp_mon_dest_rings_init(pdev, lmac_id))
			goto fail1;
	}
	return QDF_STATUS_SUCCESS;

fail1:
	dp_mon_rings_deinit_1_0(pdev);
	return QDF_STATUS_E_NOMEM;
}

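/**
 * dp_mon_rings_alloc_1_0() - Allocate the monitor status and destination
 *                         rings of all LMACs of the pdev
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */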
QDF_STATUS dp_mon_rings_alloc_1_0(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	int mac_id = 0;
	int entries;
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;

	for (mac_id = 0;
	     mac_id < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
	     mac_id++) {
		int lmac_id =
		dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
		if (dp_srng_alloc(soc, &soc->rxdma_mon_status_ring[lmac_id],
				  RXDMA_MONITOR_STATUS, entries, 0)) {
			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_status_ring",
				   soc);
			goto fail1;
		}

		if (dp_mon_dest_rings_alloc(pdev, lmac_id))
			goto fail1;
	}
	return QDF_STATUS_SUCCESS;

fail1:
	dp_mon_rings_free_1_0(pdev);
	return QDF_STATUS_E_NOMEM;
}
#else
inline
void dp_flush_monitor_rings(struct dp_soc *soc)
{
}
#endif

#ifdef QCA_MONITOR_PKT_SUPPORT
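/**
 * dp_vdev_set_monitor_mode_buf_rings() - Set up the monitor buffer rings
 * @pdev: Datapath pdev handle
 *
 * If delayed monitor replenish is disabled, the full monitor mode ring
 * setup is done via dp_vdev_set_monitor_mode_rings(); otherwise only the
 * monitor buffers are allocated and a low interrupt threshold is
 * configured on each monitor buffer ring.
 *
 * Return: QDF_STATUS_SUCCESS
 */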
QDF_STATUS dp_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev)
{
	uint32_t mac_id;
	uint32_t mac_for_pdev;
	struct dp_srng *mon_buf_ring;
	uint32_t num_entries;
	struct dp_soc *soc = pdev->soc;

	/* If delayed monitor replenish is disabled, allocate link descriptor
	 * monitor ring buffers of ring size.
	 */
	if (!wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) {
		dp_vdev_set_monitor_mode_rings(pdev, false);
	} else {
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			mac_for_pdev =
				dp_get_lmac_id_for_pdev_id(pdev->soc,
							   mac_id,
							   pdev->pdev_id);

			dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
							 FALSE);
			mon_buf_ring =
				&pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
			/*
			 * Configure low interrupt threshold when monitor mode
			 * is configured.
			 */
			if (mon_buf_ring->hal_srng) {
				num_entries = mon_buf_ring->num_entries;
				hal_set_low_threshold(mon_buf_ring->hal_srng,
						      num_entries >> 3);
				htt_srng_setup(pdev->soc->htt_handle,
					       pdev->pdev_id,
					       mon_buf_ring->hal_srng,
					       RXDMA_MONITOR_BUF);
			}
		}
	}
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef QCA_MONITOR_PKT_SUPPORT
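/**
 * dp_vdev_set_monitor_mode_rings() - Allocate and initialize the descriptor
 *                                 pools, buffers and link descriptors for
 *                                 the monitor mode rings
 * @pdev: Datapath pdev handle
 * @delayed_replenish: true if the monitor buffer rings are to be
 *                     replenished lazily
 *
 * Returns early if the rings are already initialized; descriptor pool
 * allocation is skipped on QCN9160 targets.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */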
QDF_STATUS dp_vdev_set_monitor_mode_rings(struct dp_pdev *pdev,
					  uint8_t delayed_replenish)
{
	uint32_t mac_id;
	uint32_t mac_for_pdev;
	struct dp_soc *soc = pdev->soc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_srng *mon_buf_ring;
	uint32_t num_entries;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	uint32_t target_type = hal_get_target_type(soc->hal_soc);

	/* If monitor rings are already initialized, return from here */
	if (mon_pdev->pdev_mon_init)
		return QDF_STATUS_SUCCESS;

	if (target_type == TARGET_TYPE_QCN9160) {
		dp_alert("Mon SOC:%pK config, skip desc pool alloc", soc);
		goto pass;
	}

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
							  pdev->pdev_id);

		/* Allocate sw rx descriptor pool for mon RxDMA buffer ring */
		status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev);
		if (!QDF_IS_STATUS_SUCCESS(status)) {
			dp_err("%s: dp_rx_pdev_mon_buf_desc_pool_alloc() failed\n",
			       __func__);
			goto fail0;
		}

		dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev);

		/* If monitor buffers are already allocated,
		 * do not allocate.
		 */
		status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
							  delayed_replenish);

		mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
		/*
		 * Configure low interrupt threshold when monitor mode is
		 * configured.
		 */
		if (mon_buf_ring->hal_srng) {
			num_entries = mon_buf_ring->num_entries;
			hal_set_low_threshold(mon_buf_ring->hal_srng,
					      num_entries >> 3);
			htt_srng_setup(pdev->soc->htt_handle,
				       pdev->pdev_id,
				       mon_buf_ring->hal_srng,
				       RXDMA_MONITOR_BUF);
		}

		/* Allocate link descriptors for the mon link descriptor ring */
		status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev);
		if (!QDF_IS_STATUS_SUCCESS(status)) {
			dp_err("%s: dp_hw_link_desc_pool_banks_alloc() failed",
			       __func__);
			goto fail0;
		}
		dp_link_desc_ring_replenish(soc, mac_for_pdev);

		htt_srng_setup(soc->htt_handle, pdev->pdev_id,
			       soc->rxdma_mon_desc_ring[mac_for_pdev].hal_srng,
			       RXDMA_MONITOR_DESC);
		htt_srng_setup(soc->htt_handle, pdev->pdev_id,
			       soc->rxdma_mon_dst_ring[mac_for_pdev].hal_srng,
			       RXDMA_MONITOR_DST);
	}
pass:
	mon_pdev->pdev_mon_init = 1;
	return QDF_STATUS_SUCCESS;

fail0:
	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_mon_vdev_timer() - Timer handler to poll the monitor rings for
 *                    interrupts
 * @arg: Datapath soc handle, passed as an opaque pointer
 *
 * Return: none
 */
static void dp_mon_vdev_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	struct dp_pdev *pdev = soc->pdev_list[0];
	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
	uint32_t work_done = 0, total_work_done = 0;
	int budget = 0xffff;
	uint32_t remaining_quota = budget;
	uint64_t start_time;
	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
	uint32_t lmac_iter;
	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	if (mon_pdev->mon_chan_band != REG_BAND_UNKNOWN)
		lmac_id = pdev->ch_band_lmac_id_mapping[mon_pdev->mon_chan_band];

	start_time = qdf_get_log_timestamp();
	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);

	while (yield == DP_TIMER_NO_YIELD) {
		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
			if (lmac_iter == lmac_id)
				work_done = dp_monitor_process(
						    soc, NULL,
						    lmac_iter, remaining_quota);
			else
				work_done =
					dp_monitor_drop_packets_for_mac(pdev,
								     lmac_iter,
								     remaining_quota);
			if (work_done) {
				budget -= work_done;
				if (budget <= 0) {
					yield = DP_TIMER_WORK_EXHAUST;
					goto budget_done;
				}
				remaining_quota = budget;
				total_work_done += work_done;
			}
		}

		yield = dp_should_timer_irq_yield(soc, total_work_done,
						  start_time);
		total_work_done = 0;
	}

budget_done:
	if (yield == DP_TIMER_WORK_EXHAUST ||
	    yield == DP_TIMER_TIME_EXHAUST)
		qdf_timer_mod(&mon_soc->mon_vdev_timer, 1);
	else
		qdf_timer_mod(&mon_soc->mon_vdev_timer, DP_INTR_POLL_TIMER_MS);
}

/* MCL specific functions */
#if defined(DP_CON_MON)
/**
 * dp_mon_reap_timer_handler() - Timer to reap the monitor rings; required
 *                            because ppdu-end interrupts are not available
 * @arg: Datapath soc handle, passed as an opaque pointer
 *
 * Return: none
 */
static void dp_mon_reap_timer_handler(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	dp_service_mon_rings(soc, QCA_NAPI_BUDGET);

	qdf_timer_mod(&mon_soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}

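/**
 * dp_mon_reap_timer_init() - Initialize the monitor reap timer, its lock
 *                         and the trigger-source bitmap
 * @soc: Datapath soc handle
 *
 * Return: none
 */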
static void dp_mon_reap_timer_init(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	qdf_spinlock_create(&mon_soc->reap_timer_lock);
	qdf_timer_init(soc->osdev, &mon_soc->mon_reap_timer,
		       dp_mon_reap_timer_handler, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);
	qdf_mem_zero(mon_soc->mon_reap_src_bitmap,
		     sizeof(mon_soc->mon_reap_src_bitmap));
	mon_soc->reap_timer_init = 1;
}
#else
static void dp_mon_reap_timer_init(struct dp_soc *soc)
{
}
#endif

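/**
 * dp_mon_reap_timer_deinit() - Free the monitor reap timer and destroy its
 *                           lock, if the timer was initialized
 * @soc: Datapath soc handle
 *
 * Return: none
 */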
static void dp_mon_reap_timer_deinit(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (mon_soc->reap_timer_init) {
		mon_soc->reap_timer_init = 0;
		qdf_timer_free(&mon_soc->mon_reap_timer);
		qdf_spinlock_destroy(&mon_soc->reap_timer_lock);
	}
}

/**
 * dp_mon_reap_timer_start() - Start the reap timer of the monitor status ring
 * @soc: Datapath soc handle
 * @source: trigger source
 *
 * If the source is CDP_MON_REAP_SOURCE_ANY, leave the bitmap untouched and
 * start the timer if any bit is already set in it; for any other source,
 * set the source bit and start the timer only if the bitmap was empty
 * before that.
 *
 * Return: true if a timer-start was performed, false otherwise.
 */
static bool
dp_mon_reap_timer_start(struct dp_soc *soc, enum cdp_mon_reap_source source)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	bool do_start;

	if (!mon_soc->reap_timer_init)
		return false;

	qdf_spin_lock_bh(&mon_soc->reap_timer_lock);
	do_start = qdf_bitmap_empty(mon_soc->mon_reap_src_bitmap,
				    CDP_MON_REAP_SOURCE_NUM);
	if (source == CDP_MON_REAP_SOURCE_ANY)
		do_start = !do_start;
	else
		qdf_set_bit(source, mon_soc->mon_reap_src_bitmap);
	qdf_spin_unlock_bh(&mon_soc->reap_timer_lock);

	if (do_start)
		qdf_timer_mod(&mon_soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);

	return do_start;
}

/**
 * dp_mon_reap_timer_stop() - Stop the reap timer of the monitor status ring
 * @soc: Datapath soc handle
 * @source: trigger source
 *
 * If the source is CDP_MON_REAP_SOURCE_ANY, leave the bitmap untouched and
 * stop the timer if any bit is set in it; for any other source, clear the
 * source bit and stop the timer only if the bitmap is empty after that.
 *
 * Return: true if a timer-stop was performed, false otherwise.
 */
static bool
dp_mon_reap_timer_stop(struct dp_soc *soc, enum cdp_mon_reap_source source)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	bool do_stop;

	if (!mon_soc->reap_timer_init)
		return false;

	qdf_spin_lock_bh(&mon_soc->reap_timer_lock);
	if (source != CDP_MON_REAP_SOURCE_ANY)
		qdf_clear_bit(source, mon_soc->mon_reap_src_bitmap);

	do_stop = qdf_bitmap_empty(mon_soc->mon_reap_src_bitmap,
				   CDP_MON_REAP_SOURCE_NUM);
	if (source == CDP_MON_REAP_SOURCE_ANY)
		do_stop = !do_stop;
	qdf_spin_unlock_bh(&mon_soc->reap_timer_lock);

	if (do_stop)
		qdf_timer_sync_cancel(&mon_soc->mon_reap_timer);

	return do_stop;
}

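/**
 * dp_mon_vdev_timer_init() - Initialize the monitor vdev polling timer
 * @soc: Datapath soc handle
 *
 * Return: none
 */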
static void dp_mon_vdev_timer_init(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	qdf_timer_init(soc->osdev, &mon_soc->mon_vdev_timer,
		       dp_mon_vdev_timer, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);
	mon_soc->mon_vdev_timer_state |= MON_VDEV_TIMER_INIT;
}

static void dp_mon_vdev_timer_deinit(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (mon_soc->mon_vdev_timer_state & MON_VDEV_TIMER_INIT) {
		qdf_timer_free(&mon_soc->mon_vdev_timer);
		mon_soc->mon_vdev_timer_state = 0;
	}
}

static void dp_mon_vdev_timer_start(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (mon_soc->mon_vdev_timer_state & MON_VDEV_TIMER_INIT) {
		qdf_timer_mod(&mon_soc->mon_vdev_timer, DP_INTR_POLL_TIMER_MS);
		mon_soc->mon_vdev_timer_state |= MON_VDEV_TIMER_RUNNING;
	}
}

static bool dp_mon_vdev_timer_stop(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (mon_soc->mon_vdev_timer_state & MON_VDEV_TIMER_RUNNING) {
		qdf_timer_sync_cancel(&mon_soc->mon_vdev_timer);
		mon_soc->mon_vdev_timer_state &= ~MON_VDEV_TIMER_RUNNING;
		return true;
	}

	return false;
}

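/**
 * dp_mon_neighbour_peer_add_ast() - Add a WDS AST entry for a neighbour
 *                                (NAC) peer whose address matches an entry
 *                                in the neighbour peers list
 * @pdev: Datapath pdev handle
 * @ta_peer: transmitter peer
 * @mac_addr: scratch buffer; filled with the address at offset
 *            QDF_MAC_ADDR_SIZE of the frame data (the source address)
 * @nbuf: received frame
 * @flags: AST entry flags
 *
 * Return: none
 */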
static void dp_mon_neighbour_peer_add_ast(struct dp_pdev *pdev,
					  struct dp_peer *ta_peer,
					  uint8_t *mac_addr,
					  qdf_nbuf_t nbuf,
					  uint32_t flags)
{
	struct dp_neighbour_peer *neighbour_peer = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_soc *soc = pdev->soc;

	if (mon_pdev->neighbour_peers_added) {
		qdf_mem_copy(mac_addr,
			     (qdf_nbuf_data(nbuf) +
			      QDF_MAC_ADDR_SIZE),
			     QDF_MAC_ADDR_SIZE);

		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
		TAILQ_FOREACH(neighbour_peer,
			      &mon_pdev->neighbour_peers_list,
			      neighbour_peer_list_elem) {
			if (!qdf_mem_cmp(&neighbour_peer->neighbour_peers_macaddr,
					 mac_addr,
					 QDF_MAC_ADDR_SIZE)) {
				dp_peer_add_ast(soc,
						ta_peer,
						mac_addr,
						CDP_TXRX_AST_TYPE_WDS,
						flags);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO,
					  "sa valid and nac roamed to wds");
				break;
			}
		}
		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
	}
}

#if !defined(DISABLE_MON_CONFIG)
#if defined(DP_CON_MON)
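/**
 * dp_mon_htt_srng_setup_1_0() - Send HTT ring setup messages to the FW for
 *                            the monitor destination and status rings
 * @soc: Datapath soc handle
 * @pdev: Datapath pdev handle
 * @mac_id: lmac id
 * @mac_for_pdev: lmac id mapped for the pdev
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */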
QDF_STATUS dp_mon_htt_srng_setup_1_0(struct dp_soc *soc,
				     struct dp_pdev *pdev,
				     int mac_id,
				     int mac_for_pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	status = dp_mon_htt_dest_srng_setup(soc, pdev, mac_id, mac_for_pdev);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	if (!soc->rxdma_mon_status_ring[mac_id].hal_srng)
		return QDF_STATUS_SUCCESS;

	status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
				soc->rxdma_mon_status_ring[mac_id]
				.hal_srng,
				RXDMA_MONITOR_STATUS);

	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("Failed to send htt srng setup message for Rxdma mon status ring");
		return status;
	}

	return status;
}
#else
/* This is only for WIN */
QDF_STATUS dp_mon_htt_srng_setup_1_0(struct dp_soc *soc,
				     struct dp_pdev *pdev,
				     int mac_id,
				     int mac_for_pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_soc *mon_soc;

	mon_soc = soc->monitor_soc;
	if (!mon_soc) {
		dp_mon_err("%pK: monitor SOC not initialized", soc);
		return status;
	}

	if (mon_soc->monitor_mode_v2)
		return status;

	if (wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) {
		status = dp_mon_htt_dest_srng_setup(soc, pdev,
						    mac_id, mac_for_pdev);
		if (status != QDF_STATUS_SUCCESS)
			return status;
	}

	if (!soc->rxdma_mon_status_ring[mac_id].hal_srng)
		return QDF_STATUS_SUCCESS;

	status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
				soc->rxdma_mon_status_ring[mac_id]
				.hal_srng,
				RXDMA_MONITOR_STATUS);

	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("Failed to send htt srng setup msg for Rxdma mon status ring");
		return status;
	}

	return status;
}
#endif
#endif

/* MCL specific functions */
#if defined(DP_CON_MON)

/**
 * dp_service_mon_rings() - service monitor rings
 * @soc: soc dp handle
 * @quota: number of ring entries that can be serviced
 *
 * Return: None
 */
void dp_service_mon_rings(struct dp_soc *soc, uint32_t quota)
{
	int ring = 0, work_done;
	struct dp_pdev *pdev = NULL;

	for (ring = 0; ring < MAX_NUM_LMAC_HW; ring++) {
		pdev = dp_get_pdev_for_lmac_id(soc, ring);
		if (!pdev)
			continue;
		work_done = dp_mon_process(soc, NULL, ring, quota);

		dp_rx_mon_dest_debug("Reaped %d descs from Monitor rings",
				     work_done);
	}
}
#endif

/**
 * dp_mon_peer_tx_init() - Initialize TID queue state in the monitor peer
 * @pdev: Datapath pdev
 * @peer: Datapath peer
 *
 * Return: none
 */
static void
dp_mon_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
{
	if (!peer->monitor_peer)
		return;

	dp_peer_tid_queue_init(peer);
	dp_peer_update_80211_hdr(peer->vdev, peer);
}

/**
 * dp_mon_peer_tx_cleanup() - Deinitialize TID queue state in the monitor peer
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 * Return: none
 */
static void
dp_mon_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	if (!peer->monitor_peer)
		return;

	dp_peer_tid_queue_cleanup(peer);
}

#ifdef QCA_SUPPORT_BPR
static QDF_STATUS
dp_set_bpr_enable_1_0(struct dp_pdev *pdev, int val)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	switch (val) {
	case CDP_BPR_DISABLE:
		mon_pdev->bpr_enable = CDP_BPR_DISABLE;
		if (!mon_pdev->pktlog_ppdu_stats &&
		    !mon_pdev->enhanced_stats_en &&
		    !mon_pdev->tx_sniffer_enable && !mon_pdev->mcopy_mode) {
			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
		} else if (mon_pdev->enhanced_stats_en &&
			   !mon_pdev->tx_sniffer_enable &&
			   !mon_pdev->mcopy_mode &&
			   !mon_pdev->pktlog_ppdu_stats) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_ENH_STATS,
						  pdev->pdev_id);
		}
		break;
	case CDP_BPR_ENABLE:
		mon_pdev->bpr_enable = CDP_BPR_ENABLE;
		if (!mon_pdev->enhanced_stats_en &&
		    !mon_pdev->tx_sniffer_enable &&
		    !mon_pdev->mcopy_mode && !mon_pdev->pktlog_ppdu_stats) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR,
						  pdev->pdev_id);
		} else if (mon_pdev->enhanced_stats_en &&
			   !mon_pdev->tx_sniffer_enable &&
			   !mon_pdev->mcopy_mode &&
			   !mon_pdev->pktlog_ppdu_stats) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR_ENH,
						  pdev->pdev_id);
		} else if (mon_pdev->pktlog_ppdu_stats) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
						  pdev->pdev_id);
		}
		break;
	default:
		break;
	}

	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef QCA_ENHANCED_STATS_SUPPORT
#if defined(WDI_EVENT_ENABLE) && !defined(WLAN_TX_PKT_CAPTURE_ENH)
/**
 * dp_ppdu_desc_notify_1_0() - Notify upper layer of a PPDU indication via WDI
 * @pdev: Datapath pdev handle
 * @nbuf: Buffer to be shipped
 *
 * Return: void
 */
static void dp_ppdu_desc_notify_1_0(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(nbuf);

	/*
	 * Deliver PPDU stats only for valid (acked) data
	 * frames if sniffer mode is not enabled.
	 * If sniffer mode is enabled, PPDU stats
	 * for all frames including mgmt/control
	 * frames should be delivered to upper layer.
	 */
	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) {
		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
				     pdev->soc,
				     nbuf, HTT_INVALID_PEER,
				     WDI_NO_VAL,
				     pdev->pdev_id);
	} else {
		if (ppdu_desc->num_mpdu != 0 &&
		    ppdu_desc->num_users != 0 &&
		    (ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE)) {
			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
					     pdev->soc,
					     nbuf, HTT_INVALID_PEER,
					     WDI_NO_VAL,
					     pdev->pdev_id);
		} else {
			qdf_nbuf_free(nbuf);
		}
	}
}
#endif

/**
 * dp_ppdu_stats_feat_enable_check_1_0() - Check if any feature that consumes
 *				ppdu stats from the FW is enabled
 * @pdev: Datapath pdev handle
 *
 * Return: true if at least one such feature is enabled, else false
 */
static bool dp_ppdu_stats_feat_enable_check_1_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	return mon_pdev->enhanced_stats_en || mon_pdev->tx_sniffer_enable ||
	       mon_pdev->mcopy_mode || mon_pdev->bpr_enable;
}

/**
 * dp_mon_tx_stats_update_1_0() - Update Tx stats from HTT PPDU completion path
 * @mon_peer: Monitor peer
 * @ppdu: Tx PPDU user completion info
 *
 * Puncturing is not supported on 1.0 targets, so punc_mode is fixed to
 * NO_PUNCTURE.
 *
 * Return: none
 */
static void
dp_mon_tx_stats_update_1_0(struct dp_mon_peer *mon_peer,
			   struct cdp_tx_completion_ppdu_user *ppdu)
{
	ppdu->punc_mode = NO_PUNCTURE;
}
#endif

#ifndef QCA_SUPPORT_FULL_MON
/**
 * dp_rx_mon_process() - Core brain processing for monitor mode
 *
 * This API processes the monitor destination ring followed by the monitor
 * status ring. Called from the bottom half (tasklet/NET_RX_SOFTIRQ).
 *
 * @soc: datapath soc context
 * @int_ctx: interrupt context
 * @mac_id: mac_id on which the interrupt was received
 * @quota: number of status ring entries that can be serviced in one shot
 *
 * Return: number of reaped status ring entries
 */
static inline uint32_t
dp_rx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
		  uint32_t mac_id, uint32_t quota)
{
	return quota;
}
#endif

#ifndef DISABLE_MON_CONFIG
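/**
 * dp_rx_mon_process_1_0() - Process the monitor ring entries; dispatches to
 *                        full monitor processing when full monitor mode is
 *                        enabled, otherwise services the monitor status ring
 * @soc: datapath soc context
 * @int_ctx: interrupt context
 * @mac_id: mac_id on which the interrupt was received
 * @quota: number of status ring entries that can be serviced in one shot
 *
 * Return: number of reaped status ring entries
 */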
static uint32_t
dp_rx_mon_process_1_0(struct dp_soc *soc, struct dp_intr *int_ctx,
		      uint32_t mac_id, uint32_t quota)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (qdf_unlikely(mon_soc->full_mon_mode))
		return dp_rx_mon_process(soc, int_ctx, mac_id, quota);

	return dp_rx_mon_status_process(soc, int_ctx, mac_id, quota);
}

#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops->mon_ppdu_stats_ind_handler =
					dp_ppdu_stats_ind_handler;
}
#else
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
}
#endif

static void dp_mon_register_intr_ops_1_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	mon_soc->mon_rx_process = dp_rx_mon_process_1_0;
	dp_mon_ppdu_stats_handler_register(mon_soc);
}
#endif

#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
/**
 * dp_update_filter_neighbour_peers() - set neighbour peers (NAC clients)
 *                                   address for smart mesh filtering
 * @soc_hdl: cdp soc handle
 * @vdev_id: id of the virtual device object
 * @cmd: Add/Del command
 * @macaddr: NAC client mac address
 *
 * Return: 1 on success, 0 on failure
 */
static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id,
					    uint32_t cmd, uint8_t *macaddr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_mon_pdev *mon_pdev;

	if (!vdev || !macaddr)
		goto fail0;

	pdev = vdev->pdev;

	if (!pdev)
		goto fail0;

	mon_pdev = pdev->monitor_pdev;

	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */
	if (cmd == DP_NAC_PARAM_ADD) {
		peer = (struct dp_neighbour_peer *)qdf_mem_malloc(
				sizeof(*peer));

		if (!peer) {
			dp_cdp_err("%pK: DP neighbour peer node memory allocation failed",
				   soc);
			goto fail0;
		}

		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
			     macaddr, QDF_MAC_ADDR_SIZE);
		peer->vdev = vdev;

		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);

		/* add this neighbour peer into the list */
		TAILQ_INSERT_TAIL(&mon_pdev->neighbour_peers_list, peer,
				  neighbour_peer_list_elem);
		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);

		/* first neighbour */
		if (!mon_pdev->neighbour_peers_added) {
			QDF_STATUS status = QDF_STATUS_SUCCESS;

			mon_pdev->neighbour_peers_added = true;
			dp_mon_filter_setup_smart_monitor(pdev);
			status = dp_mon_filter_update(pdev);
			if (status != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: smart mon filter setup failed",
					   soc);
				dp_mon_filter_reset_smart_monitor(pdev);
				mon_pdev->neighbour_peers_added = false;
			}
		}

	} else if (cmd == DP_NAC_PARAM_DEL) {
		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
		TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
			      neighbour_peer_list_elem) {
			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
					 macaddr, QDF_MAC_ADDR_SIZE)) {
				/* delete this peer from the list */
				TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
					     peer, neighbour_peer_list_elem);
				qdf_mem_free(peer);
				break;
			}
		}
		/* last neighbour deleted */
		if (TAILQ_EMPTY(&mon_pdev->neighbour_peers_list)) {
			QDF_STATUS status = QDF_STATUS_SUCCESS;

			dp_mon_filter_reset_smart_monitor(pdev);
			status = dp_mon_filter_update(pdev);
			if (status != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: smart mon filter clear failed",
					   soc);
			}
			mon_pdev->neighbour_peers_added = false;
		}
		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
	}
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return 1;

fail0:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return 0;
}
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */

#ifdef ATH_SUPPORT_NAC_RSSI
/**
 * dp_vdev_get_neighbour_rssi() - Fetch the stored RSSI for a configured NAC
 * @soc_hdl: DP soc handle
 * @vdev_id: id of the DP vdev handle
 * @mac_addr: neighbour mac
 * @rssi: out parameter to hold the rssi value
 *
 * Return: QDF_STATUS_SUCCESS if the neighbour was found, error status
 *	   otherwise
 */
static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc_hdl,
					     uint8_t vdev_id,
					     char *mac_addr,
					     uint8_t *rssi)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_pdev *pdev;
	struct dp_neighbour_peer *peer = NULL;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct dp_mon_pdev *mon_pdev;

	if (!vdev)
		return status;

	pdev = vdev->pdev;
	mon_pdev = pdev->monitor_pdev;

	*rssi = 0;
	qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
			*rssi = peer->rssi;
			status = QDF_STATUS_SUCCESS;
			break;
		}
	}
	qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return status;
}

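/**
 * dp_config_for_nac_rssi() - Enable NAC RSSI filtering and add/delete the
 *                         NAC client in both the host and the FW
 * @cdp_soc: cdp soc handle
 * @vdev_id: id of the vdev on which the NAC is configured
 * @cmd: CDP_NAC_PARAM_ADD or CDP_NAC_PARAM_DEL
 * @bssid: BSSID to be configured in the FW
 * @client_macaddr: NAC client mac address
 * @chan_num: channel number
 *
 * Return: QDF_STATUS
 */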
static QDF_STATUS
dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc,
		       uint8_t vdev_id,
		       enum cdp_nac_param_cmd cmd, char *bssid,
		       char *client_macaddr,
		       uint8_t chan_num)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_pdev *pdev;
	struct dp_mon_pdev *mon_pdev;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = (struct dp_pdev *)vdev->pdev;

	mon_pdev = pdev->monitor_pdev;
	mon_pdev->nac_rssi_filtering = 1;
	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */

	if (cmd == CDP_NAC_PARAM_ADD) {
		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
						 DP_NAC_PARAM_ADD,
						 (uint8_t *)client_macaddr);
	} else if (cmd == CDP_NAC_PARAM_DEL) {
		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
						 DP_NAC_PARAM_DEL,
						 (uint8_t *)client_macaddr);
	}

	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
			(soc->ctrl_psoc, pdev->pdev_id,
			 vdev->vdev_id, cmd, bssid, client_macaddr);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_mon_register_feature_ops_1_0() - register feature ops
 * @soc: dp soc context
 *
 * Return: void
 */
static void
dp_mon_register_feature_ops_1_0(struct dp_soc *soc)
{
	struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);

	if (!mon_ops) {
		dp_err("mon_ops is NULL, feature ops registration failed");
		return;
	}
	mon_ops->mon_config_debug_sniffer = dp_config_debug_sniffer;
	mon_ops->mon_peer_tx_init = dp_mon_peer_tx_init;
	mon_ops->mon_peer_tx_cleanup = dp_mon_peer_tx_cleanup;
	mon_ops->mon_htt_ppdu_stats_attach = dp_htt_ppdu_stats_attach;
	mon_ops->mon_htt_ppdu_stats_detach = dp_htt_ppdu_stats_detach;
	mon_ops->mon_print_pdev_rx_mon_stats = dp_print_pdev_rx_mon_stats;
	mon_ops->mon_set_bsscolor = dp_mon_set_bsscolor;
	mon_ops->mon_pdev_get_filter_ucast_data =
				dp_pdev_get_filter_ucast_data;
	mon_ops->mon_pdev_get_filter_mcast_data =
				dp_pdev_get_filter_mcast_data;
	mon_ops->mon_pdev_get_filter_non_data = dp_pdev_get_filter_non_data;
	mon_ops->mon_neighbour_peer_add_ast = dp_mon_neighbour_peer_add_ast;
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	mon_ops->mon_peer_tid_peer_id_update = dp_peer_tid_peer_id_update_1_0;
	mon_ops->mon_tx_capture_debugfs_init = dp_tx_capture_debugfs_init_1_0;
	mon_ops->mon_tx_add_to_comp_queue = dp_tx_add_to_comp_queue_1_0;
	mon_ops->mon_print_pdev_tx_capture_stats =
				dp_print_pdev_tx_capture_stats_1_0;
	mon_ops->mon_config_enh_tx_capture = dp_config_enh_tx_capture_1_0;
	mon_ops->mon_tx_peer_filter = dp_peer_set_tx_capture_enabled_1_0;
	mon_ops->mon_peer_tx_capture_get_stats = dp_get_peer_tx_capture_stats;
	mon_ops->mon_pdev_tx_capture_get_stats = dp_get_pdev_tx_capture_stats;
#endif
#if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH))
	mon_ops->mon_peer_tid_peer_id_update = NULL;
	mon_ops->mon_tx_capture_debugfs_init = NULL;
	mon_ops->mon_tx_add_to_comp_queue = NULL;
	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
	mon_ops->mon_config_enh_tx_capture = NULL;
	mon_ops->mon_tx_peer_filter = NULL;
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_config_enh_rx_capture = dp_config_enh_rx_capture;
#endif
#ifdef QCA_SUPPORT_BPR
	mon_ops->mon_set_bpr_enable = dp_set_bpr_enable_1_0;
#endif
#ifdef ATH_SUPPORT_NAC
	mon_ops->mon_set_filter_neigh_peers = dp_set_filter_neigh_peers;
#endif
#ifdef WLAN_ATF_ENABLE
	mon_ops->mon_set_atf_stats_enable = dp_set_atf_stats_enable;
#endif
#ifdef FEATURE_NAC_RSSI
	mon_ops->mon_filter_neighbour_peer = dp_filter_neighbour_peer;
#endif
#ifdef QCA_MCOPY_SUPPORT
	mon_ops->mon_filter_setup_mcopy_mode =
				dp_mon_filter_setup_mcopy_mode_1_0;
	mon_ops->mon_filter_reset_mcopy_mode =
				dp_mon_filter_reset_mcopy_mode_1_0;
	mon_ops->mon_mcopy_check_deliver = dp_mcopy_check_deliver;
#endif
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_filter_setup_enhanced_stats =
				dp_mon_filter_setup_enhanced_stats_1_0;
	mon_ops->mon_filter_reset_enhanced_stats =
				dp_mon_filter_reset_enhanced_stats_1_0;
	mon_ops->mon_tx_enable_enhanced_stats =
				dp_mon_tx_enable_enhanced_stats_1_0;
	mon_ops->mon_tx_disable_enhanced_stats =
				dp_mon_tx_disable_enhanced_stats_1_0;
	mon_ops->mon_ppdu_stats_feat_enable_check =
				dp_ppdu_stats_feat_enable_check_1_0;
#ifndef WLAN_TX_PKT_CAPTURE_ENH
	mon_ops->mon_ppdu_desc_deliver = dp_ppdu_desc_deliver;
#ifdef WDI_EVENT_ENABLE
	mon_ops->mon_ppdu_desc_notify = dp_ppdu_desc_notify_1_0;
#endif
#else
	mon_ops->mon_ppdu_desc_deliver = dp_ppdu_desc_deliver_1_0;
#endif
	mon_ops->mon_tx_stats_update = dp_mon_tx_stats_update_1_0;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	mon_ops->mon_filter_setup_smart_monitor =
				dp_mon_filter_setup_smart_monitor_1_0;
	mon_ops->mon_filter_reset_smart_monitor =
				dp_mon_filter_reset_smart_monitor_1_0;
#endif
	mon_ops->mon_filter_set_reset_mon_mac_filter =
				dp_mon_set_reset_mon_mac_filter_1_0;
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_filter_setup_rx_enh_capture =
				dp_mon_filter_setup_rx_enh_capture_1_0;
#endif
#ifdef WDI_EVENT_ENABLE
	mon_ops->mon_set_pktlog_wifi3 = dp_set_pktlog_wifi3;
	mon_ops->mon_filter_setup_rx_pkt_log_full =
				dp_mon_filter_setup_rx_pkt_log_full_1_0;
	mon_ops->mon_filter_reset_rx_pkt_log_full =
				dp_mon_filter_reset_rx_pkt_log_full_1_0;
	mon_ops->mon_filter_setup_rx_pkt_log_lite =
				dp_mon_filter_setup_rx_pkt_log_lite_1_0;
	mon_ops->mon_filter_reset_rx_pkt_log_lite =
				dp_mon_filter_reset_rx_pkt_log_lite_1_0;
	mon_ops->mon_filter_setup_rx_pkt_log_cbf =
				dp_mon_filter_setup_rx_pkt_log_cbf_1_0;
	mon_ops->mon_filter_reset_rx_pkt_log_cbf =
				dp_mon_filter_reset_rx_pktlog_cbf_1_0;
#ifdef BE_PKTLOG_SUPPORT
	mon_ops->mon_filter_setup_pktlog_hybrid = NULL;
	mon_ops->mon_filter_reset_pktlog_hybrid = NULL;
#endif
#endif
#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
	mon_ops->mon_pktlogmod_exit = dp_pktlogmod_exit;
#endif
	mon_ops->rx_packet_length_set = NULL;
	mon_ops->rx_mon_enable = NULL;
	mon_ops->rx_wmask_subscribe = NULL;
	mon_ops->rx_pkt_tlv_offset = NULL;
	mon_ops->rx_enable_mpdu_logging = NULL;
	mon_ops->rx_enable_fpmo = NULL;
	mon_ops->mon_neighbour_peers_detach = dp_neighbour_peers_detach;
	mon_ops->mon_vdev_set_monitor_mode_buf_rings =
				dp_vdev_set_monitor_mode_buf_rings;
	mon_ops->mon_vdev_set_monitor_mode_rings =
				dp_vdev_set_monitor_mode_rings;
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_rx_stats_update = NULL;
	mon_ops->mon_rx_populate_ppdu_usr_info = NULL;
	mon_ops->mon_rx_populate_ppdu_info = dp_mon_populate_ppdu_info_1_0;
#endif
#ifdef QCA_UNDECODED_METADATA_SUPPORT
	mon_ops->mon_config_undecoded_metadata_capture =
		dp_mon_config_undecoded_metadata_capture;
	mon_ops->mon_filter_setup_undecoded_metadata_capture =
		dp_mon_filter_setup_undecoded_metadata_capture_1_0;
	mon_ops->mon_filter_reset_undecoded_metadata_capture =
		dp_mon_filter_reset_undecoded_metadata_capture_1_0;
#endif
	mon_ops->mon_rx_print_advanced_stats = NULL;
	mon_ops->mon_mac_filter_set = dp_mon_mac_filter_set;
}

struct dp_mon_ops monitor_ops_1_0 = {
	.mon_soc_cfg_init = dp_mon_soc_cfg_init,

	.mon_pdev_alloc = NULL,
	.mon_pdev_free = NULL,
	.mon_pdev_attach = dp_mon_pdev_attach,
	.mon_pdev_detach = dp_mon_pdev_detach,
	.mon_pdev_init = dp_mon_pdev_init,
	.mon_pdev_deinit = dp_mon_pdev_deinit,
	.mon_vdev_attach = dp_mon_vdev_attach,
	.mon_vdev_detach = dp_mon_vdev_detach,
	.mon_peer_attach = dp_mon_peer_attach,
	.mon_peer_detach = dp_mon_peer_detach,
	.mon_peer_get_peerstats_ctx = dp_mon_peer_get_peerstats_ctx,
	.mon_peer_reset_stats = dp_mon_peer_reset_stats,
	.mon_peer_get_stats = dp_mon_peer_get_stats,
	.mon_invalid_peer_update_pdev_stats =
				dp_mon_invalid_peer_update_pdev_stats,
	.mon_peer_get_stats_param = dp_mon_peer_get_stats_param,
	.mon_flush_rings = dp_flush_monitor_rings,
#if defined(DP_CON_MON)
	.mon_service_rings = dp_service_mon_rings,
#endif
#ifndef DISABLE_MON_CONFIG
	.mon_rx_process = NULL,
#endif
#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
	.mon_drop_packets_for_mac = dp_mon_drop_packets_for_mac,
#endif
	.mon_vdev_timer_init = dp_mon_vdev_timer_init,
	.mon_vdev_timer_start = dp_mon_vdev_timer_start,
	.mon_vdev_timer_stop = dp_mon_vdev_timer_stop,
	.mon_vdev_timer_deinit = dp_mon_vdev_timer_deinit,
	.mon_reap_timer_init = dp_mon_reap_timer_init,
	.mon_reap_timer_start = dp_mon_reap_timer_start,
	.mon_reap_timer_stop = dp_mon_reap_timer_stop,
	.mon_reap_timer_deinit = dp_mon_reap_timer_deinit,
	.mon_filter_setup_rx_mon_mode = dp_mon_filter_setup_mon_mode_1_0,
	.mon_filter_reset_rx_mon_mode = dp_mon_filter_reset_mon_mode_1_0,
	.rx_mon_filter_update = dp_mon_filter_update_1_0,
	.set_mon_mode_buf_rings_tx = NULL,
	.rx_mon_desc_pool_init = dp_rx_pdev_mon_desc_pool_init,
	.rx_mon_desc_pool_deinit = dp_rx_pdev_mon_desc_pool_deinit,
	.rx_mon_desc_pool_alloc = dp_rx_pdev_mon_desc_pool_alloc,
	.rx_mon_desc_pool_free = dp_rx_pdev_mon_desc_pool_free,
	.rx_mon_buffers_alloc = dp_rx_pdev_mon_buffers_alloc,
	.rx_mon_buffers_free = dp_rx_pdev_mon_buffers_free,
	.tx_mon_desc_pool_init = NULL,
	.tx_mon_desc_pool_deinit = NULL,
	.tx_mon_desc_pool_alloc = NULL,
	.tx_mon_desc_pool_free = NULL,
	.tx_mon_filter_alloc = NULL,
#if !defined(DISABLE_MON_CONFIG)
	.mon_register_intr_ops = dp_mon_register_intr_ops_1_0,
#endif
	.mon_register_feature_ops = dp_mon_register_feature_ops_1_0,
	.mon_lite_mon_alloc = NULL,
	.mon_lite_mon_dealloc = NULL,
	.mon_lite_mon_vdev_delete = NULL,
	.mon_lite_mon_disable_rx = NULL,
	.mon_lite_mon_is_rx_adv_filter_enable = NULL,
};

struct cdp_mon_ops dp_ops_mon_1_0 = {
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
	.config_full_mon_mode = dp_config_full_mon_mode,
	.soc_config_full_mon_mode = dp_soc_config_full_mon_mode,
	.get_mon_pdev_rx_stats = dp_pdev_get_rx_mon_stats,
	.txrx_enable_mon_reap_timer = dp_enable_mon_reap_timer,
#ifdef QCA_ENHANCED_STATS_SUPPORT
	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	.txrx_update_filter_neighbour_peers = dp_update_filter_neighbour_peers,
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
#endif
#ifdef QCA_SUPPORT_LITE_MONITOR
	.txrx_set_lite_mon_config = NULL,
	.txrx_get_lite_mon_config = NULL,
	.txrx_set_lite_mon_peer_config = NULL,
	.txrx_get_lite_mon_peer_config = NULL,
	.txrx_is_lite_mon_enabled = NULL,
	.txrx_get_lite_mon_legacy_feature_enabled = NULL,
#endif
	.txrx_set_mon_pdev_params_rssi_dbm_conv =
				dp_mon_pdev_params_rssi_dbm_conv,
#ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE
	.start_local_pkt_capture = dp_mon_start_local_pkt_capture,
	.stop_local_pkt_capture = dp_mon_stop_local_pkt_capture,
	.is_local_pkt_capture_running = dp_mon_get_is_local_pkt_capture_running,
#endif /* WLAN_FEATURE_LOCAL_PKT_CAPTURE */
};

#ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
void dp_mon_ops_register_1_0(struct dp_mon_soc *mon_soc)
{
	struct dp_mon_ops *mon_ops = NULL;

	if (mon_soc->mon_ops) {
		dp_mon_err("monitor ops already allocated");
		return;
	}

	mon_ops = qdf_mem_malloc(sizeof(struct dp_mon_ops));
	if (!mon_ops) {
		dp_mon_err("Failed to allocate memory for mon ops");
		return;
	}

	qdf_mem_copy(mon_ops, &monitor_ops_1_0, sizeof(struct dp_mon_ops));
	mon_soc->mon_ops = mon_ops;
	dp_mon_register_lpc_ops_1_0(mon_ops);
}

void dp_mon_cdp_ops_register_1_0(struct cdp_ops *ops)
{
	struct cdp_mon_ops *mon_ops = NULL;

	if (ops->mon_ops) {
		dp_mon_err("cdp monitor ops already allocated");
		return;
	}

	mon_ops = qdf_mem_malloc(sizeof(struct cdp_mon_ops));
	if (!mon_ops) {
		dp_mon_err("Failed to allocate memory for cdp mon ops");
		return;
	}

	qdf_mem_copy(mon_ops, &dp_ops_mon_1_0, sizeof(struct cdp_mon_ops));
	ops->mon_ops = mon_ops;
}
#else
void dp_mon_ops_register_1_0(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops = &monitor_ops_1_0;
	dp_mon_register_lpc_ops_1_0(mon_soc->mon_ops);
}

void dp_mon_cdp_ops_register_1_0(struct cdp_ops *ops)
{
	ops->mon_ops = &dp_ops_mon_1_0;
}
#endif
1530