xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/2.0/dp_mon_2.0.c (revision 449758b4de7a219dad7b7a0e20ce2ea1c8388e34)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <dp_types.h>
19 #include "dp_rx.h"
20 #include "dp_peer.h"
21 #include <dp_htt.h>
22 #include <dp_mon_filter.h>
23 #include <dp_mon.h>
24 #include <dp_rx_mon.h>
25 #include <dp_rx_mon_2.0.h>
26 #include <dp_mon_2.0.h>
27 #include <dp_mon_filter_2.0.h>
28 #include <dp_tx_mon_2.0.h>
29 #include <hal_be_api_mon.h>
30 #include <dp_be.h>
31 #include <htt_ppdu_stats.h>
32 #ifdef QCA_SUPPORT_LITE_MONITOR
33 #include "dp_lite_mon.h"
34 #endif
35 
36 #if !defined(DISABLE_MON_CONFIG)
37 /*
38  * dp_mon_add_desc_list_to_free_list() - append unused desc_list back to
39  *					freelist.
40  *
41  * @soc: core txrx main context
42  * @local_desc_list: local desc list provided by the caller
43  * @tail: attach the point to last desc of local desc list
44  * @mon_desc_pool: monitor descriptor pool pointer
45  */
46 void
47 dp_mon_add_desc_list_to_free_list(struct dp_soc *soc,
48 				  union dp_mon_desc_list_elem_t **local_desc_list,
49 				  union dp_mon_desc_list_elem_t **tail,
50 				  struct dp_mon_desc_pool *mon_desc_pool)
51 {
52 	union dp_mon_desc_list_elem_t *temp_list = NULL;
53 
54 	qdf_spin_lock_bh(&mon_desc_pool->lock);
55 
56 	temp_list = mon_desc_pool->freelist;
57 	mon_desc_pool->freelist = *local_desc_list;
58 	(*tail)->next = temp_list;
59 	*tail = NULL;
60 	*local_desc_list = NULL;
61 
62 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
63 }
64 
65 /*
66  * dp_mon_get_free_desc_list() - provide a list of descriptors from
67  *				the free mon desc pool.
68  *
69  * @soc: core txrx main context
70  * @mon_desc_pool: monitor descriptor pool pointer
71  * @num_descs: number of descs requested from freelist
72  * @desc_list: attach the descs to this list (output parameter)
73  * @tail: attach the point to last desc of free list (output parameter)
74  *
75  * Return: number of descs allocated from free list.
76  */
77 static uint16_t
78 dp_mon_get_free_desc_list(struct dp_soc *soc,
79 			  struct dp_mon_desc_pool *mon_desc_pool,
80 			  uint16_t num_descs,
81 			  union dp_mon_desc_list_elem_t **desc_list,
82 			  union dp_mon_desc_list_elem_t **tail)
83 {
84 	uint16_t count;
85 
86 	qdf_spin_lock_bh(&mon_desc_pool->lock);
87 
88 	*desc_list = *tail = mon_desc_pool->freelist;
89 
90 	for (count = 0; count < num_descs; count++) {
91 		if (qdf_unlikely(!mon_desc_pool->freelist)) {
92 			qdf_spin_unlock_bh(&mon_desc_pool->lock);
93 			return count;
94 		}
95 		*tail = mon_desc_pool->freelist;
96 		mon_desc_pool->freelist = mon_desc_pool->freelist->next;
97 	}
98 	(*tail)->next = NULL;
99 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
100 	return count;
101 }
102 
103 static inline QDF_STATUS
104 dp_mon_frag_alloc_and_map(struct dp_soc *dp_soc,
105 			  struct dp_mon_desc *mon_desc,
106 			  struct dp_mon_desc_pool *mon_desc_pool)
107 {
108 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
109 
110 	mon_desc->buf_addr = qdf_frag_alloc(&mon_desc_pool->pf_cache,
111 					    mon_desc_pool->buf_size);
112 
113 	if (!mon_desc->buf_addr) {
114 		dp_mon_err("Frag alloc failed");
115 		return QDF_STATUS_E_NOMEM;
116 	}
117 
118 	ret = qdf_mem_map_page(dp_soc->osdev,
119 			       mon_desc->buf_addr,
120 			       QDF_DMA_FROM_DEVICE,
121 			       mon_desc_pool->buf_size,
122 			       &mon_desc->paddr);
123 
124 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
125 		qdf_frag_free(mon_desc->buf_addr);
126 		dp_mon_err("Frag map failed");
127 		return QDF_STATUS_E_FAULT;
128 	}
129 
130 	return QDF_STATUS_SUCCESS;
131 }
132 
/**
 * dp_mon_desc_pool_init() - initialize a monitor descriptor pool: create the
 *	pool lock, zero the descriptor array and chain all entries into a
 *	singly linked freelist.
 *
 * @mon_desc_pool: pool to initialize (array must already be allocated by
 *		   dp_mon_desc_pool_alloc())
 * @pool_size: requested pool size; one entry is reserved, matching the
 *	       (pool_size - 1) sizing used in dp_mon_desc_pool_alloc()
 *
 * Return: QDF_STATUS_SUCCESS (cannot fail)
 */
QDF_STATUS
dp_mon_desc_pool_init(struct dp_mon_desc_pool *mon_desc_pool,
		      uint32_t pool_size)
{
	int desc_id;
	/* Initialize monitor desc lock */
	qdf_spinlock_create(&mon_desc_pool->lock);

	qdf_spin_lock_bh(&mon_desc_pool->lock);

	mon_desc_pool->buf_size = DP_MON_DATA_BUFFER_SIZE;
	/* link SW descs into a freelist */
	mon_desc_pool->freelist = &mon_desc_pool->array[0];
	/* keep in sync with dp_mon_desc_pool_alloc(), which allocates
	 * (pool_size - 1) elements
	 */
	mon_desc_pool->pool_size = pool_size - 1;
	qdf_mem_zero(mon_desc_pool->freelist,
		     mon_desc_pool->pool_size *
		     sizeof(union dp_mon_desc_list_elem_t));

	/* chain every entry to its successor; last entry terminates the list.
	 * cookie doubles as the array index for reverse lookup.
	 */
	for (desc_id = 0; desc_id < mon_desc_pool->pool_size; desc_id++) {
		if (desc_id == mon_desc_pool->pool_size - 1)
			mon_desc_pool->array[desc_id].next = NULL;
		else
			mon_desc_pool->array[desc_id].next =
				&mon_desc_pool->array[desc_id + 1];
		mon_desc_pool->array[desc_id].mon_desc.in_use = 0;
		mon_desc_pool->array[desc_id].mon_desc.cookie = desc_id;
	}
	qdf_spin_unlock_bh(&mon_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}
164 
165 void dp_mon_desc_pool_deinit(struct dp_mon_desc_pool *mon_desc_pool)
166 {
167 	qdf_spin_lock_bh(&mon_desc_pool->lock);
168 
169 	mon_desc_pool->freelist = NULL;
170 	mon_desc_pool->pool_size = 0;
171 
172 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
173 	qdf_spinlock_destroy(&mon_desc_pool->lock);
174 }
175 
/**
 * dp_mon_desc_pool_free() - free the descriptor array backing a monitor
 *	descriptor pool. Counterpart of dp_mon_desc_pool_alloc().
 *
 * @soc: core txrx main context
 * @mon_desc_pool: pool whose array is released
 * @ctx_type: DP context type used at allocation time (must match the
 *	      ctx_type passed to dp_mon_desc_pool_alloc())
 */
void dp_mon_desc_pool_free(struct dp_soc *soc,
			   struct dp_mon_desc_pool *mon_desc_pool,
			   enum dp_ctxt_type ctx_type)
{
	dp_context_free_mem(soc, ctx_type, mon_desc_pool->array);
}
182 
/**
 * dp_vdev_set_monitor_mode_buf_rings_rx_2_0() - configure the Rx monitor
 *	buffer ring for monitor mode: program a low threshold, send the HTT
 *	SRNG setup message and top up buffers to the configured maximum.
 *
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on HTT setup or
 *	   buffer allocation failure.
 */
QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_rx_2_0(struct dp_pdev *pdev)
{
	int rx_mon_max_entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	QDF_STATUS status;

	if (!mon_soc_be) {
		dp_mon_err("DP MON SOC is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	rx_mon_max_entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);

	/* low-threshold interrupt fires when free entries drop below
	 * 4 * MON_BUF_MIN_ENTRIES; refill happens in
	 * dp_rx_mon_refill_buf_ring_2_0()
	 */
	hal_set_low_threshold(soc->rxdma_mon_buf_ring[0].hal_srng,
			      MON_BUF_MIN_ENTRIES << 2);
	status = htt_srng_setup(soc->htt_handle, 0,
				soc->rxdma_mon_buf_ring[0].hal_srng,
				RXDMA_MONITOR_BUF);

	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("Failed to send htt srng setup message for Rx mon buf ring");
		return status;
	}

	/* grow the ring fill level up to the configured maximum; the +=
	 * of the delta is equivalent to setting it to rx_mon_max_entries
	 */
	if (mon_soc_be->rx_mon_ring_fill_level < rx_mon_max_entries) {
		status = dp_rx_mon_buffers_alloc(soc,
						 (rx_mon_max_entries -
						 mon_soc_be->rx_mon_ring_fill_level));
		if (status != QDF_STATUS_SUCCESS) {
			dp_mon_err("%pK: Rx mon buffers allocation failed", soc);
			return status;
		}
		mon_soc_be->rx_mon_ring_fill_level +=
				(rx_mon_max_entries -
				mon_soc_be->rx_mon_ring_fill_level);
	}

	return QDF_STATUS_SUCCESS;
}
226 
227 static
228 QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_2_0(struct dp_pdev *pdev)
229 {
230 	int status;
231 	struct dp_soc *soc = pdev->soc;
232 
233 	status = dp_vdev_set_monitor_mode_buf_rings_rx_2_0(pdev);
234 	if (status != QDF_STATUS_SUCCESS) {
235 		dp_mon_err("%pK: Rx monitor extra buffer allocation failed",
236 			   soc);
237 		return status;
238 	}
239 
240 	return QDF_STATUS_SUCCESS;
241 }
242 
/* No-op for monitor 2.0: ring setup is handled elsewhere (soc/pdev
 * attach/init paths); kept to satisfy the common monitor ops interface.
 */
static
QDF_STATUS dp_vdev_set_monitor_mode_rings_2_0(struct dp_pdev *pdev,
					      uint8_t delayed_replenish)
{
	return QDF_STATUS_SUCCESS;
}
249 
250 #ifdef QCA_ENHANCED_STATS_SUPPORT
251 /**
252  * dp_mon_tx_enable_enhanced_stats_2_0() - Send HTT cmd to FW to enable stats
253  * @pdev: Datapath pdev handle
254  *
255  * Return: none
256  */
static void dp_mon_tx_enable_enhanced_stats_2_0(struct dp_pdev *pdev)
{
	/* DP_PPDU_STATS_CFG_ENH_STATS selects the PPDU stats TLV bitmask
	 * FW should stream for enhanced stats
	 */
	dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
				  pdev->pdev_id);
}
262 
263 /**
264  * dp_mon_tx_disable_enhanced_stats_2_0() - Send HTT cmd to FW to disable stats
265  * @pdev: Datapath pdev handle
266  *
267  * Return: none
268  */
static void dp_mon_tx_disable_enhanced_stats_2_0(struct dp_pdev *pdev)
{
	/* a zero config mask tells FW to stop streaming PPDU stats */
	dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
}
273 #endif
274 
275 #if defined(QCA_ENHANCED_STATS_SUPPORT) && defined(WLAN_FEATURE_11BE)
/**
 * dp_mon_tx_stats_update_2_0() - update per-peer Tx monitor stats with 11BE
 *	puncturing and MCS information from a completed PPDU.
 *
 * @mon_peer: monitor peer whose stats are updated
 * @ppdu: per-user Tx completion PPDU descriptor (punc_mode is written back)
 */
void
dp_mon_tx_stats_update_2_0(struct dp_mon_peer *mon_peer,
			   struct cdp_tx_completion_ppdu_user *ppdu)
{
	uint8_t preamble, mcs, punc_mode, res_mcs;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;

	/* derive the puncture mode from the pattern bitmap and bandwidth,
	 * and record it back into the ppdu descriptor for later consumers
	 */
	punc_mode = dp_mon_get_puncture_type(ppdu->punc_pattern_bitmap,
					     ppdu->bw);
	ppdu->punc_mode = punc_mode;

	DP_STATS_INC(mon_peer, tx.punc_bw[punc_mode], ppdu->num_msdu);

	if (preamble == DOT11_BE) {
		/* clamp out-of-range MCS into the last counter bucket */
		res_mcs = (mcs < MAX_MCS_11BE) ? mcs : (MAX_MCS - 1);

		DP_STATS_INC(mon_peer,
			     tx.pkt_type[preamble].mcs_count[res_mcs],
			     ppdu->num_msdu);
		/* one PPDU counted per SU / MU-OFDMA / MU-MIMO type */
		DP_STATS_INCC(mon_peer,
			      tx.su_be_ppdu_cnt.mcs_count[res_mcs], 1,
			      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_SU));
		DP_STATS_INCC(mon_peer,
			      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[res_mcs],
			      1, (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA));
		DP_STATS_INCC(mon_peer,
			      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[res_mcs],
			      1, (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO));
	}
}
308 
309 enum cdp_punctured_modes
310 dp_mon_get_puncture_type(uint16_t puncture_pattern, uint8_t bw)
311 {
312 	uint16_t mask;
313 	uint8_t punctured_bits;
314 
315 	if (!puncture_pattern)
316 		return NO_PUNCTURE;
317 
318 	switch (bw) {
319 	case CMN_BW_80MHZ:
320 		mask = PUNCTURE_80MHZ_MASK;
321 		break;
322 	case CMN_BW_160MHZ:
323 		mask = PUNCTURE_160MHZ_MASK;
324 		break;
325 	case CMN_BW_320MHZ:
326 		mask = PUNCTURE_320MHZ_MASK;
327 		break;
328 	default:
329 		return NO_PUNCTURE;
330 	}
331 
332 	/* 0s in puncture pattern received in TLV indicates punctured 20Mhz,
333 	 * after complement, 1s will indicate punctured 20Mhz
334 	 */
335 	puncture_pattern = ~puncture_pattern;
336 	puncture_pattern &= mask;
337 
338 	if (puncture_pattern) {
339 		punctured_bits = 0;
340 		while (puncture_pattern != 0) {
341 			punctured_bits++;
342 			puncture_pattern &= (puncture_pattern - 1);
343 		}
344 
345 		if (bw == CMN_BW_80MHZ) {
346 			if (punctured_bits == IEEE80211_PUNC_MINUS20MHZ)
347 				return PUNCTURED_20MHZ;
348 			else
349 				return NO_PUNCTURE;
350 		} else if (bw == CMN_BW_160MHZ) {
351 			if (punctured_bits == IEEE80211_PUNC_MINUS20MHZ)
352 				return PUNCTURED_20MHZ;
353 			else if (punctured_bits == IEEE80211_PUNC_MINUS40MHZ)
354 				return PUNCTURED_40MHZ;
355 			else
356 				return NO_PUNCTURE;
357 		} else if (bw == CMN_BW_320MHZ) {
358 			if (punctured_bits == IEEE80211_PUNC_MINUS40MHZ)
359 				return PUNCTURED_40MHZ;
360 			else if (punctured_bits == IEEE80211_PUNC_MINUS80MHZ)
361 				return PUNCTURED_80MHZ;
362 			else if (punctured_bits == IEEE80211_PUNC_MINUS120MHZ)
363 				return PUNCTURED_120MHZ;
364 			else
365 				return NO_PUNCTURE;
366 		}
367 	}
368 	return NO_PUNCTURE;
369 }
370 #endif
371 
372 #if defined(QCA_ENHANCED_STATS_SUPPORT) && !defined(WLAN_FEATURE_11BE)
/* Stub when 11BE is not compiled in: no puncturing support, only record
 * NO_PUNCTURE in the ppdu descriptor.
 */
void
dp_mon_tx_stats_update_2_0(struct dp_mon_peer *mon_peer,
			   struct cdp_tx_completion_ppdu_user *ppdu)
{
	ppdu->punc_mode = NO_PUNCTURE;
}
379 
/* Stub when 11BE is not compiled in: puncturing never applies. */
enum cdp_punctured_modes
dp_mon_get_puncture_type(uint16_t puncture_pattern, uint8_t bw)
{
	return NO_PUNCTURE;
}
385 #endif /* QCA_ENHANCED_STATS_SUPPORT && WLAN_FEATURE_11BE */
386 
387 #ifdef QCA_SUPPORT_BPR
/* BPR (beacon protection reporting) enable is a no-op on monitor 2.0
 * targets; always reports success.
 */
static QDF_STATUS
dp_set_bpr_enable_2_0(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_SUCCESS;
}
393 #endif /* QCA_SUPPORT_BPR */
394 
395 #ifdef QCA_ENHANCED_STATS_SUPPORT
396 #ifdef WDI_EVENT_ENABLE
397 /**
398  * dp_ppdu_desc_notify_2_0 - Notify upper layer for PPDU indication via WDI
399  *
400  * @pdev: Datapath pdev handle
401  * @nbuf: Buffer to be shipped
402  *
403  * Return: void
404  */
405 static void dp_ppdu_desc_notify_2_0(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
406 {
407 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
408 
409 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(nbuf);
410 
411 	if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
412 	    ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
413 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
414 				     pdev->soc,
415 				     nbuf, HTT_INVALID_PEER,
416 				     WDI_NO_VAL,
417 				     pdev->pdev_id);
418 	} else {
419 		qdf_nbuf_free(nbuf);
420 	}
421 }
422 #endif
423 
424 /**
425  * dp_ppdu_stats_feat_enable_check_2_0 - Check if feature(s) is enabled to
426  *				consume ppdu stats from FW
427  *
428  * @pdev: Datapath pdev handle
429  *
430  * Return: true if enabled, else return false
431  */
static bool dp_ppdu_stats_feat_enable_check_2_0(struct dp_pdev *pdev)
{
	/* enhanced stats is the only 2.0 consumer of FW PPDU stats */
	return pdev->monitor_pdev->enhanced_stats_en;
}
436 #endif
437 
438 QDF_STATUS dp_mon_soc_htt_srng_setup_2_0(struct dp_soc *soc)
439 {
440 	QDF_STATUS status;
441 
442 	status = dp_rx_mon_soc_htt_srng_setup_2_0(soc, 0);
443 	if (status != QDF_STATUS_SUCCESS) {
444 		dp_err("Failed to send htt srng setup message for Rx mon buf ring");
445 		return status;
446 	}
447 
448 	status = dp_tx_mon_soc_htt_srng_setup_2_0(soc, 0);
449 	if (status != QDF_STATUS_SUCCESS) {
450 		dp_err("Failed to send htt srng setup message for Tx mon buf ring");
451 		return status;
452 	}
453 
454 	return status;
455 }
456 
457 #if defined(DP_CON_MON)
/* Converged-monitor (DP_CON_MON) variant: only the Tx monitor destination
 * ring needs a per-pdev HTT SRNG setup; Rx monitor dst setup is handled
 * elsewhere in this configuration.
 */
QDF_STATUS dp_mon_pdev_htt_srng_setup_2_0(struct dp_soc *soc,
					  struct dp_pdev *pdev,
					  int mac_id,
					  int mac_for_pdev)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	QDF_STATUS status;

	/* nothing to set up when the Tx monitor dst ring was never created */
	if (!mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng)
		return QDF_STATUS_SUCCESS;

	status = dp_tx_mon_pdev_htt_srng_setup_2_0(soc, pdev, mac_id,
						   mac_for_pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("Failed to send htt srng message for Tx mon dst ring");
		return status;
	}

	return status;
}
479 #else
480 /* This is for WIN case */
/* This is for WIN case */
/* WIN variant: set up per-pdev HTT SRNG for both Rx and Tx monitor
 * destination rings; each is skipped independently when its ring was not
 * created.
 */
QDF_STATUS dp_mon_pdev_htt_srng_setup_2_0(struct dp_soc *soc,
					  struct dp_pdev *pdev,
					  int mac_id,
					  int mac_for_pdev)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	QDF_STATUS status;

	/* no Rxdma monitor dst ring: nothing to configure */
	if (!soc->rxdma_mon_dst_ring[mac_id].hal_srng)
		return QDF_STATUS_SUCCESS;

	status = dp_rx_mon_pdev_htt_srng_setup_2_0(soc, pdev, mac_id,
						   mac_for_pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("Failed to send htt srng setup message for Rxdma dst ring");
		return status;
	}

	/* Tx monitor dst ring is optional; success if absent */
	if (!mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng)
		return QDF_STATUS_SUCCESS;

	status = dp_tx_mon_pdev_htt_srng_setup_2_0(soc, pdev, mac_id,
						   mac_for_pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("Failed to send htt srng message for Tx mon dst ring");
		return status;
	}

	return status;
}
512 #endif
513 
/* Low-threshold interrupt handler for the Rx monitor buffer ring: tops the
 * ring back up to rx_mon_ring_fill_level.
 *
 * @int_ctx: interrupt context carrying the soc and per-ring intr stats
 *
 * Return: QDF_STATUS_SUCCESS (replenish result is not propagated)
 */
static
QDF_STATUS dp_rx_mon_refill_buf_ring_2_0(struct dp_intr *int_ctx)
{
	struct dp_soc *soc  = int_ctx->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	union dp_mon_desc_list_elem_t *desc_list = NULL;
	union dp_mon_desc_list_elem_t *tail = NULL;
	struct dp_srng *rx_mon_buf_ring;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	uint32_t num_entries_avail, num_entries, num_entries_in_ring;
	int sync_hw_ptr = 1, hp = 0, tp = 0;
	void *hal_srng;

	rx_mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
	hal_srng = rx_mon_buf_ring->hal_srng;

	intr_stats->num_host2rxdma_ring_masks++;
	mon_soc_be->rx_low_thresh_intrs++;
	hal_srng_access_start(soc->hal_soc, hal_srng);
	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
						   hal_srng,
						   sync_hw_ptr);
	/* entries currently held by HW = ring size - free src entries */
	num_entries_in_ring = rx_mon_buf_ring->num_entries - num_entries_avail;
	/* NOTE(review): hp/tp are fetched but never used afterwards —
	 * presumably kept for debugging; confirm before removing
	 */
	hal_get_sw_hptp(soc->hal_soc, (hal_ring_handle_t)hal_srng, &tp, &hp);
	hal_srng_access_end(soc->hal_soc, hal_srng);

	if (num_entries_avail) {
		/* replenish only the shortfall up to the fill level */
		if (num_entries_in_ring < mon_soc_be->rx_mon_ring_fill_level)
			num_entries = mon_soc_be->rx_mon_ring_fill_level
				      - num_entries_in_ring;
		else
			return QDF_STATUS_SUCCESS;

		/* NOTE(review): replenish status is intentionally ignored;
		 * a later low-threshold interrupt will retry
		 */
		dp_mon_buffers_replenish(soc, rx_mon_buf_ring,
					 &mon_soc_be->rx_desc_mon,
					 num_entries, &desc_list, &tail,
					 NULL);
	}

	return QDF_STATUS_SUCCESS;
}
556 
/* Detach soc-level Rx and Tx monitor resources (rings and desc pools).
 * Counterpart of dp_mon_soc_attach_2_0().
 */
QDF_STATUS dp_mon_soc_detach_2_0(struct dp_soc *soc)
{
	dp_rx_mon_soc_detach_2_0(soc, 0);
	dp_tx_mon_soc_detach_2_0(soc, 0);
	return QDF_STATUS_SUCCESS;
}
563 
564 void dp_mon_soc_deinit_2_0(struct dp_soc *soc)
565 {
566 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
567 	struct dp_mon_soc_be *mon_soc_be =
568 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
569 
570 	if (!mon_soc_be->is_dp_mon_soc_initialized)
571 		return;
572 
573 	dp_rx_mon_soc_deinit_2_0(soc, 0);
574 	dp_tx_mon_soc_deinit_2_0(soc, 0);
575 
576 	mon_soc_be->is_dp_mon_soc_initialized = false;
577 }
578 
579 QDF_STATUS dp_mon_soc_init_2_0(struct dp_soc *soc)
580 {
581 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
582 	struct dp_mon_soc_be *mon_soc_be =
583 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
584 
585 	if (soc->rxdma_mon_buf_ring[0].hal_srng) {
586 		dp_mon_info("%pK: mon soc init is done", soc);
587 		return QDF_STATUS_SUCCESS;
588 	}
589 
590 	if (dp_rx_mon_soc_init_2_0(soc)) {
591 		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
592 		goto fail;
593 	}
594 
595 	if (dp_tx_mon_soc_init_2_0(soc)) {
596 		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
597 		goto fail;
598 	}
599 
600 	mon_soc_be->tx_mon_ring_fill_level = 0;
601 	if (soc->rxdma_mon_buf_ring[0].num_entries < DP_MON_RING_FILL_LEVEL_DEFAULT)
602 		mon_soc_be->rx_mon_ring_fill_level = soc->rxdma_mon_buf_ring[0].num_entries;
603 	else
604 		mon_soc_be->rx_mon_ring_fill_level = DP_MON_RING_FILL_LEVEL_DEFAULT;
605 
606 	mon_soc_be->is_dp_mon_soc_initialized = true;
607 	return QDF_STATUS_SUCCESS;
608 fail:
609 	dp_mon_soc_deinit_2_0(soc);
610 	return QDF_STATUS_E_FAILURE;
611 }
612 
613 QDF_STATUS dp_mon_soc_attach_2_0(struct dp_soc *soc)
614 {
615 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
616 	struct dp_mon_soc_be *mon_soc_be =
617 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
618 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
619 
620 	soc_cfg_ctx = soc->wlan_cfg_ctx;
621 	if (!mon_soc_be) {
622 		dp_mon_err("DP MON SOC is NULL");
623 		return QDF_STATUS_E_FAILURE;
624 	}
625 
626 	if (dp_rx_mon_soc_attach_2_0(soc, 0)) {
627 		dp_mon_err("%pK: " RNG_ERR "rx_mon_buf_ring", soc);
628 		goto fail;
629 	}
630 
631 	if (dp_tx_mon_soc_attach_2_0(soc, 0)) {
632 		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
633 		goto fail;
634 	}
635 
636 	/* allocate sw desc pool */
637 	if (dp_rx_mon_buf_desc_pool_alloc(soc)) {
638 		dp_mon_err("%pK: Rx mon desc pool allocation failed", soc);
639 		goto fail;
640 	}
641 
642 	if (dp_tx_mon_buf_desc_pool_alloc(soc)) {
643 		dp_mon_err("%pK: Tx mon desc pool allocation failed", soc);
644 		goto fail;
645 	}
646 
647 	return QDF_STATUS_SUCCESS;
648 fail:
649 	dp_mon_soc_detach_2_0(soc);
650 	return QDF_STATUS_E_NOMEM;
651 }
652 
/* No per-pdev monitor memory is allocated by dp_mon_pdev_alloc_2_0(), so
 * there is nothing to free here.
 */
static
void dp_mon_pdev_free_2_0(struct dp_pdev *pdev)
{
}
657 
/* Per-pdev monitor alloc hook: only validates that the BE monitor pdev
 * context exists; no memory is allocated here.
 */
static
QDF_STATUS dp_mon_pdev_alloc_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

	if (!mon_pdev_be) {
		dp_mon_err("DP MON PDEV is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
672 
673 #else
/* DISABLE_MON_CONFIG stub: monitor rings are not built, report success. */
static inline
QDF_STATUS dp_mon_htt_srng_setup_2_0(struct dp_soc *soc,
				     struct dp_pdev *pdev,
				     int mac_id,
				     int mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}
682 
/* DISABLE_MON_CONFIG stub: no Tx monitor processing, zero work done. */
static uint32_t
dp_tx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
		      uint32_t mac_id, uint32_t quota)
{
	return 0;
}
689 
690 static inline
691 QDF_STATUS dp_mon_soc_attach_2_0(struct dp_soc *soc)
692 {
693 	return status;
694 }
695 
696 static inline
697 QDF_STATUS dp_mon_soc_detach_2_0(struct dp_soc *soc)
698 {
699 	return status;
700 }
701 
/* DISABLE_MON_CONFIG stub: no rings to deinitialize. */
static inline
void dp_pdev_mon_rings_deinit_2_0(struct dp_pdev *pdev)
{
}
706 
/* DISABLE_MON_CONFIG stub: no rings to initialize. */
static inline
QDF_STATUS dp_pdev_mon_rings_init_2_0(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
712 
/* DISABLE_MON_CONFIG stub: no rings to free. */
static inline
void dp_pdev_mon_rings_free_2_0(struct dp_pdev *pdev)
{
}
717 
/* DISABLE_MON_CONFIG stub: no rings to allocate. */
static inline
QDF_STATUS dp_pdev_mon_rings_alloc_2_0(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
723 
/* DISABLE_MON_CONFIG stub: nothing allocated, nothing to free. */
static inline
void dp_mon_pdev_free_2_0(struct dp_pdev *pdev)
{
}
728 
/* DISABLE_MON_CONFIG stub: nothing to allocate, report success. */
static inline
QDF_STATUS dp_mon_pdev_alloc_2_0(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
734 
735 static inline
736 void dp_vdev_set_monitor_mode_buf_rings_2_0(struct dp_pdev *pdev)
737 {
738 }
739 
/* DISABLE_MON_CONFIG stub: monitor mode rings are not supported. */
static inline
QDF_STATUS dp_vdev_set_monitor_mode_rings_2_0(struct dp_pdev *pdev,
					      uint8_t delayed_replenish)
{
	return QDF_STATUS_SUCCESS;
}
746 #endif
747 
748 void dp_pdev_mon_rings_deinit_2_0(struct dp_pdev *pdev)
749 {
750 	int mac_id = 0;
751 	struct dp_soc *soc = pdev->soc;
752 
753 	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
754 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
755 							 pdev->pdev_id);
756 
757 		dp_rx_mon_pdev_rings_deinit_2_0(pdev, lmac_id);
758 		dp_tx_mon_pdev_rings_deinit_2_0(pdev, lmac_id);
759 	}
760 }
761 
762 QDF_STATUS dp_pdev_mon_rings_init_2_0(struct dp_pdev *pdev)
763 {
764 	struct dp_soc *soc = pdev->soc;
765 	int mac_id = 0;
766 	QDF_STATUS status = QDF_STATUS_SUCCESS;
767 
768 	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
769 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
770 							 pdev->pdev_id);
771 
772 		status = dp_rx_mon_pdev_rings_init_2_0(pdev, lmac_id);
773 		if (QDF_IS_STATUS_ERROR(status)) {
774 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
775 			goto fail;
776 		}
777 
778 		status = dp_tx_mon_pdev_rings_init_2_0(pdev, lmac_id);
779 		if (QDF_IS_STATUS_ERROR(status)) {
780 			dp_mon_err("%pK: " RNG_ERR "tx_mon_dst_ring", soc);
781 			goto fail;
782 		}
783 	}
784 	return status;
785 
786 fail:
787 	dp_pdev_mon_rings_deinit_2_0(pdev);
788 	return status;
789 }
790 
791 void dp_pdev_mon_rings_free_2_0(struct dp_pdev *pdev)
792 {
793 	int mac_id = 0;
794 	struct dp_soc *soc = pdev->soc;
795 
796 	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
797 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
798 							 pdev->pdev_id);
799 
800 		dp_rx_mon_pdev_rings_free_2_0(pdev, lmac_id);
801 		dp_tx_mon_pdev_rings_free_2_0(pdev, lmac_id);
802 	}
803 }
804 
/**
 * dp_pdev_mon_rings_alloc_2_0() - allocate the Rx and Tx monitor
 *	destination rings of every MAC on this pdev; frees everything on
 *	first failure.
 *
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS_SUCCESS, or the first failing status after
 *	   dp_pdev_mon_rings_free_2_0() cleanup.
 */
QDF_STATUS dp_pdev_mon_rings_alloc_2_0(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	int mac_id = 0;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
		int lmac_id =
		dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);

		status = dp_rx_mon_pdev_rings_alloc_2_0(pdev, lmac_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", pdev);
			goto fail;
		}

		status = dp_tx_mon_pdev_rings_alloc_2_0(pdev, lmac_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("%pK: " RNG_ERR "tx_mon_dst_ring", pdev);
			goto fail;
		}
	}
	return status;

fail:
	/* unwind any rings allocated before the failure */
	dp_pdev_mon_rings_free_2_0(pdev);
	return status;
}
833 
/**
 * dp_mon_pool_frag_unmap_and_free() - walk the descriptor pool and, for
 *	every in-use descriptor, DMA-unmap (if still mapped) and free its
 *	buffer fragment.
 *
 * @soc: core txrx main context
 * @mon_desc_pool: monitor descriptor pool to drain
 */
void dp_mon_pool_frag_unmap_and_free(struct dp_soc *soc,
				     struct dp_mon_desc_pool *mon_desc_pool)
{
	int desc_id;
	qdf_frag_t vaddr;
	qdf_dma_addr_t paddr;

	qdf_spin_lock_bh(&mon_desc_pool->lock);
	for (desc_id = 0; desc_id < mon_desc_pool->pool_size; desc_id++) {
		if (mon_desc_pool->array[desc_id].mon_desc.in_use) {
			vaddr = mon_desc_pool->array[desc_id].mon_desc.buf_addr;
			paddr = mon_desc_pool->array[desc_id].mon_desc.paddr;

			/* unmap once; mark unmapped and restore the cookie
			 * to the array index
			 */
			if (!(mon_desc_pool->array[desc_id].mon_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   mon_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				mon_desc_pool->array[desc_id].mon_desc.unmapped = 1;
				mon_desc_pool->array[desc_id].mon_desc.cookie = desc_id;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&mon_desc_pool->lock);
}
859 
/**
 * dp_mon_buffers_replenish() - allocate, map and post monitor buffers into
 *	a monitor SRNG, pairing each ring entry with a SW descriptor.
 *
 * @dp_soc: core txrx main context
 * @dp_mon_srng: destination monitor ring
 * @mon_desc_pool: SW descriptor pool backing the ring
 * @num_req_buffers: number of buffers requested (clamped to ring space and
 *		     freelist availability)
 * @desc_list: in/out chain of SW descriptors; taken from the pool freelist
 *	       when NULL on entry. Leftovers are returned to the freelist.
 * @tail: in/out tail of @desc_list
 * @replenish_cnt_ref: optional counter incremented by the number of buffers
 *		       actually posted
 *
 * Return: QDF_STATUS of the last allocation attempt; QDF_STATUS_E_INVAL for
 *	   a zero-sized request, QDF_STATUS_E_NOMEM when the freelist is
 *	   empty.
 */
QDF_STATUS
dp_mon_buffers_replenish(struct dp_soc *dp_soc,
			 struct dp_srng *dp_mon_srng,
			 struct dp_mon_desc_pool *mon_desc_pool,
			 uint32_t num_req_buffers,
			 union dp_mon_desc_list_elem_t **desc_list,
			 union dp_mon_desc_list_elem_t **tail,
			 uint32_t *replenish_cnt_ref)
{
	uint32_t num_alloc_desc;
	uint32_t num_entries_avail;
	uint32_t count = 0;
	int sync_hw_ptr = 1;
	struct dp_mon_desc mon_desc = {0};
	void *mon_ring_entry;
	union dp_mon_desc_list_elem_t *next;
	void *mon_srng;
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
	struct dp_mon_soc *mon_soc = dp_soc->monitor_soc;

	if (!num_req_buffers) {
		dp_mon_debug("%pK: Received request for 0 buffers replenish",
			     dp_soc);
		ret = QDF_STATUS_E_INVAL;
		goto free_desc;
	}

	mon_srng = dp_mon_srng->hal_srng;

	hal_srng_access_start(dp_soc->hal_soc, mon_srng);

	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   mon_srng, sync_hw_ptr);

	if (!num_entries_avail) {
		hal_srng_access_end(dp_soc->hal_soc, mon_srng);
		goto free_desc;
	}
	/* never post more than the ring can accept */
	if (num_entries_avail < num_req_buffers)
		num_req_buffers = num_entries_avail;

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_mon_get_free_desc_list(dp_soc,
							   mon_desc_pool,
							   num_req_buffers,
							   desc_list,
							   tail);

		if (!num_alloc_desc) {
			dp_mon_debug("%pK: no free rx_descs in freelist", dp_soc);
			hal_srng_access_end(dp_soc->hal_soc, mon_srng);
			return QDF_STATUS_E_NOMEM;
		}

		dp_mon_info("%pK: %d rx desc allocated",
			    dp_soc, num_alloc_desc);

		/* freelist may have fewer descs than requested */
		num_req_buffers = num_alloc_desc;
	}

	while (count <= num_req_buffers - 1) {
		ret = dp_mon_frag_alloc_and_map(dp_soc,
						&mon_desc,
						mon_desc_pool);

		/* NOTE(review): E_FAULT (map failure) retries the same
		 * iteration; a persistently failing map would spin here —
		 * confirm qdf_mem_map_page failures are transient
		 */
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			if (qdf_unlikely(ret  == QDF_STATUS_E_FAULT))
				continue;
			break;
		}

		count++;
		next = (*desc_list)->next;
		mon_ring_entry = hal_srng_src_get_next(
						dp_soc->hal_soc,
						mon_srng);

		if (!mon_ring_entry)
			break;

		/* descriptor taken from the freelist must be idle */
		qdf_assert_always((*desc_list)->mon_desc.in_use == 0);

		(*desc_list)->mon_desc.in_use = 1;
		(*desc_list)->mon_desc.unmapped = 0;
		(*desc_list)->mon_desc.buf_addr = mon_desc.buf_addr;
		(*desc_list)->mon_desc.paddr = mon_desc.paddr;
		(*desc_list)->mon_desc.magic = DP_MON_DESC_MAGIC;

		mon_soc->stats.frag_alloc++;
		/* publish buffer address (+ SW cookie) into the HW ring */
		hal_mon_buff_addr_info_set(dp_soc->hal_soc,
					   mon_ring_entry,
					   &((*desc_list)->mon_desc),
					   mon_desc.paddr);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, mon_srng);
	if (replenish_cnt_ref)
		*replenish_cnt_ref += count;

free_desc:
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list) {
		dp_mon_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						  mon_desc_pool);
	}

	return ret;
}
975 
976 QDF_STATUS dp_mon_desc_pool_alloc(struct dp_soc *soc,
977 				  enum dp_ctxt_type ctx_type,
978 				  uint32_t pool_size,
979 				  struct dp_mon_desc_pool *mon_desc_pool)
980 {
981 	size_t mem_size;
982 
983 	mon_desc_pool->pool_size = pool_size - 1;
984 	mem_size = mon_desc_pool->pool_size *
985 			sizeof(union dp_mon_desc_list_elem_t);
986 	mon_desc_pool->array = dp_context_alloc_mem(soc, ctx_type, mem_size);
987 
988 	return QDF_STATUS_SUCCESS;
989 }
990 
/**
 * dp_vdev_set_monitor_mode_buf_rings_tx_2_0() - configure the Tx monitor
 *	buffer ring for monitor mode: program a low threshold, send the HTT
 *	SRNG setup message and top up buffers to @num_of_buffers.
 *
 * @pdev: Datapath pdev handle
 * @num_of_buffers: target ring fill level
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on HTT setup or
 *	   buffer allocation failure.
 */
QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_tx_2_0(struct dp_pdev *pdev,
						     uint16_t num_of_buffers)
{
	int tx_mon_max_entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	QDF_STATUS status;

	if (!mon_soc_be) {
		dp_mon_err("DP MON SOC is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	tx_mon_max_entries =
		wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);

	/* low threshold at a quarter of the configured ring size */
	hal_set_low_threshold(mon_soc_be->tx_mon_buf_ring.hal_srng,
			      tx_mon_max_entries >> 2);
	status = htt_srng_setup(soc->htt_handle, 0,
				mon_soc_be->tx_mon_buf_ring.hal_srng,
				TX_MONITOR_BUF);

	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("Failed to send htt srng setup message for Tx mon buf ring");
		return status;
	}

	/* grow the fill level up to num_of_buffers; the += of the delta is
	 * equivalent to setting it to num_of_buffers
	 */
	if (mon_soc_be->tx_mon_ring_fill_level < num_of_buffers) {
		if (dp_tx_mon_buffers_alloc(soc,
					    (num_of_buffers -
					     mon_soc_be->tx_mon_ring_fill_level))) {
			dp_mon_err("%pK: Tx mon buffers allocation failed",
				   soc);
			return QDF_STATUS_E_FAILURE;
		}
		mon_soc_be->tx_mon_ring_fill_level +=
					(num_of_buffers -
					mon_soc_be->tx_mon_ring_fill_level);
	}

	return QDF_STATUS_SUCCESS;
}
1037 
1038 #if defined(WDI_EVENT_ENABLE) &&\
1039 	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG) ||\
1040 	 defined(WLAN_FEATURE_PKT_CAPTURE_V2))
/* Register the FW PPDU stats indication handler when at least one consumer
 * (enhanced stats / pktlog / pkt-capture v2) is compiled in.
 */
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops->mon_ppdu_stats_ind_handler =
					dp_ppdu_stats_ind_handler;
}
1047 #else
/* No PPDU stats consumers compiled in: leave the handler unregistered. */
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
}
1052 #endif
1053 
1054 static void dp_mon_register_intr_ops_2_0(struct dp_soc *soc)
1055 {
1056 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1057 
1058 	mon_soc->mon_ops->rx_mon_refill_buf_ring =
1059 			dp_rx_mon_refill_buf_ring_2_0,
1060 	mon_soc->mon_ops->tx_mon_refill_buf_ring =
1061 			NULL,
1062 	mon_soc->mon_rx_process = dp_rx_mon_process_2_0;
1063 	dp_mon_ppdu_stats_handler_register(mon_soc);
1064 }
1065 
1066 #ifdef MONITOR_TLV_RECORDING_ENABLE
1067 /**
1068  * dp_mon_pdev_initialize_tlv_logger() - initialize dp_mon_tlv_logger for
1069  *					Rx and Tx
1070  *
1071  * @tlv_logger : double pointer to dp_mon_tlv_logger
1072  * @direction: Rx/Tx
1073  * Return: QDF_STATUS
1074  */
1075 static QDF_STATUS
1076 dp_mon_pdev_initialize_tlv_logger(struct dp_mon_tlv_logger **tlv_logger,
1077 				  uint8_t direction)
1078 {
1079 	struct dp_mon_tlv_logger *tlv_log = NULL;
1080 
1081 	tlv_log = qdf_mem_malloc(sizeof(struct dp_mon_tlv_logger));
1082 	if (!tlv_log) {
1083 		dp_mon_err("Memory allocation failed");
1084 		return QDF_STATUS_E_NOMEM;
1085 	}
1086 
1087 	if (direction == MONITOR_TLV_RECORDING_RX)
1088 		tlv_log->buff = qdf_mem_malloc(MAX_TLV_LOGGING_SIZE *
1089 					sizeof(struct dp_mon_tlv_info));
1090 	else if (direction == MONITOR_TLV_RECORDING_TX)
1091 		tlv_log->buff = qdf_mem_malloc(MAX_TLV_LOGGING_SIZE *
1092 					sizeof(struct dp_tx_mon_tlv_info));
1093 
1094 	if (!tlv_log->buff) {
1095 		dp_mon_err("Memory allocation failed");
1096 		qdf_mem_free(tlv_log);
1097 		tlv_log = NULL;
1098 		return QDF_STATUS_E_NOMEM;
1099 	}
1100 
1101 	tlv_log->curr_ppdu_pos = 0;
1102 	tlv_log->wrap_flag = 0;
1103 	tlv_log->ppdu_start_idx = 0;
1104 	tlv_log->mpdu_idx = MAX_PPDU_START_TLV_NUM;
1105 	tlv_log->ppdu_end_idx = MAX_PPDU_START_TLV_NUM + MAX_MPDU_TLV_NUM;
1106 	tlv_log->max_ppdu_start_idx = MAX_PPDU_START_TLV_NUM - 1;
1107 	tlv_log->max_mpdu_idx = MAX_PPDU_START_TLV_NUM + MAX_MPDU_TLV_NUM - 1;
1108 	tlv_log->max_ppdu_end_idx = MAX_TLVS_PER_PPDU - 1;
1109 
1110 	tlv_log->tlv_logging_enable = 1;
1111 	*tlv_logger = tlv_log;
1112 
1113 	return QDF_STATUS_SUCCESS;
1114 }
1115 
1116 /*
1117  * dp_mon_pdev_tlv_logger_init() - initializes struct dp_mon_tlv_logger
1118  *
1119  * @pdev: pointer to dp_pdev
1120  *
1121  * Return: QDF_STATUS
1122  */
1123 static
1124 QDF_STATUS dp_mon_pdev_tlv_logger_init(struct dp_pdev *pdev)
1125 {
1126 	struct dp_mon_pdev *mon_pdev = NULL;
1127 	struct dp_mon_pdev_be *mon_pdev_be = NULL;
1128 	struct dp_soc *soc = NULL;
1129 
1130 	if (!pdev)
1131 		return QDF_STATUS_E_INVAL;
1132 
1133 	soc = pdev->soc;
1134 	mon_pdev = pdev->monitor_pdev;
1135 	if (!mon_pdev)
1136 		return QDF_STATUS_E_INVAL;
1137 
1138 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1139 
1140 	if (dp_mon_pdev_initialize_tlv_logger(&mon_pdev_be->rx_tlv_log,
1141 					      MONITOR_TLV_RECORDING_RX))
1142 		return QDF_STATUS_E_FAILURE;
1143 
1144 	if (dp_mon_pdev_initialize_tlv_logger(&mon_pdev_be->tx_tlv_log,
1145 					      MONITOR_TLV_RECORDING_TX))
1146 		return QDF_STATUS_E_FAILURE;
1147 
1148 	return QDF_STATUS_SUCCESS;
1149 }
1150 
1151 /**
1152  * dp_mon_pdev_deinitialize_tlv_logger() - deinitialize dp_mon_tlv_logger for
1153  *					Rx and Tx
1154  *
1155  * @tlv_logger : double pointer to dp_mon_tlv_logger
1156  *
1157  * Return: QDF_STATUS
1158  */
1159 static QDF_STATUS
1160 dp_mon_pdev_deinitialize_tlv_logger(struct dp_mon_tlv_logger **tlv_logger)
1161 {
1162 	struct dp_mon_tlv_logger *tlv_log = *tlv_logger;
1163 
1164 	if (!tlv_log)
1165 		return QDF_STATUS_SUCCESS;
1166 	if (!(tlv_log->buff))
1167 		return QDF_STATUS_E_INVAL;
1168 
1169 	tlv_log->tlv_logging_enable = 0;
1170 	qdf_mem_free(tlv_log->buff);
1171 	tlv_log->buff = NULL;
1172 	qdf_mem_free(tlv_log);
1173 	tlv_log = NULL;
1174 	*tlv_logger = NULL;
1175 
1176 	return QDF_STATUS_SUCCESS;
1177 }
1178 
1179 /*
1180  * dp_mon_pdev_tlv_logger_deinit() - deinitializes struct dp_mon_tlv_logger
1181  *
1182  * @pdev: pointer to dp_pdev
1183  *
1184  * Return: QDF_STATUS
1185  */
1186 static
1187 QDF_STATUS dp_mon_pdev_tlv_logger_deinit(struct dp_pdev *pdev)
1188 {
1189 	struct dp_mon_pdev *mon_pdev = NULL;
1190 	struct dp_mon_pdev_be *mon_pdev_be = NULL;
1191 
1192 	if (!pdev)
1193 		return QDF_STATUS_E_INVAL;
1194 
1195 	mon_pdev = pdev->monitor_pdev;
1196 	if (!mon_pdev)
1197 		return QDF_STATUS_E_INVAL;
1198 
1199 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1200 
1201 	if (dp_mon_pdev_deinitialize_tlv_logger(&mon_pdev_be->rx_tlv_log))
1202 		return QDF_STATUS_E_FAILURE;
1203 	if (dp_mon_pdev_deinitialize_tlv_logger(&mon_pdev_be->tx_tlv_log))
1204 		return QDF_STATUS_E_FAILURE;
1205 
1206 	return QDF_STATUS_SUCCESS;
1207 }
1208 
1209 #else
1210 
/* Stub: TLV recording compiled out, nothing to allocate */
static inline
QDF_STATUS dp_mon_pdev_tlv_logger_init(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/* Stub: TLV recording compiled out, nothing to free */
static inline
QDF_STATUS dp_mon_pdev_tlv_logger_deinit(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
1222 
1223 #endif
1224 
1225 /**
1226  * dp_mon_register_feature_ops_2_0() - register feature ops
1227  *
1228  * @soc: dp soc context
1229  *
1230  * @return: void
1231  */
1232 static void
1233 dp_mon_register_feature_ops_2_0(struct dp_soc *soc)
1234 {
1235 	struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);
1236 
1237 	if (!mon_ops) {
1238 		dp_err("mon_ops is NULL, feature ops registration failed");
1239 		return;
1240 	}
1241 
1242 	mon_ops->mon_config_debug_sniffer = dp_config_debug_sniffer;
1243 	mon_ops->mon_peer_tx_init = NULL;
1244 	mon_ops->mon_peer_tx_cleanup = NULL;
1245 	mon_ops->mon_htt_ppdu_stats_attach = dp_htt_ppdu_stats_attach;
1246 	mon_ops->mon_htt_ppdu_stats_detach = dp_htt_ppdu_stats_detach;
1247 	mon_ops->mon_print_pdev_rx_mon_stats = dp_print_pdev_rx_mon_stats;
1248 	mon_ops->mon_set_bsscolor = dp_mon_set_bsscolor;
1249 	mon_ops->mon_pdev_get_filter_ucast_data =
1250 					dp_lite_mon_get_filter_ucast_data;
1251 	mon_ops->mon_pdev_get_filter_mcast_data =
1252 					dp_lite_mon_get_filter_mcast_data;
1253 	mon_ops->mon_pdev_get_filter_non_data =
1254 					dp_lite_mon_get_filter_non_data;
1255 	mon_ops->mon_neighbour_peer_add_ast = NULL;
1256 #ifdef WLAN_TX_PKT_CAPTURE_ENH_BE
1257 	mon_ops->mon_peer_tid_peer_id_update = NULL;
1258 	mon_ops->mon_tx_capture_debugfs_init = NULL;
1259 	mon_ops->mon_tx_add_to_comp_queue = NULL;
1260 	mon_ops->mon_print_pdev_tx_capture_stats =
1261 					dp_print_pdev_tx_monitor_stats_2_0;
1262 	mon_ops->mon_config_enh_tx_capture = dp_config_enh_tx_monitor_2_0;
1263 	mon_ops->mon_tx_peer_filter = dp_peer_set_tx_capture_enabled_2_0;
1264 #endif
1265 #if (defined(WIFI_MONITOR_SUPPORT) && defined(WLAN_TX_MON_CORE_DEBUG))
1266 	mon_ops->mon_peer_tid_peer_id_update = NULL;
1267 	mon_ops->mon_tx_capture_debugfs_init = NULL;
1268 	mon_ops->mon_tx_add_to_comp_queue = NULL;
1269 	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
1270 	mon_ops->mon_config_enh_tx_capture = dp_config_enh_tx_core_monitor_2_0;
1271 	mon_ops->mon_tx_peer_filter = NULL;
1272 #endif
1273 #ifdef WLAN_RX_PKT_CAPTURE_ENH
1274 	mon_ops->mon_config_enh_rx_capture = NULL;
1275 #endif
1276 #ifdef QCA_SUPPORT_BPR
1277 	mon_ops->mon_set_bpr_enable = dp_set_bpr_enable_2_0;
1278 #endif
1279 #ifdef ATH_SUPPORT_NAC
1280 	mon_ops->mon_set_filter_neigh_peers = NULL;
1281 #endif
1282 #ifdef WLAN_ATF_ENABLE
1283 	mon_ops->mon_set_atf_stats_enable = dp_set_atf_stats_enable;
1284 #endif
1285 #ifdef FEATURE_NAC_RSSI
1286 	mon_ops->mon_filter_neighbour_peer = NULL;
1287 #endif
1288 #ifdef QCA_MCOPY_SUPPORT
1289 	mon_ops->mon_filter_setup_mcopy_mode = NULL;
1290 	mon_ops->mon_filter_reset_mcopy_mode = NULL;
1291 	mon_ops->mon_mcopy_check_deliver = NULL;
1292 #endif
1293 #ifdef QCA_ENHANCED_STATS_SUPPORT
1294 	mon_ops->mon_filter_setup_enhanced_stats =
1295 				dp_mon_filter_setup_enhanced_stats_2_0;
1296 	mon_ops->mon_filter_reset_enhanced_stats =
1297 				dp_mon_filter_reset_enhanced_stats_2_0;
1298 	mon_ops->mon_tx_enable_enhanced_stats =
1299 				dp_mon_tx_enable_enhanced_stats_2_0;
1300 	mon_ops->mon_tx_disable_enhanced_stats =
1301 				dp_mon_tx_disable_enhanced_stats_2_0;
1302 	mon_ops->mon_ppdu_stats_feat_enable_check =
1303 				dp_ppdu_stats_feat_enable_check_2_0;
1304 	mon_ops->mon_tx_stats_update = dp_mon_tx_stats_update_2_0;
1305 	mon_ops->mon_ppdu_desc_deliver = dp_ppdu_desc_deliver;
1306 #ifdef WDI_EVENT_ENABLE
1307 	mon_ops->mon_ppdu_desc_notify = dp_ppdu_desc_notify_2_0;
1308 #endif
1309 #endif
1310 #ifdef WLAN_RX_PKT_CAPTURE_ENH
1311 	mon_ops->mon_filter_setup_rx_enh_capture = NULL;
1312 #endif
1313 #ifdef WDI_EVENT_ENABLE
1314 	mon_ops->mon_set_pktlog_wifi3 = dp_set_pktlog_wifi3;
1315 	mon_ops->mon_filter_setup_rx_pkt_log_full =
1316 				dp_mon_filter_setup_rx_pkt_log_full_2_0;
1317 	mon_ops->mon_filter_reset_rx_pkt_log_full =
1318 				dp_mon_filter_reset_rx_pkt_log_full_2_0;
1319 	mon_ops->mon_filter_setup_rx_pkt_log_lite =
1320 				dp_mon_filter_setup_rx_pkt_log_lite_2_0;
1321 	mon_ops->mon_filter_reset_rx_pkt_log_lite =
1322 				dp_mon_filter_reset_rx_pkt_log_lite_2_0;
1323 	mon_ops->mon_filter_setup_rx_pkt_log_cbf =
1324 				dp_mon_filter_setup_rx_pkt_log_cbf_2_0;
1325 	mon_ops->mon_filter_reset_rx_pkt_log_cbf =
1326 				dp_mon_filter_reset_rx_pktlog_cbf_2_0;
1327 #if defined(BE_PKTLOG_SUPPORT) && defined(WLAN_PKT_CAPTURE_TX_2_0)
1328 	mon_ops->mon_filter_setup_pktlog_hybrid =
1329 				dp_mon_filter_setup_pktlog_hybrid_2_0;
1330 	mon_ops->mon_filter_reset_pktlog_hybrid =
1331 				dp_mon_filter_reset_pktlog_hybrid_2_0;
1332 #endif
1333 #endif
1334 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
1335 	mon_ops->mon_pktlogmod_exit = dp_pktlogmod_exit;
1336 #endif
1337 	mon_ops->rx_hdr_length_set = dp_rx_mon_hdr_length_set;
1338 	mon_ops->rx_packet_length_set = dp_rx_mon_packet_length_set;
1339 	mon_ops->rx_mon_enable = dp_rx_mon_enable_set;
1340 	mon_ops->rx_wmask_subscribe = dp_rx_mon_word_mask_subscribe;
1341 	mon_ops->rx_pkt_tlv_offset = dp_rx_mon_pkt_tlv_offset_subscribe;
1342 	mon_ops->rx_enable_mpdu_logging = dp_rx_mon_enable_mpdu_logging;
1343 	mon_ops->mon_neighbour_peers_detach = NULL;
1344 	mon_ops->mon_vdev_set_monitor_mode_buf_rings =
1345 				dp_vdev_set_monitor_mode_buf_rings_2_0;
1346 	mon_ops->mon_vdev_set_monitor_mode_rings =
1347 				dp_vdev_set_monitor_mode_rings_2_0;
1348 #ifdef QCA_ENHANCED_STATS_SUPPORT
1349 	mon_ops->mon_rx_stats_update = dp_rx_mon_stats_update_2_0;
1350 	mon_ops->mon_rx_populate_ppdu_usr_info =
1351 			dp_rx_mon_populate_ppdu_usr_info_2_0;
1352 	mon_ops->mon_rx_populate_ppdu_info = dp_rx_mon_populate_ppdu_info_2_0;
1353 #endif
1354 #ifdef QCA_UNDECODED_METADATA_SUPPORT
1355 	mon_ops->mon_config_undecoded_metadata_capture =
1356 		dp_mon_config_undecoded_metadata_capture;
1357 	mon_ops->mon_filter_setup_undecoded_metadata_capture =
1358 		dp_mon_filter_setup_undecoded_metadata_capture_2_0;
1359 	mon_ops->mon_filter_reset_undecoded_metadata_capture =
1360 		dp_mon_filter_reset_undecoded_metadata_capture_2_0;
1361 #endif
1362 	mon_ops->rx_enable_fpmo = dp_rx_mon_enable_fpmo;
1363 	mon_ops->mon_rx_print_advanced_stats =
1364 		dp_mon_rx_print_advanced_stats_2_0;
1365 	mon_ops->mon_mac_filter_set = NULL;
1366 }
1367 
/*
 * monitor_ops_2_0 - internal monitor ops table for 2.0 targets.
 * Array-valued members use index [1] for the 2.0 handler; index [0] is
 * left NULL here (presumably a different target context — confirm with
 * the dp_mon_ops definition).
 */
struct dp_mon_ops monitor_ops_2_0 = {
	.mon_soc_cfg_init = dp_mon_soc_cfg_init,
	.mon_soc_attach[0] = NULL,
	.mon_soc_attach[1] = dp_mon_soc_attach_2_0,
	.mon_soc_detach[0] = NULL,
	.mon_soc_detach[1] = dp_mon_soc_detach_2_0,
	.mon_soc_init[0] = NULL,
	.mon_soc_init[1] = dp_mon_soc_init_2_0,
	.mon_soc_deinit[0] = NULL,
	.mon_soc_deinit[1] = dp_mon_soc_deinit_2_0,
	.mon_pdev_alloc = dp_mon_pdev_alloc_2_0,
	.mon_pdev_free = dp_mon_pdev_free_2_0,
	.mon_pdev_attach = dp_mon_pdev_attach,
	.mon_pdev_detach = dp_mon_pdev_detach,
	.mon_pdev_init = dp_mon_pdev_init,
	.mon_pdev_deinit = dp_mon_pdev_deinit,
	.mon_vdev_attach = dp_mon_vdev_attach,
	.mon_vdev_detach = dp_mon_vdev_detach,
	.mon_peer_attach = dp_mon_peer_attach,
	.mon_peer_detach = dp_mon_peer_detach,
	.mon_peer_get_peerstats_ctx = dp_mon_peer_get_peerstats_ctx,
	.mon_peer_reset_stats = dp_mon_peer_reset_stats,
	.mon_peer_get_stats = dp_mon_peer_get_stats,
	.mon_invalid_peer_update_pdev_stats =
				dp_mon_invalid_peer_update_pdev_stats,
	.mon_peer_get_stats_param = dp_mon_peer_get_stats_param,
	.mon_flush_rings = NULL,
#if !defined(DISABLE_MON_CONFIG)
	.mon_pdev_htt_srng_setup[0] = NULL,
	.mon_pdev_htt_srng_setup[1] = dp_mon_pdev_htt_srng_setup_2_0,
	.mon_soc_htt_srng_setup = dp_mon_soc_htt_srng_setup_2_0,
#endif
#if defined(DP_CON_MON)
	.mon_service_rings = NULL,
#endif
#ifndef DISABLE_MON_CONFIG
	/* per-soc Rx processing is installed at runtime by
	 * dp_mon_register_intr_ops_2_0, not via this table
	 */
	.mon_rx_process = NULL,
#endif
#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
	.mon_drop_packets_for_mac = NULL,
#endif
	/* vdev/reap timers are not used by the 2.0 monitor */
	.mon_vdev_timer_init = NULL,
	.mon_vdev_timer_start = NULL,
	.mon_vdev_timer_stop = NULL,
	.mon_vdev_timer_deinit = NULL,
	.mon_reap_timer_init = NULL,
	.mon_reap_timer_start = NULL,
	.mon_reap_timer_stop = NULL,
	.mon_reap_timer_deinit = NULL,
	.mon_filter_setup_tx_mon_mode = dp_mon_filter_setup_tx_mon_mode_2_0,
	.mon_filter_reset_tx_mon_mode = dp_mon_filter_reset_tx_mon_mode_2_0,
	.mon_rings_alloc[0] = NULL,
	.mon_rings_free[0] = NULL,
	.mon_rings_init[0] = NULL,
	.mon_rings_deinit[0] = NULL,
	.mon_rings_alloc[1] = dp_pdev_mon_rings_alloc_2_0,
	.mon_rings_free[1] = dp_pdev_mon_rings_free_2_0,
	.mon_rings_init[1] = dp_pdev_mon_rings_init_2_0,
	.mon_rings_deinit[1] = dp_pdev_mon_rings_deinit_2_0,
	.rx_mon_desc_pool_init = NULL,
	.rx_mon_desc_pool_deinit = NULL,
	.rx_mon_desc_pool_alloc = NULL,
	.rx_mon_desc_pool_free = NULL,
	.rx_mon_buffers_alloc = NULL,
	.rx_mon_buffers_free = NULL,
	.tx_mon_desc_pool_init = NULL,
	.tx_mon_desc_pool_deinit = NULL,
	.tx_mon_desc_pool_alloc = NULL,
	.tx_mon_desc_pool_free = NULL,
#ifndef DISABLE_MON_CONFIG
	.mon_register_intr_ops = dp_mon_register_intr_ops_2_0,
#endif
	.mon_register_feature_ops = dp_mon_register_feature_ops_2_0,
#ifdef WLAN_TX_PKT_CAPTURE_ENH_BE
	.mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach_2_0,
	.mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach_2_0,
	.mon_peer_tx_capture_filter_check = NULL,
#endif
#if (defined(WIFI_MONITOR_SUPPORT) && defined(WLAN_TX_MON_CORE_DEBUG))
	.mon_tx_ppdu_stats_attach = NULL,
	.mon_tx_ppdu_stats_detach = NULL,
	.mon_peer_tx_capture_filter_check = NULL,
#endif
	.mon_pdev_ext_init = dp_mon_pdev_ext_init_2_0,
	.mon_pdev_ext_deinit = dp_mon_pdev_ext_deinit_2_0,
	.mon_lite_mon_alloc = dp_lite_mon_alloc,
	.mon_lite_mon_dealloc = dp_lite_mon_dealloc,
	.mon_lite_mon_vdev_delete = dp_lite_mon_vdev_delete,
	.mon_lite_mon_disable_rx = dp_lite_mon_disable_rx,
	.mon_lite_mon_is_rx_adv_filter_enable = dp_lite_mon_is_rx_adv_filter_enable,
#ifdef QCA_KMEM_CACHE_SUPPORT
	.mon_rx_ppdu_info_cache_create = dp_rx_mon_ppdu_info_cache_create,
	.mon_rx_ppdu_info_cache_destroy = dp_rx_mon_ppdu_info_cache_destroy,
#endif
	.mon_rx_pdev_tlv_logger_init = dp_mon_pdev_tlv_logger_init,
	.mon_rx_pdev_tlv_logger_deinit = dp_mon_pdev_tlv_logger_deinit,
};
1465 
/*
 * dp_ops_mon_2_0 - cdp (external control path) monitor ops for 2.0
 * targets. NULL entries are control operations not exposed by this
 * implementation.
 */
struct cdp_mon_ops dp_ops_mon_2_0 = {
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = NULL,
	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
	.config_full_mon_mode = NULL,
	.soc_config_full_mon_mode = NULL,
	.get_mon_pdev_rx_stats = dp_pdev_get_rx_mon_stats,
	.txrx_enable_mon_reap_timer = NULL,
#ifdef QCA_ENHANCED_STATS_SUPPORT
	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	.txrx_update_filter_neighbour_peers = dp_lite_mon_config_nac_peer,
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
	.txrx_vdev_config_for_nac_rssi = dp_lite_mon_config_nac_rssi_peer,
	.txrx_vdev_get_neighbour_rssi = dp_lite_mon_get_nac_peer_rssi,
#endif
#ifdef QCA_SUPPORT_LITE_MONITOR
	.txrx_set_lite_mon_config = dp_lite_mon_set_config,
	.txrx_get_lite_mon_config = dp_lite_mon_get_config,
	.txrx_set_lite_mon_peer_config = dp_lite_mon_set_peer_config,
	.txrx_get_lite_mon_peer_config = dp_lite_mon_get_peer_config,
	.txrx_is_lite_mon_enabled = dp_lite_mon_is_enabled,
	.txrx_get_lite_mon_legacy_feature_enabled =
				dp_lite_mon_get_legacy_feature_enabled,
#endif
	.txrx_set_mon_pdev_params_rssi_dbm_conv =
				dp_mon_pdev_params_rssi_dbm_conv,
#ifdef WLAN_CONFIG_TELEMETRY_AGENT
	.txrx_update_pdev_mon_telemetry_airtime_stats =
			dp_pdev_update_telemetry_airtime_stats,
#endif
	.txrx_update_mon_mac_filter = NULL,
#ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE
	.start_local_pkt_capture = NULL,
	.stop_local_pkt_capture = NULL,
	.is_local_pkt_capture_running = NULL,
#endif /* WLAN_FEATURE_LOCAL_PKT_CAPTURE */
};
1508 
1509 #if defined(WLAN_PKT_CAPTURE_TX_2_0) || \
1510 defined(WLAN_PKT_CAPTURE_RX_2_0)
1511 void dp_mon_ops_register_cmn_2_0(struct dp_mon_soc *mon_soc)
1512 {
1513 	struct dp_mon_ops *mon_ops = mon_soc->mon_ops;
1514 
1515 	if (!mon_ops) {
1516 		dp_err("tx 2.0 ops registration failed");
1517 		return;
1518 	}
1519 	mon_ops->tx_mon_filter_alloc = dp_mon_filter_alloc_2_0;
1520 	mon_ops->tx_mon_filter_dealloc = dp_mon_filter_dealloc_2_0;
1521 }
1522 #endif
1523 
1524 #ifdef WLAN_PKT_CAPTURE_TX_2_0
1525 void dp_mon_ops_register_tx_2_0(struct dp_mon_soc *mon_soc)
1526 {
1527 	struct dp_mon_ops *mon_ops = mon_soc->mon_ops;
1528 
1529 	if (!mon_ops) {
1530 		dp_err("tx 2.0 ops registration failed");
1531 		return;
1532 	}
1533 	mon_ops->tx_mon_filter_update = dp_tx_mon_filter_update_2_0;
1534 #ifndef DISABLE_MON_CONFIG
1535 	mon_ops->mon_tx_process = dp_tx_mon_process_2_0;
1536 	mon_ops->print_txmon_ring_stat = dp_tx_mon_print_ring_stat_2_0;
1537 #endif
1538 }
1539 #endif
1540 
1541 #ifdef WLAN_PKT_CAPTURE_RX_2_0
1542 void dp_mon_ops_register_rx_2_0(struct dp_mon_soc *mon_soc)
1543 {
1544 	struct dp_mon_ops *mon_ops = mon_soc->mon_ops;
1545 
1546 	if (!mon_ops) {
1547 		dp_err("rx 2.0 ops registration failed");
1548 		return;
1549 	}
1550 	mon_ops->mon_filter_setup_rx_mon_mode =
1551 				dp_mon_filter_setup_rx_mon_mode_2_0;
1552 	mon_ops->mon_filter_reset_rx_mon_mode =
1553 				dp_mon_filter_reset_rx_mon_mode_2_0;
1554 	mon_ops->rx_mon_filter_update = dp_rx_mon_filter_update_2_0;
1555 }
1556 #endif
1557 
1558 #ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
1559 void dp_mon_ops_register_2_0(struct dp_mon_soc *mon_soc)
1560 {
1561 	struct dp_mon_ops *mon_ops = NULL;
1562 
1563 	if (mon_soc->mon_ops) {
1564 		dp_mon_err("monitor ops is allocated");
1565 		return;
1566 	}
1567 
1568 	mon_ops = qdf_mem_malloc(sizeof(struct dp_mon_ops));
1569 	if (!mon_ops) {
1570 		dp_mon_err("Failed to allocate memory for mon ops");
1571 		return;
1572 	}
1573 
1574 	qdf_mem_copy(mon_ops, &monitor_ops_2_0, sizeof(struct dp_mon_ops));
1575 	mon_soc->mon_ops = mon_ops;
1576 	dp_mon_ops_register_tx_2_0(mon_soc);
1577 	dp_mon_ops_register_rx_2_0(mon_soc);
1578 	dp_mon_ops_register_cmn_2_0(mon_soc);
1579 }
1580 
1581 void dp_mon_cdp_ops_register_2_0(struct cdp_ops *ops)
1582 {
1583 	struct cdp_mon_ops *mon_ops = NULL;
1584 
1585 	if (ops->mon_ops) {
1586 		dp_mon_err("cdp monitor ops is allocated");
1587 		return;
1588 	}
1589 
1590 	mon_ops = qdf_mem_malloc(sizeof(struct cdp_mon_ops));
1591 	if (!mon_ops) {
1592 		dp_mon_err("Failed to allocate memory for mon ops");
1593 		return;
1594 	}
1595 
1596 	qdf_mem_copy(mon_ops, &dp_ops_mon_2_0, sizeof(struct cdp_mon_ops));
1597 	ops->mon_ops = mon_ops;
1598 }
1599 #else
void dp_mon_ops_register_2_0(struct dp_mon_soc *mon_soc)
{
	/* Per-soc ops tables disabled: share the static table directly */
	mon_soc->mon_ops = &monitor_ops_2_0;
}

void dp_mon_cdp_ops_register_2_0(struct cdp_ops *ops)
{
	/* Per-soc ops tables disabled: share the static table directly */
	ops->mon_ops = &dp_ops_mon_2_0;
}
1609 #endif
1610 
1611 #ifdef QCA_ENHANCED_STATS_SUPPORT
1612 static void
1613 dp_enable_enhanced_stats_for_each_pdev(struct dp_soc *soc, void *arg,
1614 				       int chip_id) {
1615 	uint8_t i = 0;
1616 
1617 	for (i = 0; i < MAX_PDEV_CNT; i++)
1618 		dp_enable_enhanced_stats(dp_soc_to_cdp_soc_t(soc), i);
1619 }
1620 
1621 QDF_STATUS
1622 dp_enable_enhanced_stats_2_0(struct cdp_soc_t *soc, uint8_t pdev_id)
1623 {
1624 	struct dp_soc *dp_soc = cdp_soc_t_to_dp_soc(soc);
1625 	struct dp_soc_be *be_soc = NULL;
1626 
1627 	be_soc = dp_get_be_soc_from_dp_soc(dp_soc);
1628 
1629 	/* enable only on one soc if MLD is disabled */
1630 	if (!be_soc->mlo_enabled || !be_soc->ml_ctxt) {
1631 		dp_enable_enhanced_stats(soc, pdev_id);
1632 		return QDF_STATUS_SUCCESS;
1633 	}
1634 
1635 	dp_mlo_iter_ptnr_soc(be_soc,
1636 			     dp_enable_enhanced_stats_for_each_pdev,
1637 			     NULL);
1638 	return QDF_STATUS_SUCCESS;
1639 }
1640 
1641 static void
1642 dp_disable_enhanced_stats_for_each_pdev(struct dp_soc *soc, void *arg,
1643 					int chip_id) {
1644 	uint8_t i = 0;
1645 
1646 	for (i = 0; i < MAX_PDEV_CNT; i++)
1647 		dp_disable_enhanced_stats(dp_soc_to_cdp_soc_t(soc), i);
1648 }
1649 
1650 QDF_STATUS
1651 dp_disable_enhanced_stats_2_0(struct cdp_soc_t *soc, uint8_t pdev_id)
1652 {
1653 	struct dp_soc *dp_soc = cdp_soc_t_to_dp_soc(soc);
1654 	struct dp_soc_be *be_soc = NULL;
1655 
1656 	be_soc = dp_get_be_soc_from_dp_soc(dp_soc);
1657 
1658 	/* enable only on one soc if MLD is disabled */
1659 	if (!be_soc->mlo_enabled || !be_soc->ml_ctxt) {
1660 		dp_disable_enhanced_stats(soc, pdev_id);
1661 		return QDF_STATUS_SUCCESS;
1662 	}
1663 
1664 	dp_mlo_iter_ptnr_soc(be_soc,
1665 			     dp_disable_enhanced_stats_for_each_pdev,
1666 			     NULL);
1667 	return QDF_STATUS_SUCCESS;
1668 }
1669 #endif
1670 
1671 #ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE
1672 QDF_STATUS dp_local_pkt_capture_tx_config(struct dp_pdev *pdev)
1673 {
1674 	struct dp_soc *soc = pdev->soc;
1675 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1676 	uint16_t num_buffers;
1677 	QDF_STATUS status;
1678 
1679 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1680 	num_buffers = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
1681 
1682 	status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev, num_buffers);
1683 
1684 	if (QDF_IS_STATUS_ERROR(status))
1685 		dp_mon_err("Tx monitor buffer allocation failed");
1686 
1687 	return status;
1688 }
1689 #endif
1690