xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/2.0/dp_mon_2.0.c (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <dp_types.h>
19 #include "dp_rx.h"
20 #include "dp_peer.h"
21 #include <dp_htt.h>
22 #include <dp_mon_filter.h>
23 #include <dp_mon.h>
24 #include <dp_rx_mon.h>
25 #include <dp_rx_mon_2.0.h>
26 #include <dp_mon_2.0.h>
27 #include <dp_mon_filter_2.0.h>
28 #include <dp_tx_mon_2.0.h>
29 #include <hal_be_api_mon.h>
30 #include <dp_be.h>
31 #include <htt_ppdu_stats.h>
32 #ifdef QCA_SUPPORT_LITE_MONITOR
33 #include "dp_lite_mon.h"
34 #endif
35 
36 #if !defined(DISABLE_MON_CONFIG)
37 
/**
 * dp_rx_mon_ppdu_info_cache_create() - create the ppdu_info kmem cache and
 *	pre-fill the rx monitor free queue
 * @pdev: DP pdev handle
 *
 * Pre-allocates DP_RX_MON_WQ_THRESHOLD hal_rx_ppdu_info elements from a
 * dedicated kmem cache and strings them onto rx_mon_free_queue so the rx
 * monitor path can take elements without allocating in the fast path.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM if the cache
 *	   itself cannot be created (a partial pre-fill is tolerated and
 *	   still returns success)
 */
QDF_STATUS dp_rx_mon_ppdu_info_cache_create(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	uint16_t obj;
	struct hal_rx_ppdu_info *ppdu_info = NULL;

	mon_pdev_be->ppdu_info_cache =
		qdf_kmem_cache_create("rx_mon_ppdu_info_cache",
				      sizeof(struct hal_rx_ppdu_info));

	if (!mon_pdev_be->ppdu_info_cache) {
		dp_mon_err("cache creation failed pdev :%px", pdev);
		return QDF_STATUS_E_NOMEM;
	}

	TAILQ_INIT(&mon_pdev_be->rx_mon_free_queue);
	for (obj = 0; obj < DP_RX_MON_WQ_THRESHOLD; obj++) {
		ppdu_info =  (struct hal_rx_ppdu_info *)qdf_kmem_cache_alloc(mon_pdev_be->ppdu_info_cache);

		/* allocation failures here are tolerated; the free queue
		 * simply ends up holding fewer pre-allocated elements
		 */
		if (ppdu_info) {
			TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_free_queue,
					  ppdu_info,
					  ppdu_free_list_elem);
			mon_pdev_be->total_free_elem++;
		}
	}
	qdf_spinlock_create(&mon_pdev_be->ppdu_info_lock);

	return QDF_STATUS_SUCCESS;
}
70 
71 void dp_rx_mon_ppdu_info_cache_destroy(struct dp_pdev *pdev)
72 {
73 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
74 	struct dp_mon_pdev_be *mon_pdev_be =
75 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
76 	struct hal_rx_ppdu_info *ppdu_info = NULL, *temp_ppdu_info = NULL;
77 
78 	qdf_spin_lock(&mon_pdev_be->ppdu_info_lock);
79 	TAILQ_FOREACH_SAFE(ppdu_info,
80 			   &mon_pdev_be->rx_mon_free_queue,
81 			   ppdu_free_list_elem,
82 			   temp_ppdu_info) {
83 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_free_queue,
84 			     ppdu_info, ppdu_free_list_elem);
85 		if (ppdu_info) {
86 			mon_pdev_be->total_free_elem--;
87 			qdf_kmem_cache_free(mon_pdev_be->ppdu_info_cache,
88 					    ppdu_info);
89 		}
90 	}
91 	qdf_spin_unlock(&mon_pdev_be->ppdu_info_lock);
92 	dp_mon_debug(" total free element: %d", mon_pdev_be->total_free_elem);
93 	qdf_kmem_cache_destroy(mon_pdev_be->ppdu_info_cache);
94 }
95 
96 /**
97  * dp_mon_pdev_ext_init_2_0() - Init pdev ext param
98  *
99  * @pdev: DP pdev handle
100  *
101  * Return:  QDF_STATUS_SUCCESS: Success
102  *          QDF_STATUS_E_FAILURE: failure
103  */
104 QDF_STATUS dp_mon_pdev_ext_init_2_0(struct dp_pdev *pdev)
105 {
106 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
107 	struct dp_mon_pdev_be *mon_pdev_be =
108 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
109 
110 	qdf_create_work(0, &mon_pdev_be->rx_mon_work,
111 			dp_rx_mon_process_ppdu, pdev);
112 	mon_pdev_be->rx_mon_workqueue =
113 		qdf_alloc_unbound_workqueue("rx_mon_work_queue");
114 
115 	if (!mon_pdev_be->rx_mon_workqueue) {
116 		dp_mon_err("failed to create rxmon wq mon_pdev: %pK", mon_pdev);
117 		goto fail;
118 	}
119 	TAILQ_INIT(&mon_pdev_be->rx_mon_queue);
120 
121 	qdf_spinlock_create(&mon_pdev_be->rx_mon_wq_lock);
122 
123 	return QDF_STATUS_SUCCESS;
124 
125 fail:
126 	return QDF_STATUS_E_FAILURE;
127 }
128 
129 /**
130  * dp_mon_pdev_ext_deinit_2_0() - denit pdev ext param
131  *
132  * @pdev: DP pdev handle
133  *
134  * Return: QDF_STATUS_SUCCESS
135  */
136 QDF_STATUS dp_mon_pdev_ext_deinit_2_0(struct dp_pdev *pdev)
137 {
138 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
139 	struct dp_mon_pdev_be *mon_pdev_be =
140 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
141 
142 	if (!mon_pdev_be->rx_mon_workqueue)
143 		return QDF_STATUS_E_FAILURE;
144 
145 	qdf_err(" total free element: %d", mon_pdev_be->total_free_elem);
146 	qdf_flush_workqueue(0, mon_pdev_be->rx_mon_workqueue);
147 	qdf_destroy_workqueue(0, mon_pdev_be->rx_mon_workqueue);
148 	qdf_flush_work(&mon_pdev_be->rx_mon_work);
149 	qdf_disable_work(&mon_pdev_be->rx_mon_work);
150 	dp_rx_mon_drain_wq(pdev);
151 	mon_pdev_be->rx_mon_workqueue = NULL;
152 	qdf_spinlock_destroy(&mon_pdev_be->rx_mon_wq_lock);
153 
154 	return QDF_STATUS_SUCCESS;
155 }
156 
157 /*
158  * dp_mon_add_desc_list_to_free_list() - append unused desc_list back to
159  *					freelist.
160  *
161  * @soc: core txrx main context
162  * @local_desc_list: local desc list provided by the caller
163  * @tail: attach the point to last desc of local desc list
164  * @mon_desc_pool: monitor descriptor pool pointer
165  */
166 void
167 dp_mon_add_desc_list_to_free_list(struct dp_soc *soc,
168 				  union dp_mon_desc_list_elem_t **local_desc_list,
169 				  union dp_mon_desc_list_elem_t **tail,
170 				  struct dp_mon_desc_pool *mon_desc_pool)
171 {
172 	union dp_mon_desc_list_elem_t *temp_list = NULL;
173 
174 	qdf_spin_lock_bh(&mon_desc_pool->lock);
175 
176 	temp_list = mon_desc_pool->freelist;
177 	mon_desc_pool->freelist = *local_desc_list;
178 	(*tail)->next = temp_list;
179 	*tail = NULL;
180 	*local_desc_list = NULL;
181 
182 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
183 }
184 
185 /*
186  * dp_mon_get_free_desc_list() - provide a list of descriptors from
187  *				the free mon desc pool.
188  *
189  * @soc: core txrx main context
190  * @mon_desc_pool: monitor descriptor pool pointer
191  * @num_descs: number of descs requested from freelist
192  * @desc_list: attach the descs to this list (output parameter)
193  * @tail: attach the point to last desc of free list (output parameter)
194  *
195  * Return: number of descs allocated from free list.
196  */
197 static uint16_t
198 dp_mon_get_free_desc_list(struct dp_soc *soc,
199 			  struct dp_mon_desc_pool *mon_desc_pool,
200 			  uint16_t num_descs,
201 			  union dp_mon_desc_list_elem_t **desc_list,
202 			  union dp_mon_desc_list_elem_t **tail)
203 {
204 	uint16_t count;
205 
206 	qdf_spin_lock_bh(&mon_desc_pool->lock);
207 
208 	*desc_list = *tail = mon_desc_pool->freelist;
209 
210 	for (count = 0; count < num_descs; count++) {
211 		if (qdf_unlikely(!mon_desc_pool->freelist)) {
212 			qdf_spin_unlock_bh(&mon_desc_pool->lock);
213 			return count;
214 		}
215 		*tail = mon_desc_pool->freelist;
216 		mon_desc_pool->freelist = mon_desc_pool->freelist->next;
217 	}
218 	(*tail)->next = NULL;
219 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
220 	return count;
221 }
222 
223 void dp_mon_pool_frag_unmap_and_free(struct dp_soc *soc,
224 				     struct dp_mon_desc_pool *mon_desc_pool)
225 {
226 	int desc_id;
227 	qdf_frag_t vaddr;
228 	qdf_dma_addr_t paddr;
229 
230 	qdf_spin_lock_bh(&mon_desc_pool->lock);
231 	for (desc_id = 0; desc_id < mon_desc_pool->pool_size; desc_id++) {
232 		if (mon_desc_pool->array[desc_id].mon_desc.in_use) {
233 			vaddr = mon_desc_pool->array[desc_id].mon_desc.buf_addr;
234 			paddr = mon_desc_pool->array[desc_id].mon_desc.paddr;
235 
236 			if (!(mon_desc_pool->array[desc_id].mon_desc.unmapped)) {
237 				qdf_mem_unmap_page(soc->osdev, paddr,
238 						   mon_desc_pool->buf_size,
239 						   QDF_DMA_FROM_DEVICE);
240 				mon_desc_pool->array[desc_id].mon_desc.unmapped = 1;
241 				mon_desc_pool->array[desc_id].mon_desc.cookie = desc_id;
242 			}
243 			qdf_frag_free(vaddr);
244 		}
245 	}
246 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
247 }
248 
249 static inline QDF_STATUS
250 dp_mon_frag_alloc_and_map(struct dp_soc *dp_soc,
251 			  struct dp_mon_desc *mon_desc,
252 			  struct dp_mon_desc_pool *mon_desc_pool)
253 {
254 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
255 
256 	mon_desc->buf_addr = qdf_frag_alloc(&mon_desc_pool->pf_cache,
257 					    mon_desc_pool->buf_size);
258 
259 	if (!mon_desc->buf_addr) {
260 		dp_mon_err("Frag alloc failed");
261 		return QDF_STATUS_E_NOMEM;
262 	}
263 
264 	ret = qdf_mem_map_page(dp_soc->osdev,
265 			       mon_desc->buf_addr,
266 			       QDF_DMA_FROM_DEVICE,
267 			       mon_desc_pool->buf_size,
268 			       &mon_desc->paddr);
269 
270 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
271 		qdf_frag_free(mon_desc->buf_addr);
272 		dp_mon_err("Frag map failed");
273 		return QDF_STATUS_E_FAULT;
274 	}
275 
276 	return QDF_STATUS_SUCCESS;
277 }
278 
279 QDF_STATUS
280 dp_mon_buffers_replenish(struct dp_soc *dp_soc,
281 			 struct dp_srng *dp_mon_srng,
282 			 struct dp_mon_desc_pool *mon_desc_pool,
283 			 uint32_t num_req_buffers,
284 			 union dp_mon_desc_list_elem_t **desc_list,
285 			 union dp_mon_desc_list_elem_t **tail,
286 			 uint32_t *replenish_cnt_ref)
287 {
288 	uint32_t num_alloc_desc;
289 	uint32_t num_entries_avail;
290 	uint32_t count = 0;
291 	int sync_hw_ptr = 1;
292 	struct dp_mon_desc mon_desc = {0};
293 	void *mon_ring_entry;
294 	union dp_mon_desc_list_elem_t *next;
295 	void *mon_srng;
296 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
297 	struct dp_mon_soc *mon_soc = dp_soc->monitor_soc;
298 
299 	if (!num_req_buffers) {
300 		dp_mon_debug("%pK: Received request for 0 buffers replenish",
301 			     dp_soc);
302 		ret = QDF_STATUS_E_INVAL;
303 		goto free_desc;
304 	}
305 
306 	mon_srng = dp_mon_srng->hal_srng;
307 
308 	hal_srng_access_start(dp_soc->hal_soc, mon_srng);
309 
310 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
311 						   mon_srng, sync_hw_ptr);
312 
313 	if (!num_entries_avail) {
314 		hal_srng_access_end(dp_soc->hal_soc, mon_srng);
315 		goto free_desc;
316 	}
317 	if (num_entries_avail < num_req_buffers) {
318 		num_req_buffers = num_entries_avail;
319 	}
320 
321 	/*
322 	 * if desc_list is NULL, allocate the descs from freelist
323 	 */
324 	if (!(*desc_list)) {
325 		num_alloc_desc = dp_mon_get_free_desc_list(dp_soc,
326 							   mon_desc_pool,
327 							   num_req_buffers,
328 							   desc_list,
329 							   tail);
330 
331 		if (!num_alloc_desc) {
332 			dp_mon_debug("%pK: no free rx_descs in freelist", dp_soc);
333 			hal_srng_access_end(dp_soc->hal_soc, mon_srng);
334 			return QDF_STATUS_E_NOMEM;
335 		}
336 
337 		dp_mon_info("%pK: %d rx desc allocated",
338 			    dp_soc, num_alloc_desc);
339 
340 		num_req_buffers = num_alloc_desc;
341 	}
342 
343 	while (count <= num_req_buffers - 1) {
344 		ret = dp_mon_frag_alloc_and_map(dp_soc,
345 						&mon_desc,
346 						mon_desc_pool);
347 
348 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
349 			if (qdf_unlikely(ret  == QDF_STATUS_E_FAULT))
350 				continue;
351 			break;
352 		}
353 
354 		count++;
355 		next = (*desc_list)->next;
356 		mon_ring_entry = hal_srng_src_get_next(
357 						dp_soc->hal_soc,
358 						mon_srng);
359 
360 		if (!mon_ring_entry)
361 			break;
362 
363 		qdf_assert_always((*desc_list)->mon_desc.in_use == 0);
364 
365 		(*desc_list)->mon_desc.in_use = 1;
366 		(*desc_list)->mon_desc.unmapped = 0;
367 		(*desc_list)->mon_desc.buf_addr = mon_desc.buf_addr;
368 		(*desc_list)->mon_desc.paddr = mon_desc.paddr;
369 		(*desc_list)->mon_desc.magic = DP_MON_DESC_MAGIC;
370 
371 		mon_soc->stats.frag_alloc++;
372 		hal_mon_buff_addr_info_set(dp_soc->hal_soc,
373 					   mon_ring_entry,
374 					   &((*desc_list)->mon_desc),
375 					   mon_desc.paddr);
376 
377 		*desc_list = next;
378 	}
379 
380 	hal_srng_access_end(dp_soc->hal_soc, mon_srng);
381 	if (replenish_cnt_ref)
382 		*replenish_cnt_ref += count;
383 
384 free_desc:
385 	/*
386 	 * add any available free desc back to the free list
387 	 */
388 	if (*desc_list) {
389 		dp_mon_add_desc_list_to_free_list(dp_soc, desc_list, tail,
390 						  mon_desc_pool);
391 	}
392 
393 	return ret;
394 }
395 
396 QDF_STATUS
397 dp_mon_desc_pool_init(struct dp_mon_desc_pool *mon_desc_pool,
398 		      uint32_t pool_size)
399 {
400 	int desc_id;
401 	/* Initialize monitor desc lock */
402 	qdf_spinlock_create(&mon_desc_pool->lock);
403 
404 	qdf_spin_lock_bh(&mon_desc_pool->lock);
405 
406 	mon_desc_pool->buf_size = DP_MON_DATA_BUFFER_SIZE;
407 	/* link SW descs into a freelist */
408 	mon_desc_pool->freelist = &mon_desc_pool->array[0];
409 	mon_desc_pool->pool_size = pool_size - 1;
410 	qdf_mem_zero(mon_desc_pool->freelist,
411 		     mon_desc_pool->pool_size *
412 		     sizeof(union dp_mon_desc_list_elem_t));
413 
414 	for (desc_id = 0; desc_id < mon_desc_pool->pool_size; desc_id++) {
415 		if (desc_id == mon_desc_pool->pool_size - 1)
416 			mon_desc_pool->array[desc_id].next = NULL;
417 		else
418 			mon_desc_pool->array[desc_id].next =
419 				&mon_desc_pool->array[desc_id + 1];
420 		mon_desc_pool->array[desc_id].mon_desc.in_use = 0;
421 		mon_desc_pool->array[desc_id].mon_desc.cookie = desc_id;
422 	}
423 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
424 
425 	return QDF_STATUS_SUCCESS;
426 }
427 
/**
 * dp_mon_desc_pool_deinit() - detach the freelist and destroy the pool lock
 * @mon_desc_pool: monitor descriptor pool pointer
 *
 * Counterpart of dp_mon_desc_pool_init(). Does not free the backing
 * array; that is done by dp_mon_desc_pool_free().
 */
void dp_mon_desc_pool_deinit(struct dp_mon_desc_pool *mon_desc_pool)
{
	qdf_spin_lock_bh(&mon_desc_pool->lock);

	mon_desc_pool->freelist = NULL;
	mon_desc_pool->pool_size = 0;

	qdf_spin_unlock_bh(&mon_desc_pool->lock);
	qdf_spinlock_destroy(&mon_desc_pool->lock);
}
438 
439 void dp_mon_desc_pool_free(struct dp_mon_desc_pool *mon_desc_pool)
440 {
441 	qdf_mem_free(mon_desc_pool->array);
442 }
443 
444 QDF_STATUS dp_mon_desc_pool_alloc(uint32_t pool_size,
445 				  struct dp_mon_desc_pool *mon_desc_pool)
446 {
447 	mon_desc_pool->pool_size = pool_size - 1;
448 	mon_desc_pool->array = qdf_mem_malloc((mon_desc_pool->pool_size) *
449 				     sizeof(union dp_mon_desc_list_elem_t));
450 
451 	return QDF_STATUS_SUCCESS;
452 }
453 
454 QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_rx_2_0(struct dp_pdev *pdev)
455 {
456 	int rx_mon_max_entries;
457 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
458 	struct dp_soc *soc = pdev->soc;
459 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
460 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
461 	QDF_STATUS status;
462 
463 	if (!mon_soc_be) {
464 		dp_mon_err("DP MON SOC is NULL");
465 		return QDF_STATUS_E_FAILURE;
466 	}
467 
468 	soc_cfg_ctx = soc->wlan_cfg_ctx;
469 	rx_mon_max_entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
470 
471 	hal_set_low_threshold(soc->rxdma_mon_buf_ring[0].hal_srng,
472 			      MON_BUF_MIN_ENTRIES << 2);
473 	status = htt_srng_setup(soc->htt_handle, 0,
474 				soc->rxdma_mon_buf_ring[0].hal_srng,
475 				RXDMA_MONITOR_BUF);
476 
477 	if (status != QDF_STATUS_SUCCESS) {
478 		dp_mon_err("Failed to send htt srng setup message for Rx mon buf ring");
479 		return status;
480 	}
481 
482 	if (mon_soc_be->rx_mon_ring_fill_level < rx_mon_max_entries) {
483 		status = dp_rx_mon_buffers_alloc(soc,
484 						 (rx_mon_max_entries -
485 						 mon_soc_be->rx_mon_ring_fill_level));
486 		if (status != QDF_STATUS_SUCCESS) {
487 			dp_mon_err("%pK: Rx mon buffers allocation failed", soc);
488 			return status;
489 		}
490 		mon_soc_be->rx_mon_ring_fill_level +=
491 				(rx_mon_max_entries -
492 				mon_soc_be->rx_mon_ring_fill_level);
493 	}
494 
495 	return QDF_STATUS_SUCCESS;
496 }
497 
498 QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_tx_2_0(struct dp_pdev *pdev,
499 						     uint16_t num_of_buffers)
500 {
501 	int tx_mon_max_entries;
502 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
503 	struct dp_soc *soc = pdev->soc;
504 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
505 	struct dp_mon_soc_be *mon_soc_be =
506 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
507 	QDF_STATUS status;
508 
509 	if (!mon_soc_be) {
510 		dp_mon_err("DP MON SOC is NULL");
511 		return QDF_STATUS_E_FAILURE;
512 	}
513 
514 	soc_cfg_ctx = soc->wlan_cfg_ctx;
515 	tx_mon_max_entries =
516 		wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
517 
518 	hal_set_low_threshold(mon_soc_be->tx_mon_buf_ring.hal_srng,
519 			      tx_mon_max_entries >> 2);
520 	status = htt_srng_setup(soc->htt_handle, 0,
521 				mon_soc_be->tx_mon_buf_ring.hal_srng,
522 				TX_MONITOR_BUF);
523 
524 	if (status != QDF_STATUS_SUCCESS) {
525 		dp_mon_err("Failed to send htt srng setup message for Tx mon buf ring");
526 		return status;
527 	}
528 
529 	if (mon_soc_be->tx_mon_ring_fill_level < num_of_buffers) {
530 		if (dp_tx_mon_buffers_alloc(soc,
531 					    (num_of_buffers -
532 					     mon_soc_be->tx_mon_ring_fill_level))) {
533 			dp_mon_err("%pK: Tx mon buffers allocation failed",
534 				   soc);
535 			return QDF_STATUS_E_FAILURE;
536 		}
537 		mon_soc_be->tx_mon_ring_fill_level +=
538 					(num_of_buffers -
539 					mon_soc_be->tx_mon_ring_fill_level);
540 	}
541 
542 	return QDF_STATUS_SUCCESS;
543 }
544 
545 static
546 QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_2_0(struct dp_pdev *pdev)
547 {
548 	int status;
549 	struct dp_soc *soc = pdev->soc;
550 
551 	status = dp_vdev_set_monitor_mode_buf_rings_rx_2_0(pdev);
552 	if (status != QDF_STATUS_SUCCESS) {
553 		dp_mon_err("%pK: Rx monitor extra buffer allocation failed",
554 			   soc);
555 		return status;
556 	}
557 
558 	return QDF_STATUS_SUCCESS;
559 }
560 
/**
 * dp_vdev_set_monitor_mode_rings_2_0() - set up monitor mode rings
 * @pdev: DP pdev handle (unused)
 * @delayed_replenish: delayed replenish flag (unused)
 *
 * No-op on the 2.0 monitor path; kept to satisfy the monitor ops
 * interface.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static
QDF_STATUS dp_vdev_set_monitor_mode_rings_2_0(struct dp_pdev *pdev,
					      uint8_t delayed_replenish)
{
	return QDF_STATUS_SUCCESS;
}
567 
568 #ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_mon_tx_enable_enhanced_stats_2_0() - Send HTT cmd to FW to enable stats
 * @pdev: Datapath pdev handle
 *
 * Return: none
 */
static void dp_mon_tx_enable_enhanced_stats_2_0(struct dp_pdev *pdev)
{
	/* send the enhanced-stats config for this pdev to the target */
	dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
				  pdev->pdev_id);
}
580 
/**
 * dp_mon_tx_disable_enhanced_stats_2_0() - Send HTT cmd to FW to disable stats
 * @pdev: Datapath pdev handle
 *
 * Return: none
 */
static void dp_mon_tx_disable_enhanced_stats_2_0(struct dp_pdev *pdev)
{
	/* a config value of 0 turns the stats reporting off for this pdev */
	dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
}
591 #endif
592 
593 #if defined(QCA_ENHANCED_STATS_SUPPORT) && defined(WLAN_FEATURE_11BE)
/**
 * dp_mon_tx_stats_update_2_0() - update 11BE tx stats for a monitor peer
 * @mon_peer: monitor peer whose stats are updated
 * @ppdu: per-user tx completion ppdu info
 *
 * Derives the puncture mode from the ppdu's puncture bitmap and bandwidth,
 * stores it back into @ppdu, and bumps the per-preamble MCS counters plus
 * the SU / MU-OFDMA / MU-MIMO BE ppdu counters. MCS values at or above
 * MAX_MCS_11BE are folded into the MAX_MCS - 1 bucket.
 */
void
dp_mon_tx_stats_update_2_0(struct dp_mon_peer *mon_peer,
			   struct cdp_tx_completion_ppdu_user *ppdu)
{
	uint8_t preamble, mcs, punc_mode;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;

	punc_mode = dp_mon_get_puncture_type(ppdu->punc_pattern_bitmap,
					     ppdu->bw);
	ppdu->punc_mode = punc_mode;

	DP_STATS_INC(mon_peer, tx.punc_bw[punc_mode], ppdu->num_msdu);
	/* each INCC below is conditional on the BE preamble and an MCS
	 * range check; out-of-range MCS goes to the MAX_MCS - 1 bucket
	 */
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1],
		      ppdu->num_msdu,
		      ((mcs >= MAX_MCS_11BE) && (preamble == DOT11_BE)));
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[mcs],
		      ppdu->num_msdu,
		      ((mcs < MAX_MCS_11BE) && (preamble == DOT11_BE)));
	DP_STATS_INCC(mon_peer,
		      tx.su_be_ppdu_cnt.mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11BE) && (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_SU)));
	DP_STATS_INCC(mon_peer,
		      tx.su_be_ppdu_cnt.mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11BE) && (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_SU)));
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[MAX_MCS - 1],
		      1, ((mcs >= MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA)));
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[mcs],
		      1, ((mcs < MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA)));
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[MAX_MCS - 1],
		      1, ((mcs >= MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO)));
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[mcs],
		      1, ((mcs < MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO)));
}
645 
646 enum cdp_punctured_modes
647 dp_mon_get_puncture_type(uint16_t puncture_pattern, uint8_t bw)
648 {
649 	uint16_t mask;
650 	uint8_t punctured_bits;
651 
652 	if (!puncture_pattern)
653 		return NO_PUNCTURE;
654 
655 	switch (bw) {
656 	case CMN_BW_80MHZ:
657 		mask = PUNCTURE_80MHZ_MASK;
658 		break;
659 	case CMN_BW_160MHZ:
660 		mask = PUNCTURE_160MHZ_MASK;
661 		break;
662 	case CMN_BW_320MHZ:
663 		mask = PUNCTURE_320MHZ_MASK;
664 		break;
665 	default:
666 		return NO_PUNCTURE;
667 	}
668 
669 	/* 0s in puncture pattern received in TLV indicates punctured 20Mhz,
670 	 * after complement, 1s will indicate punctured 20Mhz
671 	 */
672 	puncture_pattern = ~puncture_pattern;
673 	puncture_pattern &= mask;
674 
675 	if (puncture_pattern) {
676 		punctured_bits = 0;
677 		while (puncture_pattern != 0) {
678 			punctured_bits++;
679 			puncture_pattern &= (puncture_pattern - 1);
680 		}
681 
682 		if (bw == CMN_BW_80MHZ) {
683 			if (punctured_bits == IEEE80211_PUNC_MINUS20MHZ)
684 				return PUNCTURED_20MHZ;
685 			else
686 				return NO_PUNCTURE;
687 		} else if (bw == CMN_BW_160MHZ) {
688 			if (punctured_bits == IEEE80211_PUNC_MINUS20MHZ)
689 				return PUNCTURED_20MHZ;
690 			else if (punctured_bits == IEEE80211_PUNC_MINUS40MHZ)
691 				return PUNCTURED_40MHZ;
692 			else
693 				return NO_PUNCTURE;
694 		} else if (bw == CMN_BW_320MHZ) {
695 			if (punctured_bits == IEEE80211_PUNC_MINUS40MHZ)
696 				return PUNCTURED_40MHZ;
697 			else if (punctured_bits == IEEE80211_PUNC_MINUS80MHZ)
698 				return PUNCTURED_80MHZ;
699 			else if (punctured_bits == IEEE80211_PUNC_MINUS120MHZ)
700 				return PUNCTURED_120MHZ;
701 			else
702 				return NO_PUNCTURE;
703 		}
704 	}
705 	return NO_PUNCTURE;
706 }
707 #endif
708 
709 #if defined(QCA_ENHANCED_STATS_SUPPORT) && !defined(WLAN_FEATURE_11BE)
/**
 * dp_mon_tx_stats_update_2_0() - tx stats update stub (11BE disabled)
 * @mon_peer: monitor peer handle (unused)
 * @ppdu: per-user tx completion ppdu info
 *
 * Without WLAN_FEATURE_11BE there are no BE counters to update; only
 * mark the ppdu as not punctured.
 */
void
dp_mon_tx_stats_update_2_0(struct dp_mon_peer *mon_peer,
			   struct cdp_tx_completion_ppdu_user *ppdu)
{
	ppdu->punc_mode = NO_PUNCTURE;
}
716 
/**
 * dp_mon_get_puncture_type() - puncture type stub (11BE disabled)
 * @puncture_pattern: puncture bitmap from TLV (ignored)
 * @bw: bandwidth (ignored)
 *
 * Return: NO_PUNCTURE always when WLAN_FEATURE_11BE is not compiled in
 */
enum cdp_punctured_modes
dp_mon_get_puncture_type(uint16_t puncture_pattern, uint8_t bw)
{
	return NO_PUNCTURE;
}
722 #endif /* QCA_ENHANCED_STATS_SUPPORT && WLAN_FEATURE_11BE */
723 
724 #ifdef QCA_SUPPORT_BPR
/**
 * dp_set_bpr_enable_2_0() - BPR enable handler for monitor 2.0
 * @pdev: DP pdev handle (unused)
 * @val: requested BPR enable value (unused)
 *
 * No-op on the 2.0 monitor path; reports success unconditionally.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS
dp_set_bpr_enable_2_0(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_SUCCESS;
}
730 #endif /* QCA_SUPPORT_BPR */
731 
732 #ifdef QCA_ENHANCED_STATS_SUPPORT
733 #ifdef WDI_EVENT_ENABLE
734 /**
735  * dp_ppdu_desc_notify_2_0 - Notify upper layer for PPDU indication via WDI
736  *
737  * @pdev: Datapath pdev handle
738  * @nbuf: Buffer to be shipped
739  *
740  * Return: void
741  */
742 static void dp_ppdu_desc_notify_2_0(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
743 {
744 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
745 
746 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(nbuf);
747 
748 	if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
749 	    ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
750 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
751 				     pdev->soc,
752 				     nbuf, HTT_INVALID_PEER,
753 				     WDI_NO_VAL,
754 				     pdev->pdev_id);
755 	} else {
756 		qdf_nbuf_free(nbuf);
757 	}
758 }
759 #endif
760 
/**
 * dp_ppdu_stats_feat_enable_check_2_0 - Check if feature(s) is enabled to
 *				consume ppdu stats from FW
 *
 * @pdev: Datapath pdev handle
 *
 * Return: true if enabled, else return false
 */
static bool dp_ppdu_stats_feat_enable_check_2_0(struct dp_pdev *pdev)
{
	/* only the enhanced-stats flag gates FW ppdu stats on this path */
	return pdev->monitor_pdev->enhanced_stats_en;
}
773 #endif
774 
775 static
776 QDF_STATUS dp_mon_soc_htt_srng_setup_2_0(struct dp_soc *soc)
777 {
778 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
779 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
780 	QDF_STATUS status;
781 
782 	hal_set_low_threshold(soc->rxdma_mon_buf_ring[0].hal_srng,
783 			      MON_BUF_MIN_ENTRIES << 2);
784 	status = htt_srng_setup(soc->htt_handle, 0,
785 				soc->rxdma_mon_buf_ring[0].hal_srng,
786 				RXDMA_MONITOR_BUF);
787 
788 	if (status != QDF_STATUS_SUCCESS) {
789 		dp_err("Failed to send htt srng setup message for Rx mon buf ring");
790 		return status;
791 	}
792 
793 	hal_set_low_threshold(mon_soc_be->tx_mon_buf_ring.hal_srng, 0);
794 	status = htt_srng_setup(soc->htt_handle, 0,
795 				mon_soc_be->tx_mon_buf_ring.hal_srng,
796 				TX_MONITOR_BUF);
797 	if (status != QDF_STATUS_SUCCESS) {
798 		dp_err("Failed to send htt srng setup message for Tx mon buf ring");
799 		return status;
800 	}
801 
802 	return status;
803 }
804 
805 static
806 QDF_STATUS dp_mon_pdev_htt_srng_setup_2_0(struct dp_soc *soc,
807 					  struct dp_pdev *pdev,
808 					  int mac_id,
809 					  int mac_for_pdev)
810 {
811 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
812 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
813 	QDF_STATUS status;
814 
815 	if (!soc->rxdma_mon_dst_ring[mac_id].hal_srng)
816 		return QDF_STATUS_SUCCESS;
817 
818 	status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
819 				soc->rxdma_mon_dst_ring[mac_id].hal_srng,
820 				RXDMA_MONITOR_DST);
821 
822 	if (status != QDF_STATUS_SUCCESS) {
823 		dp_mon_err("Failed to send htt srng setup message for Rxdma dst ring");
824 		return status;
825 	}
826 
827 	if (!mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng)
828 		return QDF_STATUS_SUCCESS;
829 
830 	status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
831 				mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng,
832 				TX_MONITOR_DST);
833 
834 	if (status != QDF_STATUS_SUCCESS) {
835 		dp_mon_err("Failed to send htt srng message for Tx mon dst ring");
836 		return status;
837 	}
838 
839 	return status;
840 }
841 
842 QDF_STATUS dp_tx_mon_refill_buf_ring_2_0(struct dp_intr *int_ctx)
843 {
844 	struct dp_soc *soc  = int_ctx->soc;
845 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
846 	union dp_mon_desc_list_elem_t *desc_list = NULL;
847 	union dp_mon_desc_list_elem_t *tail = NULL;
848 	struct dp_srng *tx_mon_buf_ring;
849 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
850 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
851 	uint32_t num_entries_avail;
852 	int sync_hw_ptr = 1;
853 	void *hal_srng;
854 
855 	tx_mon_buf_ring = &mon_soc_be->tx_mon_buf_ring;
856 	hal_srng = tx_mon_buf_ring->hal_srng;
857 
858 	intr_stats->num_host2txmon_ring__masks++;
859 	mon_soc_be->tx_low_thresh_intrs++;
860 	hal_srng_access_start(soc->hal_soc, hal_srng);
861 	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
862 						   hal_srng,
863 						   sync_hw_ptr);
864 	hal_srng_access_end(soc->hal_soc, hal_srng);
865 
866 	if (num_entries_avail)
867 		dp_mon_buffers_replenish(soc, tx_mon_buf_ring,
868 					 &mon_soc_be->tx_desc_mon,
869 					 num_entries_avail, &desc_list, &tail,
870 					 NULL);
871 
872 	return QDF_STATUS_SUCCESS;
873 }
874 
875 QDF_STATUS dp_rx_mon_refill_buf_ring_2_0(struct dp_intr *int_ctx)
876 {
877 	struct dp_soc *soc  = int_ctx->soc;
878 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
879 	union dp_mon_desc_list_elem_t *desc_list = NULL;
880 	union dp_mon_desc_list_elem_t *tail = NULL;
881 	struct dp_srng *rx_mon_buf_ring;
882 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
883 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
884 	uint32_t num_entries_avail;
885 	int sync_hw_ptr = 1, hp = 0, tp = 0, num_entries;
886 	void *hal_srng;
887 
888 	rx_mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
889 	hal_srng = rx_mon_buf_ring->hal_srng;
890 
891 	intr_stats->num_host2rxdma_ring_masks++;
892 	mon_soc_be->rx_low_thresh_intrs++;
893 	hal_srng_access_start(soc->hal_soc, hal_srng);
894 	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
895 						   hal_srng,
896 						   sync_hw_ptr);
897 	hal_get_sw_hptp(soc->hal_soc, (hal_ring_handle_t)hal_srng, &tp, &hp);
898 	hal_srng_access_end(soc->hal_soc, hal_srng);
899 
900 	num_entries = num_entries_avail;
901 	if (mon_soc_be->rx_mon_ring_fill_level < rx_mon_buf_ring->num_entries)
902 		num_entries = num_entries_avail - mon_soc_be->rx_mon_ring_fill_level;
903 
904 	if (num_entries)
905 		dp_mon_buffers_replenish(soc, rx_mon_buf_ring,
906 					 &mon_soc_be->rx_desc_mon,
907 					 num_entries, &desc_list, &tail,
908 					 NULL);
909 
910 	return QDF_STATUS_SUCCESS;
911 }
912 
/**
 * dp_mon_soc_detach_2_0() - free monitor soc descriptor pools and rings
 * @soc: DP soc handle
 *
 * Frees each descriptor pool and then its backing srng, rx first then tx.
 * Counterpart of dp_mon_soc_attach_2_0().
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the BE
 *	   monitor soc handle is missing
 */
static
QDF_STATUS dp_mon_soc_detach_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	if (!mon_soc_be) {
		dp_mon_err("DP MON SOC NULL");
		return QDF_STATUS_E_FAILURE;
	}

	/* desc pool is freed before the srng that backs it */
	dp_rx_mon_buf_desc_pool_free(soc);
	dp_srng_free(soc, &soc->rxdma_mon_buf_ring[0]);
	dp_tx_mon_buf_desc_pool_free(soc);
	dp_srng_free(soc, &mon_soc_be->tx_mon_buf_ring);

	return QDF_STATUS_SUCCESS;
}
931 
/**
 * dp_mon_soc_deinit_2_0() - deinit monitor soc buffers, pools and rings
 * @soc: DP soc handle
 *
 * Reverses dp_mon_soc_init_2_0(): frees buffers, deinits the descriptor
 * pools, then deinits the srngs, and clears the initialized flag.
 *
 * NOTE(review): the early return below skips cleanup whenever the
 * initialized flag was never set — a partially-initialized soc (init
 * failed midway) is therefore not cleaned up here; confirm the failure
 * path releases those resources elsewhere.
 */
static void dp_mon_soc_deinit_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	if (!mon_soc_be->is_dp_mon_soc_initialized)
		return;

	/* teardown order: buffers, then desc pools, then the rings */
	dp_rx_mon_buffers_free(soc);
	dp_tx_mon_buffers_free(soc);

	dp_rx_mon_buf_desc_pool_deinit(soc);
	dp_tx_mon_buf_desc_pool_deinit(soc);

	dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[0], RXDMA_MONITOR_BUF, 0);
	dp_srng_deinit(soc, &mon_soc_be->tx_mon_buf_ring, TX_MONITOR_BUF, 0);

	mon_soc_be->is_dp_mon_soc_initialized = false;
}
952 
953 static
954 QDF_STATUS dp_rx_mon_soc_init_2_0(struct dp_soc *soc)
955 {
956 	if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[0],
957 			 RXDMA_MONITOR_BUF, 0, 0)) {
958 		dp_mon_err("%pK: " RNG_ERR "rx_mon_buf_ring", soc);
959 		goto fail;
960 	}
961 
962 	if (dp_rx_mon_buf_desc_pool_init(soc)) {
963 		dp_mon_err("%pK: " RNG_ERR "rx mon desc pool init", soc);
964 		goto fail;
965 	}
966 
967 	/* monitor buffers for src */
968 	if (dp_rx_mon_buffers_alloc(soc, DP_MON_RING_FILL_LEVEL_DEFAULT)) {
969 		dp_mon_err("%pK: Rx mon buffers allocation failed", soc);
970 		goto fail;
971 	}
972 
973 	return QDF_STATUS_SUCCESS;
974 fail:
975 	return QDF_STATUS_E_FAILURE;
976 }
977 
978 static
979 QDF_STATUS dp_tx_mon_soc_init_2_0(struct dp_soc *soc)
980 {
981 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
982 	struct dp_mon_soc_be *mon_soc_be =
983 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
984 
985 	if (dp_srng_init(soc, &mon_soc_be->tx_mon_buf_ring,
986 			 TX_MONITOR_BUF, 0, 0)) {
987 		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
988 		goto fail;
989 	}
990 
991 	if (dp_tx_mon_buf_desc_pool_init(soc)) {
992 		dp_mon_err("%pK: " RNG_ERR "tx mon desc pool init", soc);
993 		goto fail;
994 	}
995 
996 	return QDF_STATUS_SUCCESS;
997 fail:
998 	return QDF_STATUS_E_FAILURE;
999 }
1000 
1001 static
1002 QDF_STATUS dp_mon_soc_init_2_0(struct dp_soc *soc)
1003 {
1004 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1005 	struct dp_mon_soc_be *mon_soc_be =
1006 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1007 
1008 	if (soc->rxdma_mon_buf_ring[0].hal_srng) {
1009 		dp_mon_info("%pK: mon soc init is done", soc);
1010 		return QDF_STATUS_SUCCESS;
1011 	}
1012 
1013 	if (dp_rx_mon_soc_init_2_0(soc)) {
1014 		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
1015 		goto fail;
1016 	}
1017 
1018 	if (dp_tx_mon_soc_init_2_0(soc)) {
1019 		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
1020 		goto fail;
1021 	}
1022 
1023 	mon_soc_be->tx_mon_ring_fill_level = 0;
1024 	mon_soc_be->rx_mon_ring_fill_level = DP_MON_RING_FILL_LEVEL_DEFAULT;
1025 
1026 	mon_soc_be->is_dp_mon_soc_initialized = true;
1027 	return QDF_STATUS_SUCCESS;
1028 fail:
1029 	dp_mon_soc_deinit_2_0(soc);
1030 	return QDF_STATUS_E_FAILURE;
1031 }
1032 
1033 static
1034 QDF_STATUS dp_mon_soc_attach_2_0(struct dp_soc *soc)
1035 {
1036 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1037 	struct dp_mon_soc_be *mon_soc_be =
1038 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1039 	int entries;
1040 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1041 
1042 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1043 	if (!mon_soc_be) {
1044 		dp_mon_err("DP MON SOC is NULL");
1045 		return QDF_STATUS_E_FAILURE;
1046 	}
1047 
1048 	entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
1049 	qdf_print("%s:%d rx mon buf entries: %d", __func__, __LINE__, entries);
1050 	if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[0],
1051 			  RXDMA_MONITOR_BUF, entries, 0)) {
1052 		dp_mon_err("%pK: " RNG_ERR "rx_mon_buf_ring", soc);
1053 		goto fail;
1054 	}
1055 
1056 	entries = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
1057 	qdf_print("%s:%d tx mon buf entries: %d", __func__, __LINE__, entries);
1058 	if (dp_srng_alloc(soc, &mon_soc_be->tx_mon_buf_ring,
1059 			  TX_MONITOR_BUF, entries, 0)) {
1060 		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
1061 		goto fail;
1062 	}
1063 
1064 	/* allocate sw desc pool */
1065 	if (dp_rx_mon_buf_desc_pool_alloc(soc)) {
1066 		dp_mon_err("%pK: Rx mon desc pool allocation failed", soc);
1067 		goto fail;
1068 	}
1069 
1070 	if (dp_tx_mon_buf_desc_pool_alloc(soc)) {
1071 		dp_mon_err("%pK: Tx mon desc pool allocation failed", soc);
1072 		goto fail;
1073 	}
1074 
1075 	return QDF_STATUS_SUCCESS;
1076 fail:
1077 	dp_mon_soc_detach_2_0(soc);
1078 	return QDF_STATUS_E_NOMEM;
1079 }
1080 
1081 static
1082 void dp_pdev_mon_rings_deinit_2_0(struct dp_pdev *pdev)
1083 {
1084 	int mac_id = 0;
1085 	struct dp_soc *soc = pdev->soc;
1086 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1087 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1088 
1089 	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
1090 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
1091 							 pdev->pdev_id);
1092 
1093 		dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
1094 			       RXDMA_MONITOR_DST, pdev->pdev_id);
1095 		dp_srng_deinit(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
1096 			       TX_MONITOR_DST, pdev->pdev_id);
1097 	}
1098 }
1099 
1100 static
1101 QDF_STATUS dp_pdev_mon_rings_init_2_0(struct dp_pdev *pdev)
1102 {
1103 	struct dp_soc *soc = pdev->soc;
1104 	int mac_id = 0;
1105 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1106 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1107 
1108 	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
1109 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
1110 							 pdev->pdev_id);
1111 
1112 		if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
1113 				 RXDMA_MONITOR_DST, pdev->pdev_id, lmac_id)) {
1114 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
1115 			goto fail;
1116 		}
1117 
1118 		if (dp_srng_init(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
1119 				 TX_MONITOR_DST, pdev->pdev_id, lmac_id)) {
1120 			dp_mon_err("%pK: " RNG_ERR "tx_mon_dst_ring", soc);
1121 			goto fail;
1122 		}
1123 	}
1124 	return QDF_STATUS_SUCCESS;
1125 
1126 fail:
1127 	dp_pdev_mon_rings_deinit_2_0(pdev);
1128 	return QDF_STATUS_E_NOMEM;
1129 }
1130 
1131 static
1132 void dp_pdev_mon_rings_free_2_0(struct dp_pdev *pdev)
1133 {
1134 	int mac_id = 0;
1135 	struct dp_soc *soc = pdev->soc;
1136 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1137 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1138 
1139 	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
1140 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
1141 							 pdev->pdev_id);
1142 
1143 		dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
1144 		dp_srng_free(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id]);
1145 	}
1146 }
1147 
1148 static
1149 QDF_STATUS dp_pdev_mon_rings_alloc_2_0(struct dp_pdev *pdev)
1150 {
1151 	struct dp_soc *soc = pdev->soc;
1152 	int mac_id = 0;
1153 	int entries;
1154 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
1155 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1156 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1157 
1158 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
1159 
1160 	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
1161 		int lmac_id =
1162 		dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
1163 
1164 		entries = wlan_cfg_get_dma_rx_mon_dest_ring_size(pdev_cfg_ctx);
1165 		if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
1166 				  RXDMA_MONITOR_DST, entries, 0)) {
1167 			dp_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", pdev);
1168 			goto fail;
1169 		}
1170 
1171 		entries = wlan_cfg_get_dma_tx_mon_dest_ring_size(pdev_cfg_ctx);
1172 		if (dp_srng_alloc(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
1173 				  TX_MONITOR_DST, entries, 0)) {
1174 			dp_err("%pK: " RNG_ERR "tx_mon_dst_ring", pdev);
1175 			goto fail;
1176 		}
1177 	}
1178 	return QDF_STATUS_SUCCESS;
1179 
1180 fail:
1181 	dp_pdev_mon_rings_free_2_0(pdev);
1182 	return QDF_STATUS_E_NOMEM;
1183 }
1184 
static
void dp_mon_pdev_free_2_0(struct dp_pdev *pdev)
{
	/* Nothing to free: dp_mon_pdev_alloc_2_0() allocates no memory */
}
1189 
1190 static
1191 QDF_STATUS dp_mon_pdev_alloc_2_0(struct dp_pdev *pdev)
1192 {
1193 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1194 	struct dp_mon_pdev_be *mon_pdev_be =
1195 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1196 
1197 	if (!mon_pdev_be) {
1198 		dp_mon_err("DP MON PDEV is NULL");
1199 		return QDF_STATUS_E_FAILURE;
1200 	}
1201 
1202 	return QDF_STATUS_SUCCESS;
1203 }
1204 
1205 #else
static inline
QDF_STATUS dp_mon_htt_srng_setup_2_0(struct dp_soc *soc,
				     struct dp_pdev *pdev,
				     int mac_id,
				     int mac_for_pdev)
{
	/* DISABLE_MON_CONFIG build: no HTT SRNG setup required */
	return QDF_STATUS_SUCCESS;
}

static uint32_t
dp_tx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
		      uint32_t mac_id, uint32_t quota)
{
	/* DISABLE_MON_CONFIG build: no Tx monitor work processed */
	return 0;
}

static void
dp_tx_mon_print_ring_stat_2_0(struct dp_pdev *pdev)
{
	/* DISABLE_MON_CONFIG build: no ring statistics to print */
}
1226 
1227 static inline
1228 QDF_STATUS dp_mon_soc_attach_2_0(struct dp_soc *soc)
1229 {
1230 	return status;
1231 }
1232 
1233 static inline
1234 QDF_STATUS dp_mon_soc_detach_2_0(struct dp_soc *soc)
1235 {
1236 	return status;
1237 }
1238 
/* NOTE(review): some stubs below take (soc, pdev) while the real
 * DISABLE_MON_CONFIG-off variants take only (pdev) — confirm the
 * ops-table function-pointer types still match in this build.
 */
static inline
void dp_pdev_mon_rings_deinit_2_0(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_pdev_mon_rings_init_2_0(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_pdev_mon_rings_free_2_0(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_pdev_mon_rings_alloc_2_0(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_mon_pdev_free_2_0(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_mon_pdev_alloc_2_0(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_vdev_set_monitor_mode_buf_rings_2_0(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_vdev_set_monitor_mode_rings_2_0(struct dp_pdev *pdev,
					      uint8_t delayed_replenish)
{
	return QDF_STATUS_SUCCESS;
}
1283 #endif
1284 
1285 #if defined(WDI_EVENT_ENABLE) &&\
1286 	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
	/* Wire the WDI ppdu stats indication handler into mon ops */
	mon_soc->mon_ops->mon_ppdu_stats_ind_handler =
					dp_ppdu_stats_ind_handler;
}
#else
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
	/* WDI event / pktlog support compiled out: nothing to register */
}
1298 #endif
1299 
1300 static void dp_mon_register_intr_ops_2_0(struct dp_soc *soc)
1301 {
1302 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1303 
1304 	mon_soc->mon_ops->rx_mon_refill_buf_ring =
1305 			dp_rx_mon_refill_buf_ring_2_0,
1306 	mon_soc->mon_ops->tx_mon_refill_buf_ring =
1307 			NULL,
1308 	mon_soc->mon_rx_process = dp_rx_mon_process_2_0;
1309 	dp_mon_ppdu_stats_handler_register(mon_soc);
1310 }
1311 
1312 /**
1313  * dp_mon_register_feature_ops_2_0() - register feature ops
1314  *
1315  * @soc: dp soc context
1316  *
1317  * @return: void
1318  */
1319 static void
1320 dp_mon_register_feature_ops_2_0(struct dp_soc *soc)
1321 {
1322 	struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);
1323 
1324 	if (!mon_ops) {
1325 		dp_err("mon_ops is NULL, feature ops registration failed");
1326 		return;
1327 	}
1328 
1329 	mon_ops->mon_config_debug_sniffer = dp_config_debug_sniffer;
1330 	mon_ops->mon_peer_tx_init = NULL;
1331 	mon_ops->mon_peer_tx_cleanup = NULL;
1332 	mon_ops->mon_htt_ppdu_stats_attach = dp_htt_ppdu_stats_attach;
1333 	mon_ops->mon_htt_ppdu_stats_detach = dp_htt_ppdu_stats_detach;
1334 	mon_ops->mon_print_pdev_rx_mon_stats = dp_print_pdev_rx_mon_stats;
1335 	mon_ops->mon_set_bsscolor = dp_mon_set_bsscolor;
1336 	mon_ops->mon_pdev_get_filter_ucast_data =
1337 					dp_lite_mon_get_filter_ucast_data;
1338 	mon_ops->mon_pdev_get_filter_mcast_data =
1339 					dp_lite_mon_get_filter_mcast_data;
1340 	mon_ops->mon_pdev_get_filter_non_data =
1341 					dp_lite_mon_get_filter_non_data;
1342 	mon_ops->mon_neighbour_peer_add_ast = NULL;
1343 #ifndef DISABLE_MON_CONFIG
1344 	mon_ops->mon_tx_process = dp_tx_mon_process_2_0;
1345 	mon_ops->print_txmon_ring_stat = dp_tx_mon_print_ring_stat_2_0;
1346 #endif
1347 #ifdef WLAN_TX_PKT_CAPTURE_ENH_BE
1348 	mon_ops->mon_peer_tid_peer_id_update = NULL;
1349 	mon_ops->mon_tx_capture_debugfs_init = NULL;
1350 	mon_ops->mon_tx_add_to_comp_queue = NULL;
1351 	mon_ops->mon_print_pdev_tx_capture_stats =
1352 					dp_print_pdev_tx_monitor_stats_2_0;
1353 	mon_ops->mon_config_enh_tx_capture = dp_config_enh_tx_monitor_2_0;
1354 	mon_ops->mon_tx_peer_filter = dp_peer_set_tx_capture_enabled_2_0;
1355 #endif
1356 #if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH_BE))
1357 	mon_ops->mon_peer_tid_peer_id_update = NULL;
1358 	mon_ops->mon_tx_capture_debugfs_init = NULL;
1359 	mon_ops->mon_tx_add_to_comp_queue = NULL;
1360 	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
1361 	mon_ops->mon_config_enh_tx_capture = dp_config_enh_tx_core_monitor_2_0;
1362 	mon_ops->mon_tx_peer_filter = NULL;
1363 #endif
1364 #ifdef WLAN_RX_PKT_CAPTURE_ENH
1365 	mon_ops->mon_config_enh_rx_capture = NULL;
1366 #endif
1367 #ifdef QCA_SUPPORT_BPR
1368 	mon_ops->mon_set_bpr_enable = dp_set_bpr_enable_2_0;
1369 #endif
1370 #ifdef ATH_SUPPORT_NAC
1371 	mon_ops->mon_set_filter_neigh_peers = NULL;
1372 #endif
1373 #ifdef WLAN_ATF_ENABLE
1374 	mon_ops->mon_set_atf_stats_enable = dp_set_atf_stats_enable;
1375 #endif
1376 #ifdef FEATURE_NAC_RSSI
1377 	mon_ops->mon_filter_neighbour_peer = NULL;
1378 #endif
1379 #ifdef QCA_MCOPY_SUPPORT
1380 	mon_ops->mon_filter_setup_mcopy_mode = NULL;
1381 	mon_ops->mon_filter_reset_mcopy_mode = NULL;
1382 	mon_ops->mon_mcopy_check_deliver = NULL;
1383 #endif
1384 #ifdef QCA_ENHANCED_STATS_SUPPORT
1385 	mon_ops->mon_filter_setup_enhanced_stats =
1386 				dp_mon_filter_setup_enhanced_stats_2_0;
1387 	mon_ops->mon_filter_reset_enhanced_stats =
1388 				dp_mon_filter_reset_enhanced_stats_2_0;
1389 	mon_ops->mon_tx_enable_enhanced_stats =
1390 				dp_mon_tx_enable_enhanced_stats_2_0;
1391 	mon_ops->mon_tx_disable_enhanced_stats =
1392 				dp_mon_tx_disable_enhanced_stats_2_0;
1393 	mon_ops->mon_ppdu_stats_feat_enable_check =
1394 				dp_ppdu_stats_feat_enable_check_2_0;
1395 	mon_ops->mon_tx_stats_update = dp_mon_tx_stats_update_2_0;
1396 	mon_ops->mon_ppdu_desc_deliver = dp_ppdu_desc_deliver;
1397 #ifdef WDI_EVENT_ENABLE
1398 	mon_ops->mon_ppdu_desc_notify = dp_ppdu_desc_notify_2_0;
1399 #endif
1400 #endif
1401 #ifdef WLAN_RX_PKT_CAPTURE_ENH
1402 	mon_ops->mon_filter_setup_rx_enh_capture = NULL;
1403 #endif
1404 #ifdef WDI_EVENT_ENABLE
1405 	mon_ops->mon_set_pktlog_wifi3 = dp_set_pktlog_wifi3;
1406 	mon_ops->mon_filter_setup_rx_pkt_log_full =
1407 				dp_mon_filter_setup_rx_pkt_log_full_2_0;
1408 	mon_ops->mon_filter_reset_rx_pkt_log_full =
1409 				dp_mon_filter_reset_rx_pkt_log_full_2_0;
1410 	mon_ops->mon_filter_setup_rx_pkt_log_lite =
1411 				dp_mon_filter_setup_rx_pkt_log_lite_2_0;
1412 	mon_ops->mon_filter_reset_rx_pkt_log_lite =
1413 				dp_mon_filter_reset_rx_pkt_log_lite_2_0;
1414 	mon_ops->mon_filter_setup_rx_pkt_log_cbf =
1415 				dp_mon_filter_setup_rx_pkt_log_cbf_2_0;
1416 	mon_ops->mon_filter_reset_rx_pkt_log_cbf =
1417 				dp_mon_filter_reset_rx_pktlog_cbf_2_0;
1418 	mon_ops->mon_filter_setup_pktlog_hybrid =
1419 				dp_mon_filter_setup_pktlog_hybrid_2_0;
1420 	mon_ops->mon_filter_reset_pktlog_hybrid =
1421 				dp_mon_filter_reset_pktlog_hybrid_2_0;
1422 #endif
1423 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
1424 	mon_ops->mon_pktlogmod_exit = dp_pktlogmod_exit;
1425 #endif
1426 	mon_ops->rx_hdr_length_set = dp_rx_mon_hdr_length_set;
1427 	mon_ops->rx_packet_length_set = dp_rx_mon_packet_length_set;
1428 	mon_ops->rx_mon_enable = dp_rx_mon_enable_set;
1429 	mon_ops->rx_wmask_subscribe = dp_rx_mon_word_mask_subscribe;
1430 	mon_ops->rx_pkt_tlv_offset = dp_rx_mon_pkt_tlv_offset_subscribe;
1431 	mon_ops->rx_enable_mpdu_logging = dp_rx_mon_enable_mpdu_logging;
1432 	mon_ops->mon_neighbour_peers_detach = NULL;
1433 	mon_ops->mon_vdev_set_monitor_mode_buf_rings =
1434 				dp_vdev_set_monitor_mode_buf_rings_2_0;
1435 	mon_ops->mon_vdev_set_monitor_mode_rings =
1436 				dp_vdev_set_monitor_mode_rings_2_0;
1437 #ifdef QCA_ENHANCED_STATS_SUPPORT
1438 	mon_ops->mon_rx_stats_update = dp_rx_mon_stats_update_2_0;
1439 	mon_ops->mon_rx_populate_ppdu_usr_info =
1440 			dp_rx_mon_populate_ppdu_usr_info_2_0;
1441 	mon_ops->mon_rx_populate_ppdu_info = dp_rx_mon_populate_ppdu_info_2_0;
1442 #endif
1443 #ifdef QCA_UNDECODED_METADATA_SUPPORT
1444 	mon_ops->mon_config_undecoded_metadata_capture =
1445 		dp_mon_config_undecoded_metadata_capture;
1446 	mon_ops->mon_filter_setup_undecoded_metadata_capture =
1447 		dp_mon_filter_setup_undecoded_metadata_capture_2_0;
1448 	mon_ops->mon_filter_reset_undecoded_metadata_capture =
1449 		dp_mon_filter_reset_undecoded_metadata_capture_2_0;
1450 #endif
1451 	mon_ops->rx_enable_fpmo = dp_rx_mon_enable_fpmo;
1452 	mon_ops->mon_rx_print_advanced_stats =
1453 		dp_mon_rx_print_advanced_stats_2_0;
1454 	mon_ops->mon_mac_filter_set = NULL;
1455 }
1456 
/* Monitor ops table for 2.0 (BE) targets; copied or referenced by
 * dp_mon_ops_register_2_0() below. NULL entries are features not
 * applicable to this architecture.
 */
struct dp_mon_ops monitor_ops_2_0 = {
	.mon_soc_cfg_init = dp_mon_soc_cfg_init,
	.mon_soc_attach = dp_mon_soc_attach_2_0,
	.mon_soc_detach = dp_mon_soc_detach_2_0,
	.mon_soc_init = dp_mon_soc_init_2_0,
	.mon_soc_deinit = dp_mon_soc_deinit_2_0,
	.mon_pdev_alloc = dp_mon_pdev_alloc_2_0,
	.mon_pdev_free = dp_mon_pdev_free_2_0,
	.mon_pdev_attach = dp_mon_pdev_attach,
	.mon_pdev_detach = dp_mon_pdev_detach,
	.mon_pdev_init = dp_mon_pdev_init,
	.mon_pdev_deinit = dp_mon_pdev_deinit,
	.mon_vdev_attach = dp_mon_vdev_attach,
	.mon_vdev_detach = dp_mon_vdev_detach,
	.mon_peer_attach = dp_mon_peer_attach,
	.mon_peer_detach = dp_mon_peer_detach,
	.mon_peer_get_peerstats_ctx = dp_mon_peer_get_peerstats_ctx,
	.mon_peer_reset_stats = dp_mon_peer_reset_stats,
	.mon_peer_get_stats = dp_mon_peer_get_stats,
	.mon_invalid_peer_update_pdev_stats =
				dp_mon_invalid_peer_update_pdev_stats,
	.mon_peer_get_stats_param = dp_mon_peer_get_stats_param,
	.mon_flush_rings = NULL,
#if !defined(DISABLE_MON_CONFIG)
	.mon_pdev_htt_srng_setup = dp_mon_pdev_htt_srng_setup_2_0,
	.mon_soc_htt_srng_setup = dp_mon_soc_htt_srng_setup_2_0,
#endif
#if defined(DP_CON_MON)
	.mon_service_rings = NULL,
#endif
#ifndef DISABLE_MON_CONFIG
	.mon_rx_process = NULL,
#endif
#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
	.mon_drop_packets_for_mac = NULL,
#endif
	.mon_vdev_timer_init = NULL,
	.mon_vdev_timer_start = NULL,
	.mon_vdev_timer_stop = NULL,
	.mon_vdev_timer_deinit = NULL,
	.mon_reap_timer_init = NULL,
	.mon_reap_timer_start = NULL,
	.mon_reap_timer_stop = NULL,
	.mon_reap_timer_deinit = NULL,
	.mon_filter_setup_rx_mon_mode = dp_mon_filter_setup_rx_mon_mode_2_0,
	.mon_filter_reset_rx_mon_mode = dp_mon_filter_reset_rx_mon_mode_2_0,
	.mon_filter_setup_tx_mon_mode = dp_mon_filter_setup_tx_mon_mode_2_0,
	.mon_filter_reset_tx_mon_mode = dp_mon_filter_reset_tx_mon_mode_2_0,
	.tx_mon_filter_update = dp_tx_mon_filter_update_2_0,
	.rx_mon_filter_update = dp_rx_mon_filter_update_2_0,
	.set_mon_mode_buf_rings_tx = dp_vdev_set_monitor_mode_buf_rings_tx_2_0,
	.tx_mon_filter_alloc = dp_mon_filter_alloc_2_0,
	.tx_mon_filter_dealloc = dp_mon_filter_dealloc_2_0,
	.mon_rings_alloc = dp_pdev_mon_rings_alloc_2_0,
	.mon_rings_free = dp_pdev_mon_rings_free_2_0,
	.mon_rings_init = dp_pdev_mon_rings_init_2_0,
	.mon_rings_deinit = dp_pdev_mon_rings_deinit_2_0,
	.rx_mon_desc_pool_init = NULL,
	.rx_mon_desc_pool_deinit = NULL,
	.rx_mon_desc_pool_alloc = NULL,
	.rx_mon_desc_pool_free = NULL,
	.rx_mon_buffers_alloc = NULL,
	.rx_mon_buffers_free = NULL,
	.tx_mon_desc_pool_init = NULL,
	.tx_mon_desc_pool_deinit = NULL,
	.tx_mon_desc_pool_alloc = NULL,
	.tx_mon_desc_pool_free = NULL,
#ifndef DISABLE_MON_CONFIG
	.mon_register_intr_ops = dp_mon_register_intr_ops_2_0,
#endif
	.mon_register_feature_ops = dp_mon_register_feature_ops_2_0,
#ifdef WLAN_TX_PKT_CAPTURE_ENH_BE
	.mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach_2_0,
	.mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach_2_0,
	.mon_peer_tx_capture_filter_check = NULL,
#endif
#if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH_BE))
	.mon_tx_ppdu_stats_attach = NULL,
	.mon_tx_ppdu_stats_detach = NULL,
	.mon_peer_tx_capture_filter_check = NULL,
#endif
	.mon_pdev_ext_init = dp_mon_pdev_ext_init_2_0,
	.mon_pdev_ext_deinit = dp_mon_pdev_ext_deinit_2_0,
	.mon_lite_mon_alloc = dp_lite_mon_alloc,
	.mon_lite_mon_dealloc = dp_lite_mon_dealloc,
	.mon_lite_mon_vdev_delete = dp_lite_mon_vdev_delete,
	.mon_lite_mon_disable_rx = dp_lite_mon_disable_rx,
	.mon_lite_mon_is_rx_adv_filter_enable = dp_lite_mon_is_rx_adv_filter_enable,
	.mon_rx_ppdu_info_cache_create = dp_rx_mon_ppdu_info_cache_create,
	.mon_rx_ppdu_info_cache_destroy = dp_rx_mon_ppdu_info_cache_destroy,
};
1548 
/* CDP (control-path) monitor ops exported for 2.0 (BE) targets */
struct cdp_mon_ops dp_ops_mon_2_0 = {
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = NULL,
	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
	.config_full_mon_mode = NULL,
	.soc_config_full_mon_mode = NULL,
	.get_mon_pdev_rx_stats = dp_pdev_get_rx_mon_stats,
	.txrx_enable_mon_reap_timer = NULL,
#ifdef QCA_SUPPORT_LITE_MONITOR
	.txrx_set_lite_mon_config = dp_lite_mon_set_config,
	.txrx_get_lite_mon_config = dp_lite_mon_get_config,
	.txrx_set_lite_mon_peer_config = dp_lite_mon_set_peer_config,
	.txrx_get_lite_mon_peer_config = dp_lite_mon_get_peer_config,
	.txrx_is_lite_mon_enabled = dp_lite_mon_is_enabled,
	.txrx_get_lite_mon_legacy_feature_enabled =
				dp_lite_mon_get_legacy_feature_enabled,
#endif
	.txrx_set_mon_pdev_params_rssi_dbm_conv =
				dp_mon_pdev_params_rssi_dbm_conv,
#ifdef WLAN_TELEMETRY_STATS_SUPPORT
	.txrx_update_pdev_mon_telemetry_airtime_stats =
			dp_pdev_update_telemetry_airtime_stats,
#endif
};
1574 
1575 #ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
1576 void dp_mon_ops_register_2_0(struct dp_mon_soc *mon_soc)
1577 {
1578 	struct dp_mon_ops *mon_ops = NULL;
1579 
1580 	if (mon_soc->mon_ops) {
1581 		dp_mon_err("monitor ops is allocated");
1582 		return;
1583 	}
1584 
1585 	mon_ops = qdf_mem_malloc(sizeof(struct dp_mon_ops));
1586 	if (!mon_ops) {
1587 		dp_mon_err("Failed to allocate memory for mon ops");
1588 		return;
1589 	}
1590 
1591 	qdf_mem_copy(mon_ops, &monitor_ops_2_0, sizeof(struct dp_mon_ops));
1592 	mon_soc->mon_ops = mon_ops;
1593 }
1594 
1595 void dp_mon_cdp_ops_register_2_0(struct cdp_ops *ops)
1596 {
1597 	struct cdp_mon_ops *mon_ops = NULL;
1598 
1599 	if (ops->mon_ops) {
1600 		dp_mon_err("cdp monitor ops is allocated");
1601 		return;
1602 	}
1603 
1604 	mon_ops = qdf_mem_malloc(sizeof(struct cdp_mon_ops));
1605 	if (!mon_ops) {
1606 		dp_mon_err("Failed to allocate memory for mon ops");
1607 		return;
1608 	}
1609 
1610 	qdf_mem_copy(mon_ops, &dp_ops_mon_2_0, sizeof(struct cdp_mon_ops));
1611 	ops->mon_ops = mon_ops;
1612 }
1613 #else
void dp_mon_ops_register_2_0(struct dp_mon_soc *mon_soc)
{
	/* Single-soc build: point directly at the shared ops table */
	mon_soc->mon_ops = &monitor_ops_2_0;
}

void dp_mon_cdp_ops_register_2_0(struct cdp_ops *ops)
{
	/* Single-soc build: point directly at the shared cdp mon ops */
	ops->mon_ops = &dp_ops_mon_2_0;
}
1623 #endif
1624 
1625 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
1626 	defined(WLAN_SUPPORT_RX_FLOW_TAG)
/**
 * dp_mon_rx_update_rx_protocol_tag_stats() - Update mon protocol
 *					      tagging statistics
 * @pdev: pdev handle
 * @protocol_index: Protocol index for which the stats should be incremented
 *
 * Return: void
 */
void dp_mon_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
					    uint16_t protocol_index)
{
	pdev->mon_proto_tag_stats[protocol_index].tag_ctr++;
}
1640 #else
void dp_mon_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
					    uint16_t protocol_index)
{
	/* Protocol/flow tagging support compiled out: no-op */
}
1645 #endif
1646