xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/2.0/dp_mon_2.0.c (revision 2888b71da71bce103343119fa1b31f4a0cee07c8)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <dp_types.h>
19 #include "dp_rx.h"
20 #include "dp_peer.h"
21 #include <dp_htt.h>
22 #include <dp_mon_filter.h>
23 #include <dp_mon.h>
24 #include <dp_rx_mon.h>
25 #include <dp_rx_mon_2.0.h>
26 #include <dp_mon_2.0.h>
27 #include <dp_mon_filter_2.0.h>
28 #include <dp_tx_mon_2.0.h>
29 #include <hal_be_api_mon.h>
30 #include <dp_be.h>
31 #include <htt_ppdu_stats.h>
32 #ifdef QCA_SUPPORT_LITE_MONITOR
33 #include "dp_lite_mon.h"
34 #endif
35 
36 #if !defined(DISABLE_MON_CONFIG)
37 
38 QDF_STATUS dp_rx_mon_ppdu_info_cache_create(struct dp_pdev *pdev)
39 {
40 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
41 	struct dp_mon_pdev_be *mon_pdev_be =
42 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
43 	uint16_t obj;
44 	struct hal_rx_ppdu_info *ppdu_info = NULL;
45 
46 	mon_pdev_be->ppdu_info_cache =
47 		qdf_kmem_cache_create("rx_mon_ppdu_info_cache",
48 				      sizeof(struct hal_rx_ppdu_info));
49 
50 	if (!mon_pdev_be->ppdu_info_cache) {
51 		dp_mon_err("cache creation failed pdev :%px", pdev);
52 		return QDF_STATUS_E_NOMEM;
53 	}
54 
55 	TAILQ_INIT(&mon_pdev_be->rx_mon_free_queue);
56 	for (obj = 0; obj < DP_RX_MON_WQ_THRESHOLD; obj++) {
57 		ppdu_info =  (struct hal_rx_ppdu_info *)qdf_kmem_cache_alloc(mon_pdev_be->ppdu_info_cache);
58 
59 		if (ppdu_info) {
60 			TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_free_queue,
61 					  ppdu_info,
62 					  ppdu_free_list_elem);
63 			mon_pdev_be->total_free_elem++;
64 		}
65 	}
66 	qdf_spinlock_create(&mon_pdev_be->ppdu_info_lock);
67 
68 	return QDF_STATUS_SUCCESS;
69 }
70 
71 void dp_rx_mon_ppdu_info_cache_destroy(struct dp_pdev *pdev)
72 {
73 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
74 	struct dp_mon_pdev_be *mon_pdev_be =
75 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
76 	struct hal_rx_ppdu_info *ppdu_info = NULL, *temp_ppdu_info = NULL;
77 
78 	qdf_err(" total free element: %d", mon_pdev_be->total_free_elem);
79 	qdf_spin_lock(&mon_pdev_be->ppdu_info_lock);
80 	TAILQ_FOREACH_SAFE(ppdu_info,
81 			   &mon_pdev_be->rx_mon_free_queue,
82 			   ppdu_free_list_elem,
83 			   temp_ppdu_info) {
84 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_free_queue,
85 			     ppdu_info, ppdu_free_list_elem);
86 		if (ppdu_info) {
87 			mon_pdev_be->total_free_elem--;
88 			qdf_err(" total free element: %d", mon_pdev_be->total_free_elem);
89 			qdf_kmem_cache_free(mon_pdev_be->ppdu_info_cache,
90 					    ppdu_info);
91 		}
92 	}
93 	qdf_spin_unlock(&mon_pdev_be->ppdu_info_lock);
94 	qdf_kmem_cache_destroy(mon_pdev_be->ppdu_info_cache);
95 }
96 
97 /**
98  * dp_mon_pdev_ext_init_2_0() - Init pdev ext param
99  *
100  * @pdev: DP pdev handle
101  *
102  * Return:  QDF_STATUS_SUCCESS: Success
103  *          QDF_STATUS_E_FAILURE: failure
104  */
105 QDF_STATUS dp_mon_pdev_ext_init_2_0(struct dp_pdev *pdev)
106 {
107 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
108 	struct dp_mon_pdev_be *mon_pdev_be =
109 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
110 
111 	qdf_create_work(0, &mon_pdev_be->rx_mon_work,
112 			dp_rx_mon_process_ppdu, pdev);
113 	mon_pdev_be->rx_mon_workqueue =
114 		qdf_alloc_unbound_workqueue("rx_mon_work_queue");
115 
116 	if (!mon_pdev_be->rx_mon_workqueue) {
117 		dp_mon_err("failed to create rxmon wq mon_pdev: %pK", mon_pdev);
118 		goto fail;
119 	}
120 	TAILQ_INIT(&mon_pdev_be->rx_mon_queue);
121 
122 	qdf_spinlock_create(&mon_pdev_be->rx_mon_wq_lock);
123 	qdf_err(" total free element: %d", mon_pdev_be->total_free_elem);
124 
125 	return QDF_STATUS_SUCCESS;
126 
127 fail:
128 	return QDF_STATUS_E_FAILURE;
129 }
130 
131 /**
132  * dp_mon_pdev_ext_deinit_2_0() - denit pdev ext param
133  *
134  * @pdev: DP pdev handle
135  *
136  * Return: QDF_STATUS_SUCCESS
137  */
138 QDF_STATUS dp_mon_pdev_ext_deinit_2_0(struct dp_pdev *pdev)
139 {
140 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
141 	struct dp_mon_pdev_be *mon_pdev_be =
142 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
143 
144 	if (!mon_pdev_be->rx_mon_workqueue)
145 		return QDF_STATUS_E_FAILURE;
146 
147 	qdf_err(" total free element: %d", mon_pdev_be->total_free_elem);
148 	qdf_flush_workqueue(0, mon_pdev_be->rx_mon_workqueue);
149 	qdf_destroy_workqueue(0, mon_pdev_be->rx_mon_workqueue);
150 	qdf_flush_work(&mon_pdev_be->rx_mon_work);
151 	qdf_disable_work(&mon_pdev_be->rx_mon_work);
152 	dp_rx_mon_drain_wq(pdev);
153 	mon_pdev_be->rx_mon_workqueue = NULL;
154 	qdf_spinlock_destroy(&mon_pdev_be->rx_mon_wq_lock);
155 
156 	return QDF_STATUS_SUCCESS;
157 }
158 
159 /*
160  * dp_mon_add_desc_list_to_free_list() - append unused desc_list back to
161  *					freelist.
162  *
163  * @soc: core txrx main context
164  * @local_desc_list: local desc list provided by the caller
165  * @tail: attach the point to last desc of local desc list
166  * @mon_desc_pool: monitor descriptor pool pointer
167  */
168 void
169 dp_mon_add_desc_list_to_free_list(struct dp_soc *soc,
170 				  union dp_mon_desc_list_elem_t **local_desc_list,
171 				  union dp_mon_desc_list_elem_t **tail,
172 				  struct dp_mon_desc_pool *mon_desc_pool)
173 {
174 	union dp_mon_desc_list_elem_t *temp_list = NULL;
175 
176 	qdf_spin_lock_bh(&mon_desc_pool->lock);
177 
178 	temp_list = mon_desc_pool->freelist;
179 	mon_desc_pool->freelist = *local_desc_list;
180 	(*tail)->next = temp_list;
181 	*tail = NULL;
182 	*local_desc_list = NULL;
183 
184 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
185 }
186 
187 /*
188  * dp_mon_get_free_desc_list() - provide a list of descriptors from
189  *				the free mon desc pool.
190  *
191  * @soc: core txrx main context
192  * @mon_desc_pool: monitor descriptor pool pointer
193  * @num_descs: number of descs requested from freelist
194  * @desc_list: attach the descs to this list (output parameter)
195  * @tail: attach the point to last desc of free list (output parameter)
196  *
197  * Return: number of descs allocated from free list.
198  */
199 static uint16_t
200 dp_mon_get_free_desc_list(struct dp_soc *soc,
201 			  struct dp_mon_desc_pool *mon_desc_pool,
202 			  uint16_t num_descs,
203 			  union dp_mon_desc_list_elem_t **desc_list,
204 			  union dp_mon_desc_list_elem_t **tail)
205 {
206 	uint16_t count;
207 
208 	qdf_spin_lock_bh(&mon_desc_pool->lock);
209 
210 	*desc_list = *tail = mon_desc_pool->freelist;
211 
212 	for (count = 0; count < num_descs; count++) {
213 		if (qdf_unlikely(!mon_desc_pool->freelist)) {
214 			qdf_spin_unlock_bh(&mon_desc_pool->lock);
215 			return count;
216 		}
217 		*tail = mon_desc_pool->freelist;
218 		mon_desc_pool->freelist = mon_desc_pool->freelist->next;
219 	}
220 	(*tail)->next = NULL;
221 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
222 	return count;
223 }
224 
225 void dp_mon_pool_frag_unmap_and_free(struct dp_soc *soc,
226 				     struct dp_mon_desc_pool *mon_desc_pool)
227 {
228 	int desc_id;
229 	qdf_frag_t vaddr;
230 	qdf_dma_addr_t paddr;
231 
232 	qdf_spin_lock_bh(&mon_desc_pool->lock);
233 	for (desc_id = 0; desc_id < mon_desc_pool->pool_size; desc_id++) {
234 		if (mon_desc_pool->array[desc_id].mon_desc.in_use) {
235 			vaddr = mon_desc_pool->array[desc_id].mon_desc.buf_addr;
236 			paddr = mon_desc_pool->array[desc_id].mon_desc.paddr;
237 
238 			if (!(mon_desc_pool->array[desc_id].mon_desc.unmapped)) {
239 				qdf_mem_unmap_page(soc->osdev, paddr,
240 						   mon_desc_pool->buf_size,
241 						   QDF_DMA_FROM_DEVICE);
242 				mon_desc_pool->array[desc_id].mon_desc.unmapped = 1;
243 				mon_desc_pool->array[desc_id].mon_desc.cookie = desc_id;
244 			}
245 			qdf_frag_free(vaddr);
246 		}
247 	}
248 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
249 }
250 
251 static inline QDF_STATUS
252 dp_mon_frag_alloc_and_map(struct dp_soc *dp_soc,
253 			  struct dp_mon_desc *mon_desc,
254 			  struct dp_mon_desc_pool *mon_desc_pool)
255 {
256 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
257 
258 	mon_desc->buf_addr = qdf_frag_alloc(&mon_desc_pool->pf_cache,
259 					    mon_desc_pool->buf_size);
260 
261 	if (!mon_desc->buf_addr) {
262 		dp_mon_err("Frag alloc failed");
263 		return QDF_STATUS_E_NOMEM;
264 	}
265 
266 	ret = qdf_mem_map_page(dp_soc->osdev,
267 			       mon_desc->buf_addr,
268 			       QDF_DMA_FROM_DEVICE,
269 			       mon_desc_pool->buf_size,
270 			       &mon_desc->paddr);
271 
272 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
273 		qdf_frag_free(mon_desc->buf_addr);
274 		dp_mon_err("Frag map failed");
275 		return QDF_STATUS_E_FAULT;
276 	}
277 
278 	return QDF_STATUS_SUCCESS;
279 }
280 
281 QDF_STATUS
282 dp_mon_buffers_replenish(struct dp_soc *dp_soc,
283 			 struct dp_srng *dp_mon_srng,
284 			 struct dp_mon_desc_pool *mon_desc_pool,
285 			 uint32_t num_req_buffers,
286 			 union dp_mon_desc_list_elem_t **desc_list,
287 			 union dp_mon_desc_list_elem_t **tail,
288 			 uint32_t *replenish_cnt_ref)
289 {
290 	uint32_t num_alloc_desc;
291 	uint16_t num_desc_to_free = 0;
292 	uint32_t num_entries_avail;
293 	uint32_t count = 0;
294 	int sync_hw_ptr = 1;
295 	struct dp_mon_desc mon_desc = {0};
296 	void *mon_ring_entry;
297 	union dp_mon_desc_list_elem_t *next;
298 	void *mon_srng;
299 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
300 	struct dp_mon_soc *mon_soc = dp_soc->monitor_soc;
301 
302 	if (!num_req_buffers) {
303 		dp_mon_debug("%pK: Received request for 0 buffers replenish",
304 			     dp_soc);
305 		ret = QDF_STATUS_E_INVAL;
306 		goto free_desc;
307 	}
308 
309 	mon_srng = dp_mon_srng->hal_srng;
310 
311 	hal_srng_access_start(dp_soc->hal_soc, mon_srng);
312 
313 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
314 						   mon_srng, sync_hw_ptr);
315 
316 	if (!num_entries_avail) {
317 		num_desc_to_free = num_req_buffers;
318 		hal_srng_access_end(dp_soc->hal_soc, mon_srng);
319 		goto free_desc;
320 	}
321 	if (num_entries_avail < num_req_buffers) {
322 		num_desc_to_free = num_req_buffers - num_entries_avail;
323 		num_req_buffers = num_entries_avail;
324 	}
325 
326 	/*
327 	 * if desc_list is NULL, allocate the descs from freelist
328 	 */
329 	if (!(*desc_list)) {
330 		num_alloc_desc = dp_mon_get_free_desc_list(dp_soc,
331 							   mon_desc_pool,
332 							   num_req_buffers,
333 							   desc_list,
334 							   tail);
335 
336 		if (!num_alloc_desc) {
337 			dp_mon_debug("%pK: no free rx_descs in freelist", dp_soc);
338 			hal_srng_access_end(dp_soc->hal_soc, mon_srng);
339 			return QDF_STATUS_E_NOMEM;
340 		}
341 
342 		dp_mon_info("%pK: %d rx desc allocated",
343 			    dp_soc, num_alloc_desc);
344 
345 		num_req_buffers = num_alloc_desc;
346 	}
347 
348 	while (count <= num_req_buffers - 1) {
349 		ret = dp_mon_frag_alloc_and_map(dp_soc,
350 						&mon_desc,
351 						mon_desc_pool);
352 
353 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
354 			if (qdf_unlikely(ret  == QDF_STATUS_E_FAULT))
355 				continue;
356 			break;
357 		}
358 
359 		count++;
360 		next = (*desc_list)->next;
361 		mon_ring_entry = hal_srng_src_get_next(
362 						dp_soc->hal_soc,
363 						mon_srng);
364 
365 		if (!mon_ring_entry)
366 			break;
367 
368 		qdf_assert_always((*desc_list)->mon_desc.in_use == 0);
369 
370 		(*desc_list)->mon_desc.in_use = 1;
371 		(*desc_list)->mon_desc.unmapped = 0;
372 		(*desc_list)->mon_desc.buf_addr = mon_desc.buf_addr;
373 		(*desc_list)->mon_desc.paddr = mon_desc.paddr;
374 		(*desc_list)->mon_desc.magic = DP_MON_DESC_MAGIC;
375 
376 		mon_soc->stats.frag_alloc++;
377 		hal_mon_buff_addr_info_set(dp_soc->hal_soc,
378 					   mon_ring_entry,
379 					   &((*desc_list)->mon_desc),
380 					   mon_desc.paddr);
381 
382 		*desc_list = next;
383 	}
384 
385 	hal_srng_access_end(dp_soc->hal_soc, mon_srng);
386 	if (replenish_cnt_ref)
387 		*replenish_cnt_ref += count;
388 
389 free_desc:
390 	/*
391 	 * add any available free desc back to the free list
392 	 */
393 	if (*desc_list) {
394 		dp_mon_add_desc_list_to_free_list(dp_soc, desc_list, tail,
395 						  mon_desc_pool);
396 	}
397 
398 	return ret;
399 }
400 
401 QDF_STATUS
402 dp_mon_desc_pool_init(struct dp_mon_desc_pool *mon_desc_pool,
403 		      uint32_t pool_size)
404 {
405 	int desc_id;
406 	/* Initialize monitor desc lock */
407 	qdf_spinlock_create(&mon_desc_pool->lock);
408 
409 	qdf_spin_lock_bh(&mon_desc_pool->lock);
410 
411 	mon_desc_pool->buf_size = DP_MON_DATA_BUFFER_SIZE;
412 	/* link SW descs into a freelist */
413 	mon_desc_pool->freelist = &mon_desc_pool->array[0];
414 	mon_desc_pool->pool_size = pool_size - 1;
415 	qdf_mem_zero(mon_desc_pool->freelist,
416 		     mon_desc_pool->pool_size *
417 		     sizeof(union dp_mon_desc_list_elem_t));
418 
419 	for (desc_id = 0; desc_id < mon_desc_pool->pool_size; desc_id++) {
420 		if (desc_id == mon_desc_pool->pool_size - 1)
421 			mon_desc_pool->array[desc_id].next = NULL;
422 		else
423 			mon_desc_pool->array[desc_id].next =
424 				&mon_desc_pool->array[desc_id + 1];
425 		mon_desc_pool->array[desc_id].mon_desc.in_use = 0;
426 		mon_desc_pool->array[desc_id].mon_desc.cookie = desc_id;
427 	}
428 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
429 
430 	return QDF_STATUS_SUCCESS;
431 }
432 
433 void dp_mon_desc_pool_deinit(struct dp_mon_desc_pool *mon_desc_pool)
434 {
435 	qdf_spin_lock_bh(&mon_desc_pool->lock);
436 
437 	mon_desc_pool->freelist = NULL;
438 	mon_desc_pool->pool_size = 0;
439 
440 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
441 	qdf_spinlock_destroy(&mon_desc_pool->lock);
442 }
443 
444 void dp_mon_desc_pool_free(struct dp_mon_desc_pool *mon_desc_pool)
445 {
446 	qdf_mem_free(mon_desc_pool->array);
447 }
448 
449 QDF_STATUS dp_mon_desc_pool_alloc(uint32_t pool_size,
450 				  struct dp_mon_desc_pool *mon_desc_pool)
451 {
452 	mon_desc_pool->pool_size = pool_size - 1;
453 	mon_desc_pool->array = qdf_mem_malloc((mon_desc_pool->pool_size) *
454 				     sizeof(union dp_mon_desc_list_elem_t));
455 
456 	return QDF_STATUS_SUCCESS;
457 }
458 
459 QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_rx_2_0(struct dp_pdev *pdev)
460 {
461 	int rx_mon_max_entries;
462 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
463 	struct dp_soc *soc = pdev->soc;
464 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
465 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
466 	QDF_STATUS status;
467 
468 	if (!mon_soc_be) {
469 		dp_mon_err("DP MON SOC is NULL");
470 		return QDF_STATUS_E_FAILURE;
471 	}
472 
473 	soc_cfg_ctx = soc->wlan_cfg_ctx;
474 	rx_mon_max_entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
475 
476 	hal_set_low_threshold(soc->rxdma_mon_buf_ring[0].hal_srng,
477 			      rx_mon_max_entries >> 2);
478 	status = htt_srng_setup(soc->htt_handle, 0,
479 				soc->rxdma_mon_buf_ring[0].hal_srng,
480 				RXDMA_MONITOR_BUF);
481 
482 	if (status != QDF_STATUS_SUCCESS) {
483 		dp_mon_err("Failed to send htt srng setup message for Rx mon buf ring");
484 		return status;
485 	}
486 
487 	if (mon_soc_be->rx_mon_ring_fill_level < rx_mon_max_entries) {
488 		status = dp_rx_mon_buffers_alloc(soc,
489 						 (rx_mon_max_entries -
490 						 mon_soc_be->rx_mon_ring_fill_level));
491 		if (status != QDF_STATUS_SUCCESS) {
492 			dp_mon_err("%pK: Rx mon buffers allocation failed", soc);
493 			return status;
494 		}
495 		mon_soc_be->rx_mon_ring_fill_level +=
496 				(rx_mon_max_entries -
497 				mon_soc_be->rx_mon_ring_fill_level);
498 	}
499 
500 	return QDF_STATUS_SUCCESS;
501 }
502 
503 QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_tx_2_0(struct dp_pdev *pdev)
504 {
505 	int tx_mon_max_entries;
506 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
507 	struct dp_soc *soc = pdev->soc;
508 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
509 	struct dp_mon_soc_be *mon_soc_be =
510 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
511 	QDF_STATUS status;
512 
513 	if (!mon_soc_be) {
514 		dp_mon_err("DP MON SOC is NULL");
515 		return QDF_STATUS_E_FAILURE;
516 	}
517 
518 	soc_cfg_ctx = soc->wlan_cfg_ctx;
519 	tx_mon_max_entries =
520 		wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
521 
522 	hal_set_low_threshold(mon_soc_be->tx_mon_buf_ring.hal_srng,
523 			      tx_mon_max_entries >> 2);
524 	status = htt_srng_setup(soc->htt_handle, 0,
525 				mon_soc_be->tx_mon_buf_ring.hal_srng,
526 				TX_MONITOR_BUF);
527 
528 	if (status != QDF_STATUS_SUCCESS) {
529 		dp_mon_err("Failed to send htt srng setup message for Tx mon buf ring");
530 		return status;
531 	}
532 
533 	if (mon_soc_be->tx_mon_ring_fill_level < tx_mon_max_entries) {
534 		status = dp_tx_mon_buffers_alloc(soc,
535 						 (tx_mon_max_entries -
536 						 mon_soc_be->tx_mon_ring_fill_level));
537 		if (status != QDF_STATUS_SUCCESS) {
538 			dp_mon_err("%pK: Tx mon buffers allocation failed", soc);
539 			return status;
540 		}
541 		mon_soc_be->tx_mon_ring_fill_level +=
542 				(tx_mon_max_entries -
543 				mon_soc_be->tx_mon_ring_fill_level);
544 	}
545 
546 	return QDF_STATUS_SUCCESS;
547 }
548 
549 static
550 QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_2_0(struct dp_pdev *pdev)
551 {
552 	int status;
553 	struct dp_soc *soc = pdev->soc;
554 
555 	status = dp_vdev_set_monitor_mode_buf_rings_rx_2_0(pdev);
556 	if (status != QDF_STATUS_SUCCESS) {
557 		dp_mon_err("%pK: Rx monitor extra buffer allocation failed",
558 			   soc);
559 		return status;
560 	}
561 
562 	status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev);
563 	if (status != QDF_STATUS_SUCCESS) {
564 		dp_mon_err("%pK: Tx monitor extra buffer allocation failed",
565 			   soc);
566 		return status;
567 	}
568 
569 	return QDF_STATUS_SUCCESS;
570 }
571 
/*
 * dp_vdev_set_monitor_mode_rings_2_0() - per-vdev monitor ring setup stub
 *
 * @pdev: DP pdev handle
 * @delayed_replenish: unused in this implementation
 *
 * Always reports success. NOTE(review): presumably ring setup for 2.0
 * targets happens in the soc/pdev-level paths instead — confirm before
 * relying on this stub.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static
QDF_STATUS dp_vdev_set_monitor_mode_rings_2_0(struct dp_pdev *pdev,
					      uint8_t delayed_replenish)
{
	return QDF_STATUS_SUCCESS;
}
578 
579 #ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_mon_tx_enable_enhanced_stats_2_0() - Send HTT cmd to FW to enable stats
 * @pdev: Datapath pdev handle
 *
 * Return: none
 */
static void dp_mon_tx_enable_enhanced_stats_2_0(struct dp_pdev *pdev)
{
	/* ask FW to stream enhanced ppdu stats for this pdev */
	dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
				  pdev->pdev_id);
}
591 
/**
 * dp_mon_tx_disable_enhanced_stats_2_0() - Send HTT cmd to FW to disable stats
 * @pdev: Datapath pdev handle
 *
 * Return: none
 */
static void dp_mon_tx_disable_enhanced_stats_2_0(struct dp_pdev *pdev)
{
	/* a zero stats-config bitmask stops the ppdu stats stream */
	dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
}
602 #endif
603 
604 #if defined(QCA_ENHANCED_STATS_SUPPORT) && defined(WLAN_FEATURE_11BE)
/**
 * dp_mon_tx_stats_update_2_0() - update per-peer 11BE tx stats from a
 * completed ppdu (puncture mode, per-MCS counts, SU/MU-OFDMA/MU-MIMO
 * ppdu counters)
 * @mon_peer: monitor peer whose stats are updated
 * @ppdu: per-user tx completion descriptor; punc_mode is written back
 *
 * Out-of-range MCS values (>= MAX_MCS_11BE) are accumulated in the
 * MAX_MCS - 1 overflow bucket; all 11BE counters are gated on
 * preamble == DOT11_BE.
 */
void
dp_mon_tx_stats_update_2_0(struct dp_mon_peer *mon_peer,
			   struct cdp_tx_completion_ppdu_user *ppdu)
{
	uint8_t preamble, mcs, punc_mode;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;

	/* derive the puncture bucket from the pattern bitmap and bw,
	 * and record it back into the ppdu descriptor
	 */
	punc_mode = dp_mon_get_puncture_type(ppdu->punc_pattern_bitmap,
					     ppdu->bw);
	ppdu->punc_mode = punc_mode;

	DP_STATS_INC(mon_peer, tx.punc_bw[punc_mode], ppdu->num_msdu);
	/* per-MCS msdu counts: overflow bucket vs in-range MCS */
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1],
		      ppdu->num_msdu,
		      ((mcs >= MAX_MCS_11BE) && (preamble == DOT11_BE)));
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[mcs],
		      ppdu->num_msdu,
		      ((mcs < MAX_MCS_11BE) && (preamble == DOT11_BE)));
	/* SU ppdu counters */
	DP_STATS_INCC(mon_peer,
		      tx.su_be_ppdu_cnt.mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11BE) && (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_SU)));
	DP_STATS_INCC(mon_peer,
		      tx.su_be_ppdu_cnt.mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11BE) && (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_SU)));
	/* MU-OFDMA ppdu counters */
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[MAX_MCS - 1],
		      1, ((mcs >= MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA)));
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[mcs],
		      1, ((mcs < MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA)));
	/* MU-MIMO ppdu counters */
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[MAX_MCS - 1],
		      1, ((mcs >= MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO)));
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[mcs],
		      1, ((mcs < MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO)));
}
656 
657 enum cdp_punctured_modes
658 dp_mon_get_puncture_type(uint16_t puncture_pattern, uint8_t bw)
659 {
660 	uint16_t mask;
661 	uint8_t punctured_bits;
662 
663 	if (!puncture_pattern)
664 		return NO_PUNCTURE;
665 
666 	switch (bw) {
667 	case CMN_BW_80MHZ:
668 		mask = PUNCTURE_80MHZ_MASK;
669 		break;
670 	case CMN_BW_160MHZ:
671 		mask = PUNCTURE_160MHZ_MASK;
672 		break;
673 	case CMN_BW_320MHZ:
674 		mask = PUNCTURE_320MHZ_MASK;
675 		break;
676 	default:
677 		return NO_PUNCTURE;
678 	}
679 
680 	/* 0s in puncture pattern received in TLV indicates punctured 20Mhz,
681 	 * after complement, 1s will indicate punctured 20Mhz
682 	 */
683 	puncture_pattern = ~puncture_pattern;
684 	puncture_pattern &= mask;
685 
686 	if (puncture_pattern) {
687 		punctured_bits = 0;
688 		while (puncture_pattern != 0) {
689 			punctured_bits++;
690 			puncture_pattern &= (puncture_pattern - 1);
691 		}
692 
693 		if (bw == CMN_BW_80MHZ) {
694 			if (punctured_bits == IEEE80211_PUNC_MINUS20MHZ)
695 				return PUNCTURED_20MHZ;
696 			else
697 				return NO_PUNCTURE;
698 		} else if (bw == CMN_BW_160MHZ) {
699 			if (punctured_bits == IEEE80211_PUNC_MINUS20MHZ)
700 				return PUNCTURED_20MHZ;
701 			else if (punctured_bits == IEEE80211_PUNC_MINUS40MHZ)
702 				return PUNCTURED_40MHZ;
703 			else
704 				return NO_PUNCTURE;
705 		} else if (bw == CMN_BW_320MHZ) {
706 			if (punctured_bits == IEEE80211_PUNC_MINUS40MHZ)
707 				return PUNCTURED_40MHZ;
708 			else if (punctured_bits == IEEE80211_PUNC_MINUS80MHZ)
709 				return PUNCTURED_80MHZ;
710 			else if (punctured_bits == IEEE80211_PUNC_MINUS120MHZ)
711 				return PUNCTURED_120MHZ;
712 			else
713 				return NO_PUNCTURE;
714 		}
715 	}
716 	return NO_PUNCTURE;
717 }
718 #endif
719 
#if defined(QCA_ENHANCED_STATS_SUPPORT) && !defined(WLAN_FEATURE_11BE)
/* Non-11BE build: no puncture-aware counters exist, so the update only
 * normalizes the descriptor's puncture mode field.
 */
void
dp_mon_tx_stats_update_2_0(struct dp_mon_peer *mon_peer,
			   struct cdp_tx_completion_ppdu_user *ppdu)
{
	ppdu->punc_mode = NO_PUNCTURE;
}

/* Without 11BE support every ppdu is reported as unpunctured */
enum cdp_punctured_modes
dp_mon_get_puncture_type(uint16_t puncture_pattern, uint8_t bw)
{
	return NO_PUNCTURE;
}
#endif /* QCA_ENHANCED_STATS_SUPPORT && WLAN_FEATURE_11BE */
734 
735 #ifdef QCA_SUPPORT_BPR
/* dp_set_bpr_enable_2_0() - BPR enable/disable stub for 2.0 targets.
 * Reports success without programming anything. NOTE(review): confirm no
 * HW/FW configuration is required for BPR on these targets.
 */
static QDF_STATUS
dp_set_bpr_enable_2_0(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_SUCCESS;
}
741 #endif /* QCA_SUPPORT_BPR */
742 
743 #ifdef QCA_ENHANCED_STATS_SUPPORT
744 #ifdef WDI_EVENT_ENABLE
745 /**
746  * dp_ppdu_desc_notify_2_0 - Notify upper layer for PPDU indication via WDI
747  *
748  * @pdev: Datapath pdev handle
749  * @nbuf: Buffer to be shipped
750  *
751  * Return: void
752  */
753 static void dp_ppdu_desc_notify_2_0(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
754 {
755 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
756 
757 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(nbuf);
758 
759 	if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
760 	    ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
761 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
762 				     pdev->soc,
763 				     nbuf, HTT_INVALID_PEER,
764 				     WDI_NO_VAL,
765 				     pdev->pdev_id);
766 	} else {
767 		qdf_nbuf_free(nbuf);
768 	}
769 }
770 #endif
771 
/**
 * dp_ppdu_stats_feat_enable_check_2_0 - Check if feature(s) is enabled to
 *				consume ppdu stats from FW
 *
 * @pdev: Datapath pdev handle
 *
 * Return: true if enabled, else return false
 */
static bool dp_ppdu_stats_feat_enable_check_2_0(struct dp_pdev *pdev)
{
	/* on this target only enhanced stats consumes FW ppdu stats */
	return pdev->monitor_pdev->enhanced_stats_en;
}
784 #endif
785 
786 static
787 QDF_STATUS dp_mon_soc_htt_srng_setup_2_0(struct dp_soc *soc)
788 {
789 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
790 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
791 	QDF_STATUS status;
792 
793 	hal_set_low_threshold(soc->rxdma_mon_buf_ring[0].hal_srng, 0);
794 	status = htt_srng_setup(soc->htt_handle, 0,
795 				soc->rxdma_mon_buf_ring[0].hal_srng,
796 				RXDMA_MONITOR_BUF);
797 
798 	if (status != QDF_STATUS_SUCCESS) {
799 		dp_err("Failed to send htt srng setup message for Rx mon buf ring");
800 		return status;
801 	}
802 
803 	hal_set_low_threshold(mon_soc_be->tx_mon_buf_ring.hal_srng, 0);
804 	status = htt_srng_setup(soc->htt_handle, 0,
805 				mon_soc_be->tx_mon_buf_ring.hal_srng,
806 				TX_MONITOR_BUF);
807 	if (status != QDF_STATUS_SUCCESS) {
808 		dp_err("Failed to send htt srng setup message for Tx mon buf ring");
809 		return status;
810 	}
811 
812 	return status;
813 }
814 
815 static
816 QDF_STATUS dp_mon_pdev_htt_srng_setup_2_0(struct dp_soc *soc,
817 					  struct dp_pdev *pdev,
818 					  int mac_id,
819 					  int mac_for_pdev)
820 {
821 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
822 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
823 	QDF_STATUS status;
824 
825 	if (!soc->rxdma_mon_dst_ring[mac_id].hal_srng)
826 		return QDF_STATUS_SUCCESS;
827 
828 	status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
829 				soc->rxdma_mon_dst_ring[mac_id].hal_srng,
830 				RXDMA_MONITOR_DST);
831 
832 	if (status != QDF_STATUS_SUCCESS) {
833 		dp_mon_err("Failed to send htt srng setup message for Rxdma dst ring");
834 		return status;
835 	}
836 
837 	if (!mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng)
838 		return QDF_STATUS_SUCCESS;
839 
840 	status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
841 				mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng,
842 				TX_MONITOR_DST);
843 
844 	if (status != QDF_STATUS_SUCCESS) {
845 		dp_mon_err("Failed to send htt srng message for Tx mon dst ring");
846 		return status;
847 	}
848 
849 	return status;
850 }
851 
852 QDF_STATUS dp_tx_mon_refill_buf_ring_2_0(struct dp_intr *int_ctx)
853 {
854 	struct dp_soc *soc  = int_ctx->soc;
855 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
856 	union dp_mon_desc_list_elem_t *desc_list = NULL;
857 	union dp_mon_desc_list_elem_t *tail = NULL;
858 	struct dp_srng *tx_mon_buf_ring;
859 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
860 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
861 	uint32_t num_entries_avail;
862 	int sync_hw_ptr = 1;
863 	void *hal_srng;
864 
865 	tx_mon_buf_ring = &mon_soc_be->tx_mon_buf_ring;
866 	hal_srng = tx_mon_buf_ring->hal_srng;
867 
868 	intr_stats->num_host2txmon_ring__masks++;
869 	mon_soc_be->tx_low_thresh_intrs++;
870 	hal_srng_access_start(soc->hal_soc, hal_srng);
871 	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
872 						   hal_srng,
873 						   sync_hw_ptr);
874 	hal_srng_access_end(soc->hal_soc, hal_srng);
875 
876 	if (num_entries_avail)
877 		dp_mon_buffers_replenish(soc, tx_mon_buf_ring,
878 					 &mon_soc_be->tx_desc_mon,
879 					 num_entries_avail, &desc_list, &tail,
880 					 NULL);
881 
882 	return QDF_STATUS_SUCCESS;
883 }
884 
885 QDF_STATUS dp_rx_mon_refill_buf_ring_2_0(struct dp_intr *int_ctx)
886 {
887 	struct dp_soc *soc  = int_ctx->soc;
888 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
889 	union dp_mon_desc_list_elem_t *desc_list = NULL;
890 	union dp_mon_desc_list_elem_t *tail = NULL;
891 	struct dp_srng *rx_mon_buf_ring;
892 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
893 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
894 	uint32_t num_entries_avail;
895 	int sync_hw_ptr = 1;
896 	void *hal_srng;
897 
898 	rx_mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
899 	hal_srng = rx_mon_buf_ring->hal_srng;
900 
901 	intr_stats->num_host2rxdma_ring_masks++;
902 	mon_soc_be->rx_low_thresh_intrs++;
903 	hal_srng_access_start(soc->hal_soc, hal_srng);
904 	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
905 						   hal_srng,
906 						   sync_hw_ptr);
907 	hal_srng_access_end(soc->hal_soc, hal_srng);
908 
909 	if (num_entries_avail)
910 		dp_mon_buffers_replenish(soc, rx_mon_buf_ring,
911 					 &mon_soc_be->rx_desc_mon,
912 					 num_entries_avail, &desc_list, &tail,
913 					 NULL);
914 
915 	return QDF_STATUS_SUCCESS;
916 }
917 
/*
 * dp_mon_soc_detach_2_0() - free the soc-level monitor desc pools and the
 * rx/tx monitor buffer ring memory allocated by dp_mon_soc_attach_2_0().
 * Must run after dp_mon_soc_deinit_2_0().
 *
 * @soc: DP soc handle
 *
 * Return: QDF_STATUS
 */
static
QDF_STATUS dp_mon_soc_detach_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	if (!mon_soc_be) {
		dp_mon_err("DP MON SOC NULL");
		return QDF_STATUS_E_FAILURE;
	}

	/* rx side then tx side: desc pool before its backing ring */
	dp_rx_mon_buf_desc_pool_free(soc);
	dp_srng_free(soc, &soc->rxdma_mon_buf_ring[0]);
	dp_tx_mon_buf_desc_pool_free(soc);
	dp_srng_free(soc, &mon_soc_be->tx_mon_buf_ring);

	return QDF_STATUS_SUCCESS;
}
936 
/*
 * dp_mon_soc_deinit_2_0() - reverse of dp_mon_soc_init_2_0(): free monitor
 * buffers, deinit the desc pools and deinit the rx/tx mon buffer srngs.
 * No-op unless init completed (is_dp_mon_soc_initialized set).
 *
 * @soc: DP soc handle
 */
static void dp_mon_soc_deinit_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	/* guard against deinit without (or after) a completed init */
	if (!mon_soc_be->is_dp_mon_soc_initialized)
		return;

	/* teardown order: buffers, then desc pools, then the srngs */
	dp_rx_mon_buffers_free(soc);
	dp_tx_mon_buffers_free(soc);

	dp_rx_mon_buf_desc_pool_deinit(soc);
	dp_tx_mon_buf_desc_pool_deinit(soc);

	dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[0], RXDMA_MONITOR_BUF, 0);
	dp_srng_deinit(soc, &mon_soc_be->tx_mon_buf_ring, TX_MONITOR_BUF, 0);

	mon_soc_be->is_dp_mon_soc_initialized = false;
}
957 
958 static
959 QDF_STATUS dp_rx_mon_soc_init_2_0(struct dp_soc *soc)
960 {
961 	if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[0],
962 			 RXDMA_MONITOR_BUF, 0, 0)) {
963 		dp_mon_err("%pK: " RNG_ERR "rx_mon_buf_ring", soc);
964 		goto fail;
965 	}
966 
967 	if (dp_rx_mon_buf_desc_pool_init(soc)) {
968 		dp_mon_err("%pK: " RNG_ERR "rx mon desc pool init", soc);
969 		goto fail;
970 	}
971 
972 	/* monitor buffers for src */
973 	if (dp_rx_mon_buffers_alloc(soc, DP_MON_RING_FILL_LEVEL_DEFAULT)) {
974 		dp_mon_err("%pK: Rx mon buffers allocation failed", soc);
975 		goto fail;
976 	}
977 
978 	return QDF_STATUS_SUCCESS;
979 fail:
980 	return QDF_STATUS_E_FAILURE;
981 }
982 
983 static
984 QDF_STATUS dp_tx_mon_soc_init_2_0(struct dp_soc *soc)
985 {
986 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
987 	struct dp_mon_soc_be *mon_soc_be =
988 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
989 
990 	if (dp_srng_init(soc, &mon_soc_be->tx_mon_buf_ring,
991 			 TX_MONITOR_BUF, 0, 0)) {
992 		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
993 		goto fail;
994 	}
995 
996 	if (dp_tx_mon_buf_desc_pool_init(soc)) {
997 		dp_mon_err("%pK: " RNG_ERR "tx mon desc pool init", soc);
998 		goto fail;
999 	}
1000 
1001 	/* monitor buffers for src */
1002 	if (dp_tx_mon_buffers_alloc(soc, DP_MON_RING_FILL_LEVEL_DEFAULT)) {
1003 		dp_mon_err("%pK: Tx mon buffers allocation failed", soc);
1004 		goto fail;
1005 	}
1006 
1007 	return QDF_STATUS_SUCCESS;
1008 fail:
1009 	return QDF_STATUS_E_FAILURE;
1010 }
1011 
1012 static
1013 QDF_STATUS dp_mon_soc_init_2_0(struct dp_soc *soc)
1014 {
1015 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1016 	struct dp_mon_soc_be *mon_soc_be =
1017 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1018 
1019 	if (soc->rxdma_mon_buf_ring[0].hal_srng) {
1020 		dp_mon_info("%pK: mon soc init is done", soc);
1021 		return QDF_STATUS_SUCCESS;
1022 	}
1023 
1024 	if (dp_rx_mon_soc_init_2_0(soc)) {
1025 		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
1026 		goto fail;
1027 	}
1028 
1029 	if (dp_tx_mon_soc_init_2_0(soc)) {
1030 		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
1031 		goto fail;
1032 	}
1033 
1034 	mon_soc_be->tx_mon_ring_fill_level = DP_MON_RING_FILL_LEVEL_DEFAULT;
1035 	mon_soc_be->rx_mon_ring_fill_level = DP_MON_RING_FILL_LEVEL_DEFAULT;
1036 
1037 	mon_soc_be->is_dp_mon_soc_initialized = true;
1038 	return QDF_STATUS_SUCCESS;
1039 fail:
1040 	dp_mon_soc_deinit_2_0(soc);
1041 	return QDF_STATUS_E_FAILURE;
1042 }
1043 
1044 static
1045 QDF_STATUS dp_mon_soc_attach_2_0(struct dp_soc *soc)
1046 {
1047 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1048 	struct dp_mon_soc_be *mon_soc_be =
1049 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1050 	int entries;
1051 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1052 
1053 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1054 	if (!mon_soc_be) {
1055 		dp_mon_err("DP MON SOC is NULL");
1056 		return QDF_STATUS_E_FAILURE;
1057 	}
1058 
1059 	entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
1060 	qdf_print("%s:%d rx mon buf entries: %d", __func__, __LINE__, entries);
1061 	if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[0],
1062 			  RXDMA_MONITOR_BUF, entries, 0)) {
1063 		dp_mon_err("%pK: " RNG_ERR "rx_mon_buf_ring", soc);
1064 		goto fail;
1065 	}
1066 
1067 	entries = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
1068 	qdf_print("%s:%d tx mon buf entries: %d", __func__, __LINE__, entries);
1069 	if (dp_srng_alloc(soc, &mon_soc_be->tx_mon_buf_ring,
1070 			  TX_MONITOR_BUF, entries, 0)) {
1071 		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
1072 		goto fail;
1073 	}
1074 
1075 	/* allocate sw desc pool */
1076 	if (dp_rx_mon_buf_desc_pool_alloc(soc)) {
1077 		dp_mon_err("%pK: Rx mon desc pool allocation failed", soc);
1078 		goto fail;
1079 	}
1080 
1081 	if (dp_tx_mon_buf_desc_pool_alloc(soc)) {
1082 		dp_mon_err("%pK: Tx mon desc pool allocation failed", soc);
1083 		goto fail;
1084 	}
1085 
1086 	return QDF_STATUS_SUCCESS;
1087 fail:
1088 	dp_mon_soc_detach_2_0(soc);
1089 	return QDF_STATUS_E_NOMEM;
1090 }
1091 
1092 static
1093 void dp_pdev_mon_rings_deinit_2_0(struct dp_pdev *pdev)
1094 {
1095 	int mac_id = 0;
1096 	struct dp_soc *soc = pdev->soc;
1097 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1098 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1099 
1100 	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
1101 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
1102 							 pdev->pdev_id);
1103 
1104 		dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
1105 			       RXDMA_MONITOR_DST, pdev->pdev_id);
1106 		dp_srng_deinit(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
1107 			       TX_MONITOR_DST, pdev->pdev_id);
1108 	}
1109 }
1110 
1111 static
1112 QDF_STATUS dp_pdev_mon_rings_init_2_0(struct dp_pdev *pdev)
1113 {
1114 	struct dp_soc *soc = pdev->soc;
1115 	int mac_id = 0;
1116 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1117 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1118 
1119 	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
1120 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
1121 							 pdev->pdev_id);
1122 
1123 		if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
1124 				 RXDMA_MONITOR_DST, pdev->pdev_id, lmac_id)) {
1125 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
1126 			goto fail;
1127 		}
1128 
1129 		if (dp_srng_init(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
1130 				 TX_MONITOR_DST, pdev->pdev_id, lmac_id)) {
1131 			dp_mon_err("%pK: " RNG_ERR "tx_mon_dst_ring", soc);
1132 			goto fail;
1133 		}
1134 	}
1135 	return QDF_STATUS_SUCCESS;
1136 
1137 fail:
1138 	dp_pdev_mon_rings_deinit_2_0(pdev);
1139 	return QDF_STATUS_E_NOMEM;
1140 }
1141 
1142 static
1143 void dp_pdev_mon_rings_free_2_0(struct dp_pdev *pdev)
1144 {
1145 	int mac_id = 0;
1146 	struct dp_soc *soc = pdev->soc;
1147 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1148 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1149 
1150 	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
1151 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
1152 							 pdev->pdev_id);
1153 
1154 		dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
1155 		dp_srng_free(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id]);
1156 	}
1157 }
1158 
1159 static
1160 QDF_STATUS dp_pdev_mon_rings_alloc_2_0(struct dp_pdev *pdev)
1161 {
1162 	struct dp_soc *soc = pdev->soc;
1163 	int mac_id = 0;
1164 	int entries;
1165 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
1166 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1167 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1168 
1169 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
1170 
1171 	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
1172 		int lmac_id =
1173 		dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
1174 
1175 		entries = wlan_cfg_get_dma_rx_mon_dest_ring_size(pdev_cfg_ctx);
1176 		if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
1177 				  RXDMA_MONITOR_DST, entries, 0)) {
1178 			dp_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", pdev);
1179 			goto fail;
1180 		}
1181 
1182 		entries = wlan_cfg_get_dma_tx_mon_dest_ring_size(pdev_cfg_ctx);
1183 		if (dp_srng_alloc(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
1184 				  TX_MONITOR_DST, entries, 0)) {
1185 			dp_err("%pK: " RNG_ERR "tx_mon_dst_ring", pdev);
1186 			goto fail;
1187 		}
1188 	}
1189 	return QDF_STATUS_SUCCESS;
1190 
1191 fail:
1192 	dp_pdev_mon_rings_free_2_0(pdev);
1193 	return QDF_STATUS_E_NOMEM;
1194 }
1195 
static
void dp_mon_pdev_free_2_0(struct dp_pdev *pdev)
{
	/* No BE-specific pdev allocations to release; placeholder for
	 * the mon_pdev_free op.
	 */
}
1200 
1201 static
1202 QDF_STATUS dp_mon_pdev_alloc_2_0(struct dp_pdev *pdev)
1203 {
1204 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1205 	struct dp_mon_pdev_be *mon_pdev_be =
1206 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1207 
1208 	if (!mon_pdev_be) {
1209 		dp_mon_err("DP MON PDEV is NULL");
1210 		return QDF_STATUS_E_FAILURE;
1211 	}
1212 
1213 	return QDF_STATUS_SUCCESS;
1214 }
1215 
1216 #else
/* DISABLE_MON_CONFIG stub: monitor SRNG setup is compiled out */
static inline
QDF_STATUS dp_mon_htt_srng_setup_2_0(struct dp_soc *soc,
				     struct dp_pdev *pdev,
				     int mac_id,
				     int mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}
1225 
/* DISABLE_MON_CONFIG stub: no Tx monitor processing; zero work done */
static uint32_t
dp_tx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
		      uint32_t mac_id, uint32_t quota)
{
	return 0;
}
1232 
1233 static inline
1234 QDF_STATUS dp_mon_soc_attach_2_0(struct dp_soc *soc)
1235 {
1236 	return status;
1237 }
1238 
1239 static inline
1240 QDF_STATUS dp_mon_soc_detach_2_0(struct dp_soc *soc)
1241 {
1242 	return status;
1243 }
1244 
/* DISABLE_MON_CONFIG stub: no monitor rings to deinit */
static inline
void dp_pdev_mon_rings_deinit_2_0(struct dp_pdev *pdev)
{
}
1249 
1250 static inline
1251 QDF_STATUS dp_pdev_mon_rings_init_2_0(struct dp_soc *soc, struct dp_pdev *pdev)
1252 {
1253 	return QDF_STATUS_SUCCESS;
1254 }
1255 
/* DISABLE_MON_CONFIG stub: no monitor rings to free */
static inline
void dp_pdev_mon_rings_free_2_0(struct dp_pdev *pdev)
{
}
1260 
1261 static inline
1262 QDF_STATUS dp_pdev_mon_rings_alloc_2_0(struct dp_soc *soc, struct dp_pdev *pdev)
1263 {
1264 	return QDF_STATUS_SUCCESS;
1265 }
1266 
/* DISABLE_MON_CONFIG stub: no BE pdev state to free */
static inline
void dp_mon_pdev_free_2_0(struct dp_pdev *pdev)
{
}

/* DISABLE_MON_CONFIG stub: no BE pdev state to allocate */
static inline
QDF_STATUS dp_mon_pdev_alloc_2_0(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/* DISABLE_MON_CONFIG stub: no monitor-mode buffer rings to configure */
static inline
void dp_vdev_set_monitor_mode_buf_rings_2_0(struct dp_pdev *pdev)
{
}

/* DISABLE_MON_CONFIG stub: no monitor-mode rings to configure */
static inline
QDF_STATUS dp_vdev_set_monitor_mode_rings_2_0(struct dp_pdev *pdev,
					      uint8_t delayed_replenish)
{
	return QDF_STATUS_SUCCESS;
}
1289 #endif
1290 
#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
/* Hook the HTT PPDU stats indication handler when WDI events are built in */
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops->mon_ppdu_stats_ind_handler =
					dp_ppdu_stats_ind_handler;
}
#else
/* PPDU stats indications unused in this build; nothing to register */
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
}
#endif
1305 
1306 static void dp_mon_register_intr_ops_2_0(struct dp_soc *soc)
1307 {
1308 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1309 
1310 	mon_soc->mon_ops->rx_mon_refill_buf_ring =
1311 			NULL,
1312 	mon_soc->mon_ops->tx_mon_refill_buf_ring =
1313 			NULL,
1314 	mon_soc->mon_rx_process = dp_rx_mon_process_2_0;
1315 	dp_mon_ppdu_stats_handler_register(mon_soc);
1316 }
1317 
1318 /**
1319  * dp_mon_register_feature_ops_2_0() - register feature ops
1320  *
1321  * @soc: dp soc context
1322  *
1323  * @return: void
1324  */
1325 static void
1326 dp_mon_register_feature_ops_2_0(struct dp_soc *soc)
1327 {
1328 	struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);
1329 
1330 	if (!mon_ops) {
1331 		dp_err("mon_ops is NULL, feature ops registration failed");
1332 		return;
1333 	}
1334 
1335 	mon_ops->mon_config_debug_sniffer = dp_config_debug_sniffer;
1336 	mon_ops->mon_peer_tx_init = NULL;
1337 	mon_ops->mon_peer_tx_cleanup = NULL;
1338 	mon_ops->mon_htt_ppdu_stats_attach = dp_htt_ppdu_stats_attach;
1339 	mon_ops->mon_htt_ppdu_stats_detach = dp_htt_ppdu_stats_detach;
1340 	mon_ops->mon_print_pdev_rx_mon_stats = dp_print_pdev_rx_mon_stats;
1341 	mon_ops->mon_set_bsscolor = dp_mon_set_bsscolor;
1342 	mon_ops->mon_pdev_get_filter_ucast_data = NULL;
1343 	mon_ops->mon_pdev_get_filter_mcast_data = NULL;
1344 	mon_ops->mon_pdev_get_filter_non_data = NULL;
1345 	mon_ops->mon_neighbour_peer_add_ast = NULL;
1346 #ifndef DISABLE_MON_CONFIG
1347 	mon_ops->mon_tx_process = dp_tx_mon_process_2_0;
1348 #endif
1349 #ifdef WLAN_TX_PKT_CAPTURE_ENH_BE
1350 	mon_ops->mon_peer_tid_peer_id_update = NULL;
1351 	mon_ops->mon_tx_capture_debugfs_init = NULL;
1352 	mon_ops->mon_tx_add_to_comp_queue = NULL;
1353 	mon_ops->mon_print_pdev_tx_capture_stats =
1354 					dp_print_pdev_tx_monitor_stats_2_0;
1355 	mon_ops->mon_config_enh_tx_capture = dp_config_enh_tx_monitor_2_0;
1356 	mon_ops->mon_tx_peer_filter = dp_peer_set_tx_capture_enabled_2_0;
1357 #endif
1358 #if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH_BE))
1359 	mon_ops->mon_peer_tid_peer_id_update = NULL;
1360 	mon_ops->mon_tx_capture_debugfs_init = NULL;
1361 	mon_ops->mon_tx_add_to_comp_queue = NULL;
1362 	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
1363 	mon_ops->mon_config_enh_tx_capture = dp_config_enh_tx_core_monitor_2_0;
1364 	mon_ops->mon_tx_peer_filter = NULL;
1365 #endif
1366 #ifdef WLAN_RX_PKT_CAPTURE_ENH
1367 	mon_ops->mon_config_enh_rx_capture = NULL;
1368 #endif
1369 #ifdef QCA_SUPPORT_BPR
1370 	mon_ops->mon_set_bpr_enable = dp_set_bpr_enable_2_0;
1371 #endif
1372 #ifdef ATH_SUPPORT_NAC
1373 	mon_ops->mon_set_filter_neigh_peers = NULL;
1374 #endif
1375 #ifdef WLAN_ATF_ENABLE
1376 	mon_ops->mon_set_atf_stats_enable = dp_set_atf_stats_enable;
1377 #endif
1378 #ifdef FEATURE_NAC_RSSI
1379 	mon_ops->mon_filter_neighbour_peer = NULL;
1380 #endif
1381 #ifdef QCA_MCOPY_SUPPORT
1382 	mon_ops->mon_filter_setup_mcopy_mode = NULL;
1383 	mon_ops->mon_filter_reset_mcopy_mode = NULL;
1384 	mon_ops->mon_mcopy_check_deliver = NULL;
1385 #endif
1386 #ifdef QCA_ENHANCED_STATS_SUPPORT
1387 	mon_ops->mon_filter_setup_enhanced_stats =
1388 				dp_mon_filter_setup_enhanced_stats_2_0;
1389 	mon_ops->mon_filter_reset_enhanced_stats =
1390 				dp_mon_filter_reset_enhanced_stats_2_0;
1391 	mon_ops->mon_tx_enable_enhanced_stats =
1392 				dp_mon_tx_enable_enhanced_stats_2_0;
1393 	mon_ops->mon_tx_disable_enhanced_stats =
1394 				dp_mon_tx_disable_enhanced_stats_2_0;
1395 	mon_ops->mon_ppdu_stats_feat_enable_check =
1396 				dp_ppdu_stats_feat_enable_check_2_0;
1397 	mon_ops->mon_tx_stats_update = dp_mon_tx_stats_update_2_0;
1398 	mon_ops->mon_ppdu_desc_deliver = dp_ppdu_desc_deliver;
1399 #ifdef WDI_EVENT_ENABLE
1400 	mon_ops->mon_ppdu_desc_notify = dp_ppdu_desc_notify_2_0;
1401 #endif
1402 #endif
1403 #ifdef WLAN_RX_PKT_CAPTURE_ENH
1404 	mon_ops->mon_filter_setup_rx_enh_capture = NULL;
1405 #endif
1406 #ifdef WDI_EVENT_ENABLE
1407 	mon_ops->mon_set_pktlog_wifi3 = dp_set_pktlog_wifi3;
1408 	mon_ops->mon_filter_setup_rx_pkt_log_full =
1409 				dp_mon_filter_setup_rx_pkt_log_full_2_0;
1410 	mon_ops->mon_filter_reset_rx_pkt_log_full =
1411 				dp_mon_filter_reset_rx_pkt_log_full_2_0;
1412 	mon_ops->mon_filter_setup_rx_pkt_log_lite =
1413 				dp_mon_filter_setup_rx_pkt_log_lite_2_0;
1414 	mon_ops->mon_filter_reset_rx_pkt_log_lite =
1415 				dp_mon_filter_reset_rx_pkt_log_lite_2_0;
1416 	mon_ops->mon_filter_setup_rx_pkt_log_cbf =
1417 				dp_mon_filter_setup_rx_pkt_log_cbf_2_0;
1418 	mon_ops->mon_filter_reset_rx_pkt_log_cbf =
1419 				dp_mon_filter_reset_rx_pktlog_cbf_2_0;
1420 	mon_ops->mon_filter_setup_pktlog_hybrid =
1421 				dp_mon_filter_setup_pktlog_hybrid_2_0;
1422 	mon_ops->mon_filter_reset_pktlog_hybrid =
1423 				dp_mon_filter_reset_pktlog_hybrid_2_0;
1424 #endif
1425 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
1426 	mon_ops->mon_pktlogmod_exit = dp_pktlogmod_exit;
1427 #endif
1428 	mon_ops->rx_hdr_length_set = dp_rx_mon_hdr_length_set;
1429 	mon_ops->rx_packet_length_set = dp_rx_mon_packet_length_set;
1430 	mon_ops->rx_mon_enable = dp_rx_mon_enable_set;
1431 	mon_ops->rx_wmask_subscribe = dp_rx_mon_word_mask_subscribe;
1432 	mon_ops->rx_enable_mpdu_logging = dp_rx_mon_enable_mpdu_logging;
1433 	mon_ops->mon_neighbour_peers_detach = NULL;
1434 	mon_ops->mon_vdev_set_monitor_mode_buf_rings =
1435 				dp_vdev_set_monitor_mode_buf_rings_2_0;
1436 	mon_ops->mon_vdev_set_monitor_mode_rings =
1437 				dp_vdev_set_monitor_mode_rings_2_0;
1438 #ifdef QCA_ENHANCED_STATS_SUPPORT
1439 	mon_ops->mon_rx_stats_update = dp_rx_mon_stats_update_2_0;
1440 	mon_ops->mon_rx_populate_ppdu_usr_info =
1441 			dp_rx_mon_populate_ppdu_usr_info_2_0;
1442 	mon_ops->mon_rx_populate_ppdu_info = dp_rx_mon_populate_ppdu_info_2_0;
1443 #endif
1444 #ifdef QCA_UNDECODED_METADATA_SUPPORT
1445 	mon_ops->mon_config_undecoded_metadata_capture =
1446 		dp_mon_config_undecoded_metadata_capture;
1447 	mon_ops->mon_filter_setup_undecoded_metadata_capture =
1448 		dp_mon_filter_setup_undecoded_metadata_capture_2_0;
1449 	mon_ops->mon_filter_reset_undecoded_metadata_capture =
1450 		dp_mon_filter_reset_undecoded_metadata_capture_2_0;
1451 #endif
1452 	mon_ops->rx_enable_fpmo = dp_rx_mon_enable_fpmo;
1453 	mon_ops->mon_rx_print_advanced_stats =
1454 		dp_mon_rx_print_advanced_stats_2_0;
1455 }
1456 
/* Monitor ops table for BE (2.0) targets. NULL entries are paths that
 * BE does not use (e.g. vdev/reap timers handled differently than on
 * legacy targets).
 */
struct dp_mon_ops monitor_ops_2_0 = {
	.mon_soc_cfg_init = dp_mon_soc_cfg_init,
	.mon_soc_attach = dp_mon_soc_attach_2_0,
	.mon_soc_detach = dp_mon_soc_detach_2_0,
	.mon_soc_init = dp_mon_soc_init_2_0,
	.mon_soc_deinit = dp_mon_soc_deinit_2_0,
	.mon_pdev_alloc = dp_mon_pdev_alloc_2_0,
	.mon_pdev_free = dp_mon_pdev_free_2_0,
	.mon_pdev_attach = dp_mon_pdev_attach,
	.mon_pdev_detach = dp_mon_pdev_detach,
	.mon_pdev_init = dp_mon_pdev_init,
	.mon_pdev_deinit = dp_mon_pdev_deinit,
	.mon_vdev_attach = dp_mon_vdev_attach,
	.mon_vdev_detach = dp_mon_vdev_detach,
	.mon_peer_attach = dp_mon_peer_attach,
	.mon_peer_detach = dp_mon_peer_detach,
	.mon_peer_get_peerstats_ctx = dp_mon_peer_get_peerstats_ctx,
	.mon_peer_reset_stats = dp_mon_peer_reset_stats,
	.mon_peer_get_stats = dp_mon_peer_get_stats,
	.mon_invalid_peer_update_pdev_stats =
				dp_mon_invalid_peer_update_pdev_stats,
	.mon_peer_get_stats_param = dp_mon_peer_get_stats_param,
	.mon_flush_rings = NULL,
#if !defined(DISABLE_MON_CONFIG)
	.mon_pdev_htt_srng_setup = dp_mon_pdev_htt_srng_setup_2_0,
	.mon_soc_htt_srng_setup = dp_mon_soc_htt_srng_setup_2_0,
#endif
#if defined(DP_CON_MON)
	.mon_service_rings = NULL,
#endif
#ifndef DISABLE_MON_CONFIG
	/* filled in at runtime by dp_mon_register_intr_ops_2_0() */
	.mon_rx_process = NULL,
#endif
#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
	.mon_drop_packets_for_mac = NULL,
#endif
	.mon_vdev_timer_init = NULL,
	.mon_vdev_timer_start = NULL,
	.mon_vdev_timer_stop = NULL,
	.mon_vdev_timer_deinit = NULL,
	.mon_reap_timer_init = NULL,
	.mon_reap_timer_start = NULL,
	.mon_reap_timer_stop = NULL,
	.mon_reap_timer_deinit = NULL,
	.mon_filter_setup_rx_mon_mode = dp_mon_filter_setup_rx_mon_mode_2_0,
	.mon_filter_reset_rx_mon_mode = dp_mon_filter_reset_rx_mon_mode_2_0,
	.mon_filter_setup_tx_mon_mode = dp_mon_filter_setup_tx_mon_mode_2_0,
	.mon_filter_reset_tx_mon_mode = dp_mon_filter_reset_tx_mon_mode_2_0,
	.tx_mon_filter_update = dp_tx_mon_filter_update_2_0,
	.rx_mon_filter_update = dp_rx_mon_filter_update_2_0,
	.tx_mon_filter_alloc = dp_mon_filter_alloc_2_0,
	.tx_mon_filter_dealloc = dp_mon_filter_dealloc_2_0,
	.mon_rings_alloc = dp_pdev_mon_rings_alloc_2_0,
	.mon_rings_free = dp_pdev_mon_rings_free_2_0,
	.mon_rings_init = dp_pdev_mon_rings_init_2_0,
	.mon_rings_deinit = dp_pdev_mon_rings_deinit_2_0,
	.rx_mon_desc_pool_init = NULL,
	.rx_mon_desc_pool_deinit = NULL,
	.rx_mon_desc_pool_alloc = NULL,
	.rx_mon_desc_pool_free = NULL,
	.rx_mon_buffers_alloc = NULL,
	.rx_mon_buffers_free = NULL,
	.tx_mon_desc_pool_init = NULL,
	.tx_mon_desc_pool_deinit = NULL,
	.tx_mon_desc_pool_alloc = NULL,
	.tx_mon_desc_pool_free = NULL,
#ifndef DISABLE_MON_CONFIG
	.mon_register_intr_ops = dp_mon_register_intr_ops_2_0,
#endif
	.mon_register_feature_ops = dp_mon_register_feature_ops_2_0,
#ifdef WLAN_TX_PKT_CAPTURE_ENH_BE
	.mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach_2_0,
	.mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach_2_0,
	.mon_peer_tx_capture_filter_check = NULL,
#endif
#if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH_BE))
	.mon_tx_ppdu_stats_attach = NULL,
	.mon_tx_ppdu_stats_detach = NULL,
	.mon_peer_tx_capture_filter_check = NULL,
#endif
	.mon_pdev_ext_init = dp_mon_pdev_ext_init_2_0,
	.mon_pdev_ext_deinit = dp_mon_pdev_ext_deinit_2_0,
	.mon_lite_mon_alloc = dp_lite_mon_alloc,
	.mon_lite_mon_dealloc = dp_lite_mon_dealloc,
	.mon_lite_mon_vdev_delete = dp_lite_mon_vdev_delete,
	.mon_lite_mon_disable_rx = dp_lite_mon_disable_rx,
	.mon_rx_ppdu_info_cache_create = dp_rx_mon_ppdu_info_cache_create,
	.mon_rx_ppdu_info_cache_destroy = dp_rx_mon_ppdu_info_cache_destroy,
};
1546 
/* cdp-level monitor ops exported for BE (2.0) targets; NULL entries are
 * legacy/full-monitor paths not supported on this generation.
 */
struct cdp_mon_ops dp_ops_mon_2_0 = {
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = NULL,
	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
	.config_full_mon_mode = NULL,
	.soc_config_full_mon_mode = NULL,
	.get_mon_pdev_rx_stats = dp_pdev_get_rx_mon_stats,
	.txrx_enable_mon_reap_timer = NULL,
#ifdef QCA_SUPPORT_LITE_MONITOR
	/* lite monitor configuration entry points */
	.txrx_set_lite_mon_config = dp_lite_mon_set_config,
	.txrx_get_lite_mon_config = dp_lite_mon_get_config,
	.txrx_set_lite_mon_peer_config = dp_lite_mon_set_peer_config,
	.txrx_get_lite_mon_peer_config = dp_lite_mon_get_peer_config,
	.txrx_is_lite_mon_enabled = dp_lite_mon_is_enabled,
#endif
	.txrx_set_mon_pdev_params_rssi_dbm_conv =
				dp_mon_pdev_params_rssi_dbm_conv,
};
1566 
1567 #ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
1568 void dp_mon_ops_register_2_0(struct dp_mon_soc *mon_soc)
1569 {
1570 	struct dp_mon_ops *mon_ops = NULL;
1571 
1572 	if (mon_soc->mon_ops) {
1573 		dp_mon_err("monitor ops is allocated");
1574 		return;
1575 	}
1576 
1577 	mon_ops = qdf_mem_malloc(sizeof(struct dp_mon_ops));
1578 	if (!mon_ops) {
1579 		dp_mon_err("Failed to allocate memory for mon ops");
1580 		return;
1581 	}
1582 
1583 	qdf_mem_copy(mon_ops, &monitor_ops_2_0, sizeof(struct dp_mon_ops));
1584 	mon_soc->mon_ops = mon_ops;
1585 }
1586 
1587 void dp_mon_cdp_ops_register_2_0(struct cdp_ops *ops)
1588 {
1589 	struct cdp_mon_ops *mon_ops = NULL;
1590 
1591 	if (ops->mon_ops) {
1592 		dp_mon_err("cdp monitor ops is allocated");
1593 		return;
1594 	}
1595 
1596 	mon_ops = qdf_mem_malloc(sizeof(struct cdp_mon_ops));
1597 	if (!mon_ops) {
1598 		dp_mon_err("Failed to allocate memory for mon ops");
1599 		return;
1600 	}
1601 
1602 	qdf_mem_copy(mon_ops, &dp_ops_mon_2_0, sizeof(struct cdp_mon_ops));
1603 	ops->mon_ops = mon_ops;
1604 }
1605 #else
void dp_mon_ops_register_2_0(struct dp_mon_soc *mon_soc)
{
	/* No per-soc ops support: all socs share the static BE table */
	mon_soc->mon_ops = &monitor_ops_2_0;
}

void dp_mon_cdp_ops_register_2_0(struct cdp_ops *ops)
{
	/* No per-soc ops support: share the static BE cdp mon ops */
	ops->mon_ops = &dp_ops_mon_2_0;
}
1615 #endif
1616 
1617 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
1618 	defined(WLAN_SUPPORT_RX_FLOW_TAG)
1619 #if QCA_TEST_MON_PF_TAGS_STATS
/**
 * dp_mon_rx_update_rx_protocol_tag_stats() - Update monitor protocol
 *					      tag statistics
 * @pdev: pdev handle
 * @protocol_index: Protocol index for which the stats should be incremented
 *
 * Return: void
 */
void dp_mon_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
					    uint16_t protocol_index)
{
	/* bump the per-protocol monitor tag counter */
	pdev->mon_proto_tag_stats[protocol_index].tag_ctr++;
}
#else
/* Protocol tag stats disabled in this build; no-op */
void dp_mon_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
					    uint16_t protocol_index)
{
}
#endif
1639 #endif
1640