/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <dp_types.h>
#include "dp_rx.h"
#include "dp_peer.h"
#include <dp_htt.h>
#include <dp_mon_filter.h>
#include <dp_mon.h>
#include <dp_rx_mon.h>
#include <dp_rx_mon_2.0.h>
#include <dp_mon_2.0.h>
#include <dp_mon_filter_2.0.h>
#include <dp_tx_mon_2.0.h>
#include <hal_be_api_mon.h>
#include <dp_be.h>
#include <htt_ppdu_stats.h>
#ifdef QCA_SUPPORT_LITE_MONITOR
#include "dp_lite_mon.h"
#endif

#if !defined(DISABLE_MON_CONFIG)

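/**
 * dp_rx_mon_ppdu_info_cache_create() - create the ppdu_info kmem cache and
 * pre-fill the per-pdev free queue from it
 *
 * @pdev: DP pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */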
QDF_STATUS dp_rx_mon_ppdu_info_cache_create(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	uint16_t obj;
	struct hal_rx_ppdu_info *ppdu_info = NULL;

	mon_pdev_be->ppdu_info_cache =
		qdf_kmem_cache_create("rx_mon_ppdu_info_cache",
				      sizeof(struct hal_rx_ppdu_info));

	if (!mon_pdev_be->ppdu_info_cache) {
		dp_mon_err("ppdu_info cache creation failed, pdev: %px", pdev);
		return QDF_STATUS_E_NOMEM;
	}

	TAILQ_INIT(&mon_pdev_be->rx_mon_free_queue);
	for (obj = 0; obj < DP_RX_MON_WQ_THRESHOLD; obj++) {
		ppdu_info = (struct hal_rx_ppdu_info *)
			qdf_kmem_cache_alloc(mon_pdev_be->ppdu_info_cache);

		if (ppdu_info) {
			TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_free_queue,
					  ppdu_info,
					  ppdu_free_list_elem);
			mon_pdev_be->total_free_elem++;
		}
	}
	qdf_spinlock_create(&mon_pdev_be->ppdu_info_lock);

	return QDF_STATUS_SUCCESS;
}

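/**
 * dp_rx_mon_ppdu_info_cache_destroy() - drain the per-pdev free queue and
 * destroy the ppdu_info kmem cache
 *
 * @pdev: DP pdev handle
 *
 * Return: None
 */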
void dp_rx_mon_ppdu_info_cache_destroy(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct hal_rx_ppdu_info *ppdu_info = NULL, *temp_ppdu_info = NULL;

	qdf_spin_lock(&mon_pdev_be->ppdu_info_lock);
	TAILQ_FOREACH_SAFE(ppdu_info,
			   &mon_pdev_be->rx_mon_free_queue,
			   ppdu_free_list_elem,
			   temp_ppdu_info) {
		TAILQ_REMOVE(&mon_pdev_be->rx_mon_free_queue,
			     ppdu_info, ppdu_free_list_elem);
		mon_pdev_be->total_free_elem--;
		qdf_kmem_cache_free(mon_pdev_be->ppdu_info_cache,
				    ppdu_info);
	}
	qdf_spin_unlock(&mon_pdev_be->ppdu_info_lock);
	dp_mon_debug("total free elements: %d", mon_pdev_be->total_free_elem);
	qdf_kmem_cache_destroy(mon_pdev_be->ppdu_info_cache);
}

/**
 * dp_mon_pdev_ext_init_2_0() - Init pdev ext param
 *
 * @pdev: DP pdev handle
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *         QDF_STATUS_E_FAILURE: failure
 */
QDF_STATUS dp_mon_pdev_ext_init_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

	qdf_create_work(0, &mon_pdev_be->rx_mon_work,
			dp_rx_mon_process_ppdu, pdev);
	mon_pdev_be->rx_mon_workqueue =
		qdf_alloc_unbound_workqueue("rx_mon_work_queue");

	if (!mon_pdev_be->rx_mon_workqueue) {
		dp_mon_err("failed to create rxmon wq mon_pdev: %pK", mon_pdev);
		goto fail;
	}
	TAILQ_INIT(&mon_pdev_be->rx_mon_queue);

	qdf_spinlock_create(&mon_pdev_be->rx_mon_wq_lock);

	return QDF_STATUS_SUCCESS;

fail:
	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_mon_pdev_ext_deinit_2_0() - Deinit pdev ext param
 *
 * @pdev: DP pdev handle
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_mon_pdev_ext_deinit_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

	if (!mon_pdev_be->rx_mon_workqueue)
		return QDF_STATUS_E_FAILURE;

	qdf_err("total free elements: %d", mon_pdev_be->total_free_elem);
	qdf_flush_workqueue(0, mon_pdev_be->rx_mon_workqueue);
	qdf_destroy_workqueue(0, mon_pdev_be->rx_mon_workqueue);
	qdf_flush_work(&mon_pdev_be->rx_mon_work);
	qdf_disable_work(&mon_pdev_be->rx_mon_work);
	dp_rx_mon_drain_wq(pdev);
	mon_pdev_be->rx_mon_workqueue = NULL;
	qdf_spinlock_destroy(&mon_pdev_be->rx_mon_wq_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_mon_add_desc_list_to_free_list() - append the unused desc list back to
 *					 the freelist
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: pointer to the last desc of the local desc list
 * @mon_desc_pool: monitor descriptor pool pointer
 *
 * Return: None
 */
void
dp_mon_add_desc_list_to_free_list(struct dp_soc *soc,
				  union dp_mon_desc_list_elem_t **local_desc_list,
				  union dp_mon_desc_list_elem_t **tail,
				  struct dp_mon_desc_pool *mon_desc_pool)
{
	union dp_mon_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&mon_desc_pool->lock);

	temp_list = mon_desc_pool->freelist;
	mon_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&mon_desc_pool->lock);
}

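/*
 * Typical freelist usage (illustrative sketch only): a caller reserves
 * descriptors with dp_mon_get_free_desc_list(), consumes some of them,
 * and returns whatever remains on the local list with
 * dp_mon_add_desc_list_to_free_list(), mirroring the pattern used in
 * dp_mon_buffers_replenish() below:
 *
 *	union dp_mon_desc_list_elem_t *desc_list = NULL;
 *	union dp_mon_desc_list_elem_t *tail = NULL;
 *	uint16_t num = dp_mon_get_free_desc_list(soc, pool, want,
 *						 &desc_list, &tail);
 *	// ... consume up to num descriptors, advancing desc_list ...
 *	if (desc_list)
 *		dp_mon_add_desc_list_to_free_list(soc, &desc_list, &tail,
 *						  pool);
 */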
/**
 * dp_mon_get_free_desc_list() - provide a list of descriptors from
 *				 the free mon desc pool
 *
 * @soc: core txrx main context
 * @mon_desc_pool: monitor descriptor pool pointer
 * @num_descs: number of descs requested from freelist
 * @desc_list: attach the descs to this list (output parameter)
 * @tail: attach the point to last desc of free list (output parameter)
 *
 * Return: number of descs allocated from free list.
 */
static uint16_t
dp_mon_get_free_desc_list(struct dp_soc *soc,
			  struct dp_mon_desc_pool *mon_desc_pool,
			  uint16_t num_descs,
			  union dp_mon_desc_list_elem_t **desc_list,
			  union dp_mon_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&mon_desc_pool->lock);

	*desc_list = *tail = mon_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!mon_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&mon_desc_pool->lock);
			return count;
		}
		*tail = mon_desc_pool->freelist;
		mon_desc_pool->freelist = mon_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&mon_desc_pool->lock);
	return count;
}

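/**
 * dp_mon_pool_frag_unmap_and_free() - unmap and free all in-use buffer
 * frags held by a monitor descriptor pool
 *
 * @soc: DP soc handle
 * @mon_desc_pool: monitor descriptor pool pointer
 *
 * Return: None
 */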
void dp_mon_pool_frag_unmap_and_free(struct dp_soc *soc,
				     struct dp_mon_desc_pool *mon_desc_pool)
{
	int desc_id;
	qdf_frag_t vaddr;
	qdf_dma_addr_t paddr;

	qdf_spin_lock_bh(&mon_desc_pool->lock);
	for (desc_id = 0; desc_id < mon_desc_pool->pool_size; desc_id++) {
		if (mon_desc_pool->array[desc_id].mon_desc.in_use) {
			vaddr = mon_desc_pool->array[desc_id].mon_desc.buf_addr;
			paddr = mon_desc_pool->array[desc_id].mon_desc.paddr;

			if (!(mon_desc_pool->array[desc_id].mon_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   mon_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				mon_desc_pool->array[desc_id].mon_desc.unmapped = 1;
				mon_desc_pool->array[desc_id].mon_desc.cookie = desc_id;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&mon_desc_pool->lock);
}

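/**
 * dp_mon_frag_alloc_and_map() - allocate a buffer frag and DMA-map it for
 * use in a monitor ring
 *
 * @dp_soc: DP soc handle
 * @mon_desc: monitor descriptor to fill with the frag address and paddr
 * @mon_desc_pool: monitor descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on allocation
 *	   failure, QDF_STATUS_E_FAULT on map failure
 */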
static inline QDF_STATUS
dp_mon_frag_alloc_and_map(struct dp_soc *dp_soc,
			  struct dp_mon_desc *mon_desc,
			  struct dp_mon_desc_pool *mon_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	mon_desc->buf_addr = qdf_frag_alloc(&mon_desc_pool->pf_cache,
					    mon_desc_pool->buf_size);

	if (!mon_desc->buf_addr) {
		dp_mon_err("Frag alloc failed");
		return QDF_STATUS_E_NOMEM;
	}

	ret = qdf_mem_map_page(dp_soc->osdev,
			       mon_desc->buf_addr,
			       QDF_DMA_FROM_DEVICE,
			       mon_desc_pool->buf_size,
			       &mon_desc->paddr);

	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		qdf_frag_free(mon_desc->buf_addr);
		dp_mon_err("Frag map failed");
		return QDF_STATUS_E_FAULT;
	}

	return QDF_STATUS_SUCCESS;
}

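/**
 * dp_mon_buffers_replenish() - replenish a monitor buffer ring with freshly
 * allocated and mapped buffer frags
 *
 * @dp_soc: DP soc handle
 * @dp_mon_srng: monitor buffer srng to replenish
 * @mon_desc_pool: monitor descriptor pool pointer
 * @num_req_buffers: number of buffers requested
 * @desc_list: descriptors reserved by the caller; if NULL, descriptors are
 *	       taken from the pool freelist (in/out parameter)
 * @tail: tail of the caller's desc list (in/out parameter)
 * @replenish_cnt_ref: optional counter incremented by the number of buffers
 *		       actually replenished
 *
 * Return: QDF status
 */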
QDF_STATUS
dp_mon_buffers_replenish(struct dp_soc *dp_soc,
			 struct dp_srng *dp_mon_srng,
			 struct dp_mon_desc_pool *mon_desc_pool,
			 uint32_t num_req_buffers,
			 union dp_mon_desc_list_elem_t **desc_list,
			 union dp_mon_desc_list_elem_t **tail,
			 uint32_t *replenish_cnt_ref)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	uint32_t num_entries_avail;
	uint32_t count = 0;
	int sync_hw_ptr = 1;
	struct dp_mon_desc mon_desc = {0};
	void *mon_ring_entry;
	union dp_mon_desc_list_elem_t *next;
	void *mon_srng;
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
	struct dp_mon_soc *mon_soc = dp_soc->monitor_soc;

	if (!num_req_buffers) {
		dp_mon_debug("%pK: Received request for 0 buffers replenish",
			     dp_soc);
		ret = QDF_STATUS_E_INVAL;
		goto free_desc;
	}

	mon_srng = dp_mon_srng->hal_srng;

	hal_srng_access_start(dp_soc->hal_soc, mon_srng);

	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   mon_srng, sync_hw_ptr);

	if (!num_entries_avail) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, mon_srng);
		goto free_desc;
	}
	if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_mon_get_free_desc_list(dp_soc,
							   mon_desc_pool,
							   num_req_buffers,
							   desc_list,
							   tail);

		if (!num_alloc_desc) {
			dp_mon_debug("%pK: no free rx_descs in freelist", dp_soc);
			hal_srng_access_end(dp_soc->hal_soc, mon_srng);
			return QDF_STATUS_E_NOMEM;
		}

		dp_mon_info("%pK: %d rx desc allocated",
			    dp_soc, num_alloc_desc);

		num_req_buffers = num_alloc_desc;
	}

	while (count < num_req_buffers) {
		ret = dp_mon_frag_alloc_and_map(dp_soc,
						&mon_desc,
						mon_desc_pool);

		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
				continue;
			break;
		}

		count++;
		next = (*desc_list)->next;
		mon_ring_entry = hal_srng_src_get_next(
						dp_soc->hal_soc,
						mon_srng);

		if (!mon_ring_entry)
			break;

		qdf_assert_always((*desc_list)->mon_desc.in_use == 0);

		(*desc_list)->mon_desc.in_use = 1;
		(*desc_list)->mon_desc.unmapped = 0;
		(*desc_list)->mon_desc.buf_addr = mon_desc.buf_addr;
		(*desc_list)->mon_desc.paddr = mon_desc.paddr;
		(*desc_list)->mon_desc.magic = DP_MON_DESC_MAGIC;

		mon_soc->stats.frag_alloc++;
		hal_mon_buff_addr_info_set(dp_soc->hal_soc,
					   mon_ring_entry,
					   &((*desc_list)->mon_desc),
					   mon_desc.paddr);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, mon_srng);
	if (replenish_cnt_ref)
		*replenish_cnt_ref += count;

free_desc:
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list) {
		dp_mon_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						  mon_desc_pool);
	}

	return ret;
}

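/**
 * dp_mon_desc_pool_init() - link the SW descriptors of a monitor desc pool
 * into a freelist and reset their state
 *
 * @mon_desc_pool: monitor descriptor pool pointer
 * @pool_size: number of descriptors in the pool
 *
 * Return: QDF_STATUS_SUCCESS
 */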
QDF_STATUS
dp_mon_desc_pool_init(struct dp_mon_desc_pool *mon_desc_pool,
		      uint32_t pool_size)
{
	int desc_id;
	/* Initialize monitor desc lock */
	qdf_spinlock_create(&mon_desc_pool->lock);

	qdf_spin_lock_bh(&mon_desc_pool->lock);

	mon_desc_pool->buf_size = DP_MON_DATA_BUFFER_SIZE;
	/* link SW descs into a freelist */
	mon_desc_pool->freelist = &mon_desc_pool->array[0];
	mon_desc_pool->pool_size = pool_size - 1;
	qdf_mem_zero(mon_desc_pool->freelist,
		     mon_desc_pool->pool_size *
		     sizeof(union dp_mon_desc_list_elem_t));

	for (desc_id = 0; desc_id < mon_desc_pool->pool_size; desc_id++) {
		if (desc_id == mon_desc_pool->pool_size - 1)
			mon_desc_pool->array[desc_id].next = NULL;
		else
			mon_desc_pool->array[desc_id].next =
				&mon_desc_pool->array[desc_id + 1];
		mon_desc_pool->array[desc_id].mon_desc.in_use = 0;
		mon_desc_pool->array[desc_id].mon_desc.cookie = desc_id;
	}
	qdf_spin_unlock_bh(&mon_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}

void dp_mon_desc_pool_deinit(struct dp_mon_desc_pool *mon_desc_pool)
{
	qdf_spin_lock_bh(&mon_desc_pool->lock);

	mon_desc_pool->freelist = NULL;
	mon_desc_pool->pool_size = 0;

	qdf_spin_unlock_bh(&mon_desc_pool->lock);
	qdf_spinlock_destroy(&mon_desc_pool->lock);
}

void dp_mon_desc_pool_free(struct dp_mon_desc_pool *mon_desc_pool)
{
	qdf_mem_free(mon_desc_pool->array);
}

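/**
 * dp_mon_desc_pool_alloc() - allocate the SW descriptor array backing a
 * monitor descriptor pool
 *
 * @pool_size: number of descriptors in the pool
 * @mon_desc_pool: monitor descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */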
QDF_STATUS dp_mon_desc_pool_alloc(uint32_t pool_size,
				  struct dp_mon_desc_pool *mon_desc_pool)
{
	mon_desc_pool->pool_size = pool_size - 1;
	mon_desc_pool->array = qdf_mem_malloc((mon_desc_pool->pool_size) *
				     sizeof(union dp_mon_desc_list_elem_t));
	if (!mon_desc_pool->array)
		return QDF_STATUS_E_NOMEM;

	return QDF_STATUS_SUCCESS;
}

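/**
 * dp_vdev_set_monitor_mode_buf_rings_rx_2_0() - raise the Rx monitor buf
 * ring low threshold and top up the ring to its configured fill level
 *
 * @pdev: DP pdev handle
 *
 * Return: QDF status
 */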
QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_rx_2_0(struct dp_pdev *pdev)
{
	int rx_mon_max_entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	QDF_STATUS status;

	if (!mon_soc_be) {
		dp_mon_err("DP MON SOC is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	rx_mon_max_entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);

	hal_set_low_threshold(soc->rxdma_mon_buf_ring[0].hal_srng,
			      rx_mon_max_entries >> 2);
	status = htt_srng_setup(soc->htt_handle, 0,
				soc->rxdma_mon_buf_ring[0].hal_srng,
				RXDMA_MONITOR_BUF);

	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("Failed to send htt srng setup message for Rx mon buf ring");
		return status;
	}

	if (mon_soc_be->rx_mon_ring_fill_level < rx_mon_max_entries) {
		status = dp_rx_mon_buffers_alloc(soc,
						 (rx_mon_max_entries -
						 mon_soc_be->rx_mon_ring_fill_level));
		if (status != QDF_STATUS_SUCCESS) {
			dp_mon_err("%pK: Rx mon buffers allocation failed", soc);
			return status;
		}
		mon_soc_be->rx_mon_ring_fill_level = rx_mon_max_entries;
	}

	return QDF_STATUS_SUCCESS;
}

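/**
 * dp_vdev_set_monitor_mode_buf_rings_tx_2_0() - raise the Tx monitor buf
 * ring low threshold and top up the ring to the requested fill level
 *
 * @pdev: DP pdev handle
 * @num_of_buffers: requested Tx monitor ring fill level
 *
 * Return: QDF status
 */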
QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_tx_2_0(struct dp_pdev *pdev,
						     uint16_t num_of_buffers)
{
	int tx_mon_max_entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	QDF_STATUS status;

	if (!mon_soc_be) {
		dp_mon_err("DP MON SOC is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	tx_mon_max_entries =
		wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);

	hal_set_low_threshold(mon_soc_be->tx_mon_buf_ring.hal_srng,
			      tx_mon_max_entries >> 2);
	status = htt_srng_setup(soc->htt_handle, 0,
				mon_soc_be->tx_mon_buf_ring.hal_srng,
				TX_MONITOR_BUF);

	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("Failed to send htt srng setup message for Tx mon buf ring");
		return status;
	}

	if (mon_soc_be->tx_mon_ring_fill_level < num_of_buffers) {
		if (dp_tx_mon_buffers_alloc(soc,
					    (num_of_buffers -
					     mon_soc_be->tx_mon_ring_fill_level))) {
			dp_mon_err("%pK: Tx mon buffers allocation failed",
				   soc);
			return QDF_STATUS_E_FAILURE;
		}
		mon_soc_be->tx_mon_ring_fill_level = num_of_buffers;
	}

	return QDF_STATUS_SUCCESS;
}

549 QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_2_0(struct dp_pdev *pdev)
550 {
551 	int status;
552 	struct dp_soc *soc = pdev->soc;
553 
554 	status = dp_vdev_set_monitor_mode_buf_rings_rx_2_0(pdev);
555 	if (status != QDF_STATUS_SUCCESS) {
556 		dp_mon_err("%pK: Rx monitor extra buffer allocation failed",
557 			   soc);
558 		return status;
559 	}
560 
561 	return QDF_STATUS_SUCCESS;
562 }
563 
564 static
565 QDF_STATUS dp_vdev_set_monitor_mode_rings_2_0(struct dp_pdev *pdev,
566 					      uint8_t delayed_replenish)
567 {
568 	return QDF_STATUS_SUCCESS;
569 }
570 
#ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_mon_tx_enable_enhanced_stats_2_0() - Send HTT cmd to FW to enable stats
 * @pdev: Datapath pdev handle
 *
 * Return: none
 */
static void dp_mon_tx_enable_enhanced_stats_2_0(struct dp_pdev *pdev)
{
	dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
				  pdev->pdev_id);
}

/**
 * dp_mon_tx_disable_enhanced_stats_2_0() - Send HTT cmd to FW to disable stats
 * @pdev: Datapath pdev handle
 *
 * Return: none
 */
static void dp_mon_tx_disable_enhanced_stats_2_0(struct dp_pdev *pdev)
{
	dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
}
#endif

#if defined(QCA_ENHANCED_STATS_SUPPORT) && defined(WLAN_FEATURE_11BE)
void
dp_mon_tx_stats_update_2_0(struct dp_mon_peer *mon_peer,
			   struct cdp_tx_completion_ppdu_user *ppdu)
{
	uint8_t preamble, mcs, punc_mode;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;

	punc_mode = dp_mon_get_puncture_type(ppdu->punc_pattern_bitmap,
					     ppdu->bw);
	ppdu->punc_mode = punc_mode;

	DP_STATS_INC(mon_peer, tx.punc_bw[punc_mode], ppdu->num_msdu);
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1],
		      ppdu->num_msdu,
		      ((mcs >= MAX_MCS_11BE) && (preamble == DOT11_BE)));
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[mcs],
		      ppdu->num_msdu,
		      ((mcs < MAX_MCS_11BE) && (preamble == DOT11_BE)));
	DP_STATS_INCC(mon_peer,
		      tx.su_be_ppdu_cnt.mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11BE) && (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_SU)));
	DP_STATS_INCC(mon_peer,
		      tx.su_be_ppdu_cnt.mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11BE) && (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_SU)));
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[MAX_MCS - 1],
		      1, ((mcs >= MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA)));
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[mcs],
		      1, ((mcs < MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA)));
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[MAX_MCS - 1],
		      1, ((mcs >= MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO)));
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[mcs],
		      1, ((mcs < MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO)));
}

enum cdp_punctured_modes
dp_mon_get_puncture_type(uint16_t puncture_pattern, uint8_t bw)
{
	uint16_t mask;
	uint8_t punctured_bits;

	if (!puncture_pattern)
		return NO_PUNCTURE;

	switch (bw) {
	case CMN_BW_80MHZ:
		mask = PUNCTURE_80MHZ_MASK;
		break;
	case CMN_BW_160MHZ:
		mask = PUNCTURE_160MHZ_MASK;
		break;
	case CMN_BW_320MHZ:
		mask = PUNCTURE_320MHZ_MASK;
		break;
	default:
		return NO_PUNCTURE;
	}

	/* 0s in the puncture pattern received in the TLV indicate punctured
	 * 20 MHz bands; after the complement, 1s indicate punctured bands
	 */
	puncture_pattern = ~puncture_pattern;
	puncture_pattern &= mask;

	if (puncture_pattern) {
		punctured_bits = 0;
		while (puncture_pattern != 0) {
			punctured_bits++;
			puncture_pattern &= (puncture_pattern - 1);
		}

		if (bw == CMN_BW_80MHZ) {
			if (punctured_bits == IEEE80211_PUNC_MINUS20MHZ)
				return PUNCTURED_20MHZ;
			else
				return NO_PUNCTURE;
		} else if (bw == CMN_BW_160MHZ) {
			if (punctured_bits == IEEE80211_PUNC_MINUS20MHZ)
				return PUNCTURED_20MHZ;
			else if (punctured_bits == IEEE80211_PUNC_MINUS40MHZ)
				return PUNCTURED_40MHZ;
			else
				return NO_PUNCTURE;
		} else if (bw == CMN_BW_320MHZ) {
			if (punctured_bits == IEEE80211_PUNC_MINUS40MHZ)
				return PUNCTURED_40MHZ;
			else if (punctured_bits == IEEE80211_PUNC_MINUS80MHZ)
				return PUNCTURED_80MHZ;
			else if (punctured_bits == IEEE80211_PUNC_MINUS120MHZ)
				return PUNCTURED_120MHZ;
			else
				return NO_PUNCTURE;
		}
	}
	return NO_PUNCTURE;
}
#endif

#if defined(QCA_ENHANCED_STATS_SUPPORT) && !defined(WLAN_FEATURE_11BE)
void
dp_mon_tx_stats_update_2_0(struct dp_mon_peer *mon_peer,
			   struct cdp_tx_completion_ppdu_user *ppdu)
{
	ppdu->punc_mode = NO_PUNCTURE;
}

enum cdp_punctured_modes
dp_mon_get_puncture_type(uint16_t puncture_pattern, uint8_t bw)
{
	return NO_PUNCTURE;
}
#endif /* QCA_ENHANCED_STATS_SUPPORT && WLAN_FEATURE_11BE */

#ifdef QCA_SUPPORT_BPR
static QDF_STATUS
dp_set_bpr_enable_2_0(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* QCA_SUPPORT_BPR */

#ifdef QCA_ENHANCED_STATS_SUPPORT
#ifdef WDI_EVENT_ENABLE
/**
 * dp_ppdu_desc_notify_2_0() - Notify upper layer for PPDU indication via WDI
 *
 * @pdev: Datapath pdev handle
 * @nbuf: Buffer to be shipped
 *
 * Return: void
 */
static void dp_ppdu_desc_notify_2_0(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
{
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(nbuf);

	if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
	    ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
				     pdev->soc,
				     nbuf, HTT_INVALID_PEER,
				     WDI_NO_VAL,
				     pdev->pdev_id);
	} else {
		qdf_nbuf_free(nbuf);
	}
}
#endif

/**
 * dp_ppdu_stats_feat_enable_check_2_0() - Check if feature(s) is enabled to
 *					   consume ppdu stats from FW
 *
 * @pdev: Datapath pdev handle
 *
 * Return: true if enabled, else return false
 */
static bool dp_ppdu_stats_feat_enable_check_2_0(struct dp_pdev *pdev)
{
	return pdev->monitor_pdev->enhanced_stats_en;
}
#endif

static
QDF_STATUS dp_mon_soc_htt_srng_setup_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	QDF_STATUS status;

	hal_set_low_threshold(soc->rxdma_mon_buf_ring[0].hal_srng, 0);
	status = htt_srng_setup(soc->htt_handle, 0,
				soc->rxdma_mon_buf_ring[0].hal_srng,
				RXDMA_MONITOR_BUF);

	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt srng setup message for Rx mon buf ring");
		return status;
	}

	hal_set_low_threshold(mon_soc_be->tx_mon_buf_ring.hal_srng, 0);
	status = htt_srng_setup(soc->htt_handle, 0,
				mon_soc_be->tx_mon_buf_ring.hal_srng,
				TX_MONITOR_BUF);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt srng setup message for Tx mon buf ring");
		return status;
	}

	return status;
}

static
QDF_STATUS dp_mon_pdev_htt_srng_setup_2_0(struct dp_soc *soc,
					  struct dp_pdev *pdev,
					  int mac_id,
					  int mac_for_pdev)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	QDF_STATUS status;

	if (!soc->rxdma_mon_dst_ring[mac_id].hal_srng)
		return QDF_STATUS_SUCCESS;

	status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
				soc->rxdma_mon_dst_ring[mac_id].hal_srng,
				RXDMA_MONITOR_DST);

	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("Failed to send htt srng setup message for Rxdma dst ring");
		return status;
	}

	if (!mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng)
		return QDF_STATUS_SUCCESS;

	status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
				mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng,
				TX_MONITOR_DST);

	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("Failed to send htt srng message for Tx mon dst ring");
		return status;
	}

	return status;
}

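/**
 * dp_tx_mon_refill_buf_ring_2_0() - low-threshold interrupt handler that
 * refills the Tx monitor buffer ring up to the available ring space
 *
 * @int_ctx: interrupt context
 *
 * Return: QDF_STATUS_SUCCESS
 */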
QDF_STATUS dp_tx_mon_refill_buf_ring_2_0(struct dp_intr *int_ctx)
{
	struct dp_soc *soc = int_ctx->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	union dp_mon_desc_list_elem_t *desc_list = NULL;
	union dp_mon_desc_list_elem_t *tail = NULL;
	struct dp_srng *tx_mon_buf_ring;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	uint32_t num_entries_avail;
	int sync_hw_ptr = 1;
	void *hal_srng;

	tx_mon_buf_ring = &mon_soc_be->tx_mon_buf_ring;
	hal_srng = tx_mon_buf_ring->hal_srng;

	intr_stats->num_host2txmon_ring__masks++;
	mon_soc_be->tx_low_thresh_intrs++;
	hal_srng_access_start(soc->hal_soc, hal_srng);
	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
						   hal_srng,
						   sync_hw_ptr);
	hal_srng_access_end(soc->hal_soc, hal_srng);

	if (num_entries_avail)
		dp_mon_buffers_replenish(soc, tx_mon_buf_ring,
					 &mon_soc_be->tx_desc_mon,
					 num_entries_avail, &desc_list, &tail,
					 NULL);

	return QDF_STATUS_SUCCESS;
}

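/**
 * dp_rx_mon_refill_buf_ring_2_0() - low-threshold interrupt handler that
 * refills the Rx monitor buffer ring up to the available ring space
 *
 * @int_ctx: interrupt context
 *
 * Return: QDF_STATUS_SUCCESS
 */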
QDF_STATUS dp_rx_mon_refill_buf_ring_2_0(struct dp_intr *int_ctx)
{
	struct dp_soc *soc = int_ctx->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	union dp_mon_desc_list_elem_t *desc_list = NULL;
	union dp_mon_desc_list_elem_t *tail = NULL;
	struct dp_srng *rx_mon_buf_ring;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	uint32_t num_entries_avail;
	int sync_hw_ptr = 1;
	void *hal_srng;

	rx_mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
	hal_srng = rx_mon_buf_ring->hal_srng;

	intr_stats->num_host2rxdma_ring_masks++;
	mon_soc_be->rx_low_thresh_intrs++;
	hal_srng_access_start(soc->hal_soc, hal_srng);
	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
						   hal_srng,
						   sync_hw_ptr);
	hal_srng_access_end(soc->hal_soc, hal_srng);

	if (num_entries_avail)
		dp_mon_buffers_replenish(soc, rx_mon_buf_ring,
					 &mon_soc_be->rx_desc_mon,
					 num_entries_avail, &desc_list, &tail,
					 NULL);

	return QDF_STATUS_SUCCESS;
}

static
QDF_STATUS dp_mon_soc_detach_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	if (!mon_soc_be) {
		dp_mon_err("DP MON SOC NULL");
		return QDF_STATUS_E_FAILURE;
	}

	dp_rx_mon_buf_desc_pool_free(soc);
	dp_srng_free(soc, &soc->rxdma_mon_buf_ring[0]);
	dp_tx_mon_buf_desc_pool_free(soc);
	dp_srng_free(soc, &mon_soc_be->tx_mon_buf_ring);

	return QDF_STATUS_SUCCESS;
}

static void dp_mon_soc_deinit_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	if (!mon_soc_be->is_dp_mon_soc_initialized)
		return;

	dp_rx_mon_buffers_free(soc);
	dp_tx_mon_buffers_free(soc);

	dp_rx_mon_buf_desc_pool_deinit(soc);
	dp_tx_mon_buf_desc_pool_deinit(soc);

	dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[0], RXDMA_MONITOR_BUF, 0);
	dp_srng_deinit(soc, &mon_soc_be->tx_mon_buf_ring, TX_MONITOR_BUF, 0);

	mon_soc_be->is_dp_mon_soc_initialized = false;
}

static
QDF_STATUS dp_rx_mon_soc_init_2_0(struct dp_soc *soc)
{
	if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[0],
			 RXDMA_MONITOR_BUF, 0, 0)) {
		dp_mon_err("%pK: " RNG_ERR "rx_mon_buf_ring", soc);
		goto fail;
	}

	if (dp_rx_mon_buf_desc_pool_init(soc)) {
		dp_mon_err("%pK: " RNG_ERR "rx mon desc pool init", soc);
		goto fail;
	}

	/* monitor buffers for src */
	if (dp_rx_mon_buffers_alloc(soc, DP_MON_RING_FILL_LEVEL_DEFAULT)) {
		dp_mon_err("%pK: Rx mon buffers allocation failed", soc);
		goto fail;
	}

	return QDF_STATUS_SUCCESS;
fail:
	return QDF_STATUS_E_FAILURE;
}

static
QDF_STATUS dp_tx_mon_soc_init_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	if (dp_srng_init(soc, &mon_soc_be->tx_mon_buf_ring,
			 TX_MONITOR_BUF, 0, 0)) {
		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
		goto fail;
	}

	if (dp_tx_mon_buf_desc_pool_init(soc)) {
		dp_mon_err("%pK: " RNG_ERR "tx mon desc pool init", soc);
		goto fail;
	}

	return QDF_STATUS_SUCCESS;
fail:
	return QDF_STATUS_E_FAILURE;
}

static
QDF_STATUS dp_mon_soc_init_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	if (soc->rxdma_mon_buf_ring[0].hal_srng) {
		dp_mon_info("%pK: mon soc init is done", soc);
		return QDF_STATUS_SUCCESS;
	}

	if (dp_rx_mon_soc_init_2_0(soc)) {
		dp_mon_err("%pK: " RNG_ERR "rx_mon_buf_ring", soc);
		goto fail;
	}

	if (dp_tx_mon_soc_init_2_0(soc)) {
		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
		goto fail;
	}

	mon_soc_be->tx_mon_ring_fill_level = 0;
	mon_soc_be->rx_mon_ring_fill_level = DP_MON_RING_FILL_LEVEL_DEFAULT;

	mon_soc_be->is_dp_mon_soc_initialized = true;
	return QDF_STATUS_SUCCESS;
fail:
	dp_mon_soc_deinit_2_0(soc);
	return QDF_STATUS_E_FAILURE;
}

static
QDF_STATUS dp_mon_soc_attach_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	int entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	if (!mon_soc_be) {
		dp_mon_err("DP MON SOC is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
	dp_mon_debug("rx mon buf entries: %d", entries);
	if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[0],
			  RXDMA_MONITOR_BUF, entries, 0)) {
		dp_mon_err("%pK: " RNG_ERR "rx_mon_buf_ring", soc);
		goto fail;
	}

	entries = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
	dp_mon_debug("tx mon buf entries: %d", entries);
	if (dp_srng_alloc(soc, &mon_soc_be->tx_mon_buf_ring,
			  TX_MONITOR_BUF, entries, 0)) {
		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
		goto fail;
	}

	/* allocate sw desc pool */
	if (dp_rx_mon_buf_desc_pool_alloc(soc)) {
		dp_mon_err("%pK: Rx mon desc pool allocation failed", soc);
		goto fail;
	}

	if (dp_tx_mon_buf_desc_pool_alloc(soc)) {
		dp_mon_err("%pK: Tx mon desc pool allocation failed", soc);
		goto fail;
	}

	return QDF_STATUS_SUCCESS;
fail:
	dp_mon_soc_detach_2_0(soc);
	return QDF_STATUS_E_NOMEM;
}

static
void dp_pdev_mon_rings_deinit_2_0(struct dp_pdev *pdev)
{
	int mac_id = 0;
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
							 pdev->pdev_id);

		dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
			       RXDMA_MONITOR_DST, pdev->pdev_id);
		dp_srng_deinit(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
			       TX_MONITOR_DST, pdev->pdev_id);
	}
}

static
QDF_STATUS dp_pdev_mon_rings_init_2_0(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	int mac_id = 0;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
							 pdev->pdev_id);

		if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
				 RXDMA_MONITOR_DST, pdev->pdev_id, lmac_id)) {
			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
			goto fail;
		}

		if (dp_srng_init(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
				 TX_MONITOR_DST, pdev->pdev_id, lmac_id)) {
			dp_mon_err("%pK: " RNG_ERR "tx_mon_dst_ring", soc);
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	dp_pdev_mon_rings_deinit_2_0(pdev);
	return QDF_STATUS_E_NOMEM;
}

static
void dp_pdev_mon_rings_free_2_0(struct dp_pdev *pdev)
{
	int mac_id = 0;
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
							 pdev->pdev_id);

		dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
		dp_srng_free(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id]);
	}
}

static
QDF_STATUS dp_pdev_mon_rings_alloc_2_0(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	int mac_id = 0;
	int entries;
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;

	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
		int lmac_id =
		dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);

		entries = wlan_cfg_get_dma_rx_mon_dest_ring_size(pdev_cfg_ctx);
		if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
				  RXDMA_MONITOR_DST, entries, 0)) {
			dp_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", pdev);
			goto fail;
		}

		entries = wlan_cfg_get_dma_tx_mon_dest_ring_size(pdev_cfg_ctx);
		if (dp_srng_alloc(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
				  TX_MONITOR_DST, entries, 0)) {
			dp_err("%pK: " RNG_ERR "tx_mon_dst_ring", pdev);
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	dp_pdev_mon_rings_free_2_0(pdev);
	return QDF_STATUS_E_NOMEM;
}

static
void dp_mon_pdev_free_2_0(struct dp_pdev *pdev)
{
}

static
QDF_STATUS dp_mon_pdev_alloc_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

	if (!mon_pdev_be) {
		dp_mon_err("DP MON PDEV is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

#else
static inline
QDF_STATUS dp_mon_htt_srng_setup_2_0(struct dp_soc *soc,
				     struct dp_pdev *pdev,
				     int mac_id,
				     int mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}

static uint32_t
dp_tx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
		      uint32_t mac_id, uint32_t quota)
{
	return 0;
}

static inline
QDF_STATUS dp_mon_soc_attach_2_0(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_mon_soc_detach_2_0(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_pdev_mon_rings_deinit_2_0(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_pdev_mon_rings_init_2_0(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_pdev_mon_rings_free_2_0(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_pdev_mon_rings_alloc_2_0(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_mon_pdev_free_2_0(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_mon_pdev_alloc_2_0(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_2_0(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_vdev_set_monitor_mode_rings_2_0(struct dp_pdev *pdev,
					      uint8_t delayed_replenish)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops->mon_ppdu_stats_ind_handler =
					dp_ppdu_stats_ind_handler;
}
#else
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
}
#endif

static void dp_mon_register_intr_ops_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	mon_soc->mon_ops->rx_mon_refill_buf_ring = NULL;
	mon_soc->mon_ops->tx_mon_refill_buf_ring = NULL;
	mon_soc->mon_rx_process = dp_rx_mon_process_2_0;
	dp_mon_ppdu_stats_handler_register(mon_soc);
}

/**
 * dp_mon_register_feature_ops_2_0() - register feature ops
 *
 * @soc: dp soc context
 *
 * Return: void
 */
static void
dp_mon_register_feature_ops_2_0(struct dp_soc *soc)
{
	struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);

	if (!mon_ops) {
		dp_err("mon_ops is NULL, feature ops registration failed");
		return;
	}

	mon_ops->mon_config_debug_sniffer = dp_config_debug_sniffer;
	mon_ops->mon_peer_tx_init = NULL;
	mon_ops->mon_peer_tx_cleanup = NULL;
	mon_ops->mon_htt_ppdu_stats_attach = dp_htt_ppdu_stats_attach;
	mon_ops->mon_htt_ppdu_stats_detach = dp_htt_ppdu_stats_detach;
	mon_ops->mon_print_pdev_rx_mon_stats = dp_print_pdev_rx_mon_stats;
	mon_ops->mon_set_bsscolor = dp_mon_set_bsscolor;
	mon_ops->mon_pdev_get_filter_ucast_data = NULL;
	mon_ops->mon_pdev_get_filter_mcast_data = NULL;
	mon_ops->mon_pdev_get_filter_non_data = NULL;
	mon_ops->mon_neighbour_peer_add_ast = NULL;
#ifndef DISABLE_MON_CONFIG
	mon_ops->mon_tx_process = dp_tx_mon_process_2_0;
#endif
#ifdef WLAN_TX_PKT_CAPTURE_ENH_BE
	mon_ops->mon_peer_tid_peer_id_update = NULL;
	mon_ops->mon_tx_capture_debugfs_init = NULL;
	mon_ops->mon_tx_add_to_comp_queue = NULL;
	mon_ops->mon_print_pdev_tx_capture_stats =
					dp_print_pdev_tx_monitor_stats_2_0;
	mon_ops->mon_config_enh_tx_capture = dp_config_enh_tx_monitor_2_0;
	mon_ops->mon_tx_peer_filter = dp_peer_set_tx_capture_enabled_2_0;
#endif
#if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH_BE))
	mon_ops->mon_peer_tid_peer_id_update = NULL;
	mon_ops->mon_tx_capture_debugfs_init = NULL;
	mon_ops->mon_tx_add_to_comp_queue = NULL;
	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
	mon_ops->mon_config_enh_tx_capture = dp_config_enh_tx_core_monitor_2_0;
	mon_ops->mon_tx_peer_filter = NULL;
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_config_enh_rx_capture = NULL;
#endif
#ifdef QCA_SUPPORT_BPR
	mon_ops->mon_set_bpr_enable = dp_set_bpr_enable_2_0;
#endif
#ifdef ATH_SUPPORT_NAC
	mon_ops->mon_set_filter_neigh_peers = NULL;
#endif
#ifdef WLAN_ATF_ENABLE
	mon_ops->mon_set_atf_stats_enable = dp_set_atf_stats_enable;
#endif
#ifdef FEATURE_NAC_RSSI
	mon_ops->mon_filter_neighbour_peer = NULL;
#endif
#ifdef QCA_MCOPY_SUPPORT
	mon_ops->mon_filter_setup_mcopy_mode = NULL;
	mon_ops->mon_filter_reset_mcopy_mode = NULL;
	mon_ops->mon_mcopy_check_deliver = NULL;
#endif
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_filter_setup_enhanced_stats =
				dp_mon_filter_setup_enhanced_stats_2_0;
	mon_ops->mon_filter_reset_enhanced_stats =
				dp_mon_filter_reset_enhanced_stats_2_0;
	mon_ops->mon_tx_enable_enhanced_stats =
				dp_mon_tx_enable_enhanced_stats_2_0;
	mon_ops->mon_tx_disable_enhanced_stats =
				dp_mon_tx_disable_enhanced_stats_2_0;
	mon_ops->mon_ppdu_stats_feat_enable_check =
				dp_ppdu_stats_feat_enable_check_2_0;
	mon_ops->mon_tx_stats_update = dp_mon_tx_stats_update_2_0;
	mon_ops->mon_ppdu_desc_deliver = dp_ppdu_desc_deliver;
#ifdef WDI_EVENT_ENABLE
	mon_ops->mon_ppdu_desc_notify = dp_ppdu_desc_notify_2_0;
#endif
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_filter_setup_rx_enh_capture = NULL;
#endif
#ifdef WDI_EVENT_ENABLE
	mon_ops->mon_set_pktlog_wifi3 = dp_set_pktlog_wifi3;
	mon_ops->mon_filter_setup_rx_pkt_log_full =
				dp_mon_filter_setup_rx_pkt_log_full_2_0;
	mon_ops->mon_filter_reset_rx_pkt_log_full =
				dp_mon_filter_reset_rx_pkt_log_full_2_0;
	mon_ops->mon_filter_setup_rx_pkt_log_lite =
				dp_mon_filter_setup_rx_pkt_log_lite_2_0;
	mon_ops->mon_filter_reset_rx_pkt_log_lite =
				dp_mon_filter_reset_rx_pkt_log_lite_2_0;
	mon_ops->mon_filter_setup_rx_pkt_log_cbf =
				dp_mon_filter_setup_rx_pkt_log_cbf_2_0;
	mon_ops->mon_filter_reset_rx_pkt_log_cbf =
				dp_mon_filter_reset_rx_pktlog_cbf_2_0;
	mon_ops->mon_filter_setup_pktlog_hybrid =
				dp_mon_filter_setup_pktlog_hybrid_2_0;
	mon_ops->mon_filter_reset_pktlog_hybrid =
				dp_mon_filter_reset_pktlog_hybrid_2_0;
#endif
#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
	mon_ops->mon_pktlogmod_exit = dp_pktlogmod_exit;
#endif
	mon_ops->rx_hdr_length_set = dp_rx_mon_hdr_length_set;
	mon_ops->rx_packet_length_set = dp_rx_mon_packet_length_set;
	mon_ops->rx_mon_enable = dp_rx_mon_enable_set;
	mon_ops->rx_wmask_subscribe = dp_rx_mon_word_mask_subscribe;
	mon_ops->rx_enable_mpdu_logging = dp_rx_mon_enable_mpdu_logging;
	mon_ops->mon_neighbour_peers_detach = NULL;
	mon_ops->mon_vdev_set_monitor_mode_buf_rings =
				dp_vdev_set_monitor_mode_buf_rings_2_0;
	mon_ops->mon_vdev_set_monitor_mode_rings =
				dp_vdev_set_monitor_mode_rings_2_0;
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_rx_stats_update = dp_rx_mon_stats_update_2_0;
	mon_ops->mon_rx_populate_ppdu_usr_info =
			dp_rx_mon_populate_ppdu_usr_info_2_0;
	mon_ops->mon_rx_populate_ppdu_info = dp_rx_mon_populate_ppdu_info_2_0;
#endif
#ifdef QCA_UNDECODED_METADATA_SUPPORT
	mon_ops->mon_config_undecoded_metadata_capture =
		dp_mon_config_undecoded_metadata_capture;
	mon_ops->mon_filter_setup_undecoded_metadata_capture =
		dp_mon_filter_setup_undecoded_metadata_capture_2_0;
	mon_ops->mon_filter_reset_undecoded_metadata_capture =
		dp_mon_filter_reset_undecoded_metadata_capture_2_0;
#endif
	mon_ops->rx_enable_fpmo = dp_rx_mon_enable_fpmo;
	mon_ops->mon_rx_print_advanced_stats =
		dp_mon_rx_print_advanced_stats_2_0;
}

struct dp_mon_ops monitor_ops_2_0 = {
	.mon_soc_cfg_init = dp_mon_soc_cfg_init,
	.mon_soc_attach = dp_mon_soc_attach_2_0,
	.mon_soc_detach = dp_mon_soc_detach_2_0,
	.mon_soc_init = dp_mon_soc_init_2_0,
	.mon_soc_deinit = dp_mon_soc_deinit_2_0,
	.mon_pdev_alloc = dp_mon_pdev_alloc_2_0,
	.mon_pdev_free = dp_mon_pdev_free_2_0,
	.mon_pdev_attach = dp_mon_pdev_attach,
	.mon_pdev_detach = dp_mon_pdev_detach,
	.mon_pdev_init = dp_mon_pdev_init,
	.mon_pdev_deinit = dp_mon_pdev_deinit,
	.mon_vdev_attach = dp_mon_vdev_attach,
	.mon_vdev_detach = dp_mon_vdev_detach,
	.mon_peer_attach = dp_mon_peer_attach,
	.mon_peer_detach = dp_mon_peer_detach,
	.mon_peer_get_peerstats_ctx = dp_mon_peer_get_peerstats_ctx,
	.mon_peer_reset_stats = dp_mon_peer_reset_stats,
	.mon_peer_get_stats = dp_mon_peer_get_stats,
	.mon_invalid_peer_update_pdev_stats =
				dp_mon_invalid_peer_update_pdev_stats,
	.mon_peer_get_stats_param = dp_mon_peer_get_stats_param,
	.mon_flush_rings = NULL,
#if !defined(DISABLE_MON_CONFIG)
	.mon_pdev_htt_srng_setup = dp_mon_pdev_htt_srng_setup_2_0,
	.mon_soc_htt_srng_setup = dp_mon_soc_htt_srng_setup_2_0,
#endif
#if defined(DP_CON_MON)
	.mon_service_rings = NULL,
#endif
#ifndef DISABLE_MON_CONFIG
	.mon_rx_process = NULL,
#endif
#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
	.mon_drop_packets_for_mac = NULL,
#endif
	.mon_vdev_timer_init = NULL,
	.mon_vdev_timer_start = NULL,
	.mon_vdev_timer_stop = NULL,
	.mon_vdev_timer_deinit = NULL,
	.mon_reap_timer_init = NULL,
	.mon_reap_timer_start = NULL,
	.mon_reap_timer_stop = NULL,
	.mon_reap_timer_deinit = NULL,
	.mon_filter_setup_rx_mon_mode = dp_mon_filter_setup_rx_mon_mode_2_0,
	.mon_filter_reset_rx_mon_mode = dp_mon_filter_reset_rx_mon_mode_2_0,
	.mon_filter_setup_tx_mon_mode = dp_mon_filter_setup_tx_mon_mode_2_0,
	.mon_filter_reset_tx_mon_mode = dp_mon_filter_reset_tx_mon_mode_2_0,
	.tx_mon_filter_update = dp_tx_mon_filter_update_2_0,
	.rx_mon_filter_update = dp_rx_mon_filter_update_2_0,
	.tx_mon_filter_alloc = dp_mon_filter_alloc_2_0,
	.tx_mon_filter_dealloc = dp_mon_filter_dealloc_2_0,
	.mon_rings_alloc = dp_pdev_mon_rings_alloc_2_0,
	.mon_rings_free = dp_pdev_mon_rings_free_2_0,
	.mon_rings_init = dp_pdev_mon_rings_init_2_0,
	.mon_rings_deinit = dp_pdev_mon_rings_deinit_2_0,
	.rx_mon_desc_pool_init = NULL,
	.rx_mon_desc_pool_deinit = NULL,
	.rx_mon_desc_pool_alloc = NULL,
	.rx_mon_desc_pool_free = NULL,
	.rx_mon_buffers_alloc = NULL,
	.rx_mon_buffers_free = NULL,
	.tx_mon_desc_pool_init = NULL,
	.tx_mon_desc_pool_deinit = NULL,
	.tx_mon_desc_pool_alloc = NULL,
	.tx_mon_desc_pool_free = NULL,
#ifndef DISABLE_MON_CONFIG
	.mon_register_intr_ops = dp_mon_register_intr_ops_2_0,
#endif
	.mon_register_feature_ops = dp_mon_register_feature_ops_2_0,
#ifdef WLAN_TX_PKT_CAPTURE_ENH_BE
	.mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach_2_0,
	.mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach_2_0,
	.mon_peer_tx_capture_filter_check = NULL,
#endif
#if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH_BE))
	.mon_tx_ppdu_stats_attach = NULL,
	.mon_tx_ppdu_stats_detach = NULL,
	.mon_peer_tx_capture_filter_check = NULL,
#endif
	.mon_pdev_ext_init = dp_mon_pdev_ext_init_2_0,
	.mon_pdev_ext_deinit = dp_mon_pdev_ext_deinit_2_0,
	.mon_lite_mon_alloc = dp_lite_mon_alloc,
	.mon_lite_mon_dealloc = dp_lite_mon_dealloc,
	.mon_lite_mon_vdev_delete = dp_lite_mon_vdev_delete,
	.mon_lite_mon_disable_rx = dp_lite_mon_disable_rx,
	.mon_rx_ppdu_info_cache_create = dp_rx_mon_ppdu_info_cache_create,
	.mon_rx_ppdu_info_cache_destroy = dp_rx_mon_ppdu_info_cache_destroy,
};

struct cdp_mon_ops dp_ops_mon_2_0 = {
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = NULL,
	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
	.config_full_mon_mode = NULL,
	.soc_config_full_mon_mode = NULL,
	.get_mon_pdev_rx_stats = dp_pdev_get_rx_mon_stats,
	.txrx_enable_mon_reap_timer = NULL,
#ifdef QCA_SUPPORT_LITE_MONITOR
	.txrx_set_lite_mon_config = dp_lite_mon_set_config,
	.txrx_get_lite_mon_config = dp_lite_mon_get_config,
	.txrx_set_lite_mon_peer_config = dp_lite_mon_set_peer_config,
	.txrx_get_lite_mon_peer_config = dp_lite_mon_get_peer_config,
	.txrx_is_lite_mon_enabled = dp_lite_mon_is_enabled,
#endif
	.txrx_set_mon_pdev_params_rssi_dbm_conv =
				dp_mon_pdev_params_rssi_dbm_conv,
};

#ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
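/**
 * dp_mon_ops_register_2_0() - allocate a per-soc copy of monitor_ops_2_0
 * and register it with the monitor soc
 *
 * @mon_soc: DP monitor soc handle
 *
 * Return: None
 */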
void dp_mon_ops_register_2_0(struct dp_mon_soc *mon_soc)
{
	struct dp_mon_ops *mon_ops = NULL;

	if (mon_soc->mon_ops) {
		dp_mon_err("monitor ops already allocated");
		return;
	}

	mon_ops = qdf_mem_malloc(sizeof(struct dp_mon_ops));
	if (!mon_ops) {
		dp_mon_err("Failed to allocate memory for mon ops");
		return;
	}

	qdf_mem_copy(mon_ops, &monitor_ops_2_0, sizeof(struct dp_mon_ops));
	mon_soc->mon_ops = mon_ops;
}

void dp_mon_cdp_ops_register_2_0(struct cdp_ops *ops)
{
	struct cdp_mon_ops *mon_ops = NULL;

	if (ops->mon_ops) {
		dp_mon_err("cdp monitor ops already allocated");
		return;
	}

	mon_ops = qdf_mem_malloc(sizeof(struct cdp_mon_ops));
	if (!mon_ops) {
		dp_mon_err("Failed to allocate memory for mon ops");
		return;
	}

	qdf_mem_copy(mon_ops, &dp_ops_mon_2_0, sizeof(struct cdp_mon_ops));
	ops->mon_ops = mon_ops;
}
#else
void dp_mon_ops_register_2_0(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops = &monitor_ops_2_0;
}

void dp_mon_cdp_ops_register_2_0(struct cdp_ops *ops)
{
	ops->mon_ops = &dp_ops_mon_2_0;
}
#endif

#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
	defined(WLAN_SUPPORT_RX_FLOW_TAG)
/**
 * dp_mon_rx_update_rx_protocol_tag_stats() - Update monitor protocol tag
 *					      statistics
 * @pdev: pdev handle
 * @protocol_index: Protocol index for which the stats should be incremented
 *
 * Return: void
 */
void dp_mon_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
					    uint16_t protocol_index)
{
	pdev->mon_proto_tag_stats[protocol_index].tag_ctr++;
}
#else
void dp_mon_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
					    uint16_t protocol_index)
{
}
#endif
1624