/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <dp_types.h>
#include "dp_rx.h"
#include "dp_peer.h"
#include <dp_htt.h>
#include <dp_mon_filter.h>
#include <dp_mon.h>
#include <dp_rx_mon.h>
#include <dp_rx_mon_2.0.h>
#include <dp_mon_2.0.h>
#include <dp_mon_filter_2.0.h>
#include <dp_tx_mon_2.0.h>
#include <hal_be_api_mon.h>
#include <dp_be.h>
#include <htt_ppdu_stats.h>
#ifdef QCA_SUPPORT_LITE_MONITOR
#include "dp_lite_mon.h"
#endif

#if !defined(DISABLE_MON_CONFIG)

/**
 * dp_mon_pdev_ext_init_2_0() - Initialize extended monitor pdev parameters
 *
 * @pdev: DP pdev handle
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *         QDF_STATUS_E_FAILURE: failure
 */
QDF_STATUS dp_mon_pdev_ext_init_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

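	/* Rx monitor PPDU processing is deferred to an unbound workqueue:
	 * entries queued on rx_mon_queue (protected by rx_mon_wq_lock) are
	 * drained by the dp_rx_mon_process_ppdu work handler.
	 */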
	qdf_create_work(0, &mon_pdev_be->rx_mon_work,
			dp_rx_mon_process_ppdu, pdev);
	mon_pdev_be->rx_mon_workqueue =
		qdf_alloc_unbound_workqueue("rx_mon_work_queue");

	if (!mon_pdev_be->rx_mon_workqueue) {
		dp_mon_err("failed to create rxmon wq mon_pdev: %pK", mon_pdev);
		goto fail;
	}
	TAILQ_INIT(&mon_pdev_be->rx_mon_queue);

	qdf_spinlock_create(&mon_pdev_be->rx_mon_wq_lock);

	return QDF_STATUS_SUCCESS;

fail:
	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_mon_pdev_ext_deinit_2_0() - Deinitialize extended monitor pdev parameters
 *
 * @pdev: DP pdev handle
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_mon_pdev_ext_deinit_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

	if (!mon_pdev_be->rx_mon_workqueue)
		return QDF_STATUS_E_FAILURE;

	dp_rx_mon_drain_wq(pdev);
	qdf_flush_workqueue(0, mon_pdev_be->rx_mon_workqueue);
	qdf_destroy_workqueue(0, mon_pdev_be->rx_mon_workqueue);
	qdf_flush_work(&mon_pdev_be->rx_mon_work);
	qdf_disable_work(&mon_pdev_be->rx_mon_work);
	mon_pdev_be->rx_mon_workqueue = NULL;
	qdf_spinlock_destroy(&mon_pdev_be->rx_mon_wq_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_mon_add_desc_list_to_free_list() - append the unused descriptor list
 *					 back to the freelist
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: pointer to the last descriptor of the local desc list
 * @mon_desc_pool: monitor descriptor pool pointer
 *
 * Return: void
 */
void
dp_mon_add_desc_list_to_free_list(struct dp_soc *soc,
				  union dp_mon_desc_list_elem_t **local_desc_list,
				  union dp_mon_desc_list_elem_t **tail,
				  struct dp_mon_desc_pool *mon_desc_pool)
{
	union dp_mon_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&mon_desc_pool->lock);

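	/*
	 * O(1) splice: the caller's whole list becomes the new freelist
	 * head and the old freelist is chained after the caller's tail.
	 */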
	temp_list = mon_desc_pool->freelist;
	mon_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&mon_desc_pool->lock);
}

/**
 * dp_mon_get_free_desc_list() - provide a list of descriptors from
 *				 the free mon desc pool
 *
 * @soc: core txrx main context
 * @mon_desc_pool: monitor descriptor pool pointer
 * @num_descs: number of descs requested from the freelist
 * @desc_list: list to which the allocated descriptors are attached
 *	       (output parameter)
 * @tail: pointer to the last descriptor of the allocated list
 *	  (output parameter)
 *
 * Return: number of descs allocated from the free list.
 */
static uint16_t
dp_mon_get_free_desc_list(struct dp_soc *soc,
			  struct dp_mon_desc_pool *mon_desc_pool,
			  uint16_t num_descs,
			  union dp_mon_desc_list_elem_t **desc_list,
			  union dp_mon_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&mon_desc_pool->lock);

	*desc_list = *tail = mon_desc_pool->freelist;

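	/*
	 * Carve up to num_descs entries off the freelist head; *tail tracks
	 * the last descriptor handed out so the caller can splice the list
	 * back later.
	 */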
	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!mon_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&mon_desc_pool->lock);
			return count;
		}
		*tail = mon_desc_pool->freelist;
		mon_desc_pool->freelist = mon_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&mon_desc_pool->lock);
	return count;
}

void dp_mon_pool_frag_unmap_and_free(struct dp_soc *soc,
				     struct dp_mon_desc_pool *mon_desc_pool)
{
	int desc_id;
	qdf_frag_t vaddr;
	qdf_dma_addr_t paddr;

	qdf_spin_lock_bh(&mon_desc_pool->lock);
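	/* Release every descriptor still marked in_use: DMA-unmap the
	 * buffer first (unless already unmapped), then free the frag.
	 */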
	for (desc_id = 0; desc_id < mon_desc_pool->pool_size; desc_id++) {
		if (mon_desc_pool->array[desc_id].mon_desc.in_use) {
			vaddr = mon_desc_pool->array[desc_id].mon_desc.buf_addr;
			paddr = mon_desc_pool->array[desc_id].mon_desc.paddr;

			if (!(mon_desc_pool->array[desc_id].mon_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   mon_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				mon_desc_pool->array[desc_id].mon_desc.unmapped = 1;
				mon_desc_pool->array[desc_id].mon_desc.cookie = desc_id;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&mon_desc_pool->lock);
}

static inline QDF_STATUS
dp_mon_frag_alloc_and_map(struct dp_soc *dp_soc,
			  struct dp_mon_desc *mon_desc,
			  struct dp_mon_desc_pool *mon_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	mon_desc->buf_addr = qdf_frag_alloc(&mon_desc_pool->pf_cache,
					    mon_desc_pool->buf_size);

	if (!mon_desc->buf_addr) {
		dp_mon_err("Frag alloc failed");
		return QDF_STATUS_E_NOMEM;
	}

	ret = qdf_mem_map_page(dp_soc->osdev,
			       mon_desc->buf_addr,
			       QDF_DMA_FROM_DEVICE,
			       mon_desc_pool->buf_size,
			       &mon_desc->paddr);

	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		qdf_frag_free(mon_desc->buf_addr);
		dp_mon_err("Frag map failed");
		return QDF_STATUS_E_FAULT;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
dp_mon_buffers_replenish(struct dp_soc *dp_soc,
			 struct dp_srng *dp_mon_srng,
			 struct dp_mon_desc_pool *mon_desc_pool,
			 uint32_t num_req_buffers,
			 union dp_mon_desc_list_elem_t **desc_list,
			 union dp_mon_desc_list_elem_t **tail,
			 uint32_t *replenish_cnt_ref)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	uint32_t num_entries_avail;
	uint32_t count = 0;
	int sync_hw_ptr = 1;
	struct dp_mon_desc mon_desc = {0};
	void *mon_ring_entry;
	union dp_mon_desc_list_elem_t *next;
	void *mon_srng;
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
	struct dp_mon_soc *mon_soc = dp_soc->monitor_soc;

	if (!num_req_buffers) {
		dp_mon_debug("%pK: Received request for 0 buffers replenish",
			     dp_soc);
		ret = QDF_STATUS_E_INVAL;
		goto free_desc;
	}

	mon_srng = dp_mon_srng->hal_srng;

	hal_srng_access_start(dp_soc->hal_soc, mon_srng);

	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   mon_srng, sync_hw_ptr);

	if (!num_entries_avail) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, mon_srng);
		goto free_desc;
	}
	if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_mon_get_free_desc_list(dp_soc,
							   mon_desc_pool,
							   num_req_buffers,
							   desc_list,
							   tail);

		if (!num_alloc_desc) {
			dp_mon_debug("%pK: no free rx_descs in freelist", dp_soc);
			hal_srng_access_end(dp_soc->hal_soc, mon_srng);
			return QDF_STATUS_E_NOMEM;
		}

		dp_mon_info("%pK: %d rx desc allocated",
			    dp_soc, num_alloc_desc);

		num_req_buffers = num_alloc_desc;
	}

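	/*
	 * For each buffer: allocate and DMA-map a frag, then program its
	 * address into the next free ring entry. A map failure
	 * (QDF_STATUS_E_FAULT) is retried with a fresh frag; any other
	 * allocation failure stops the replenish.
	 */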
	while (count < num_req_buffers) {
		ret = dp_mon_frag_alloc_and_map(dp_soc,
						&mon_desc,
						mon_desc_pool);

		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
				continue;
			break;
		}

		count++;
		next = (*desc_list)->next;
		mon_ring_entry = hal_srng_src_get_next(
						dp_soc->hal_soc,
						mon_srng);

		if (!mon_ring_entry) {
			/* ring unexpectedly full: release the frag that was
			 * just mapped so it is not leaked, and do not count
			 * it as replenished
			 */
			qdf_mem_unmap_page(dp_soc->osdev, mon_desc.paddr,
					   mon_desc_pool->buf_size,
					   QDF_DMA_FROM_DEVICE);
			qdf_frag_free(mon_desc.buf_addr);
			count--;
			break;
		}

		qdf_assert_always((*desc_list)->mon_desc.in_use == 0);

		(*desc_list)->mon_desc.in_use = 1;
		(*desc_list)->mon_desc.unmapped = 0;
		(*desc_list)->mon_desc.buf_addr = mon_desc.buf_addr;
		(*desc_list)->mon_desc.paddr = mon_desc.paddr;
		(*desc_list)->mon_desc.magic = DP_MON_DESC_MAGIC;

		mon_soc->stats.frag_alloc++;
		hal_mon_buff_addr_info_set(dp_soc->hal_soc,
					   mon_ring_entry,
					   &((*desc_list)->mon_desc),
					   mon_desc.paddr);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, mon_srng);
	if (replenish_cnt_ref)
		*replenish_cnt_ref += count;

free_desc:
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list) {
		dp_mon_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						  mon_desc_pool);
	}

	return ret;
}

QDF_STATUS
dp_mon_desc_pool_init(struct dp_mon_desc_pool *mon_desc_pool,
		      uint32_t pool_size)
{
	int desc_id;
	/* Initialize monitor desc lock */
	qdf_spinlock_create(&mon_desc_pool->lock);

	qdf_spin_lock_bh(&mon_desc_pool->lock);

	mon_desc_pool->buf_size = DP_MON_DATA_BUFFER_SIZE;
	/* link SW descs into a freelist */
	mon_desc_pool->freelist = &mon_desc_pool->array[0];
	mon_desc_pool->pool_size = pool_size - 1;
	qdf_mem_zero(mon_desc_pool->freelist,
		     mon_desc_pool->pool_size *
		     sizeof(union dp_mon_desc_list_elem_t));

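	/*
	 * Chain the array into a singly linked freelist:
	 * array[0] -> array[1] -> ... -> array[pool_size - 1] -> NULL.
	 * The cookie records the array index so a descriptor can be looked
	 * up directly from its cookie.
	 */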
	for (desc_id = 0; desc_id < mon_desc_pool->pool_size; desc_id++) {
		if (desc_id == mon_desc_pool->pool_size - 1)
			mon_desc_pool->array[desc_id].next = NULL;
		else
			mon_desc_pool->array[desc_id].next =
				&mon_desc_pool->array[desc_id + 1];
		mon_desc_pool->array[desc_id].mon_desc.in_use = 0;
		mon_desc_pool->array[desc_id].mon_desc.cookie = desc_id;
	}
	qdf_spin_unlock_bh(&mon_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}

void dp_mon_desc_pool_deinit(struct dp_mon_desc_pool *mon_desc_pool)
{
	qdf_spin_lock_bh(&mon_desc_pool->lock);

	mon_desc_pool->freelist = NULL;
	mon_desc_pool->pool_size = 0;

	qdf_spin_unlock_bh(&mon_desc_pool->lock);
	qdf_spinlock_destroy(&mon_desc_pool->lock);
}

void dp_mon_desc_pool_free(struct dp_mon_desc_pool *mon_desc_pool)
{
	qdf_mem_free(mon_desc_pool->array);
}

QDF_STATUS dp_mon_desc_pool_alloc(uint32_t pool_size,
				  struct dp_mon_desc_pool *mon_desc_pool)
{
	mon_desc_pool->pool_size = pool_size - 1;
	mon_desc_pool->array = qdf_mem_malloc((mon_desc_pool->pool_size) *
				     sizeof(union dp_mon_desc_list_elem_t));
	if (!mon_desc_pool->array)
		return QDF_STATUS_E_NOMEM;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_rx_2_0(struct dp_pdev *pdev)
{
	int rx_mon_max_entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	QDF_STATUS status;

	if (!mon_soc_be) {
		dp_mon_err("DP MON SOC is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	rx_mon_max_entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);

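	/* Arm the SRNG low-threshold interrupt at 1/4 of the configured
	 * ring size so the refill handlers run before the ring drains.
	 */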
	hal_set_low_threshold(soc->rxdma_mon_buf_ring[0].hal_srng,
			      rx_mon_max_entries >> 2);
	status = htt_srng_setup(soc->htt_handle, 0,
				soc->rxdma_mon_buf_ring[0].hal_srng,
				RXDMA_MONITOR_BUF);

	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("Failed to send htt srng setup message for Rx mon buf ring");
		return status;
	}

	if (mon_soc_be->rx_mon_ring_fill_level < rx_mon_max_entries) {
		status = dp_rx_mon_buffers_alloc(soc,
						 (rx_mon_max_entries -
						 mon_soc_be->rx_mon_ring_fill_level));
		if (status != QDF_STATUS_SUCCESS) {
			dp_mon_err("%pK: Rx mon buffers allocation failed", soc);
			return status;
		}
		mon_soc_be->rx_mon_ring_fill_level = rx_mon_max_entries;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_tx_2_0(struct dp_pdev *pdev)
{
	int tx_mon_max_entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	QDF_STATUS status;

	if (!mon_soc_be) {
		dp_mon_err("DP MON SOC is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	tx_mon_max_entries =
		wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);

	hal_set_low_threshold(mon_soc_be->tx_mon_buf_ring.hal_srng,
			      tx_mon_max_entries >> 2);
	status = htt_srng_setup(soc->htt_handle, 0,
				mon_soc_be->tx_mon_buf_ring.hal_srng,
				TX_MONITOR_BUF);

	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("Failed to send htt srng setup message for Tx mon buf ring");
		return status;
	}

	if (mon_soc_be->tx_mon_ring_fill_level < tx_mon_max_entries) {
		status = dp_tx_mon_buffers_alloc(soc,
						 (tx_mon_max_entries -
						 mon_soc_be->tx_mon_ring_fill_level));
		if (status != QDF_STATUS_SUCCESS) {
			dp_mon_err("%pK: Tx mon buffers allocation failed", soc);
			return status;
		}
		mon_soc_be->tx_mon_ring_fill_level = tx_mon_max_entries;
	}

	return QDF_STATUS_SUCCESS;
}

static
QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_2_0(struct dp_pdev *pdev)
{
	QDF_STATUS status;
	struct dp_soc *soc = pdev->soc;

	status = dp_vdev_set_monitor_mode_buf_rings_rx_2_0(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("%pK: Rx monitor extra buffer allocation failed",
			   soc);
		return status;
	}

	status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("%pK: Tx monitor extra buffer allocation failed",
			   soc);
		return status;
	}

	return QDF_STATUS_SUCCESS;
}

static
QDF_STATUS dp_vdev_set_monitor_mode_rings_2_0(struct dp_pdev *pdev,
					      uint8_t delayed_replenish)
{
	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_mon_tx_enable_enhanced_stats_2_0() - Send HTT cmd to FW to enable stats
 * @pdev: Datapath pdev handle
 *
 * Return: none
 */
static void dp_mon_tx_enable_enhanced_stats_2_0(struct dp_pdev *pdev)
{
	dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
				  pdev->pdev_id);
}

/**
 * dp_mon_tx_disable_enhanced_stats_2_0() - Send HTT cmd to FW to disable stats
 * @pdev: Datapath pdev handle
 *
 * Return: none
 */
static void dp_mon_tx_disable_enhanced_stats_2_0(struct dp_pdev *pdev)
{
	dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
}
#endif

#if defined(QCA_ENHANCED_STATS_SUPPORT) && defined(WLAN_FEATURE_11BE)
void
dp_mon_tx_stats_update_2_0(struct dp_mon_peer *mon_peer,
			   struct cdp_tx_completion_ppdu_user *ppdu)
{
	uint8_t preamble, mcs, punc_mode;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;

	punc_mode = dp_mon_get_puncture_type(ppdu->punc_pattern_bitmap,
					     ppdu->bw);
	ppdu->punc_mode = punc_mode;

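	/* MCS values beyond the 11be table (>= MAX_MCS_11BE) are folded
	 * into the last bucket (MAX_MCS - 1); in-range values land in
	 * their own bucket.
	 */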
	DP_STATS_INC(mon_peer, tx.punc_bw[punc_mode], ppdu->num_msdu);
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1],
		      ppdu->num_msdu,
		      ((mcs >= MAX_MCS_11BE) && (preamble == DOT11_BE)));
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[mcs],
		      ppdu->num_msdu,
		      ((mcs < MAX_MCS_11BE) && (preamble == DOT11_BE)));
	DP_STATS_INCC(mon_peer,
		      tx.su_be_ppdu_cnt.mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11BE) && (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_SU)));
	DP_STATS_INCC(mon_peer,
		      tx.su_be_ppdu_cnt.mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11BE) && (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_SU)));
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[MAX_MCS - 1],
		      1, ((mcs >= MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA)));
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[mcs],
		      1, ((mcs < MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA)));
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[MAX_MCS - 1],
		      1, ((mcs >= MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO)));
	DP_STATS_INCC(mon_peer,
		      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[mcs],
		      1, ((mcs < MAX_MCS_11BE) &&
		      (preamble == DOT11_BE) &&
		      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO)));
}

enum cdp_punctured_modes
dp_mon_get_puncture_type(uint16_t puncture_pattern, uint8_t bw)
{
	uint16_t mask;
	uint8_t punctured_bits;

	if (!puncture_pattern)
		return NO_PUNCTURE;

	switch (bw) {
	case CMN_BW_80MHZ:
		mask = PUNCTURE_80MHZ_MASK;
		break;
	case CMN_BW_160MHZ:
		mask = PUNCTURE_160MHZ_MASK;
		break;
	case CMN_BW_320MHZ:
		mask = PUNCTURE_320MHZ_MASK;
		break;
	default:
		return NO_PUNCTURE;
	}

	/* A 0 in the puncture pattern received in the TLV indicates a
	 * punctured 20 MHz subchannel; after the complement below, 1s
	 * indicate punctured 20 MHz subchannels.
	 */
	puncture_pattern = ~puncture_pattern;
	puncture_pattern &= mask;

	if (puncture_pattern) {
		punctured_bits = 0;
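		/* count punctured 20 MHz subchannels with Kernighan's bit
		 * trick: each AND with (x - 1) clears the lowest set bit
		 */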
		while (puncture_pattern != 0) {
			punctured_bits++;
			puncture_pattern &= (puncture_pattern - 1);
		}

		if (bw == CMN_BW_80MHZ) {
			if (punctured_bits == IEEE80211_PUNC_MINUS20MHZ)
				return PUNCTURED_20MHZ;
			else
				return NO_PUNCTURE;
		} else if (bw == CMN_BW_160MHZ) {
			if (punctured_bits == IEEE80211_PUNC_MINUS20MHZ)
				return PUNCTURED_20MHZ;
			else if (punctured_bits == IEEE80211_PUNC_MINUS40MHZ)
				return PUNCTURED_40MHZ;
			else
				return NO_PUNCTURE;
		} else if (bw == CMN_BW_320MHZ) {
			if (punctured_bits == IEEE80211_PUNC_MINUS40MHZ)
				return PUNCTURED_40MHZ;
			else if (punctured_bits == IEEE80211_PUNC_MINUS80MHZ)
				return PUNCTURED_80MHZ;
			else if (punctured_bits == IEEE80211_PUNC_MINUS120MHZ)
				return PUNCTURED_120MHZ;
			else
				return NO_PUNCTURE;
		}
	}
	return NO_PUNCTURE;
}
#endif

#if defined(QCA_ENHANCED_STATS_SUPPORT) && !defined(WLAN_FEATURE_11BE)
void
dp_mon_tx_stats_update_2_0(struct dp_mon_peer *mon_peer,
			   struct cdp_tx_completion_ppdu_user *ppdu)
{
	ppdu->punc_mode = NO_PUNCTURE;
}

enum cdp_punctured_modes
dp_mon_get_puncture_type(uint16_t puncture_pattern, uint8_t bw)
{
	return NO_PUNCTURE;
}
#endif /* QCA_ENHANCED_STATS_SUPPORT && !WLAN_FEATURE_11BE */

#ifdef QCA_SUPPORT_BPR
static QDF_STATUS
dp_set_bpr_enable_2_0(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* QCA_SUPPORT_BPR */

#ifdef QCA_ENHANCED_STATS_SUPPORT
#ifdef WDI_EVENT_ENABLE
/**
 * dp_ppdu_desc_notify_2_0() - Notify upper layer for PPDU indication via WDI
 *
 * @pdev: Datapath pdev handle
 * @nbuf: Buffer to be shipped
 *
 * Return: void
 */
static void dp_ppdu_desc_notify_2_0(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
{
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(nbuf);

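	/* Deliver only data-type PPDUs that carry at least one MPDU and
	 * one user; either way ownership of nbuf ends here (handed to WDI
	 * or freed).
	 */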
	if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
	    ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
				     pdev->soc,
				     nbuf, HTT_INVALID_PEER,
				     WDI_NO_VAL,
				     pdev->pdev_id);
	} else {
		qdf_nbuf_free(nbuf);
	}
}
#endif

/**
 * dp_ppdu_stats_feat_enable_check_2_0() - Check if feature(s) is enabled to
 *					   consume ppdu stats from FW
 *
 * @pdev: Datapath pdev handle
 *
 * Return: true if enabled, else return false
 */
static bool dp_ppdu_stats_feat_enable_check_2_0(struct dp_pdev *pdev)
{
	return pdev->monitor_pdev->enhanced_stats_en;
}
#endif

static
QDF_STATUS dp_mon_soc_htt_srng_setup_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	QDF_STATUS status;

	hal_set_low_threshold(soc->rxdma_mon_buf_ring[0].hal_srng, 0);
	status = htt_srng_setup(soc->htt_handle, 0,
				soc->rxdma_mon_buf_ring[0].hal_srng,
				RXDMA_MONITOR_BUF);

	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt srng setup message for Rx mon buf ring");
		return status;
	}

	hal_set_low_threshold(mon_soc_be->tx_mon_buf_ring.hal_srng, 0);
	status = htt_srng_setup(soc->htt_handle, 0,
				mon_soc_be->tx_mon_buf_ring.hal_srng,
				TX_MONITOR_BUF);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt srng setup message for Tx mon buf ring");
		return status;
	}

	return status;
}

static
QDF_STATUS dp_mon_pdev_htt_srng_setup_2_0(struct dp_soc *soc,
					  struct dp_pdev *pdev,
					  int mac_id,
					  int mac_for_pdev)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	QDF_STATUS status;

	if (!soc->rxdma_mon_dst_ring[mac_id].hal_srng)
		return QDF_STATUS_SUCCESS;

	status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
				soc->rxdma_mon_dst_ring[mac_id].hal_srng,
				RXDMA_MONITOR_DST);

	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("Failed to send htt srng setup message for Rxdma dst ring");
		return status;
	}

	if (!mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng)
		return QDF_STATUS_SUCCESS;

	status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
				mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng,
				TX_MONITOR_DST);

	if (status != QDF_STATUS_SUCCESS) {
		dp_mon_err("Failed to send htt srng message for Tx mon dst ring");
		return status;
	}

	return status;
}

QDF_STATUS dp_tx_mon_refill_buf_ring_2_0(struct dp_intr *int_ctx)
{
	struct dp_soc *soc = int_ctx->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	union dp_mon_desc_list_elem_t *desc_list = NULL;
	union dp_mon_desc_list_elem_t *tail = NULL;
	struct dp_srng *tx_mon_buf_ring;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	uint32_t num_entries_avail;
	int sync_hw_ptr = 1;
	void *hal_srng;

	tx_mon_buf_ring = &mon_soc_be->tx_mon_buf_ring;
	hal_srng = tx_mon_buf_ring->hal_srng;

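	/* Runs from the ring's low-threshold interrupt: top up the Tx
	 * monitor buffer ring with however many entries the SRNG reports
	 * free.
	 */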
	intr_stats->num_host2txmon_ring__masks++;
	mon_soc_be->tx_low_thresh_intrs++;
	hal_srng_access_start(soc->hal_soc, hal_srng);
	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
						   hal_srng,
						   sync_hw_ptr);
	hal_srng_access_end(soc->hal_soc, hal_srng);

	if (num_entries_avail)
		dp_mon_buffers_replenish(soc, tx_mon_buf_ring,
					 &mon_soc_be->tx_desc_mon,
					 num_entries_avail, &desc_list, &tail,
					 NULL);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_rx_mon_refill_buf_ring_2_0(struct dp_intr *int_ctx)
{
	struct dp_soc *soc = int_ctx->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	union dp_mon_desc_list_elem_t *desc_list = NULL;
	union dp_mon_desc_list_elem_t *tail = NULL;
	struct dp_srng *rx_mon_buf_ring;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	uint32_t num_entries_avail;
	int sync_hw_ptr = 1;
	void *hal_srng;

	rx_mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
	hal_srng = rx_mon_buf_ring->hal_srng;

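	/* Rx counterpart of the Tx refill above, driven by the Rx ring's
	 * low-threshold interrupt.
	 */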
	intr_stats->num_host2rxdma_ring_masks++;
	mon_soc_be->rx_low_thresh_intrs++;
	hal_srng_access_start(soc->hal_soc, hal_srng);
	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
						   hal_srng,
						   sync_hw_ptr);
	hal_srng_access_end(soc->hal_soc, hal_srng);

	if (num_entries_avail)
		dp_mon_buffers_replenish(soc, rx_mon_buf_ring,
					 &mon_soc_be->rx_desc_mon,
					 num_entries_avail, &desc_list, &tail,
					 NULL);

	return QDF_STATUS_SUCCESS;
}

static
QDF_STATUS dp_mon_soc_detach_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	if (!mon_soc_be) {
		dp_mon_err("DP MON SOC NULL");
		return QDF_STATUS_E_FAILURE;
	}

	dp_rx_mon_buf_desc_pool_free(soc);
	dp_srng_free(soc, &soc->rxdma_mon_buf_ring[0]);
	dp_tx_mon_buf_desc_pool_free(soc);
	dp_srng_free(soc, &mon_soc_be->tx_mon_buf_ring);

	return QDF_STATUS_SUCCESS;
}

static void dp_mon_soc_deinit_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	if (!mon_soc_be->is_dp_mon_soc_initialized)
		return;

	dp_rx_mon_buffers_free(soc);
	dp_tx_mon_buffers_free(soc);

	dp_rx_mon_buf_desc_pool_deinit(soc);
	dp_tx_mon_buf_desc_pool_deinit(soc);

	dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[0], RXDMA_MONITOR_BUF, 0);
	dp_srng_deinit(soc, &mon_soc_be->tx_mon_buf_ring, TX_MONITOR_BUF, 0);

	mon_soc_be->is_dp_mon_soc_initialized = false;
}

static
QDF_STATUS dp_rx_mon_soc_init_2_0(struct dp_soc *soc)
{
	if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[0],
			 RXDMA_MONITOR_BUF, 0, 0)) {
		dp_mon_err("%pK: " RNG_ERR "rx_mon_buf_ring", soc);
		goto fail;
	}

	if (dp_rx_mon_buf_desc_pool_init(soc)) {
		dp_mon_err("%pK: " RNG_ERR "rx mon desc pool init", soc);
		goto fail;
	}

	/* monitor buffers for src */
	if (dp_rx_mon_buffers_alloc(soc, DP_MON_RING_FILL_LEVEL_DEFAULT)) {
		dp_mon_err("%pK: Rx mon buffers allocation failed", soc);
		goto fail;
	}

	return QDF_STATUS_SUCCESS;
fail:
	return QDF_STATUS_E_FAILURE;
}

static
QDF_STATUS dp_tx_mon_soc_init_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	if (dp_srng_init(soc, &mon_soc_be->tx_mon_buf_ring,
			 TX_MONITOR_BUF, 0, 0)) {
		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
		goto fail;
	}

	if (dp_tx_mon_buf_desc_pool_init(soc)) {
		dp_mon_err("%pK: " RNG_ERR "tx mon desc pool init", soc);
		goto fail;
	}

	/* monitor buffers for src */
	if (dp_tx_mon_buffers_alloc(soc, DP_MON_RING_FILL_LEVEL_DEFAULT)) {
		dp_mon_err("%pK: Tx mon buffers allocation failed", soc);
		goto fail;
	}

	return QDF_STATUS_SUCCESS;
fail:
	return QDF_STATUS_E_FAILURE;
}

static
QDF_STATUS dp_mon_soc_init_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

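	/* a non-NULL hal_srng means a previous init already completed;
	 * treat re-invocation as a no-op
	 */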
	if (soc->rxdma_mon_buf_ring[0].hal_srng) {
		dp_mon_info("%pK: mon soc init is done", soc);
		return QDF_STATUS_SUCCESS;
	}

	if (dp_rx_mon_soc_init_2_0(soc)) {
		dp_mon_err("%pK: " RNG_ERR "rx_mon_buf_ring", soc);
		goto fail;
	}

	if (dp_tx_mon_soc_init_2_0(soc)) {
		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
		goto fail;
	}

	mon_soc_be->tx_mon_ring_fill_level = DP_MON_RING_FILL_LEVEL_DEFAULT;
	mon_soc_be->rx_mon_ring_fill_level = DP_MON_RING_FILL_LEVEL_DEFAULT;

	mon_soc_be->is_dp_mon_soc_initialized = true;
	return QDF_STATUS_SUCCESS;
fail:
	dp_mon_soc_deinit_2_0(soc);
	return QDF_STATUS_E_FAILURE;
}

static
QDF_STATUS dp_mon_soc_attach_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	int entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	if (!mon_soc_be) {
		dp_mon_err("DP MON SOC is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
	dp_mon_info("rx mon buf entries: %d", entries);
	if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[0],
			  RXDMA_MONITOR_BUF, entries, 0)) {
		dp_mon_err("%pK: " RNG_ERR "rx_mon_buf_ring", soc);
		goto fail;
	}

	entries = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
	dp_mon_info("tx mon buf entries: %d", entries);
	if (dp_srng_alloc(soc, &mon_soc_be->tx_mon_buf_ring,
			  TX_MONITOR_BUF, entries, 0)) {
		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
		goto fail;
	}

	/* allocate sw desc pool */
	if (dp_rx_mon_buf_desc_pool_alloc(soc)) {
		dp_mon_err("%pK: Rx mon desc pool allocation failed", soc);
		goto fail;
	}

	if (dp_tx_mon_buf_desc_pool_alloc(soc)) {
		dp_mon_err("%pK: Tx mon desc pool allocation failed", soc);
		goto fail;
	}

	return QDF_STATUS_SUCCESS;
fail:
	dp_mon_soc_detach_2_0(soc);
	return QDF_STATUS_E_NOMEM;
}

static
void dp_pdev_mon_rings_deinit_2_0(struct dp_pdev *pdev)
{
	int mac_id = 0;
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
							 pdev->pdev_id);

		dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
			       RXDMA_MONITOR_DST, pdev->pdev_id);
		dp_srng_deinit(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
			       TX_MONITOR_DST, pdev->pdev_id);
	}
}

static
QDF_STATUS dp_pdev_mon_rings_init_2_0(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	int mac_id = 0;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
							 pdev->pdev_id);

		if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
				 RXDMA_MONITOR_DST, pdev->pdev_id, lmac_id)) {
			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
			goto fail;
		}

		if (dp_srng_init(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
				 TX_MONITOR_DST, pdev->pdev_id, lmac_id)) {
			dp_mon_err("%pK: " RNG_ERR "tx_mon_dst_ring", soc);
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	dp_pdev_mon_rings_deinit_2_0(pdev);
	return QDF_STATUS_E_NOMEM;
}

static
void dp_pdev_mon_rings_free_2_0(struct dp_pdev *pdev)
{
	int mac_id = 0;
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
							 pdev->pdev_id);

		dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
		dp_srng_free(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id]);
	}
}

static
QDF_STATUS dp_pdev_mon_rings_alloc_2_0(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	int mac_id = 0;
	int entries;
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;

	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
		int lmac_id =
		dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);

		entries = wlan_cfg_get_dma_rx_mon_dest_ring_size(pdev_cfg_ctx);
		if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
				  RXDMA_MONITOR_DST, entries, 0)) {
			dp_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", pdev);
			goto fail;
		}

		entries = wlan_cfg_get_dma_tx_mon_dest_ring_size(pdev_cfg_ctx);
		if (dp_srng_alloc(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
				  TX_MONITOR_DST, entries, 0)) {
			dp_err("%pK: " RNG_ERR "tx_mon_dst_ring", pdev);
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	dp_pdev_mon_rings_free_2_0(pdev);
	return QDF_STATUS_E_NOMEM;
}

static
void dp_mon_pdev_free_2_0(struct dp_pdev *pdev)
{
}

static
QDF_STATUS dp_mon_pdev_alloc_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

	if (!mon_pdev_be) {
		dp_mon_err("DP MON PDEV is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

#else
static inline
QDF_STATUS dp_mon_htt_srng_setup_2_0(struct dp_soc *soc,
				     struct dp_pdev *pdev,
				     int mac_id,
				     int mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}

static uint32_t
dp_tx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
		      uint32_t mac_id, uint32_t quota)
{
	return 0;
}

static inline
QDF_STATUS dp_mon_soc_attach_2_0(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_mon_soc_detach_2_0(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_pdev_mon_rings_deinit_2_0(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_pdev_mon_rings_init_2_0(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_pdev_mon_rings_free_2_0(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_pdev_mon_rings_alloc_2_0(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_mon_pdev_free_2_0(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_mon_pdev_alloc_2_0(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_vdev_set_monitor_mode_buf_rings_2_0(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_vdev_set_monitor_mode_rings_2_0(struct dp_pdev *pdev,
					      uint8_t delayed_replenish)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops->mon_ppdu_stats_ind_handler =
					dp_ppdu_stats_ind_handler;
}
#else
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
}
#endif

static void dp_mon_register_intr_ops_2_0(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	mon_soc->mon_ops->rx_mon_refill_buf_ring = NULL;
	mon_soc->mon_ops->tx_mon_refill_buf_ring = NULL;
	mon_soc->mon_rx_process = dp_rx_mon_process_2_0;
	dp_mon_ppdu_stats_handler_register(mon_soc);
}

/**
 * dp_mon_register_feature_ops_2_0() - register feature ops
 *
 * @soc: dp soc context
 *
 * Return: void
 */
static void
dp_mon_register_feature_ops_2_0(struct dp_soc *soc)
{
	struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);

	if (!mon_ops) {
		dp_err("mon_ops is NULL, feature ops registration failed");
		return;
	}

	mon_ops->mon_config_debug_sniffer = dp_config_debug_sniffer;
	mon_ops->mon_peer_tx_init = NULL;
	mon_ops->mon_peer_tx_cleanup = NULL;
	mon_ops->mon_htt_ppdu_stats_attach = dp_htt_ppdu_stats_attach;
	mon_ops->mon_htt_ppdu_stats_detach = dp_htt_ppdu_stats_detach;
	mon_ops->mon_print_pdev_rx_mon_stats = dp_print_pdev_rx_mon_stats;
	mon_ops->mon_set_bsscolor = dp_mon_set_bsscolor;
	mon_ops->mon_pdev_get_filter_ucast_data = NULL;
	mon_ops->mon_pdev_get_filter_mcast_data = NULL;
	mon_ops->mon_pdev_get_filter_non_data = NULL;
	mon_ops->mon_neighbour_peer_add_ast = NULL;
#ifndef DISABLE_MON_CONFIG
	mon_ops->mon_tx_process = dp_tx_mon_process_2_0;
#endif
#ifdef WLAN_TX_PKT_CAPTURE_ENH_BE
	mon_ops->mon_peer_tid_peer_id_update = NULL;
	mon_ops->mon_tx_capture_debugfs_init = NULL;
	mon_ops->mon_tx_add_to_comp_queue = NULL;
	mon_ops->mon_print_pdev_tx_capture_stats =
					dp_print_pdev_tx_monitor_stats_2_0;
	mon_ops->mon_config_enh_tx_capture = dp_config_enh_tx_monitor_2_0;
	mon_ops->mon_tx_peer_filter = dp_peer_set_tx_capture_enabled_2_0;
#endif
#if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH_BE))
	mon_ops->mon_peer_tid_peer_id_update = NULL;
	mon_ops->mon_tx_capture_debugfs_init = NULL;
	mon_ops->mon_tx_add_to_comp_queue = NULL;
	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
	mon_ops->mon_config_enh_tx_capture = dp_config_enh_tx_core_monitor_2_0;
	mon_ops->mon_tx_peer_filter = NULL;
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_config_enh_rx_capture = NULL;
#endif
#ifdef QCA_SUPPORT_BPR
	mon_ops->mon_set_bpr_enable = dp_set_bpr_enable_2_0;
#endif
#ifdef ATH_SUPPORT_NAC
	mon_ops->mon_set_filter_neigh_peers = NULL;
#endif
#ifdef WLAN_ATF_ENABLE
	mon_ops->mon_set_atf_stats_enable = dp_set_atf_stats_enable;
#endif
#ifdef FEATURE_NAC_RSSI
	mon_ops->mon_filter_neighbour_peer = NULL;
#endif
#ifdef QCA_MCOPY_SUPPORT
	mon_ops->mon_filter_setup_mcopy_mode = NULL;
	mon_ops->mon_filter_reset_mcopy_mode = NULL;
	mon_ops->mon_mcopy_check_deliver = NULL;
#endif
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_filter_setup_enhanced_stats =
				dp_mon_filter_setup_enhanced_stats_2_0;
	mon_ops->mon_filter_reset_enhanced_stats =
				dp_mon_filter_reset_enhanced_stats_2_0;
	mon_ops->mon_tx_enable_enhanced_stats =
				dp_mon_tx_enable_enhanced_stats_2_0;
	mon_ops->mon_tx_disable_enhanced_stats =
				dp_mon_tx_disable_enhanced_stats_2_0;
	mon_ops->mon_ppdu_stats_feat_enable_check =
				dp_ppdu_stats_feat_enable_check_2_0;
	mon_ops->mon_tx_stats_update = dp_mon_tx_stats_update_2_0;
	mon_ops->mon_ppdu_desc_deliver = dp_ppdu_desc_deliver;
#ifdef WDI_EVENT_ENABLE
	mon_ops->mon_ppdu_desc_notify = dp_ppdu_desc_notify_2_0;
#endif
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_filter_setup_rx_enh_capture = NULL;
#endif
#ifdef WDI_EVENT_ENABLE
	mon_ops->mon_set_pktlog_wifi3 = dp_set_pktlog_wifi3;
	mon_ops->mon_filter_setup_rx_pkt_log_full =
				dp_mon_filter_setup_rx_pkt_log_full_2_0;
	mon_ops->mon_filter_reset_rx_pkt_log_full =
				dp_mon_filter_reset_rx_pkt_log_full_2_0;
	mon_ops->mon_filter_setup_rx_pkt_log_lite =
				dp_mon_filter_setup_rx_pkt_log_lite_2_0;
	mon_ops->mon_filter_reset_rx_pkt_log_lite =
				dp_mon_filter_reset_rx_pkt_log_lite_2_0;
	mon_ops->mon_filter_setup_rx_pkt_log_cbf =
				dp_mon_filter_setup_rx_pkt_log_cbf_2_0;
	mon_ops->mon_filter_reset_rx_pkt_log_cbf =
				dp_mon_filter_reset_rx_pktlog_cbf_2_0;
	mon_ops->mon_filter_setup_pktlog_hybrid =
				dp_mon_filter_setup_pktlog_hybrid_2_0;
	mon_ops->mon_filter_reset_pktlog_hybrid =
				dp_mon_filter_reset_pktlog_hybrid_2_0;
#endif
#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
	mon_ops->mon_pktlogmod_exit = dp_pktlogmod_exit;
#endif
	mon_ops->rx_hdr_length_set = dp_rx_mon_hdr_length_set;
	mon_ops->rx_packet_length_set = dp_rx_mon_packet_length_set;
	mon_ops->rx_mon_enable = dp_rx_mon_enable_set;
	mon_ops->rx_wmask_subscribe = dp_rx_mon_word_mask_subscribe;
	mon_ops->rx_enable_mpdu_logging = dp_rx_mon_enable_mpdu_logging;
	mon_ops->mon_neighbour_peers_detach = NULL;
	mon_ops->mon_vdev_set_monitor_mode_buf_rings =
				dp_vdev_set_monitor_mode_buf_rings_2_0;
	mon_ops->mon_vdev_set_monitor_mode_rings =
				dp_vdev_set_monitor_mode_rings_2_0;
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_rx_stats_update = dp_rx_mon_stats_update_2_0;
	mon_ops->mon_rx_populate_ppdu_usr_info =
			dp_rx_mon_populate_ppdu_usr_info_2_0;
	mon_ops->mon_rx_populate_ppdu_info = dp_rx_mon_populate_ppdu_info_2_0;
#endif
#ifdef QCA_UNDECODED_METADATA_SUPPORT
	mon_ops->mon_config_undecoded_metadata_capture =
		dp_mon_config_undecoded_metadata_capture;
	mon_ops->mon_filter_setup_undecoded_metadata_capture =
		dp_mon_filter_setup_undecoded_metadata_capture_2_0;
	mon_ops->mon_filter_reset_undecoded_metadata_capture =
		dp_mon_filter_reset_undecoded_metadata_capture_2_0;
#endif
	mon_ops->rx_enable_fpmo = dp_rx_mon_enable_fpmo;
}

struct dp_mon_ops monitor_ops_2_0 = {
	.mon_soc_cfg_init = dp_mon_soc_cfg_init,
	.mon_soc_attach = dp_mon_soc_attach_2_0,
	.mon_soc_detach = dp_mon_soc_detach_2_0,
	.mon_soc_init = dp_mon_soc_init_2_0,
	.mon_soc_deinit = dp_mon_soc_deinit_2_0,
	.mon_pdev_alloc = dp_mon_pdev_alloc_2_0,
	.mon_pdev_free = dp_mon_pdev_free_2_0,
	.mon_pdev_attach = dp_mon_pdev_attach,
	.mon_pdev_detach = dp_mon_pdev_detach,
	.mon_pdev_init = dp_mon_pdev_init,
	.mon_pdev_deinit = dp_mon_pdev_deinit,
	.mon_vdev_attach = dp_mon_vdev_attach,
	.mon_vdev_detach = dp_mon_vdev_detach,
	.mon_peer_attach = dp_mon_peer_attach,
	.mon_peer_detach = dp_mon_peer_detach,
	.mon_peer_get_peerstats_ctx = dp_mon_peer_get_peerstats_ctx,
	.mon_peer_reset_stats = dp_mon_peer_reset_stats,
	.mon_peer_get_stats = dp_mon_peer_get_stats,
	.mon_invalid_peer_update_pdev_stats =
				dp_mon_invalid_peer_update_pdev_stats,
	.mon_peer_get_stats_param = dp_mon_peer_get_stats_param,
	.mon_flush_rings = NULL,
#if !defined(DISABLE_MON_CONFIG)
	.mon_pdev_htt_srng_setup = dp_mon_pdev_htt_srng_setup_2_0,
	.mon_soc_htt_srng_setup = dp_mon_soc_htt_srng_setup_2_0,
#endif
#if defined(DP_CON_MON)
	.mon_service_rings = NULL,
#endif
#ifndef DISABLE_MON_CONFIG
	.mon_rx_process = NULL,
#endif
#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
	.mon_drop_packets_for_mac = NULL,
#endif
	.mon_vdev_timer_init = NULL,
	.mon_vdev_timer_start = NULL,
	.mon_vdev_timer_stop = NULL,
	.mon_vdev_timer_deinit = NULL,
	.mon_reap_timer_init = NULL,
	.mon_reap_timer_start = NULL,
	.mon_reap_timer_stop = NULL,
	.mon_reap_timer_deinit = NULL,
	.mon_filter_setup_rx_mon_mode = dp_mon_filter_setup_rx_mon_mode_2_0,
	.mon_filter_reset_rx_mon_mode = dp_mon_filter_reset_rx_mon_mode_2_0,
	.mon_filter_setup_tx_mon_mode = dp_mon_filter_setup_tx_mon_mode_2_0,
	.mon_filter_reset_tx_mon_mode = dp_mon_filter_reset_tx_mon_mode_2_0,
	.tx_mon_filter_update = dp_tx_mon_filter_update_2_0,
	.rx_mon_filter_update = dp_rx_mon_filter_update_2_0,
	.tx_mon_filter_alloc = dp_mon_filter_alloc_2_0,
	.tx_mon_filter_dealloc = dp_mon_filter_dealloc_2_0,
	.mon_rings_alloc = dp_pdev_mon_rings_alloc_2_0,
	.mon_rings_free = dp_pdev_mon_rings_free_2_0,
	.mon_rings_init = dp_pdev_mon_rings_init_2_0,
	.mon_rings_deinit = dp_pdev_mon_rings_deinit_2_0,
	.rx_mon_desc_pool_init = NULL,
	.rx_mon_desc_pool_deinit = NULL,
	.rx_mon_desc_pool_alloc = NULL,
	.rx_mon_desc_pool_free = NULL,
	.rx_mon_buffers_alloc = NULL,
	.rx_mon_buffers_free = NULL,
	.tx_mon_desc_pool_init = NULL,
	.tx_mon_desc_pool_deinit = NULL,
	.tx_mon_desc_pool_alloc = NULL,
	.tx_mon_desc_pool_free = NULL,
#ifndef DISABLE_MON_CONFIG
	.mon_register_intr_ops = dp_mon_register_intr_ops_2_0,
#endif
	.mon_register_feature_ops = dp_mon_register_feature_ops_2_0,
#ifdef WLAN_TX_PKT_CAPTURE_ENH_BE
	.mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach_2_0,
	.mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach_2_0,
	.mon_peer_tx_capture_filter_check = NULL,
#endif
#if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH_BE))
	.mon_tx_ppdu_stats_attach = NULL,
	.mon_tx_ppdu_stats_detach = NULL,
	.mon_peer_tx_capture_filter_check = NULL,
#endif
	.mon_pdev_ext_init = dp_mon_pdev_ext_init_2_0,
	.mon_pdev_ext_deinit = dp_mon_pdev_ext_deinit_2_0,
	.mon_lite_mon_alloc = dp_lite_mon_alloc,
	.mon_lite_mon_dealloc = dp_lite_mon_dealloc,
	.mon_lite_mon_vdev_delete = dp_lite_mon_vdev_delete,
	.mon_lite_mon_disable_rx = dp_lite_mon_disable_rx,
};

struct cdp_mon_ops dp_ops_mon_2_0 = {
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = NULL,
	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
	.config_full_mon_mode = NULL,
	.soc_config_full_mon_mode = NULL,
	.get_mon_pdev_rx_stats = dp_pdev_get_rx_mon_stats,
	.txrx_enable_mon_reap_timer = NULL,
#ifdef QCA_SUPPORT_LITE_MONITOR
	.txrx_set_lite_mon_config = dp_lite_mon_set_config,
	.txrx_get_lite_mon_config = dp_lite_mon_get_config,
	.txrx_set_lite_mon_peer_config = dp_lite_mon_set_peer_config,
	.txrx_get_lite_mon_peer_config = dp_lite_mon_get_peer_config,
	.txrx_is_lite_mon_enabled = dp_lite_mon_is_enabled,
#endif
	.txrx_set_mon_pdev_params_rssi_dbm_conv =
				dp_mon_pdev_params_rssi_dbm_conv,
};

#ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
void dp_mon_ops_register_2_0(struct dp_mon_soc *mon_soc)
{
	struct dp_mon_ops *mon_ops = NULL;

	if (mon_soc->mon_ops) {
		dp_mon_err("monitor ops already allocated");
		return;
	}

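	/* Make a per-soc copy of the ops template so later per-soc
	 * overrides (e.g. feature ops registration) do not modify the
	 * shared monitor_ops_2_0 structure.
	 */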
	mon_ops = qdf_mem_malloc(sizeof(struct dp_mon_ops));
	if (!mon_ops) {
		dp_mon_err("Failed to allocate memory for mon ops");
		return;
	}

	qdf_mem_copy(mon_ops, &monitor_ops_2_0, sizeof(struct dp_mon_ops));
	mon_soc->mon_ops = mon_ops;
}

void dp_mon_cdp_ops_register_2_0(struct cdp_ops *ops)
{
	struct cdp_mon_ops *mon_ops = NULL;

	if (ops->mon_ops) {
		dp_mon_err("cdp monitor ops already allocated");
		return;
	}

	mon_ops = qdf_mem_malloc(sizeof(struct cdp_mon_ops));
	if (!mon_ops) {
		dp_mon_err("Failed to allocate memory for mon ops");
		return;
	}

	qdf_mem_copy(mon_ops, &dp_ops_mon_2_0, sizeof(struct cdp_mon_ops));
	ops->mon_ops = mon_ops;
}
#else
void dp_mon_ops_register_2_0(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops = &monitor_ops_2_0;
}

void dp_mon_cdp_ops_register_2_0(struct cdp_ops *ops)
{
	ops->mon_ops = &dp_ops_mon_2_0;
}
#endif

#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
	defined(WLAN_SUPPORT_RX_FLOW_TAG)
#if QCA_TEST_MON_PF_TAGS_STATS
/**
 * dp_mon_rx_update_rx_protocol_tag_stats() - Update mon protocol's
 *					      statistics
 * @pdev: pdev handle
 * @protocol_index: Protocol index for which the stats should be incremented
 *
 * Return: void
 */
void dp_mon_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
					    uint16_t protocol_index)
{
	pdev->mon_proto_tag_stats[protocol_index].tag_ctr++;
}
#else
void dp_mon_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
					    uint16_t protocol_index)
{
}
#endif
#endif