1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <dp_types.h>
19 #include "dp_rx.h"
20 #include "dp_peer.h"
21 #include <dp_htt.h>
22 #include <dp_mon_filter.h>
23 #include <dp_mon.h>
24 #include <dp_rx_mon.h>
25 #include <dp_rx_mon_2.0.h>
26 #include <dp_mon_2.0.h>
27 #include <dp_mon_filter_2.0.h>
28 #include <dp_tx_mon_2.0.h>
29 #include <hal_be_api_mon.h>
30 #include <dp_be.h>
31 #include <htt_ppdu_stats.h>
32 #ifdef QCA_SUPPORT_LITE_MONITOR
33 #include "dp_lite_mon.h"
34 #endif
35 
36 #if !defined(DISABLE_MON_CONFIG)
37 /*
38  * dp_mon_add_desc_list_to_free_list() - append unused desc_list back to
39  *					freelist.
40  *
41  * @soc: core txrx main context
42  * @local_desc_list: local desc list provided by the caller
43  * @tail: attach the point to last desc of local desc list
44  * @mon_desc_pool: monitor descriptor pool pointer
45  */
46 void
dp_mon_add_desc_list_to_free_list(struct dp_soc * soc,union dp_mon_desc_list_elem_t ** local_desc_list,union dp_mon_desc_list_elem_t ** tail,struct dp_mon_desc_pool * mon_desc_pool)47 dp_mon_add_desc_list_to_free_list(struct dp_soc *soc,
48 				  union dp_mon_desc_list_elem_t **local_desc_list,
49 				  union dp_mon_desc_list_elem_t **tail,
50 				  struct dp_mon_desc_pool *mon_desc_pool)
51 {
52 	union dp_mon_desc_list_elem_t *temp_list = NULL;
53 
54 	qdf_spin_lock_bh(&mon_desc_pool->lock);
55 
56 	temp_list = mon_desc_pool->freelist;
57 	mon_desc_pool->freelist = *local_desc_list;
58 	(*tail)->next = temp_list;
59 	*tail = NULL;
60 	*local_desc_list = NULL;
61 
62 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
63 }
64 
65 /*
66  * dp_mon_get_free_desc_list() - provide a list of descriptors from
67  *				the free mon desc pool.
68  *
69  * @soc: core txrx main context
70  * @mon_desc_pool: monitor descriptor pool pointer
71  * @num_descs: number of descs requested from freelist
72  * @desc_list: attach the descs to this list (output parameter)
73  * @tail: attach the point to last desc of free list (output parameter)
74  *
75  * Return: number of descs allocated from free list.
76  */
77 static uint16_t
dp_mon_get_free_desc_list(struct dp_soc * soc,struct dp_mon_desc_pool * mon_desc_pool,uint16_t num_descs,union dp_mon_desc_list_elem_t ** desc_list,union dp_mon_desc_list_elem_t ** tail)78 dp_mon_get_free_desc_list(struct dp_soc *soc,
79 			  struct dp_mon_desc_pool *mon_desc_pool,
80 			  uint16_t num_descs,
81 			  union dp_mon_desc_list_elem_t **desc_list,
82 			  union dp_mon_desc_list_elem_t **tail)
83 {
84 	uint16_t count;
85 
86 	qdf_spin_lock_bh(&mon_desc_pool->lock);
87 
88 	*desc_list = *tail = mon_desc_pool->freelist;
89 
90 	for (count = 0; count < num_descs; count++) {
91 		if (qdf_unlikely(!mon_desc_pool->freelist)) {
92 			qdf_spin_unlock_bh(&mon_desc_pool->lock);
93 			return count;
94 		}
95 		*tail = mon_desc_pool->freelist;
96 		mon_desc_pool->freelist = mon_desc_pool->freelist->next;
97 	}
98 	(*tail)->next = NULL;
99 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
100 	return count;
101 }
102 
103 static inline QDF_STATUS
dp_mon_frag_alloc_and_map(struct dp_soc * dp_soc,struct dp_mon_desc * mon_desc,struct dp_mon_desc_pool * mon_desc_pool)104 dp_mon_frag_alloc_and_map(struct dp_soc *dp_soc,
105 			  struct dp_mon_desc *mon_desc,
106 			  struct dp_mon_desc_pool *mon_desc_pool)
107 {
108 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
109 
110 	mon_desc->buf_addr = qdf_frag_alloc(&mon_desc_pool->pf_cache,
111 					    mon_desc_pool->buf_size);
112 
113 	if (!mon_desc->buf_addr) {
114 		dp_mon_err("Frag alloc failed");
115 		return QDF_STATUS_E_NOMEM;
116 	}
117 
118 	ret = qdf_mem_map_page(dp_soc->osdev,
119 			       mon_desc->buf_addr,
120 			       QDF_DMA_FROM_DEVICE,
121 			       mon_desc_pool->buf_size,
122 			       &mon_desc->paddr);
123 
124 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
125 		qdf_frag_free(mon_desc->buf_addr);
126 		dp_mon_err("Frag map failed");
127 		return QDF_STATUS_E_FAULT;
128 	}
129 
130 	return QDF_STATUS_SUCCESS;
131 }
132 
133 QDF_STATUS
dp_mon_desc_pool_init(struct dp_mon_desc_pool * mon_desc_pool,uint32_t pool_size)134 dp_mon_desc_pool_init(struct dp_mon_desc_pool *mon_desc_pool,
135 		      uint32_t pool_size)
136 {
137 	int desc_id;
138 	/* Initialize monitor desc lock */
139 	qdf_spinlock_create(&mon_desc_pool->lock);
140 
141 	qdf_spin_lock_bh(&mon_desc_pool->lock);
142 
143 	mon_desc_pool->buf_size = DP_MON_DATA_BUFFER_SIZE;
144 	/* link SW descs into a freelist */
145 	mon_desc_pool->freelist = &mon_desc_pool->array[0];
146 	mon_desc_pool->pool_size = pool_size - 1;
147 	qdf_mem_zero(mon_desc_pool->freelist,
148 		     mon_desc_pool->pool_size *
149 		     sizeof(union dp_mon_desc_list_elem_t));
150 
151 	for (desc_id = 0; desc_id < mon_desc_pool->pool_size; desc_id++) {
152 		if (desc_id == mon_desc_pool->pool_size - 1)
153 			mon_desc_pool->array[desc_id].next = NULL;
154 		else
155 			mon_desc_pool->array[desc_id].next =
156 				&mon_desc_pool->array[desc_id + 1];
157 		mon_desc_pool->array[desc_id].mon_desc.in_use = 0;
158 		mon_desc_pool->array[desc_id].mon_desc.cookie = desc_id;
159 	}
160 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
161 
162 	return QDF_STATUS_SUCCESS;
163 }
164 
dp_mon_desc_pool_deinit(struct dp_mon_desc_pool * mon_desc_pool)165 void dp_mon_desc_pool_deinit(struct dp_mon_desc_pool *mon_desc_pool)
166 {
167 	qdf_spin_lock_bh(&mon_desc_pool->lock);
168 
169 	mon_desc_pool->freelist = NULL;
170 	mon_desc_pool->pool_size = 0;
171 
172 	qdf_spin_unlock_bh(&mon_desc_pool->lock);
173 	qdf_spinlock_destroy(&mon_desc_pool->lock);
174 }
175 
/**
 * dp_mon_desc_pool_free() - free the monitor descriptor pool array memory
 *
 * @soc: core txrx main context
 * @mon_desc_pool: monitor descriptor pool pointer
 * @ctx_type: DP context type the array was allocated under
 */
void dp_mon_desc_pool_free(struct dp_soc *soc,
			   struct dp_mon_desc_pool *mon_desc_pool,
			   enum dp_ctxt_type ctx_type)
{
	dp_context_free_mem(soc, ctx_type, mon_desc_pool->array);
}
182 
dp_vdev_set_monitor_mode_buf_rings_rx_2_0(struct dp_pdev * pdev)183 QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_rx_2_0(struct dp_pdev *pdev)
184 {
185 	int rx_mon_max_entries;
186 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
187 	struct dp_soc *soc = pdev->soc;
188 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
189 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
190 	QDF_STATUS status;
191 
192 	if (!mon_soc_be) {
193 		dp_mon_err("DP MON SOC is NULL");
194 		return QDF_STATUS_E_FAILURE;
195 	}
196 
197 	soc_cfg_ctx = soc->wlan_cfg_ctx;
198 	rx_mon_max_entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
199 
200 	hal_set_low_threshold(soc->rxdma_mon_buf_ring[0].hal_srng,
201 			      MON_BUF_MIN_ENTRIES << 2);
202 	status = htt_srng_setup(soc->htt_handle, 0,
203 				soc->rxdma_mon_buf_ring[0].hal_srng,
204 				RXDMA_MONITOR_BUF);
205 
206 	if (status != QDF_STATUS_SUCCESS) {
207 		dp_mon_err("Failed to send htt srng setup message for Rx mon buf ring");
208 		return status;
209 	}
210 
211 	if (mon_soc_be->rx_mon_ring_fill_level < rx_mon_max_entries) {
212 		status = dp_rx_mon_buffers_alloc(soc,
213 						 (rx_mon_max_entries -
214 						 mon_soc_be->rx_mon_ring_fill_level));
215 		if (status != QDF_STATUS_SUCCESS) {
216 			dp_mon_err("%pK: Rx mon buffers allocation failed", soc);
217 			return status;
218 		}
219 		mon_soc_be->rx_mon_ring_fill_level +=
220 				(rx_mon_max_entries -
221 				mon_soc_be->rx_mon_ring_fill_level);
222 	}
223 
224 	return QDF_STATUS_SUCCESS;
225 }
226 
227 static
dp_vdev_set_monitor_mode_buf_rings_2_0(struct dp_pdev * pdev)228 QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_2_0(struct dp_pdev *pdev)
229 {
230 	int status;
231 	struct dp_soc *soc = pdev->soc;
232 
233 	status = dp_vdev_set_monitor_mode_buf_rings_rx_2_0(pdev);
234 	if (status != QDF_STATUS_SUCCESS) {
235 		dp_mon_err("%pK: Rx monitor extra buffer allocation failed",
236 			   soc);
237 		return status;
238 	}
239 
240 	return QDF_STATUS_SUCCESS;
241 }
242 
/**
 * dp_vdev_set_monitor_mode_rings_2_0() - monitor mode ring setup hook
 *
 * @pdev: DP pdev handle
 * @delayed_replenish: not used in this path
 *
 * No-op in the 2.0 path; ring setup happens at soc/pdev level elsewhere.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static
QDF_STATUS dp_vdev_set_monitor_mode_rings_2_0(struct dp_pdev *pdev,
					      uint8_t delayed_replenish)
{
	return QDF_STATUS_SUCCESS;
}
249 
250 #ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_mon_tx_enable_enhanced_stats_2_0() - Send HTT cmd to FW to enable
 *	enhanced PPDU stats delivery for this pdev
 * @pdev: Datapath pdev handle
 *
 * Return: none
 */
static void dp_mon_tx_enable_enhanced_stats_2_0(struct dp_pdev *pdev)
{
	/* DP_PPDU_STATS_CFG_ENH_STATS selects the enhanced-stats config */
	dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
				  pdev->pdev_id);
}
262 
/**
 * dp_mon_tx_disable_enhanced_stats_2_0() - Send HTT cmd to FW to disable
 *	enhanced PPDU stats delivery for this pdev
 * @pdev: Datapath pdev handle
 *
 * Return: none
 */
static void dp_mon_tx_disable_enhanced_stats_2_0(struct dp_pdev *pdev)
{
	/* Config value 0 clears the stats subscription set at enable time */
	dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
}
273 #endif
274 
275 #if defined(QCA_ENHANCED_STATS_SUPPORT) && defined(WLAN_FEATURE_11BE)
/**
 * dp_mon_tx_stats_update_2_0() - update 11BE Tx monitor peer stats
 *	(puncture-mode and MCS counters) from a completed PPDU user entry
 * @mon_peer: monitor peer handle
 * @ppdu: per-user Tx completion PPDU descriptor; punc_mode is written back
 */
void
dp_mon_tx_stats_update_2_0(struct dp_mon_peer *mon_peer,
			   struct cdp_tx_completion_ppdu_user *ppdu)
{
	uint8_t preamble, mcs, punc_mode, res_mcs;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;

	/* Derive the punctured-BW bucket from the puncture bitmap and BW,
	 * and store it in the descriptor for later consumers.
	 */
	punc_mode = dp_mon_get_puncture_type(ppdu->punc_pattern_bitmap,
					     ppdu->bw);
	ppdu->punc_mode = punc_mode;

	DP_STATS_INC(mon_peer, tx.punc_bw[punc_mode], ppdu->num_msdu);

	/* MCS histograms are only maintained for 11BE preamble here */
	if (preamble == DOT11_BE) {
		/* Clamp out-of-range MCS into the last bucket */
		res_mcs = (mcs < MAX_MCS_11BE) ? mcs : (MAX_MCS - 1);

		DP_STATS_INC(mon_peer,
			     tx.pkt_type[preamble].mcs_count[res_mcs],
			     ppdu->num_msdu);
		/* Per-PPDU-type counters: SU, MU-OFDMA, MU-MIMO */
		DP_STATS_INCC(mon_peer,
			      tx.su_be_ppdu_cnt.mcs_count[res_mcs], 1,
			      (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_SU));
		DP_STATS_INCC(mon_peer,
			      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[res_mcs],
			      1, (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA));
		DP_STATS_INCC(mon_peer,
			      tx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[res_mcs],
			      1, (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO));
	}
}
308 
309 enum cdp_punctured_modes
dp_mon_get_puncture_type(uint16_t puncture_pattern,uint8_t bw)310 dp_mon_get_puncture_type(uint16_t puncture_pattern, uint8_t bw)
311 {
312 	uint16_t mask;
313 	uint8_t punctured_bits;
314 
315 	if (!puncture_pattern)
316 		return NO_PUNCTURE;
317 
318 	switch (bw) {
319 	case CMN_BW_80MHZ:
320 		mask = PUNCTURE_80MHZ_MASK;
321 		break;
322 	case CMN_BW_160MHZ:
323 		mask = PUNCTURE_160MHZ_MASK;
324 		break;
325 	case CMN_BW_320MHZ:
326 		mask = PUNCTURE_320MHZ_MASK;
327 		break;
328 	default:
329 		return NO_PUNCTURE;
330 	}
331 
332 	/* 0s in puncture pattern received in TLV indicates punctured 20Mhz,
333 	 * after complement, 1s will indicate punctured 20Mhz
334 	 */
335 	puncture_pattern = ~puncture_pattern;
336 	puncture_pattern &= mask;
337 
338 	if (puncture_pattern) {
339 		punctured_bits = 0;
340 		while (puncture_pattern != 0) {
341 			punctured_bits++;
342 			puncture_pattern &= (puncture_pattern - 1);
343 		}
344 
345 		if (bw == CMN_BW_80MHZ) {
346 			if (punctured_bits == IEEE80211_PUNC_MINUS20MHZ)
347 				return PUNCTURED_20MHZ;
348 			else
349 				return NO_PUNCTURE;
350 		} else if (bw == CMN_BW_160MHZ) {
351 			if (punctured_bits == IEEE80211_PUNC_MINUS20MHZ)
352 				return PUNCTURED_20MHZ;
353 			else if (punctured_bits == IEEE80211_PUNC_MINUS40MHZ)
354 				return PUNCTURED_40MHZ;
355 			else
356 				return NO_PUNCTURE;
357 		} else if (bw == CMN_BW_320MHZ) {
358 			if (punctured_bits == IEEE80211_PUNC_MINUS40MHZ)
359 				return PUNCTURED_40MHZ;
360 			else if (punctured_bits == IEEE80211_PUNC_MINUS80MHZ)
361 				return PUNCTURED_80MHZ;
362 			else if (punctured_bits == IEEE80211_PUNC_MINUS120MHZ)
363 				return PUNCTURED_120MHZ;
364 			else
365 				return NO_PUNCTURE;
366 		}
367 	}
368 	return NO_PUNCTURE;
369 }
370 #endif
371 
372 #if defined(QCA_ENHANCED_STATS_SUPPORT) && !defined(WLAN_FEATURE_11BE)
/**
 * dp_mon_tx_stats_update_2_0() - non-11BE variant; puncturing does not
 *	apply, so only mark the PPDU as unpunctured
 * @mon_peer: monitor peer handle (unused here)
 * @ppdu: per-user Tx completion PPDU descriptor
 */
void
dp_mon_tx_stats_update_2_0(struct dp_mon_peer *mon_peer,
			   struct cdp_tx_completion_ppdu_user *ppdu)
{
	ppdu->punc_mode = NO_PUNCTURE;
}
379 
/**
 * dp_mon_get_puncture_type() - non-11BE variant; puncturing unsupported
 * @puncture_pattern: unused
 * @bw: unused
 *
 * Return: NO_PUNCTURE always
 */
enum cdp_punctured_modes
dp_mon_get_puncture_type(uint16_t puncture_pattern, uint8_t bw)
{
	return NO_PUNCTURE;
}
385 #endif /* QCA_ENHANCED_STATS_SUPPORT && WLAN_FEATURE_11BE */
386 
387 #ifdef QCA_SUPPORT_BPR
/**
 * dp_set_bpr_enable_2_0() - BPR enable hook for the 2.0 path
 * @pdev: DP pdev handle
 * @val: requested BPR enable value (ignored)
 *
 * No-op stub in this path.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS
dp_set_bpr_enable_2_0(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_SUCCESS;
}
393 #endif /* QCA_SUPPORT_BPR */
394 
395 #ifdef QCA_ENHANCED_STATS_SUPPORT
396 #ifdef WDI_EVENT_ENABLE
397 /**
398  * dp_ppdu_desc_notify_2_0 - Notify upper layer for PPDU indication via WDI
399  *
400  * @pdev: Datapath pdev handle
401  * @nbuf: Buffer to be shipped
402  *
403  * Return: void
404  */
dp_ppdu_desc_notify_2_0(struct dp_pdev * pdev,qdf_nbuf_t nbuf)405 static void dp_ppdu_desc_notify_2_0(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
406 {
407 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
408 
409 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(nbuf);
410 
411 	if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
412 	    ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
413 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
414 				     pdev->soc,
415 				     nbuf, HTT_INVALID_PEER,
416 				     WDI_NO_VAL,
417 				     pdev->pdev_id);
418 	} else {
419 		qdf_nbuf_free(nbuf);
420 	}
421 }
422 #endif
423 
/**
 * dp_ppdu_stats_feat_enable_check_2_0() - Check if any feature is enabled
 *	that consumes PPDU stats from FW
 *
 * @pdev: Datapath pdev handle
 *
 * Return: true if enhanced stats are enabled on this pdev, else false
 */
static bool dp_ppdu_stats_feat_enable_check_2_0(struct dp_pdev *pdev)
{
	return pdev->monitor_pdev->enhanced_stats_en;
}
436 #endif
437 
dp_mon_soc_htt_srng_setup_2_0(struct dp_soc * soc)438 QDF_STATUS dp_mon_soc_htt_srng_setup_2_0(struct dp_soc *soc)
439 {
440 	QDF_STATUS status;
441 
442 	status = dp_rx_mon_soc_htt_srng_setup_2_0(soc, 0);
443 	if (status != QDF_STATUS_SUCCESS) {
444 		dp_err("Failed to send htt srng setup message for Rx mon buf ring");
445 		return status;
446 	}
447 
448 	status = dp_tx_mon_soc_htt_srng_setup_2_0(soc, 0);
449 	if (status != QDF_STATUS_SUCCESS) {
450 		dp_err("Failed to send htt srng setup message for Tx mon buf ring");
451 		return status;
452 	}
453 
454 	return status;
455 }
456 
457 #if defined(DP_CON_MON)
dp_mon_pdev_htt_srng_setup_2_0(struct dp_soc * soc,struct dp_pdev * pdev,int mac_id,int mac_for_pdev)458 QDF_STATUS dp_mon_pdev_htt_srng_setup_2_0(struct dp_soc *soc,
459 					  struct dp_pdev *pdev,
460 					  int mac_id,
461 					  int mac_for_pdev)
462 {
463 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
464 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
465 	QDF_STATUS status;
466 
467 	if (!mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng)
468 		return QDF_STATUS_SUCCESS;
469 
470 	status = dp_tx_mon_pdev_htt_srng_setup_2_0(soc, pdev, mac_id,
471 						   mac_for_pdev);
472 	if (status != QDF_STATUS_SUCCESS) {
473 		dp_mon_err("Failed to send htt srng message for Tx mon dst ring");
474 		return status;
475 	}
476 
477 	return status;
478 }
479 #else
480 /* This is for WIN case */
dp_mon_pdev_htt_srng_setup_2_0(struct dp_soc * soc,struct dp_pdev * pdev,int mac_id,int mac_for_pdev)481 QDF_STATUS dp_mon_pdev_htt_srng_setup_2_0(struct dp_soc *soc,
482 					  struct dp_pdev *pdev,
483 					  int mac_id,
484 					  int mac_for_pdev)
485 {
486 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
487 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
488 	QDF_STATUS status;
489 
490 	if (!soc->rxdma_mon_dst_ring[mac_id].hal_srng)
491 		return QDF_STATUS_SUCCESS;
492 
493 	status = dp_rx_mon_pdev_htt_srng_setup_2_0(soc, pdev, mac_id,
494 						   mac_for_pdev);
495 	if (status != QDF_STATUS_SUCCESS) {
496 		dp_mon_err("Failed to send htt srng setup message for Rxdma dst ring");
497 		return status;
498 	}
499 
500 	if (!mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng)
501 		return QDF_STATUS_SUCCESS;
502 
503 	status = dp_tx_mon_pdev_htt_srng_setup_2_0(soc, pdev, mac_id,
504 						   mac_for_pdev);
505 	if (status != QDF_STATUS_SUCCESS) {
506 		dp_mon_err("Failed to send htt srng message for Tx mon dst ring");
507 		return status;
508 	}
509 
510 	return status;
511 }
512 #endif
513 
/**
 * dp_rx_mon_refill_buf_ring_2_0() - low-threshold interrupt handler that
 *	tops the Rx monitor buffer ring back up to the current fill level
 *
 * @int_ctx: DP interrupt context
 *
 * Return: QDF_STATUS_SUCCESS
 */
static
QDF_STATUS dp_rx_mon_refill_buf_ring_2_0(struct dp_intr *int_ctx)
{
	struct dp_soc *soc  = int_ctx->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	union dp_mon_desc_list_elem_t *desc_list = NULL;
	union dp_mon_desc_list_elem_t *tail = NULL;
	struct dp_srng *rx_mon_buf_ring;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	uint32_t num_entries_avail, num_entries, num_entries_in_ring;
	int sync_hw_ptr = 1, hp = 0, tp = 0;
	void *hal_srng;

	rx_mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
	hal_srng = rx_mon_buf_ring->hal_srng;

	/* Book-keeping: count this low-threshold interrupt */
	intr_stats->num_host2rxdma_ring_masks++;
	mon_soc_be->rx_low_thresh_intrs++;

	/* Snapshot ring occupancy under SRNG access protection */
	hal_srng_access_start(soc->hal_soc, hal_srng);
	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
						   hal_srng,
						   sync_hw_ptr);
	num_entries_in_ring = rx_mon_buf_ring->num_entries - num_entries_avail;
	/* hp/tp are fetched but not used below — NOTE(review): presumably
	 * retained for debug visibility; confirm before removing.
	 */
	hal_get_sw_hptp(soc->hal_soc, (hal_ring_handle_t)hal_srng, &tp, &hp);
	hal_srng_access_end(soc->hal_soc, hal_srng);

	if (num_entries_avail) {
		/* Replenish only the shortfall relative to the configured
		 * fill level; at/above it there is nothing to do.
		 */
		if (num_entries_in_ring < mon_soc_be->rx_mon_ring_fill_level)
			num_entries = mon_soc_be->rx_mon_ring_fill_level
				      - num_entries_in_ring;
		else
			return QDF_STATUS_SUCCESS;

		/* Best effort: the replenish status is intentionally ignored */
		dp_mon_buffers_replenish(soc, rx_mon_buf_ring,
					 &mon_soc_be->rx_desc_mon,
					 num_entries, &desc_list, &tail,
					 NULL);
	}

	return QDF_STATUS_SUCCESS;
}
556 
/**
 * dp_mon_soc_detach_2_0() - detach the Rx and Tx monitor soc components
 * @soc: DP soc handle
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_mon_soc_detach_2_0(struct dp_soc *soc)
{
	dp_rx_mon_soc_detach_2_0(soc, 0);
	dp_tx_mon_soc_detach_2_0(soc, 0);
	return QDF_STATUS_SUCCESS;
}
563 
dp_mon_soc_deinit_2_0(struct dp_soc * soc)564 void dp_mon_soc_deinit_2_0(struct dp_soc *soc)
565 {
566 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
567 	struct dp_mon_soc_be *mon_soc_be =
568 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
569 
570 	if (!mon_soc_be->is_dp_mon_soc_initialized)
571 		return;
572 
573 	dp_rx_mon_soc_deinit_2_0(soc, 0);
574 	dp_tx_mon_soc_deinit_2_0(soc, 0);
575 
576 	mon_soc_be->is_dp_mon_soc_initialized = false;
577 }
578 
dp_mon_soc_init_2_0(struct dp_soc * soc)579 QDF_STATUS dp_mon_soc_init_2_0(struct dp_soc *soc)
580 {
581 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
582 	struct dp_mon_soc_be *mon_soc_be =
583 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
584 
585 	if (soc->rxdma_mon_buf_ring[0].hal_srng) {
586 		dp_mon_info("%pK: mon soc init is done", soc);
587 		return QDF_STATUS_SUCCESS;
588 	}
589 
590 	if (dp_rx_mon_soc_init_2_0(soc)) {
591 		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
592 		goto fail;
593 	}
594 
595 	if (dp_tx_mon_soc_init_2_0(soc)) {
596 		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
597 		goto fail;
598 	}
599 
600 	mon_soc_be->tx_mon_ring_fill_level = 0;
601 	if (soc->rxdma_mon_buf_ring[0].num_entries < DP_MON_RING_FILL_LEVEL_DEFAULT)
602 		mon_soc_be->rx_mon_ring_fill_level = soc->rxdma_mon_buf_ring[0].num_entries;
603 	else
604 		mon_soc_be->rx_mon_ring_fill_level = DP_MON_RING_FILL_LEVEL_DEFAULT;
605 
606 	mon_soc_be->is_dp_mon_soc_initialized = true;
607 	return QDF_STATUS_SUCCESS;
608 fail:
609 	dp_mon_soc_deinit_2_0(soc);
610 	return QDF_STATUS_E_FAILURE;
611 }
612 
dp_mon_soc_attach_2_0(struct dp_soc * soc)613 QDF_STATUS dp_mon_soc_attach_2_0(struct dp_soc *soc)
614 {
615 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
616 	struct dp_mon_soc_be *mon_soc_be =
617 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
618 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
619 
620 	soc_cfg_ctx = soc->wlan_cfg_ctx;
621 	if (!mon_soc_be) {
622 		dp_mon_err("DP MON SOC is NULL");
623 		return QDF_STATUS_E_FAILURE;
624 	}
625 
626 	if (dp_rx_mon_soc_attach_2_0(soc, 0)) {
627 		dp_mon_err("%pK: " RNG_ERR "rx_mon_buf_ring", soc);
628 		goto fail;
629 	}
630 
631 	if (dp_tx_mon_soc_attach_2_0(soc, 0)) {
632 		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
633 		goto fail;
634 	}
635 
636 	/* allocate sw desc pool */
637 	if (dp_rx_mon_buf_desc_pool_alloc(soc)) {
638 		dp_mon_err("%pK: Rx mon desc pool allocation failed", soc);
639 		goto fail;
640 	}
641 
642 	if (dp_tx_mon_buf_desc_pool_alloc(soc)) {
643 		dp_mon_err("%pK: Tx mon desc pool allocation failed", soc);
644 		goto fail;
645 	}
646 
647 	return QDF_STATUS_SUCCESS;
648 fail:
649 	dp_mon_soc_detach_2_0(soc);
650 	return QDF_STATUS_E_NOMEM;
651 }
652 
/**
 * dp_mon_pdev_free_2_0() - per-pdev monitor free hook; nothing is
 *	allocated in dp_mon_pdev_alloc_2_0(), so this is a no-op
 * @pdev: DP pdev handle
 */
static
void dp_mon_pdev_free_2_0(struct dp_pdev *pdev)
{
}
657 
658 static
dp_mon_pdev_alloc_2_0(struct dp_pdev * pdev)659 QDF_STATUS dp_mon_pdev_alloc_2_0(struct dp_pdev *pdev)
660 {
661 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
662 	struct dp_mon_pdev_be *mon_pdev_be =
663 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
664 
665 	if (!mon_pdev_be) {
666 		dp_mon_err("DP MON PDEV is NULL");
667 		return QDF_STATUS_E_FAILURE;
668 	}
669 
670 	return QDF_STATUS_SUCCESS;
671 }
672 
673 #else
/**
 * dp_mon_htt_srng_setup_2_0() - HTT SRNG setup stub for the
 *	DISABLE_MON_CONFIG build
 * @soc: DP soc handle
 * @pdev: DP pdev handle
 * @mac_id: MAC id
 * @mac_for_pdev: MAC id mapped for this pdev
 *
 * Return: QDF_STATUS_SUCCESS (no-op)
 */
static inline
QDF_STATUS dp_mon_htt_srng_setup_2_0(struct dp_soc *soc,
				     struct dp_pdev *pdev,
				     int mac_id,
				     int mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}
682 
/**
 * dp_tx_mon_process_2_0() - Tx monitor processing stub for the
 *	DISABLE_MON_CONFIG build
 * @soc: DP soc handle
 * @int_ctx: DP interrupt context
 * @mac_id: MAC id
 * @quota: processing quota
 *
 * Return: 0 (no work done)
 */
static uint32_t
dp_tx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
		      uint32_t mac_id, uint32_t quota)
{
	return 0;
}
689 
690 static inline
dp_mon_soc_attach_2_0(struct dp_soc * soc)691 QDF_STATUS dp_mon_soc_attach_2_0(struct dp_soc *soc)
692 {
693 	return status;
694 }
695 
696 static inline
dp_mon_soc_detach_2_0(struct dp_soc * soc)697 QDF_STATUS dp_mon_soc_detach_2_0(struct dp_soc *soc)
698 {
699 	return status;
700 }
701 
/**
 * dp_pdev_mon_rings_deinit_2_0() - pdev monitor ring deinit stub for the
 *	DISABLE_MON_CONFIG build (no-op)
 * @pdev: DP pdev handle
 */
static inline
void dp_pdev_mon_rings_deinit_2_0(struct dp_pdev *pdev)
{
}
706 
/**
 * dp_pdev_mon_rings_init_2_0() - pdev monitor ring init stub for the
 *	DISABLE_MON_CONFIG build
 * @soc: DP soc handle
 * @pdev: DP pdev handle
 *
 * Return: QDF_STATUS_SUCCESS (no-op)
 */
static inline
QDF_STATUS dp_pdev_mon_rings_init_2_0(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
712 
/**
 * dp_pdev_mon_rings_free_2_0() - pdev monitor ring free stub for the
 *	DISABLE_MON_CONFIG build (no-op)
 * @pdev: DP pdev handle
 */
static inline
void dp_pdev_mon_rings_free_2_0(struct dp_pdev *pdev)
{
}
717 
/**
 * dp_pdev_mon_rings_alloc_2_0() - pdev monitor ring alloc stub for the
 *	DISABLE_MON_CONFIG build
 * @soc: DP soc handle
 * @pdev: DP pdev handle
 *
 * Return: QDF_STATUS_SUCCESS (no-op)
 */
static inline
QDF_STATUS dp_pdev_mon_rings_alloc_2_0(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
723 
/**
 * dp_mon_pdev_free_2_0() - monitor pdev free stub for the
 *	DISABLE_MON_CONFIG build (no-op)
 * @pdev: DP pdev handle
 */
static inline
void dp_mon_pdev_free_2_0(struct dp_pdev *pdev)
{
}
728 
/**
 * dp_mon_pdev_alloc_2_0() - monitor pdev alloc stub for the
 *	DISABLE_MON_CONFIG build
 * @pdev: DP pdev handle
 *
 * Return: QDF_STATUS_SUCCESS (no-op)
 */
static inline
QDF_STATUS dp_mon_pdev_alloc_2_0(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
734 
/**
 * dp_vdev_set_monitor_mode_buf_rings_2_0() - monitor buf ring setup stub
 *	for the DISABLE_MON_CONFIG build (no-op)
 * @pdev: DP pdev handle
 *
 * NOTE(review): the enabled variant returns QDF_STATUS while this stub
 * returns void — confirm callers in this configuration expect void.
 */
static inline
void dp_vdev_set_monitor_mode_buf_rings_2_0(struct dp_pdev *pdev)
{
}
739 
/**
 * dp_vdev_set_monitor_mode_rings_2_0() - monitor mode ring setup stub for
 *	the DISABLE_MON_CONFIG build
 * @pdev: DP pdev handle
 * @delayed_replenish: unused
 *
 * Return: QDF_STATUS_SUCCESS (no-op)
 */
static inline
QDF_STATUS dp_vdev_set_monitor_mode_rings_2_0(struct dp_pdev *pdev,
					      uint8_t delayed_replenish)
{
	return QDF_STATUS_SUCCESS;
}
746 #endif
747 
/**
 * dp_pdev_mon_rings_deinit_2_0() - de-initialize the Rx and Tx monitor
 *	rings of every MAC belonging to this pdev
 * @pdev: DP pdev handle
 */
void dp_pdev_mon_rings_deinit_2_0(struct dp_pdev *pdev)
{
	int mac_id = 0;
	struct dp_soc *soc = pdev->soc;

	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
		/* Map pdev-relative MAC index to the soc lmac id */
		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
							 pdev->pdev_id);

		dp_rx_mon_pdev_rings_deinit_2_0(pdev, lmac_id);
		dp_tx_mon_pdev_rings_deinit_2_0(pdev, lmac_id);
	}
}
761 
dp_pdev_mon_rings_init_2_0(struct dp_pdev * pdev)762 QDF_STATUS dp_pdev_mon_rings_init_2_0(struct dp_pdev *pdev)
763 {
764 	struct dp_soc *soc = pdev->soc;
765 	int mac_id = 0;
766 	QDF_STATUS status = QDF_STATUS_SUCCESS;
767 
768 	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
769 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
770 							 pdev->pdev_id);
771 
772 		status = dp_rx_mon_pdev_rings_init_2_0(pdev, lmac_id);
773 		if (QDF_IS_STATUS_ERROR(status)) {
774 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
775 			goto fail;
776 		}
777 
778 		status = dp_tx_mon_pdev_rings_init_2_0(pdev, lmac_id);
779 		if (QDF_IS_STATUS_ERROR(status)) {
780 			dp_mon_err("%pK: " RNG_ERR "tx_mon_dst_ring", soc);
781 			goto fail;
782 		}
783 	}
784 	return status;
785 
786 fail:
787 	dp_pdev_mon_rings_deinit_2_0(pdev);
788 	return status;
789 }
790 
/**
 * dp_pdev_mon_rings_free_2_0() - free the Rx and Tx monitor rings of
 *	every MAC belonging to this pdev
 * @pdev: DP pdev handle
 */
void dp_pdev_mon_rings_free_2_0(struct dp_pdev *pdev)
{
	int mac_id = 0;
	struct dp_soc *soc = pdev->soc;

	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
		/* Map pdev-relative MAC index to the soc lmac id */
		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
							 pdev->pdev_id);

		dp_rx_mon_pdev_rings_free_2_0(pdev, lmac_id);
		dp_tx_mon_pdev_rings_free_2_0(pdev, lmac_id);
	}
}
804 
dp_pdev_mon_rings_alloc_2_0(struct dp_pdev * pdev)805 QDF_STATUS dp_pdev_mon_rings_alloc_2_0(struct dp_pdev *pdev)
806 {
807 	struct dp_soc *soc = pdev->soc;
808 	int mac_id = 0;
809 	QDF_STATUS status = QDF_STATUS_SUCCESS;
810 
811 	for (mac_id = 0; mac_id < DP_NUM_MACS_PER_PDEV; mac_id++) {
812 		int lmac_id =
813 		dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
814 
815 		status = dp_rx_mon_pdev_rings_alloc_2_0(pdev, lmac_id);
816 		if (QDF_IS_STATUS_ERROR(status)) {
817 			dp_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", pdev);
818 			goto fail;
819 		}
820 
821 		status = dp_tx_mon_pdev_rings_alloc_2_0(pdev, lmac_id);
822 		if (QDF_IS_STATUS_ERROR(status)) {
823 			dp_err("%pK: " RNG_ERR "tx_mon_dst_ring", pdev);
824 			goto fail;
825 		}
826 	}
827 	return status;
828 
829 fail:
830 	dp_pdev_mon_rings_free_2_0(pdev);
831 	return status;
832 }
833 
/**
 * dp_mon_pool_frag_unmap_and_free() - DMA-unmap and free all buffers still
 *	attached to in-use descriptors of a monitor descriptor pool
 * @soc: core txrx main context
 * @mon_desc_pool: monitor descriptor pool pointer
 */
void dp_mon_pool_frag_unmap_and_free(struct dp_soc *soc,
				     struct dp_mon_desc_pool *mon_desc_pool)
{
	int desc_id;
	qdf_frag_t vaddr;
	qdf_dma_addr_t paddr;

	qdf_spin_lock_bh(&mon_desc_pool->lock);
	for (desc_id = 0; desc_id < mon_desc_pool->pool_size; desc_id++) {
		/* Only descriptors still holding a buffer need cleanup */
		if (mon_desc_pool->array[desc_id].mon_desc.in_use) {
			vaddr = mon_desc_pool->array[desc_id].mon_desc.buf_addr;
			paddr = mon_desc_pool->array[desc_id].mon_desc.paddr;

			/* Unmap exactly once; 'unmapped' guards double unmap */
			if (!(mon_desc_pool->array[desc_id].mon_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   mon_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				mon_desc_pool->array[desc_id].mon_desc.unmapped = 1;
				mon_desc_pool->array[desc_id].mon_desc.cookie = desc_id;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&mon_desc_pool->lock);
}
859 
/**
 * dp_mon_buffers_replenish() - replenish monitor SRNG entries with freshly
 *	allocated and DMA-mapped frag buffers
 *
 * @dp_soc: core txrx main context
 * @dp_mon_srng: monitor SRNG to replenish
 * @mon_desc_pool: SW descriptor pool backing the ring
 * @num_req_buffers: number of buffers requested
 * @desc_list: in/out SW descriptor list; when empty, descriptors are
 *	       pulled from the pool freelist
 * @tail: in/out tail of @desc_list
 * @replenish_cnt_ref: optional; incremented by the number of entries
 *		       actually programmed into the ring
 *
 * Any descriptors left unused on @desc_list are returned to the pool
 * freelist before returning.
 *
 * Return: QDF_STATUS_SUCCESS after a (possibly partial) replenish,
 *	   QDF_STATUS_E_INVAL for a zero-length request,
 *	   QDF_STATUS_E_NOMEM when no free descriptors are available,
 *	   else the last allocation failure status.
 */
QDF_STATUS
dp_mon_buffers_replenish(struct dp_soc *dp_soc,
			 struct dp_srng *dp_mon_srng,
			 struct dp_mon_desc_pool *mon_desc_pool,
			 uint32_t num_req_buffers,
			 union dp_mon_desc_list_elem_t **desc_list,
			 union dp_mon_desc_list_elem_t **tail,
			 uint32_t *replenish_cnt_ref)
{
	uint32_t num_alloc_desc;
	uint32_t num_entries_avail;
	uint32_t count = 0;
	int sync_hw_ptr = 1;
	struct dp_mon_desc mon_desc = {0};
	void *mon_ring_entry;
	union dp_mon_desc_list_elem_t *next;
	void *mon_srng;
	unsigned long long desc;
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
	struct dp_mon_soc *mon_soc = dp_soc->monitor_soc;

	if (!num_req_buffers) {
		dp_mon_debug("%pK: Received request for 0 buffers replenish",
			     dp_soc);
		ret = QDF_STATUS_E_INVAL;
		goto free_desc;
	}

	mon_srng = dp_mon_srng->hal_srng;

	hal_srng_access_start(dp_soc->hal_soc, mon_srng);

	/* Clamp the request to the entries actually free in the ring */
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   mon_srng, sync_hw_ptr);

	if (!num_entries_avail) {
		hal_srng_access_end(dp_soc->hal_soc, mon_srng);
		goto free_desc;
	}
	if (num_entries_avail < num_req_buffers)
		num_req_buffers = num_entries_avail;

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_mon_get_free_desc_list(dp_soc,
							   mon_desc_pool,
							   num_req_buffers,
							   desc_list,
							   tail);

		if (!num_alloc_desc) {
			dp_mon_debug("%pK: no free rx_descs in freelist", dp_soc);
			hal_srng_access_end(dp_soc->hal_soc, mon_srng);
			return QDF_STATUS_E_NOMEM;
		}

		dp_mon_info("%pK: %d rx desc allocated",
			    dp_soc, num_alloc_desc);

		num_req_buffers = num_alloc_desc;
	}

	while (count <= num_req_buffers - 1) {
		ret = dp_mon_frag_alloc_and_map(dp_soc,
						&mon_desc,
						mon_desc_pool);

		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			/* E_FAULT (map failure) retries the same slot;
			 * NOTE(review): repeated map failures would spin
			 * here — confirm this is the intended policy.
			 * Any other error aborts the replenish loop.
			 */
			if (qdf_unlikely(ret  == QDF_STATUS_E_FAULT))
				continue;
			break;
		}

		count++;
		next = (*desc_list)->next;
		mon_ring_entry = hal_srng_src_get_next(
						dp_soc->hal_soc,
						mon_srng);

		if (!mon_ring_entry)
			break;

		/* Descriptor must be free before handing it to HW */
		qdf_assert_always((*desc_list)->mon_desc.in_use == 0);

		(*desc_list)->mon_desc.in_use = 1;
		(*desc_list)->mon_desc.unmapped = 0;
		(*desc_list)->mon_desc.buf_addr = mon_desc.buf_addr;
		(*desc_list)->mon_desc.paddr = mon_desc.paddr;
		(*desc_list)->mon_desc.magic = DP_MON_DESC_MAGIC;
		/* Generation counter to detect stale ring entries */
		(*desc_list)->mon_desc.cookie_2++;

		mon_soc->stats.frag_alloc++;

		/* populate lower 40 bit mon_desc address in desc
		 * and cookie_2 in upper 24 bits
		 */
		desc = dp_mon_get_debug_desc_addr(desc_list);
		hal_mon_buff_addr_info_set(dp_soc->hal_soc,
					   mon_ring_entry,
					   desc,
					   mon_desc.paddr);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, mon_srng);
	if (replenish_cnt_ref)
		*replenish_cnt_ref += count;

free_desc:
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list) {
		dp_mon_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						  mon_desc_pool);
	}

	return ret;
}
982 
dp_mon_desc_pool_alloc(struct dp_soc * soc,enum dp_ctxt_type ctx_type,uint32_t pool_size,struct dp_mon_desc_pool * mon_desc_pool)983 QDF_STATUS dp_mon_desc_pool_alloc(struct dp_soc *soc,
984 				  enum dp_ctxt_type ctx_type,
985 				  uint32_t pool_size,
986 				  struct dp_mon_desc_pool *mon_desc_pool)
987 {
988 	size_t mem_size;
989 
990 	mon_desc_pool->pool_size = pool_size - 1;
991 	mem_size = mon_desc_pool->pool_size *
992 			sizeof(union dp_mon_desc_list_elem_t);
993 	mon_desc_pool->array = dp_context_alloc_mem(soc, ctx_type, mem_size);
994 
995 	return QDF_STATUS_SUCCESS;
996 }
997 
dp_vdev_set_monitor_mode_buf_rings_tx_2_0(struct dp_pdev * pdev,uint16_t num_of_buffers)998 QDF_STATUS dp_vdev_set_monitor_mode_buf_rings_tx_2_0(struct dp_pdev *pdev,
999 						     uint16_t num_of_buffers)
1000 {
1001 	int tx_mon_max_entries;
1002 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1003 	struct dp_soc *soc = pdev->soc;
1004 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1005 	struct dp_mon_soc_be *mon_soc_be =
1006 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1007 	QDF_STATUS status;
1008 
1009 	if (!mon_soc_be) {
1010 		dp_mon_err("DP MON SOC is NULL");
1011 		return QDF_STATUS_E_FAILURE;
1012 	}
1013 
1014 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1015 	tx_mon_max_entries =
1016 		wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
1017 
1018 	hal_set_low_threshold(mon_soc_be->tx_mon_buf_ring.hal_srng,
1019 			      tx_mon_max_entries >> 2);
1020 	status = htt_srng_setup(soc->htt_handle, 0,
1021 				mon_soc_be->tx_mon_buf_ring.hal_srng,
1022 				TX_MONITOR_BUF);
1023 
1024 	if (status != QDF_STATUS_SUCCESS) {
1025 		dp_mon_err("Failed to send htt srng setup message for Tx mon buf ring");
1026 		return status;
1027 	}
1028 
1029 	if (mon_soc_be->tx_mon_ring_fill_level < num_of_buffers) {
1030 		if (dp_tx_mon_buffers_alloc(soc,
1031 					    (num_of_buffers -
1032 					     mon_soc_be->tx_mon_ring_fill_level))) {
1033 			dp_mon_err("%pK: Tx mon buffers allocation failed",
1034 				   soc);
1035 			return QDF_STATUS_E_FAILURE;
1036 		}
1037 		mon_soc_be->tx_mon_ring_fill_level +=
1038 					(num_of_buffers -
1039 					mon_soc_be->tx_mon_ring_fill_level);
1040 	}
1041 
1042 	return QDF_STATUS_SUCCESS;
1043 }
1044 
#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG) ||\
	 defined(WLAN_FEATURE_PKT_CAPTURE_V2))
/**
 * dp_mon_ppdu_stats_handler_register() - install the PPDU stats indication
 *	handler into the monitor soc ops
 * @mon_soc: monitor soc handle
 *
 * Return: void
 */
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops->mon_ppdu_stats_ind_handler =
					dp_ppdu_stats_ind_handler;
}
#else
/* PPDU stats indications are compiled out: nothing to register */
static inline
void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc)
{
}
#endif
1060 
dp_mon_register_intr_ops_2_0(struct dp_soc * soc)1061 static void dp_mon_register_intr_ops_2_0(struct dp_soc *soc)
1062 {
1063 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1064 
1065 	mon_soc->mon_ops->rx_mon_refill_buf_ring =
1066 			dp_rx_mon_refill_buf_ring_2_0,
1067 	mon_soc->mon_ops->tx_mon_refill_buf_ring =
1068 			NULL,
1069 	mon_soc->mon_rx_process = dp_rx_mon_process_2_0;
1070 	dp_mon_ppdu_stats_handler_register(mon_soc);
1071 }
1072 
1073 #ifdef MONITOR_TLV_RECORDING_ENABLE
1074 /**
1075  * dp_mon_pdev_initialize_tlv_logger() - initialize dp_mon_tlv_logger for
1076  *					Rx and Tx
1077  *
1078  * @tlv_logger : double pointer to dp_mon_tlv_logger
1079  * @direction: Rx/Tx
1080  * Return: QDF_STATUS
1081  */
1082 static QDF_STATUS
dp_mon_pdev_initialize_tlv_logger(struct dp_mon_tlv_logger ** tlv_logger,uint8_t direction)1083 dp_mon_pdev_initialize_tlv_logger(struct dp_mon_tlv_logger **tlv_logger,
1084 				  uint8_t direction)
1085 {
1086 	struct dp_mon_tlv_logger *tlv_log = NULL;
1087 
1088 	tlv_log = qdf_mem_malloc(sizeof(struct dp_mon_tlv_logger));
1089 	if (!tlv_log) {
1090 		dp_mon_err("Memory allocation failed");
1091 		return QDF_STATUS_E_NOMEM;
1092 	}
1093 
1094 	if (direction == MONITOR_TLV_RECORDING_RX)
1095 		tlv_log->buff = qdf_mem_malloc(MAX_TLV_LOGGING_SIZE *
1096 					sizeof(struct dp_mon_tlv_info));
1097 	else if (direction == MONITOR_TLV_RECORDING_TX)
1098 		tlv_log->buff = qdf_mem_malloc(MAX_TLV_LOGGING_SIZE *
1099 					sizeof(struct dp_tx_mon_tlv_info));
1100 
1101 	if (!tlv_log->buff) {
1102 		dp_mon_err("Memory allocation failed");
1103 		qdf_mem_free(tlv_log);
1104 		tlv_log = NULL;
1105 		return QDF_STATUS_E_NOMEM;
1106 	}
1107 
1108 	tlv_log->curr_ppdu_pos = 0;
1109 	tlv_log->wrap_flag = 0;
1110 	tlv_log->ppdu_start_idx = 0;
1111 	tlv_log->mpdu_idx = MAX_PPDU_START_TLV_NUM;
1112 	tlv_log->ppdu_end_idx = MAX_PPDU_START_TLV_NUM + MAX_MPDU_TLV_NUM;
1113 	tlv_log->max_ppdu_start_idx = MAX_PPDU_START_TLV_NUM - 1;
1114 	tlv_log->max_mpdu_idx = MAX_PPDU_START_TLV_NUM + MAX_MPDU_TLV_NUM - 1;
1115 	tlv_log->max_ppdu_end_idx = MAX_TLVS_PER_PPDU - 1;
1116 
1117 	tlv_log->tlv_logging_enable = 1;
1118 	*tlv_logger = tlv_log;
1119 
1120 	return QDF_STATUS_SUCCESS;
1121 }
1122 
1123 /*
1124  * dp_mon_pdev_tlv_logger_init() - initializes struct dp_mon_tlv_logger
1125  *
1126  * @pdev: pointer to dp_pdev
1127  *
1128  * Return: QDF_STATUS
1129  */
1130 static
dp_mon_pdev_tlv_logger_init(struct dp_pdev * pdev)1131 QDF_STATUS dp_mon_pdev_tlv_logger_init(struct dp_pdev *pdev)
1132 {
1133 	struct dp_mon_pdev *mon_pdev = NULL;
1134 	struct dp_mon_pdev_be *mon_pdev_be = NULL;
1135 	struct dp_soc *soc = NULL;
1136 
1137 	if (!pdev)
1138 		return QDF_STATUS_E_INVAL;
1139 
1140 	soc = pdev->soc;
1141 	mon_pdev = pdev->monitor_pdev;
1142 	if (!mon_pdev)
1143 		return QDF_STATUS_E_INVAL;
1144 
1145 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1146 
1147 	if (dp_mon_pdev_initialize_tlv_logger(&mon_pdev_be->rx_tlv_log,
1148 					      MONITOR_TLV_RECORDING_RX))
1149 		return QDF_STATUS_E_FAILURE;
1150 
1151 	if (dp_mon_pdev_initialize_tlv_logger(&mon_pdev_be->tx_tlv_log,
1152 					      MONITOR_TLV_RECORDING_TX))
1153 		return QDF_STATUS_E_FAILURE;
1154 
1155 	return QDF_STATUS_SUCCESS;
1156 }
1157 
1158 /**
1159  * dp_mon_pdev_deinitialize_tlv_logger() - deinitialize dp_mon_tlv_logger for
1160  *					Rx and Tx
1161  *
1162  * @tlv_logger : double pointer to dp_mon_tlv_logger
1163  *
1164  * Return: QDF_STATUS
1165  */
1166 static QDF_STATUS
dp_mon_pdev_deinitialize_tlv_logger(struct dp_mon_tlv_logger ** tlv_logger)1167 dp_mon_pdev_deinitialize_tlv_logger(struct dp_mon_tlv_logger **tlv_logger)
1168 {
1169 	struct dp_mon_tlv_logger *tlv_log = *tlv_logger;
1170 
1171 	if (!tlv_log)
1172 		return QDF_STATUS_SUCCESS;
1173 	if (!(tlv_log->buff))
1174 		return QDF_STATUS_E_INVAL;
1175 
1176 	tlv_log->tlv_logging_enable = 0;
1177 	qdf_mem_free(tlv_log->buff);
1178 	tlv_log->buff = NULL;
1179 	qdf_mem_free(tlv_log);
1180 	tlv_log = NULL;
1181 	*tlv_logger = NULL;
1182 
1183 	return QDF_STATUS_SUCCESS;
1184 }
1185 
1186 /*
1187  * dp_mon_pdev_tlv_logger_deinit() - deinitializes struct dp_mon_tlv_logger
1188  *
1189  * @pdev: pointer to dp_pdev
1190  *
1191  * Return: QDF_STATUS
1192  */
1193 static
dp_mon_pdev_tlv_logger_deinit(struct dp_pdev * pdev)1194 QDF_STATUS dp_mon_pdev_tlv_logger_deinit(struct dp_pdev *pdev)
1195 {
1196 	struct dp_mon_pdev *mon_pdev = NULL;
1197 	struct dp_mon_pdev_be *mon_pdev_be = NULL;
1198 
1199 	if (!pdev)
1200 		return QDF_STATUS_E_INVAL;
1201 
1202 	mon_pdev = pdev->monitor_pdev;
1203 	if (!mon_pdev)
1204 		return QDF_STATUS_E_INVAL;
1205 
1206 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1207 
1208 	if (dp_mon_pdev_deinitialize_tlv_logger(&mon_pdev_be->rx_tlv_log))
1209 		return QDF_STATUS_E_FAILURE;
1210 	if (dp_mon_pdev_deinitialize_tlv_logger(&mon_pdev_be->tx_tlv_log))
1211 		return QDF_STATUS_E_FAILURE;
1212 
1213 	return QDF_STATUS_SUCCESS;
1214 }
1215 
#else

/* Stub: monitor TLV recording compiled out */
static inline
QDF_STATUS dp_mon_pdev_tlv_logger_init(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/* Stub: monitor TLV recording compiled out */
static inline
QDF_STATUS dp_mon_pdev_tlv_logger_deinit(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

#endif
1231 
1232 /**
1233  * dp_mon_register_feature_ops_2_0() - register feature ops
1234  *
1235  * @soc: dp soc context
1236  *
1237  * @return: void
1238  */
1239 static void
dp_mon_register_feature_ops_2_0(struct dp_soc * soc)1240 dp_mon_register_feature_ops_2_0(struct dp_soc *soc)
1241 {
1242 	struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);
1243 
1244 	if (!mon_ops) {
1245 		dp_err("mon_ops is NULL, feature ops registration failed");
1246 		return;
1247 	}
1248 
1249 	mon_ops->mon_config_debug_sniffer = dp_config_debug_sniffer;
1250 	mon_ops->mon_peer_tx_init = NULL;
1251 	mon_ops->mon_peer_tx_cleanup = NULL;
1252 	mon_ops->mon_htt_ppdu_stats_attach = dp_htt_ppdu_stats_attach;
1253 	mon_ops->mon_htt_ppdu_stats_detach = dp_htt_ppdu_stats_detach;
1254 	mon_ops->mon_print_pdev_rx_mon_stats = dp_print_pdev_rx_mon_stats;
1255 	mon_ops->mon_set_bsscolor = dp_mon_set_bsscolor;
1256 	mon_ops->mon_pdev_get_filter_ucast_data =
1257 					dp_lite_mon_get_filter_ucast_data;
1258 	mon_ops->mon_pdev_get_filter_mcast_data =
1259 					dp_lite_mon_get_filter_mcast_data;
1260 	mon_ops->mon_pdev_get_filter_non_data =
1261 					dp_lite_mon_get_filter_non_data;
1262 	mon_ops->mon_neighbour_peer_add_ast = NULL;
1263 #ifdef WLAN_TX_PKT_CAPTURE_ENH_BE
1264 	mon_ops->mon_peer_tid_peer_id_update = NULL;
1265 	mon_ops->mon_tx_capture_debugfs_init = NULL;
1266 	mon_ops->mon_tx_add_to_comp_queue = NULL;
1267 	mon_ops->mon_print_pdev_tx_capture_stats =
1268 					dp_print_pdev_tx_monitor_stats_2_0;
1269 	mon_ops->mon_config_enh_tx_capture = dp_config_enh_tx_monitor_2_0;
1270 	mon_ops->mon_tx_peer_filter = dp_peer_set_tx_capture_enabled_2_0;
1271 #endif
1272 #if (defined(WIFI_MONITOR_SUPPORT) && defined(WLAN_TX_MON_CORE_DEBUG))
1273 	mon_ops->mon_peer_tid_peer_id_update = NULL;
1274 	mon_ops->mon_tx_capture_debugfs_init = NULL;
1275 	mon_ops->mon_tx_add_to_comp_queue = NULL;
1276 	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
1277 	mon_ops->mon_config_enh_tx_capture = dp_config_enh_tx_core_monitor_2_0;
1278 	mon_ops->mon_tx_peer_filter = NULL;
1279 #endif
1280 #ifdef WLAN_RX_PKT_CAPTURE_ENH
1281 	mon_ops->mon_config_enh_rx_capture = NULL;
1282 #endif
1283 #ifdef QCA_SUPPORT_BPR
1284 	mon_ops->mon_set_bpr_enable = dp_set_bpr_enable_2_0;
1285 #endif
1286 #ifdef ATH_SUPPORT_NAC
1287 	mon_ops->mon_set_filter_neigh_peers = NULL;
1288 #endif
1289 #ifdef WLAN_ATF_ENABLE
1290 	mon_ops->mon_set_atf_stats_enable = dp_set_atf_stats_enable;
1291 #endif
1292 #ifdef FEATURE_NAC_RSSI
1293 	mon_ops->mon_filter_neighbour_peer = NULL;
1294 #endif
1295 #ifdef QCA_MCOPY_SUPPORT
1296 	mon_ops->mon_filter_setup_mcopy_mode = NULL;
1297 	mon_ops->mon_filter_reset_mcopy_mode = NULL;
1298 	mon_ops->mon_mcopy_check_deliver = NULL;
1299 #endif
1300 #ifdef QCA_ENHANCED_STATS_SUPPORT
1301 	mon_ops->mon_filter_setup_enhanced_stats =
1302 				dp_mon_filter_setup_enhanced_stats_2_0;
1303 	mon_ops->mon_filter_reset_enhanced_stats =
1304 				dp_mon_filter_reset_enhanced_stats_2_0;
1305 	mon_ops->mon_tx_enable_enhanced_stats =
1306 				dp_mon_tx_enable_enhanced_stats_2_0;
1307 	mon_ops->mon_tx_disable_enhanced_stats =
1308 				dp_mon_tx_disable_enhanced_stats_2_0;
1309 	mon_ops->mon_ppdu_stats_feat_enable_check =
1310 				dp_ppdu_stats_feat_enable_check_2_0;
1311 	mon_ops->mon_tx_stats_update = dp_mon_tx_stats_update_2_0;
1312 	mon_ops->mon_ppdu_desc_deliver = dp_ppdu_desc_deliver;
1313 #ifdef WDI_EVENT_ENABLE
1314 	mon_ops->mon_ppdu_desc_notify = dp_ppdu_desc_notify_2_0;
1315 #endif
1316 #endif
1317 #ifdef WLAN_RX_PKT_CAPTURE_ENH
1318 	mon_ops->mon_filter_setup_rx_enh_capture = NULL;
1319 #endif
1320 #ifdef WDI_EVENT_ENABLE
1321 	mon_ops->mon_set_pktlog_wifi3 = dp_set_pktlog_wifi3;
1322 	mon_ops->mon_filter_setup_rx_pkt_log_full =
1323 				dp_mon_filter_setup_rx_pkt_log_full_2_0;
1324 	mon_ops->mon_filter_reset_rx_pkt_log_full =
1325 				dp_mon_filter_reset_rx_pkt_log_full_2_0;
1326 	mon_ops->mon_filter_setup_rx_pkt_log_lite =
1327 				dp_mon_filter_setup_rx_pkt_log_lite_2_0;
1328 	mon_ops->mon_filter_reset_rx_pkt_log_lite =
1329 				dp_mon_filter_reset_rx_pkt_log_lite_2_0;
1330 	mon_ops->mon_filter_setup_rx_pkt_log_cbf =
1331 				dp_mon_filter_setup_rx_pkt_log_cbf_2_0;
1332 	mon_ops->mon_filter_reset_rx_pkt_log_cbf =
1333 				dp_mon_filter_reset_rx_pktlog_cbf_2_0;
1334 #if defined(BE_PKTLOG_SUPPORT) && defined(WLAN_PKT_CAPTURE_TX_2_0)
1335 	mon_ops->mon_filter_setup_pktlog_hybrid =
1336 				dp_mon_filter_setup_pktlog_hybrid_2_0;
1337 	mon_ops->mon_filter_reset_pktlog_hybrid =
1338 				dp_mon_filter_reset_pktlog_hybrid_2_0;
1339 #endif
1340 #endif
1341 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
1342 	mon_ops->mon_pktlogmod_exit = dp_pktlogmod_exit;
1343 #endif
1344 	mon_ops->rx_hdr_length_set = dp_rx_mon_hdr_length_set;
1345 	mon_ops->rx_packet_length_set = dp_rx_mon_packet_length_set;
1346 	mon_ops->rx_mon_enable = dp_rx_mon_enable_set;
1347 	mon_ops->rx_wmask_subscribe = dp_rx_mon_word_mask_subscribe;
1348 	mon_ops->rx_pkt_tlv_offset = dp_rx_mon_pkt_tlv_offset_subscribe;
1349 	mon_ops->rx_enable_mpdu_logging = dp_rx_mon_enable_mpdu_logging;
1350 	mon_ops->mon_neighbour_peers_detach = NULL;
1351 	mon_ops->mon_vdev_set_monitor_mode_buf_rings =
1352 				dp_vdev_set_monitor_mode_buf_rings_2_0;
1353 	mon_ops->mon_vdev_set_monitor_mode_rings =
1354 				dp_vdev_set_monitor_mode_rings_2_0;
1355 #ifdef QCA_ENHANCED_STATS_SUPPORT
1356 	mon_ops->mon_rx_stats_update = dp_rx_mon_stats_update_2_0;
1357 	mon_ops->mon_rx_populate_ppdu_usr_info =
1358 			dp_rx_mon_populate_ppdu_usr_info_2_0;
1359 	mon_ops->mon_rx_populate_ppdu_info = dp_rx_mon_populate_ppdu_info_2_0;
1360 #endif
1361 #ifdef QCA_UNDECODED_METADATA_SUPPORT
1362 	mon_ops->mon_config_undecoded_metadata_capture =
1363 		dp_mon_config_undecoded_metadata_capture;
1364 	mon_ops->mon_filter_setup_undecoded_metadata_capture =
1365 		dp_mon_filter_setup_undecoded_metadata_capture_2_0;
1366 	mon_ops->mon_filter_reset_undecoded_metadata_capture =
1367 		dp_mon_filter_reset_undecoded_metadata_capture_2_0;
1368 #endif
1369 	mon_ops->rx_enable_fpmo = dp_rx_mon_enable_fpmo;
1370 	mon_ops->mon_rx_print_advanced_stats =
1371 		dp_mon_rx_print_advanced_stats_2_0;
1372 	mon_ops->mon_mac_filter_set = NULL;
1373 }
1374 
/* Monitor ops template for the 2.0 (BE) target. Indexed entries ([0]/[1])
 * select between context variants; NULL marks ops unsupported on 2.0.
 * This table is copied per-soc by dp_mon_ops_register_2_0() and then
 * augmented by the tx/rx/cmn registration helpers.
 */
struct dp_mon_ops monitor_ops_2_0 = {
	.mon_soc_cfg_init = dp_mon_soc_cfg_init,
	.mon_soc_attach[0] = NULL,
	.mon_soc_attach[1] = dp_mon_soc_attach_2_0,
	.mon_soc_detach[0] = NULL,
	.mon_soc_detach[1] = dp_mon_soc_detach_2_0,
	.mon_soc_init[0] = NULL,
	.mon_soc_init[1] = dp_mon_soc_init_2_0,
	.mon_soc_deinit[0] = NULL,
	.mon_soc_deinit[1] = dp_mon_soc_deinit_2_0,
	.mon_pdev_alloc = dp_mon_pdev_alloc_2_0,
	.mon_pdev_free = dp_mon_pdev_free_2_0,
	.mon_pdev_attach = dp_mon_pdev_attach,
	.mon_pdev_detach = dp_mon_pdev_detach,
	.mon_pdev_init = dp_mon_pdev_init,
	.mon_pdev_deinit = dp_mon_pdev_deinit,
	.mon_vdev_attach = dp_mon_vdev_attach,
	.mon_vdev_detach = dp_mon_vdev_detach,
	.mon_peer_attach = dp_mon_peer_attach,
	.mon_peer_detach = dp_mon_peer_detach,
	.mon_peer_get_peerstats_ctx = dp_mon_peer_get_peerstats_ctx,
	.mon_peer_reset_stats = dp_mon_peer_reset_stats,
	.mon_peer_get_stats = dp_mon_peer_get_stats,
	.mon_invalid_peer_update_pdev_stats =
				dp_mon_invalid_peer_update_pdev_stats,
	.mon_peer_get_stats_param = dp_mon_peer_get_stats_param,
	.mon_flush_rings = NULL,
#if !defined(DISABLE_MON_CONFIG)
	.mon_pdev_htt_srng_setup[0] = NULL,
	.mon_pdev_htt_srng_setup[1] = dp_mon_pdev_htt_srng_setup_2_0,
	.mon_soc_htt_srng_setup = dp_mon_soc_htt_srng_setup_2_0,
#endif
#if defined(DP_CON_MON)
	.mon_service_rings = NULL,
#endif
#ifndef DISABLE_MON_CONFIG
	.mon_rx_process = NULL,
#endif
#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
	.mon_drop_packets_for_mac = NULL,
#endif
	/* vdev/reap timers are not used on the 2.0 target */
	.mon_vdev_timer_init = NULL,
	.mon_vdev_timer_start = NULL,
	.mon_vdev_timer_stop = NULL,
	.mon_vdev_timer_deinit = NULL,
	.mon_reap_timer_init = NULL,
	.mon_reap_timer_start = NULL,
	.mon_reap_timer_stop = NULL,
	.mon_reap_timer_deinit = NULL,
	.mon_filter_setup_tx_mon_mode = dp_mon_filter_setup_tx_mon_mode_2_0,
	.mon_filter_reset_tx_mon_mode = dp_mon_filter_reset_tx_mon_mode_2_0,
	.mon_rings_alloc[0] = NULL,
	.mon_rings_free[0] = NULL,
	.mon_rings_init[0] = NULL,
	.mon_rings_deinit[0] = NULL,
	.mon_rings_alloc[1] = dp_pdev_mon_rings_alloc_2_0,
	.mon_rings_free[1] = dp_pdev_mon_rings_free_2_0,
	.mon_rings_init[1] = dp_pdev_mon_rings_init_2_0,
	.mon_rings_deinit[1] = dp_pdev_mon_rings_deinit_2_0,
	.rx_mon_desc_pool_init = NULL,
	.rx_mon_desc_pool_deinit = NULL,
	.rx_mon_desc_pool_alloc = NULL,
	.rx_mon_desc_pool_free = NULL,
	.rx_mon_buffers_alloc = NULL,
	.rx_mon_buffers_free = NULL,
	.tx_mon_desc_pool_init = NULL,
	.tx_mon_desc_pool_deinit = NULL,
	.tx_mon_desc_pool_alloc = NULL,
	.tx_mon_desc_pool_free = NULL,
#ifndef DISABLE_MON_CONFIG
	.mon_register_intr_ops = dp_mon_register_intr_ops_2_0,
#endif
	.mon_register_feature_ops = dp_mon_register_feature_ops_2_0,
#ifdef WLAN_TX_PKT_CAPTURE_ENH_BE
	.mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach_2_0,
	.mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach_2_0,
	.mon_peer_tx_capture_filter_check = NULL,
#endif
#if (defined(WIFI_MONITOR_SUPPORT) && defined(WLAN_TX_MON_CORE_DEBUG))
	.mon_tx_ppdu_stats_attach = NULL,
	.mon_tx_ppdu_stats_detach = NULL,
	.mon_peer_tx_capture_filter_check = NULL,
#endif
	.mon_pdev_ext_init = dp_mon_pdev_ext_init_2_0,
	.mon_pdev_ext_deinit = dp_mon_pdev_ext_deinit_2_0,
	.mon_lite_mon_alloc = dp_lite_mon_alloc,
	.mon_lite_mon_dealloc = dp_lite_mon_dealloc,
	.mon_lite_mon_vdev_delete = dp_lite_mon_vdev_delete,
	.mon_lite_mon_disable_rx = dp_lite_mon_disable_rx,
	.mon_lite_mon_is_rx_adv_filter_enable = dp_lite_mon_is_rx_adv_filter_enable,
#ifdef QCA_KMEM_CACHE_SUPPORT
	.mon_rx_ppdu_info_cache_create = dp_rx_mon_ppdu_info_cache_create,
	.mon_rx_ppdu_info_cache_destroy = dp_rx_mon_ppdu_info_cache_destroy,
#endif
	.mon_rx_pdev_tlv_logger_init = dp_mon_pdev_tlv_logger_init,
	.mon_rx_pdev_tlv_logger_deinit = dp_mon_pdev_tlv_logger_deinit,
};
1472 
/* CDP (control-path) monitor ops exposed for the 2.0 target. NULL entries
 * are features not supported or not yet implemented on this target.
 */
struct cdp_mon_ops dp_ops_mon_2_0 = {
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = NULL,
	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
	.config_full_mon_mode = NULL,
	.soc_config_full_mon_mode = NULL,
	.get_mon_pdev_rx_stats = dp_pdev_get_rx_mon_stats,
	.txrx_enable_mon_reap_timer = NULL,
#ifdef QCA_ENHANCED_STATS_SUPPORT
	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	.txrx_update_filter_neighbour_peers = dp_lite_mon_config_nac_peer,
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
	.txrx_vdev_config_for_nac_rssi = dp_lite_mon_config_nac_rssi_peer,
	.txrx_vdev_get_neighbour_rssi = dp_lite_mon_get_nac_peer_rssi,
#endif
#ifdef QCA_SUPPORT_LITE_MONITOR
	.txrx_set_lite_mon_config = dp_lite_mon_set_config,
	.txrx_get_lite_mon_config = dp_lite_mon_get_config,
	.txrx_set_lite_mon_peer_config = dp_lite_mon_set_peer_config,
	.txrx_get_lite_mon_peer_config = dp_lite_mon_get_peer_config,
	.txrx_is_lite_mon_enabled = dp_lite_mon_is_enabled,
	.txrx_get_lite_mon_legacy_feature_enabled =
				dp_lite_mon_get_legacy_feature_enabled,
#endif
	.txrx_set_mon_pdev_params_rssi_dbm_conv =
				dp_mon_pdev_params_rssi_dbm_conv,
#ifdef WLAN_CONFIG_TELEMETRY_AGENT
	.txrx_update_pdev_mon_telemetry_airtime_stats =
			dp_pdev_update_telemetry_airtime_stats,
#endif
	.txrx_update_mon_mac_filter = NULL,
#ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE
	.start_local_pkt_capture = NULL,
	.stop_local_pkt_capture = NULL,
	.is_local_pkt_capture_running = NULL,
#endif /* WLAN_FEATURE_LOCAL_PKT_CAPTURE */
};
1515 
#if defined(WLAN_PKT_CAPTURE_TX_2_0) || \
defined(WLAN_PKT_CAPTURE_RX_2_0)
/**
 * dp_mon_ops_register_cmn_2_0() - register common 2.0 monitor filter ops
 * @mon_soc: monitor soc handle
 *
 * Return: void
 */
void dp_mon_ops_register_cmn_2_0(struct dp_mon_soc *mon_soc)
{
	struct dp_mon_ops *mon_ops = mon_soc->mon_ops;

	if (!mon_ops) {
		/* Fixed copy-paste: log said "tx 2.0" in the common path */
		dp_err("cmn 2.0 ops registration failed");
		return;
	}
	mon_ops->tx_mon_filter_alloc = dp_mon_filter_alloc_2_0;
	mon_ops->tx_mon_filter_dealloc = dp_mon_filter_dealloc_2_0;
}
#endif
1530 
#ifdef WLAN_PKT_CAPTURE_TX_2_0
/**
 * dp_mon_ops_register_tx_2_0() - register tx 2.0 monitor ops
 * @mon_soc: monitor soc handle
 *
 * Return: void
 */
void dp_mon_ops_register_tx_2_0(struct dp_mon_soc *mon_soc)
{
	struct dp_mon_ops *ops = mon_soc->mon_ops;

	if (!ops) {
		dp_err("tx 2.0 ops registration failed");
		return;
	}

	ops->tx_mon_filter_update = dp_tx_mon_filter_update_2_0;
#ifndef DISABLE_MON_CONFIG
	ops->mon_tx_process = dp_tx_mon_process_2_0;
	ops->print_txmon_ring_stat = dp_tx_mon_print_ring_stat_2_0;
#endif
}
#endif
1547 
#ifdef WLAN_PKT_CAPTURE_RX_2_0
/**
 * dp_mon_ops_register_rx_2_0() - register rx 2.0 monitor ops
 * @mon_soc: monitor soc handle
 *
 * Return: void
 */
void dp_mon_ops_register_rx_2_0(struct dp_mon_soc *mon_soc)
{
	struct dp_mon_ops *ops = mon_soc->mon_ops;

	if (!ops) {
		dp_err("rx 2.0 ops registration failed");
		return;
	}

	ops->mon_filter_setup_rx_mon_mode =
				dp_mon_filter_setup_rx_mon_mode_2_0;
	ops->mon_filter_reset_rx_mon_mode =
				dp_mon_filter_reset_rx_mon_mode_2_0;
	ops->rx_mon_filter_update = dp_rx_mon_filter_update_2_0;
}
#endif
1564 
1565 #ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
dp_mon_ops_register_2_0(struct dp_mon_soc * mon_soc)1566 void dp_mon_ops_register_2_0(struct dp_mon_soc *mon_soc)
1567 {
1568 	struct dp_mon_ops *mon_ops = NULL;
1569 
1570 	if (mon_soc->mon_ops) {
1571 		dp_mon_err("monitor ops is allocated");
1572 		return;
1573 	}
1574 
1575 	mon_ops = qdf_mem_malloc(sizeof(struct dp_mon_ops));
1576 	if (!mon_ops) {
1577 		dp_mon_err("Failed to allocate memory for mon ops");
1578 		return;
1579 	}
1580 
1581 	qdf_mem_copy(mon_ops, &monitor_ops_2_0, sizeof(struct dp_mon_ops));
1582 	mon_soc->mon_ops = mon_ops;
1583 	dp_mon_ops_register_tx_2_0(mon_soc);
1584 	dp_mon_ops_register_rx_2_0(mon_soc);
1585 	dp_mon_ops_register_cmn_2_0(mon_soc);
1586 }
1587 
dp_mon_cdp_ops_register_2_0(struct cdp_ops * ops)1588 void dp_mon_cdp_ops_register_2_0(struct cdp_ops *ops)
1589 {
1590 	struct cdp_mon_ops *mon_ops = NULL;
1591 
1592 	if (ops->mon_ops) {
1593 		dp_mon_err("cdp monitor ops is allocated");
1594 		return;
1595 	}
1596 
1597 	mon_ops = qdf_mem_malloc(sizeof(struct cdp_mon_ops));
1598 	if (!mon_ops) {
1599 		dp_mon_err("Failed to allocate memory for mon ops");
1600 		return;
1601 	}
1602 
1603 	qdf_mem_copy(mon_ops, &dp_ops_mon_2_0, sizeof(struct cdp_mon_ops));
1604 	ops->mon_ops = mon_ops;
1605 }
#else
/* Without per-soc ops support, all socs share the static tables directly */
void dp_mon_ops_register_2_0(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops = &monitor_ops_2_0;
}

void dp_mon_cdp_ops_register_2_0(struct cdp_ops *ops)
{
	ops->mon_ops = &dp_ops_mon_2_0;
}
#endif
1617 
1618 #ifdef QCA_ENHANCED_STATS_SUPPORT
1619 static void
dp_enable_enhanced_stats_for_each_pdev(struct dp_soc * soc,void * arg,int chip_id)1620 dp_enable_enhanced_stats_for_each_pdev(struct dp_soc *soc, void *arg,
1621 				       int chip_id) {
1622 	uint8_t i = 0;
1623 
1624 	for (i = 0; i < MAX_PDEV_CNT; i++)
1625 		dp_enable_enhanced_stats(dp_soc_to_cdp_soc_t(soc), i);
1626 }
1627 
1628 QDF_STATUS
dp_enable_enhanced_stats_2_0(struct cdp_soc_t * soc,uint8_t pdev_id)1629 dp_enable_enhanced_stats_2_0(struct cdp_soc_t *soc, uint8_t pdev_id)
1630 {
1631 	struct dp_soc *dp_soc = cdp_soc_t_to_dp_soc(soc);
1632 	struct dp_soc_be *be_soc = NULL;
1633 
1634 	be_soc = dp_get_be_soc_from_dp_soc(dp_soc);
1635 
1636 	/* enable only on one soc if MLD is disabled */
1637 	if (!be_soc->mlo_enabled || !be_soc->ml_ctxt) {
1638 		dp_enable_enhanced_stats(soc, pdev_id);
1639 		return QDF_STATUS_SUCCESS;
1640 	}
1641 
1642 	dp_mlo_iter_ptnr_soc(be_soc,
1643 			     dp_enable_enhanced_stats_for_each_pdev,
1644 			     NULL);
1645 	return QDF_STATUS_SUCCESS;
1646 }
1647 
1648 static void
dp_disable_enhanced_stats_for_each_pdev(struct dp_soc * soc,void * arg,int chip_id)1649 dp_disable_enhanced_stats_for_each_pdev(struct dp_soc *soc, void *arg,
1650 					int chip_id) {
1651 	uint8_t i = 0;
1652 
1653 	for (i = 0; i < MAX_PDEV_CNT; i++)
1654 		dp_disable_enhanced_stats(dp_soc_to_cdp_soc_t(soc), i);
1655 }
1656 
/**
 * dp_disable_enhanced_stats_2_0() - disable enhanced stats; fans out to all
 *	MLO partner socs when MLO is active
 * @soc: cdp soc handle
 * @pdev_id: pdev id to disable on (non-MLO case)
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_disable_enhanced_stats_2_0(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_soc *dp_soc = cdp_soc_t_to_dp_soc(soc);
	struct dp_soc_be *be_soc = NULL;

	be_soc = dp_get_be_soc_from_dp_soc(dp_soc);

	/* disable only on one soc if MLD is disabled */
	if (!be_soc->mlo_enabled || !be_soc->ml_ctxt) {
		dp_disable_enhanced_stats(soc, pdev_id);
		return QDF_STATUS_SUCCESS;
	}

	/* MLO: walk every partner soc and disable on all pdevs */
	dp_mlo_iter_ptnr_soc(be_soc,
			     dp_disable_enhanced_stats_for_each_pdev,
			     NULL);
	return QDF_STATUS_SUCCESS;
}
#endif
1677 
#ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE
/**
 * dp_local_pkt_capture_tx_config() - configure tx monitor buffer rings for
 *	local packet capture
 * @pdev: physical device handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_local_pkt_capture_tx_config(struct dp_pdev *pdev)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = pdev->soc->wlan_cfg_ctx;
	uint16_t ring_buffers;
	QDF_STATUS status;

	/* request a full ring's worth of buffers */
	ring_buffers = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(cfg_ctx);
	status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev,
							   ring_buffers);
	if (QDF_IS_STATUS_ERROR(status))
		dp_mon_err("Tx monitor buffer allocation failed");

	return status;
}
#endif
1697