/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_INTERNAL_H_
#define _DP_INTERNAL_H_

#include "dp_types.h"
#include "dp_htt.h"
#include "dp_rx_tid.h"

#define RX_BUFFER_SIZE_PKTLOG_LITE 1024

#define DP_PEER_WDS_COUNT_INVALID UINT_MAX

#define DP_BLOCKMEM_SIZE 4096
#define WBM2_SW_PPE_REL_RING_ID 6
#define WBM2_SW_PPE_REL_MAP_ID 11
#define DP_TX_PPEDS_POOL_ID 0xF

/* Alignment for consistent memory for DP rings */
#define DP_RING_BASE_ALIGN 32

#define DP_RSSI_INVAL 0x80
#define DP_RSSI_AVG_WEIGHT 2
/*
 * Formula to derive avg_rssi is taken from wifi2.0 firmware
 */
#define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \
	(((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \
	+ ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT)))
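
/*
 * Worked example (illustration only, not from the original source):
 * DP_GET_AVG_RSSI is an exponential moving average in which each new
 * sample contributes 1/(1 << DP_RSSI_AVG_WEIGHT) of its value, i.e. with
 * the default weight of 2:
 *
 *   avg' = avg - (avg >> 2) + (last >> 2)
 *
 * e.g. avg_rssi = 80, last_rssi = 96:
 *   avg' = 80 - 20 + 24 = 84
 * so the average moves a quarter of the way toward each new reading.
 */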

/* Macro for NYSM value received in VHT TLV */
#define VHT_SGI_NYSM 3

#define INVALID_WBM_RING_NUM 0xF

#ifdef FEATURE_DIRECT_LINK
#define DIRECT_LINK_REFILL_RING_ENTRIES 64
#ifdef IPA_OFFLOAD
#ifdef IPA_WDI3_VLAN_SUPPORT
#define DIRECT_LINK_REFILL_RING_IDX     4
#else
#define DIRECT_LINK_REFILL_RING_IDX     3
#endif
#else
#define DIRECT_LINK_REFILL_RING_IDX     2
#endif
#endif

#define DP_MAX_VLAN_IDS 4096
#define DP_VLAN_UNTAGGED 0
#define DP_VLAN_TAGGED_MULTICAST 1
#define DP_VLAN_TAGGED_UNICAST 2

/**
 * struct htt_dbgfs_cfg - structure to maintain required htt data
 * @msg_word: htt msg sent to upper layer
 * @m: qdf debugfs file pointer
 */
struct htt_dbgfs_cfg {
	uint32_t *msg_word;
	qdf_debugfs_file_t m;
};

/* Cookie MSB bits assigned for different use cases.
 * Note: Users can't use the last 3 bits, as they are reserved for the
 * pdev_id, in case the number of pdevs exceeds 3 in the future.
 */
/* Reserved for the default case */
#define DBG_STATS_COOKIE_DEFAULT 0x0

/* Reserved for DP Stats: 3rd bit */
#define DBG_STATS_COOKIE_DP_STATS BIT(3)

/* Reserved for HTT Stats debugfs support: 4th bit */
#define DBG_STATS_COOKIE_HTT_DBGFS BIT(4)

/* Reserved for HTT Stats sysfs support: 5th bit */
#define DBG_SYSFS_STATS_COOKIE BIT(5)

/* Reserved for HTT Stats OBSS PD support: 6th bit */
#define DBG_STATS_COOKIE_HTT_OBSS BIT(6)
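
/*
 * Illustrative sketch (assumption, not part of the original header): a
 * stats cookie pairs one of the DBG_* use-case bits above with the
 * pdev_id in the reserved low 3 bits, so the response handler can
 * recover both, e.g.:
 *
 *   uint64_t cookie = DBG_STATS_COOKIE_DP_STATS | (pdev_id & 0x7);
 */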

/*
 * Bitmap of HTT PPDU TLV types for Default mode
 */
#define HTT_PPDU_DEFAULT_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/* PPDU STATS CFG */
#define DP_PPDU_STATS_CFG_ALL 0xFFFF

/* PPDU stats mask sent to FW to enable enhanced stats */
#define DP_PPDU_STATS_CFG_ENH_STATS \
	((HTT_PPDU_DEFAULT_TLV_BITMAP) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support debug sniffer feature */
#define DP_PPDU_STATS_CFG_SNIFFER \
	((HTT_PPDU_DEFAULT_TLV_BITMAP) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
	(1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support BPR feature */
#define DP_PPDU_STATS_CFG_BPR \
	((1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
				   DP_PPDU_STATS_CFG_ENH_STATS)
/* PPDU stats mask sent to FW to support BPR and pktlog stats feature */
#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)

/*
 * Bitmap of HTT PPDU delayed BA TLV types for Default mode
 */
#define HTT_PPDU_DELAYED_BA_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV))

/*
 * Bitmap of HTT PPDU TLV types needed for PPDU status
 */
#define HTT_PPDU_STATUS_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/*
 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV))

/*
 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV))

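/*
 * Illustrative sketch (assumption, not part of the original header): the
 * bitmaps above are indexed by the HTT_PPDU_STATS_*_TLV enum values, so a
 * TLV handler can gate its work with a plain bit test, e.g.:
 *
 *   if (HTT_PPDU_DEFAULT_TLV_BITMAP & (1 << tlv_type))
 *       process_ppdu_tlv(tlv_type, tlv_buf);   // hypothetical helper
 */
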
static const enum cdp_packet_type hal_2_dp_pkt_type_map[HAL_DOT11_MAX] = {
	[HAL_DOT11A] = DOT11_A,
	[HAL_DOT11B] = DOT11_B,
	[HAL_DOT11N_MM] = DOT11_N,
	[HAL_DOT11AC] = DOT11_AC,
	[HAL_DOT11AX] = DOT11_AX,
	[HAL_DOT11BA] = DOT11_MAX,
#ifdef WLAN_FEATURE_11BE
	[HAL_DOT11BE] = DOT11_BE,
#else
	[HAL_DOT11BE] = DOT11_MAX,
#endif
	[HAL_DOT11AZ] = DOT11_MAX,
	[HAL_DOT11N_GF] = DOT11_MAX,
};

#ifdef WLAN_FEATURE_11BE
/**
 * dp_get_mcs_array_index_by_pkt_type_mcs() - get the destination mcs index
 *					      in array
 * @pkt_type: host SW pkt type
 * @mcs: mcs value for TX/RX rate
 *
 * Return: succeeded - valid index in mcs array
 *	   fail - same value as MCS_MAX
 */
static inline uint8_t
dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs)
{
	uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX;

	switch (pkt_type) {
	case DOT11_A:
		dst_mcs_idx =
			mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_B:
		dst_mcs_idx =
			mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_N:
		dst_mcs_idx =
			mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AC:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AX:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AX ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_BE:
		dst_mcs_idx =
			mcs >= MAX_MCS_11BE ? (MAX_MCS - 1) : mcs;
		break;
	default:
		break;
	}

	return dst_mcs_idx;
}
#else
static inline uint8_t
dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs)
{
	uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX;

	switch (pkt_type) {
	case DOT11_A:
		dst_mcs_idx =
			mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_B:
		dst_mcs_idx =
			mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_N:
		dst_mcs_idx =
			mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AC:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AX:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AX ? (MAX_MCS - 1) : mcs;
		break;
	default:
		break;
	}

	return dst_mcs_idx;
}
#endif

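/*
 * Usage sketch (assumption, not part of the original header): callers feed
 * the returned index into a per-rate histogram; out-of-range MCS values are
 * clamped into the overflow bucket (MAX_MCS - 1), e.g.:
 *
 *   uint8_t idx = dp_get_mcs_array_index_by_pkt_type_mcs(DOT11_AC, mcs);
 *
 *   if (idx != MCS_INVALID_ARRAY_INDEX)
 *       DP_STATS_INC(peer, rx.pkt_type[DOT11_AC].mcs_count[idx], 1);
 */
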
#ifdef WIFI_MONITOR_SUPPORT
QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc);
QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc);
#else
static inline
QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_rx_err_match_dhost() - function to check whether dest-mac is correct
 * @eh: Ethernet header of incoming packet
 * @vdev: dp_vdev object of the VAP on which this data packet is received
 *
 * Return: 1 if the destination mac is correct,
 *         0 if this frame is not correctly destined to this VAP/MLD
 */
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev);

#ifdef MONITOR_MODULARIZED_ENABLE
static inline bool dp_monitor_modularized_enable(void)
{
	return TRUE;
}

static inline QDF_STATUS
dp_mon_soc_attach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; }

static inline QDF_STATUS
dp_mon_soc_detach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; }
#else
static inline bool dp_monitor_modularized_enable(void)
{
	return FALSE;
}

static inline QDF_STATUS dp_mon_soc_attach_wrapper(struct dp_soc *soc)
{
	return dp_mon_soc_attach(soc);
}

static inline QDF_STATUS dp_mon_soc_detach_wrapper(struct dp_soc *soc)
{
	return dp_mon_soc_detach(soc);
}
#endif

#ifndef WIFI_MONITOR_SUPPORT
#define MON_BUF_MIN_ENTRIES 64

static inline QDF_STATUS dp_monitor_pdev_attach(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_pdev_detach(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_vdev_attach(struct dp_vdev *vdev)
{
	return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS dp_monitor_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS dp_monitor_peer_attach(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_peer_detach(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_E_FAILURE;
}

static inline struct cdp_peer_rate_stats_ctx*
dp_monitor_peer_get_peerstats_ctx(struct dp_soc *soc, struct dp_peer *peer)
{
	return NULL;
}

static inline
void dp_monitor_peer_reset_stats(struct dp_soc *soc, struct dp_peer *peer)
{
}

static inline
void dp_monitor_peer_get_stats(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg, enum cdp_stat_update_type type)
{
}

static inline
void dp_monitor_invalid_peer_update_pdev_stats(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_peer_get_stats_param(struct dp_soc *soc,
					   struct dp_peer *peer,
					   enum cdp_peer_stats_type type,
					   cdp_peer_stats_param_t *buf)
{
	return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS dp_monitor_pdev_init(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_pdev_deinit(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_soc_cfg_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_config_debug_sniffer(struct dp_pdev *pdev,
							 int val)
{
	return QDF_STATUS_E_FAILURE;
}

static inline void dp_monitor_flush_rings(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_monitor_htt_srng_setup(struct dp_soc *soc,
						   struct dp_pdev *pdev,
						   int mac_id,
						   int mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_monitor_service_mon_rings(struct dp_soc *soc,
						uint32_t quota)
{
}

static inline
uint32_t dp_monitor_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			    uint32_t mac_id, uint32_t quota)
{
	return 0;
}

static inline
uint32_t dp_monitor_drop_packets_for_mac(struct dp_pdev *pdev,
					 uint32_t mac_id, uint32_t quota)
{
	return 0;
}

static inline void dp_monitor_peer_tx_init(struct dp_pdev *pdev,
					   struct dp_peer *peer)
{
}

static inline void dp_monitor_peer_tx_cleanup(struct dp_vdev *vdev,
					      struct dp_peer *peer)
{
}

static inline
void dp_monitor_peer_tid_peer_id_update(struct dp_soc *soc,
					struct dp_peer *peer,
					uint16_t peer_id)
{
}

static inline void dp_monitor_tx_ppdu_stats_attach(struct dp_pdev *pdev)
{
}

static inline void dp_monitor_tx_ppdu_stats_detach(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_tx_capture_debugfs_init(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_monitor_peer_tx_capture_filter_check(struct dp_pdev *pdev,
							   struct dp_peer *peer)
{
}

static inline
QDF_STATUS dp_monitor_tx_add_to_comp_queue(struct dp_soc *soc,
					   struct dp_tx_desc_s *desc,
					   struct hal_tx_completion_status *ts,
					   uint16_t peer_id)
{
	return QDF_STATUS_E_FAILURE;
}

static inline
QDF_STATUS monitor_update_msdu_to_list(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf)
{
	return QDF_STATUS_E_FAILURE;
}

static inline bool dp_monitor_ppdu_stats_ind_handler(struct htt_soc *soc,
						     uint32_t *msg_word,
						     qdf_nbuf_t htt_t2h_msg)
{
	return true;
}

static inline QDF_STATUS dp_monitor_htt_ppdu_stats_attach(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_monitor_htt_ppdu_stats_detach(struct dp_pdev *pdev)
{
}

static inline void dp_monitor_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
{
}

static inline QDF_STATUS dp_monitor_config_enh_tx_capture(struct dp_pdev *pdev,
							  uint32_t val)
{
	return QDF_STATUS_E_INVAL;
}

static inline QDF_STATUS dp_monitor_tx_peer_filter(struct dp_pdev *pdev,
						   struct dp_peer *peer,
						   uint8_t is_tx_pkt_cap_enable,
						   uint8_t *peer_mac)
{
	return QDF_STATUS_E_INVAL;
}

static inline QDF_STATUS dp_monitor_config_enh_rx_capture(struct dp_pdev *pdev,
							  uint32_t val)
{
	return QDF_STATUS_E_INVAL;
}

static inline
QDF_STATUS dp_monitor_set_bpr_enable(struct dp_pdev *pdev, uint32_t val)
{
	return QDF_STATUS_E_FAILURE;
}

static inline
int dp_monitor_set_filter_neigh_peers(struct dp_pdev *pdev, bool val)
{
	return 0;
}

static inline
void dp_monitor_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
{
}

static inline
void dp_monitor_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
{
}

static inline
bool dp_monitor_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
{
	return false;
}

static inline
bool dp_monitor_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
{
	return false;
}

static inline
bool dp_monitor_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
{
	return false;
}

static inline
int dp_monitor_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
				bool enable)
{
	return 0;
}

static inline void dp_monitor_pktlogmod_exit(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev)
{
	return QDF_STATUS_E_FAILURE;
}

static inline
void dp_monitor_neighbour_peers_detach(struct dp_pdev *pdev)
{
}

static inline QDF_STATUS dp_monitor_filter_neighbour_peer(struct dp_pdev *pdev,
							  uint8_t *rx_pkt_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

static inline void dp_monitor_print_pdev_tx_capture_stats(struct dp_pdev *pdev)
{
}

static inline
void dp_monitor_reap_timer_init(struct dp_soc *soc)
{
}

static inline
void dp_monitor_reap_timer_deinit(struct dp_soc *soc)
{
}

static inline
bool dp_monitor_reap_timer_start(struct dp_soc *soc,
				 enum cdp_mon_reap_source source)
{
	return false;
}

static inline
bool dp_monitor_reap_timer_stop(struct dp_soc *soc,
				enum cdp_mon_reap_source source)
{
	return false;
}

static inline void
dp_monitor_reap_timer_suspend(struct dp_soc *soc)
{
}

static inline
void dp_monitor_vdev_timer_init(struct dp_soc *soc)
{
}

static inline
void dp_monitor_vdev_timer_deinit(struct dp_soc *soc)
{
}

static inline
void dp_monitor_vdev_timer_start(struct dp_soc *soc)
{
}

static inline
bool dp_monitor_vdev_timer_stop(struct dp_soc *soc)
{
	return false;
}

static inline struct qdf_mem_multi_page_t*
dp_monitor_get_link_desc_pages(struct dp_soc *soc, uint32_t mac_id)
{
	return NULL;
}

static inline struct dp_srng*
dp_monitor_get_link_desc_ring(struct dp_soc *soc, uint32_t mac_id)
{
	return NULL;
}

static inline uint32_t
dp_monitor_get_num_link_desc_ring_entries(struct dp_soc *soc)
{
	return 0;
}

static inline uint32_t *
dp_monitor_get_total_link_descs(struct dp_soc *soc, uint32_t mac_id)
{
	return NULL;
}

static inline QDF_STATUS dp_monitor_drop_inv_peer_pkts(struct dp_vdev *vdev)
{
	return QDF_STATUS_E_FAILURE;
}

static inline bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev)
{
	return false;
}

static inline void dp_monitor_vdev_register_osif(struct dp_vdev *vdev,
						 struct ol_txrx_ops *txrx_ops)
{
}

static inline bool dp_monitor_is_vdev_timer_running(struct dp_soc *soc)
{
	return false;
}

static inline
void dp_monitor_pdev_set_mon_vdev(struct dp_vdev *vdev)
{
}

static inline void dp_monitor_vdev_delete(struct dp_soc *soc,
					  struct dp_vdev *vdev)
{
}

static inline void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
{
}

static inline void dp_monitor_neighbour_peer_add_ast(struct dp_pdev *pdev,
						     struct dp_peer *ta_peer,
						     uint8_t *mac_addr,
						     qdf_nbuf_t nbuf,
						     uint32_t flags)
{
}

static inline void
dp_monitor_set_chan_band(struct dp_pdev *pdev, enum reg_wifi_band chan_band)
{
}

static inline void
dp_monitor_set_chan_freq(struct dp_pdev *pdev, qdf_freq_t chan_freq)
{
}

static inline void dp_monitor_set_chan_num(struct dp_pdev *pdev, int chan_num)
{
}

static inline bool dp_monitor_is_enable_mcopy_mode(struct dp_pdev *pdev)
{
	return false;
}

static inline
void dp_monitor_neighbour_peer_list_remove(struct dp_pdev *pdev,
					   struct dp_vdev *vdev,
					   struct dp_neighbour_peer *peer)
{
}

static inline bool dp_monitor_is_chan_band_known(struct dp_pdev *pdev)
{
	return false;
}

static inline enum reg_wifi_band
dp_monitor_get_chan_band(struct dp_pdev *pdev)
{
	return 0;
}

static inline int
dp_monitor_get_chan_num(struct dp_pdev *pdev)
{
	return 0;
}

static inline qdf_freq_t
dp_monitor_get_chan_freq(struct dp_pdev *pdev)
{
	return 0;
}

static inline void dp_monitor_get_mpdu_status(struct dp_pdev *pdev,
					      struct dp_soc *soc,
					      uint8_t *rx_tlv_hdr)
{
}

static inline void dp_monitor_print_tx_stats(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_mcopy_check_deliver(struct dp_pdev *pdev,
					  uint16_t peer_id, uint32_t ppdu_id,
					  uint8_t first_msdu)
{
	return QDF_STATUS_SUCCESS;
}

static inline bool dp_monitor_is_enable_tx_sniffer(struct dp_pdev *pdev)
{
	return false;
}

static inline struct dp_vdev*
dp_monitor_get_monitor_vdev_from_pdev(struct dp_pdev *pdev)
{
	return NULL;
}

static inline QDF_STATUS dp_monitor_check_com_info_ppdu_id(struct dp_pdev *pdev,
							   void *rx_desc)
{
	return QDF_STATUS_E_FAILURE;
}

static inline struct mon_rx_status*
dp_monitor_get_rx_status(struct dp_pdev *pdev)
{
	return NULL;
}

static inline
void dp_monitor_pdev_config_scan_spcl_vap(struct dp_pdev *pdev, bool val)
{
}

static inline
void dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(struct dp_pdev *pdev,
						      bool val)
{
}

static inline QDF_STATUS
dp_monitor_peer_tx_capture_get_stats(struct dp_soc *soc, struct dp_peer *peer,
				     struct cdp_peer_tx_capture_stats *stats)
{
	return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS
dp_monitor_pdev_tx_capture_get_stats(struct dp_soc *soc, struct dp_pdev *pdev,
				     struct cdp_pdev_tx_capture_stats *stats)
{
	return QDF_STATUS_E_FAILURE;
}

#ifdef DP_POWER_SAVE
static inline
void dp_monitor_pktlog_reap_pending_frames(struct dp_pdev *pdev)
{
}

static inline
void dp_monitor_pktlog_start_reap_timer(struct dp_pdev *pdev)
{
}
#endif

static inline bool dp_monitor_is_configured(struct dp_pdev *pdev)
{
	return false;
}

static inline void
dp_mon_rx_hdr_length_set(struct dp_soc *soc, uint32_t *msg_word,
			 struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void dp_monitor_soc_init(struct dp_soc *soc)
{
}

static inline void dp_monitor_soc_deinit(struct dp_soc *soc)
{
}

static inline
QDF_STATUS dp_monitor_config_undecoded_metadata_capture(struct dp_pdev *pdev,
							int val)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_monitor_config_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev,
						      int mask1, int mask2)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_monitor_get_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev,
						   int *mask, int *mask_cont)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_soc_htt_srng_setup(struct dp_soc *soc)
{
	return QDF_STATUS_E_FAILURE;
}

static inline bool dp_is_monitor_mode_using_poll(struct dp_soc *soc)
{
	return false;
}

static inline
uint32_t dp_tx_mon_buf_refill(struct dp_intr *int_ctx)
{
	return 0;
}

static inline uint32_t
dp_tx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
		  uint32_t mac_id, uint32_t quota)
{
	return 0;
}

static inline uint32_t
dp_print_txmon_ring_stat_from_hal(struct dp_pdev *pdev)
{
	return 0;
}

static inline
uint32_t dp_rx_mon_buf_refill(struct dp_intr *int_ctx)
{
	return 0;
}

static inline bool dp_monitor_is_tx_cap_enabled(struct dp_peer *peer)
{
	return false;
}

static inline bool dp_monitor_is_rx_cap_enabled(struct dp_peer *peer)
{
	return false;
}

static inline void
dp_rx_mon_enable(struct dp_soc *soc, uint32_t *msg_word,
		 struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_packet_length_set(struct dp_soc *soc, uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_enable_mpdu_logging(struct dp_soc *soc, uint32_t *msg_word,
			      struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_wmask_subscribe(struct dp_soc *soc, uint32_t *msg_word,
			  struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_mac_filter_set(struct dp_soc *soc, uint32_t *msg_word,
			 struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_enable_pkt_tlv_offset(struct dp_soc *soc, uint32_t *msg_word,
				struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_enable_fpmo(struct dp_soc *soc, uint32_t *msg_word,
		      struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

#ifdef WLAN_CONFIG_TELEMETRY_AGENT
static inline
void dp_monitor_peer_telemetry_stats(struct dp_peer *peer,
				     struct cdp_peer_telemetry_stats *stats)
{
}

static inline
void dp_monitor_peer_deter_stats(struct dp_peer *peer,
				 struct cdp_peer_telemetry_stats *stats)
{
}
#endif /* WLAN_CONFIG_TELEMETRY_AGENT */
#endif /* !WIFI_MONITOR_SUPPORT */

/**
 * cdp_soc_t_to_dp_soc() - typecast cdp_soc_t to
 * dp soc handle
 * @psoc: CDP psoc handle
 *
 * Return: struct dp_soc pointer
 */
static inline
struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc)
{
	return (struct dp_soc *)psoc;
}

#define DP_MAX_TIMER_EXEC_TIME_TICKS \
		(QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 * 20)

/**
 * enum timer_yield_status - yield status code used in monitor mode timer.
 * @DP_TIMER_NO_YIELD: do not yield
 * @DP_TIMER_WORK_DONE: yield because work is done
 * @DP_TIMER_WORK_EXHAUST: yield because work quota is exhausted
 * @DP_TIMER_TIME_EXHAUST: yield due to time slot exhausted
 */
enum timer_yield_status {
	DP_TIMER_NO_YIELD,
	DP_TIMER_WORK_DONE,
	DP_TIMER_WORK_EXHAUST,
	DP_TIMER_TIME_EXHAUST,
};
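
/*
 * Usage sketch (assumption, not part of the original header): a monitor
 * timer handler can bound its run time with these codes, e.g.:
 *
 *   uint64_t start = qdf_get_log_timestamp();
 *
 *   while (have_work()) {                       // hypothetical helpers
 *       reap_one_entry();
 *       if (qdf_get_log_timestamp() - start > DP_MAX_TIMER_EXEC_TIME_TICKS)
 *           return DP_TIMER_TIME_EXHAUST;
 *   }
 *   return DP_TIMER_WORK_DONE;
 */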

#if DP_PRINT_ENABLE
#include <qdf_types.h> /* qdf_vprint */
#include <cdp_txrx_handle.h>

enum {
	/* FATAL_ERR - print only irrecoverable error messages */
	DP_PRINT_LEVEL_FATAL_ERR,

	/* ERR - include non-fatal err messages */
	DP_PRINT_LEVEL_ERR,

	/* WARN - include warnings */
	DP_PRINT_LEVEL_WARN,

	/* INFO1 - include fundamental, infrequent events */
	DP_PRINT_LEVEL_INFO1,

	/* INFO2 - include non-fundamental but infrequent events */
	DP_PRINT_LEVEL_INFO2,
};

#define dp_print(level, fmt, ...) do { \
	if (level <= g_txrx_print_level) \
		qdf_print(fmt, ## __VA_ARGS__); \
} while (0)
#define DP_PRINT(level, fmt, ...) do { \
	dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#define DP_PRINT(level, fmt, ...)
#endif /* DP_PRINT_ENABLE */

#define DP_TRACE(LVL, fmt, args ...)                             \
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL,       \
		fmt, ## args)

#ifdef WLAN_SYSFS_DP_STATS
void DP_PRINT_STATS(const char *fmt, ...);
#else /* WLAN_SYSFS_DP_STATS */
#ifdef DP_PRINT_NO_CONSOLE
/* Stat prints should not go to console or kernel logs. */
#define DP_PRINT_STATS(fmt, args ...)\
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,       \
		  fmt, ## args)
#else
#define DP_PRINT_STATS(fmt, args ...)\
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,\
		  fmt, ## args)
#endif
#endif /* WLAN_SYSFS_DP_STATS */

#define DP_STATS_INIT(_handle) \
	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#define DP_TXRX_PEER_STATS_INIT(_handle, size) \
	qdf_mem_zero(&((_handle)->stats[0]), size)

#define DP_STATS_CLR(_handle) \
	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#define DP_TXRX_PEER_STATS_CLR(_handle, size) \
	qdf_mem_zero(&((_handle)->stats[0]), size)

#ifndef DISABLE_DP_STATS
#define DP_STATS_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_PEER_LINK_STATS_INC(_handle, _field, _delta, _link) \
{ \
	if (likely(_handle)) \
		_handle->stats[_link]._field += _delta; \
}

#define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->_field += _delta; \
}

#define DP_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
	if (_cond && likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_PEER_LINK_STATS_INCC(_handle, _field, _delta, _cond, _link) \
{ \
	if (_cond && likely(_handle)) \
		_handle->stats[_link]._field += _delta; \
}

#define DP_STATS_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field -= _delta; \
}

#define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->_field -= _delta; \
}

#define DP_STATS_UPD(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field = _delta; \
}

#define DP_PEER_LINK_STATS_UPD(_handle, _field, _delta, _link) \
{ \
	if (likely(_handle)) \
		_handle->stats[_link]._field = _delta; \
}

#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \
{ \
	DP_STATS_INC(_handle, _field.num, _count); \
	DP_STATS_INC(_handle, _field.bytes, _bytes) \
}

#define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) \
{ \
	DP_PEER_STATS_FLAT_INC(_handle, _field.num, _count); \
	DP_PEER_STATS_FLAT_INC(_handle, _field.bytes, _bytes) \
}

#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
{ \
	DP_STATS_INCC(_handle, _field.num, _count, _cond); \
	DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
}

#define DP_STATS_AGGR(_handle_a, _handle_b, _field) \
{ \
	_handle_a->stats._field += _handle_b->stats._field; \
}

#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \
{ \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\
}

#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \
{ \
	_handle_a->stats._field = _handle_b->stats._field; \
}

#else
#define DP_STATS_INC(_handle, _field, _delta)
#define DP_PEER_LINK_STATS_INC(_handle, _field, _delta, _link)
#define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta)
#define DP_STATS_INCC(_handle, _field, _delta, _cond)
#define DP_PEER_LINK_STATS_INCC(_handle, _field, _delta, _cond, _link)
#define DP_STATS_DEC(_handle, _field, _delta)
#define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta)
#define DP_STATS_UPD(_handle, _field, _delta)
#define DP_PEER_LINK_STATS_UPD(_handle, _field, _delta, _link)
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
#define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
#define DP_STATS_AGGR(_handle_a, _handle_b, _field)
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field)
#endif
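
/*
 * Usage sketch (assumption, not part of the original header): the accessor
 * macros above NULL-check the handle and compile out entirely when
 * DISABLE_DP_STATS is set, so callers can update counters unconditionally:
 *
 *   DP_STATS_INC(pdev, tx.tx_failed, 1);
 *   DP_STATS_INC_PKT(vdev, rx.to_stack, 1, pkt_len);       // assumed handles
 *   DP_STATS_INCC(pdev, rx.err.mic_err, 1, mic_failure);   // assumed cond
 */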

#define DP_PEER_PER_PKT_STATS_INC(_handle, _field, _delta, _link) \
{ \
	DP_PEER_LINK_STATS_INC(_handle, per_pkt_stats._field, _delta, _link); \
}

#define DP_PEER_PER_PKT_STATS_INCC(_handle, _field, _delta, _cond, _link) \
{ \
	DP_PEER_LINK_STATS_INCC(_handle, per_pkt_stats._field, _delta, _cond, _link); \
}

#define DP_PEER_PER_PKT_STATS_INC_PKT(_handle, _field, _count, _bytes, _link) \
{ \
	DP_PEER_PER_PKT_STATS_INC(_handle, _field.num, _count, _link); \
	DP_PEER_PER_PKT_STATS_INC(_handle, _field.bytes, _bytes, _link) \
}

#define DP_PEER_PER_PKT_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond, _link) \
{ \
	DP_PEER_PER_PKT_STATS_INCC(_handle, _field.num, _count, _cond, _link); \
	DP_PEER_PER_PKT_STATS_INCC(_handle, _field.bytes, _bytes, _cond, _link) \
}

#define DP_PEER_PER_PKT_STATS_UPD(_handle, _field, _delta, _link) \
{ \
	DP_PEER_LINK_STATS_UPD(_handle, per_pkt_stats._field, _delta, _link); \
}

#ifndef QCA_ENHANCED_STATS_SUPPORT
#define DP_PEER_EXTD_STATS_INC(_handle, _field, _delta, _link) \
{ \
	DP_PEER_LINK_STATS_INC(_handle, extd_stats._field, _delta, _link); \
}

#define DP_PEER_EXTD_STATS_INCC(_handle, _field, _delta, _cond, _link) \
{ \
	DP_PEER_LINK_STATS_INCC(_handle, extd_stats._field, _delta, _cond, _link); \
}

#define DP_PEER_EXTD_STATS_UPD(_handle, _field, _delta, _link) \
{ \
	DP_PEER_LINK_STATS_UPD(_handle, extd_stats._field, _delta, _link); \
}
#endif

#if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
	defined(QCA_ENHANCED_STATS_SUPPORT)
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
	if (_cond || !(_handle->hw_txrx_stats_en)) \
		DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \
}

#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
{ \
	if (_cond || !(_handle->hw_txrx_stats_en)) \
		DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \
}

#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (_cond || !(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link); \
}

#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (_cond || !(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link); \
}

#define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (_cond || !(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link); \
}
#elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
		DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \
}

#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
		DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \
}

#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link); \
}

#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link); \
}

#define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link); \
}
#else
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
	DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes);

#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
	DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count);

#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
	DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link);

#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
	DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link);

#define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
	DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link);
#endif
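
/*
 * Usage sketch (assumption, not part of the original header): with VDEV
 * stats HW offload enabled, firmware owns these counters, so the host only
 * updates them when _cond forces a SW count or per-peer offload is off:
 *
 *   bool enh_flag = pdev->enhanced_stats_en;    // assumed condition
 *
 *   DP_PEER_TO_STACK_INCC_PKT(txrx_peer, msdu_cnt, byte_cnt, enh_flag);
 */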

#ifdef ENABLE_DP_HIST_STATS
#define DP_HIST_INIT() \
	uint32_t num_of_packets[MAX_PDEV_CNT] = {0};

#define DP_HIST_PACKET_COUNT_INC(_pdev_id) \
{ \
		++num_of_packets[_pdev_id]; \
}

#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
	do {                                                              \
		if (_p_cntrs == 1) {                                      \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_1, 1);             \
		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_2_20, 1);          \
		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_21_40, 1);         \
		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_41_60, 1);         \
		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_61_80, 1);         \
		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_81_100, 1);        \
		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_101_200, 1);       \
		} else if (_p_cntrs > 200) {                              \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_201_plus, 1);      \
		}                                                         \
	} while (0)

#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
	do {                                                              \
		if (_p_cntrs == 1) {                                      \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_1, 1);              \
		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_2_20, 1);           \
		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_21_40, 1);          \
		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_41_60, 1);          \
		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_61_80, 1);          \
		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_81_100, 1);         \
		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_101_200, 1);        \
		} else if (_p_cntrs > 200) {                              \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_201_plus, 1);       \
		}                                                         \
	} while (0)

#define DP_TX_HIST_STATS_PER_PDEV() \
	do { \
		uint8_t hist_stats = 0; \
		for (hist_stats = 0; hist_stats < soc->pdev_count; \
				hist_stats++) { \
			DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
					num_of_packets[hist_stats]); \
		} \
	} while (0)

#define DP_RX_HIST_STATS_PER_PDEV() \
	do { \
		uint8_t hist_stats = 0; \
		for (hist_stats = 0; hist_stats < soc->pdev_count; \
				hist_stats++) { \
			DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
					num_of_packets[hist_stats]); \
		} \
	} while (0)

#else
#define DP_HIST_INIT()
#define DP_HIST_PACKET_COUNT_INC(_pdev_id)
#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
#define DP_RX_HIST_STATS_PER_PDEV()
#define DP_TX_HIST_STATS_PER_PDEV()
#endif /* ENABLE_DP_HIST_STATS */
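
/*
 * Usage sketch (assumption, not part of the original header): a TX
 * completion loop batches the histogram update per pdev; note that the
 * *_PER_PDEV() macros expect a local `soc` variable to be in scope:
 *
 *   DP_HIST_INIT();
 *   while ((desc = next_completed_desc()))      // hypothetical iterator
 *       DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
 *   DP_TX_HIST_STATS_PER_PDEV();
 */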

#define FRAME_MASK_IPV4_ARP   1
#define FRAME_MASK_IPV4_DHCP  2
#define FRAME_MASK_IPV4_EAPOL 4
#define FRAME_MASK_IPV6_DHCP  8

static inline int dp_log2_ceil(unsigned int value)
{
	unsigned int tmp = value;
	int log2 = -1;

	while (tmp) {
		log2++;
		tmp >>= 1;
	}
	if ((1 << log2) != value)
		log2++;
	return log2;
}
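
/*
 * Worked examples (illustration only, not from the original source):
 * dp_log2_ceil() returns the ceiling of log2(value) for value > 0:
 *
 *   dp_log2_ceil(1) -> 0
 *   dp_log2_ceil(8) -> 3
 *   dp_log2_ceil(9) -> 4
 *
 * so (1 << dp_log2_ceil(n)) is the smallest power of two >= n, which is
 * convenient for sizing rings and hash tables.
 */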

#ifdef QCA_SUPPORT_PEER_ISOLATION
#define dp_get_peer_isolation(_peer) ((_peer)->isolation)

static inline void dp_set_peer_isolation(struct dp_txrx_peer *txrx_peer,
					 bool val)
{
	txrx_peer->isolation = val;
}

#else
#define dp_get_peer_isolation(_peer) (0)

static inline void dp_set_peer_isolation(struct dp_txrx_peer *peer, bool val)
{
}
#endif /* QCA_SUPPORT_PEER_ISOLATION */

bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev);

#ifdef QCA_SUPPORT_WDS_EXTENDED
static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer)
{
	txrx_peer->wds_ext.osif_peer = NULL;
	txrx_peer->wds_ext.init = 0;
}
#else
static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer)
{
}
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#ifdef QCA_HOST2FW_RXBUF_RING
static inline
struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
{
	return &pdev->rx_mac_buf_ring[lmac_id];
}
#else
static inline
struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
{
	return &pdev->soc->rx_refill_buf_ring[lmac_id];
}
#endif

/*
 * The lmac ID for a particular channel band is fixed.
 * 2.4GHz band uses lmac_id = 1
 * 5GHz/6GHz band uses lmac_id = 0
 */
#define DP_INVALID_LMAC_ID	(-1)
#define DP_MON_INVALID_LMAC_ID	(-1)
#define DP_MAC0_LMAC_ID	0
#define DP_MAC1_LMAC_ID	1

#ifdef FEATURE_TSO_STATS
/**
 * dp_init_tso_stats() - Clear tso stats
 * @pdev: pdev handle
 *
 * Return: None
 */
static inline
void dp_init_tso_stats(struct dp_pdev *pdev)
{
	if (pdev) {
		qdf_mem_zero(&((pdev)->stats.tso_stats),
			     sizeof((pdev)->stats.tso_stats));
		qdf_atomic_init(&pdev->tso_idx);
	}
}

/**
 * dp_stats_tso_segment_histogram_update() - TSO Segment Histogram
 * @pdev: pdev handle
 * @_p_cntrs: number of tso segments for a tso packet
 *
 * Return: None
 */
void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
					   uint8_t _p_cntrs);

/**
 * dp_tso_segment_update() - Collect tso segment information
 * @pdev: pdev handle
 * @stats_idx: tso packet number
 * @idx: tso segment number
 * @seg: tso segment
 *
 * Return: None
 */
void dp_tso_segment_update(struct dp_pdev *pdev,
			   uint32_t stats_idx,
			   uint8_t idx,
			   struct qdf_tso_seg_t seg);

/**
 * dp_tso_packet_update() - TSO Packet information
 * @pdev: pdev handle
 * @stats_idx: tso packet number
 * @msdu: nbuf handle
 * @num_segs: tso segments
 *
 * Return: None
 */
void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
			  qdf_nbuf_t msdu, uint16_t num_segs);

/**
 * dp_tso_segment_stats_update() - TSO Segment stats
 * @pdev: pdev handle
 * @stats_seg: tso segment list
 * @stats_idx: tso packet number
 *
 * Return: None
 */
void dp_tso_segment_stats_update(struct dp_pdev *pdev,
				 struct qdf_tso_seg_elem_t *stats_seg,
				 uint32_t stats_idx);

/**
 * dp_print_tso_stats() - dump tso statistics
 * @soc: soc handle
 * @level: verbosity level
 *
 * Return: None
 */
void dp_print_tso_stats(struct dp_soc *soc,
			enum qdf_stats_verbosity_level level);

/**
 * dp_txrx_clear_tso_stats() - clear tso stats
 * @soc: soc handle
 *
 * Return: None
 */
void dp_txrx_clear_tso_stats(struct dp_soc *soc);
#else
static inline
void dp_init_tso_stats(struct dp_pdev *pdev)
{
}

static inline
void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
					   uint8_t _p_cntrs)
{
}

static inline
void dp_tso_segment_update(struct dp_pdev *pdev,
			   uint32_t stats_idx,
			   uint8_t idx,
			   struct qdf_tso_seg_t seg)
{
}

static inline
void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
			  qdf_nbuf_t msdu, uint16_t num_segs)
{
}

static inline
void dp_tso_segment_stats_update(struct dp_pdev *pdev,
				 struct qdf_tso_seg_elem_t *stats_seg,
				 uint32_t stats_idx)
{
}

static inline
void dp_print_tso_stats(struct dp_soc *soc,
			enum qdf_stats_verbosity_level level)
{
}

static inline
void dp_txrx_clear_tso_stats(struct dp_soc *soc)
{
}
#endif /* FEATURE_TSO_STATS */

/**
 * dp_txrx_get_peer_per_pkt_stats_param() - Get peer per pkt stats param
 * @peer: DP peer handle
 * @type: Requested stats type
 * @buf: Buffer to hold the value
 *
 * Return: status success/failure
 */
QDF_STATUS dp_txrx_get_peer_per_pkt_stats_param(struct dp_peer *peer,
						enum cdp_peer_stats_type type,
						cdp_peer_stats_param_t *buf);

/**
 * dp_txrx_get_peer_extd_stats_param() - Get peer extd stats param
 * @peer: DP peer handle
 * @type: Requested stats type
 * @buf: Buffer to hold the value
 *
 * Return: status success/failure
 */
QDF_STATUS dp_txrx_get_peer_extd_stats_param(struct dp_peer *peer,
					     enum cdp_peer_stats_type type,
					     cdp_peer_stats_param_t *buf);

#define DP_HTT_T2H_HP_PIPE 5
/**
 * dp_update_pdev_stats(): Update the pdev stats
 * @tgtobj: pdev handle
 * @srcobj: vdev stats structure
 *
 * Update the pdev stats from the specified vdev stats
 *
 * Return: None
 */
void dp_update_pdev_stats(struct dp_pdev *tgtobj,
			  struct cdp_vdev_stats *srcobj);

/**
 * dp_update_vdev_ingress_stats(): Update the vdev ingress stats
 * @tgtobj: vdev handle
 *
 * Update the vdev ingress stats
 *
 * Return: None
 */
void dp_update_vdev_ingress_stats(struct dp_vdev *tgtobj);

/**
 * dp_update_vdev_rate_stats() - Update the vdev rate stats
 * @tgtobj: tgt buffer for vdev stats
 * @srcobj: srcobj vdev stats
 *
 * Return: None
 */
void dp_update_vdev_rate_stats(struct cdp_vdev_stats *tgtobj,
			       struct cdp_vdev_stats *srcobj);

/**
 * dp_update_pdev_ingress_stats(): Update the pdev ingress stats
 * @tgtobj: pdev handle
 * @srcobj: vdev stats structure
 *
 * Update the pdev ingress stats from the specified vdev stats
 *
 * Return: None
 */
void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj,
				  struct dp_vdev *srcobj);

/**
 * dp_update_vdev_stats(): Update the vdev stats
 * @soc: soc handle
 * @srcobj: DP_PEER object
 * @arg: pointer to vdev stats structure
 *
 * Update the vdev stats from the specified peer stats
 *
 * Return: None
 */
void dp_update_vdev_stats(struct dp_soc *soc,
			  struct dp_peer *srcobj,
			  void *arg);

/**
 * dp_update_vdev_stats_on_peer_unmap() - Update the vdev stats on peer unmap
 * @vdev: DP_VDEV handle
 * @peer: DP_PEER handle
 *
 * Return: None
 */
void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
					struct dp_peer *peer);

#ifdef IPA_OFFLOAD
#define DP_IPA_UPDATE_RX_STATS(__tgtobj, __srcobj) \
{ \
	DP_STATS_AGGR_PKT(__tgtobj, __srcobj, rx.rx_total); \
}

#define DP_IPA_UPDATE_PER_PKT_RX_STATS(__tgtobj, __srcobj) \
{ \
	(__tgtobj)->rx.rx_total.num += (__srcobj)->rx.rx_total.num; \
	(__tgtobj)->rx.rx_total.bytes += (__srcobj)->rx.rx_total.bytes; \
}
#else
#define DP_IPA_UPDATE_PER_PKT_RX_STATS(tgtobj, srcobj)

#define DP_IPA_UPDATE_RX_STATS(tgtobj, srcobj)
#endif
1739 
1740 #define DP_UPDATE_STATS(_tgtobj, _srcobj)	\
1741 	do {				\
1742 		uint8_t i;		\
1743 		uint8_t pream_type;	\
1744 		for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \
1745 			for (i = 0; i < MAX_MCS; i++) { \
1746 				DP_STATS_AGGR(_tgtobj, _srcobj, \
1747 					tx.pkt_type[pream_type].mcs_count[i]); \
1748 				DP_STATS_AGGR(_tgtobj, _srcobj, \
1749 					rx.pkt_type[pream_type].mcs_count[i]); \
1750 			} \
1751 		} \
1752 		  \
1753 		for (i = 0; i < MAX_BW; i++) { \
1754 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \
1755 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \
1756 		} \
1757 		  \
1758 		for (i = 0; i < SS_COUNT; i++) { \
1759 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \
1760 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \
1761 		} \
1762 		for (i = 0; i < WME_AC_MAX; i++) { \
1763 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \
1764 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \
1765 			DP_STATS_AGGR(_tgtobj, _srcobj, \
1766 				      tx.wme_ac_type_bytes[i]); \
1767 			DP_STATS_AGGR(_tgtobj, _srcobj, \
1768 				      rx.wme_ac_type_bytes[i]); \
1769 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \
1770 		\
1771 		} \
1772 		\
1773 		for (i = 0; i < MAX_GI; i++) { \
1774 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \
1775 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \
1776 		} \
1777 		\
1778 		for (i = 0; i < MAX_RECEPTION_TYPES; i++) \
1779 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \
1780 		\
1781 		if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) { \
1782 			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \
1783 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \
1784 		} \
1785 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \
1786 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \
1787 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \
1788 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \
1789 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \
1790 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \
1791 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \
1792 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \
1793 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \
1794 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \
1795 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \
1796 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \
1797 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \
1798 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \
1799 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \
1800 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \
1801 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \
1802 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \
1803 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \
1804 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \
1805 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_queue_disable); \
1806 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_no_match); \
1807 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_threshold); \
1808 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_link_desc_na); \
1809 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_drop); \
1810 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.mcast_vdev_drop); \
1811 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_rr); \
1812 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \
1813 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_total); \
1814 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_success); \
1815 								\
1816 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \
1817 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \
1818 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.fcserr); \
1819 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.pn_err); \
1820 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.oor_err); \
1821 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.jump_2k_err); \
1822 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.rxdma_wifi_parse_err); \
1823 		if (_srcobj->stats.rx.snr != 0) \
1824 			DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.snr); \
1825 		DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \
1826 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \
1827 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \
1828 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \
1829 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \
1830 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \
1831 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \
1832 								\
1833 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)	\
1834 			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \
1835 									\
1836 		for (i = 0; i <  CDP_MAX_LMACS; i++) \
1837 			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rx_lmac[i]); \
1838 									\
1839 		_srcobj->stats.rx.unicast.num = \
1840 			_srcobj->stats.rx.to_stack.num - \
1841 					_srcobj->stats.rx.multicast.num; \
1842 		_srcobj->stats.rx.unicast.bytes = \
1843 			_srcobj->stats.rx.to_stack.bytes - \
1844 					_srcobj->stats.rx.multicast.bytes; \
1845 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \
1846 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \
1847 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \
1848 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \
1849 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \
1850 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \
1851 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \
1852 								  \
1853 		_tgtobj->stats.tx.last_ack_rssi =	\
1854 			_srcobj->stats.tx.last_ack_rssi; \
1855 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \
1856 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.peer_unauth_rx_pkt_drop); \
1857 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.policy_check_drop); \
1858 		DP_IPA_UPDATE_RX_STATS(_tgtobj, _srcobj); \
	} while (0)
1860 
1861 #ifdef VDEV_PEER_PROTOCOL_COUNT
1862 #define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj) \
1863 { \
1864 	uint8_t j; \
1865 	for (j = 0; j < CDP_TRACE_MAX; j++) { \
1866 		_tgtobj->tx.protocol_trace_cnt[j].egress_cnt += \
1867 			_srcobj->tx.protocol_trace_cnt[j].egress_cnt; \
1868 		_tgtobj->tx.protocol_trace_cnt[j].ingress_cnt += \
1869 			_srcobj->tx.protocol_trace_cnt[j].ingress_cnt; \
1870 		_tgtobj->rx.protocol_trace_cnt[j].egress_cnt += \
1871 			_srcobj->rx.protocol_trace_cnt[j].egress_cnt; \
1872 		_tgtobj->rx.protocol_trace_cnt[j].ingress_cnt += \
1873 			_srcobj->rx.protocol_trace_cnt[j].ingress_cnt; \
1874 	} \
1875 }
1876 #else
1877 #define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj)
1878 #endif
1879 
1880 #ifdef WLAN_FEATURE_11BE
1881 #define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj) \
1882 	do { \
1883 		uint8_t i, mu_type; \
1884 		for (i = 0; i < MAX_MCS; i++) { \
1885 			_tgtobj->tx.su_be_ppdu_cnt.mcs_count[i] += \
1886 				_srcobj->tx.su_be_ppdu_cnt.mcs_count[i]; \
1887 			_tgtobj->rx.su_be_ppdu_cnt.mcs_count[i] += \
1888 				_srcobj->rx.su_be_ppdu_cnt.mcs_count[i]; \
1889 		} \
1890 		for (mu_type = 0; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \
1891 			for (i = 0; i < MAX_MCS; i++) { \
1892 				_tgtobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \
1893 					_srcobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \
1894 				_tgtobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \
1895 					_srcobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \
1896 			} \
1897 		} \
1898 		for (i = 0; i < MAX_PUNCTURED_MODE; i++) { \
1899 			_tgtobj->tx.punc_bw[i] += _srcobj->tx.punc_bw[i]; \
1900 			_tgtobj->rx.punc_bw[i] += _srcobj->rx.punc_bw[i]; \
1901 		} \
1902 	} while (0)
1903 #else
1904 #define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj)
1905 #endif
1906 
1907 #define DP_UPDATE_BASIC_STATS(_tgtobj, _srcobj) \
1908 	do { \
1909 		_tgtobj->tx.comp_pkt.num += _srcobj->tx.comp_pkt.num; \
1910 		_tgtobj->tx.comp_pkt.bytes += _srcobj->tx.comp_pkt.bytes; \
1911 		_tgtobj->tx.tx_failed += _srcobj->tx.tx_failed; \
1912 		_tgtobj->rx.to_stack.num += _srcobj->rx.to_stack.num; \
1913 		_tgtobj->rx.to_stack.bytes += _srcobj->rx.to_stack.bytes; \
1914 	} while (0)
1915 
1916 #define DP_UPDATE_PER_PKT_STATS(_tgtobj, _srcobj) \
1917 	do { \
1918 		uint8_t i; \
1919 		_tgtobj->tx.ucast.num += _srcobj->tx.ucast.num; \
1920 		_tgtobj->tx.ucast.bytes += _srcobj->tx.ucast.bytes; \
1921 		_tgtobj->tx.mcast.num += _srcobj->tx.mcast.num; \
1922 		_tgtobj->tx.mcast.bytes += _srcobj->tx.mcast.bytes; \
1923 		_tgtobj->tx.bcast.num += _srcobj->tx.bcast.num; \
1924 		_tgtobj->tx.bcast.bytes += _srcobj->tx.bcast.bytes; \
1925 		_tgtobj->tx.nawds_mcast.num += _srcobj->tx.nawds_mcast.num; \
1926 		_tgtobj->tx.nawds_mcast.bytes += \
1927 					_srcobj->tx.nawds_mcast.bytes; \
1928 		_tgtobj->tx.tx_success.num += _srcobj->tx.tx_success.num; \
1929 		_tgtobj->tx.tx_success.bytes += _srcobj->tx.tx_success.bytes; \
1930 		_tgtobj->tx.nawds_mcast_drop += _srcobj->tx.nawds_mcast_drop; \
1931 		_tgtobj->tx.ofdma += _srcobj->tx.ofdma; \
1932 		_tgtobj->tx.non_amsdu_cnt += _srcobj->tx.non_amsdu_cnt; \
1933 		_tgtobj->tx.amsdu_cnt += _srcobj->tx.amsdu_cnt; \
1934 		_tgtobj->tx.dropped.fw_rem.num += \
1935 					_srcobj->tx.dropped.fw_rem.num; \
1936 		_tgtobj->tx.dropped.fw_rem.bytes += \
1937 					_srcobj->tx.dropped.fw_rem.bytes; \
1938 		_tgtobj->tx.dropped.fw_rem_notx += \
1939 					_srcobj->tx.dropped.fw_rem_notx; \
1940 		_tgtobj->tx.dropped.fw_rem_tx += \
1941 					_srcobj->tx.dropped.fw_rem_tx; \
1942 		_tgtobj->tx.dropped.age_out += _srcobj->tx.dropped.age_out; \
1943 		_tgtobj->tx.dropped.fw_reason1 += \
1944 					_srcobj->tx.dropped.fw_reason1; \
1945 		_tgtobj->tx.dropped.fw_reason2 += \
1946 					_srcobj->tx.dropped.fw_reason2; \
1947 		_tgtobj->tx.dropped.fw_reason3 += \
1948 					_srcobj->tx.dropped.fw_reason3; \
1949 		_tgtobj->tx.dropped.fw_rem_queue_disable += \
1950 					_srcobj->tx.dropped.fw_rem_queue_disable; \
1951 		_tgtobj->tx.dropped.fw_rem_no_match += \
1952 					_srcobj->tx.dropped.fw_rem_no_match; \
1953 		_tgtobj->tx.dropped.drop_threshold += \
1954 					_srcobj->tx.dropped.drop_threshold; \
1955 		_tgtobj->tx.dropped.drop_link_desc_na += \
1956 					_srcobj->tx.dropped.drop_link_desc_na; \
1957 		_tgtobj->tx.dropped.invalid_drop += \
1958 					_srcobj->tx.dropped.invalid_drop; \
1959 		_tgtobj->tx.dropped.mcast_vdev_drop += \
1960 					_srcobj->tx.dropped.mcast_vdev_drop; \
1961 		_tgtobj->tx.dropped.invalid_rr += \
1962 					_srcobj->tx.dropped.invalid_rr; \
1963 		_tgtobj->tx.failed_retry_count += \
1964 					_srcobj->tx.failed_retry_count; \
1965 		_tgtobj->tx.retry_count += _srcobj->tx.retry_count; \
1966 		_tgtobj->tx.multiple_retry_count += \
1967 					_srcobj->tx.multiple_retry_count; \
1968 		_tgtobj->tx.tx_success_twt.num += \
1969 					_srcobj->tx.tx_success_twt.num; \
1970 		_tgtobj->tx.tx_success_twt.bytes += \
1971 					_srcobj->tx.tx_success_twt.bytes; \
1972 		_tgtobj->tx.last_tx_ts = _srcobj->tx.last_tx_ts; \
1973 		_tgtobj->tx.release_src_not_tqm += \
1974 					_srcobj->tx.release_src_not_tqm; \
1975 		for (i = 0; i < QDF_PROTO_SUBTYPE_MAX; i++) { \
1976 			_tgtobj->tx.no_ack_count[i] += \
1977 					_srcobj->tx.no_ack_count[i];\
1978 		} \
1979 		\
1980 		_tgtobj->rx.multicast.num += _srcobj->rx.multicast.num; \
1981 		_tgtobj->rx.multicast.bytes += _srcobj->rx.multicast.bytes; \
1982 		_tgtobj->rx.bcast.num += _srcobj->rx.bcast.num; \
1983 		_tgtobj->rx.bcast.bytes += _srcobj->rx.bcast.bytes; \
1984 		_tgtobj->rx.unicast.num += _srcobj->rx.unicast.num; \
1985 		_tgtobj->rx.unicast.bytes += _srcobj->rx.unicast.bytes; \
1986 		_tgtobj->rx.raw.num += _srcobj->rx.raw.num; \
1987 		_tgtobj->rx.raw.bytes += _srcobj->rx.raw.bytes; \
1988 		_tgtobj->rx.nawds_mcast_drop += _srcobj->rx.nawds_mcast_drop; \
1989 		_tgtobj->rx.mcast_3addr_drop += _srcobj->rx.mcast_3addr_drop; \
1990 		_tgtobj->rx.mec_drop.num += _srcobj->rx.mec_drop.num; \
1991 		_tgtobj->rx.mec_drop.bytes += _srcobj->rx.mec_drop.bytes; \
1992 		_tgtobj->rx.ppeds_drop.num += _srcobj->rx.ppeds_drop.num; \
1993 		_tgtobj->rx.ppeds_drop.bytes += _srcobj->rx.ppeds_drop.bytes; \
1994 		_tgtobj->rx.intra_bss.pkts.num += \
1995 					_srcobj->rx.intra_bss.pkts.num; \
1996 		_tgtobj->rx.intra_bss.pkts.bytes += \
1997 					_srcobj->rx.intra_bss.pkts.bytes; \
1998 		_tgtobj->rx.intra_bss.fail.num += \
1999 					_srcobj->rx.intra_bss.fail.num; \
2000 		_tgtobj->rx.intra_bss.fail.bytes += \
2001 					_srcobj->rx.intra_bss.fail.bytes; \
2002 		_tgtobj->rx.intra_bss.mdns_no_fwd += \
2003 					_srcobj->rx.intra_bss.mdns_no_fwd; \
2004 		_tgtobj->rx.err.mic_err += _srcobj->rx.err.mic_err; \
2005 		_tgtobj->rx.err.decrypt_err += _srcobj->rx.err.decrypt_err; \
2006 		_tgtobj->rx.err.fcserr += _srcobj->rx.err.fcserr; \
2007 		_tgtobj->rx.err.pn_err += _srcobj->rx.err.pn_err; \
2008 		_tgtobj->rx.err.oor_err += _srcobj->rx.err.oor_err; \
2009 		_tgtobj->rx.err.jump_2k_err += _srcobj->rx.err.jump_2k_err; \
2010 		_tgtobj->rx.err.rxdma_wifi_parse_err += \
2011 					_srcobj->rx.err.rxdma_wifi_parse_err; \
2012 		_tgtobj->rx.non_amsdu_cnt += _srcobj->rx.non_amsdu_cnt; \
2013 		_tgtobj->rx.amsdu_cnt += _srcobj->rx.amsdu_cnt; \
2014 		_tgtobj->rx.rx_retries += _srcobj->rx.rx_retries; \
2015 		_tgtobj->rx.multipass_rx_pkt_drop += \
2016 					_srcobj->rx.multipass_rx_pkt_drop; \
2017 		_tgtobj->rx.peer_unauth_rx_pkt_drop += \
2018 					_srcobj->rx.peer_unauth_rx_pkt_drop; \
2019 		_tgtobj->rx.policy_check_drop += \
2020 					_srcobj->rx.policy_check_drop; \
2021 		_tgtobj->rx.to_stack_twt.num += _srcobj->rx.to_stack_twt.num; \
2022 		_tgtobj->rx.to_stack_twt.bytes += \
2023 					_srcobj->rx.to_stack_twt.bytes; \
2024 		_tgtobj->rx.last_rx_ts = _srcobj->rx.last_rx_ts; \
2025 		for (i = 0; i < CDP_MAX_RX_RINGS; i++) { \
2026 			_tgtobj->rx.rcvd_reo[i].num += \
2027 					 _srcobj->rx.rcvd_reo[i].num; \
2028 			_tgtobj->rx.rcvd_reo[i].bytes += \
2029 					_srcobj->rx.rcvd_reo[i].bytes; \
2030 			_tgtobj->rx.rcvd.num += \
2031 					 _srcobj->rx.rcvd_reo[i].num; \
2032 			_tgtobj->rx.rcvd.bytes += \
2033 					_srcobj->rx.rcvd_reo[i].bytes; \
2034 		} \
2035 		for (i = 0; i < CDP_MAX_LMACS; i++) { \
2036 			_tgtobj->rx.rx_lmac[i].num += \
2037 					_srcobj->rx.rx_lmac[i].num; \
2038 			_tgtobj->rx.rx_lmac[i].bytes += \
2039 					_srcobj->rx.rx_lmac[i].bytes; \
2040 		} \
2041 		DP_IPA_UPDATE_PER_PKT_RX_STATS(_tgtobj, _srcobj); \
2042 		DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj); \
2043 	} while (0)
2044 
2045 #define DP_UPDATE_EXTD_STATS(_tgtobj, _srcobj) \
2046 	do { \
2047 		uint8_t i, pream_type, mu_type; \
2048 		_tgtobj->tx.stbc += _srcobj->tx.stbc; \
2049 		_tgtobj->tx.ldpc += _srcobj->tx.ldpc; \
2050 		_tgtobj->tx.retries += _srcobj->tx.retries; \
2051 		_tgtobj->tx.ampdu_cnt += _srcobj->tx.ampdu_cnt; \
2052 		_tgtobj->tx.non_ampdu_cnt += _srcobj->tx.non_ampdu_cnt; \
2053 		_tgtobj->tx.num_ppdu_cookie_valid += \
2054 					_srcobj->tx.num_ppdu_cookie_valid; \
2055 		_tgtobj->tx.tx_ppdus += _srcobj->tx.tx_ppdus; \
2056 		_tgtobj->tx.tx_mpdus_success += _srcobj->tx.tx_mpdus_success; \
2057 		_tgtobj->tx.tx_mpdus_tried += _srcobj->tx.tx_mpdus_tried; \
2058 		_tgtobj->tx.tx_rate = _srcobj->tx.tx_rate; \
2059 		_tgtobj->tx.last_tx_rate = _srcobj->tx.last_tx_rate; \
2060 		_tgtobj->tx.last_tx_rate_mcs = _srcobj->tx.last_tx_rate_mcs; \
2061 		_tgtobj->tx.mcast_last_tx_rate = \
2062 					_srcobj->tx.mcast_last_tx_rate; \
2063 		_tgtobj->tx.mcast_last_tx_rate_mcs = \
2064 					_srcobj->tx.mcast_last_tx_rate_mcs; \
2065 		_tgtobj->tx.rnd_avg_tx_rate = _srcobj->tx.rnd_avg_tx_rate; \
2066 		_tgtobj->tx.avg_tx_rate = _srcobj->tx.avg_tx_rate; \
2067 		_tgtobj->tx.tx_ratecode = _srcobj->tx.tx_ratecode; \
2068 		_tgtobj->tx.pream_punct_cnt += _srcobj->tx.pream_punct_cnt; \
2069 		_tgtobj->tx.ru_start = _srcobj->tx.ru_start; \
2070 		_tgtobj->tx.ru_tones = _srcobj->tx.ru_tones; \
2071 		_tgtobj->tx.last_ack_rssi = _srcobj->tx.last_ack_rssi; \
2072 		_tgtobj->tx.nss_info = _srcobj->tx.nss_info; \
2073 		_tgtobj->tx.mcs_info = _srcobj->tx.mcs_info; \
2074 		_tgtobj->tx.bw_info = _srcobj->tx.bw_info; \
2075 		_tgtobj->tx.gi_info = _srcobj->tx.gi_info; \
2076 		_tgtobj->tx.preamble_info = _srcobj->tx.preamble_info; \
2077 		_tgtobj->tx.retries_mpdu += _srcobj->tx.retries_mpdu; \
2078 		_tgtobj->tx.mpdu_success_with_retries += \
2079 					_srcobj->tx.mpdu_success_with_retries; \
2080 		_tgtobj->tx.rts_success = _srcobj->tx.rts_success; \
2081 		_tgtobj->tx.rts_failure = _srcobj->tx.rts_failure; \
2082 		_tgtobj->tx.bar_cnt = _srcobj->tx.bar_cnt; \
2083 		_tgtobj->tx.ndpa_cnt = _srcobj->tx.ndpa_cnt; \
2084 		for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \
2085 			for (i = 0; i < MAX_MCS; i++) \
2086 				_tgtobj->tx.pkt_type[pream_type].mcs_count[i] += \
2087 				_srcobj->tx.pkt_type[pream_type].mcs_count[i]; \
2088 		} \
2089 		for (i = 0; i < WME_AC_MAX; i++) { \
2090 			_tgtobj->tx.wme_ac_type[i] += _srcobj->tx.wme_ac_type[i]; \
2091 			_tgtobj->tx.wme_ac_type_bytes[i] += \
2092 					_srcobj->tx.wme_ac_type_bytes[i]; \
2093 			_tgtobj->tx.excess_retries_per_ac[i] += \
2094 					_srcobj->tx.excess_retries_per_ac[i]; \
2095 		} \
2096 		for (i = 0; i < MAX_GI; i++) { \
2097 			_tgtobj->tx.sgi_count[i] += _srcobj->tx.sgi_count[i]; \
2098 		} \
2099 		for (i = 0; i < SS_COUNT; i++) { \
2100 			_tgtobj->tx.nss[i] += _srcobj->tx.nss[i]; \
2101 		} \
2102 		for (i = 0; i < MAX_BW; i++) { \
2103 			_tgtobj->tx.bw[i] += _srcobj->tx.bw[i]; \
2104 		} \
2105 		for (i = 0; i < MAX_RU_LOCATIONS; i++) { \
2106 			_tgtobj->tx.ru_loc[i].num_msdu += \
2107 					_srcobj->tx.ru_loc[i].num_msdu; \
2108 			_tgtobj->tx.ru_loc[i].num_mpdu += \
2109 					_srcobj->tx.ru_loc[i].num_mpdu; \
2110 			_tgtobj->tx.ru_loc[i].mpdu_tried += \
2111 					_srcobj->tx.ru_loc[i].mpdu_tried; \
2112 		} \
2113 		for (i = 0; i < MAX_TRANSMIT_TYPES; i++) { \
2114 			_tgtobj->tx.transmit_type[i].num_msdu += \
2115 					_srcobj->tx.transmit_type[i].num_msdu; \
2116 			_tgtobj->tx.transmit_type[i].num_mpdu += \
2117 					_srcobj->tx.transmit_type[i].num_mpdu; \
2118 			_tgtobj->tx.transmit_type[i].mpdu_tried += \
2119 					_srcobj->tx.transmit_type[i].mpdu_tried; \
2120 		} \
2121 		for (i = 0; i < MAX_MU_GROUP_ID; i++) { \
2122 			_tgtobj->tx.mu_group_id[i] = _srcobj->tx.mu_group_id[i]; \
2123 		} \
2124 		_tgtobj->tx.tx_ucast_total.num += \
2125 				_srcobj->tx.tx_ucast_total.num;\
2126 		_tgtobj->tx.tx_ucast_total.bytes += \
2127 				 _srcobj->tx.tx_ucast_total.bytes;\
2128 		_tgtobj->tx.tx_ucast_success.num += \
2129 				_srcobj->tx.tx_ucast_success.num; \
2130 		_tgtobj->tx.tx_ucast_success.bytes += \
2131 				_srcobj->tx.tx_ucast_success.bytes; \
2132 		\
2133 		for (i = 0; i < CDP_RSSI_CHAIN_LEN; i++) \
2134 			_tgtobj->tx.rssi_chain[i] = _srcobj->tx.rssi_chain[i]; \
2135 		_tgtobj->rx.mpdu_cnt_fcs_ok += _srcobj->rx.mpdu_cnt_fcs_ok; \
2136 		_tgtobj->rx.mpdu_cnt_fcs_err += _srcobj->rx.mpdu_cnt_fcs_err; \
2137 		_tgtobj->rx.non_ampdu_cnt += _srcobj->rx.non_ampdu_cnt; \
2138 		_tgtobj->rx.ampdu_cnt += _srcobj->rx.ampdu_cnt; \
2139 		_tgtobj->rx.rx_mpdus += _srcobj->rx.rx_mpdus; \
2140 		_tgtobj->rx.rx_ppdus += _srcobj->rx.rx_ppdus; \
2141 		_tgtobj->rx.rx_rate = _srcobj->rx.rx_rate; \
2142 		_tgtobj->rx.last_rx_rate = _srcobj->rx.last_rx_rate; \
2143 		_tgtobj->rx.rnd_avg_rx_rate = _srcobj->rx.rnd_avg_rx_rate; \
2144 		_tgtobj->rx.avg_rx_rate = _srcobj->rx.avg_rx_rate; \
2145 		_tgtobj->rx.rx_ratecode = _srcobj->rx.rx_ratecode; \
2146 		_tgtobj->rx.avg_snr = _srcobj->rx.avg_snr; \
2147 		_tgtobj->rx.rx_snr_measured_time = \
2148 					_srcobj->rx.rx_snr_measured_time; \
2149 		_tgtobj->rx.snr = _srcobj->rx.snr; \
2150 		_tgtobj->rx.last_snr = _srcobj->rx.last_snr; \
2151 		_tgtobj->rx.nss_info = _srcobj->rx.nss_info; \
2152 		_tgtobj->rx.mcs_info = _srcobj->rx.mcs_info; \
2153 		_tgtobj->rx.bw_info = _srcobj->rx.bw_info; \
2154 		_tgtobj->rx.gi_info = _srcobj->rx.gi_info; \
2155 		_tgtobj->rx.preamble_info = _srcobj->rx.preamble_info; \
2156 		_tgtobj->rx.mpdu_retry_cnt += _srcobj->rx.mpdu_retry_cnt; \
2157 		_tgtobj->rx.bar_cnt = _srcobj->rx.bar_cnt; \
2158 		_tgtobj->rx.ndpa_cnt = _srcobj->rx.ndpa_cnt; \
2159 		for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \
2160 			for (i = 0; i < MAX_MCS; i++) { \
2161 				_tgtobj->rx.pkt_type[pream_type].mcs_count[i] += \
2162 					_srcobj->rx.pkt_type[pream_type].mcs_count[i]; \
2163 			} \
2164 		} \
2165 		for (i = 0; i < WME_AC_MAX; i++) { \
2166 			_tgtobj->rx.wme_ac_type[i] += _srcobj->rx.wme_ac_type[i]; \
2167 			_tgtobj->rx.wme_ac_type_bytes[i] += \
2168 					_srcobj->rx.wme_ac_type_bytes[i]; \
2169 		} \
2170 		for (i = 0; i < MAX_MCS; i++) { \
2171 			_tgtobj->rx.su_ax_ppdu_cnt.mcs_count[i] += \
2172 					_srcobj->rx.su_ax_ppdu_cnt.mcs_count[i]; \
2173 			_tgtobj->rx.rx_mpdu_cnt[i] += _srcobj->rx.rx_mpdu_cnt[i]; \
2174 		} \
		for (mu_type = 0; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \
2176 			_tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok += \
2177 				_srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok; \
2178 			_tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err += \
2179 				_srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err; \
2180 			for (i = 0; i < SS_COUNT; i++) \
2181 				_tgtobj->rx.rx_mu[mu_type].ppdu_nss[i] += \
2182 					_srcobj->rx.rx_mu[mu_type].ppdu_nss[i]; \
2183 			for (i = 0; i < MAX_MCS; i++) \
2184 				_tgtobj->rx.rx_mu[mu_type].ppdu.mcs_count[i] += \
2185 					_srcobj->rx.rx_mu[mu_type].ppdu.mcs_count[i]; \
2186 		} \
2187 		for (i = 0; i < MAX_RECEPTION_TYPES; i++) { \
2188 			_tgtobj->rx.reception_type[i] += \
2189 					_srcobj->rx.reception_type[i]; \
2190 			_tgtobj->rx.ppdu_cnt[i] += _srcobj->rx.ppdu_cnt[i]; \
2191 		} \
2192 		for (i = 0; i < MAX_GI; i++) { \
2193 			_tgtobj->rx.sgi_count[i] += _srcobj->rx.sgi_count[i]; \
2194 		} \
2195 		for (i = 0; i < SS_COUNT; i++) { \
2196 			_tgtobj->rx.nss[i] += _srcobj->rx.nss[i]; \
2197 			_tgtobj->rx.ppdu_nss[i] += _srcobj->rx.ppdu_nss[i]; \
2198 		} \
2199 		for (i = 0; i < MAX_BW; i++) { \
2200 			_tgtobj->rx.bw[i] += _srcobj->rx.bw[i]; \
2201 		} \
2202 		DP_UPDATE_11BE_STATS(_tgtobj, _srcobj); \
2203 	} while (0)
2204 
2205 #define DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj) \
2206 	do { \
2207 		DP_UPDATE_BASIC_STATS(_tgtobj, _srcobj); \
2208 		DP_UPDATE_PER_PKT_STATS(_tgtobj, _srcobj); \
2209 		DP_UPDATE_EXTD_STATS(_tgtobj, _srcobj); \
2210 	} while (0)
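
/*
 * Usage sketch (illustrative only): aggregating one unmapped peer's stats
 * buffer into a vdev-level buffer. The macro simply chains the BASIC,
 * PER_PKT and EXTD update macros above; counters are added, while
 * last-value fields (rates, timestamps) are overwritten:
 *
 *	struct cdp_vdev_stats vdev_stats = { 0 };
 *
 *	DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(&vdev_stats, peer_stats);
 */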
2211 
2212 #define DP_UPDATE_INGRESS_STATS(_tgtobj, _srcobj) \
2213 	do { \
2214 		uint8_t i = 0; \
2215 		_tgtobj->tx_i.rcvd.num += _srcobj->tx_i.rcvd.num; \
2216 		_tgtobj->tx_i.rcvd.bytes += _srcobj->tx_i.rcvd.bytes; \
2217 		_tgtobj->tx_i.rcvd_in_fast_xmit_flow += \
2218 					_srcobj->tx_i.rcvd_in_fast_xmit_flow; \
2219 		for (i = 0; i < CDP_MAX_TX_DATA_RINGS; i++) { \
2220 			_tgtobj->tx_i.rcvd_per_core[i] += \
2221 					_srcobj->tx_i.rcvd_per_core[i]; \
2222 		} \
2223 		_tgtobj->tx_i.processed.num += _srcobj->tx_i.processed.num; \
2224 		_tgtobj->tx_i.processed.bytes += \
2225 						_srcobj->tx_i.processed.bytes; \
2226 		_tgtobj->tx_i.reinject_pkts.num += \
2227 					_srcobj->tx_i.reinject_pkts.num; \
2228 		_tgtobj->tx_i.reinject_pkts.bytes += \
2229 					_srcobj->tx_i.reinject_pkts.bytes; \
2230 		_tgtobj->tx_i.inspect_pkts.num += \
2231 					_srcobj->tx_i.inspect_pkts.num; \
2232 		_tgtobj->tx_i.inspect_pkts.bytes += \
2233 				_srcobj->tx_i.inspect_pkts.bytes; \
2234 		_tgtobj->tx_i.nawds_mcast.num += \
2235 					_srcobj->tx_i.nawds_mcast.num; \
2236 		_tgtobj->tx_i.nawds_mcast.bytes += \
2237 					_srcobj->tx_i.nawds_mcast.bytes; \
2238 		_tgtobj->tx_i.bcast.num += _srcobj->tx_i.bcast.num; \
2239 		_tgtobj->tx_i.bcast.bytes += _srcobj->tx_i.bcast.bytes; \
2240 		_tgtobj->tx_i.raw.raw_pkt.num += \
2241 					_srcobj->tx_i.raw.raw_pkt.num; \
2242 		_tgtobj->tx_i.raw.raw_pkt.bytes += \
2243 					_srcobj->tx_i.raw.raw_pkt.bytes; \
2244 		_tgtobj->tx_i.raw.dma_map_error += \
2245 					_srcobj->tx_i.raw.dma_map_error; \
2246 		_tgtobj->tx_i.raw.invalid_raw_pkt_datatype += \
2247 				_srcobj->tx_i.raw.invalid_raw_pkt_datatype; \
2248 		_tgtobj->tx_i.raw.num_frags_overflow_err += \
2249 				_srcobj->tx_i.raw.num_frags_overflow_err; \
2250 		_tgtobj->tx_i.sg.sg_pkt.num += _srcobj->tx_i.sg.sg_pkt.num; \
2251 		_tgtobj->tx_i.sg.sg_pkt.bytes += \
2252 					_srcobj->tx_i.sg.sg_pkt.bytes; \
2253 		_tgtobj->tx_i.sg.non_sg_pkts.num += \
2254 					_srcobj->tx_i.sg.non_sg_pkts.num; \
2255 		_tgtobj->tx_i.sg.non_sg_pkts.bytes += \
2256 					_srcobj->tx_i.sg.non_sg_pkts.bytes; \
2257 		_tgtobj->tx_i.sg.dropped_host.num += \
2258 					_srcobj->tx_i.sg.dropped_host.num; \
2259 		_tgtobj->tx_i.sg.dropped_host.bytes += \
2260 					_srcobj->tx_i.sg.dropped_host.bytes; \
2261 		_tgtobj->tx_i.sg.dropped_target += \
2262 					_srcobj->tx_i.sg.dropped_target; \
2263 		_tgtobj->tx_i.sg.dma_map_error += \
2264 					_srcobj->tx_i.sg.dma_map_error; \
2265 		_tgtobj->tx_i.mcast_en.mcast_pkt.num += \
2266 					_srcobj->tx_i.mcast_en.mcast_pkt.num; \
2267 		_tgtobj->tx_i.mcast_en.mcast_pkt.bytes += \
2268 				_srcobj->tx_i.mcast_en.mcast_pkt.bytes; \
2269 		_tgtobj->tx_i.mcast_en.dropped_map_error += \
2270 				_srcobj->tx_i.mcast_en.dropped_map_error; \
2271 		_tgtobj->tx_i.mcast_en.dropped_self_mac += \
2272 				_srcobj->tx_i.mcast_en.dropped_self_mac; \
2273 		_tgtobj->tx_i.mcast_en.dropped_send_fail += \
2274 				_srcobj->tx_i.mcast_en.dropped_send_fail; \
2275 		_tgtobj->tx_i.mcast_en.ucast += _srcobj->tx_i.mcast_en.ucast; \
2276 		_tgtobj->tx_i.mcast_en.fail_seg_alloc += \
2277 					_srcobj->tx_i.mcast_en.fail_seg_alloc; \
2278 		_tgtobj->tx_i.mcast_en.clone_fail += \
2279 					_srcobj->tx_i.mcast_en.clone_fail; \
2280 		_tgtobj->tx_i.igmp_mcast_en.igmp_rcvd += \
2281 				_srcobj->tx_i.igmp_mcast_en.igmp_rcvd; \
2282 		_tgtobj->tx_i.igmp_mcast_en.igmp_ucast_converted += \
2283 			_srcobj->tx_i.igmp_mcast_en.igmp_ucast_converted; \
2284 		_tgtobj->tx_i.dropped.desc_na.num += \
2285 				_srcobj->tx_i.dropped.desc_na.num; \
2286 		_tgtobj->tx_i.dropped.desc_na.bytes += \
2287 				_srcobj->tx_i.dropped.desc_na.bytes; \
2288 		_tgtobj->tx_i.dropped.desc_na_exc_alloc_fail.num += \
2289 			_srcobj->tx_i.dropped.desc_na_exc_alloc_fail.num; \
2290 		_tgtobj->tx_i.dropped.desc_na_exc_alloc_fail.bytes += \
2291 			_srcobj->tx_i.dropped.desc_na_exc_alloc_fail.bytes; \
2292 		_tgtobj->tx_i.dropped.desc_na_exc_outstand.num += \
2293 			_srcobj->tx_i.dropped.desc_na_exc_outstand.num; \
2294 		_tgtobj->tx_i.dropped.desc_na_exc_outstand.bytes += \
2295 			_srcobj->tx_i.dropped.desc_na_exc_outstand.bytes; \
2296 		_tgtobj->tx_i.dropped.exc_desc_na.num += \
2297 				_srcobj->tx_i.dropped.exc_desc_na.num; \
2298 		_tgtobj->tx_i.dropped.exc_desc_na.bytes += \
2299 				_srcobj->tx_i.dropped.exc_desc_na.bytes; \
2300 		_tgtobj->tx_i.dropped.ring_full += \
2301 					_srcobj->tx_i.dropped.ring_full; \
2302 		_tgtobj->tx_i.dropped.enqueue_fail += \
2303 					_srcobj->tx_i.dropped.enqueue_fail; \
2304 		_tgtobj->tx_i.dropped.dma_error += \
2305 					_srcobj->tx_i.dropped.dma_error; \
2306 		_tgtobj->tx_i.dropped.res_full += \
2307 					_srcobj->tx_i.dropped.res_full; \
2308 		_tgtobj->tx_i.dropped.headroom_insufficient += \
2309 				_srcobj->tx_i.dropped.headroom_insufficient; \
2310 		_tgtobj->tx_i.dropped.fail_per_pkt_vdev_id_check += \
2311 			_srcobj->tx_i.dropped.fail_per_pkt_vdev_id_check; \
2312 		_tgtobj->tx_i.dropped.drop_ingress += \
2313 				_srcobj->tx_i.dropped.drop_ingress; \
2314 		_tgtobj->tx_i.dropped.invalid_peer_id_in_exc_path += \
2315 			_srcobj->tx_i.dropped.invalid_peer_id_in_exc_path; \
2316 		_tgtobj->tx_i.dropped.tx_mcast_drop += \
2317 					_srcobj->tx_i.dropped.tx_mcast_drop; \
2318 		_tgtobj->tx_i.dropped.fw2wbm_tx_drop += \
2319 					_srcobj->tx_i.dropped.fw2wbm_tx_drop; \
2320 		_tgtobj->tx_i.dropped.dropped_pkt.num = \
2321 			_tgtobj->tx_i.dropped.dma_error + \
2322 			_tgtobj->tx_i.dropped.ring_full + \
2323 			_tgtobj->tx_i.dropped.enqueue_fail + \
2324 			_tgtobj->tx_i.dropped.fail_per_pkt_vdev_id_check + \
2325 			_tgtobj->tx_i.dropped.desc_na.num + \
2326 			_tgtobj->tx_i.dropped.res_full + \
2327 			_tgtobj->tx_i.dropped.drop_ingress + \
2328 			_tgtobj->tx_i.dropped.headroom_insufficient + \
2329 			_tgtobj->tx_i.dropped.invalid_peer_id_in_exc_path + \
2330 			_tgtobj->tx_i.dropped.tx_mcast_drop + \
2331 			_tgtobj->tx_i.dropped.fw2wbm_tx_drop; \
2332 		_tgtobj->tx_i.dropped.dropped_pkt.bytes += \
2333 				_srcobj->tx_i.dropped.dropped_pkt.bytes; \
2334 		_tgtobj->tx_i.mesh.exception_fw += \
2335 					_srcobj->tx_i.mesh.exception_fw; \
2336 		_tgtobj->tx_i.mesh.completion_fw += \
2337 					_srcobj->tx_i.mesh.completion_fw; \
2338 		_tgtobj->tx_i.cce_classified += \
2339 					_srcobj->tx_i.cce_classified; \
2340 		_tgtobj->tx_i.cce_classified_raw += \
2341 					_srcobj->tx_i.cce_classified_raw; \
2342 		_tgtobj->tx_i.sniffer_rcvd.num += \
2343 					_srcobj->tx_i.sniffer_rcvd.num; \
2344 		_tgtobj->tx_i.sniffer_rcvd.bytes += \
2345 					_srcobj->tx_i.sniffer_rcvd.bytes; \
2346 		_tgtobj->rx_i.reo_rcvd_pkt.num += \
2347 					_srcobj->rx_i.reo_rcvd_pkt.num; \
2348 		_tgtobj->rx_i.reo_rcvd_pkt.bytes += \
2349 					_srcobj->rx_i.reo_rcvd_pkt.bytes; \
2350 		_tgtobj->rx_i.null_q_desc_pkt.num += \
2351 					_srcobj->rx_i.null_q_desc_pkt.num; \
2352 		_tgtobj->rx_i.null_q_desc_pkt.bytes += \
2353 					_srcobj->rx_i.null_q_desc_pkt.bytes; \
2354 		_tgtobj->rx_i.routed_eapol_pkt.num += \
2355 					_srcobj->rx_i.routed_eapol_pkt.num; \
2356 		_tgtobj->rx_i.routed_eapol_pkt.bytes += \
2357 					_srcobj->rx_i.routed_eapol_pkt.bytes; \
2358 	} while (0)
2359 /**
2360  * dp_peer_find_attach() - Allocates memory for peer objects
2361  * @soc: SoC handle
2362  *
2363  * Return: QDF_STATUS
2364  */
2365 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc);
2366 
2367 /**
2368  * dp_peer_find_detach() - Frees memory for peer objects
2369  * @soc: SoC handle
2370  *
2371  * Return: none
2372  */
2373 void dp_peer_find_detach(struct dp_soc *soc);
2374 
2375 /**
2376  * dp_peer_find_hash_add() - add peer to peer_hash_table
2377  * @soc: soc handle
2378  * @peer: peer handle
2379  *
2380  * Return: none
2381  */
2382 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer);
2383 
2384 /**
2385  * dp_peer_find_hash_remove() - remove peer from peer_hash_table
2386  * @soc: soc handle
2387  * @peer: peer handle
2388  *
2389  * Return: none
2390  */
2391 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer);
2392 
/**
 * dp_peer_find_hash_erase() - erase all peer entries from peer_hash_table
 * @soc: soc handle
 *
 * Return: none
 */
2394 void dp_peer_find_hash_erase(struct dp_soc *soc);
2395 
2396 /**
2397  * dp_peer_vdev_list_add() - add peer into vdev's peer list
2398  * @soc: soc handle
2399  * @vdev: vdev handle
2400  * @peer: peer handle
2401  *
2402  * Return: none
2403  */
2404 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
2405 			   struct dp_peer *peer);
2406 
2407 /**
2408  * dp_peer_vdev_list_remove() - remove peer from vdev's peer list
2409  * @soc: SoC handle
2410  * @vdev: VDEV handle
2411  * @peer: peer handle
2412  *
2413  * Return: none
2414  */
2415 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
2416 			      struct dp_peer *peer);
2417 
2418 /**
2419  * dp_peer_find_id_to_obj_add() - Add peer into peer_id table
2420  * @soc: SoC handle
2421  * @peer: peer handle
2422  * @peer_id: peer_id
2423  *
2424  * Return: None
2425  */
2426 void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
2427 				struct dp_peer *peer,
2428 				uint16_t peer_id);
2429 
2430 /**
2431  * dp_txrx_peer_attach_add() - Attach txrx_peer and add it to peer_id table
2432  * @soc: SoC handle
2433  * @peer: peer handle
2434  * @txrx_peer: txrx peer handle
2435  *
2436  * Return: None
2437  */
2438 void dp_txrx_peer_attach_add(struct dp_soc *soc,
2439 			     struct dp_peer *peer,
2440 			     struct dp_txrx_peer *txrx_peer);
2441 
2442 /**
2443  * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table
2444  * @soc: SoC handle
2445  * @peer_id: peer_id
2446  *
2447  * Return: None
2448  */
2449 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
2450 				   uint16_t peer_id);
2451 
2452 /**
2453  * dp_vdev_unref_delete() - check and process vdev delete
2454  * @soc: DP specific soc pointer
2455  * @vdev: DP specific vdev pointer
2456  * @mod_id: module id
2457  *
2458  */
2459 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
2460 			  enum dp_mod_id mod_id);
2461 
2462 /**
 * dp_peer_ppdu_delayed_ba_cleanup() - free the delayed ba ppdus allocated in peer
2464  * @peer: Datapath peer
2465  *
2466  * Return: void
2467  */
2468 void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer);
2469 
2470 /**
2471  * dp_peer_rx_init() - Initialize receive TID state
2472  * @pdev: Datapath pdev
2473  * @peer: Datapath peer
2474  *
2475  */
2476 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer);
2477 
2478 /**
2479  * dp_peer_cleanup() - Cleanup peer information
2480  * @vdev: Datapath vdev
2481  * @peer: Datapath peer
2482  *
2483  */
2484 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
2485 
2486 #ifdef DP_PEER_EXTENDED_API
2487 /**
2488  * dp_register_peer() - Register peer into physical device
2489  * @soc_hdl: data path soc handle
2490  * @pdev_id: device instance id
2491  * @sta_desc: peer description
2492  *
2493  * Register peer into physical device
2494  *
2495  * Return: QDF_STATUS_SUCCESS registration success
2496  *         QDF_STATUS_E_FAULT peer not found
2497  */
2498 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
2499 			    struct ol_txrx_desc_type *sta_desc);
2500 
2501 /**
2502  * dp_clear_peer() - remove peer from physical device
2503  * @soc_hdl: data path soc handle
2504  * @pdev_id: device instance id
2505  * @peer_addr: peer mac address
2506  *
2507  * remove peer from physical device
2508  *
 * Return: QDF_STATUS_SUCCESS on success
2510  *         QDF_STATUS_E_FAULT peer not found
2511  */
2512 QDF_STATUS dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
2513 			 struct qdf_mac_addr peer_addr);
2514 
2515 /**
 * dp_find_peer_exist_on_vdev() - find if peer exists on the given vdev
2517  * @soc_hdl: datapath soc handle
2518  * @vdev_id: vdev instance id
2519  * @peer_addr: peer mac address
2520  *
2521  * Return: true or false
2522  */
2523 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2524 				uint8_t *peer_addr);
2525 
2526 /**
 * dp_find_peer_exist_on_other_vdev() - find if peer exists
2528  * on other than the given vdev
2529  * @soc_hdl: datapath soc handle
2530  * @vdev_id: vdev instance id
2531  * @peer_addr: peer mac address
2532  * @max_bssid: max number of bssids
2533  *
2534  * Return: true or false
2535  */
2536 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
2537 				      uint8_t vdev_id, uint8_t *peer_addr,
2538 				      uint16_t max_bssid);
2539 
2540 /**
2541  * dp_peer_state_update() - update peer local state
2542  * @soc: datapath soc handle
2543  * @peer_mac: peer mac address
2544  * @state: new peer local state
2545  *
2546  * update peer local state
2547  *
 * Return: QDF_STATUS_SUCCESS on success
2549  */
2550 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc, uint8_t *peer_mac,
2551 				enum ol_txrx_peer_state state);
2552 
2553 /**
 * dp_get_vdevid() - Get the id of the vdev on which the peer is registered
 * @soc_hdl: datapath soc handle
 * @peer_mac: peer mac address
 * @vdev_id: buffer to hold the vdev id the peer is registered on
 *
 * Get the virtual interface id on which the given peer is registered
 *
 * Return: QDF_STATUS_SUCCESS on success
2562  */
2563 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
2564 			 uint8_t *vdev_id);
2565 
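/**
 * dp_get_vdev_by_peer_addr() - Get vdev to which the peer with the given
 * mac address belongs
 * @pdev_handle: device instance handle
 * @peer_addr: peer mac address
 *
 * Return: virtual interface instance pointer
 *         NULL if it cannot be found
 */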
2566 struct cdp_vdev *dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
2567 		struct qdf_mac_addr peer_addr);
2568 
2569 /**
 * dp_get_vdev_for_peer() - Get the vdev instance to which a peer belongs
 * @peer: peer instance
 *
 * Get the virtual interface instance to which the peer belongs
 *
 * Return: virtual interface instance pointer
 *         NULL if it cannot be found
2577  */
2578 struct cdp_vdev *dp_get_vdev_for_peer(void *peer);
2579 
2580 /**
2581  * dp_peer_get_peer_mac_addr() - Get peer mac address
2582  * @peer: peer instance
2583  *
2584  * Get peer mac address
2585  *
2586  * Return: peer mac address pointer
 *         NULL if it cannot be found
2588  */
2589 uint8_t *dp_peer_get_peer_mac_addr(void *peer);
2590 
2591 /**
2592  * dp_get_peer_state() - Get local peer state
2593  * @soc: datapath soc handle
2594  * @vdev_id: vdev id
2595  * @peer_mac: peer mac addr
2596  *
2597  * Get local peer state
2598  *
2599  * Return: peer status
2600  */
2601 int dp_get_peer_state(struct cdp_soc_t *soc, uint8_t vdev_id,
2602 		      uint8_t *peer_mac);
2603 
2604 /**
2605  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
2606  * @pdev: data path device instance
2607  *
2608  * local peer id pool alloc for physical device
2609  *
2610  * Return: none
2611  */
2612 void dp_local_peer_id_pool_init(struct dp_pdev *pdev);
2613 
2614 /**
2615  * dp_local_peer_id_alloc() - allocate local peer id
2616  * @pdev: data path device instance
2617  * @peer: new peer instance
2618  *
2619  * allocate local peer id
2620  *
2621  * Return: none
2622  */
2623 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer);
2624 
2625 /**
2626  * dp_local_peer_id_free() - remove local peer id
2627  * @pdev: data path device instance
 * @peer: peer instance to be removed
2629  *
2630  * remove local peer id
2631  *
2632  * Return: none
2633  */
2634 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer);
2635 
2636 /**
2637  * dp_set_peer_as_tdls_peer() - set tdls peer flag to peer
2638  * @soc_hdl: datapath soc handle
2639  * @vdev_id: vdev_id
2640  * @peer_mac: peer mac addr
2641  * @val: tdls peer flag
2642  *
2643  * Return: none
2644  */
2645 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2646 			      uint8_t *peer_mac, bool val);
2647 #else
2648 static inline
2649 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
2650 			 uint8_t *vdev_id)
2651 {
2652 	return QDF_STATUS_E_NOSUPPORT;
2653 }
2654 
2655 static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
2656 {
2657 }
2658 
2659 static inline
2660 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
2661 {
2662 }
2663 
2664 static inline
2665 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
2666 {
2667 }
2668 
2669 static inline
2670 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2671 			      uint8_t *peer_mac, bool val)
2672 {
2673 }
2674 #endif
2675 
2676 /**
 * dp_find_peer_exist() - check if a peer already exists
2678  * @soc_hdl: datapath soc handle
2679  * @pdev_id: physical device instance id
2680  * @peer_addr: peer mac address
2681  *
2682  * Return: true or false
2683  */
2684 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
2685 			uint8_t *peer_addr);
2686 
2687 #ifdef DP_UMAC_HW_RESET_SUPPORT
2688 /**
2689  * dp_pause_reo_send_cmd() - Pause Reo send commands.
2690  * @soc: dp soc
2691  *
2692  * Return: none
2693  */
2694 void dp_pause_reo_send_cmd(struct dp_soc *soc);
2695 
2696 /**
2697  * dp_resume_reo_send_cmd() - Resume Reo send commands.
2698  * @soc: dp soc
2699  *
2700  * Return: none
2701  */
2702 void dp_resume_reo_send_cmd(struct dp_soc *soc);
2703 
2704 /**
 * dp_cleanup_reo_cmd_module() - Clean up the reo cmd module
2706  * @soc: DP SoC handle
2707  *
2708  * Return: none
2709  */
2710 void dp_cleanup_reo_cmd_module(struct dp_soc *soc);
2711 
2712 /**
2713  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
2714  * @soc: DP SOC handle
2715  *
2716  * Return: none
2717  */
2718 void dp_reo_desc_freelist_destroy(struct dp_soc *soc);
2719 
2720 /**
2721  * dp_reset_rx_reo_tid_queue() - Reset the reo tid queues
2722  * @soc: dp soc
2723  * @hw_qdesc_vaddr: starting address of the tid queues
2724  * @size: size of the memory pointed to by hw_qdesc_vaddr
2725  *
2726  * Return: none
2727  */
2728 void dp_reset_rx_reo_tid_queue(struct dp_soc *soc, void *hw_qdesc_vaddr,
2729 			       uint32_t size);
2730 
2731 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
2732 /**
2733  * dp_umac_reset_complete_umac_recovery() - Complete Umac reset session
2734  * @soc: dp soc handle
2735  *
2736  * Return: void
2737  */
2738 void dp_umac_reset_complete_umac_recovery(struct dp_soc *soc);
2739 
2740 /**
2741  * dp_umac_reset_initiate_umac_recovery() - Initiate Umac reset session
2742  * @soc: dp soc handle
2743  * @umac_reset_ctx: Umac reset context
2744  * @rx_event: Rx event received
2745  * @is_target_recovery: Flag to indicate if it is triggered for target recovery
2746  *
2747  * Return: status
2748  */
2749 QDF_STATUS dp_umac_reset_initiate_umac_recovery(struct dp_soc *soc,
2750 				struct dp_soc_umac_reset_ctx *umac_reset_ctx,
2751 				enum umac_reset_rx_event rx_event,
2752 				bool is_target_recovery);
2753 
2754 /**
2755  * dp_umac_reset_handle_action_cb() - Function to call action callback
2756  * @soc: dp soc handle
2757  * @umac_reset_ctx: Umac reset context
2758  * @action: Action to call the callback for
2759  *
 * Return: QDF_STATUS
2761  */
2762 QDF_STATUS dp_umac_reset_handle_action_cb(struct dp_soc *soc,
2763 				struct dp_soc_umac_reset_ctx *umac_reset_ctx,
2764 				enum umac_reset_action action);
2765 
2766 /**
2767  * dp_umac_reset_post_tx_cmd() - Iterate partner socs and post Tx command
2768  * @umac_reset_ctx: UMAC reset context
2769  * @tx_cmd: Tx command to be posted
2770  *
2771  * Return: QDF status of operation
2772  */
2773 QDF_STATUS
2774 dp_umac_reset_post_tx_cmd(struct dp_soc_umac_reset_ctx *umac_reset_ctx,
2775 			  enum umac_reset_tx_cmd tx_cmd);
2776 
2777 /**
2778  * dp_umac_reset_initiator_check() - Check if soc is the Umac reset initiator
2779  * @soc: dp soc handle
2780  *
2781  * Return: true if the soc is initiator or false otherwise
2782  */
2783 bool dp_umac_reset_initiator_check(struct dp_soc *soc);
2784 
2785 /**
2786  * dp_umac_reset_target_recovery_check() - Check if this is for target recovery
2787  * @soc: dp soc handle
2788  *
2789  * Return: true if the session is for target recovery or false otherwise
2790  */
2791 bool dp_umac_reset_target_recovery_check(struct dp_soc *soc);
2792 
2793 /**
2794  * dp_umac_reset_is_soc_ignored() - Check if this soc is to be ignored
2795  * @soc: dp soc handle
2796  *
2797  * Return: true if the soc is ignored or false otherwise
2798  */
2799 bool dp_umac_reset_is_soc_ignored(struct dp_soc *soc);
2800 
2801 /**
2802  * dp_mlo_umac_reset_stats_print() - API to print MLO umac reset stats
2803  * @soc: dp soc handle
2804  *
2805  * Return: QDF_STATUS
2806  */
2807 QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc);
2808 #else
2809 static inline
2810 QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc)
2811 {
2812 	return QDF_STATUS_SUCCESS;
2813 }
2814 #endif
2815 
2816 #endif
2817 
2818 #if defined(DP_UMAC_HW_RESET_SUPPORT) && defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
2819 /**
2820  * dp_umac_reset_notify_asserted_soc() - API to notify the asserted SOC
2821  * @soc: dp soc
2822  *
2823  * Return: QDF_STATUS
2824  */
2825 QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc);
2826 
2827 /**
2828  * dp_get_umac_reset_in_progress_state() - API to check umac reset in progress
2829  * state
2830  * @psoc: dp soc handle
2831  *
2832  * Return: umac reset state
2833  */
2834 enum cdp_umac_reset_state
2835 dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc);
2836 #else
2837 static inline
2838 QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc)
2839 {
2840 	return QDF_STATUS_SUCCESS;
2841 }
2842 
2843 static inline enum cdp_umac_reset_state
2844 dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc)
2845 {
2846 	return CDP_UMAC_RESET_NOT_IN_PROGRESS;
2847 }
2848 #endif
2849 
2850 #ifndef WLAN_SOFTUMAC_SUPPORT
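/**
 * dp_reo_send_cmd() - Enqueue a command on the REO command ring
 * @soc: DP SoC handle
 * @type: REO command type
 * @params: parameters for the REO command
 * @callback_fn: callback invoked when the REO status for this command is
 *               received (may be NULL)
 * @data: opaque context passed back to @callback_fn
 *
 * Return: QDF_STATUS
 */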
2851 QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, enum hal_reo_cmd_type type,
2852 			   struct hal_reo_cmd_params *params,
2853 			   void (*callback_fn), void *data);
2854 
2855 /**
2856  * dp_reo_cmdlist_destroy() - Free REO commands in the queue
2857  * @soc: DP SoC handle
2858  *
2859  * Return: none
2860  */
2861 void dp_reo_cmdlist_destroy(struct dp_soc *soc);
2862 
2863 /**
2864  * dp_reo_status_ring_handler() - Handler for REO Status ring
2865  * @int_ctx: pointer to DP interrupt context
2866  * @soc: DP Soc handle
2867  *
2868  * Return: Number of descriptors reaped
2869  */
2870 uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx,
2871 				    struct dp_soc *soc);
2872 #endif
2873 
2874 /**
2875  * dp_aggregate_vdev_stats() - Consolidate stats at VDEV level
2876  * @vdev: DP VDEV handle
2877  * @vdev_stats: aggregate statistics
2878  *
 * Return: void
2880  */
2881 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
2882 			     struct cdp_vdev_stats *vdev_stats);
2883 
2884 /**
2885  * dp_txrx_get_vdev_stats() - Update buffer with cdp_vdev_stats
2886  * @soc_hdl: CDP SoC handle
2887  * @vdev_id: vdev Id
2888  * @buf: buffer for vdev stats
2889  * @is_aggregate: are aggregate stats being collected
2890  *
2891  * Return: QDF_STATUS
2892  */
2893 QDF_STATUS
2894 dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2895 		       void *buf, bool is_aggregate);
2896 
2897 /**
2898  * dp_rx_bar_stats_cb() - BAR received stats callback
2899  * @soc: SOC handle
2900  * @cb_ctxt: Call back context
2901  * @reo_status: Reo status
2902  *
2903  * Return: void
2904  */
2905 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
2906 			union hal_reo_status *reo_status);
2907 
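/**
 * dp_tx_me_send_convert_ucast() - Convert a multicast frame into unicast
 * copies and transmit them (multicast enhancement)
 * @soc: CDP soc handle
 * @vdev_id: vdev id
 * @nbuf: multicast frame to be converted
 * @newmac: destination mac addresses for the unicast copies
 * @new_mac_cnt: number of entries in @newmac
 * @tid: TID to use for the converted frames
 * @is_igmp: flag indicating an IGMP frame
 * @is_dms_pkt: flag indicating a directed multicast service frame
 *
 * Return: count of frames handed to the transmit path
 */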
2908 uint16_t dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id,
2909 				     qdf_nbuf_t nbuf,
2910 				     uint8_t newmac[][QDF_MAC_ADDR_SIZE],
2911 				     uint8_t new_mac_cnt, uint8_t tid,
2912 				     bool is_igmp, bool is_dms_pkt);
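
/**
 * dp_tx_me_alloc_descriptor() - Allocate the multicast enhancement
 * descriptor pool
 * @soc: CDP soc handle
 * @pdev_id: physical device instance id
 *
 * Return: none
 */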
2913 void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id);
2914 
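/**
 * dp_tx_me_free_descriptor() - Free the multicast enhancement descriptor pool
 * @soc: CDP soc handle
 * @pdev_id: physical device instance id
 *
 * Return: none
 */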
2915 void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id);
2916 
2917 /**
2918  * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
2919  * @pdev: DP PDEV handle
2920  * @stats_type_upload_mask: stats type requested by user
2921  * @config_param_0: extra configuration parameters
2922  * @config_param_1: extra configuration parameters
2923  * @config_param_2: extra configuration parameters
2924  * @config_param_3: extra configuration parameters
 * @cookie: cookie value echoed back in the HTT stats response
 * @cookie_msb: cookie msb bits identifying the requester
 *              (see the DBG_STATS_COOKIE_* reservations)
2927  * @mac_id: mac number
2928  *
2929  * Return: QDF STATUS
2930  */
2931 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
2932 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
2933 		uint32_t config_param_1, uint32_t config_param_2,
2934 		uint32_t config_param_3, int cookie, int cookie_msb,
2935 		uint8_t mac_id);
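
/*
 * Example (illustrative only): request one stats type for mac 0 with no
 * extra config parameters. cookie_msb tags the requester so the HTT
 * response can be routed back (see the DBG_STATS_COOKIE_* reservations):
 *
 *	dp_h2t_ext_stats_msg_send(pdev, stats_type_upload_mask,
 *				  0, 0, 0, 0,
 *				  0, DBG_STATS_COOKIE_DEFAULT, 0);
 */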
2936 
2937 /**
2938  * dp_htt_stats_print_tag() - function to select the tag type and
2939  * print the corresponding tag structure
2940  * @pdev: pdev pointer
2941  * @tag_type: tag type that is to be printed
2942  * @tag_buf: pointer to the tag structure
2943  *
2944  * Return: void
2945  */
2946 void dp_htt_stats_print_tag(struct dp_pdev *pdev,
2947 			    uint8_t tag_type, uint32_t *tag_buf);
2948 
2949 /**
2950  * dp_htt_stats_copy_tag() - function to select the tag type and
2951  * copy the corresponding tag structure
2952  * @pdev: DP_PDEV handle
2953  * @tag_type: tag type that is to be printed
2954  * @tag_buf: pointer to the tag structure
2955  *
2956  * Return: void
2957  */
2958 void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf);
2959 
2960 /**
2961  * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration
2962  * HTT message to pass to FW
2963  * @pdev: DP PDEV handle
2964  * @tuple_mask: tuple configuration to report 3 tuple hash value in either
2965  * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
2966  *
2967  * tuple_mask[1:0]:
2968  *   00 - Do not report 3 tuple hash value
2969  *   10 - Report 3 tuple hash value in toeplitz_2_or_4
2970  *   01 - Report 3 tuple hash value in flow_id_toeplitz
2971  *   11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
2972  * @mac_id: MAC ID
2973  *
2974  * Return: QDF STATUS
2975  */
2976 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, uint32_t tuple_mask,
2977 				     uint8_t mac_id);
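
/*
 * Example: report the 3 tuple hash in both toeplitz_2_or_4 and
 * flow_id_toeplitz, i.e. tuple_mask[1:0] = 11b, on mac 0:
 *
 *	dp_h2t_3tuple_config_send(pdev, 0x3, 0);
 */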
2978 
2979 #ifdef IPA_OFFLOAD
2980 /**
2981  * dp_peer_update_tid_stats_from_reo() - update rx pkt and byte count from reo
2982  * @soc: soc handle
2983  * @cb_ctxt: combination of peer_id and tid
2984  * @reo_status: reo status
2985  *
2986  * Return: void
2987  */
2988 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
2989 				       union hal_reo_status *reo_status);
2990 
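/**
 * dp_peer_get_rxtid_stats_ipa() - Query rx tid queue stats for a peer
 * @peer: Data Path peer
 * @dp_stats_cmd_cb: callback invoked with the REO queue stats per tid
 *
 * Return: number of REO status commands sent
 */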
2991 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
2992 				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb);
2993 #ifdef IPA_OPT_WIFI_DP
2994 void dp_ipa_wdi_opt_dpath_notify_flt_rlsd(int flt0_rslt,
2995 					  int flt1_rslt);
2996 void dp_ipa_wdi_opt_dpath_notify_flt_add_rem_cb(int flt0_rslt, int flt1_rslt);
2997 void dp_ipa_wdi_opt_dpath_notify_flt_rsvd(bool is_success);
2998 #endif
2999 #ifdef QCA_ENHANCED_STATS_SUPPORT
3000 /**
 * dp_peer_aggregate_tid_stats() - aggregate rx tid stats
3002  * @peer: Data Path peer
3003  *
3004  * Return: void
3005  */
3006 void dp_peer_aggregate_tid_stats(struct dp_peer *peer);
3007 #endif
3008 #else
3009 static inline void dp_peer_aggregate_tid_stats(struct dp_peer *peer)
3010 {
3011 }
3012 #endif
3013 
3014 /**
3015  * dp_set_key_sec_type_wifi3() - set security mode of key
3016  * @soc: Datapath soc handle
 * @vdev_id: id of datapath vdev
3018  * @peer_mac: Datapath peer mac address
3019  * @sec_type: security type
3020  * @is_unicast: key type
 *
 * Return: QDF_STATUS
3022  */
3023 QDF_STATUS
3024 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3025 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
3026 			  bool is_unicast);
3027 
3028 /**
 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Return: pdev corresponding to MAC
3034  */
3035 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id);
3036 
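/**
 * dp_set_michael_key() - Set TKIP michael key for the peer
 * @soc: Datapath soc handle
 * @vdev_id: id of datapath vdev
 * @peer_mac: Datapath peer mac address
 * @is_unicast: true for the unicast key, false for the multicast key
 * @key: pointer to the michael key to program
 *
 * Return: QDF_STATUS
 */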
3037 QDF_STATUS
3038 dp_set_michael_key(struct cdp_soc_t *soc, uint8_t vdev_id,
3039 		   uint8_t *peer_mac,
3040 		   bool is_unicast, uint32_t *key);
3041 
3042 /**
3043  * dp_check_pdev_exists() - Validate pdev before use
3044  * @soc: dp soc handle
3045  * @data: pdev handle
3046  *
 * Return: true if pdev exists, false otherwise
3048  */
3049 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data);
3050 
3051 /**
3052  * dp_update_delay_stats() - Update delay statistics in structure
3053  *				and fill min, max and avg delay
3054  * @tstats: tid tx stats
3055  * @rstats: tid rx stats
 * @delay: delay in ms or us, as indicated by @delay_in_us
3057  * @tid: tid value
3058  * @mode: type of tx delay mode
3059  * @ring_id: ring number
3060  * @delay_in_us: flag to indicate whether the delay is in ms or us
3061  *
3062  * Return: none
3063  */
3064 void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats,
3065 			   struct cdp_tid_rx_stats *rstats, uint32_t delay,
3066 			   uint8_t tid, uint8_t mode, uint8_t ring_id,
3067 			   bool delay_in_us);
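
/*
 * Example (illustrative only; tx_mode stands for one of the tx delay
 * modes): record a 2000 us tx delay for tid 0 on ring 0. rstats is NULL
 * because a tx delay mode updates only the tid tx stats:
 *
 *	dp_update_delay_stats(tstats, NULL, 2000, 0, tx_mode, 0, true);
 */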
3068 
3069 /**
 * dp_print_ring_stats(): Print tail and head pointers
3071  * @pdev: DP_PDEV handle
3072  *
3073  * Return: void
3074  */
3075 void dp_print_ring_stats(struct dp_pdev *pdev);
3076 
3077 /**
 * dp_print_ring_stat_from_hal(): Print tail and head pointers through hal
3079  * @soc: soc handle
3080  * @srng: srng handle
3081  * @ring_type: ring type
3082  *
3083  * Return: void
3084  */
3085 void
3086 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
3087 			    enum hal_ring_type ring_type);
3088 
3089 /**
3090  * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
3091  * @pdev: DP pdev handle
3092  *
3093  * Return: void
3094  */
3095 void dp_print_pdev_cfg_params(struct dp_pdev *pdev);
3096 
3097 /**
 * dp_print_soc_cfg_params() - Dump soc wlan config parameters
3099  * @soc: Soc handle
3100  *
3101  * Return: void
3102  */
3103 void dp_print_soc_cfg_params(struct dp_soc *soc);
3104 
3105 /**
3106  * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
3107  * @ring_type: Ring
3108  *
 * Return: const char pointer to the ring name
3110  */
3111 const
3112 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type);
3113 
3114 /**
 * dp_txrx_path_stats() - Function to dump Tx/Rx path statistics
3116  * @soc: soc handle
3117  *
3118  * Return: none
3119  */
3120 void dp_txrx_path_stats(struct dp_soc *soc);
3121 
3122 /**
3123  * dp_print_per_ring_stats(): Packet count per ring
3124  * @soc: soc handle
3125  *
3126  * Return: None
3127  */
3128 void dp_print_per_ring_stats(struct dp_soc *soc);
3129 
3130 /**
3131  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
3132  * @pdev: DP PDEV handle
3133  *
3134  * Return: void
3135  */
3136 void dp_aggregate_pdev_stats(struct dp_pdev *pdev);
3137 
3138 /**
3139  * dp_print_rx_rates(): Print Rx rate stats
3140  * @vdev: DP_VDEV handle
3141  *
 * Return: void
3143  */
3144 void dp_print_rx_rates(struct dp_vdev *vdev);
3145 
3146 /**
3147  * dp_print_tx_rates(): Print tx rates
3148  * @vdev: DP_VDEV handle
3149  *
 * Return: void
3151  */
3152 void dp_print_tx_rates(struct dp_vdev *vdev);
3153 
3154 /**
 * dp_print_peer_stats(): print peer stats
3156  * @peer: DP_PEER handle
3157  * @peer_stats: buffer holding peer stats
3158  *
 * Return: void
3160  */
3161 void dp_print_peer_stats(struct dp_peer *peer,
3162 			 struct cdp_peer_stats *peer_stats);
3163 
3164 /**
3165  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
3166  * @pdev: DP_PDEV Handle
3167  *
3168  * Return:void
3169  */
3170 void
3171 dp_print_pdev_tx_stats(struct dp_pdev *pdev);
3172 
3173 /**
3174  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
3175  * @pdev: DP_PDEV Handle
3176  *
3177  * Return: void
3178  */
3179 void
3180 dp_print_pdev_rx_stats(struct dp_pdev *pdev);
3181 
3182 /**
 * dp_print_soc_tx_stats(): Print SOC level TX stats
3184  * @soc: DP_SOC Handle
3185  *
3186  * Return: void
3187  */
3188 void dp_print_soc_tx_stats(struct dp_soc *soc);
3189 
3190 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX
3191 /**
3192  * dp_print_global_desc_count(): Print global desc in use
3193  *
3194  * Return: void
3195  */
3196 void dp_print_global_desc_count(void);
3197 #else
3198 /**
3199  * dp_print_global_desc_count(): Print global desc in use
3200  *
3201  * Return: void
3202  */
3203 static inline
3204 void dp_print_global_desc_count(void)
3205 {
3206 }
3207 #endif
3208 
3209 /**
3210  * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc
3211  * @soc: dp_soc handle
3212  *
3213  * Return: None
3214  */
3215 void dp_print_soc_interrupt_stats(struct dp_soc *soc);
3216 
3217 /**
3218  * dp_print_tx_ppeds_stats() - Print Tx in use stats for the soc in DS
3219  * @soc: dp_soc handle
3220  *
3221  * Return: None
 */
void dp_print_tx_ppeds_stats(struct dp_soc *soc);
3225 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
3226 /**
3227  * dp_dump_srng_high_wm_stats() - Print the ring usage high watermark stats
3228  *				  for all SRNGs
3229  * @soc: DP soc handle
3230  * @srng_mask: SRNGs mask for dumping usage watermark stats
3231  *
3232  * Return: None
3233  */
3234 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask);
3235 #else
3236 static inline
3237 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask)
3238 {
3239 }
3240 #endif
3241 
3242 /**
3243  * dp_print_soc_rx_stats() - Print SOC level Rx stats
3244  * @soc: DP_SOC Handle
3245  *
3246  * Return: void
3247  */
3248 void dp_print_soc_rx_stats(struct dp_soc *soc);
3249 
3250 /**
 * dp_get_mac_id_for_pdev() - Return mac id corresponding to pdev
3252  *
3253  * @mac_id: MAC id
3254  * @pdev_id: pdev_id corresponding to pdev, 0 for MCL
3255  *
3256  * Single pdev using both MACs will operate on both MAC rings,
3257  * which is the case for MCL.
3258  * For WIN each PDEV will operate one ring, so index is zero.
3259  *
3260  */
3261 static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id)
3262 {
3263 	if (mac_id && pdev_id) {
3264 		qdf_print("Both mac_id and pdev_id cannot be non zero");
3265 		QDF_BUG(0);
3266 		return 0;
3267 	}
3268 	return (mac_id + pdev_id);
3269 }
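
/*
 * Illustrative mapping: on an MCL-style single-pdev target (pdev_id 0),
 * mac_id 0 and 1 select rings 0 and 1; on WIN-style targets mac_id is 0
 * and the pdev_id selects the ring:
 *
 *	int ring_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
 */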
3270 
3271 /**
3272  * dp_get_lmac_id_for_pdev_id() - Return lmac id corresponding to host pdev id
3273  * @soc: soc pointer
3274  * @mac_id: MAC id
3275  * @pdev_id: pdev_id corresponding to pdev, 0 for MCL
3276  *
 * For MCL, a single pdev using both MACs will operate on both MAC rings.
3278  *
3279  * For WIN, each PDEV will operate one ring.
3280  *
3281  */
3282 static inline int
3283 dp_get_lmac_id_for_pdev_id
3284 	(struct dp_soc *soc, uint32_t mac_id, uint32_t pdev_id)
3285 {
3286 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3287 		if (mac_id && pdev_id) {
3288 			qdf_print("Both mac_id and pdev_id cannot be non zero");
3289 			QDF_BUG(0);
3290 			return 0;
3291 		}
3292 		return (mac_id + pdev_id);
3293 	}
3294 
3295 	return soc->pdev_list[pdev_id]->lmac_id;
3296 }
3297 
3298 /**
3299  * dp_get_pdev_for_lmac_id() - Return pdev pointer corresponding to lmac id
3300  * @soc: soc pointer
3301  * @lmac_id: LMAC id
3302  *
 * For MCL, a single pdev exists
3304  *
3305  * For WIN, each PDEV will operate one ring.
3306  *
3307  */
3308 static inline struct dp_pdev *
3309 	dp_get_pdev_for_lmac_id(struct dp_soc *soc, uint32_t lmac_id)
3310 {
3311 	uint8_t i = 0;
3312 
3313 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3314 		i = wlan_cfg_get_pdev_idx(soc->wlan_cfg_ctx, lmac_id);
3315 		return ((i < MAX_PDEV_CNT) ? soc->pdev_list[i] : NULL);
3316 	}
3317 
	/* Typically for MCL, as there is only 1 PDEV */
3319 	return soc->pdev_list[0];
3320 }
3321 
3322 /**
3323  * dp_calculate_target_pdev_id_from_host_pdev_id() - Return target pdev
3324  *                                          corresponding to host pdev id
3325  * @soc: soc pointer
3326  * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL
3327  *
3328  * Return: target pdev_id for host pdev id. For WIN, this is derived through
3329  * a two step process:
3330  * 1. Get lmac_id corresponding to host pdev_id (lmac_id can change
3331  *    during mode switch)
3332  * 2. Get target pdev_id (set up during WMI ready) from lmac_id
3333  *
3334  * For MCL, return the offset-1 translated mac_id
3335  */
3336 static inline int
3337 dp_calculate_target_pdev_id_from_host_pdev_id
3338 	(struct dp_soc *soc, uint32_t mac_for_pdev)
3339 {
3340 	struct dp_pdev *pdev;
3341 
3342 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
3343 		return DP_SW2HW_MACID(mac_for_pdev);
3344 
3345 	pdev = soc->pdev_list[mac_for_pdev];
3346 
3347 	/*non-MCL case, get original target_pdev mapping*/
3348 	return wlan_cfg_get_target_pdev_id(soc->wlan_cfg_ctx, pdev->lmac_id);
3349 }
3350 
3351 /**
3352  * dp_get_target_pdev_id_for_host_pdev_id() - Return target pdev corresponding
3353  *                                         to host pdev id
3354  * @soc: soc pointer
3355  * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL
3356  *
3357  * Return: target pdev_id for host pdev id.
3358  * For WIN, return the value stored in pdev object.
3359  * For MCL, return the offset-1 translated mac_id.
3360  */
3361 static inline int
3362 dp_get_target_pdev_id_for_host_pdev_id
3363 	(struct dp_soc *soc, uint32_t mac_for_pdev)
3364 {
3365 	struct dp_pdev *pdev;
3366 
3367 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
3368 		return DP_SW2HW_MACID(mac_for_pdev);
3369 
3370 	pdev = soc->pdev_list[mac_for_pdev];
3371 
3372 	return pdev->target_pdev_id;
3373 }
3374 
3375 /**
3376  * dp_get_host_pdev_id_for_target_pdev_id() - Return host pdev corresponding
3377  *                                         to target pdev id
3378  * @soc: soc pointer
3379  * @pdev_id: pdev_id corresponding to target pdev
3380  *
3381  * Return: host pdev_id for target pdev id. For WIN, this is derived through
3382  * a two step process:
3383  * 1. Get lmac_id corresponding to target pdev_id
3384  * 2. Get host pdev_id (set up during WMI ready) from lmac_id
3385  *
3386  * For MCL, return the 0-offset pdev_id
3387  */
3388 static inline int
3389 dp_get_host_pdev_id_for_target_pdev_id
3390 	(struct dp_soc *soc, uint32_t pdev_id)
3391 {
3392 	struct dp_pdev *pdev;
3393 	int lmac_id;
3394 
3395 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
3396 		return DP_HW2SW_MACID(pdev_id);
3397 
3398 	/*non-MCL case, get original target_lmac mapping from target pdev*/
3399 	lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx,
3400 					  DP_HW2SW_MACID(pdev_id));
3401 
3402 	/*Get host pdev from lmac*/
3403 	pdev = dp_get_pdev_for_lmac_id(soc, lmac_id);
3404 
3405 	return pdev ? pdev->pdev_id : INVALID_PDEV_ID;
3406 }
3407 
3408 /**
 * dp_get_mac_id_for_mac() - Return mac id for WIN and MCL configurations
3410  *
3411  * @soc: handle to DP soc
3412  * @mac_id: MAC id
3413  *
3414  * Single pdev using both MACs will operate on both MAC rings,
3415  * which is the case for MCL.
3416  * For WIN each PDEV will operate one ring, so index is zero.
3417  *
3418  */
3419 static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id)
3420 {
3421 	/*
3422 	 * Single pdev using both MACs will operate on both MAC rings,
3423 	 * which is the case for MCL.
3424 	 */
3425 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
3426 		return mac_id;
3427 
3428 	/* For WIN each PDEV will operate one ring, so index is zero. */
3429 	return 0;
3430 }
3431 
3432 /**
3433  * dp_is_subtype_data() - check if the frame subtype is data
3434  *
3435  * @frame_ctrl: Frame control field
3436  *
3437  * check the frame control field and verify if the packet
3438  * is a data packet.
3439  *
3440  * Return: true or false
3441  */
3442 static inline bool dp_is_subtype_data(uint16_t frame_ctrl)
3443 {
3444 	if (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_TYPE_MASK) ==
3445 	    QDF_IEEE80211_FC0_TYPE_DATA) &&
3446 	    (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
3447 	    QDF_IEEE80211_FC0_SUBTYPE_DATA) ||
3448 	    ((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
3449 	    QDF_IEEE80211_FC0_SUBTYPE_QOS))) {
3450 		return true;
3451 	}
3452 
3453 	return false;
3454 }
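
/*
 * Worked example (illustrative only): a little-endian QoS-data frame has
 * fc0 = 0x88, i.e. frame_ctrl = 0x0088. Then
 * (0x0088 & QDF_IEEE80211_FC0_TYPE_MASK) == QDF_IEEE80211_FC0_TYPE_DATA
 * and (0x0088 & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
 * QDF_IEEE80211_FC0_SUBTYPE_QOS, so dp_is_subtype_data(0x0088) is true,
 * while a beacon (frame_ctrl = 0x0080, management type) yields false.
 */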
3455 
3456 #ifdef WDI_EVENT_ENABLE
3457 /**
3458  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
3459  * @pdev: DP PDEV handle
3460  * @stats_type_upload_mask: stats type requested by user
3461  * @mac_id: Mac id number
3462  *
3463  * Return: QDF_STATUS
3464  */
3465 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
3466 				uint32_t stats_type_upload_mask,
3467 				uint8_t mac_id);
3468 
3469 /**
3470  * dp_wdi_event_unsub() - WDI event unsubscribe
3471  * @soc: soc handle
3472  * @pdev_id: id of pdev
3473  * @event_cb_sub_handle: subscribed event handle
3474  * @event: Event to be unsubscribed
3475  *
3476  * Return: 0 for success. nonzero for failure.
3477  */
3478 int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id,
3479 		       wdi_event_subscribe *event_cb_sub_handle,
3480 		       uint32_t event);
3481 
3482 /**
3483  * dp_wdi_event_sub() - Subscribe WDI event
3484  * @soc: soc handle
3485  * @pdev_id: id of pdev
3486  * @event_cb_sub_handle: subscription event handle
3487  * @event: Event to be subscribed
3488  *
3489  * Return: 0 for success. nonzero for failure.
3490  */
3491 int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id,
3492 		     wdi_event_subscribe *event_cb_sub_handle,
3493 		     uint32_t event);
3494 
3495 /**
3496  * dp_wdi_event_handler() - Event handler for WDI event
3497  * @event: wdi event number
3498  * @soc: soc handle
3499  * @data: pointer to data
3500  * @peer_id: peer id number
3501  * @status: HTT rx status
3502  * @pdev_id: id of pdev
3503  *
3504  * It will be called to dispatch a WDI event to registered subscribers
3505  *
3506  * Return: None
3507  */
3508 void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc,
3509 			  void *data, u_int16_t peer_id,
3510 			  int status, u_int8_t pdev_id);
3511 
3512 /**
3513  * dp_wdi_event_attach() - Attach wdi event
3514  * @txrx_pdev: DP pdev handle
3515  *
3516  * Return: 0 for success. nonzero for failure.
3517  */
3518 int dp_wdi_event_attach(struct dp_pdev *txrx_pdev);
3519 
3520 /**
3521  * dp_wdi_event_detach() - Detach WDI event
3522  * @txrx_pdev: DP pdev handle
3523  *
3524  * Return: 0 for success. nonzero for failure.
3525  */
3526 int dp_wdi_event_detach(struct dp_pdev *txrx_pdev);
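
/*
 * Illustrative subscribe/dispatch sketch (assumes wdi_event_subscribe
 * exposes callback/context members; verify against wdi_event_api.h
 * before relying on this exact layout):
 *
 *	static wdi_event_subscribe my_sub = {
 *		.callback = my_wdi_cb,
 *		.context  = &my_ctx,
 *	};
 *
 *	dp_wdi_event_sub(soc, pdev_id, &my_sub, WDI_EVENT_RX_DESC);
 *	...
 *	dp_wdi_event_unsub(soc, pdev_id, &my_sub, WDI_EVENT_RX_DESC);
 *
 * dp_wdi_event_handler() then walks the subscriber list for the event
 * and invokes each registered callback with its context.
 */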
3527 
3528 static inline void
3529 dp_hif_update_pipe_callback(struct dp_soc *dp_soc,
3530 			    void *cb_context,
3531 			    QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t),
3532 			    uint8_t pipe_id)
3533 {
3534 	struct hif_msg_callbacks hif_pipe_callbacks = { 0 };
3535 
3536 	/* TODO: Temporary change to bypass HTC connection for this new
3537 	 * HIF pipe, which will be used for packet log and other high-
3538 	 * priority HTT messages. Proper HTC connection to be added
3539 	 * later once required FW changes are available
3540 	 */
3541 	hif_pipe_callbacks.rxCompletionHandler = callback;
3542 	hif_pipe_callbacks.Context = cb_context;
3543 	hif_update_pipe_callback(dp_soc->hif_handle,
3544 		DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks);
3545 }
3546 #else
3547 static inline int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id,
3548 				     wdi_event_subscribe *event_cb_sub_handle,
3549 				     uint32_t event)
3550 {
3551 	return 0;
3552 }
3553 
3554 static inline int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id,
3555 				   wdi_event_subscribe *event_cb_sub_handle,
3556 				   uint32_t event)
3557 {
3558 	return 0;
3559 }
3560 
3561 static inline
3562 void dp_wdi_event_handler(enum WDI_EVENT event,
3563 			  struct dp_soc *soc,
3564 			  void *data, u_int16_t peer_id,
3565 			  int status, u_int8_t pdev_id)
3566 {
3567 }
3568 
3569 static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev)
3570 {
3571 	return 0;
3572 }
3573 
3574 static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev)
3575 {
3576 	return 0;
3577 }
3578 
3579 static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
3580 		uint32_t stats_type_upload_mask, uint8_t mac_id)
3581 {
3582 	return QDF_STATUS_SUCCESS;
3583 }
3584 
3585 static inline void
3586 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context,
3587 			    QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t),
3588 			    uint8_t pipe_id)
3589 {
3590 }
3591 #endif
3592 
3593 #ifdef VDEV_PEER_PROTOCOL_COUNT
3594 /**
3595  * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters
3596  * @vdev: VDEV DP object
3597  * @nbuf: data packet
3598  * @txrx_peer: DP TXRX Peer object
3599  * @is_egress: whether egress or ingress
3600  * @is_rx: whether rx or tx
3601  *
3602  * This function updates the per-peer protocol counters
3603  * Return: void
3604  */
3605 void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev,
3606 					    qdf_nbuf_t nbuf,
3607 					    struct dp_txrx_peer *txrx_peer,
3608 					    bool is_egress,
3609 					    bool is_rx);
3610 
3611 /**
3612  * dp_peer_stats_update_protocol_cnt() - update per-peer protocol counters
3613  * @soc: SOC DP object
3614  * @vdev_id: vdev_id
3615  * @nbuf: data packet
3616  * @is_egress: whether egress or ingress
3617  * @is_rx: whether rx or tx
3618  *
3619  * This function updates the per-peer protocol counters
3620  *
3621  * Return: void
3622  */
3623 void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc,
3624 				       int8_t vdev_id,
3625 				       qdf_nbuf_t nbuf,
3626 				       bool is_egress,
3627 				       bool is_rx);
3628 
3629 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
3630 					       qdf_nbuf_t nbuf);
3631 
3632 #else
3633 #define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, txrx_peer, \
3634 					       is_egress, is_rx)
3635 
3636 static inline
3637 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
3638 					       qdf_nbuf_t nbuf)
3639 {
3640 }
3641 
3642 #endif
3643 
3644 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3645 /**
3646  * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
3647  * @soc_hdl: Handle to struct cdp_soc
3648  *
3649  * Return: none
3650  */
3651 void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl);
3652 
3653 /**
3654  * dp_tx_dump_flow_pool_info_compact() - dump flow pool info
3655  * @soc: DP soc context
3656  *
3657  * Return: none
3658  */
3659 void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc);
3660 int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
3661 	bool force);
3662 #else
3663 static inline void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc)
3664 {
3665 }
3666 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
3667 
3668 #ifdef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
3669 static inline int
3670 dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
3671 {
3672 	return hal_srng_access_start_unlocked(soc, hal_ring_hdl);
3673 }
3674 
3675 static inline void
3676 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
3677 {
3678 	hal_srng_access_end_unlocked(soc, hal_ring_hdl);
3679 }
3680 
3681 #else
3682 static inline int
3683 dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
3684 {
3685 	return hal_srng_access_start(soc, hal_ring_hdl);
3686 }
3687 
3688 static inline void
3689 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
3690 {
3691 	hal_srng_access_end(soc, hal_ring_hdl);
3692 }
3693 #endif
3694 
3695 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
3696 /**
3697  * dp_srng_access_start() - Wrapper function to log access start of a hal ring
3698  * @int_ctx: pointer to DP interrupt context. This should not be NULL
3699  * @dp_soc: DP Soc handle
3700  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
3701  *                serviced
3702  *
3703  * Return: 0 on success; error on failure
3704  */
3705 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
3706 			 hal_ring_handle_t hal_ring_hdl);
3707 
3708 /**
3709  * dp_srng_access_end() - Wrapper function to log access end of a hal ring
3710  * @int_ctx: pointer to DP interrupt context. This should not be NULL
3711  * @dp_soc: DP Soc handle
3712  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
3713  *                serviced
3714  *
3715  * Return: void
3716  */
3717 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
3718 			hal_ring_handle_t hal_ring_hdl);
3719 
3720 #else
3721 static inline int dp_srng_access_start(struct dp_intr *int_ctx,
3722 				       struct dp_soc *dp_soc,
3723 				       hal_ring_handle_t hal_ring_hdl)
3724 {
3725 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
3726 
3727 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
3728 }
3729 
3730 static inline void dp_srng_access_end(struct dp_intr *int_ctx,
3731 				      struct dp_soc *dp_soc,
3732 				      hal_ring_handle_t hal_ring_hdl)
3733 {
3734 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
3735 
3736 	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
3737 }
3738 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
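
/*
 * Typical reap-loop shape around these wrappers (a sketch only;
 * process() is a placeholder, and the real loops in dp_rx.c/dp_tx.c
 * add budget handling and per-ring logic):
 *
 *	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
 *		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
 *		return 0;
 *	}
 *
 *	while ((ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl)))
 *		process(ring_desc);
 *
 *	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
 */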
3739 
3740 #ifdef QCA_CACHED_RING_DESC
3741 /**
3742  * dp_srng_dst_get_next() - Wrapper function to get next ring desc
3743  * @dp_soc: DP Soc handle
3744  * @hal_ring_hdl: opaque pointer to the HAL Destination Ring
3745  *
3746  * Return: HAL ring descriptor
3747  */
3748 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc,
3749 					 hal_ring_handle_t hal_ring_hdl)
3750 {
3751 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
3752 
3753 	return hal_srng_dst_get_next_cached(hal_soc, hal_ring_hdl);
3754 }
3755 
3756 /**
3757  * dp_srng_dst_inv_cached_descs() - Wrapper function to invalidate cached
3758  * descriptors
3759  * @dp_soc: DP Soc handle
3760  * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring
3761  * @num_entries: Entry count
3762  *
3763  * Return: None
3764  */
3765 static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc,
3766 						hal_ring_handle_t hal_ring_hdl,
3767 						uint32_t num_entries)
3768 {
3769 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
3770 
3771 	hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num_entries);
3772 }
3773 #else
3774 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc,
3775 					 hal_ring_handle_t hal_ring_hdl)
3776 {
3777 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
3778 
3779 	return hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
3780 }
3781 
3782 static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc,
3783 						hal_ring_handle_t hal_ring_hdl,
3784 						uint32_t num_entries)
3785 {
3786 }
3787 #endif /* QCA_CACHED_RING_DESC */
3788 
3789 #if defined(QCA_CACHED_RING_DESC) && \
3790 	(defined(QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH) || \
3791 	 defined(QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH))
3792 /**
3793  * dp_srng_dst_prefetch() - Wrapper function to prefetch descs from dest ring
3794  * @hal_soc: HAL SOC handle
3795  * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring
3796  * @num_entries: Entry count
3797  *
3798  * Return: None
3799  */
3800 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc,
3801 					 hal_ring_handle_t hal_ring_hdl,
3802 					 uint32_t num_entries)
3803 {
3804 	return hal_srng_dst_prefetch(hal_soc, hal_ring_hdl, num_entries);
3805 }
3806 
3807 /**
3808  * dp_srng_dst_prefetch_32_byte_desc() - Wrapper function to prefetch
3809  *					 32 byte descriptor starting at
3810  *					 64 byte offset
3811  * @hal_soc: HAL SOC handle
3812  * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring
3813  * @num_entries: Entry count
3814  *
3815  * Return: None
3816  */
3817 static inline
3818 void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc,
3819 					hal_ring_handle_t hal_ring_hdl,
3820 					uint32_t num_entries)
3821 {
3822 	return hal_srng_dst_prefetch_32_byte_desc(hal_soc, hal_ring_hdl,
3823 						  num_entries);
3824 }
3825 #else
3826 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc,
3827 					 hal_ring_handle_t hal_ring_hdl,
3828 					 uint32_t num_entries)
3829 {
3830 	return NULL;
3831 }
3832 
3833 static inline
3834 void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc,
3835 					hal_ring_handle_t hal_ring_hdl,
3836 					uint32_t num_entries)
3837 {
3838 	return NULL;
3839 }
3840 #endif
3841 
3842 #ifdef QCA_ENH_V3_STATS_SUPPORT
3843 /**
3844  * dp_pdev_print_delay_stats(): Print pdev level delay stats
3845  * @pdev: DP_PDEV handle
3846  *
3847  * Return: void
3848  */
3849 void dp_pdev_print_delay_stats(struct dp_pdev *pdev);
3850 
3851 /**
3852  * dp_pdev_print_tid_stats(): Print pdev level tid stats
3853  * @pdev: DP_PDEV handle
3854  *
3855  * Return: void
3856  */
3857 void dp_pdev_print_tid_stats(struct dp_pdev *pdev);
3858 
3859 /**
3860  * dp_pdev_print_rx_error_stats(): Print pdev level rx error stats
3861  * @pdev: DP_PDEV handle
3862  *
3863  * Return: void
3864  */
3865 void dp_pdev_print_rx_error_stats(struct dp_pdev *pdev);
3866 #endif /* QCA_ENH_V3_STATS_SUPPORT */
3867 
3868 /**
3869  * dp_pdev_get_tid_stats(): Get accumulated pdev level tid_stats
3870  * @soc_hdl: soc handle
3871  * @pdev_id: id of dp_pdev handle
3872  * @tid_stats: Pointer for cdp_tid_stats_intf
3873  *
3874  * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_INVAL
3875  */
3876 QDF_STATUS dp_pdev_get_tid_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3877 				 struct cdp_tid_stats_intf *tid_stats);
3878 
3879 /**
3880  * dp_soc_set_txrx_ring_map() - Set the txrx ring map for the soc
3881  * @soc: DP handler for soc
3882  *
3883  * Return: Void
3884  */
3885 void dp_soc_set_txrx_ring_map(struct dp_soc *soc);
3886 
3887 /**
3888  * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev
3889  * @vdev: DP vdev handle
3890  *
3891  * Return: struct cdp_vdev pointer
3892  */
3893 static inline
3894 struct cdp_vdev *dp_vdev_to_cdp_vdev(struct dp_vdev *vdev)
3895 {
3896 	return (struct cdp_vdev *)vdev;
3897 }
3898 
3899 /**
3900  * dp_pdev_to_cdp_pdev() - typecast dp pdev to cdp pdev
3901  * @pdev: DP pdev handle
3902  *
3903  * Return: struct cdp_pdev pointer
3904  */
3905 static inline
3906 struct cdp_pdev *dp_pdev_to_cdp_pdev(struct dp_pdev *pdev)
3907 {
3908 	return (struct cdp_pdev *)pdev;
3909 }
3910 
3911 /**
3912  * dp_soc_to_cdp_soc() - typecast dp psoc to cdp psoc
3913  * @psoc: DP psoc handle
3914  *
3915  * Return: struct cdp_soc pointer
3916  */
3917 static inline
3918 struct cdp_soc *dp_soc_to_cdp_soc(struct dp_soc *psoc)
3919 {
3920 	return (struct cdp_soc *)psoc;
3921 }
3922 
3923 /**
3924  * dp_soc_to_cdp_soc_t() - typecast dp psoc to ol txrx soc handle
3925  * @psoc: DP psoc handle
3926  *
3927  * Return: struct cdp_soc_t pointer
3928  */
3929 static inline
3930 struct cdp_soc_t *dp_soc_to_cdp_soc_t(struct dp_soc *psoc)
3931 {
3932 	return (struct cdp_soc_t *)psoc;
3933 }
3934 
3935 #if defined(WLAN_SUPPORT_RX_FLOW_TAG)
3936 /**
3937  * dp_rx_flow_get_fse_stats() - Retrieve a flow's statistics
3938  * @pdev: pdev handle
3939  * @rx_flow_info: flow information in the Rx FST
3940  * @stats: stats to update
3941  *
3942  * Return: Success when flow statistics are updated, error on failure
3943  */
3944 QDF_STATUS dp_rx_flow_get_fse_stats(struct dp_pdev *pdev,
3945 				    struct cdp_rx_flow_info *rx_flow_info,
3946 				    struct cdp_flow_stats *stats);
3947 
3948 /**
3949  * dp_rx_flow_delete_entry() - Delete a flow entry from flow search table
3950  * @pdev: pdev handle
3951  * @rx_flow_info: DP flow parameters
3952  *
3953  * Return: Success when flow is deleted, error on failure
3954  */
3955 QDF_STATUS dp_rx_flow_delete_entry(struct dp_pdev *pdev,
3956 				   struct cdp_rx_flow_info *rx_flow_info);
3957 
3958 /**
3959  * dp_rx_flow_add_entry() - Add a flow entry to flow search table
3960  * @pdev: DP pdev instance
3961  * @rx_flow_info: DP flow parameters
3962  *
3963  * Return: Success when the flow is added; no-memory or already-exists error otherwise
3964  */
3965 QDF_STATUS dp_rx_flow_add_entry(struct dp_pdev *pdev,
3966 				struct cdp_rx_flow_info *rx_flow_info);
3967 
3968 /**
3969  * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
3970  * @soc: SoC handle
3971  * @pdev: Pdev handle
3972  *
3973  * Return: QDF_STATUS
3974  */
3975 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev);
3976 
3977 /**
3978  * dp_rx_fst_detach() - De-initialize Rx FST
3979  * @soc: SoC handle
3980  * @pdev: Pdev handle
3981  *
3982  * Return: None
3983  */
3984 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev);
3985 
3986 /**
3987  * dp_mon_rx_update_rx_flow_tag_stats() - Update a mon flow's statistics
3988  * @pdev: pdev handle
3989  * @flow_id: flow index (truncated hash) in the Rx FST
3990  *
3991  * Return: Success when flow statistics are updated, error on failure
3992  */
3993 QDF_STATUS
3994 dp_mon_rx_update_rx_flow_tag_stats(struct dp_pdev *pdev, uint32_t flow_id);
3995 #endif
3996 
3997 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
3998 /**
3999  * dp_rx_flow_send_fst_fw_setup() - Program FST parameters in FW/HW post-attach
4000  * @soc: SoC handle
4001  * @pdev: Pdev handle
4002  *
4003  * Return: Success when fst parameters are programmed in FW, error otherwise
4004  */
4005 QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc,
4006 					struct dp_pdev *pdev);
4007 #endif
4008 
4009 /**
4010  * dp_rx_fst_attach_wrapper() - wrapper API for dp_rx_fst_attach
4011  * @soc: SoC handle
4012  * @pdev: Pdev handle
4013  *
4014  * Return: QDF_STATUS
4015  */
4016 extern QDF_STATUS
4017 dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev);
4018 
4019 /**
4020  * dp_rx_fst_detach_wrapper() - wrapper API for dp_rx_fst_detach
4021  * @soc: SoC handle
4022  * @pdev: Pdev handle
4023  *
4024  * Return: None
4025  */
4026 extern void
4027 dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev);
4028 
4029 /**
4030  * dp_vdev_get_ref() - API to take a reference for VDEV object
4031  *
4032  * @soc: core DP soc context
4033  * @vdev: DP vdev
4034  * @mod_id: module id
4035  *
4036  * Return: QDF_STATUS_SUCCESS if reference held successfully,
4037  *	   else QDF_STATUS_E_INVAL
4038  */
4039 static inline
4040 QDF_STATUS dp_vdev_get_ref(struct dp_soc *soc, struct dp_vdev *vdev,
4041 			   enum dp_mod_id mod_id)
4042 {
4043 	if (!qdf_atomic_inc_not_zero(&vdev->ref_cnt))
4044 		return QDF_STATUS_E_INVAL;
4045 
4046 	qdf_atomic_inc(&vdev->mod_refs[mod_id]);
4047 
4048 	return QDF_STATUS_SUCCESS;
4049 }
4050 
4051 /**
4052  * dp_vdev_get_ref_by_id() - Returns vdev object given the vdev id
4053  * @soc: core DP soc context
4054  * @vdev_id: vdev id from vdev object can be retrieved
4055  * @mod_id: module id which is requesting the reference
4056  *
4057  * Return: struct dp_vdev*: Pointer to DP vdev object
4058  */
4059 static inline struct dp_vdev *
4060 dp_vdev_get_ref_by_id(struct dp_soc *soc, uint8_t vdev_id,
4061 		      enum dp_mod_id mod_id)
4062 {
4063 	struct dp_vdev *vdev = NULL;
4064 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
4065 		return NULL;
4066 
4067 	qdf_spin_lock_bh(&soc->vdev_map_lock);
4068 	vdev = soc->vdev_id_map[vdev_id];
4069 
4070 	if (!vdev || dp_vdev_get_ref(soc, vdev, mod_id) != QDF_STATUS_SUCCESS) {
4071 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
4072 		return NULL;
4073 	}
4074 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
4075 
4076 	return vdev;
4077 }
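
/*
 * Usage sketch: each successful dp_vdev_get_ref()/dp_vdev_get_ref_by_id()
 * must be balanced by a release with the same mod_id, normally via
 * dp_vdev_unref_delete():
 *
 *	struct dp_vdev *vdev =
 *		dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
 *
 *	if (vdev) {
 *		... operate on vdev ...
 *		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
 *	}
 */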
4078 
4079 /**
4080  * dp_get_pdev_from_soc_pdev_id_wifi3() - Returns pdev object given the pdev id
4081  * @soc: core DP soc context
4082  * @pdev_id: pdev id from pdev object can be retrieved
4083  *
4084  * Return: struct dp_pdev*: Pointer to DP pdev object
4085  */
4086 static inline struct dp_pdev *
4087 dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc *soc,
4088 				   uint8_t pdev_id)
4089 {
4090 	if (qdf_unlikely(pdev_id >= MAX_PDEV_CNT))
4091 		return NULL;
4092 
4093 	return soc->pdev_list[pdev_id];
4094 }
4095 
4096 /**
4097  * dp_get_peer_mac_list(): function to get peer mac list of vdev
4098  * @soc: Datapath soc handle
4099  * @vdev_id: vdev id
4100  * @newmac: Table of the clients mac
4101  * @mac_cnt: No. of MACs required
4102  * @limit: Limit the number of clients
4103  *
4104  * Return: no of clients
4105  */
4106 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
4107 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
4108 			      u_int16_t mac_cnt, bool limit);
4109 
4110 /**
4111  * dp_update_num_mac_rings_for_dbs() - Update No of MAC rings based on
4112  *				       DBS check
4113  * @soc: DP SoC context
4114  * @max_mac_rings: Pointer to variable for No of MAC rings
4115  *
4116  * Return: None
4117  */
4118 void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc,
4119 				     int *max_mac_rings);
4120 
4121 
4122 #if defined(WLAN_SUPPORT_RX_FISA)
4123 /**
4124  * dp_rx_fst_update_cmem_params() - Update CMEM FST params
4125  * @soc:		DP SoC context
4126  * @num_entries:	Number of flow search entries
4127  * @cmem_ba_lo:		CMEM base address low
4128  * @cmem_ba_hi:		CMEM base address high
4129  *
4130  * Return: None
4131  */
4132 void dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries,
4133 				  uint32_t cmem_ba_lo, uint32_t cmem_ba_hi);
4134 
4135 /**
4136  * dp_fisa_config() - FISA config handler
4137  * @cdp_soc: CDP SoC handle
4138  * @pdev_id: PDEV ID
4139  * @config_id: FISA config ID
4140  * @cfg: FISA config msg data
 *
 * Return: QDF_STATUS
4141  */
4142 QDF_STATUS dp_fisa_config(ol_txrx_soc_handle cdp_soc, uint8_t pdev_id,
4143 			  enum cdp_fisa_config_id config_id,
4144 			  union cdp_fisa_config *cfg);
4145 #else
4146 static inline void
4147 dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries,
4148 			     uint32_t cmem_ba_lo, uint32_t cmem_ba_hi)
4149 {
4150 }
4151 #endif /* WLAN_SUPPORT_RX_FISA */
4152 
4153 #ifdef MAX_ALLOC_PAGE_SIZE
4154 /**
4155  * dp_set_max_page_size() - Set the max page size for hw link desc.
4156  * @pages: link desc page handle
4157  * @max_alloc_size: max_alloc_size
4158  *
4159  * For MCL the page size is set to the OS-defined value, while for
4160  * WIN it is set to the max_alloc_size cfg ini param.
4161  *
4162  * This ensures that WIN gets contiguous memory allocations as per
4163  * requirement.
4164  *
4165  * Return: None
4166  */
4167 static inline
4168 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
4169 			  uint32_t max_alloc_size)
4170 {
4171 	pages->page_size = qdf_page_size;
4172 }
4173 
4174 #else
4175 static inline
4176 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
4177 			  uint32_t max_alloc_size)
4178 {
4179 	pages->page_size = max_alloc_size;
4180 }
4181 #endif /* MAX_ALLOC_PAGE_SIZE */
4182 
4183 /**
4184  * dp_history_get_next_index() - get the index at which to record the
4185  *				 next entry in the history.
4186  * @curr_idx: Current index where the last entry is written.
4187  * @max_entries: Max number of entries in the history
4188  *
4189  * This function assumes that the max number of entries is a power of 2.
4190  *
4191  * Return: The index where the next entry is to be written.
4192  */
4193 static inline uint32_t dp_history_get_next_index(qdf_atomic_t *curr_idx,
4194 						 uint32_t max_entries)
4195 {
4196 	uint32_t idx = qdf_atomic_inc_return(curr_idx);
4197 
4198 	return idx & (max_entries - 1);
4199 }
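
/*
 * Worked example: with max_entries = 32 (a power of 2), successive calls
 * return qdf_atomic_inc_return(curr_idx) & 31, i.e. 1, 2, ..., 31, 0,
 * 1, ..., wrapping without a division. A non-power-of-2 max_entries
 * would break the mask and must not be used.
 */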
4200 
4201 /**
4202  * dp_rx_skip_tlvs() - Skip TLVs len + L3 padding, save in nbuf->cb
4203  * @soc: Datapath soc handle
4204  * @nbuf: nbuf cb to be updated
4205  * @l3_padding: L3 padding
4206  *
4207  * Return: None
4208  */
4209 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding);
4210 
4211 #ifndef FEATURE_WDS
4212 static inline void
4213 dp_hmwds_ast_add_notify(struct dp_peer *peer,
4214 			uint8_t *mac_addr,
4215 			enum cdp_txrx_ast_entry_type type,
4216 			QDF_STATUS err,
4217 			bool is_peer_map)
4218 {
4219 }
4220 #endif
4221 
4222 #ifdef HTT_STATS_DEBUGFS_SUPPORT
4223 /**
4224  * dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize
4225  * debugfs for HTT stats
4226  * @pdev: dp pdev handle
4227  *
4228  * Return: QDF_STATUS
4229  */
4230 QDF_STATUS dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev);
4231 
4232 /**
4233  * dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for
4234  * HTT stats
4235  * @pdev: dp pdev handle
4236  *
4237  * Return: none
4238  */
4239 void dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev);
4240 #else
4241 
4242 /**
4243  * dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize
4244  * debugfs for HTT stats
4245  * @pdev: dp pdev handle
4246  *
4247  * Return: QDF_STATUS
4248  */
4249 static inline QDF_STATUS
4250 dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev)
4251 {
4252 	return QDF_STATUS_SUCCESS;
4253 }
4254 
4255 /**
4256  * dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for
4257  * HTT stats
4258  * @pdev: dp pdev handle
4259  *
4260  * Return: none
4261  */
4262 static inline void
4263 dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev)
4264 {
4265 }
4266 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
4267 
4268 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
4269 /**
4270  * dp_soc_swlm_attach() - attach the software latency manager resources
4271  * @soc: Datapath global soc handle
4272  *
4273  * Return: QDF_STATUS
4274  */
4275 static inline QDF_STATUS dp_soc_swlm_attach(struct dp_soc *soc)
4276 {
4277 	return QDF_STATUS_SUCCESS;
4278 }
4279 
4280 /**
4281  * dp_soc_swlm_detach() - detach the software latency manager resources
4282  * @soc: Datapath global soc handle
4283  *
4284  * Return: QDF_STATUS
4285  */
4286 static inline QDF_STATUS dp_soc_swlm_detach(struct dp_soc *soc)
4287 {
4288 	return QDF_STATUS_SUCCESS;
4289 }
4290 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
4291 
4292 #ifndef WLAN_DP_PROFILE_SUPPORT
4293 static inline void wlan_dp_soc_cfg_sync_profile(struct cdp_soc_t *cdp_soc) {}
4294 
4295 static inline void wlan_dp_pdev_cfg_sync_profile(struct cdp_soc_t *cdp_soc,
4296 						 uint8_t pdev_id) {}
4297 #endif
4298 
4299 /**
4300  * dp_get_peer_id(): function to get peer id by mac
4301  * @soc: Datapath soc handle
4302  * @vdev_id: vdev id
4303  * @mac: Peer mac address
4304  *
4305  * Return: valid peer id on success
4306  *         HTT_INVALID_PEER on failure
4307  */
4308 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac);
4309 
4310 #ifdef QCA_SUPPORT_WDS_EXTENDED
4311 /**
4312  * dp_wds_ext_set_peer_rx(): function to set peer rx handler
4313  * @soc: Datapath soc handle
4314  * @vdev_id: vdev id
4315  * @mac: Peer mac address
4316  * @rx: rx function pointer
4317  * @osif_peer: OSIF peer handle
4318  *
4319  * Return: QDF_STATUS_SUCCESS on success
4320  *         QDF_STATUS_E_INVAL if peer is not found
4321  *         QDF_STATUS_E_ALREADY if rx is already set/unset
4322  */
4323 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
4324 				  uint8_t vdev_id,
4325 				  uint8_t *mac,
4326 				  ol_txrx_rx_fp rx,
4327 				  ol_osif_peer_handle osif_peer);
4328 
4329 /**
4330  * dp_wds_ext_get_peer_osif_handle(): function to get peer osif handle
4331  * @soc: Datapath soc handle
4332  * @vdev_id: vdev id
4333  * @mac: Peer mac address
4334  * @osif_peer: OSIF peer handle
4335  *
4336  * Return: QDF_STATUS_SUCCESS on success
4337  *         QDF_STATUS_E_INVAL if peer is not found
4338  */
4339 QDF_STATUS dp_wds_ext_get_peer_osif_handle(
4340 				ol_txrx_soc_handle soc,
4341 				uint8_t vdev_id,
4342 				uint8_t *mac,
4343 				ol_osif_peer_handle *osif_peer);
4344 
4345 #endif /* QCA_SUPPORT_WDS_EXTENDED */
4346 
4347 #ifdef DP_MEM_PRE_ALLOC
4348 
4349 /**
4350  * dp_context_alloc_mem() - allocate memory for DP context
4351  * @soc: datapath soc handle
4352  * @ctxt_type: DP context type
4353  * @ctxt_size: DP context size
4354  *
4355  * Return: DP context address
4356  */
4357 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
4358 			   size_t ctxt_size);
4359 
4360 /**
4361  * dp_context_free_mem() - Free memory of DP context
4362  * @soc: datapath soc handle
4363  * @ctxt_type: DP context type
4364  * @vaddr: Address of context memory
4365  *
4366  * Return: None
4367  */
4368 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
4369 			 void *vaddr);
4370 
4371 /**
4372  * dp_desc_multi_pages_mem_alloc() - alloc memory over multiple pages
4373  * @soc: datapath soc handle
4374  * @desc_type: memory request source type
4375  * @pages: multi page information storage
4376  * @element_size: each element size
4377  * @element_num: total number of elements should be allocated
4378  * @memctxt: memory context
4379  * @cacheable: coherent memory or cacheable memory
4380  *
4381  * This function is a wrapper for memory allocation over multiple
4382  * pages. If a dp prealloc method is registered, prealloc is tried
4383  * first; if that fails, it falls back to the regular path via
4384  * qdf_mem_multi_pages_alloc().
4385  *
4386  * Return: None
4387  */
4388 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
4389 				   enum qdf_dp_desc_type desc_type,
4390 				   struct qdf_mem_multi_page_t *pages,
4391 				   size_t element_size,
4392 				   uint32_t element_num,
4393 				   qdf_dma_context_t memctxt,
4394 				   bool cacheable);
4395 
4396 /**
4397  * dp_desc_multi_pages_mem_free() - free multiple pages memory
4398  * @soc: datapath soc handle
4399  * @desc_type: memory request source type
4400  * @pages: multi page information storage
4401  * @memctxt: memory context
4402  * @cacheable: coherent memory or cacheable memory
4403  *
4404  * This function is a wrapper for multi-page memory free. If the
4405  * memory came from the prealloc pool, it is put back into the pool;
4406  * otherwise it is freed via qdf_mem_multi_pages_free().
4407  *
4408  * Return: None
4409  */
4410 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
4411 				  enum qdf_dp_desc_type desc_type,
4412 				  struct qdf_mem_multi_page_t *pages,
4413 				  qdf_dma_context_t memctxt,
4414 				  bool cacheable);
4415 
4416 #else
4417 static inline
4418 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
4419 			   size_t ctxt_size)
4420 {
4421 	return qdf_mem_malloc(ctxt_size);
4422 }
4423 
4424 static inline
4425 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
4426 			 void *vaddr)
4427 {
4428 	qdf_mem_free(vaddr);
4429 }
4430 
4431 static inline
4432 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
4433 				   enum qdf_dp_desc_type desc_type,
4434 				   struct qdf_mem_multi_page_t *pages,
4435 				   size_t element_size,
4436 				   uint32_t element_num,
4437 				   qdf_dma_context_t memctxt,
4438 				   bool cacheable)
4439 {
4440 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
4441 				  element_num, memctxt, cacheable);
4442 }
4443 
4444 static inline
4445 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
4446 				  enum qdf_dp_desc_type desc_type,
4447 				  struct qdf_mem_multi_page_t *pages,
4448 				  qdf_dma_context_t memctxt,
4449 				  bool cacheable)
4450 {
4451 	qdf_mem_multi_pages_free(soc->osdev, pages,
4452 				 memctxt, cacheable);
4453 }
4454 #endif
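
/*
 * Usage sketch: allocations must be released through the matching
 * wrapper so preallocated memory returns to its pool instead of being
 * freed to the OS (DP_PDEV_TYPE below is just an illustrative ctxt type):
 *
 *	struct dp_pdev *pdev =
 *		dp_context_alloc_mem(soc, DP_PDEV_TYPE, sizeof(*pdev));
 *
 *	if (pdev) {
 *		... init and use pdev ...
 *		dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
 *	}
 */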
4455 
4456 /**
4457  * struct dp_frag_history_opaque_atomic - Opaque struct for adding a fragmented
4458  *					  history.
4459  * @index: atomic index
4460  * @num_entries_per_slot: Number of entries per slot
4461  * @allocated: is allocated or not
4462  * @entry: array of pointers to per-slot record arrays
4463  */
4464 struct dp_frag_history_opaque_atomic {
4465 	qdf_atomic_t index;
4466 	uint16_t num_entries_per_slot;
4467 	uint16_t allocated;
4468 	void *entry[0];
4469 };
4470 
4471 static inline QDF_STATUS
4472 dp_soc_frag_history_attach(struct dp_soc *soc, void *history_hdl,
4473 			   uint32_t max_slots, uint32_t max_entries_per_slot,
4474 			   uint32_t entry_size,
4475 			   bool attempt_prealloc, enum dp_ctxt_type ctxt_type)
4476 {
4477 	struct dp_frag_history_opaque_atomic *history =
4478 			(struct dp_frag_history_opaque_atomic *)history_hdl;
4479 	size_t alloc_size = max_entries_per_slot * entry_size;
4480 	int i;
4481 
4482 	for (i = 0; i < max_slots; i++) {
4483 		if (attempt_prealloc)
4484 			history->entry[i] = dp_context_alloc_mem(soc, ctxt_type,
4485 								 alloc_size);
4486 		else
4487 			history->entry[i] = qdf_mem_malloc(alloc_size);
4488 
4489 		if (!history->entry[i])
4490 			goto exit;
4491 	}
4492 
4493 	qdf_atomic_init(&history->index);
4494 	history->allocated = 1;
4495 	history->num_entries_per_slot = max_entries_per_slot;
4496 
4497 	return QDF_STATUS_SUCCESS;
4498 exit:
4499 	for (i = i - 1; i >= 0; i--) {
4500 		if (attempt_prealloc)
4501 			dp_context_free_mem(soc, ctxt_type, history->entry[i]);
4502 		else
4503 			qdf_mem_free(history->entry[i]);
4504 	}
4505 
4506 	return QDF_STATUS_E_NOMEM;
4507 }
4508 
4509 static inline
4510 void dp_soc_frag_history_detach(struct dp_soc *soc,
4511 				void *history_hdl, uint32_t max_slots,
4512 				bool attempt_prealloc,
4513 				enum dp_ctxt_type ctxt_type)
4514 {
4515 	struct dp_frag_history_opaque_atomic *history =
4516 			(struct dp_frag_history_opaque_atomic *)history_hdl;
4517 	int i;
4518 
4519 	for (i = 0; i < max_slots; i++) {
4520 		if (attempt_prealloc)
4521 			dp_context_free_mem(soc, ctxt_type, history->entry[i]);
4522 		else
4523 			qdf_mem_free(history->entry[i]);
4524 	}
4525 
4526 	history->allocated = 0;
4527 }
4528 
4529 /**
4530  * dp_get_frag_hist_next_atomic_idx() - get the next entry index to record an
4531  *					entry in a fragmented history with
4532  *					index being atomic.
4533  * @curr_idx: address of the current index where the last entry was written
4534  * @next_idx: pointer to update the next index
4535  * @slot: pointer to update the history slot to be selected
4536  * @slot_shift: bitwise right-shift applied to the index to get the slot
4537  * @max_entries_per_slot: Max number of entries in a slot of history
4538  * @max_entries: Total number of entries in the history (sum of all slots)
4539  *
4540  * This function assumes that the "max_entries_per_slot" and "max_entries"
4541  * are each a power of 2.
4542  *
4543  * Return: None
4544  */
4545 static inline void
4546 dp_get_frag_hist_next_atomic_idx(qdf_atomic_t *curr_idx, uint32_t *next_idx,
4547 				 uint16_t *slot, uint32_t slot_shift,
4548 				 uint32_t max_entries_per_slot,
4549 				 uint32_t max_entries)
4550 {
4551 	uint32_t idx;
4552 
4553 	idx = qdf_do_div_rem(qdf_atomic_inc_return(curr_idx), max_entries);
4554 
4555 	*slot = idx >> slot_shift;
4556 	*next_idx = idx & (max_entries_per_slot - 1);
4557 }
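
/*
 * Worked example: with max_entries_per_slot = 1024 (slot_shift = 10) and
 * max_entries = 4096 (4 slots), a raw counter value of 2050 gives
 * idx = 2050 % 4096 = 2050, *slot = 2050 >> 10 = 2, and
 * *next_idx = 2050 & 1023 = 2.
 */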
4558 
4559 #ifdef FEATURE_RUNTIME_PM
4560 /**
4561  * dp_runtime_get() - Get dp runtime refcount
4562  * @soc: Datapath soc handle
4563  *
4564  * Take the dp runtime refcount by incrementing an atomic variable; a
4565  * non-zero refcount makes runtime suspend wait for pending tx to flush.
4566  *
4567  * Return: Current refcount
4568  */
4569 static inline int32_t dp_runtime_get(struct dp_soc *soc)
4570 {
4571 	return qdf_atomic_inc_return(&soc->dp_runtime_refcount);
4572 }
4573 
4574 /**
4575  * dp_runtime_put() - Return dp runtime refcount
4576  * @soc: Datapath soc handle
4577  *
4578  * Release the dp runtime refcount by decrementing the atomic variable,
4579  * allowing dp runtime resume to finish.
4580  *
4581  * Return: Current refcount
4582  */
4583 static inline int32_t dp_runtime_put(struct dp_soc *soc)
4584 {
4585 	return qdf_atomic_dec_return(&soc->dp_runtime_refcount);
4586 }
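
/*
 * Usage sketch: bracket work that must complete before runtime suspend,
 * e.g. posting a tx descriptor (pseudo-steps for illustration):
 *
 *	dp_runtime_get(soc);
 *	... post descriptor to the hw ring ...
 *	dp_runtime_put(soc);
 *
 * The runtime-suspend path can then consult dp_runtime_get_refcount()
 * and defer while the count is non-zero.
 */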
4587 
4588 /**
4589  * dp_runtime_get_refcount() - Get dp runtime refcount
4590  * @soc: Datapath soc handle
4591  *
4592  * Get dp runtime refcount by reading the atomic variable
4593  *
4594  * Return: Current refcount
4595  */
4596 static inline int32_t dp_runtime_get_refcount(struct dp_soc *soc)
4597 {
4598 	return qdf_atomic_read(&soc->dp_runtime_refcount);
4599 }
4600 
4601 /**
4602  * dp_runtime_init() - Init DP related runtime PM clients and runtime refcount
4603  * @soc: Datapath soc handle
4604  *
4605  * Return: None
4606  */
4607 static inline void dp_runtime_init(struct dp_soc *soc)
4608 {
4609 	hif_rtpm_register(HIF_RTPM_ID_DP, NULL);
4610 	hif_rtpm_register(HIF_RTPM_ID_DP_RING_STATS, NULL);
4611 	qdf_atomic_init(&soc->dp_runtime_refcount);
4612 }
4613 
4614 /**
4615  * dp_runtime_deinit() - Deinit DP related runtime PM clients
4616  *
4617  * Return: None
4618  */
4619 static inline void dp_runtime_deinit(void)
4620 {
4621 	hif_rtpm_deregister(HIF_RTPM_ID_DP);
4622 	hif_rtpm_deregister(HIF_RTPM_ID_DP_RING_STATS);
4623 }
4624 
4625 /**
4626  * dp_runtime_pm_mark_last_busy() - Mark last busy when rx path in use
4627  * @soc: Datapath soc handle
4628  *
4629  * Return: None
4630  */
4631 static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc)
4632 {
4633 	soc->rx_last_busy = qdf_get_log_timestamp_usecs();
4634 
4635 	hif_rtpm_mark_last_busy(HIF_RTPM_ID_DP);
4636 }
4637 #else
4638 static inline int32_t dp_runtime_get(struct dp_soc *soc)
4639 {
4640 	return 0;
4641 }
4642 
4643 static inline int32_t dp_runtime_put(struct dp_soc *soc)
4644 {
4645 	return 0;
4646 }
4647 
4648 static inline void dp_runtime_init(struct dp_soc *soc)
4649 {
4650 }
4652 
4653 static inline void dp_runtime_deinit(void)
4654 {
4655 }
4656 
4657 static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc)
4658 {
4659 }
4660 #endif
4661 
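/**
 * dp_soc_get_con_mode() - Get the current QDF connection/driver mode
 * @soc: Datapath soc handle
 *
 * Queries the platform's get_con_mode callback when one is registered
 * in cdp_soc.ol_ops.
 *
 * Return: mode reported by the callback, or QDF_GLOBAL_MAX_MODE when no
 *	   callback is registered
 */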
4662 static inline enum QDF_GLOBAL_MODE dp_soc_get_con_mode(struct dp_soc *soc)
4663 {
4664 	if (soc->cdp_soc.ol_ops->get_con_mode)
4665 		return soc->cdp_soc.ol_ops->get_con_mode();
4666 
4667 	return QDF_GLOBAL_MAX_MODE;
4668 }
4669 
4670 /**
4671  * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
4672  *				processing
4673  * @pdev: Datapath PDEV handle
4674  *
 * Return: None
4675  */
4676 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev);
4677 
4678 /**
4679  * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
4680  *				processing
4681  * @pdev: Datapath PDEV handle
4682  *
4683  * Return: QDF_STATUS_SUCCESS: Success
4684  *         QDF_STATUS_E_NOMEM: Error
4685  */
4687 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev);
4688 
4689 /**
4690  * dp_peer_flush_frags() - Flush all fragments for a particular
4691  *  peer
4692  * @soc_hdl: data path soc handle
4693  * @vdev_id: vdev id
4694  * @peer_mac: peer mac address
4695  *
4696  * Return: None
4697  */
4698 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4699 			 uint8_t *peer_mac);
4700 
4701 /**
4702  * dp_soc_reset_mon_intr_mask() - reset mon intr mask
4703  * @soc: pointer to dp_soc handle
4704  *
4705  * Return: None
4706  */
4707 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc);
4708 
4709 /**
4710  * dp_txrx_get_soc_stats() - will return cdp_soc_stats
4711  * @soc_hdl: soc handle
4712  * @soc_stats: buffer to hold the values
4713  *
4714  * Return: QDF_STATUS_SUCCESS: Success
4715  *         QDF_STATUS_E_FAILURE: Error
4716  */
4717 QDF_STATUS dp_txrx_get_soc_stats(struct cdp_soc_t *soc_hdl,
4718 				 struct cdp_soc_stats *soc_stats);
4719 
4720 /**
4721  * dp_txrx_get_peer_delay_stats() - to get peer delay stats per TIDs
4722  * @soc_hdl: soc handle
4723  * @vdev_id: id of vdev handle
4724  * @peer_mac: mac of DP_PEER handle
4725  * @delay_stats: pointer to delay stats array
4726  *
4727  * Return: QDF_STATUS_SUCCESS: Success
4728  *         QDF_STATUS_E_FAILURE: Error
4729  */
4730 QDF_STATUS
4731 dp_txrx_get_peer_delay_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4732 			     uint8_t *peer_mac,
4733 			     struct cdp_delay_tid_stats *delay_stats);
4734 
4735 /**
4736  * dp_txrx_get_peer_jitter_stats() - to get peer jitter stats per TIDs
4737  * @soc_hdl: soc handle
4738  * @pdev_id: id of pdev handle
4739  * @vdev_id: id of vdev handle
4740  * @peer_mac: mac of DP_PEER handle
4741  * @tid_stats: pointer to jitter stats array
4742  *
4743  * Return: QDF_STATUS_SUCCESS: Success
4744  *         QDF_STATUS_E_FAILURE: Error
4745  */
4746 QDF_STATUS
4747 dp_txrx_get_peer_jitter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4748 			      uint8_t vdev_id, uint8_t *peer_mac,
4749 			      struct cdp_peer_tid_stats *tid_stats);
4750 
4751 /**
4752  * dp_peer_get_tx_capture_stats() - to get peer Tx Capture stats
4753  * @soc_hdl: soc handle
4754  * @vdev_id: id of vdev handle
4755  * @peer_mac: mac of DP_PEER handle
4756  * @stats: pointer to peer tx capture stats
4757  *
4758  * Return: QDF_STATUS_SUCCESS: Success
4759  *         QDF_STATUS_E_FAILURE: Error
4760  */
4761 QDF_STATUS
4762 dp_peer_get_tx_capture_stats(struct cdp_soc_t *soc_hdl,
4763 			     uint8_t vdev_id, uint8_t *peer_mac,
4764 			     struct cdp_peer_tx_capture_stats *stats);
4765 
4766 /**
4767  * dp_pdev_get_tx_capture_stats() - to get pdev Tx Capture stats
4768  * @soc_hdl: soc handle
4769  * @pdev_id: id of pdev handle
4770  * @stats: pointer to pdev tx capture stats
4771  *
4772  * Return: QDF_STATUS_SUCCESS: Success
4773  *         QDF_STATUS_E_FAILURE: Error
4774  */
4775 QDF_STATUS
4776 dp_pdev_get_tx_capture_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4777 			     struct cdp_pdev_tx_capture_stats *stats);
4778 
4779 #ifdef HW_TX_DELAY_STATS_ENABLE
4780 /**
4781  * dp_is_vdev_tx_delay_stats_enabled(): Check if tx delay stats
4782  *  is enabled for vdev
4783  * @vdev: dp vdev
4784  *
4785  * Return: true if tx delay stats is enabled for vdev else false
4786  */
4787 static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev)
4788 {
4789 	return vdev->hw_tx_delay_stats_enabled;
4790 }
4791 
4792 /**
4793  * dp_pdev_print_tx_delay_stats(): Print vdev tx delay stats
4794  *  for pdev
4795  * @soc: dp soc
4796  *
4797  * Return: None
4798  */
4799 void dp_pdev_print_tx_delay_stats(struct dp_soc *soc);
4800 
4801 /**
4802  * dp_pdev_clear_tx_delay_stats() - clear tx delay stats
4803  * @soc: soc handle
4804  *
4805  * Return: None
4806  */
4807 void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc);
4808 #else
4809 static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev)
4810 {
4811 	return 0;
4812 }
4813 
4814 static inline void dp_pdev_print_tx_delay_stats(struct dp_soc *soc)
4815 {
4816 }
4817 
4818 static inline void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc)
4819 {
4820 }
4821 #endif
4822 
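/**
 * dp_get_rx_hash_key_bytes() - Fill random Toeplitz hash seeds
 * @lro_hash: LRO hash config whose IPv4 and IPv6 seed arrays are filled
 *
 * Populates toeplitz_hash_ipv4 and toeplitz_hash_ipv6 with random bytes
 * for RX hash computation.
 *
 * Return: None
 */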
4823 static inline void
4824 dp_get_rx_hash_key_bytes(struct cdp_lro_hash_config *lro_hash)
4825 {
4826 	qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv4,
4827 			     (sizeof(lro_hash->toeplitz_hash_ipv4[0]) *
4828 			      LRO_IPV4_SEED_ARR_SZ));
4829 	qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv6,
4830 			     (sizeof(lro_hash->toeplitz_hash_ipv6[0]) *
4831 			      LRO_IPV6_SEED_ARR_SZ));
4832 }
4833 
4834 #ifdef WLAN_CONFIG_TELEMETRY_AGENT
4835 /**
4836  * dp_get_pdev_telemetry_stats- API to get pdev telemetry stats
4837  * @soc_hdl: soc handle
4838  * @pdev_id: id of pdev handle
4839  * @stats: pointer to pdev telemetry stats
4840  *
4841  * Return: QDF_STATUS_SUCCESS: Success
4842  *         QDF_STATUS_E_FAILURE: Error
4843  */
4844 QDF_STATUS
4845 dp_get_pdev_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4846 			    struct cdp_pdev_telemetry_stats *stats);
4847 
4848 /**
4849  * dp_get_peer_telemetry_stats() - API to get peer telemetry stats
4850  * @soc_hdl: soc handle
4851  * @addr: peer mac
4852  * @stats: pointer to peer telemetry stats
4853  *
4854  * Return: QDF_STATUS_SUCCESS: Success
4855  *         QDF_STATUS_E_FAILURE: Error
4856  */
4857 QDF_STATUS
4858 dp_get_peer_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t *addr,
4859 			    struct cdp_peer_telemetry_stats *stats);
4860 
4861 /**
4862  * dp_get_peer_deter_stats() - API to get peer deterministic stats
4863  * @soc_hdl: soc handle
4864  * @vdev_id: id of vdev handle
4865  * @addr: peer mac
4866  * @stats: pointer to peer deterministic stats
4867  *
4868  * Return: QDF_STATUS_SUCCESS: Success
4869  *         QDF_STATUS_E_FAILURE: Error
4870  */
4871 QDF_STATUS
4872 dp_get_peer_deter_stats(struct cdp_soc_t *soc_hdl,
4873 			uint8_t vdev_id,
4874 			uint8_t *addr,
4875 			struct cdp_peer_deter_stats *stats);
4876 
4877 /**
4878  * dp_get_pdev_deter_stats() - API to get pdev deterministic stats
4879  * @soc_hdl: soc handle
4880  * @pdev_id: id of pdev handle
4881  * @stats: pointer to pdev deterministic stats
4882  *
4883  * Return: QDF_STATUS_SUCCESS: Success
4884  *         QDF_STATUS_E_FAILURE: Error
4885  */
4886 QDF_STATUS
4887 dp_get_pdev_deter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4888 			struct cdp_pdev_deter_stats *stats);
4889 
4890 /**
4891  * dp_update_pdev_chan_util_stats() - API to update channel utilization stats
4892  * @soc_hdl: soc handle
4893  * @pdev_id: id of pdev handle
4894  * @ch_util: Pointer to channel util stats
4895  *
4896  * Return: QDF_STATUS_SUCCESS: Success
4897  *         QDF_STATUS_E_FAILURE: Error
4898  */
4899 QDF_STATUS
4900 dp_update_pdev_chan_util_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4901 			       struct cdp_pdev_chan_util_stats *ch_util);
4902 #endif /* WLAN_CONFIG_TELEMETRY_AGENT */
4903 
4904 #ifdef CONNECTIVITY_PKTLOG
4905 /**
4906  * dp_tx_send_pktlog() - send tx packet log
4907  * @soc: soc handle
4908  * @pdev: pdev handle
4909  * @tx_desc: TX software descriptor
4910  * @nbuf: nbuf
4911  * @status: status of tx packet
4912  *
4913  * This function is used to send tx packet for logging
4914  *
4915  * Return: None
4916  *
4917  */
4918 static inline
4919 void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
4920 		       struct dp_tx_desc_s *tx_desc,
4921 		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
4922 {
4923 	ol_txrx_pktdump_cb packetdump_cb = pdev->dp_tx_packetdump_cb;
4924 
4925 	if (qdf_unlikely(packetdump_cb) &&
4926 	    dp_tx_frm_std == tx_desc->frm_type) {
4927 		packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id,
4928 			      tx_desc->vdev_id, nbuf, status, QDF_TX_DATA_PKT);
4929 	}
4930 }
4931 
4932 /**
4933  * dp_rx_send_pktlog() - send rx packet log
4934  * @soc: soc handle
4935  * @pdev: pdev handle
4936  * @nbuf: nbuf
4937  * @status: status of rx packet
4938  *
4939  * This function is used to send rx packet for logging
4940  *
4941  * Return: None
4942  *
4943  */
4944 static inline
4945 void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
4946 		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
4947 {
4948 	ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb;
4949 
4950 	if (qdf_unlikely(packetdump_cb)) {
4951 		packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id,
4952 			      QDF_NBUF_CB_RX_VDEV_ID(nbuf),
4953 			      nbuf, status, QDF_RX_DATA_PKT);
4954 	}
4955 }
4956 
4957 /**
4958  * dp_rx_err_send_pktlog() - send rx error packet log
4959  * @soc: soc handle
4960  * @pdev: pdev handle
4961  * @mpdu_desc_info: MPDU descriptor info
4962  * @nbuf: nbuf
4963  * @status: status of rx packet
4964  * @set_pktlen: whether to set packet length
4965  *
4966  * This API should only be called when we have not removed
4967  * Rx TLV from head, and head is pointing to rx_tlv
4968  *
4969  * This function is used to send rx packet from error path
4970  * for logging for which rx packet tlv is not removed.
4971  *
4972  * Return: None
4973  *
4974  */
4975 static inline
4976 void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
4977 			   struct hal_rx_mpdu_desc_info *mpdu_desc_info,
4978 			   qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status,
4979 			   bool set_pktlen)
4980 {
4981 	ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb;
4982 	qdf_size_t skip_size;
4983 	uint16_t msdu_len, nbuf_len;
4984 	uint8_t *rx_tlv_hdr;
4985 	struct hal_rx_msdu_metadata msdu_metadata;
4986 
4987 	if (qdf_unlikely(packetdump_cb)) {
4988 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
4989 		nbuf_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
4990 							  rx_tlv_hdr);
4991 		hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr,
4992 					 &msdu_metadata);
4993 
4994 		if (mpdu_desc_info->bar_frame ||
4995 		    (mpdu_desc_info->mpdu_flags & HAL_MPDU_F_FRAGMENT))
4996 			skip_size = soc->rx_pkt_tlv_size;
4997 		else
4998 			skip_size = soc->rx_pkt_tlv_size +
4999 					msdu_metadata.l3_hdr_pad;
5000 
5001 		if (set_pktlen) {
5002 			msdu_len = nbuf_len + skip_size;
5003 			qdf_nbuf_set_pktlen(nbuf, qdf_min(msdu_len,
5004 					    (uint16_t)RX_DATA_BUFFER_SIZE));
5005 		}
5006 
5007 		qdf_nbuf_pull_head(nbuf, skip_size);
5008 		packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id,
5009 			      QDF_NBUF_CB_RX_VDEV_ID(nbuf),
5010 			      nbuf, status, QDF_RX_DATA_PKT);
5011 		qdf_nbuf_push_head(nbuf, skip_size);
5012 	}
5013 }
5014 
5015 #else
5016 static inline
5017 void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
5018 		       struct dp_tx_desc_s *tx_desc,
5019 		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
5020 {
5021 }
5022 
5023 static inline
5024 void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
5025 		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
5026 {
5027 }
5028 
5029 static inline
5030 void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
5031 			   struct hal_rx_mpdu_desc_info *mpdu_desc_info,
5032 			   qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status,
5033 			   bool set_pktlen)
5034 {
5035 }
5036 #endif
5037 
5038 /**
5039  * dp_pdev_update_fast_rx_flag() - Update Fast rx flag for a PDEV
5040  * @soc: Data path soc handle
5041  * @pdev: PDEV handle
5042  *
5043  * Return: None
5044  */
5045 void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev);
5046 
5047 #ifdef FEATURE_DIRECT_LINK
5048 /**
5049  * dp_setup_direct_link_refill_ring(): Setup direct link refill ring for pdev
5050  * @soc_hdl: DP SOC handle
5051  * @pdev_id: pdev id
5052  *
5053  * Return: Handle to SRNG
5054  */
5055 struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
5056 						 uint8_t pdev_id);
5057 
5058 /**
5059  * dp_destroy_direct_link_refill_ring(): Destroy direct link refill ring for
5060  *  pdev
5061  * @soc_hdl: DP SOC handle
5062  * @pdev_id: pdev id
5063  *
5064  * Return: None
5065  */
5066 void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
5067 					uint8_t pdev_id);
5068 #else
5069 static inline
5070 struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
5071 						 uint8_t pdev_id)
5072 {
5073 	return NULL;
5074 }
5075 
5076 static inline
5077 void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
5078 					uint8_t pdev_id)
5079 {
5080 }
5081 #endif
5082 
5083 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY
5084 static inline
5085 void dp_cfg_event_record(struct dp_soc *soc,
5086 			 enum dp_cfg_event_type event,
5087 			 union dp_cfg_event_desc *cfg_event_desc)
5088 {
5089 	struct dp_cfg_event_history *cfg_event_history =
5090 						&soc->cfg_event_history;
5091 	struct dp_cfg_event *entry;
5092 	uint32_t idx;
5093 	uint16_t slot;
5094 
5095 	dp_get_frag_hist_next_atomic_idx(&cfg_event_history->index, &idx,
5096 					 &slot,
5097 					 DP_CFG_EVT_HIST_SLOT_SHIFT,
5098 					 DP_CFG_EVT_HIST_PER_SLOT_MAX,
5099 					 DP_CFG_EVT_HISTORY_SIZE);
5100 
5101 	entry = &cfg_event_history->entry[slot][idx];
5102 
5103 	entry->timestamp = qdf_get_log_timestamp();
5104 	entry->type = event;
5105 	qdf_mem_copy(&entry->event_desc, cfg_event_desc,
5106 		     sizeof(entry->event_desc));
5107 }
5108 
5109 static inline void
5110 dp_cfg_event_record_vdev_evt(struct dp_soc *soc, enum dp_cfg_event_type event,
5111 			     struct dp_vdev *vdev)
5112 {
5113 	union dp_cfg_event_desc cfg_evt_desc = {0};
5114 	struct dp_vdev_attach_detach_desc *vdev_evt =
5115 						&cfg_evt_desc.vdev_evt;
5116 
5117 	if (qdf_unlikely(event != DP_CFG_EVENT_VDEV_ATTACH &&
5118 			 event != DP_CFG_EVENT_VDEV_UNREF_DEL &&
5119 			 event != DP_CFG_EVENT_VDEV_DETACH)) {
5120 		qdf_assert_always(0);
5121 		return;
5122 	}
5123 
5124 	vdev_evt->vdev = vdev;
5125 	vdev_evt->vdev_id = vdev->vdev_id;
5126 	vdev_evt->ref_count = qdf_atomic_read(&vdev->ref_cnt);
5127 	vdev_evt->mac_addr = vdev->mac_addr;
5128 
5129 	dp_cfg_event_record(soc, event, &cfg_evt_desc);
5130 }
5131 
5132 static inline void
5133 dp_cfg_event_record_peer_evt(struct dp_soc *soc, enum dp_cfg_event_type event,
5134 			     struct dp_peer *peer, struct dp_vdev *vdev,
5135 			     uint8_t is_reuse)
5136 {
5137 	union dp_cfg_event_desc cfg_evt_desc = {0};
5138 	struct dp_peer_cmn_ops_desc *peer_evt = &cfg_evt_desc.peer_cmn_evt;
5139 
5140 	if (qdf_unlikely(event != DP_CFG_EVENT_PEER_CREATE &&
5141 			 event != DP_CFG_EVENT_PEER_DELETE &&
5142 			 event != DP_CFG_EVENT_PEER_UNREF_DEL)) {
5143 		qdf_assert_always(0);
5144 		return;
5145 	}
5146 
5147 	peer_evt->peer = peer;
5148 	peer_evt->vdev = vdev;
5149 	peer_evt->vdev_id = vdev->vdev_id;
5150 	peer_evt->is_reuse = is_reuse;
5151 	peer_evt->peer_ref_count = qdf_atomic_read(&peer->ref_cnt);
5152 	peer_evt->vdev_ref_count = qdf_atomic_read(&vdev->ref_cnt);
5153 	peer_evt->mac_addr = peer->mac_addr;
5154 	peer_evt->vdev_mac_addr = vdev->mac_addr;
5155 
5156 	dp_cfg_event_record(soc, event, &cfg_evt_desc);
5157 }
5158 
5159 static inline void
5160 dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc *soc,
5161 					enum dp_cfg_event_type event,
5162 					struct dp_peer *mld_peer,
5163 					struct dp_peer *link_peer,
5164 					uint8_t idx, uint8_t result)
5165 {
5166 	union dp_cfg_event_desc cfg_evt_desc = {0};
5167 	struct dp_mlo_add_del_link_desc *mlo_link_delink_evt =
5168 					&cfg_evt_desc.mlo_link_delink_evt;
5169 
5170 	if (qdf_unlikely(event != DP_CFG_EVENT_MLO_ADD_LINK &&
5171 			 event != DP_CFG_EVENT_MLO_DEL_LINK)) {
5172 		qdf_assert_always(0);
5173 		return;
5174 	}
5175 
5176 	mlo_link_delink_evt->link_peer = link_peer;
5177 	mlo_link_delink_evt->mld_peer = mld_peer;
5178 	mlo_link_delink_evt->link_mac_addr = link_peer->mac_addr;
5179 	mlo_link_delink_evt->mld_mac_addr = mld_peer->mac_addr;
5180 	mlo_link_delink_evt->num_links = mld_peer->num_links;
5181 	mlo_link_delink_evt->action_result = result;
5182 	mlo_link_delink_evt->idx = idx;
5183 
5184 	dp_cfg_event_record(soc, event, &cfg_evt_desc);
5185 }
5186 
5187 static inline void
5188 dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc *soc,
5189 					      struct dp_peer *mld_peer,
5190 					      struct dp_vdev *prev_vdev,
5191 					      struct dp_vdev *new_vdev)
5192 {
5193 	union dp_cfg_event_desc cfg_evt_desc = {0};
5194 	struct dp_mlo_setup_vdev_update_desc *vdev_update_evt =
5195 					&cfg_evt_desc.mlo_setup_vdev_update;
5196 
5197 	vdev_update_evt->mld_peer = mld_peer;
5198 	vdev_update_evt->prev_vdev = prev_vdev;
5199 	vdev_update_evt->new_vdev = new_vdev;
5200 
5201 	dp_cfg_event_record(soc, DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE,
5202 			    &cfg_evt_desc);
5203 }
5204 
5205 static inline void
5206 dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc *soc,
5207 				       enum dp_cfg_event_type event,
5208 				       struct dp_peer *peer,
5209 				       uint8_t *mac_addr,
5210 				       uint8_t is_ml_peer,
5211 				       uint16_t peer_id, uint16_t ml_peer_id,
5212 				       uint16_t hw_peer_id, uint8_t vdev_id)
5213 {
5214 	union dp_cfg_event_desc cfg_evt_desc = {0};
5215 	struct dp_rx_peer_map_unmap_desc *peer_map_unmap_evt =
5216 					&cfg_evt_desc.peer_map_unmap_evt;
5217 
5218 	if (qdf_unlikely(event != DP_CFG_EVENT_PEER_MAP &&
5219 			 event != DP_CFG_EVENT_PEER_UNMAP &&
5220 			 event != DP_CFG_EVENT_MLO_PEER_MAP &&
5221 			 event != DP_CFG_EVENT_MLO_PEER_UNMAP)) {
5222 		qdf_assert_always(0);
5223 		return;
5224 	}
5225 
5226 	peer_map_unmap_evt->peer_id = peer_id;
5227 	peer_map_unmap_evt->ml_peer_id = ml_peer_id;
5228 	peer_map_unmap_evt->hw_peer_id = hw_peer_id;
5229 	peer_map_unmap_evt->vdev_id = vdev_id;
5230 	/* Peer may be NULL at times, but it's not an issue. */
5231 	peer_map_unmap_evt->peer = peer;
5232 	peer_map_unmap_evt->is_ml_peer = is_ml_peer;
5233 	qdf_mem_copy(&peer_map_unmap_evt->mac_addr.raw, mac_addr,
5234 		     QDF_MAC_ADDR_SIZE);
5235 
5236 	dp_cfg_event_record(soc, event, &cfg_evt_desc);
5237 }
5238 
5239 static inline void
5240 dp_cfg_event_record_peer_setup_evt(struct dp_soc *soc,
5241 				   enum dp_cfg_event_type event,
5242 				   struct dp_peer *peer,
5243 				   struct dp_vdev *vdev,
5244 				   uint8_t vdev_id,
5245 				   struct cdp_peer_setup_info *peer_setup_info)
5246 {
5247 	union dp_cfg_event_desc cfg_evt_desc = {0};
5248 	struct dp_peer_setup_desc *peer_setup_evt =
5249 					&cfg_evt_desc.peer_setup_evt;
5250 
5251 	if (qdf_unlikely(event != DP_CFG_EVENT_PEER_SETUP &&
5252 			 event != DP_CFG_EVENT_MLO_SETUP)) {
5253 		qdf_assert_always(0);
5254 		return;
5255 	}
5256 
5257 	peer_setup_evt->peer = peer;
5258 	peer_setup_evt->vdev = vdev;
5259 	if (vdev)
5260 		peer_setup_evt->vdev_ref_count = qdf_atomic_read(&vdev->ref_cnt);
5261 	peer_setup_evt->mac_addr = peer->mac_addr;
5262 	peer_setup_evt->vdev_id = vdev_id;
5263 	if (peer_setup_info) {
5264 		peer_setup_evt->is_first_link = peer_setup_info->is_first_link;
5265 		peer_setup_evt->is_primary_link = peer_setup_info->is_primary_link;
5266 		qdf_mem_copy(peer_setup_evt->mld_mac_addr.raw,
5267 			     peer_setup_info->mld_peer_mac,
5268 			     QDF_MAC_ADDR_SIZE);
5269 	}
5270 
5271 	dp_cfg_event_record(soc, event, &cfg_evt_desc);
5272 }
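
/*
 * Illustrative usage sketch: a peer-setup path could record the event
 * with optional MLO setup info. A NULL peer_setup_info for non-MLO
 * peers is safe, since its fields are only copied when the pointer is
 * non-NULL.
 *
 *	dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_PEER_SETUP,
 *					   peer, vdev, vdev_id,
 *					   setup_info);
 */
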
5273 #else
5274 
5275 static inline void
5276 dp_cfg_event_record_vdev_evt(struct dp_soc *soc, enum dp_cfg_event_type event,
5277 			     struct dp_vdev *vdev)
5278 {
5279 }
5280 
5281 static inline void
5282 dp_cfg_event_record_peer_evt(struct dp_soc *soc, enum dp_cfg_event_type event,
5283 			     struct dp_peer *peer, struct dp_vdev *vdev,
5284 			     uint8_t is_reuse)
5285 {
5286 }
5287 
5288 static inline void
5289 dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc *soc,
5290 					enum dp_cfg_event_type event,
5291 					struct dp_peer *mld_peer,
5292 					struct dp_peer *link_peer,
5293 					uint8_t idx, uint8_t result)
5294 {
5295 }
5296 
5297 static inline void
5298 dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc *soc,
5299 					      struct dp_peer *mld_peer,
5300 					      struct dp_vdev *prev_vdev,
5301 					      struct dp_vdev *new_vdev)
5302 {
5303 }
5304 
5305 static inline void
5306 dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc *soc,
5307 				       enum dp_cfg_event_type event,
5308 				       struct dp_peer *peer,
5309 				       uint8_t *mac_addr,
5310 				       uint8_t is_ml_peer,
5311 				       uint16_t peer_id, uint16_t ml_peer_id,
5312 				       uint16_t hw_peer_id, uint8_t vdev_id)
5313 {
5314 }
5315 
5316 static inline void
5317 dp_cfg_event_record_peer_setup_evt(struct dp_soc *soc,
5318 				   enum dp_cfg_event_type event,
5319 				   struct dp_peer *peer,
5320 				   struct dp_vdev *vdev,
5321 				   uint8_t vdev_id,
5322 				   struct cdp_peer_setup_info *peer_setup_info)
5323 {
5324 }
5325 #endif
5326 
5327 #ifndef WLAN_SOFTUMAC_SUPPORT
5328 /**
5329  * dp_soc_interrupt_detach() - Deregister interrupts and free related allocations
5330  * @txrx_soc: DP SOC handle
5331  *
5332  * Return: none
5333  */
5334 void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc);
5335 #endif
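
/*
 * Illustrative usage sketch: interrupt detach is driven through the cdp
 * soc handle; the cast below assumes the usual dp/cdp handle aliasing
 * used by the datapath.
 *
 *	dp_soc_interrupt_detach((struct cdp_soc_t *)soc);
 */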
5336 
5337 /**
5338  * dp_get_peer_stats() - Get peer stats
5339  * @peer: Datapath peer
5340  * @peer_stats: buffer for peer stats
5341  *
5342  * Return: none
5343  */
5344 void dp_get_peer_stats(struct dp_peer *peer,
5345 		       struct cdp_peer_stats *peer_stats);
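
/*
 * Illustrative usage sketch: the caller owns the stats buffer; zeroing
 * it first is an assumption made here, not a documented requirement.
 *
 *	struct cdp_peer_stats stats = {0};
 *
 *	dp_get_peer_stats(peer, &stats);
 */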
5346 
5347 /**
5348  * dp_get_per_link_peer_stats() - Get per-link peer stats
5349  * @peer: Datapath peer
5350  * @peer_stats: buffer for peer stats
5351  * @peer_type: Peer type
5352  * @num_link: Number of ML links
5353  *
5354  * Return: status success/failure
5355  */
5356 QDF_STATUS dp_get_per_link_peer_stats(struct dp_peer *peer,
5357 				      struct cdp_peer_stats *peer_stats,
5358 				      enum cdp_peer_type peer_type,
5359 				      uint8_t num_link);
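
/*
 * Illustrative usage sketch: fetching stats for a single link of an ML
 * peer; the CDP_LINK_PEER_TYPE enumerator name is an assumption here.
 *
 *	struct cdp_peer_stats stats = {0};
 *	QDF_STATUS status;
 *
 *	status = dp_get_per_link_peer_stats(peer, &stats,
 *					    CDP_LINK_PEER_TYPE, 1);
 *	if (!QDF_IS_STATUS_SUCCESS(status))
 *		return status;
 */
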
5360 /**
5361  * dp_get_peer_hw_link_id() - get peer hardware link id
5362  * @soc: soc handle
5363  * @pdev: data path pdev
5364  *
5365  * Return: link_id
5366  * Return: hw link id plus one when peer link stats are enabled, else 0
5367 static inline int
5368 dp_get_peer_hw_link_id(struct dp_soc *soc,
5369 		       struct dp_pdev *pdev)
5370 {
5371 	if (wlan_cfg_is_peer_link_stats_enabled(soc->wlan_cfg_ctx))
5372 		return ((soc->arch_ops.get_hw_link_id(pdev)) + 1);
5373 
5374 	return 0;
5375 }
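
/*
 * Illustrative usage sketch: because the returned id is offset by one,
 * a return of 0 can be treated as "no link id" (per-link stats
 * disabled).
 *
 *	uint8_t link_id = dp_get_peer_hw_link_id(soc, pdev);
 *
 *	if (link_id)
 *		update_link_stats(link_id);
 *
 * where update_link_stats() is a hypothetical consumer.
 */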
5376 
5377 #ifdef QCA_MULTIPASS_SUPPORT
5378 /**
5379  * dp_tx_remove_vlan_tag() - Remove the 4-byte VLAN tag from a frame
5380  * @vdev: DP vdev handle
5381  * @nbuf: network buffer
5382  *
5383  * Return: void
5384  */
5385 void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
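
/*
 * Illustrative usage sketch: in a multipass tx path, the tag could be
 * stripped in place before the frame is queued to the target; nbuf is
 * assumed to carry an 802.1Q-tagged ethernet header.
 *
 *	dp_tx_remove_vlan_tag(vdev, nbuf);
 */
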
5386 #endif
5387 
5388 /**
5389  * dp_print_per_link_stats() - Print per-link peer stats.
5390  * @soc_hdl: soc handle.
5391  * @vdev_id: vdev_id.
5392  *
5393  * Return: None.
5394  */
5395 void dp_print_per_link_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
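
/*
 * Illustrative usage sketch: dumping per-link stats for all peers of a
 * vdev, e.g. from a debug command handler.
 *
 *	dp_print_per_link_stats((struct cdp_soc_t *)soc, vdev_id);
 */
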
5396 #endif /* #ifndef _DP_INTERNAL_H_ */
5397