xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_internal.h (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _DP_INTERNAL_H_
20 #define _DP_INTERNAL_H_
21 
22 #include "dp_types.h"
23 
24 #define RX_BUFFER_SIZE_PKTLOG_LITE 1024
25 
26 #define DP_PEER_WDS_COUNT_INVALID UINT_MAX
27 
28 /* Alignment for consistent memory for DP rings*/
29 #define DP_RING_BASE_ALIGN 32
30 
31 #define DP_RSSI_INVAL 0x80
32 #define DP_RSSI_AVG_WEIGHT 2
33 /*
34  * Formula to derive avg_rssi is taken from the wifi2.0 firmware
35  */
36 #define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \
37 	(((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \
38 	+ ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT)))
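/*
 * Editorial note (illustrative, not from the original source): with
 * DP_RSSI_AVG_WEIGHT = 2 this is an exponential moving average with a
 * 1/4 step, i.e. avg = avg - avg/4 + last/4. For example, with
 * avg_rssi = 80 and last_rssi = 60: 80 - 20 + 15 = 75.
 */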
39 
40 /* Macro for NYSM value received in VHT TLV */
41 #define VHT_SGI_NYSM 3
42 
43 /** struct htt_dbgfs_cfg - structure to maintain required htt data
44  * @msg_word: htt msg sent to upper layer
45  * @m: qdf debugfs file pointer
46  */
47 struct htt_dbgfs_cfg {
48 	uint32_t *msg_word;
49 	qdf_debugfs_file_t m;
50 };
51 
52 /* Cookie MSB bits assigned for different use cases.
53  * Note: Users must not use the last 3 bits, as they are reserved
54  * for the pdev_id in case the number of pdevs grows beyond 3.
55  */
56 /* Reserve for default case */
57 #define DBG_STATS_COOKIE_DEFAULT 0x0
58 
59 /* Reserve for DP Stats: 3rd bit */
60 #define DBG_STATS_COOKIE_DP_STATS 0x8
61 
62 /* Reserve for HTT Stats debugfs support: 4th bit */
63 #define DBG_STATS_COOKIE_HTT_DBGFS 0x10
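/*
 * Editorial sketch (assumption, not from the original source): a stats
 * cookie MSB is expected to combine one of the markers above with the
 * pdev_id in its low 3 bits, e.g.:
 *
 *	int cookie_msb = DBG_STATS_COOKIE_HTT_DBGFS | pdev->pdev_id;
 */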
64 
65 /**
66  * Bitmap of HTT PPDU TLV types for Default mode
67  */
68 #define HTT_PPDU_DEFAULT_TLV_BITMAP \
69 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
70 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
71 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
72 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
73 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
74 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))
75 
76 /* PPDU STATS CFG */
77 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
78 
79 /* PPDU stats mask sent to FW to enable enhanced stats */
80 #define DP_PPDU_STATS_CFG_ENH_STATS \
81 	((HTT_PPDU_DEFAULT_TLV_BITMAP) | \
82 	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
83 	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
84 	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))
85 
86 /* PPDU stats mask sent to FW to support debug sniffer feature */
87 #define DP_PPDU_STATS_CFG_SNIFFER \
88 	((HTT_PPDU_DEFAULT_TLV_BITMAP) | \
89 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
90 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV) | \
91 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
92 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
93 	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
96 	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
97 	(1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
98 	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))
99 
100 /* PPDU stats mask sent to FW to support BPR feature*/
101 #define DP_PPDU_STATS_CFG_BPR \
102 	((1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
103 	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))
104 
105 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
106 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
107 				   DP_PPDU_STATS_CFG_ENH_STATS)
108 /* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
109 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
110 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
111 
112 /**
113  * Bitmap of HTT PPDU delayed ba TLV types for Default mode
114  */
115 #define HTT_PPDU_DELAYED_BA_TLV_BITMAP \
116 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
117 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
118 	(1 << HTT_PPDU_STATS_USR_RATE_TLV))
119 
120 /**
121  * Bitmap of HTT PPDU TLV types for PPDU status
122  */
123 #define HTT_PPDU_STATUS_TLV_BITMAP \
124 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
125 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))
126 
127 /**
128  * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64
129  */
130 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \
131 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
132 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
133 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
134 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
135 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
136 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
137 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
138 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV))
139 
140 /**
141  * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256
142  */
143 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \
144 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
145 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
146 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
147 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
148 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
149 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
150 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
151 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV))
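/*
 * Editorial sketch (illustrative): membership of a TLV type in one of
 * the bitmaps above is tested via its bit position, e.g.:
 *
 *	if (HTT_PPDU_DEFAULT_TLV_BITMAP &
 *	    (1 << HTT_PPDU_STATS_COMMON_TLV))
 *		handle_common_tlv();	// hypothetical handler
 */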
152 
153 QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc);
154 QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc);
155 
156 #ifdef MONITOR_MODULARIZED_ENABLE
157 static inline bool dp_monitor_modularized_enable(void)
158 {
159 	return TRUE;
160 }
161 
162 static inline QDF_STATUS
163 dp_mon_soc_attach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; }
164 
165 static inline QDF_STATUS
166 dp_mon_soc_detach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; }
167 #else
168 static inline bool dp_monitor_modularized_enable(void)
169 {
170 	return FALSE;
171 }
172 
173 static inline QDF_STATUS dp_mon_soc_attach_wrapper(struct dp_soc *soc)
174 {
175 	return dp_mon_soc_attach(soc);
176 }
177 
178 static inline QDF_STATUS dp_mon_soc_detach_wrapper(struct dp_soc *soc)
179 {
180 	return dp_mon_soc_detach(soc);
181 }
182 #endif
183 
184 #ifndef WIFI_MONITOR_SUPPORT
185 #define MON_BUF_MIN_ENTRIES 64
186 
187 static inline QDF_STATUS dp_monitor_pdev_attach(struct dp_pdev *pdev)
188 {
189 	return QDF_STATUS_SUCCESS;
190 }
191 
192 static inline QDF_STATUS dp_monitor_pdev_detach(struct dp_pdev *pdev)
193 {
194 	return QDF_STATUS_SUCCESS;
195 }
196 
197 static inline QDF_STATUS dp_monitor_vdev_attach(struct dp_vdev *vdev)
198 {
199 	return QDF_STATUS_E_FAILURE;
200 }
201 
202 static inline QDF_STATUS dp_monitor_vdev_detach(struct dp_vdev *vdev)
203 {
204 	return QDF_STATUS_E_FAILURE;
205 }
206 
207 static inline QDF_STATUS dp_monitor_peer_attach(struct dp_soc *soc,
208 						struct dp_peer *peer)
209 {
210 	return QDF_STATUS_SUCCESS;
211 }
212 
213 static inline QDF_STATUS dp_monitor_peer_detach(struct dp_soc *soc,
214 						struct dp_peer *peer)
215 {
216 	return QDF_STATUS_E_FAILURE;
217 }
218 
219 static inline QDF_STATUS dp_monitor_pdev_init(struct dp_pdev *pdev)
220 {
221 	return QDF_STATUS_SUCCESS;
222 }
223 
224 static inline QDF_STATUS dp_monitor_pdev_deinit(struct dp_pdev *pdev)
225 {
226 	return QDF_STATUS_SUCCESS;
227 }
228 
229 static inline QDF_STATUS dp_monitor_soc_cfg_init(struct dp_soc *soc)
230 {
231 	return QDF_STATUS_SUCCESS;
232 }
233 
234 static inline QDF_STATUS dp_monitor_config_debug_sniffer(struct dp_pdev *pdev,
235 							 int val)
236 {
237 	return QDF_STATUS_E_FAILURE;
238 }
239 
240 static inline void dp_monitor_flush_rings(struct dp_soc *soc)
241 {
242 }
243 
244 static inline QDF_STATUS dp_monitor_htt_srng_setup(struct dp_soc *soc,
245 						   struct dp_pdev *pdev,
246 						   int mac_id,
247 						   int mac_for_pdev)
248 {
249 	return QDF_STATUS_SUCCESS;
250 }
251 
252 static inline void dp_monitor_service_mon_rings(struct dp_soc *soc,
253 						uint32_t quota)
254 {
255 }
256 
257 static inline
258 uint32_t dp_monitor_process(struct dp_soc *soc, struct dp_intr *int_ctx,
259 			    uint32_t mac_id, uint32_t quota)
260 {
261 	return 0;
262 }
263 
264 static inline
265 uint32_t dp_monitor_drop_packets_for_mac(struct dp_pdev *pdev,
266 					 uint32_t mac_id, uint32_t quota)
267 {
268 	return 0;
269 }
270 
271 static inline void dp_monitor_peer_tx_init(struct dp_pdev *pdev,
272 					   struct dp_peer *peer)
273 {
274 }
275 
276 static inline void dp_monitor_peer_tx_cleanup(struct dp_vdev *vdev,
277 					      struct dp_peer *peer)
278 {
279 }
280 
281 static inline
282 void dp_monitor_peer_tid_peer_id_update(struct dp_soc *soc,
283 					struct dp_peer *peer,
284 					uint16_t peer_id)
285 {
286 }
287 
288 static inline void dp_monitor_tx_ppdu_stats_attach(struct dp_pdev *pdev)
289 {
290 }
291 
292 static inline void dp_monitor_tx_ppdu_stats_detach(struct dp_pdev *pdev)
293 {
294 }
295 
296 static inline
297 QDF_STATUS dp_monitor_tx_capture_debugfs_init(struct dp_pdev *pdev)
298 {
299 	return QDF_STATUS_SUCCESS;
300 }
301 
302 static inline void dp_monitor_peer_tx_capture_filter_check(struct dp_pdev *pdev,
303 							   struct dp_peer *peer)
304 {
305 }
306 
307 static inline
308 QDF_STATUS dp_monitor_tx_add_to_comp_queue(struct dp_soc *soc,
309 					   struct dp_tx_desc_s *desc,
310 					   struct hal_tx_completion_status *ts,
311 					   struct dp_peer *peer)
312 {
313 	return QDF_STATUS_E_FAILURE;
314 }
315 
316 static inline
317 QDF_STATUS monitor_update_msdu_to_list(struct dp_soc *soc,
318 				       struct dp_pdev *pdev,
319 				       struct dp_peer *peer,
320 				       struct hal_tx_completion_status *ts,
321 				       qdf_nbuf_t netbuf)
322 {
323 	return QDF_STATUS_E_FAILURE;
324 }
325 
326 static inline bool dp_monitor_ppdu_stats_ind_handler(struct htt_soc *soc,
327 						     uint32_t *msg_word,
328 						     qdf_nbuf_t htt_t2h_msg)
329 {
330 	return true;
331 }
332 
333 static inline QDF_STATUS dp_monitor_htt_ppdu_stats_attach(struct dp_pdev *pdev)
334 {
335 	return QDF_STATUS_SUCCESS;
336 }
337 
338 static inline void dp_monitor_htt_ppdu_stats_detach(struct dp_pdev *pdev)
339 {
340 }
341 
342 static inline void dp_monitor_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
343 {
344 }
345 
346 static inline QDF_STATUS dp_monitor_config_enh_tx_capture(struct dp_pdev *pdev,
347 							  uint32_t val)
348 {
349 	return QDF_STATUS_E_INVAL;
350 }
351 
352 static inline QDF_STATUS dp_monitor_config_enh_rx_capture(struct dp_pdev *pdev,
353 							  uint32_t val)
354 {
355 	return QDF_STATUS_E_INVAL;
356 }
357 
358 static inline
359 QDF_STATUS dp_monitor_set_bpr_enable(struct dp_pdev *pdev, uint32_t val)
360 {
361 	return QDF_STATUS_E_FAILURE;
362 }
363 
364 static inline
365 int dp_monitor_set_filter_neigh_peers(struct dp_pdev *pdev, bool val)
366 {
367 	return 0;
368 }
369 
370 static inline
371 void dp_monitor_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
372 {
373 }
374 
375 static inline
376 void dp_monitor_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
377 {
378 }
379 
380 static inline
381 bool dp_monitor_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
382 {
383 	return false;
384 }
385 
386 static inline
387 bool dp_monitor_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
388 {
389 	return false;
390 }
391 
392 static inline
393 bool dp_monitor_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
394 {
395 	return false;
396 }
397 
398 static inline
399 int dp_monitor_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
400 				bool enable)
401 {
402 	return 0;
403 }
404 
405 static inline void dp_monitor_pktlogmod_exit(struct dp_pdev *pdev)
406 {
407 }
408 
409 static inline
410 void dp_monitor_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev)
411 {
412 }
413 
414 static inline
415 void dp_monitor_neighbour_peers_detach(struct dp_pdev *pdev)
416 {
417 }
418 
419 static inline QDF_STATUS dp_monitor_filter_neighbour_peer(struct dp_pdev *pdev,
420 							  uint8_t *rx_pkt_hdr)
421 {
422 	return QDF_STATUS_E_FAILURE;
423 }
424 
425 static inline void dp_monitor_print_pdev_tx_capture_stats(struct dp_pdev *pdev)
426 {
427 }
428 
429 static inline
430 void dp_monitor_reap_timer_init(struct dp_soc *soc)
431 {
432 }
433 
434 static inline
435 void dp_monitor_reap_timer_deinit(struct dp_soc *soc)
436 {
437 }
438 
439 static inline
440 void dp_monitor_reap_timer_start(struct dp_soc *soc)
441 {
442 }
443 
444 static inline
445 bool dp_monitor_reap_timer_stop(struct dp_soc *soc)
446 {
447 	return false;
448 }
449 
450 static inline
451 void dp_monitor_vdev_timer_init(struct dp_soc *soc)
452 {
453 }
454 
455 static inline
456 void dp_monitor_vdev_timer_deinit(struct dp_soc *soc)
457 {
458 }
459 
460 static inline
461 void dp_monitor_vdev_timer_start(struct dp_soc *soc)
462 {
463 }
464 
465 static inline
466 bool dp_monitor_vdev_timer_stop(struct dp_soc *soc)
467 {
468 	return false;
469 }
470 
471 static inline struct qdf_mem_multi_page_t*
472 dp_monitor_get_link_desc_pages(struct dp_soc *soc, uint32_t mac_id)
473 {
474 	return NULL;
475 }
476 
477 static inline uint32_t *
478 dp_monitor_get_total_link_descs(struct dp_soc *soc, uint32_t mac_id)
479 {
480 	return NULL;
481 }
482 
483 static inline QDF_STATUS dp_monitor_drop_inv_peer_pkts(struct dp_vdev *vdev)
484 {
485 	return QDF_STATUS_E_FAILURE;
486 }
487 
488 static inline bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev)
489 {
490 	return false;
491 }
492 
493 static inline void dp_monitor_vdev_register_osif(struct dp_vdev *vdev,
494 						 struct ol_txrx_ops *txrx_ops)
495 {
496 }
497 
498 static inline bool dp_monitor_is_vdev_timer_running(struct dp_soc *soc)
499 {
500 	return false;
501 }
502 
503 static inline
504 void dp_monitor_pdev_set_mon_vdev(struct dp_pdev *pdev)
505 {
506 }
507 
508 static inline void dp_monitor_vdev_delete(struct dp_soc *soc,
509 					  struct dp_vdev *vdev)
510 {
511 }
512 
513 static inline void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
514 {
515 }
516 
517 static inline void dp_monitor_neighbour_peer_add_ast(struct dp_pdev *pdev,
518 						     struct dp_peer *ta_peer,
519 						     uint8_t *mac_addr,
520 						     qdf_nbuf_t nbuf,
521 						     uint32_t flags)
522 {
523 }
524 
525 static inline void
526 dp_monitor_set_chan_band(struct dp_pdev *pdev, enum reg_wifi_band chan_band)
527 {
528 }
529 
530 static inline void
531 dp_monitor_set_chan_freq(struct dp_pdev *pdev, qdf_freq_t chan_freq)
532 {
533 }
534 
535 static inline void dp_monitor_set_chan_num(struct dp_pdev *pdev, int chan_num)
536 {
537 }
538 
539 static inline bool dp_monitor_is_enable_mcopy_mode(struct dp_pdev *pdev)
540 {
541 	return false;
542 }
543 
544 static inline
545 void dp_monitor_neighbour_peer_list_remove(struct dp_pdev *pdev,
546 					   struct dp_vdev *vdev,
547 					   struct dp_neighbour_peer *peer)
548 {
549 }
550 
551 static inline bool dp_monitor_is_chan_band_known(struct dp_pdev *pdev)
552 {
553 	return false;
554 }
555 
556 static inline enum reg_wifi_band
557 dp_monitor_get_chan_band(struct dp_pdev *pdev)
558 {
559 	return 0;
560 }
561 
562 static inline void dp_monitor_get_mpdu_status(struct dp_pdev *pdev,
563 					      struct dp_soc *soc,
564 					      uint8_t *rx_tlv_hdr)
565 {
566 }
567 
568 static inline void dp_monitor_print_tx_stats(struct dp_pdev *pdev)
569 {
570 }
571 
572 static inline
573 QDF_STATUS dp_monitor_mcopy_check_deliver(struct dp_pdev *pdev,
574 					  uint16_t peer_id, uint32_t ppdu_id,
575 					  uint8_t first_msdu)
576 {
577 	return QDF_STATUS_SUCCESS;
578 }
579 
580 static inline bool dp_monitor_is_enable_tx_sniffer(struct dp_pdev *pdev)
581 {
582 	return false;
583 }
584 
585 static inline struct dp_vdev*
586 dp_monitor_get_monitor_vdev_from_pdev(struct dp_pdev *pdev)
587 {
588 	return NULL;
589 }
590 
591 static inline QDF_STATUS dp_monitor_check_com_info_ppdu_id(struct dp_pdev *pdev,
592 							   void *rx_desc)
593 {
594 	return QDF_STATUS_E_FAILURE;
595 }
596 
597 static inline struct mon_rx_status*
598 dp_monitor_get_rx_status(struct dp_pdev *pdev)
599 {
600 	return NULL;
601 }
602 
603 static inline
604 void dp_monitor_pdev_config_scan_spcl_vap(struct dp_pdev *pdev)
605 {
606 }
607 
608 static inline
609 void dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(struct dp_pdev *pdev,
610 						      bool val)
611 {
612 }
613 #endif
614 
615 #define DP_MAX_TIMER_EXEC_TIME_TICKS \
616 		(QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 * 20)
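/*
 * Editorial note: QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 is the cycle
 * count for 1 ms, so the budget above amounts to 20 ms of execution
 * time per timer invocation.
 */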
617 
618 /**
619  * enum timer_yield_status - yield status code used in monitor mode timer.
620  * @DP_TIMER_NO_YIELD: do not yield
621  * @DP_TIMER_WORK_DONE: yield because work is done
622  * @DP_TIMER_WORK_EXHAUST: yield because work quota is exhausted
623  * @DP_TIMER_TIME_EXHAUST: yield due to time slot exhausted
624  */
625 enum timer_yield_status {
626 	DP_TIMER_NO_YIELD,
627 	DP_TIMER_WORK_DONE,
628 	DP_TIMER_WORK_EXHAUST,
629 	DP_TIMER_TIME_EXHAUST,
630 };
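/*
 * Editorial sketch (illustrative, not from the original source): a
 * monitor timer handler is expected to poll a yield status between
 * units of work, e.g.:
 *
 *	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
 *
 *	while (yield == DP_TIMER_NO_YIELD && work_pending())
 *		yield = process_one_entry();	// hypothetical helpers
 */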
631 
632 #if DP_PRINT_ENABLE
633 #include <stdarg.h>       /* va_list */
634 #include <qdf_types.h> /* qdf_vprint */
635 #include <cdp_txrx_handle.h>
636 
637 enum {
638 	/* FATAL_ERR - print only irrecoverable error messages */
639 	DP_PRINT_LEVEL_FATAL_ERR,
640 
641 	/* ERR - include non-fatal err messages */
642 	DP_PRINT_LEVEL_ERR,
643 
644 	/* WARN - include warnings */
645 	DP_PRINT_LEVEL_WARN,
646 
647 	/* INFO1 - include fundamental, infrequent events */
648 	DP_PRINT_LEVEL_INFO1,
649 
650 	/* INFO2 - include non-fundamental but infrequent events */
651 	DP_PRINT_LEVEL_INFO2,
652 };
653 
654 #define dp_print(level, fmt, ...) do { \
655 	if (level <= g_txrx_print_level) \
656 		qdf_print(fmt, ## __VA_ARGS__); \
657 } while (0)
658 #define DP_PRINT(level, fmt, ...) do { \
659 	dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
660 } while (0)
661 #else
662 #define DP_PRINT(level, fmt, ...)
663 #endif /* DP_PRINT_ENABLE */
664 
665 #define DP_TRACE(LVL, fmt, args ...)                             \
666 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL,       \
667 		fmt, ## args)
668 
669 #ifdef DP_PRINT_NO_CONSOLE
670 /* Stat prints should not go to console or kernel logs. */
671 #define DP_PRINT_STATS(fmt, args ...)\
672 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,       \
673 		  fmt, ## args)
674 #else
675 #define DP_PRINT_STATS(fmt, args ...)\
676 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,\
677 		  fmt, ## args)
678 #endif
679 #define DP_STATS_INIT(_handle) \
680 	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))
681 
682 #define DP_STATS_CLR(_handle) \
683 	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))
684 
685 #ifndef DISABLE_DP_STATS
686 #define DP_STATS_INC(_handle, _field, _delta) \
687 { \
688 	if (likely(_handle)) \
689 		_handle->stats._field += _delta; \
690 }
691 
692 #define DP_STATS_INCC(_handle, _field, _delta, _cond) \
693 { \
694 	if (_cond && likely(_handle)) \
695 		_handle->stats._field += _delta; \
696 }
697 
698 #define DP_STATS_DEC(_handle, _field, _delta) \
699 { \
700 	if (likely(_handle)) \
701 		_handle->stats._field -= _delta; \
702 }
703 
704 #define DP_STATS_UPD(_handle, _field, _delta) \
705 { \
706 	if (likely(_handle)) \
707 		_handle->stats._field = _delta; \
708 }
709 
710 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \
711 { \
712 	DP_STATS_INC(_handle, _field.num, _count); \
713 	DP_STATS_INC(_handle, _field.bytes, _bytes) \
714 }
715 
716 #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
717 { \
718 	DP_STATS_INCC(_handle, _field.num, _count, _cond); \
719 	DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
720 }
721 
722 #define DP_STATS_AGGR(_handle_a, _handle_b, _field) \
723 { \
724 	_handle_a->stats._field += _handle_b->stats._field; \
725 }
726 
727 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \
728 { \
729 	DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \
730 	DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\
731 }
732 
733 #define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \
734 { \
735 	_handle_a->stats._field = _handle_b->stats._field; \
736 }
737 
738 #else
739 #define DP_STATS_INC(_handle, _field, _delta)
740 #define DP_STATS_INCC(_handle, _field, _delta, _cond)
741 #define DP_STATS_DEC(_handle, _field, _delta)
742 #define DP_STATS_UPD(_handle, _field, _delta)
743 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
744 #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
745 #define DP_STATS_AGGR(_handle_a, _handle_b, _field)
746 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field)
747 #endif
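/*
 * Editorial usage sketch (illustrative): the DP_STATS_* helpers take a
 * field path relative to the handle's embedded stats block, e.g.:
 *
 *	DP_STATS_INC(peer, tx.tx_failed, 1);
 *	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
 */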
748 
749 #ifdef ENABLE_DP_HIST_STATS
750 #define DP_HIST_INIT() \
751 	uint32_t num_of_packets[MAX_PDEV_CNT] = {0};
752 
753 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) \
754 { \
755 		++num_of_packets[_pdev_id]; \
756 }
757 
758 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
759 	do {                                                              \
760 		if (_p_cntrs == 1) {                                      \
761 			DP_STATS_INC(_pdev,                               \
762 				tx_comp_histogram.pkts_1, 1);             \
763 		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
764 			DP_STATS_INC(_pdev,                               \
765 				tx_comp_histogram.pkts_2_20, 1);          \
766 		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
767 			DP_STATS_INC(_pdev,                               \
768 				tx_comp_histogram.pkts_21_40, 1);         \
769 		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
770 			DP_STATS_INC(_pdev,                               \
771 				tx_comp_histogram.pkts_41_60, 1);         \
772 		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
773 			DP_STATS_INC(_pdev,                               \
774 				tx_comp_histogram.pkts_61_80, 1);         \
775 		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
776 			DP_STATS_INC(_pdev,                               \
777 				tx_comp_histogram.pkts_81_100, 1);        \
778 		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
779 			DP_STATS_INC(_pdev,                               \
780 				tx_comp_histogram.pkts_101_200, 1);       \
781 		} else if (_p_cntrs > 200) {                              \
782 			DP_STATS_INC(_pdev,                               \
783 				tx_comp_histogram.pkts_201_plus, 1);      \
784 		}                                                         \
785 	} while (0)
786 
787 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
788 	do {                                                              \
789 		if (_p_cntrs == 1) {                                      \
790 			DP_STATS_INC(_pdev,                               \
791 				rx_ind_histogram.pkts_1, 1);              \
792 		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
793 			DP_STATS_INC(_pdev,                               \
794 				rx_ind_histogram.pkts_2_20, 1);           \
795 		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
796 			DP_STATS_INC(_pdev,                               \
797 				rx_ind_histogram.pkts_21_40, 1);          \
798 		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
799 			DP_STATS_INC(_pdev,                               \
800 				rx_ind_histogram.pkts_41_60, 1);          \
801 		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
802 			DP_STATS_INC(_pdev,                               \
803 				rx_ind_histogram.pkts_61_80, 1);          \
804 		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
805 			DP_STATS_INC(_pdev,                               \
806 				rx_ind_histogram.pkts_81_100, 1);         \
807 		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
808 			DP_STATS_INC(_pdev,                               \
809 				rx_ind_histogram.pkts_101_200, 1);        \
810 		} else if (_p_cntrs > 200) {                              \
811 			DP_STATS_INC(_pdev,                               \
812 				rx_ind_histogram.pkts_201_plus, 1);       \
813 		}                                                         \
814 	} while (0)
815 
816 #define DP_TX_HIST_STATS_PER_PDEV() \
817 	do { \
818 		uint8_t hist_stats = 0; \
819 		for (hist_stats = 0; hist_stats < soc->pdev_count; \
820 				hist_stats++) { \
821 			DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
822 					num_of_packets[hist_stats]); \
823 		} \
824 	}  while (0)
825 
826 
827 #define DP_RX_HIST_STATS_PER_PDEV() \
828 	do { \
829 		uint8_t hist_stats = 0; \
830 		for (hist_stats = 0; hist_stats < soc->pdev_count; \
831 				hist_stats++) { \
832 			DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
833 					num_of_packets[hist_stats]); \
834 		} \
835 	}  while (0)
836 
837 #else
838 #define DP_HIST_INIT()
839 #define DP_HIST_PACKET_COUNT_INC(_pdev_id)
840 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
841 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
842 #define DP_RX_HIST_STATS_PER_PDEV()
843 #define DP_TX_HIST_STATS_PER_PDEV()
844 #endif /* ENABLE_DP_HIST_STATS */
845 
846 #define FRAME_MASK_IPV4_ARP   1
847 #define FRAME_MASK_IPV4_DHCP  2
848 #define FRAME_MASK_IPV4_EAPOL 4
849 #define FRAME_MASK_IPV6_DHCP  8
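/*
 * Editorial usage sketch (illustrative): the masks above combine into a
 * frame filter, e.g. to pass only ARP and EAPOL frames:
 *
 *	uint16_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_EAPOL;
 */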
850 
851 #ifdef QCA_SUPPORT_PEER_ISOLATION
852 #define dp_get_peer_isolation(_peer) ((_peer)->isolation)
853 
854 static inline void dp_set_peer_isolation(struct dp_peer *peer, bool val)
855 {
856 	peer->isolation = val;
857 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
858 		  "peer:"QDF_MAC_ADDR_FMT" isolation:%d",
859 		  QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->isolation);
860 }
861 
862 #else
863 #define dp_get_peer_isolation(_peer) (0)
864 
865 static inline void dp_set_peer_isolation(struct dp_peer *peer, bool val)
866 {
867 }
868 #endif /* QCA_SUPPORT_PEER_ISOLATION */
869 
870 #ifdef QCA_SUPPORT_WDS_EXTENDED
871 static inline void dp_wds_ext_peer_init(struct dp_peer *peer)
872 {
873 	peer->wds_ext.init = 0;
874 }
875 #else
876 static inline void dp_wds_ext_peer_init(struct dp_peer *peer)
877 {
878 }
879 #endif /* QCA_SUPPORT_WDS_EXTENDED */
880 
881 #ifdef QCA_HOST2FW_RXBUF_RING
882 static inline
883 struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
884 {
885 	return &pdev->rx_mac_buf_ring[lmac_id];
886 }
887 #else
888 static inline
889 struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
890 {
891 	return &pdev->soc->rx_refill_buf_ring[lmac_id];
892 }
893 #endif
894 
895 /**
896  * The lmac ID for a particular channel band is fixed.
897  * 2.4GHz band uses lmac_id = 1
898  * 5GHz/6GHz band uses lmac_id = 0
899  */
900 #define DP_INVALID_LMAC_ID	(-1)
901 #define DP_MON_INVALID_LMAC_ID	(-1)
902 #define DP_MON_2G_LMAC_ID	1
903 #define DP_MON_5G_LMAC_ID	0
904 #define DP_MON_6G_LMAC_ID	0
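/*
 * Editorial sketch (assumption based on the comment above):
 *
 *	int lmac_id = (band == REG_BAND_2G) ? DP_MON_2G_LMAC_ID :
 *					      DP_MON_5G_LMAC_ID;
 */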
905 
906 #ifdef FEATURE_TSO_STATS
907 /**
908  * dp_init_tso_stats() - Clear tso stats
909  * @pdev: pdev handle
910  *
911  * Return: None
912  */
913 static inline
914 void dp_init_tso_stats(struct dp_pdev *pdev)
915 {
916 	if (pdev) {
917 		qdf_mem_zero(&((pdev)->stats.tso_stats),
918 			     sizeof((pdev)->stats.tso_stats));
919 		qdf_atomic_init(&pdev->tso_idx);
920 	}
921 }
922 
923 /**
924  * dp_stats_tso_segment_histogram_update() - TSO Segment Histogram
925  * @pdev: pdev handle
926  * @_p_cntrs: number of tso segments for a tso packet
927  *
928  * Return: None
929  */
930 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
931 					   uint8_t _p_cntrs);
932 
933 /**
934  * dp_tso_segment_update() - Collect tso segment information
935  * @pdev: pdev handle
936  * @stats_idx: tso packet number
937  * @idx: tso segment number
938  * @seg: tso segment
939  *
940  * Return: None
941  */
942 void dp_tso_segment_update(struct dp_pdev *pdev,
943 			   uint32_t stats_idx,
944 			   uint8_t idx,
945 			   struct qdf_tso_seg_t seg);
946 
947 /**
948  * dp_tso_packet_update() - TSO Packet information
949  * @pdev: pdev handle
950  * @stats_idx: tso packet number
951  * @msdu: nbuf handle
952  * @num_segs: tso segments
953  *
954  * Return: None
955  */
956 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
957 			  qdf_nbuf_t msdu, uint16_t num_segs);
958 
959 /**
960  * dp_tso_segment_stats_update() - TSO Segment stats
961  * @pdev: pdev handle
962  * @stats_seg: tso segment list
963  * @stats_idx: tso packet number
964  *
965  * Return: None
966  */
967 void dp_tso_segment_stats_update(struct dp_pdev *pdev,
968 				 struct qdf_tso_seg_elem_t *stats_seg,
969 				 uint32_t stats_idx);
970 
971 /**
972  * dp_print_tso_stats() - dump tso statistics
973  * @soc:soc handle
974  * @level: verbosity level
975  *
976  * Return: None
977  */
978 void dp_print_tso_stats(struct dp_soc *soc,
979 			enum qdf_stats_verbosity_level level);
980 
981 /**
982  * dp_txrx_clear_tso_stats() - clear tso stats
983  * @soc: soc handle
984  *
985  * Return: None
986  */
987 void dp_txrx_clear_tso_stats(struct dp_soc *soc);
988 #else
989 static inline
990 void dp_init_tso_stats(struct dp_pdev *pdev)
991 {
992 }
993 
994 static inline
995 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
996 					   uint8_t _p_cntrs)
997 {
998 }
999 
1000 static inline
1001 void dp_tso_segment_update(struct dp_pdev *pdev,
1002 			   uint32_t stats_idx,
1003 			   uint8_t idx,
1004 			   struct qdf_tso_seg_t seg)
1005 {
1006 }
1007 
1008 static inline
1009 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
1010 			  qdf_nbuf_t msdu, uint16_t num_segs)
1011 {
1012 }
1013 
1014 static inline
1015 void dp_tso_segment_stats_update(struct dp_pdev *pdev,
1016 				 struct qdf_tso_seg_elem_t *stats_seg,
1017 				 uint32_t stats_idx)
1018 {
1019 }
1020 
1021 static inline
1022 void dp_print_tso_stats(struct dp_soc *soc,
1023 			enum qdf_stats_verbosity_level level)
1024 {
1025 }
1026 
1027 static inline
1028 void dp_txrx_clear_tso_stats(struct dp_soc *soc)
1029 {
1030 }
1031 #endif /* FEATURE_TSO_STATS */
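/*
 * Editorial usage sketch (illustrative, not from the original source):
 * a TSO transmit path is expected to record one packet entry followed
 * by its per-segment entries, e.g.:
 *
 *	dp_tso_packet_update(pdev, stats_idx, msdu, num_segs);
 *	dp_stats_tso_segment_histogram_update(pdev, num_segs);
 */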
1032 
1033 #define DP_HTT_T2H_HP_PIPE 5

1034 static inline void dp_update_pdev_stats(struct dp_pdev *tgtobj,
1035 					struct cdp_vdev_stats *srcobj)
1036 {
1037 	uint8_t i;
1038 	uint8_t pream_type;
1039 
1040 	for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) {
1041 		for (i = 0; i < MAX_MCS; i++) {
1042 			tgtobj->stats.tx.pkt_type[pream_type].
1043 				mcs_count[i] +=
1044 			srcobj->tx.pkt_type[pream_type].
1045 				mcs_count[i];
1046 			tgtobj->stats.rx.pkt_type[pream_type].
1047 				mcs_count[i] +=
1048 			srcobj->rx.pkt_type[pream_type].
1049 				mcs_count[i];
1050 		}
1051 	}
1052 
1053 	for (i = 0; i < MAX_BW; i++) {
1054 		tgtobj->stats.tx.bw[i] += srcobj->tx.bw[i];
1055 		tgtobj->stats.rx.bw[i] += srcobj->rx.bw[i];
1056 	}
1057 
1058 	for (i = 0; i < SS_COUNT; i++) {
1059 		tgtobj->stats.tx.nss[i] += srcobj->tx.nss[i];
1060 		tgtobj->stats.rx.nss[i] += srcobj->rx.nss[i];
1061 	}
1062 
1063 	for (i = 0; i < WME_AC_MAX; i++) {
1064 		tgtobj->stats.tx.wme_ac_type[i] +=
1065 			srcobj->tx.wme_ac_type[i];
1066 		tgtobj->stats.rx.wme_ac_type[i] +=
1067 			srcobj->rx.wme_ac_type[i];
1068 		tgtobj->stats.tx.excess_retries_per_ac[i] +=
1069 			srcobj->tx.excess_retries_per_ac[i];
1070 	}
1071 
1072 	for (i = 0; i < MAX_GI; i++) {
1073 		tgtobj->stats.tx.sgi_count[i] +=
1074 			srcobj->tx.sgi_count[i];
1075 		tgtobj->stats.rx.sgi_count[i] +=
1076 			srcobj->rx.sgi_count[i];
1077 	}
1078 
1079 	for (i = 0; i < MAX_RECEPTION_TYPES; i++)
1080 		tgtobj->stats.rx.reception_type[i] +=
1081 			srcobj->rx.reception_type[i];
1082 
1083 	tgtobj->stats.tx.comp_pkt.bytes += srcobj->tx.comp_pkt.bytes;
1084 	tgtobj->stats.tx.comp_pkt.num += srcobj->tx.comp_pkt.num;
1085 	tgtobj->stats.tx.ucast.num += srcobj->tx.ucast.num;
1086 	tgtobj->stats.tx.ucast.bytes += srcobj->tx.ucast.bytes;
1087 	tgtobj->stats.tx.mcast.num += srcobj->tx.mcast.num;
1088 	tgtobj->stats.tx.mcast.bytes += srcobj->tx.mcast.bytes;
1089 	tgtobj->stats.tx.bcast.num += srcobj->tx.bcast.num;
1090 	tgtobj->stats.tx.bcast.bytes += srcobj->tx.bcast.bytes;
1091 	tgtobj->stats.tx.tx_success.num += srcobj->tx.tx_success.num;
1092 	tgtobj->stats.tx.tx_success.bytes +=
1093 		srcobj->tx.tx_success.bytes;
1094 	tgtobj->stats.tx.nawds_mcast.num +=
1095 		srcobj->tx.nawds_mcast.num;
1096 	tgtobj->stats.tx.nawds_mcast.bytes +=
1097 		srcobj->tx.nawds_mcast.bytes;
1098 	tgtobj->stats.tx.nawds_mcast_drop +=
1099 		srcobj->tx.nawds_mcast_drop;
1100 	tgtobj->stats.tx.num_ppdu_cookie_valid +=
1101 		srcobj->tx.num_ppdu_cookie_valid;
1102 	tgtobj->stats.tx.tx_failed += srcobj->tx.tx_failed;
1103 	tgtobj->stats.tx.ofdma += srcobj->tx.ofdma;
1104 	tgtobj->stats.tx.stbc += srcobj->tx.stbc;
1105 	tgtobj->stats.tx.ldpc += srcobj->tx.ldpc;
1106 	tgtobj->stats.tx.pream_punct_cnt += srcobj->tx.pream_punct_cnt;
1107 	tgtobj->stats.tx.retries += srcobj->tx.retries;
1108 	tgtobj->stats.tx.non_amsdu_cnt += srcobj->tx.non_amsdu_cnt;
1109 	tgtobj->stats.tx.amsdu_cnt += srcobj->tx.amsdu_cnt;
1110 	tgtobj->stats.tx.non_ampdu_cnt += srcobj->tx.non_ampdu_cnt;
1111 	tgtobj->stats.tx.ampdu_cnt += srcobj->tx.ampdu_cnt;
1112 	tgtobj->stats.tx.dropped.fw_rem.num += srcobj->tx.dropped.fw_rem.num;
1113 	tgtobj->stats.tx.dropped.fw_rem.bytes +=
1114 			srcobj->tx.dropped.fw_rem.bytes;
1115 	tgtobj->stats.tx.dropped.fw_rem_tx +=
1116 			srcobj->tx.dropped.fw_rem_tx;
1117 	tgtobj->stats.tx.dropped.fw_rem_notx +=
1118 			srcobj->tx.dropped.fw_rem_notx;
1119 	tgtobj->stats.tx.dropped.fw_reason1 +=
1120 			srcobj->tx.dropped.fw_reason1;
1121 	tgtobj->stats.tx.dropped.fw_reason2 +=
1122 			srcobj->tx.dropped.fw_reason2;
1123 	tgtobj->stats.tx.dropped.fw_reason3 +=
1124 			srcobj->tx.dropped.fw_reason3;
1125 	tgtobj->stats.tx.dropped.age_out += srcobj->tx.dropped.age_out;
1126 	tgtobj->stats.rx.err.mic_err += srcobj->rx.err.mic_err;
1127 	if (srcobj->rx.snr != 0)
1128 		tgtobj->stats.rx.snr = srcobj->rx.snr;
1129 	tgtobj->stats.rx.rx_rate = srcobj->rx.rx_rate;
1130 	tgtobj->stats.rx.err.decrypt_err += srcobj->rx.err.decrypt_err;
1131 	tgtobj->stats.rx.non_ampdu_cnt += srcobj->rx.non_ampdu_cnt;
1132 	tgtobj->stats.rx.ampdu_cnt += srcobj->rx.ampdu_cnt;
1133 	tgtobj->stats.rx.non_amsdu_cnt += srcobj->rx.non_amsdu_cnt;
1134 	tgtobj->stats.rx.amsdu_cnt += srcobj->rx.amsdu_cnt;
1135 	tgtobj->stats.rx.nawds_mcast_drop += srcobj->rx.nawds_mcast_drop;
1136 	tgtobj->stats.rx.to_stack.num += srcobj->rx.to_stack.num;
1137 	tgtobj->stats.rx.to_stack.bytes += srcobj->rx.to_stack.bytes;
1138 
1139 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
1140 		tgtobj->stats.rx.rcvd_reo[i].num +=
1141 			srcobj->rx.rcvd_reo[i].num;
1142 		tgtobj->stats.rx.rcvd_reo[i].bytes +=
1143 			srcobj->rx.rcvd_reo[i].bytes;
1144 	}
1145 
1146 	srcobj->rx.unicast.num =
1147 		srcobj->rx.to_stack.num -
1148 				(srcobj->rx.multicast.num);
1149 	srcobj->rx.unicast.bytes =
1150 		srcobj->rx.to_stack.bytes -
1151 				(srcobj->rx.multicast.bytes);
1152 
1153 	tgtobj->stats.rx.unicast.num += srcobj->rx.unicast.num;
1154 	tgtobj->stats.rx.unicast.bytes += srcobj->rx.unicast.bytes;
1155 	tgtobj->stats.rx.multicast.num += srcobj->rx.multicast.num;
1156 	tgtobj->stats.rx.multicast.bytes += srcobj->rx.multicast.bytes;
1157 	tgtobj->stats.rx.bcast.num += srcobj->rx.bcast.num;
1158 	tgtobj->stats.rx.bcast.bytes += srcobj->rx.bcast.bytes;
1159 	tgtobj->stats.rx.raw.num += srcobj->rx.raw.num;
1160 	tgtobj->stats.rx.raw.bytes += srcobj->rx.raw.bytes;
1161 	tgtobj->stats.rx.intra_bss.pkts.num +=
1162 			srcobj->rx.intra_bss.pkts.num;
1163 	tgtobj->stats.rx.intra_bss.pkts.bytes +=
1164 			srcobj->rx.intra_bss.pkts.bytes;
1165 	tgtobj->stats.rx.intra_bss.fail.num +=
1166 			srcobj->rx.intra_bss.fail.num;
1167 	tgtobj->stats.rx.intra_bss.fail.bytes +=
1168 			srcobj->rx.intra_bss.fail.bytes;
1169 
1170 	tgtobj->stats.tx.last_ack_rssi =
1171 		srcobj->tx.last_ack_rssi;
1172 	tgtobj->stats.rx.mec_drop.num += srcobj->rx.mec_drop.num;
1173 	tgtobj->stats.rx.mec_drop.bytes += srcobj->rx.mec_drop.bytes;
1174 	tgtobj->stats.rx.multipass_rx_pkt_drop +=
1175 		srcobj->rx.multipass_rx_pkt_drop;
1176 }
1177 
1178 static inline void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj,
1179 						struct dp_vdev *srcobj)
1180 {
1181 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.nawds_mcast);
1182 
1183 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.rcvd);
1184 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.processed);
1185 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.reinject_pkts);
1186 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.inspect_pkts);
1187 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.raw.raw_pkt);
1188 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.raw.dma_map_error);
1189 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.raw.num_frags_overflow_err);
1190 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_host.num);
1191 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_target);
1192 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sg.sg_pkt);
1193 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.mcast_en.mcast_pkt);
1194 	DP_STATS_AGGR(tgtobj, srcobj,
1195 		      tx_i.mcast_en.dropped_map_error);
1196 	DP_STATS_AGGR(tgtobj, srcobj,
1197 		      tx_i.mcast_en.dropped_self_mac);
1198 	DP_STATS_AGGR(tgtobj, srcobj,
1199 		      tx_i.mcast_en.dropped_send_fail);
1200 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mcast_en.ucast);
1201 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.igmp_mcast_en.igmp_rcvd);
1202 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.igmp_mcast_en.igmp_ucast_converted);
1203 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.dma_error);
1204 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.ring_full);
1205 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.enqueue_fail);
1206 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.fail_per_pkt_vdev_id_check);
1207 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.desc_na.num);
1208 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.res_full);
1209 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.headroom_insufficient);
1210 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified);
1211 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified_raw);
1212 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sniffer_rcvd);
1213 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.exception_fw);
1214 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.completion_fw);
1215 
1216 	tgtobj->stats.tx_i.dropped.dropped_pkt.num =
1217 		tgtobj->stats.tx_i.dropped.dma_error +
1218 		tgtobj->stats.tx_i.dropped.ring_full +
1219 		tgtobj->stats.tx_i.dropped.enqueue_fail +
1220 		tgtobj->stats.tx_i.dropped.fail_per_pkt_vdev_id_check +
1221 		tgtobj->stats.tx_i.dropped.desc_na.num +
1222 		tgtobj->stats.tx_i.dropped.res_full;
1223 
1224 }
1225 
1226 /**
1227  * dp_is_wds_extended(): Check if wds ext is enabled
1228  * @peer: DP peer handle
1229  *
1230  * return: true if enabled, false if not
1231  */
1232 #ifdef QCA_SUPPORT_WDS_EXTENDED
1233 static inline bool dp_is_wds_extended(struct dp_peer *peer)
1234 {
1235 	if (qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
1236 				&peer->wds_ext.init))
1237 		return true;
1238 
1239 	return false;
1240 }
1241 #else
1242 static inline bool dp_is_wds_extended(struct dp_peer *peer)
1243 {
1244 	return false;
1245 }
1246 #endif /* QCA_SUPPORT_WDS_EXTENDED */
1247 
1248 static inline void dp_update_vdev_stats(struct dp_soc *soc,
1249 					struct dp_peer *srcobj,
1250 					void *arg)
1251 {
1252 	struct cdp_vdev_stats *tgtobj = (struct cdp_vdev_stats *)arg;
1253 	uint8_t i;
1254 	uint8_t pream_type;
1255 
1256 	if (qdf_unlikely(dp_is_wds_extended(srcobj)))
1257 		return;
1258 
1259 	for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) {
1260 		for (i = 0; i < MAX_MCS; i++) {
1261 			tgtobj->tx.pkt_type[pream_type].
1262 				mcs_count[i] +=
1263 			srcobj->stats.tx.pkt_type[pream_type].
1264 				mcs_count[i];
1265 			tgtobj->rx.pkt_type[pream_type].
1266 				mcs_count[i] +=
1267 			srcobj->stats.rx.pkt_type[pream_type].
1268 				mcs_count[i];
1269 		}
1270 	}
1271 
1272 	for (i = 0; i < MAX_BW; i++) {
1273 		tgtobj->tx.bw[i] += srcobj->stats.tx.bw[i];
1274 		tgtobj->rx.bw[i] += srcobj->stats.rx.bw[i];
1275 	}
1276 
1277 	for (i = 0; i < SS_COUNT; i++) {
1278 		tgtobj->tx.nss[i] += srcobj->stats.tx.nss[i];
1279 		tgtobj->rx.nss[i] += srcobj->stats.rx.nss[i];
1280 	}
1281 
1282 	for (i = 0; i < WME_AC_MAX; i++) {
1283 		tgtobj->tx.wme_ac_type[i] +=
1284 			srcobj->stats.tx.wme_ac_type[i];
1285 		tgtobj->rx.wme_ac_type[i] +=
1286 			srcobj->stats.rx.wme_ac_type[i];
1287 		tgtobj->tx.excess_retries_per_ac[i] +=
1288 			srcobj->stats.tx.excess_retries_per_ac[i];
1289 	}
1290 
1291 	for (i = 0; i < MAX_GI; i++) {
1292 		tgtobj->tx.sgi_count[i] +=
1293 			srcobj->stats.tx.sgi_count[i];
1294 		tgtobj->rx.sgi_count[i] +=
1295 			srcobj->stats.rx.sgi_count[i];
1296 	}
1297 
1298 	for (i = 0; i < MAX_RECEPTION_TYPES; i++)
1299 		tgtobj->rx.reception_type[i] +=
1300 			srcobj->stats.rx.reception_type[i];
1301 
1302 	tgtobj->tx.comp_pkt.bytes += srcobj->stats.tx.comp_pkt.bytes;
1303 	tgtobj->tx.comp_pkt.num += srcobj->stats.tx.comp_pkt.num;
1304 	tgtobj->tx.ucast.num += srcobj->stats.tx.ucast.num;
1305 	tgtobj->tx.ucast.bytes += srcobj->stats.tx.ucast.bytes;
1306 	tgtobj->tx.mcast.num += srcobj->stats.tx.mcast.num;
1307 	tgtobj->tx.mcast.bytes += srcobj->stats.tx.mcast.bytes;
1308 	tgtobj->tx.bcast.num += srcobj->stats.tx.bcast.num;
1309 	tgtobj->tx.bcast.bytes += srcobj->stats.tx.bcast.bytes;
1310 	tgtobj->tx.tx_success.num += srcobj->stats.tx.tx_success.num;
1311 	tgtobj->tx.tx_success.bytes +=
1312 		srcobj->stats.tx.tx_success.bytes;
1313 	tgtobj->tx.nawds_mcast.num +=
1314 		srcobj->stats.tx.nawds_mcast.num;
1315 	tgtobj->tx.nawds_mcast.bytes +=
1316 		srcobj->stats.tx.nawds_mcast.bytes;
1317 	tgtobj->tx.nawds_mcast_drop +=
1318 		srcobj->stats.tx.nawds_mcast_drop;
1319 	tgtobj->tx.num_ppdu_cookie_valid +=
1320 		srcobj->stats.tx.num_ppdu_cookie_valid;
1321 	tgtobj->tx.tx_failed += srcobj->stats.tx.tx_failed;
1322 	tgtobj->tx.ofdma += srcobj->stats.tx.ofdma;
1323 	tgtobj->tx.stbc += srcobj->stats.tx.stbc;
1324 	tgtobj->tx.ldpc += srcobj->stats.tx.ldpc;
1325 	tgtobj->tx.pream_punct_cnt += srcobj->stats.tx.pream_punct_cnt;
1326 	tgtobj->tx.retries += srcobj->stats.tx.retries;
1327 	tgtobj->tx.non_amsdu_cnt += srcobj->stats.tx.non_amsdu_cnt;
1328 	tgtobj->tx.amsdu_cnt += srcobj->stats.tx.amsdu_cnt;
1329 	tgtobj->tx.non_ampdu_cnt += srcobj->stats.tx.non_ampdu_cnt;
1330 	tgtobj->tx.ampdu_cnt += srcobj->stats.tx.ampdu_cnt;
1331 	tgtobj->tx.dropped.fw_rem.num += srcobj->stats.tx.dropped.fw_rem.num;
1332 	tgtobj->tx.dropped.fw_rem.bytes +=
1333 			srcobj->stats.tx.dropped.fw_rem.bytes;
1334 	tgtobj->tx.dropped.fw_rem_tx +=
1335 			srcobj->stats.tx.dropped.fw_rem_tx;
1336 	tgtobj->tx.dropped.fw_rem_notx +=
1337 			srcobj->stats.tx.dropped.fw_rem_notx;
1338 	tgtobj->tx.dropped.fw_reason1 +=
1339 			srcobj->stats.tx.dropped.fw_reason1;
1340 	tgtobj->tx.dropped.fw_reason2 +=
1341 			srcobj->stats.tx.dropped.fw_reason2;
1342 	tgtobj->tx.dropped.fw_reason3 +=
1343 			srcobj->stats.tx.dropped.fw_reason3;
1344 	tgtobj->tx.dropped.age_out += srcobj->stats.tx.dropped.age_out;
1345 	tgtobj->rx.err.mic_err += srcobj->stats.rx.err.mic_err;
1346 	if (srcobj->stats.rx.snr != 0)
1347 		tgtobj->rx.snr = srcobj->stats.rx.snr;
1348 	tgtobj->rx.rx_rate = srcobj->stats.rx.rx_rate;
1349 	tgtobj->rx.err.decrypt_err += srcobj->stats.rx.err.decrypt_err;
1350 	tgtobj->rx.non_ampdu_cnt += srcobj->stats.rx.non_ampdu_cnt;
1351 	tgtobj->rx.ampdu_cnt += srcobj->stats.rx.ampdu_cnt;
1352 	tgtobj->rx.non_amsdu_cnt += srcobj->stats.rx.non_amsdu_cnt;
1353 	tgtobj->rx.amsdu_cnt += srcobj->stats.rx.amsdu_cnt;
1354 	tgtobj->rx.nawds_mcast_drop += srcobj->stats.rx.nawds_mcast_drop;
1355 	tgtobj->rx.to_stack.num += srcobj->stats.rx.to_stack.num;
1356 	tgtobj->rx.to_stack.bytes += srcobj->stats.rx.to_stack.bytes;
1357 
1358 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
1359 		tgtobj->rx.rcvd_reo[i].num +=
1360 			srcobj->stats.rx.rcvd_reo[i].num;
1361 		tgtobj->rx.rcvd_reo[i].bytes +=
1362 			srcobj->stats.rx.rcvd_reo[i].bytes;
1363 	}
1364 
1365 	srcobj->stats.rx.unicast.num =
1366 		srcobj->stats.rx.to_stack.num -
1367 				srcobj->stats.rx.multicast.num;
1368 	srcobj->stats.rx.unicast.bytes =
1369 		srcobj->stats.rx.to_stack.bytes -
1370 				srcobj->stats.rx.multicast.bytes;
1371 
1372 	tgtobj->rx.unicast.num += srcobj->stats.rx.unicast.num;
1373 	tgtobj->rx.unicast.bytes += srcobj->stats.rx.unicast.bytes;
1374 	tgtobj->rx.multicast.num += srcobj->stats.rx.multicast.num;
1375 	tgtobj->rx.multicast.bytes += srcobj->stats.rx.multicast.bytes;
1376 	tgtobj->rx.bcast.num += srcobj->stats.rx.bcast.num;
1377 	tgtobj->rx.bcast.bytes += srcobj->stats.rx.bcast.bytes;
1378 	tgtobj->rx.raw.num += srcobj->stats.rx.raw.num;
1379 	tgtobj->rx.raw.bytes += srcobj->stats.rx.raw.bytes;
1380 	tgtobj->rx.intra_bss.pkts.num +=
1381 			srcobj->stats.rx.intra_bss.pkts.num;
1382 	tgtobj->rx.intra_bss.pkts.bytes +=
1383 			srcobj->stats.rx.intra_bss.pkts.bytes;
1384 	tgtobj->rx.intra_bss.fail.num +=
1385 			srcobj->stats.rx.intra_bss.fail.num;
1386 	tgtobj->rx.intra_bss.fail.bytes +=
1387 			srcobj->stats.rx.intra_bss.fail.bytes;
1388 	tgtobj->tx.last_ack_rssi =
1389 		srcobj->stats.tx.last_ack_rssi;
1390 	tgtobj->rx.mec_drop.num += srcobj->stats.rx.mec_drop.num;
1391 	tgtobj->rx.mec_drop.bytes += srcobj->stats.rx.mec_drop.bytes;
1392 	tgtobj->rx.multipass_rx_pkt_drop +=
1393 		srcobj->stats.rx.multipass_rx_pkt_drop;
1394 }
1395 
1396 #define DP_UPDATE_STATS(_tgtobj, _srcobj)	\
1397 	do {				\
1398 		uint8_t i;		\
1399 		uint8_t pream_type;	\
1400 		for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \
1401 			for (i = 0; i < MAX_MCS; i++) { \
1402 				DP_STATS_AGGR(_tgtobj, _srcobj, \
1403 					tx.pkt_type[pream_type].mcs_count[i]); \
1404 				DP_STATS_AGGR(_tgtobj, _srcobj, \
1405 					rx.pkt_type[pream_type].mcs_count[i]); \
1406 			} \
1407 		} \
1408 		  \
1409 		for (i = 0; i < MAX_BW; i++) { \
1410 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \
1411 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \
1412 		} \
1413 		  \
1414 		for (i = 0; i < SS_COUNT; i++) { \
1415 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \
1416 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \
1417 		} \
1418 		for (i = 0; i < WME_AC_MAX; i++) { \
1419 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \
1420 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \
1421 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \
1422 		\
1423 		} \
1424 		\
1425 		for (i = 0; i < MAX_GI; i++) { \
1426 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \
1427 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \
1428 		} \
1429 		\
1430 		for (i = 0; i < MAX_RECEPTION_TYPES; i++) \
1431 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \
1432 		\
1433 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \
1434 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \
1435 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \
1436 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \
1437 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \
1438 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \
1439 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \
1440 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \
1441 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \
1442 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \
1443 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \
1444 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \
1445 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \
1446 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \
1447 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \
1448 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \
1449 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \
1450 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \
1451 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \
1452 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \
1453 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \
1454 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \
1455 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \
1456 								\
1457 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \
1458 		if (_srcobj->stats.rx.snr != 0) \
1459 			DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.snr); \
1460 		DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \
1461 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \
1462 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \
1463 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \
1464 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \
1465 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \
1466 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \
1467 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \
1468 								\
1469 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)	\
1470 			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \
1471 									\
1472 		_srcobj->stats.rx.unicast.num = \
1473 			_srcobj->stats.rx.to_stack.num - \
1474 					_srcobj->stats.rx.multicast.num; \
1475 		_srcobj->stats.rx.unicast.bytes = \
1476 			_srcobj->stats.rx.to_stack.bytes - \
1477 					_srcobj->stats.rx.multicast.bytes; \
1478 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \
1479 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \
1480 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \
1481 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \
1482 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \
1483 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \
1484 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \
1485 								  \
1486 		_tgtobj->stats.tx.last_ack_rssi =	\
1487 			_srcobj->stats.tx.last_ack_rssi; \
1488 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \
1489 	}  while (0)
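/*
 * Editorial usage sketch (illustrative): both handles must embed a
 * compatible stats block, e.g. aggregating a peer into its vdev:
 *
 *	DP_UPDATE_STATS(vdev, peer);
 */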
1490 
1491 /**
1492  * dp_peer_find_attach() - Allocates memory for peer objects
1493  * @soc: SoC handle
1494  *
1495  * Return: QDF_STATUS
1496  */
1497 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc);
1498 extern void dp_peer_find_detach(struct dp_soc *soc);
1499 extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer);
1500 extern void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer);
1501 extern void dp_peer_find_hash_erase(struct dp_soc *soc);
1502 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
1503 			   struct dp_peer *peer);
1504 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
1505 			      struct dp_peer *peer);
1506 void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
1507 				struct dp_peer *peer,
1508 				uint16_t peer_id);
1509 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
1510 				   uint16_t peer_id);
1511 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
1512 			  enum dp_mod_id mod_id);
1513 
1514 /*
1515  * dp_peer_ppdu_delayed_ba_cleanup() - free the delayed ba ppdu stats allocated for a peer
1516  * @peer: Datapath peer
1517  *
1518  * return: void
1519  */
1520 void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer);
1521 
1522 extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer);
1523 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
1524 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
1525 extern struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
1526 					      uint8_t *peer_mac_addr,
1527 					      int mac_addr_is_aligned,
1528 					      uint8_t vdev_id,
1529 					      enum dp_mod_id id);
1530 
1531 #ifdef DP_PEER_EXTENDED_API
1532 /**
1533  * dp_register_peer() - Register peer into physical device
1534  * @soc_hdl - data path soc handle
1535  * @pdev_id - device instance id
1536  * @sta_desc - peer description
1537  *
1538  * Register peer into physical device
1539  *
1540  * Return: QDF_STATUS_SUCCESS registration success
1541  *         QDF_STATUS_E_FAULT peer not found
1542  */
1543 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1544 			    struct ol_txrx_desc_type *sta_desc);
1545 
1546 /**
1547  * dp_clear_peer() - remove peer from physical device
1548  * @soc_hdl - data path soc handle
1549  * @pdev_id - device instance id
1550  * @peer_addr - peer mac address
1551  *
1552  * remove peer from physical device
1553  *
1554  * Return: QDF_STATUS_SUCCESS registration success
1555  *         QDF_STATUS_E_FAULT peer not found
1556  */
1557 QDF_STATUS dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1558 			 struct qdf_mac_addr peer_addr);
1559 
1560 /*
1561  * dp_find_peer_exist - check if the peer already exists
1562  * @soc_hdl: datapath soc handle
1563  * @pdev_id: physical device instance id
1564  * @peer_addr: peer mac address
1565  *
1566  * Return: true or false
1567  */
1568 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1569 			uint8_t *peer_addr);
1570 
1571 /*
1572  * dp_find_peer_exist_on_vdev - find if peer exists on the given vdev
1573  * @soc_hdl: datapath soc handle
1574  * @vdev_id: vdev instance id
1575  * @peer_addr: peer mac address
1576  *
1577  * Return: true or false
1578  */
1579 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1580 				uint8_t *peer_addr);
1581 
1582 /*
1583  * dp_find_peer_exist_on_other_vdev - find if peer exists
1584  * on other than the given vdev
1585  * @soc_hdl: datapath soc handle
1586  * @vdev_id: vdev instance id
1587  * @peer_addr: peer mac address
1588  * @max_bssid: max number of bssids
1589  *
1590  * Return: true or false
1591  */
1592 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
1593 				      uint8_t vdev_id, uint8_t *peer_addr,
1594 				      uint16_t max_bssid);
1595 
1596 /**
1597  * dp_peer_state_update() - update peer local state
1598  * @soc - data path soc handle
1599  * @peer_mac - peer mac address
1600  * @state - new peer local state
1601  *
1602  * update peer local state
1603  *
1604  * Return: QDF_STATUS_SUCCESS registration success
1605  */
1606 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc, uint8_t *peer_mac,
1607 				enum ol_txrx_peer_state state);
1608 
1609 /**
1610  * dp_get_vdevid() - Get virtual interface id which peer registered
1611  * @soc_hdl - datapath soc handle
1612  * @peer_mac - peer mac address
1613  * @vdev_id - virtual interface id which peer registered
1614  *
1615  * Get virtual interface id which peer registered
1616  *
1617  * Return: QDF_STATUS_SUCCESS registration success
1618  */
1619 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
1620 			 uint8_t *vdev_id);
1621 struct cdp_vdev *dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
1622 		struct qdf_mac_addr peer_addr);
1623 struct cdp_vdev *dp_get_vdev_for_peer(void *peer);
1624 uint8_t *dp_peer_get_peer_mac_addr(void *peer);
1625 
1626 /**
1627  * dp_get_peer_state() - Get local peer state
1628  * @soc - datapath soc handle
1629  * @vdev_id - vdev id
1630  * @peer_mac - peer mac addr
1631  *
1632  * Get local peer state
1633  *
1634  * Return: peer status
1635  */
1636 int dp_get_peer_state(struct cdp_soc_t *soc, uint8_t vdev_id,
1637 		      uint8_t *peer_mac);
1638 void dp_local_peer_id_pool_init(struct dp_pdev *pdev);
1639 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer);
1640 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer);
1641 #else
1642 /**
1643  * dp_get_vdevid() - Get virtual interface id which peer registered
1644  * @soc_hdl - datapath soc handle
1645  * @peer_mac - peer mac address
1646  * @vdev_id - virtual interface id which peer registered
1647  *
1648  * Get virtual interface id which peer registered
1649  *
1650  * Return: QDF_STATUS_SUCCESS registration success
1651  */
1652 static inline
1653 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
1654 			 uint8_t *vdev_id)
1655 {
1656 	return QDF_STATUS_E_NOSUPPORT;
1657 }
1658 
1659 static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
1660 {
1661 }
1662 
1663 static inline
1664 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
1665 {
1666 }
1667 
1668 static inline
1669 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
1670 {
1671 }
1672 #endif
1673 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
1674 				      uint8_t *peer_mac, uint16_t vdev_id,
1675 				      uint8_t tid,
1676 				      int status);
1677 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
1678 				  uint8_t *peer_mac, uint16_t vdev_id,
1679 				  uint8_t dialogtoken, uint16_t tid,
1680 				  uint16_t batimeout,
1681 				  uint16_t buffersize,
1682 				  uint16_t startseqnum);
1683 QDF_STATUS dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc,
1684 					uint8_t *peer_mac, uint16_t vdev_id,
1685 					uint8_t tid, uint8_t *dialogtoken,
1686 					uint16_t *statuscode,
1687 					uint16_t *buffersize,
1688 					uint16_t *batimeout);
1689 QDF_STATUS dp_set_addba_response(struct cdp_soc_t *cdp_soc,
1690 				 uint8_t *peer_mac,
1691 				 uint16_t vdev_id, uint8_t tid,
1692 				 uint16_t statuscode);
1693 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1694 			   uint16_t vdev_id, int tid,
1695 			   uint16_t reasoncode);
1696 /*
1697  * dp_delba_tx_completion_wifi3() - Handle delba tx completion
1698  * @cdp_soc: soc handle
1699  * @vdev_id: id of the vdev handle
1700  * @peer_mac: peer mac address
1701  * @tid: Tid number
1702  * @status: Tx completion status
1703  *
1704  * Indicate status of delba Tx to DP for stats update and retry
1705  * delba if tx failed.
1706  * Return: 0 on success, error code on failure
1707  */
1708 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1709 				 uint16_t vdev_id, uint8_t tid,
1710 				 int status);
1711 extern QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1712 					uint32_t ba_window_size,
1713 					uint32_t start_seq);
1714 
1715 extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc,
1716 	enum hal_reo_cmd_type type, struct hal_reo_cmd_params *params,
1717 	void (*callback_fn), void *data);
1718 
1719 extern void dp_reo_cmdlist_destroy(struct dp_soc *soc);
1720 
1721 /**
1722  * dp_reo_status_ring_handler - Handler for REO Status ring
1723  * @int_ctx: pointer to DP interrupt context
1724  * @soc: DP Soc handle
1725  *
1726  * Returns: Number of descriptors reaped
1727  */
1728 uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx,
1729 				    struct dp_soc *soc);
1730 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
1731 			     struct cdp_vdev_stats *vdev_stats);
1732 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1733 	union hal_reo_status *reo_status);
1734 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1735 		union hal_reo_status *reo_status);
1736 uint16_t dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id,
1737 				     qdf_nbuf_t nbuf,
1738 				     uint8_t newmac[][QDF_MAC_ADDR_SIZE],
1739 				     uint8_t new_mac_cnt, uint8_t tid,
1740 				     bool is_igmp);
1741 void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id);
1742 
1743 void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id);
1744 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
1745 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
1746 		uint32_t config_param_1, uint32_t config_param_2,
1747 		uint32_t config_param_3, int cookie, int cookie_msb,
1748 		uint8_t mac_id);
1749 void dp_htt_stats_print_tag(struct dp_pdev *pdev,
1750 			    uint8_t tag_type, uint32_t *tag_buf);
1751 void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf);
1752 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, uint32_t tuple_mask,
1753 				     uint8_t mac_id);
1754 /**
1755  * dp_rxtid_stats_cmd_cb - function pointer for peer
1756  *			   rx tid stats cmd callback
1757  */
1758 typedef void (*dp_rxtid_stats_cmd_cb)(struct dp_soc *soc, void *cb_ctxt,
1759 				      union hal_reo_status *reo_status);
1760 int dp_peer_rxtid_stats(struct dp_peer *peer,
1761 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
1762 			void *cb_ctxt);
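
/*
 * Illustrative sketch (not from the driver sources): the shape of a
 * callback matching dp_rxtid_stats_cmd_cb and how it is passed to
 * dp_peer_rxtid_stats(); dp_rx_tid_stats_cb() above is an in-tree
 * callback of this type. "my_rxtid_stats_cb", "my_ctxt" and "peer" are
 * placeholders:
 *
 *	static void my_rxtid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
 *				      union hal_reo_status *reo_status)
 *	{
 *		... inspect reo_status for the per-TID REO queue status ...
 *	}
 *
 *	dp_peer_rxtid_stats(peer, my_rxtid_stats_cb, my_ctxt);
 */
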
1763 QDF_STATUS
1764 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
1765 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
1766 		      uint32_t *rx_pn);
1767 
1768 QDF_STATUS
1769 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
1770 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
1771 			  bool is_unicast);
1772 
1773 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id);
1774 
1775 QDF_STATUS
1776 dp_set_michael_key(struct cdp_soc_t *soc, uint8_t vdev_id,
1777 		   uint8_t *peer_mac,
1778 		   bool is_unicast, uint32_t *key);
1779 
1780 /**
1781  * dp_check_pdev_exists() - Validate pdev before use
1782  * @soc: dp soc handle
1783  * @data: pdev handle
1784  *
1785  * Return: true if pdev exists in the soc pdev list, false otherwise
1786  */
1787 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data);
1788 
1789 /**
1790  * dp_update_delay_stats() - Update delay statistics in structure
1791  *                              and fill min, max and avg delay
1792  * @pdev: pdev handle
1793  * @delay: delay in ms
1794  * @tid: tid value
1795  * @mode: type of tx delay mode
1796  * @ring_id: ring number
1797  *
1798  * Return: none
1799  */
1800 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
1801 			   uint8_t tid, uint8_t mode, uint8_t ring_id);
1802 
1803 /**
1804  * dp_print_ring_stats(): Print tail and head pointers of all rings
1805  * @pdev: DP_PDEV handle
1806  *
1807  * Return: void
1808  */
1809 void dp_print_ring_stats(struct dp_pdev *pdev);
1810 
1811 /**
1812  * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
1813  * @pdev: DP pdev handle
1814  *
1815  * Return: void
1816  */
1817 void dp_print_pdev_cfg_params(struct dp_pdev *pdev);
1818 
1819 /**
1820  * dp_print_soc_cfg_params() - Dump soc wlan config parameters
1821  * @soc_handle: Soc handle
1822  *
1823  * Return: void
1824  */
1825 void dp_print_soc_cfg_params(struct dp_soc *soc);
1826 
1827 /**
1828  * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
1829  * @ring_type: Ring type
1830  *
1831  * Return: char const pointer
1832  */
1833 const
1834 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type);
1835 
1836 /*
1837  * dp_txrx_path_stats() - Display TX/RX path stats
1838  * @soc: soc handle
1839  *
1840  * Return: none
1841  */
1842 void dp_txrx_path_stats(struct dp_soc *soc);
1843 
1844 /*
1845  * dp_print_per_ring_stats(): Print packet count per ring
1846  * @soc: soc handle
1847  *
1848  * Return: None
1849  */
1850 void dp_print_per_ring_stats(struct dp_soc *soc);
1851 
1852 /**
1853  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
1854  * @pdev: DP PDEV handle
1855  *
1856  * Return: void
1857  */
1858 void dp_aggregate_pdev_stats(struct dp_pdev *pdev);
1859 
1860 /**
1861  * dp_print_rx_rates(): Print Rx rate stats
1862  * @vdev: DP_VDEV handle
1863  *
1864  * Return: void
1865  */
1866 void dp_print_rx_rates(struct dp_vdev *vdev);
1867 
1868 /**
1869  * dp_print_tx_rates(): Print tx rates
1870  * @vdev: DP_VDEV handle
1871  *
1872  * Return: void
1873  */
1874 void dp_print_tx_rates(struct dp_vdev *vdev);
1875 
1876 /**
1877  * dp_print_peer_stats(): Print peer stats
1878  * @peer: DP_PEER handle
1879  *
1880  * Return: void
1881  */
1882 void dp_print_peer_stats(struct dp_peer *peer);
1883 
1884 /**
1885  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
1886  * @pdev: DP_PDEV Handle
1887  *
1888  * Return: void
1889  */
1890 void
1891 dp_print_pdev_tx_stats(struct dp_pdev *pdev);
1892 
1893 /**
1894  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
1895  * @pdev: DP_PDEV Handle
1896  *
1897  * Return: void
1898  */
1899 void
1900 dp_print_pdev_rx_stats(struct dp_pdev *pdev);
1901 
1902 /**
1903  * dp_print_soc_tx_stats(): Print SOC level TX stats
1904  * @soc: DP_SOC Handle
1905  *
1906  * Return: void
1907  */
1908 void dp_print_soc_tx_stats(struct dp_soc *soc);
1909 
1910 /**
1911  * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc
1912  * @soc: dp_soc handle
1913  *
1914  * Return: None
1915  */
1916 void dp_print_soc_interrupt_stats(struct dp_soc *soc);
1917 
1918 /**
1919  * dp_print_soc_rx_stats(): Print SOC level Rx stats
1920  * @soc: DP_SOC Handle
1921  *
1922  * Return: void
1923  */
1924 void dp_print_soc_rx_stats(struct dp_soc *soc);
1925 
1926 /**
1927  * dp_get_mac_id_for_pdev() - Return mac id corresponding to pdev
1928  *
1929  * @mac_id: MAC id
1930  * @pdev_id: pdev_id corresponding to pdev, 0 for MCL
1931  *
1932  * Single pdev using both MACs will operate on both MAC rings,
1933  * which is the case for MCL.
1934  * For WIN each PDEV will operate one ring, so index is zero.
1935  * Return: MAC ring index (mac_id + pdev_id)
1936  */
1937 static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id)
1938 {
1939 	if (mac_id && pdev_id) {
1940 		qdf_print("Both mac_id and pdev_id cannot be non zero");
1941 		QDF_BUG(0);
1942 		return 0;
1943 	}
1944 	return (mac_id + pdev_id);
1945 }
1946 
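/*
 * Illustrative examples (not from the driver sources), assuming the
 * constraint above that at most one of mac_id and pdev_id is non-zero:
 *
 *	dp_get_mac_id_for_pdev(1, 0) == 1;  MCL: second MAC ring of pdev 0
 *	dp_get_mac_id_for_pdev(0, 2) == 2;  WIN: ring index for pdev 2
 */
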
1947 /**
1948  * dp_get_lmac_id_for_pdev_id() - Return lmac id corresponding to host pdev id
1949  * @soc: soc pointer
1950  * @mac_id: MAC id
1951  * @pdev_id: pdev_id corresponding to pdev, 0 for MCL
1952  *
1953  * For MCL, a single pdev using both MACs will operate on both MAC rings.
1954  *
1955  * For WIN, each PDEV will operate one ring.
1956  * Return: lmac id
1957  */
1958 static inline int
1959 dp_get_lmac_id_for_pdev_id
1960 	(struct dp_soc *soc, uint32_t mac_id, uint32_t pdev_id)
1961 {
1962 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
1963 		if (mac_id && pdev_id) {
1964 			qdf_print("Both mac_id and pdev_id cannot be non zero");
1965 			QDF_BUG(0);
1966 			return 0;
1967 		}
1968 		return (mac_id + pdev_id);
1969 	}
1970 
1971 	return soc->pdev_list[pdev_id]->lmac_id;
1972 }
1973 
1974 /**
1975  * dp_get_pdev_for_lmac_id() - Return pdev pointer corresponding to lmac id
1976  * @soc: soc pointer
1977  * @lmac_id: LMAC id
1978  *
1979  * For MCL, a single pdev exists.
1980  * For WIN, each PDEV will operate one ring.
1981  *
1982  * Return: pdev pointer for the lmac id, NULL if lmac_id is invalid
1983  */
1984 static inline struct dp_pdev *
1985 	dp_get_pdev_for_lmac_id(struct dp_soc *soc, uint32_t lmac_id)
1986 {
1987 	uint8_t i = 0;
1988 
1989 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
1990 		i = wlan_cfg_get_pdev_idx(soc->wlan_cfg_ctx, lmac_id);
1991 		return ((i < MAX_PDEV_CNT) ? soc->pdev_list[i] : NULL);
1992 	}
1993 
1994 	/* Typically for MCL, as there is only 1 PDEV */
1995 	return soc->pdev_list[0];
1996 }
1997 
1998 /**
1999  * dp_calculate_target_pdev_id_from_host_pdev_id() - Return target pdev
2000  *                                          corresponding to host pdev id
2001  * @soc: soc pointer
2002  * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL
2003  *
2004  * returns target pdev_id for host pdev id. For WIN, this is derived through
2005  * a two step process:
2006  * 1. Get lmac_id corresponding to host pdev_id (lmac_id can change
2007  *    during mode switch)
2008  * 2. Get target pdev_id (set up during WMI ready) from lmac_id
2009  *
2010  * For MCL, return the offset-1 translated mac_id
2011  */
2012 static inline int
2013 dp_calculate_target_pdev_id_from_host_pdev_id
2014 	(struct dp_soc *soc, uint32_t mac_for_pdev)
2015 {
2016 	struct dp_pdev *pdev;
2017 
2018 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2019 		return DP_SW2HW_MACID(mac_for_pdev);
2020 
2021 	pdev = soc->pdev_list[mac_for_pdev];
2022 
2023 	/* non-MCL case, get original target_pdev mapping */
2024 	return wlan_cfg_get_target_pdev_id(soc->wlan_cfg_ctx, pdev->lmac_id);
2025 }
2026 
2027 /**
2028  * dp_get_target_pdev_id_for_host_pdev_id() - Return target pdev corresponding
2029  *                                         to host pdev id
2030  * @soc: soc pointer
2031  * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL
2032  *
2033  * returns target pdev_id for host pdev id.
2034  * For WIN, return the value stored in pdev object.
2035  * For MCL, return the offset-1 translated mac_id.
2036  */
2037 static inline int
2038 dp_get_target_pdev_id_for_host_pdev_id
2039 	(struct dp_soc *soc, uint32_t mac_for_pdev)
2040 {
2041 	struct dp_pdev *pdev;
2042 
2043 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2044 		return DP_SW2HW_MACID(mac_for_pdev);
2045 
2046 	pdev = soc->pdev_list[mac_for_pdev];
2047 
2048 	return pdev->target_pdev_id;
2049 }
2050 
2051 /**
2052  * dp_get_host_pdev_id_for_target_pdev_id() - Return host pdev corresponding
2053  *                                         to target pdev id
2054  * @soc: soc pointer
2055  * @pdev_id: pdev_id corresponding to target pdev
2056  *
2057  * returns host pdev_id for target pdev id. For WIN, this is derived through
2058  * a two step process:
2059  * 1. Get lmac_id corresponding to target pdev_id
2060  * 2. Get host pdev_id (set up during WMI ready) from lmac_id
2061  *
2062  * For MCL, return the 0-offset pdev_id
2063  */
2064 static inline int
2065 dp_get_host_pdev_id_for_target_pdev_id
2066 	(struct dp_soc *soc, uint32_t pdev_id)
2067 {
2068 	struct dp_pdev *pdev;
2069 	int lmac_id;
2070 
2071 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2072 		return DP_HW2SW_MACID(pdev_id);
2073 
2074 	/* non-MCL case, get original target_lmac mapping from target pdev */
2075 	lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx,
2076 					  DP_HW2SW_MACID(pdev_id));
2077 
2078 	/* Get host pdev from lmac */
2079 	pdev = dp_get_pdev_for_lmac_id(soc, lmac_id);
2080 
2081 	return pdev ? pdev->pdev_id : INVALID_PDEV_ID;
2082 }
2083 
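/*
 * Illustrative MCL round-trip (a sketch, not from the driver sources),
 * assuming DP_SW2HW_MACID()/DP_HW2SW_MACID() are the +1/-1 offset
 * translations referred to above:
 *
 *	target = dp_get_target_pdev_id_for_host_pdev_id(soc, 0);  => 1
 *	host = dp_get_host_pdev_id_for_target_pdev_id(soc, 1);    => 0
 */
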
2084 /*
2085  * dp_get_mac_id_for_mac() - Return mac id corresponding to WIN/MCL mac_ids
2086  *
2087  * @soc: handle to DP soc
2088  * @mac_id: MAC id
2089  *
2090  * Single pdev using both MACs will operate on both MAC rings,
2091  * which is the case for MCL.
2092  * For WIN each PDEV will operate one ring, so index is zero.
2093  *
2094  */
2095 static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id)
2096 {
2097 	/*
2098 	 * Single pdev using both MACs will operate on both MAC rings,
2099 	 * which is the case for MCL.
2100 	 */
2101 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2102 		return mac_id;
2103 
2104 	/* For WIN each PDEV will operate one ring, so index is zero. */
2105 	return 0;
2106 }
2107 
2108 /*
2109  * dp_is_subtype_data() - check if the frame subtype is data
2110  *
2111  * @frame_ctrl: Frame control field
2112  *
2113  * check the frame control field and verify if the packet
2114  * is a data packet.
2115  *
2116  * Return: true or false
2117  */
2118 static inline bool dp_is_subtype_data(uint16_t frame_ctrl)
2119 {
2120 	if (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_TYPE_MASK) ==
2121 	    QDF_IEEE80211_FC0_TYPE_DATA) &&
2122 	    (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
2123 	    QDF_IEEE80211_FC0_SUBTYPE_DATA) ||
2124 	    ((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
2125 	    QDF_IEEE80211_FC0_SUBTYPE_QOS))) {
2126 		return true;
2127 	}
2128 
2129 	return false;
2130 }
2131 
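/*
 * Worked examples for dp_is_subtype_data() (illustrative frame control
 * values on a little-endian host):
 *
 *	dp_is_subtype_data(0x0008) => true   (type data, subtype data)
 *	dp_is_subtype_data(0x0088) => true   (type data, subtype QoS data)
 *	dp_is_subtype_data(0x0080) => false  (type mgmt, subtype beacon)
 */
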
2132 #ifdef WDI_EVENT_ENABLE
2133 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
2134 				uint32_t stats_type_upload_mask,
2135 				uint8_t mac_id);
2136 
2137 int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id,
2138 		       wdi_event_subscribe *event_cb_sub_handle,
2139 		       uint32_t event);
2140 
2141 int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id,
2142 		     wdi_event_subscribe *event_cb_sub_handle,
2143 		     uint32_t event);
2144 
2145 void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc,
2146 			  void *data, u_int16_t peer_id,
2147 			  int status, u_int8_t pdev_id);
2148 
2149 int dp_wdi_event_attach(struct dp_pdev *txrx_pdev);
2150 int dp_wdi_event_detach(struct dp_pdev *txrx_pdev);
2151 
2152 static inline void
2153 dp_hif_update_pipe_callback(struct dp_soc *dp_soc,
2154 			    void *cb_context,
2155 			    QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t),
2156 			    uint8_t pipe_id)
2157 {
2158 	struct hif_msg_callbacks hif_pipe_callbacks;
2159 
2160 	/* TODO: Temporary change to bypass HTC connection for this new
2161 	 * HIF pipe, which will be used for packet log and other high-
2162 	 * priority HTT messages. Proper HTC connection to be added
2163 	 * later once required FW changes are available
2164 	 */
2165 	hif_pipe_callbacks.rxCompletionHandler = callback;
2166 	hif_pipe_callbacks.Context = cb_context;
2167 	hif_update_pipe_callback(dp_soc->hif_handle,
2168 		DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks);
2169 }
2170 #else
2171 static inline int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id,
2172 				     wdi_event_subscribe *event_cb_sub_handle,
2173 				     uint32_t event)
2174 {
2175 	return 0;
2176 }
2177 
2178 static inline int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id,
2179 				   wdi_event_subscribe *event_cb_sub_handle,
2180 				   uint32_t event)
2181 {
2182 	return 0;
2183 }
2184 
2185 static inline
2186 void dp_wdi_event_handler(enum WDI_EVENT event,
2187 			  struct dp_soc *soc,
2188 			  void *data, u_int16_t peer_id,
2189 			  int status, u_int8_t pdev_id)
2190 {
2191 }
2192 
2193 static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev)
2194 {
2195 	return 0;
2196 }
2197 
2198 static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev)
2199 {
2200 	return 0;
2201 }
2202 
2203 static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
2204 		uint32_t stats_type_upload_mask, uint8_t mac_id)
2205 {
2206 	return 0;
2207 }
2208 
2209 static inline void
2210 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context,
2211 			    QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t),
2212 			    uint8_t pipe_id)
2213 {
2214 }
2215 #endif /* WDI_EVENT_ENABLE */
2216 
2217 #ifdef VDEV_PEER_PROTOCOL_COUNT
2218 /**
2219  * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters
2220  * @vdev: VDEV DP object
2221  * @nbuf: data packet
2222  * @peer: Peer DP object
2223  * @is_egress: whether egress or ingress
2224  * @is_rx: whether rx or tx
2225  *
2226  * This function updates the per-peer protocol counters
2227  * Return: void
2228  */
2229 void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev,
2230 					    qdf_nbuf_t nbuf,
2231 					    struct dp_peer *peer,
2232 					    bool is_egress,
2233 					    bool is_rx);
2234 
2235 /**
2236  * dp_peer_stats_update_protocol_cnt() - update per-peer protocol counters
2237  * @soc: SOC DP object
2238  * @vdev_id: vdev_id
2239  * @nbuf: data packet
2240  * @is_egress: whether egress or ingress
2241  * @is_rx: whether rx or tx
2242  *
2243  * This function updates the per-peer protocol counters
2244  * Return: void
2245  */
2246 
2247 void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc,
2248 				       int8_t vdev_id,
2249 				       qdf_nbuf_t nbuf,
2250 				       bool is_egress,
2251 				       bool is_rx);
2252 
2253 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
2254 					       qdf_nbuf_t nbuf);
2255 
2256 #else
2257 #define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, peer, \
2258 					       is_egress, is_rx)
2259 
2260 static inline
2261 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
2262 					       qdf_nbuf_t nbuf)
2263 {
2264 }
2265 
2266 #endif
2267 
2268 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2269 void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl);
2270 
2271 /**
2272  * dp_tx_dump_flow_pool_info_compact() - dump flow pool info
2273  * @soc: DP soc context
2274  *
2275  * Return: none
2276  */
2277 void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc);
2278 int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
2279 	bool force);
2280 #else
2281 static inline void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc)
2282 {
2283 }
2284 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
2285 
2286 #ifdef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
2287 static inline int
2288 dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
2289 {
2290 	return hal_srng_access_start_unlocked(soc, hal_ring_hdl);
2291 }
2292 
2293 static inline void
2294 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
2295 {
2296 	hal_srng_access_end_unlocked(soc, hal_ring_hdl);
2297 }
2298 
2299 #else
2300 static inline int
2301 dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
2302 {
2303 	return hal_srng_access_start(soc, hal_ring_hdl);
2304 }
2305 
2306 static inline void
2307 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
2308 {
2309 	hal_srng_access_end(soc, hal_ring_hdl);
2310 }
2311 #endif
2312 
2313 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2314 /**
2315  * dp_srng_access_start() - Wrapper function to log access start of a hal ring
2316  * @int_ctx: pointer to DP interrupt context. This should not be NULL
2317  * @soc: DP Soc handle
2318  * @hal_ring_hdl: opaque pointer to the HAL ring to be serviced
2319  *
2320  * Return: 0 on success; error on failure
2321  */
2322 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2323 			 hal_ring_handle_t hal_ring_hdl);
2324 
2325 /**
2326  * dp_srng_access_end() - Wrapper function to log access end of a hal ring
2327  * @int_ctx: pointer to DP interrupt context. This should not be NULL
2328  * @soc: DP Soc handle
2329  * @hal_ring_hdl: opaque pointer to the HAL ring to be serviced
2330  *
2331  * Return: void
2332  */
2333 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2334 			hal_ring_handle_t hal_ring_hdl);
2335 
2336 #else
2337 static inline int dp_srng_access_start(struct dp_intr *int_ctx,
2338 				       struct dp_soc *dp_soc,
2339 				       hal_ring_handle_t hal_ring_hdl)
2340 {
2341 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2342 
2343 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2344 }
2345 
2346 static inline void dp_srng_access_end(struct dp_intr *int_ctx,
2347 				      struct dp_soc *dp_soc,
2348 				      hal_ring_handle_t hal_ring_hdl)
2349 {
2350 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2351 
2352 	dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2353 }
2354 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
2355 
2356 #ifdef QCA_CACHED_RING_DESC
2357 /**
2358  * dp_srng_dst_get_next() - Wrapper function to get next ring desc
2359  * @dp_soc: DP Soc handle
2360  * @hal_ring_hdl: opaque pointer to the HAL Destination Ring
2361  *
2362  * Return: HAL ring descriptor
2363  */
2364 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc,
2365 					 hal_ring_handle_t hal_ring_hdl)
2366 {
2367 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2368 
2369 	return hal_srng_dst_get_next_cached(hal_soc, hal_ring_hdl);
2370 }
2371 
2372 /**
2373  * dp_srng_dst_inv_cached_descs() - Wrapper function to invalidate cached
2374  * descriptors
2375  * @dp_soc: DP Soc handle
2376  * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring
2377  * @num_entries: Entry count
2378  *
2379  * Return: None
2380  */
2381 static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc,
2382 						hal_ring_handle_t hal_ring_hdl,
2383 						uint32_t num_entries)
2384 {
2385 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2386 
2387 	hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num_entries);
2388 }
2389 #else
2390 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc,
2391 					 hal_ring_handle_t hal_ring_hdl)
2392 {
2393 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2394 
2395 	return hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2396 }
2397 
2398 static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc,
2399 						hal_ring_handle_t hal_ring_hdl,
2400 						uint32_t num_entries)
2401 {
2402 }
2403 #endif /* QCA_CACHED_RING_DESC */
2404 
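/*
 * Typical reap-loop usage of the wrappers above (illustrative sketch,
 * not verbatim driver code); "desc" and the processing step are
 * placeholders:
 *
 *	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl)))
 *		return 0;
 *
 *	while ((desc = dp_srng_dst_get_next(soc, hal_ring_hdl)))
 *		... process desc ...
 *
 *	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
 */
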
2405 #ifdef QCA_ENH_V3_STATS_SUPPORT
2406 /**
2407  * dp_pdev_print_delay_stats(): Print pdev level delay stats
2408  * @pdev: DP_PDEV handle
2409  *
2410  * Return: void
2411  */
2412 void dp_pdev_print_delay_stats(struct dp_pdev *pdev);
2413 
2414 /**
2415  * dp_pdev_print_tid_stats(): Print pdev level tid stats
2416  * @pdev: DP_PDEV handle
2417  *
2418  * Return: void
2419  */
2420 void dp_pdev_print_tid_stats(struct dp_pdev *pdev);
2421 
2422 /**
2423  * dp_pdev_print_rx_error_stats(): Print pdev level rx error stats
2424  * @pdev: DP_PDEV handle
2425  *
2426  * Return: void
2427  */
2428 void dp_pdev_print_rx_error_stats(struct dp_pdev *pdev);
2429 #endif /* QCA_ENH_V3_STATS_SUPPORT */
2430 
2431 void dp_soc_set_txrx_ring_map(struct dp_soc *soc);
2432 
2433 /**
2434  * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev
2435  * @vdev: DP vdev handle
2436  *
2437  * Return: struct cdp_vdev pointer
2438  */
2439 static inline
2440 struct cdp_vdev *dp_vdev_to_cdp_vdev(struct dp_vdev *vdev)
2441 {
2442 	return (struct cdp_vdev *)vdev;
2443 }
2444 
2445 /**
2446  * dp_pdev_to_cdp_pdev() - typecast dp pdev to cdp pdev
2447  * @pdev: DP pdev handle
2448  *
2449  * Return: struct cdp_pdev pointer
2450  */
2451 static inline
2452 struct cdp_pdev *dp_pdev_to_cdp_pdev(struct dp_pdev *pdev)
2453 {
2454 	return (struct cdp_pdev *)pdev;
2455 }
2456 
2457 /**
2458  * dp_soc_to_cdp_soc() - typecast dp psoc to cdp psoc
2459  * @psoc: DP psoc handle
2460  *
2461  * Return: struct cdp_soc pointer
2462  */
2463 static inline
2464 struct cdp_soc *dp_soc_to_cdp_soc(struct dp_soc *psoc)
2465 {
2466 	return (struct cdp_soc *)psoc;
2467 }
2468 
2469 /**
2470  * dp_soc_to_cdp_soc_t() - typecast dp psoc to
2471  * ol txrx soc handle
2472  * @psoc: DP psoc handle
2473  *
2474  * Return: struct cdp_soc_t pointer
2475  */
2476 static inline
2477 struct cdp_soc_t *dp_soc_to_cdp_soc_t(struct dp_soc *psoc)
2478 {
2479 	return (struct cdp_soc_t *)psoc;
2480 }
2481 
2482 /**
2483  * cdp_soc_t_to_dp_soc() - typecast cdp_soc_t to
2484  * dp soc handle
2485  * @psoc: CDP psoc handle
2486  *
2487  * Return: struct dp_soc pointer
2488  */
2489 static inline
2490 struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc)
2491 {
2492 	return (struct dp_soc *)psoc;
2493 }
2494 
2495 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
2496 /**
2497  * dp_rx_flow_get_fse_stats() - Fetch a flow's statistics
2498  * @pdev: pdev handle
2499  * @rx_flow_info: flow parameters; flow index (truncated hash) in the Rx FST
2500  * @stats: buffer for the matched flow's statistics
2501  * Return: Success when flow statistics are fetched, error on failure
2502  */
2503 QDF_STATUS dp_rx_flow_get_fse_stats(struct dp_pdev *pdev,
2504 				    struct cdp_rx_flow_info *rx_flow_info,
2505 				    struct cdp_flow_stats *stats);
2506 
2507 /**
2508  * dp_rx_flow_delete_entry() - Delete a flow entry from flow search table
2509  * @pdev: pdev handle
2510  * @rx_flow_info: DP flow parameters
2511  *
2512  * Return: Success when flow is deleted, error on failure
2513  */
2514 QDF_STATUS dp_rx_flow_delete_entry(struct dp_pdev *pdev,
2515 				   struct cdp_rx_flow_info *rx_flow_info);
2516 
2517 /**
2518  * dp_rx_flow_add_entry() - Add a flow entry to flow search table
2519  * @pdev: DP pdev instance
2520  * @rx_flow_info: DP flow parameters
2521  *
2522  * Return: Success when flow is added, no-memory or already exists on error
2523  */
2524 QDF_STATUS dp_rx_flow_add_entry(struct dp_pdev *pdev,
2525 				struct cdp_rx_flow_info *rx_flow_info);
2526 
2527 /**
2528  * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
2529  * @soc: SoC handle
2530  * @pdev: Pdev handle
2531  *
2532  * Return: QDF_STATUS
2533  */
2534 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev);
2535 
2536 /**
2537  * dp_rx_fst_detach() - De-initialize Rx FST
2538  * @soc: SoC handle
2539  * @pdev: Pdev handle
2540  *
2541  * Return: None
2542  */
2543 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev);
2544 
2545 /**
2546  * dp_rx_flow_send_fst_fw_setup() - Program FST parameters in FW/HW post-attach
2547  * @soc: SoC handle
2548  * @pdev: Pdev handle
2549  *
2550  * Return: Success when fst parameters are programmed in FW, error otherwise
2551  */
2552 QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc,
2553 					struct dp_pdev *pdev);
2554 #else /* !(defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)) */
2555 
2556 /**
2557  * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
2558  * @soc: SoC handle
2559  * @pdev: Pdev handle
2560  *
2561  * Return: QDF_STATUS
2562  */
2563 static inline
2564 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev)
2565 {
2566 	return QDF_STATUS_SUCCESS;
2567 }
2568 
2569 /**
2570  * dp_rx_fst_detach() - De-initialize Rx FST
2571  * @soc: SoC handle
2572  * @pdev: Pdev handle
2573  *
2574  * Return: None
2575  */
2576 static inline
2577 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev)
2578 {
2579 }
2580 #endif
2581 
2582 /**
2583  * dp_vdev_get_ref() - API to take a reference for VDEV object
2584  *
2585  * @soc: core DP soc context
2586  * @vdev: DP vdev
2587  * @mod_id: module id
2588  *
2589  * Return:	QDF_STATUS_SUCCESS if reference held successfully
2590  *		else QDF_STATUS_E_INVAL
2591  */
2592 static inline
2593 QDF_STATUS dp_vdev_get_ref(struct dp_soc *soc, struct dp_vdev *vdev,
2594 			   enum dp_mod_id mod_id)
2595 {
2596 	if (!qdf_atomic_inc_not_zero(&vdev->ref_cnt))
2597 		return QDF_STATUS_E_INVAL;
2598 
2599 	qdf_atomic_inc(&vdev->mod_refs[mod_id]);
2600 
2601 	return QDF_STATUS_SUCCESS;
2602 }
2603 
2604 /**
2605  * dp_vdev_get_ref_by_id() - Returns vdev object given the vdev id
2606  * @soc: core DP soc context
2607  * @vdev_id: vdev id from vdev object can be retrieved
2608  * @mod_id: module id which is requesting the reference
2609  *
2610  * Return: struct dp_vdev*: Pointer to DP vdev object
2611  */
2612 static inline struct dp_vdev *
2613 dp_vdev_get_ref_by_id(struct dp_soc *soc, uint8_t vdev_id,
2614 		      enum dp_mod_id mod_id)
2615 {
2616 	struct dp_vdev *vdev = NULL;
2617 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
2618 		return NULL;
2619 
2620 	qdf_spin_lock_bh(&soc->vdev_map_lock);
2621 	vdev = soc->vdev_id_map[vdev_id];
2622 
2623 	if (!vdev || dp_vdev_get_ref(soc, vdev, mod_id) != QDF_STATUS_SUCCESS) {
2624 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
2625 		return NULL;
2626 	}
2627 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
2628 
2629 	return vdev;
2630 }
2631 
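/*
 * Illustrative usage (sketch): a reference taken with
 * dp_vdev_get_ref_by_id() must be released for the same module id;
 * dp_vdev_unref_delete() is assumed to be the matching release API.
 *
 *	struct dp_vdev *vdev;
 *
 *	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
 *	if (!vdev)
 *		return QDF_STATUS_E_INVAL;
 *	... use vdev ...
 *	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
 */
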
2632 /**
2633  * dp_get_pdev_from_soc_pdev_id_wifi3() - Returns pdev object given the pdev id
2634  * @soc: core DP soc context
2635  * @pdev_id: pdev id from pdev object can be retrieved
2636  *
2637  * Return: struct dp_pdev*: Pointer to DP pdev object
2638  */
2639 static inline struct dp_pdev *
2640 dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc *soc,
2641 				   uint8_t pdev_id)
2642 {
2643 	if (qdf_unlikely(pdev_id >= MAX_PDEV_CNT))
2644 		return NULL;
2645 
2646 	return soc->pdev_list[pdev_id];
2647 }
2648 
2649 /*
2650  * dp_rx_tid_update_wifi3() - Update receive TID state
2651  * @peer: Datapath peer handle
2652  * @tid: TID
2653  * @ba_window_size: BlockAck window size
2654  * @start_seq: Starting sequence number
2655  * @bar_update: BAR update triggered
2656  *
2657  * Return: QDF_STATUS code
2658  */
2659 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid,
2660 				  uint32_t ba_window_size, uint32_t start_seq,
2661 				  bool bar_update);
2662 
2663 /**
2664  * dp_get_peer_mac_list(): function to get peer mac list of vdev
2665  * @soc: Datapath soc handle
2666  * @vdev_id: vdev id
2667  * @newmac: Table to be filled with clients' mac addresses
2668  * @mac_cnt: No. of MACs required
2669  * @limit: Limit the number of clients
2670  *
2671  * Return: number of clients
2672  */
2673 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
2674 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
2675 			      u_int16_t mac_cnt, bool limit);
2676 /*
2677  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
2678  * @soc:		DP SoC context
2679  * @max_mac_rings:	Pointer to no of MAC rings; updated based on DBS support
2680  *
2681  * Return: None
2682  */
2683 void dp_is_hw_dbs_enable(struct dp_soc *soc,
2684 			 int *max_mac_rings);
2685 
2686 
2687 #if defined(WLAN_SUPPORT_RX_FISA)
2688 void dp_rx_dump_fisa_table(struct dp_soc *soc);
2689 
2690 /*
2691  * dp_rx_fst_update_cmem_params() - Update CMEM FST params
2692  * @soc:		DP SoC context
2693  * @num_entries:	Number of flow search entries
2694  * @cmem_ba_lo:		CMEM base address low
2695  * @cmem_ba_hi:		CMEM base address high
2696  *
2697  * Return: None
2698  */
2699 void dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries,
2700 				  uint32_t cmem_ba_lo, uint32_t cmem_ba_hi);
2701 
2702 void
2703 dp_rx_fst_update_pm_suspend_status(struct dp_soc *soc, bool suspended);
2704 #else
2705 static inline void
2706 dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries,
2707 			     uint32_t cmem_ba_lo, uint32_t cmem_ba_hi)
2708 {
2709 }
2710 
2711 static inline void
2712 dp_rx_fst_update_pm_suspend_status(struct dp_soc *soc, bool suspended)
2713 {
2714 }
2715 #endif /* WLAN_SUPPORT_RX_FISA */
2716 
2717 #ifdef MAX_ALLOC_PAGE_SIZE
2718 /**
2719  * dp_set_max_page_size() - Set the max page size for hw link desc.
2720  * For MCL the page size is set to OS defined value and for WIN
2721  * the page size is set to the max_alloc_size cfg ini
2722  * param.
2723  * This is to ensure that WIN gets contiguous memory allocations
2724  * as per requirement.
2725  * @pages: link desc page handle
2726  * @max_alloc_size: max_alloc_size
2727  *
2728  * Return: None
2729  */
2730 static inline
2731 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
2732 			  uint32_t max_alloc_size)
2733 {
2734 	pages->page_size = qdf_page_size;
2735 }
2736 
2737 #else
2738 static inline
2739 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
2740 			  uint32_t max_alloc_size)
2741 {
2742 	pages->page_size = max_alloc_size;
2743 }
2744 #endif /* MAX_ALLOC_PAGE_SIZE */
2745 
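/*
 * Example (illustrative): with MAX_ALLOC_PAGE_SIZE defined (MCL), pages
 * follow the OS page size (qdf_page_size, typically 4 KB); otherwise
 * (WIN) a max_alloc_size of 2 MB yields 2 MB pages, keeping link
 * descriptors physically contiguous.
 */
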
2746 /**
2747  * dp_history_get_next_index() - get the index at which the next history
2748  *				 entry is to be recorded.
2749  * @curr_idx: Current index where the last entry is written.
2750  * @max_entries: Max number of entries in the history
2751  *
2752  * This function assumes that the max number of entries is a power of 2.
2753  *
2754  * Returns: The index where the next entry is to be written.
2755  */
2756 static inline uint32_t dp_history_get_next_index(qdf_atomic_t *curr_idx,
2757 						 uint32_t max_entries)
2758 {
2759 	uint32_t idx = qdf_atomic_inc_return(curr_idx);
2760 
2761 	return idx & (max_entries - 1);
2762 }
2763 
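/*
 * Worked example (illustrative): with max_entries = 8 (a power of 2) and
 * *curr_idx currently 7, qdf_atomic_inc_return() yields 8, and
 * 8 & (8 - 1) == 0, i.e. the history wraps around to slot 0.
 */
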
2764 /**
2765  * dp_rx_skip_tlvs() - Skip TLVs len + L2 hdr_offset, save in nbuf->cb
2766  * @nbuf: nbuf cb to be updated
2767  * @l3_padding: l3 padding length to be saved in nbuf->cb
2768  *
2769  * Return: None
2770  */
2771 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding);
2772 
2773 #ifndef FEATURE_WDS
2774 static inline void
2775 dp_hmwds_ast_add_notify(struct dp_peer *peer,
2776 			uint8_t *mac_addr,
2777 			enum cdp_txrx_ast_entry_type type,
2778 			QDF_STATUS err,
2779 			bool is_peer_map)
2780 {
2781 }
2782 #endif
2783 
2784 #ifdef HTT_STATS_DEBUGFS_SUPPORT
2785 /* dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize
2786  * debugfs for HTT stats
2787  * @pdev: dp pdev handle
2788  *
2789  * Return: QDF_STATUS
2790  */
2791 QDF_STATUS dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev);
2792 
2793 /* dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for
2794  * HTT stats
2795  * @pdev: dp pdev handle
2796  *
2797  * Return: none
2798  */
2799 void dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev);
2800 #else
2801 
2802 /* dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize
2803  * debugfs for HTT stats
2804  * @pdev: dp pdev handle
2805  *
2806  * Return: QDF_STATUS
2807  */
2808 static inline QDF_STATUS
2809 dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev)
2810 {
2811 	return QDF_STATUS_SUCCESS;
2812 }
2813 
2814 /* dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for
2815  * HTT stats
2816  * @pdev: dp pdev handle
2817  *
2818  * Return: none
2819  */
2820 static inline void
2821 dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev)
2822 {
2823 }
2824 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
2825 
2826 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
2827 /**
2828  * dp_soc_swlm_attach() - attach the software latency manager resources
2829  * @soc: Datapath global soc handle
2830  *
2831  * Returns: QDF_STATUS
2832  */
2833 static inline QDF_STATUS dp_soc_swlm_attach(struct dp_soc *soc)
2834 {
2835 	return QDF_STATUS_SUCCESS;
2836 }
2837 
2838 /**
2839  * dp_soc_swlm_detach() - detach the software latency manager resources
2840  * @soc: Datapath global soc handle
2841  *
2842  * Returns: QDF_STATUS
2843  */
2844 static inline QDF_STATUS dp_soc_swlm_detach(struct dp_soc *soc)
2845 {
2846 	return QDF_STATUS_SUCCESS;
2847 }
2848 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
2849 
2850 #ifdef QCA_SUPPORT_WDS_EXTENDED
2851 /**
2852  * dp_wds_ext_get_peer_id(): function to get peer id by mac
2853  * This API is called from control path when wds extended
2854  * device is created, hence it also updates wds extended
2855  * peer state to up, which will be referred in rx processing.
2856  * @soc: Datapath soc handle
2857  * @vdev_id: vdev id
2858  * @mac: Peer mac address
2859  *
2860  * Return: valid peer id on success
2861  *         HTT_INVALID_PEER on failure
2862  */
2863 uint16_t dp_wds_ext_get_peer_id(ol_txrx_soc_handle soc,
2864 				uint8_t vdev_id,
2865 				uint8_t *mac);
2866 
2867 /**
2868  * dp_wds_ext_set_peer_rx(): function to set peer rx handler
2869  * @soc: Datapath soc handle
2870  * @vdev_id: vdev id
2871  * @mac: Peer mac address
2872  * @rx: rx function pointer
2873  * @osif_peer: OSIF peer handle
2874  * Return: QDF_STATUS_SUCCESS on success
2875  *         QDF_STATUS_E_INVAL if peer is not found
2876  *         QDF_STATUS_E_ALREADY if rx is already set/unset
2877  */
2878 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
2879 				  uint8_t vdev_id,
2880 				  uint8_t *mac,
2881 				  ol_txrx_rx_fp rx,
2882 				  ol_osif_peer_handle osif_peer);
2883 #endif /* QCA_SUPPORT_WDS_EXTENDED */
2884 
2885 #ifdef DP_MEM_PRE_ALLOC
2886 
2887 /**
2888  * dp_context_alloc_mem() - allocate memory for DP context
2889  * @soc: datapath soc handle
2890  * @ctxt_type: DP context type
2891  * @ctxt_size: DP context size
2892  *
2893  * Return: DP context address
2894  */
2895 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
2896 			   size_t ctxt_size);
2897 
2898 /**
2899  * dp_context_free_mem() - Free memory of DP context
2900  * @soc: datapath soc handle
2901  * @ctxt_type: DP context type
2902  * @vaddr: Address of context memory
2903  *
2904  * Return: None
2905  */
2906 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
2907 			 void *vaddr);
2908 
2909 /**
2910  * dp_desc_multi_pages_mem_alloc() - alloc memory over multiple pages
2911  * @soc: datapath soc handle
2912  * @desc_type: memory request source type
2913  * @pages: multi page information storage
2914  * @element_size: each element size
2915  * @element_num: total number of elements should be allocated
2916  * @memctxt: memory context
2917  * @cacheable: coherent memory or cacheable memory
2918  *
2919  * This function is a wrapper for memory allocation over multiple
2920  * pages; if a dp prealloc method is registered, prealloc is tried first.
2921  * If prealloc fails, it falls back to regular allocation via
2922  * qdf_mem_multi_pages_alloc().
2923  *
2924  * Return: None
2925  */
2926 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
2927 				   enum dp_desc_type desc_type,
2928 				   struct qdf_mem_multi_page_t *pages,
2929 				   size_t element_size,
2930 				   uint16_t element_num,
2931 				   qdf_dma_context_t memctxt,
2932 				   bool cacheable);
2933 
2934 /**
2935  * dp_desc_multi_pages_mem_free() - free multiple pages memory
2936  * @soc: datapath soc handle
2937  * @desc_type: memory request source type
2938  * @pages: multi page information storage
2939  * @memctxt: memory context
2940  * @cacheable: coherent memory or cacheable memory
2941  *
2942  * This function is a wrapper for freeing multi-page memory; if the memory
2943  * came from the prealloc pool, it is returned to the pool, otherwise it is
2944  * freed via qdf_mem_multi_pages_free().
2945  *
2946  * Return: None
2947  */
2948 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
2949 				  enum dp_desc_type desc_type,
2950 				  struct qdf_mem_multi_page_t *pages,
2951 				  qdf_dma_context_t memctxt,
2952 				  bool cacheable);
2953 
2954 #else
2955 static inline
2956 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
2957 			   size_t ctxt_size)
2958 {
2959 	return qdf_mem_malloc(ctxt_size);
2960 }
2961 
2962 static inline
2963 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
2964 			 void *vaddr)
2965 {
2966 	qdf_mem_free(vaddr);
2967 }
2968 
2969 static inline
2970 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
2971 				   enum dp_desc_type desc_type,
2972 				   struct qdf_mem_multi_page_t *pages,
2973 				   size_t element_size,
2974 				   uint16_t element_num,
2975 				   qdf_dma_context_t memctxt,
2976 				   bool cacheable)
2977 {
2978 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
2979 				  element_num, memctxt, cacheable);
2980 }
2981 
2982 static inline
2983 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
2984 				  enum dp_desc_type desc_type,
2985 				  struct qdf_mem_multi_page_t *pages,
2986 				  qdf_dma_context_t memctxt,
2987 				  bool cacheable)
2988 {
2989 	qdf_mem_multi_pages_free(soc->osdev, pages,
2990 				 memctxt, cacheable);
2991 }
2992 #endif
2993 
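/*
 * Illustrative usage (sketch) of the wrappers above; DP_PDEV_TYPE is
 * assumed here to be a valid enum dp_ctxt_type value:
 *
 *	struct dp_pdev *pdev;
 *
 *	pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, sizeof(*pdev));
 *	if (!pdev)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
 */
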
2994 #ifdef FEATURE_RUNTIME_PM
2995 /**
2996  * dp_runtime_get() - Get dp runtime refcount
2997  * @soc: Datapath soc handle
2998  *
2999  * Get dp runtime refcount by incrementing an atomic variable, which can
3000  * block dp runtime resume until pending tx is flushed by runtime suspend.
3001  *
3002  * Return: Current refcount
3003  */
3004 static inline int32_t dp_runtime_get(struct dp_soc *soc)
3005 {
3006 	return qdf_atomic_inc_return(&soc->dp_runtime_refcount);
3007 }
3008 
3009 /**
3010  * dp_runtime_put() - Return dp runtime refcount
3011  * @soc: Datapath soc handle
3012  *
3013  * Return dp runtime refcount by decrementing an atomic variable, allowing
3014  * dp runtime resume to finish.
3015  *
3016  * Return: Current refcount
3017  */
3018 static inline int32_t dp_runtime_put(struct dp_soc *soc)
3019 {
3020 	return qdf_atomic_dec_return(&soc->dp_runtime_refcount);
3021 }
3022 
3023 /**
3024  * dp_runtime_get_refcount() - Get dp runtime refcount
3025  * @soc: Datapath soc handle
3026  *
3027  * Get dp runtime refcount by returning an atomic variable
3028  *
3029  * Return: Current refcount
3030  */
3031 static inline int32_t dp_runtime_get_refcount(struct dp_soc *soc)
3032 {
3033 	return qdf_atomic_read(&soc->dp_runtime_refcount);
3034 }
3035 
3036 /**
3037  * dp_runtime_init() - Init dp runtime refcount when dp soc init
3038  * @soc: Datapath soc handle
3039  *
3040  * Return: QDF_STATUS
3041  */
3042 static inline QDF_STATUS dp_runtime_init(struct dp_soc *soc)
3043 {
3044 	return qdf_atomic_init(&soc->dp_runtime_refcount);
3045 }
3046 #else
3047 static inline int32_t dp_runtime_get(struct dp_soc *soc)
3048 {
3049 	return 0;
3050 }
3051 
3052 static inline int32_t dp_runtime_put(struct dp_soc *soc)
3053 {
3054 	return 0;
3055 }
3056 
3057 static inline QDF_STATUS dp_runtime_init(struct dp_soc *soc)
3058 {
3059 	return QDF_STATUS_SUCCESS;
3060 }
3061 #endif
3062 
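/*
 * Illustrative usage (sketch): hold a dp runtime reference across a ring
 * hardware-pointer update so runtime PM waits for it to complete:
 *
 *	dp_runtime_get(soc);
 *	... update ring HP / post TX descriptors ...
 *	dp_runtime_put(soc);
 */
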
3063 /*
3064  * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
3065  *				processing
3066  * @pdev: Datapath PDEV handle
3067  * Return: None
3068  */
3069 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev);
3070 
3071 /*
3072  * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
3073  *				processing
3074  * @pdev: Datapath PDEV handle
3075  *
3076  * Return: QDF_STATUS_SUCCESS: Success
3077  *         QDF_STATUS_E_NOMEM: Error
3078  */
3079 
3080 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev);
3081 
3082 /**
3083  * dp_peer_flush_frags() - Flush all fragments for a particular
3084  *  peer
3085  * @soc_hdl: data path soc handle
3086  * @vdev_id: vdev id
3087  * @peer_mac: peer mac address
3088  *
3089  * Return: None
3090  */
3091 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3092 			 uint8_t *peer_mac);
3093 
3094 /**
3095  * dp_soc_reset_mon_intr_mask() - reset mon intr mask
3096  * @soc: pointer to dp_soc handle
3097  *
3098  * Return: None
3099  */
3100 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc);
3101 #endif /* #ifndef _DP_INTERNAL_H_ */
3102