1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _DP_INTERNAL_H_
20 #define _DP_INTERNAL_H_
21 
22 #include "dp_types.h"
23 
24 #define RX_BUFFER_SIZE_PKTLOG_LITE 1024
25 
26 #define DP_PEER_WDS_COUNT_INVALID UINT_MAX
27 
28 /* Alignment for consistent memory for DP rings */
29 #define DP_RING_BASE_ALIGN 32
30 
31 #define DP_RSSI_INVAL 0x80
32 #define DP_RSSI_AVG_WEIGHT 2
33 /*
34  * Formula to derive avg_rssi is taken from wifi2.0 firmware
35  */
36 #define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \
37 	(((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \
38 	+ ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT)))
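
/*
 * Worked example (illustrative, not driver code): with
 * DP_RSSI_AVG_WEIGHT = 2 the macro implements a simple IIR filter,
 * avg' = avg - avg/4 + last/4, i.e. a 3/4 : 1/4 blend of the running
 * average and the newest sample:
 *
 *	uint8_t avg_rssi = 40;
 *	avg_rssi = DP_GET_AVG_RSSI(avg_rssi, 80);
 *	// avg_rssi == 40 - (40 >> 2) + (80 >> 2) == 50
 */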
39 
40 /* Macro for NYSM value received in VHT TLV */
41 #define VHT_SGI_NYSM 3
42 
43 /* struct htt_dbgfs_cfg - structure to maintain required htt data
44  * @msg_word: htt msg sent to upper layer
45  * @m: qdf debugfs file pointer
46  */
47 struct htt_dbgfs_cfg {
48 	uint32_t *msg_word;
49 	qdf_debugfs_file_t m;
50 };
51 
52 /* Cookie MSB bits assigned per use case.
53  * Note: The last 3 bits are reserved for the pdev_id and must not be
54  * used, leaving headroom in case the number of pdevs grows beyond 3.
55  */
56 /* Reserve for default case */
57 #define DBG_STATS_COOKIE_DEFAULT 0x0
58 
59 /* Reserve for DP Stats: 3rd bit */
60 #define DBG_STATS_COOKIE_DP_STATS 0x8
61 
62 /* Reserve for HTT Stats debugfs support: 4th bit */
63 #define DBG_STATS_COOKIE_HTT_DBGFS 0x10
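
/*
 * Illustrative sketch (assumption, not driver code): a stats request
 * encodes both the use case and the pdev_id in the cookie MSB, with the
 * pdev_id owning the reserved low 3 bits:
 *
 *	uint32_t cookie_msb = DBG_STATS_COOKIE_HTT_DBGFS | (pdev_id & 0x7);
 *
 * The completion path can then mask the same bits back out to route the
 * response.
 */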
64 
65 /**
66  * Bitmap of HTT PPDU TLV types for Default mode
67  */
68 #define HTT_PPDU_DEFAULT_TLV_BITMAP \
69 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
70 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
71 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
72 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
73 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
74 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))
75 
76 /* PPDU STATS CFG */
77 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
78 
79 /* PPDU stats mask sent to FW to enable enhanced stats */
80 #define DP_PPDU_STATS_CFG_ENH_STATS \
81 	(HTT_PPDU_DEFAULT_TLV_BITMAP | \
82 	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
83 	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
84 	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))
85 
86 /* PPDU stats mask sent to FW to support debug sniffer feature */
87 #define DP_PPDU_STATS_CFG_SNIFFER \
88 	(HTT_PPDU_DEFAULT_TLV_BITMAP | \
89 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
90 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV) | \
91 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
92 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
93 	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
96 	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
97 	(1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
98 	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))
99 
100 /* PPDU stats mask sent to FW to support BPR feature*/
101 #define DP_PPDU_STATS_CFG_BPR \
102 	((1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
103 	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))
104 
105 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
106 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
107 				   DP_PPDU_STATS_CFG_ENH_STATS)
108 /* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
109 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
110 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
111 
112 /**
113  * Bitmap of HTT PPDU delayed BA TLV types for Default mode
114  */
115 #define HTT_PPDU_DELAYED_BA_TLV_BITMAP \
116 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
117 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
118 	(1 << HTT_PPDU_STATS_USR_RATE_TLV))
119 
120 /**
121  * Bitmap of HTT PPDU TLV types for Delayed BA
122  */
123 #define HTT_PPDU_STATUS_TLV_BITMAP \
124 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
125 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))
126 
127 /**
128  * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64
129  */
130 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \
131 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
132 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
133 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
134 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
135 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
136 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
137 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
138 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV))
139 
140 /**
141  * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256
142  */
143 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \
144 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
145 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
146 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
147 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
148 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
149 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
150 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
151 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV))
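
/*
 * Minimal sketch (assumption): a PPDU stats handler gates TLV parsing
 * on the configured bitmap, one bit per HTT_PPDU_STATS_*_TLV type:
 *
 *	static inline bool dp_ppdu_tlv_enabled(uint32_t bitmap,
 *					       uint32_t tlv_type)
 *	{
 *		return !!(bitmap & (1 << tlv_type));
 *	}
 *
 *	if (dp_ppdu_tlv_enabled(HTT_PPDU_DEFAULT_TLV_BITMAP,
 *				HTT_PPDU_STATS_USR_RATE_TLV))
 *		parse_usr_rate_tlv(msg_word);
 *
 * parse_usr_rate_tlv() is a hypothetical consumer, named here only for
 * illustration.
 */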
152 
153 QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc);
154 QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc);
155 
156 #ifdef MONITOR_MODULARIZED_ENABLE
157 static inline bool dp_monitor_modularized_enable(void)
158 {
159 	return true;
160 }
161 
162 static inline QDF_STATUS
163 dp_mon_soc_attach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; }
164 
165 static inline QDF_STATUS
166 dp_mon_soc_detach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; }
167 #else
168 static inline bool dp_monitor_modularized_enable(void)
169 {
170 	return false;
171 }
172 
173 static inline QDF_STATUS dp_mon_soc_attach_wrapper(struct dp_soc *soc)
174 {
175 	return dp_mon_soc_attach(soc);
176 }
177 
178 static inline QDF_STATUS dp_mon_soc_detach_wrapper(struct dp_soc *soc)
179 {
180 	return dp_mon_soc_detach(soc);
181 }
182 #endif
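
/*
 * Usage sketch (illustrative): soc init/deinit paths call the wrappers
 * unconditionally; the #ifdef above decides whether they forward to
 * dp_mon_soc_attach()/dp_mon_soc_detach() or collapse to no-ops when
 * the monitor code is built as a separate module:
 *
 *	if (dp_mon_soc_attach_wrapper(soc) != QDF_STATUS_SUCCESS)
 *		goto fail;
 *	...
 *	dp_mon_soc_detach_wrapper(soc);
 */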
183 
184 #ifndef WIFI_MONITOR_SUPPORT
185 #define MON_BUF_MIN_ENTRIES 64
186 
187 static inline QDF_STATUS monitor_pdev_attach(struct dp_pdev *pdev)
188 {
189 	return QDF_STATUS_SUCCESS;
190 }
191 
192 static inline QDF_STATUS monitor_pdev_detach(struct dp_pdev *pdev)
193 {
194 	return QDF_STATUS_SUCCESS;
195 }
196 
197 static inline QDF_STATUS monitor_vdev_attach(struct dp_vdev *vdev)
198 {
199 	return QDF_STATUS_E_FAILURE;
200 }
201 
202 static inline QDF_STATUS monitor_vdev_detach(struct dp_vdev *vdev)
203 {
204 	return QDF_STATUS_E_FAILURE;
205 }
206 
207 static inline QDF_STATUS monitor_peer_attach(struct dp_soc *soc,
208 					     struct dp_peer *peer)
209 {
210 	return QDF_STATUS_SUCCESS;
211 }
212 
213 static inline QDF_STATUS monitor_peer_detach(struct dp_soc *soc,
214 					     struct dp_peer *peer)
215 {
216 	return QDF_STATUS_E_FAILURE;
217 }
218 
219 static inline QDF_STATUS monitor_pdev_init(struct dp_pdev *pdev)
220 {
221 	return QDF_STATUS_SUCCESS;
222 }
223 
224 static inline QDF_STATUS monitor_pdev_deinit(struct dp_pdev *pdev)
225 {
226 	return QDF_STATUS_SUCCESS;
227 }
228 
229 static inline QDF_STATUS monitor_soc_cfg_init(struct dp_soc *soc)
230 {
231 	return QDF_STATUS_SUCCESS;
232 }
233 
234 static inline QDF_STATUS monitor_config_debug_sniffer(struct dp_pdev *pdev,
235 						      int val)
236 {
237 	return QDF_STATUS_E_FAILURE;
238 }
239 
240 static inline void monitor_flush_rings(struct dp_soc *soc)
241 {
242 }
243 
244 static inline QDF_STATUS monitor_htt_srng_setup(struct dp_soc *soc,
245 						struct dp_pdev *pdev,
246 						int mac_id,
247 						int mac_for_pdev)
248 {
249 	return QDF_STATUS_SUCCESS;
250 }
251 
252 static inline void monitor_service_mon_rings(struct dp_soc *soc, uint32_t quota)
253 {
254 }
255 
256 static inline
257 uint32_t monitor_process(struct dp_soc *soc, struct dp_intr *int_ctx,
258 			 uint32_t mac_id, uint32_t quota)
259 {
260 	return 0;
261 }
262 
263 static inline
264 uint32_t monitor_drop_packets_for_mac(struct dp_pdev *pdev,
265 				      uint32_t mac_id, uint32_t quota)
266 {
267 	return 0;
268 }
269 
270 static inline void monitor_peer_tx_init(struct dp_pdev *pdev,
271 					struct dp_peer *peer)
272 {
273 }
274 
275 static inline void monitor_peer_tx_cleanup(struct dp_vdev *vdev,
276 					   struct dp_peer *peer)
277 {
278 }
279 
280 static inline
281 void monitor_peer_tid_peer_id_update(struct dp_soc *soc,
282 				     struct dp_peer *peer,
283 				     uint16_t peer_id)
284 {
285 }
286 
287 static inline void monitor_tx_ppdu_stats_attach(struct dp_pdev *pdev)
288 {
289 }
290 
291 static inline void monitor_tx_ppdu_stats_detach(struct dp_pdev *pdev)
292 {
293 }
294 
295 static inline QDF_STATUS monitor_tx_capture_debugfs_init(struct dp_pdev *pdev)
296 {
297 	return QDF_STATUS_SUCCESS;
298 }
299 
300 static inline void monitor_peer_tx_capture_filter_check(struct dp_pdev *pdev,
301 							struct dp_peer *peer)
302 {
303 }
304 
305 static inline
306 QDF_STATUS monitor_tx_add_to_comp_queue(struct dp_soc *soc,
307 					struct dp_tx_desc_s *desc,
308 					struct hal_tx_completion_status *ts,
309 					struct dp_peer *peer)
310 {
311 	return QDF_STATUS_E_FAILURE;
312 }
313 
314 static inline bool monitor_ppdu_stats_ind_handler(struct htt_soc *soc,
315 						  uint32_t *msg_word,
316 						  qdf_nbuf_t htt_t2h_msg)
317 {
318 	return true;
319 }
320 
321 static inline QDF_STATUS monitor_htt_ppdu_stats_attach(struct dp_pdev *pdev)
322 {
323 	return QDF_STATUS_SUCCESS;
324 }
325 
326 static inline void monitor_htt_ppdu_stats_detach(struct dp_pdev *pdev)
327 {
328 }
329 
330 static inline void monitor_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
331 {
332 }
333 
334 static inline QDF_STATUS monitor_config_enh_tx_capture(struct dp_pdev *pdev,
335 						       uint32_t val)
336 {
337 	return QDF_STATUS_E_INVAL;
338 }
339 
340 static inline QDF_STATUS monitor_config_enh_rx_capture(struct dp_pdev *pdev,
341 						       uint32_t val)
342 {
343 	return QDF_STATUS_E_INVAL;
344 }
345 
346 static inline QDF_STATUS monitor_set_bpr_enable(struct dp_pdev *pdev,
347 						uint32_t val)
348 {
349 	return QDF_STATUS_E_FAILURE;
350 }
351 
352 static inline int monitor_set_filter_neigh_peers(struct dp_pdev *pdev, bool val)
353 {
354 	return 0;
355 }
356 
357 static inline
358 void monitor_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
359 {
360 }
361 
362 static inline
363 void monitor_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
364 {
365 }
366 
367 static inline
368 bool monitor_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
369 {
370 	return false;
371 }
372 
373 static inline
374 bool monitor_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
375 {
376 	return false;
377 }
378 
379 static inline
380 bool monitor_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
381 {
382 	return false;
383 }
384 
385 static inline
386 int monitor_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, bool enable)
387 {
388 	return 0;
389 }
390 
391 static inline void monitor_pktlogmod_exit(struct dp_pdev *pdev)
392 {
393 }
394 
395 static inline
396 void monitor_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev)
397 {
398 }
399 
400 static inline
401 void monitor_neighbour_peers_detach(struct dp_pdev *pdev)
402 {
403 }
404 
405 static inline QDF_STATUS monitor_filter_neighbour_peer(struct dp_pdev *pdev,
406 						       uint8_t *rx_pkt_hdr)
407 {
408 	return QDF_STATUS_E_FAILURE;
409 }
410 
411 static inline void monitor_print_pdev_tx_capture_stats(struct dp_pdev *pdev)
412 {
413 }
414 
415 static inline
416 void monitor_reap_timer_init(struct dp_soc *soc)
417 {
418 }
419 
420 static inline
421 void monitor_reap_timer_deinit(struct dp_soc *soc)
422 {
423 }
424 
425 static inline
426 void monitor_reap_timer_start(struct dp_soc *soc)
427 {
428 }
429 
430 static inline
431 bool monitor_reap_timer_stop(struct dp_soc *soc)
432 {
433 	return false;
434 }
435 
436 static inline
437 void monitor_vdev_timer_init(struct dp_soc *soc)
438 {
439 }
440 
441 static inline
442 void monitor_vdev_timer_deinit(struct dp_soc *soc)
443 {
444 }
445 
446 static inline
447 void monitor_vdev_timer_start(struct dp_soc *soc)
448 {
449 }
450 
451 static inline
452 bool monitor_vdev_timer_stop(struct dp_soc *soc)
453 {
454 	return false;
455 }
456 
457 static inline struct qdf_mem_multi_page_t*
458 monitor_get_link_desc_pages(struct dp_soc *soc, uint32_t mac_id)
459 {
460 	return NULL;
461 }
462 
463 static inline uint32_t *
464 monitor_get_total_link_descs(struct dp_soc *soc, uint32_t mac_id)
465 {
466 	return NULL;
467 }
468 
469 static inline QDF_STATUS monitor_drop_inv_peer_pkts(struct dp_vdev *vdev,
470 						    struct ieee80211_frame *wh)
471 {
472 	return QDF_STATUS_E_FAILURE;
473 }
474 
475 static inline bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev)
476 {
477 	return false;
478 }
479 
480 static inline void monitor_vdev_register_osif(struct dp_vdev *vdev,
481 					      struct ol_txrx_ops *txrx_ops)
482 {
483 }
484 
485 static inline bool monitor_is_vdev_timer_running(struct dp_soc *soc)
486 {
487 	return false;
488 }
489 
490 static inline
491 void monitor_pdev_set_mon_vdev(struct dp_vdev *vdev)
492 {
493 }
494 
495 static inline void monitor_vdev_delete(struct dp_soc *soc, struct dp_vdev *vdev)
496 {
497 }
498 
499 static inline void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
500 {
501 }
502 
503 static inline void monitor_neighbour_peer_add_ast(struct dp_pdev *pdev,
504 						  struct dp_peer *ta_peer,
505 						  uint8_t *mac_addr,
506 						  qdf_nbuf_t nbuf,
507 						  uint32_t flags)
508 {
509 }
510 
511 static inline void
512 monitor_set_chan_band(struct dp_pdev *pdev, enum reg_wifi_band chan_band)
513 {
514 }
515 
516 static inline void
517 monitor_set_chan_freq(struct dp_pdev *pdev, qdf_freq_t chan_freq)
518 {
519 }
520 
521 static inline void monitor_set_chan_num(struct dp_pdev *pdev, int chan_num)
522 {
523 }
524 
525 static inline bool monitor_is_enable_mcopy_mode(struct dp_pdev *pdev)
526 {
527 	return false;
528 }
529 
530 static inline
531 void monitor_neighbour_peer_list_remove(struct dp_pdev *pdev,
532 					struct dp_vdev *vdev,
533 					struct dp_neighbour_peer *peer)
534 {
535 }
536 
537 static inline bool monitor_is_chan_band_known(struct dp_pdev *pdev)
538 {
539 	return false;
540 }
541 
542 static inline enum reg_wifi_band
543 monitor_get_chan_band(struct dp_pdev *pdev)
544 {
545 	return 0;
546 }
547 
548 static inline void monitor_get_mpdu_status(struct dp_pdev *pdev,
549 					   struct dp_soc *soc,
550 					   uint8_t *rx_tlv_hdr)
551 {
552 }
553 
554 static inline void monitor_print_tx_stats(struct dp_pdev *pdev)
555 {
556 }
557 
558 static inline
559 QDF_STATUS monitor_mcopy_check_deliver(struct dp_pdev *pdev,
560 				       uint16_t peer_id, uint32_t ppdu_id,
561 				       uint8_t first_msdu)
562 {
563 	return QDF_STATUS_SUCCESS;
564 }
565 
566 static inline bool monitor_is_enable_tx_sniffer(struct dp_pdev *pdev)
567 {
568 	return false;
569 }
570 
571 static inline struct dp_vdev*
572 monitor_get_monitor_vdev_from_pdev(struct dp_pdev *pdev)
573 {
574 	return NULL;
575 }
576 
577 static inline QDF_STATUS monitor_check_com_info_ppdu_id(struct dp_pdev *pdev,
578 							void *rx_desc)
579 {
580 	return QDF_STATUS_E_FAILURE;
581 }
582 
583 static inline struct mon_rx_status*
584 monitor_get_rx_status(struct dp_pdev *pdev)
585 {
586 	return NULL;
587 }
588 
589 static inline bool monitor_is_enable_enhanced_stats(struct dp_pdev *pdev)
590 {
591 	return false;
592 }
593 #endif
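
/*
 * Note on the stubs above (illustrative): with WIFI_MONITOR_SUPPORT
 * disabled, common data-path code can still invoke the monitor hooks
 * without #ifdef clutter. A call such as
 *
 *	monitor_service_mon_rings(soc, quota);
 *
 * compiles to nothing, while status-returning stubs pick a default the
 * caller can act on: QDF_STATUS_SUCCESS for optional steps and
 * QDF_STATUS_E_FAILURE for paths that must be treated as unsupported.
 */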
594 
595 #define DP_MAX_TIMER_EXEC_TIME_TICKS \
596 		(QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 * 20)
597 
598 /**
599  * enum timer_yield_status - yield status code used in monitor mode timer.
600  * @DP_TIMER_NO_YIELD: do not yield
601  * @DP_TIMER_WORK_DONE: yield because work is done
602  * @DP_TIMER_WORK_EXHAUST: yield because work quota is exhausted
603  * @DP_TIMER_TIME_EXHAUST: yield due to time slot exhausted
604  */
605 enum timer_yield_status {
606 	DP_TIMER_NO_YIELD,
607 	DP_TIMER_WORK_DONE,
608 	DP_TIMER_WORK_EXHAUST,
609 	DP_TIMER_TIME_EXHAUST,
610 };
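
/*
 * Minimal sketch (assumption, mirroring how this enum is typically
 * consumed): a monitor-mode timer handler compares elapsed log
 * timestamp ticks against DP_MAX_TIMER_EXEC_TIME_TICKS and yields once
 * its time slot is used up:
 *
 *	static inline enum timer_yield_status
 *	dp_timer_should_yield(uint64_t start_ticks, uint32_t work_done)
 *	{
 *		if (!work_done)
 *			return DP_TIMER_WORK_DONE;
 *		if (qdf_get_log_timestamp() - start_ticks >
 *		    DP_MAX_TIMER_EXEC_TIME_TICKS)
 *			return DP_TIMER_TIME_EXHAUST;
 *		return DP_TIMER_NO_YIELD;
 *	}
 */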
611 
612 #if DP_PRINT_ENABLE
613 #include <stdarg.h>       /* va_list */
614 #include <qdf_types.h> /* qdf_vprint */
615 #include <cdp_txrx_handle.h>
616 
617 enum {
618 	/* FATAL_ERR - print only irrecoverable error messages */
619 	DP_PRINT_LEVEL_FATAL_ERR,
620 
621 	/* ERR - include non-fatal err messages */
622 	DP_PRINT_LEVEL_ERR,
623 
624 	/* WARN - include warnings */
625 	DP_PRINT_LEVEL_WARN,
626 
627 	/* INFO1 - include fundamental, infrequent events */
628 	DP_PRINT_LEVEL_INFO1,
629 
630 	/* INFO2 - include non-fundamental but infrequent events */
631 	DP_PRINT_LEVEL_INFO2,
632 };
633 
634 #define dp_print(level, fmt, ...) do { \
635 	if (level <= g_txrx_print_level) \
636 		qdf_print(fmt, ## __VA_ARGS__); \
637 } while (0)
638 #define DP_PRINT(level, fmt, ...) do { \
639 	dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
640 } while (0)
641 #else
642 #define DP_PRINT(level, fmt, ...)
643 #endif /* DP_PRINT_ENABLE */
644 
645 #define DP_TRACE(LVL, fmt, args ...)                             \
646 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL,       \
647 		fmt, ## args)
648 
649 #ifdef DP_PRINT_NO_CONSOLE
650 /* Stat prints should not go to console or kernel logs. */
651 #define DP_PRINT_STATS(fmt, args ...)\
652 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,       \
653 		  fmt, ## args)
654 #else
655 #define DP_PRINT_STATS(fmt, args ...)\
656 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,\
657 		  fmt, ## args)
658 #endif
659 #define DP_STATS_INIT(_handle) \
660 	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))
661 
662 #define DP_STATS_CLR(_handle) \
663 	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))
664 
665 #ifndef DISABLE_DP_STATS
666 #define DP_STATS_INC(_handle, _field, _delta) \
667 { \
668 	if (likely(_handle)) \
669 		_handle->stats._field += _delta; \
670 }
671 
672 #define DP_STATS_INCC(_handle, _field, _delta, _cond) \
673 { \
674 	if (_cond && likely(_handle)) \
675 		_handle->stats._field += _delta; \
676 }
677 
678 #define DP_STATS_DEC(_handle, _field, _delta) \
679 { \
680 	if (likely(_handle)) \
681 		_handle->stats._field -= _delta; \
682 }
683 
684 #define DP_STATS_UPD(_handle, _field, _delta) \
685 { \
686 	if (likely(_handle)) \
687 		_handle->stats._field = _delta; \
688 }
689 
690 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \
691 { \
692 	DP_STATS_INC(_handle, _field.num, _count); \
693 	DP_STATS_INC(_handle, _field.bytes, _bytes) \
694 }
695 
696 #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
697 { \
698 	DP_STATS_INCC(_handle, _field.num, _count, _cond); \
699 	DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
700 }
701 
702 #define DP_STATS_AGGR(_handle_a, _handle_b, _field) \
703 { \
704 	_handle_a->stats._field += _handle_b->stats._field; \
705 }
706 
707 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \
708 { \
709 	DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \
710 	DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\
711 }
712 
713 #define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \
714 { \
715 	_handle_a->stats._field = _handle_b->stats._field; \
716 }
717 
718 #else
719 #define DP_STATS_INC(_handle, _field, _delta)
720 #define DP_STATS_INCC(_handle, _field, _delta, _cond)
721 #define DP_STATS_DEC(_handle, _field, _delta)
722 #define DP_STATS_UPD(_handle, _field, _delta)
723 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
724 #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
725 #define DP_STATS_AGGR(_handle_a, _handle_b, _field)
726 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field)
727 #endif
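
/*
 * Usage sketch (illustrative): the accessors compile away when
 * DISABLE_DP_STATS is defined, so callers use them unconditionally.
 * Field paths are relative to the handle's embedded stats structure;
 * the ones below are examples only:
 *
 *	DP_STATS_INC(pdev, tx_i.processed.num, 1);
 *	DP_STATS_INC_PKT(peer, rx.to_stack, 1, qdf_nbuf_len(nbuf));
 *	DP_STATS_INCC(pdev, rx.err.mic_err, 1, mic_failure_detected);
 */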
728 
729 #ifdef ENABLE_DP_HIST_STATS
730 #define DP_HIST_INIT() \
731 	uint32_t num_of_packets[MAX_PDEV_CNT] = {0};
732 
733 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) \
734 { \
735 		++num_of_packets[_pdev_id]; \
736 }
737 
738 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
739 	do {                                                              \
740 		if (_p_cntrs == 1) {                                      \
741 			DP_STATS_INC(_pdev,                               \
742 				tx_comp_histogram.pkts_1, 1);             \
743 		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
744 			DP_STATS_INC(_pdev,                               \
745 				tx_comp_histogram.pkts_2_20, 1);          \
746 		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
747 			DP_STATS_INC(_pdev,                               \
748 				tx_comp_histogram.pkts_21_40, 1);         \
749 		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
750 			DP_STATS_INC(_pdev,                               \
751 				tx_comp_histogram.pkts_41_60, 1);         \
752 		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
753 			DP_STATS_INC(_pdev,                               \
754 				tx_comp_histogram.pkts_61_80, 1);         \
755 		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
756 			DP_STATS_INC(_pdev,                               \
757 				tx_comp_histogram.pkts_81_100, 1);        \
758 		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
759 			DP_STATS_INC(_pdev,                               \
760 				tx_comp_histogram.pkts_101_200, 1);       \
761 		} else if (_p_cntrs > 200) {                              \
762 			DP_STATS_INC(_pdev,                               \
763 				tx_comp_histogram.pkts_201_plus, 1);      \
764 		}                                                         \
765 	} while (0)
766 
767 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
768 	do {                                                              \
769 		if (_p_cntrs == 1) {                                      \
770 			DP_STATS_INC(_pdev,                               \
771 				rx_ind_histogram.pkts_1, 1);              \
772 		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
773 			DP_STATS_INC(_pdev,                               \
774 				rx_ind_histogram.pkts_2_20, 1);           \
775 		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
776 			DP_STATS_INC(_pdev,                               \
777 				rx_ind_histogram.pkts_21_40, 1);          \
778 		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
779 			DP_STATS_INC(_pdev,                               \
780 				rx_ind_histogram.pkts_41_60, 1);          \
781 		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
782 			DP_STATS_INC(_pdev,                               \
783 				rx_ind_histogram.pkts_61_80, 1);          \
784 		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
785 			DP_STATS_INC(_pdev,                               \
786 				rx_ind_histogram.pkts_81_100, 1);         \
787 		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
788 			DP_STATS_INC(_pdev,                               \
789 				rx_ind_histogram.pkts_101_200, 1);        \
790 		} else if (_p_cntrs > 200) {                              \
791 			DP_STATS_INC(_pdev,                               \
792 				rx_ind_histogram.pkts_201_plus, 1);       \
793 		}                                                         \
794 	} while (0)
795 
796 #define DP_TX_HIST_STATS_PER_PDEV() \
797 	do { \
798 		uint8_t hist_stats = 0; \
799 		for (hist_stats = 0; hist_stats < soc->pdev_count; \
800 				hist_stats++) { \
801 			DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
802 					num_of_packets[hist_stats]); \
803 		} \
804 	}  while (0)
805 
806 
807 #define DP_RX_HIST_STATS_PER_PDEV() \
808 	do { \
809 		uint8_t hist_stats = 0; \
810 		for (hist_stats = 0; hist_stats < soc->pdev_count; \
811 				hist_stats++) { \
812 			DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
813 					num_of_packets[hist_stats]); \
814 		} \
815 	}  while (0)
816 
817 #else
818 #define DP_HIST_INIT()
819 #define DP_HIST_PACKET_COUNT_INC(_pdev_id)
820 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
821 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
822 #define DP_RX_HIST_STATS_PER_PDEV()
823 #define DP_TX_HIST_STATS_PER_PDEV()
824 #endif /* ENABLE_DP_HIST_STATS */
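
/*
 * Usage sketch (illustrative): a tx completion loop counts packets per
 * pdev and folds the totals into the histogram buckets once per pass.
 * next_completed_desc() is a placeholder, and DP_TX_HIST_STATS_PER_PDEV()
 * expects a local `soc` pointer in scope, since the macro references it:
 *
 *	DP_HIST_INIT();
 *	while ((desc = next_completed_desc(ring)) != NULL) {
 *		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
 *		...
 *	}
 *	DP_TX_HIST_STATS_PER_PDEV();
 */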
825 
826 #define FRAME_MASK_IPV4_ARP   1
827 #define FRAME_MASK_IPV4_DHCP  2
828 #define FRAME_MASK_IPV4_EAPOL 4
829 #define FRAME_MASK_IPV6_DHCP  8
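
/*
 * Illustrative sketch: the FRAME_MASK_* values combine into a bitmap
 * that selects which control-plane frames to deliver:
 *
 *	uint16_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_EAPOL;
 *
 *	if (frame_mask & FRAME_MASK_IPV4_EAPOL)
 *		deliver_to_stack(nbuf);
 *
 * deliver_to_stack() is a hypothetical consumer used for illustration.
 */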
830 
831 #ifdef QCA_SUPPORT_PEER_ISOLATION
832 #define dp_get_peer_isolation(_peer) ((_peer)->isolation)
833 
834 static inline void dp_set_peer_isolation(struct dp_peer *peer, bool val)
835 {
836 	peer->isolation = val;
837 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
838 		  "peer:"QDF_MAC_ADDR_FMT" isolation:%d",
839 		  QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->isolation);
840 }
841 
842 #else
843 #define dp_get_peer_isolation(_peer) (0)
844 
845 static inline void dp_set_peer_isolation(struct dp_peer *peer, bool val)
846 {
847 }
848 #endif /* QCA_SUPPORT_PEER_ISOLATION */
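
/*
 * Usage sketch (illustrative): callers toggle and test isolation only
 * through the helpers above, so builds without
 * QCA_SUPPORT_PEER_ISOLATION fall back to the no-op/zero variants:
 *
 *	dp_set_peer_isolation(peer, true);
 *	if (dp_get_peer_isolation(peer))
 *		drop_intra_bss_forwarding(nbuf);
 *
 * drop_intra_bss_forwarding() is a hypothetical consumer.
 */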
849 
850 #ifdef QCA_SUPPORT_WDS_EXTENDED
851 static inline void dp_wds_ext_peer_init(struct dp_peer *peer)
852 {
853 	peer->wds_ext.init = 0;
854 }
855 #else
856 static inline void dp_wds_ext_peer_init(struct dp_peer *peer)
857 {
858 }
859 #endif /* QCA_SUPPORT_WDS_EXTENDED */
860 
861 #ifdef QCA_HOST2FW_RXBUF_RING
862 static inline
863 struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
864 {
865 	return &pdev->rx_mac_buf_ring[lmac_id];
866 }
867 #else
868 static inline
869 struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
870 {
871 	return &pdev->soc->rx_refill_buf_ring[lmac_id];
872 }
873 #endif
874 
875 /**
876  * The lmac ID for a particular channel band is fixed.
877  * 2.4GHz band uses lmac_id = 1
878  * 5GHz/6GHz band uses lmac_id = 0
879  */
880 #define DP_INVALID_LMAC_ID	(-1)
881 #define DP_MON_INVALID_LMAC_ID	(-1)
882 #define DP_MON_2G_LMAC_ID	1
883 #define DP_MON_5G_LMAC_ID	0
884 #define DP_MON_6G_LMAC_ID	0
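
/*
 * Minimal sketch (assumption): mapping a channel band to its fixed
 * lmac id per the table above:
 *
 *	static inline int dp_band_to_lmac_id(enum reg_wifi_band band)
 *	{
 *		switch (band) {
 *		case REG_BAND_2G:
 *			return DP_MON_2G_LMAC_ID;
 *		case REG_BAND_5G:
 *			return DP_MON_5G_LMAC_ID;
 *		case REG_BAND_6G:
 *			return DP_MON_6G_LMAC_ID;
 *		default:
 *			return DP_MON_INVALID_LMAC_ID;
 *		}
 *	}
 */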
885 
886 #ifdef FEATURE_TSO_STATS
887 /**
888  * dp_init_tso_stats() - Clear tso stats
889  * @pdev: pdev handle
890  *
891  * Return: None
892  */
893 static inline
894 void dp_init_tso_stats(struct dp_pdev *pdev)
895 {
896 	if (pdev) {
897 		qdf_mem_zero(&((pdev)->stats.tso_stats),
898 			     sizeof((pdev)->stats.tso_stats));
899 		qdf_atomic_init(&pdev->tso_idx);
900 	}
901 }
902 
903 /**
904  * dp_stats_tso_segment_histogram_update() - TSO Segment Histogram
905  * @pdev: pdev handle
906  * @_p_cntrs: number of tso segments for a tso packet
907  *
908  * Return: None
909  */
910 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
911 					   uint8_t _p_cntrs);
912 
913 /**
914  * dp_tso_segment_update() - Collect tso segment information
915  * @pdev: pdev handle
916  * @stats_idx: tso packet number
917  * @idx: tso segment number
918  * @seg: tso segment
919  *
920  * Return: None
921  */
922 void dp_tso_segment_update(struct dp_pdev *pdev,
923 			   uint32_t stats_idx,
924 			   uint8_t idx,
925 			   struct qdf_tso_seg_t seg);
926 
927 /**
928  * dp_tso_packet_update() - TSO Packet information
929  * @pdev: pdev handle
930  * @stats_idx: tso packet number
931  * @msdu: nbuf handle
932  * @num_segs: tso segments
933  *
934  * Return: None
935  */
936 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
937 			  qdf_nbuf_t msdu, uint16_t num_segs);
938 
939 /**
940  * dp_tso_segment_stats_update() - TSO Segment stats
941  * @pdev: pdev handle
942  * @stats_seg: tso segment list
943  * @stats_idx: tso packet number
944  *
945  * Return: None
946  */
947 void dp_tso_segment_stats_update(struct dp_pdev *pdev,
948 				 struct qdf_tso_seg_elem_t *stats_seg,
949 				 uint32_t stats_idx);
950 
951 /**
952  * dp_print_tso_stats() - dump tso statistics
953  * @soc:soc handle
954  * @level: verbosity level
955  *
956  * Return: None
957  */
958 void dp_print_tso_stats(struct dp_soc *soc,
959 			enum qdf_stats_verbosity_level level);
960 
961 /**
962  * dp_txrx_clear_tso_stats() - clear tso stats
963  * @soc: soc handle
964  *
965  * Return: None
966  */
967 void dp_txrx_clear_tso_stats(struct dp_soc *soc);
968 #else
969 static inline
970 void dp_init_tso_stats(struct dp_pdev *pdev)
971 {
972 }
973 
974 static inline
975 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
976 					   uint8_t _p_cntrs)
977 {
978 }
979 
980 static inline
981 void dp_tso_segment_update(struct dp_pdev *pdev,
982 			   uint32_t stats_idx,
983 			   uint8_t idx,
984 			   struct qdf_tso_seg_t seg)
985 {
986 }
987 
988 static inline
989 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
990 			  qdf_nbuf_t msdu, uint16_t num_segs)
991 {
992 }
993 
994 static inline
995 void dp_tso_segment_stats_update(struct dp_pdev *pdev,
996 				 struct qdf_tso_seg_elem_t *stats_seg,
997 				 uint32_t stats_idx)
998 {
999 }
1000 
1001 static inline
1002 void dp_print_tso_stats(struct dp_soc *soc,
1003 			enum qdf_stats_verbosity_level level)
1004 {
1005 }
1006 
1007 static inline
1008 void dp_txrx_clear_tso_stats(struct dp_soc *soc)
1009 {
1010 }
1011 #endif /* FEATURE_TSO_STATS */
1012 
1013 #define DP_HTT_T2H_HP_PIPE 5
1014 static inline void dp_update_pdev_stats(struct dp_pdev *tgtobj,
1015 					struct cdp_vdev_stats *srcobj)
1016 {
1017 	uint8_t i;
1018 	uint8_t pream_type;
1019 
1020 	for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) {
1021 		for (i = 0; i < MAX_MCS; i++) {
1022 			tgtobj->stats.tx.pkt_type[pream_type].
1023 				mcs_count[i] +=
1024 			srcobj->tx.pkt_type[pream_type].
1025 				mcs_count[i];
1026 			tgtobj->stats.rx.pkt_type[pream_type].
1027 				mcs_count[i] +=
1028 			srcobj->rx.pkt_type[pream_type].
1029 				mcs_count[i];
1030 		}
1031 	}
1032 
1033 	for (i = 0; i < MAX_BW; i++) {
1034 		tgtobj->stats.tx.bw[i] += srcobj->tx.bw[i];
1035 		tgtobj->stats.rx.bw[i] += srcobj->rx.bw[i];
1036 	}
1037 
1038 	for (i = 0; i < SS_COUNT; i++) {
1039 		tgtobj->stats.tx.nss[i] += srcobj->tx.nss[i];
1040 		tgtobj->stats.rx.nss[i] += srcobj->rx.nss[i];
1041 	}
1042 
1043 	for (i = 0; i < WME_AC_MAX; i++) {
1044 		tgtobj->stats.tx.wme_ac_type[i] +=
1045 			srcobj->tx.wme_ac_type[i];
1046 		tgtobj->stats.rx.wme_ac_type[i] +=
1047 			srcobj->rx.wme_ac_type[i];
1048 		tgtobj->stats.tx.excess_retries_per_ac[i] +=
1049 			srcobj->tx.excess_retries_per_ac[i];
1050 	}
1051 
1052 	for (i = 0; i < MAX_GI; i++) {
1053 		tgtobj->stats.tx.sgi_count[i] +=
1054 			srcobj->tx.sgi_count[i];
1055 		tgtobj->stats.rx.sgi_count[i] +=
1056 			srcobj->rx.sgi_count[i];
1057 	}
1058 
1059 	for (i = 0; i < MAX_RECEPTION_TYPES; i++)
1060 		tgtobj->stats.rx.reception_type[i] +=
1061 			srcobj->rx.reception_type[i];
1062 
1063 	tgtobj->stats.tx.comp_pkt.bytes += srcobj->tx.comp_pkt.bytes;
1064 	tgtobj->stats.tx.comp_pkt.num += srcobj->tx.comp_pkt.num;
1065 	tgtobj->stats.tx.ucast.num += srcobj->tx.ucast.num;
1066 	tgtobj->stats.tx.ucast.bytes += srcobj->tx.ucast.bytes;
1067 	tgtobj->stats.tx.mcast.num += srcobj->tx.mcast.num;
1068 	tgtobj->stats.tx.mcast.bytes += srcobj->tx.mcast.bytes;
1069 	tgtobj->stats.tx.bcast.num += srcobj->tx.bcast.num;
1070 	tgtobj->stats.tx.bcast.bytes += srcobj->tx.bcast.bytes;
1071 	tgtobj->stats.tx.tx_success.num += srcobj->tx.tx_success.num;
1072 	tgtobj->stats.tx.tx_success.bytes +=
1073 		srcobj->tx.tx_success.bytes;
1074 	tgtobj->stats.tx.nawds_mcast.num +=
1075 		srcobj->tx.nawds_mcast.num;
1076 	tgtobj->stats.tx.nawds_mcast.bytes +=
1077 		srcobj->tx.nawds_mcast.bytes;
1078 	tgtobj->stats.tx.nawds_mcast_drop +=
1079 		srcobj->tx.nawds_mcast_drop;
1080 	tgtobj->stats.tx.num_ppdu_cookie_valid +=
1081 		srcobj->tx.num_ppdu_cookie_valid;
1082 	tgtobj->stats.tx.tx_failed += srcobj->tx.tx_failed;
1083 	tgtobj->stats.tx.ofdma += srcobj->tx.ofdma;
1084 	tgtobj->stats.tx.stbc += srcobj->tx.stbc;
1085 	tgtobj->stats.tx.ldpc += srcobj->tx.ldpc;
1086 	tgtobj->stats.tx.pream_punct_cnt += srcobj->tx.pream_punct_cnt;
1087 	tgtobj->stats.tx.retries += srcobj->tx.retries;
1088 	tgtobj->stats.tx.non_amsdu_cnt += srcobj->tx.non_amsdu_cnt;
1089 	tgtobj->stats.tx.amsdu_cnt += srcobj->tx.amsdu_cnt;
1090 	tgtobj->stats.tx.non_ampdu_cnt += srcobj->tx.non_ampdu_cnt;
1091 	tgtobj->stats.tx.ampdu_cnt += srcobj->tx.ampdu_cnt;
1092 	tgtobj->stats.tx.dropped.fw_rem.num += srcobj->tx.dropped.fw_rem.num;
1093 	tgtobj->stats.tx.dropped.fw_rem.bytes +=
1094 			srcobj->tx.dropped.fw_rem.bytes;
1095 	tgtobj->stats.tx.dropped.fw_rem_tx +=
1096 			srcobj->tx.dropped.fw_rem_tx;
1097 	tgtobj->stats.tx.dropped.fw_rem_notx +=
1098 			srcobj->tx.dropped.fw_rem_notx;
1099 	tgtobj->stats.tx.dropped.fw_reason1 +=
1100 			srcobj->tx.dropped.fw_reason1;
1101 	tgtobj->stats.tx.dropped.fw_reason2 +=
1102 			srcobj->tx.dropped.fw_reason2;
1103 	tgtobj->stats.tx.dropped.fw_reason3 +=
1104 			srcobj->tx.dropped.fw_reason3;
1105 	tgtobj->stats.tx.dropped.age_out += srcobj->tx.dropped.age_out;
1106 	tgtobj->stats.rx.err.mic_err += srcobj->rx.err.mic_err;
1107 	if (srcobj->rx.snr != 0)
1108 		tgtobj->stats.rx.snr = srcobj->rx.snr;
1109 	tgtobj->stats.rx.rx_rate = srcobj->rx.rx_rate;
1110 	tgtobj->stats.rx.err.decrypt_err += srcobj->rx.err.decrypt_err;
1111 	tgtobj->stats.rx.non_ampdu_cnt += srcobj->rx.non_ampdu_cnt;
1112 	tgtobj->stats.rx.ampdu_cnt += srcobj->rx.ampdu_cnt;
1113 	tgtobj->stats.rx.non_amsdu_cnt += srcobj->rx.non_amsdu_cnt;
1114 	tgtobj->stats.rx.amsdu_cnt += srcobj->rx.amsdu_cnt;
1115 	tgtobj->stats.rx.nawds_mcast_drop += srcobj->rx.nawds_mcast_drop;
1116 	tgtobj->stats.rx.to_stack.num += srcobj->rx.to_stack.num;
1117 	tgtobj->stats.rx.to_stack.bytes += srcobj->rx.to_stack.bytes;
1118 
1119 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
1120 		tgtobj->stats.rx.rcvd_reo[i].num +=
1121 			srcobj->rx.rcvd_reo[i].num;
1122 		tgtobj->stats.rx.rcvd_reo[i].bytes +=
1123 			srcobj->rx.rcvd_reo[i].bytes;
1124 	}
1125 
1126 	srcobj->rx.unicast.num =
1127 		srcobj->rx.to_stack.num -
1128 				(srcobj->rx.multicast.num);
1129 	srcobj->rx.unicast.bytes =
1130 		srcobj->rx.to_stack.bytes -
1131 				(srcobj->rx.multicast.bytes);
1132 
1133 	tgtobj->stats.rx.unicast.num += srcobj->rx.unicast.num;
1134 	tgtobj->stats.rx.unicast.bytes += srcobj->rx.unicast.bytes;
1135 	tgtobj->stats.rx.multicast.num += srcobj->rx.multicast.num;
1136 	tgtobj->stats.rx.multicast.bytes += srcobj->rx.multicast.bytes;
1137 	tgtobj->stats.rx.bcast.num += srcobj->rx.bcast.num;
1138 	tgtobj->stats.rx.bcast.bytes += srcobj->rx.bcast.bytes;
1139 	tgtobj->stats.rx.raw.num += srcobj->rx.raw.num;
1140 	tgtobj->stats.rx.raw.bytes += srcobj->rx.raw.bytes;
1141 	tgtobj->stats.rx.intra_bss.pkts.num +=
1142 			srcobj->rx.intra_bss.pkts.num;
1143 	tgtobj->stats.rx.intra_bss.pkts.bytes +=
1144 			srcobj->rx.intra_bss.pkts.bytes;
1145 	tgtobj->stats.rx.intra_bss.fail.num +=
1146 			srcobj->rx.intra_bss.fail.num;
1147 	tgtobj->stats.rx.intra_bss.fail.bytes +=
1148 			srcobj->rx.intra_bss.fail.bytes;
1149 
1150 	tgtobj->stats.tx.last_ack_rssi =
1151 		srcobj->tx.last_ack_rssi;
1152 	tgtobj->stats.rx.mec_drop.num += srcobj->rx.mec_drop.num;
1153 	tgtobj->stats.rx.mec_drop.bytes += srcobj->rx.mec_drop.bytes;
1154 	tgtobj->stats.rx.multipass_rx_pkt_drop +=
1155 		srcobj->rx.multipass_rx_pkt_drop;
1156 }
1157 
1158 static inline void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj,
1159 						struct dp_vdev *srcobj)
1160 {
1161 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.nawds_mcast);
1162 
1163 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.rcvd);
1164 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.processed);
1165 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.reinject_pkts);
1166 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.inspect_pkts);
1167 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.raw.raw_pkt);
1168 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.raw.dma_map_error);
1169 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.raw.num_frags_overflow_err);
1170 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_host.num);
1171 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_target);
1172 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sg.sg_pkt);
1173 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.mcast_en.mcast_pkt);
1174 	DP_STATS_AGGR(tgtobj, srcobj,
1175 		      tx_i.mcast_en.dropped_map_error);
1176 	DP_STATS_AGGR(tgtobj, srcobj,
1177 		      tx_i.mcast_en.dropped_self_mac);
1178 	DP_STATS_AGGR(tgtobj, srcobj,
1179 		      tx_i.mcast_en.dropped_send_fail);
1180 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mcast_en.ucast);
1181 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.igmp_mcast_en.igmp_rcvd);
1182 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.igmp_mcast_en.igmp_ucast_converted);
1183 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.dma_error);
1184 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.ring_full);
1185 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.enqueue_fail);
1186 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.fail_per_pkt_vdev_id_check);
1187 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.desc_na.num);
1188 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.res_full);
1189 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.headroom_insufficient);
1190 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified);
1191 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified_raw);
1192 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sniffer_rcvd);
1193 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.exception_fw);
1194 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.completion_fw);
1195 
1196 	tgtobj->stats.tx_i.dropped.dropped_pkt.num =
1197 		tgtobj->stats.tx_i.dropped.dma_error +
1198 		tgtobj->stats.tx_i.dropped.ring_full +
1199 		tgtobj->stats.tx_i.dropped.enqueue_fail +
1200 		tgtobj->stats.tx_i.dropped.fail_per_pkt_vdev_id_check +
1201 		tgtobj->stats.tx_i.dropped.desc_na.num +
1202 		tgtobj->stats.tx_i.dropped.res_full;
1203 
1204 }
1205 
1206 /**
1207  * dp_is_wds_extended(): Check if wds ext is enabled
1208  * @peer: DP peer handle
1209  *
1210  * return: true if enabled, false if not
1211  */
1212 #ifdef QCA_SUPPORT_WDS_EXTENDED
1213 static inline bool dp_is_wds_extended(struct dp_peer *peer)
1214 {
1215 	if (qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
1216 				&peer->wds_ext.init))
1217 		return true;
1218 
1219 	return false;
1220 }
1221 #else
1222 static inline bool dp_is_wds_extended(struct dp_peer *peer)
1223 {
1224 	return false;
1225 }
1226 #endif /* QCA_SUPPORT_WDS_EXTENDED */
1227 
1228 static inline void dp_update_vdev_stats(struct dp_soc *soc,
1229 					struct dp_peer *srcobj,
1230 					void *arg)
1231 {
1232 	struct cdp_vdev_stats *tgtobj = (struct cdp_vdev_stats *)arg;
1233 	uint8_t i;
1234 	uint8_t pream_type;
1235 
1236 	if (qdf_unlikely(dp_is_wds_extended(srcobj)))
1237 		return;
1238 
1239 	for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) {
1240 		for (i = 0; i < MAX_MCS; i++) {
1241 			tgtobj->tx.pkt_type[pream_type].
1242 				mcs_count[i] +=
1243 			srcobj->stats.tx.pkt_type[pream_type].
1244 				mcs_count[i];
1245 			tgtobj->rx.pkt_type[pream_type].
1246 				mcs_count[i] +=
1247 			srcobj->stats.rx.pkt_type[pream_type].
1248 				mcs_count[i];
1249 		}
1250 	}
1251 
1252 	for (i = 0; i < MAX_BW; i++) {
1253 		tgtobj->tx.bw[i] += srcobj->stats.tx.bw[i];
1254 		tgtobj->rx.bw[i] += srcobj->stats.rx.bw[i];
1255 	}
1256 
1257 	for (i = 0; i < SS_COUNT; i++) {
1258 		tgtobj->tx.nss[i] += srcobj->stats.tx.nss[i];
1259 		tgtobj->rx.nss[i] += srcobj->stats.rx.nss[i];
1260 	}
1261 
1262 	for (i = 0; i < WME_AC_MAX; i++) {
1263 		tgtobj->tx.wme_ac_type[i] +=
1264 			srcobj->stats.tx.wme_ac_type[i];
1265 		tgtobj->rx.wme_ac_type[i] +=
1266 			srcobj->stats.rx.wme_ac_type[i];
1267 		tgtobj->tx.excess_retries_per_ac[i] +=
1268 			srcobj->stats.tx.excess_retries_per_ac[i];
1269 	}
1270 
1271 	for (i = 0; i < MAX_GI; i++) {
1272 		tgtobj->tx.sgi_count[i] +=
1273 			srcobj->stats.tx.sgi_count[i];
1274 		tgtobj->rx.sgi_count[i] +=
1275 			srcobj->stats.rx.sgi_count[i];
1276 	}
1277 
1278 	for (i = 0; i < MAX_RECEPTION_TYPES; i++)
1279 		tgtobj->rx.reception_type[i] +=
1280 			srcobj->stats.rx.reception_type[i];
1281 
1282 	tgtobj->tx.comp_pkt.bytes += srcobj->stats.tx.comp_pkt.bytes;
1283 	tgtobj->tx.comp_pkt.num += srcobj->stats.tx.comp_pkt.num;
1284 	tgtobj->tx.ucast.num += srcobj->stats.tx.ucast.num;
1285 	tgtobj->tx.ucast.bytes += srcobj->stats.tx.ucast.bytes;
1286 	tgtobj->tx.mcast.num += srcobj->stats.tx.mcast.num;
1287 	tgtobj->tx.mcast.bytes += srcobj->stats.tx.mcast.bytes;
1288 	tgtobj->tx.bcast.num += srcobj->stats.tx.bcast.num;
1289 	tgtobj->tx.bcast.bytes += srcobj->stats.tx.bcast.bytes;
1290 	tgtobj->tx.tx_success.num += srcobj->stats.tx.tx_success.num;
1291 	tgtobj->tx.tx_success.bytes +=
1292 		srcobj->stats.tx.tx_success.bytes;
1293 	tgtobj->tx.nawds_mcast.num +=
1294 		srcobj->stats.tx.nawds_mcast.num;
1295 	tgtobj->tx.nawds_mcast.bytes +=
1296 		srcobj->stats.tx.nawds_mcast.bytes;
1297 	tgtobj->tx.nawds_mcast_drop +=
1298 		srcobj->stats.tx.nawds_mcast_drop;
1299 	tgtobj->tx.num_ppdu_cookie_valid +=
1300 		srcobj->stats.tx.num_ppdu_cookie_valid;
1301 	tgtobj->tx.tx_failed += srcobj->stats.tx.tx_failed;
1302 	tgtobj->tx.ofdma += srcobj->stats.tx.ofdma;
1303 	tgtobj->tx.stbc += srcobj->stats.tx.stbc;
1304 	tgtobj->tx.ldpc += srcobj->stats.tx.ldpc;
1305 	tgtobj->tx.pream_punct_cnt += srcobj->stats.tx.pream_punct_cnt;
1306 	tgtobj->tx.retries += srcobj->stats.tx.retries;
1307 	tgtobj->tx.non_amsdu_cnt += srcobj->stats.tx.non_amsdu_cnt;
1308 	tgtobj->tx.amsdu_cnt += srcobj->stats.tx.amsdu_cnt;
1309 	tgtobj->tx.non_ampdu_cnt += srcobj->stats.tx.non_ampdu_cnt;
1310 	tgtobj->tx.ampdu_cnt += srcobj->stats.tx.ampdu_cnt;
1311 	tgtobj->tx.dropped.fw_rem.num += srcobj->stats.tx.dropped.fw_rem.num;
1312 	tgtobj->tx.dropped.fw_rem.bytes +=
1313 			srcobj->stats.tx.dropped.fw_rem.bytes;
1314 	tgtobj->tx.dropped.fw_rem_tx +=
1315 			srcobj->stats.tx.dropped.fw_rem_tx;
1316 	tgtobj->tx.dropped.fw_rem_notx +=
1317 			srcobj->stats.tx.dropped.fw_rem_notx;
1318 	tgtobj->tx.dropped.fw_reason1 +=
1319 			srcobj->stats.tx.dropped.fw_reason1;
1320 	tgtobj->tx.dropped.fw_reason2 +=
1321 			srcobj->stats.tx.dropped.fw_reason2;
1322 	tgtobj->tx.dropped.fw_reason3 +=
1323 			srcobj->stats.tx.dropped.fw_reason3;
1324 	tgtobj->tx.dropped.age_out += srcobj->stats.tx.dropped.age_out;
1325 	tgtobj->rx.err.mic_err += srcobj->stats.rx.err.mic_err;
1326 	if (srcobj->stats.rx.snr != 0)
1327 		tgtobj->rx.snr = srcobj->stats.rx.snr;
1328 	tgtobj->rx.rx_rate = srcobj->stats.rx.rx_rate;
1329 	tgtobj->rx.err.decrypt_err += srcobj->stats.rx.err.decrypt_err;
1330 	tgtobj->rx.non_ampdu_cnt += srcobj->stats.rx.non_ampdu_cnt;
1331 	tgtobj->rx.ampdu_cnt += srcobj->stats.rx.ampdu_cnt;
1332 	tgtobj->rx.non_amsdu_cnt += srcobj->stats.rx.non_amsdu_cnt;
1333 	tgtobj->rx.amsdu_cnt += srcobj->stats.rx.amsdu_cnt;
1334 	tgtobj->rx.nawds_mcast_drop += srcobj->stats.rx.nawds_mcast_drop;
1335 	tgtobj->rx.to_stack.num += srcobj->stats.rx.to_stack.num;
1336 	tgtobj->rx.to_stack.bytes += srcobj->stats.rx.to_stack.bytes;
1337 
1338 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
1339 		tgtobj->rx.rcvd_reo[i].num +=
1340 			srcobj->stats.rx.rcvd_reo[i].num;
1341 		tgtobj->rx.rcvd_reo[i].bytes +=
1342 			srcobj->stats.rx.rcvd_reo[i].bytes;
1343 	}
1344 
1345 	srcobj->stats.rx.unicast.num =
1346 		srcobj->stats.rx.to_stack.num -
1347 				srcobj->stats.rx.multicast.num;
1348 	srcobj->stats.rx.unicast.bytes =
1349 		srcobj->stats.rx.to_stack.bytes -
1350 				srcobj->stats.rx.multicast.bytes;
1351 
1352 	tgtobj->rx.unicast.num += srcobj->stats.rx.unicast.num;
1353 	tgtobj->rx.unicast.bytes += srcobj->stats.rx.unicast.bytes;
1354 	tgtobj->rx.multicast.num += srcobj->stats.rx.multicast.num;
1355 	tgtobj->rx.multicast.bytes += srcobj->stats.rx.multicast.bytes;
1356 	tgtobj->rx.bcast.num += srcobj->stats.rx.bcast.num;
1357 	tgtobj->rx.bcast.bytes += srcobj->stats.rx.bcast.bytes;
1358 	tgtobj->rx.raw.num += srcobj->stats.rx.raw.num;
1359 	tgtobj->rx.raw.bytes += srcobj->stats.rx.raw.bytes;
1360 	tgtobj->rx.intra_bss.pkts.num +=
1361 			srcobj->stats.rx.intra_bss.pkts.num;
1362 	tgtobj->rx.intra_bss.pkts.bytes +=
1363 			srcobj->stats.rx.intra_bss.pkts.bytes;
1364 	tgtobj->rx.intra_bss.fail.num +=
1365 			srcobj->stats.rx.intra_bss.fail.num;
1366 	tgtobj->rx.intra_bss.fail.bytes +=
1367 			srcobj->stats.rx.intra_bss.fail.bytes;
1368 	tgtobj->tx.last_ack_rssi =
1369 		srcobj->stats.tx.last_ack_rssi;
1370 	tgtobj->rx.mec_drop.num += srcobj->stats.rx.mec_drop.num;
1371 	tgtobj->rx.mec_drop.bytes += srcobj->stats.rx.mec_drop.bytes;
1372 	tgtobj->rx.multipass_rx_pkt_drop +=
1373 		srcobj->stats.rx.multipass_rx_pkt_drop;
1374 }
1375 
1376 #define DP_UPDATE_STATS(_tgtobj, _srcobj)	\
1377 	do {				\
1378 		uint8_t i;		\
1379 		uint8_t pream_type;	\
1380 		for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \
1381 			for (i = 0; i < MAX_MCS; i++) { \
1382 				DP_STATS_AGGR(_tgtobj, _srcobj, \
1383 					tx.pkt_type[pream_type].mcs_count[i]); \
1384 				DP_STATS_AGGR(_tgtobj, _srcobj, \
1385 					rx.pkt_type[pream_type].mcs_count[i]); \
1386 			} \
1387 		} \
1388 		  \
1389 		for (i = 0; i < MAX_BW; i++) { \
1390 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \
1391 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \
1392 		} \
1393 		  \
1394 		for (i = 0; i < SS_COUNT; i++) { \
1395 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \
1396 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \
1397 		} \
1398 		for (i = 0; i < WME_AC_MAX; i++) { \
1399 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \
1400 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \
1401 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \
1402 		\
1403 		} \
1404 		\
1405 		for (i = 0; i < MAX_GI; i++) { \
1406 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \
1407 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \
1408 		} \
1409 		\
1410 		for (i = 0; i < MAX_RECEPTION_TYPES; i++) \
1411 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \
1412 		\
1413 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \
1414 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \
1415 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \
1416 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \
1417 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \
1418 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \
1419 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \
1420 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \
1421 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \
1422 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \
1423 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \
1424 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \
1425 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \
1426 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \
1427 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \
1428 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \
1429 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \
1430 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \
1431 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \
1432 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \
1433 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \
1434 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \
1435 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \
1436 								\
1437 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \
1438 		if (_srcobj->stats.rx.snr != 0) \
1439 			DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.snr); \
1440 		DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \
1441 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \
1442 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \
1443 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \
1444 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \
1445 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \
1446 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \
1447 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \
1448 								\
1449 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)	\
1450 			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \
1451 									\
1452 		_srcobj->stats.rx.unicast.num = \
1453 			_srcobj->stats.rx.to_stack.num - \
1454 					_srcobj->stats.rx.multicast.num; \
1455 		_srcobj->stats.rx.unicast.bytes = \
1456 			_srcobj->stats.rx.to_stack.bytes - \
1457 					_srcobj->stats.rx.multicast.bytes; \
1458 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \
1459 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \
1460 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \
1461 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \
1462 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \
1463 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \
1464 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \
1465 								  \
1466 		_tgtobj->stats.tx.last_ack_rssi =	\
1467 			_srcobj->stats.tx.last_ack_rssi; \
1468 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \
1469 	}  while (0)
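
/*
 * Usage sketch (illustrative): vdev-level aggregation walks the peer
 * list and folds each peer's counters into the vdev totals:
 *
 *	DP_UPDATE_STATS(vdev, peer);
 *
 * Both handles must embed a `stats` member with the cdp tx/rx layout
 * used above; note the macro also derives rx.unicast on the source
 * object as a side effect.
 */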
1470 
1471 /**
1472  * dp_peer_find_attach() - Allocates memory for peer objects
1473  * @soc: SoC handle
1474  *
1475  * Return: QDF_STATUS
1476  */
1477 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc);
1478 extern void dp_peer_find_detach(struct dp_soc *soc);
1479 extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer);
1480 extern void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer);
1481 extern void dp_peer_find_hash_erase(struct dp_soc *soc);
1482 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
1483 			   struct dp_peer *peer);
1484 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
1485 			      struct dp_peer *peer);
1486 void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
1487 				struct dp_peer *peer,
1488 				uint16_t peer_id);
1489 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
1490 				   uint16_t peer_id);
1491 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
1492 			  enum dp_mod_id mod_id);
1493 
1494 /*
1495  * dp_peer_ppdu_delayed_ba_cleanup() - free ppdu memory allocated in peer
1496  * @peer: Datapath peer
1497  *
1498  * return: void
1499  */
1500 void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer);
1501 
1502 extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer);
1503 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
1504 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
1505 extern struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
1506 					      uint8_t *peer_mac_addr,
1507 					      int mac_addr_is_aligned,
1508 					      uint8_t vdev_id,
1509 					      enum dp_mod_id id);
1510 
1511 #ifdef DP_PEER_EXTENDED_API
1512 /**
1513  * dp_register_peer() - Register peer into physical device
1514  * @soc_hdl: data path soc handle
1515  * @pdev_id: device instance id
1516  * @sta_desc: peer description
1517  *
1518  * Register peer into physical device
1519  *
1520  * Return: QDF_STATUS_SUCCESS registration success
1521  *         QDF_STATUS_E_FAULT peer not found
1522  */
1523 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1524 			    struct ol_txrx_desc_type *sta_desc);
1525 
1526 /**
1527  * dp_clear_peer() - remove peer from physical device
1528  * @soc_hdl: data path soc handle
1529  * @pdev_id: device instance id
1530  * @peer_addr: peer mac address
1531  *
1532  * remove peer from physical device
1533  *
1534  * Return: QDF_STATUS_SUCCESS registration success
1535  *         QDF_STATUS_E_FAULT peer not found
1536  */
1537 QDF_STATUS dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1538 			 struct qdf_mac_addr peer_addr);
1539 
1540 /*
1541  * dp_find_peer_exist - find peer if already exists
1542  * @soc_hdl: datapath soc handle
1543  * @pdev_id: physical device instance id
1544  * @peer_addr: peer mac address
1545  *
1546  * Return: true or false
1547  */
1548 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1549 			uint8_t *peer_addr);
1550 
1551 /*
1552  * dp_find_peer_exist_on_vdev - find if peer exists on the given vdev
1553  * @soc_hdl: datapath soc handle
1554  * @vdev_id: vdev instance id
1555  * @peer_addr: peer mac address
1556  *
1557  * Return: true or false
1558  */
1559 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1560 				uint8_t *peer_addr);
1561 
1562 /*
1563  * dp_find_peer_exist_on_other_vdev - find if peer exists
1564  * on other than the given vdev
1565  * @soc_hdl: datapath soc handle
1566  * @vdev_id: vdev instance id
1567  * @peer_addr: peer mac address
1568  * @max_bssid: max number of bssids
1569  *
1570  * Return: true or false
1571  */
1572 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
1573 				      uint8_t vdev_id, uint8_t *peer_addr,
1574 				      uint16_t max_bssid);
1575 
1576 /**
1577  * dp_peer_state_update() - update peer local state
1578  * @soc: data path soc handle
1579  * @peer_mac: peer mac address
1580  * @state: new peer local state
1581  *
1582  * update peer local state
1583  *
1584  * Return: QDF_STATUS_SUCCESS registration success
1585  */
1586 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc, uint8_t *peer_mac,
1587 				enum ol_txrx_peer_state state);
1588 
1589 /**
1590  * dp_get_vdevid() - Get virtual interface id which peer registered
1591  * @soc_hdl: datapath soc handle
1592  * @peer_mac: peer mac address
1593  * @vdev_id: virtual interface id which peer registered
1594  *
1595  * Get virtual interface id which peer registered
1596  *
1597  * Return: QDF_STATUS_SUCCESS registration success
1598  */
1599 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
1600 			 uint8_t *vdev_id);
1601 struct cdp_vdev *dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
1602 		struct qdf_mac_addr peer_addr);
1603 struct cdp_vdev *dp_get_vdev_for_peer(void *peer);
1604 uint8_t *dp_peer_get_peer_mac_addr(void *peer);
1605 
1606 /**
1607  * dp_get_peer_state() - Get local peer state
1608  * @soc: datapath soc handle
1609  * @vdev_id: vdev id
1610  * @peer_mac: peer mac addr
1611  *
1612  * Get local peer state
1613  *
1614  * Return: peer status
1615  */
1616 int dp_get_peer_state(struct cdp_soc_t *soc, uint8_t vdev_id,
1617 		      uint8_t *peer_mac);
1618 void dp_local_peer_id_pool_init(struct dp_pdev *pdev);
1619 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer);
1620 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer);
1621 #else
1622 /**
1623  * dp_get_vdevid() - Get virtual interface id which peer registered
1624  * @soc_hdl: datapath soc handle
1625  * @peer_mac: peer mac address
1626  * @vdev_id: virtual interface id which peer registered
1627  *
1628  * Get virtual interface id which peer registered
1629  *
1630  * Return: QDF_STATUS_SUCCESS registration success
1631  */
1632 static inline
1633 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
1634 			 uint8_t *vdev_id)
1635 {
1636 	return QDF_STATUS_E_NOSUPPORT;
1637 }
1638 
1639 static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
1640 {
1641 }
1642 
1643 static inline
1644 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
1645 {
1646 }
1647 
1648 static inline
1649 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
1650 {
1651 }
1652 #endif
1653 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
1654 				      uint8_t *peer_mac, uint16_t vdev_id,
1655 				      uint8_t tid,
1656 				      int status);
1657 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
1658 				  uint8_t *peer_mac, uint16_t vdev_id,
1659 				  uint8_t dialogtoken, uint16_t tid,
1660 				  uint16_t batimeout,
1661 				  uint16_t buffersize,
1662 				  uint16_t startseqnum);
1663 QDF_STATUS dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc,
1664 					uint8_t *peer_mac, uint16_t vdev_id,
1665 					uint8_t tid, uint8_t *dialogtoken,
1666 					uint16_t *statuscode,
1667 					uint16_t *buffersize,
1668 					uint16_t *batimeout);
1669 QDF_STATUS dp_set_addba_response(struct cdp_soc_t *cdp_soc,
1670 				 uint8_t *peer_mac,
1671 				 uint16_t vdev_id, uint8_t tid,
1672 				 uint16_t statuscode);
1673 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1674 			   uint16_t vdev_id, int tid,
1675 			   uint16_t reasoncode);
1676 /*
1677  * dp_delba_tx_completion_wifi3() - Handle delba tx completion
1678  * @cdp_soc: soc handle
1679  * @peer_mac: peer mac address
1680  * @vdev_id: id of the vdev handle
1681  * @tid: Tid number
1682  * @status: Tx completion status
1683  *
1684  * Indicate status of delba Tx to DP for stats update and retry
1685  * delba if tx failed.
1686  *
1687  */
1688 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1689 				 uint16_t vdev_id, uint8_t tid,
1690 				 int status);
1691 extern QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1692 					uint32_t ba_window_size,
1693 					uint32_t start_seq);
1694 
1695 extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc,
1696 	enum hal_reo_cmd_type type, struct hal_reo_cmd_params *params,
1697 	void (*callback_fn), void *data);
1698 
1699 extern void dp_reo_cmdlist_destroy(struct dp_soc *soc);
1700 
1701 /**
1702  * dp_reo_status_ring_handler - Handler for REO Status ring
1703  * @int_ctx: pointer to DP interrupt context
1704  * @soc: DP Soc handle
1705  *
1706  * Returns: Number of descriptors reaped
1707  */
1708 uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx,
1709 				    struct dp_soc *soc);
1710 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
1711 			     struct cdp_vdev_stats *vdev_stats);
1712 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1713 	union hal_reo_status *reo_status);
1714 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1715 		union hal_reo_status *reo_status);
1716 uint16_t dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id,
1717 				     qdf_nbuf_t nbuf,
1718 				     uint8_t newmac[][QDF_MAC_ADDR_SIZE],
1719 				     uint8_t new_mac_cnt, uint8_t tid,
1720 				     bool is_igmp);
1721 void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id);
1722 
1723 void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id);
1724 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
1725 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
1726 		uint32_t config_param_1, uint32_t config_param_2,
1727 		uint32_t config_param_3, int cookie, int cookie_msb,
1728 		uint8_t mac_id);
1729 void dp_htt_stats_print_tag(struct dp_pdev *pdev,
1730 			    uint8_t tag_type, uint32_t *tag_buf);
1731 void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf);
1732 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, uint32_t tuple_mask,
1733 				     uint8_t mac_id);
1734 /**
1735  * dp_rxtid_stats_cmd_cb - function pointer for peer
1736  *			   rx tid stats cmd call_back
1737  */
1738 typedef void (*dp_rxtid_stats_cmd_cb)(struct dp_soc *soc, void *cb_ctxt,
1739 				      union hal_reo_status *reo_status);
1740 int dp_peer_rxtid_stats(struct dp_peer *peer,
1741 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
1742 			void *cb_ctxt);
1743 QDF_STATUS
1744 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
1745 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
1746 		      uint32_t *rx_pn);
1747 
1748 QDF_STATUS
1749 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
1750 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
1751 			  bool is_unicast);
1752 
1753 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id);
1754 
1755 QDF_STATUS
1756 dp_set_michael_key(struct cdp_soc_t *soc, uint8_t vdev_id,
1757 		   uint8_t *peer_mac,
1758 		   bool is_unicast, uint32_t *key);
1759 
1760 /**
1761  * dp_check_pdev_exists() - Validate pdev before use
1762  * @soc - dp soc handle
1763  * @data - pdev handle
1764  *
1765  * Return: 0 - success/invalid - failure
1766  */
1767 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data);
1768 
1769 /**
1770  * dp_update_delay_stats() - Update delay statistics in structure
1771  *                              and fill min, max and avg delay
1772  * @pdev: pdev handle
1773  * @delay: delay in ms
1774  * @tid: tid value
1775  * @mode: type of tx delay mode
1776  * @ring id: ring number
1777  *
1778  * Return: none
1779  */
1780 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
1781 			   uint8_t tid, uint8_t mode, uint8_t ring_id);
1782 
1783 /**
1784  * dp_print_ring_stats(): Print tail and head pointer
1785  * @pdev: DP_PDEV handle
1786  *
1787  * Return:void
1788  */
1789 void dp_print_ring_stats(struct dp_pdev *pdev);
1790 
1791 /**
1792  * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
1793  * @pdev_handle: DP pdev handle
1794  *
1795  * Return - void
1796  */
1797 void dp_print_pdev_cfg_params(struct dp_pdev *pdev);
1798 
1799 /**
1800  * dp_print_soc_cfg_params()- Dump soc wlan config parameters
1801  * @soc_handle: Soc handle
1802  *
1803  * Return: void
1804  */
1805 void dp_print_soc_cfg_params(struct dp_soc *soc);
1806 
1807 /**
1808  * dp_srng_get_str_from_ring_type() - Return string name for a ring
1809  * @ring_type: Ring
1810  *
1811  * Return: char const pointer
1812  */
1813 const
1814 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type);
1815 
1816 /*
1817  * dp_txrx_path_stats() - Function to display dump stats
1818  * @soc - soc handle
1819  *
1820  * return: none
1821  */
1822 void dp_txrx_path_stats(struct dp_soc *soc);
1823 
1824 /*
1825  * dp_print_per_ring_stats(): Packet count per ring
1826  * @soc - soc handle
1827  *
1828  * Return - None
1829  */
1830 void dp_print_per_ring_stats(struct dp_soc *soc);
1831 
1832 /**
1833  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
1834  * @pdev: DP PDEV handle
1835  *
1836  * return: void
1837  */
1838 void dp_aggregate_pdev_stats(struct dp_pdev *pdev);
1839 
1840 /**
1841  * dp_print_rx_rates(): Print Rx rate stats
1842  * @vdev: DP_VDEV handle
1843  *
1844  * Return:void
1845  */
1846 void dp_print_rx_rates(struct dp_vdev *vdev);
1847 
1848 /**
1849  * dp_print_tx_rates(): Print tx rates
1850  * @vdev: DP_VDEV handle
1851  *
1852  * Return:void
1853  */
1854 void dp_print_tx_rates(struct dp_vdev *vdev);
1855 
1856 /**
1857  * dp_print_peer_stats():print peer stats
1858  * @peer: DP_PEER handle
1859  *
1860  * return void
1861  */
1862 void dp_print_peer_stats(struct dp_peer *peer);
1863 
1864 /**
1865  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
1866  * @pdev: DP_PDEV Handle
1867  *
1868  * Return:void
1869  */
1870 void
1871 dp_print_pdev_tx_stats(struct dp_pdev *pdev);
1872 
1873 /**
1874  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
1875  * @pdev: DP_PDEV Handle
1876  *
1877  * Return: void
1878  */
1879 void
1880 dp_print_pdev_rx_stats(struct dp_pdev *pdev);
1881 
1882 /**
1883  * dp_print_soc_tx_stats(): Print SOC level  stats
1884  * @soc DP_SOC Handle
1885  *
1886  * Return: void
1887  */
1888 void dp_print_soc_tx_stats(struct dp_soc *soc);
1889 
1890 /**
1891  * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc
1892  * @soc: dp_soc handle
1893  *
1894  * Return: None
1895  */
1896 void dp_print_soc_interrupt_stats(struct dp_soc *soc);
1897 
1898 /**
1899  * dp_print_soc_rx_stats: Print SOC level Rx stats
1900  * @soc: DP_SOC Handle
1901  *
1902  * Return:void
1903  */
1904 void dp_print_soc_rx_stats(struct dp_soc *soc);
1905 
1906 /**
1907  * dp_get_mac_id_for_pdev() -  Return mac corresponding to pdev for mac
1908  *
1909  * @mac_id: MAC id
1910  * @pdev_id: pdev_id corresponding to pdev, 0 for MCL
1911  *
1912  * Single pdev using both MACs will operate on both MAC rings,
1913  * which is the case for MCL.
1914  * For WIN each PDEV will operate one ring, so index is zero.
1915  *
1916  */
1917 static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id)
1918 {
1919 	if (mac_id && pdev_id) {
1920 		qdf_print("Both mac_id and pdev_id cannot be non zero");
1921 		QDF_BUG(0);
1922 		return 0;
1923 	}
1924 	return (mac_id + pdev_id);
1925 }
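
/*
 * Worked example (illustrative, not driver API): on an MCL-style single
 * pdev with two MACs, mac_id selects the ring; on WIN, pdev_id does.
 * The wrapper name below is an assumption for the example.
 */
static inline int dp_example_second_mac_ring_index(void)
{
	/* MCL: pdev 0, second MAC; yields ring index 1 */
	return dp_get_mac_id_for_pdev(1 /* mac_id */, 0 /* pdev_id */);
}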

/**
 * dp_get_lmac_id_for_pdev_id() - Return lmac id corresponding to host pdev id
 * @soc: soc pointer
 * @mac_id: MAC id
 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL
 *
 * For MCL, a single pdev using both MACs will operate on both MAC rings.
 *
 * For WIN, each PDEV will operate one ring.
 *
 * Return: lmac id
 */
static inline int
dp_get_lmac_id_for_pdev_id
	(struct dp_soc *soc, uint32_t mac_id, uint32_t pdev_id)
{
	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
		if (mac_id && pdev_id) {
			qdf_print("Both mac_id and pdev_id cannot be non zero");
			QDF_BUG(0);
			return 0;
		}
		return (mac_id + pdev_id);
	}

	return soc->pdev_list[pdev_id]->lmac_id;
}

/**
 * dp_get_pdev_for_lmac_id() - Return pdev pointer corresponding to lmac id
 * @soc: soc pointer
 * @lmac_id: LMAC id
 *
 * For MCL, a single pdev exists.
 *
 * For WIN, each PDEV will operate one ring.
 *
 * Return: pdev handle, or NULL if lmac_id is invalid
 */
static inline struct dp_pdev *
dp_get_pdev_for_lmac_id(struct dp_soc *soc, uint32_t lmac_id)
{
	uint8_t i = 0;

	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
		i = wlan_cfg_get_pdev_idx(soc->wlan_cfg_ctx, lmac_id);
		return ((i < MAX_PDEV_CNT) ? soc->pdev_list[i] : NULL);
	}

	/* Typically for MCL, as there is only 1 PDEV */
	return soc->pdev_list[0];
}

/**
 * dp_calculate_target_pdev_id_from_host_pdev_id() - Return target pdev
 *                                          corresponding to host pdev id
 * @soc: soc pointer
 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL
 *
 * Returns target pdev_id for host pdev id. For WIN, this is derived through
 * a two step process:
 * 1. Get lmac_id corresponding to host pdev_id (lmac_id can change
 *    during mode switch)
 * 2. Get target pdev_id (set up during WMI ready) from lmac_id
 *
 * For MCL, return the offset-1 translated mac_id
 */
static inline int
dp_calculate_target_pdev_id_from_host_pdev_id
	(struct dp_soc *soc, uint32_t mac_for_pdev)
{
	struct dp_pdev *pdev;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return DP_SW2HW_MACID(mac_for_pdev);

	pdev = soc->pdev_list[mac_for_pdev];

	/* non-MCL case, get original target_pdev mapping */
	return wlan_cfg_get_target_pdev_id(soc->wlan_cfg_ctx, pdev->lmac_id);
}

/**
 * dp_get_target_pdev_id_for_host_pdev_id() - Return target pdev corresponding
 *                                         to host pdev id
 * @soc: soc pointer
 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL
 *
 * Returns target pdev_id for host pdev id.
 * For WIN, return the value stored in the pdev object.
 * For MCL, return the offset-1 translated mac_id.
 */
static inline int
dp_get_target_pdev_id_for_host_pdev_id
	(struct dp_soc *soc, uint32_t mac_for_pdev)
{
	struct dp_pdev *pdev;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return DP_SW2HW_MACID(mac_for_pdev);

	pdev = soc->pdev_list[mac_for_pdev];

	return pdev->target_pdev_id;
}

/**
 * dp_get_host_pdev_id_for_target_pdev_id() - Return host pdev corresponding
 *                                         to target pdev id
 * @soc: soc pointer
 * @pdev_id: pdev_id corresponding to target pdev
 *
 * Returns host pdev_id for target pdev id. For WIN, this is derived through
 * a two step process:
 * 1. Get lmac_id corresponding to target pdev_id
 * 2. Get host pdev_id (set up during WMI ready) from lmac_id
 *
 * For MCL, return the 0-offset pdev_id
 */
static inline int
dp_get_host_pdev_id_for_target_pdev_id
	(struct dp_soc *soc, uint32_t pdev_id)
{
	struct dp_pdev *pdev;
	int lmac_id;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return DP_HW2SW_MACID(pdev_id);

	/* non-MCL case, get original target_lmac mapping from target pdev */
	lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx,
					  DP_HW2SW_MACID(pdev_id));

	/* Get host pdev from lmac */
	pdev = dp_get_pdev_for_lmac_id(soc, lmac_id);

	return pdev ? pdev->pdev_id : INVALID_PDEV_ID;
}
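
/*
 * Illustrative sketch (not driver API): the two mappings above are
 * inverses of each other, so a host pdev id survives a round trip
 * through the target pdev id. The helper name is an assumption for
 * the example.
 */
static inline bool dp_example_pdev_id_roundtrip(struct dp_soc *soc,
						uint32_t host_pdev_id)
{
	int target_pdev_id;

	target_pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(soc, host_pdev_id);

	/* Mapping back should recover the original host pdev id */
	return dp_get_host_pdev_id_for_target_pdev_id(soc, target_pdev_id) ==
	       (int)host_pdev_id;
}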

/*
 * dp_get_mac_id_for_mac() - Return mac ring index for WIN and MCL mac_ids
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * A single pdev using both MACs will operate on both MAC rings,
 * which is the case for MCL.
 * For WIN each PDEV will operate one ring, so index is zero.
 *
 * Return: MAC ring index
 */
static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id)
{
	/*
	 * Single pdev using both MACs will operate on both MAC rings,
	 * which is the case for MCL.
	 */
	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return mac_id;

	/* For WIN each PDEV will operate one ring, so index is zero. */
	return 0;
}

/*
 * dp_is_subtype_data() - check if the frame subtype is data
 * @frame_ctrl: Frame control field
 *
 * Check the frame control field and verify if the packet
 * is a data packet.
 *
 * Return: true or false
 */
static inline bool dp_is_subtype_data(uint16_t frame_ctrl)
{
	if (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_TYPE_MASK) ==
	    QDF_IEEE80211_FC0_TYPE_DATA) &&
	    (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
	    QDF_IEEE80211_FC0_SUBTYPE_DATA) ||
	    ((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
	    QDF_IEEE80211_FC0_SUBTYPE_QOS))) {
		return true;
	}

	return false;
}
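
/*
 * Illustrative sketch (not driver API): feeding the frame control field
 * of a QoS data frame to dp_is_subtype_data(). The 0x0088 constant
 * (FC0 type = data, subtype = QoS data, as read from the 802.11 header
 * in LE wire order on a little-endian host) and the wrapper name are
 * assumptions for the example.
 */
static inline bool dp_example_qos_data_is_data(void)
{
	uint16_t frame_ctrl = 0x0088; /* 802.11 QoS data frame control */

	/* Management and control frames would return false here */
	return dp_is_subtype_data(frame_ctrl);
}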

#ifdef WDI_EVENT_ENABLE
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
				uint32_t stats_type_upload_mask,
				uint8_t mac_id);

int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id,
		       wdi_event_subscribe *event_cb_sub_handle,
		       uint32_t event);

int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id,
		     wdi_event_subscribe *event_cb_sub_handle,
		     uint32_t event);

void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc,
			  void *data, u_int16_t peer_id,
			  int status, u_int8_t pdev_id);

int dp_wdi_event_attach(struct dp_pdev *txrx_pdev);
int dp_wdi_event_detach(struct dp_pdev *txrx_pdev);

static inline void
dp_hif_update_pipe_callback(struct dp_soc *dp_soc,
			    void *cb_context,
			    QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t),
			    uint8_t pipe_id)
{
	struct hif_msg_callbacks hif_pipe_callbacks;

	/* TODO: Temporary change to bypass HTC connection for this new
	 * HIF pipe, which will be used for packet log and other high-
	 * priority HTT messages. Proper HTC connection to be added
	 * later once required FW changes are available
	 */
	hif_pipe_callbacks.rxCompletionHandler = callback;
	hif_pipe_callbacks.Context = cb_context;
	hif_update_pipe_callback(dp_soc->hif_handle,
		DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks);
}
#else
static inline int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id,
				     wdi_event_subscribe *event_cb_sub_handle,
				     uint32_t event)
{
	return 0;
}

static inline int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id,
				   wdi_event_subscribe *event_cb_sub_handle,
				   uint32_t event)
{
	return 0;
}

static inline
void dp_wdi_event_handler(enum WDI_EVENT event,
			  struct dp_soc *soc,
			  void *data, u_int16_t peer_id,
			  int status, u_int8_t pdev_id)
{
}

static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev)
{
	return 0;
}

static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev)
{
	return 0;
}

static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	return 0;
}

static inline void
dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context,
			    QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t),
			    uint8_t pipe_id)
{
}
#endif /* WDI_EVENT_ENABLE */

#ifdef VDEV_PEER_PROTOCOL_COUNT
/**
 * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters
 * @vdev: VDEV DP object
 * @nbuf: data packet
 * @peer: Peer DP object
 * @is_egress: whether egress or ingress
 * @is_rx: whether rx or tx
 *
 * This function updates the per-peer protocol counters
 * Return: void
 */
void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev,
					    qdf_nbuf_t nbuf,
					    struct dp_peer *peer,
					    bool is_egress,
					    bool is_rx);

/**
 * dp_peer_stats_update_protocol_cnt() - update per-peer protocol counters
 * @soc: SOC DP object
 * @vdev_id: vdev_id
 * @nbuf: data packet
 * @is_egress: whether egress or ingress
 * @is_rx: whether rx or tx
 *
 * This function updates the per-peer protocol counters
 * Return: void
 */
void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc,
				       int8_t vdev_id,
				       qdf_nbuf_t nbuf,
				       bool is_egress,
				       bool is_rx);

void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
					       qdf_nbuf_t nbuf);

#else
#define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, peer, \
					       is_egress, is_rx)

static inline
void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
					       qdf_nbuf_t nbuf)
{
}

#endif

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl);
int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
	bool force);
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
static inline int
dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc, hal_ring_hdl);
}

static inline void
dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc, hal_ring_hdl);
}

#else
static inline int
dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start(soc, hal_ring_hdl);
}

static inline void
dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end(soc, hal_ring_hdl);
}
#endif

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/**
 * dp_srng_access_start() - Wrapper function to log access start of a hal ring
 * @int_ctx: pointer to DP interrupt context. This should not be NULL
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL ring which will be serviced
 *
 * Return: 0 on success; error on failure
 */
int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			 hal_ring_handle_t hal_ring_hdl);

/**
 * dp_srng_access_end() - Wrapper function to log access end of a hal ring
 * @int_ctx: pointer to DP interrupt context. This should not be NULL
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL ring which will be serviced
 *
 * Return: void
 */
void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			hal_ring_handle_t hal_ring_hdl);

#else
static inline int dp_srng_access_start(struct dp_intr *int_ctx,
				       struct dp_soc *dp_soc,
				       hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
}

static inline void dp_srng_access_end(struct dp_intr *int_ctx,
				      struct dp_soc *dp_soc,
				      hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

#ifdef QCA_CACHED_RING_DESC
/**
 * dp_srng_dst_get_next() - Wrapper function to get next ring desc
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL Destination Ring
 *
 * Return: HAL ring descriptor
 */
static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc,
					 hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	return hal_srng_dst_get_next_cached(hal_soc, hal_ring_hdl);
}

/**
 * dp_srng_dst_inv_cached_descs() - Wrapper function to invalidate cached
 * descriptors
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring
 * @num_entries: Entry count
 *
 * Return: None
 */
static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc,
						hal_ring_handle_t hal_ring_hdl,
						uint32_t num_entries)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num_entries);
}
#else
static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc,
					 hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	return hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
}

static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc,
						hal_ring_handle_t hal_ring_hdl,
						uint32_t num_entries)
{
}
#endif /* QCA_CACHED_RING_DESC */
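
/*
 * Illustrative sketch (not driver API) of how the wrappers above are
 * combined in a destination-ring reap loop: start access, pull
 * descriptors until the ring drains or a quota is met, then end access.
 * The function name and quota handling are assumptions for the example.
 */
static inline uint32_t dp_example_reap_dst_ring(struct dp_intr *int_ctx,
						struct dp_soc *dp_soc,
						hal_ring_handle_t hal_ring_hdl,
						uint32_t quota)
{
	uint32_t count = 0;
	void *desc;

	if (qdf_unlikely(dp_srng_access_start(int_ctx, dp_soc, hal_ring_hdl)))
		return 0;

	while (count < quota &&
	       (desc = dp_srng_dst_get_next(dp_soc, hal_ring_hdl))) {
		/* process 'desc' here */
		count++;
	}

	dp_srng_access_end(int_ctx, dp_soc, hal_ring_hdl);

	return count;
}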

#ifdef QCA_ENH_V3_STATS_SUPPORT
/**
 * dp_pdev_print_delay_stats() - Print pdev level delay stats
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
void dp_pdev_print_delay_stats(struct dp_pdev *pdev);

/**
 * dp_pdev_print_tid_stats() - Print pdev level tid stats
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
void dp_pdev_print_tid_stats(struct dp_pdev *pdev);
#endif /* QCA_ENH_V3_STATS_SUPPORT */

void dp_soc_set_txrx_ring_map(struct dp_soc *soc);

/**
 * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev
 * @vdev: DP vdev handle
 *
 * Return: struct cdp_vdev pointer
 */
static inline
struct cdp_vdev *dp_vdev_to_cdp_vdev(struct dp_vdev *vdev)
{
	return (struct cdp_vdev *)vdev;
}

/**
 * dp_pdev_to_cdp_pdev() - typecast dp pdev to cdp pdev
 * @pdev: DP pdev handle
 *
 * Return: struct cdp_pdev pointer
 */
static inline
struct cdp_pdev *dp_pdev_to_cdp_pdev(struct dp_pdev *pdev)
{
	return (struct cdp_pdev *)pdev;
}

/**
 * dp_soc_to_cdp_soc() - typecast dp psoc to cdp psoc
 * @psoc: DP psoc handle
 *
 * Return: struct cdp_soc pointer
 */
static inline
struct cdp_soc *dp_soc_to_cdp_soc(struct dp_soc *psoc)
{
	return (struct cdp_soc *)psoc;
}

/**
 * dp_soc_to_cdp_soc_t() - typecast dp psoc to
 * ol txrx soc handle
 * @psoc: DP psoc handle
 *
 * Return: struct cdp_soc_t pointer
 */
static inline
struct cdp_soc_t *dp_soc_to_cdp_soc_t(struct dp_soc *psoc)
{
	return (struct cdp_soc_t *)psoc;
}

/**
 * cdp_soc_t_to_dp_soc() - typecast cdp_soc_t to
 * dp soc handle
 * @psoc: CDP psoc handle
 *
 * Return: struct dp_soc pointer
 */
static inline
struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc)
{
	return (struct dp_soc *)psoc;
}

#if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
/**
 * dp_rx_flow_get_fse_stats() - Fetch a flow's statistics
 * @pdev: pdev handle
 * @rx_flow_info: DP flow parameters
 * @stats: buffer to hold the returned flow statistics
 *
 * Return: Success when flow statistics are fetched, error on failure
 */
QDF_STATUS dp_rx_flow_get_fse_stats(struct dp_pdev *pdev,
				    struct cdp_rx_flow_info *rx_flow_info,
				    struct cdp_flow_stats *stats);

/**
 * dp_rx_flow_delete_entry() - Delete a flow entry from flow search table
 * @pdev: pdev handle
 * @rx_flow_info: DP flow parameters
 *
 * Return: Success when flow is deleted, error on failure
 */
QDF_STATUS dp_rx_flow_delete_entry(struct dp_pdev *pdev,
				   struct cdp_rx_flow_info *rx_flow_info);

/**
 * dp_rx_flow_add_entry() - Add a flow entry to flow search table
 * @pdev: DP pdev instance
 * @rx_flow_info: DP flow parameters
 *
 * Return: Success when flow is added, no-memory or already exists on error
 */
QDF_STATUS dp_rx_flow_add_entry(struct dp_pdev *pdev,
				struct cdp_rx_flow_info *rx_flow_info);

/**
 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
 */
QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev);

/**
 * dp_rx_fst_detach() - De-initialize Rx FST
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: None
 */
void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev);

/**
 * dp_rx_flow_send_fst_fw_setup() - Program FST parameters in FW/HW post-attach
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: Success when fst parameters are programmed in FW, error otherwise
 */
QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc,
					struct dp_pdev *pdev);
#else /* !(defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)) */

/**
 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
 */
static inline
QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_fst_detach() - De-initialize Rx FST
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: None
 */
static inline
void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
#endif

/**
 * dp_vdev_get_ref() - API to take a reference for VDEV object
 * @soc: core DP soc context
 * @vdev: DP vdev
 * @mod_id: module id
 *
 * Return: QDF_STATUS_SUCCESS if reference held successfully
 *	   else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_vdev_get_ref(struct dp_soc *soc, struct dp_vdev *vdev,
			   enum dp_mod_id mod_id)
{
	if (!qdf_atomic_inc_not_zero(&vdev->ref_cnt))
		return QDF_STATUS_E_INVAL;

	qdf_atomic_inc(&vdev->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_vdev_get_ref_by_id() - Returns vdev object given the vdev id
 * @soc: core DP soc context
 * @vdev_id: vdev id from which vdev object can be retrieved
 * @mod_id: module id which is requesting the reference
 *
 * Return: struct dp_vdev*: Pointer to DP vdev object
 */
static inline struct dp_vdev *
dp_vdev_get_ref_by_id(struct dp_soc *soc, uint8_t vdev_id,
		      enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return NULL;

	qdf_spin_lock_bh(&soc->vdev_map_lock);
	vdev = soc->vdev_id_map[vdev_id];

	if (!vdev || dp_vdev_get_ref(soc, vdev, mod_id) != QDF_STATUS_SUCCESS) {
		qdf_spin_unlock_bh(&soc->vdev_map_lock);
		return NULL;
	}
	qdf_spin_unlock_bh(&soc->vdev_map_lock);

	return vdev;
}
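
/*
 * Illustrative usage sketch (not driver API): the reference taken by
 * dp_vdev_get_ref_by_id() must be dropped once the vdev is no longer
 * used. The release helper dp_vdev_unref_delete() and the DP_MOD_ID_CDP
 * module id are assumed here to be the usual counterparts from the DP
 * layer.
 */
static inline uint8_t dp_example_read_vdev_id(struct dp_soc *soc,
					      uint8_t vdev_id)
{
	struct dp_vdev *vdev;
	uint8_t id;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return 0;

	id = vdev->vdev_id;

	/* Drop the reference taken above */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return id;
}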

/**
 * dp_get_pdev_from_soc_pdev_id_wifi3() - Returns pdev object given the pdev id
 * @soc: core DP soc context
 * @pdev_id: pdev id from which pdev object can be retrieved
 *
 * Return: struct dp_pdev*: Pointer to DP pdev object
 */
static inline struct dp_pdev *
dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc *soc,
				   uint8_t pdev_id)
{
	if (qdf_unlikely(pdev_id >= MAX_PDEV_CNT))
		return NULL;

	return soc->pdev_list[pdev_id];
}

/*
 * dp_rx_tid_update_wifi3() - Update receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @bar_update: BAR update triggered
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid,
				  uint32_t ba_window_size,
				  uint32_t start_seq, bool bar_update);

/**
 * dp_get_peer_mac_list() - function to get peer mac list of vdev
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @newmac: Table to hold the client mac addresses
 * @mac_cnt: No. of MACs required
 * @limit: Limit the number of clients
 *
 * Return: number of clients
 */
uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
			      u_int16_t mac_cnt, bool limit);
/*
 * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
 * @soc:		DP SoC context
 * @max_mac_rings:	No of MAC rings
 *
 * Return: None
 */
void dp_is_hw_dbs_enable(struct dp_soc *soc,
			 int *max_mac_rings);

#if defined(WLAN_SUPPORT_RX_FISA)
void dp_rx_dump_fisa_table(struct dp_soc *soc);

/*
 * dp_rx_fst_update_cmem_params() - Update CMEM FST params
 * @soc:		DP SoC context
 * @num_entries:	Number of flow search entries
 * @cmem_ba_lo:		CMEM base address low
 * @cmem_ba_hi:		CMEM base address high
 *
 * Return: None
 */
void dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries,
				  uint32_t cmem_ba_lo, uint32_t cmem_ba_hi);

void
dp_rx_fst_update_pm_suspend_status(struct dp_soc *soc, bool suspended);
#else
static inline void
dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries,
			     uint32_t cmem_ba_lo, uint32_t cmem_ba_hi)
{
}

static inline void
dp_rx_fst_update_pm_suspend_status(struct dp_soc *soc, bool suspended)
{
}
#endif /* WLAN_SUPPORT_RX_FISA */

#ifdef MAX_ALLOC_PAGE_SIZE
/**
 * dp_set_max_page_size() - Set the max page size for hw link desc.
 * @pages: link desc page handle
 * @max_alloc_size: max_alloc_size
 *
 * For MCL the page size is set to OS defined value and for WIN
 * the page size is set to the max_alloc_size cfg ini
 * param.
 * This is to ensure that WIN gets contiguous memory allocations
 * as per requirement.
 *
 * Return: None
 */
static inline
void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
			  uint32_t max_alloc_size)
{
	pages->page_size = qdf_page_size;
}

#else
static inline
void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
			  uint32_t max_alloc_size)
{
	pages->page_size = max_alloc_size;
}
#endif /* MAX_ALLOC_PAGE_SIZE */

/**
 * dp_history_get_next_index() - get the next entry to record an entry
 *				 in the history.
 * @curr_idx: Current index where the last entry is written.
 * @max_entries: Max number of entries in the history
 *
 * This function assumes that the max number of entries is a power of 2.
 *
 * Return: The index where the next entry is to be written.
 */
static inline uint32_t dp_history_get_next_index(qdf_atomic_t *curr_idx,
						 uint32_t max_entries)
{
	uint32_t idx = qdf_atomic_inc_return(curr_idx);

	return idx & (max_entries - 1);
}
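
/*
 * Worked example (illustrative, not driver API): with a power-of-2
 * history depth the mask replaces a modulo, so the index wraps from the
 * last slot back to slot 0. The buffer layout below is an assumption
 * for the example.
 */
static inline void dp_example_record_history(qdf_atomic_t *hist_idx,
					     uint32_t *hist_buf,
					     uint32_t event)
{
	/* With 1024 entries, a current index of 1023 wraps to slot 0 */
	uint32_t idx = dp_history_get_next_index(hist_idx, 1024);

	hist_buf[idx] = event;
}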

/**
 * dp_rx_skip_tlvs() - Skip TLVs len + L2 hdr_offset, save in nbuf->cb
 * @soc: DP soc handle
 * @nbuf: nbuf whose cb is to be updated
 * @l3_padding: l3 padding (L2 hdr offset) to be skipped
 *
 * Return: None
 */
void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding);

#ifndef FEATURE_WDS
static inline void
dp_hmwds_ast_add_notify(struct dp_peer *peer,
			uint8_t *mac_addr,
			enum cdp_txrx_ast_entry_type type,
			QDF_STATUS err,
			bool is_peer_map)
{
}
#endif

#ifdef HTT_STATS_DEBUGFS_SUPPORT
/* dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize
 * debugfs for HTT stats
 * @pdev: dp pdev handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev);

/* dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for
 * HTT stats
 * @pdev: dp pdev handle
 *
 * Return: none
 */
void dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev);
#else

/* dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize
 * debugfs for HTT stats
 * @pdev: dp pdev handle
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/* dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for
 * HTT stats
 * @pdev: dp pdev handle
 *
 * Return: none
 */
static inline void
dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev)
{
}
#endif /* HTT_STATS_DEBUGFS_SUPPORT */

#ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_soc_swlm_attach() - attach the software latency manager resources
 * @soc: Datapath global soc handle
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_soc_swlm_attach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_soc_swlm_detach() - detach the software latency manager resources
 * @soc: Datapath global soc handle
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_soc_swlm_detach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */

#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_wds_ext_get_peer_id() - function to get peer id by mac
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @mac: Peer mac address
 *
 * This API is called from the control path when a wds extended device
 * is created; it also moves the wds extended peer state to up, which
 * is referred to in rx processing.
 *
 * Return: valid peer id on success
 *	   HTT_INVALID_PEER on failure
 */
uint16_t dp_wds_ext_get_peer_id(ol_txrx_soc_handle soc,
				uint8_t vdev_id,
				uint8_t *mac);

/**
 * dp_wds_ext_set_peer_rx() - function to set peer rx handler
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @mac: Peer mac address
 * @rx: rx function pointer
 * @osif_peer: OSIF peer handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_INVAL if peer is not found
 *	   QDF_STATUS_E_ALREADY if rx is already set/unset
 */
QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
				  uint8_t vdev_id,
				  uint8_t *mac,
				  ol_txrx_rx_fp rx,
				  ol_osif_peer_handle osif_peer);
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#ifdef DP_MEM_PRE_ALLOC

/**
 * dp_context_alloc_mem() - allocate memory for DP context
 * @soc: datapath soc handle
 * @ctxt_type: DP context type
 * @ctxt_size: DP context size
 *
 * Return: DP context address
 */
void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			   size_t ctxt_size);

/**
 * dp_context_free_mem() - Free memory of DP context
 * @soc: datapath soc handle
 * @ctxt_type: DP context type
 * @vaddr: Address of context memory
 *
 * Return: None
 */
void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			 void *vaddr);

/**
 * dp_desc_multi_pages_mem_alloc() - alloc memory over multiple pages
 * @soc: datapath soc handle
 * @desc_type: memory request source type
 * @pages: multi page information storage
 * @element_size: each element size
 * @element_num: total number of elements to be allocated
 * @memctxt: memory context
 * @cacheable: coherent memory or cacheable memory
 *
 * This function is a wrapper for memory allocation over multiple
 * pages. If a dp prealloc method is registered, prealloc is tried
 * first; if prealloc fails, it falls back to a regular allocation via
 * qdf_mem_multi_pages_alloc().
 *
 * Return: None
 */
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
				   enum dp_desc_type desc_type,
				   struct qdf_mem_multi_page_t *pages,
				   size_t element_size,
				   uint16_t element_num,
				   qdf_dma_context_t memctxt,
				   bool cacheable);

/**
 * dp_desc_multi_pages_mem_free() - free multiple pages memory
 * @soc: datapath soc handle
 * @desc_type: memory request source type
 * @pages: multi page information storage
 * @memctxt: memory context
 * @cacheable: coherent memory or cacheable memory
 *
 * This function is a wrapper for multiple pages memory free:
 * if the memory came from the prealloc pool, put it back to the pool;
 * otherwise free it via qdf_mem_multi_pages_free().
 *
 * Return: None
 */
void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
				  enum dp_desc_type desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  qdf_dma_context_t memctxt,
				  bool cacheable);

#else
static inline
void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			   size_t ctxt_size)
{
	return qdf_mem_malloc(ctxt_size);
}

static inline
void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			 void *vaddr)
{
	qdf_mem_free(vaddr);
}

static inline
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
				   enum dp_desc_type desc_type,
				   struct qdf_mem_multi_page_t *pages,
				   size_t element_size,
				   uint16_t element_num,
				   qdf_dma_context_t memctxt,
				   bool cacheable)
{
	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
				  element_num, memctxt, cacheable);
}

static inline
void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
				  enum dp_desc_type desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  qdf_dma_context_t memctxt,
				  bool cacheable)
{
	qdf_mem_multi_pages_free(soc->osdev, pages,
				 memctxt, cacheable);
}
#endif
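
/*
 * Illustrative sketch (not driver API): whichever branch above is
 * compiled in, allocation and free calls pair up with the same context
 * type. The DP_PDEV_TYPE context type and the helper name are
 * assumptions for the example.
 */
static inline void dp_example_ctxt_roundtrip(struct dp_soc *soc)
{
	struct dp_pdev *pdev;

	pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, sizeof(*pdev));
	if (!pdev)
		return;

	/* ... use the context ... */

	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
}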

#ifdef FEATURE_RUNTIME_PM
/**
 * dp_runtime_get() - Get dp runtime refcount
 * @soc: Datapath soc handle
 *
 * Get dp runtime refcount by incrementing an atomic variable, which
 * can block dp runtime resume until pending tx is flushed by runtime
 * suspend.
 *
 * Return: Current refcount
 */
static inline int32_t dp_runtime_get(struct dp_soc *soc)
{
	return qdf_atomic_inc_return(&soc->dp_runtime_refcount);
}

/**
 * dp_runtime_put() - Return dp runtime refcount
 * @soc: Datapath soc handle
 *
 * Return dp runtime refcount by decrementing the atomic variable,
 * allowing dp runtime resume to finish.
 *
 * Return: Current refcount
 */
static inline int32_t dp_runtime_put(struct dp_soc *soc)
{
	return qdf_atomic_dec_return(&soc->dp_runtime_refcount);
}

/**
 * dp_runtime_get_refcount() - Get dp runtime refcount
 * @soc: Datapath soc handle
 *
 * Get dp runtime refcount by returning the atomic variable
 *
 * Return: Current refcount
 */
static inline int32_t dp_runtime_get_refcount(struct dp_soc *soc)
{
	return qdf_atomic_read(&soc->dp_runtime_refcount);
}

/**
 * dp_runtime_init() - Init dp runtime refcount when dp soc init
 * @soc: Datapath soc handle
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_runtime_init(struct dp_soc *soc)
{
	return qdf_atomic_init(&soc->dp_runtime_refcount);
}
#else
static inline int32_t dp_runtime_get(struct dp_soc *soc)
{
	return 0;
}

static inline int32_t dp_runtime_put(struct dp_soc *soc)
{
	return 0;
}

static inline QDF_STATUS dp_runtime_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif
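
/*
 * Illustrative sketch (not driver API): a tx path takes the runtime
 * reference before posting work and drops it afterwards, so the
 * refcount stays non-zero while tx is in flight. The function name is
 * an assumption for the example.
 */
static inline void dp_example_tx_with_runtime_ref(struct dp_soc *soc)
{
	dp_runtime_get(soc);

	/* ... post tx descriptors / ring the hw doorbell ... */

	dp_runtime_put(soc);
}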

/*
 * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
 *				processing
 * @pdev: Datapath PDEV handle
 *
 * Return: None
 */
void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev);

/*
 * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
 *				processing
 * @pdev: Datapath PDEV handle
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_NOMEM: Error
 */
QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev);

/**
 * dp_peer_flush_frags() - Flush all fragments for a particular
 *			   peer
 * @soc_hdl: data path soc handle
 * @vdev_id: vdev id
 * @peer_mac: peer mac address
 *
 * Return: None
 */
void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 uint8_t *peer_mac);
#endif /* #ifndef _DP_INTERNAL_H_ */