xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_internal.h (revision 45a38684b07295822dc8eba39e293408f203eec8)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _DP_INTERNAL_H_
20 #define _DP_INTERNAL_H_
21 
22 #include "dp_types.h"
23 
24 #define RX_BUFFER_SIZE_PKTLOG_LITE 1024
25 #define DP_PEER_WDS_COUNT_INVALID UINT_MAX
26 
27 #define DP_RSSI_INVAL 0x80
28 #define DP_RSSI_AVG_WEIGHT 2
29 /*
30  * Formula to derive avg_rssi is taken from the wifi2.0 firmware
31  */
32 #define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \
33 	(((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \
34 	+ ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT)))
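/*
 * Illustrative worked example (editor's sketch, not driver code):
 * with DP_RSSI_AVG_WEIGHT = 2 the macro above computes
 *   avg = avg - avg/4 + last/4
 * i.e. an exponentially weighted moving average with a 3/4:1/4
 * split. For avg_rssi = 80 and last_rssi = 64:
 *   80 - (80 >> 2) + (64 >> 2) = 80 - 20 + 16 = 76
 */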
35 
36 /* Macro For NYSM value received in VHT TLV */
37 #define VHT_SGI_NYSM 3
38 
39 /**
40  * Bitmap of HTT PPDU TLV types for Default mode
41  */
42 #define HTT_PPDU_DEFAULT_TLV_BITMAP \
43 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
44 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
45 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
46 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
47 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
48 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))
49 
50 /* PPDU STATS CFG */
51 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
52 
53 /* PPDU stats mask sent to FW to enable enhanced stats */
54 #define DP_PPDU_STATS_CFG_ENH_STATS \
55 	((HTT_PPDU_DEFAULT_TLV_BITMAP) | \
56 	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
57 	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
58 	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))
59 
60 /* PPDU stats mask sent to FW to support debug sniffer feature */
61 #define DP_PPDU_STATS_CFG_SNIFFER \
62 	((HTT_PPDU_DEFAULT_TLV_BITMAP) | \
63 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
64 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV) | \
65 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
66 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
67 	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
70 	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
71 	(1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
72 	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))
73 
74 /* PPDU stats mask sent to FW to support BPR feature*/
75 #define DP_PPDU_STATS_CFG_BPR \
76 	((1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
77 	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))
78 
79 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
80 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
81 				   DP_PPDU_STATS_CFG_ENH_STATS)
82 /* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
83 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
84 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
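/*
 * Usage sketch (illustrative only): each mask above is a bitmap
 * indexed by HTT TLV type, so membership is tested with a
 * shift-and-mask check, e.g.:
 *
 *   uint32_t mask = DP_PPDU_STATS_CFG_ENH_STATS;
 *
 *   if (mask & (1 << HTT_PPDU_STATS_USR_RATE_TLV))
 *       ;  // rate TLV is requested from FW in this mode
 *
 * The local variable 'mask' is hypothetical; the real masks are
 * sent to FW in the PPDU stats config HTT message.
 */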
85 
86 /**
87  * Bitmap of HTT PPDU delayed ba TLV types for Default mode
88  */
89 #define HTT_PPDU_DELAYED_BA_TLV_BITMAP \
90 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
91 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
92 	(1 << HTT_PPDU_STATS_USR_RATE_TLV))
93 
94 /**
95  * Bitmap of HTT PPDU TLV types for Delayed BA
96  */
97 #define HTT_PPDU_STATUS_TLV_BITMAP \
98 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
99 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))
100 
101 /**
102  * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64
103  */
104 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \
105 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
106 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
107 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
108 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
109 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
110 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
111 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
112 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV))
113 
114 /**
115  * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256
116  */
117 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \
118 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
119 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
120 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
121 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
122 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
123 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
124 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
125 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV))
126 
127 #ifdef WLAN_TX_PKT_CAPTURE_ENH
128 extern uint8_t
129 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX];
130 #endif
131 
132 #define DP_MAX_TIMER_EXEC_TIME_TICKS \
133 		(QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 * 20)
134 
135 /**
136  * enum timer_yield_status - yield status code used in monitor mode timer.
137  * @DP_TIMER_NO_YIELD: do not yield
138  * @DP_TIMER_WORK_DONE: yield because work is done
139  * @DP_TIMER_WORK_EXHAUST: yield because work quota is exhausted
140  * @DP_TIMER_TIME_EXHAUST: yield due to time slot exhausted
141  */
142 enum timer_yield_status {
143 	DP_TIMER_NO_YIELD,
144 	DP_TIMER_WORK_DONE,
145 	DP_TIMER_WORK_EXHAUST,
146 	DP_TIMER_TIME_EXHAUST,
147 };
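/*
 * Usage sketch (editor's illustration, assumed semantics): a
 * monitor-mode timer handler would typically combine these codes
 * with DP_MAX_TIMER_EXEC_TIME_TICKS, roughly as:
 *
 *   static enum timer_yield_status
 *   dp_timer_should_yield(uint64_t start, uint32_t work_done)
 *   {
 *       if (!work_done)
 *           return DP_TIMER_WORK_DONE;
 *       if (qdf_get_log_timestamp() - start >
 *           DP_MAX_TIMER_EXEC_TIME_TICKS)
 *           return DP_TIMER_TIME_EXHAUST;
 *       return DP_TIMER_NO_YIELD;
 *   }
 *
 * dp_timer_should_yield() is a hypothetical helper named only for
 * this example.
 */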
148 
149 #if DP_PRINT_ENABLE
150 #include <stdarg.h>       /* va_list */
151 #include <qdf_types.h> /* qdf_vprint */
152 #include <cdp_txrx_handle.h>
153 
154 enum {
155 	/* FATAL_ERR - print only irrecoverable error messages */
156 	DP_PRINT_LEVEL_FATAL_ERR,
157 
158 	/* ERR - include non-fatal err messages */
159 	DP_PRINT_LEVEL_ERR,
160 
161 	/* WARN - include warnings */
162 	DP_PRINT_LEVEL_WARN,
163 
164 	/* INFO1 - include fundamental, infrequent events */
165 	DP_PRINT_LEVEL_INFO1,
166 
167 	/* INFO2 - include non-fundamental but infrequent events */
168 	DP_PRINT_LEVEL_INFO2,
169 };
170 
171 #define dp_print(level, fmt, ...) do { \
172 	if (level <= g_txrx_print_level) \
173 		qdf_print(fmt, ## __VA_ARGS__); \
174 } while (0)
175 #define DP_PRINT(level, fmt, ...) do { \
176 	dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
177 } while (0)
178 #else
179 #define DP_PRINT(level, fmt, ...)
180 #endif /* DP_PRINT_ENABLE */
181 
182 #define DP_TRACE(LVL, fmt, args ...)                             \
183 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL,       \
184 		fmt, ## args)
185 
186 #ifdef DP_PRINT_NO_CONSOLE
187 /* Stat prints should not go to console or kernel logs.*/
188 #define DP_PRINT_STATS(fmt, args ...)\
189 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,       \
190 		  fmt, ## args)
191 #else
192 #define DP_PRINT_STATS(fmt, args ...)\
193 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,\
194 		  fmt, ## args)
195 #endif
196 #define DP_STATS_INIT(_handle) \
197 	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))
198 
199 #define DP_STATS_CLR(_handle) \
200 	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))
201 
202 #ifndef DISABLE_DP_STATS
203 #define DP_STATS_INC(_handle, _field, _delta) \
204 { \
205 	if (likely(_handle)) \
206 		_handle->stats._field += _delta; \
207 }
208 
209 #define DP_STATS_INCC(_handle, _field, _delta, _cond) \
210 { \
211 	if (_cond && likely(_handle)) \
212 		_handle->stats._field += _delta; \
213 }
214 
215 #define DP_STATS_DEC(_handle, _field, _delta) \
216 { \
217 	if (likely(_handle)) \
218 		_handle->stats._field -= _delta; \
219 }
220 
221 #define DP_STATS_UPD(_handle, _field, _delta) \
222 { \
223 	if (likely(_handle)) \
224 		_handle->stats._field = _delta; \
225 }
226 
227 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \
228 { \
229 	DP_STATS_INC(_handle, _field.num, _count); \
230 	DP_STATS_INC(_handle, _field.bytes, _bytes) \
231 }
232 
233 #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
234 { \
235 	DP_STATS_INCC(_handle, _field.num, _count, _cond); \
236 	DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
237 }
238 
239 #define DP_STATS_AGGR(_handle_a, _handle_b, _field) \
240 { \
241 	_handle_a->stats._field += _handle_b->stats._field; \
242 }
243 
244 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \
245 { \
246 	DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \
247 	DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\
248 }
249 
250 #define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \
251 { \
252 	_handle_a->stats._field = _handle_b->stats._field; \
253 }
254 
255 #else
256 #define DP_STATS_INC(_handle, _field, _delta)
257 #define DP_STATS_INCC(_handle, _field, _delta, _cond)
258 #define DP_STATS_DEC(_handle, _field, _delta)
259 #define DP_STATS_UPD(_handle, _field, _delta)
260 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
261 #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
262 #define DP_STATS_AGGR(_handle_a, _handle_b, _field)
263 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
264 #endif
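/*
 * Usage sketch (illustrative only): the accumulators above compile
 * to nothing when DISABLE_DP_STATS is defined, so callers invoke
 * them unconditionally, e.g. on a tx completion:
 *
 *   DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
 *   DP_STATS_INCC(peer, tx.tx_failed, 1, !tx_acked);
 *
 * 'length' and 'tx_acked' are placeholders for values the caller
 * derives from the completion descriptor.
 */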
265 
266 #ifdef ENABLE_DP_HIST_STATS
267 #define DP_HIST_INIT() \
268 	uint32_t num_of_packets[MAX_PDEV_CNT] = {0};
269 
270 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) \
271 { \
272 		++num_of_packets[_pdev_id]; \
273 }
274 
275 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
276 	do {                                                              \
277 		if (_p_cntrs == 1) {                                      \
278 			DP_STATS_INC(_pdev,                               \
279 				tx_comp_histogram.pkts_1, 1);             \
280 		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
281 			DP_STATS_INC(_pdev,                               \
282 				tx_comp_histogram.pkts_2_20, 1);          \
283 		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
284 			DP_STATS_INC(_pdev,                               \
285 				tx_comp_histogram.pkts_21_40, 1);         \
286 		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
287 			DP_STATS_INC(_pdev,                               \
288 				tx_comp_histogram.pkts_41_60, 1);         \
289 		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
290 			DP_STATS_INC(_pdev,                               \
291 				tx_comp_histogram.pkts_61_80, 1);         \
292 		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
293 			DP_STATS_INC(_pdev,                               \
294 				tx_comp_histogram.pkts_81_100, 1);        \
295 		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
296 			DP_STATS_INC(_pdev,                               \
297 				tx_comp_histogram.pkts_101_200, 1);       \
298 		} else if (_p_cntrs > 200) {                              \
299 			DP_STATS_INC(_pdev,                               \
300 				tx_comp_histogram.pkts_201_plus, 1);      \
301 		}                                                         \
302 	} while (0)
303 
304 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
305 	do {                                                              \
306 		if (_p_cntrs == 1) {                                      \
307 			DP_STATS_INC(_pdev,                               \
308 				rx_ind_histogram.pkts_1, 1);              \
309 		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
310 			DP_STATS_INC(_pdev,                               \
311 				rx_ind_histogram.pkts_2_20, 1);           \
312 		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
313 			DP_STATS_INC(_pdev,                               \
314 				rx_ind_histogram.pkts_21_40, 1);          \
315 		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
316 			DP_STATS_INC(_pdev,                               \
317 				rx_ind_histogram.pkts_41_60, 1);          \
318 		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
319 			DP_STATS_INC(_pdev,                               \
320 				rx_ind_histogram.pkts_61_80, 1);          \
321 		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
322 			DP_STATS_INC(_pdev,                               \
323 				rx_ind_histogram.pkts_81_100, 1);         \
324 		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
325 			DP_STATS_INC(_pdev,                               \
326 				rx_ind_histogram.pkts_101_200, 1);        \
327 		} else if (_p_cntrs > 200) {                              \
328 			DP_STATS_INC(_pdev,                               \
329 				rx_ind_histogram.pkts_201_plus, 1);       \
330 		}                                                         \
331 	} while (0)
332 
333 #define DP_TX_HIST_STATS_PER_PDEV() \
334 	do { \
335 		uint8_t hist_stats = 0; \
336 		for (hist_stats = 0; hist_stats < soc->pdev_count; \
337 				hist_stats++) { \
338 			DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
339 					num_of_packets[hist_stats]); \
340 		} \
341 	}  while (0)
342 
343 
344 #define DP_RX_HIST_STATS_PER_PDEV() \
345 	do { \
346 		uint8_t hist_stats = 0; \
347 		for (hist_stats = 0; hist_stats < soc->pdev_count; \
348 				hist_stats++) { \
349 			DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
350 					num_of_packets[hist_stats]); \
351 		} \
352 	}  while (0)
353 
354 #else
355 #define DP_HIST_INIT()
356 #define DP_HIST_PACKET_COUNT_INC(_pdev_id)
357 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
358 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
359 #define DP_RX_HIST_STATS_PER_PDEV()
360 #define DP_TX_HIST_STATS_PER_PDEV()
361 #endif /* ENABLE_DP_HIST_STATS */
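/*
 * Usage sketch (illustrative only): the histogram macros are meant
 * to be used together inside one reap pass:
 *
 *   DP_HIST_INIT();
 *   // ... per completed packet:
 *   DP_HIST_PACKET_COUNT_INC(pdev->pdev_id);
 *   // ... once the pass ends (a 'soc' pointer must be in scope,
 *   // since the *_PER_PDEV() macros reference it directly):
 *   DP_TX_HIST_STATS_PER_PDEV();
 */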
362 
363 #ifdef QCA_SUPPORT_PEER_ISOLATION
364 #define dp_get_peer_isolation(_peer) ((_peer)->isolation)
365 
366 static inline void dp_set_peer_isolation(struct dp_peer *peer, bool val)
367 {
368 	peer->isolation = val;
369 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
370 		  "peer:%pM isolation:%d",
371 		  peer->mac_addr.raw, peer->isolation);
372 }
373 
374 #else
375 #define dp_get_peer_isolation(_peer) (0)
376 
377 static inline void dp_set_peer_isolation(struct dp_peer *peer, bool val)
378 {
379 }
380 #endif /* QCA_SUPPORT_PEER_ISOLATION */
381 
382 /**
383  * The lmac ID for a particular channel band is fixed.
384  * 2.4GHz band uses lmac_id = 1
385  * 5GHz/6GHz band uses lmac_id=0
386  */
387 #define DP_MON_INVALID_LMAC_ID	(-1)
388 #define DP_MON_2G_LMAC_ID	1
389 #define DP_MON_5G_LMAC_ID	0
390 #define DP_MON_6G_LMAC_ID	0
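/*
 * Illustrative helper (hypothetical, editor's sketch): the fixed
 * band-to-lmac mapping above could be expressed as
 *
 *   static inline int dp_band_to_lmac_id(enum reg_wifi_band band)
 *   {
 *       switch (band) {
 *       case REG_BAND_2G:
 *           return DP_MON_2G_LMAC_ID;
 *       case REG_BAND_5G:
 *           return DP_MON_5G_LMAC_ID;
 *       case REG_BAND_6G:
 *           return DP_MON_6G_LMAC_ID;
 *       default:
 *           return DP_MON_INVALID_LMAC_ID;
 *       }
 *   }
 *
 * dp_band_to_lmac_id() is not part of this header; it only shows
 * how the constants are intended to be consumed.
 */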
391 
392 #ifdef FEATURE_TSO_STATS
393 /**
394  * dp_init_tso_stats() - Clear tso stats
395  * @pdev: pdev handle
396  *
397  * Return: None
398  */
399 static inline
400 void dp_init_tso_stats(struct dp_pdev *pdev)
401 {
402 	if (pdev) {
403 		qdf_mem_zero(&((pdev)->stats.tso_stats),
404 			     sizeof((pdev)->stats.tso_stats));
405 		qdf_atomic_init(&pdev->tso_idx);
406 	}
407 }
408 
409 /**
410  * dp_stats_tso_segment_histogram_update() - TSO Segment Histogram
411  * @pdev: pdev handle
412  * @_p_cntrs: number of tso segments for a tso packet
413  *
414  * Return: None
415  */
416 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
417 					   uint8_t _p_cntrs);
418 
419 /**
420  * dp_tso_segment_update() - Collect tso segment information
421  * @pdev: pdev handle
422  * @stats_idx: tso packet number
423  * @idx: tso segment number
424  * @seg: tso segment
425  *
426  * Return: None
427  */
428 void dp_tso_segment_update(struct dp_pdev *pdev,
429 			   uint32_t stats_idx,
430 			   uint8_t idx,
431 			   struct qdf_tso_seg_t seg);
432 
433 /**
434  * dp_tso_packet_update() - TSO Packet information
435  * @pdev: pdev handle
436  * @stats_idx: tso packet number
437  * @msdu: nbuf handle
438  * @num_segs: tso segments
439  *
440  * Return: None
441  */
442 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
443 			  qdf_nbuf_t msdu, uint16_t num_segs);
444 
445 /**
446  * dp_tso_segment_stats_update() - TSO Segment stats
447  * @pdev: pdev handle
448  * @stats_seg: tso segment list
449  * @stats_idx: tso packet number
450  *
451  * Return: None
452  */
453 void dp_tso_segment_stats_update(struct dp_pdev *pdev,
454 				 struct qdf_tso_seg_elem_t *stats_seg,
455 				 uint32_t stats_idx);
456 
457 /**
458  * dp_print_tso_stats() - dump tso statistics
459  * @soc:soc handle
460  * @level: verbosity level
461  *
462  * Return: None
463  */
464 void dp_print_tso_stats(struct dp_soc *soc,
465 			enum qdf_stats_verbosity_level level);
466 
467 /**
468  * dp_txrx_clear_tso_stats() - clear tso stats
469  * @soc: soc handle
470  *
471  * Return: None
472  */
473 void dp_txrx_clear_tso_stats(struct dp_soc *soc);
474 #else
475 static inline
476 void dp_init_tso_stats(struct dp_pdev *pdev)
477 {
478 }
479 
480 static inline
481 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
482 					   uint8_t _p_cntrs)
483 {
484 }
485 
486 static inline
487 void dp_tso_segment_update(struct dp_pdev *pdev,
488 			   uint32_t stats_idx,
489 			   uint8_t idx,
490 			   struct qdf_tso_seg_t seg)
491 {
492 }
493 
494 static inline
495 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
496 			  qdf_nbuf_t msdu, uint16_t num_segs)
497 {
498 }
499 
500 static inline
501 void dp_tso_segment_stats_update(struct dp_pdev *pdev,
502 				 struct qdf_tso_seg_elem_t *stats_seg,
503 				 uint32_t stats_idx)
504 {
505 }
506 
507 static inline
508 void dp_print_tso_stats(struct dp_soc *soc,
509 			enum qdf_stats_verbosity_level level)
510 {
511 }
512 
513 static inline
514 void dp_txrx_clear_tso_stats(struct dp_soc *soc)
515 {
516 }
517 #endif /* FEATURE_TSO_STATS */
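/*
 * Call-order sketch (illustrative only): a TSO-capable tx path is
 * expected to drive the APIs above roughly as
 *
 *   dp_init_tso_stats(pdev);                       // at init/reset
 *   dp_tso_packet_update(pdev, stats_idx, msdu, num_segs);
 *   dp_tso_segment_stats_update(pdev, seg_list, stats_idx);
 *   dp_print_tso_stats(soc, QDF_STATS_VERBOSITY_LEVEL_HIGH);
 *
 * 'stats_idx', 'msdu', 'num_segs' and 'seg_list' are placeholders
 * for values owned by the caller.
 */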
518 
519 #define DP_HTT_T2H_HP_PIPE 5
520 static inline void dp_update_pdev_stats(struct dp_pdev *tgtobj,
521 					struct cdp_vdev_stats *srcobj)
522 {
523 	uint8_t i;
524 	uint8_t pream_type;
525 
526 	for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) {
527 		for (i = 0; i < MAX_MCS; i++) {
528 			tgtobj->stats.tx.pkt_type[pream_type].
529 				mcs_count[i] +=
530 			srcobj->tx.pkt_type[pream_type].
531 				mcs_count[i];
532 			tgtobj->stats.rx.pkt_type[pream_type].
533 				mcs_count[i] +=
534 			srcobj->rx.pkt_type[pream_type].
535 				mcs_count[i];
536 		}
537 	}
538 
539 	for (i = 0; i < MAX_BW; i++) {
540 		tgtobj->stats.tx.bw[i] += srcobj->tx.bw[i];
541 		tgtobj->stats.rx.bw[i] += srcobj->rx.bw[i];
542 	}
543 
544 	for (i = 0; i < SS_COUNT; i++) {
545 		tgtobj->stats.tx.nss[i] += srcobj->tx.nss[i];
546 		tgtobj->stats.rx.nss[i] += srcobj->rx.nss[i];
547 	}
548 
549 	for (i = 0; i < WME_AC_MAX; i++) {
550 		tgtobj->stats.tx.wme_ac_type[i] +=
551 			srcobj->tx.wme_ac_type[i];
552 		tgtobj->stats.rx.wme_ac_type[i] +=
553 			srcobj->rx.wme_ac_type[i];
554 		tgtobj->stats.tx.excess_retries_per_ac[i] +=
555 			srcobj->tx.excess_retries_per_ac[i];
556 	}
557 
558 	for (i = 0; i < MAX_GI; i++) {
559 		tgtobj->stats.tx.sgi_count[i] +=
560 			srcobj->tx.sgi_count[i];
561 		tgtobj->stats.rx.sgi_count[i] +=
562 			srcobj->rx.sgi_count[i];
563 	}
564 
565 	for (i = 0; i < MAX_RECEPTION_TYPES; i++)
566 		tgtobj->stats.rx.reception_type[i] +=
567 			srcobj->rx.reception_type[i];
568 
569 	tgtobj->stats.tx.comp_pkt.bytes += srcobj->tx.comp_pkt.bytes;
570 	tgtobj->stats.tx.comp_pkt.num += srcobj->tx.comp_pkt.num;
571 	tgtobj->stats.tx.ucast.num += srcobj->tx.ucast.num;
572 	tgtobj->stats.tx.ucast.bytes += srcobj->tx.ucast.bytes;
573 	tgtobj->stats.tx.mcast.num += srcobj->tx.mcast.num;
574 	tgtobj->stats.tx.mcast.bytes += srcobj->tx.mcast.bytes;
575 	tgtobj->stats.tx.bcast.num += srcobj->tx.bcast.num;
576 	tgtobj->stats.tx.bcast.bytes += srcobj->tx.bcast.bytes;
577 	tgtobj->stats.tx.tx_success.num += srcobj->tx.tx_success.num;
578 	tgtobj->stats.tx.tx_success.bytes +=
579 		srcobj->tx.tx_success.bytes;
580 	tgtobj->stats.tx.nawds_mcast.num +=
581 		srcobj->tx.nawds_mcast.num;
582 	tgtobj->stats.tx.nawds_mcast.bytes +=
583 		srcobj->tx.nawds_mcast.bytes;
584 	tgtobj->stats.tx.nawds_mcast_drop +=
585 		srcobj->tx.nawds_mcast_drop;
586 	tgtobj->stats.tx.num_ppdu_cookie_valid +=
587 		srcobj->tx.num_ppdu_cookie_valid;
588 	tgtobj->stats.tx.tx_failed += srcobj->tx.tx_failed;
589 	tgtobj->stats.tx.ofdma += srcobj->tx.ofdma;
590 	tgtobj->stats.tx.stbc += srcobj->tx.stbc;
591 	tgtobj->stats.tx.ldpc += srcobj->tx.ldpc;
592 	tgtobj->stats.tx.retries += srcobj->tx.retries;
593 	tgtobj->stats.tx.non_amsdu_cnt += srcobj->tx.non_amsdu_cnt;
594 	tgtobj->stats.tx.amsdu_cnt += srcobj->tx.amsdu_cnt;
595 	tgtobj->stats.tx.non_ampdu_cnt += srcobj->tx.non_ampdu_cnt;
596 	tgtobj->stats.tx.ampdu_cnt += srcobj->tx.ampdu_cnt;
597 	tgtobj->stats.tx.dropped.fw_rem.num += srcobj->tx.dropped.fw_rem.num;
598 	tgtobj->stats.tx.dropped.fw_rem.bytes +=
599 			srcobj->tx.dropped.fw_rem.bytes;
600 	tgtobj->stats.tx.dropped.fw_rem_tx +=
601 			srcobj->tx.dropped.fw_rem_tx;
602 	tgtobj->stats.tx.dropped.fw_rem_notx +=
603 			srcobj->tx.dropped.fw_rem_notx;
604 	tgtobj->stats.tx.dropped.fw_reason1 +=
605 			srcobj->tx.dropped.fw_reason1;
606 	tgtobj->stats.tx.dropped.fw_reason2 +=
607 			srcobj->tx.dropped.fw_reason2;
608 	tgtobj->stats.tx.dropped.fw_reason3 +=
609 			srcobj->tx.dropped.fw_reason3;
610 	tgtobj->stats.tx.dropped.age_out += srcobj->tx.dropped.age_out;
611 	tgtobj->stats.rx.err.mic_err += srcobj->rx.err.mic_err;
612 	if (srcobj->rx.rssi != 0)
613 		tgtobj->stats.rx.rssi = srcobj->rx.rssi;
614 	tgtobj->stats.rx.rx_rate = srcobj->rx.rx_rate;
615 	tgtobj->stats.rx.err.decrypt_err += srcobj->rx.err.decrypt_err;
616 	tgtobj->stats.rx.non_ampdu_cnt += srcobj->rx.non_ampdu_cnt;
617 	tgtobj->stats.rx.ampdu_cnt += srcobj->rx.ampdu_cnt;
618 	tgtobj->stats.rx.non_amsdu_cnt += srcobj->rx.non_amsdu_cnt;
619 	tgtobj->stats.rx.amsdu_cnt += srcobj->rx.amsdu_cnt;
620 	tgtobj->stats.rx.nawds_mcast_drop += srcobj->rx.nawds_mcast_drop;
621 	tgtobj->stats.rx.to_stack.num += srcobj->rx.to_stack.num;
622 	tgtobj->stats.rx.to_stack.bytes += srcobj->rx.to_stack.bytes;
623 
624 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
625 		tgtobj->stats.rx.rcvd_reo[i].num +=
626 			srcobj->rx.rcvd_reo[i].num;
627 		tgtobj->stats.rx.rcvd_reo[i].bytes +=
628 			srcobj->rx.rcvd_reo[i].bytes;
629 	}
630 
631 	srcobj->rx.unicast.num =
632 		srcobj->rx.to_stack.num -
633 				(srcobj->rx.multicast.num);
634 	srcobj->rx.unicast.bytes =
635 		srcobj->rx.to_stack.bytes -
636 				(srcobj->rx.multicast.bytes);
637 
638 	tgtobj->stats.rx.unicast.num += srcobj->rx.unicast.num;
639 	tgtobj->stats.rx.unicast.bytes += srcobj->rx.unicast.bytes;
640 	tgtobj->stats.rx.multicast.num += srcobj->rx.multicast.num;
641 	tgtobj->stats.rx.multicast.bytes += srcobj->rx.multicast.bytes;
642 	tgtobj->stats.rx.bcast.num += srcobj->rx.bcast.num;
643 	tgtobj->stats.rx.bcast.bytes += srcobj->rx.bcast.bytes;
644 	tgtobj->stats.rx.raw.num += srcobj->rx.raw.num;
645 	tgtobj->stats.rx.raw.bytes += srcobj->rx.raw.bytes;
646 	tgtobj->stats.rx.intra_bss.pkts.num +=
647 			srcobj->rx.intra_bss.pkts.num;
648 	tgtobj->stats.rx.intra_bss.pkts.bytes +=
649 			srcobj->rx.intra_bss.pkts.bytes;
650 	tgtobj->stats.rx.intra_bss.fail.num +=
651 			srcobj->rx.intra_bss.fail.num;
652 	tgtobj->stats.rx.intra_bss.fail.bytes +=
653 			srcobj->rx.intra_bss.fail.bytes;
654 
655 	tgtobj->stats.tx.last_ack_rssi =
656 		srcobj->tx.last_ack_rssi;
657 	tgtobj->stats.rx.mec_drop.num += srcobj->rx.mec_drop.num;
658 	tgtobj->stats.rx.mec_drop.bytes += srcobj->rx.mec_drop.bytes;
659 	tgtobj->stats.rx.multipass_rx_pkt_drop +=
660 		srcobj->rx.multipass_rx_pkt_drop;
661 }
662 
663 static inline void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj,
664 						struct dp_vdev *srcobj)
665 {
666 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.nawds_mcast);
667 
668 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.rcvd);
669 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.processed);
670 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.reinject_pkts);
671 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.inspect_pkts);
672 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.raw.raw_pkt);
673 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.raw.dma_map_error);
674 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_host.num);
675 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_target);
676 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sg.sg_pkt);
677 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.mcast_en.mcast_pkt);
678 	DP_STATS_AGGR(tgtobj, srcobj,
679 		      tx_i.mcast_en.dropped_map_error);
680 	DP_STATS_AGGR(tgtobj, srcobj,
681 		      tx_i.mcast_en.dropped_self_mac);
682 	DP_STATS_AGGR(tgtobj, srcobj,
683 		      tx_i.mcast_en.dropped_send_fail);
684 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mcast_en.ucast);
685 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.dma_error);
686 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.ring_full);
687 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.enqueue_fail);
688 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.desc_na.num);
689 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.res_full);
690 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.headroom_insufficient);
691 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified);
692 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified_raw);
693 	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sniffer_rcvd);
694 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.exception_fw);
695 	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.completion_fw);
696 
697 	tgtobj->stats.tx_i.dropped.dropped_pkt.num =
698 		tgtobj->stats.tx_i.dropped.dma_error +
699 		tgtobj->stats.tx_i.dropped.ring_full +
700 		tgtobj->stats.tx_i.dropped.enqueue_fail +
701 		tgtobj->stats.tx_i.dropped.desc_na.num +
702 		tgtobj->stats.tx_i.dropped.res_full;
703 
704 }
705 
706 static inline void dp_update_vdev_stats(struct cdp_vdev_stats *tgtobj,
707 					struct dp_peer *srcobj)
708 {
709 	uint8_t i;
710 	uint8_t pream_type;
711 
712 	for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) {
713 		for (i = 0; i < MAX_MCS; i++) {
714 			tgtobj->tx.pkt_type[pream_type].
715 				mcs_count[i] +=
716 			srcobj->stats.tx.pkt_type[pream_type].
717 				mcs_count[i];
718 			tgtobj->rx.pkt_type[pream_type].
719 				mcs_count[i] +=
720 			srcobj->stats.rx.pkt_type[pream_type].
721 				mcs_count[i];
722 		}
723 	}
724 
725 	for (i = 0; i < MAX_BW; i++) {
726 		tgtobj->tx.bw[i] += srcobj->stats.tx.bw[i];
727 		tgtobj->rx.bw[i] += srcobj->stats.rx.bw[i];
728 	}
729 
730 	for (i = 0; i < SS_COUNT; i++) {
731 		tgtobj->tx.nss[i] += srcobj->stats.tx.nss[i];
732 		tgtobj->rx.nss[i] += srcobj->stats.rx.nss[i];
733 	}
734 
735 	for (i = 0; i < WME_AC_MAX; i++) {
736 		tgtobj->tx.wme_ac_type[i] +=
737 			srcobj->stats.tx.wme_ac_type[i];
738 		tgtobj->rx.wme_ac_type[i] +=
739 			srcobj->stats.rx.wme_ac_type[i];
740 		tgtobj->tx.excess_retries_per_ac[i] +=
741 			srcobj->stats.tx.excess_retries_per_ac[i];
742 	}
743 
744 	for (i = 0; i < MAX_GI; i++) {
745 		tgtobj->tx.sgi_count[i] +=
746 			srcobj->stats.tx.sgi_count[i];
747 		tgtobj->rx.sgi_count[i] +=
748 			srcobj->stats.rx.sgi_count[i];
749 	}
750 
751 	for (i = 0; i < MAX_RECEPTION_TYPES; i++)
752 		tgtobj->rx.reception_type[i] +=
753 			srcobj->stats.rx.reception_type[i];
754 
755 	tgtobj->tx.comp_pkt.bytes += srcobj->stats.tx.comp_pkt.bytes;
756 	tgtobj->tx.comp_pkt.num += srcobj->stats.tx.comp_pkt.num;
757 	tgtobj->tx.ucast.num += srcobj->stats.tx.ucast.num;
758 	tgtobj->tx.ucast.bytes += srcobj->stats.tx.ucast.bytes;
759 	tgtobj->tx.mcast.num += srcobj->stats.tx.mcast.num;
760 	tgtobj->tx.mcast.bytes += srcobj->stats.tx.mcast.bytes;
761 	tgtobj->tx.bcast.num += srcobj->stats.tx.bcast.num;
762 	tgtobj->tx.bcast.bytes += srcobj->stats.tx.bcast.bytes;
763 	tgtobj->tx.tx_success.num += srcobj->stats.tx.tx_success.num;
764 	tgtobj->tx.tx_success.bytes +=
765 		srcobj->stats.tx.tx_success.bytes;
766 	tgtobj->tx.nawds_mcast.num +=
767 		srcobj->stats.tx.nawds_mcast.num;
768 	tgtobj->tx.nawds_mcast.bytes +=
769 		srcobj->stats.tx.nawds_mcast.bytes;
770 	tgtobj->tx.nawds_mcast_drop +=
771 		srcobj->stats.tx.nawds_mcast_drop;
772 	tgtobj->tx.num_ppdu_cookie_valid +=
773 		srcobj->stats.tx.num_ppdu_cookie_valid;
774 	tgtobj->tx.tx_failed += srcobj->stats.tx.tx_failed;
775 	tgtobj->tx.ofdma += srcobj->stats.tx.ofdma;
776 	tgtobj->tx.stbc += srcobj->stats.tx.stbc;
777 	tgtobj->tx.ldpc += srcobj->stats.tx.ldpc;
778 	tgtobj->tx.retries += srcobj->stats.tx.retries;
779 	tgtobj->tx.non_amsdu_cnt += srcobj->stats.tx.non_amsdu_cnt;
780 	tgtobj->tx.amsdu_cnt += srcobj->stats.tx.amsdu_cnt;
781 	tgtobj->tx.non_ampdu_cnt += srcobj->stats.tx.non_ampdu_cnt;
782 	tgtobj->tx.ampdu_cnt += srcobj->stats.tx.ampdu_cnt;
783 	tgtobj->tx.dropped.fw_rem.num += srcobj->stats.tx.dropped.fw_rem.num;
784 	tgtobj->tx.dropped.fw_rem.bytes +=
785 			srcobj->stats.tx.dropped.fw_rem.bytes;
786 	tgtobj->tx.dropped.fw_rem_tx +=
787 			srcobj->stats.tx.dropped.fw_rem_tx;
788 	tgtobj->tx.dropped.fw_rem_notx +=
789 			srcobj->stats.tx.dropped.fw_rem_notx;
790 	tgtobj->tx.dropped.fw_reason1 +=
791 			srcobj->stats.tx.dropped.fw_reason1;
792 	tgtobj->tx.dropped.fw_reason2 +=
793 			srcobj->stats.tx.dropped.fw_reason2;
794 	tgtobj->tx.dropped.fw_reason3 +=
795 			srcobj->stats.tx.dropped.fw_reason3;
796 	tgtobj->tx.dropped.age_out += srcobj->stats.tx.dropped.age_out;
797 	tgtobj->rx.err.mic_err += srcobj->stats.rx.err.mic_err;
798 	if (srcobj->stats.rx.rssi != 0)
799 		tgtobj->rx.rssi = srcobj->stats.rx.rssi;
800 	tgtobj->rx.rx_rate = srcobj->stats.rx.rx_rate;
801 	tgtobj->rx.err.decrypt_err += srcobj->stats.rx.err.decrypt_err;
802 	tgtobj->rx.non_ampdu_cnt += srcobj->stats.rx.non_ampdu_cnt;
803 	tgtobj->rx.ampdu_cnt += srcobj->stats.rx.ampdu_cnt;
804 	tgtobj->rx.non_amsdu_cnt += srcobj->stats.rx.non_amsdu_cnt;
805 	tgtobj->rx.amsdu_cnt += srcobj->stats.rx.amsdu_cnt;
806 	tgtobj->rx.nawds_mcast_drop += srcobj->stats.rx.nawds_mcast_drop;
807 	tgtobj->rx.to_stack.num += srcobj->stats.rx.to_stack.num;
808 	tgtobj->rx.to_stack.bytes += srcobj->stats.rx.to_stack.bytes;
809 
810 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
811 		tgtobj->rx.rcvd_reo[i].num +=
812 			srcobj->stats.rx.rcvd_reo[i].num;
813 		tgtobj->rx.rcvd_reo[i].bytes +=
814 			srcobj->stats.rx.rcvd_reo[i].bytes;
815 	}
816 
817 	srcobj->stats.rx.unicast.num =
818 		srcobj->stats.rx.to_stack.num -
819 				srcobj->stats.rx.multicast.num;
820 	srcobj->stats.rx.unicast.bytes =
821 		srcobj->stats.rx.to_stack.bytes -
822 				srcobj->stats.rx.multicast.bytes;
823 
824 	tgtobj->rx.unicast.num += srcobj->stats.rx.unicast.num;
825 	tgtobj->rx.unicast.bytes += srcobj->stats.rx.unicast.bytes;
826 	tgtobj->rx.multicast.num += srcobj->stats.rx.multicast.num;
827 	tgtobj->rx.multicast.bytes += srcobj->stats.rx.multicast.bytes;
828 	tgtobj->rx.bcast.num += srcobj->stats.rx.bcast.num;
829 	tgtobj->rx.bcast.bytes += srcobj->stats.rx.bcast.bytes;
830 	tgtobj->rx.raw.num += srcobj->stats.rx.raw.num;
831 	tgtobj->rx.raw.bytes += srcobj->stats.rx.raw.bytes;
832 	tgtobj->rx.intra_bss.pkts.num +=
833 			srcobj->stats.rx.intra_bss.pkts.num;
834 	tgtobj->rx.intra_bss.pkts.bytes +=
835 			srcobj->stats.rx.intra_bss.pkts.bytes;
836 	tgtobj->rx.intra_bss.fail.num +=
837 			srcobj->stats.rx.intra_bss.fail.num;
838 	tgtobj->rx.intra_bss.fail.bytes +=
839 			srcobj->stats.rx.intra_bss.fail.bytes;
840 	tgtobj->tx.last_ack_rssi =
841 		srcobj->stats.tx.last_ack_rssi;
842 	tgtobj->rx.mec_drop.num += srcobj->stats.rx.mec_drop.num;
843 	tgtobj->rx.mec_drop.bytes += srcobj->stats.rx.mec_drop.bytes;
844 	tgtobj->rx.multipass_rx_pkt_drop +=
845 		srcobj->stats.rx.multipass_rx_pkt_drop;
846 }
847 
848 #define DP_UPDATE_STATS(_tgtobj, _srcobj)	\
849 	do {				\
850 		uint8_t i;		\
851 		uint8_t pream_type;	\
852 		for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \
853 			for (i = 0; i < MAX_MCS; i++) { \
854 				DP_STATS_AGGR(_tgtobj, _srcobj, \
855 					tx.pkt_type[pream_type].mcs_count[i]); \
856 				DP_STATS_AGGR(_tgtobj, _srcobj, \
857 					rx.pkt_type[pream_type].mcs_count[i]); \
858 			} \
859 		} \
860 		  \
861 		for (i = 0; i < MAX_BW; i++) { \
862 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \
863 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \
864 		} \
865 		  \
866 		for (i = 0; i < SS_COUNT; i++) { \
867 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \
868 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \
869 		} \
870 		for (i = 0; i < WME_AC_MAX; i++) { \
871 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \
872 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \
873 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \
874 		\
875 		} \
876 		\
877 		for (i = 0; i < MAX_GI; i++) { \
878 			DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \
879 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \
880 		} \
881 		\
882 		for (i = 0; i < MAX_RECEPTION_TYPES; i++) \
883 			DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \
884 		\
885 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \
886 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \
887 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \
888 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \
889 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \
890 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \
891 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \
892 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \
893 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \
894 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \
895 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \
896 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \
897 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \
898 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \
899 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \
900 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \
901 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \
902 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \
903 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \
904 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \
905 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \
906 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \
907 		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \
908 								\
909 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \
910 		if (_srcobj->stats.rx.rssi != 0) \
911 			DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rssi); \
912 		DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \
913 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \
914 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \
915 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \
916 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \
917 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \
918 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \
919 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \
920 								\
921 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)	\
922 			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \
923 									\
924 		_srcobj->stats.rx.unicast.num = \
925 			_srcobj->stats.rx.to_stack.num - \
926 					_srcobj->stats.rx.multicast.num; \
927 		_srcobj->stats.rx.unicast.bytes = \
928 			_srcobj->stats.rx.to_stack.bytes - \
929 					_srcobj->stats.rx.multicast.bytes; \
930 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \
931 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \
932 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \
933 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \
934 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \
935 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \
936 		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \
937 								  \
938 		_tgtobj->stats.tx.last_ack_rssi =	\
939 			_srcobj->stats.tx.last_ack_rssi; \
940 		DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \
941 	}  while (0)
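/*
 * Usage sketch (illustrative only): DP_UPDATE_STATS folds one
 * source object's stats into a target, e.g. peer into vdev while
 * walking the vdev's peer list:
 *
 *   TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
 *       DP_UPDATE_STATS(vdev, peer);
 *
 * The iteration shown is schematic; serializing access to the peer
 * list is the caller's responsibility.
 */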
942 
943 extern int dp_peer_find_attach(struct dp_soc *soc);
944 extern void dp_peer_find_detach(struct dp_soc *soc);
945 extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer);
946 extern void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer);
947 extern void dp_peer_find_hash_erase(struct dp_soc *soc);
948 
949 /*
950  * dp_peer_ppdu_delayed_ba_init() - Initialize ppdu in peer
951  * @peer: Datapath peer
952  *
953  * return: void
954  */
955 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer);
956 
957 /*
958  * dp_peer_ppdu_delayed_ba_cleanup() - free ppdu allocated in peer
959  * @peer: Datapath peer
960  *
961  * return: void
962  */
963 void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer);
964 
965 extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer);
966 void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer);
967 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer,
968 		     bool reuse);
969 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer,
970 			bool reuse);
971 void dp_peer_unref_delete(struct dp_peer *peer);
972 extern void *dp_find_peer_by_addr(struct cdp_pdev *dev,
973 	uint8_t *peer_mac_addr);
974 extern struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
975 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id);
976 
977 #ifdef DP_PEER_EXTENDED_API
978 /**
979  * dp_register_peer() - Register peer into physical device
980  * @soc_hdl - data path soc handle
981  * @pdev_id - device instance id
982  * @sta_desc - peer description
983  *
984  * Register peer into physical device
985  *
986  * Return: QDF_STATUS_SUCCESS registration success
987  *         QDF_STATUS_E_FAULT peer not found
988  */
989 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
990 			    struct ol_txrx_desc_type *sta_desc);
991 
992 /**
993  * dp_clear_peer() - remove peer from physical device
994  * @soc_hdl - data path soc handle
995  * @pdev_id - device instance id
996  * @peer_addr - peer mac address
997  *
998  * remove peer from physical device
999  *
1000  * Return: QDF_STATUS_SUCCESS registration success
1001  *         QDF_STATUS_E_FAULT peer not found
1002  */
1003 QDF_STATUS dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1004 			 struct qdf_mac_addr peer_addr);
1005 
1006 /*
1007  * dp_find_peer_exist - find peer if already exists
1008  * @soc_hdl: datapath soc handle
1009  * @pdev_id: physical device instance id
1010  * @peer_addr: peer mac address
1011  *
1012  * Return: true or false
1013  */
1014 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1015 			uint8_t *peer_addr);
1016 
1017 /*
1018  * dp_find_peer_exist_on_vdev - find if peer exists on the given vdev
1019  * @soc_hdl: datapath soc handle
1020  * @vdev_id: vdev instance id
1021  * @peer_addr: peer mac address
1022  *
1023  * Return: true or false
1024  */
1025 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1026 				uint8_t *peer_addr);
1027 
1028 /*
1029  * dp_find_peer_exist_on_other_vdev - find if peer exists
1030  * on other than the given vdev
1031  * @soc_hdl: datapath soc handle
1032  * @vdev_id: vdev instance id
1033  * @peer_addr: peer mac address
1034  * @max_bssid: max number of bssids
1035  *
1036  * Return: true or false
1037  */
1038 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
1039 				      uint8_t vdev_id, uint8_t *peer_addr,
1040 				      uint16_t max_bssid);
1041 
1042 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
1043 		struct cdp_vdev *vdev,
1044 		uint8_t *peer_addr);
1045 
1046 /**
1047  * dp_peer_state_update() - update peer local state
1048  * @soc - datapath soc handle
1049  * @peer_mac - peer mac address
1050  * @state - new peer local state
1051  *
1052  * update peer local state
1053  *
1054  * Return: QDF_STATUS_SUCCESS registration success
1055  */
1056 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc, uint8_t *peer_mac,
1057 				enum ol_txrx_peer_state state);
1058 
1059 /**
1060  * dp_get_vdevid() - Get virtual interface id which peer registered
1061  * @soc_hdl - datapath soc handle
1062  * @peer_mac - peer mac address
1063  * @vdev_id - virtual interface id which peer registered
1064  *
1065  * Get virtual interface id which peer registered
1066  *
1067  * Return: QDF_STATUS_SUCCESS registration success
1068  */
1069 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
1070 			 uint8_t *vdev_id);
1071 struct cdp_vdev *dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
1072 		struct qdf_mac_addr peer_addr);
1073 struct cdp_vdev *dp_get_vdev_for_peer(void *peer);
1074 uint8_t *dp_peer_get_peer_mac_addr(void *peer);
1075 
1076 /**
1077  * dp_get_peer_state() - Get local peer state
1078  * @soc - datapath soc handle
1079  * @vdev_id - vdev id
1080  * @peer_mac - peer mac addr
1081  *
1082  * Get local peer state
1083  *
1084  * Return: peer status
1085  */
1086 int dp_get_peer_state(struct cdp_soc_t *soc, uint8_t vdev_id,
1087 		      uint8_t *peer_mac);
1088 void dp_local_peer_id_pool_init(struct dp_pdev *pdev);
1089 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer);
1090 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer);
1091 #else
1092 /**
1093  * dp_get_vdevid() - Get virtual interface id which peer registered
1094  * @soc_hdl - datapath soc handle
1095  * @peer_mac - peer mac address
1096  * @vdev_id - virtual interface id which peer registered
1097  *
1098  * Get virtual interface id which peer registered
1099  *
1100  * Return: QDF_STATUS_SUCCESS registration success
1101  */
1102 static inline
1103 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
1104 			 uint8_t *vdev_id)
1105 {
1106 	return QDF_STATUS_E_NOSUPPORT;
1107 }
1108 
1109 static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
1110 {
1111 }
1112 
1113 static inline
1114 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
1115 {
1116 }
1117 
1118 static inline
1119 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
1120 {
1121 }
1122 #endif
1123 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
1124 				      uint8_t *peer_mac, uint16_t vdev_id,
1125 				      uint8_t tid,
1126 				      int status);
1127 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
1128 				  uint8_t *peer_mac, uint16_t vdev_id,
1129 				  uint8_t dialogtoken, uint16_t tid,
1130 				  uint16_t batimeout,
1131 				  uint16_t buffersize,
1132 				  uint16_t startseqnum);
1133 QDF_STATUS dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc,
1134 					uint8_t *peer_mac, uint16_t vdev_id,
1135 					uint8_t tid, uint8_t *dialogtoken,
1136 					uint16_t *statuscode,
1137 					uint16_t *buffersize,
1138 					uint16_t *batimeout);
1139 QDF_STATUS dp_set_addba_response(struct cdp_soc_t *cdp_soc,
1140 				 uint8_t *peer_mac,
1141 				 uint16_t vdev_id, uint8_t tid,
1142 				 uint16_t statuscode);
1143 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1144 			   uint16_t vdev_id, int tid,
1145 			   uint16_t reasoncode);
1146 /*
1147  * dp_delba_tx_completion_wifi3() -  Handle delba tx completion
1148  *
1149  * @cdp_soc: soc handle
1150  * @vdev_id: id of the vdev handle
1151  * @peer_mac: peer mac address
1152  * @tid: Tid number
1153  * @status: Tx completion status
1154  * Indicate status of delba Tx to DP for stats update and retry
1155  * delba if tx failed.
1156  *
1157  */
1158 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1159 				 uint16_t vdev_id, uint8_t tid,
1160 				 int status);
1161 extern QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1162 					uint32_t ba_window_size,
1163 					uint32_t start_seq);
1164 
1165 extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc,
1166 	enum hal_reo_cmd_type type, struct hal_reo_cmd_params *params,
1167 	void (*callback_fn), void *data);
1168 
1169 extern void dp_reo_cmdlist_destroy(struct dp_soc *soc);
1170 
1171 /**
1172  * dp_reo_status_ring_handler - Handler for REO Status ring
1173  * @int_ctx: pointer to DP interrupt context
1174  * @soc: DP Soc handle
1175  *
1176  * Returns: Number of descriptors reaped
1177  */
1178 uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx,
1179 				    struct dp_soc *soc);
1180 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
1181 			     struct cdp_vdev_stats *vdev_stats);
1182 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1183 	union hal_reo_status *reo_status);
1184 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1185 		union hal_reo_status *reo_status);
1186 uint16_t dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id,
1187 				     qdf_nbuf_t nbuf,
1188 				     uint8_t newmac[][QDF_MAC_ADDR_SIZE],
1189 				     uint8_t new_mac_cnt);
1190 void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id);
1191 
1192 void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id);
1193 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
1194 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
1195 		uint32_t config_param_1, uint32_t config_param_2,
1196 		uint32_t config_param_3, int cookie, int cookie_msb,
1197 		uint8_t mac_id);
1198 void dp_htt_stats_print_tag(struct dp_pdev *pdev,
1199 			    uint8_t tag_type, uint32_t *tag_buf);
1200 void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf);
1201 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, uint32_t tuple_mask,
1202 				     uint8_t mac_id);
1203 /**
1204  * dp_rxtid_stats_cmd_cb - function pointer for peer
1205  *			   rx tid stats cmd callback
1206  */
1207 typedef void (*dp_rxtid_stats_cmd_cb)(struct dp_soc *soc, void *cb_ctxt,
1208 				      union hal_reo_status *reo_status);
1209 int dp_peer_rxtid_stats(struct dp_peer *peer,
1210 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
1211 			void *cb_ctxt);
1212 QDF_STATUS
1213 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
1214 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
1215 		      uint32_t *rx_pn);
1216 
1217 QDF_STATUS
1218 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
1219 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
1220 			  bool is_unicast);
1221 
1222 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id);
1223 
1224 QDF_STATUS
1225 dp_set_michael_key(struct cdp_soc_t *soc, uint8_t vdev_id,
1226 		   uint8_t *peer_mac,
1227 		   bool is_unicast, uint32_t *key);
1228 
1229 /**
1230  * dp_check_pdev_exists() - Validate pdev before use
1231  * @soc - dp soc handle
1232  * @data - pdev handle
1233  *
1234  * Return: true if pdev is found in soc's pdev list, false otherwise
1235  */
1236 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data);
1237 
1238 /**
1239  * dp_update_delay_stats() - Update delay statistics in structure
1240  *                              and fill min, max and avg delay
1241  * @pdev: pdev handle
1242  * @delay: delay in ms
1243  * @tid: tid value
1244  * @mode: type of tx delay mode
1245  * @ring_id: ring number
1246  *
1247  * Return: none
1248  */
1249 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
1250 			   uint8_t tid, uint8_t mode, uint8_t ring_id);
1251 
1252 /**
1253  * dp_print_ring_stats(): Print tail and head pointer
1254  * @pdev: DP_PDEV handle
1255  *
1256  * Return:void
1257  */
1258 void dp_print_ring_stats(struct dp_pdev *pdev);
1259 
1260 /**
1261  * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
1262  * @pdev_handle: DP pdev handle
1263  *
1264  * Return - void
1265  */
1266 void dp_print_pdev_cfg_params(struct dp_pdev *pdev);
1267 
1268 /**
1269  * dp_print_soc_cfg_params()- Dump soc wlan config parameters
1270  * @soc_handle: Soc handle
1271  *
1272  * Return: void
1273  */
1274 void dp_print_soc_cfg_params(struct dp_soc *soc);
1275 
1276 /**
1277  * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
1278  * @ring_type: Ring
1279  *
1280  * Return: char const pointer
1281  */
1282 const
1283 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type);
1284 
1285 /*
1286  * dp_txrx_path_stats() - Function to dump Tx/Rx path stats
1287  * @soc - soc handle
1288  *
1289  * return: none
1290  */
1291 void dp_txrx_path_stats(struct dp_soc *soc);
1292 
1293 /*
1294  * dp_print_per_ring_stats(): Packet count per ring
1295  * @soc - soc handle
1296  *
1297  * Return - None
1298  */
1299 void dp_print_per_ring_stats(struct dp_soc *soc);
1300 
1301 /**
1302  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
1303  * @pdev: DP PDEV handle
1304  *
1305  * return: void
1306  */
1307 void dp_aggregate_pdev_stats(struct dp_pdev *pdev);
1308 
1309 /**
1310  * dp_print_rx_rates(): Print Rx rate stats
1311  * @vdev: DP_VDEV handle
1312  *
1313  * Return:void
1314  */
1315 void dp_print_rx_rates(struct dp_vdev *vdev);
1316 
1317 /**
1318  * dp_print_tx_rates(): Print tx rates
1319  * @vdev: DP_VDEV handle
1320  *
1321  * Return:void
1322  */
1323 void dp_print_tx_rates(struct dp_vdev *vdev);
1324 
1325 /**
1326  * dp_print_peer_stats(): Print peer stats
1327  * @peer: DP_PEER handle
1328  *
1329  * return void
1330  */
1331 void dp_print_peer_stats(struct dp_peer *peer);
1332 
1333 /**
1334  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
1335  * @pdev: DP_PDEV Handle
1336  *
1337  * Return:void
1338  */
1339 void
1340 dp_print_pdev_tx_stats(struct dp_pdev *pdev);
1341 
1342 /**
1343  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
1344  * @pdev: DP_PDEV Handle
1345  *
1346  * Return: void
1347  */
1348 void
1349 dp_print_pdev_rx_stats(struct dp_pdev *pdev);
1350 
1351 /**
1352  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
1353  * @pdev: DP_PDEV Handle
1354  *
1355  * Return: void
1356  */
1357 void
1358 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev);
1359 
1360 /**
1361  * dp_print_soc_tx_stats(): Print SOC level Tx stats
1362  * @soc: DP_SOC Handle
1363  *
1364  * Return: void
1365  */
1366 void dp_print_soc_tx_stats(struct dp_soc *soc);
1367 
1368 /**
1369  * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc
1370  * @soc: dp_soc handle
1371  *
1372  * Return: None
1373  */
1374 void dp_print_soc_interrupt_stats(struct dp_soc *soc);
1375 
1376 /**
1377  * dp_print_soc_rx_stats: Print SOC level Rx stats
1378  * @soc: DP_SOC Handle
1379  *
1380  * Return:void
1381  */
1382 void dp_print_soc_rx_stats(struct dp_soc *soc);
1383 
1384 /**
1385  * dp_get_mac_id_for_pdev() - Return mac id corresponding to pdev
1386  *
1387  * @mac_id: MAC id
1388  * @pdev_id: pdev_id corresponding to pdev, 0 for MCL
1389  *
1390  * Single pdev using both MACs will operate on both MAC rings,
1391  * which is the case for MCL.
1392  * For WIN each PDEV will operate one ring, so index is zero.
1393  *
1394  */
1395 static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id)
1396 {
1397 	if (mac_id && pdev_id) {
1398 		qdf_print("Both mac_id and pdev_id cannot be non zero");
1399 		QDF_BUG(0);
1400 		return 0;
1401 	}
1402 	return (mac_id + pdev_id);
1403 }
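/*
 * Worked example (illustrative): on MCL a single pdev (pdev_id 0)
 * owns both MAC rings, so dp_get_mac_id_for_pdev(1, 0) == 1; on
 * WIN each pdev owns one ring, so dp_get_mac_id_for_pdev(0, 1) == 1.
 * Passing both arguments non-zero trips the QDF_BUG() above.
 */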
1404 
1405 /**
1406  * dp_get_lmac_id_for_pdev_id() -  Return lmac id corresponding to host pdev id
1407  * @soc: soc pointer
1408  * @mac_id: MAC id
1409  * @pdev_id: pdev_id corresponding to pdev, 0 for MCL
1410  *
1411  * For MCL, Single pdev using both MACs will operate on both MAC rings.
1412  *
1413  * For WIN, each PDEV will operate one ring.
1414  *
1415  */
1416 static inline int
1417 dp_get_lmac_id_for_pdev_id
1418 	(struct dp_soc *soc, uint32_t mac_id, uint32_t pdev_id)
1419 {
1420 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
1421 		if (mac_id && pdev_id) {
1422 			qdf_print("Both mac_id and pdev_id cannot be non zero");
1423 			QDF_BUG(0);
1424 			return 0;
1425 		}
1426 		return (mac_id + pdev_id);
1427 	}
1428 
1429 	return soc->pdev_list[pdev_id]->lmac_id;
1430 }
1431 
1432 /**
1433  * dp_get_pdev_for_lmac_id() -  Return pdev pointer corresponding to lmac id
1434  * @soc: soc pointer
1435  * @lmac_id: LMAC id
1436  *
1437  * For MCL, Single pdev exists
1438  *
1439  * For WIN, each PDEV will operate one ring.
1440  *
1441  */
1442 static inline struct dp_pdev *
1443 	dp_get_pdev_for_lmac_id(struct dp_soc *soc, uint32_t lmac_id)
1444 {
1445 	uint8_t i = 0;
1446 
1447 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
1448 		i = wlan_cfg_get_pdev_idx(soc->wlan_cfg_ctx, lmac_id);
1449 		return ((i < MAX_PDEV_CNT) ? soc->pdev_list[i] : NULL);
1450 	}
1451 
1452 	/* Typically for MCL, as there is only 1 PDEV */
1453 	return soc->pdev_list[0];
1454 }
1455 
1456 /**
1457  * dp_calculate_target_pdev_id_from_host_pdev_id() - Return target pdev
1458  *                                          corresponding to host pdev id
1459  * @soc: soc pointer
1460  * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL
1461  *
1462  * returns target pdev_id for host pdev id. For WIN, this is derived through
1463  * a two step process:
1464  * 1. Get lmac_id corresponding to host pdev_id (lmac_id can change
1465  *    during mode switch)
1466  * 2. Get target pdev_id (set up during WMI ready) from lmac_id
1467  *
1468  * For MCL, return the offset-1 translated mac_id
1469  */
1470 static inline int
1471 dp_calculate_target_pdev_id_from_host_pdev_id
1472 	(struct dp_soc *soc, uint32_t mac_for_pdev)
1473 {
1474 	struct dp_pdev *pdev;
1475 
1476 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
1477 		return DP_SW2HW_MACID(mac_for_pdev);
1478 
1479 	pdev = soc->pdev_list[mac_for_pdev];
1480 
1481 	/*non-MCL case, get original target_pdev mapping*/
1482 	return wlan_cfg_get_target_pdev_id(soc->wlan_cfg_ctx, pdev->lmac_id);
1483 }
1484 
1485 /**
1486  * dp_get_target_pdev_id_for_host_pdev_id() - Return target pdev corresponding
1487  *                                         to host pdev id
1488  * @soc: soc pointer
1489  * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL
1490  *
1491  * returns target pdev_id for host pdev id.
1492  * For WIN, return the value stored in pdev object.
1493  * For MCL, return the offset-1 translated mac_id.
1494  */
1495 static inline int
1496 dp_get_target_pdev_id_for_host_pdev_id
1497 	(struct dp_soc *soc, uint32_t mac_for_pdev)
1498 {
1499 	struct dp_pdev *pdev;
1500 
1501 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
1502 		return DP_SW2HW_MACID(mac_for_pdev);
1503 
1504 	pdev = soc->pdev_list[mac_for_pdev];
1505 
1506 	return pdev->target_pdev_id;
1507 }
1508 
1509 /**
1510  * dp_get_host_pdev_id_for_target_pdev_id() - Return host pdev corresponding
1511  *                                         to target pdev id
1512  * @soc: soc pointer
1513  * @pdev_id: pdev_id corresponding to target pdev
1514  *
1515  * returns host pdev_id for target pdev id. For WIN, this is derived through
1516  * a two step process:
1517  * 1. Get lmac_id corresponding to target pdev_id
1518  * 2. Get host pdev_id (set up during WMI ready) from lmac_id
1519  *
1520  * For MCL, return the 0-offset pdev_id
1521  */
1522 static inline int
1523 dp_get_host_pdev_id_for_target_pdev_id
1524 	(struct dp_soc *soc, uint32_t pdev_id)
1525 {
1526 	struct dp_pdev *pdev;
1527 	int lmac_id;
1528 
1529 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
1530 		return DP_HW2SW_MACID(pdev_id);
1531 
1532 	/*non-MCL case, get original target_lmac mapping from target pdev*/
1533 	lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx,
1534 					  DP_HW2SW_MACID(pdev_id));
1535 
1536 	/*Get host pdev from lmac*/
1537 	pdev = dp_get_pdev_for_lmac_id(soc, lmac_id);
1538 
1539 	return pdev ? pdev->pdev_id : INVALID_PDEV_ID;
1540 }
1541 
1542 /*
1543  * dp_get_mac_id_for_mac() - Return mac id corresponding to WIN and MCL mac ids
1544  *
1545  * @soc: handle to DP soc
1546  * @mac_id: MAC id
1547  *
1548  * Single pdev using both MACs will operate on both MAC rings,
1549  * which is the case for MCL.
1550  * For WIN each PDEV will operate one ring, so index is zero.
1551  *
1552  */
1553 static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id)
1554 {
1555 	/*
1556 	 * Single pdev using both MACs will operate on both MAC rings,
1557 	 * which is the case for MCL.
1558 	 */
1559 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
1560 		return mac_id;
1561 
1562 	/* For WIN each PDEV will operate one ring, so index is zero. */
1563 	return 0;
1564 }
1565 
1566 /*
1567  * dp_is_subtype_data() - check if the frame subtype is data
1568  *
1569  * @frame_ctrl: Frame control field
1570  *
1571  * check the frame control field and verify if the packet
1572  * is a data packet.
1573  *
1574  * Return: true or false
1575  */
static inline bool dp_is_subtype_data(uint16_t frame_ctrl)
{
	uint16_t fc = qdf_cpu_to_le16(frame_ctrl);

	if (((fc & QDF_IEEE80211_FC0_TYPE_MASK) ==
	     QDF_IEEE80211_FC0_TYPE_DATA) &&
	    (((fc & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
	      QDF_IEEE80211_FC0_SUBTYPE_DATA) ||
	     ((fc & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
	      QDF_IEEE80211_FC0_SUBTYPE_QOS)))
		return true;

	return false;
}
1589 
1590 #ifdef WDI_EVENT_ENABLE
1591 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
1592 				uint32_t stats_type_upload_mask,
1593 				uint8_t mac_id);
1594 
1595 int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id,
1596 		       wdi_event_subscribe *event_cb_sub_handle,
1597 		       uint32_t event);
1598 
1599 int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id,
1600 		     wdi_event_subscribe *event_cb_sub_handle,
1601 		     uint32_t event);
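
/*
 * Usage sketch (illustrative): a consumer registers a callback for a WDI
 * event and later unsubscribes with the same handle. The handler name and
 * the wdi_event_subscribe field names (callback/context) are assumptions
 * for illustration:
 *
 *	static wdi_event_subscribe rx_desc_sub = {
 *		.callback = my_rx_desc_cb,
 *		.context  = my_ctx,
 *	};
 *
 *	dp_wdi_event_sub(soc, pdev_id, &rx_desc_sub, WDI_EVENT_RX_DESC);
 *	...
 *	dp_wdi_event_unsub(soc, pdev_id, &rx_desc_sub, WDI_EVENT_RX_DESC);
 */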
1602 
1603 void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc,
1604 			  void *data, u_int16_t peer_id,
1605 			  int status, u_int8_t pdev_id);
1606 
1607 int dp_wdi_event_attach(struct dp_pdev *txrx_pdev);
1608 int dp_wdi_event_detach(struct dp_pdev *txrx_pdev);
1609 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
1610 	bool enable);
1611 
1612 /**
1613  * dp_get_pldev() - function to get pktlog device handle
1614  * @soc_hdl: datapath soc handle
1615  * @pdev_id: physical device id
1616  *
1617  * Return: pktlog device handle or NULL
1618  */
1619 void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
1620 void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn);
1621 
1622 static inline void
1623 dp_hif_update_pipe_callback(struct dp_soc *dp_soc,
1624 			    void *cb_context,
1625 			    QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t),
1626 			    uint8_t pipe_id)
1627 {
1628 	struct hif_msg_callbacks hif_pipe_callbacks;
1629 
1630 	/* TODO: Temporary change to bypass HTC connection for this new
1631 	 * HIF pipe, which will be used for packet log and other high-
1632 	 * priority HTT messages. Proper HTC connection to be added
1633 	 * later once required FW changes are available
1634 	 */
1635 	hif_pipe_callbacks.rxCompletionHandler = callback;
1636 	hif_pipe_callbacks.Context = cb_context;
1637 	hif_update_pipe_callback(dp_soc->hif_handle,
1638 		DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks);
1639 }
1640 
1641 QDF_STATUS dp_peer_stats_notify(struct dp_pdev *pdev, struct dp_peer *peer);
1642 
1643 #else
1644 static inline int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id,
1645 				     wdi_event_subscribe *event_cb_sub_handle,
1646 				     uint32_t event)
1647 {
1648 	return 0;
1649 }
1650 
1651 static inline int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id,
1652 				   wdi_event_subscribe *event_cb_sub_handle,
1653 				   uint32_t event)
1654 {
1655 	return 0;
1656 }
1657 
1658 static inline
1659 void dp_wdi_event_handler(enum WDI_EVENT event,
1660 			  struct dp_soc *soc,
1661 			  void *data, u_int16_t peer_id,
1662 			  int status, u_int8_t pdev_id)
1663 {
1664 }
1665 
1666 static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev)
1667 {
1668 	return 0;
1669 }
1670 
1671 static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev)
1672 {
1673 	return 0;
1674 }
1675 
1676 static inline int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
1677 	bool enable)
1678 {
1679 	return 0;
1680 }
1681 static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
1682 		uint32_t stats_type_upload_mask, uint8_t mac_id)
1683 {
1684 	return 0;
1685 }
1686 
1687 static inline void
1688 dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
1689 {
1690 }
1691 
1692 static inline void
1693 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context,
1694 			    QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t),
1695 			    uint8_t pipe_id)
1696 {
1697 }
1698 
1699 static inline QDF_STATUS dp_peer_stats_notify(struct dp_pdev *pdev,
1700 					      struct dp_peer *peer)
1701 {
1702 	return QDF_STATUS_SUCCESS;
1703 }
1704 
#endif /* WDI_EVENT_ENABLE */
1706 
1707 #ifdef VDEV_PEER_PROTOCOL_COUNT
1708 /**
1709  * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters
1710  * @vdev: VDEV DP object
1711  * @nbuf: data packet
1712  * @peer: Peer DP object
1713  * @is_egress: whether egress or ingress
1714  * @is_rx: whether rx or tx
1715  *
1716  * This function updates the per-peer protocol counters
1717  * Return: void
1718  */
1719 void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev,
1720 					    qdf_nbuf_t nbuf,
1721 					    struct dp_peer *peer,
1722 					    bool is_egress,
1723 					    bool is_rx);
1724 
1725 /**
1726  * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters
1727  * @soc: SOC DP object
1728  * @vdev_id: vdev_id
1729  * @nbuf: data packet
1730  * @is_egress: whether egress or ingress
1731  * @is_rx: whether rx or tx
1732  *
1733  * This function updates the per-peer protocol counters
1734  * Return: void
1735  */
1736 
1737 void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc,
1738 				       int8_t vdev_id,
1739 				       qdf_nbuf_t nbuf,
1740 				       bool is_egress,
1741 				       bool is_rx);
1742 
#else
#define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, peer, \
					       is_egress, is_rx)
#define dp_peer_stats_update_protocol_cnt(soc, vdev_id, nbuf, \
					  is_egress, is_rx)
#endif
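
/*
 * Usage sketch (illustrative): the rx ingress path, for example, would
 * update the counters with is_egress = false and is_rx = true:
 *
 *	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, peer,
 *					       false, true);
 *
 * When VDEV_PEER_PROTOCOL_COUNT is not defined, the call compiles away
 * through the no-op macros above.
 */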
1747 
1748 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
1749 void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl);
1750 int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
1751 	bool force);
1752 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
1753 
1754 #ifdef PEER_PROTECTED_ACCESS
1755 /**
1756  * dp_peer_unref_del_find_by_id() - dec ref and del peer if ref count is
1757  *                                  taken by dp_peer_find_by_id
1758  * @peer: peer context
1759  *
1760  * Return: none
1761  */
1762 static inline void dp_peer_unref_del_find_by_id(struct dp_peer *peer)
1763 {
1764 	dp_peer_unref_delete(peer);
1765 }
1766 #else
1767 static inline void dp_peer_unref_del_find_by_id(struct dp_peer *peer)
1768 {
1769 }
1770 #endif
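
/*
 * Usage sketch (illustrative): this helper pairs with dp_peer_find_by_id(),
 * which takes a reference on the peer when PEER_PROTECTED_ACCESS is
 * enabled:
 *
 *	peer = dp_peer_find_by_id(soc, peer_id);
 *	if (peer) {
 *		... use peer ...
 *		dp_peer_unref_del_find_by_id(peer);
 *	}
 */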
1771 
1772 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1773 /**
1774  * dp_srng_access_start() - Wrapper function to log access start of a hal ring
1775  * @int_ctx: pointer to DP interrupt context. This should not be NULL
1776  * @soc: DP Soc handle
1777  * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
1778  *
1779  * Return: 0 on success; error on failure
1780  */
1781 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
1782 			 hal_ring_handle_t hal_ring_hdl);
1783 
1784 /**
1785  * dp_srng_access_end() - Wrapper function to log access end of a hal ring
1786  * @int_ctx: pointer to DP interrupt context. This should not be NULL
1787  * @soc: DP Soc handle
1788  * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
1789  *
1790  * Return: void
1791  */
1792 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
1793 			hal_ring_handle_t hal_ring_hdl);
1794 
1795 #else
1796 
1797 static inline int dp_srng_access_start(struct dp_intr *int_ctx,
1798 				       struct dp_soc *dp_soc,
1799 				       hal_ring_handle_t hal_ring_hdl)
1800 {
1801 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1802 
1803 	return hal_srng_access_start(hal_soc, hal_ring_hdl);
1804 }
1805 
1806 static inline void dp_srng_access_end(struct dp_intr *int_ctx,
1807 				      struct dp_soc *dp_soc,
1808 				      hal_ring_handle_t hal_ring_hdl)
1809 {
1810 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1811 
1812 	return hal_srng_access_end(hal_soc, hal_ring_hdl);
1813 }
1814 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
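
/*
 * Usage sketch (illustrative): ring processing loops bracket HAL accesses
 * with these wrappers so that, when WLAN_FEATURE_DP_EVENT_HISTORY is
 * enabled, the access window is recorded against the interrupt context.
 * hal_soc, int_ctx and the ring handle come from the caller's context:
 *
 *	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl)))
 *		return 0;
 *	while ((ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl)))
 *		... process ring_desc ...
 *	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
 */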
1815 
1816 #ifdef QCA_CACHED_RING_DESC
1817 /**
1818  * dp_srng_dst_get_next() - Wrapper function to get next ring desc
1819  * @dp_socsoc: DP Soc handle
1820  * @hal_ring: opaque pointer to the HAL Destination Ring
1821  *
1822  * Return: HAL ring descriptor
1823  */
1824 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc,
1825 					 hal_ring_handle_t hal_ring_hdl)
1826 {
1827 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1828 
1829 	return hal_srng_dst_get_next_cached(hal_soc, hal_ring_hdl);
1830 }
1831 
1832 /**
1833  * dp_srng_dst_inv_cached_descs() - Wrapper function to invalidate cached
1834  * descriptors
1835  * @dp_socsoc: DP Soc handle
1836  * @hal_ring: opaque pointer to the HAL Rx Destination ring
1837  * @num_entries: Entry count
1838  *
1839  * Return: None
1840  */
1841 static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc,
1842 						hal_ring_handle_t hal_ring_hdl,
1843 						uint32_t num_entries)
1844 {
1845 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1846 
1847 	hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num_entries);
1848 }
1849 #else
1850 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc,
1851 					 hal_ring_handle_t hal_ring_hdl)
1852 {
1853 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1854 
1855 	return hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1856 }
1857 
1858 static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc,
1859 						hal_ring_handle_t hal_ring_hdl,
1860 						uint32_t num_entries)
1861 {
1862 }
1863 #endif /* QCA_CACHED_RING_DESC */
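
/*
 * Usage sketch (illustrative): with QCA_CACHED_RING_DESC enabled, a reap
 * loop first invalidates the cached descriptors it is about to read, so
 * that it observes DMA-coherent data, then fetches them one by one:
 *
 *	num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
 *	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);
 *	while (num_pending-- &&
 *	       (desc = dp_srng_dst_get_next(soc, hal_ring_hdl)))
 *		... process desc ...
 */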
1864 
1865 #ifdef QCA_ENH_V3_STATS_SUPPORT
1866 /**
1867  * dp_pdev_print_delay_stats(): Print pdev level delay stats
1868  * @pdev: DP_PDEV handle
1869  *
 * Return: void
1871  */
1872 void dp_pdev_print_delay_stats(struct dp_pdev *pdev);
1873 
1874 /**
1875  * dp_pdev_print_tid_stats(): Print pdev level tid stats
1876  * @pdev: DP_PDEV handle
1877  *
 * Return: void
1879  */
1880 void dp_pdev_print_tid_stats(struct dp_pdev *pdev);
#endif /* QCA_ENH_V3_STATS_SUPPORT */
1882 
1883 void dp_soc_set_txrx_ring_map(struct dp_soc *soc);
1884 
1885 #ifndef WLAN_TX_PKT_CAPTURE_ENH
1886 /**
1887  * dp_tx_ppdu_stats_attach - Initialize Tx PPDU stats and enhanced capture
1888  * @pdev: DP PDEV
1889  *
1890  * Return: none
1891  */
1892 static inline void dp_tx_ppdu_stats_attach(struct dp_pdev *pdev)
1893 {
1894 }
1895 
1896 /**
1897  * dp_tx_ppdu_stats_detach - Cleanup Tx PPDU stats and enhanced capture
1898  * @pdev: DP PDEV
1899  *
1900  * Return: none
1901  */
1902 static inline void dp_tx_ppdu_stats_detach(struct dp_pdev *pdev)
1903 {
1904 }
1905 
1906 /**
1907  * dp_tx_ppdu_stats_process - Deferred PPDU stats handler
1908  * @context: Opaque work context (PDEV)
1909  *
1910  * Return: none
1911  */
static inline void dp_tx_ppdu_stats_process(void *context)
1913 {
1914 }
1915 
1916 /**
1917  * dp_tx_add_to_comp_queue() - add completion msdu to queue
1918  * @soc: DP Soc handle
1919  * @tx_desc: software Tx descriptor
1920  * @ts : Tx completion status from HAL/HTT descriptor
1921  * @peer: DP peer
1922  *
1923  * Return: none
1924  */
1925 static inline
1926 QDF_STATUS dp_tx_add_to_comp_queue(struct dp_soc *soc,
1927 				   struct dp_tx_desc_s *desc,
1928 				   struct hal_tx_completion_status *ts,
1929 				   struct dp_peer *peer)
1930 {
1931 	return QDF_STATUS_E_FAILURE;
1932 }
1933 
1934 /*
1935  * dp_tx_capture_htt_frame_counter: increment counter for htt_frame_type
1936  * pdev: DP pdev handle
1937  * htt_frame_type: htt frame type received from fw
1938  *
1939  * return: void
1940  */
1941 static inline
1942 void dp_tx_capture_htt_frame_counter(struct dp_pdev *pdev,
1943 				     uint32_t htt_frame_type)
1944 {
1945 }
1946 
1947 /*
1948  * dp_tx_cature_stats: print tx capture stats
1949  * @pdev: DP PDEV handle
1950  *
1951  * return: void
1952  */
1953 static inline
1954 void dp_print_pdev_tx_capture_stats(struct dp_pdev *pdev)
1955 {
1956 }
1957 
1958 /*
1959  * dp_peer_tx_capture_filter_check: check filter is enable for the filter
1960  * and update tx_cap_enabled flag
1961  * @pdev: DP PDEV handle
1962  * @peer: DP PEER handle
1963  *
1964  * return: void
1965  */
1966 static inline
1967 void dp_peer_tx_capture_filter_check(struct dp_pdev *pdev,
1968 				     struct dp_peer *peer)
1969 {
1970 }
1971 
1972 /*
1973  * dp_tx_capture_debugfs_init: tx capture debugfs init
1974  * @pdev: DP PDEV handle
1975  *
1976  * return: QDF_STATUS
1977  */
1978 static inline
1979 QDF_STATUS dp_tx_capture_debugfs_init(struct dp_pdev *pdev)
1980 {
1981 	return QDF_STATUS_E_FAILURE;
1982 }
1983 #endif
1984 
1985 #ifdef FEATURE_PERPKT_INFO
1986 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf);
1987 #else
1988 static inline
1989 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
1990 {
1991 }
1992 #endif
1993 
1994 /**
1995  * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev
1996  * @vdev: DP vdev handle
1997  *
1998  * Return: struct cdp_vdev pointer
1999  */
2000 static inline
2001 struct cdp_vdev *dp_vdev_to_cdp_vdev(struct dp_vdev *vdev)
2002 {
2003 	return (struct cdp_vdev *)vdev;
2004 }
2005 
2006 /**
2007  * dp_pdev_to_cdp_pdev() - typecast dp pdev to cdp pdev
2008  * @pdev: DP pdev handle
2009  *
2010  * Return: struct cdp_pdev pointer
2011  */
2012 static inline
2013 struct cdp_pdev *dp_pdev_to_cdp_pdev(struct dp_pdev *pdev)
2014 {
2015 	return (struct cdp_pdev *)pdev;
2016 }
2017 
2018 /**
2019  * dp_soc_to_cdp_soc() - typecast dp psoc to cdp psoc
2020  * @psoc: DP psoc handle
2021  *
2022  * Return: struct cdp_soc pointer
2023  */
2024 static inline
2025 struct cdp_soc *dp_soc_to_cdp_soc(struct dp_soc *psoc)
2026 {
2027 	return (struct cdp_soc *)psoc;
2028 }
2029 
2030 /**
2031  * dp_soc_to_cdp_soc_t() - typecast dp psoc to
2032  * ol txrx soc handle
2033  * @psoc: DP psoc handle
2034  *
2035  * Return: struct cdp_soc_t pointer
2036  */
2037 static inline
2038 struct cdp_soc_t *dp_soc_to_cdp_soc_t(struct dp_soc *psoc)
2039 {
2040 	return (struct cdp_soc_t *)psoc;
2041 }
2042 
2043 /**
2044  * cdp_soc_t_to_dp_soc() - typecast cdp_soc_t to
2045  * dp soc handle
2046  * @psoc: CDP psoc handle
2047  *
2048  * Return: struct dp_soc pointer
2049  */
2050 static inline
2051 struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc)
2052 {
2053 	return (struct dp_soc *)psoc;
2054 }
2055 
2056 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
2057 /**
2058  * dp_rx_flow_update_fse_stats() - Update a flow's statistics
2059  * @pdev: pdev handle
2060  * @flow_id: flow index (truncated hash) in the Rx FST
2061  *
2062  * Return: Success when flow statistcs is updated, error on failure
2063  */
2064 QDF_STATUS dp_rx_flow_get_fse_stats(struct dp_pdev *pdev,
2065 				    struct cdp_rx_flow_info *rx_flow_info,
2066 				    struct cdp_flow_stats *stats);
2067 
2068 /**
2069  * dp_rx_flow_delete_entry() - Delete a flow entry from flow search table
2070  * @pdev: pdev handle
2071  * @rx_flow_info: DP flow parameters
2072  *
2073  * Return: Success when flow is deleted, error on failure
2074  */
2075 QDF_STATUS dp_rx_flow_delete_entry(struct dp_pdev *pdev,
2076 				   struct cdp_rx_flow_info *rx_flow_info);
2077 
2078 /**
2079  * dp_rx_flow_add_entry() - Add a flow entry to flow search table
2080  * @pdev: DP pdev instance
 * @rx_flow_info: DP flow parameters
2082  *
2083  * Return: Success when flow is added, no-memory or already exists on error
2084  */
2085 QDF_STATUS dp_rx_flow_add_entry(struct dp_pdev *pdev,
2086 				struct cdp_rx_flow_info *rx_flow_info);
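
/*
 * Usage sketch (illustrative): a flow is added by describing its tuple in
 * a cdp_rx_flow_info and can later be deleted with the same descriptor.
 * The field names inside cdp_rx_flow_info are assumptions for
 * illustration:
 *
 *	struct cdp_rx_flow_info info = { .is_addr_ipv4 = true };
 *
 *	... fill info.flow_tuple_info with src/dst IP, ports, L4 proto ...
 *	if (dp_rx_flow_add_entry(pdev, &info) == QDF_STATUS_SUCCESS)
 *		... flow installed in the FST ...
 *	dp_rx_flow_delete_entry(pdev, &info);
 */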
2087 
2088 /**
2089  * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
2090  * @soc: SoC handle
2091  * @pdev: Pdev handle
2092  *
 * Return: Success when the Rx FST is attached, error otherwise
2094  */
2095 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev);
2096 
2097 /**
2098  * dp_rx_fst_detach() - De-initialize Rx FST
2099  * @soc: SoC handle
2100  * @pdev: Pdev handle
2101  *
2102  * Return: None
2103  */
2104 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev);
2105 
2106 /**
2107  * dp_rx_flow_send_fst_fw_setup() - Program FST parameters in FW/HW post-attach
2108  * @soc: SoC handle
2109  * @pdev: Pdev handle
2110  *
2111  * Return: Success when fst parameters are programmed in FW, error otherwise
2112  */
2113 QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc,
2114 					struct dp_pdev *pdev);
#else /* !(defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)) */
2116 
2117 /**
2118  * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
2119  * @soc: SoC handle
2120  * @pdev: Pdev handle
2121  *
 * Return: Success (stub used when Rx flow tag and FISA are not supported)
2123  */
2124 static inline
2125 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev)
2126 {
2127 	return QDF_STATUS_SUCCESS;
2128 }
2129 
2130 /**
2131  * dp_rx_fst_detach() - De-initialize Rx FST
2132  * @soc: SoC handle
2133  * @pdev: Pdev handle
2134  *
2135  * Return: None
2136  */
2137 static inline
2138 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev)
2139 {
2140 }
2141 #endif
2142 
2143 /**
2144  * dp_get_vdev_from_soc_vdev_id_wifi3() - Returns vdev object given the vdev id
2145  * @soc: core DP soc context
 * @vdev_id: vdev id from which the vdev object can be retrieved
2147  *
2148  * Return: struct dp_vdev*: Pointer to DP vdev object
2149  */
2150 static inline struct dp_vdev *
2151 dp_get_vdev_from_soc_vdev_id_wifi3(struct dp_soc *soc,
2152 				   uint8_t vdev_id)
2153 {
2154 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
2155 		return NULL;
2156 
2157 	return soc->vdev_id_map[vdev_id];
2158 }
2159 
2160 /**
2161  * dp_get_pdev_from_soc_pdev_id_wifi3() - Returns pdev object given the pdev id
2162  * @soc: core DP soc context
 * @pdev_id: pdev id from which the pdev object can be retrieved
2164  *
2165  * Return: struct dp_pdev*: Pointer to DP pdev object
2166  */
2167 static inline struct dp_pdev *
2168 dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc *soc,
2169 				   uint8_t pdev_id)
2170 {
2171 	if (qdf_unlikely(pdev_id >= MAX_PDEV_CNT))
2172 		return NULL;
2173 
2174 	return soc->pdev_list[pdev_id];
2175 }
2176 
2177 /*
2178  * dp_rx_tid_update_wifi3() – Update receive TID state
2179  * @peer: Datapath peer handle
2180  * @tid: TID
2181  * @ba_window_size: BlockAck window size
2182  * @start_seq: Starting sequence number
2183  *
2184  * Return: QDF_STATUS code
2185  */
2186 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
2187 					 ba_window_size, uint32_t start_seq);
2188 
2189 /**
2190  * dp_get_peer_mac_list(): function to get peer mac list of vdev
2191  * @soc: Datapath soc handle
2192  * @vdev_id: vdev id
2193  * @newmac: Table of the clients mac
2194  * @mac_cnt: No. of MACs required
2195  *
2196  * return: no of clients
2197  */
2198 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
2199 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
2200 			      u_int16_t mac_cnt);
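
/*
 * Usage sketch (illustrative): callers provide a fixed-size table and use
 * the returned count to walk only the filled entries:
 *
 *	u_int8_t macs[8][QDF_MAC_ADDR_SIZE];
 *	uint16_t n = dp_get_peer_mac_list(soc, vdev_id, macs, 8);
 *
 *	for (i = 0; i < n; i++)
 *		... macs[i] holds one client MAC address ...
 */
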
2201 /*
2202  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
2203  * @soc:		DP SoC context
2204  * @max_mac_rings:	No of MAC rings
2205  *
2206  * Return: None
2207  */
2208 void dp_is_hw_dbs_enable(struct dp_soc *soc,
2209 				int *max_mac_rings);
2210 
2211 
2212 #if defined(WLAN_SUPPORT_RX_FISA)
2213 void dp_rx_dump_fisa_table(struct dp_soc *soc);
2214 #endif /* WLAN_SUPPORT_RX_FISA */
2215 
2216 #ifdef MAX_ALLOC_PAGE_SIZE
2217 /**
 * dp_set_max_page_size() - Set the max page size for hw link desc.
2219  * For MCL the page size is set to OS defined value and for WIN
2220  * the page size is set to the max_alloc_size cfg ini
2221  * param.
2222  * This is to ensure that WIN gets contiguous memory allocations
2223  * as per requirement.
2224  * @pages: link desc page handle
2225  * @max_alloc_size: max_alloc_size
2226  *
2227  * Return: None
2228  */
2229 static inline
2230 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
2231 			  uint32_t max_alloc_size)
2232 {
2233 	pages->page_size = qdf_page_size;
2234 }
2235 
2236 #else
2237 static inline
2238 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
2239 			  uint32_t max_alloc_size)
2240 {
2241 	pages->page_size = max_alloc_size;
2242 }
2243 #endif /* MAX_ALLOC_PAGE_SIZE */
2244 
2245 /**
2246  * dp_history_get_next_index() - get the next entry to record an entry
2247  *				 in the history.
2248  * @curr_idx: Current index where the last entry is written.
2249  * @max_entries: Max number of entries in the history
2250  *
 * This function assumes that the max number of entries is a power of 2.
2252  *
2253  * Returns: The index where the next entry is to be written.
2254  */
2255 static inline uint32_t dp_history_get_next_index(qdf_atomic_t *curr_idx,
2256 						 uint32_t max_entries)
2257 {
2258 	uint32_t idx = qdf_atomic_inc_return(curr_idx);
2259 
2260 	return idx & (max_entries - 1);
2261 }
2262 
2263 /**
2264  * dp_rx_skip_tlvs() - Skip TLVs len + L2 hdr_offset, save in nbuf->cb
2265  * @nbuf: nbuf cb to be updated
2266  * @l2_hdr_offset: l2_hdr_offset
2267  *
2268  * Return: None
2269  */
2270 void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding);
2271 
2272 /**
2273  * dp_soc_is_full_mon_enable () - Return if full monitor mode is enabled
2274  * @soc: DP soc handle
2275  *
2276  * Return: Full monitor mode status
2277  */
2278 static inline bool dp_soc_is_full_mon_enable(struct dp_pdev *pdev)
2279 {
	return pdev->soc->full_mon_mode && pdev->monitor_configured;
2282 }
2283 
2284 #ifndef FEATURE_WDS
2285 static inline void
2286 dp_hmwds_ast_add_notify(struct dp_peer *peer,
2287 			uint8_t *mac_addr,
2288 			enum cdp_txrx_ast_entry_type type,
2289 			QDF_STATUS err,
2290 			bool is_peer_map)
2291 {
2292 }
2293 #endif
2294 #endif /* #ifndef _DP_INTERNAL_H_ */
2295