/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_INTERNAL_H_
#define _DP_INTERNAL_H_

#include "dp_types.h"

#define RX_BUFFER_SIZE_PKTLOG_LITE 1024

#define DP_PEER_WDS_COUNT_INVALID UINT_MAX

/* Alignment for consistent memory for DP rings */
#define DP_RING_BASE_ALIGN 32

#define DP_RSSI_INVAL 0x80
#define DP_RSSI_AVG_WEIGHT 2
/*
 * Formula to derive avg_rssi is taken from wifi2.0 firmware
 */
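/*
 * With DP_RSSI_AVG_WEIGHT = 2 this is an exponentially weighted moving
 * average with weight 1/4:
 *	avg_rssi' = avg_rssi - (avg_rssi >> 2) + (last_rssi >> 2)
 * (the shifts operate on the uint8_t-truncated values).
 */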
#define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \
	(((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \
	+ ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT)))

/* Macro for NYSM value received in VHT TLV */
#define VHT_SGI_NYSM 3

/**
 * struct htt_dbgfs_cfg - structure to maintain required htt data
 * @msg_word: htt msg sent to upper layer
 * @m: qdf debugfs file pointer
 */
struct htt_dbgfs_cfg {
	uint32_t *msg_word;
	qdf_debugfs_file_t m;
};

/* Cookie MSB bits assigned for different use cases.
 * Note: Users can't use the last 3 bits, as they are reserved for pdev_id,
 * in case the number of pdevs grows beyond 3 in the future.
 */
/* Reserve for default case */
#define DBG_STATS_COOKIE_DEFAULT 0x0

/* Reserve for DP Stats: 3rd bit */
#define DBG_STATS_COOKIE_DP_STATS 0x8

/* Reserve for HTT Stats debugfs support: 4th bit */
#define DBG_STATS_COOKIE_HTT_DBGFS 0x10

/**
 * Bitmap of HTT PPDU TLV types for Default mode
 */
#define HTT_PPDU_DEFAULT_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/* PPDU STATS CFG */
#define DP_PPDU_STATS_CFG_ALL 0xFFFF

/* PPDU stats mask sent to FW to enable enhanced stats */
#define DP_PPDU_STATS_CFG_ENH_STATS \
	((HTT_PPDU_DEFAULT_TLV_BITMAP) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support debug sniffer feature */
#define DP_PPDU_STATS_CFG_SNIFFER \
	((HTT_PPDU_DEFAULT_TLV_BITMAP) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
	(1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support BPR feature */
#define DP_PPDU_STATS_CFG_BPR \
	((1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
				   DP_PPDU_STATS_CFG_ENH_STATS)
/* PPDU stats mask sent to FW to support BPR and pktlog stats feature */
#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)

/**
 * Bitmap of HTT PPDU delayed ba TLV types for Default mode
 */
#define HTT_PPDU_DELAYED_BA_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV))

/**
 * Bitmap of HTT PPDU TLV types for Delayed BA
 */
#define HTT_PPDU_STATUS_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/**
 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV))

/**
 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV))

#ifdef WLAN_TX_PKT_CAPTURE_ENH
extern uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX];
#endif

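/*
 * Maximum time budget for one timer run, in qdf log-timestamp ticks:
 * (ticks per 10 us) * 100 * 20, i.e. roughly 20 ms.
 */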
#define DP_MAX_TIMER_EXEC_TIME_TICKS \
		(QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 * 20)

/**
 * enum timer_yield_status - yield status code used in monitor mode timer.
 * @DP_TIMER_NO_YIELD: do not yield
 * @DP_TIMER_WORK_DONE: yield because work is done
 * @DP_TIMER_WORK_EXHAUST: yield because work quota is exhausted
 * @DP_TIMER_TIME_EXHAUST: yield due to time slot exhausted
 */
enum timer_yield_status {
	DP_TIMER_NO_YIELD,
	DP_TIMER_WORK_DONE,
	DP_TIMER_WORK_EXHAUST,
	DP_TIMER_TIME_EXHAUST,
};

#if DP_PRINT_ENABLE
#include <stdarg.h>       /* va_list */
#include <qdf_types.h> /* qdf_vprint */
#include <cdp_txrx_handle.h>

enum {
	/* FATAL_ERR - print only irrecoverable error messages */
	DP_PRINT_LEVEL_FATAL_ERR,

	/* ERR - include non-fatal err messages */
	DP_PRINT_LEVEL_ERR,

	/* WARN - include warnings */
	DP_PRINT_LEVEL_WARN,

	/* INFO1 - include fundamental, infrequent events */
	DP_PRINT_LEVEL_INFO1,

	/* INFO2 - include non-fundamental but infrequent events */
	DP_PRINT_LEVEL_INFO2,
};

#define dp_print(level, fmt, ...) do { \
	if (level <= g_txrx_print_level) \
		qdf_print(fmt, ## __VA_ARGS__); \
} while (0)
#define DP_PRINT(level, fmt, ...) do { \
	dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#define DP_PRINT(level, fmt, ...)
#endif /* DP_PRINT_ENABLE */

#define DP_TRACE(LVL, fmt, args ...)                             \
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL,       \
		fmt, ## args)

#ifdef DP_PRINT_NO_CONSOLE
/* Stat prints should not go to console or kernel logs. */
#define DP_PRINT_STATS(fmt, args ...)\
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,       \
		  fmt, ## args)
#else
#define DP_PRINT_STATS(fmt, args ...)\
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,\
		  fmt, ## args)
#endif
#define DP_STATS_INIT(_handle) \
	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#define DP_STATS_CLR(_handle) \
	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#ifndef DISABLE_DP_STATS
#define DP_STATS_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
	if (_cond && likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_STATS_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field -= _delta; \
}

#define DP_STATS_UPD(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field = _delta; \
}

#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \
{ \
	DP_STATS_INC(_handle, _field.num, _count); \
	DP_STATS_INC(_handle, _field.bytes, _bytes) \
}

#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
{ \
	DP_STATS_INCC(_handle, _field.num, _count, _cond); \
	DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
}

#define DP_STATS_AGGR(_handle_a, _handle_b, _field) \
{ \
	_handle_a->stats._field += _handle_b->stats._field; \
}

#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \
{ \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\
}

#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \
{ \
	_handle_a->stats._field = _handle_b->stats._field; \
}

#else
#define DP_STATS_INC(_handle, _field, _delta)
#define DP_STATS_INCC(_handle, _field, _delta, _cond)
#define DP_STATS_DEC(_handle, _field, _delta)
#define DP_STATS_UPD(_handle, _field, _delta)
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
#define DP_STATS_AGGR(_handle_a, _handle_b, _field)
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field)
#endif

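/*
 * Usage sketch (hypothetical call site): account one completed packet of
 * 'len' bytes on a peer's tx.comp_pkt counters:
 *
 *	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, len);
 */
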
#ifdef ENABLE_DP_HIST_STATS
#define DP_HIST_INIT() \
	uint32_t num_of_packets[MAX_PDEV_CNT] = {0};

#define DP_HIST_PACKET_COUNT_INC(_pdev_id) \
{ \
		++num_of_packets[_pdev_id]; \
}

#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
	do {                                                              \
		if (_p_cntrs == 1) {                                      \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_1, 1);             \
		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_2_20, 1);          \
		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_21_40, 1);         \
		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_41_60, 1);         \
		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_61_80, 1);         \
		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_81_100, 1);        \
		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_101_200, 1);       \
		} else if (_p_cntrs > 200) {                              \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_201_plus, 1);      \
		}                                                         \
	} while (0)

#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
	do {                                                              \
		if (_p_cntrs == 1) {                                      \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_1, 1);              \
		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_2_20, 1);           \
		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_21_40, 1);          \
		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_41_60, 1);          \
		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_61_80, 1);          \
		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_81_100, 1);         \
		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_101_200, 1);        \
		} else if (_p_cntrs > 200) {                              \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_201_plus, 1);       \
		}                                                         \
	} while (0)

#define DP_TX_HIST_STATS_PER_PDEV() \
	do { \
		uint8_t hist_stats = 0; \
		for (hist_stats = 0; hist_stats < soc->pdev_count; \
				hist_stats++) { \
			DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
					num_of_packets[hist_stats]); \
		} \
	} while (0)

#define DP_RX_HIST_STATS_PER_PDEV() \
	do { \
		uint8_t hist_stats = 0; \
		for (hist_stats = 0; hist_stats < soc->pdev_count; \
				hist_stats++) { \
			DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
					num_of_packets[hist_stats]); \
		} \
	} while (0)

#else
#define DP_HIST_INIT()
#define DP_HIST_PACKET_COUNT_INC(_pdev_id)
#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
#define DP_RX_HIST_STATS_PER_PDEV()
#define DP_TX_HIST_STATS_PER_PDEV()
#endif /* ENABLE_DP_HIST_STATS */

#define FRAME_MASK_IPV4_ARP   1
#define FRAME_MASK_IPV4_DHCP  2
#define FRAME_MASK_IPV4_EAPOL 4
#define FRAME_MASK_IPV6_DHCP  8

#ifdef QCA_SUPPORT_PEER_ISOLATION
#define dp_get_peer_isolation(_peer) ((_peer)->isolation)

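/**
 * dp_set_peer_isolation() - set the peer's isolation flag and log the change
 * @peer: DP peer handle
 * @val: new isolation state
 */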
static inline void dp_set_peer_isolation(struct dp_peer *peer, bool val)
{
	peer->isolation = val;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "peer:"QDF_MAC_ADDR_FMT" isolation:%d",
		  QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->isolation);
}

#else
#define dp_get_peer_isolation(_peer) (0)

static inline void dp_set_peer_isolation(struct dp_peer *peer, bool val)
{
}
#endif /* QCA_SUPPORT_PEER_ISOLATION */

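/**
 * dp_wds_ext_peer_init() - reset the WDS extended peer init state
 * @peer: DP peer handle
 */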
#ifdef QCA_SUPPORT_WDS_EXTENDED
static inline void dp_wds_ext_peer_init(struct dp_peer *peer)
{
	peer->wds_ext.init = 0;
}
#else
static inline void dp_wds_ext_peer_init(struct dp_peer *peer)
{
}
#endif /* QCA_SUPPORT_WDS_EXTENDED */

/**
 * The lmac ID for a particular channel band is fixed.
 * 2.4GHz band uses lmac_id = 1
 * 5GHz/6GHz band uses lmac_id = 0
 */
#define DP_MON_INVALID_LMAC_ID	(-1)
#define DP_MON_2G_LMAC_ID	1
#define DP_MON_5G_LMAC_ID	0
#define DP_MON_6G_LMAC_ID	0

#ifdef FEATURE_TSO_STATS
/**
 * dp_init_tso_stats() - Clear tso stats
 * @pdev: pdev handle
 *
 * Return: None
 */
static inline
void dp_init_tso_stats(struct dp_pdev *pdev)
{
	if (pdev) {
		qdf_mem_zero(&((pdev)->stats.tso_stats),
			     sizeof((pdev)->stats.tso_stats));
		qdf_atomic_init(&pdev->tso_idx);
	}
}

/**
 * dp_stats_tso_segment_histogram_update() - TSO Segment Histogram
 * @pdev: pdev handle
 * @_p_cntrs: number of tso segments for a tso packet
 *
 * Return: None
 */
void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
					   uint8_t _p_cntrs);

/**
 * dp_tso_segment_update() - Collect tso segment information
 * @pdev: pdev handle
 * @stats_idx: tso packet number
 * @idx: tso segment number
 * @seg: tso segment
 *
 * Return: None
 */
void dp_tso_segment_update(struct dp_pdev *pdev,
			   uint32_t stats_idx,
			   uint8_t idx,
			   struct qdf_tso_seg_t seg);

/**
 * dp_tso_packet_update() - TSO Packet information
 * @pdev: pdev handle
 * @stats_idx: tso packet number
 * @msdu: nbuf handle
 * @num_segs: tso segments
 *
 * Return: None
 */
void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
			  qdf_nbuf_t msdu, uint16_t num_segs);

/**
 * dp_tso_segment_stats_update() - TSO Segment stats
 * @pdev: pdev handle
 * @stats_seg: tso segment list
 * @stats_idx: tso packet number
 *
 * Return: None
 */
void dp_tso_segment_stats_update(struct dp_pdev *pdev,
				 struct qdf_tso_seg_elem_t *stats_seg,
				 uint32_t stats_idx);

/**
 * dp_print_tso_stats() - dump tso statistics
 * @soc: soc handle
 * @level: verbosity level
 *
 * Return: None
 */
void dp_print_tso_stats(struct dp_soc *soc,
			enum qdf_stats_verbosity_level level);

/**
 * dp_txrx_clear_tso_stats() - clear tso stats
 * @soc: soc handle
 *
 * Return: None
 */
void dp_txrx_clear_tso_stats(struct dp_soc *soc);
#else
static inline
void dp_init_tso_stats(struct dp_pdev *pdev)
{
}

static inline
void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
					   uint8_t _p_cntrs)
{
}

static inline
void dp_tso_segment_update(struct dp_pdev *pdev,
			   uint32_t stats_idx,
			   uint8_t idx,
			   struct qdf_tso_seg_t seg)
{
}

static inline
void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
			  qdf_nbuf_t msdu, uint16_t num_segs)
{
}

static inline
void dp_tso_segment_stats_update(struct dp_pdev *pdev,
				 struct qdf_tso_seg_elem_t *stats_seg,
				 uint32_t stats_idx)
{
}

static inline
void dp_print_tso_stats(struct dp_soc *soc,
			enum qdf_stats_verbosity_level level)
{
}

static inline
void dp_txrx_clear_tso_stats(struct dp_soc *soc)
{
}
#endif /* FEATURE_TSO_STATS */

#define DP_HTT_T2H_HP_PIPE 5

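/**
 * dp_update_pdev_stats() - accumulate per-vdev stats into pdev cumulative
 *			    stats
 * @tgtobj: DP pdev whose cumulative stats are updated
 * @srcobj: cdp vdev stats being folded in
 *
 * Note: the rx.unicast counters are first derived in @srcobj as
 * to_stack - multicast, then accumulated.
 */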
static inline void dp_update_pdev_stats(struct dp_pdev *tgtobj,
					struct cdp_vdev_stats *srcobj)
{
	uint8_t i;
	uint8_t pream_type;

	for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) {
		for (i = 0; i < MAX_MCS; i++) {
			tgtobj->stats.tx.pkt_type[pream_type].
				mcs_count[i] +=
			srcobj->tx.pkt_type[pream_type].
				mcs_count[i];
			tgtobj->stats.rx.pkt_type[pream_type].
				mcs_count[i] +=
			srcobj->rx.pkt_type[pream_type].
				mcs_count[i];
		}
	}

	for (i = 0; i < MAX_BW; i++) {
		tgtobj->stats.tx.bw[i] += srcobj->tx.bw[i];
		tgtobj->stats.rx.bw[i] += srcobj->rx.bw[i];
	}

	for (i = 0; i < SS_COUNT; i++) {
		tgtobj->stats.tx.nss[i] += srcobj->tx.nss[i];
		tgtobj->stats.rx.nss[i] += srcobj->rx.nss[i];
	}

	for (i = 0; i < WME_AC_MAX; i++) {
		tgtobj->stats.tx.wme_ac_type[i] +=
			srcobj->tx.wme_ac_type[i];
		tgtobj->stats.rx.wme_ac_type[i] +=
			srcobj->rx.wme_ac_type[i];
		tgtobj->stats.tx.excess_retries_per_ac[i] +=
			srcobj->tx.excess_retries_per_ac[i];
	}

	for (i = 0; i < MAX_GI; i++) {
		tgtobj->stats.tx.sgi_count[i] +=
			srcobj->tx.sgi_count[i];
		tgtobj->stats.rx.sgi_count[i] +=
			srcobj->rx.sgi_count[i];
	}

	for (i = 0; i < MAX_RECEPTION_TYPES; i++)
		tgtobj->stats.rx.reception_type[i] +=
			srcobj->rx.reception_type[i];

	tgtobj->stats.tx.comp_pkt.bytes += srcobj->tx.comp_pkt.bytes;
	tgtobj->stats.tx.comp_pkt.num += srcobj->tx.comp_pkt.num;
	tgtobj->stats.tx.ucast.num += srcobj->tx.ucast.num;
	tgtobj->stats.tx.ucast.bytes += srcobj->tx.ucast.bytes;
	tgtobj->stats.tx.mcast.num += srcobj->tx.mcast.num;
	tgtobj->stats.tx.mcast.bytes += srcobj->tx.mcast.bytes;
	tgtobj->stats.tx.bcast.num += srcobj->tx.bcast.num;
	tgtobj->stats.tx.bcast.bytes += srcobj->tx.bcast.bytes;
	tgtobj->stats.tx.tx_success.num += srcobj->tx.tx_success.num;
	tgtobj->stats.tx.tx_success.bytes +=
		srcobj->tx.tx_success.bytes;
	tgtobj->stats.tx.nawds_mcast.num +=
		srcobj->tx.nawds_mcast.num;
	tgtobj->stats.tx.nawds_mcast.bytes +=
		srcobj->tx.nawds_mcast.bytes;
	tgtobj->stats.tx.nawds_mcast_drop +=
		srcobj->tx.nawds_mcast_drop;
	tgtobj->stats.tx.num_ppdu_cookie_valid +=
		srcobj->tx.num_ppdu_cookie_valid;
	tgtobj->stats.tx.tx_failed += srcobj->tx.tx_failed;
	tgtobj->stats.tx.ofdma += srcobj->tx.ofdma;
	tgtobj->stats.tx.stbc += srcobj->tx.stbc;
	tgtobj->stats.tx.ldpc += srcobj->tx.ldpc;
	tgtobj->stats.tx.pream_punct_cnt += srcobj->tx.pream_punct_cnt;
	tgtobj->stats.tx.retries += srcobj->tx.retries;
	tgtobj->stats.tx.non_amsdu_cnt += srcobj->tx.non_amsdu_cnt;
	tgtobj->stats.tx.amsdu_cnt += srcobj->tx.amsdu_cnt;
	tgtobj->stats.tx.non_ampdu_cnt += srcobj->tx.non_ampdu_cnt;
	tgtobj->stats.tx.ampdu_cnt += srcobj->tx.ampdu_cnt;
	tgtobj->stats.tx.dropped.fw_rem.num += srcobj->tx.dropped.fw_rem.num;
	tgtobj->stats.tx.dropped.fw_rem.bytes +=
			srcobj->tx.dropped.fw_rem.bytes;
	tgtobj->stats.tx.dropped.fw_rem_tx +=
			srcobj->tx.dropped.fw_rem_tx;
	tgtobj->stats.tx.dropped.fw_rem_notx +=
			srcobj->tx.dropped.fw_rem_notx;
	tgtobj->stats.tx.dropped.fw_reason1 +=
			srcobj->tx.dropped.fw_reason1;
	tgtobj->stats.tx.dropped.fw_reason2 +=
			srcobj->tx.dropped.fw_reason2;
	tgtobj->stats.tx.dropped.fw_reason3 +=
			srcobj->tx.dropped.fw_reason3;
	tgtobj->stats.tx.dropped.age_out += srcobj->tx.dropped.age_out;
	tgtobj->stats.rx.err.mic_err += srcobj->rx.err.mic_err;
	if (srcobj->rx.rssi != 0)
		tgtobj->stats.rx.rssi = srcobj->rx.rssi;
	tgtobj->stats.rx.rx_rate = srcobj->rx.rx_rate;
	tgtobj->stats.rx.err.decrypt_err += srcobj->rx.err.decrypt_err;
	tgtobj->stats.rx.non_ampdu_cnt += srcobj->rx.non_ampdu_cnt;
	tgtobj->stats.rx.ampdu_cnt += srcobj->rx.ampdu_cnt;
	tgtobj->stats.rx.non_amsdu_cnt += srcobj->rx.non_amsdu_cnt;
	tgtobj->stats.rx.amsdu_cnt += srcobj->rx.amsdu_cnt;
	tgtobj->stats.rx.nawds_mcast_drop += srcobj->rx.nawds_mcast_drop;
	tgtobj->stats.rx.to_stack.num += srcobj->rx.to_stack.num;
	tgtobj->stats.rx.to_stack.bytes += srcobj->rx.to_stack.bytes;

	for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
		tgtobj->stats.rx.rcvd_reo[i].num +=
			srcobj->rx.rcvd_reo[i].num;
		tgtobj->stats.rx.rcvd_reo[i].bytes +=
			srcobj->rx.rcvd_reo[i].bytes;
	}

	srcobj->rx.unicast.num =
		srcobj->rx.to_stack.num -
				(srcobj->rx.multicast.num);
	srcobj->rx.unicast.bytes =
		srcobj->rx.to_stack.bytes -
				(srcobj->rx.multicast.bytes);

	tgtobj->stats.rx.unicast.num += srcobj->rx.unicast.num;
	tgtobj->stats.rx.unicast.bytes += srcobj->rx.unicast.bytes;
	tgtobj->stats.rx.multicast.num += srcobj->rx.multicast.num;
	tgtobj->stats.rx.multicast.bytes += srcobj->rx.multicast.bytes;
	tgtobj->stats.rx.bcast.num += srcobj->rx.bcast.num;
	tgtobj->stats.rx.bcast.bytes += srcobj->rx.bcast.bytes;
	tgtobj->stats.rx.raw.num += srcobj->rx.raw.num;
	tgtobj->stats.rx.raw.bytes += srcobj->rx.raw.bytes;
	tgtobj->stats.rx.intra_bss.pkts.num +=
			srcobj->rx.intra_bss.pkts.num;
	tgtobj->stats.rx.intra_bss.pkts.bytes +=
			srcobj->rx.intra_bss.pkts.bytes;
	tgtobj->stats.rx.intra_bss.fail.num +=
			srcobj->rx.intra_bss.fail.num;
	tgtobj->stats.rx.intra_bss.fail.bytes +=
			srcobj->rx.intra_bss.fail.bytes;

	tgtobj->stats.tx.last_ack_rssi =
		srcobj->tx.last_ack_rssi;
	tgtobj->stats.rx.mec_drop.num += srcobj->rx.mec_drop.num;
	tgtobj->stats.rx.mec_drop.bytes += srcobj->rx.mec_drop.bytes;
	tgtobj->stats.rx.multipass_rx_pkt_drop +=
		srcobj->rx.multipass_rx_pkt_drop;
}

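/**
 * dp_update_pdev_ingress_stats() - accumulate vdev tx ingress (tx_i) stats
 *				    into pdev stats
 * @tgtobj: DP pdev handle
 * @srcobj: DP vdev whose tx_i counters are folded in
 *
 * Also re-derives the aggregate dropped_pkt.num from the individual
 * drop counters.
 */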
static inline void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj,
						struct dp_vdev *srcobj)
{
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.nawds_mcast);

	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.rcvd);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.processed);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.reinject_pkts);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.inspect_pkts);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.raw.raw_pkt);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.raw.dma_map_error);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.raw.num_frags_overflow_err);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_host.num);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_target);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sg.sg_pkt);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.mcast_en.mcast_pkt);
	DP_STATS_AGGR(tgtobj, srcobj,
		      tx_i.mcast_en.dropped_map_error);
	DP_STATS_AGGR(tgtobj, srcobj,
		      tx_i.mcast_en.dropped_self_mac);
	DP_STATS_AGGR(tgtobj, srcobj,
		      tx_i.mcast_en.dropped_send_fail);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mcast_en.ucast);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.igmp_mcast_en.igmp_rcvd);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.igmp_mcast_en.igmp_ucast_converted);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.dma_error);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.ring_full);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.enqueue_fail);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.fail_per_pkt_vdev_id_check);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.desc_na.num);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.res_full);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.headroom_insufficient);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified_raw);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sniffer_rcvd);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.exception_fw);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.completion_fw);

	tgtobj->stats.tx_i.dropped.dropped_pkt.num =
		tgtobj->stats.tx_i.dropped.dma_error +
		tgtobj->stats.tx_i.dropped.ring_full +
		tgtobj->stats.tx_i.dropped.enqueue_fail +
		tgtobj->stats.tx_i.dropped.fail_per_pkt_vdev_id_check +
		tgtobj->stats.tx_i.dropped.desc_na.num +
		tgtobj->stats.tx_i.dropped.res_full;
}

/**
 * dp_is_wds_extended() - Check if wds ext is enabled
 * @peer: DP peer handle
 *
 * Return: true if enabled, false if not
 */
#ifdef QCA_SUPPORT_WDS_EXTENDED
static inline bool dp_is_wds_extended(struct dp_peer *peer)
{
	if (qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
				&peer->wds_ext.init))
		return true;

	return false;
}
#else
static inline bool dp_is_wds_extended(struct dp_peer *peer)
{
	return false;
}
#endif /* QCA_SUPPORT_WDS_EXTENDED */

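/**
 * dp_update_vdev_stats() - peer-iterate callback that folds a peer's stats
 *			    into the vdev stats being aggregated
 * @soc: DP soc handle
 * @srcobj: DP peer whose stats are accumulated
 * @arg: opaque pointer to the target struct cdp_vdev_stats
 *
 * Peers in WDS extended mode are skipped.
 */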
static inline void dp_update_vdev_stats(struct dp_soc *soc,
					struct dp_peer *srcobj,
					void *arg)
{
	struct cdp_vdev_stats *tgtobj = (struct cdp_vdev_stats *)arg;
	uint8_t i;
	uint8_t pream_type;

	if (qdf_unlikely(dp_is_wds_extended(srcobj)))
		return;

	for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) {
		for (i = 0; i < MAX_MCS; i++) {
			tgtobj->tx.pkt_type[pream_type].
				mcs_count[i] +=
			srcobj->stats.tx.pkt_type[pream_type].
				mcs_count[i];
			tgtobj->rx.pkt_type[pream_type].
				mcs_count[i] +=
			srcobj->stats.rx.pkt_type[pream_type].
				mcs_count[i];
		}
	}

	for (i = 0; i < MAX_BW; i++) {
		tgtobj->tx.bw[i] += srcobj->stats.tx.bw[i];
		tgtobj->rx.bw[i] += srcobj->stats.rx.bw[i];
	}

	for (i = 0; i < SS_COUNT; i++) {
		tgtobj->tx.nss[i] += srcobj->stats.tx.nss[i];
		tgtobj->rx.nss[i] += srcobj->stats.rx.nss[i];
	}

	for (i = 0; i < WME_AC_MAX; i++) {
		tgtobj->tx.wme_ac_type[i] +=
			srcobj->stats.tx.wme_ac_type[i];
		tgtobj->rx.wme_ac_type[i] +=
			srcobj->stats.rx.wme_ac_type[i];
		tgtobj->tx.excess_retries_per_ac[i] +=
			srcobj->stats.tx.excess_retries_per_ac[i];
	}

	for (i = 0; i < MAX_GI; i++) {
		tgtobj->tx.sgi_count[i] +=
			srcobj->stats.tx.sgi_count[i];
		tgtobj->rx.sgi_count[i] +=
			srcobj->stats.rx.sgi_count[i];
	}

	for (i = 0; i < MAX_RECEPTION_TYPES; i++)
		tgtobj->rx.reception_type[i] +=
			srcobj->stats.rx.reception_type[i];

	tgtobj->tx.comp_pkt.bytes += srcobj->stats.tx.comp_pkt.bytes;
	tgtobj->tx.comp_pkt.num += srcobj->stats.tx.comp_pkt.num;
	tgtobj->tx.ucast.num += srcobj->stats.tx.ucast.num;
	tgtobj->tx.ucast.bytes += srcobj->stats.tx.ucast.bytes;
	tgtobj->tx.mcast.num += srcobj->stats.tx.mcast.num;
	tgtobj->tx.mcast.bytes += srcobj->stats.tx.mcast.bytes;
	tgtobj->tx.bcast.num += srcobj->stats.tx.bcast.num;
	tgtobj->tx.bcast.bytes += srcobj->stats.tx.bcast.bytes;
	tgtobj->tx.tx_success.num += srcobj->stats.tx.tx_success.num;
	tgtobj->tx.tx_success.bytes +=
		srcobj->stats.tx.tx_success.bytes;
	tgtobj->tx.nawds_mcast.num +=
		srcobj->stats.tx.nawds_mcast.num;
	tgtobj->tx.nawds_mcast.bytes +=
		srcobj->stats.tx.nawds_mcast.bytes;
	tgtobj->tx.nawds_mcast_drop +=
		srcobj->stats.tx.nawds_mcast_drop;
	tgtobj->tx.num_ppdu_cookie_valid +=
		srcobj->stats.tx.num_ppdu_cookie_valid;
	tgtobj->tx.tx_failed += srcobj->stats.tx.tx_failed;
	tgtobj->tx.ofdma += srcobj->stats.tx.ofdma;
	tgtobj->tx.stbc += srcobj->stats.tx.stbc;
	tgtobj->tx.ldpc += srcobj->stats.tx.ldpc;
	tgtobj->tx.pream_punct_cnt += srcobj->stats.tx.pream_punct_cnt;
	tgtobj->tx.retries += srcobj->stats.tx.retries;
	tgtobj->tx.non_amsdu_cnt += srcobj->stats.tx.non_amsdu_cnt;
	tgtobj->tx.amsdu_cnt += srcobj->stats.tx.amsdu_cnt;
	tgtobj->tx.non_ampdu_cnt += srcobj->stats.tx.non_ampdu_cnt;
	tgtobj->tx.ampdu_cnt += srcobj->stats.tx.ampdu_cnt;
	tgtobj->tx.dropped.fw_rem.num += srcobj->stats.tx.dropped.fw_rem.num;
	tgtobj->tx.dropped.fw_rem.bytes +=
			srcobj->stats.tx.dropped.fw_rem.bytes;
	tgtobj->tx.dropped.fw_rem_tx +=
			srcobj->stats.tx.dropped.fw_rem_tx;
	tgtobj->tx.dropped.fw_rem_notx +=
			srcobj->stats.tx.dropped.fw_rem_notx;
	tgtobj->tx.dropped.fw_reason1 +=
			srcobj->stats.tx.dropped.fw_reason1;
	tgtobj->tx.dropped.fw_reason2 +=
			srcobj->stats.tx.dropped.fw_reason2;
	tgtobj->tx.dropped.fw_reason3 +=
			srcobj->stats.tx.dropped.fw_reason3;
	tgtobj->tx.dropped.age_out += srcobj->stats.tx.dropped.age_out;
	tgtobj->rx.err.mic_err += srcobj->stats.rx.err.mic_err;
	if (srcobj->stats.rx.rssi != 0)
		tgtobj->rx.rssi = srcobj->stats.rx.rssi;
	tgtobj->rx.rx_rate = srcobj->stats.rx.rx_rate;
	tgtobj->rx.err.decrypt_err += srcobj->stats.rx.err.decrypt_err;
	tgtobj->rx.non_ampdu_cnt += srcobj->stats.rx.non_ampdu_cnt;
	tgtobj->rx.ampdu_cnt += srcobj->stats.rx.ampdu_cnt;
	tgtobj->rx.non_amsdu_cnt += srcobj->stats.rx.non_amsdu_cnt;
	tgtobj->rx.amsdu_cnt += srcobj->stats.rx.amsdu_cnt;
	tgtobj->rx.nawds_mcast_drop += srcobj->stats.rx.nawds_mcast_drop;
	tgtobj->rx.to_stack.num += srcobj->stats.rx.to_stack.num;
	tgtobj->rx.to_stack.bytes += srcobj->stats.rx.to_stack.bytes;

	for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
		tgtobj->rx.rcvd_reo[i].num +=
			srcobj->stats.rx.rcvd_reo[i].num;
		tgtobj->rx.rcvd_reo[i].bytes +=
			srcobj->stats.rx.rcvd_reo[i].bytes;
	}

	srcobj->stats.rx.unicast.num =
		srcobj->stats.rx.to_stack.num -
				srcobj->stats.rx.multicast.num;
	srcobj->stats.rx.unicast.bytes =
		srcobj->stats.rx.to_stack.bytes -
				srcobj->stats.rx.multicast.bytes;

	tgtobj->rx.unicast.num += srcobj->stats.rx.unicast.num;
	tgtobj->rx.unicast.bytes += srcobj->stats.rx.unicast.bytes;
	tgtobj->rx.multicast.num += srcobj->stats.rx.multicast.num;
	tgtobj->rx.multicast.bytes += srcobj->stats.rx.multicast.bytes;
	tgtobj->rx.bcast.num += srcobj->stats.rx.bcast.num;
	tgtobj->rx.bcast.bytes += srcobj->stats.rx.bcast.bytes;
	tgtobj->rx.raw.num += srcobj->stats.rx.raw.num;
	tgtobj->rx.raw.bytes += srcobj->stats.rx.raw.bytes;
	tgtobj->rx.intra_bss.pkts.num +=
			srcobj->stats.rx.intra_bss.pkts.num;
	tgtobj->rx.intra_bss.pkts.bytes +=
			srcobj->stats.rx.intra_bss.pkts.bytes;
	tgtobj->rx.intra_bss.fail.num +=
			srcobj->stats.rx.intra_bss.fail.num;
	tgtobj->rx.intra_bss.fail.bytes +=
			srcobj->stats.rx.intra_bss.fail.bytes;
	tgtobj->tx.last_ack_rssi =
		srcobj->stats.tx.last_ack_rssi;
	tgtobj->rx.mec_drop.num += srcobj->stats.rx.mec_drop.num;
	tgtobj->rx.mec_drop.bytes += srcobj->stats.rx.mec_drop.bytes;
	tgtobj->rx.multipass_rx_pkt_drop +=
		srcobj->stats.rx.multipass_rx_pkt_drop;
}

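/*
 * DP_UPDATE_STATS() - fold _srcobj's stats into _tgtobj using the
 * DP_STATS_AGGR()/DP_STATS_AGGR_PKT() helpers (no-ops when
 * DISABLE_DP_STATS is defined); mirrors dp_update_vdev_stats() above.
 */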
#define DP_UPDATE_STATS(_tgtobj, _srcobj)	\
	do {				\
		uint8_t i;		\
		uint8_t pream_type;	\
		for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \
			for (i = 0; i < MAX_MCS; i++) { \
				DP_STATS_AGGR(_tgtobj, _srcobj, \
					tx.pkt_type[pream_type].mcs_count[i]); \
				DP_STATS_AGGR(_tgtobj, _srcobj, \
					rx.pkt_type[pream_type].mcs_count[i]); \
			} \
		} \
		  \
		for (i = 0; i < MAX_BW; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \
		} \
		  \
		for (i = 0; i < SS_COUNT; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \
		} \
		for (i = 0; i < WME_AC_MAX; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \
		\
		} \
		\
		for (i = 0; i < MAX_GI; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \
		} \
		\
		for (i = 0; i < MAX_RECEPTION_TYPES; i++) \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \
		\
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \
								\
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \
		if (_srcobj->stats.rx.rssi != 0) \
			DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rssi); \
		DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \
								\
		for (i = 0; i < CDP_MAX_RX_RINGS; i++)	\
			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \
									\
		_srcobj->stats.rx.unicast.num = \
			_srcobj->stats.rx.to_stack.num - \
					_srcobj->stats.rx.multicast.num; \
		_srcobj->stats.rx.unicast.bytes = \
			_srcobj->stats.rx.to_stack.bytes - \
					_srcobj->stats.rx.multicast.bytes; \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \
								  \
		_tgtobj->stats.tx.last_ack_rssi =	\
			_srcobj->stats.tx.last_ack_rssi; \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \
	} while (0)

extern int dp_peer_find_attach(struct dp_soc *soc);
extern void dp_peer_find_detach(struct dp_soc *soc);
extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer);
extern void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer);
extern void dp_peer_find_hash_erase(struct dp_soc *soc);
void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
			   struct dp_peer *peer);
void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
			      struct dp_peer *peer);
void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
				struct dp_peer *peer,
				uint16_t peer_id);
void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
				   uint16_t peer_id);
void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
			  enum dp_mod_id mod_id);
/*
 * dp_peer_ppdu_delayed_ba_init() - Initialize ppdu in peer
 * @peer: Datapath peer
 *
 * Return: void
 */
void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer);

/*
 * dp_peer_ppdu_delayed_ba_cleanup() - free ppdu allocated in peer
 * @peer: Datapath peer
 *
 * Return: void
 */
void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer);

extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer);
void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer);
void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
extern struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
					      uint8_t *peer_mac_addr,
					      int mac_addr_is_aligned,
					      uint8_t vdev_id,
					      enum dp_mod_id id);

#ifdef DP_PEER_EXTENDED_API
/**
 * dp_register_peer() - Register peer into physical device
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 * @sta_desc: peer description
 *
 * Register peer into physical device
 *
 * Return: QDF_STATUS_SUCCESS registration success
 *         QDF_STATUS_E_FAULT peer not found
 */
QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			    struct ol_txrx_desc_type *sta_desc);

/**
 * dp_clear_peer() - remove peer from physical device
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 * @peer_addr: peer mac address
 *
 * remove peer from physical device
 *
 * Return: QDF_STATUS_SUCCESS registration success
 *         QDF_STATUS_E_FAULT peer not found
 */
QDF_STATUS dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			 struct qdf_mac_addr peer_addr);

/*
 * dp_find_peer_exist - check if peer already exists
 * @soc_hdl: datapath soc handle
 * @pdev_id: physical device instance id
 * @peer_addr: peer mac address
 *
 * Return: true or false
 */
bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			uint8_t *peer_addr);

/*
 * dp_find_peer_exist_on_vdev - find if peer exists on the given vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev instance id
 * @peer_addr: peer mac address
 *
 * Return: true or false
 */
bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				uint8_t *peer_addr);

/*
 * dp_find_peer_exist_on_other_vdev - find if peer exists
 * on other than the given vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev instance id
 * @peer_addr: peer mac address
 * @max_bssid: max number of bssids
 *
 * Return: true or false
 */
bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id, uint8_t *peer_addr,
				      uint16_t max_bssid);

/**
 * dp_peer_state_update() - update peer local state
 * @soc: data path soc handle
 * @peer_mac: peer mac address
 * @state: new peer local state
 *
 * update peer local state
 *
 * Return: QDF_STATUS_SUCCESS registration success
 */
QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc, uint8_t *peer_mac,
				enum ol_txrx_peer_state state);

/**
 * dp_get_vdevid() - Get virtual interface id which peer registered
 * @soc_hdl: datapath soc handle
 * @peer_mac: peer mac address
 * @vdev_id: virtual interface id which peer registered
 *
 * Get virtual interface id which peer registered
 *
 * Return: QDF_STATUS_SUCCESS registration success
 */
QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
			 uint8_t *vdev_id);
struct cdp_vdev *dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
		struct qdf_mac_addr peer_addr);
struct cdp_vdev *dp_get_vdev_for_peer(void *peer);
uint8_t *dp_peer_get_peer_mac_addr(void *peer);

/**
 * dp_get_peer_state() - Get local peer state
 * @soc: datapath soc handle
 * @vdev_id: vdev id
 * @peer_mac: peer mac addr
 *
 * Get local peer state
 *
 * Return: peer status
 */
int dp_get_peer_state(struct cdp_soc_t *soc, uint8_t vdev_id,
		      uint8_t *peer_mac);
void dp_local_peer_id_pool_init(struct dp_pdev *pdev);
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer);
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer);
#else
/**
 * dp_get_vdevid() - Get virtual interface id which peer registered
 * @soc_hdl: datapath soc handle
 * @peer_mac: peer mac address
 * @vdev_id: virtual interface id which peer registered
 *
 * Get virtual interface id which peer registered
 *
 * Return: QDF_STATUS_SUCCESS registration success
 */
static inline
QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
			 uint8_t *vdev_id)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
{
}

static inline
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
{
}

static inline
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
{
}
#endif
int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
				      uint8_t *peer_mac, uint16_t vdev_id,
				      uint8_t tid,
				      int status);
int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
				  uint8_t *peer_mac, uint16_t vdev_id,
				  uint8_t dialogtoken, uint16_t tid,
				  uint16_t batimeout,
				  uint16_t buffersize,
				  uint16_t startseqnum);
QDF_STATUS dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc,
					uint8_t *peer_mac, uint16_t vdev_id,
					uint8_t tid, uint8_t *dialogtoken,
					uint16_t *statuscode,
					uint16_t *buffersize,
					uint16_t *batimeout);
QDF_STATUS dp_set_addba_response(struct cdp_soc_t *cdp_soc,
				 uint8_t *peer_mac,
				 uint16_t vdev_id, uint8_t tid,
				 uint16_t statuscode);
int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
			   uint16_t vdev_id, int tid,
			   uint16_t reasoncode);
/*
 * dp_delba_tx_completion_wifi3() - Handle delba tx completion
 * @cdp_soc: soc handle
 * @peer_mac: peer mac address
 * @vdev_id: id of the vdev handle
 * @tid: Tid number
 * @status: Tx completion status
 *
 * Indicate status of delba Tx to DP for stats update and retry
 * delba if tx failed.
 *
 */
int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
				 uint16_t vdev_id, uint8_t tid,
				 int status);
extern QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
					uint32_t ba_window_size,
					uint32_t start_seq);

extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc,
	enum hal_reo_cmd_type type, struct hal_reo_cmd_params *params,
	void (*callback_fn), void *data);

extern void dp_reo_cmdlist_destroy(struct dp_soc *soc);

/**
 * dp_reo_status_ring_handler() - Handler for REO Status ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: DP Soc handle
 *
 * Return: Number of descriptors reaped
 */
uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx,
				    struct dp_soc *soc);
void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
			     struct cdp_vdev_stats *vdev_stats);
void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status);
void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
		union hal_reo_status *reo_status);
uint16_t dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id,
				     qdf_nbuf_t nbuf,
				     uint8_t newmac[][QDF_MAC_ADDR_SIZE],
				     uint8_t new_mac_cnt, uint8_t tid,
				     bool is_igmp);
void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id);

void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id);
QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint32_t config_param_0,
		uint32_t config_param_1, uint32_t config_param_2,
		uint32_t config_param_3, int cookie, int cookie_msb,
		uint8_t mac_id);
void dp_htt_stats_print_tag(struct dp_pdev *pdev,
			    uint8_t tag_type, uint32_t *tag_buf);
void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type,
			   uint32_t *tag_buf);
QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, uint32_t tuple_mask,
				     uint8_t mac_id);
/**
 * dp_rxtid_stats_cmd_cb - function pointer for peer
 *			   rx tid stats cmd call_back
 */
typedef void (*dp_rxtid_stats_cmd_cb)(struct dp_soc *soc, void *cb_ctxt,
				      union hal_reo_status *reo_status);
int dp_peer_rxtid_stats(struct dp_peer *peer,
			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
			void *cb_ctxt);
QDF_STATUS
dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
		      uint32_t *rx_pn);

QDF_STATUS
dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
			  bool is_unicast);

void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id);

QDF_STATUS
dp_set_michael_key(struct cdp_soc_t *soc, uint8_t vdev_id,
		   uint8_t *peer_mac,
		   bool is_unicast, uint32_t *key);

/**
 * dp_check_pdev_exists() - Validate pdev before use
 * @soc: dp soc handle
 * @data: pdev handle
 *
 * Return: 0 on success (pdev is valid), non-zero on failure
 */
bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data);

/**
 * dp_update_delay_stats() - Update delay statistics in structure
 *			     and fill min, max and avg delay
 * @pdev: pdev handle
 * @delay: delay in ms
 * @tid: tid value
 * @mode: type of tx delay mode
 * @ring_id: ring number
 *
 * Return: none
 */
void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
			   uint8_t tid, uint8_t mode, uint8_t ring_id);

/**
 * dp_print_ring_stats() - Print tail and head pointer
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
void dp_print_ring_stats(struct dp_pdev *pdev);

/**
 * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
 * @pdev: DP pdev handle
 *
 * Return: void
 */
void dp_print_pdev_cfg_params(struct dp_pdev *pdev);

/**
 * dp_print_soc_cfg_params() - Dump soc wlan config parameters
 * @soc: Soc handle
 *
 * Return: void
 */
void dp_print_soc_cfg_params(struct dp_soc *soc);

/**
 * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
 * @ring_type: Ring
 *
 * Return: char const pointer
 */
const
char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type);

/*
 * dp_txrx_path_stats() - Function to dump Tx/Rx path stats
 * @soc: soc handle
 *
 * Return: none
 */
void dp_txrx_path_stats(struct dp_soc *soc);

/*
 * dp_print_per_ring_stats() - Packet count per ring
 * @soc: soc handle
 *
 * Return: None
 */
void dp_print_per_ring_stats(struct dp_soc *soc);

/**
 * dp_aggregate_pdev_stats() - Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Return: void
 */
void dp_aggregate_pdev_stats(struct dp_pdev *pdev);

/**
 * dp_print_rx_rates() - Print Rx rate stats
 * @vdev: DP_VDEV handle
 *
 * Return: void
 */
void dp_print_rx_rates(struct dp_vdev *vdev);

/**
 * dp_print_tx_rates() - Print tx rates
 * @vdev: DP_VDEV handle
 *
 * Return: void
 */
void dp_print_tx_rates(struct dp_vdev *vdev);

/**
 * dp_print_peer_stats() - print peer stats
 * @peer: DP_PEER handle
 *
 * Return: void
 */
void dp_print_peer_stats(struct dp_peer *peer);

/**
 * dp_print_pdev_tx_stats() - Print Pdev level TX stats
 * @pdev: DP_PDEV Handle
 *
 * Return: void
 */
void
dp_print_pdev_tx_stats(struct dp_pdev *pdev);

/**
 * dp_print_pdev_rx_stats() - Print Pdev level RX stats
 * @pdev: DP_PDEV Handle
 *
 * Return: void
 */
void
dp_print_pdev_rx_stats(struct dp_pdev *pdev);

/**
 * dp_print_pdev_rx_mon_stats() - Print Pdev level RX monitor stats
 * @pdev: DP_PDEV Handle
 *
 * Return: void
 */
void
dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev);

/**
 * dp_print_soc_tx_stats() - Print SOC level stats
 * @soc: DP_SOC Handle
 *
 * Return: void
 */
void dp_print_soc_tx_stats(struct dp_soc *soc);

/**
 * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc
 * @soc: dp_soc handle
 *
 * Return: None
 */
void dp_print_soc_interrupt_stats(struct dp_soc *soc);

/**
 * dp_print_soc_rx_stats() - Print SOC level Rx stats
 * @soc: DP_SOC Handle
 *
 * Return: void
 */
void dp_print_soc_rx_stats(struct dp_soc *soc);

/**
 * dp_get_mac_id_for_pdev() - Return the MAC ring index for a mac_id/pdev_id
 * @mac_id: MAC id
 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL
 *
 * A single pdev using both MACs will operate on both MAC rings,
 * which is the case for MCL.
 * For WIN each PDEV will operate on one ring, so the index is zero.
 *
 * Return: mac_id + pdev_id (at most one of the two may be non-zero)
 */
static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id)
{
	if (mac_id && pdev_id) {
		qdf_print("Both mac_id and pdev_id cannot be non zero");
		QDF_BUG(0);
		return 0;
	}
	return (mac_id + pdev_id);
}
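
/*
 * Illustration: for MCL (single pdev, pdev_id 0), mac_id 0/1 map to ring
 * index 0/1; for WIN (mac_id 0), the pdev_id itself selects the ring.
 */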

/**
 * dp_get_lmac_id_for_pdev_id() - Return lmac id corresponding to host pdev id
 * @soc: soc pointer
 * @mac_id: MAC id
 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL
 *
 * For MCL, a single pdev using both MACs will operate on both MAC rings.
 *
 * For WIN, each PDEV will operate on one ring.
 *
 */
static inline int
dp_get_lmac_id_for_pdev_id
	(struct dp_soc *soc, uint32_t mac_id, uint32_t pdev_id)
{
	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
		if (mac_id && pdev_id) {
			qdf_print("Both mac_id and pdev_id cannot be non zero");
			QDF_BUG(0);
			return 0;
		}
		return (mac_id + pdev_id);
	}

	return soc->pdev_list[pdev_id]->lmac_id;
}

/**
 * dp_get_pdev_for_lmac_id() - Return pdev pointer corresponding to lmac id
 * @soc: soc pointer
 * @lmac_id: LMAC id
 *
 * For MCL, a single pdev exists
 *
 * For WIN, each PDEV will operate on one ring.
 *
 */
static inline struct dp_pdev *
	dp_get_pdev_for_lmac_id(struct dp_soc *soc, uint32_t lmac_id)
{
	uint8_t i = 0;

	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
		i = wlan_cfg_get_pdev_idx(soc->wlan_cfg_ctx, lmac_id);
		return ((i < MAX_PDEV_CNT) ? soc->pdev_list[i] : NULL);
	}

	/* Typically for MCL, as there is only one PDEV */
	return soc->pdev_list[0];
}

/**
 * dp_calculate_target_pdev_id_from_host_pdev_id() - Return target pdev
 *                                          corresponding to host pdev id
 * @soc: soc pointer
 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL
 *
 * Returns target pdev_id for host pdev id. For WIN, this is derived through
 * a two step process:
 * 1. Get lmac_id corresponding to host pdev_id (lmac_id can change
 *    during mode switch)
 * 2. Get target pdev_id (set up during WMI ready) from lmac_id
 *
 * For MCL, return the offset-1 translated mac_id
 */
static inline int
dp_calculate_target_pdev_id_from_host_pdev_id
	(struct dp_soc *soc, uint32_t mac_for_pdev)
{
	struct dp_pdev *pdev;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return DP_SW2HW_MACID(mac_for_pdev);

	pdev = soc->pdev_list[mac_for_pdev];

	/* non-MCL case, get original target_pdev mapping */
	return wlan_cfg_get_target_pdev_id(soc->wlan_cfg_ctx, pdev->lmac_id);
}

1566 /**
1567  * dp_get_target_pdev_id_for_host_pdev_id() - Return target pdev corresponding
1568  *                                         to host pdev id
1569  * @soc: soc pointer
1570  * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL
1571  *
1572  * returns target pdev_id for host pdev id.
1573  * For WIN, return the value stored in pdev object.
1574  * For MCL, return the offset-1 translated mac_id.
1575  */
static inline int
dp_get_target_pdev_id_for_host_pdev_id
	(struct dp_soc *soc, uint32_t mac_for_pdev)
{
	struct dp_pdev *pdev;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return DP_SW2HW_MACID(mac_for_pdev);

	pdev = soc->pdev_list[mac_for_pdev];

	return pdev->target_pdev_id;
}

/**
 * dp_get_host_pdev_id_for_target_pdev_id() - Return host pdev id
 *                                         corresponding to target pdev id
 * @soc: soc pointer
 * @pdev_id: pdev_id corresponding to target pdev
 *
 * Returns host pdev_id for target pdev id. For WIN, this is derived through
 * a two-step process:
 * 1. Get lmac_id corresponding to target pdev_id
 * 2. Get host pdev_id (set up during WMI ready) from lmac_id
 *
 * For MCL, return the 0-offset pdev_id
 */
static inline int
dp_get_host_pdev_id_for_target_pdev_id
	(struct dp_soc *soc, uint32_t pdev_id)
{
	struct dp_pdev *pdev;
	int lmac_id;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return DP_HW2SW_MACID(pdev_id);

	/* non-MCL case, get original target_lmac mapping from target pdev */
	lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx,
					  DP_HW2SW_MACID(pdev_id));

	/* Get host pdev from lmac */
	pdev = dp_get_pdev_for_lmac_id(soc, lmac_id);

	return pdev ? pdev->pdev_id : INVALID_PDEV_ID;
}
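
/*
 * Example (illustrative round trip): on WIN targets host and target
 * pdev ids need not match, so conversions must go through these
 * helpers instead of assuming the fixed offset-by-1 relation
 * (DP_SW2HW_MACID/DP_HW2SW_MACID) that holds for MCL.
 *
 *	int tgt = dp_get_target_pdev_id_for_host_pdev_id(soc, host_id);
 *	int host = dp_get_host_pdev_id_for_target_pdev_id(soc, tgt);
 *
 * host equals host_id once the lmac mapping has been programmed.
 */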

/*
 * dp_get_mac_id_for_mac() - Return mac id corresponding to WIN and MCL mac_ids
 *
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * A single pdev using both MACs will operate on both MAC rings,
 * which is the case for MCL.
 * For WIN each PDEV will operate one ring, so the index is zero.
 *
 */
static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id)
{
	/*
	 * Single pdev using both MACs will operate on both MAC rings,
	 * which is the case for MCL.
	 */
	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return mac_id;

	/* For WIN each PDEV will operate one ring, so index is zero. */
	return 0;
}

/*
 * dp_is_subtype_data() - check if the frame subtype is data
 *
 * @frame_ctrl: Frame control field
 *
 * Check the frame control field and verify if the packet
 * is a data packet.
 *
 * Return: true or false
 */
static inline bool dp_is_subtype_data(uint16_t frame_ctrl)
{
	if (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_TYPE_MASK) ==
	    QDF_IEEE80211_FC0_TYPE_DATA) &&
	    (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
	    QDF_IEEE80211_FC0_SUBTYPE_DATA) ||
	    ((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
	    QDF_IEEE80211_FC0_SUBTYPE_QOS))) {
		return true;
	}

	return false;
}
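
/*
 * Example (illustrative sketch): a QoS-data frame control word passes
 * the check, while a management frame does not.
 *
 *	uint16_t fc = QDF_IEEE80211_FC0_TYPE_DATA |
 *		      QDF_IEEE80211_FC0_SUBTYPE_QOS;
 *
 *	dp_is_subtype_data(fc);		evaluates to true
 */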

#ifdef WDI_EVENT_ENABLE
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
				uint32_t stats_type_upload_mask,
				uint8_t mac_id);

int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id,
		       wdi_event_subscribe *event_cb_sub_handle,
		       uint32_t event);

int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id,
		     wdi_event_subscribe *event_cb_sub_handle,
		     uint32_t event);

void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc,
			  void *data, u_int16_t peer_id,
			  int status, u_int8_t pdev_id);

int dp_wdi_event_attach(struct dp_pdev *txrx_pdev);
int dp_wdi_event_detach(struct dp_pdev *txrx_pdev);
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
	bool enable);

/**
 * dp_get_pldev() - function to get pktlog device handle
 * @soc_hdl: datapath soc handle
 * @pdev_id: physical device id
 *
 * Return: pktlog device handle or NULL
 */
void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn);

static inline void
dp_hif_update_pipe_callback(struct dp_soc *dp_soc,
			    void *cb_context,
			    QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t),
			    uint8_t pipe_id)
{
	struct hif_msg_callbacks hif_pipe_callbacks;

	/* TODO: Temporary change to bypass HTC connection for this new
	 * HIF pipe, which will be used for packet log and other high-
	 * priority HTT messages. Proper HTC connection to be added
	 * later once required FW changes are available
	 */
	hif_pipe_callbacks.rxCompletionHandler = callback;
	hif_pipe_callbacks.Context = cb_context;
	hif_update_pipe_callback(dp_soc->hif_handle,
		DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks);
}
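
/*
 * Example (illustrative sketch, hypothetical callback name): plumbing a
 * pktlog rx-completion handler onto the high-priority T2H pipe.
 *
 *	static QDF_STATUS pktlog_rx_cb(void *ctx, qdf_nbuf_t nbuf,
 *				       uint8_t pipe_id)
 *	{
 *		// consume nbuf
 *		return QDF_STATUS_SUCCESS;
 *	}
 *
 *	dp_hif_update_pipe_callback(soc, pdev, pktlog_rx_cb,
 *				    DP_HTT_T2H_HP_PIPE);
 */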

QDF_STATUS dp_peer_stats_notify(struct dp_pdev *pdev, struct dp_peer *peer);

QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
				    struct cdp_rx_stats_ppdu_user *ppdu_user);
#else
static inline int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id,
				     wdi_event_subscribe *event_cb_sub_handle,
				     uint32_t event)
{
	return 0;
}

static inline int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id,
				   wdi_event_subscribe *event_cb_sub_handle,
				   uint32_t event)
{
	return 0;
}

static inline
void dp_wdi_event_handler(enum WDI_EVENT event,
			  struct dp_soc *soc,
			  void *data, u_int16_t peer_id,
			  int status, u_int8_t pdev_id)
{
}

static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev)
{
	return 0;
}

static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev)
{
	return 0;
}

static inline int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
	bool enable)
{
	return 0;
}

static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
{
}

static inline void
dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context,
			    QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t),
			    uint8_t pipe_id)
{
}

static inline QDF_STATUS dp_peer_stats_notify(struct dp_pdev *pdev,
					      struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
			 struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WDI_EVENT_ENABLE */

#ifdef VDEV_PEER_PROTOCOL_COUNT
/**
 * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters
 * @vdev: VDEV DP object
 * @nbuf: data packet
 * @peer: Peer DP object
 * @is_egress: whether egress or ingress
 * @is_rx: whether rx or tx
 *
 * This function updates the per-peer protocol counters
 * Return: void
 */
void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev,
					    qdf_nbuf_t nbuf,
					    struct dp_peer *peer,
					    bool is_egress,
					    bool is_rx);

/**
 * dp_peer_stats_update_protocol_cnt() - update per-peer protocol counters
 * @soc: SOC DP object
 * @vdev_id: vdev_id
 * @nbuf: data packet
 * @is_egress: whether egress or ingress
 * @is_rx: whether rx or tx
 *
 * This function updates the per-peer protocol counters
 * Return: void
 */
void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc,
				       int8_t vdev_id,
				       qdf_nbuf_t nbuf,
				       bool is_egress,
				       bool is_rx);

#else
#define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, peer, \
					       is_egress, is_rx)
#endif
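
/*
 * Example (illustrative call site): the rx path would account an
 * ingress frame against the peer before delivery with is_egress=false
 * and is_rx=true:
 *
 *	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, peer,
 *					       false, true);
 *
 * The empty macro above keeps such call sites compile-safe when
 * VDEV_PEER_PROTOCOL_COUNT is disabled.
 */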

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl);
int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
	bool force);
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/**
 * dp_srng_access_start() - Wrapper function to log access start of a hal ring
 * @int_ctx: pointer to DP interrupt context. This should not be NULL
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL ring which will be serviced
 *
 * Return: 0 on success; error on failure
 */
int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			 hal_ring_handle_t hal_ring_hdl);

/**
 * dp_srng_access_end() - Wrapper function to log access end of a hal ring
 * @int_ctx: pointer to DP interrupt context. This should not be NULL
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL ring which will be serviced
 *
 * Return: void
 */
void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			hal_ring_handle_t hal_ring_hdl);

#else

static inline int dp_srng_access_start(struct dp_intr *int_ctx,
				       struct dp_soc *dp_soc,
				       hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	return hal_srng_access_start(hal_soc, hal_ring_hdl);
}

static inline void dp_srng_access_end(struct dp_intr *int_ctx,
				      struct dp_soc *dp_soc,
				      hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	hal_srng_access_end(hal_soc, hal_ring_hdl);
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
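
/*
 * Example (illustrative sketch): ring processing brackets each reap
 * loop with these wrappers, so both edges of the access window get
 * logged when WLAN_FEATURE_DP_EVENT_HISTORY is enabled.
 *
 *	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl)))
 *		return 0;
 *
 *	// reap and process descriptors
 *
 *	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
 */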

#ifdef QCA_CACHED_RING_DESC
/**
 * dp_srng_dst_get_next() - Wrapper function to get next ring desc
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL Destination Ring
 *
 * Return: HAL ring descriptor
 */
static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc,
					 hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	return hal_srng_dst_get_next_cached(hal_soc, hal_ring_hdl);
}

/**
 * dp_srng_dst_inv_cached_descs() - Wrapper function to invalidate cached
 * descriptors
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring
 * @num_entries: Entry count
 *
 * Return: None
 */
static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc,
						hal_ring_handle_t hal_ring_hdl,
						uint32_t num_entries)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num_entries);
}
#else
static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc,
					 hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	return hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
}

static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc,
						hal_ring_handle_t hal_ring_hdl,
						uint32_t num_entries)
{
}
#endif /* QCA_CACHED_RING_DESC */
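
/*
 * Example (illustrative sketch): the same reap loop works for cached
 * and uncached rings; only the wrapper bodies differ, and the
 * invalidate call is a no-op when QCA_CACHED_RING_DESC is not set.
 *
 *	uint32_t reaped = 0;
 *	void *desc;
 *
 *	while ((desc = dp_srng_dst_get_next(soc, hal_ring_hdl)))
 *		reaped++;
 *
 *	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, reaped);
 */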

#ifdef QCA_ENH_V3_STATS_SUPPORT
/**
 * dp_pdev_print_delay_stats() - Print pdev level delay stats
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
void dp_pdev_print_delay_stats(struct dp_pdev *pdev);

/**
 * dp_pdev_print_tid_stats() - Print pdev level tid stats
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
void dp_pdev_print_tid_stats(struct dp_pdev *pdev);
#endif /* QCA_ENH_V3_STATS_SUPPORT */

void dp_soc_set_txrx_ring_map(struct dp_soc *soc);

#ifndef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_tx_ppdu_stats_attach - Initialize Tx PPDU stats and enhanced capture
 * @pdev: DP PDEV
 *
 * Return: none
 */
static inline void dp_tx_ppdu_stats_attach(struct dp_pdev *pdev)
{
}

/**
 * dp_tx_ppdu_stats_detach - Cleanup Tx PPDU stats and enhanced capture
 * @pdev: DP PDEV
 *
 * Return: none
 */
static inline void dp_tx_ppdu_stats_detach(struct dp_pdev *pdev)
{
}

/**
 * dp_tx_ppdu_stats_process - Deferred PPDU stats handler
 * @context: Opaque work context (PDEV)
 *
 * Return: none
 */
static inline void dp_tx_ppdu_stats_process(void *context)
{
}

/**
 * dp_tx_add_to_comp_queue() - add completion msdu to queue
 * @soc: DP Soc handle
 * @desc: software Tx descriptor
 * @ts: Tx completion status from HAL/HTT descriptor
 * @peer: DP peer
 *
 * Return: QDF_STATUS
 */
static inline
QDF_STATUS dp_tx_add_to_comp_queue(struct dp_soc *soc,
				   struct dp_tx_desc_s *desc,
				   struct hal_tx_completion_status *ts,
				   struct dp_peer *peer)
{
	return QDF_STATUS_E_FAILURE;
}

/*
 * dp_tx_capture_htt_frame_counter() - increment counter for htt_frame_type
 * @pdev: DP pdev handle
 * @htt_frame_type: htt frame type received from fw
 *
 * Return: void
 */
static inline
void dp_tx_capture_htt_frame_counter(struct dp_pdev *pdev,
				     uint32_t htt_frame_type)
{
}

/*
 * dp_print_pdev_tx_capture_stats() - print tx capture stats
 * @pdev: DP PDEV handle
 *
 * Return: void
 */
static inline
void dp_print_pdev_tx_capture_stats(struct dp_pdev *pdev)
{
}

/*
 * dp_peer_tx_capture_filter_check() - check if the tx capture filter is
 * enabled for the peer and update the tx_cap_enabled flag
 * @pdev: DP PDEV handle
 * @peer: DP PEER handle
 *
 * Return: void
 */
static inline
void dp_peer_tx_capture_filter_check(struct dp_pdev *pdev,
				     struct dp_peer *peer)
{
}

/*
 * dp_tx_capture_debugfs_init() - tx capture debugfs init
 * @pdev: DP PDEV handle
 *
 * Return: QDF_STATUS
 */
static inline
QDF_STATUS dp_tx_capture_debugfs_init(struct dp_pdev *pdev)
{
	return QDF_STATUS_E_FAILURE;
}
#endif

#ifdef FEATURE_PERPKT_INFO
void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf);
#else
static inline
void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
{
}
#endif

/**
 * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev
 * @vdev: DP vdev handle
 *
 * Return: struct cdp_vdev pointer
 */
static inline
struct cdp_vdev *dp_vdev_to_cdp_vdev(struct dp_vdev *vdev)
{
	return (struct cdp_vdev *)vdev;
}

/**
 * dp_pdev_to_cdp_pdev() - typecast dp pdev to cdp pdev
 * @pdev: DP pdev handle
 *
 * Return: struct cdp_pdev pointer
 */
static inline
struct cdp_pdev *dp_pdev_to_cdp_pdev(struct dp_pdev *pdev)
{
	return (struct cdp_pdev *)pdev;
}

/**
 * dp_soc_to_cdp_soc() - typecast dp psoc to cdp psoc
 * @psoc: DP psoc handle
 *
 * Return: struct cdp_soc pointer
 */
static inline
struct cdp_soc *dp_soc_to_cdp_soc(struct dp_soc *psoc)
{
	return (struct cdp_soc *)psoc;
}

/**
 * dp_soc_to_cdp_soc_t() - typecast dp psoc to ol txrx soc handle
 * @psoc: DP psoc handle
 *
 * Return: struct cdp_soc_t pointer
 */
static inline
struct cdp_soc_t *dp_soc_to_cdp_soc_t(struct dp_soc *psoc)
{
	return (struct cdp_soc_t *)psoc;
}

/**
 * cdp_soc_t_to_dp_soc() - typecast cdp_soc_t to dp soc handle
 * @psoc: CDP psoc handle
 *
 * Return: struct dp_soc pointer
 */
static inline
struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc)
{
	return (struct dp_soc *)psoc;
}

#if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
/**
 * dp_rx_flow_get_fse_stats() - Fetch a flow's statistics from the Rx FST
 * @pdev: pdev handle
 * @rx_flow_info: DP flow parameters
 * @stats: destination for the flow statistics
 *
 * Return: Success when flow statistics are fetched, error on failure
 */
QDF_STATUS dp_rx_flow_get_fse_stats(struct dp_pdev *pdev,
				    struct cdp_rx_flow_info *rx_flow_info,
				    struct cdp_flow_stats *stats);

/**
 * dp_rx_flow_delete_entry() - Delete a flow entry from flow search table
 * @pdev: pdev handle
 * @rx_flow_info: DP flow parameters
 *
 * Return: Success when flow is deleted, error on failure
 */
QDF_STATUS dp_rx_flow_delete_entry(struct dp_pdev *pdev,
				   struct cdp_rx_flow_info *rx_flow_info);

/**
 * dp_rx_flow_add_entry() - Add a flow entry to flow search table
 * @pdev: DP pdev instance
 * @rx_flow_info: DP flow parameters
 *
 * Return: Success when flow is added, no-memory or already exists on error
 */
QDF_STATUS dp_rx_flow_add_entry(struct dp_pdev *pdev,
				struct cdp_rx_flow_info *rx_flow_info);

/**
 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev);

/**
 * dp_rx_fst_detach() - De-initialize Rx FST
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: None
 */
void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev);

/**
 * dp_rx_flow_send_fst_fw_setup() - Program FST parameters in FW/HW post-attach
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: Success when FST parameters are programmed in FW, error otherwise
 */
QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc,
					struct dp_pdev *pdev);
#else /* !(defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)) */

/**
 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: QDF_STATUS
 */
static inline
QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_fst_detach() - De-initialize Rx FST
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: None
 */
static inline
void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
#endif

/**
 * dp_vdev_get_ref() - API to take a reference for VDEV object
 *
 * @soc		: core DP soc context
 * @vdev	: DP vdev
 * @mod_id	: module id
 *
 * Return:	QDF_STATUS_SUCCESS if reference held successfully
 *		else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_vdev_get_ref(struct dp_soc *soc, struct dp_vdev *vdev,
			   enum dp_mod_id mod_id)
{
	if (!qdf_atomic_inc_not_zero(&vdev->ref_cnt))
		return QDF_STATUS_E_INVAL;

	qdf_atomic_inc(&vdev->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_vdev_get_ref_by_id() - Returns vdev object given the vdev id
 * @soc: core DP soc context
 * @vdev_id: vdev id from which the vdev object can be retrieved
 * @mod_id: module id which is requesting the reference
 *
 * Return: struct dp_vdev*: Pointer to DP vdev object
 */
static inline struct dp_vdev *
dp_vdev_get_ref_by_id(struct dp_soc *soc, uint8_t vdev_id,
		      enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return NULL;

	qdf_spin_lock_bh(&soc->vdev_map_lock);
	vdev = soc->vdev_id_map[vdev_id];

	if (!vdev || dp_vdev_get_ref(soc, vdev, mod_id) != QDF_STATUS_SUCCESS) {
		qdf_spin_unlock_bh(&soc->vdev_map_lock);
		return NULL;
	}
	qdf_spin_unlock_bh(&soc->vdev_map_lock);

	return vdev;
}
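
/*
 * Example (illustrative sketch): each successful lookup must be
 * balanced by a release for the same mod_id (dp_vdev_unref_delete()
 * in this codebase) once the caller is done with the vdev.
 *
 *	struct dp_vdev *vdev;
 *
 *	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
 *	if (!vdev)
 *		return QDF_STATUS_E_INVAL;
 *
 *	// use vdev
 *
 *	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
 */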

/**
 * dp_get_pdev_from_soc_pdev_id_wifi3() - Returns pdev object given the pdev id
 * @soc: core DP soc context
 * @pdev_id: pdev id from which the pdev object can be retrieved
 *
 * Return: struct dp_pdev*: Pointer to DP pdev object
 */
static inline struct dp_pdev *
dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc *soc,
				   uint8_t pdev_id)
{
	if (qdf_unlikely(pdev_id >= MAX_PDEV_CNT))
		return NULL;

	return soc->pdev_list[pdev_id];
}

/*
 * dp_rx_tid_update_wifi3() - Update receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid,
				  uint32_t ba_window_size,
				  uint32_t start_seq);

/**
 * dp_get_peer_mac_list() - function to get peer mac list of vdev
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @newmac: Table of the clients' MACs
 * @mac_cnt: No. of MACs required
 * @limit: Limit the number of clients
 *
 * Return: no of clients
 */
uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
			      u_int16_t mac_cnt, bool limit);
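
/*
 * Example (illustrative sketch, hypothetical table size): the caller
 * owns the MAC table and passes its capacity in mac_cnt.
 *
 *	u_int8_t macs[8][QDF_MAC_ADDR_SIZE];
 *	uint16_t n;
 *
 *	n = dp_get_peer_mac_list(soc, vdev_id, macs, 8, false);
 */
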
/*
 * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
 * @soc:		DP SoC context
 * @max_mac_rings:	Pointer to the no. of MAC rings, updated based on
 *			DBS support
 *
 * Return: None
 */
void dp_is_hw_dbs_enable(struct dp_soc *soc,
			 int *max_mac_rings);

#if defined(WLAN_SUPPORT_RX_FISA)
void dp_rx_dump_fisa_table(struct dp_soc *soc);

/*
 * dp_rx_fst_update_cmem_params() - Update CMEM FST params
 * @soc:		DP SoC context
 * @num_entries:	Number of flow search entries
 * @cmem_ba_lo:		CMEM base address low
 * @cmem_ba_hi:		CMEM base address high
 *
 * Return: None
 */
void dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries,
				  uint32_t cmem_ba_lo, uint32_t cmem_ba_hi);
#else
static inline void
dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries,
			     uint32_t cmem_ba_lo, uint32_t cmem_ba_hi)
{
}
#endif /* WLAN_SUPPORT_RX_FISA */

#ifdef MAX_ALLOC_PAGE_SIZE
/**
 * dp_set_max_page_size() - Set the max page size for hw link desc.
 * @pages: link desc page handle
 * @max_alloc_size: max_alloc_size
 *
 * For MCL the page size is set to the OS defined value and for WIN
 * the page size is set to the max_alloc_size cfg ini param.
 * This is to ensure that WIN gets contiguous memory allocations
 * as per its requirement.
 *
 * Return: None
 */
static inline
void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
			  uint32_t max_alloc_size)
{
	pages->page_size = qdf_page_size;
}

#else
static inline
void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
			  uint32_t max_alloc_size)
{
	pages->page_size = max_alloc_size;
}
#endif /* MAX_ALLOC_PAGE_SIZE */

/**
 * dp_history_get_next_index() - get the next entry to record an entry
 *				 in the history.
 * @curr_idx: Current index where the last entry is written.
 * @max_entries: Max number of entries in the history
 *
 * This function assumes that the max number of entries is a power of 2.
 *
 * Return: The index where the next entry is to be written.
 */
static inline uint32_t dp_history_get_next_index(qdf_atomic_t *curr_idx,
						 uint32_t max_entries)
{
	uint32_t idx = qdf_atomic_inc_return(curr_idx);

	return idx & (max_entries - 1);
}
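
/*
 * Worked example: with max_entries = 32 and the counter at 31, the
 * atomic increment returns 32 and 32 & (32 - 1) wraps the write index
 * back to 0; this masking only works because max_entries is a power
 * of 2.
 */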

/**
 * dp_rx_skip_tlvs() - Skip TLVs len + L2 hdr_offset, save in nbuf->cb
 * @nbuf: nbuf cb to be updated
 * @l3_padding: l3_padding
 *
 * Return: None
 */
void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding);

/**
 * dp_soc_is_full_mon_enable() - Return if full monitor mode is enabled
 * @pdev: DP pdev handle
 *
 * Return: Full monitor mode status
 */
static inline bool dp_soc_is_full_mon_enable(struct dp_pdev *pdev)
{
	return (pdev->soc->full_mon_mode && pdev->monitor_configured) ?
			true : false;
}

#ifndef FEATURE_WDS
static inline void
dp_hmwds_ast_add_notify(struct dp_peer *peer,
			uint8_t *mac_addr,
			enum cdp_txrx_ast_entry_type type,
			QDF_STATUS err,
			bool is_peer_map)
{
}
#endif

#ifdef HTT_STATS_DEBUGFS_SUPPORT
/**
 * dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize
 * debugfs for HTT stats
 * @pdev: dp pdev handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev);

/**
 * dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for
 * HTT stats
 * @pdev: dp pdev handle
 *
 * Return: none
 */
void dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev);
#else

/**
 * dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize
 * debugfs for HTT stats
 * @pdev: dp pdev handle
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for
 * HTT stats
 * @pdev: dp pdev handle
 *
 * Return: none
 */
static inline void
dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev)
{
}
#endif /* HTT_STATS_DEBUGFS_SUPPORT */

#ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_soc_swlm_attach() - attach the software latency manager resources
 * @soc: Datapath global soc handle
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_soc_swlm_attach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_soc_swlm_detach() - detach the software latency manager resources
 * @soc: Datapath global soc handle
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_soc_swlm_detach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */

#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_wds_ext_get_peer_id() - function to get peer id by mac
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @mac: Peer mac address
 *
 * This API is called from the control path when a wds extended
 * device is created, hence it also updates the wds extended
 * peer state to up, which will be referred to in rx processing.
 *
 * Return: valid peer id on success
 *         HTT_INVALID_PEER on failure
 */
uint16_t dp_wds_ext_get_peer_id(ol_txrx_soc_handle soc,
				uint8_t vdev_id,
				uint8_t *mac);

/**
 * dp_wds_ext_set_peer_rx() - function to set peer rx handler
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @mac: Peer mac address
 * @rx: rx function pointer
 * @osif_peer: OSIF peer handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 *         QDF_STATUS_E_INVAL if peer is not found
 *         QDF_STATUS_E_ALREADY if rx is already set/unset
 */
QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
				  uint8_t vdev_id,
				  uint8_t *mac,
				  ol_txrx_rx_fp rx,
				  ol_osif_peer_handle osif_peer);
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#ifdef DP_MEM_PRE_ALLOC

/**
 * dp_context_alloc_mem() - allocate memory for DP context
 * @soc: datapath soc handle
 * @ctxt_type: DP context type
 * @ctxt_size: DP context size
 *
 * Return: DP context address
 */
void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			   size_t ctxt_size);

/**
 * dp_context_free_mem() - Free memory of DP context
 * @soc: datapath soc handle
 * @ctxt_type: DP context type
 * @vaddr: Address of context memory
 *
 * Return: None
 */
void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			 void *vaddr);

/**
 * dp_desc_multi_pages_mem_alloc() - alloc memory over multiple pages
 * @soc: datapath soc handle
 * @desc_type: memory request source type
 * @pages: multi page information storage
 * @element_size: each element size
 * @element_num: total number of elements to be allocated
 * @memctxt: memory context
 * @cacheable: coherent memory or cacheable memory
 *
 * This function is a wrapper for memory allocation over multiple
 * pages. If a DP prealloc method is registered, prealloc is tried
 * first; if prealloc fails, it falls back to a regular allocation
 * via qdf_mem_multi_pages_alloc().
 *
 * Return: None
 */
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
				   enum dp_desc_type desc_type,
				   struct qdf_mem_multi_page_t *pages,
				   size_t element_size,
				   uint16_t element_num,
				   qdf_dma_context_t memctxt,
				   bool cacheable);

/**
 * dp_desc_multi_pages_mem_free() - free multiple pages memory
 * @soc: datapath soc handle
 * @desc_type: memory request source type
 * @pages: multi page information storage
 * @memctxt: memory context
 * @cacheable: coherent memory or cacheable memory
 *
 * This function is a wrapper for freeing multiple pages memory. If the
 * memory came from the prealloc pool, it is put back into the pool;
 * otherwise it is freed via qdf_mem_multi_pages_free().
 *
 * Return: None
 */
void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
				  enum dp_desc_type desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  qdf_dma_context_t memctxt,
				  bool cacheable);

#else
static inline
void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			   size_t ctxt_size)
{
	return qdf_mem_malloc(ctxt_size);
}

static inline
void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			 void *vaddr)
{
	qdf_mem_free(vaddr);
}

static inline
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
				   enum dp_desc_type desc_type,
				   struct qdf_mem_multi_page_t *pages,
				   size_t element_size,
				   uint16_t element_num,
				   qdf_dma_context_t memctxt,
				   bool cacheable)
{
	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
				  element_num, memctxt, cacheable);
}

static inline
void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
				  enum dp_desc_type desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  qdf_dma_context_t memctxt,
				  bool cacheable)
{
	qdf_mem_multi_pages_free(soc->osdev, pages,
				 memctxt, cacheable);
}
#endif
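
/*
 * Example (illustrative sketch; DP_FST_TYPE is a hypothetical
 * dp_ctxt_type value used only for illustration): a DP context
 * allocated through the wrappers comes from the prealloc pool when
 * DP_MEM_PRE_ALLOC is enabled and from qdf_mem_malloc() otherwise,
 * with no change at the call site.
 *
 *	struct dp_rx_fst *fst;
 *
 *	fst = dp_context_alloc_mem(soc, DP_FST_TYPE, sizeof(*fst));
 *	if (!fst)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	dp_context_free_mem(soc, DP_FST_TYPE, fst);
 */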

#endif /* #ifndef _DP_INTERNAL_H_ */