xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_internal.h (revision dd4dc88b837a295134aa9869114a2efee0f4894b)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _DP_INTERNAL_H_
20 #define _DP_INTERNAL_H_
21 
22 #include "dp_types.h"
23 
24 #define RX_BUFFER_SIZE_PKTLOG_LITE 1024
25 
26 
27 #define DP_RSSI_AVG_WEIGHT 2
28 /*
29  * Formula to derive avg_rssi is taken from wifi2.o firmware
30  */
31 #define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \
32 	(((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \
33 	+ ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT)))
34 
35 /* Macro For NYSM value received in VHT TLV */
36 #define VHT_SGI_NYSM 3
37 
38 /* PPDU STATS CFG */
39 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
40 
41 /* PPDU stats mask sent to FW to enable enhanced stats */
42 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
43 /* PPDU stats mask sent to FW to support debug sniffer feature */
44 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
45 /* PPDU stats mask sent to FW to support BPR feature*/
46 #define DP_PPDU_STATS_CFG_BPR 0x2000
47 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
48 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
49 				   DP_PPDU_STATS_CFG_ENH_STATS)
50 /* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
51 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
52 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
53 
54 /**
55  * Bitmap of HTT PPDU TLV types for Default mode
56  */
57 #define HTT_PPDU_DEFAULT_TLV_BITMAP \
58 	(1 << HTT_PPDU_STATS_COMMON_TLV) | \
59 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
60 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
61 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
62 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
63 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)
64 
65 /**
66  * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64
67  */
68 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \
69 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
70 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
71 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
72 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
73 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
74 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
75 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
76 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV))
77 
78 /**
79  * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256
80  */
81 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \
82 	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
83 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
84 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
85 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
86 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
87 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
88 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
89 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV))
90 
91 #ifdef WLAN_TX_PKT_CAPTURE_ENH
92 extern uint8_t
93 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS];
94 #endif
95 
#if DP_PRINT_ENABLE
#include <stdarg.h>       /* va_list */
#include <qdf_types.h> /* qdf_vprint */
#include <cdp_txrx_handle.h>

/*
 * Verbosity levels for dp_print()/DP_PRINT(); compared against
 * g_txrx_print_level, a lower value means higher severity.
 */
enum {
	/* FATAL_ERR - print only irrecoverable error messages */
	DP_PRINT_LEVEL_FATAL_ERR,

	/* ERR - include non-fatal err messages */
	DP_PRINT_LEVEL_ERR,

	/* WARN - include warnings */
	DP_PRINT_LEVEL_WARN,

	/* INFO1 - include fundamental, infrequent events */
	DP_PRINT_LEVEL_INFO1,

	/* INFO2 - include non-fundamental but infrequent events */
	DP_PRINT_LEVEL_INFO2,
};

/*
 * dp_print() - emit a message when @level is within the configured
 * print level.
 * Fix: both do { ... } while (0) wrappers below were missing the
 * closing brace, which would fail to compile whenever
 * DP_PRINT_ENABLE is set.
 */
#define dp_print(level, fmt, ...) do { \
	if (level <= g_txrx_print_level) \
		qdf_print(fmt, ## __VA_ARGS__); \
} while (0)

/* DP_PRINT() - dp_print() with a "DP: " module prefix */
#define DP_PRINT(level, fmt, ...) do { \
	dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#define DP_PRINT(level, fmt, ...)
#endif /* DP_PRINT_ENABLE */
129 
130 #define DP_TRACE(LVL, fmt, args ...)                             \
131 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL,       \
132 		fmt, ## args)
133 
134 #ifdef CONFIG_MCL
135 /* Stat prints should not go to console or kernel logs.*/
136 #define DP_PRINT_STATS(fmt, args ...)\
137 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,       \
138 		  fmt, ## args)
139 #else
140 #define DP_PRINT_STATS(fmt, args ...)\
141 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,\
142 		  fmt, ## args)
143 #endif
144 #define DP_STATS_INIT(_handle) \
145 	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))
146 
147 #define DP_STATS_CLR(_handle) \
148 	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))
149 
#ifndef DISABLE_DP_STATS
/* Add _delta to _handle->stats._field; silently ignores a NULL handle */
#define DP_STATS_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field += _delta; \
}

/* As DP_STATS_INC, but only when _cond evaluates true */
#define DP_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
	if (_cond && likely(_handle)) \
		_handle->stats._field += _delta; \
}

/* Subtract _delta from _handle->stats._field */
#define DP_STATS_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field -= _delta; \
}

/* Overwrite _handle->stats._field with _delta */
#define DP_STATS_UPD(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field = _delta; \
}

/* Accumulate a packet/byte pair into a { num, bytes } stats field */
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \
{ \
	DP_STATS_INC(_handle, _field.num, _count); \
	DP_STATS_INC(_handle, _field.bytes, _bytes) \
}

/* Conditional variant of DP_STATS_INC_PKT */
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
{ \
	DP_STATS_INCC(_handle, _field.num, _count, _cond); \
	DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
}

/* Aggregate stats._field of _handle_b into _handle_a (no NULL checks) */
#define DP_STATS_AGGR(_handle_a, _handle_b, _field) \
{ \
	_handle_a->stats._field += _handle_b->stats._field; \
}

/* Aggregate a { num, bytes } pair from _handle_b into _handle_a */
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \
{ \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\
}

/* Copy (last-writer-wins) stats._field of _handle_b into _handle_a */
#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \
{ \
	_handle_a->stats._field = _handle_b->stats._field; \
}

#else
#define DP_STATS_INC(_handle, _field, _delta)
#define DP_STATS_INCC(_handle, _field, _delta, _cond)
#define DP_STATS_DEC(_handle, _field, _delta)
#define DP_STATS_UPD(_handle, _field, _delta)
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
#define DP_STATS_AGGR(_handle_a, _handle_b, _field)
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
/*
 * Fix: DP_STATS_UPD_STRUCT was the only stats macro lacking a stub in
 * the DISABLE_DP_STATS branch; DP_UPDATE_STATS references it, so
 * stats-disabled builds would fail without this no-op definition.
 */
#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field)
#endif
213 
214 #ifdef ENABLE_DP_HIST_STATS
215 #define DP_HIST_INIT() \
216 	uint32_t num_of_packets[MAX_PDEV_CNT] = {0};
217 
218 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) \
219 { \
220 		++num_of_packets[_pdev_id]; \
221 }
222 
223 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
224 	do {                                                              \
225 		if (_p_cntrs == 1) {                                      \
226 			DP_STATS_INC(_pdev,                               \
227 				tx_comp_histogram.pkts_1, 1);             \
228 		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
229 			DP_STATS_INC(_pdev,                               \
230 				tx_comp_histogram.pkts_2_20, 1);          \
231 		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
232 			DP_STATS_INC(_pdev,                               \
233 				tx_comp_histogram.pkts_21_40, 1);         \
234 		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
235 			DP_STATS_INC(_pdev,                               \
236 				tx_comp_histogram.pkts_41_60, 1);         \
237 		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
238 			DP_STATS_INC(_pdev,                               \
239 				tx_comp_histogram.pkts_61_80, 1);         \
240 		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
241 			DP_STATS_INC(_pdev,                               \
242 				tx_comp_histogram.pkts_81_100, 1);        \
243 		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
244 			DP_STATS_INC(_pdev,                               \
245 				tx_comp_histogram.pkts_101_200, 1);       \
246 		} else if (_p_cntrs > 200) {                              \
247 			DP_STATS_INC(_pdev,                               \
248 				tx_comp_histogram.pkts_201_plus, 1);      \
249 		}                                                         \
250 	} while (0)
251 
252 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
253 	do {                                                              \
254 		if (_p_cntrs == 1) {                                      \
255 			DP_STATS_INC(_pdev,                               \
256 				rx_ind_histogram.pkts_1, 1);              \
257 		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
258 			DP_STATS_INC(_pdev,                               \
259 				rx_ind_histogram.pkts_2_20, 1);           \
260 		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
261 			DP_STATS_INC(_pdev,                               \
262 				rx_ind_histogram.pkts_21_40, 1);          \
263 		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
264 			DP_STATS_INC(_pdev,                               \
265 				rx_ind_histogram.pkts_41_60, 1);          \
266 		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
267 			DP_STATS_INC(_pdev,                               \
268 				rx_ind_histogram.pkts_61_80, 1);          \
269 		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
270 			DP_STATS_INC(_pdev,                               \
271 				rx_ind_histogram.pkts_81_100, 1);         \
272 		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
273 			DP_STATS_INC(_pdev,                               \
274 				rx_ind_histogram.pkts_101_200, 1);        \
275 		} else if (_p_cntrs > 200) {                              \
276 			DP_STATS_INC(_pdev,                               \
277 				rx_ind_histogram.pkts_201_plus, 1);       \
278 		}                                                         \
279 	} while (0)
280 
281 #define DP_TX_HIST_STATS_PER_PDEV() \
282 	do { \
283 		uint8_t hist_stats = 0; \
284 		for (hist_stats = 0; hist_stats < soc->pdev_count; \
285 				hist_stats++) { \
286 			DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
287 					num_of_packets[hist_stats]); \
288 		} \
289 	}  while (0)
290 
291 
292 #define DP_RX_HIST_STATS_PER_PDEV() \
293 	do { \
294 		uint8_t hist_stats = 0; \
295 		for (hist_stats = 0; hist_stats < soc->pdev_count; \
296 				hist_stats++) { \
297 			DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
298 					num_of_packets[hist_stats]); \
299 		} \
300 	}  while (0)
301 
302 
303 #else
304 #define DP_HIST_INIT()
305 #define DP_HIST_PACKET_COUNT_INC(_pdev_id)
306 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
307 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
308 #define DP_RX_HIST_STATS_PER_PDEV()
309 #define DP_TX_HIST_STATS_PER_PDEV()
310 #endif
311 
312 #define DP_HTT_T2H_HP_PIPE 5
313 static inline void dp_update_pdev_stats(struct dp_pdev *tgtobj,
314 					struct cdp_vdev_stats *srcobj)
315 {
316 	uint8_t i;
317 	uint8_t pream_type;
318 
319 	for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) {
320 		for (i = 0; i < MAX_MCS; i++) {
321 			tgtobj->stats.tx.pkt_type[pream_type].
322 				mcs_count[i] +=
323 			srcobj->tx.pkt_type[pream_type].
324 				mcs_count[i];
325 			tgtobj->stats.rx.pkt_type[pream_type].
326 				mcs_count[i] +=
327 			srcobj->rx.pkt_type[pream_type].
328 				mcs_count[i];
329 		}
330 	}
331 
332 	for (i = 0; i < MAX_BW; i++) {
333 		tgtobj->stats.tx.bw[i] += srcobj->tx.bw[i];
334 		tgtobj->stats.rx.bw[i] += srcobj->rx.bw[i];
335 	}
336 
337 	for (i = 0; i < SS_COUNT; i++) {
338 		tgtobj->stats.tx.nss[i] += srcobj->tx.nss[i];
339 		tgtobj->stats.rx.nss[i] += srcobj->rx.nss[i];
340 	}
341 
342 	for (i = 0; i < WME_AC_MAX; i++) {
343 		tgtobj->stats.tx.wme_ac_type[i] +=
344 			srcobj->tx.wme_ac_type[i];
345 		tgtobj->stats.rx.wme_ac_type[i] +=
346 			srcobj->rx.wme_ac_type[i];
347 		tgtobj->stats.tx.excess_retries_per_ac[i] +=
348 			srcobj->tx.excess_retries_per_ac[i];
349 	}
350 
351 	for (i = 0; i < MAX_GI; i++) {
352 		tgtobj->stats.tx.sgi_count[i] +=
353 			srcobj->tx.sgi_count[i];
354 		tgtobj->stats.rx.sgi_count[i] +=
355 			srcobj->rx.sgi_count[i];
356 	}
357 
358 	for (i = 0; i < MAX_RECEPTION_TYPES; i++)
359 		tgtobj->stats.rx.reception_type[i] +=
360 			srcobj->rx.reception_type[i];
361 
362 	tgtobj->stats.tx.comp_pkt.bytes += srcobj->tx.comp_pkt.bytes;
363 	tgtobj->stats.tx.comp_pkt.num += srcobj->tx.comp_pkt.num;
364 	tgtobj->stats.tx.ucast.num += srcobj->tx.ucast.num;
365 	tgtobj->stats.tx.ucast.bytes += srcobj->tx.ucast.bytes;
366 	tgtobj->stats.tx.mcast.num += srcobj->tx.mcast.num;
367 	tgtobj->stats.tx.mcast.bytes += srcobj->tx.mcast.bytes;
368 	tgtobj->stats.tx.bcast.num += srcobj->tx.bcast.num;
369 	tgtobj->stats.tx.bcast.bytes += srcobj->tx.bcast.bytes;
370 	tgtobj->stats.tx.tx_success.num += srcobj->tx.tx_success.num;
371 	tgtobj->stats.tx.tx_success.bytes +=
372 		srcobj->tx.tx_success.bytes;
373 	tgtobj->stats.tx.nawds_mcast.num +=
374 		srcobj->tx.nawds_mcast.num;
375 	tgtobj->stats.tx.nawds_mcast.bytes +=
376 		srcobj->tx.nawds_mcast.bytes;
377 	tgtobj->stats.tx.nawds_mcast_drop +=
378 		srcobj->tx.nawds_mcast_drop;
379 	tgtobj->stats.tx.tx_failed += srcobj->tx.tx_failed;
380 	tgtobj->stats.tx.ofdma += srcobj->tx.ofdma;
381 	tgtobj->stats.tx.stbc += srcobj->tx.stbc;
382 	tgtobj->stats.tx.ldpc += srcobj->tx.ldpc;
383 	tgtobj->stats.tx.retries += srcobj->tx.retries;
384 	tgtobj->stats.tx.non_amsdu_cnt += srcobj->tx.non_amsdu_cnt;
385 	tgtobj->stats.tx.amsdu_cnt += srcobj->tx.amsdu_cnt;
386 	tgtobj->stats.tx.non_ampdu_cnt += srcobj->tx.non_ampdu_cnt;
387 	tgtobj->stats.tx.ampdu_cnt += srcobj->tx.ampdu_cnt;
388 	tgtobj->stats.tx.dropped.fw_rem.num += srcobj->tx.dropped.fw_rem.num;
389 	tgtobj->stats.tx.dropped.fw_rem.bytes +=
390 			srcobj->tx.dropped.fw_rem.bytes;
391 	tgtobj->stats.tx.dropped.fw_rem_tx +=
392 			srcobj->tx.dropped.fw_rem_tx;
393 	tgtobj->stats.tx.dropped.fw_rem_notx +=
394 			srcobj->tx.dropped.fw_rem_notx;
395 	tgtobj->stats.tx.dropped.fw_reason1 +=
396 			srcobj->tx.dropped.fw_reason1;
397 	tgtobj->stats.tx.dropped.fw_reason2 +=
398 			srcobj->tx.dropped.fw_reason2;
399 	tgtobj->stats.tx.dropped.fw_reason3 +=
400 			srcobj->tx.dropped.fw_reason3;
401 	tgtobj->stats.tx.dropped.age_out += srcobj->tx.dropped.age_out;
402 	tgtobj->stats.rx.err.mic_err += srcobj->rx.err.mic_err;
403 	if (srcobj->rx.rssi != 0)
404 		tgtobj->stats.rx.rssi = srcobj->rx.rssi;
405 	tgtobj->stats.rx.rx_rate = srcobj->rx.rx_rate;
406 	tgtobj->stats.rx.err.decrypt_err += srcobj->rx.err.decrypt_err;
407 	tgtobj->stats.rx.non_ampdu_cnt += srcobj->rx.non_ampdu_cnt;
408 	tgtobj->stats.rx.amsdu_cnt += srcobj->rx.ampdu_cnt;
409 	tgtobj->stats.rx.non_amsdu_cnt += srcobj->rx.non_amsdu_cnt;
410 	tgtobj->stats.rx.amsdu_cnt += srcobj->rx.amsdu_cnt;
411 	tgtobj->stats.rx.nawds_mcast_drop += srcobj->rx.nawds_mcast_drop;
412 	tgtobj->stats.rx.to_stack.num += srcobj->rx.to_stack.num;
413 	tgtobj->stats.rx.to_stack.bytes += srcobj->rx.to_stack.bytes;
414 
415 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
416 		tgtobj->stats.rx.rcvd_reo[i].num +=
417 			srcobj->rx.rcvd_reo[i].num;
418 		tgtobj->stats.rx.rcvd_reo[i].bytes +=
419 			srcobj->rx.rcvd_reo[i].bytes;
420 	}
421 
422 	srcobj->rx.unicast.num =
423 		srcobj->rx.to_stack.num -
424 				(srcobj->rx.multicast.num);
425 	srcobj->rx.unicast.bytes =
426 		srcobj->rx.to_stack.bytes -
427 				(srcobj->rx.multicast.bytes);
428 
429 	tgtobj->stats.rx.unicast.num += srcobj->rx.unicast.num;
430 	tgtobj->stats.rx.unicast.bytes += srcobj->rx.unicast.bytes;
431 	tgtobj->stats.rx.multicast.num += srcobj->rx.multicast.num;
432 	tgtobj->stats.rx.multicast.bytes += srcobj->rx.multicast.bytes;
433 	tgtobj->stats.rx.bcast.num += srcobj->rx.bcast.num;
434 	tgtobj->stats.rx.bcast.bytes += srcobj->rx.bcast.bytes;
435 	tgtobj->stats.rx.raw.num += srcobj->rx.raw.num;
436 	tgtobj->stats.rx.raw.bytes += srcobj->rx.raw.bytes;
437 	tgtobj->stats.rx.intra_bss.pkts.num +=
438 			srcobj->rx.intra_bss.pkts.num;
439 	tgtobj->stats.rx.intra_bss.pkts.bytes +=
440 			srcobj->rx.intra_bss.pkts.bytes;
441 	tgtobj->stats.rx.intra_bss.fail.num +=
442 			srcobj->rx.intra_bss.fail.num;
443 	tgtobj->stats.rx.intra_bss.fail.bytes +=
444 			srcobj->rx.intra_bss.fail.bytes;
445 
446 	tgtobj->stats.tx.last_ack_rssi =
447 		srcobj->tx.last_ack_rssi;
448 	tgtobj->stats.rx.mec_drop.num += srcobj->rx.mec_drop.num;
449 	tgtobj->stats.rx.mec_drop.bytes += srcobj->rx.mec_drop.bytes;
450 }
451 
/**
 * dp_update_pdev_ingress_stats() - fold a vdev's TX-ingress (tx_i)
 * counters into its parent pdev's stats
 * @tgtobj: pdev accumulating the counters
 * @srcobj: vdev whose tx_i counters are added in
 *
 * Pure accumulation via the DP_STATS_AGGR* macros, with two
 * exceptions: dropped.dropped_pkt.num is recomputed from the pdev's
 * own per-cause drop totals, and tso.num_seg is overwritten rather
 * than summed (NOTE(review): inconsistent with every other field --
 * confirm whether last-writer-wins is intended there).
 */
static inline void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj,
						struct dp_vdev *srcobj)
{
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.nawds_mcast);

	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.rcvd);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.processed);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.reinject_pkts);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.inspect_pkts);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.raw.raw_pkt);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.raw.dma_map_error);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.tso.tso_pkt);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.tso.dropped_host.num);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.tso.dropped_target);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_host.num);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_target);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sg.sg_pkt);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.mcast_en.mcast_pkt);
	DP_STATS_AGGR(tgtobj, srcobj,
		      tx_i.mcast_en.dropped_map_error);
	DP_STATS_AGGR(tgtobj, srcobj,
		      tx_i.mcast_en.dropped_self_mac);
	DP_STATS_AGGR(tgtobj, srcobj,
		      tx_i.mcast_en.dropped_send_fail);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mcast_en.ucast);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.dma_error);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.ring_full);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.enqueue_fail);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.desc_na.num);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.res_full);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.headroom_insufficient);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified_raw);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.exception_fw);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.completion_fw);

	/* Total drop count is re-derived from the pdev's own totals */
	tgtobj->stats.tx_i.dropped.dropped_pkt.num =
		tgtobj->stats.tx_i.dropped.dma_error +
		tgtobj->stats.tx_i.dropped.ring_full +
		tgtobj->stats.tx_i.dropped.enqueue_fail +
		tgtobj->stats.tx_i.dropped.desc_na.num +
		tgtobj->stats.tx_i.dropped.res_full;

	tgtobj->stats.tx_i.tso.num_seg =
		srcobj->stats.tx_i.tso.num_seg;
}
498 
499 static inline void dp_update_vdev_stats(struct cdp_vdev_stats *tgtobj,
500 					struct dp_peer *srcobj)
501 {
502 	uint8_t i;
503 	uint8_t pream_type;
504 
505 	for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) {
506 		for (i = 0; i < MAX_MCS; i++) {
507 			tgtobj->tx.pkt_type[pream_type].
508 				mcs_count[i] +=
509 			srcobj->stats.tx.pkt_type[pream_type].
510 				mcs_count[i];
511 			tgtobj->rx.pkt_type[pream_type].
512 				mcs_count[i] +=
513 			srcobj->stats.rx.pkt_type[pream_type].
514 				mcs_count[i];
515 		}
516 	}
517 
518 	for (i = 0; i < MAX_BW; i++) {
519 		tgtobj->tx.bw[i] += srcobj->stats.tx.bw[i];
520 		tgtobj->rx.bw[i] += srcobj->stats.rx.bw[i];
521 	}
522 
523 	for (i = 0; i < SS_COUNT; i++) {
524 		tgtobj->tx.nss[i] += srcobj->stats.tx.nss[i];
525 		tgtobj->rx.nss[i] += srcobj->stats.rx.nss[i];
526 	}
527 
528 	for (i = 0; i < WME_AC_MAX; i++) {
529 		tgtobj->tx.wme_ac_type[i] +=
530 			srcobj->stats.tx.wme_ac_type[i];
531 		tgtobj->rx.wme_ac_type[i] +=
532 			srcobj->stats.rx.wme_ac_type[i];
533 		tgtobj->tx.excess_retries_per_ac[i] +=
534 			srcobj->stats.tx.excess_retries_per_ac[i];
535 	}
536 
537 	for (i = 0; i < MAX_GI; i++) {
538 		tgtobj->tx.sgi_count[i] +=
539 			srcobj->stats.tx.sgi_count[i];
540 		tgtobj->rx.sgi_count[i] +=
541 			srcobj->stats.rx.sgi_count[i];
542 	}
543 
544 	for (i = 0; i < MAX_RECEPTION_TYPES; i++)
545 		tgtobj->rx.reception_type[i] +=
546 			srcobj->stats.rx.reception_type[i];
547 
548 	tgtobj->tx.comp_pkt.bytes += srcobj->stats.tx.comp_pkt.bytes;
549 	tgtobj->tx.comp_pkt.num += srcobj->stats.tx.comp_pkt.num;
550 	tgtobj->tx.ucast.num += srcobj->stats.tx.ucast.num;
551 	tgtobj->tx.ucast.bytes += srcobj->stats.tx.ucast.bytes;
552 	tgtobj->tx.mcast.num += srcobj->stats.tx.mcast.num;
553 	tgtobj->tx.mcast.bytes += srcobj->stats.tx.mcast.bytes;
554 	tgtobj->tx.bcast.num += srcobj->stats.tx.bcast.num;
555 	tgtobj->tx.bcast.bytes += srcobj->stats.tx.bcast.bytes;
556 	tgtobj->tx.tx_success.num += srcobj->stats.tx.tx_success.num;
557 	tgtobj->tx.tx_success.bytes +=
558 		srcobj->stats.tx.tx_success.bytes;
559 	tgtobj->tx.nawds_mcast.num +=
560 		srcobj->stats.tx.nawds_mcast.num;
561 	tgtobj->tx.nawds_mcast.bytes +=
562 		srcobj->stats.tx.nawds_mcast.bytes;
563 	tgtobj->tx.nawds_mcast_drop +=
564 		srcobj->stats.tx.nawds_mcast_drop;
565 	tgtobj->tx.tx_failed += srcobj->stats.tx.tx_failed;
566 	tgtobj->tx.ofdma += srcobj->stats.tx.ofdma;
567 	tgtobj->tx.stbc += srcobj->stats.tx.stbc;
568 	tgtobj->tx.ldpc += srcobj->stats.tx.ldpc;
569 	tgtobj->tx.retries += srcobj->stats.tx.retries;
570 	tgtobj->tx.non_amsdu_cnt += srcobj->stats.tx.non_amsdu_cnt;
571 	tgtobj->tx.amsdu_cnt += srcobj->stats.tx.amsdu_cnt;
572 	tgtobj->tx.non_ampdu_cnt += srcobj->stats.tx.non_ampdu_cnt;
573 	tgtobj->tx.ampdu_cnt += srcobj->stats.tx.ampdu_cnt;
574 	tgtobj->tx.dropped.fw_rem.num += srcobj->stats.tx.dropped.fw_rem.num;
575 	tgtobj->tx.dropped.fw_rem.bytes +=
576 			srcobj->stats.tx.dropped.fw_rem.bytes;
577 	tgtobj->tx.dropped.fw_rem_tx +=
578 			srcobj->stats.tx.dropped.fw_rem_tx;
579 	tgtobj->tx.dropped.fw_rem_notx +=
580 			srcobj->stats.tx.dropped.fw_rem_notx;
581 	tgtobj->tx.dropped.fw_reason1 +=
582 			srcobj->stats.tx.dropped.fw_reason1;
583 	tgtobj->tx.dropped.fw_reason2 +=
584 			srcobj->stats.tx.dropped.fw_reason2;
585 	tgtobj->tx.dropped.fw_reason3 +=
586 			srcobj->stats.tx.dropped.fw_reason3;
587 	tgtobj->tx.dropped.age_out += srcobj->stats.tx.dropped.age_out;
588 	tgtobj->rx.err.mic_err += srcobj->stats.rx.err.mic_err;
589 	if (srcobj->stats.rx.rssi != 0)
590 		tgtobj->rx.rssi = srcobj->stats.rx.rssi;
591 	tgtobj->rx.rx_rate = srcobj->stats.rx.rx_rate;
592 	tgtobj->rx.err.decrypt_err += srcobj->stats.rx.err.decrypt_err;
593 	tgtobj->rx.non_ampdu_cnt += srcobj->stats.rx.non_ampdu_cnt;
594 	tgtobj->rx.amsdu_cnt += srcobj->stats.rx.ampdu_cnt;
595 	tgtobj->rx.non_amsdu_cnt += srcobj->stats.rx.non_amsdu_cnt;
596 	tgtobj->rx.amsdu_cnt += srcobj->stats.rx.amsdu_cnt;
597 	tgtobj->rx.nawds_mcast_drop += srcobj->stats.rx.nawds_mcast_drop;
598 	tgtobj->rx.to_stack.num += srcobj->stats.rx.to_stack.num;
599 	tgtobj->rx.to_stack.bytes += srcobj->stats.rx.to_stack.bytes;
600 
601 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
602 		tgtobj->rx.rcvd_reo[i].num +=
603 			srcobj->stats.rx.rcvd_reo[i].num;
604 		tgtobj->rx.rcvd_reo[i].bytes +=
605 			srcobj->stats.rx.rcvd_reo[i].bytes;
606 	}
607 
608 	srcobj->stats.rx.unicast.num =
609 		srcobj->stats.rx.to_stack.num -
610 				srcobj->stats.rx.multicast.num;
611 	srcobj->stats.rx.unicast.bytes =
612 		srcobj->stats.rx.to_stack.bytes -
613 				srcobj->stats.rx.multicast.bytes;
614 
615 	tgtobj->rx.unicast.num += srcobj->stats.rx.unicast.num;
616 	tgtobj->rx.unicast.bytes += srcobj->stats.rx.unicast.bytes;
617 	tgtobj->rx.multicast.num += srcobj->stats.rx.multicast.num;
618 	tgtobj->rx.multicast.bytes += srcobj->stats.rx.multicast.bytes;
619 	tgtobj->rx.bcast.num += srcobj->stats.rx.bcast.num;
620 	tgtobj->rx.bcast.bytes += srcobj->stats.rx.bcast.bytes;
621 	tgtobj->rx.raw.num += srcobj->stats.rx.raw.num;
622 	tgtobj->rx.raw.bytes += srcobj->stats.rx.raw.bytes;
623 	tgtobj->rx.intra_bss.pkts.num +=
624 			srcobj->stats.rx.intra_bss.pkts.num;
625 	tgtobj->rx.intra_bss.pkts.bytes +=
626 			srcobj->stats.rx.intra_bss.pkts.bytes;
627 	tgtobj->rx.intra_bss.fail.num +=
628 			srcobj->stats.rx.intra_bss.fail.num;
629 	tgtobj->rx.intra_bss.fail.bytes +=
630 			srcobj->stats.rx.intra_bss.fail.bytes;
631 	tgtobj->tx.last_ack_rssi =
632 		srcobj->stats.tx.last_ack_rssi;
633 	tgtobj->rx.mec_drop.num += srcobj->stats.rx.mec_drop.num;
634 	tgtobj->rx.mec_drop.bytes += srcobj->stats.rx.mec_drop.bytes;
635 }
636 
/*
 * DP_UPDATE_STATS() - aggregate _srcobj->stats into _tgtobj->stats.
 * Macro twin of dp_update_vdev_stats() for targets that keep their
 * counters under a .stats member (both objects are accessed via
 * ->stats, see DP_STATS_AGGR*). rx.rssi (when non-zero) and rx.rx_rate
 * are copied last-writer-wins via DP_STATS_UPD_STRUCT; everything else
 * is summed. Also writes _srcobj: rx.unicast is derived as
 * rx.to_stack - rx.multicast before aggregation. Loop locals i and
 * pream_type are scoped inside the do { } while (0) block.
 */
#define DP_UPDATE_STATS(_tgtobj, _srcobj)	\
	do {				\
		uint8_t i;		\
		uint8_t pream_type;	\
		for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \
			for (i = 0; i < MAX_MCS; i++) { \
				DP_STATS_AGGR(_tgtobj, _srcobj, \
					tx.pkt_type[pream_type].mcs_count[i]); \
				DP_STATS_AGGR(_tgtobj, _srcobj, \
					rx.pkt_type[pream_type].mcs_count[i]); \
			} \
		} \
		  \
		for (i = 0; i < MAX_BW; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \
		} \
		  \
		for (i = 0; i < SS_COUNT; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \
		} \
		for (i = 0; i < WME_AC_MAX; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \
		\
		} \
		\
		for (i = 0; i < MAX_GI; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \
		} \
		\
		for (i = 0; i < MAX_RECEPTION_TYPES; i++) \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \
		\
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \
								\
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \
		if (_srcobj->stats.rx.rssi != 0) \
			DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rssi); \
		DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \
								\
		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)	\
			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \
									\
		_srcobj->stats.rx.unicast.num = \
			_srcobj->stats.rx.to_stack.num - \
					_srcobj->stats.rx.multicast.num; \
		_srcobj->stats.rx.unicast.bytes = \
			_srcobj->stats.rx.to_stack.bytes - \
					_srcobj->stats.rx.multicast.bytes; \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \
								  \
		_tgtobj->stats.tx.last_ack_rssi =	\
			_srcobj->stats.tx.last_ack_rssi; \
	}  while (0)
730 
731 extern int dp_peer_find_attach(struct dp_soc *soc);
732 extern void dp_peer_find_detach(struct dp_soc *soc);
733 extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer);
734 extern void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer);
735 extern void dp_peer_find_hash_erase(struct dp_soc *soc);
736 extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer);
737 void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer);
738 extern void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
739 extern void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
740 extern void dp_peer_unref_delete(void *peer_handle);
741 extern void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer,
742 	unsigned tid, qdf_nbuf_t msdu_list);
743 extern void *dp_find_peer_by_addr(struct cdp_pdev *dev,
744 	uint8_t *peer_mac_addr, uint8_t *peer_id);
745 extern struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
746 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id);
747 
748 #ifndef CONFIG_WIN
749 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
750 		struct ol_txrx_desc_type *sta_desc);
751 QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id);
752 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
753 		struct cdp_vdev *vdev,
754 		uint8_t *peer_addr, uint8_t *local_id);
755 uint16_t dp_local_peer_id(void *peer);
756 void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id);
757 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
758 		enum ol_txrx_peer_state state);
759 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id);
760 struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
761 		uint8_t sta_id);
762 struct cdp_vdev *dp_get_vdev_for_peer(void *peer);
763 uint8_t *dp_peer_get_peer_mac_addr(void *peer);
764 int dp_get_peer_state(void *peer_handle);
765 void dp_local_peer_id_pool_init(struct dp_pdev *pdev);
766 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer);
767 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer);
768 #else
769 static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
770 {
771 }
772 
/* No-op: local peer-id allocation is only used on the non-CONFIG_WIN path */
static inline
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
{
}
777 
/* No-op: local peer-id free is only used on the non-CONFIG_WIN path */
static inline
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
{
}
782 #endif
783 int dp_addba_resp_tx_completion_wifi3(void *peer_handle, uint8_t tid,
784 	int status);
785 extern int dp_addba_requestprocess_wifi3(void *peer_handle,
786 	uint8_t dialogtoken, uint16_t tid, uint16_t batimeout,
787 	uint16_t buffersize, uint16_t startseqnum);
788 extern void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
789 	uint8_t *dialogtoken, uint16_t *statuscode,
790 	uint16_t *buffersize, uint16_t *batimeout);
791 extern void dp_set_addba_response(void *peer_handle, uint8_t tid,
792 	uint16_t statuscode);
793 extern int dp_delba_process_wifi3(void *peer_handle,
794 	int tid, uint16_t reasoncode);
795 /*
796  * dp_delba_tx_completion_wifi3() -  Handle delba tx completion
797  *
798  * @peer_handle: Peer handle
799  * @tid: Tid number
800  * @status: Tx completion status
801  * Indicate status of delba Tx to DP for stats update and retry
802  * delba if tx failed.
803  *
804  */
805 int dp_delba_tx_completion_wifi3(void *peer_handle, uint8_t tid,
806 				  int status);
807 extern int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
808 	uint32_t ba_window_size, uint32_t start_seq);
809 
810 extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc,
811 	enum hal_reo_cmd_type type, struct hal_reo_cmd_params *params,
812 	void (*callback_fn), void *data);
813 
814 extern void dp_reo_cmdlist_destroy(struct dp_soc *soc);
815 
816 /**
817  * dp_reo_status_ring_handler - Handler for REO Status ring
818  * @soc: DP Soc handle
819  *
820  * Returns: Number of descriptors reaped
821  */
822 uint32_t dp_reo_status_ring_handler(struct dp_soc *soc);
823 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
824 			     struct cdp_vdev_stats *vdev_stats);
825 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
826 	union hal_reo_status *reo_status);
827 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
828 		union hal_reo_status *reo_status);
829 uint16_t dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle,
830 		qdf_nbuf_t nbuf, uint8_t newmac[][QDF_MAC_ADDR_SIZE],
831 		uint8_t new_mac_cnt);
832 void dp_tx_me_alloc_descriptor(struct cdp_pdev *pdev);
833 
834 void dp_tx_me_free_descriptor(struct cdp_pdev *pdev);
835 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
836 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
837 		uint32_t config_param_1, uint32_t config_param_2,
838 		uint32_t config_param_3, int cookie, int cookie_msb,
839 		uint8_t mac_id);
840 void dp_htt_stats_print_tag(uint8_t tag_type, uint32_t *tag_buf);
841 void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf);
842 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*callback_fn),
843 		void *cb_ctxt);
844 void dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle,
845 	struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,
846 	 uint32_t *rx_pn);
847 
848 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id);
849 void dp_set_michael_key(struct cdp_peer *peer_handle,
850 			bool is_unicast, uint32_t *key);
851 #ifdef CONFIG_WIN
852 uint32_t dp_pdev_tid_stats_display(void *pdev_handle,
853 			enum _ol_ath_param_t param, uint32_t value, void *buff);
854 #endif
855 
856 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
857 			   uint8_t tid, uint8_t mode);
858 
859 /**
860  * dp_print_ring_stats(): Print tail and head pointer
861  * @pdev: DP_PDEV handle
862  *
863  * Return:void
864  */
865 void dp_print_ring_stats(struct dp_pdev *pdev);
866 
867 /**
868  * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
869  * @pdev_handle: DP pdev handle
870  *
871  * Return - void
872  */
873 void dp_print_pdev_cfg_params(struct dp_pdev *pdev);
874 
875 /**
876  * dp_print_soc_cfg_params()- Dump soc wlan config parameters
877  * @soc_handle: Soc handle
878  *
879  * Return: void
880  */
881 void dp_print_soc_cfg_params(struct dp_soc *soc);
882 
883 /**
 * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
885  * @ring_type: Ring
886  *
887  * Return: char const pointer
888  */
889 const
890 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type);
891 
892 /*
893  * dp_txrx_path_stats() - Function to display dump stats
894  * @soc - soc handle
895  *
896  * return: none
897  */
898 void dp_txrx_path_stats(struct dp_soc *soc);
899 
900 /*
901  * dp_print_per_ring_stats(): Packet count per ring
902  * @soc - soc handle
903  *
904  * Return - None
905  */
906 void dp_print_per_ring_stats(struct dp_soc *soc);
907 
908 /**
909  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
910  * @pdev: DP PDEV handle
911  *
912  * return: void
913  */
914 void dp_aggregate_pdev_stats(struct dp_pdev *pdev);
915 
916 /**
917  * dp_print_rx_rates(): Print Rx rate stats
918  * @vdev: DP_VDEV handle
919  *
920  * Return:void
921  */
922 void dp_print_rx_rates(struct dp_vdev *vdev);
923 
924 /**
925  * dp_print_tx_rates(): Print tx rates
926  * @vdev: DP_VDEV handle
927  *
928  * Return:void
929  */
930 void dp_print_tx_rates(struct dp_vdev *vdev);
931 
932 /**
933  * dp_print_peer_stats():print peer stats
934  * @peer: DP_PEER handle
935  *
936  * return void
937  */
938 void dp_print_peer_stats(struct dp_peer *peer);
939 
940 /**
941  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
942  * @pdev: DP_PDEV Handle
943  *
944  * Return:void
945  */
946 void
947 dp_print_pdev_tx_stats(struct dp_pdev *pdev);
948 
949 /**
950  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
951  * @pdev: DP_PDEV Handle
952  *
953  * Return: void
954  */
955 void
956 dp_print_pdev_rx_stats(struct dp_pdev *pdev);
957 
958 /**
959  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
960  * @pdev: DP_PDEV Handle
961  *
962  * Return: void
963  */
964 void
965 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev);
966 
967 /**
968  * dp_print_soc_tx_stats(): Print SOC level  stats
969  * @soc DP_SOC Handle
970  *
971  * Return: void
972  */
973 void dp_print_soc_tx_stats(struct dp_soc *soc);
974 
975 /**
976  * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc
977  * @soc: dp_soc handle
978  *
979  * Return: None
980  */
981 void dp_print_soc_interrupt_stats(struct dp_soc *soc);
982 
983 /**
984  * dp_print_soc_rx_stats: Print SOC level Rx stats
985  * @soc: DP_SOC Handle
986  *
987  * Return:void
988  */
989 void dp_print_soc_rx_stats(struct dp_soc *soc);
990 
991 /**
992  * dp_get_mac_id_for_pdev() -  Return mac corresponding to pdev for mac
993  *
994  * @mac_id: MAC id
995  * @pdev_id: pdev_id corresponding to pdev, 0 for MCL
996  *
997  * Single pdev using both MACs will operate on both MAC rings,
998  * which is the case for MCL.
999  * For WIN each PDEV will operate one ring, so index is zero.
1000  *
1001  */
1002 static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id)
1003 {
1004 	if (mac_id && pdev_id) {
1005 		qdf_print("Both mac_id and pdev_id cannot be non zero");
1006 		QDF_BUG(0);
1007 		return 0;
1008 	}
1009 	return (mac_id + pdev_id);
1010 }
1011 
1012 /*
1013  * dp_get_mac_id_for_mac() -  Return mac corresponding WIN and MCL mac_ids
1014  *
1015  * @soc: handle to DP soc
1016  * @mac_id: MAC id
1017  *
1018  * Single pdev using both MACs will operate on both MAC rings,
1019  * which is the case for MCL.
1020  * For WIN each PDEV will operate one ring, so index is zero.
1021  *
1022  */
1023 static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id)
1024 {
1025 	/*
1026 	 * Single pdev using both MACs will operate on both MAC rings,
1027 	 * which is the case for MCL.
1028 	 */
1029 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
1030 		return mac_id;
1031 
1032 	/* For WIN each PDEV will operate one ring, so index is zero. */
1033 	return 0;
1034 }
1035 
1036 bool dp_is_soc_reinit(struct dp_soc *soc);
1037 
1038 /*
1039  * dp_is_subtype_data() - check if the frame subtype is data
1040  *
1041  * @frame_ctrl: Frame control field
1042  *
1043  * check the frame control field and verify if the packet
1044  * is a data packet.
1045  *
1046  * Return: true or false
1047  */
1048 static inline bool dp_is_subtype_data(uint16_t frame_ctrl)
1049 {
1050 	if (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_TYPE_MASK) ==
1051 	    QDF_IEEE80211_FC0_TYPE_DATA) &&
1052 	    (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
1053 	    QDF_IEEE80211_FC0_SUBTYPE_DATA) ||
1054 	    ((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
1055 	    QDF_IEEE80211_FC0_SUBTYPE_QOS))) {
1056 		return true;
1057 	}
1058 
1059 	return false;
1060 }
1061 
1062 #ifdef WDI_EVENT_ENABLE
1063 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
1064 				uint32_t stats_type_upload_mask,
1065 				uint8_t mac_id);
1066 
1067 int dp_wdi_event_unsub(struct cdp_pdev *txrx_pdev_handle,
1068 	void *event_cb_sub_handle,
1069 	uint32_t event);
1070 
1071 int dp_wdi_event_sub(struct cdp_pdev *txrx_pdev_handle,
1072 	void *event_cb_sub_handle,
1073 	uint32_t event);
1074 
1075 void dp_wdi_event_handler(enum WDI_EVENT event, void *soc,
1076 		void *data, u_int16_t peer_id,
1077 		int status, u_int8_t pdev_id);
1078 
1079 int dp_wdi_event_attach(struct dp_pdev *txrx_pdev);
1080 int dp_wdi_event_detach(struct dp_pdev *txrx_pdev);
1081 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
1082 	bool enable);
1083 void *dp_get_pldev(struct cdp_pdev *txrx_pdev);
1084 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn);
1085 
1086 static inline void dp_hif_update_pipe_callback(void *soc, void *cb_context,
1087 	QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), uint8_t pipe_id)
1088 {
1089 	struct hif_msg_callbacks hif_pipe_callbacks;
1090 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
1091 
1092 	/* TODO: Temporary change to bypass HTC connection for this new
1093 	 * HIF pipe, which will be used for packet log and other high-
1094 	 * priority HTT messages. Proper HTC connection to be added
1095 	 * later once required FW changes are available
1096 	 */
1097 	hif_pipe_callbacks.rxCompletionHandler = callback;
1098 	hif_pipe_callbacks.Context = cb_context;
1099 	hif_update_pipe_callback(dp_soc->hif_handle,
1100 		DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks);
1101 }
1102 
1103 QDF_STATUS dp_peer_stats_notify(struct dp_peer *peer);
1104 
1105 #else
/* Stub when WDI_EVENT_ENABLE is not defined: nothing to unsubscribe;
 * returns 0.
 */
static inline int dp_wdi_event_unsub(struct cdp_pdev *txrx_pdev_handle,
	void *event_cb_sub_handle,
	uint32_t event)
{
	return 0;
}
1112 
/* Stub when WDI_EVENT_ENABLE is not defined: nothing to subscribe to;
 * returns 0.
 */
static inline int dp_wdi_event_sub(struct cdp_pdev *txrx_pdev_handle,
	void *event_cb_sub_handle,
	uint32_t event)
{
	return 0;
}
1119 
/* Stub when WDI_EVENT_ENABLE is not defined: events are silently dropped */
static inline void dp_wdi_event_handler(enum WDI_EVENT event, void *soc,
		void *data, u_int16_t peer_id,
		int status, u_int8_t pdev_id)
{
}
1125 
/* Stub when WDI_EVENT_ENABLE is not defined: nothing to attach; returns 0 */
static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev)
{
	return 0;
}
1130 
/* Stub when WDI_EVENT_ENABLE is not defined: nothing to detach; returns 0 */
static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev)
{
	return 0;
}
1135 
/* Stub when WDI_EVENT_ENABLE is not defined: pktlog cannot be enabled;
 * returns 0.
 */
static inline int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
	bool enable)
{
	return 0;
}
1141 static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
1142 		uint32_t stats_type_upload_mask, uint8_t mac_id)
1143 {
1144 	return 0;
1145 }
/* Stub when WDI_EVENT_ENABLE is not defined: the high-priority HTT pipe
 * callback is not registered.
 */
static inline void dp_hif_update_pipe_callback(void *soc, void *cb_context,
	QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), uint8_t pipe_id)
{
}
1150 
/* Stub when WDI_EVENT_ENABLE is not defined: no notification is raised;
 * reports success so callers proceed normally.
 */
static inline QDF_STATUS dp_peer_stats_notify(struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}
1155 
#endif /* WDI_EVENT_ENABLE */
1157 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
1158 void dp_tx_dump_flow_pool_info(void *soc);
1159 int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
1160 	bool force);
1161 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
1162 
1163 #ifdef PEER_PROTECTED_ACCESS
1164 /**
1165  * dp_peer_unref_del_find_by_id() - dec ref and del peer if ref count is
1166  *                                  taken by dp_peer_find_by_id
1167  * @peer: peer context
1168  *
1169  * Return: none
1170  */
1171 static inline void dp_peer_unref_del_find_by_id(struct dp_peer *peer)
1172 {
1173 	dp_peer_unref_delete(peer);
1174 }
1175 #else
/* No-op when PEER_PROTECTED_ACCESS is not defined — presumably
 * dp_peer_find_by_id() takes no reference in that build, so there is
 * nothing to release here; confirm against dp_peer_find_by_id().
 */
static inline void dp_peer_unref_del_find_by_id(struct dp_peer *peer)
{
}
1179 #endif
1180 
1181 #ifdef CONFIG_WIN
1182 /**
1183  * dp_pdev_print_delay_stats(): Print pdev level delay stats
1184  * @pdev: DP_PDEV handle
1185  *
1186  * Return:void
1187  */
1188 void dp_pdev_print_delay_stats(struct dp_pdev *pdev);
1189 
1190 /**
1191  * dp_pdev_print_tid_stats(): Print pdev level tid stats
1192  * @pdev: DP_PDEV handle
1193  *
1194  * Return:void
1195  */
1196 void dp_pdev_print_tid_stats(struct dp_pdev *pdev);
1197 #endif /* CONFIG_WIN */
1198 
1199 void dp_soc_set_txrx_ring_map(struct dp_soc *soc);
1200 
1201 #ifndef WLAN_TX_PKT_CAPTURE_ENH
1202 /**
1203  * dp_tx_ppdu_stats_attach - Initialize Tx PPDU stats and enhanced capture
1204  * @pdev: DP PDEV
1205  *
1206  * Return: none
1207  */
1208 static inline void dp_tx_ppdu_stats_attach(struct dp_pdev *pdev)
1209 {
1210 }
1211 
1212 /**
1213  * dp_tx_ppdu_stats_detach - Cleanup Tx PPDU stats and enhanced capture
1214  * @pdev: DP PDEV
1215  *
1216  * Return: none
1217  */
1218 static inline void dp_tx_ppdu_stats_detach(struct dp_pdev *pdev)
1219 {
1220 }
1221 
1222 /**
1223  * dp_tx_ppdu_stats_process - Deferred PPDU stats handler
1224  * @context: Opaque work context (PDEV)
1225  *
1226  * Return: none
1227  */
1228 static  inline void dp_tx_ppdu_stats_process(void *context)
1229 {
1230 }
1231 
1232 /**
1233  * dp_tx_add_to_comp_queue() - add completion msdu to queue
1234  * @soc: DP Soc handle
1235  * @tx_desc: software Tx descriptor
1236  * @ts : Tx completion status from HAL/HTT descriptor
1237  * @peer: DP peer
1238  *
1239  * Return: none
1240  */
1241 static inline
1242 QDF_STATUS dp_tx_add_to_comp_queue(struct dp_soc *soc,
1243 				   struct dp_tx_desc_s *desc,
1244 				   struct hal_tx_completion_status *ts,
1245 				   struct dp_peer *peer)
1246 {
1247 	return QDF_STATUS_E_FAILURE;
1248 }
1249 #endif
1250 
1251 #endif /* #ifndef _DP_INTERNAL_H_ */
1252