/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_INTERNAL_H_
#define _DP_INTERNAL_H_

#include "dp_types.h"

#define RX_BUFFER_SIZE_PKTLOG_LITE 1024

/* Macro for NYSM value received in VHT TLV */
#define VHT_SGI_NYSM 3

#if DP_PRINT_ENABLE
#include <stdarg.h>       /* va_list */
#include <qdf_types.h> /* qdf_vprint */
#include <cdp_txrx_handle.h>

enum {
	/* FATAL_ERR - print only irrecoverable error messages */
	DP_PRINT_LEVEL_FATAL_ERR,

	/* ERR - include non-fatal err messages */
	DP_PRINT_LEVEL_ERR,

	/* WARN - include warnings */
	DP_PRINT_LEVEL_WARN,

	/* INFO1 - include fundamental, infrequent events */
	DP_PRINT_LEVEL_INFO1,

	/* INFO2 - include non-fundamental but infrequent events */
	DP_PRINT_LEVEL_INFO2,
};

#define dp_print(level, fmt, ...) do { \
	if (level <= g_txrx_print_level) \
		qdf_print(fmt, ## __VA_ARGS__); \
} while (0)
#define DP_PRINT(level, fmt, ...) do { \
	dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#define DP_PRINT(level, fmt, ...)
#endif /* DP_PRINT_ENABLE */

#define DP_TRACE(LVL, fmt, args ...)                             \
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL,       \
		fmt, ## args)

#define DP_TRACE_STATS(LVL, fmt, args ...)                             \
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL,       \
		fmt, ## args)

#define DP_PRINT_STATS(fmt, args ...)	\
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,       \
		fmt, ## args)

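/*
 * Illustrative usage sketch (comment only, hypothetical call sites):
 * these macros expand to QDF_TRACE() with QDF_MODULE_ID_DP, so callers
 * pass just the level suffix and a printf-style format:
 *
 *	DP_TRACE(ERROR, "tx desc alloc failed on ring %d", ring_id);
 *	DP_PRINT_STATS("Tx completions processed: %u", num_processed);
 */
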
#define DP_STATS_INIT(_handle) \
	qdf_mem_set(&((_handle)->stats), sizeof((_handle)->stats), 0x0)

#define DP_STATS_CLR(_handle) \
	qdf_mem_set(&((_handle)->stats), sizeof((_handle)->stats), 0x0)

#ifndef DISABLE_DP_STATS
#define DP_STATS_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
	if (_cond && likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_STATS_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field -= _delta; \
}

#define DP_STATS_UPD(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field = _delta; \
}

#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \
{ \
	DP_STATS_INC(_handle, _field.num, _count); \
	DP_STATS_INC(_handle, _field.bytes, _bytes) \
}

#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
{ \
	DP_STATS_INCC(_handle, _field.num, _count, _cond); \
	DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
}

#define DP_STATS_AGGR(_handle_a, _handle_b, _field) \
{ \
	_handle_a->stats._field += _handle_b->stats._field; \
}

#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \
{ \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes); \
}

#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \
{ \
	_handle_a->stats._field = _handle_b->stats._field; \
}

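/*
 * Illustrative usage sketch (comment only, hypothetical call sites):
 * the DP_STATS_* helpers take any object that embeds a "stats" member
 * (peer, vdev, pdev, soc) plus a field path inside it, and compile out
 * entirely when DISABLE_DP_STATS is defined:
 *
 *	DP_STATS_INC(peer, tx.retries, 1);
 *	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
 *	DP_STATS_INCC(peer, tx.stbc, 1, is_stbc);
 */
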
#define DP_HIST_INIT() \
	uint32_t num_of_packets[MAX_PDEV_CNT] = {0};

#define DP_HIST_PACKET_COUNT_INC(_pdev_id) \
{ \
	++num_of_packets[_pdev_id]; \
}

#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
	do {                                                              \
		if (_p_cntrs == 1) {                                      \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_1, 1);             \
		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_2_20, 1);          \
		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_21_40, 1);         \
		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_41_60, 1);         \
		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_61_80, 1);         \
		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_81_100, 1);        \
		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_101_200, 1);       \
		} else if (_p_cntrs > 200) {                              \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_201_plus, 1);      \
		}                                                         \
	} while (0)

#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
	do {                                                              \
		if (_p_cntrs == 1) {                                      \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_1, 1);              \
		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_2_20, 1);           \
		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_21_40, 1);          \
		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_41_60, 1);          \
		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_61_80, 1);          \
		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_81_100, 1);         \
		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_101_200, 1);        \
		} else if (_p_cntrs > 200) {                              \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_201_plus, 1);       \
		}                                                         \
	} while (0)

#define DP_TX_HIST_STATS_PER_PDEV() \
	do { \
		uint8_t hist_stats = 0; \
		for (hist_stats = 0; hist_stats < soc->pdev_count; \
				hist_stats++) { \
			DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
					num_of_packets[hist_stats]); \
		} \
	} while (0)

#define DP_RX_HIST_STATS_PER_PDEV() \
	do { \
		uint8_t hist_stats = 0; \
		for (hist_stats = 0; hist_stats < soc->pdev_count; \
				hist_stats++) { \
			DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
					num_of_packets[hist_stats]); \
		} \
	} while (0)

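/*
 * Illustrative usage sketch (comment only; the loop is pseudocode and a
 * local "soc" pointer is assumed, as the *_PER_PDEV macros require): a
 * processing path declares the per-pdev counters with DP_HIST_INIT(),
 * bumps them per packet, and folds them into the pdev histograms on
 * exit:
 *
 *	DP_HIST_INIT();
 *	for each completed descriptor:
 *		DP_HIST_PACKET_COUNT_INC(pdev->pdev_id);
 *	DP_TX_HIST_STATS_PER_PDEV();
 */
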
#else
#define DP_STATS_INC(_handle, _field, _delta)
#define DP_STATS_INCC(_handle, _field, _delta, _cond)
#define DP_STATS_DEC(_handle, _field, _delta)
#define DP_STATS_UPD(_handle, _field, _delta)
#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field)
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
#define DP_STATS_AGGR(_handle_a, _handle_b, _field)
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
#define DP_HIST_INIT()
#define DP_HIST_PACKET_COUNT_INC(_pdev_id)
#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
#define DP_RX_HIST_STATS_PER_PDEV()
#define DP_TX_HIST_STATS_PER_PDEV()
#endif /* DISABLE_DP_STATS */

#define DP_HTT_T2H_HP_PIPE 5

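/**
 * dp_update_pdev_stats() - aggregate vdev-level stats into the pdev
 * @tgtobj: pdev whose cumulative stats are updated
 * @srcobj: vdev stats to fold in
 *
 * Return: none
 */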
static inline void dp_update_pdev_stats(struct dp_pdev *tgtobj,
					struct cdp_vdev_stats *srcobj)
{
	uint8_t i;
	uint8_t pream_type;

	for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) {
		for (i = 0; i < MAX_MCS; i++) {
			tgtobj->stats.tx.pkt_type[pream_type].
				mcs_count[i] +=
			srcobj->tx.pkt_type[pream_type].
				mcs_count[i];
			tgtobj->stats.rx.pkt_type[pream_type].
				mcs_count[i] +=
			srcobj->rx.pkt_type[pream_type].
				mcs_count[i];
		}
	}

	for (i = 0; i < MAX_BW; i++) {
		tgtobj->stats.tx.bw[i] += srcobj->tx.bw[i];
		tgtobj->stats.rx.bw[i] += srcobj->rx.bw[i];
	}

	for (i = 0; i < SS_COUNT; i++) {
		tgtobj->stats.tx.nss[i] += srcobj->tx.nss[i];
		tgtobj->stats.rx.nss[i] += srcobj->rx.nss[i];
	}

	for (i = 0; i < WME_AC_MAX; i++) {
		tgtobj->stats.tx.wme_ac_type[i] +=
			srcobj->tx.wme_ac_type[i];
		tgtobj->stats.rx.wme_ac_type[i] +=
			srcobj->rx.wme_ac_type[i];
		tgtobj->stats.tx.excess_retries_per_ac[i] +=
			srcobj->tx.excess_retries_per_ac[i];
	}

	for (i = 0; i < MAX_GI; i++) {
		tgtobj->stats.tx.sgi_count[i] +=
			srcobj->tx.sgi_count[i];
		tgtobj->stats.rx.sgi_count[i] +=
			srcobj->rx.sgi_count[i];
	}

	for (i = 0; i < MAX_RECEPTION_TYPES; i++)
		tgtobj->stats.rx.reception_type[i] +=
			srcobj->rx.reception_type[i];

	tgtobj->stats.tx.comp_pkt.bytes += srcobj->tx.comp_pkt.bytes;
	tgtobj->stats.tx.comp_pkt.num += srcobj->tx.comp_pkt.num;
	tgtobj->stats.tx.ucast.num += srcobj->tx.ucast.num;
	tgtobj->stats.tx.ucast.bytes += srcobj->tx.ucast.bytes;
	tgtobj->stats.tx.mcast.num += srcobj->tx.mcast.num;
	tgtobj->stats.tx.mcast.bytes += srcobj->tx.mcast.bytes;
	tgtobj->stats.tx.bcast.num += srcobj->tx.bcast.num;
	tgtobj->stats.tx.bcast.bytes += srcobj->tx.bcast.bytes;
	tgtobj->stats.tx.tx_success.num += srcobj->tx.tx_success.num;
	tgtobj->stats.tx.tx_success.bytes +=
		srcobj->tx.tx_success.bytes;
	tgtobj->stats.tx.nawds_mcast.num +=
		srcobj->tx.nawds_mcast.num;
	tgtobj->stats.tx.nawds_mcast.bytes +=
		srcobj->tx.nawds_mcast.bytes;
	tgtobj->stats.tx.nawds_mcast_drop +=
		srcobj->tx.nawds_mcast_drop;
	tgtobj->stats.tx.tx_failed += srcobj->tx.tx_failed;
	tgtobj->stats.tx.ofdma += srcobj->tx.ofdma;
	tgtobj->stats.tx.stbc += srcobj->tx.stbc;
	tgtobj->stats.tx.ldpc += srcobj->tx.ldpc;
	tgtobj->stats.tx.retries += srcobj->tx.retries;
	tgtobj->stats.tx.non_amsdu_cnt += srcobj->tx.non_amsdu_cnt;
	tgtobj->stats.tx.amsdu_cnt += srcobj->tx.amsdu_cnt;
	tgtobj->stats.tx.dropped.fw_rem.num += srcobj->tx.dropped.fw_rem.num;
	tgtobj->stats.tx.dropped.fw_rem.bytes +=
			srcobj->tx.dropped.fw_rem.bytes;
	tgtobj->stats.tx.dropped.fw_rem_tx +=
			srcobj->tx.dropped.fw_rem_tx;
	tgtobj->stats.tx.dropped.fw_rem_notx +=
			srcobj->tx.dropped.fw_rem_notx;
	tgtobj->stats.tx.dropped.fw_reason1 +=
			srcobj->tx.dropped.fw_reason1;
	tgtobj->stats.tx.dropped.fw_reason2 +=
			srcobj->tx.dropped.fw_reason2;
	tgtobj->stats.tx.dropped.fw_reason3 +=
			srcobj->tx.dropped.fw_reason3;
	tgtobj->stats.tx.dropped.age_out += srcobj->tx.dropped.age_out;
	tgtobj->stats.rx.err.mic_err += srcobj->rx.err.mic_err;
	if (srcobj->rx.rssi != 0)
		tgtobj->stats.rx.rssi = srcobj->rx.rssi;
	tgtobj->stats.rx.rx_rate = srcobj->rx.rx_rate;
	tgtobj->stats.rx.err.decrypt_err += srcobj->rx.err.decrypt_err;
	tgtobj->stats.rx.non_ampdu_cnt += srcobj->rx.non_ampdu_cnt;
	tgtobj->stats.rx.ampdu_cnt += srcobj->rx.ampdu_cnt;
	tgtobj->stats.rx.non_amsdu_cnt += srcobj->rx.non_amsdu_cnt;
	tgtobj->stats.rx.amsdu_cnt += srcobj->rx.amsdu_cnt;
	tgtobj->stats.rx.nawds_mcast_drop += srcobj->rx.nawds_mcast_drop;
	tgtobj->stats.rx.to_stack.num += srcobj->rx.to_stack.num;
	tgtobj->stats.rx.to_stack.bytes += srcobj->rx.to_stack.bytes;

	for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
		tgtobj->stats.rx.rcvd_reo[i].num +=
			srcobj->rx.rcvd_reo[i].num;
		tgtobj->stats.rx.rcvd_reo[i].bytes +=
			srcobj->rx.rcvd_reo[i].bytes;
	}

	srcobj->rx.unicast.num =
		srcobj->rx.to_stack.num -
				(srcobj->rx.multicast.num);
	srcobj->rx.unicast.bytes =
		srcobj->rx.to_stack.bytes -
				(srcobj->rx.multicast.bytes);

	tgtobj->stats.rx.unicast.num += srcobj->rx.unicast.num;
	tgtobj->stats.rx.unicast.bytes += srcobj->rx.unicast.bytes;
	tgtobj->stats.rx.multicast.num += srcobj->rx.multicast.num;
	tgtobj->stats.rx.multicast.bytes += srcobj->rx.multicast.bytes;
	tgtobj->stats.rx.bcast.num += srcobj->rx.bcast.num;
	tgtobj->stats.rx.bcast.bytes += srcobj->rx.bcast.bytes;
	tgtobj->stats.rx.raw.num += srcobj->rx.raw.num;
	tgtobj->stats.rx.raw.bytes += srcobj->rx.raw.bytes;
	tgtobj->stats.rx.intra_bss.pkts.num +=
			srcobj->rx.intra_bss.pkts.num;
	tgtobj->stats.rx.intra_bss.pkts.bytes +=
			srcobj->rx.intra_bss.pkts.bytes;
	tgtobj->stats.rx.intra_bss.fail.num +=
			srcobj->rx.intra_bss.fail.num;
	tgtobj->stats.rx.intra_bss.fail.bytes +=
			srcobj->rx.intra_bss.fail.bytes;

	tgtobj->stats.tx.last_ack_rssi =
		srcobj->tx.last_ack_rssi;
	tgtobj->stats.rx.mec_drop.num += srcobj->rx.mec_drop.num;
	tgtobj->stats.rx.mec_drop.bytes += srcobj->rx.mec_drop.bytes;
}

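/**
 * dp_update_pdev_ingress_stats() - aggregate vdev tx-ingress stats into
 *				    the pdev
 * @tgtobj: pdev whose cumulative tx_i stats are updated
 * @srcobj: vdev whose tx_i stats are folded in
 *
 * Return: none
 */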
static inline void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj,
						struct dp_vdev *srcobj)
{
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.nawds_mcast);

	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.rcvd);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.processed);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.reinject_pkts);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.inspect_pkts);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.raw.raw_pkt);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.raw.dma_map_error);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.tso.tso_pkt);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.tso.dropped_host.num);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.tso.dropped_target);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_host.num);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_target);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sg.sg_pkt);
	DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.mcast_en.mcast_pkt);
	DP_STATS_AGGR(tgtobj, srcobj,
		      tx_i.mcast_en.dropped_map_error);
	DP_STATS_AGGR(tgtobj, srcobj,
		      tx_i.mcast_en.dropped_self_mac);
	DP_STATS_AGGR(tgtobj, srcobj,
		      tx_i.mcast_en.dropped_send_fail);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mcast_en.ucast);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.dma_error);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.ring_full);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.enqueue_fail);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.desc_na.num);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.res_full);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.headroom_insufficient);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified_raw);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.exception_fw);
	DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.completion_fw);

	tgtobj->stats.tx_i.dropped.dropped_pkt.num =
		tgtobj->stats.tx_i.dropped.dma_error +
		tgtobj->stats.tx_i.dropped.ring_full +
		tgtobj->stats.tx_i.dropped.enqueue_fail +
		tgtobj->stats.tx_i.dropped.desc_na.num +
		tgtobj->stats.tx_i.dropped.res_full;

	tgtobj->stats.tx_i.tso.num_seg =
		srcobj->stats.tx_i.tso.num_seg;
}

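/**
 * dp_update_vdev_stats() - aggregate peer-level stats into a vdev
 * @tgtobj: vdev stats structure being updated
 * @srcobj: peer whose stats are folded in
 *
 * Return: none
 */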
static inline void dp_update_vdev_stats(struct cdp_vdev_stats *tgtobj,
					struct dp_peer *srcobj)
{
	uint8_t i;
	uint8_t pream_type;

	for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) {
		for (i = 0; i < MAX_MCS; i++) {
			tgtobj->tx.pkt_type[pream_type].
				mcs_count[i] +=
			srcobj->stats.tx.pkt_type[pream_type].
				mcs_count[i];
			tgtobj->rx.pkt_type[pream_type].
				mcs_count[i] +=
			srcobj->stats.rx.pkt_type[pream_type].
				mcs_count[i];
		}
	}

	for (i = 0; i < MAX_BW; i++) {
		tgtobj->tx.bw[i] += srcobj->stats.tx.bw[i];
		tgtobj->rx.bw[i] += srcobj->stats.rx.bw[i];
	}

	for (i = 0; i < SS_COUNT; i++) {
		tgtobj->tx.nss[i] += srcobj->stats.tx.nss[i];
		tgtobj->rx.nss[i] += srcobj->stats.rx.nss[i];
	}

	for (i = 0; i < WME_AC_MAX; i++) {
		tgtobj->tx.wme_ac_type[i] +=
			srcobj->stats.tx.wme_ac_type[i];
		tgtobj->rx.wme_ac_type[i] +=
			srcobj->stats.rx.wme_ac_type[i];
		tgtobj->tx.excess_retries_per_ac[i] +=
			srcobj->stats.tx.excess_retries_per_ac[i];
	}

	for (i = 0; i < MAX_GI; i++) {
		tgtobj->tx.sgi_count[i] +=
			srcobj->stats.tx.sgi_count[i];
		tgtobj->rx.sgi_count[i] +=
			srcobj->stats.rx.sgi_count[i];
	}

	for (i = 0; i < MAX_RECEPTION_TYPES; i++)
		tgtobj->rx.reception_type[i] +=
			srcobj->stats.rx.reception_type[i];

	tgtobj->tx.comp_pkt.bytes += srcobj->stats.tx.comp_pkt.bytes;
	tgtobj->tx.comp_pkt.num += srcobj->stats.tx.comp_pkt.num;
	tgtobj->tx.ucast.num += srcobj->stats.tx.ucast.num;
	tgtobj->tx.ucast.bytes += srcobj->stats.tx.ucast.bytes;
	tgtobj->tx.mcast.num += srcobj->stats.tx.mcast.num;
	tgtobj->tx.mcast.bytes += srcobj->stats.tx.mcast.bytes;
	tgtobj->tx.bcast.num += srcobj->stats.tx.bcast.num;
	tgtobj->tx.bcast.bytes += srcobj->stats.tx.bcast.bytes;
	tgtobj->tx.tx_success.num += srcobj->stats.tx.tx_success.num;
	tgtobj->tx.tx_success.bytes +=
		srcobj->stats.tx.tx_success.bytes;
	tgtobj->tx.nawds_mcast.num +=
		srcobj->stats.tx.nawds_mcast.num;
	tgtobj->tx.nawds_mcast.bytes +=
		srcobj->stats.tx.nawds_mcast.bytes;
	tgtobj->tx.nawds_mcast_drop +=
		srcobj->stats.tx.nawds_mcast_drop;
	tgtobj->tx.tx_failed += srcobj->stats.tx.tx_failed;
	tgtobj->tx.ofdma += srcobj->stats.tx.ofdma;
	tgtobj->tx.stbc += srcobj->stats.tx.stbc;
	tgtobj->tx.ldpc += srcobj->stats.tx.ldpc;
	tgtobj->tx.retries += srcobj->stats.tx.retries;
	tgtobj->tx.non_amsdu_cnt += srcobj->stats.tx.non_amsdu_cnt;
	tgtobj->tx.amsdu_cnt += srcobj->stats.tx.amsdu_cnt;
	tgtobj->tx.dropped.fw_rem.num += srcobj->stats.tx.dropped.fw_rem.num;
	tgtobj->tx.dropped.fw_rem.bytes +=
			srcobj->stats.tx.dropped.fw_rem.bytes;
	tgtobj->tx.dropped.fw_rem_tx +=
			srcobj->stats.tx.dropped.fw_rem_tx;
	tgtobj->tx.dropped.fw_rem_notx +=
			srcobj->stats.tx.dropped.fw_rem_notx;
	tgtobj->tx.dropped.fw_reason1 +=
			srcobj->stats.tx.dropped.fw_reason1;
	tgtobj->tx.dropped.fw_reason2 +=
			srcobj->stats.tx.dropped.fw_reason2;
	tgtobj->tx.dropped.fw_reason3 +=
			srcobj->stats.tx.dropped.fw_reason3;
	tgtobj->tx.dropped.age_out += srcobj->stats.tx.dropped.age_out;
	tgtobj->rx.err.mic_err += srcobj->stats.rx.err.mic_err;
	if (srcobj->stats.rx.rssi != 0)
		tgtobj->rx.rssi = srcobj->stats.rx.rssi;
	tgtobj->rx.rx_rate = srcobj->stats.rx.rx_rate;
	tgtobj->rx.err.decrypt_err += srcobj->stats.rx.err.decrypt_err;
	tgtobj->rx.non_ampdu_cnt += srcobj->stats.rx.non_ampdu_cnt;
	tgtobj->rx.ampdu_cnt += srcobj->stats.rx.ampdu_cnt;
	tgtobj->rx.non_amsdu_cnt += srcobj->stats.rx.non_amsdu_cnt;
	tgtobj->rx.amsdu_cnt += srcobj->stats.rx.amsdu_cnt;
	tgtobj->rx.nawds_mcast_drop += srcobj->stats.rx.nawds_mcast_drop;
	tgtobj->rx.to_stack.num += srcobj->stats.rx.to_stack.num;
	tgtobj->rx.to_stack.bytes += srcobj->stats.rx.to_stack.bytes;

	for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
		tgtobj->rx.rcvd_reo[i].num +=
			srcobj->stats.rx.rcvd_reo[i].num;
		tgtobj->rx.rcvd_reo[i].bytes +=
			srcobj->stats.rx.rcvd_reo[i].bytes;
	}

	srcobj->stats.rx.unicast.num =
		srcobj->stats.rx.to_stack.num -
				srcobj->stats.rx.multicast.num;
	srcobj->stats.rx.unicast.bytes =
		srcobj->stats.rx.to_stack.bytes -
				srcobj->stats.rx.multicast.bytes;

	tgtobj->rx.unicast.num += srcobj->stats.rx.unicast.num;
	tgtobj->rx.unicast.bytes += srcobj->stats.rx.unicast.bytes;
	tgtobj->rx.multicast.num += srcobj->stats.rx.multicast.num;
	tgtobj->rx.multicast.bytes += srcobj->stats.rx.multicast.bytes;
	tgtobj->rx.bcast.num += srcobj->stats.rx.bcast.num;
	tgtobj->rx.bcast.bytes += srcobj->stats.rx.bcast.bytes;
	tgtobj->rx.raw.num += srcobj->stats.rx.raw.num;
	tgtobj->rx.raw.bytes += srcobj->stats.rx.raw.bytes;
	tgtobj->rx.intra_bss.pkts.num +=
			srcobj->stats.rx.intra_bss.pkts.num;
	tgtobj->rx.intra_bss.pkts.bytes +=
			srcobj->stats.rx.intra_bss.pkts.bytes;
	tgtobj->rx.intra_bss.fail.num +=
			srcobj->stats.rx.intra_bss.fail.num;
	tgtobj->rx.intra_bss.fail.bytes +=
			srcobj->stats.rx.intra_bss.fail.bytes;
	tgtobj->tx.last_ack_rssi =
		srcobj->stats.tx.last_ack_rssi;
	tgtobj->rx.mec_drop.num += srcobj->stats.rx.mec_drop.num;
	tgtobj->rx.mec_drop.bytes += srcobj->stats.rx.mec_drop.bytes;
}

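/*
 * DP_UPDATE_STATS() - macro form of the aggregation helpers above: it
 * folds _srcobj's embedded stats into _tgtobj's via the DP_STATS_*
 * helpers, so the whole aggregation compiles out when DISABLE_DP_STATS
 * is defined.
 */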
#define DP_UPDATE_STATS(_tgtobj, _srcobj)	\
	do {				\
		uint8_t i;		\
		uint8_t pream_type;	\
		for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \
			for (i = 0; i < MAX_MCS; i++) { \
				DP_STATS_AGGR(_tgtobj, _srcobj, \
					tx.pkt_type[pream_type].mcs_count[i]); \
				DP_STATS_AGGR(_tgtobj, _srcobj, \
					rx.pkt_type[pream_type].mcs_count[i]); \
			} \
		} \
		\
		for (i = 0; i < MAX_BW; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \
		} \
		\
		for (i = 0; i < SS_COUNT; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \
		} \
		\
		for (i = 0; i < WME_AC_MAX; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, \
				      tx.excess_retries_per_ac[i]); \
		} \
		\
		for (i = 0; i < MAX_GI; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \
		} \
		\
		for (i = 0; i < MAX_RECEPTION_TYPES; i++) \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \
		\
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \
								\
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \
		if (_srcobj->stats.rx.rssi != 0) \
			DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rssi); \
		DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \
								\
		for (i = 0; i < CDP_MAX_RX_RINGS; i++)	\
			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \
									\
		_srcobj->stats.rx.unicast.num = \
			_srcobj->stats.rx.to_stack.num - \
					_srcobj->stats.rx.multicast.num; \
		_srcobj->stats.rx.unicast.bytes = \
			_srcobj->stats.rx.to_stack.bytes - \
					_srcobj->stats.rx.multicast.bytes; \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \
								  \
		_tgtobj->stats.tx.last_ack_rssi =	\
			_srcobj->stats.tx.last_ack_rssi; \
	} while (0)

extern int dp_peer_find_attach(struct dp_soc *soc);
extern void dp_peer_find_detach(struct dp_soc *soc);
extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer);
extern void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer);
extern void dp_peer_find_hash_erase(struct dp_soc *soc);
extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer);
extern void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
extern void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
extern void dp_peer_unref_delete(void *peer_handle);
extern void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer,
	unsigned tid, qdf_nbuf_t msdu_list);
extern void *dp_find_peer_by_addr(struct cdp_pdev *dev,
	uint8_t *peer_mac_addr, uint8_t *peer_id);
extern struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id);

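/*
 * Illustrative sketch (comment only, hypothetical call site; the
 * reference-counting convention is an assumption based on the pairing
 * with dp_peer_unref_delete() above): a hash lookup returns a
 * referenced peer that the caller releases when done:
 *
 *	peer = dp_peer_find_hash_find(soc, mac_addr, 0, vdev_id);
 *	if (peer) {
 *		... use peer ...
 *		dp_peer_unref_delete(peer);
 *	}
 */
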
#ifndef CONFIG_WIN
QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
		struct ol_txrx_desc_type *sta_desc);
QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id);
void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
		struct cdp_vdev *vdev,
		uint8_t *peer_addr, uint8_t *local_id);
uint16_t dp_local_peer_id(void *peer);
void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id);
QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
		enum ol_txrx_peer_state state);
QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id);
struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
		uint8_t sta_id);
struct cdp_vdev *dp_get_vdev_for_peer(void *peer);
uint8_t *dp_peer_get_peer_mac_addr(void *peer);
int dp_get_peer_state(void *peer_handle);
void dp_local_peer_id_pool_init(struct dp_pdev *pdev);
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer);
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer);
#else
static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
{
}

static inline
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
{
}

static inline
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
{
}
#endif /* CONFIG_WIN */
int dp_addba_resp_tx_completion_wifi3(void *peer_handle, uint8_t tid,
	int status);
extern int dp_addba_requestprocess_wifi3(void *peer_handle,
	uint8_t dialogtoken, uint16_t tid, uint16_t batimeout,
	uint16_t buffersize, uint16_t startseqnum);
extern void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
	uint8_t *dialogtoken, uint16_t *statuscode,
	uint16_t *buffersize, uint16_t *batimeout);
extern void dp_set_addba_response(void *peer_handle, uint8_t tid,
	uint16_t statuscode);
extern int dp_delba_process_wifi3(void *peer_handle,
	int tid, uint16_t reasoncode);

/*
 * dp_delba_tx_completion_wifi3() - Handle delba tx completion
 * @peer_handle: Peer handle
 * @tid: Tid number
 * @status: Tx completion status
 *
 * Indicate status of delba Tx to DP for stats update and retry
 * delba if tx failed.
 */
int dp_delba_tx_completion_wifi3(void *peer_handle, uint8_t tid,
				 int status);
extern int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
	uint32_t ba_window_size, uint32_t start_seq);

extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc,
	enum hal_reo_cmd_type type, struct hal_reo_cmd_params *params,
	void (*callback_fn), void *data);

extern void dp_reo_cmdlist_destroy(struct dp_soc *soc);
extern void dp_reo_status_ring_handler(struct dp_soc *soc);
void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
			     struct cdp_vdev_stats *vdev_stats);
void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status);
void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
		union hal_reo_status *reo_status);
uint16_t dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle,
		qdf_nbuf_t nbuf, uint8_t newmac[][DP_MAC_ADDR_LEN],
		uint8_t new_mac_cnt);
void dp_tx_me_alloc_descriptor(struct cdp_pdev *pdev);

void dp_tx_me_free_descriptor(struct cdp_pdev *pdev);
QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint32_t config_param_0,
		uint32_t config_param_1, uint32_t config_param_2,
		uint32_t config_param_3, int cookie, int cookie_msb,
		uint8_t mac_id);
void dp_htt_stats_print_tag(uint8_t tag_type, uint32_t *tag_buf);
void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type,
			   uint32_t *tag_buf);
void dp_peer_rxtid_stats(struct dp_peer *peer, void (*callback_fn),
		void *cb_ctxt);
void dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle,
	struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,
	uint32_t *rx_pn);

void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id);
void dp_set_michael_key(struct cdp_peer *peer_handle,
			bool is_unicast, uint32_t *key);

/*
 * dp_get_mac_id_for_pdev() - Return the MAC ring index corresponding to
 *			      the given mac_id and pdev_id
 * @mac_id: MAC id
 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL
 *
 * A single pdev using both MACs operates on both MAC rings, which is
 * the case for MCL. For WIN, each pdev operates one ring, so the index
 * is zero.
 */
static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id)
{
	if (mac_id && pdev_id) {
		qdf_print("Both mac_id and pdev_id cannot be non zero");
		QDF_BUG(0);
		return 0;
	}
	return (mac_id + pdev_id);
}

/*
 * dp_get_mac_id_for_mac() - Return the MAC ring index corresponding to
 *			     the given mac_id, for both WIN and MCL
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * A single pdev using both MACs operates on both MAC rings, which is
 * the case for MCL. For WIN, each pdev operates one ring, so the index
 * is zero.
 */
static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id)
{
	/*
	 * Single pdev using both MACs will operate on both MAC rings,
	 * which is the case for MCL.
	 */
	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return mac_id;

	/* For WIN each PDEV will operate one ring, so index is zero. */
	return 0;
}

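/*
 * Illustrative sketch (comment only, hypothetical call site): ring
 * setup code maps a logical mac_id to a ring index with these helpers:
 *
 *	int lmac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
 *	int ring_idx = dp_get_mac_id_for_mac(soc, mac_id);
 */
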
#ifdef WDI_EVENT_ENABLE
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
				uint32_t stats_type_upload_mask,
				uint8_t mac_id);

int dp_wdi_event_unsub(struct cdp_pdev *txrx_pdev_handle,
	void *event_cb_sub_handle,
	uint32_t event);

int dp_wdi_event_sub(struct cdp_pdev *txrx_pdev_handle,
	void *event_cb_sub_handle,
	uint32_t event);

void dp_wdi_event_handler(enum WDI_EVENT event, void *soc,
		void *data, u_int16_t peer_id,
		int status, u_int8_t pdev_id);

int dp_wdi_event_attach(struct dp_pdev *txrx_pdev);
int dp_wdi_event_detach(struct dp_pdev *txrx_pdev);
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
	bool enable);
void *dp_get_pldev(struct cdp_pdev *txrx_pdev);
void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn);

static inline void dp_hif_update_pipe_callback(void *soc, void *cb_context,
	QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), uint8_t pipe_id)
{
	struct hif_msg_callbacks hif_pipe_callbacks;
	struct dp_soc *dp_soc = (struct dp_soc *)soc;

	/* TODO: Temporary change to bypass HTC connection for this new
	 * HIF pipe, which will be used for packet log and other high-
	 * priority HTT messages. Proper HTC connection to be added
	 * later once required FW changes are available
	 */
	hif_pipe_callbacks.rxCompletionHandler = callback;
	hif_pipe_callbacks.Context = cb_context;
	hif_update_pipe_callback(dp_soc->hif_handle,
		DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks);
}

QDF_STATUS dp_peer_stats_notify(struct dp_peer *peer);

#else
static inline int dp_wdi_event_unsub(struct cdp_pdev *txrx_pdev_handle,
	void *event_cb_sub_handle,
	uint32_t event)
{
	return 0;
}

static inline int dp_wdi_event_sub(struct cdp_pdev *txrx_pdev_handle,
	void *event_cb_sub_handle,
	uint32_t event)
{
	return 0;
}

static inline void dp_wdi_event_handler(enum WDI_EVENT event, void *soc,
		void *data, u_int16_t peer_id,
		int status, u_int8_t pdev_id)
{
}

static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev)
{
	return 0;
}

static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev)
{
	return 0;
}

static inline int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
	bool enable)
{
	return 0;
}

static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_hif_update_pipe_callback(void *soc, void *cb_context,
	QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), uint8_t pipe_id)
{
}

static inline QDF_STATUS dp_peer_stats_notify(struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WDI_EVENT_ENABLE */

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_dump_flow_pool_info(void *soc);
int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
	bool force);
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef PEER_PROTECTED_ACCESS
/**
 * dp_peer_unref_del_find_by_id() - drop the reference taken by
 *                                  dp_peer_find_by_id, deleting the
 *                                  peer if it was the last reference
 * @peer: peer context
 *
 * Return: none
 */
static inline void dp_peer_unref_del_find_by_id(struct dp_peer *peer)
{
	dp_peer_unref_delete(peer);
}
#else
static inline void dp_peer_unref_del_find_by_id(struct dp_peer *peer)
{
}
#endif /* PEER_PROTECTED_ACCESS */

#endif /* #ifndef _DP_INTERNAL_H_ */