/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_INTERNAL_H_
#define _DP_INTERNAL_H_

#include "dp_types.h"

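/* Size (in bytes) of the RX buffers posted when the "lite" pktlog mode is in use */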
#define RX_BUFFER_SIZE_PKTLOG_LITE 1024

#if DP_PRINT_ENABLE
#include <stdarg.h>       /* va_list */
#include <qdf_types.h> /* qdf_vprint */
#include <cdp_txrx_handle.h>

enum {
	/* FATAL_ERR - print only irrecoverable error messages */
	DP_PRINT_LEVEL_FATAL_ERR,

	/* ERR - include non-fatal err messages */
	DP_PRINT_LEVEL_ERR,

	/* WARN - include warnings */
	DP_PRINT_LEVEL_WARN,

	/* INFO1 - include fundamental, infrequent events */
	DP_PRINT_LEVEL_INFO1,

	/* INFO2 - include non-fundamental but infrequent events */
	DP_PRINT_LEVEL_INFO2,
};


#define dp_print(level, fmt, ...) do { \
	if (level <= g_txrx_print_level) \
		qdf_print(fmt, ## __VA_ARGS__); \
} while (0)
#define DP_PRINT(level, fmt, ...) do { \
	dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#define DP_PRINT(level, fmt, ...)
#endif /* DP_PRINT_ENABLE */
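
/*
 * Illustrative usage (not taken from this file): with DP_PRINT_ENABLE set,
 * messages whose level is <= the global g_txrx_print_level are emitted, e.g.
 *	DP_PRINT(DP_PRINT_LEVEL_ERR, "tx desc alloc failed");
 */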

#define DP_TRACE(LVL, fmt, args ...)                             \
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL,       \
		fmt, ## args)

#define DP_TRACE_STATS(LVL, fmt, args ...)                             \
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL,       \
		fmt, ## args)

#define DP_PRINT_STATS(fmt, args ...)	\
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,       \
		fmt, ## args)
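
/*
 * Illustrative usage (not taken from this file):
 *	DP_TRACE(INFO, "pdev %d attach complete", pdev_id);
 * pastes the LVL token, expanding to
 * QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, ...).
 */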

#define DP_STATS_INIT(_handle) \
	qdf_mem_set(&((_handle)->stats), sizeof((_handle)->stats), 0x0)

#define DP_STATS_CLR(_handle) \
	qdf_mem_set(&((_handle)->stats), sizeof((_handle)->stats), 0x0)

#ifndef DISABLE_DP_STATS
#define DP_STATS_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
	if (_cond && likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_STATS_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field -= _delta; \
}

#define DP_STATS_UPD(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field = _delta; \
}

#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \
{ \
	DP_STATS_INC(_handle, _field.num, _count); \
	DP_STATS_INC(_handle, _field.bytes, _bytes) \
}

#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
{ \
	DP_STATS_INCC(_handle, _field.num, _count, _cond); \
	DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
}

#define DP_STATS_AGGR(_handle_a, _handle_b, _field) \
{ \
	_handle_a->stats._field += _handle_b->stats._field; \
}

#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \
{ \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\
}

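/*
 * Illustrative usage of the stats helpers (examples only, not from this file):
 *	DP_STATS_INC(peer, tx.retries, 1);
 *	DP_STATS_INC_PKT(peer, rx.to_stack, 1, qdf_nbuf_len(nbuf));
 * The _field argument names a member of the handle's embedded 'stats' struct;
 * the _PKT variants bump the paired num/bytes counters together.
 */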

#define DP_HIST_INIT() \
	uint32_t num_of_packets[MAX_PDEV_CNT] = {0};

#define DP_HIST_PACKET_COUNT_INC(_pdev_id) \
{ \
		++num_of_packets[_pdev_id]; \
}

#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
	do {                                                              \
		if (_p_cntrs == 1) {                                      \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_1, 1);             \
		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_2_20, 1);          \
		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_21_40, 1);         \
		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_41_60, 1);         \
		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_61_80, 1);         \
		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_81_100, 1);        \
		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_101_200, 1);       \
		} else if (_p_cntrs > 200) {                              \
			DP_STATS_INC(_pdev,                               \
				tx_comp_histogram.pkts_201_plus, 1);      \
		}                                                         \
	} while (0)

#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
	do {                                                              \
		if (_p_cntrs == 1) {                                      \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_1, 1);              \
		} else if (_p_cntrs > 1 && _p_cntrs <= 20) {              \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_2_20, 1);           \
		} else if (_p_cntrs > 20 && _p_cntrs <= 40) {             \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_21_40, 1);          \
		} else if (_p_cntrs > 40 && _p_cntrs <= 60) {             \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_41_60, 1);          \
		} else if (_p_cntrs > 60 && _p_cntrs <= 80) {             \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_61_80, 1);          \
		} else if (_p_cntrs > 80 && _p_cntrs <= 100) {            \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_81_100, 1);         \
		} else if (_p_cntrs > 100 && _p_cntrs <= 200) {           \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_101_200, 1);        \
		} else if (_p_cntrs > 200) {                              \
			DP_STATS_INC(_pdev,                               \
				rx_ind_histogram.pkts_201_plus, 1);       \
		}                                                         \
	} while (0)

#define DP_TX_HIST_STATS_PER_PDEV() \
	do { \
		uint8_t hist_stats = 0; \
		for (hist_stats = 0; hist_stats < soc->pdev_count; \
				hist_stats++) { \
			DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
					num_of_packets[hist_stats]); \
		} \
	}  while (0)


#define DP_RX_HIST_STATS_PER_PDEV() \
	do { \
		uint8_t hist_stats = 0; \
		for (hist_stats = 0; hist_stats < soc->pdev_count; \
				hist_stats++) { \
			DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
					num_of_packets[hist_stats]); \
		} \
	}  while (0)


#else
#define DP_STATS_INC(_handle, _field, _delta)
#define DP_STATS_INCC(_handle, _field, _delta, _cond)
#define DP_STATS_DEC(_handle, _field, _delta)
#define DP_STATS_UPD(_handle, _field, _delta)
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
#define DP_STATS_AGGR(_handle_a, _handle_b, _field)
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
#define DP_HIST_INIT()
#define DP_HIST_PACKET_COUNT_INC(_pdev_id)
#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
#define DP_RX_HIST_STATS_PER_PDEV()
#define DP_TX_HIST_STATS_PER_PDEV()
#endif
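
/*
 * Typical flow for the per-pdev packet histograms: a processing loop declares
 * the local counter array with DP_HIST_INIT(), bumps it per packet with
 * DP_HIST_PACKET_COUNT_INC(pdev_id), and finally folds the counts into the
 * per-pdev tx_comp/rx_ind histogram buckets via DP_TX_HIST_STATS_PER_PDEV()
 * or DP_RX_HIST_STATS_PER_PDEV(); both expect a local 'soc' pointer in scope.
 */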

#define DP_HTT_T2H_HP_PIPE 5
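/*
 * DP_HTT_T2H_HP_PIPE (5) is the HIF pipe used for high-priority HTT
 * target-to-host messages such as pktlog; dp_hif_update_pipe_callback()
 * below registers a receive handler on it.
 */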

#define DP_UPDATE_STATS(_tgtobj, _srcobj)	\
	do {				\
		uint8_t i;		\
		uint8_t pream_type;	\
		for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \
			for (i = 0; i < MAX_MCS; i++) { \
				DP_STATS_AGGR(_tgtobj, _srcobj, \
					tx.pkt_type[pream_type].mcs_count[i]); \
				DP_STATS_AGGR(_tgtobj, _srcobj, \
					rx.pkt_type[pream_type].mcs_count[i]); \
			} \
		} \
		  \
		for (i = 0; i < MAX_BW; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \
		} \
		  \
		for (i = 0; i < SS_COUNT; i++) \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \
		\
		for (i = 0; i < WME_AC_MAX; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \
		\
		} \
		\
		for (i = 0; i < MAX_GI; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \
		} \
		\
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \
								\
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast);\
								\
		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)	\
			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \
									\
		_srcobj->stats.rx.unicast.num = \
			_srcobj->stats.rx.to_stack.num - \
					_srcobj->stats.rx.multicast.num; \
		_srcobj->stats.rx.unicast.bytes = \
			_srcobj->stats.rx.to_stack.bytes - \
					_srcobj->stats.rx.multicast.bytes; \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \
								  \
		_tgtobj->stats.tx.last_ack_rssi =	\
			_srcobj->stats.tx.last_ack_rssi; \
	}  while (0)
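
/*
 * DP_UPDATE_STATS folds _srcobj's stats into _tgtobj; it is typically used
 * when aggregating per-peer stats into the owning vdev (see
 * dp_aggregate_vdev_stats() declared below). Note that it also derives
 * rx.unicast on _srcobj from rx.to_stack - rx.multicast as a side effect.
 */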

extern int dp_peer_find_attach(struct dp_soc *soc);
extern void dp_peer_find_detach(struct dp_soc *soc);
extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer);
extern void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer);
extern void dp_peer_find_hash_erase(struct dp_soc *soc);
extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer);
extern void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
extern void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
extern void dp_peer_unref_delete(void *peer_handle);
extern void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer,
	unsigned tid, qdf_nbuf_t msdu_list);
extern void *dp_find_peer_by_addr(struct cdp_pdev *dev,
	uint8_t *peer_mac_addr, uint8_t *peer_id);
#ifndef CONFIG_WIN
QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
		struct ol_txrx_desc_type *sta_desc);
QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id);
void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
		struct cdp_vdev *vdev,
		uint8_t *peer_addr, uint8_t *local_id);
uint16_t dp_local_peer_id(void *peer);
void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id);
QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
		enum ol_txrx_peer_state state);
QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id);
struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
		uint8_t sta_id);
struct cdp_vdev *dp_get_vdev_for_peer(void *peer);
uint8_t *dp_peer_get_peer_mac_addr(void *peer);
int dp_get_peer_state(void *peer_handle);
void dp_local_peer_id_pool_init(struct dp_pdev *pdev);
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer);
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer);
qdf_time_t *dp_get_last_assoc_received(void *peer_handle);
qdf_time_t *dp_get_last_disassoc_received(void *peer_handle);
qdf_time_t *dp_get_last_deauth_received(void *peer_handle);
#endif
extern int dp_addba_requestprocess_wifi3(void *peer_handle,
	uint8_t dialogtoken, uint16_t tid, uint16_t batimeout,
	uint16_t buffersize, uint16_t startseqnum);
extern void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
	uint8_t *dialogtoken, uint16_t *statuscode,
	uint16_t *buffersize, uint16_t *batimeout);
extern void dp_set_addba_response(void *peer_handle, uint8_t tid,
	uint16_t statuscode);
extern int dp_delba_process_wifi3(void *peer_handle,
	int tid, uint16_t reasoncode);

extern int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
	uint32_t ba_window_size, uint32_t start_seq);

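/*
 * dp_reo_send_cmd() queues a command of the given hal_reo_cmd_type, with
 * 'params', on the REO command ring; when the target posts the matching
 * status, dp_reo_status_ring_handler() invokes callback_fn with 'data'
 * (dp_rx_tid_stats_cb/dp_rx_bar_stats_cb below are callbacks of this kind).
 */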
extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc,
	enum hal_reo_cmd_type type, struct hal_reo_cmd_params *params,
	void (*callback_fn), void *data);

extern void dp_reo_cmdlist_destroy(struct dp_soc *soc);
extern void dp_reo_status_ring_handler(struct dp_soc *soc);
void dp_aggregate_vdev_stats(struct dp_vdev *vdev);
void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status);
void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
		union hal_reo_status *reo_status);
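
/*
 * Multicast-enhancement helper (description inferred from the API):
 * dp_tx_me_send_convert_ucast() replicates a multicast frame into up to
 * new_mac_cnt unicast copies addressed to the MAC addresses in newmac[]
 * and returns a count of the frames handled; the descriptor pool it draws
 * from is managed by the alloc/free helpers below.
 */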
uint16_t dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle,
		qdf_nbuf_t nbuf, uint8_t newmac[][DP_MAC_ADDR_LEN],
		uint8_t new_mac_cnt);
void dp_tx_me_alloc_descriptor(struct cdp_pdev *pdev);

void dp_tx_me_free_descriptor(struct cdp_pdev *pdev);
QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint32_t config_param_0,
		uint32_t config_param_1, uint32_t config_param_2,
		uint32_t config_param_3, int cookie, int cookie_msb,
		uint8_t channel);
void dp_htt_stats_print_tag(uint8_t tag_type, uint32_t *tag_buf);
void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf);
void dp_peer_rxtid_stats(struct dp_peer *peer, void (*callback_fn),
		void *cb_ctxt);
void dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle,
	struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,
	 uint32_t *rx_pn);

void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id);
int dp_get_ring_id_for_mac_id(struct dp_soc *soc, uint32_t mac_id);
void dp_mark_peer_inact(void *peer_handle, bool inactive);
bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
		 u_int16_t inact_check_interval,
		 u_int16_t inact_normal, u_int16_t inact_overload);
bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable);
void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload);
bool dp_peer_is_inact(void *peer_handle);
void dp_init_inact_timer(struct dp_soc *soc);
void dp_free_inact_timer(struct dp_soc *soc);

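/*
 * WDI event support: data-path producers dispatch events through
 * dp_wdi_event_handler(), which delivers them to callbacks registered with
 * dp_wdi_event_sub() and removed with dp_wdi_event_unsub(). When
 * WDI_EVENT_ENABLE is not defined, these entry points collapse to the
 * inline no-op stubs further below.
 */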
#ifdef WDI_EVENT_ENABLE
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
				uint32_t stats_type_upload_mask,
				uint8_t mac_id);

int dp_wdi_event_unsub(struct cdp_pdev *txrx_pdev_handle,
	void *event_cb_sub_handle,
	uint32_t event);

int dp_wdi_event_sub(struct cdp_pdev *txrx_pdev_handle,
	void *event_cb_sub_handle,
	uint32_t event);

void dp_wdi_event_handler(enum WDI_EVENT event, void *soc,
		void *data, u_int16_t peer_id,
		int status, u_int8_t pdev_id);

int dp_wdi_event_attach(struct dp_pdev *txrx_pdev);
int dp_wdi_event_detach(struct dp_pdev *txrx_pdev);
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
	bool enable);
void *dp_get_pldev(struct cdp_pdev *txrx_pdev);
void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn);

static inline void dp_hif_update_pipe_callback(void *soc, void *cb_context,
	QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), uint8_t pipe_id)
{
	struct hif_msg_callbacks hif_pipe_callbacks;
	struct dp_soc *dp_soc = (struct dp_soc *)soc;

	/* TODO: Temporary change to bypass HTC connection for this new
	 * HIF pipe, which will be used for packet log and other high-
	 * priority HTT messages. Proper HTC connection to be added
	 * later once required FW changes are available
	 */
	hif_pipe_callbacks.rxCompletionHandler = callback;
	hif_pipe_callbacks.Context = cb_context;
	hif_update_pipe_callback(dp_soc->hif_handle,
		DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks);
}

#else
static inline int dp_wdi_event_unsub(struct cdp_pdev *txrx_pdev_handle,
	void *event_cb_sub_handle,
	uint32_t event)
{
	return 0;
}

static inline int dp_wdi_event_sub(struct cdp_pdev *txrx_pdev_handle,
	void *event_cb_sub_handle,
	uint32_t event)
{
	return 0;
}

static inline void dp_wdi_event_handler(enum WDI_EVENT event, void *soc,
		void *data, u_int16_t peer_id,
		int status, u_int8_t pdev_id)
{
}

static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev)
{
	return 0;
}

static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev)
{
	return 0;
}

static inline int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
	bool enable)
{
	return 0;
}

static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_hif_update_pipe_callback(void *soc, void *cb_context,
	QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), uint8_t pipe_id)
{
}
#endif /* WDI_EVENT_ENABLE */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
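/*
 * Tx flow-control (v2) helpers, compiled only with QCA_LL_TX_FLOW_CONTROL_V2.
 * A best-effort description inferred from the API: dp_tx_dump_flow_pool_info()
 * prints the state of the Tx descriptor flow pools, and dp_tx_delete_flow_pool()
 * releases one; 'force' requests deletion even if descriptors are outstanding.
 */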
void dp_tx_dump_flow_pool_info(void *soc);
int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
	bool force);
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#endif /* #ifndef _DP_INTERNAL_H_ */