xref: /wlan-dirver/qca-wifi-host-cmn/wmi/src/wmi_unified.c (revision cef49bdf89944744b91311eb5346fd6854abccb1)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*
21  * Host WMI unified implementation
22  */
23 #include "htc_api.h"
25 #include "wmi_unified_priv.h"
26 #include "wmi_unified_api.h"
27 #include "qdf_module.h"
28 #include "qdf_platform.h"
29 #include "qdf_ssr_driver_dump.h"
30 #ifdef WMI_EXT_DBG
31 #include "qdf_list.h"
32 #include "qdf_atomic.h"
33 #endif
34 
35 #ifndef WMI_NON_TLV_SUPPORT
36 #include "wmi_tlv_helper.h"
37 #endif
38 
39 #include <linux/debugfs.h>
40 #include <target_if.h>
41 #include <qdf_debugfs.h>
42 #include "wmi_filtered_logging.h"
43 #include <wmi_hang_event.h>
44 
45 /* This check for CONFIG_WIN was temporarily added due to a redeclaration
46  * compilation error in MCL. The error is caused by the inclusion of wmi.h in
47  * wmi_unified_api.h, which gets included here through ol_if_athvar.h. Once
48  * wmi.h is removed from wmi_unified_api.h after cleanup, WMI_CMD_HDR will
49  * need to be defined here. */
50 /* Copied from wmi.h */
51 #undef MS
52 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
53 #undef SM
54 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
55 #undef WO
56 #define WO(_f)      ((_f##_OFFSET) >> 2)
57 
58 #undef GET_FIELD
59 #define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f)
60 #undef SET_FIELD
61 #define SET_FIELD(_addr, _f, _val)  \
62 	    (*((uint32_t *)(_addr) + WO(_f)) = \
63 		(*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f))
64 
65 #define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \
66 	    GET_FIELD(_msg_buf, _msg_type ## _ ## _f)
67 
68 #define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \
69 	    SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val)
70 
71 #define WMI_EP_APASS           0x0
72 #define WMI_EP_LPASS           0x1
73 #define WMI_EP_SENSOR          0x2
74 
75 #define WMI_INFOS_DBG_FILE_PERM (QDF_FILE_USR_READ | \
76 				 QDF_FILE_USR_WRITE | \
77 				 QDF_FILE_GRP_READ | \
78 				 QDF_FILE_OTH_READ)
79 
80 /*
81  * Control Path
82  */
83 typedef PREPACK struct {
84 	uint32_t	commandId:24,
85 			reserved:2, /* used for WMI endpoint ID */
86 			plt_priv:6; /* platform private */
87 } POSTPACK WMI_CMD_HDR;        /* used for commands and events */
88 
89 #define WMI_CMD_HDR_COMMANDID_LSB           0
90 #define WMI_CMD_HDR_COMMANDID_MASK          0x00ffffff
91 #define WMI_CMD_HDR_COMMANDID_OFFSET        0x00000000
92 #define WMI_CMD_HDR_WMI_ENDPOINTID_MASK        0x03000000
93 #define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET      24
94 #define WMI_CMD_HDR_PLT_PRIV_LSB               24
95 #define WMI_CMD_HDR_PLT_PRIV_MASK              0xff000000
96 #define WMI_CMD_HDR_PLT_PRIV_OFFSET            0x00000000
97 /* end of copy wmi.h */
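/*
 * Illustrative sketch (editorial, not part of revision cef49bdf): how the
 * field accessors copied above are typically used on the WMI_CMD_HDR word
 * that prefixes every command/event buffer. The value 0x1234 is hypothetical.
 *
 *	uint32_t hdr_word = 0;
 *	uint32_t cmd_id;
 *
 *	// SM() shifts/masks the value into the 24-bit COMMANDID field
 *	WMI_SET_FIELD(&hdr_word, WMI_CMD_HDR, COMMANDID, 0x1234);
 *
 *	// MS() masks/shifts it back out; cmd_id == 0x1234 here
 *	cmd_id = WMI_GET_FIELD(&hdr_word, WMI_CMD_HDR, COMMANDID);
 *
 * wmi_ext_dbg_msg_event_record() further down uses the same WMI_GET_FIELD()
 * pattern to extract the id from a received buffer.
 */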
98 
99 #define WMI_MIN_HEAD_ROOM 64
100 
101 /* WBUFF pool sizes for WMI (number of buffers per pool) */
102 /* Pool of 256-byte allocations */
103 #define WMI_WBUFF_POOL_0_SIZE 128
104 /* Pool of 512-byte allocations */
105 #define WMI_WBUFF_POOL_1_SIZE 16
106 /* Pool of 1024-byte allocations */
107 #define WMI_WBUFF_POOL_2_SIZE 8
108 /* Pool of 2048-byte allocations */
109 #define WMI_WBUFF_POOL_3_SIZE 8
110 
111 #define RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT 500
112 
113 #ifdef WMI_INTERFACE_EVENT_LOGGING
114 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
115 /* TODO Cleanup this backported function */
116 static int wmi_bp_seq_printf(qdf_debugfs_file_t m, const char *f, ...)
117 {
118 	va_list args;
119 
120 	va_start(args, f);
121 	seq_vprintf(m, f, args);
122 	va_end(args);
123 
124 	return 0;
125 }
126 #else
127 #define wmi_bp_seq_printf(m, fmt, ...) seq_printf((m), fmt, ##__VA_ARGS__)
128 #endif
129 
130 #ifndef MAX_WMI_INSTANCES
131 #define CUSTOM_MGMT_CMD_DATA_SIZE 4
132 #endif
133 
134 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
135 /* WMI commands */
136 uint32_t g_wmi_command_buf_idx = 0;
137 struct wmi_command_debug wmi_command_log_buffer[WMI_CMD_DEBUG_MAX_ENTRY];
138 
139 /* WMI commands TX completed */
140 uint32_t g_wmi_command_tx_cmp_buf_idx = 0;
141 struct wmi_command_cmp_debug
142 	wmi_command_tx_cmp_log_buffer[WMI_CMD_CMPL_DEBUG_MAX_ENTRY];
143 
144 /* WMI events when processed */
145 uint32_t g_wmi_event_buf_idx = 0;
146 struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
147 
148 /* WMI events when queued */
149 uint32_t g_wmi_rx_event_buf_idx = 0;
150 struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
151 #endif
152 
153 static void wmi_minidump_detach(struct wmi_unified *wmi_handle)
154 {
155 	struct wmi_log_buf_t *info =
156 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
157 	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
158 
159 	qdf_ssr_driver_dump_unregister_region("wmi_debug_log_info");
160 	qdf_ssr_driver_dump_unregister_region("wmi_rx_event_idx");
161 	qdf_ssr_driver_dump_unregister_region("wmi_rx_event");
162 	qdf_ssr_driver_dump_unregister_region("wmi_event_log_idx");
163 	qdf_ssr_driver_dump_unregister_region("wmi_event_log");
164 	qdf_ssr_driver_dump_unregister_region("wmi_command_log_idx");
165 	qdf_ssr_driver_dump_unregister_region("wmi_command_log");
166 	qdf_ssr_driver_dump_unregister_region("wmi_tx_cmp_idx");
167 	qdf_ssr_driver_dump_unregister_region("wmi_tx_cmp");
168 	qdf_minidump_remove(info->buf, buf_size, "wmi_tx_cmp");
169 }
170 
171 static void wmi_minidump_attach(struct wmi_unified *wmi_handle)
172 {
173 	struct wmi_log_buf_t *info =
174 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
175 	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
176 
177 	qdf_minidump_log(info->buf, buf_size, "wmi_tx_cmp");
178 
179 	qdf_ssr_driver_dump_register_region("wmi_tx_cmp", info->buf, buf_size);
180 	qdf_ssr_driver_dump_register_region("wmi_tx_cmp_idx",
181 					    info->p_buf_tail_idx,
182 					    sizeof(*info->p_buf_tail_idx));
183 
184 	info = &wmi_handle->log_info.wmi_command_log_buf_info;
185 	buf_size = info->size * sizeof(struct wmi_command_debug);
186 
187 	qdf_ssr_driver_dump_register_region("wmi_command_log", info->buf,
188 					    buf_size);
189 	qdf_ssr_driver_dump_register_region("wmi_command_log_idx",
190 					    info->p_buf_tail_idx,
191 					    sizeof(*info->p_buf_tail_idx));
192 
193 	info = &wmi_handle->log_info.wmi_event_log_buf_info;
194 	buf_size = info->size * sizeof(struct wmi_event_debug);
195 
196 	qdf_ssr_driver_dump_register_region("wmi_event_log", info->buf,
197 					    buf_size);
198 	qdf_ssr_driver_dump_register_region("wmi_event_log_idx",
199 					    info->p_buf_tail_idx,
200 					    sizeof(*info->p_buf_tail_idx));
201 
202 	info = &wmi_handle->log_info.wmi_rx_event_log_buf_info;
203 	buf_size = info->size * sizeof(struct wmi_event_debug);
204 
205 	qdf_ssr_driver_dump_register_region("wmi_rx_event", info->buf,
206 					    buf_size);
207 	qdf_ssr_driver_dump_register_region("wmi_rx_event_idx",
208 					    info->p_buf_tail_idx,
209 					    sizeof(*info->p_buf_tail_idx));
210 
211 	qdf_ssr_driver_dump_register_region("wmi_debug_log_info",
212 					    &wmi_handle->log_info,
213 					    sizeof(wmi_handle->log_info));
214 }
215 
216 #define WMI_COMMAND_RECORD(h, a, b) {					\
217 	if (wmi_cmd_log_max_entry <=					\
218 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))	\
219 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\
220 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
221 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\
222 						.command = a;		\
223 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
224 				wmi_command_log_buf_info.buf)		\
225 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\
226 			b, wmi_record_max_length);			\
227 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
228 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\
229 		time = qdf_get_log_timestamp();			\
230 	(*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++;	\
231 	h->log_info.wmi_command_log_buf_info.length++;			\
232 }
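/*
 * Illustrative sketch (editorial, not part of revision cef49bdf): the record
 * macros update a shared ring buffer, so callers serialize on
 * wmi_record_lock, as wmi_mgmt_cmd_record() later in this file does for the
 * mgmt variant. 'cmd_id' and 'cmd_buf' below are hypothetical.
 *
 *	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
 *	WMI_COMMAND_RECORD(wmi_handle, cmd_id, cmd_buf);
 *	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
 */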
233 
234 #define WMI_COMMAND_TX_CMP_RECORD(h, a, b, da, pa) {			\
235 	if (wmi_cmd_cmpl_log_max_entry <=				\
236 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\
237 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
238 				p_buf_tail_idx) = 0;			\
239 	((struct wmi_command_cmp_debug *)h->log_info.			\
240 		wmi_command_tx_cmp_log_buf_info.buf)			\
241 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
242 				p_buf_tail_idx)].			\
243 							command	= a;	\
244 	qdf_mem_copy(((struct wmi_command_cmp_debug *)h->log_info.	\
245 				wmi_command_tx_cmp_log_buf_info.buf)	\
246 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
247 			p_buf_tail_idx)].				\
248 		data, b, wmi_record_max_length);			\
249 	((struct wmi_command_cmp_debug *)h->log_info.			\
250 		wmi_command_tx_cmp_log_buf_info.buf)			\
251 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
252 				p_buf_tail_idx)].			\
253 		time = qdf_get_log_timestamp();				\
254 	((struct wmi_command_cmp_debug *)h->log_info.			\
255 		wmi_command_tx_cmp_log_buf_info.buf)			\
256 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
257 				p_buf_tail_idx)].			\
258 		dma_addr = da;						\
259 	((struct wmi_command_cmp_debug *)h->log_info.			\
260 		wmi_command_tx_cmp_log_buf_info.buf)			\
261 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
262 				p_buf_tail_idx)].			\
263 		phy_addr = pa;						\
264 	(*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\
265 	h->log_info.wmi_command_tx_cmp_log_buf_info.length++;		\
266 }
267 
268 #define WMI_EVENT_RECORD(h, a, b) {					\
269 	if (wmi_event_log_max_entry <=					\
270 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))	\
271 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\
272 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
273 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].	\
274 		event = a;						\
275 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
276 				wmi_event_log_buf_info.buf)		\
277 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\
278 		wmi_record_max_length);					\
279 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
280 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\
281 		qdf_get_log_timestamp();				\
282 	(*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++;	\
283 	h->log_info.wmi_event_log_buf_info.length++;			\
284 }
285 
286 #define WMI_RX_EVENT_RECORD(h, a, b) {					\
287 	if (wmi_event_log_max_entry <=					\
288 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\
289 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\
290 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
291 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
292 		event = a;						\
293 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
294 				wmi_rx_event_log_buf_info.buf)		\
295 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
296 			data, b, wmi_record_max_length);		\
297 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
298 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
299 		time =	qdf_get_log_timestamp();			\
300 	(*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++;	\
301 	h->log_info.wmi_rx_event_log_buf_info.length++;			\
302 }
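/*
 * Descriptive note (editorial): WMI_RX_EVENT_RECORD captures an event when it
 * is queued for processing, while WMI_EVENT_RECORD captures it again when it
 * is actually processed, matching the "when queued"/"when processed" ring
 * buffers initialized in wmi_log_init(); comparing the two rings helps spot
 * events that were received but never handled.
 */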
303 
304 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
305 uint32_t g_wmi_mgmt_command_buf_idx = 0;
306 struct
307 wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_TX_DEBUG_MAX_ENTRY];
308 
309 /* wmi_mgmt commands TX completed */
310 uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0;
311 struct wmi_command_debug
312 wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY];
313 
314 /* wmi_mgmt events when received */
315 uint32_t g_wmi_mgmt_rx_event_buf_idx = 0;
316 struct wmi_event_debug
317 wmi_mgmt_rx_event_log_buffer[WMI_MGMT_RX_DEBUG_MAX_ENTRY];
318 
319 /* wmi_diag events when received */
320 uint32_t g_wmi_diag_rx_event_buf_idx = 0;
321 struct wmi_event_debug
322 wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY];
323 #endif
324 
325 #define WMI_MGMT_COMMAND_RECORD(h, a, b) {                              \
326 	if (wmi_mgmt_tx_log_max_entry <=                                   \
327 		*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \
328 		*(h->log_info.wmi_mgmt_command_log_buf_info.		\
329 				p_buf_tail_idx) = 0;			\
330 	((struct wmi_command_debug *)h->log_info.                       \
331 		 wmi_mgmt_command_log_buf_info.buf)                     \
332 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
333 			command = a;                                    \
334 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.          \
335 				wmi_mgmt_command_log_buf_info.buf)      \
336 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
337 		data, b,                                                \
338 		wmi_record_max_length);                                	\
339 	((struct wmi_command_debug *)h->log_info.                       \
340 		 wmi_mgmt_command_log_buf_info.buf)                     \
341 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
342 			time =        qdf_get_log_timestamp();          \
343 	(*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\
344 	h->log_info.wmi_mgmt_command_log_buf_info.length++;             \
345 }
346 
347 #define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) {			\
348 	if (wmi_mgmt_tx_cmpl_log_max_entry <=				\
349 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
350 			p_buf_tail_idx))				\
351 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
352 			p_buf_tail_idx) = 0;				\
353 	((struct wmi_command_debug *)h->log_info.			\
354 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
355 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
356 				p_buf_tail_idx)].command = a;		\
357 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
358 				wmi_mgmt_command_tx_cmp_log_buf_info.buf)\
359 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
360 			p_buf_tail_idx)].data, b,			\
361 			wmi_record_max_length);				\
362 	((struct wmi_command_debug *)h->log_info.			\
363 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
364 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
365 				p_buf_tail_idx)].time =			\
366 		qdf_get_log_timestamp();				\
367 	(*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.		\
368 			p_buf_tail_idx))++;				\
369 	h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++;	\
370 }
371 
372 #define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do {				\
373 	if (wmi_mgmt_rx_log_max_entry <=				\
374 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\
375 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\
376 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
377 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\
378 					.event = a;			\
379 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
380 				wmi_mgmt_event_log_buf_info.buf)	\
381 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
382 			data, b, wmi_record_max_length);		\
383 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
384 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
385 			time = qdf_get_log_timestamp();			\
386 	(*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++;	\
387 	h->log_info.wmi_mgmt_event_log_buf_info.length++;		\
388 } while (0);
389 
390 #define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do {                             \
391 	if (wmi_diag_log_max_entry <=                                   \
392 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\
393 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\
394 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
395 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\
396 					.event = a;                     \
397 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.            \
398 				wmi_diag_event_log_buf_info.buf)        \
399 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
400 			data, b, wmi_record_max_length);                \
401 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
402 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
403 			time = qdf_get_log_timestamp();                 \
404 	(*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++;  \
405 	h->log_info.wmi_diag_event_log_buf_info.length++;               \
406 } while (0);
407 
408 /* These are defined as module params so that they can be configured */
409 /* WMI Commands */
410 uint32_t wmi_cmd_log_max_entry = WMI_CMD_DEBUG_MAX_ENTRY;
411 uint32_t wmi_cmd_cmpl_log_max_entry = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
412 /* WMI Events */
413 uint32_t wmi_event_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY;
414 /* WMI MGMT Tx */
415 uint32_t wmi_mgmt_tx_log_max_entry = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
416 uint32_t wmi_mgmt_tx_cmpl_log_max_entry = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
417 /* WMI MGMT Rx */
418 uint32_t wmi_mgmt_rx_log_max_entry = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
419 /* WMI Diag Event */
420 uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
421 /* WMI capture size */
422 uint32_t wmi_record_max_length = WMI_DEBUG_ENTRY_MAX_LENGTH;
423 uint32_t wmi_display_size = 100;
424 
425 /**
426  * wmi_log_init() - Initialize WMI event logging
427  * @wmi_handle: WMI handle.
428  *
429  * Return: Initialization status
430  */
431 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
432 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
433 {
434 	struct wmi_log_buf_t *cmd_log_buf =
435 			&wmi_handle->log_info.wmi_command_log_buf_info;
436 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
437 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
438 
439 	struct wmi_log_buf_t *event_log_buf =
440 			&wmi_handle->log_info.wmi_event_log_buf_info;
441 	struct wmi_log_buf_t *rx_event_log_buf =
442 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
443 
444 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
445 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
446 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
447 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
448 	struct wmi_log_buf_t *mgmt_event_log_buf =
449 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
450 	struct wmi_log_buf_t *diag_event_log_buf =
451 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
452 
453 	/* WMI commands */
454 	cmd_log_buf->length = 0;
455 	cmd_log_buf->buf_tail_idx = 0;
456 	cmd_log_buf->buf = wmi_command_log_buffer;
457 	cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx;
458 	cmd_log_buf->size = WMI_CMD_DEBUG_MAX_ENTRY;
459 
460 	/* WMI commands TX completed */
461 	cmd_tx_cmpl_log_buf->length = 0;
462 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
463 	cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer;
464 	cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx;
465 	cmd_tx_cmpl_log_buf->size = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
466 
467 	/* WMI events when processed */
468 	event_log_buf->length = 0;
469 	event_log_buf->buf_tail_idx = 0;
470 	event_log_buf->buf = wmi_event_log_buffer;
471 	event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx;
472 	event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
473 
474 	/* WMI events when queued */
475 	rx_event_log_buf->length = 0;
476 	rx_event_log_buf->buf_tail_idx = 0;
477 	rx_event_log_buf->buf = wmi_rx_event_log_buffer;
478 	rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx;
479 	rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
480 
481 	/* WMI Management commands */
482 	mgmt_cmd_log_buf->length = 0;
483 	mgmt_cmd_log_buf->buf_tail_idx = 0;
484 	mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer;
485 	mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx;
486 	mgmt_cmd_log_buf->size = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
487 
488 	/* WMI Management commands Tx completed*/
489 	mgmt_cmd_tx_cmp_log_buf->length = 0;
490 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
491 	mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer;
492 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
493 		&g_wmi_mgmt_command_tx_cmp_buf_idx;
494 	mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
495 
496 	/* WMI Management events when received */
497 	mgmt_event_log_buf->length = 0;
498 	mgmt_event_log_buf->buf_tail_idx = 0;
499 	mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer;
500 	mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx;
501 	mgmt_event_log_buf->size = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
502 
503 	/* WMI diag events when received */
504 	diag_event_log_buf->length = 0;
505 	diag_event_log_buf->buf_tail_idx = 0;
506 	diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer;
507 	diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx;
508 	diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
509 
510 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
511 	wmi_handle->log_info.wmi_logging_enable = 1;
512 
513 	return QDF_STATUS_SUCCESS;
514 }
515 #else
516 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
517 {
518 	struct wmi_log_buf_t *cmd_log_buf =
519 			&wmi_handle->log_info.wmi_command_log_buf_info;
520 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
521 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
522 
523 	struct wmi_log_buf_t *event_log_buf =
524 			&wmi_handle->log_info.wmi_event_log_buf_info;
525 	struct wmi_log_buf_t *rx_event_log_buf =
526 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
527 
528 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
529 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
530 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
531 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
532 	struct wmi_log_buf_t *mgmt_event_log_buf =
533 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
534 	struct wmi_log_buf_t *diag_event_log_buf =
535 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
536 
537 	wmi_handle->log_info.wmi_logging_enable = 0;
538 
539 	/* WMI commands */
540 	cmd_log_buf->length = 0;
541 	cmd_log_buf->buf_tail_idx = 0;
542 	cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
543 		wmi_cmd_log_max_entry * sizeof(struct wmi_command_debug));
544 	cmd_log_buf->size = wmi_cmd_log_max_entry;
545 
546 	if (!cmd_log_buf->buf)
547 		return QDF_STATUS_E_NOMEM;
548 
549 	cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx;
550 
551 	/* WMI commands TX completed */
552 	cmd_tx_cmpl_log_buf->length = 0;
553 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
554 	cmd_tx_cmpl_log_buf->buf = (struct wmi_command_cmp_debug *) qdf_mem_malloc(
555 		wmi_cmd_cmpl_log_max_entry * sizeof(struct wmi_command_cmp_debug));
556 	cmd_tx_cmpl_log_buf->size = wmi_cmd_cmpl_log_max_entry;
557 
558 	if (!cmd_tx_cmpl_log_buf->buf)
559 		return QDF_STATUS_E_NOMEM;
560 
561 	cmd_tx_cmpl_log_buf->p_buf_tail_idx =
562 		&cmd_tx_cmpl_log_buf->buf_tail_idx;
563 
564 	/* WMI events when processed */
565 	event_log_buf->length = 0;
566 	event_log_buf->buf_tail_idx = 0;
567 	event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
568 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
569 	event_log_buf->size = wmi_event_log_max_entry;
570 
571 	if (!event_log_buf->buf)
572 		return QDF_STATUS_E_NOMEM;
573 
574 	event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx;
575 
576 	/* WMI events when queued */
577 	rx_event_log_buf->length = 0;
578 	rx_event_log_buf->buf_tail_idx = 0;
579 	rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
580 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
581 	rx_event_log_buf->size = wmi_event_log_max_entry;
582 
583 	if (!rx_event_log_buf->buf)
584 		return QDF_STATUS_E_NOMEM;
585 
586 	rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx;
587 
588 	/* WMI Management commands */
589 	mgmt_cmd_log_buf->length = 0;
590 	mgmt_cmd_log_buf->buf_tail_idx = 0;
591 	mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
592 		wmi_mgmt_tx_log_max_entry * sizeof(struct wmi_command_debug));
593 	mgmt_cmd_log_buf->size = wmi_mgmt_tx_log_max_entry;
594 
595 	if (!mgmt_cmd_log_buf->buf)
596 		return QDF_STATUS_E_NOMEM;
597 
598 	mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx;
599 
600 	/* WMI Management commands Tx completed*/
601 	mgmt_cmd_tx_cmp_log_buf->length = 0;
602 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
603 	mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *)
604 		qdf_mem_malloc(
605 		wmi_mgmt_tx_cmpl_log_max_entry *
606 		sizeof(struct wmi_command_debug));
607 	mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_tx_cmpl_log_max_entry;
608 
609 	if (!mgmt_cmd_tx_cmp_log_buf->buf)
610 		return QDF_STATUS_E_NOMEM;
611 
612 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
613 		&mgmt_cmd_tx_cmp_log_buf->buf_tail_idx;
614 
615 	/* WMI Management events when received */
616 	mgmt_event_log_buf->length = 0;
617 	mgmt_event_log_buf->buf_tail_idx = 0;
618 
619 	mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
620 		wmi_mgmt_rx_log_max_entry *
621 		sizeof(struct wmi_event_debug));
622 	mgmt_event_log_buf->size = wmi_mgmt_rx_log_max_entry;
623 
624 	if (!mgmt_event_log_buf->buf)
625 		return QDF_STATUS_E_NOMEM;
626 
627 	mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx;
628 
629 	/* WMI diag events when received */
630 	diag_event_log_buf->length = 0;
631 	diag_event_log_buf->buf_tail_idx = 0;
632 
633 	diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
634 		wmi_diag_log_max_entry *
635 		sizeof(struct wmi_event_debug));
636 	diag_event_log_buf->size = wmi_diag_log_max_entry;
637 
638 	if (!diag_event_log_buf->buf)
639 		return QDF_STATUS_E_NOMEM;
640 
641 	diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx;
642 
643 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
644 	wmi_handle->log_info.wmi_logging_enable = 1;
645 
646 	wmi_filtered_logging_init(wmi_handle);
647 
648 	return QDF_STATUS_SUCCESS;
649 }
650 #endif
651 
652 /**
653  * wmi_log_buffer_free() - Free all dynamically allocated buffer memory for
654  * event logging
655  * @wmi_handle: WMI handle.
656  *
657  * Return: None
658  */
659 #ifdef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
660 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
661 {
662 	wmi_filtered_logging_free(wmi_handle);
663 
664 	if (wmi_handle->log_info.wmi_command_log_buf_info.buf)
665 		qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf);
666 	if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf)
667 		qdf_mem_free(
668 		wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf);
669 	if (wmi_handle->log_info.wmi_event_log_buf_info.buf)
670 		qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf);
671 	if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf)
672 		qdf_mem_free(
673 			wmi_handle->log_info.wmi_rx_event_log_buf_info.buf);
674 	if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf)
675 		qdf_mem_free(
676 			wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf);
677 	if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf)
678 		qdf_mem_free(
679 		wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf);
680 	if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf)
681 		qdf_mem_free(
682 			wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf);
683 	if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf)
684 		qdf_mem_free(
685 			wmi_handle->log_info.wmi_diag_event_log_buf_info.buf);
686 	wmi_handle->log_info.wmi_logging_enable = 0;
687 
688 	qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock);
689 }
690 #else
691 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
692 {
693 	/* Do Nothing */
694 }
695 #endif
696 
697 /**
698  * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer
699  * @log_buffer: the command log buffer metadata of the buffer to print
700  * @count: the maximum number of entries to print
701  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
702  * @print_priv: any data required by the print method, e.g. a file handle
703  *
704  * Return: None
705  */
706 static void
707 wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
708 			 qdf_abstract_print *print, void *print_priv)
709 {
710 	static const int data_len =
711 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
712 	char str[128];
713 	uint32_t idx;
714 
715 	if (count > log_buffer->size)
716 		count = log_buffer->size;
717 	if (count > log_buffer->length)
718 		count = log_buffer->length;
719 
720 	/* subtract count from index, and wrap if necessary */
721 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
722 	idx %= log_buffer->size;
723 
724 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
725 	while (count) {
726 		struct wmi_command_debug *cmd_log = (struct wmi_command_debug *)
727 			&((struct wmi_command_debug *)log_buffer->buf)[idx];
728 		uint64_t secs, usecs;
729 		int len = 0;
730 		int i;
731 
732 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
733 		len += scnprintf(str + len, sizeof(str) - len,
734 				 "% 8lld.%06lld    %6u (0x%06x)    ",
735 				 secs, usecs,
736 				 cmd_log->command, cmd_log->command);
737 		for (i = 0; i < data_len; ++i) {
738 			len += scnprintf(str + len, sizeof(str) - len,
739 					 "0x%08x ", cmd_log->data[i]);
740 		}
741 
742 		print(print_priv, str);
743 
744 		--count;
745 		++idx;
746 		if (idx >= log_buffer->size)
747 			idx = 0;
748 	}
749 }
750 
751 /**
752  * wmi_print_cmd_cmp_log_buffer() - wmi command completion log printer
753  * @log_buffer: the command completion log buffer metadata of the buffer to print
754  * @count: the maximum number of entries to print
755  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
756  * @print_priv: any data required by the print method, e.g. a file handle
757  *
758  * Return: None
759  */
760 static void
761 wmi_print_cmd_cmp_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
762 			 qdf_abstract_print *print, void *print_priv)
763 {
764 	static const int data_len =
765 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
766 	char str[128];
767 	uint32_t idx;
768 
769 	if (count > log_buffer->size)
770 		count = log_buffer->size;
771 	if (count > log_buffer->length)
772 		count = log_buffer->length;
773 
774 	/* subtract count from index, and wrap if necessary */
775 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
776 	idx %= log_buffer->size;
777 
778 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
779 	while (count) {
780 		struct wmi_command_cmp_debug *cmd_log = (struct wmi_command_cmp_debug *)
781 			&((struct wmi_command_cmp_debug *)log_buffer->buf)[idx];
782 		uint64_t secs, usecs;
783 		int len = 0;
784 		int i;
785 
786 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
787 		len += scnprintf(str + len, sizeof(str) - len,
788 				 "% 8lld.%06lld    %6u (0x%06x)    ",
789 				 secs, usecs,
790 				 cmd_log->command, cmd_log->command);
791 		for (i = 0; i < data_len; ++i) {
792 			len += scnprintf(str + len, sizeof(str) - len,
793 					 "0x%08x ", cmd_log->data[i]);
794 		}
795 
796 		print(print_priv, str);
797 
798 		--count;
799 		++idx;
800 		if (idx >= log_buffer->size)
801 			idx = 0;
802 	}
803 }
804 
805 /**
806  * wmi_print_event_log_buffer() - an output agnostic wmi event log printer
807  * @log_buffer: the event log buffer metadata of the buffer to print
808  * @count: the maximum number of entries to print
809  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
810  * @print_priv: any data required by the print method, e.g. a file handle
811  *
812  * Return: None
813  */
814 static void
815 wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
816 			   qdf_abstract_print *print, void *print_priv)
817 {
818 	static const int data_len =
819 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
820 	char str[128];
821 	uint32_t idx;
822 
823 	if (count > log_buffer->size)
824 		count = log_buffer->size;
825 	if (count > log_buffer->length)
826 		count = log_buffer->length;
827 
828 	/* subtract count from index, and wrap if necessary */
829 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
830 	idx %= log_buffer->size;
831 
832 	print(print_priv, "Time (seconds)      Event Id             Payload");
833 	while (count) {
834 		struct wmi_event_debug *event_log = (struct wmi_event_debug *)
835 			&((struct wmi_event_debug *)log_buffer->buf)[idx];
836 		uint64_t secs, usecs;
837 		int len = 0;
838 		int i;
839 
840 		qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs);
841 		len += scnprintf(str + len, sizeof(str) - len,
842 				 "% 8lld.%06lld    %6u (0x%06x)    ",
843 				 secs, usecs,
844 				 event_log->event, event_log->event);
845 		for (i = 0; i < data_len; ++i) {
846 			len += scnprintf(str + len, sizeof(str) - len,
847 					 "0x%08x ", event_log->data[i]);
848 		}
849 
850 		print(print_priv, str);
851 
852 		--count;
853 		++idx;
854 		if (idx >= log_buffer->size)
855 			idx = 0;
856 	}
857 }
858 
859 inline void
860 wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count,
861 		  qdf_abstract_print *print, void *print_priv)
862 {
863 	wmi_print_cmd_log_buffer(
864 		&wmi->log_info.wmi_command_log_buf_info,
865 		count, print, print_priv);
866 }
867 
868 inline void
869 wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
870 			 qdf_abstract_print *print, void *print_priv)
871 {
872 	wmi_print_cmd_cmp_log_buffer(
873 		&wmi->log_info.wmi_command_tx_cmp_log_buf_info,
874 		count, print, print_priv);
875 }
876 
877 inline void
878 wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count,
879 		       qdf_abstract_print *print, void *print_priv)
880 {
881 	wmi_print_cmd_log_buffer(
882 		&wmi->log_info.wmi_mgmt_command_log_buf_info,
883 		count, print, print_priv);
884 }
885 
886 inline void
887 wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
888 			      qdf_abstract_print *print, void *print_priv)
889 {
890 	wmi_print_cmd_log_buffer(
891 		&wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info,
892 		count, print, print_priv);
893 }
894 
895 inline void
896 wmi_print_event_log(wmi_unified_t wmi, uint32_t count,
897 		    qdf_abstract_print *print, void *print_priv)
898 {
899 	wmi_print_event_log_buffer(
900 		&wmi->log_info.wmi_event_log_buf_info,
901 		count, print, print_priv);
902 }
903 
904 inline void
905 wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count,
906 		       qdf_abstract_print *print, void *print_priv)
907 {
908 	wmi_print_event_log_buffer(
909 		&wmi->log_info.wmi_rx_event_log_buf_info,
910 		count, print, print_priv);
911 }
912 
913 inline void
914 wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count,
915 			 qdf_abstract_print *print, void *print_priv)
916 {
917 	wmi_print_event_log_buffer(
918 		&wmi->log_info.wmi_mgmt_event_log_buf_info,
919 		count, print, print_priv);
920 }
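/*
 * Illustrative sketch (editorial, not part of revision cef49bdf): the print
 * helpers above accept any qdf_abstract_print-compatible callback, so the
 * same rings can be dumped to the console, a seq_file, etc.
 * 'dump_to_console' is hypothetical.
 *
 *	static int dump_to_console(void *priv, const char *fmt, ...)
 *	{
 *		// format 'fmt' and forward it to qdf_print()-style output
 *	}
 *
 *	wmi_print_cmd_log(wmi_handle, 32, dump_to_console, NULL);
 *	wmi_print_event_log(wmi_handle, 32, dump_to_console, NULL);
 */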
921 
922 
923 /* debugfs routines */
924 
925 /*
926  * debug_wmi_##func_base##_show() - debugfs function to display the contents of
927  * the command and event ring buffers. The macro uses the maximum buffer length
928  * to display the buffer once it has wrapped around.
929  *
930  * @m: debugfs handler to access wmi_handle
931  * @v: Variable arguments (not used)
932  *
933  * Return: Length of characters printed
934  */
935 #define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
936 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
937 						void *v)		\
938 	{								\
939 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
940 		struct wmi_log_buf_t *wmi_log =				\
941 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
942 		int pos, nread, outlen;					\
943 		int i;							\
944 		uint64_t secs, usecs;					\
945 									\
946 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
947 		if (!wmi_log->length) {					\
948 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
949 			return wmi_bp_seq_printf(m,			\
950 			"no elements to read from ring buffer!\n");	\
951 		}							\
952 									\
953 		if (wmi_log->length <= wmi_ring_size)			\
954 			nread = wmi_log->length;			\
955 		else							\
956 			nread = wmi_ring_size;				\
957 									\
958 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
959 			/* tail can be 0 after wrap-around */		\
960 			pos = wmi_ring_size - 1;			\
961 		else							\
962 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
963 									\
964 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
965 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
966 		while (nread--) {					\
967 			struct wmi_record_type *wmi_record;		\
968 									\
969 			wmi_record = (struct wmi_record_type *)	\
970 			&(((struct wmi_record_type *)wmi_log->buf)[pos]);\
971 			outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n",	\
972 				(wmi_record->command));			\
973 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
974 				&usecs);				\
975 			outlen +=					\
976 			wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\
977 				secs, usecs);				\
978 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
979 			for (i = 0; i < (wmi_record_max_length/		\
980 					sizeof(uint32_t)); i++)		\
981 				outlen += wmi_bp_seq_printf(m, "%x ",	\
982 					wmi_record->data[i]);		\
983 			outlen += wmi_bp_seq_printf(m, "\n");		\
984 									\
985 			if (pos == 0)					\
986 				pos = wmi_ring_size - 1;		\
987 			else						\
988 				pos--;					\
989 		}							\
990 		return outlen;						\
991 	}								\
992 
993 #define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size)	\
994 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
995 						void *v)		\
996 	{								\
997 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
998 		struct wmi_log_buf_t *wmi_log =				\
999 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
1000 		int pos, nread, outlen;					\
1001 		int i;							\
1002 		uint64_t secs, usecs;					\
1003 									\
1004 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
1005 		if (!wmi_log->length) {					\
1006 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1007 			return wmi_bp_seq_printf(m,			\
1008 			"no elements to read from ring buffer!\n");	\
1009 		}							\
1010 									\
1011 		if (wmi_log->length <= wmi_ring_size)			\
1012 			nread = wmi_log->length;			\
1013 		else							\
1014 			nread = wmi_ring_size;				\
1015 									\
1016 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
1017 			/* tail can be 0 after wrap-around */		\
1018 			pos = wmi_ring_size - 1;			\
1019 		else							\
1020 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
1021 									\
1022 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
1023 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1024 		while (nread--) {					\
1025 			struct wmi_event_debug *wmi_record;		\
1026 									\
1027 			wmi_record = (struct wmi_event_debug *)		\
1028 			&(((struct wmi_event_debug *)wmi_log->buf)[pos]);\
1029 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
1030 				&usecs);				\
1031 			outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\
1032 				(wmi_record->event));			\
1033 			outlen +=					\
1034 			wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\
1035 				secs, usecs);				\
1036 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
1037 			for (i = 0; i < (wmi_record_max_length/		\
1038 					sizeof(uint32_t)); i++)		\
1039 				outlen += wmi_bp_seq_printf(m, "%x ",	\
1040 					wmi_record->data[i]);		\
1041 			outlen += wmi_bp_seq_printf(m, "\n");		\
1042 									\
1043 			if (pos == 0)					\
1044 				pos = wmi_ring_size - 1;		\
1045 			else						\
1046 				pos--;					\
1047 		}							\
1048 		return outlen;						\
1049 	}
1050 
1051 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size,
1052 				  wmi_command_debug);
1053 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size,
1054 				  wmi_command_cmp_debug);
1055 GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size);
1056 GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size);
1057 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size,
1058 				  wmi_command_debug);
1059 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log,
1060 					wmi_display_size,
1061 					wmi_command_debug);
1062 GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size);
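/*
 * Descriptive note (editorial): each instantiation above expands to a
 * seq_file show handler named after its ring, e.g.
 * GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, ...) defines
 * debug_wmi_command_log_show(), which walks
 * log_info.wmi_command_log_buf_info backwards from the tail index.
 */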
1063 
1064 /**
1065  * debug_wmi_enable_show() - debugfs function to display the enable state of
1066  * the wmi logging feature.
1067  *
1068  * @m: debugfs handler to access wmi_handle
1069  * @v: Variable arguments (not used)
1070  *
1071  * Return: result of wmi_bp_seq_printf()
1072  */
1073 static int debug_wmi_enable_show(struct seq_file *m, void *v)
1074 {
1075 	wmi_unified_t wmi_handle = (wmi_unified_t) m->private;
1076 
1077 	return wmi_bp_seq_printf(m, "%d\n",
1078 			wmi_handle->log_info.wmi_logging_enable);
1079 }
1080 
1081 /**
1082  * debug_wmi_log_size_show() - debugfs function to display the configured sizes
1083  * of the wmi logging command/event buffers and management command/event buffers.
1084  *
1085  * @m: debugfs handler to access wmi_handle
1086  * @v: Variable arguments (not used)
1087  *
1088  * Return: Length of characters printed
1089  */
1090 static int debug_wmi_log_size_show(struct seq_file *m, void *v)
1091 {
1092 
1093 	wmi_bp_seq_printf(m, "WMI command/cmpl log max size:%d/%d\n",
1094 			  wmi_cmd_log_max_entry, wmi_cmd_cmpl_log_max_entry);
1095 	wmi_bp_seq_printf(m, "WMI management Tx/cmpl log max size:%d/%d\n",
1096 			  wmi_mgmt_tx_log_max_entry,
1097 			  wmi_mgmt_tx_cmpl_log_max_entry);
1098 	wmi_bp_seq_printf(m, "WMI event log max size:%d\n",
1099 			  wmi_event_log_max_entry);
1100 	wmi_bp_seq_printf(m, "WMI management Rx log max size:%d\n",
1101 			  wmi_mgmt_rx_log_max_entry);
1102 	return wmi_bp_seq_printf(m,
1103 				 "WMI diag log max size:%d\n",
1104 				 wmi_diag_log_max_entry);
1105 }
1106 
1107 /*
1108  * debug_wmi_##func_base##_write() - debugfs function to clear the
1109  * corresponding wmi logging command/event or management command/event buffer.
1110  *
1111  * @file: file handler to access wmi_handle
1112  * @buf: received data buffer
1113  * @count: length of received buffer
1114  * @ppos: Not used
1115  *
1116  * Return: count
1117  */
1118 #define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
1119 	static ssize_t debug_wmi_##func_base##_write(struct file *file,	\
1120 				const char __user *buf,			\
1121 				size_t count, loff_t *ppos)		\
1122 	{								\
1123 		int k, ret;						\
1124 		wmi_unified_t wmi_handle =				\
1125 			((struct seq_file *)file->private_data)->private;\
1126 		struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info.	\
1127 				wmi_##func_base##_buf_info;		\
1128 		char locbuf[50];					\
1129 									\
1130 		if ((!buf) || (count > 50))				\
1131 			return -EFAULT;					\
1132 									\
1133 		if (copy_from_user(locbuf, buf, count))			\
1134 			return -EFAULT;					\
1135 									\
1136 		ret = sscanf(locbuf, "%d", &k);				\
1137 		if ((ret != 1) || (k != 0)) {                           \
1138 			wmi_err("Wrong input, echo 0 to clear the wmi buffer");\
1139 			return -EINVAL;					\
1140 		}							\
1141 									\
1142 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
1143 		qdf_mem_zero(wmi_log->buf, wmi_ring_size *		\
1144 				sizeof(struct wmi_record_type));	\
1145 		wmi_log->length = 0;					\
1146 		*(wmi_log->p_buf_tail_idx) = 0;				\
1147 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1148 									\
1149 		return count;						\
1150 	}
1151 
1152 GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_cmd_log_max_entry,
1153 			   wmi_command_debug);
1154 GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_cmd_cmpl_log_max_entry,
1155 			   wmi_command_cmp_debug);
1156 GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_event_log_max_entry,
1157 			   wmi_event_debug);
1158 GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_event_log_max_entry,
1159 			   wmi_event_debug);
1160 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_tx_log_max_entry,
1161 			   wmi_command_debug);
1162 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log,
1163 			   wmi_mgmt_tx_cmpl_log_max_entry, wmi_command_debug);
1164 GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_rx_log_max_entry,
1165 			   wmi_event_debug);
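/*
 * Illustrative sketch (editorial, not part of revision cef49bdf): the
 * generated write handlers are driven from user space through the debugfs
 * entries created by wmi_debugfs_init(); the mount point and directory name
 * depend on the platform and SOC/PDEV indices, e.g. (hypothetical path):
 *
 *	echo 0 > /sys/kernel/debug/WMI_SOC0_PDEV0/wmi_command_log
 */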
1166 
1167 /**
1168  * debug_wmi_enable_write() - debugfs function to enable/disable the
1169  * wmi logging feature.
1170  *
1171  * @file: file handler to access wmi_handle
1172  * @buf: received data buffer
1173  * @count: length of received buffer
1174  * @ppos: Not used
1175  *
1176  * Return: count
1177  */
1178 static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf,
1179 					size_t count, loff_t *ppos)
1180 {
1181 	wmi_unified_t wmi_handle =
1182 		((struct seq_file *)file->private_data)->private;
1183 	int k, ret;
1184 	char locbuf[50];
1185 
1186 	if ((!buf) || (count > 50))
1187 		return -EFAULT;
1188 
1189 	if (copy_from_user(locbuf, buf, count))
1190 		return -EFAULT;
1191 
1192 	ret = sscanf(locbuf, "%d", &k);
1193 	if ((ret != 1) || ((k != 0) && (k != 1)))
1194 		return -EINVAL;
1195 
1196 	wmi_handle->log_info.wmi_logging_enable = k;
1197 	return count;
1198 }
1199 
1200 /**
1201  * debug_wmi_log_size_write() - reserved.
1202  *
1203  * @file: file handler to access wmi_handle
1204  * @buf: received data buffer
1205  * @count: length of received buffer
1206  * @ppos: Not used
1207  *
1208  * Return: count
1209  */
1210 static ssize_t debug_wmi_log_size_write(struct file *file,
1211 		const char __user *buf, size_t count, loff_t *ppos)
1212 {
1213 	return -EINVAL;
1214 }
1215 
1216 /* Structure to maintain debug information */
1217 struct wmi_debugfs_info {
1218 	const char *name;
1219 	const struct file_operations *ops;
1220 };
1221 
1222 #define DEBUG_FOO(func_base) { .name = #func_base,			\
1223 	.ops = &debug_##func_base##_ops }
1224 
1225 /*
1226  * debug_##func_base##_open() - Open debugfs entry for respective command
1227  * and event buffer.
1228  *
1229  * @inode: node for debug dir entry
1230  * @file: file handler
1231  *
1232  * Return: open status
1233  */
1234 #define GENERATE_DEBUG_STRUCTS(func_base)				\
1235 	static int debug_##func_base##_open(struct inode *inode,	\
1236 						struct file *file)	\
1237 	{								\
1238 		return single_open(file, debug_##func_base##_show,	\
1239 				inode->i_private);			\
1240 	}								\
1241 									\
1242 									\
1243 	static struct file_operations debug_##func_base##_ops = {	\
1244 		.open		= debug_##func_base##_open,		\
1245 		.read		= seq_read,				\
1246 		.llseek		= seq_lseek,				\
1247 		.write		= debug_##func_base##_write,		\
1248 		.release	= single_release,			\
1249 	};
1250 
1251 GENERATE_DEBUG_STRUCTS(wmi_command_log);
1252 GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log);
1253 GENERATE_DEBUG_STRUCTS(wmi_event_log);
1254 GENERATE_DEBUG_STRUCTS(wmi_rx_event_log);
1255 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log);
1256 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log);
1257 GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log);
1258 GENERATE_DEBUG_STRUCTS(wmi_enable);
1259 GENERATE_DEBUG_STRUCTS(wmi_log_size);
1260 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1261 GENERATE_DEBUG_STRUCTS(filtered_wmi_cmds);
1262 GENERATE_DEBUG_STRUCTS(filtered_wmi_evts);
1263 GENERATE_DEBUG_STRUCTS(wmi_filtered_command_log);
1264 GENERATE_DEBUG_STRUCTS(wmi_filtered_event_log);
1265 #endif
1266 
1267 struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = {
1268 	DEBUG_FOO(wmi_command_log),
1269 	DEBUG_FOO(wmi_command_tx_cmp_log),
1270 	DEBUG_FOO(wmi_event_log),
1271 	DEBUG_FOO(wmi_rx_event_log),
1272 	DEBUG_FOO(wmi_mgmt_command_log),
1273 	DEBUG_FOO(wmi_mgmt_command_tx_cmp_log),
1274 	DEBUG_FOO(wmi_mgmt_event_log),
1275 	DEBUG_FOO(wmi_enable),
1276 	DEBUG_FOO(wmi_log_size),
1277 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1278 	DEBUG_FOO(filtered_wmi_cmds),
1279 	DEBUG_FOO(filtered_wmi_evts),
1280 	DEBUG_FOO(wmi_filtered_command_log),
1281 	DEBUG_FOO(wmi_filtered_event_log),
1282 #endif
1283 };
1284 
1285 /**
1286  * wmi_debugfs_create() - Create debug_fs entry for wmi logging.
1287  *
1288  * @wmi_handle: wmi handle
1289  * @par_entry: debug directory entry
1290  *
1291  * Return: none
1292  */
1293 static void wmi_debugfs_create(wmi_unified_t wmi_handle,
1294 			       struct dentry *par_entry)
1295 {
1296 	int i;
1297 
1298 	if (!par_entry)
1299 		goto out;
1300 
1301 	for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1302 		wmi_handle->debugfs_de[i] = qdf_debugfs_create_entry(
1303 						wmi_debugfs_infos[i].name,
1304 						WMI_INFOS_DBG_FILE_PERM,
1305 						par_entry,
1306 						wmi_handle,
1307 						wmi_debugfs_infos[i].ops);
1308 
1309 		if (!wmi_handle->debugfs_de[i]) {
1310 			wmi_err("debug Entry creation failed!");
1311 			goto out;
1312 		}
1313 	}
1314 
1315 	return;
1316 
1317 out:
1318 	wmi_err("debug Entry creation failed!");
1319 	wmi_log_buffer_free(wmi_handle);
1320 	return;
1321 }
1322 
1323 /**
1324  * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
1325  * @wmi_handle: wmi handle
1326  *
1327  * Return: none
1328  */
1329 static void wmi_debugfs_remove(wmi_unified_t wmi_handle)
1330 {
1331 	int i;
1332 	struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir;
1333 
1334 	if (dentry) {
1335 		for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1336 			if (wmi_handle->debugfs_de[i])
1337 				wmi_handle->debugfs_de[i] = NULL;
1338 		}
1339 	}
1340 
1341 	if (dentry)
1342 		qdf_debugfs_remove_dir_recursive(dentry);
1343 }
1344 
1345 /**
1346  * wmi_debugfs_init() - debugfs functions to create debugfs directory and to
1347  *                      create debugfs entries.
1348  * @wmi_handle: wmi handler
1349  * @pdev_idx: pdev id
1350  *
1351  * Return: init status
1352  */
1353 static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx)
1354 {
1355 	char buf[32];
1356 
1357 	snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u",
1358 		 wmi_handle->soc->soc_idx, pdev_idx);
1359 
1360 	wmi_handle->log_info.wmi_log_debugfs_dir =
1361 		qdf_debugfs_create_dir(buf, NULL);
1362 
1363 	if (!wmi_handle->log_info.wmi_log_debugfs_dir) {
1364 		wmi_err("error while creating debugfs dir for %s", buf);
1365 		return QDF_STATUS_E_FAILURE;
1366 	}
1367 	wmi_debugfs_create(wmi_handle,
1368 			   wmi_handle->log_info.wmi_log_debugfs_dir);
1369 
1370 	return QDF_STATUS_SUCCESS;
1371 }
1372 
1373 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1374 			void *header, uint32_t vdev_id, uint32_t chanfreq)
1375 {
1376 
1377 	uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE];
1378 
1379 	data[0] = ((struct wmi_command_header *)header)->type;
1380 	data[1] = ((struct wmi_command_header *)header)->sub_type;
1381 	data[2] = vdev_id;
1382 	data[3] = chanfreq;
1383 
1384 	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
1385 
1386 	WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data);
1387 	wmi_specific_cmd_record(wmi_handle, cmd, (uint8_t *)data);
1388 	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
1389 }
1390 #else
1391 static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { }
1392 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1393 			void *header, uint32_t vdev_id, uint32_t chanfreq) { }
1394 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { }
1395 static void wmi_minidump_detach(struct wmi_unified *wmi_handle) { }
1396 static void wmi_minidump_attach(struct wmi_unified *wmi_handle) { }
1397 #endif /*WMI_INTERFACE_EVENT_LOGGING */
1398 qdf_export_symbol(wmi_mgmt_cmd_record);
1399 
1400 #ifdef WMI_EXT_DBG
1401 
1402 /**
1403  * wmi_ext_dbg_msg_enqueue() - enqueue wmi message
1404  * @wmi_handle: wmi handler
1405  * @msg: WMI message
1406  *
1407  * Return: size of wmi message queue after enqueue
1408  */
1409 static uint32_t wmi_ext_dbg_msg_enqueue(struct wmi_unified *wmi_handle,
1410 					struct wmi_ext_dbg_msg *msg)
1411 {
1412 	uint32_t list_size;
1413 
1414 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1415 	qdf_list_insert_back_size(&wmi_handle->wmi_ext_dbg_msg_queue,
1416 				  &msg->node, &list_size);
1417 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1418 
1419 	return list_size;
1420 }
1421 
1422 /**
1423  * wmi_ext_dbg_msg_dequeue() - dequeue wmi message
1424  * @wmi_handle: wmi handler
1425  *
1426  * Return: wmi msg on success else NULL
1427  */
1428 static struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_dequeue(struct wmi_unified
1429 						       *wmi_handle)
1430 {
1431 	qdf_list_node_t *list_node = NULL;
1432 
1433 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1434 	qdf_list_remove_front(&wmi_handle->wmi_ext_dbg_msg_queue, &list_node);
1435 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1436 
1437 	if (!list_node)
1438 		return NULL;
1439 
1440 	return qdf_container_of(list_node, struct wmi_ext_dbg_msg, node);
1441 }
1442 
1443 /**
1444  * wmi_ext_dbg_msg_record() - record wmi messages
1445  * @wmi_handle: wmi handler
1446  * @buf: wmi message buffer
1447  * @len: wmi message length
1448  * @type: wmi message type
1449  *
1450  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1451  */
1452 static QDF_STATUS wmi_ext_dbg_msg_record(struct wmi_unified *wmi_handle,
1453 					 uint8_t *buf, uint32_t len,
1454 					 enum WMI_MSG_TYPE type)
1455 {
1456 	struct wmi_ext_dbg_msg *msg;
1457 	uint32_t list_size;
1458 
1459 	msg = wmi_ext_dbg_msg_get(len);
1460 	if (!msg)
1461 		return QDF_STATUS_E_NOMEM;
1462 
1463 	msg->len = len;
1464 	msg->type = type;
1465 	qdf_mem_copy(msg->buf, buf, len);
1466 	msg->ts = qdf_get_log_timestamp();
1467 	list_size = wmi_ext_dbg_msg_enqueue(wmi_handle, msg);
1468 
1469 	if (list_size >= wmi_handle->wmi_ext_dbg_msg_queue_size) {
1470 		msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1471 		wmi_ext_dbg_msg_put(msg);
1472 	}
1473 
1474 	return QDF_STATUS_SUCCESS;
1475 }
1476 
1477 /**
1478  * wmi_ext_dbg_msg_cmd_record() - record wmi command messages
1479  * @wmi_handle: wmi handler
1480  * @buf: wmi command buffer
1481  * @len: wmi command message length
1482  *
1483  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1484  */
1485 static QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified *wmi_handle,
1486 					     uint8_t *buf, uint32_t len)
1487 {
1488 	return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1489 				      WMI_MSG_TYPE_CMD);
1490 }
1491 
1492 /**
1493  * wmi_ext_dbg_msg_event_record() - record wmi event messages
1494  * @wmi_handle: wmi handler
1495  * @buf: wmi event buffer
1496  * @len: wmi event message length
1497  *
1498  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1499  */
1500 static QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified *wmi_handle,
1501 					       uint8_t *buf, uint32_t len)
1502 {
1503 	uint32_t id;
1504 
1505 	id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
1506 	if (id != wmi_handle->wmi_events[wmi_diag_event_id])
1507 		return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1508 					      WMI_MSG_TYPE_EVENT);
1509 
1510 	return QDF_STATUS_SUCCESS;
1511 }
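/*
 * Illustrative sketch (editorial, not part of revision cef49bdf): the two
 * wrappers above are intended to hook the command send and event receive
 * paths, recording the raw message bytes. The buffers below are hypothetical.
 *
 *	wmi_ext_dbg_msg_cmd_record(wmi_handle,
 *				   qdf_nbuf_data(buf), qdf_nbuf_len(buf));
 *	wmi_ext_dbg_msg_event_record(wmi_handle,
 *				     qdf_nbuf_data(evt_buf),
 *				     qdf_nbuf_len(evt_buf));
 */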
1512 
1513 /**
1514  * wmi_ext_dbg_msg_queue_init() - create debugfs queue and associated lock
1515  * @wmi_handle: wmi handler
1516  *
1517  * Return: none
1518  */
1519 static void wmi_ext_dbg_msg_queue_init(struct wmi_unified *wmi_handle)
1520 {
1521 	qdf_list_create(&wmi_handle->wmi_ext_dbg_msg_queue,
1522 			wmi_handle->wmi_ext_dbg_msg_queue_size);
1523 	qdf_spinlock_create(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1524 }
1525 
1526 /**
1527  * wmi_ext_dbg_msg_queue_deinit() - destroy debugfs queue and associated lock
1528  * @wmi_handle: wmi handle
1529  *
1530  * Return: none
1531  */
1532 static void wmi_ext_dbg_msg_queue_deinit(struct wmi_unified *wmi_handle)
1533 {
1534 	qdf_list_destroy(&wmi_handle->wmi_ext_dbg_msg_queue);
1535 	qdf_spinlock_destroy(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1536 }
1537 
1538 /**
1539  * wmi_ext_dbg_msg_show() - debugfs function to display the whole content
1540  * of wmi command/event messages, including headers.
1541  * @file: qdf debugfs file handle
1542  * @arg: pointer to wmi handle
1543  *
1544  * Return: QDF_STATUS_SUCCESS once all messages have been shown,
1545  * else QDF_STATUS_E_AGAIN if there is more data to show.
1546  */
1547 static QDF_STATUS wmi_ext_dbg_msg_show(qdf_debugfs_file_t file, void *arg)
1548 {
1549 	struct wmi_unified *wmi_handle = (struct wmi_unified *)arg;
1550 	struct wmi_ext_dbg_msg *msg;
1551 	uint64_t secs, usecs;
1552 
1553 	msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1554 	if (!msg)
1555 		return QDF_STATUS_SUCCESS;
1556 
1557 	qdf_debugfs_printf(file, "%s: 0x%x\n",
1558 			   msg->type == WMI_MSG_TYPE_CMD ? "COMMAND" :
1559 			   "EVENT", WMI_GET_FIELD(msg->buf, WMI_CMD_HDR,
1560 						  COMMANDID));
1561 	qdf_log_timestamp_to_secs(msg->ts, &secs, &usecs);
1562 	qdf_debugfs_printf(file, "Time: %llu.%llu\n", secs, usecs);
1563 	qdf_debugfs_printf(file, "Length:%d\n", msg->len);
1564 	qdf_debugfs_hexdump(file, msg->buf, msg->len,
1565 			    WMI_EXT_DBG_DUMP_ROW_SIZE,
1566 			    WMI_EXT_DBG_DUMP_GROUP_SIZE);
1567 	qdf_debugfs_printf(file, "\n");
1568 
1569 	if (qdf_debugfs_overflow(file)) {
1570 		qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1571 		qdf_list_insert_front(&wmi_handle->wmi_ext_dbg_msg_queue,
1572 				      &msg->node);
1573 		qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1574 
1575 	} else {
1576 		wmi_ext_dbg_msg_put(msg);
1577 	}
1578 
1579 	return QDF_STATUS_E_AGAIN;
1580 }
1581 
1582 /**
1583  * wmi_ext_dbg_msg_write() - debugfs write not supported
1584  * @priv: private data
1585  * @buf: received data buffer
1586  * @len: length of received buffer
1587  *
1588  * Return: QDF_STATUS_E_NOSUPPORT.
1589  */
1590 static QDF_STATUS wmi_ext_dbg_msg_write(void *priv, const char *buf,
1591 					qdf_size_t len)
1592 {
1593 	return QDF_STATUS_E_NOSUPPORT;
1594 }
1595 
1596 static struct qdf_debugfs_fops wmi_ext_dbgfs_ops[WMI_MAX_RADIOS];
1597 
1598 /**
1599  * wmi_ext_dbgfs_init() - init debugfs items for extended wmi dump.
1600  * @wmi_handle: wmi handle
1601  * @pdev_idx: pdev index
1602  *
1603  * Return: QDF_STATUS_SUCCESS if debugfs is initialized else
1604  * QDF_STATUS_E_FAILURE
1605  */
1606 static QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1607 				     uint32_t pdev_idx)
1608 {
1609 	qdf_dentry_t dentry;
1610 	char buf[32];
1611 
1612 	/* To maintain backward compatibility, the naming convention for the
1613 	 * PDEV 0 dentry is kept the same as before. For more than one PDEV,
1614 	 * the dentry name carries the SOC and PDEV indices.
1615 	 */
1616 	if (wmi_handle->soc->soc_idx == 0 && pdev_idx == 0) {
1617 		dentry  = qdf_debugfs_create_dir(WMI_EXT_DBG_DIR, NULL);
1618 	} else {
1619 		snprintf(buf, sizeof(buf), "WMI_EXT_DBG_SOC%u_PDEV%u",
1620 			 wmi_handle->soc->soc_idx, pdev_idx);
1621 		dentry  = qdf_debugfs_create_dir(buf, NULL);
1622 	}
1623 
1624 	if (!dentry) {
1625 		wmi_err("error while creating extended wmi debugfs dir");
1626 		return QDF_STATUS_E_FAILURE;
1627 	}
1628 
1629 	wmi_ext_dbgfs_ops[pdev_idx].show = wmi_ext_dbg_msg_show;
1630 	wmi_ext_dbgfs_ops[pdev_idx].write = wmi_ext_dbg_msg_write;
1631 	wmi_ext_dbgfs_ops[pdev_idx].priv = wmi_handle;
1632 	if (!qdf_debugfs_create_file(WMI_EXT_DBG_FILE, WMI_EXT_DBG_FILE_PERM,
1633 				     dentry, &wmi_ext_dbgfs_ops[pdev_idx])) {
1634 		qdf_debugfs_remove_dir(dentry);
1635 		wmi_err("Error while creating extended wmi debugfs file");
1636 		return QDF_STATUS_E_FAILURE;
1637 	}
1638 
1639 	wmi_handle->wmi_ext_dbg_dentry = dentry;
1640 	wmi_handle->wmi_ext_dbg_msg_queue_size = WMI_EXT_DBG_QUEUE_SIZE;
1641 	wmi_ext_dbg_msg_queue_init(wmi_handle);
1642 
1643 	return QDF_STATUS_SUCCESS;
1644 }
1645 
1646 /**
1647  * wmi_ext_dbgfs_deinit() - cleanup/deinit debugfs items of extended wmi dump.
1648  * @wmi_handle: wmi handle
1649  *
1650  * Return: QDF_STATUS_SUCCESS if cleanup is successful
1651  */
1652 static QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1653 {
1654 	struct wmi_ext_dbg_msg *msg;
1655 
1656 	while ((msg = wmi_ext_dbg_msg_dequeue(wmi_handle)))
1657 		wmi_ext_dbg_msg_put(msg);
1658 
1659 	wmi_ext_dbg_msg_queue_deinit(wmi_handle);
1660 	qdf_debugfs_remove_dir_recursive(wmi_handle->wmi_ext_dbg_dentry);
1661 
1662 	return QDF_STATUS_SUCCESS;
1663 }
1664 
1665 #else
1666 
1667 static inline QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified
1668 						    *wmi_handle,
1669 						    uint8_t *buf, uint32_t len)
1670 {
1671 	return QDF_STATUS_SUCCESS;
1672 }
1673 
1674 static inline QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified
1675 						      *wmi_handle,
1676 						      uint8_t *buf, uint32_t len)
1677 {
1678 	return QDF_STATUS_SUCCESS;
1679 }
1680 
1681 static inline QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1682 					    uint32_t pdev_idx)
1683 {
1684 	return QDF_STATUS_SUCCESS;
1685 }
1686 
1687 static inline QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1688 {
1689 	return QDF_STATUS_SUCCESS;
1690 }
1691 
1692 #endif /* WMI_EXT_DBG */
1693 
1694 int wmi_get_host_credits(wmi_unified_t wmi_handle);
1695 /* WMI buffer APIs */
1696 
1697 #ifdef NBUF_MEMORY_DEBUG
1698 wmi_buf_t
1699 wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len,
1700 		    const char *func_name,
1701 		    uint32_t line_num)
1702 {
1703 	wmi_buf_t wmi_buf;
1704 
1705 	if (roundup(len, 4) > wmi_handle->max_msg_len) {
1706 		wmi_err("Invalid length %u (via %s:%u) max size: %u",
1707 			len, func_name, line_num,
1708 			wmi_handle->max_msg_len);
1709 		QDF_ASSERT(0);
1710 		return NULL;
1711 	}
1712 
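	/*
	 * Try the pre-allocated wbuff pool first; if no pooled buffer fits,
	 * fall back to a fresh nbuf allocation sized with extra headroom
	 * (WMI_MIN_HEAD_ROOM) for the WMI/HTC headers.
	 */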
1713 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, func_name,
1714 				 line_num);
1715 	if (!wmi_buf)
1716 		wmi_buf = qdf_nbuf_alloc_debug(NULL,
1717 					       roundup(len + WMI_MIN_HEAD_ROOM,
1718 						       4),
1719 					       WMI_MIN_HEAD_ROOM, 4, false,
1720 					       func_name, line_num);
1721 	if (!wmi_buf)
1722 		return NULL;
1723 
1724 	/* Clear the wmi buffer */
1725 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1726 
1727 	/*
1728 	 * Set the length of the buffer to match the allocation size.
1729 	 */
1730 	qdf_nbuf_set_pktlen(wmi_buf, len);
1731 
1732 	return wmi_buf;
1733 }
1734 qdf_export_symbol(wmi_buf_alloc_debug);
1735 
1736 void wmi_buf_free(wmi_buf_t net_buf)
1737 {
1738 	net_buf = wbuff_buff_put(net_buf);
1739 	if (net_buf)
1740 		qdf_nbuf_free(net_buf);
1741 }
1742 qdf_export_symbol(wmi_buf_free);
1743 #else
1744 wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len,
1745 			   const char *func, uint32_t line)
1746 {
1747 	wmi_buf_t wmi_buf;
1748 
1749 	if (roundup(len, 4) > wmi_handle->max_msg_len) {
1750 		QDF_DEBUG_PANIC("Invalid length %u (via %s:%u) max size: %u",
1751 				len, func, line, wmi_handle->max_msg_len);
1752 		return NULL;
1753 	}
1754 
1755 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, __func__,
1756 				 __LINE__);
1757 	if (!wmi_buf)
1758 		wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len +
1759 				WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4,
1760 				false, func, line);
1761 
1762 	if (!wmi_buf) {
1763 		wmi_nofl_err("%s:%d, failed to alloc len:%d", func, line, len);
1764 		return NULL;
1765 	}
1766 
1767 	/* Clear the wmi buffer */
1768 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1769 
1770 	/*
1771 	 * Set the length of the buffer to match the allocation size.
1772 	 */
1773 	qdf_nbuf_set_pktlen(wmi_buf, len);
1774 
1775 	return wmi_buf;
1776 }
1777 qdf_export_symbol(wmi_buf_alloc_fl);
1778 
1779 void wmi_buf_free(wmi_buf_t net_buf)
1780 {
1781 	net_buf = wbuff_buff_put(net_buf);
1782 	if (net_buf)
1783 		qdf_nbuf_free(net_buf);
1784 }
1785 qdf_export_symbol(wmi_buf_free);
1786 #endif
1787 
1788 uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle)
1789 {
1790 	return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM;
1791 }
1792 qdf_export_symbol(wmi_get_max_msg_len);
1793 
1794 #ifndef WMI_CMD_STRINGS
1795 static uint8_t *wmi_id_to_name(uint32_t wmi_command)
1796 {
1797 	return "Invalid WMI cmd";
1798 }
1799 #endif
1800 
1801 static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag)
1802 {
1803 	wmi_nofl_debug("Send cmd %s(0x%x) tag:%d",
1804 		       wmi_id_to_name(cmd_id), cmd_id, tag);
1805 }
1806 
1807 /**
1808  * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence
1809  * @cmd_id: command to check
1810  *
1811  * Return: true if the command is part of the resume sequence.
1812  */
1813 #ifdef WLAN_POWER_MANAGEMENT_OFFLOAD
1814 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1815 {
1816 	switch (cmd_id) {
1817 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
1818 	case WMI_PDEV_RESUME_CMDID:
1819 		return true;
1820 
1821 	default:
1822 		return false;
1823 	}
1824 }
1825 
1826 #else
1827 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1828 {
1829 	return false;
1830 }
1831 
1832 #endif
1833 
1834 #ifdef FEATURE_WLAN_D0WOW
1835 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1836 {
1837 	wmi_d0_wow_enable_disable_cmd_fixed_param *cmd;
1838 
1839 	if (cmd_id == WMI_D0_WOW_ENABLE_DISABLE_CMDID) {
1840 		cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *)
1841 			wmi_buf_data(buf);
1842 		if (!cmd->enable)
1843 			return true;
1844 		else
1845 			return false;
1846 	}
1847 
1848 	return false;
1849 }
1850 #else
1851 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1852 {
1853 	return false;
1854 }
1855 
1856 #endif
1857 
1858 #ifdef WMI_INTERFACE_SEQUENCE_CHECK
1859 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1860 {
1861 	wmi_handle->wmi_sequence = 0;
1862 	wmi_handle->wmi_exp_sequence = 0;
1863 	wmi_handle->wmi_sequence_stop = false;
1864 }
1865 
1866 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1867 {
1868 	qdf_spinlock_create(&wmi_handle->wmi_seq_lock);
1869 	wmi_interface_sequence_reset(wmi_handle);
1870 }
1871 
1872 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1873 {
1874 	qdf_spinlock_destroy(&wmi_handle->wmi_seq_lock);
1875 }
1876 
1877 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1878 {
1879 	wmi_handle->wmi_sequence_stop = true;
1880 }
1881 
1882 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1883 					  HTC_PACKET *pkt,
1884 					  const char *func, uint32_t line)
1885 {
1886 	wmi_buf_t buf = GET_HTC_PACKET_NET_BUF_CONTEXT(pkt);
1887 	QDF_STATUS status;
1888 
1889 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1890 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1891 	if (QDF_STATUS_SUCCESS != status) {
1892 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1893 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1894 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1895 			     func, line, status);
1896 		qdf_mem_free(pkt);
1897 		return status;
1898 	}
1899 	/* Record the sequence number in the SKB */
1900 	qdf_nbuf_set_mark(buf, wmi_handle->wmi_sequence);
1901 	/* Increment the sequence number */
1902 	wmi_handle->wmi_sequence = (wmi_handle->wmi_sequence + 1)
1903 				   & (wmi_handle->wmi_max_cmds - 1);
1904 	qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1905 
1906 	return status;
1907 }
1908 
1909 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1910 						wmi_buf_t buf)
1911 {
1912 	/* Skip sequence check when wmi sequence stop is set */
1913 	if (wmi_handle->wmi_sequence_stop)
1914 		return;
1915 
1916 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1917 	/* Match the completion sequence and expected sequence number */
1918 	if (qdf_nbuf_get_mark(buf) != wmi_handle->wmi_exp_sequence) {
1919 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1920 		wmi_nofl_err("WMI Tx Completion Sequence number mismatch");
1921 		wmi_nofl_err("Expected %d Received %d",
1922 			     wmi_handle->wmi_exp_sequence,
1923 			     qdf_nbuf_get_mark(buf));
1924 		/* Trigger Recovery */
1925 		qdf_trigger_self_recovery(wmi_handle->soc,
1926 					  QDF_WMI_BUF_SEQUENCE_MISMATCH);
1927 	} else {
1928 		/* Increment the expected sequence number */
1929 		wmi_handle->wmi_exp_sequence =
1930 				(wmi_handle->wmi_exp_sequence + 1)
1931 				& (wmi_handle->wmi_max_cmds - 1);
1932 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1933 	}
1934 }
1935 #else
1936 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1937 {
1938 }
1939 
1940 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1941 {
1942 }
1943 
1944 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1945 {
1946 }
1947 
1948 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1949 {
1950 }
1951 
1952 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1953 					  HTC_PACKET *pkt,
1954 					  const char *func, uint32_t line)
1955 {
1956 	QDF_STATUS status;
1957 
1958 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1959 	if (QDF_STATUS_SUCCESS != status) {
1960 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1961 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1962 			     func, line, status);
1963 		qdf_mem_free(pkt);
1964 		return status;
1965 	}
1966 
1967 	return status;
1968 }
1969 
1970 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1971 						wmi_buf_t buf)
1972 {
1973 }
1974 #endif
1975 
1976 static inline void wmi_unified_debug_dump(wmi_unified_t wmi_handle)
1977 {
1978 	wmi_nofl_err("Endpoint ID = %d, Tx Queue Depth = %d, soc_id = %u, target type = %s",
1979 		     wmi_handle->wmi_endpoint_id,
1980 		     htc_get_tx_queue_depth(wmi_handle->htc_handle,
1981 					    wmi_handle->wmi_endpoint_id),
1982 		     wmi_handle->soc->soc_idx,
1983 		     (wmi_handle->target_type ==
1984 		      WMI_TLV_TARGET ? "WMI_TLV_TARGET" :
1985 						"WMI_NON_TLV_TARGET"));
1986 }
1987 
1988 #ifdef SYSTEM_PM_CHECK
1989 /**
1990  * wmi_set_system_pm_pkt_tag() - API to set tag for system pm packets
1991  * @htc_tag: HTC tag
1992  * @buf: wmi cmd buffer
1993  * @cmd_id: cmd id
1994  *
1995  * Return: None
1996  */
1997 static void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
1998 				      uint32_t cmd_id)
1999 {
2000 	switch (cmd_id) {
2001 	case WMI_WOW_ENABLE_CMDID:
2002 	case WMI_PDEV_SUSPEND_CMDID:
2003 		*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
2004 		break;
2005 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
2006 	case WMI_PDEV_RESUME_CMDID:
2007 		*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
2008 		break;
2009 	case WMI_D0_WOW_ENABLE_DISABLE_CMDID:
2010 		if (wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id))
2011 			*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
2012 		else
2013 			*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
2014 		break;
2015 	default:
2016 		break;
2017 	}
2018 }
2019 #else
2020 static inline void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
2021 					     uint32_t cmd_id)
2022 {
2023 }
2024 #endif
2025 
2026 QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf,
2027 				   uint32_t len, uint32_t cmd_id,
2028 				   const char *func, uint32_t line)
2029 {
2030 	HTC_PACKET *pkt;
2031 	uint16_t htc_tag = 0;
2032 	bool rtpm_inprogress;
2033 
2034 	rtpm_inprogress = wmi_get_runtime_pm_inprogress(wmi_handle);
2035 	if (rtpm_inprogress) {
2036 		htc_tag = wmi_handle->ops->wmi_set_htc_tx_tag(wmi_handle, buf,
2037 							      cmd_id);
2038 	} else if (qdf_atomic_read(&wmi_handle->is_target_suspended) &&
2039 		   !wmi_is_pm_resume_cmd(cmd_id) &&
2040 		   !wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id)) {
2041 		wmi_nofl_err("Target is suspended (via %s:%u)",
2042 			     func, line);
2043 		return QDF_STATUS_E_BUSY;
2044 	}
2045 
2046 	if (wmi_handle->wmi_stopinprogress) {
2047 		wmi_nofl_err("%s:%d, WMI stop in progress, wmi_handle:%pK",
2048 			     func, line, wmi_handle);
2049 		return QDF_STATUS_E_INVAL;
2050 	}
2051 
2052 #ifndef WMI_NON_TLV_SUPPORT
2053 	/* Do sanity check on the TLV parameter structure */
2054 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2055 		void *buf_ptr = (void *)qdf_nbuf_data(buf);
2056 
2057 		if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id)
2058 			!= 0) {
2059 			wmi_nofl_err("%s:%d, Invalid WMI Param Buffer for Cmd:%d",
2060 				     func, line, cmd_id);
2061 			return QDF_STATUS_E_INVAL;
2062 		}
2063 	}
2064 #endif
2065 
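	/*
	 * Prepend the WMI command header in front of the payload; the
	 * headroom reserved when the buffer was allocated is expected to
	 * cover sizeof(WMI_CMD_HDR), so this push should not normally fail.
	 */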
2066 	if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
2067 		wmi_nofl_err("%s:%d, Failed to send cmd %x, no memory",
2068 			     func, line, cmd_id);
2069 		return QDF_STATUS_E_NOMEM;
2070 	}
2071 
2072 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2073 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2074 
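	/*
	 * Flow control: bound the number of WMI commands outstanding with
	 * the target. If the limit is reached, dump credit/queue diagnostics
	 * and trigger self recovery instead of queueing further commands.
	 */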
2075 	qdf_atomic_inc(&wmi_handle->pending_cmds);
2076 	if (qdf_atomic_read(&wmi_handle->pending_cmds) >=
2077 			wmi_handle->wmi_max_cmds) {
2078 		wmi_nofl_err("hostcredits = %d",
2079 			     wmi_get_host_credits(wmi_handle));
2080 		htc_dump_counter_info(wmi_handle->htc_handle);
2081 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2082 		wmi_nofl_err("%s:%d, MAX %d WMI Pending cmds reached",
2083 			     func, line, wmi_handle->wmi_max_cmds);
2084 		wmi_unified_debug_dump(wmi_handle);
2085 		htc_ce_tasklet_debug_dump(wmi_handle->htc_handle);
2086 		qdf_trigger_self_recovery(wmi_handle->soc->wmi_psoc,
2087 					  QDF_WMI_EXCEED_MAX_PENDING_CMDS);
2088 		return QDF_STATUS_E_BUSY;
2089 	}
2090 
2091 	pkt = qdf_mem_malloc_fl(sizeof(*pkt), func, line);
2092 	if (!pkt) {
2093 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2094 		return QDF_STATUS_E_NOMEM;
2095 	}
2096 
2097 	if (!rtpm_inprogress)
2098 		wmi_set_system_pm_pkt_tag(&htc_tag, buf, cmd_id);
2099 
2100 	SET_HTC_PACKET_INFO_TX(pkt,
2101 			       NULL,
2102 			       qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
2103 			       wmi_handle->wmi_endpoint_id, htc_tag);
2104 
2105 	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);
2106 	wmi_log_cmd_id(cmd_id, htc_tag);
2107 	wmi_ext_dbg_msg_cmd_record(wmi_handle,
2108 				   qdf_nbuf_data(buf), qdf_nbuf_len(buf));
2109 #ifdef WMI_INTERFACE_EVENT_LOGGING
2110 	if (wmi_handle->log_info.wmi_logging_enable) {
2111 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2112 		/*
2113 		 * Record 16 bytes of WMI cmd data -
2114 		 * exclude TLV and WMI headers
2115 		 *
2116 		 * WMI mgmt command already recorded in wmi_mgmt_cmd_record
2117 		 */
2118 		if (wmi_handle->ops->is_management_record(cmd_id) == false) {
2119 			uint8_t *tmpbuf = (uint8_t *)qdf_nbuf_data(buf) +
2120 				wmi_handle->soc->buf_offset_command;
2121 
2122 			WMI_COMMAND_RECORD(wmi_handle, cmd_id, tmpbuf);
2123 			wmi_specific_cmd_record(wmi_handle, cmd_id, tmpbuf);
2124 		}
2125 
2126 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2127 	}
2128 #endif
2129 	return wmi_htc_send_pkt(wmi_handle, pkt, func, line);
2130 }
2131 qdf_export_symbol(wmi_unified_cmd_send_fl);
2132 
2133 /**
2134  * wmi_unified_get_event_handler_ix() - gives event handler's index
2135  * @wmi_handle: handle to wmi
2136  * @event_id: wmi event id
2137  *
2138  * Return: event handler's index, or -1 if no handler is registered
2139  */
2140 static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
2141 					    uint32_t event_id)
2142 {
2143 	uint32_t idx = 0;
2144 	int32_t invalid_idx = -1;
2145 	struct wmi_soc *soc = wmi_handle->soc;
2146 
2147 	for (idx = 0; (idx < soc->max_event_idx &&
2148 		       idx < WMI_UNIFIED_MAX_EVENT); ++idx) {
2149 		if (wmi_handle->event_id[idx] == event_id &&
2150 		    wmi_handle->event_handler[idx]) {
2151 			return idx;
2152 		}
2153 	}
2154 
2155 	return invalid_idx;
2156 }
2157 
2158 /**
2159  * wmi_register_event_handler_with_ctx() - register event handler with
2160  * exec ctx and buffer type
2161  * @wmi_handle: handle to wmi
2162  * @event_id: wmi event id
2163  * @handler_func: wmi event handler function
2164  * @rx_ctx: rx execution context for wmi rx events
2165  * @rx_buf_type: rx buffer type for wmi rx events
2166  *
2167  * Return: QDF_STATUS_SUCCESS on successful register event else failure.
2168  */
2169 static QDF_STATUS
2170 wmi_register_event_handler_with_ctx(wmi_unified_t wmi_handle,
2171 				    uint32_t event_id,
2172 				    wmi_unified_event_handler handler_func,
2173 				    enum wmi_rx_exec_ctx rx_ctx,
2174 				    enum wmi_rx_buff_type rx_buf_type)
2175 {
2176 	uint32_t idx = 0;
2177 	uint32_t evt_id;
2178 	struct wmi_soc *soc;
2179 
2180 	if (!wmi_handle) {
2181 		wmi_err("WMI handle is NULL");
2182 		return QDF_STATUS_E_FAILURE;
2183 	}
2184 
2185 	soc = wmi_handle->soc;
2186 
2187 	if (event_id >= wmi_events_max) {
2188 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2189 			  "%s: Event id %d is unavailable",
2190 					__func__, event_id);
2191 		return QDF_STATUS_E_FAILURE;
2192 	}
2193 
2194 	if (wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2195 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
2196 			  "%s: Event id %d is not supported",
2197 			  __func__, event_id);
2198 		return QDF_STATUS_E_NOSUPPORT;
2199 	}
2200 	evt_id = wmi_handle->wmi_events[event_id];
2201 
2202 	if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) {
2203 		wmi_info("event handler already registered 0x%x", evt_id);
2204 		return QDF_STATUS_E_FAILURE;
2205 	}
2206 	if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
2207 		wmi_err("no more event handlers 0x%x",
2208 			 evt_id);
2209 		return QDF_STATUS_E_FAILURE;
2210 	}
2211 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
2212 		  "Registered event handler for event 0x%8x", evt_id);
2213 	idx = soc->max_event_idx;
2214 	wmi_handle->event_handler[idx] = handler_func;
2215 	wmi_handle->event_id[idx] = evt_id;
2216 
2217 	qdf_spin_lock_bh(&soc->ctx_lock);
2218 	wmi_handle->ctx[idx].exec_ctx = rx_ctx;
2219 	wmi_handle->ctx[idx].buff_type = rx_buf_type;
2220 	qdf_spin_unlock_bh(&soc->ctx_lock);
2221 	soc->max_event_idx++;
2222 
2223 	return QDF_STATUS_SUCCESS;
2224 }
2225 
2226 QDF_STATUS
2227 wmi_unified_register_event(wmi_unified_t wmi_handle,
2228 			   uint32_t event_id,
2229 			   wmi_unified_event_handler handler_func)
2230 {
2231 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2232 						   handler_func,
2233 						   WMI_RX_UMAC_CTX,
2234 						   WMI_RX_PROCESSED_BUFF);
2235 }
2236 
2237 QDF_STATUS
2238 wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
2239 				   wmi_conv_event_id event_id,
2240 				   wmi_unified_event_handler handler_func,
2241 				   uint8_t rx_ctx)
2242 {
2243 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2244 						   handler_func, rx_ctx,
2245 						   WMI_RX_PROCESSED_BUFF);
2246 }
2247 
2248 qdf_export_symbol(wmi_unified_register_event_handler);
2249 
2250 QDF_STATUS
2251 wmi_unified_register_raw_event_handler(wmi_unified_t wmi_handle,
2252 				       wmi_conv_event_id event_id,
2253 				       wmi_unified_event_handler handler_func,
2254 				       enum wmi_rx_exec_ctx rx_ctx)
2255 {
2256 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2257 						   handler_func, rx_ctx,
2258 						   WMI_RX_RAW_BUFF);
2259 }
2260 
2261 qdf_export_symbol(wmi_unified_register_raw_event_handler);
2262 
2263 QDF_STATUS wmi_unified_unregister_event(wmi_unified_t wmi_handle,
2264 					uint32_t event_id)
2265 {
2266 	uint32_t idx = 0;
2267 	uint32_t evt_id;
2268 	struct wmi_soc *soc;
2269 
2270 	if (!wmi_handle) {
2271 		wmi_err("WMI handle is NULL");
2272 		return QDF_STATUS_E_FAILURE;
2273 	}
2274 
2275 	soc = wmi_handle->soc;
2276 	if (event_id >= wmi_events_max ||
2277 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2278 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2279 			  "%s: Event id %d is unavailable",
2280 					__func__, event_id);
2281 		return QDF_STATUS_E_FAILURE;
2282 	}
2283 	evt_id = wmi_handle->wmi_events[event_id];
2284 
2285 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2286 	if (idx == -1) {
2287 		wmi_warn("event handler is not registered: evt id 0x%x",
2288 			 evt_id);
2289 		return QDF_STATUS_E_FAILURE;
2290 	}
2291 	wmi_handle->event_handler[idx] = NULL;
2292 	wmi_handle->event_id[idx] = 0;
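	/*
	 * Keep the handler table dense: move the last registered handler
	 * into the slot that was just freed so lookups only need to scan
	 * up to max_event_idx.
	 */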
2293 	--soc->max_event_idx;
2294 	wmi_handle->event_handler[idx] =
2295 		wmi_handle->event_handler[soc->max_event_idx];
2296 	wmi_handle->event_id[idx] =
2297 		wmi_handle->event_id[soc->max_event_idx];
2298 
2299 	qdf_spin_lock_bh(&soc->ctx_lock);
2300 
2301 	wmi_handle->ctx[idx].exec_ctx =
2302 		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
2303 	wmi_handle->ctx[idx].buff_type =
2304 		wmi_handle->ctx[soc->max_event_idx].buff_type;
2305 
2306 	qdf_spin_unlock_bh(&soc->ctx_lock);
2307 
2308 	return QDF_STATUS_SUCCESS;
2309 }
2310 
2311 QDF_STATUS wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
2312 						wmi_conv_event_id event_id)
2313 {
2314 	uint32_t idx = 0;
2315 	uint32_t evt_id;
2316 	struct wmi_soc *soc;
2317 
2318 	if (!wmi_handle) {
2319 		wmi_err("WMI handle is NULL");
2320 		return QDF_STATUS_E_FAILURE;
2321 	}
2322 
2323 	soc = wmi_handle->soc;
2324 
2325 	if (event_id >= wmi_events_max) {
2326 		wmi_err("Event id %d is unavailable", event_id);
2327 		return QDF_STATUS_E_FAILURE;
2328 	}
2329 
2330 	if (wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2331 		wmi_debug("Event id %d is not supported", event_id);
2332 		return QDF_STATUS_E_NOSUPPORT;
2333 	}
2334 
2335 	evt_id = wmi_handle->wmi_events[event_id];
2336 
2337 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2338 	if (idx == -1) {
2339 		wmi_err("event handler is not registered: evt id 0x%x",
2340 			 evt_id);
2341 		return QDF_STATUS_E_FAILURE;
2342 	}
2343 	wmi_handle->event_handler[idx] = NULL;
2344 	wmi_handle->event_id[idx] = 0;
2345 	--soc->max_event_idx;
2346 	wmi_handle->event_handler[idx] =
2347 		wmi_handle->event_handler[soc->max_event_idx];
2348 	wmi_handle->event_id[idx] =
2349 		wmi_handle->event_id[soc->max_event_idx];
2350 
2351 	qdf_spin_lock_bh(&soc->ctx_lock);
2352 
2353 	wmi_handle->ctx[idx].exec_ctx =
2354 		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
2355 	wmi_handle->ctx[idx].buff_type =
2356 		wmi_handle->ctx[soc->max_event_idx].buff_type;
2357 
2358 	qdf_spin_unlock_bh(&soc->ctx_lock);
2359 
2360 	return QDF_STATUS_SUCCESS;
2361 }
2362 qdf_export_symbol(wmi_unified_unregister_event_handler);
2363 
2364 static void
2365 wmi_process_rx_diag_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2366 					    void *evt_buf)
2367 {
2368 	uint32_t num_diag_events_pending;
2369 
2370 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
2371 	if (RX_DIAG_WQ_MAX_SIZE > 0) {
2372 		num_diag_events_pending = qdf_nbuf_queue_len(
2373 						&wmi_handle->diag_event_queue);
2374 
2375 		if (num_diag_events_pending >= RX_DIAG_WQ_MAX_SIZE) {
2376 			qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2377 			wmi_handle->wmi_rx_diag_events_dropped++;
2378 			wmi_debug_rl("Rx diag events dropped count: %d",
2379 				     wmi_handle->wmi_rx_diag_events_dropped);
2380 			qdf_nbuf_free(evt_buf);
2381 			return;
2382 		}
2383 	}
2384 
2385 	qdf_nbuf_queue_add(&wmi_handle->diag_event_queue, evt_buf);
2386 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2387 	qdf_queue_work(0, wmi_handle->wmi_rx_diag_work_queue,
2388 		       &wmi_handle->rx_diag_event_work);
2389 }
2390 
2391 void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2392 					    void *evt_buf)
2393 {
2394 
2395 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
2396 	qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
2397 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
2398 	qdf_queue_work(0, wmi_handle->wmi_rx_work_queue,
2399 			&wmi_handle->rx_event_work);
2400 
2401 	return;
2402 }
2403 
2404 qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx);
2405 
2406 uint32_t wmi_critical_events_in_flight(struct wmi_unified *wmi)
2407 {
2408 	return qdf_atomic_read(&wmi->critical_events_in_flight);
2409 }
2410 
2411 static bool
2412 wmi_is_event_critical(struct wmi_unified *wmi_handle, uint32_t event_id)
2413 {
2414 	if (wmi_handle->wmi_events[wmi_roam_synch_event_id] == event_id)
2415 		return true;
2416 
2417 	return false;
2418 }
2419 
2420 static QDF_STATUS wmi_discard_fw_event(struct scheduler_msg *msg)
2421 {
2422 	struct wmi_process_fw_event_params *event_param;
2423 
2424 	if (!msg->bodyptr)
2425 		return QDF_STATUS_E_INVAL;
2426 
2427 	event_param = (struct wmi_process_fw_event_params *)msg->bodyptr;
2428 	qdf_nbuf_free(event_param->evt_buf);
2429 	qdf_mem_free(msg->bodyptr);
2430 	msg->bodyptr = NULL;
2431 	msg->bodyval = 0;
2432 	msg->type = 0;
2433 
2434 	return QDF_STATUS_SUCCESS;
2435 }
2436 
2437 static QDF_STATUS wmi_process_fw_event_handler(struct scheduler_msg *msg)
2438 {
2439 	struct wmi_process_fw_event_params *params =
2440 		(struct wmi_process_fw_event_params *)msg->bodyptr;
2441 	struct wmi_unified *wmi_handle;
2442 	uint32_t event_id;
2443 
2444 	wmi_handle = (struct wmi_unified *)params->wmi_handle;
2445 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params->evt_buf),
2446 				 WMI_CMD_HDR, COMMANDID);
2447 	wmi_process_fw_event(wmi_handle, params->evt_buf);
2448 
2449 	if (wmi_is_event_critical(wmi_handle, event_id))
2450 		qdf_atomic_dec(&wmi_handle->critical_events_in_flight);
2451 
2452 	qdf_mem_free(msg->bodyptr);
2453 
2454 	return QDF_STATUS_SUCCESS;
2455 }
2456 
2457 /**
2458  * wmi_process_fw_event_sched_thread_ctx() - common event handler to serialize
2459  *                                  event processing through scheduler thread
2460  * @wmi: wmi context
2461  * @ev: event buffer
2462  *
2463  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS error code on failure
2464  */
2465 static QDF_STATUS
2466 wmi_process_fw_event_sched_thread_ctx(struct wmi_unified *wmi,
2467 				      void *ev)
2468 {
2469 	struct wmi_process_fw_event_params *params_buf;
2470 	struct scheduler_msg msg = { 0 };
2471 	uint32_t event_id;
2472 
2473 	params_buf = qdf_mem_malloc(sizeof(struct wmi_process_fw_event_params));
2474 	if (!params_buf) {
2475 		wmi_err("malloc failed");
2476 		qdf_nbuf_free(ev);
2477 		return QDF_STATUS_E_NOMEM;
2478 	}
2479 
2480 	params_buf->wmi_handle = wmi;
2481 	params_buf->evt_buf = ev;
2482 
2483 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params_buf->evt_buf),
2484 				 WMI_CMD_HDR, COMMANDID);
2485 	if (wmi_is_event_critical(wmi, event_id))
2486 		qdf_atomic_inc(&wmi->critical_events_in_flight);
2487 
2488 	msg.bodyptr = params_buf;
2489 	msg.bodyval = 0;
2490 	msg.callback = wmi_process_fw_event_handler;
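	/*
	 * flush_callback lets the scheduler free the event buffer safely if
	 * the message queue is flushed before this event gets processed.
	 */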
2491 	msg.flush_callback = wmi_discard_fw_event;
2492 
2493 	if (QDF_STATUS_SUCCESS !=
2494 		scheduler_post_message(QDF_MODULE_ID_TARGET_IF,
2495 				       QDF_MODULE_ID_TARGET_IF,
2496 				       QDF_MODULE_ID_TARGET_IF, &msg)) {
2497 		qdf_nbuf_free(ev);
2498 		qdf_mem_free(params_buf);
2499 		return QDF_STATUS_E_FAULT;
2500 	}
2501 
2502 	return QDF_STATUS_SUCCESS;
2503 }
2504 
2505 /**
2506  * wmi_get_pdev_ep() - Get wmi handle based on endpoint
2507  * @soc: handle to wmi soc
2508  * @ep: endpoint id
2509  *
2510  * Return: pointer to the wmi handle mapped to the endpoint, else NULL
2511  */
2512 static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc,
2513 						HTC_ENDPOINT_ID ep)
2514 {
2515 	uint32_t i;
2516 
2517 	for (i = 0; i < WMI_MAX_RADIOS; i++)
2518 		if (soc->wmi_endpoint_id[i] == ep)
2519 			break;
2520 
2521 	if (i == WMI_MAX_RADIOS)
2522 		return NULL;
2523 
2524 	return soc->wmi_pdev[i];
2525 }
2526 
2527 /**
2528  * wmi_mtrace_rx() - Wrapper function for the qdf_mtrace API
2529  * @message_id: 32-bit WMI message ID
2530  * @vdev_id: Vdev ID
2531  * @data: Actual message contents
2532  *
2533  * This function converts the 32-bit WMI message ID to the 15-bit
2534  * message ID format used by qdf_mtrace, since qdf_mtrace reserves
2535  * only 15 bits for the message ID.
2536  * Of these 15 bits, 8 bits (from the LSB) carry the WMI_GRP_ID and
2537  * the remaining 7 bits carry the actual WMI command. With this
2538  * notation a maximum of 256 groups can be supported, each with up
2539  * to 128 commands.
2540  *
2541  * Return: None
2542  */
2543 static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data)
2544 {
2545 	uint16_t mtrace_message_id;
2546 
2547 	mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) |
2548 		(QDF_WMI_MTRACE_GRP_ID(message_id) <<
2549 						QDF_WMI_MTRACE_CMD_NUM_BITS);
2550 	qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA,
2551 		   mtrace_message_id, vdev_id, data);
2552 }
2553 
2554 /**
2555  * wmi_process_control_rx() - process fw event callbacks
2556  * @wmi_handle: handle to wmi_unified
2557  * @evt_buf: handle to wmi_buf_t
2558  *
2559  * Return: none
2560  */
2561 static void wmi_process_control_rx(struct wmi_unified *wmi_handle,
2562 				   wmi_buf_t evt_buf)
2563 {
2564 	struct wmi_soc *soc = wmi_handle->soc;
2565 	uint32_t id;
2566 	uint32_t idx;
2567 	enum wmi_rx_exec_ctx exec_ctx;
2568 
2569 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2570 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2571 	if (qdf_unlikely(idx == A_ERROR)) {
2572 		wmi_debug("no handler registered for event id 0x%x", id);
2573 		qdf_nbuf_free(evt_buf);
2574 		return;
2575 	}
2576 	wmi_mtrace_rx(id, 0xFF, idx);
2577 	qdf_spin_lock_bh(&soc->ctx_lock);
2578 	exec_ctx = wmi_handle->ctx[idx].exec_ctx;
2579 	qdf_spin_unlock_bh(&soc->ctx_lock);
2580 
2581 #ifdef WMI_INTERFACE_EVENT_LOGGING
2582 	if (wmi_handle->log_info.wmi_logging_enable) {
2583 		uint8_t *data;
2584 		data = qdf_nbuf_data(evt_buf);
2585 
2586 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2587 		/* Exclude 4 bytes of TLV header */
2588 		if (wmi_handle->ops->is_diag_event(id)) {
2589 			WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id,
2590 				((uint8_t *) data +
2591 				wmi_handle->soc->buf_offset_event));
2592 		} else if (wmi_handle->ops->is_management_record(id)) {
2593 			WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id,
2594 				((uint8_t *) data +
2595 				wmi_handle->soc->buf_offset_event));
2596 		} else {
2597 			WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data +
2598 				wmi_handle->soc->buf_offset_event));
2599 		}
2600 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2601 	}
2602 #endif
2603 
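	/*
	 * Dispatch according to the execution context chosen when the
	 * handler was registered: WMI work queue, tasklet context (handled
	 * inline), scheduler thread, or the dedicated diag work queue.
	 */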
2604 	if (exec_ctx == WMI_RX_WORK_CTX) {
2605 		wmi_process_fw_event_worker_thread_ctx(wmi_handle, evt_buf);
2606 	} else if (exec_ctx == WMI_RX_TASKLET_CTX) {
2607 		wmi_process_fw_event(wmi_handle, evt_buf);
2608 	} else if (exec_ctx == WMI_RX_SERIALIZER_CTX) {
2609 		wmi_process_fw_event_sched_thread_ctx(wmi_handle, evt_buf);
2610 	} else if (exec_ctx == WMI_RX_DIAG_WORK_CTX) {
2611 		wmi_process_rx_diag_event_worker_thread_ctx(wmi_handle,
2612 							    evt_buf);
2613 	} else {
2614 		wmi_err("Invalid event context %d", exec_ctx);
2615 		qdf_nbuf_free(evt_buf);
2616 	}
2617 
2618 }
2619 
2620 /**
2621  * wmi_control_rx() - process fw event callbacks
2622  * @ctx: handle to wmi
2623  * @htc_packet: pointer to htc packet
2624  *
2625  * Return: none
2626  */
2627 static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
2628 {
2629 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2630 	struct wmi_unified *wmi_handle;
2631 	wmi_buf_t evt_buf;
2632 
2633 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2634 
2635 	wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint);
2636 	if (!wmi_handle) {
2637 		wmi_err("unable to get wmi_handle to Endpoint %d",
2638 			htc_packet->Endpoint);
2639 		qdf_nbuf_free(evt_buf);
2640 		return;
2641 	}
2642 
2643 	wmi_process_control_rx(wmi_handle, evt_buf);
2644 }
2645 
2646 #if defined(WLAN_FEATURE_WMI_DIAG_OVER_CE7) || \
2647 	defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
2648 /**
2649  * wmi_control_diag_rx() - process diag fw event callbacks
2650  * @ctx: handle to wmi
2651  * @htc_packet: pointer to htc packet
2652  *
2653  * Return: none
2654  */
2655 static void wmi_control_diag_rx(void *ctx, HTC_PACKET *htc_packet)
2656 {
2657 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2658 	struct wmi_unified *wmi_handle;
2659 	wmi_buf_t evt_buf;
2660 
2661 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2662 
2663 	wmi_handle = soc->wmi_pdev[0];
2664 
2665 	if (!wmi_handle) {
2666 		wmi_err("unable to get wmi_handle for diag event end point id:%d", htc_packet->Endpoint);
2667 		qdf_nbuf_free(evt_buf);
2668 		return;
2669 	}
2670 
2671 	wmi_process_control_rx(wmi_handle, evt_buf);
2672 }
2673 #endif
2674 
2675 #if defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
2676 /**
2677  * wmi_control_dbr_rx() - process dbr fw event callbacks
2678  * @ctx: handle to wmi
2679  * @htc_packet: pointer to htc packet
2680  *
2681  * Return: none
2682  */
2683 static void wmi_control_dbr_rx(void *ctx, HTC_PACKET *htc_packet)
2684 {
2685 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2686 	struct wmi_unified *wmi_handle;
2687 	wmi_buf_t evt_buf;
2688 
2689 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2690 	wmi_handle = soc->wmi_pdev[0];
2691 
2692 	if (!wmi_handle) {
2693 		wmi_err("unable to get wmi_handle for dbr event endpoint id:%d",
2694 			htc_packet->Endpoint);
2695 		qdf_nbuf_free(evt_buf);
2696 		return;
2697 	}
2698 
2699 	wmi_process_control_rx(wmi_handle, evt_buf);
2700 }
2701 #endif
2702 
2703 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
2704 QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle,
2705 					 wmi_buf_t buf, uint32_t buflen,
2706 					 uint32_t cmd_id)
2707 {
2708 	QDF_STATUS status;
2709 	int32_t ret;
2710 
2711 	if (!qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR))) {
2712 		wmi_err("Failed to send cmd %x, no memory", cmd_id);
2713 		return QDF_STATUS_E_NOMEM;
2714 	}
2715 
2716 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2717 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2718 	wmi_debug("Sending WMI_CMD_ID: 0x%x over qmi", cmd_id);
2719 	status = qdf_wmi_send_recv_qmi(qdf_nbuf_data(buf),
2720 				       buflen + sizeof(WMI_CMD_HDR),
2721 				       wmi_handle,
2722 				       wmi_process_qmi_fw_event);
2723 	if (QDF_IS_STATUS_ERROR(status)) {
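		/*
		 * Strip the WMI header that was just pushed so the caller
		 * can retry the very same buffer over the HTC path.
		 */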
2724 		qdf_nbuf_pull_head(buf, sizeof(WMI_CMD_HDR));
2725 		wmi_warn("WMI send on QMI failed. Retrying WMI on HTC");
2726 	} else {
2727 		ret = qdf_atomic_inc_return(&wmi_handle->num_stats_over_qmi);
2728 		wmi_debug("num stats over qmi: %d", ret);
2729 		wmi_buf_free(buf);
2730 	}
2731 
2732 	return status;
2733 }
2734 
2735 static int __wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2736 {
2737 	struct wmi_unified *wmi_handle = wmi_cb_ctx;
2738 	wmi_buf_t evt_buf;
2739 	uint32_t evt_id;
2740 
2741 	if (!wmi_handle || !buf || !len) {
2742 		wmi_err_rl("%s is invalid", !wmi_handle ?
2743 				"wmi_handle" : !buf ? "buf" : "length");
2744 		return -EINVAL;
2745 	}
2746 
2747 	evt_buf = wmi_buf_alloc(wmi_handle, len);
2748 	if (!evt_buf)
2749 		return -ENOMEM;
2750 
2751 	qdf_mem_copy(qdf_nbuf_data(evt_buf), buf, len);
2752 	evt_id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2753 	wmi_debug("Received WMI_EVT_ID: 0x%x over qmi", evt_id);
2754 	wmi_process_control_rx(wmi_handle, evt_buf);
2755 
2756 	return 0;
2757 }
2758 
2759 int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2760 {
2761 	struct qdf_op_sync *op_sync;
2762 	int ret;
2763 
2764 	if (qdf_op_protect(&op_sync))
2765 		return -EINVAL;
2766 	ret = __wmi_process_qmi_fw_event(wmi_cb_ctx, buf, len);
2767 	qdf_op_unprotect(op_sync);
2768 
2769 	return ret;
2770 }
2771 #endif
2772 
2773 void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2774 {
2775 	__wmi_control_rx(wmi_handle, evt_buf);
2776 }
2777 
2778 void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2779 {
2780 	uint32_t id;
2781 	uint8_t *data;
2782 	uint32_t len;
2783 	void *wmi_cmd_struct_ptr = NULL;
2784 #ifndef WMI_NON_TLV_SUPPORT
2785 	int tlv_ok_status = 0;
2786 #endif
2787 	uint32_t idx = 0;
2788 	struct wmi_raw_event_buffer ev_buf;
2789 	enum wmi_rx_buff_type ev_buff_type;
2790 
2791 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2792 
2793 	wmi_ext_dbg_msg_event_record(wmi_handle, qdf_nbuf_data(evt_buf),
2794 				     qdf_nbuf_len(evt_buf));
2795 
2796 	if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
2797 		goto end;
2798 
2799 	data = qdf_nbuf_data(evt_buf);
2800 	len = qdf_nbuf_len(evt_buf);
2801 
2802 #ifndef WMI_NON_TLV_SUPPORT
2803 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2804 		/* Validate and pad(if necessary) the TLVs */
2805 		tlv_ok_status =
2806 			wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle,
2807 							data, len, id,
2808 							&wmi_cmd_struct_ptr);
2809 		if (tlv_ok_status != 0) {
2810 			QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2811 				  "%s: Error: id=0x%x, wmitlv check status=%d",
2812 				  __func__, id, tlv_ok_status);
2813 			goto end;
2814 		}
2815 	}
2816 #endif
2817 
2818 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2819 	if (idx == A_ERROR) {
2820 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2821 		   "%s : event handler is not registered: event id 0x%x",
2822 			__func__, id);
2823 		goto end;
2824 	}
2825 #ifdef WMI_INTERFACE_EVENT_LOGGING
2826 	if (wmi_handle->log_info.wmi_logging_enable) {
2827 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2828 		/* Exclude 4 bytes of TLV header */
2829 		if (wmi_handle->ops->is_diag_event(id)) {
2830 			/*
2831 			 * skip diag event logging in WMI event buffer
2832 			 * as its already logged in WMI RX event buffer
2833 			 */
2834 		} else if (wmi_handle->ops->is_management_record(id)) {
2835 			/*
2836 			 * skip wmi mgmt event logging in WMI event buffer
2837 			 * as its already logged in WMI RX event buffer
2838 			 */
2839 		} else {
2840 			uint8_t *tmpbuf = (uint8_t *)data +
2841 					wmi_handle->soc->buf_offset_event;
2842 
2843 			WMI_EVENT_RECORD(wmi_handle, id, tmpbuf);
2844 			wmi_specific_evt_record(wmi_handle, id, tmpbuf);
2845 		}
2846 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2847 	}
2848 #endif
2849 	/* Call the WMI registered event handler */
2850 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2851 		ev_buff_type = wmi_handle->ctx[idx].buff_type;
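		/*
		 * Raw-buffer handlers receive both the raw event bytes and
		 * the TLV-parsed structure via wmi_raw_event_buffer, while
		 * processed-buffer handlers get only the parsed structure.
		 */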
2852 		if (ev_buff_type == WMI_RX_PROCESSED_BUFF) {
2853 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2854 				wmi_cmd_struct_ptr, len);
2855 		} else if (ev_buff_type == WMI_RX_RAW_BUFF) {
2856 			ev_buf.evt_raw_buf = data;
2857 			ev_buf.evt_processed_buf = wmi_cmd_struct_ptr;
2858 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2859 							(void *)&ev_buf, len);
2860 		}
2861 	} else {
2862 		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2863 						data, len);
2864 	}
2865 
2866 end:
2867 	/* Free event buffer and allocated event tlv */
2868 #ifndef WMI_NON_TLV_SUPPORT
2869 	if (wmi_handle->target_type == WMI_TLV_TARGET)
2870 		wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr);
2871 #endif
2872 
2873 	qdf_nbuf_free(evt_buf);
2874 
2875 }
2876 
2877 #define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */
2878 
2879 static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id)
2880 {
2881 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2882 		  "%s: WLAN_BUG_RCA: Message type %x has exceeded its allotted time of %ds",
2883 		  __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000);
2884 }
2885 
2886 #ifdef CONFIG_SLUB_DEBUG_ON
2887 static void wmi_workqueue_watchdog_bite(void *arg)
2888 {
2889 	struct wmi_wq_dbg_info *info = arg;
2890 
2891 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
2892 	qdf_print_thread_trace(info->task);
2893 
2894 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2895 		  "%s: Going down for WMI WQ Watchdog Bite!", __func__);
2896 	QDF_BUG(0);
2897 }
2898 #else
2899 static inline void wmi_workqueue_watchdog_bite(void *arg)
2900 {
2901 	struct wmi_wq_dbg_info *info = arg;
2902 
2903 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
2904 
2905 	qdf_print_thread_trace(info->task);
2906 }
2907 #endif
2908 
2909 /**
2910  * wmi_rx_event_work() - process rx event in rx work queue context
2911  * @arg: opaque pointer to wmi handle
2912  *
2913  * This function processes fw events, serializing them through the rx worker thread.
2914  *
2915  * Return: none
2916  */
2917 static void wmi_rx_event_work(void *arg)
2918 {
2919 	wmi_buf_t buf;
2920 	struct wmi_unified *wmi = arg;
2921 	qdf_timer_t wd_timer;
2922 	struct wmi_wq_dbg_info info;
2923 
2924 	/* initialize WMI workqueue watchdog timer */
2925 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
2926 			&info, QDF_TIMER_TYPE_SW);
2927 	qdf_spin_lock_bh(&wmi->eventq_lock);
2928 	buf = qdf_nbuf_queue_remove(&wmi->event_queue);
2929 	qdf_spin_unlock_bh(&wmi->eventq_lock);
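	/*
	 * Drain the event queue one buffer at a time, arming the watchdog
	 * around each event so a handler that stalls for longer than
	 * WMI_WQ_WD_TIMEOUT is reported (and asserts on SLUB debug builds).
	 */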
2930 	while (buf) {
2931 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
2932 		info.wd_msg_type_id =
2933 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
2934 		info.wmi_wq = wmi->wmi_rx_work_queue;
2935 		info.task = qdf_get_current_task();
2936 		__wmi_control_rx(wmi, buf);
2937 		qdf_timer_stop(&wd_timer);
2938 		qdf_spin_lock_bh(&wmi->eventq_lock);
2939 		buf = qdf_nbuf_queue_remove(&wmi->event_queue);
2940 		qdf_spin_unlock_bh(&wmi->eventq_lock);
2941 	}
2942 	qdf_timer_free(&wd_timer);
2943 }
2944 
2945 /**
2946  * wmi_rx_diag_event_work() - process rx diag event in work queue context
2947  * @arg: opaque pointer to wmi handle
2948  *
2949  * This function processes fw diag events, serializing them through the rx worker thread.
2950  *
2951  * Return: none
2952  */
2953 static void wmi_rx_diag_event_work(void *arg)
2954 {
2955 	wmi_buf_t buf;
2956 	struct wmi_unified *wmi = arg;
2957 	qdf_timer_t wd_timer;
2958 	struct wmi_wq_dbg_info info;
2959 	uint32_t diag_event_process_count = 0;
2960 
2961 	if (!wmi) {
2962 		wmi_err("Invalid WMI handle");
2963 		return;
2964 	}
2965 
2966 	/* initialize WMI workqueue watchdog timer */
2967 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
2968 		       &info, QDF_TIMER_TYPE_SW);
2969 	qdf_spin_lock_bh(&wmi->diag_eventq_lock);
2970 	buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
2971 	qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
2972 	while (buf) {
2973 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
2974 		info.wd_msg_type_id =
2975 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
2976 		info.wmi_wq = NULL;
2977 		info.task = qdf_get_current_task();
2978 		__wmi_control_rx(wmi, buf);
2979 		qdf_timer_stop(&wd_timer);
2980 
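		/*
		 * Yield after a bounded batch: re-queue the work item so a
		 * long burst of diag events cannot monopolize the work queue.
		 */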
2981 		if (diag_event_process_count++ >
2982 		    RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT) {
2983 			qdf_queue_work(0, wmi->wmi_rx_diag_work_queue,
2984 				       &wmi->rx_diag_event_work);
2985 			break;
2986 		}
2987 
2988 		qdf_spin_lock_bh(&wmi->diag_eventq_lock);
2989 		buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
2990 		qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
2991 	}
2992 	qdf_timer_free(&wd_timer);
2993 }
2994 
2995 #ifdef FEATURE_RUNTIME_PM
2996 /**
2997  * wmi_runtime_pm_init() - initialize runtime pm wmi variables
2998  * @wmi_handle: wmi context
2999  */
3000 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
3001 {
3002 	qdf_atomic_init(&wmi_handle->runtime_pm_inprogress);
3003 }
3004 
3005 void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val)
3006 {
3007 	qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val);
3008 }
3009 
3010 bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle)
3011 {
3012 	return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress);
3013 }
3014 #else
3015 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
3016 {
3017 }
3018 #endif
3019 
3020 void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle)
3021 {
3022 	return wmi_handle->soc;
3023 }
3024 
3025 /**
3026  * wmi_interface_logging_init() - Interface logging init
3027  * @wmi_handle: Pointer to wmi handle object
3028  * @pdev_idx: pdev index
3029  *
3030  * Return: None
3031  */
3032 #ifdef WMI_INTERFACE_EVENT_LOGGING
3033 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
3034 					      uint32_t pdev_idx)
3035 {
3036 	if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) {
3037 		qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
3038 		wmi_debugfs_init(wmi_handle, pdev_idx);
3039 	}
3040 }
3041 #else
3042 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
3043 					      uint32_t pdev_idx)
3044 {
3045 }
3046 #endif
3047 
3048 static QDF_STATUS wmi_initialize_worker_context(struct wmi_unified *wmi_handle)
3049 {
3050 	wmi_handle->wmi_rx_work_queue =
3051 		qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue");
3052 	if (!wmi_handle->wmi_rx_work_queue) {
3053 		wmi_err("failed to create wmi_rx_event_work_queue");
3054 		return QDF_STATUS_E_RESOURCES;
3055 	}
3056 
3057 	qdf_spinlock_create(&wmi_handle->eventq_lock);
3058 	qdf_nbuf_queue_init(&wmi_handle->event_queue);
3059 	qdf_create_work(0, &wmi_handle->rx_event_work,
3060 			wmi_rx_event_work, wmi_handle);
3061 
3062 	wmi_handle->wmi_rx_diag_work_queue =
3063 		qdf_alloc_unbound_workqueue("wmi_rx_diag_event_work_queue");
3064 	if (!wmi_handle->wmi_rx_diag_work_queue) {
3065 		wmi_err("failed to create wmi_rx_diag_event_work_queue");
3066 		return QDF_STATUS_E_RESOURCES;
3067 	}
3068 	qdf_spinlock_create(&wmi_handle->diag_eventq_lock);
3069 	qdf_nbuf_queue_init(&wmi_handle->diag_event_queue);
3070 	qdf_create_work(0, &wmi_handle->rx_diag_event_work,
3071 			wmi_rx_diag_event_work, wmi_handle);
3072 	wmi_handle->wmi_rx_diag_events_dropped = 0;
3073 
3074 	return QDF_STATUS_SUCCESS;
3075 }
3076 
3077 void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx)
3078 {
3079 	struct wmi_unified *wmi_handle;
3080 	QDF_STATUS status;
3081 
3082 	if (pdev_idx >= WMI_MAX_RADIOS)
3083 		return NULL;
3084 
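	/*
	 * The per-pdev WMI handle is allocated lazily on first use; later
	 * calls reuse it and only refresh the attach-time fields below
	 * (endpoint, HTC handle, max message length, etc.).
	 */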
3085 	if (!soc->wmi_pdev[pdev_idx]) {
3086 		wmi_handle =
3087 			(struct wmi_unified *) qdf_mem_malloc(
3088 					sizeof(struct wmi_unified));
3089 		if (!wmi_handle)
3090 			return NULL;
3091 
3092 		status = wmi_initialize_worker_context(wmi_handle);
3093 		if (QDF_IS_STATUS_ERROR(status))
3094 			goto error;
3095 
3096 		wmi_handle->scn_handle = soc->scn_handle;
3097 		wmi_handle->event_id = soc->event_id;
3098 		wmi_handle->event_handler = soc->event_handler;
3099 		wmi_handle->ctx = soc->ctx;
3100 		wmi_handle->ops = soc->ops;
3101 		wmi_handle->wmi_events = soc->wmi_events;
3102 		wmi_handle->services = soc->services;
3103 		wmi_handle->soc = soc;
3104 		wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3105 		wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3106 		wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3107 		wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3108 		wmi_interface_logging_init(wmi_handle, pdev_idx);
3109 		qdf_atomic_init(&wmi_handle->pending_cmds);
3110 		qdf_atomic_init(&wmi_handle->is_target_suspended);
3111 		wmi_handle->target_type = soc->target_type;
3112 		wmi_handle->wmi_max_cmds = soc->wmi_max_cmds;
3113 
3114 		wmi_interface_sequence_init(wmi_handle);
3115 		if (wmi_ext_dbgfs_init(wmi_handle, pdev_idx) !=
3116 		    QDF_STATUS_SUCCESS)
3117 			wmi_err("Failed to initialize wmi extended debugfs");
3118 
3119 		soc->wmi_pdev[pdev_idx] = wmi_handle;
3120 	} else
3121 		wmi_handle = soc->wmi_pdev[pdev_idx];
3122 
3123 	wmi_handle->wmi_stopinprogress = 0;
3124 	wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx];
3125 	wmi_handle->htc_handle = soc->htc_handle;
3126 	wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx];
3127 	wmi_handle->tag_crash_inject = false;
3128 	wmi_interface_sequence_reset(wmi_handle);
3129 
3130 	return wmi_handle;
3131 
3132 error:
3133 	qdf_mem_free(wmi_handle);
3134 
3135 	return NULL;
3136 }
3137 qdf_export_symbol(wmi_unified_get_pdev_handle);
3138 
3139 static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t);
3140 
3141 void wmi_unified_register_module(enum wmi_target_type target_type,
3142 			void (*wmi_attach)(wmi_unified_t wmi_handle))
3143 {
3144 	if (target_type < WMI_MAX_TARGET_TYPE)
3145 		wmi_attach_register[target_type] = wmi_attach;
3146 
3147 	return;
3148 }
3149 qdf_export_symbol(wmi_unified_register_module);
3150 
3151 /**
3152  * wmi_wbuff_register() - register wmi with wbuff
3153  * @wmi_handle: handle to wmi
3154  *
3155  * Return: void
3156  */
3157 static void wmi_wbuff_register(struct wmi_unified *wmi_handle)
3158 {
3159 	struct wbuff_alloc_request wbuff_alloc[4];
3160 
3161 	wbuff_alloc[0].slot = WBUFF_POOL_0;
3162 	wbuff_alloc[0].size = WMI_WBUFF_POOL_0_SIZE;
3163 	wbuff_alloc[1].slot = WBUFF_POOL_1;
3164 	wbuff_alloc[1].size = WMI_WBUFF_POOL_1_SIZE;
3165 	wbuff_alloc[2].slot = WBUFF_POOL_2;
3166 	wbuff_alloc[2].size = WMI_WBUFF_POOL_2_SIZE;
3167 	wbuff_alloc[3].slot = WBUFF_POOL_3;
3168 	wbuff_alloc[3].size = WMI_WBUFF_POOL_3_SIZE;
3169 
3170 	wmi_handle->wbuff_handle = wbuff_module_register(wbuff_alloc, 4,
3171 							 WMI_MIN_HEAD_ROOM, 4);
3172 }
3173 
3174 /**
3175  * wmi_wbuff_deregister() - deregister wmi with wbuff
3176  * @wmi_handle: handle to wmi
3177  *
3178  * Return: void
3179  */
3180 static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle)
3181 {
3182 	wbuff_module_deregister(wmi_handle->wbuff_handle);
3183 	wmi_handle->wbuff_handle = NULL;
3184 }
3185 
3186 void *wmi_unified_attach(void *scn_handle,
3187 			 struct wmi_unified_attach_params *param)
3188 {
3189 	struct wmi_unified *wmi_handle;
3190 	struct wmi_soc *soc;
3191 	QDF_STATUS status;
3192 
3193 	soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc));
3194 	if (!soc)
3195 		return NULL;
3196 
3197 	wmi_handle =
3198 		(struct wmi_unified *) qdf_mem_malloc(
3199 			sizeof(struct wmi_unified));
3200 	if (!wmi_handle) {
3201 		qdf_mem_free(soc);
3202 		return NULL;
3203 	}
3204 
3205 	status = wmi_initialize_worker_context(wmi_handle);
3206 	if (QDF_IS_STATUS_ERROR(status))
3207 		goto error;
3208 
3209 	wmi_handle->soc = soc;
3210 	wmi_handle->soc->soc_idx = param->soc_id;
3211 	wmi_handle->soc->is_async_ep = param->is_async_ep;
3212 	wmi_handle->event_id = soc->event_id;
3213 	wmi_handle->event_handler = soc->event_handler;
3214 	wmi_handle->ctx = soc->ctx;
3215 	wmi_handle->wmi_events = soc->wmi_events;
3216 	wmi_handle->services = soc->services;
3217 	wmi_handle->scn_handle = scn_handle;
3218 	wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3219 	wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3220 	wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3221 	wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3222 	soc->scn_handle = scn_handle;
3223 	wmi_handle->target_type = param->target_type;
3224 	soc->target_type = param->target_type;
3225 
3226 	if (param->target_type >= WMI_MAX_TARGET_TYPE)
3227 		goto error;
3228 
3229 	if (wmi_attach_register[param->target_type]) {
3230 		wmi_attach_register[param->target_type](wmi_handle);
3231 	} else {
3232 		wmi_err("wmi attach is not registered");
3233 		goto error;
3234 	}
3235 
3236 	qdf_atomic_init(&wmi_handle->pending_cmds);
3237 	qdf_atomic_init(&wmi_handle->is_target_suspended);
3238 	qdf_atomic_init(&wmi_handle->is_target_suspend_acked);
3239 	qdf_atomic_init(&wmi_handle->num_stats_over_qmi);
3240 	wmi_runtime_pm_init(wmi_handle);
3241 	wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0);
3242 
3243 	wmi_interface_sequence_init(wmi_handle);
3244 	/* Assign target cookie capability */
3245 	wmi_handle->use_cookie = param->use_cookie;
3246 	wmi_handle->osdev = param->osdev;
3247 	wmi_handle->wmi_stopinprogress = 0;
3248 	wmi_handle->wmi_max_cmds = param->max_commands;
3249 	soc->wmi_max_cmds = param->max_commands;
3250 	/* Increase the ref count once refcount infra is present */
3251 	soc->wmi_psoc = param->psoc;
3252 	qdf_spinlock_create(&soc->ctx_lock);
3253 	soc->ops = wmi_handle->ops;
3254 	soc->wmi_pdev[0] = wmi_handle;
3255 	if (wmi_ext_dbgfs_init(wmi_handle, 0) != QDF_STATUS_SUCCESS)
3256 		wmi_err("Failed to initialize wmi extended debugfs");
3257 
3258 	wmi_wbuff_register(wmi_handle);
3259 
3260 	wmi_hang_event_notifier_register(wmi_handle);
3261 
3262 	wmi_minidump_attach(wmi_handle);
3263 
3264 	return wmi_handle;
3265 
3266 error:
3267 	qdf_mem_free(soc);
3268 	qdf_mem_free(wmi_handle);
3269 
3270 	return NULL;
3271 }
3272 
3273 void wmi_unified_detach(struct wmi_unified *wmi_handle)
3274 {
3275 	wmi_buf_t buf;
3276 	struct wmi_soc *soc;
3277 	uint8_t i;
3278 
3279 	wmi_minidump_detach(wmi_handle);
3280 
3281 	wmi_hang_event_notifier_unregister();
3282 
3283 	wmi_wbuff_deregister(wmi_handle);
3284 
3285 	soc = wmi_handle->soc;
3286 	for (i = 0; i < WMI_MAX_RADIOS; i++) {
3287 		if (soc->wmi_pdev[i]) {
3288 			qdf_flush_workqueue(0,
3289 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3290 			qdf_destroy_workqueue(0,
3291 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3292 			wmi_debugfs_remove(soc->wmi_pdev[i]);
3293 			buf = qdf_nbuf_queue_remove(
3294 					&soc->wmi_pdev[i]->event_queue);
3295 			while (buf) {
3296 				qdf_nbuf_free(buf);
3297 				buf = qdf_nbuf_queue_remove(
3298 						&soc->wmi_pdev[i]->event_queue);
3299 			}
3300 
3301 			qdf_flush_work(&soc->wmi_pdev[i]->rx_diag_event_work);
3302 			buf = qdf_nbuf_queue_remove(
3303 					&soc->wmi_pdev[i]->diag_event_queue);
3304 			while (buf) {
3305 				qdf_nbuf_free(buf);
3306 				buf = qdf_nbuf_queue_remove(
3307 					&soc->wmi_pdev[i]->diag_event_queue);
3308 			}
3309 
3310 			wmi_log_buffer_free(soc->wmi_pdev[i]);
3311 
3312 			/* Free events logs list */
3313 			if (soc->wmi_pdev[i]->events_logs_list)
3314 				qdf_mem_free(
3315 					soc->wmi_pdev[i]->events_logs_list);
3316 
3317 			qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock);
3318 			qdf_spinlock_destroy(
3319 					&soc->wmi_pdev[i]->diag_eventq_lock);
3320 
3321 			wmi_interface_sequence_deinit(soc->wmi_pdev[i]);
3322 			wmi_ext_dbgfs_deinit(soc->wmi_pdev[i]);
3323 
3324 			qdf_mem_free(soc->wmi_pdev[i]);
3325 		}
3326 	}
3327 	qdf_spinlock_destroy(&soc->ctx_lock);
3328 
3329 	if (soc->wmi_service_bitmap) {
3330 		qdf_mem_free(soc->wmi_service_bitmap);
3331 		soc->wmi_service_bitmap = NULL;
3332 	}
3333 
3334 	if (soc->wmi_ext_service_bitmap) {
3335 		qdf_mem_free(soc->wmi_ext_service_bitmap);
3336 		soc->wmi_ext_service_bitmap = NULL;
3337 	}
3338 
3339 	if (soc->wmi_ext2_service_bitmap) {
3340 		qdf_mem_free(soc->wmi_ext2_service_bitmap);
3341 		soc->wmi_ext2_service_bitmap = NULL;
3342 	}
3343 
3344 	/* Decrease the ref count once refcount infra is present */
3345 	soc->wmi_psoc = NULL;
3346 	qdf_mem_free(soc);
3347 }
3348 
3349 void
3350 wmi_unified_remove_work(struct wmi_unified *wmi_handle)
3351 {
3352 	wmi_buf_t buf;
3353 
3354 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue);
3355 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
3356 	buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3357 	while (buf) {
3358 		qdf_nbuf_free(buf);
3359 		buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3360 	}
3361 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
3362 
3363 	/* Remove diag events work */
3364 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_diag_work_queue);
3365 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
3366 	buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3367 	while (buf) {
3368 		qdf_nbuf_free(buf);
3369 		buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3370 	}
3371 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
3372 }
3373 
3374 /**
3375  * wmi_htc_tx_complete() - Process htc tx completion
3376  *
3377  * @ctx: handle to wmi
3378  * @htc_pkt: pointer to htc packet
3379  *
3380  * Return: none.
3381  */
3382 static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
3383 {
3384 	struct wmi_soc *soc = (struct wmi_soc *) ctx;
3385 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3386 	u_int8_t *buf_ptr;
3387 	u_int32_t len;
3388 	struct wmi_unified *wmi_handle;
3389 #ifdef WMI_INTERFACE_EVENT_LOGGING
3390 	struct wmi_debug_log_info *log_info;
3391 	uint32_t cmd_id;
3392 	uint8_t *offset_ptr;
3393 	qdf_dma_addr_t dma_addr;
3394 	uint64_t phy_addr;
3395 #endif
3396 
3397 	ASSERT(wmi_cmd_buf);
3398 	wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint);
3399 	if (!wmi_handle) {
3400 		wmi_err("Unable to get wmi handle");
3401 		QDF_ASSERT(0);
3402 		return;
3403 	}
3404 	buf_ptr = (u_int8_t *)wmi_buf_data(wmi_cmd_buf);
3405 #ifdef WMI_INTERFACE_EVENT_LOGGING
3406 	log_info = &wmi_handle->log_info;
3407 
3408 	if (log_info->wmi_logging_enable) {
3409 		cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf),
3410 				WMI_CMD_HDR, COMMANDID);
3411 
3412 		dma_addr = QDF_NBUF_CB_PADDR(wmi_cmd_buf);
3413 		phy_addr = qdf_mem_virt_to_phys(qdf_nbuf_data(wmi_cmd_buf));
3414 
3415 		qdf_spin_lock_bh(&log_info->wmi_record_lock);
3416 		/* Record 16 bytes of WMI cmd tx complete data
3417 		 * - exclude TLV and WMI headers
3418 		 */
3419 		offset_ptr = buf_ptr + wmi_handle->soc->buf_offset_command;
3420 		if (wmi_handle->ops->is_management_record(cmd_id)) {
3421 			WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3422 						       offset_ptr);
3423 		} else {
3424 			WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3425 						  offset_ptr, dma_addr,
3426 						  phy_addr);
3427 		}
3428 
3429 		qdf_spin_unlock_bh(&log_info->wmi_record_lock);
3430 	}
3431 #endif
3432 
3433 	wmi_interface_sequence_check(wmi_handle, wmi_cmd_buf);
3434 
3435 	len = qdf_nbuf_len(wmi_cmd_buf);
3436 	qdf_mem_zero(buf_ptr, len);
3437 	wmi_buf_free(wmi_cmd_buf);
3438 	qdf_mem_free(htc_pkt);
3439 	qdf_atomic_dec(&wmi_handle->pending_cmds);
3440 }
3441 
3442 #ifdef FEATURE_RUNTIME_PM
3443 /**
3444  * wmi_htc_log_pkt() - Print information of WMI command from HTC packet
3445  *
3446  * @ctx: handle of WMI context
3447  * @htc_pkt: handle of HTC packet
3448  *
3449  * Return: none
3450  */
3451 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3452 {
3453 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3454 	uint32_t cmd_id;
3455 
3456 	ASSERT(wmi_cmd_buf);
3457 	cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), WMI_CMD_HDR,
3458 			       COMMANDID);
3459 
3460 	wmi_debug("WMI command from HTC packet: %s, ID: %d",
3461 		 wmi_id_to_name(cmd_id), cmd_id);
3462 }
3463 #else
3464 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3465 {
3466 }
3467 #endif
3468 
3469 /**
3470  * wmi_connect_pdev_htc_service() - WMI API to connect to the HTC service
3471  * @soc: handle to WMI SoC
3472  * @pdev_idx: Pdev index
3473  *
3474  * Return: QDF_STATUS
3475  */
3476 static QDF_STATUS wmi_connect_pdev_htc_service(struct wmi_soc *soc,
3477 					       uint32_t pdev_idx)
3478 {
3479 	QDF_STATUS status;
3480 	struct htc_service_connect_resp response;
3481 	struct htc_service_connect_req connect;
3482 
3483 	OS_MEMZERO(&connect, sizeof(connect));
3484 	OS_MEMZERO(&response, sizeof(response));
3485 
3486 	/* meta data is unused for now */
3487 	connect.pMetaData = NULL;
3488 	connect.MetaDataLength = 0;
3489 	/* these fields are the same for all service endpoints */
3490 	connect.EpCallbacks.pContext = soc;
3491 	connect.EpCallbacks.EpTxCompleteMultiple =
3492 		NULL /* Control path completion ar6000_tx_complete */;
3493 	connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */;
3494 	connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */;
3495 	connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */;
3496 	connect.EpCallbacks.EpTxComplete =
3497 		wmi_htc_tx_complete /* Control path completion */;
3498 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3499 
3500 	/* connect to control service */
3501 	connect.service_id = soc->svc_ids[pdev_idx];
3502 	status = htc_connect_service(soc->htc_handle, &connect, &response);
3503 
3504 	if (QDF_IS_STATUS_ERROR(status)) {
3505 		wmi_err("Failed to connect to WMI CONTROL service status:%d",
3506 			 status);
3507 		return status;
3508 	}
3509 
3510 	if (soc->is_async_ep)
3511 		htc_set_async_ep(soc->htc_handle, response.Endpoint, true);
3512 
3513 	soc->wmi_endpoint_id[pdev_idx] = response.Endpoint;
3514 	soc->max_msg_len[pdev_idx] = response.MaxMsgLength;
3515 
3516 	return QDF_STATUS_SUCCESS;
3517 }
3518 
3519 QDF_STATUS
3520 wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle,
3521 				HTC_HANDLE htc_handle)
3522 {
3523 	uint32_t i;
3524 	uint8_t wmi_ep_count;
3525 
3526 	wmi_handle->soc->htc_handle = htc_handle;
3527 
3528 	wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle);
3529 	if (wmi_ep_count > WMI_MAX_RADIOS)
3530 		return QDF_STATUS_E_FAULT;
3531 
3532 	for (i = 0; i < wmi_ep_count; i++)
3533 		wmi_connect_pdev_htc_service(wmi_handle->soc, i);
3534 
3535 	wmi_handle->htc_handle = htc_handle;
3536 	wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0];
3537 	wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0];
3538 
3539 	return QDF_STATUS_SUCCESS;
3540 }
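
/*
 * Usage sketch (illustrative only): once the HTC layer is up, the caller
 * hands its HTC handle to WMI so a control endpoint gets connected for
 * each radio reported by HTC. "htc_handle" is assumed to come from the
 * caller's HTC initialization.
 *
 *	QDF_STATUS status;
 *
 *	status = wmi_unified_connect_htc_service(wmi, htc_handle);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		wmi_err("WMI HTC service connect failed: %d", status);
 */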
3541 
3542 #if defined(WLAN_FEATURE_WMI_DIAG_OVER_CE7) || \
3543 	defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
3544 QDF_STATUS wmi_diag_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
3545 					     HTC_HANDLE htc_handle)
3546 {
3547 	QDF_STATUS status;
3548 	struct htc_service_connect_resp response = {0};
3549 	struct htc_service_connect_req connect = {0};
3550 
3551 	/* meta data is unused for now */
3552 	connect.pMetaData = NULL;
3553 	connect.MetaDataLength = 0;
3554 	connect.EpCallbacks.pContext = wmi_handle->soc;
3555 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3556 	connect.EpCallbacks.EpRecv = wmi_control_diag_rx /* wmi diag rx */;
3557 	connect.EpCallbacks.EpRecvRefill = NULL;
3558 	connect.EpCallbacks.EpSendFull = NULL;
3559 	connect.EpCallbacks.EpTxComplete = NULL;
3560 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3561 
3562 	/* connect to wmi diag service */
3563 	connect.service_id = WMI_CONTROL_DIAG_SVC;
3564 	status = htc_connect_service(htc_handle, &connect, &response);
3565 
3566 	if (QDF_IS_STATUS_ERROR(status)) {
3567 		wmi_err("Failed to connect to WMI DIAG service status:%d",
3568 			status);
3569 		return status;
3570 	}
3571 
3572 	if (wmi_handle->soc->is_async_ep)
3573 		htc_set_async_ep(htc_handle, response.Endpoint, true);
3574 
3575 	wmi_handle->soc->wmi_diag_endpoint_id = response.Endpoint;
3576 
3577 	return QDF_STATUS_SUCCESS;
3578 }
3579 #endif
3580 
3581 #if defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
3582 QDF_STATUS wmi_dbr_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
3583 					    HTC_HANDLE htc_handle)
3584 {
3585 	QDF_STATUS status;
3586 	struct htc_service_connect_resp response = {0};
3587 	struct htc_service_connect_req connect = {0};
3588 
3589 	/* meta data is unused for now */
3590 	connect.pMetaData = NULL;
3591 	connect.MetaDataLength = 0;
3592 	connect.EpCallbacks.pContext = wmi_handle->soc;
3593 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3594 	connect.EpCallbacks.EpRecv = wmi_control_dbr_rx /* wmi dbr rx */;
3595 	connect.EpCallbacks.EpRecvRefill = NULL;
3596 	connect.EpCallbacks.EpSendFull = NULL;
3597 	connect.EpCallbacks.EpTxComplete = NULL;
3598 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3599 
3600 	/* connect to wmi dbr service */
3601 	connect.service_id = WMI_CONTROL_DBR_SVC;
3602 	status = htc_connect_service(htc_handle, &connect, &response);
3603 
3604 	if (QDF_IS_STATUS_ERROR(status)) {
3605 		wmi_err("Failed to connect to WMI DBR service status:%d",
3606 			status);
3607 		return status;
3608 	}
3609 
3610 	if (wmi_handle->soc->is_async_ep)
3611 		htc_set_async_ep(htc_handle, response.Endpoint, true);
3612 
3613 	wmi_handle->soc->wmi_dbr_endpoint_id = response.Endpoint;
3614 
3615 	return QDF_STATUS_SUCCESS;
3616 }
3617 #endif
3618 
3619 int wmi_get_host_credits(wmi_unified_t wmi_handle)
3620 {
3621 	int host_credits = 0;
3622 
3623 	htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle,
3624 						 &host_credits);
3625 	return host_credits;
3626 }
3627 
3628 int wmi_get_pending_cmds(wmi_unified_t wmi_handle)
3629 {
3630 	return qdf_atomic_read(&wmi_handle->pending_cmds);
3631 }
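
/*
 * Usage sketch (illustrative only): callers commonly poll the pending
 * command counter and the HTC credit count to decide when the control
 * path has drained, e.g. before a target suspend. The retry count and
 * sleep interval below are arbitrary placeholders.
 *
 *	int retries = 100;
 *
 *	while (retries-- && wmi_get_pending_cmds(wmi) > 0)
 *		qdf_sleep(10);
 *	if (wmi_get_pending_cmds(wmi) > 0)
 *		wmi_err("WMI commands still pending, credits: %d",
 *			wmi_get_host_credits(wmi));
 */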
3632 
3633 void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val)
3634 {
3635 	qdf_atomic_set(&wmi_handle->is_target_suspended, val);
3636 }
3637 
3638 void wmi_set_target_suspend_acked(wmi_unified_t wmi_handle, A_BOOL val)
3639 {
3640 	qdf_atomic_set(&wmi_handle->is_target_suspend_acked, val);
3641 	qdf_atomic_set(&wmi_handle->num_stats_over_qmi, 0);
3642 }
3643 
3644 bool wmi_is_target_suspended(struct wmi_unified *wmi_handle)
3645 {
3646 	return qdf_atomic_read(&wmi_handle->is_target_suspended);
3647 }
3648 qdf_export_symbol(wmi_is_target_suspended);
3649 
3650 bool wmi_is_target_suspend_acked(struct wmi_unified *wmi_handle)
3651 {
3652 	return qdf_atomic_read(&wmi_handle->is_target_suspend_acked);
3653 }
3654 qdf_export_symbol(wmi_is_target_suspend_acked);
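
/*
 * Usage sketch (illustrative only): the suspend flags above are toggled
 * by the caller around the target suspend/resume handshake and checked
 * on the command send path so traffic can be rejected while the target
 * sleeps.
 *
 *	wmi_set_target_suspend(wmi, true);
 *	if (wmi_is_target_suspended(wmi))
 *		wmi_debug("dropping non-WoW command while suspended");
 */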
3655 
3656 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
3657 void wmi_set_qmi_stats(wmi_unified_t wmi_handle, bool val)
3658 {
3659 	wmi_handle->is_qmi_stats_enabled = val;
3660 }
3661 
3662 bool wmi_is_qmi_stats_enabled(struct wmi_unified *wmi_handle)
3663 {
3664 	return wmi_handle->is_qmi_stats_enabled;
3665 }
3666 #endif
3667 
3668 void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag)
3669 {
3670 	wmi_handle->tag_crash_inject = flag;
3671 }
3672 
3673 void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val)
3674 {
3675 	qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val);
3676 }
3677 
3678 void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val)
3679 {
3680 	wmi_handle->tgt_force_assert_enable = val;
3681 }
3682 
3683 int
3684 wmi_stop(wmi_unified_t wmi_handle)
3685 {
3686 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3687 		  "WMI Stop");
3688 	wmi_handle->wmi_stopinprogress = 1;
3689 	return 0;
3690 }
3691 
3692 int
3693 wmi_start(wmi_unified_t wmi_handle)
3694 {
3695 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3696 		  "WMI Start");
3697 	wmi_handle->wmi_stopinprogress = 0;
3698 	return 0;
3699 }
3700 
3701 bool
3702 wmi_is_blocked(wmi_unified_t wmi_handle)
3703 {
3704 	return wmi_handle->wmi_stopinprogress != 0;
3705 }
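
/*
 * Usage sketch (illustrative only): wmi_stop() and wmi_start() only
 * toggle the stop-in-progress flag; send paths are expected to consult
 * wmi_is_blocked() (or the flag itself) and refuse new commands while a
 * stop is in progress, e.g. across SSR or unload.
 *
 *	wmi_stop(wmi);
 *	if (wmi_is_blocked(wmi))
 *		wmi_debug("WMI tx blocked until wmi_start()");
 */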
3706 
3707 void
3708 wmi_flush_endpoint(wmi_unified_t wmi_handle)
3709 {
3710 	htc_flush_endpoint(wmi_handle->htc_handle,
3711 		wmi_handle->wmi_endpoint_id, 0);
3712 }
3713 qdf_export_symbol(wmi_flush_endpoint);
3714 
3715 void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle,
3716 				   uint32_t *pdev_id_map,
3717 				   uint8_t size)
3718 {
3719 	if (wmi_handle->target_type == WMI_TLV_TARGET)
3720 		wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle,
3721 							       pdev_id_map,
3722 							       size);
3723 }
3724 
3725 int __wmi_validate_handle(wmi_unified_t wmi_handle, const char *func)
3726 {
3727 	if (!wmi_handle) {
3728 		wmi_err("Invalid WMI handle (via %s)", func);
3729 		return -EINVAL;
3730 	}
3731 
3732 	return 0;
3733 }
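
/*
 * Usage sketch (illustrative only): callers normally reach this helper
 * through a wrapper that supplies __func__ for them; the
 * wmi_validate_handle() macro named below is assumed to be provided by
 * the WMI API header.
 *
 *	if (wmi_validate_handle(wmi_handle))
 *		return QDF_STATUS_E_INVAL;
 */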
3734