xref: /wlan-dirver/qca-wifi-host-cmn/wmi/src/wmi_unified.c (revision b64de0cf4a6e7eb96a50234ed34293d231cfcf7f)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*
21  * Host WMI unified implementation
22  */
23 #include "htc_api.h"
25 #include "wmi_unified_priv.h"
26 #include "wmi_unified_api.h"
27 #include "qdf_module.h"
28 #include "qdf_platform.h"
29 #include "qdf_ssr_driver_dump.h"
30 #ifdef WMI_EXT_DBG
31 #include "qdf_list.h"
32 #include "qdf_atomic.h"
33 #endif
34 
35 #ifndef WMI_NON_TLV_SUPPORT
36 #include "wmi_tlv_helper.h"
37 #endif
38 
39 #include <linux/debugfs.h>
40 #include <target_if.h>
41 #include <qdf_debugfs.h>
42 #include "wmi_filtered_logging.h"
43 #include <wmi_hang_event.h>
44 
45 #ifdef DP_UMAC_HW_RESET_SUPPORT
46 #include <cdp_txrx_ctrl.h>
47 #endif
48 
49 /* This check for CONFIG_WIN was temporarily added due to a redeclaration
50 compilation error in MCL. The error is caused by the inclusion of wmi.h in
51 wmi_unified_api.h, which gets included here through ol_if_athvar.h. Eventually
52 wmi.h is expected to be removed from wmi_unified_api.h after cleanup, which
53 will require WMI_CMD_HDR to be defined here. */
54 /* Copied from wmi.h */
55 #undef MS
56 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
57 #undef SM
58 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
59 #undef WO
60 #define WO(_f)      ((_f##_OFFSET) >> 2)
61 
62 #undef GET_FIELD
63 #define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f)
64 #undef SET_FIELD
65 #define SET_FIELD(_addr, _f, _val)  \
66 	    (*((uint32_t *)(_addr) + WO(_f)) = \
67 		(*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f))
68 
69 #define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \
70 	    GET_FIELD(_msg_buf, _msg_type ## _ ## _f)
71 
72 #define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \
73 	    SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val)
74 
75 #define WMI_EP_APASS           0x0
76 #define WMI_EP_LPASS           0x1
77 #define WMI_EP_SENSOR          0x2
78 
79 #define WMI_INFOS_DBG_FILE_PERM (QDF_FILE_USR_READ | \
80 				 QDF_FILE_USR_WRITE | \
81 				 QDF_FILE_GRP_READ | \
82 				 QDF_FILE_OTH_READ)
83 
84 /*
85  * Control Path
86  */
87 typedef PREPACK struct {
88 	uint32_t	commandId:24,
89 			reserved:2, /* used for WMI endpoint ID */
90 			plt_priv:6; /* platform private */
91 } POSTPACK WMI_CMD_HDR;        /* used for commands and events */
92 
93 #define WMI_CMD_HDR_COMMANDID_LSB           0
94 #define WMI_CMD_HDR_COMMANDID_MASK          0x00ffffff
95 #define WMI_CMD_HDR_COMMANDID_OFFSET        0x00000000
96 #define WMI_CMD_HDR_WMI_ENDPOINTID_MASK        0x03000000
97 #define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET      24
98 #define WMI_CMD_HDR_PLT_PRIV_LSB               24
99 #define WMI_CMD_HDR_PLT_PRIV_MASK              0xff000000
100 #define WMI_CMD_HDR_PLT_PRIV_OFFSET            0x00000000
101 /* end of copy wmi.h */
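
/*
 * Illustrative sketch, not part of the original file: how the accessor
 * macros above are typically used on a buffer that starts with WMI_CMD_HDR.
 * The wmi_example_* helper names are hypothetical;
 * wmi_ext_dbg_msg_event_record() further below uses the same
 * WMI_GET_FIELD() pattern to extract COMMANDID.
 */
static inline uint32_t wmi_example_hdr_get_cmd_id(void *msg_buf)
{
	return WMI_GET_FIELD(msg_buf, WMI_CMD_HDR, COMMANDID);
}

static inline void wmi_example_hdr_set_cmd_id(void *msg_buf, uint32_t cmd_id)
{
	WMI_SET_FIELD(msg_buf, WMI_CMD_HDR, COMMANDID, cmd_id);
}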
102 
103 #define WMI_MIN_HEAD_ROOM 64
104 
105 /* WBUFF pool sizes for WMI */
106 /* Allocation of size 256 bytes */
107 #define WMI_WBUFF_POOL_0_SIZE 128
108 /* Allocation of size 512 bytes */
109 #define WMI_WBUFF_POOL_1_SIZE 16
110 /* Allocation of size 1024 bytes */
111 #define WMI_WBUFF_POOL_2_SIZE 8
112 /* Allocation of size 2048 bytes */
113 #define WMI_WBUFF_POOL_3_SIZE 8
114 
115 /* wbuff pool buffer lengths in bytes for WMI */
116 #define WMI_WBUFF_LEN_POOL0 256
117 #define WMI_WBUFF_LEN_POOL1 512
118 #define WMI_WBUFF_LEN_POOL2 1024
119 #define WMI_WBUFF_LEN_POOL3 2048
120 
121 #define RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT 500
122 
123 #ifdef WMI_INTERFACE_EVENT_LOGGING
124 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
125 /* TODO Cleanup this backported function */
126 static int wmi_bp_seq_printf(qdf_debugfs_file_t m, const char *f, ...)
127 {
128 	va_list args;
129 
130 	va_start(args, f);
131 	seq_vprintf(m, f, args);
132 	va_end(args);
133 
134 	return 0;
135 }
136 #else
137 #define wmi_bp_seq_printf(m, fmt, ...) seq_printf((m), fmt, ##__VA_ARGS__)
138 #endif
139 
140 #ifndef MAX_WMI_INSTANCES
141 #define CUSTOM_MGMT_CMD_DATA_SIZE 4
142 #endif
143 
144 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
145 /* WMI commands */
146 uint32_t g_wmi_command_buf_idx = 0;
147 struct wmi_command_debug wmi_command_log_buffer[WMI_CMD_DEBUG_MAX_ENTRY];
148 
149 /* WMI commands TX completed */
150 uint32_t g_wmi_command_tx_cmp_buf_idx = 0;
151 struct wmi_command_cmp_debug
152 	wmi_command_tx_cmp_log_buffer[WMI_CMD_CMPL_DEBUG_MAX_ENTRY];
153 
154 /* WMI events when processed */
155 uint32_t g_wmi_event_buf_idx = 0;
156 struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
157 
158 /* WMI events when queued */
159 uint32_t g_wmi_rx_event_buf_idx = 0;
160 struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
161 #endif
162 
163 static void wmi_minidump_detach(struct wmi_unified *wmi_handle)
164 {
165 	struct wmi_log_buf_t *info =
166 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
167 	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
168 
169 	qdf_ssr_driver_dump_unregister_region("wmi_debug_log_info");
170 	qdf_ssr_driver_dump_unregister_region("wmi_rx_event_idx");
171 	qdf_ssr_driver_dump_unregister_region("wmi_rx_event");
172 	qdf_ssr_driver_dump_unregister_region("wmi_event_log_idx");
173 	qdf_ssr_driver_dump_unregister_region("wmi_event_log");
174 	qdf_ssr_driver_dump_unregister_region("wmi_command_log_idx");
175 	qdf_ssr_driver_dump_unregister_region("wmi_command_log");
176 	qdf_ssr_driver_dump_unregister_region("wmi_tx_cmp_idx");
177 	qdf_ssr_driver_dump_unregister_region("wmi_tx_cmp");
178 	qdf_minidump_remove(info->buf, buf_size, "wmi_tx_cmp");
179 }
180 
181 static void wmi_minidump_attach(struct wmi_unified *wmi_handle)
182 {
183 	struct wmi_log_buf_t *info =
184 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
185 	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
186 
187 	qdf_minidump_log(info->buf, buf_size, "wmi_tx_cmp");
188 
189 	qdf_ssr_driver_dump_register_region("wmi_tx_cmp", info->buf, buf_size);
190 	qdf_ssr_driver_dump_register_region("wmi_tx_cmp_idx",
191 					    info->p_buf_tail_idx,
192 					    sizeof(*info->p_buf_tail_idx));
193 
194 	info = &wmi_handle->log_info.wmi_command_log_buf_info;
195 	buf_size = info->size * sizeof(struct wmi_command_debug);
196 
197 	qdf_ssr_driver_dump_register_region("wmi_command_log", info->buf,
198 					    buf_size);
199 	qdf_ssr_driver_dump_register_region("wmi_command_log_idx",
200 					    info->p_buf_tail_idx,
201 					    sizeof(*info->p_buf_tail_idx));
202 
203 	info = &wmi_handle->log_info.wmi_event_log_buf_info;
204 	buf_size = info->size * sizeof(struct wmi_event_debug);
205 
206 	qdf_ssr_driver_dump_register_region("wmi_event_log", info->buf,
207 					    buf_size);
208 	qdf_ssr_driver_dump_register_region("wmi_event_log_idx",
209 					    info->p_buf_tail_idx,
210 					    sizeof(*info->p_buf_tail_idx));
211 
212 	info = &wmi_handle->log_info.wmi_rx_event_log_buf_info;
213 	buf_size = info->size * sizeof(struct wmi_event_debug);
214 
215 	qdf_ssr_driver_dump_register_region("wmi_rx_event", info->buf,
216 					    buf_size);
217 	qdf_ssr_driver_dump_register_region("wmi_rx_event_idx",
218 					    info->p_buf_tail_idx,
219 					    sizeof(*info->p_buf_tail_idx));
220 
221 	qdf_ssr_driver_dump_register_region("wmi_debug_log_info",
222 					    &wmi_handle->log_info,
223 					    sizeof(wmi_handle->log_info));
224 }
225 
226 #define WMI_COMMAND_RECORD(h, a, b) {					\
227 	if (wmi_cmd_log_max_entry <=					\
228 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))	\
229 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\
230 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
231 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\
232 						.command = a;		\
233 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
234 				wmi_command_log_buf_info.buf)		\
235 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\
236 			b, wmi_record_max_length);			\
237 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
238 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\
239 		time = qdf_get_log_timestamp();			\
240 	(*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++;	\
241 	h->log_info.wmi_command_log_buf_info.length++;			\
242 }
243 
244 #define WMI_COMMAND_TX_CMP_RECORD(h, a, b, da, pa) {			\
245 	if (wmi_cmd_cmpl_log_max_entry <=				\
246 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\
247 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
248 				p_buf_tail_idx) = 0;			\
249 	((struct wmi_command_cmp_debug *)h->log_info.			\
250 		wmi_command_tx_cmp_log_buf_info.buf)			\
251 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
252 				p_buf_tail_idx)].			\
253 							command	= a;	\
254 	qdf_mem_copy(((struct wmi_command_cmp_debug *)h->log_info.	\
255 				wmi_command_tx_cmp_log_buf_info.buf)	\
256 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
257 			p_buf_tail_idx)].				\
258 		data, b, wmi_record_max_length);			\
259 	((struct wmi_command_cmp_debug *)h->log_info.			\
260 		wmi_command_tx_cmp_log_buf_info.buf)			\
261 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
262 				p_buf_tail_idx)].			\
263 		time = qdf_get_log_timestamp();				\
264 	((struct wmi_command_cmp_debug *)h->log_info.			\
265 		wmi_command_tx_cmp_log_buf_info.buf)			\
266 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
267 				p_buf_tail_idx)].			\
268 		dma_addr = da;						\
269 	((struct wmi_command_cmp_debug *)h->log_info.			\
270 		wmi_command_tx_cmp_log_buf_info.buf)			\
271 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
272 				p_buf_tail_idx)].			\
273 		phy_addr = pa;						\
274 	(*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\
275 	h->log_info.wmi_command_tx_cmp_log_buf_info.length++;		\
276 }
277 
278 #define WMI_EVENT_RECORD(h, a, b) {					\
279 	if (wmi_event_log_max_entry <=					\
280 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))	\
281 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\
282 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
283 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].	\
284 		event = a;						\
285 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
286 				wmi_event_log_buf_info.buf)		\
287 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\
288 		wmi_record_max_length);					\
289 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
290 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\
291 		qdf_get_log_timestamp();				\
292 	(*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++;	\
293 	h->log_info.wmi_event_log_buf_info.length++;			\
294 }
295 
296 #define WMI_RX_EVENT_RECORD(h, a, b) {					\
297 	if (wmi_event_log_max_entry <=					\
298 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\
299 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\
300 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
301 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
302 		event = a;						\
303 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
304 				wmi_rx_event_log_buf_info.buf)		\
305 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
306 			data, b, wmi_record_max_length);		\
307 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
308 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
309 		time =	qdf_get_log_timestamp();			\
310 	(*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++;	\
311 	h->log_info.wmi_rx_event_log_buf_info.length++;			\
312 }
313 
314 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
315 uint32_t g_wmi_mgmt_command_buf_idx = 0;
316 struct
317 wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_TX_DEBUG_MAX_ENTRY];
318 
319 /* wmi_mgmt commands TX completed */
320 uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0;
321 struct wmi_command_debug
322 wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY];
323 
324 /* wmi_mgmt events when received */
325 uint32_t g_wmi_mgmt_rx_event_buf_idx = 0;
326 struct wmi_event_debug
327 wmi_mgmt_rx_event_log_buffer[WMI_MGMT_RX_DEBUG_MAX_ENTRY];
328 
329 /* wmi_diag events when received */
330 uint32_t g_wmi_diag_rx_event_buf_idx = 0;
331 struct wmi_event_debug
332 wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY];
333 #endif
334 
335 #define WMI_MGMT_COMMAND_RECORD(h, a, b) {                              \
336 	if (wmi_mgmt_tx_log_max_entry <=                                   \
337 		*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \
338 		*(h->log_info.wmi_mgmt_command_log_buf_info.		\
339 				p_buf_tail_idx) = 0;			\
340 	((struct wmi_command_debug *)h->log_info.                       \
341 		 wmi_mgmt_command_log_buf_info.buf)                     \
342 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
343 			command = a;                                    \
344 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.          \
345 				wmi_mgmt_command_log_buf_info.buf)      \
346 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
347 		data, b,                                                \
348 		wmi_record_max_length);                                	\
349 	((struct wmi_command_debug *)h->log_info.                       \
350 		 wmi_mgmt_command_log_buf_info.buf)                     \
351 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
352 			time =        qdf_get_log_timestamp();          \
353 	(*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\
354 	h->log_info.wmi_mgmt_command_log_buf_info.length++;             \
355 }
356 
357 #define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) {			\
358 	if (wmi_mgmt_tx_cmpl_log_max_entry <=				\
359 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
360 			p_buf_tail_idx))				\
361 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
362 			p_buf_tail_idx) = 0;				\
363 	((struct wmi_command_debug *)h->log_info.			\
364 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
365 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
366 				p_buf_tail_idx)].command = a;		\
367 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
368 				wmi_mgmt_command_tx_cmp_log_buf_info.buf)\
369 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
370 			p_buf_tail_idx)].data, b,			\
371 			wmi_record_max_length);				\
372 	((struct wmi_command_debug *)h->log_info.			\
373 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
374 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
375 				p_buf_tail_idx)].time =			\
376 		qdf_get_log_timestamp();				\
377 	(*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.		\
378 			p_buf_tail_idx))++;				\
379 	h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++;	\
380 }
381 
382 #define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do {				\
383 	if (wmi_mgmt_rx_log_max_entry <=				\
384 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\
385 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\
386 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
387 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\
388 					.event = a;			\
389 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
390 				wmi_mgmt_event_log_buf_info.buf)	\
391 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
392 			data, b, wmi_record_max_length);		\
393 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
394 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
395 			time = qdf_get_log_timestamp();			\
396 	(*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++;	\
397 	h->log_info.wmi_mgmt_event_log_buf_info.length++;		\
398 } while (0);
399 
400 #define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do {                             \
401 	if (wmi_diag_log_max_entry <=                                   \
402 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\
403 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\
404 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
405 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\
406 					.event = a;                     \
407 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.            \
408 				wmi_diag_event_log_buf_info.buf)        \
409 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
410 			data, b, wmi_record_max_length);                \
411 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
412 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
413 			time = qdf_get_log_timestamp();                 \
414 	(*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++;  \
415 	h->log_info.wmi_diag_event_log_buf_info.length++;               \
416 } while (0);
417 
418 /* These are defined as module params so that they can be configured */
419 /* WMI Commands */
420 uint32_t wmi_cmd_log_max_entry = WMI_CMD_DEBUG_MAX_ENTRY;
421 uint32_t wmi_cmd_cmpl_log_max_entry = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
422 /* WMI Events */
423 uint32_t wmi_event_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY;
424 /* WMI MGMT Tx */
425 uint32_t wmi_mgmt_tx_log_max_entry = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
426 uint32_t wmi_mgmt_tx_cmpl_log_max_entry = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
427 /* WMI MGMT Rx */
428 uint32_t wmi_mgmt_rx_log_max_entry = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
429 /* WMI Diag Event */
430 uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
431 /* WMI capture size */
432 uint32_t wmi_record_max_length = WMI_DEBUG_ENTRY_MAX_LENGTH;
433 uint32_t wmi_display_size = 100;
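
/*
 * Illustrative sketch, not part of the original file: the record macros
 * above are meant to run with wmi_record_lock held, mirroring the pattern
 * used by wmi_mgmt_cmd_record() further below. The helper name and
 * arguments are hypothetical; 'payload' is assumed to be at least
 * wmi_record_max_length bytes, since that many bytes are copied.
 */
static inline void wmi_example_record_cmd(wmi_unified_t h, uint32_t cmd_id,
					  uint8_t *payload)
{
	qdf_spin_lock_bh(&h->log_info.wmi_record_lock);
	WMI_COMMAND_RECORD(h, cmd_id, payload);
	qdf_spin_unlock_bh(&h->log_info.wmi_record_lock);
}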
434 
435 /**
436  * wmi_log_init() - Initialize WMI event logging
437  * @wmi_handle: WMI handle.
438  *
439  * Return: Initialization status
440  */
441 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
442 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
443 {
444 	struct wmi_log_buf_t *cmd_log_buf =
445 			&wmi_handle->log_info.wmi_command_log_buf_info;
446 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
447 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
448 
449 	struct wmi_log_buf_t *event_log_buf =
450 			&wmi_handle->log_info.wmi_event_log_buf_info;
451 	struct wmi_log_buf_t *rx_event_log_buf =
452 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
453 
454 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
455 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
456 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
457 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
458 	struct wmi_log_buf_t *mgmt_event_log_buf =
459 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
460 	struct wmi_log_buf_t *diag_event_log_buf =
461 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
462 
463 	/* WMI commands */
464 	cmd_log_buf->length = 0;
465 	cmd_log_buf->buf_tail_idx = 0;
466 	cmd_log_buf->buf = wmi_command_log_buffer;
467 	cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx;
468 	cmd_log_buf->size = WMI_CMD_DEBUG_MAX_ENTRY;
469 
470 	/* WMI commands TX completed */
471 	cmd_tx_cmpl_log_buf->length = 0;
472 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
473 	cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer;
474 	cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx;
475 	cmd_tx_cmpl_log_buf->size = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
476 
477 	/* WMI events when processed */
478 	event_log_buf->length = 0;
479 	event_log_buf->buf_tail_idx = 0;
480 	event_log_buf->buf = wmi_event_log_buffer;
481 	event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx;
482 	event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
483 
484 	/* WMI events when queued */
485 	rx_event_log_buf->length = 0;
486 	rx_event_log_buf->buf_tail_idx = 0;
487 	rx_event_log_buf->buf = wmi_rx_event_log_buffer;
488 	rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx;
489 	rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
490 
491 	/* WMI Management commands */
492 	mgmt_cmd_log_buf->length = 0;
493 	mgmt_cmd_log_buf->buf_tail_idx = 0;
494 	mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer;
495 	mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx;
496 	mgmt_cmd_log_buf->size = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
497 
498 	/* WMI Management commands Tx completed*/
499 	mgmt_cmd_tx_cmp_log_buf->length = 0;
500 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
501 	mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer;
502 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
503 		&g_wmi_mgmt_command_tx_cmp_buf_idx;
504 	mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
505 
506 	/* WMI Management events when received */
507 	mgmt_event_log_buf->length = 0;
508 	mgmt_event_log_buf->buf_tail_idx = 0;
509 	mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer;
510 	mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx;
511 	mgmt_event_log_buf->size = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
512 
513 	/* WMI diag events when received */
514 	diag_event_log_buf->length = 0;
515 	diag_event_log_buf->buf_tail_idx = 0;
516 	diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer;
517 	diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx;
518 	diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
519 
520 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
521 	wmi_handle->log_info.wmi_logging_enable = 1;
522 
523 	return QDF_STATUS_SUCCESS;
524 }
525 #else
526 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
527 {
528 	struct wmi_log_buf_t *cmd_log_buf =
529 			&wmi_handle->log_info.wmi_command_log_buf_info;
530 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
531 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
532 
533 	struct wmi_log_buf_t *event_log_buf =
534 			&wmi_handle->log_info.wmi_event_log_buf_info;
535 	struct wmi_log_buf_t *rx_event_log_buf =
536 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
537 
538 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
539 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
540 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
541 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
542 	struct wmi_log_buf_t *mgmt_event_log_buf =
543 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
544 	struct wmi_log_buf_t *diag_event_log_buf =
545 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
546 
547 	wmi_handle->log_info.wmi_logging_enable = 0;
548 
549 	/* WMI commands */
550 	cmd_log_buf->length = 0;
551 	cmd_log_buf->buf_tail_idx = 0;
552 	cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
553 		wmi_cmd_log_max_entry * sizeof(struct wmi_command_debug));
554 	cmd_log_buf->size = wmi_cmd_log_max_entry;
555 
556 	if (!cmd_log_buf->buf)
557 		return QDF_STATUS_E_NOMEM;
558 
559 	cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx;
560 
561 	/* WMI commands TX completed */
562 	cmd_tx_cmpl_log_buf->length = 0;
563 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
564 	cmd_tx_cmpl_log_buf->buf = (struct wmi_command_cmp_debug *) qdf_mem_malloc(
565 		wmi_cmd_cmpl_log_max_entry * sizeof(struct wmi_command_cmp_debug));
566 	cmd_tx_cmpl_log_buf->size = wmi_cmd_cmpl_log_max_entry;
567 
568 	if (!cmd_tx_cmpl_log_buf->buf)
569 		return QDF_STATUS_E_NOMEM;
570 
571 	cmd_tx_cmpl_log_buf->p_buf_tail_idx =
572 		&cmd_tx_cmpl_log_buf->buf_tail_idx;
573 
574 	/* WMI events when processed */
575 	event_log_buf->length = 0;
576 	event_log_buf->buf_tail_idx = 0;
577 	event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
578 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
579 	event_log_buf->size = wmi_event_log_max_entry;
580 
581 	if (!event_log_buf->buf)
582 		return QDF_STATUS_E_NOMEM;
583 
584 	event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx;
585 
586 	/* WMI events when queued */
587 	rx_event_log_buf->length = 0;
588 	rx_event_log_buf->buf_tail_idx = 0;
589 	rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
590 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
591 	rx_event_log_buf->size = wmi_event_log_max_entry;
592 
593 	if (!rx_event_log_buf->buf)
594 		return QDF_STATUS_E_NOMEM;
595 
596 	rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx;
597 
598 	/* WMI Management commands */
599 	mgmt_cmd_log_buf->length = 0;
600 	mgmt_cmd_log_buf->buf_tail_idx = 0;
601 	mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
602 		wmi_mgmt_tx_log_max_entry * sizeof(struct wmi_command_debug));
603 	mgmt_cmd_log_buf->size = wmi_mgmt_tx_log_max_entry;
604 
605 	if (!mgmt_cmd_log_buf->buf)
606 		return QDF_STATUS_E_NOMEM;
607 
608 	mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx;
609 
610 	/* WMI Management commands Tx completed*/
611 	mgmt_cmd_tx_cmp_log_buf->length = 0;
612 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
613 	mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *)
614 		qdf_mem_malloc(
615 		wmi_mgmt_tx_cmpl_log_max_entry *
616 		sizeof(struct wmi_command_debug));
617 	mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_tx_cmpl_log_max_entry;
618 
619 	if (!mgmt_cmd_tx_cmp_log_buf->buf)
620 		return QDF_STATUS_E_NOMEM;
621 
622 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
623 		&mgmt_cmd_tx_cmp_log_buf->buf_tail_idx;
624 
625 	/* WMI Management events when received */
626 	mgmt_event_log_buf->length = 0;
627 	mgmt_event_log_buf->buf_tail_idx = 0;
628 
629 	mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
630 		wmi_mgmt_rx_log_max_entry *
631 		sizeof(struct wmi_event_debug));
632 	mgmt_event_log_buf->size = wmi_mgmt_rx_log_max_entry;
633 
634 	if (!mgmt_event_log_buf->buf)
635 		return QDF_STATUS_E_NOMEM;
636 
637 	mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx;
638 
639 	/* WMI diag events when received */
640 	diag_event_log_buf->length = 0;
641 	diag_event_log_buf->buf_tail_idx = 0;
642 
643 	diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
644 		wmi_diag_log_max_entry *
645 		sizeof(struct wmi_event_debug));
646 	diag_event_log_buf->size = wmi_diag_log_max_entry;
647 
648 	if (!diag_event_log_buf->buf)
649 		return QDF_STATUS_E_NOMEM;
650 
651 	diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx;
652 
653 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
654 	wmi_handle->log_info.wmi_logging_enable = 1;
655 
656 	wmi_filtered_logging_init(wmi_handle);
657 
658 	return QDF_STATUS_SUCCESS;
659 }
660 #endif
661 
662 /**
663  * wmi_log_buffer_free() - Free all dynamically allocated buffer memory for
664  * event logging
665  * @wmi_handle: WMI handle.
666  *
667  * Return: None
668  */
669 #ifdef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
670 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
671 {
672 	wmi_filtered_logging_free(wmi_handle);
673 
674 	if (wmi_handle->log_info.wmi_command_log_buf_info.buf)
675 		qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf);
676 	if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf)
677 		qdf_mem_free(
678 		wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf);
679 	if (wmi_handle->log_info.wmi_event_log_buf_info.buf)
680 		qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf);
681 	if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf)
682 		qdf_mem_free(
683 			wmi_handle->log_info.wmi_rx_event_log_buf_info.buf);
684 	if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf)
685 		qdf_mem_free(
686 			wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf);
687 	if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf)
688 		qdf_mem_free(
689 		wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf);
690 	if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf)
691 		qdf_mem_free(
692 			wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf);
693 	if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf)
694 		qdf_mem_free(
695 			wmi_handle->log_info.wmi_diag_event_log_buf_info.buf);
696 	wmi_handle->log_info.wmi_logging_enable = 0;
697 
698 	qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock);
699 }
700 #else
701 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
702 {
703 	/* Do Nothing */
704 }
705 #endif
706 
707 /**
708  * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer
709  * @log_buffer: the command log buffer metadata of the buffer to print
710  * @count: the maximum number of entries to print
711  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
712  * @print_priv: any data required by the print method, e.g. a file handle
713  *
714  * Return: None
715  */
716 static void
717 wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
718 			 qdf_abstract_print *print, void *print_priv)
719 {
720 	static const int data_len =
721 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
722 	char str[128];
723 	uint32_t idx;
724 
725 	if (count > log_buffer->size)
726 		count = log_buffer->size;
727 	if (count > log_buffer->length)
728 		count = log_buffer->length;
729 
730 	/* subtract count from index, and wrap if necessary */
731 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
732 	idx %= log_buffer->size;
733 
734 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
735 	while (count) {
736 		struct wmi_command_debug *cmd_log = (struct wmi_command_debug *)
737 			&((struct wmi_command_debug *)log_buffer->buf)[idx];
738 		uint64_t secs, usecs;
739 		int len = 0;
740 		int i;
741 
742 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
743 		len += scnprintf(str + len, sizeof(str) - len,
744 				 "% 8lld.%06lld    %6u (0x%06x)    ",
745 				 secs, usecs,
746 				 cmd_log->command, cmd_log->command);
747 		for (i = 0; i < data_len; ++i) {
748 			len += scnprintf(str + len, sizeof(str) - len,
749 					 "0x%08x ", cmd_log->data[i]);
750 		}
751 
752 		print(print_priv, str);
753 
754 		--count;
755 		++idx;
756 		if (idx >= log_buffer->size)
757 			idx = 0;
758 	}
759 }
760 
761 /**
762  * wmi_dump_last_cmd_rec_info() - print the last recorded wmi command and tx completion times
763  * @wmi_handle: wmi handle
764  *
765  * Return: None
766  */
767 static void
768 wmi_dump_last_cmd_rec_info(wmi_unified_t wmi_handle) {
769 	uint32_t idx, idx_tx_cmp, cmd_tmp_log, cmd_tmp_tx_cmp;
770 	uint64_t secs, secs_tx_cmp, usecs, usecs_tx_cmp;
771 	struct wmi_command_debug *cmd_log;
772 	struct wmi_command_debug *cmd_log_tx_cmp;
773 	struct wmi_log_buf_t *log_buf =
774 		&wmi_handle->log_info.wmi_command_log_buf_info;
775 	struct wmi_log_buf_t *log_buf_tx_cmp =
776 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
777 
778 	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
779 
780 	(*log_buf->p_buf_tail_idx == 0) ? (idx = log_buf->size) :
781 		(idx = *log_buf->p_buf_tail_idx - 1);
782 	idx %= log_buf->size;
783 
784 	(*log_buf_tx_cmp->p_buf_tail_idx == 0) ? (idx_tx_cmp =
785 		log_buf_tx_cmp->size) : (idx_tx_cmp =
786 		*log_buf_tx_cmp->p_buf_tail_idx - 1);
787 	idx_tx_cmp %= log_buf_tx_cmp->size;
788 	cmd_log = &((struct wmi_command_debug *)log_buf->buf)[idx];
789 	cmd_log_tx_cmp = &((struct wmi_command_debug *)log_buf_tx_cmp->buf)
790 		[idx_tx_cmp];
791 	cmd_tmp_log = cmd_log->command;
792 	cmd_tmp_tx_cmp = cmd_log_tx_cmp->command;
793 	qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
794 	qdf_log_timestamp_to_secs(cmd_log_tx_cmp->time, &secs_tx_cmp,
795 				  &usecs_tx_cmp);
796 
797 	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
798 
799 	wmi_nofl_err("Last wmi command Time (s) = % 8lld.%06lld ",
800 		     secs, usecs);
801 	wmi_nofl_err("Last wmi Cmd_Id = (0x%06x) ", cmd_tmp_log);
802 	wmi_nofl_err("Last wmi command tx completion Time (s) = % 8lld.%06lld",
803 		     secs_tx_cmp, usecs_tx_cmp);
804 	wmi_nofl_err("Last wmi command tx completion Cmd_Id = (0x%06x) ",
805 		     cmd_tmp_tx_cmp);
806 }
807 
808 /**
809  * wmi_print_cmd_cmp_log_buffer() - wmi command completion log printer
810  * @log_buffer: the command completion log buffer metadata of the buffer to print
811  * @count: the maximum number of entries to print
812  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
813  * @print_priv: any data required by the print method, e.g. a file handle
814  *
815  * Return: None
816  */
817 static void
818 wmi_print_cmd_cmp_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
819 			 qdf_abstract_print *print, void *print_priv)
820 {
821 	static const int data_len =
822 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
823 	char str[128];
824 	uint32_t idx;
825 
826 	if (count > log_buffer->size)
827 		count = log_buffer->size;
828 	if (count > log_buffer->length)
829 		count = log_buffer->length;
830 
831 	/* subtract count from index, and wrap if necessary */
832 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
833 	idx %= log_buffer->size;
834 
835 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
836 	while (count) {
837 		struct wmi_command_cmp_debug *cmd_log = (struct wmi_command_cmp_debug *)
838 			&((struct wmi_command_cmp_debug *)log_buffer->buf)[idx];
839 		uint64_t secs, usecs;
840 		int len = 0;
841 		int i;
842 
843 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
844 		len += scnprintf(str + len, sizeof(str) - len,
845 				 "% 8lld.%06lld    %6u (0x%06x)    ",
846 				 secs, usecs,
847 				 cmd_log->command, cmd_log->command);
848 		for (i = 0; i < data_len; ++i) {
849 			len += scnprintf(str + len, sizeof(str) - len,
850 					 "0x%08x ", cmd_log->data[i]);
851 		}
852 
853 		print(print_priv, str);
854 
855 		--count;
856 		++idx;
857 		if (idx >= log_buffer->size)
858 			idx = 0;
859 	}
860 }
861 
862 /**
863  * wmi_print_event_log_buffer() - an output agnostic wmi event log printer
864  * @log_buffer: the event log buffer metadata of the buffer to print
865  * @count: the maximum number of entries to print
866  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
867  * @print_priv: any data required by the print method, e.g. a file handle
868  *
869  * Return: None
870  */
871 static void
872 wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
873 			   qdf_abstract_print *print, void *print_priv)
874 {
875 	static const int data_len =
876 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
877 	char str[128];
878 	uint32_t idx;
879 
880 	if (count > log_buffer->size)
881 		count = log_buffer->size;
882 	if (count > log_buffer->length)
883 		count = log_buffer->length;
884 
885 	/* subtract count from index, and wrap if necessary */
886 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
887 	idx %= log_buffer->size;
888 
889 	print(print_priv, "Time (seconds)      Event Id             Payload");
890 	while (count) {
891 		struct wmi_event_debug *event_log = (struct wmi_event_debug *)
892 			&((struct wmi_event_debug *)log_buffer->buf)[idx];
893 		uint64_t secs, usecs;
894 		int len = 0;
895 		int i;
896 
897 		qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs);
898 		len += scnprintf(str + len, sizeof(str) - len,
899 				 "% 8lld.%06lld    %6u (0x%06x)    ",
900 				 secs, usecs,
901 				 event_log->event, event_log->event);
902 		for (i = 0; i < data_len; ++i) {
903 			len += scnprintf(str + len, sizeof(str) - len,
904 					 "0x%08x ", event_log->data[i]);
905 		}
906 
907 		print(print_priv, str);
908 
909 		--count;
910 		++idx;
911 		if (idx >= log_buffer->size)
912 			idx = 0;
913 	}
914 }
915 
916 inline void
917 wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count,
918 		  qdf_abstract_print *print, void *print_priv)
919 {
920 	wmi_print_cmd_log_buffer(
921 		&wmi->log_info.wmi_command_log_buf_info,
922 		count, print, print_priv);
923 }
924 
925 inline void
926 wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
927 			 qdf_abstract_print *print, void *print_priv)
928 {
929 	wmi_print_cmd_cmp_log_buffer(
930 		&wmi->log_info.wmi_command_tx_cmp_log_buf_info,
931 		count, print, print_priv);
932 }
933 
934 inline void
935 wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count,
936 		       qdf_abstract_print *print, void *print_priv)
937 {
938 	wmi_print_cmd_log_buffer(
939 		&wmi->log_info.wmi_mgmt_command_log_buf_info,
940 		count, print, print_priv);
941 }
942 
943 inline void
944 wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
945 			      qdf_abstract_print *print, void *print_priv)
946 {
947 	wmi_print_cmd_log_buffer(
948 		&wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info,
949 		count, print, print_priv);
950 }
951 
952 inline void
953 wmi_print_event_log(wmi_unified_t wmi, uint32_t count,
954 		    qdf_abstract_print *print, void *print_priv)
955 {
956 	wmi_print_event_log_buffer(
957 		&wmi->log_info.wmi_event_log_buf_info,
958 		count, print, print_priv);
959 }
960 
961 inline void
962 wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count,
963 		       qdf_abstract_print *print, void *print_priv)
964 {
965 	wmi_print_event_log_buffer(
966 		&wmi->log_info.wmi_rx_event_log_buf_info,
967 		count, print, print_priv);
968 }
969 
970 inline void
971 wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count,
972 			 qdf_abstract_print *print, void *print_priv)
973 {
974 	wmi_print_event_log_buffer(
975 		&wmi->log_info.wmi_mgmt_event_log_buf_info,
976 		count, print, print_priv);
977 }
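
/*
 * Illustrative sketch, not part of the original file: a minimal print
 * callback for the wmi_print_*_log() helpers above. It assumes the
 * qdf_abstract_print convention of (void *priv, const char *fmt, ...) and
 * that vprintk() is visible through the kernel headers already included;
 * the wmi_example_* names are hypothetical.
 */
static int wmi_example_print(void *priv, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	return 0;
}

static inline void wmi_example_dump_cmd_log(wmi_unified_t wmi)
{
	wmi_print_cmd_log(wmi, wmi_display_size, wmi_example_print, NULL);
}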
978 
979 
980 /* debugfs routines*/
981 
982 /*
983  * debug_wmi_##func_base##_show() - debugfs function to display the contents
984  * of the command and event buffers. The macro uses the maximum buffer
985  * length to display the buffer once it has wrapped around.
986  *
987  * @m: debugfs handler to access wmi_handle
988  * @v: Variable arguments (not used)
989  *
990  * Return: Length of characters printed
991  */
992 #define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
993 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
994 						void *v)		\
995 	{								\
996 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
997 		struct wmi_log_buf_t *wmi_log =				\
998 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
999 		int pos, nread, outlen;					\
1000 		int i;							\
1001 		uint64_t secs, usecs;					\
1002 									\
1003 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
1004 		if (!wmi_log->length) {					\
1005 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1006 			return wmi_bp_seq_printf(m,			\
1007 			"no elements to read from ring buffer!\n");	\
1008 		}							\
1009 									\
1010 		if (wmi_log->length <= wmi_ring_size)			\
1011 			nread = wmi_log->length;			\
1012 		else							\
1013 			nread = wmi_ring_size;				\
1014 									\
1015 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
1016 			/* tail can be 0 after wrap-around */		\
1017 			pos = wmi_ring_size - 1;			\
1018 		else							\
1019 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
1020 									\
1021 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
1022 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1023 		while (nread--) {					\
1024 			struct wmi_record_type *wmi_record;		\
1025 									\
1026 			wmi_record = (struct wmi_record_type *)	\
1027 			&(((struct wmi_record_type *)wmi_log->buf)[pos]);\
1028 			outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n",	\
1029 				(wmi_record->command));			\
1030 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
1031 				&usecs);				\
1032 			outlen +=					\
1033 			wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\
1034 				secs, usecs);				\
1035 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
1036 			for (i = 0; i < (wmi_record_max_length/		\
1037 					sizeof(uint32_t)); i++)		\
1038 				outlen += wmi_bp_seq_printf(m, "%x ",	\
1039 					wmi_record->data[i]);		\
1040 			outlen += wmi_bp_seq_printf(m, "\n");		\
1041 									\
1042 			if (pos == 0)					\
1043 				pos = wmi_ring_size - 1;		\
1044 			else						\
1045 				pos--;					\
1046 		}							\
1047 		return outlen;						\
1048 	}								\
1049 
1050 #define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size)	\
1051 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
1052 						void *v)		\
1053 	{								\
1054 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
1055 		struct wmi_log_buf_t *wmi_log =				\
1056 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
1057 		int pos, nread, outlen;					\
1058 		int i;							\
1059 		uint64_t secs, usecs;					\
1060 									\
1061 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
1062 		if (!wmi_log->length) {					\
1063 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1064 			return wmi_bp_seq_printf(m,			\
1065 			"no elements to read from ring buffer!\n");	\
1066 		}							\
1067 									\
1068 		if (wmi_log->length <= wmi_ring_size)			\
1069 			nread = wmi_log->length;			\
1070 		else							\
1071 			nread = wmi_ring_size;				\
1072 									\
1073 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
1074 			/* tail can be 0 after wrap-around */		\
1075 			pos = wmi_ring_size - 1;			\
1076 		else							\
1077 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
1078 									\
1079 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
1080 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1081 		while (nread--) {					\
1082 			struct wmi_event_debug *wmi_record;		\
1083 									\
1084 			wmi_record = (struct wmi_event_debug *)		\
1085 			&(((struct wmi_event_debug *)wmi_log->buf)[pos]);\
1086 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
1087 				&usecs);				\
1088 			outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\
1089 				(wmi_record->event));			\
1090 			outlen +=					\
1091 			wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\
1092 				secs, usecs);				\
1093 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
1094 			for (i = 0; i < (wmi_record_max_length/		\
1095 					sizeof(uint32_t)); i++)		\
1096 				outlen += wmi_bp_seq_printf(m, "%x ",	\
1097 					wmi_record->data[i]);		\
1098 			outlen += wmi_bp_seq_printf(m, "\n");		\
1099 									\
1100 			if (pos == 0)					\
1101 				pos = wmi_ring_size - 1;		\
1102 			else						\
1103 				pos--;					\
1104 		}							\
1105 		return outlen;						\
1106 	}
1107 
1108 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size,
1109 				  wmi_command_debug);
1110 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size,
1111 				  wmi_command_cmp_debug);
1112 GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size);
1113 GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size);
1114 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size,
1115 				  wmi_command_debug);
1116 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log,
1117 					wmi_display_size,
1118 					wmi_command_debug);
1119 GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size);
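
/*
 * Each instantiation above expands to a show handler named
 * debug_wmi_<func_base>_show(), e.g. debug_wmi_command_log_show() and
 * debug_wmi_mgmt_event_log_show(); these handlers are later wired to
 * debugfs files through GENERATE_DEBUG_STRUCTS() below.
 */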
1120 
1121 /**
1122  * debug_wmi_enable_show() - debugfs function to display the enable state of
1123  * the wmi logging feature.
1124  *
1125  * @m: debugfs handler to access wmi_handle
1126  * @v: Variable arguments (not used)
1127  *
1128  * Return: always 1
1129  */
1130 static int debug_wmi_enable_show(struct seq_file *m, void *v)
1131 {
1132 	wmi_unified_t wmi_handle = (wmi_unified_t) m->private;
1133 
1134 	return wmi_bp_seq_printf(m, "%d\n",
1135 			wmi_handle->log_info.wmi_logging_enable);
1136 }
1137 
1138 /**
1139  * debug_wmi_log_size_show() - debugfs function to display the configured
1140  * sizes of the wmi logging command/event and management command/event buffers.
1141  *
1142  * @m: debugfs handler to access wmi_handle
1143  * @v: Variable arguments (not used)
1144  *
1145  * Return: Length of characters printed
1146  */
1147 static int debug_wmi_log_size_show(struct seq_file *m, void *v)
1148 {
1149 
1150 	wmi_bp_seq_printf(m, "WMI command/cmpl log max size:%d/%d\n",
1151 			  wmi_cmd_log_max_entry, wmi_cmd_cmpl_log_max_entry);
1152 	wmi_bp_seq_printf(m, "WMI management Tx/cmpl log max size:%d/%d\n",
1153 			  wmi_mgmt_tx_log_max_entry,
1154 			  wmi_mgmt_tx_cmpl_log_max_entry);
1155 	wmi_bp_seq_printf(m, "WMI event log max size:%d\n",
1156 			  wmi_event_log_max_entry);
1157 	wmi_bp_seq_printf(m, "WMI management Rx log max size:%d\n",
1158 			  wmi_mgmt_rx_log_max_entry);
1159 	return wmi_bp_seq_printf(m,
1160 				 "WMI diag log max size:%d\n",
1161 				 wmi_diag_log_max_entry);
1162 }
1163 
1164 /*
1165  * debug_wmi_##func_base##_write() - debugfs function to clear the
1166  * wmi logging command/event buffers and management command/event buffers.
1167  *
1168  * @file: file handler to access wmi_handle
1169  * @buf: received data buffer
1170  * @count: length of received buffer
1171  * @ppos: Not used
1172  *
1173  * Return: count
1174  */
1175 #define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
1176 	static ssize_t debug_wmi_##func_base##_write(struct file *file,	\
1177 				const char __user *buf,			\
1178 				size_t count, loff_t *ppos)		\
1179 	{								\
1180 		int k, ret;						\
1181 		wmi_unified_t wmi_handle =				\
1182 			((struct seq_file *)file->private_data)->private;\
1183 		struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info.	\
1184 				wmi_##func_base##_buf_info;		\
1185 		char locbuf[50];					\
1186 									\
1187 		if ((!buf) || (count > 50))				\
1188 			return -EFAULT;					\
1189 									\
1190 		if (copy_from_user(locbuf, buf, count))			\
1191 			return -EFAULT;					\
1192 									\
1193 		ret = sscanf(locbuf, "%d", &k);				\
1194 		if ((ret != 1) || (k != 0)) {                           \
1195 			wmi_err("Wrong input, echo 0 to clear the wmi buffer");\
1196 			return -EINVAL;					\
1197 		}							\
1198 									\
1199 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
1200 		qdf_mem_zero(wmi_log->buf, wmi_ring_size *		\
1201 				sizeof(struct wmi_record_type));	\
1202 		wmi_log->length = 0;					\
1203 		*(wmi_log->p_buf_tail_idx) = 0;				\
1204 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1205 									\
1206 		return count;						\
1207 	}
1208 
1209 GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_cmd_log_max_entry,
1210 			   wmi_command_debug);
1211 GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_cmd_cmpl_log_max_entry,
1212 			   wmi_command_cmp_debug);
1213 GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_event_log_max_entry,
1214 			   wmi_event_debug);
1215 GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_event_log_max_entry,
1216 			   wmi_event_debug);
1217 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_tx_log_max_entry,
1218 			   wmi_command_debug);
1219 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log,
1220 			   wmi_mgmt_tx_cmpl_log_max_entry, wmi_command_debug);
1221 GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_rx_log_max_entry,
1222 			   wmi_event_debug);
1223 
1224 /**
1225  * debug_wmi_enable_write() - debugfs function to enable/disable the
1226  * wmi logging feature.
1227  *
1228  * @file: file handler to access wmi_handle
1229  * @buf: received data buffer
1230  * @count: length of received buffer
1231  * @ppos: Not used
1232  *
1233  * Return: count
1234  */
1235 static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf,
1236 					size_t count, loff_t *ppos)
1237 {
1238 	wmi_unified_t wmi_handle =
1239 		((struct seq_file *)file->private_data)->private;
1240 	int k, ret;
1241 	char locbuf[50];
1242 
1243 	if ((!buf) || (count > 50))
1244 		return -EFAULT;
1245 
1246 	if (copy_from_user(locbuf, buf, count))
1247 		return -EFAULT;
1248 
1249 	ret = sscanf(locbuf, "%d", &k);
1250 	if ((ret != 1) || ((k != 0) && (k != 1)))
1251 		return -EINVAL;
1252 
1253 	wmi_handle->log_info.wmi_logging_enable = k;
1254 	return count;
1255 }
1256 
1257 /**
1258  * debug_wmi_log_size_write() - reserved.
1259  *
1260  * @file: file handler to access wmi_handle
1261  * @buf: received data buffer
1262  * @count: length of received buffer
1263  * @ppos: Not used
1264  *
1265  * Return: count
1266  */
1267 static ssize_t debug_wmi_log_size_write(struct file *file,
1268 		const char __user *buf, size_t count, loff_t *ppos)
1269 {
1270 	return -EINVAL;
1271 }
1272 
1273 /* Structure to maintain debug information */
1274 struct wmi_debugfs_info {
1275 	const char *name;
1276 	const struct file_operations *ops;
1277 };
1278 
1279 #define DEBUG_FOO(func_base) { .name = #func_base,			\
1280 	.ops = &debug_##func_base##_ops }
1281 
1282 /*
1283  * debug_##func_base##_open() - Open the debugfs entry for the respective
1284  * command or event buffer.
1285  *
1286  * @inode: node for debug dir entry
1287  * @file: file handler
1288  *
1289  * Return: open status
1290  */
1291 #define GENERATE_DEBUG_STRUCTS(func_base)				\
1292 	static int debug_##func_base##_open(struct inode *inode,	\
1293 						struct file *file)	\
1294 	{								\
1295 		return single_open(file, debug_##func_base##_show,	\
1296 				inode->i_private);			\
1297 	}								\
1298 									\
1299 									\
1300 	static struct file_operations debug_##func_base##_ops = {	\
1301 		.open		= debug_##func_base##_open,		\
1302 		.read		= seq_read,				\
1303 		.llseek		= seq_lseek,				\
1304 		.write		= debug_##func_base##_write,		\
1305 		.release	= single_release,			\
1306 	};
1307 
1308 GENERATE_DEBUG_STRUCTS(wmi_command_log);
1309 GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log);
1310 GENERATE_DEBUG_STRUCTS(wmi_event_log);
1311 GENERATE_DEBUG_STRUCTS(wmi_rx_event_log);
1312 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log);
1313 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log);
1314 GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log);
1315 GENERATE_DEBUG_STRUCTS(wmi_enable);
1316 GENERATE_DEBUG_STRUCTS(wmi_log_size);
1317 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1318 GENERATE_DEBUG_STRUCTS(filtered_wmi_cmds);
1319 GENERATE_DEBUG_STRUCTS(filtered_wmi_evts);
1320 GENERATE_DEBUG_STRUCTS(wmi_filtered_command_log);
1321 GENERATE_DEBUG_STRUCTS(wmi_filtered_event_log);
1322 #endif
1323 
1324 struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = {
1325 	DEBUG_FOO(wmi_command_log),
1326 	DEBUG_FOO(wmi_command_tx_cmp_log),
1327 	DEBUG_FOO(wmi_event_log),
1328 	DEBUG_FOO(wmi_rx_event_log),
1329 	DEBUG_FOO(wmi_mgmt_command_log),
1330 	DEBUG_FOO(wmi_mgmt_command_tx_cmp_log),
1331 	DEBUG_FOO(wmi_mgmt_event_log),
1332 	DEBUG_FOO(wmi_enable),
1333 	DEBUG_FOO(wmi_log_size),
1334 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1335 	DEBUG_FOO(filtered_wmi_cmds),
1336 	DEBUG_FOO(filtered_wmi_evts),
1337 	DEBUG_FOO(wmi_filtered_command_log),
1338 	DEBUG_FOO(wmi_filtered_event_log),
1339 #endif
1340 };
1341 
1342 /**
1343  * wmi_debugfs_create() - Create debugfs entries for wmi logging.
1344  *
1345  * @wmi_handle: wmi handle
1346  * @par_entry: debug directory entry
1347  *
1348  * Return: none
1349  */
1350 static void wmi_debugfs_create(wmi_unified_t wmi_handle,
1351 			       struct dentry *par_entry)
1352 {
1353 	int i;
1354 
1355 	if (!par_entry)
1356 		goto out;
1357 
1358 	for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1359 		wmi_handle->debugfs_de[i] = qdf_debugfs_create_entry(
1360 						wmi_debugfs_infos[i].name,
1361 						WMI_INFOS_DBG_FILE_PERM,
1362 						par_entry,
1363 						wmi_handle,
1364 						wmi_debugfs_infos[i].ops);
1365 
1366 		if (!wmi_handle->debugfs_de[i]) {
1367 			wmi_err("debug Entry creation failed!");
1368 			goto out;
1369 		}
1370 	}
1371 
1372 	return;
1373 
1374 out:
1375 	wmi_err("debug Entry creation failed!");
1376 	wmi_log_buffer_free(wmi_handle);
1377 	return;
1378 }
1379 
1380 /**
1381  * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
1382  * @wmi_handle: wmi handle
1383  *
1384  * Return: none
1385  */
1386 static void wmi_debugfs_remove(wmi_unified_t wmi_handle)
1387 {
1388 	int i;
1389 	struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir;
1390 
1391 	if (dentry) {
1392 		for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1393 			if (wmi_handle->debugfs_de[i])
1394 				wmi_handle->debugfs_de[i] = NULL;
1395 		}
1396 	}
1397 
1398 	if (dentry)
1399 		qdf_debugfs_remove_dir_recursive(dentry);
1400 }
1401 
1402 /**
1403  * wmi_debugfs_init() - create the wmi logging debugfs directory and its
1404  *                      entries.
1405  * @wmi_handle: wmi handler
1406  * @pdev_idx: pdev id
1407  *
1408  * Return: init status
1409  */
1410 static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx)
1411 {
1412 	char buf[32];
1413 
1414 	snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u",
1415 		 wmi_handle->soc->soc_idx, pdev_idx);
1416 
1417 	wmi_handle->log_info.wmi_log_debugfs_dir =
1418 		qdf_debugfs_create_dir(buf, NULL);
1419 
1420 	if (!wmi_handle->log_info.wmi_log_debugfs_dir) {
1421 		wmi_err("error while creating debugfs dir for %s", buf);
1422 		return QDF_STATUS_E_FAILURE;
1423 	}
1424 	wmi_debugfs_create(wmi_handle,
1425 			   wmi_handle->log_info.wmi_log_debugfs_dir);
1426 
1427 	return QDF_STATUS_SUCCESS;
1428 }
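
/*
 * Usage sketch, not part of the original file, assuming debugfs is mounted
 * at /sys/kernel/debug and that qdf_debugfs_create_dir() with a NULL parent
 * places the directory directly under the debugfs root: for pdev 0 of SOC 0
 * the directory is WMI_SOC0_PDEV0, so the entries in wmi_debugfs_infos[]
 * can be read and cleared from user space, e.g.
 *
 *   cat /sys/kernel/debug/WMI_SOC0_PDEV0/wmi_command_log
 *   echo 0 > /sys/kernel/debug/WMI_SOC0_PDEV0/wmi_command_log
 *
 * The read path goes through debug_wmi_command_log_show() and the write
 * path through debug_wmi_command_log_write() generated above.
 */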
1429 
1430 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1431 			void *header, uint32_t vdev_id, uint32_t chanfreq)
1432 {
1433 
1434 	uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE];
1435 
1436 	data[0] = ((struct wmi_command_header *)header)->type;
1437 	data[1] = ((struct wmi_command_header *)header)->sub_type;
1438 	data[2] = vdev_id;
1439 	data[3] = chanfreq;
1440 
1441 	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
1442 
1443 	WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data);
1444 	wmi_specific_cmd_record(wmi_handle, cmd, (uint8_t *)data);
1445 	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
1446 }
1447 #else
1448 static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { }
1449 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1450 			void *header, uint32_t vdev_id, uint32_t chanfreq) { }
1451 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { }
1452 static void wmi_minidump_detach(struct wmi_unified *wmi_handle) { }
1453 static void wmi_minidump_attach(struct wmi_unified *wmi_handle) { }
1454 static void wmi_dump_last_cmd_rec_info(wmi_unified_t wmi_handle) { }
1455 #endif /*WMI_INTERFACE_EVENT_LOGGING */
1456 qdf_export_symbol(wmi_mgmt_cmd_record);
1457 
1458 #ifdef WMI_EXT_DBG
1459 
1460 /**
1461  * wmi_ext_dbg_msg_enqueue() - enqueue wmi message
1462  * @wmi_handle: wmi handler
1463  * @msg: WMI message
1464  *
1465  * Return: size of wmi message queue after enqueue
1466  */
1467 static uint32_t wmi_ext_dbg_msg_enqueue(struct wmi_unified *wmi_handle,
1468 					struct wmi_ext_dbg_msg *msg)
1469 {
1470 	uint32_t list_size;
1471 
1472 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1473 	qdf_list_insert_back_size(&wmi_handle->wmi_ext_dbg_msg_queue,
1474 				  &msg->node, &list_size);
1475 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1476 
1477 	return list_size;
1478 }
1479 
1480 /**
1481  * wmi_ext_dbg_msg_dequeue() - dequeue wmi message
1482  * @wmi_handle: wmi handler
1483  *
1484  * Return: wmi msg on success else NULL
1485  */
1486 static struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_dequeue(struct wmi_unified
1487 						       *wmi_handle)
1488 {
1489 	qdf_list_node_t *list_node = NULL;
1490 
1491 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1492 	qdf_list_remove_front(&wmi_handle->wmi_ext_dbg_msg_queue, &list_node);
1493 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1494 
1495 	if (!list_node)
1496 		return NULL;
1497 
1498 	return qdf_container_of(list_node, struct wmi_ext_dbg_msg, node);
1499 }
1500 
1501 /**
1502  * wmi_ext_dbg_msg_record() - record wmi messages
1503  * @wmi_handle: wmi handler
1504  * @buf: wmi message buffer
1505  * @len: wmi message length
1506  * @type: wmi message type
1507  *
1508  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1509  */
1510 static QDF_STATUS wmi_ext_dbg_msg_record(struct wmi_unified *wmi_handle,
1511 					 uint8_t *buf, uint32_t len,
1512 					 enum WMI_MSG_TYPE type)
1513 {
1514 	struct wmi_ext_dbg_msg *msg;
1515 	uint32_t list_size;
1516 
1517 	msg = wmi_ext_dbg_msg_get(len);
1518 	if (!msg)
1519 		return QDF_STATUS_E_NOMEM;
1520 
1521 	msg->len = len;
1522 	msg->type = type;
1523 	qdf_mem_copy(msg->buf, buf, len);
1524 	msg->ts = qdf_get_log_timestamp();
1525 	list_size = wmi_ext_dbg_msg_enqueue(wmi_handle, msg);
1526 
1527 	if (list_size >= wmi_handle->wmi_ext_dbg_msg_queue_size) {
1528 		msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1529 		wmi_ext_dbg_msg_put(msg);
1530 	}
1531 
1532 	return QDF_STATUS_SUCCESS;
1533 }
1534 
1535 /**
1536  * wmi_ext_dbg_msg_cmd_record() - record wmi command messages
1537  * @wmi_handle: wmi handler
1538  * @buf: wmi command buffer
1539  * @len: wmi command message length
1540  *
1541  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1542  */
1543 static QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified *wmi_handle,
1544 					     uint8_t *buf, uint32_t len)
1545 {
1546 	return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1547 				      WMI_MSG_TYPE_CMD);
1548 }
1549 
1550 /**
1551  * wmi_ext_dbg_msg_event_record() - record wmi event messages
1552  * @wmi_handle: wmi handler
1553  * @buf: wmi event buffer
1554  * @len: wmi event message length
1555  *
1556  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1557  */
1558 static QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified *wmi_handle,
1559 					       uint8_t *buf, uint32_t len)
1560 {
1561 	uint32_t id;
1562 
1563 	id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
1564 	if (id != wmi_handle->wmi_events[wmi_diag_event_id])
1565 		return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1566 					      WMI_MSG_TYPE_EVENT);
1567 
1568 	return QDF_STATUS_SUCCESS;
1569 }
1570 
1571 /**
1572  * wmi_ext_dbg_msg_queue_init() - create debugfs queue and associated lock
1573  * @wmi_handle: wmi handler
1574  *
1575  * Return: none
1576  */
1577 static void wmi_ext_dbg_msg_queue_init(struct wmi_unified *wmi_handle)
1578 {
1579 	qdf_list_create(&wmi_handle->wmi_ext_dbg_msg_queue,
1580 			wmi_handle->wmi_ext_dbg_msg_queue_size);
1581 	qdf_spinlock_create(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1582 }
1583 
1584 /**
1585  * wmi_ext_dbg_msg_queue_deinit() - destroy debugfs queue and associated lock
1586  * @wmi_handle: wmi handler
1587  *
1588  * Return: none
1589  */
1590 static void wmi_ext_dbg_msg_queue_deinit(struct wmi_unified *wmi_handle)
1591 {
1592 	qdf_list_destroy(&wmi_handle->wmi_ext_dbg_msg_queue);
1593 	qdf_spinlock_destroy(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1594 }
1595 
1596 /**
1597  * wmi_ext_dbg_msg_show() - debugfs function to display the whole content of
1598  * wmi command/event messages, including headers.
1599  * @file: qdf debugfs file handler
1600  * @arg: pointer to wmi handler
1601  *
1602  * Return: QDF_STATUS_SUCCESS if all the messages are shown successfully,
1603  * else QDF_STATUS_E_AGAIN if there is more data to show.
1604  */
1605 static QDF_STATUS wmi_ext_dbg_msg_show(qdf_debugfs_file_t file, void *arg)
1606 {
1607 	struct wmi_unified *wmi_handle = (struct wmi_unified *)arg;
1608 	struct wmi_ext_dbg_msg *msg;
1609 	uint64_t secs, usecs;
1610 
1611 	msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1612 	if (!msg)
1613 		return QDF_STATUS_SUCCESS;
1614 
1615 	qdf_debugfs_printf(file, "%s: 0x%x\n",
1616 			   msg->type == WMI_MSG_TYPE_CMD ? "COMMAND" :
1617 			   "EVENT", WMI_GET_FIELD(msg->buf, WMI_CMD_HDR,
1618 						  COMMANDID));
1619 	qdf_log_timestamp_to_secs(msg->ts, &secs, &usecs);
1620 	qdf_debugfs_printf(file, "Time: %llu.%llu\n", secs, usecs);
1621 	qdf_debugfs_printf(file, "Length:%d\n", msg->len);
1622 	qdf_debugfs_hexdump(file, msg->buf, msg->len,
1623 			    WMI_EXT_DBG_DUMP_ROW_SIZE,
1624 			    WMI_EXT_DBG_DUMP_GROUP_SIZE);
1625 	qdf_debugfs_printf(file, "\n");
1626 
1627 	if (qdf_debugfs_overflow(file)) {
1628 		qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1629 		qdf_list_insert_front(&wmi_handle->wmi_ext_dbg_msg_queue,
1630 				      &msg->node);
1631 		qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1632 
1633 	} else {
1634 		wmi_ext_dbg_msg_put(msg);
1635 	}
1636 
1637 	return QDF_STATUS_E_AGAIN;
1638 }
1639 
1640 /**
1641  * wmi_ext_dbg_msg_write() - debugfs write not supported
1642  * @priv: private data
1643  * @buf: received data buffer
1644  * @len: length of received buffer
1645  *
1646  * Return: QDF_STATUS_E_NOSUPPORT.
1647  */
1648 static QDF_STATUS wmi_ext_dbg_msg_write(void *priv, const char *buf,
1649 					qdf_size_t len)
1650 {
1651 	return QDF_STATUS_E_NOSUPPORT;
1652 }
1653 
1654 static struct qdf_debugfs_fops wmi_ext_dbgfs_ops[WMI_MAX_RADIOS];
1655 
1656 /**
1657  * wmi_ext_dbgfs_init() - init debugfs items for extended wmi dump.
1658  * @wmi_handle: wmi handler
1659  * @pdev_idx: pdev index
1660  *
1661  * Return: QDF_STATUS_SUCCESS if debugfs is initialized else
1662  * QDF_STATUS_E_FAILURE
1663  */
1664 static QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1665 				     uint32_t pdev_idx)
1666 {
1667 	qdf_dentry_t dentry;
1668 	char buf[32];
1669 
1670 	/* To maintain backward compatibility, the naming convention for the
1671 	 * PDEV 0 dentry of SOC 0 is kept the same as before. For any other
1672 	 * SOC/PDEV, the dentry name carries the SOC and PDEV indices.
1673 	 */
1674 	if (wmi_handle->soc->soc_idx == 0 && pdev_idx == 0) {
1675 		dentry  = qdf_debugfs_create_dir(WMI_EXT_DBG_DIR, NULL);
1676 	} else {
1677 		snprintf(buf, sizeof(buf), "WMI_EXT_DBG_SOC%u_PDEV%u",
1678 			 wmi_handle->soc->soc_idx, pdev_idx);
1679 		dentry  = qdf_debugfs_create_dir(buf, NULL);
1680 	}
1681 
1682 	if (!dentry) {
1683 		wmi_err("error while creating extended wmi debugfs dir");
1684 		return QDF_STATUS_E_FAILURE;
1685 	}
1686 
1687 	wmi_ext_dbgfs_ops[pdev_idx].show = wmi_ext_dbg_msg_show;
1688 	wmi_ext_dbgfs_ops[pdev_idx].write = wmi_ext_dbg_msg_write;
1689 	wmi_ext_dbgfs_ops[pdev_idx].priv = wmi_handle;
1690 	if (!qdf_debugfs_create_file(WMI_EXT_DBG_FILE, WMI_EXT_DBG_FILE_PERM,
1691 				     dentry, &wmi_ext_dbgfs_ops[pdev_idx])) {
1692 		qdf_debugfs_remove_dir(dentry);
1693 		wmi_err("Error while creating extended wmi debugfs file");
1694 		return QDF_STATUS_E_FAILURE;
1695 	}
1696 
1697 	wmi_handle->wmi_ext_dbg_dentry = dentry;
1698 	wmi_handle->wmi_ext_dbg_msg_queue_size = WMI_EXT_DBG_QUEUE_SIZE;
1699 	wmi_ext_dbg_msg_queue_init(wmi_handle);
1700 
1701 	return QDF_STATUS_SUCCESS;
1702 }
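/*
 * Illustrative layout (not part of the original source) of the extended WMI
 * dump debugfs entries created by wmi_ext_dbgfs_init() above:
 *
 *	soc_idx == 0, pdev_idx == 0  ->  <WMI_EXT_DBG_DIR>/<WMI_EXT_DBG_FILE>
 *	soc_idx == 0, pdev_idx == 1  ->  WMI_EXT_DBG_SOC0_PDEV1/<WMI_EXT_DBG_FILE>
 *
 * Each pdev owns one slot in wmi_ext_dbgfs_ops[], so reads on its file drain
 * that pdev's message queue through wmi_ext_dbg_msg_show().
 */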
1703 
1704 /**
1705  * wmi_ext_dbgfs_deinit() - cleanup/deinit debugfs items of extended wmi dump.
1706  * @wmi_handle: wmi handler
1707  *
1708  * Return: QDF_STATUS_SUCCESS if cleanup is successful
1709  */
1710 static QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1711 {
1712 	struct wmi_ext_dbg_msg *msg;
1713 
1714 	while ((msg = wmi_ext_dbg_msg_dequeue(wmi_handle)))
1715 		wmi_ext_dbg_msg_put(msg);
1716 
1717 	wmi_ext_dbg_msg_queue_deinit(wmi_handle);
1718 	qdf_debugfs_remove_dir_recursive(wmi_handle->wmi_ext_dbg_dentry);
1719 
1720 	return QDF_STATUS_SUCCESS;
1721 }
1722 
1723 #else
1724 
1725 static inline QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified
1726 						    *wmi_handle,
1727 						    uint8_t *buf, uint32_t len)
1728 {
1729 		return QDF_STATUS_SUCCESS;
1730 }
1731 
1732 static inline QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified
1733 						      *wmi_handle,
1734 						      uint8_t *buf, uint32_t len)
1735 {
1736 		return QDF_STATUS_SUCCESS;
1737 }
1738 
1739 static inline QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1740 					    uint32_t pdev_idx)
1741 {
1742 		return QDF_STATUS_SUCCESS;
1743 }
1744 
1745 static inline QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1746 {
1747 		return QDF_STATUS_SUCCESS;
1748 }
1749 
1750 #endif /*WMI_EXT_DBG */
1751 
1752 int wmi_get_host_credits(wmi_unified_t wmi_handle);
1753 /* WMI buffer APIs */
1754 
1755 #ifdef NBUF_MEMORY_DEBUG
1756 wmi_buf_t
1757 wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len,
1758 		    const char *func_name,
1759 		    uint32_t line_num)
1760 {
1761 	wmi_buf_t wmi_buf;
1762 
1763 	if (roundup(len, 4) > wmi_handle->max_msg_len) {
1764 		wmi_err("Invalid length %u (via %s:%u) max size: %u",
1765 			len, func_name, line_num,
1766 			wmi_handle->max_msg_len);
1767 		QDF_ASSERT(0);
1768 		return NULL;
1769 	}
1770 
1771 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, WBUFF_MAX_POOL_ID,
1772 				 len, func_name, line_num);
1773 	if (!wmi_buf)
1774 		wmi_buf = qdf_nbuf_alloc_debug(NULL,
1775 					       roundup(len + WMI_MIN_HEAD_ROOM,
1776 						       4),
1777 					       WMI_MIN_HEAD_ROOM, 4, false,
1778 					       func_name, line_num);
1779 	if (!wmi_buf)
1780 		return NULL;
1781 
1782 	/* Clear the wmi buffer */
1783 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1784 
1785 	/*
1786 	 * Set the length of the buffer to match the allocation size.
1787 	 */
1788 	qdf_nbuf_set_pktlen(wmi_buf, len);
1789 
1790 	return wmi_buf;
1791 }
1792 qdf_export_symbol(wmi_buf_alloc_debug);
1793 
1794 void wmi_buf_free(wmi_buf_t net_buf)
1795 {
1796 	net_buf = wbuff_buff_put(net_buf);
1797 	if (net_buf)
1798 		qdf_nbuf_free(net_buf);
1799 }
1800 qdf_export_symbol(wmi_buf_free);
1801 #else
1802 wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len,
1803 			   const char *func, uint32_t line)
1804 {
1805 	wmi_buf_t wmi_buf;
1806 
1807 	if (roundup(len, 4) > wmi_handle->max_msg_len) {
1808 		QDF_DEBUG_PANIC("Invalid length %u (via %s:%u) max size: %u",
1809 				len, func, line, wmi_handle->max_msg_len);
1810 		return NULL;
1811 	}
1812 
1813 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, WBUFF_MAX_POOL_ID,
1814 				 len, __func__, __LINE__);
1815 	if (!wmi_buf)
1816 		wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len +
1817 				WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4,
1818 				false, func, line);
1819 
1820 	if (!wmi_buf) {
1821 		wmi_nofl_err("%s:%d, failed to alloc len:%d", func, line, len);
1822 		return NULL;
1823 	}
1824 
1825 	/* Clear the wmi buffer */
1826 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1827 
1828 	/*
1829 	 * Set the length of the buffer to match the allocation size.
1830 	 */
1831 	qdf_nbuf_set_pktlen(wmi_buf, len);
1832 
1833 	return wmi_buf;
1834 }
1835 qdf_export_symbol(wmi_buf_alloc_fl);
1836 
1837 void wmi_buf_free(wmi_buf_t net_buf)
1838 {
1839 	net_buf = wbuff_buff_put(net_buf);
1840 	if (net_buf)
1841 		qdf_nbuf_free(net_buf);
1842 }
1843 qdf_export_symbol(wmi_buf_free);
1844 #endif
1845 
1846 uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle)
1847 {
1848 	return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM;
1849 }
1850 qdf_export_symbol(wmi_get_max_msg_len);
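/*
 * Illustrative note (not part of the original source): the allocation helpers
 * above reject any request where roundup(len, 4) exceeds max_msg_len, while
 * wmi_get_max_msg_len() reports max_msg_len minus WMI_MIN_HEAD_ROOM, i.e. the
 * payload space left once the head room for the WMI/HTC headers is reserved.
 * With hypothetical values max_msg_len == 4096 and WMI_MIN_HEAD_ROOM == 64,
 * callers should therefore size payloads against 4032, not 4096.
 */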
1851 
1852 #ifndef WMI_CMD_STRINGS
1853 static uint8_t *wmi_id_to_name(uint32_t wmi_command)
1854 {
1855 	return "Invalid WMI cmd";
1856 }
1857 #endif
1858 
1859 static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag)
1860 {
1861 	wmi_nofl_debug("Send cmd %s(0x%x) tag:%d",
1862 		       wmi_id_to_name(cmd_id), cmd_id, tag);
1863 }
1864 
1865 /**
1866  * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence
1867  * @cmd_id: command to check
1868  *
1869  * Return: true if the command is part of the resume sequence.
1870  */
1871 #ifdef WLAN_POWER_MANAGEMENT_OFFLOAD
1872 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1873 {
1874 	switch (cmd_id) {
1875 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
1876 	case WMI_PDEV_RESUME_CMDID:
1877 		return true;
1878 
1879 	default:
1880 		return false;
1881 	}
1882 }
1883 
1884 #else
1885 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1886 {
1887 	return false;
1888 }
1889 
1890 #endif
1891 
1892 #ifdef FEATURE_WLAN_D0WOW
1893 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1894 {
1895 	wmi_d0_wow_enable_disable_cmd_fixed_param *cmd;
1896 
1897 	if (cmd_id == WMI_D0_WOW_ENABLE_DISABLE_CMDID) {
1898 		cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *)
1899 			wmi_buf_data(buf);
1900 		if (!cmd->enable)
1901 			return true;
1902 		else
1903 			return false;
1904 	}
1905 
1906 	return false;
1907 }
1908 #else
1909 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1910 {
1911 	return false;
1912 }
1913 
1914 #endif
1915 
1916 #ifdef WMI_INTERFACE_SEQUENCE_CHECK
1917 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1918 {
1919 	wmi_handle->wmi_sequence = 0;
1920 	wmi_handle->wmi_exp_sequence = 0;
1921 	wmi_handle->wmi_sequence_stop = false;
1922 }
1923 
1924 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1925 {
1926 	qdf_spinlock_create(&wmi_handle->wmi_seq_lock);
1927 	wmi_interface_sequence_reset(wmi_handle);
1928 }
1929 
1930 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1931 {
1932 	qdf_spinlock_destroy(&wmi_handle->wmi_seq_lock);
1933 }
1934 
1935 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1936 {
1937 	wmi_handle->wmi_sequence_stop = true;
1938 }
1939 
1940 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1941 					  HTC_PACKET *pkt,
1942 					  const char *func, uint32_t line)
1943 {
1944 	wmi_buf_t buf = GET_HTC_PACKET_NET_BUF_CONTEXT(pkt);
1945 	QDF_STATUS status;
1946 
1947 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1948 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1949 	if (QDF_STATUS_SUCCESS != status) {
1950 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1951 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1952 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1953 			     func, line, status);
1954 		qdf_mem_free(pkt);
1955 		return status;
1956 	}
1957 	/* Record the sequence number in the SKB */
1958 	qdf_nbuf_set_mark(buf, wmi_handle->wmi_sequence);
1959 	/* Increment the sequence number */
1960 	wmi_handle->wmi_sequence = (wmi_handle->wmi_sequence + 1)
1961 				   & (wmi_handle->wmi_max_cmds - 1);
1962 	qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1963 
1964 	return status;
1965 }
1966 
1967 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1968 						wmi_buf_t buf)
1969 {
1970 	/* Skip sequence check when wmi sequence stop is set */
1971 	if (wmi_handle->wmi_sequence_stop)
1972 		return;
1973 
1974 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1975 	/* Match the completion sequence and expected sequence number */
1976 	if (qdf_nbuf_get_mark(buf) != wmi_handle->wmi_exp_sequence) {
1977 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1978 		wmi_nofl_err("WMI Tx Completion Sequence number mismatch");
1979 		wmi_nofl_err("Expected %d Received %d",
1980 			     wmi_handle->wmi_exp_sequence,
1981 			     qdf_nbuf_get_mark(buf));
1982 		/* Trigger Recovery */
1983 		qdf_trigger_self_recovery(wmi_handle->soc,
1984 					  QDF_WMI_BUF_SEQUENCE_MISMATCH);
1985 	} else {
1986 		/* Increment the expected sequence number */
1987 		wmi_handle->wmi_exp_sequence =
1988 				(wmi_handle->wmi_exp_sequence + 1)
1989 				& (wmi_handle->wmi_max_cmds - 1);
1990 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1991 	}
1992 }
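/*
 * Illustrative note (not part of the original source): the wrap-around used
 * above, (seq + 1) & (wmi_max_cmds - 1), is only equivalent to a modulo when
 * wmi_max_cmds is a power of two. With a hypothetical wmi_max_cmds == 1024:
 *
 *	(1023 + 1) & (1024 - 1) == 0
 *
 * The same mask is applied to wmi_sequence (stamped into the nbuf mark at
 * send time) and to wmi_exp_sequence (checked at tx completion), so the two
 * counters remain comparable across the wrap.
 */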
1993 #else
1994 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1995 {
1996 }
1997 
1998 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1999 {
2000 }
2001 
2002 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
2003 {
2004 }
2005 
2006 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
2007 {
2008 }
2009 
2010 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
2011 					  HTC_PACKET *pkt,
2012 					  const char *func, uint32_t line)
2013 {
2014 	QDF_STATUS status;
2015 
2016 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
2017 	if (QDF_STATUS_SUCCESS != status) {
2018 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2019 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
2020 			     func, line, status);
2021 		qdf_mem_free(pkt);
2022 		return status;
2023 	}
2024 
2025 	return status;
2026 }
2027 
2028 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
2029 						wmi_buf_t buf)
2030 {
2031 }
2032 #endif
2033 
2034 static inline void wmi_unified_debug_dump(wmi_unified_t wmi_handle)
2035 {
2036 	wmi_nofl_err("Endpoint ID = %d, Tx Queue Depth = %d, soc_id = %u, target type = %s",
2037 		     wmi_handle->wmi_endpoint_id,
2038 		     htc_get_tx_queue_depth(wmi_handle->htc_handle,
2039 					    wmi_handle->wmi_endpoint_id),
2040 		     wmi_handle->soc->soc_idx,
2041 		     (wmi_handle->target_type ==
2042 		      WMI_TLV_TARGET ? "WMI_TLV_TARGET" :
2043 						"WMI_NON_TLV_TARGET"));
2044 }
2045 
2046 #ifdef SYSTEM_PM_CHECK
2047 /**
2048  * wmi_set_system_pm_pkt_tag() - API to set tag for system pm packets
2049  * @htc_tag: HTC tag
2050  * @buf: wmi cmd buffer
2051  * @cmd_id: cmd id
2052  *
2053  * Return: None
2054  */
2055 static void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
2056 				      uint32_t cmd_id)
2057 {
2058 	switch (cmd_id) {
2059 	case WMI_WOW_ENABLE_CMDID:
2060 	case WMI_PDEV_SUSPEND_CMDID:
2061 		*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
2062 		break;
2063 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
2064 	case WMI_PDEV_RESUME_CMDID:
2065 		*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
2066 		break;
2067 	case WMI_D0_WOW_ENABLE_DISABLE_CMDID:
2068 		if (wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id))
2069 			*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
2070 		else
2071 			*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
2072 		break;
2073 	default:
2074 		break;
2075 	}
2076 }
2077 #else
2078 static inline void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
2079 					     uint32_t cmd_id)
2080 {
2081 }
2082 #endif
2083 
2084 #ifdef DP_UMAC_HW_RESET_SUPPORT
2085 /**
2086  * wmi_unified_is_max_pending_commands_reached() - API to check if WMI max
2087  * pending commands are reached.
2088  * @wmi_handle: Pointer to WMI handle
2089  *
2090  * Return: true only when the maximum number of pending WMI commands has been
2091  * reached and no umac reset is in progress. While umac reset is in progress,
2092  * FW will not reap WMI commands from the CE ring, so the pending commands
2093  * stay queued in the host SW ring and false is returned instead.
2094  */
2095 static inline bool
2096 wmi_unified_is_max_pending_commands_reached(wmi_unified_t wmi_handle)
2097 {
2098 	ol_txrx_soc_handle soc_txrx_handle;
2099 
2100 	soc_txrx_handle = (ol_txrx_soc_handle)wlan_psoc_get_dp_handle(
2101 			wmi_handle->soc->wmi_psoc);
2102 	if (!soc_txrx_handle) {
2103 		wmi_err("psoc handle is NULL");
2104 		return false;
2105 	}
2106 
2107 	return ((qdf_atomic_read(&wmi_handle->pending_cmds) >=
2108 			wmi_handle->wmi_max_cmds) &&
2109 		!cdp_umac_reset_is_inprogress(soc_txrx_handle));
2110 }
2111 #else
2112 static inline bool
2113 wmi_unified_is_max_pending_commands_reached(wmi_unified_t wmi_handle)
2114 {
2115 	return (qdf_atomic_read(&wmi_handle->pending_cmds) >=
2116 			wmi_handle->wmi_max_cmds);
2117 }
2118 #endif
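/*
 * Illustrative note (not part of the original source): pending_cmds is
 * incremented in wmi_unified_cmd_send_fl() just before this threshold check
 * and decremented on every failure path shown in this file; the matching
 * decrement for commands actually handed to HTC happens in the WMI tx
 * completion path (outside this section). The comparison against
 * wmi_max_cmds therefore acts as a host-side flow-control watermark.
 */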
2119 
2120 QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf,
2121 				   uint32_t len, uint32_t cmd_id,
2122 				   const char *func, uint32_t line)
2123 {
2124 	HTC_PACKET *pkt;
2125 	uint16_t htc_tag = 0;
2126 	bool rtpm_inprogress;
2127 
2128 	rtpm_inprogress = wmi_get_runtime_pm_inprogress(wmi_handle);
2129 	if (rtpm_inprogress) {
2130 		htc_tag = wmi_handle->ops->wmi_set_htc_tx_tag(wmi_handle, buf,
2131 							      cmd_id);
2132 	} else if (qdf_atomic_read(&wmi_handle->is_target_suspended) &&
2133 		   !wmi_is_pm_resume_cmd(cmd_id) &&
2134 		   !wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id)) {
2135 			wmi_nofl_err("Target is suspended (via %s:%u)",
2136 				     func, line);
2137 			qdf_trigger_self_recovery(wmi_handle->soc->wmi_psoc,
2138 						  QDF_WMI_CMD_SENT_DURING_SUSPEND);
2139 		return QDF_STATUS_E_BUSY;
2140 	}
2141 
2142 	if (wmi_handle->wmi_stopinprogress) {
2143 		wmi_nofl_err("%s:%d, WMI stop in progress, wmi_handle:%pK",
2144 			     func, line, wmi_handle);
2145 		return QDF_STATUS_E_INVAL;
2146 	}
2147 
2148 	if (wmi_has_wow_enable_ack_failed(wmi_handle)) {
2149 		wmi_nofl_err("wow enable ack already failed(via %s:%u)",
2150 			     func, line);
2151 		return QDF_STATUS_E_INVAL;
2152 	}
2153 
2154 #ifndef WMI_NON_TLV_SUPPORT
2155 	/* Do sanity check on the TLV parameter structure */
2156 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2157 		void *buf_ptr = (void *)qdf_nbuf_data(buf);
2158 
2159 		if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id)
2160 			!= 0) {
2161 			wmi_nofl_err("%s:%d, Invalid WMI Param Buffer for Cmd:%d",
2162 				     func, line, cmd_id);
2163 			return QDF_STATUS_E_INVAL;
2164 		}
2165 	}
2166 #endif
2167 
2168 	if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
2169 		wmi_nofl_err("%s:%d, Failed to send cmd %x, no memory",
2170 			     func, line, cmd_id);
2171 		return QDF_STATUS_E_NOMEM;
2172 	}
2173 
2174 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2175 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2176 
2177 	qdf_atomic_inc(&wmi_handle->pending_cmds);
2178 	if (wmi_unified_is_max_pending_commands_reached(wmi_handle)) {
2179 		wmi_dump_last_cmd_rec_info(wmi_handle);
2180 		wmi_nofl_err("hostcredits = %d",
2181 			     wmi_get_host_credits(wmi_handle));
2182 		htc_dump_counter_info(wmi_handle->htc_handle);
2183 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2184 		wmi_nofl_err("%s:%d, MAX %d WMI Pending cmds reached",
2185 			     func, line, wmi_handle->wmi_max_cmds);
2186 		wmi_unified_debug_dump(wmi_handle);
2187 		htc_ce_tasklet_debug_dump(wmi_handle->htc_handle);
2188 		qdf_trigger_self_recovery(wmi_handle->soc->wmi_psoc,
2189 					  QDF_WMI_EXCEED_MAX_PENDING_CMDS);
2190 		return QDF_STATUS_E_BUSY;
2191 	}
2192 
2193 	pkt = qdf_mem_malloc_fl(sizeof(*pkt), func, line);
2194 	if (!pkt) {
2195 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2196 		return QDF_STATUS_E_NOMEM;
2197 	}
2198 
2199 	if (!rtpm_inprogress)
2200 		wmi_set_system_pm_pkt_tag(&htc_tag, buf, cmd_id);
2201 
2202 	SET_HTC_PACKET_INFO_TX(pkt,
2203 			       NULL,
2204 			       qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
2205 			       wmi_handle->wmi_endpoint_id, htc_tag);
2206 
2207 	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);
2208 	wmi_log_cmd_id(cmd_id, htc_tag);
2209 	wmi_ext_dbg_msg_cmd_record(wmi_handle,
2210 				   qdf_nbuf_data(buf), qdf_nbuf_len(buf));
2211 #ifdef WMI_INTERFACE_EVENT_LOGGING
2212 	if (wmi_handle->log_info.wmi_logging_enable) {
2213 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2214 		/*
2215 		 * Record 16 bytes of WMI cmd data -
2216 		 * exclude TLV and WMI headers
2217 		 *
2218 		 * WMI mgmt command already recorded in wmi_mgmt_cmd_record
2219 		 */
2220 		if (wmi_handle->ops->is_management_record(cmd_id) == false) {
2221 			uint8_t *tmpbuf = (uint8_t *)qdf_nbuf_data(buf) +
2222 				wmi_handle->soc->buf_offset_command;
2223 
2224 			WMI_COMMAND_RECORD(wmi_handle, cmd_id, tmpbuf);
2225 			wmi_specific_cmd_record(wmi_handle, cmd_id, tmpbuf);
2226 		}
2227 
2228 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2229 	}
2230 #endif
2231 	return wmi_htc_send_pkt(wmi_handle, pkt, func, line);
2232 }
2233 qdf_export_symbol(wmi_unified_cmd_send_fl);
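/*
 * Example (hedged usage sketch, not part of the original source): the typical
 * caller-side pattern around wmi_unified_cmd_send_fl(). wmi_unified_cmd_send()
 * is assumed to be the wrapper that supplies __func__/__LINE__;
 * "my_cmd_fixed_param" and MY_CMDID are hypothetical names used only for
 * illustration.
 *
 *	wmi_buf_t buf = wmi_buf_alloc(wmi_handle, sizeof(my_cmd_fixed_param));
 *
 *	if (!buf)
 *		return QDF_STATUS_E_NOMEM;
 *	// fill wmi_buf_data(buf) with the fixed param / TLVs for MY_CMDID
 *	status = wmi_unified_cmd_send(wmi_handle, buf,
 *				      sizeof(my_cmd_fixed_param), MY_CMDID);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		wmi_buf_free(buf);	// buffer is consumed only on success
 */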
2234 
2235 /**
2236  * wmi_unified_get_event_handler_ix() - gives event handler's index
2237  * @wmi_handle: handle to wmi
2238  * @event_id: wmi event id
2239  *
2240  * Return: event handler's index
2241  */
2242 static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
2243 					    uint32_t event_id)
2244 {
2245 	uint32_t idx = 0;
2246 	int32_t invalid_idx = -1;
2247 	struct wmi_soc *soc = wmi_handle->soc;
2248 
2249 	for (idx = 0; (idx < soc->max_event_idx &&
2250 		       idx < WMI_UNIFIED_MAX_EVENT); ++idx) {
2251 		if (wmi_handle->event_id[idx] == event_id &&
2252 		    wmi_handle->event_handler[idx]) {
2253 			return idx;
2254 		}
2255 	}
2256 
2257 	return invalid_idx;
2258 }
2259 
2260 /**
2261  * wmi_register_event_handler_with_ctx() - register event handler with
2262  * exec ctx and buffer type
2263  * @wmi_handle: handle to wmi
2264  * @event_id: wmi event id
2265  * @handler_func: wmi event handler function
2266  * @rx_ctx: rx execution context for wmi rx events
2267  * @rx_buf_type: rx buffer type for wmi rx events
2268  *
2269  * Return: QDF_STATUS_SUCCESS on successful register event else failure.
2270  */
2271 static QDF_STATUS
2272 wmi_register_event_handler_with_ctx(wmi_unified_t wmi_handle,
2273 				    uint32_t event_id,
2274 				    wmi_unified_event_handler handler_func,
2275 				    enum wmi_rx_exec_ctx rx_ctx,
2276 				    enum wmi_rx_buff_type rx_buf_type)
2277 {
2278 	uint32_t idx = 0;
2279 	uint32_t evt_id;
2280 	struct wmi_soc *soc;
2281 
2282 	if (!wmi_handle) {
2283 		wmi_err("WMI handle is NULL");
2284 		return QDF_STATUS_E_FAILURE;
2285 	}
2286 
2287 	soc = wmi_handle->soc;
2288 
2289 	if (event_id >= wmi_events_max) {
2290 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2291 			  "%s: Event id %d is unavailable",
2292 					__func__, event_id);
2293 		return QDF_STATUS_E_FAILURE;
2294 	}
2295 
2296 	if (wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2297 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
2298 			  "%s: Event id %d is not supported",
2299 			  __func__, event_id);
2300 		return QDF_STATUS_E_NOSUPPORT;
2301 	}
2302 	evt_id = wmi_handle->wmi_events[event_id];
2303 
2304 	if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) {
2305 		wmi_info("event handler already registered 0x%x", evt_id);
2306 		return QDF_STATUS_E_FAILURE;
2307 	}
2308 	if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
2309 		wmi_err("no more event handlers 0x%x",
2310 			 evt_id);
2311 		return QDF_STATUS_E_FAILURE;
2312 	}
2313 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
2314 		  "Registered event handler for event 0x%8x", evt_id);
2315 	idx = soc->max_event_idx;
2316 	wmi_handle->event_handler[idx] = handler_func;
2317 	wmi_handle->event_id[idx] = evt_id;
2318 
2319 	qdf_spin_lock_bh(&soc->ctx_lock);
2320 	wmi_handle->ctx[idx].exec_ctx = rx_ctx;
2321 	wmi_handle->ctx[idx].buff_type = rx_buf_type;
2322 	qdf_spin_unlock_bh(&soc->ctx_lock);
2323 	soc->max_event_idx++;
2324 
2325 	return QDF_STATUS_SUCCESS;
2326 }
2327 
2328 QDF_STATUS
2329 wmi_unified_register_event(wmi_unified_t wmi_handle,
2330 			   uint32_t event_id,
2331 			   wmi_unified_event_handler handler_func)
2332 {
2333 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2334 						   handler_func,
2335 						   WMI_RX_UMAC_CTX,
2336 						   WMI_RX_PROCESSED_BUFF);
2337 }
2338 
2339 QDF_STATUS
2340 wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
2341 				   wmi_conv_event_id event_id,
2342 				   wmi_unified_event_handler handler_func,
2343 				   uint8_t rx_ctx)
2344 {
2345 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2346 						   handler_func, rx_ctx,
2347 						   WMI_RX_PROCESSED_BUFF);
2348 }
2349 
2350 qdf_export_symbol(wmi_unified_register_event_handler);
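/*
 * Example (hedged usage sketch, not part of the original source): registering
 * a handler for a converted event ID, assuming the usual
 * wmi_unified_event_handler prototype (scn handle, event buffer, length).
 * "my_ready_event_handler" is a hypothetical name; wmi_ready_event_id is
 * assumed to be one of the wmi_conv_event_id values, and WMI_RX_UMAC_CTX is
 * the execution context already used in this file.
 *
 *	static int my_ready_event_handler(ol_scn_t scn, uint8_t *evt_buf,
 *					  uint32_t len)
 *	{
 *		// parse evt_buf via the wmi_ops extract helpers for this event
 *		return 0;
 *	}
 *
 *	wmi_unified_register_event_handler(wmi_handle, wmi_ready_event_id,
 *					   my_ready_event_handler,
 *					   WMI_RX_UMAC_CTX);
 */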
2351 
2352 QDF_STATUS
2353 wmi_unified_register_raw_event_handler(wmi_unified_t wmi_handle,
2354 				       wmi_conv_event_id event_id,
2355 				       wmi_unified_event_handler handler_func,
2356 				       enum wmi_rx_exec_ctx rx_ctx)
2357 {
2358 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2359 						   handler_func, rx_ctx,
2360 						   WMI_RX_RAW_BUFF);
2361 }
2362 
2363 qdf_export_symbol(wmi_unified_register_raw_event_handler);
2364 
2365 QDF_STATUS wmi_unified_unregister_event(wmi_unified_t wmi_handle,
2366 					uint32_t event_id)
2367 {
2368 	uint32_t idx = 0;
2369 	uint32_t evt_id;
2370 	struct wmi_soc *soc;
2371 
2372 	if (!wmi_handle) {
2373 		wmi_err("WMI handle is NULL");
2374 		return QDF_STATUS_E_FAILURE;
2375 	}
2376 
2377 	soc = wmi_handle->soc;
2378 	if (event_id >= wmi_events_max ||
2379 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2380 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2381 			  "%s: Event id %d is unavailable",
2382 					__func__, event_id);
2383 		return QDF_STATUS_E_FAILURE;
2384 	}
2385 	evt_id = wmi_handle->wmi_events[event_id];
2386 
2387 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2388 	if (idx == -1) {
2389 		wmi_warn("event handler is not registered: evt id 0x%x",
2390 			 evt_id);
2391 		return QDF_STATUS_E_FAILURE;
2392 	}
2393 	wmi_handle->event_handler[idx] = NULL;
2394 	wmi_handle->event_id[idx] = 0;
2395 	--soc->max_event_idx;
2396 	wmi_handle->event_handler[idx] =
2397 		wmi_handle->event_handler[soc->max_event_idx];
2398 	wmi_handle->event_id[idx] =
2399 		wmi_handle->event_id[soc->max_event_idx];
2400 
2401 	qdf_spin_lock_bh(&soc->ctx_lock);
2402 
2403 	wmi_handle->ctx[idx].exec_ctx =
2404 		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
2405 	wmi_handle->ctx[idx].buff_type =
2406 		wmi_handle->ctx[soc->max_event_idx].buff_type;
2407 
2408 	qdf_spin_unlock_bh(&soc->ctx_lock);
2409 
2410 	return QDF_STATUS_SUCCESS;
2411 }
2412 
2413 QDF_STATUS wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
2414 						wmi_conv_event_id event_id)
2415 {
2416 	uint32_t idx = 0;
2417 	uint32_t evt_id;
2418 	struct wmi_soc *soc;
2419 
2420 	if (!wmi_handle) {
2421 		wmi_err("WMI handle is NULL");
2422 		return QDF_STATUS_E_FAILURE;
2423 	}
2424 
2425 	soc = wmi_handle->soc;
2426 
2427 	if (event_id >= wmi_events_max) {
2428 		wmi_err("Event id %d is unavailable", event_id);
2429 		return QDF_STATUS_E_FAILURE;
2430 	}
2431 
2432 	if (wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2433 		wmi_debug("Event id %d is not supported", event_id);
2434 		return QDF_STATUS_E_NOSUPPORT;
2435 	}
2436 
2437 	evt_id = wmi_handle->wmi_events[event_id];
2438 
2439 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2440 	if (idx == -1) {
2441 		wmi_err("event handler is not registered: evt id 0x%x",
2442 			 evt_id);
2443 		return QDF_STATUS_E_FAILURE;
2444 	}
2445 	wmi_handle->event_handler[idx] = NULL;
2446 	wmi_handle->event_id[idx] = 0;
2447 	--soc->max_event_idx;
2448 	wmi_handle->event_handler[idx] =
2449 		wmi_handle->event_handler[soc->max_event_idx];
2450 	wmi_handle->event_id[idx] =
2451 		wmi_handle->event_id[soc->max_event_idx];
2452 
2453 	qdf_spin_lock_bh(&soc->ctx_lock);
2454 
2455 	wmi_handle->ctx[idx].exec_ctx =
2456 		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
2457 	wmi_handle->ctx[idx].buff_type =
2458 		wmi_handle->ctx[soc->max_event_idx].buff_type;
2459 
2460 	qdf_spin_unlock_bh(&soc->ctx_lock);
2461 
2462 	return QDF_STATUS_SUCCESS;
2463 }
2464 qdf_export_symbol(wmi_unified_unregister_event_handler);
2465 
2466 static void
2467 wmi_process_rx_diag_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2468 					    void *evt_buf)
2469 {
2470 	uint32_t num_diag_events_pending;
2471 
2472 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
2473 	if (RX_DIAG_WQ_MAX_SIZE > 0) {
2474 		num_diag_events_pending = qdf_nbuf_queue_len(
2475 						&wmi_handle->diag_event_queue);
2476 
2477 		if (num_diag_events_pending >= RX_DIAG_WQ_MAX_SIZE) {
2478 			qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2479 			wmi_handle->wmi_rx_diag_events_dropped++;
2480 			wmi_debug_rl("Rx diag events dropped count: %d",
2481 				     wmi_handle->wmi_rx_diag_events_dropped);
2482 			qdf_nbuf_free(evt_buf);
2483 			return;
2484 		}
2485 	}
2486 
2487 	qdf_nbuf_queue_add(&wmi_handle->diag_event_queue, evt_buf);
2488 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2489 	qdf_queue_work(0, wmi_handle->wmi_rx_diag_work_queue,
2490 		       &wmi_handle->rx_diag_event_work);
2491 }
2492 
2493 void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2494 					    void *evt_buf)
2495 {
2497 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
2498 	qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
2499 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
2500 	qdf_queue_work(0, wmi_handle->wmi_rx_work_queue,
2501 			&wmi_handle->rx_event_work);
2504 }
2505 
2506 qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx);
2507 
2508 uint32_t wmi_critical_events_in_flight(struct wmi_unified *wmi)
2509 {
2510 	return qdf_atomic_read(&wmi->critical_events_in_flight);
2511 }
2512 
2513 static bool
2514 wmi_is_event_critical(struct wmi_unified *wmi_handle, uint32_t event_id)
2515 {
2516 	if (wmi_handle->wmi_events[wmi_roam_synch_event_id] == event_id)
2517 		return true;
2518 
2519 	return false;
2520 }
2521 
2522 static QDF_STATUS wmi_discard_fw_event(struct scheduler_msg *msg)
2523 {
2524 	struct wmi_process_fw_event_params *event_param;
2525 
2526 	if (!msg->bodyptr)
2527 		return QDF_STATUS_E_INVAL;
2528 
2529 	event_param = (struct wmi_process_fw_event_params *)msg->bodyptr;
2530 	qdf_nbuf_free(event_param->evt_buf);
2531 	qdf_mem_free(msg->bodyptr);
2532 	msg->bodyptr = NULL;
2533 	msg->bodyval = 0;
2534 	msg->type = 0;
2535 
2536 	return QDF_STATUS_SUCCESS;
2537 }
2538 
2539 static QDF_STATUS wmi_process_fw_event_handler(struct scheduler_msg *msg)
2540 {
2541 	struct wmi_process_fw_event_params *params =
2542 		(struct wmi_process_fw_event_params *)msg->bodyptr;
2543 	struct wmi_unified *wmi_handle;
2544 	uint32_t event_id;
2545 
2546 	wmi_handle = (struct wmi_unified *)params->wmi_handle;
2547 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params->evt_buf),
2548 				 WMI_CMD_HDR, COMMANDID);
2549 	wmi_process_fw_event(wmi_handle, params->evt_buf);
2550 
2551 	if (wmi_is_event_critical(wmi_handle, event_id))
2552 		qdf_atomic_dec(&wmi_handle->critical_events_in_flight);
2553 
2554 	qdf_mem_free(msg->bodyptr);
2555 
2556 	return QDF_STATUS_SUCCESS;
2557 }
2558 
2559 /**
2560  * wmi_process_fw_event_sched_thread_ctx() - common event handler to serialize
2561  *                                  event processing through scheduler thread
2562  * @wmi: wmi context
2563  * @ev: event buffer
2564  *
2565  * Return: QDF_STATUS_SUCCESS on success, QDF error status on failure
2566  */
2567 static QDF_STATUS
2568 wmi_process_fw_event_sched_thread_ctx(struct wmi_unified *wmi,
2569 				      void *ev)
2570 {
2571 	struct wmi_process_fw_event_params *params_buf;
2572 	struct scheduler_msg msg = { 0 };
2573 	uint32_t event_id;
2574 
2575 	params_buf = qdf_mem_malloc(sizeof(struct wmi_process_fw_event_params));
2576 	if (!params_buf) {
2577 		wmi_err("malloc failed");
2578 		qdf_nbuf_free(ev);
2579 		return QDF_STATUS_E_NOMEM;
2580 	}
2581 
2582 	params_buf->wmi_handle = wmi;
2583 	params_buf->evt_buf = ev;
2584 
2585 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params_buf->evt_buf),
2586 				 WMI_CMD_HDR, COMMANDID);
2587 	if (wmi_is_event_critical(wmi, event_id))
2588 		qdf_atomic_inc(&wmi->critical_events_in_flight);
2589 
2590 	msg.bodyptr = params_buf;
2591 	msg.bodyval = 0;
2592 	msg.callback = wmi_process_fw_event_handler;
2593 	msg.flush_callback = wmi_discard_fw_event;
2594 
2595 	if (QDF_STATUS_SUCCESS !=
2596 		scheduler_post_message(QDF_MODULE_ID_TARGET_IF,
2597 				       QDF_MODULE_ID_TARGET_IF,
2598 				       QDF_MODULE_ID_TARGET_IF, &msg)) {
2599 		qdf_nbuf_free(ev);
2600 		qdf_mem_free(params_buf);
2601 		return QDF_STATUS_E_FAULT;
2602 	}
2603 
2604 	return QDF_STATUS_SUCCESS;
2605 }
2606 
2607 /**
2608  * wmi_get_pdev_ep() - get wmi handle based on endpoint
2609  * @soc: handle to wmi soc
2610  * @ep: endpoint id
2611  *
2612  * Return: wmi handle of the pdev mapped to @ep, or NULL if no pdev matches
2613  */
2614 static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc,
2615 						HTC_ENDPOINT_ID ep)
2616 {
2617 	uint32_t i;
2618 
2619 	for (i = 0; i < WMI_MAX_RADIOS; i++)
2620 		if (soc->wmi_endpoint_id[i] == ep)
2621 			break;
2622 
2623 	if (i == WMI_MAX_RADIOS)
2624 		return NULL;
2625 
2626 	return soc->wmi_pdev[i];
2627 }
2628 
2629 /**
2630  * wmi_mtrace_rx() - Wrappper function for qdf_mtrace api
2631  * @message_id: 32-Bit Wmi message ID
2632  * @vdev_id: Vdev ID
2633  * @data: Actual message contents
2634  *
2635  * This function converts the 32-bit WMI message ID in 15-bit message ID
2636  * format for qdf_mtrace as in qdf_mtrace message there are only 15
2637  * bits reserved for message ID.
2638  * out of these 15-bits, 8-bits (From LSB) specifies the WMI_GRP_ID
2639  * and remaining 7-bits specifies the actual WMI command. With this
2640  * notation there can be maximum 256 groups and each group can have
2641  * max 128 commands can be supported.
2642  *
2643  * Return: None
2644  */
2645 static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data)
2646 {
2647 	uint16_t mtrace_message_id;
2648 
2649 	mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) |
2650 		(QDF_WMI_MTRACE_GRP_ID(message_id) <<
2651 						QDF_WMI_MTRACE_CMD_NUM_BITS);
2652 	qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA,
2653 		   mtrace_message_id, vdev_id, data);
2654 }
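/*
 * Illustrative example (not part of the original source), assuming
 * QDF_WMI_MTRACE_CMD_NUM_BITS == 7 as implied by the comment above:
 *
 *	mtrace_message_id = cmd_in_group | (grp_id << 7);
 *	// grp_id 0x12, cmd_in_group 0x05  ->  (0x12 << 7) | 0x05 == 0x905
 */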
2655 
2656 #ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE
2657 static void wmi_rx_nbuf_free(qdf_nbuf_t nbuf)
2658 {
2659 	nbuf = wbuff_buff_put(nbuf);
2660 	if (nbuf)
2661 		qdf_nbuf_free(nbuf);
2662 }
2663 #else
2664 static inline void wmi_rx_nbuf_free(qdf_nbuf_t nbuf)
2665 {
2666 	return qdf_nbuf_free(nbuf);
2667 }
2668 #endif
2669 
2670 /**
2671  * wmi_process_control_rx() - process fw events
2672  * @wmi_handle: handle to wmi_unified
2673  * @evt_buf: handle to wmi_buf_t
2674  *
2675  * Return: none
2676  */
2677 static void wmi_process_control_rx(struct wmi_unified *wmi_handle,
2678 				   wmi_buf_t evt_buf)
2679 {
2680 	struct wmi_soc *soc = wmi_handle->soc;
2681 	uint32_t id;
2682 	uint32_t idx;
2683 	enum wmi_rx_exec_ctx exec_ctx;
2684 
2685 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2686 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2687 	if (qdf_unlikely(idx == A_ERROR)) {
2688 		wmi_debug("no handler registered for event id 0x%x", id);
2689 		wmi_rx_nbuf_free(evt_buf);
2690 		return;
2691 	}
2692 	wmi_mtrace_rx(id, 0xFF, idx);
2693 	qdf_spin_lock_bh(&soc->ctx_lock);
2694 	exec_ctx = wmi_handle->ctx[idx].exec_ctx;
2695 	qdf_spin_unlock_bh(&soc->ctx_lock);
2696 
2697 #ifdef WMI_INTERFACE_EVENT_LOGGING
2698 	if (wmi_handle->log_info.wmi_logging_enable) {
2699 		uint8_t *data;
2700 		data = qdf_nbuf_data(evt_buf);
2701 
2702 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2703 		/* Exclude 4 bytes of TLV header */
2704 		if (wmi_handle->ops->is_diag_event(id)) {
2705 			WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id,
2706 				((uint8_t *) data +
2707 				wmi_handle->soc->buf_offset_event));
2708 		} else if (wmi_handle->ops->is_management_record(id)) {
2709 			WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id,
2710 				((uint8_t *) data +
2711 				wmi_handle->soc->buf_offset_event));
2712 		} else {
2713 			WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data +
2714 				wmi_handle->soc->buf_offset_event));
2715 		}
2716 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2717 	}
2718 #endif
2719 
2720 	if (exec_ctx == WMI_RX_WORK_CTX) {
2721 		wmi_process_fw_event_worker_thread_ctx(wmi_handle, evt_buf);
2722 	} else if (exec_ctx == WMI_RX_TASKLET_CTX) {
2723 		wmi_process_fw_event(wmi_handle, evt_buf);
2724 	} else if (exec_ctx == WMI_RX_SERIALIZER_CTX) {
2725 		wmi_process_fw_event_sched_thread_ctx(wmi_handle, evt_buf);
2726 	} else if (exec_ctx == WMI_RX_DIAG_WORK_CTX) {
2727 		wmi_process_rx_diag_event_worker_thread_ctx(wmi_handle,
2728 							    evt_buf);
2729 	} else {
2730 		wmi_err("Invalid event context %d", exec_ctx);
2731 		wmi_rx_nbuf_free(evt_buf);
2732 	}
2733 
2734 }
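/*
 * Illustrative summary (not part of the original source) of how the
 * registered rx execution context selects the delivery path above:
 *
 *	WMI_RX_WORK_CTX       -> wmi_rx_work_queue via wmi_rx_event_work()
 *	WMI_RX_TASKLET_CTX    -> handled synchronously via wmi_process_fw_event()
 *	WMI_RX_SERIALIZER_CTX -> scheduler thread via scheduler_post_message()
 *	WMI_RX_DIAG_WORK_CTX  -> wmi_rx_diag_work_queue via wmi_rx_diag_event_work()
 */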
2735 
2736 /**
2737  * wmi_control_rx() - rx callback to process fw events
2738  * @ctx: handle to wmi
2739  * @htc_packet: pointer to htc packet
2740  *
2741  * Return: none
2742  */
2743 static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
2744 {
2745 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2746 	struct wmi_unified *wmi_handle;
2747 	wmi_buf_t evt_buf;
2748 
2749 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2750 
2751 	wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint);
2752 	if (!wmi_handle) {
2753 		wmi_err("unable to get wmi_handle to Endpoint %d",
2754 			htc_packet->Endpoint);
2755 		wmi_rx_nbuf_free(evt_buf);
2756 		return;
2757 	}
2758 
2759 	wmi_process_control_rx(wmi_handle, evt_buf);
2760 }
2761 
2762 #if defined(WLAN_FEATURE_WMI_DIAG_OVER_CE7) || \
2763 	defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
2764 /**
2765  * wmi_control_diag_rx() - rx callback to process diag fw events
2766  * @ctx: handle to wmi
2767  * @htc_packet: pointer to htc packet
2768  *
2769  * Return: none
2770  */
2771 static void wmi_control_diag_rx(void *ctx, HTC_PACKET *htc_packet)
2772 {
2773 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2774 	struct wmi_unified *wmi_handle;
2775 	wmi_buf_t evt_buf;
2776 
2777 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2778 
2779 	wmi_handle = soc->wmi_pdev[0];
2780 
2781 	if (!wmi_handle) {
2782 		wmi_err("unable to get wmi_handle for diag event endpoint id:%d", htc_packet->Endpoint);
2783 		wmi_rx_nbuf_free(evt_buf);
2784 		return;
2785 	}
2786 
2787 	wmi_process_control_rx(wmi_handle, evt_buf);
2788 }
2789 #endif
2790 
2791 #if defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
2792 /**
2793  * wmi_control_dbr_rx() - rx callback to process dbr fw events
2794  * @ctx: handle to wmi
2795  * @htc_packet: pointer to htc packet
2796  *
2797  * Return: none
2798  */
2799 static void wmi_control_dbr_rx(void *ctx, HTC_PACKET *htc_packet)
2800 {
2801 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2802 	struct wmi_unified *wmi_handle;
2803 	wmi_buf_t evt_buf;
2804 
2805 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2806 	wmi_handle = soc->wmi_pdev[0];
2807 
2808 	if (!wmi_handle) {
2809 		wmi_err("unable to get wmi_handle for dbr event endpoint id:%d",
2810 			htc_packet->Endpoint);
2811 		wmi_rx_nbuf_free(evt_buf);
2812 		return;
2813 	}
2814 
2815 	wmi_process_control_rx(wmi_handle, evt_buf);
2816 }
2817 #endif
2818 
2819 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
2820 QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle,
2821 					 wmi_buf_t buf, uint32_t buflen,
2822 					 uint32_t cmd_id)
2823 {
2824 	QDF_STATUS status;
2825 	int32_t ret;
2826 
2827 	if (!qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR))) {
2828 		wmi_err("Failed to send cmd %x, no memory", cmd_id);
2829 		return QDF_STATUS_E_NOMEM;
2830 	}
2831 
2832 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2833 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2834 	wmi_debug("Sending WMI_CMD_ID: 0x%x over qmi", cmd_id);
2835 	status = qdf_wmi_send_recv_qmi(qdf_nbuf_data(buf),
2836 				       buflen + sizeof(WMI_CMD_HDR),
2837 				       wmi_handle,
2838 				       wmi_process_qmi_fw_event);
2839 	if (QDF_IS_STATUS_ERROR(status)) {
2840 		qdf_nbuf_pull_head(buf, sizeof(WMI_CMD_HDR));
2841 		wmi_warn("WMI send on QMI failed. Retrying WMI on HTC");
2842 	} else {
2843 		ret = qdf_atomic_inc_return(&wmi_handle->num_stats_over_qmi);
2844 		wmi_debug("num stats over qmi: %d", ret);
2845 		wmi_buf_free(buf);
2846 	}
2847 
2848 	return status;
2849 }
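/*
 * Illustrative note (not part of the original source): when the QMI send
 * fails, the WMI_CMD_HDR pushed above is pulled back off the nbuf, so the
 * caller can retry the same, unmodified buffer over the regular HTC path
 * (e.g. via wmi_unified_cmd_send()); on success the buffer is freed here.
 */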
2850 
2851 static int __wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2852 {
2853 	struct wmi_unified *wmi_handle = wmi_cb_ctx;
2854 	wmi_buf_t evt_buf;
2855 	uint32_t evt_id;
2856 
2857 	if (!wmi_handle || !buf || !len) {
2858 		wmi_err_rl("%s is invalid", !wmi_handle ?
2859 				"wmi_handle" : !buf ? "buf" : "length");
2860 		return -EINVAL;
2861 	}
2862 
2863 	evt_buf = wmi_buf_alloc(wmi_handle, len);
2864 	if (!evt_buf)
2865 		return -ENOMEM;
2866 
2867 	qdf_mem_copy(qdf_nbuf_data(evt_buf), buf, len);
2868 	evt_id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2869 	wmi_debug("Received WMI_EVT_ID: 0x%x over qmi", evt_id);
2870 	wmi_process_control_rx(wmi_handle, evt_buf);
2871 
2872 	return 0;
2873 }
2874 
2875 int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2876 {
2877 	struct qdf_op_sync *op_sync;
2878 	int ret;
2879 
2880 	if (qdf_op_protect(&op_sync))
2881 		return -EINVAL;
2882 	ret = __wmi_process_qmi_fw_event(wmi_cb_ctx, buf, len);
2883 	qdf_op_unprotect(op_sync);
2884 
2885 	return ret;
2886 }
2887 #endif
2888 
2889 void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2890 {
2891 	__wmi_control_rx(wmi_handle, evt_buf);
2892 }
2893 
2894 void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2895 {
2896 	uint32_t id;
2897 	uint8_t *data;
2898 	uint32_t len;
2899 	void *wmi_cmd_struct_ptr = NULL;
2900 #ifndef WMI_NON_TLV_SUPPORT
2901 	int tlv_ok_status = 0;
2902 #endif
2903 	uint32_t idx = 0;
2904 	struct wmi_raw_event_buffer ev_buf;
2905 	enum wmi_rx_buff_type ev_buff_type;
2906 
2907 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2908 
2909 	wmi_ext_dbg_msg_event_record(wmi_handle, qdf_nbuf_data(evt_buf),
2910 				     qdf_nbuf_len(evt_buf));
2911 
2912 	if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
2913 		goto end;
2914 
2915 	data = qdf_nbuf_data(evt_buf);
2916 	len = qdf_nbuf_len(evt_buf);
2917 
2918 #ifndef WMI_NON_TLV_SUPPORT
2919 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2920 		/* Validate and pad(if necessary) the TLVs */
2921 		tlv_ok_status =
2922 			wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle,
2923 							data, len, id,
2924 							&wmi_cmd_struct_ptr);
2925 		if (tlv_ok_status != 0) {
2926 			QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2927 				  "%s: Error: id=0x%x, wmitlv check status=%d",
2928 				  __func__, id, tlv_ok_status);
2929 			goto end;
2930 		}
2931 	}
2932 #endif
2933 
2934 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2935 	if (idx == A_ERROR) {
2936 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2937 		   "%s : event handler is not registered: event id 0x%x",
2938 			__func__, id);
2939 		goto end;
2940 	}
2941 #ifdef WMI_INTERFACE_EVENT_LOGGING
2942 	if (wmi_handle->log_info.wmi_logging_enable) {
2943 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2944 		/* Exclude 4 bytes of TLV header */
2945 		if (wmi_handle->ops->is_diag_event(id)) {
2946 			/*
2947 			 * skip diag event logging in WMI event buffer
2948 			 * as it's already logged in WMI RX event buffer
2949 			 */
2950 		} else if (wmi_handle->ops->is_management_record(id)) {
2951 			/*
2952 			 * skip wmi mgmt event logging in WMI event buffer
2953 			 * as it's already logged in WMI RX event buffer
2954 			 */
2955 		} else {
2956 			uint8_t *tmpbuf = (uint8_t *)data +
2957 					wmi_handle->soc->buf_offset_event;
2958 
2959 			WMI_EVENT_RECORD(wmi_handle, id, tmpbuf);
2960 			wmi_specific_evt_record(wmi_handle, id, tmpbuf);
2961 		}
2962 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2963 	}
2964 #endif
2965 	/* Call the WMI registered event handler */
2966 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2967 		ev_buff_type = wmi_handle->ctx[idx].buff_type;
2968 		if (ev_buff_type == WMI_RX_PROCESSED_BUFF) {
2969 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2970 				wmi_cmd_struct_ptr, len);
2971 		} else if (ev_buff_type == WMI_RX_RAW_BUFF) {
2972 			ev_buf.evt_raw_buf = data;
2973 			ev_buf.evt_processed_buf = wmi_cmd_struct_ptr;
2974 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2975 							(void *)&ev_buf, len);
2976 		}
2977 	} else
2979 		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2980 			data, len);
2981 
2982 end:
2983 	/* Free event buffer and allocated event tlv */
2984 #ifndef WMI_NON_TLV_SUPPORT
2985 	if (wmi_handle->target_type == WMI_TLV_TARGET)
2986 		wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr);
2987 #endif
2988 
2989 	wmi_rx_nbuf_free(evt_buf);
2990 
2991 }
2992 
2993 #define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */
2994 
2995 static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id)
2996 {
2997 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2998 		  "%s: WLAN_BUG_RCA: Message type %x has exceeded its allotted time of %ds",
2999 		  __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000);
3000 }
3001 
3002 #ifdef CONFIG_SLUB_DEBUG_ON
3003 static void wmi_workqueue_watchdog_bite(void *arg)
3004 {
3005 	struct wmi_wq_dbg_info *info = arg;
3006 
3007 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
3008 	qdf_print_thread_trace(info->task);
3009 
3010 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
3011 		  "%s: Going down for WMI WQ Watchdog Bite!", __func__);
3012 	QDF_BUG(0);
3013 }
3014 #else
3015 static inline void wmi_workqueue_watchdog_bite(void *arg)
3016 {
3017 	struct wmi_wq_dbg_info *info = arg;
3018 
3019 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
3020 
3021 	qdf_print_thread_trace(info->task);
3022 }
3023 #endif
3024 
3025 /**
3026  * wmi_rx_event_work() - process rx event in rx work queue context
3027  * @arg: opaque pointer to wmi handle
3028  *
3029  * This function processes any fw event to serialize it through the rx worker thread.
3030  *
3031  * Return: none
3032  */
3033 static void wmi_rx_event_work(void *arg)
3034 {
3035 	wmi_buf_t buf;
3036 	struct wmi_unified *wmi = arg;
3037 	qdf_timer_t wd_timer;
3038 	struct wmi_wq_dbg_info info;
3039 
3040 	/* initialize WMI workqueue watchdog timer */
3041 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
3042 			&info, QDF_TIMER_TYPE_SW);
3043 	qdf_spin_lock_bh(&wmi->eventq_lock);
3044 	buf = qdf_nbuf_queue_remove(&wmi->event_queue);
3045 	qdf_spin_unlock_bh(&wmi->eventq_lock);
3046 	while (buf) {
3047 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
3048 		info.wd_msg_type_id =
3049 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
3050 		info.wmi_wq = wmi->wmi_rx_work_queue;
3051 		info.task = qdf_get_current_task();
3052 		__wmi_control_rx(wmi, buf);
3053 		qdf_timer_stop(&wd_timer);
3054 		qdf_spin_lock_bh(&wmi->eventq_lock);
3055 		buf = qdf_nbuf_queue_remove(&wmi->event_queue);
3056 		qdf_spin_unlock_bh(&wmi->eventq_lock);
3057 	}
3058 	qdf_timer_free(&wd_timer);
3059 }
3060 
3061 /**
3062  * wmi_rx_diag_event_work() - process rx diag event in work queue context
3063  * @arg: opaque pointer to wmi handle
3064  *
3065  * This function processes fw diag events to serialize them through the rx worker thread.
3066  *
3067  * Return: none
3068  */
3069 static void wmi_rx_diag_event_work(void *arg)
3070 {
3071 	wmi_buf_t buf;
3072 	struct wmi_unified *wmi = arg;
3073 	qdf_timer_t wd_timer;
3074 	struct wmi_wq_dbg_info info;
3075 	uint32_t diag_event_process_count = 0;
3076 
3077 	if (!wmi) {
3078 		wmi_err("Invalid WMI handle");
3079 		return;
3080 	}
3081 
3082 	/* initialize WMI workqueue watchdog timer */
3083 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
3084 		       &info, QDF_TIMER_TYPE_SW);
3085 	qdf_spin_lock_bh(&wmi->diag_eventq_lock);
3086 	buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
3087 	qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
3088 	while (buf) {
3089 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
3090 		info.wd_msg_type_id =
3091 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
3092 		info.wmi_wq = NULL;
3093 		info.task = qdf_get_current_task();
3094 		__wmi_control_rx(wmi, buf);
3095 		qdf_timer_stop(&wd_timer);
3096 
3097 		if (diag_event_process_count++ >
3098 		    RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT) {
3099 			qdf_queue_work(0, wmi->wmi_rx_diag_work_queue,
3100 				       &wmi->rx_diag_event_work);
3101 			break;
3102 		}
3103 
3104 		qdf_spin_lock_bh(&wmi->diag_eventq_lock);
3105 		buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
3106 		qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
3107 	}
3108 	qdf_timer_free(&wd_timer);
3109 }
3110 
3111 #ifdef FEATURE_RUNTIME_PM
3112 /**
3113  * wmi_runtime_pm_init() - initialize runtime pm wmi variables
3114  * @wmi_handle: wmi context
3115  */
3116 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
3117 {
3118 	qdf_atomic_init(&wmi_handle->runtime_pm_inprogress);
3119 }
3120 
3121 void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val)
3122 {
3123 	qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val);
3124 }
3125 
3126 bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle)
3127 {
3128 	return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress);
3129 }
3130 #else
3131 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
3132 {
3133 }
3134 #endif
3135 
3136 void wmi_set_wow_enable_ack_failed(wmi_unified_t wmi_handle)
3137 {
3138 	qdf_atomic_set(&wmi_handle->is_wow_enable_ack_failed, 1);
3139 }
3140 
3141 void wmi_clear_wow_enable_ack_failed(wmi_unified_t wmi_handle)
3142 {
3143 	qdf_atomic_set(&wmi_handle->is_wow_enable_ack_failed, 0);
3144 }
3145 
3146 bool wmi_has_wow_enable_ack_failed(wmi_unified_t wmi_handle)
3147 {
3148 	return qdf_atomic_read(&wmi_handle->is_wow_enable_ack_failed);
3149 }
3150 
3151 void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle)
3152 {
3153 	return wmi_handle->soc;
3154 }
3155 
3156 /**
3157  * wmi_interface_logging_init() - interface logging init
3158  * @wmi_handle: Pointer to wmi handle object
3159  * @pdev_idx: pdev index
3160  *
3161  * Return: None
3162  */
3163 #ifdef WMI_INTERFACE_EVENT_LOGGING
3164 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
3165 					      uint32_t pdev_idx)
3166 {
3167 	if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) {
3168 		qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
3169 		wmi_debugfs_init(wmi_handle, pdev_idx);
3170 	}
3171 }
3172 #else
3173 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
3174 					      uint32_t pdev_idx)
3175 {
3176 }
3177 #endif
3178 
3179 static QDF_STATUS wmi_initialize_worker_context(struct wmi_unified *wmi_handle)
3180 {
3181 	wmi_handle->wmi_rx_work_queue =
3182 		qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue");
3183 	if (!wmi_handle->wmi_rx_work_queue) {
3184 		wmi_err("failed to create wmi_rx_event_work_queue");
3185 		return QDF_STATUS_E_RESOURCES;
3186 	}
3187 
3188 	qdf_spinlock_create(&wmi_handle->eventq_lock);
3189 	qdf_nbuf_queue_init(&wmi_handle->event_queue);
3190 	qdf_create_work(0, &wmi_handle->rx_event_work,
3191 			wmi_rx_event_work, wmi_handle);
3192 
3193 	wmi_handle->wmi_rx_diag_work_queue =
3194 		qdf_alloc_unbound_workqueue("wmi_rx_diag_event_work_queue");
3195 	if (!wmi_handle->wmi_rx_diag_work_queue) {
3196 		wmi_err("failed to create wmi_rx_diag_event_work_queue");
3197 		return QDF_STATUS_E_RESOURCES;
3198 	}
3199 	qdf_spinlock_create(&wmi_handle->diag_eventq_lock);
3200 	qdf_nbuf_queue_init(&wmi_handle->diag_event_queue);
3201 	qdf_create_work(0, &wmi_handle->rx_diag_event_work,
3202 			wmi_rx_diag_event_work, wmi_handle);
3203 	wmi_handle->wmi_rx_diag_events_dropped = 0;
3204 
3205 	return QDF_STATUS_SUCCESS;
3206 }
3207 
3208 void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx)
3209 {
3210 	struct wmi_unified *wmi_handle;
3211 	QDF_STATUS status;
3212 
3213 	if (pdev_idx >= WMI_MAX_RADIOS)
3214 		return NULL;
3215 
3216 	if (!soc->wmi_pdev[pdev_idx]) {
3217 		wmi_handle =
3218 			(struct wmi_unified *) qdf_mem_malloc(
3219 					sizeof(struct wmi_unified));
3220 		if (!wmi_handle)
3221 			return NULL;
3222 
3223 		status = wmi_initialize_worker_context(wmi_handle);
3224 		if (QDF_IS_STATUS_ERROR(status))
3225 			goto error;
3226 
3227 		wmi_handle->scn_handle = soc->scn_handle;
3228 		wmi_handle->event_id = soc->event_id;
3229 		wmi_handle->event_handler = soc->event_handler;
3230 		wmi_handle->ctx = soc->ctx;
3231 		wmi_handle->ops = soc->ops;
3232 		wmi_handle->wmi_events = soc->wmi_events;
3233 		wmi_handle->services = soc->services;
3234 		wmi_handle->soc = soc;
3235 		wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3236 		wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3237 		wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3238 		wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3239 		wmi_interface_logging_init(wmi_handle, pdev_idx);
3240 		qdf_atomic_init(&wmi_handle->is_target_suspended);
3241 		qdf_atomic_init(&wmi_handle->is_wow_enable_ack_failed);
3242 		wmi_handle->target_type = soc->target_type;
3243 		wmi_handle->wmi_max_cmds = soc->wmi_max_cmds;
3244 
3245 		wmi_interface_sequence_init(wmi_handle);
3246 		if (wmi_ext_dbgfs_init(wmi_handle, pdev_idx) !=
3247 		    QDF_STATUS_SUCCESS)
3248 			wmi_err("Failed to initialize wmi extended debugfs");
3249 
3250 		soc->wmi_pdev[pdev_idx] = wmi_handle;
3251 	} else
3252 		wmi_handle = soc->wmi_pdev[pdev_idx];
3253 
3254 	qdf_atomic_init(&wmi_handle->pending_cmds);
3255 	wmi_handle->wmi_stopinprogress = 0;
3256 	wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx];
3257 	wmi_handle->htc_handle = soc->htc_handle;
3258 	wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx];
3259 	wmi_handle->tag_crash_inject = false;
3260 	wmi_interface_sequence_reset(wmi_handle);
3261 
3262 	return wmi_handle;
3263 
3264 error:
3265 	qdf_mem_free(wmi_handle);
3266 
3267 	return NULL;
3268 }
3269 qdf_export_symbol(wmi_unified_get_pdev_handle);
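/*
 * Example (illustrative sketch, not compiled as part of this file): a
 * per-radio caller that already holds the struct wmi_soc would typically
 * obtain, or lazily create, the pdev handle as below. The names "soc" and
 * "pdev_idx" are assumptions for the sketch.
 *
 *	struct wmi_unified *wmi;
 *
 *	wmi = wmi_unified_get_pdev_handle(soc, pdev_idx);
 *	if (!wmi)
 *		return QDF_STATUS_E_FAILURE;
 */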
3270 
3271 static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t);
3272 
3273 void wmi_unified_register_module(enum wmi_target_type target_type,
3274 			void (*wmi_attach)(wmi_unified_t wmi_handle))
3275 {
3276 	if (target_type < WMI_MAX_TARGET_TYPE)
3277 		wmi_attach_register[target_type] = wmi_attach;
3280 }
3281 qdf_export_symbol(wmi_unified_register_module);
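/*
 * Illustrative sketch (assumed names, not part of this file): a
 * target-specific backend registers its attach routine once at init time so
 * wmi_unified_attach() can pick it up by target type. The hypothetical
 * wmi_tlv_example_attach() stands in for the real per-target attach that
 * populates wmi_handle->ops.
 *
 *	static void wmi_tlv_example_attach(wmi_unified_t wmi_handle)
 *	{
 *		... fill in wmi_handle->ops for this target ...
 *	}
 *
 *	wmi_unified_register_module(WMI_TLV_TARGET, wmi_tlv_example_attach);
 */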
3282 
3283 /**
3284  * wmi_wbuff_register() - register wmi with wbuff
3285  * @wmi_handle: handle to wmi
3286  *
3287  * Return: void
3288  */
3289 static void wmi_wbuff_register(struct wmi_unified *wmi_handle)
3290 {
3291 	struct wbuff_alloc_request wbuff_alloc[4];
3292 	uint8_t reserve = WMI_MIN_HEAD_ROOM;
3293 
3294 	wbuff_alloc[0].pool_id = 0;
3295 	wbuff_alloc[0].pool_size = WMI_WBUFF_POOL_0_SIZE;
3296 	wbuff_alloc[0].buffer_size = roundup(WMI_WBUFF_LEN_POOL0 + reserve, 4);
3297 
3298 	wbuff_alloc[1].pool_id = 1;
3299 	wbuff_alloc[1].pool_size = WMI_WBUFF_POOL_1_SIZE;
3300 	wbuff_alloc[1].buffer_size = roundup(WMI_WBUFF_LEN_POOL1 + reserve, 4);
3301 
3302 	wbuff_alloc[2].pool_id = 2;
3303 	wbuff_alloc[2].pool_size = WMI_WBUFF_POOL_2_SIZE;
3304 	wbuff_alloc[2].buffer_size = roundup(WMI_WBUFF_LEN_POOL2 + reserve, 4);
3305 
3306 	wbuff_alloc[3].pool_id = 3;
3307 	wbuff_alloc[3].pool_size = WMI_WBUFF_POOL_3_SIZE;
3308 	wbuff_alloc[3].buffer_size = roundup(WMI_WBUFF_LEN_POOL3 + reserve, 4);
3309 
3310 	wmi_handle->wbuff_handle =
3311 		wbuff_module_register(wbuff_alloc, QDF_ARRAY_SIZE(wbuff_alloc),
3312 				      reserve, 4, WBUFF_MODULE_WMI_TX);
3313 }
3314 
3315 /**
3316  * wmi_wbuff_deregister() - deregister wmi from wbuff
3317  * @wmi_handle: handle to wmi
3318  *
3319  * Return: void
3320  */
3321 static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle)
3322 {
3323 	wbuff_module_deregister(wmi_handle->wbuff_handle);
3324 	wmi_handle->wbuff_handle = NULL;
3325 }
3326 
3327 void *wmi_unified_attach(void *scn_handle,
3328 			 struct wmi_unified_attach_params *param)
3329 {
3330 	struct wmi_unified *wmi_handle;
3331 	struct wmi_soc *soc;
3332 	QDF_STATUS status;
3333 
3334 	soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc));
3335 	if (!soc)
3336 		return NULL;
3337 
3338 	wmi_handle =
3339 		(struct wmi_unified *) qdf_mem_malloc(
3340 			sizeof(struct wmi_unified));
3341 	if (!wmi_handle) {
3342 		qdf_mem_free(soc);
3343 		return NULL;
3344 	}
3345 
3346 	status = wmi_initialize_worker_context(wmi_handle);
3347 	if (QDF_IS_STATUS_ERROR(status))
3348 		goto error;
3349 
3350 	wmi_handle->soc = soc;
3351 	wmi_handle->soc->soc_idx = param->soc_id;
3352 	wmi_handle->soc->is_async_ep = param->is_async_ep;
3353 	wmi_handle->event_id = soc->event_id;
3354 	wmi_handle->event_handler = soc->event_handler;
3355 	wmi_handle->ctx = soc->ctx;
3356 	wmi_handle->wmi_events = soc->wmi_events;
3357 	wmi_handle->services = soc->services;
3358 	wmi_handle->scn_handle = scn_handle;
3359 	wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3360 	wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3361 	wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3362 	wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3363 	soc->scn_handle = scn_handle;
3364 	wmi_handle->target_type = param->target_type;
3365 	soc->target_type = param->target_type;
3366 
3367 	if (param->target_type >= WMI_MAX_TARGET_TYPE)
3368 		goto error;
3369 
3370 	if (wmi_attach_register[param->target_type]) {
3371 		wmi_attach_register[param->target_type](wmi_handle);
3372 	} else {
3373 		wmi_err("wmi attach is not registered");
3374 		goto error;
3375 	}
3376 
3377 	qdf_atomic_init(&wmi_handle->pending_cmds);
3378 	qdf_atomic_init(&wmi_handle->is_target_suspended);
3379 	qdf_atomic_init(&wmi_handle->is_target_suspend_acked);
3380 	qdf_atomic_init(&wmi_handle->num_stats_over_qmi);
3381 	qdf_atomic_init(&wmi_handle->is_wow_enable_ack_failed);
3382 	wmi_runtime_pm_init(wmi_handle);
3383 	wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0);
3384 
3385 	wmi_interface_sequence_init(wmi_handle);
3386 	/* Assign target cookie capability */
3387 	wmi_handle->use_cookie = param->use_cookie;
3388 	wmi_handle->osdev = param->osdev;
3389 	wmi_handle->wmi_stopinprogress = 0;
3390 	wmi_handle->wmi_max_cmds = param->max_commands;
3391 	soc->wmi_max_cmds = param->max_commands;
3392 	/* Increase the ref count once refcount infra is present */
3393 	soc->wmi_psoc = param->psoc;
3394 	qdf_spinlock_create(&soc->ctx_lock);
3395 	soc->ops = wmi_handle->ops;
3396 	soc->wmi_pdev[0] = wmi_handle;
3397 	if (wmi_ext_dbgfs_init(wmi_handle, 0) != QDF_STATUS_SUCCESS)
3398 		wmi_err("Failed to initialize wmi extended debugfs");
3399 
3400 	wmi_wbuff_register(wmi_handle);
3401 
3402 	wmi_hang_event_notifier_register(wmi_handle);
3403 
3404 	wmi_minidump_attach(wmi_handle);
3405 
3406 	return wmi_handle;
3407 
3408 error:
3409 	qdf_mem_free(soc);
3410 	qdf_mem_free(wmi_handle);
3411 
3412 	return NULL;
3413 }
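/*
 * Example usage (hedged sketch, assumed caller context): a SoC-level
 * component fills struct wmi_unified_attach_params and calls
 * wmi_unified_attach() once per SoC. Only the fields consumed above are
 * shown; the real structure may carry additional members, and "osdev",
 * "max_cmds", "psoc" and "scn_handle" are assumed caller-side values.
 *
 *	struct wmi_unified_attach_params param = {0};
 *	struct wmi_unified *wmi;
 *
 *	param.osdev = osdev;
 *	param.target_type = WMI_TLV_TARGET;
 *	param.use_cookie = false;
 *	param.max_commands = max_cmds;
 *	param.psoc = psoc;
 *	param.soc_id = 0;
 *	param.is_async_ep = false;
 *
 *	wmi = wmi_unified_attach(scn_handle, &param);
 *	if (!wmi)
 *		return QDF_STATUS_E_FAILURE;
 */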
3414 
3415 void wmi_unified_detach(struct wmi_unified *wmi_handle)
3416 {
3417 	wmi_buf_t buf;
3418 	struct wmi_soc *soc;
3419 	uint8_t i;
3420 
3421 	wmi_minidump_detach(wmi_handle);
3422 
3423 	wmi_hang_event_notifier_unregister();
3424 
3425 	wmi_wbuff_deregister(wmi_handle);
3426 
3427 	soc = wmi_handle->soc;
3428 	for (i = 0; i < WMI_MAX_RADIOS; i++) {
3429 		if (soc->wmi_pdev[i]) {
3430 			qdf_flush_workqueue(0,
3431 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3432 			qdf_destroy_workqueue(0,
3433 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3434 			wmi_debugfs_remove(soc->wmi_pdev[i]);
3435 			buf = qdf_nbuf_queue_remove(
3436 					&soc->wmi_pdev[i]->event_queue);
3437 			while (buf) {
3438 				qdf_nbuf_free(buf);
3439 				buf = qdf_nbuf_queue_remove(
3440 						&soc->wmi_pdev[i]->event_queue);
3441 			}
3442 
3443 			qdf_flush_workqueue(0,
3444 				soc->wmi_pdev[i]->wmi_rx_diag_work_queue);
3445 			qdf_destroy_workqueue(0,
3446 				soc->wmi_pdev[i]->wmi_rx_diag_work_queue);
3447 			buf = qdf_nbuf_queue_remove(
3448 					&soc->wmi_pdev[i]->diag_event_queue);
3449 			while (buf) {
3450 				qdf_nbuf_free(buf);
3451 				buf = qdf_nbuf_queue_remove(
3452 					&soc->wmi_pdev[i]->diag_event_queue);
3453 			}
3454 
3455 			wmi_log_buffer_free(soc->wmi_pdev[i]);
3456 
3457 			/* Free events logs list */
3458 			if (soc->wmi_pdev[i]->events_logs_list)
3459 				qdf_mem_free(
3460 					soc->wmi_pdev[i]->events_logs_list);
3461 
3462 			qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock);
3463 			qdf_spinlock_destroy(
3464 					&soc->wmi_pdev[i]->diag_eventq_lock);
3465 
3466 			wmi_interface_sequence_deinit(soc->wmi_pdev[i]);
3467 			wmi_ext_dbgfs_deinit(soc->wmi_pdev[i]);
3468 			wmi_clear_wow_enable_ack_failed(soc->wmi_pdev[i]);
3469 
3470 			qdf_mem_free(soc->wmi_pdev[i]);
3471 		}
3472 	}
3473 	qdf_spinlock_destroy(&soc->ctx_lock);
3474 
3475 	if (soc->wmi_service_bitmap) {
3476 		qdf_mem_free(soc->wmi_service_bitmap);
3477 		soc->wmi_service_bitmap = NULL;
3478 	}
3479 
3480 	if (soc->wmi_ext_service_bitmap) {
3481 		qdf_mem_free(soc->wmi_ext_service_bitmap);
3482 		soc->wmi_ext_service_bitmap = NULL;
3483 	}
3484 
3485 	if (soc->wmi_ext2_service_bitmap) {
3486 		qdf_mem_free(soc->wmi_ext2_service_bitmap);
3487 		soc->wmi_ext2_service_bitmap = NULL;
3488 	}
3489 
3490 	/* Decrease the ref count once refcount infra is present */
3491 	soc->wmi_psoc = NULL;
3492 	qdf_mem_free(soc);
3493 }
3494 
3495 void
3496 wmi_unified_remove_work(struct wmi_unified *wmi_handle)
3497 {
3498 	wmi_buf_t buf;
3499 
3500 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue);
3501 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
3502 	buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3503 	while (buf) {
3504 		qdf_nbuf_free(buf);
3505 		buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3506 	}
3507 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
3508 
3509 	/* Remove diag events work */
3510 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_diag_work_queue);
3511 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
3512 	buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3513 	while (buf) {
3514 		qdf_nbuf_free(buf);
3515 		buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3516 	}
3517 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
3518 }
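/*
 * Teardown ordering (illustrative sketch, assuming the usual unload/SSR
 * flow): callers typically drain the queued RX work first and only then
 * tear the handles down.
 *
 *	wmi_unified_remove_work(wmi_handle);
 *	wmi_unified_detach(wmi_handle);
 */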
3519 
3520 /**
3521  * wmi_htc_tx_complete() - Process htc tx completion
3522  *
3523  * @ctx: handle to WMI SoC
3524  * @htc_pkt: pointer to htc packet
3525  *
3526  * Return: none.
3527  */
3528 static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
3529 {
3530 	struct wmi_soc *soc = (struct wmi_soc *) ctx;
3531 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3532 	u_int8_t *buf_ptr;
3533 	u_int32_t len;
3534 	struct wmi_unified *wmi_handle;
3535 #ifdef WMI_INTERFACE_EVENT_LOGGING
3536 	struct wmi_debug_log_info *log_info;
3537 	uint32_t cmd_id;
3538 	uint8_t *offset_ptr;
3539 	qdf_dma_addr_t dma_addr;
3540 	uint64_t phy_addr;
3541 #endif
3542 
3543 	ASSERT(wmi_cmd_buf);
3544 	wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint);
3545 	if (!wmi_handle) {
3546 		wmi_err("Unable to get wmi handle");
3547 		QDF_ASSERT(0);
3548 		return;
3549 	}
3550 	buf_ptr = (u_int8_t *)wmi_buf_data(wmi_cmd_buf);
3551 #ifdef WMI_INTERFACE_EVENT_LOGGING
3552 	log_info = &wmi_handle->log_info;
3553 
3554 	if (log_info->wmi_logging_enable) {
3555 		cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf),
3556 				WMI_CMD_HDR, COMMANDID);
3557 
3558 		dma_addr = QDF_NBUF_CB_PADDR(wmi_cmd_buf);
3559 		phy_addr = qdf_mem_virt_to_phys(qdf_nbuf_data(wmi_cmd_buf));
3560 
3561 		qdf_spin_lock_bh(&log_info->wmi_record_lock);
3562 		/* Record 16 bytes of WMI cmd tx complete data
3563 		 * - exclude TLV and WMI headers
3564 		 */
3565 		offset_ptr = buf_ptr + wmi_handle->soc->buf_offset_command;
3566 		if (wmi_handle->ops->is_management_record(cmd_id)) {
3567 			WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3568 						       offset_ptr);
3569 		} else {
3570 			if (wmi_handle->ops->is_force_fw_hang_cmd(cmd_id)) {
3571 				wmi_info("Tx completion received for WMI_FORCE_FW_HANG_CMDID, current_time:%ld",
3572 					 qdf_mc_timer_get_system_time());
3573 			}
3574 
3575 			WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3576 						  offset_ptr, dma_addr,
3577 						  phy_addr);
3578 		}
3579 
3580 		qdf_spin_unlock_bh(&log_info->wmi_record_lock);
3581 	}
3582 #endif
3583 
3584 	wmi_interface_sequence_check(wmi_handle, wmi_cmd_buf);
3585 
3586 	len = qdf_nbuf_len(wmi_cmd_buf);
3587 	qdf_mem_zero(buf_ptr, len);
3588 	wmi_buf_free(wmi_cmd_buf);
3589 	qdf_mem_free(htc_pkt);
3590 	qdf_atomic_dec(&wmi_handle->pending_cmds);
3591 }
3592 
3593 #ifdef FEATURE_RUNTIME_PM
3594 /**
3595  * wmi_htc_log_pkt() - Log the WMI command carried in an HTC packet
3596  *
3597  * @ctx: handle of WMI context
3598  * @htc_pkt: handle of HTC packet
3599  *
3600  * Return: none
3601  */
3602 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3603 {
3604 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3605 	uint32_t cmd_id;
3606 
3607 	ASSERT(wmi_cmd_buf);
3608 	cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), WMI_CMD_HDR,
3609 			       COMMANDID);
3610 
3611 	wmi_debug("WMI command from HTC packet: %s, ID: %d",
3612 		 wmi_id_to_name(cmd_id), cmd_id);
3613 }
3614 #else
3615 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3616 {
3617 }
3618 #endif
3619 
3620 /**
3621  * wmi_connect_pdev_htc_service() - WMI API to connect to the HTC service
3622  * @soc: handle to WMI SoC
3623  * @pdev_idx: Pdev index
3624  *
3625  * Return: QDF_STATUS
3626  */
3627 static QDF_STATUS wmi_connect_pdev_htc_service(struct wmi_soc *soc,
3628 					       uint32_t pdev_idx)
3629 {
3630 	QDF_STATUS status;
3631 	struct htc_service_connect_resp response;
3632 	struct htc_service_connect_req connect;
3633 
3634 	OS_MEMZERO(&connect, sizeof(connect));
3635 	OS_MEMZERO(&response, sizeof(response));
3636 
3637 	/* meta data is unused for now */
3638 	connect.pMetaData = NULL;
3639 	connect.MetaDataLength = 0;
3640 	/* these fields are the same for all service endpoints */
3641 	connect.EpCallbacks.pContext = soc;
3642 	connect.EpCallbacks.EpTxCompleteMultiple =
3643 		NULL /* Control path completion ar6000_tx_complete */;
3644 	connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */;
3645 	connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */;
3646 	connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */;
3647 	connect.EpCallbacks.EpTxComplete =
3648 		wmi_htc_tx_complete /* ar6000_tx_complete */;
3649 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3650 
3651 	/* connect to control service */
3652 	connect.service_id = soc->svc_ids[pdev_idx];
3653 	status = htc_connect_service(soc->htc_handle, &connect, &response);
3654 
3655 	if (QDF_IS_STATUS_ERROR(status)) {
3656 		wmi_err("Failed to connect to WMI CONTROL service status:%d",
3657 			 status);
3658 		return status;
3659 	}
3660 
3661 	if (soc->is_async_ep)
3662 		htc_set_async_ep(soc->htc_handle, response.Endpoint, true);
3663 
3664 	soc->wmi_endpoint_id[pdev_idx] = response.Endpoint;
3665 	soc->max_msg_len[pdev_idx] = response.MaxMsgLength;
3666 
3667 	return QDF_STATUS_SUCCESS;
3668 }
3669 
3670 QDF_STATUS
3671 wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle,
3672 				HTC_HANDLE htc_handle)
3673 {
3674 	uint32_t i;
3675 	uint8_t wmi_ep_count;
3676 
3677 	wmi_handle->soc->htc_handle = htc_handle;
3678 
3679 	wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle);
3680 	if (wmi_ep_count > WMI_MAX_RADIOS)
3681 		return QDF_STATUS_E_FAULT;
3682 
3683 	for (i = 0; i < wmi_ep_count; i++)
3684 		wmi_connect_pdev_htc_service(wmi_handle->soc, i);
3685 
3686 	wmi_handle->htc_handle = htc_handle;
3687 	wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0];
3688 	wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0];
3689 
3690 	return QDF_STATUS_SUCCESS;
3691 }
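/*
 * Example (sketch under an assumed bring-up flow): once HTC is up, the
 * caller hands the HTC handle to WMI so every radio endpoint gets
 * connected.
 *
 *	QDF_STATUS status;
 *
 *	status = wmi_unified_connect_htc_service(wmi_handle, htc_handle);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 */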
3692 
3693 #if defined(WLAN_FEATURE_WMI_DIAG_OVER_CE7) || \
3694 	defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
3695 QDF_STATUS wmi_diag_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
3696 					     HTC_HANDLE htc_handle)
3697 {
3698 	QDF_STATUS status;
3699 	struct htc_service_connect_resp response = {0};
3700 	struct htc_service_connect_req connect = {0};
3701 
3702 	/* meta data is unused for now */
3703 	connect.pMetaData = NULL;
3704 	connect.MetaDataLength = 0;
3705 	connect.EpCallbacks.pContext = wmi_handle->soc;
3706 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3707 	connect.EpCallbacks.EpRecv = wmi_control_diag_rx /* wmi diag rx */;
3708 	connect.EpCallbacks.EpRecvRefill = NULL;
3709 	connect.EpCallbacks.EpSendFull = NULL;
3710 	connect.EpCallbacks.EpTxComplete = NULL;
3711 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3712 
3713 	/* connect to wmi diag service */
3714 	connect.service_id = WMI_CONTROL_DIAG_SVC;
3715 	status = htc_connect_service(htc_handle, &connect, &response);
3716 
3717 	if (QDF_IS_STATUS_ERROR(status)) {
3718 		wmi_err("Failed to connect to WMI DIAG service status:%d",
3719 			status);
3720 		return status;
3721 	}
3722 
3723 	if (wmi_handle->soc->is_async_ep)
3724 		htc_set_async_ep(htc_handle, response.Endpoint, true);
3725 
3726 	wmi_handle->soc->wmi_diag_endpoint_id = response.Endpoint;
3727 
3728 	return QDF_STATUS_SUCCESS;
3729 }
3730 #endif
3731 
3732 #if defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
3733 QDF_STATUS wmi_dbr_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
3734 					    HTC_HANDLE htc_handle)
3735 {
3736 	QDF_STATUS status;
3737 	struct htc_service_connect_resp response = {0};
3738 	struct htc_service_connect_req connect = {0};
3739 
3740 	/* meta data is unused for now */
3741 	connect.pMetaData = NULL;
3742 	connect.MetaDataLength = 0;
3743 	connect.EpCallbacks.pContext = wmi_handle->soc;
3744 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3745 	connect.EpCallbacks.EpRecv = wmi_control_dbr_rx /* wmi dbr rx */;
3746 	connect.EpCallbacks.EpRecvRefill = NULL;
3747 	connect.EpCallbacks.EpSendFull = NULL;
3748 	connect.EpCallbacks.EpTxComplete = NULL;
3749 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3750 
3751 	/* connect to wmi dbr service */
3752 	connect.service_id = WMI_CONTROL_DBR_SVC;
3753 	status = htc_connect_service(htc_handle, &connect, &response);
3754 
3755 	if (QDF_IS_STATUS_ERROR(status)) {
3756 		wmi_err("Failed to connect to WMI DBR service status:%d",
3757 			status);
3758 		return status;
3759 	}
3760 
3761 	if (wmi_handle->soc->is_async_ep)
3762 		htc_set_async_ep(htc_handle, response.Endpoint, true);
3763 
3764 	wmi_handle->soc->wmi_dbr_endpoint_id = response.Endpoint;
3765 
3766 	return QDF_STATUS_SUCCESS;
3767 }
3768 #endif
3769 
3770 int wmi_get_host_credits(wmi_unified_t wmi_handle)
3771 {
3772 	int host_credits = 0;
3773 
3774 	htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle,
3775 						 &host_credits);
3776 	return host_credits;
3777 }
3778 
3779 int wmi_get_pending_cmds(wmi_unified_t wmi_handle)
3780 {
3781 	return qdf_atomic_read(&wmi_handle->pending_cmds);
3782 }
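/*
 * Illustrative check (assumed suspend-path caller): before suspending the
 * target, callers commonly verify that no WMI command is still in flight
 * and that the FW has returned all credits. "expected_credits" is an
 * assumed value for the sketch.
 *
 *	if (wmi_get_pending_cmds(wmi_handle) ||
 *	    wmi_get_host_credits(wmi_handle) < expected_credits)
 *		return QDF_STATUS_E_AGAIN;
 */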
3783 
3784 void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val)
3785 {
3786 	qdf_atomic_set(&wmi_handle->is_target_suspended, val);
3787 }
3788 
3789 void wmi_set_target_suspend_acked(wmi_unified_t wmi_handle, A_BOOL val)
3790 {
3791 	qdf_atomic_set(&wmi_handle->is_target_suspend_acked, val);
3792 	qdf_atomic_set(&wmi_handle->num_stats_over_qmi, 0);
3793 }
3794 
3795 bool wmi_is_target_suspended(struct wmi_unified *wmi_handle)
3796 {
3797 	return qdf_atomic_read(&wmi_handle->is_target_suspended);
3798 }
3799 qdf_export_symbol(wmi_is_target_suspended);
3800 
3801 bool wmi_is_target_suspend_acked(struct wmi_unified *wmi_handle)
3802 {
3803 	return qdf_atomic_read(&wmi_handle->is_target_suspend_acked);
3804 }
3805 qdf_export_symbol(wmi_is_target_suspend_acked);
3806 
3807 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
3808 void wmi_set_qmi_stats(wmi_unified_t wmi_handle, bool val)
3809 {
3810 	wmi_handle->is_qmi_stats_enabled = val;
3811 }
3812 
3813 bool wmi_is_qmi_stats_enabled(struct wmi_unified *wmi_handle)
3814 {
3815 	return wmi_handle->is_qmi_stats_enabled;
3816 }
3817 #endif
3818 
3819 void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag)
3820 {
3821 	wmi_handle->tag_crash_inject = flag;
3822 }
3823 
3824 void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val)
3825 {
3826 	qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val);
3827 }
3828 
3829 void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val)
3830 {
3831 	wmi_handle->tgt_force_assert_enable = val;
3832 }
3833 
3834 int
3835 wmi_stop(wmi_unified_t wmi_handle)
3836 {
3837 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3838 		  "WMI Stop");
3839 	wmi_handle->wmi_stopinprogress = 1;
3840 	return 0;
3841 }
3842 
3843 int
3844 wmi_start(wmi_unified_t wmi_handle)
3845 {
3846 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3847 		  "WMI Start");
3848 	wmi_handle->wmi_stopinprogress = 0;
3849 	return 0;
3850 }
3851 
3852 bool
3853 wmi_is_blocked(wmi_unified_t wmi_handle)
3854 {
3855 	return wmi_handle->wmi_stopinprogress != 0;
3856 }
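/*
 * Example flow (hedged sketch): around target recovery the control path is
 * typically blocked first, command producers check wmi_is_blocked() before
 * queuing, and the path is re-opened once recovery completes.
 *
 *	wmi_stop(wmi_handle);
 *	if (wmi_is_blocked(wmi_handle))
 *		... defer or drop new commands ...
 *	wmi_start(wmi_handle);
 */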
3857 
3858 void
3859 wmi_flush_endpoint(wmi_unified_t wmi_handle)
3860 {
3861 	htc_flush_endpoint(wmi_handle->htc_handle,
3862 		wmi_handle->wmi_endpoint_id, 0);
3863 }
3864 qdf_export_symbol(wmi_flush_endpoint);
3865 
3866 HTC_ENDPOINT_ID wmi_get_endpoint(wmi_unified_t wmi_handle)
3867 {
3868 	return wmi_handle->wmi_endpoint_id;
3869 }
3870 
3871 void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle,
3872 				   uint32_t *pdev_id_map,
3873 				   uint8_t size)
3874 {
3875 	if (wmi_handle->target_type == WMI_TLV_TARGET)
3876 		wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle,
3877 							       pdev_id_map,
3878 							       size);
3879 }
3880 
3881 int __wmi_validate_handle(wmi_unified_t wmi_handle, const char *func)
3882 {
3883 	if (!wmi_handle) {
3884 		wmi_err("Invalid WMI handle (via %s)", func);
3885 		return -EINVAL;
3886 	}
3887 
3888 	return 0;
3889 }
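/*
 * Example (sketch): API entry points validate the handle before use and
 * pass the caller's function name so the error log identifies the call
 * site, typically through a wrapper macro that supplies __func__.
 *
 *	if (__wmi_validate_handle(wmi_handle, __func__))
 *		return QDF_STATUS_E_INVAL;
 */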
3890