xref: /wlan-dirver/qca-wifi-host-cmn/wmi/src/wmi_unified.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Host WMI unified implementation
21  */
22 #include "htc_api.h"
24 #include "wmi_unified_priv.h"
25 #include "wmi_unified_api.h"
26 #include "qdf_module.h"
27 #include "qdf_platform.h"
28 #ifdef WMI_EXT_DBG
29 #include "qdf_list.h"
30 #include "qdf_atomic.h"
31 #endif
32 
33 #ifndef WMI_NON_TLV_SUPPORT
34 #include "wmi_tlv_helper.h"
35 #endif
36 
37 #include <linux/debugfs.h>
38 #include <target_if.h>
39 #include <qdf_debugfs.h>
40 #include "wmi_filtered_logging.h"
41 #include <wmi_hang_event.h>
42 
43 /* This check for CONFIG_WIN was temporarily added due to a redeclaration
44  * compilation error in MCL. The error is caused by the inclusion of wmi.h in
45  * wmi_unified_api.h, which gets included here through ol_if_athvar.h. Once
46  * wmi.h is removed from wmi_unified_api.h after the cleanup, WMI_CMD_HDR will
47  * need to be defined here. */
48 /* Copied from wmi.h */
49 #undef MS
50 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
51 #undef SM
52 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
53 #undef WO
54 #define WO(_f)      ((_f##_OFFSET) >> 2)
55 
56 #undef GET_FIELD
57 #define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f)
58 #undef SET_FIELD
59 #define SET_FIELD(_addr, _f, _val)  \
60 	    (*((uint32_t *)(_addr) + WO(_f)) = \
61 		(*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f))
62 
63 #define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \
64 	    GET_FIELD(_msg_buf, _msg_type ## _ ## _f)
65 
66 #define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \
67 	    SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val)
68 
69 #define WMI_EP_APASS           0x0
70 #define WMI_EP_LPASS           0x1
71 #define WMI_EP_SENSOR          0x2
72 
73 #define WMI_INFOS_DBG_FILE_PERM (QDF_FILE_USR_READ | \
74 				 QDF_FILE_USR_WRITE | \
75 				 QDF_FILE_GRP_READ | \
76 				 QDF_FILE_OTH_READ)
77 
78 /*
79  * Control Path
80  */
81 typedef PREPACK struct {
82 	uint32_t	commandId:24,
83 			reserved:2, /* used for WMI endpoint ID */
84 			plt_priv:6; /* platform private */
85 } POSTPACK WMI_CMD_HDR;        /* used for commands and events */
86 
87 #define WMI_CMD_HDR_COMMANDID_LSB           0
88 #define WMI_CMD_HDR_COMMANDID_MASK          0x00ffffff
89 #define WMI_CMD_HDR_COMMANDID_OFFSET        0x00000000
90 #define WMI_CMD_HDR_WMI_ENDPOINTID_MASK        0x03000000
91 #define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET      24
92 #define WMI_CMD_HDR_PLT_PRIV_LSB               24
93 #define WMI_CMD_HDR_PLT_PRIV_MASK              0xff000000
94 #define WMI_CMD_HDR_PLT_PRIV_OFFSET            0x00000000
95 /* end of copy wmi.h */
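
/*
 * Illustration (not part of the copied wmi.h block): the helpers above are
 * combined to access individual WMI_CMD_HDR fields. For example, reading the
 * command ID from a message buffer expands roughly as follows:
 *
 *   id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
 *   // -> MS(*((uint32_t *)(buf) + WO(WMI_CMD_HDR_COMMANDID)),
 *   //       WMI_CMD_HDR_COMMANDID)
 *   // -> (*(uint32_t *)buf & 0x00ffffff) >> 0
 *
 * since WMI_CMD_HDR_COMMANDID_OFFSET is 0 and the mask/LSB pair selects the
 * low 24 bits holding commandId.
 */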
96 
97 #define WMI_MIN_HEAD_ROOM 64
98 
99 /* WBUFF pool sizes for WMI */
100 /* Allocation of size 256 bytes */
101 #define WMI_WBUFF_POOL_0_SIZE 128
102 /* Allocation of size 512 bytes */
103 #define WMI_WBUFF_POOL_1_SIZE 16
104 /* Allocation of size 1024 bytes */
105 #define WMI_WBUFF_POOL_2_SIZE 8
106 /* Allocation of size 2048 bytes */
107 #define WMI_WBUFF_POOL_3_SIZE 8
108 
109 #define RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT 500
110 
111 #ifdef WMI_INTERFACE_EVENT_LOGGING
112 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
113 /* TODO Cleanup this backported function */
114 static int wmi_bp_seq_printf(qdf_debugfs_file_t m, const char *f, ...)
115 {
116 	va_list args;
117 
118 	va_start(args, f);
119 	seq_vprintf(m, f, args);
120 	va_end(args);
121 
122 	return 0;
123 }
124 #else
125 #define wmi_bp_seq_printf(m, fmt, ...) seq_printf((m), fmt, ##__VA_ARGS__)
126 #endif
127 
128 #ifndef MAX_WMI_INSTANCES
129 #define CUSTOM_MGMT_CMD_DATA_SIZE 4
130 #endif
131 
132 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
133 /* WMI commands */
134 uint32_t g_wmi_command_buf_idx = 0;
135 struct wmi_command_debug wmi_command_log_buffer[WMI_CMD_DEBUG_MAX_ENTRY];
136 
137 /* WMI commands TX completed */
138 uint32_t g_wmi_command_tx_cmp_buf_idx = 0;
139 struct wmi_command_cmp_debug
140 	wmi_command_tx_cmp_log_buffer[WMI_CMD_CMPL_DEBUG_MAX_ENTRY];
141 
142 /* WMI events when processed */
143 uint32_t g_wmi_event_buf_idx = 0;
144 struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
145 
146 /* WMI events when queued */
147 uint32_t g_wmi_rx_event_buf_idx = 0;
148 struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
149 #endif
150 
151 static void wmi_minidump_detach(struct wmi_unified *wmi_handle)
152 {
153 	struct wmi_log_buf_t *info =
154 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
155 	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
156 
157 	qdf_minidump_remove(info->buf, buf_size, "wmi_tx_cmp");
158 }
159 
160 static void wmi_minidump_attach(struct wmi_unified *wmi_handle)
161 {
162 	struct wmi_log_buf_t *info =
163 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
164 	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
165 
166 	qdf_minidump_log(info->buf, buf_size, "wmi_tx_cmp");
167 }
168 
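/*
 * In short, each WMI_*_RECORD macro below implements the same circular-log
 * pattern on its per-type wmi_log_buf_t: wrap the tail index once it reaches
 * the configured maximum, store the command/event ID plus up to
 * wmi_record_max_length bytes of payload, timestamp the entry, then advance
 * the tail index and bump the total length. Callers take
 * log_info.wmi_record_lock around the macro (see wmi_mgmt_cmd_record()).
 */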
169 #define WMI_COMMAND_RECORD(h, a, b) {					\
170 	if (wmi_cmd_log_max_entry <=					\
171 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))	\
172 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\
173 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
174 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\
175 						.command = a;		\
176 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
177 				wmi_command_log_buf_info.buf)		\
178 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\
179 			b, wmi_record_max_length);			\
180 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
181 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\
182 		time = qdf_get_log_timestamp();			\
183 	(*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++;	\
184 	h->log_info.wmi_command_log_buf_info.length++;			\
185 }
186 
187 #define WMI_COMMAND_TX_CMP_RECORD(h, a, b, da, pa) {			\
188 	if (wmi_cmd_cmpl_log_max_entry <=				\
189 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\
190 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
191 				p_buf_tail_idx) = 0;			\
192 	((struct wmi_command_cmp_debug *)h->log_info.			\
193 		wmi_command_tx_cmp_log_buf_info.buf)			\
194 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
195 				p_buf_tail_idx)].			\
196 							command	= a;	\
197 	qdf_mem_copy(((struct wmi_command_cmp_debug *)h->log_info.	\
198 				wmi_command_tx_cmp_log_buf_info.buf)	\
199 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
200 			p_buf_tail_idx)].				\
201 		data, b, wmi_record_max_length);			\
202 	((struct wmi_command_cmp_debug *)h->log_info.			\
203 		wmi_command_tx_cmp_log_buf_info.buf)			\
204 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
205 				p_buf_tail_idx)].			\
206 		time = qdf_get_log_timestamp();				\
207 	((struct wmi_command_cmp_debug *)h->log_info.			\
208 		wmi_command_tx_cmp_log_buf_info.buf)			\
209 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
210 				p_buf_tail_idx)].			\
211 		dma_addr = da;						\
212 	((struct wmi_command_cmp_debug *)h->log_info.			\
213 		wmi_command_tx_cmp_log_buf_info.buf)			\
214 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
215 				p_buf_tail_idx)].			\
216 		phy_addr = pa;						\
217 	(*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\
218 	h->log_info.wmi_command_tx_cmp_log_buf_info.length++;		\
219 }
220 
221 #define WMI_EVENT_RECORD(h, a, b) {					\
222 	if (wmi_event_log_max_entry <=					\
223 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))	\
224 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\
225 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
226 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].	\
227 		event = a;						\
228 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
229 				wmi_event_log_buf_info.buf)		\
230 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\
231 		wmi_record_max_length);					\
232 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
233 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\
234 		qdf_get_log_timestamp();				\
235 	(*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++;	\
236 	h->log_info.wmi_event_log_buf_info.length++;			\
237 }
238 
239 #define WMI_RX_EVENT_RECORD(h, a, b) {					\
240 	if (wmi_event_log_max_entry <=					\
241 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\
242 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\
243 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
244 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
245 		event = a;						\
246 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
247 				wmi_rx_event_log_buf_info.buf)		\
248 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
249 			data, b, wmi_record_max_length);		\
250 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
251 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
252 		time =	qdf_get_log_timestamp();			\
253 	(*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++;	\
254 	h->log_info.wmi_rx_event_log_buf_info.length++;			\
255 }
256 
257 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
258 uint32_t g_wmi_mgmt_command_buf_idx = 0;
259 struct
260 wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_TX_DEBUG_MAX_ENTRY];
261 
262 /* wmi_mgmt commands TX completed */
263 uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0;
264 struct wmi_command_debug
265 wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY];
266 
267 /* wmi_mgmt events when received */
268 uint32_t g_wmi_mgmt_rx_event_buf_idx = 0;
269 struct wmi_event_debug
270 wmi_mgmt_rx_event_log_buffer[WMI_MGMT_RX_DEBUG_MAX_ENTRY];
271 
272 /* wmi_diag events when received */
273 uint32_t g_wmi_diag_rx_event_buf_idx = 0;
274 struct wmi_event_debug
275 wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY];
276 #endif
277 
278 #define WMI_MGMT_COMMAND_RECORD(h, a, b) {                              \
279 	if (wmi_mgmt_tx_log_max_entry <=                                   \
280 		*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \
281 		*(h->log_info.wmi_mgmt_command_log_buf_info.		\
282 				p_buf_tail_idx) = 0;			\
283 	((struct wmi_command_debug *)h->log_info.                       \
284 		 wmi_mgmt_command_log_buf_info.buf)                     \
285 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
286 			command = a;                                    \
287 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.          \
288 				wmi_mgmt_command_log_buf_info.buf)      \
289 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
290 		data, b,                                                \
291 		wmi_record_max_length);                                	\
292 	((struct wmi_command_debug *)h->log_info.                       \
293 		 wmi_mgmt_command_log_buf_info.buf)                     \
294 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
295 			time =        qdf_get_log_timestamp();          \
296 	(*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\
297 	h->log_info.wmi_mgmt_command_log_buf_info.length++;             \
298 }
299 
300 #define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) {			\
301 	if (wmi_mgmt_tx_cmpl_log_max_entry <=				\
302 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
303 			p_buf_tail_idx))				\
304 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
305 			p_buf_tail_idx) = 0;				\
306 	((struct wmi_command_debug *)h->log_info.			\
307 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
308 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
309 				p_buf_tail_idx)].command = a;		\
310 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
311 				wmi_mgmt_command_tx_cmp_log_buf_info.buf)\
312 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
313 			p_buf_tail_idx)].data, b,			\
314 			wmi_record_max_length);				\
315 	((struct wmi_command_debug *)h->log_info.			\
316 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
317 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
318 				p_buf_tail_idx)].time =			\
319 		qdf_get_log_timestamp();				\
320 	(*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.		\
321 			p_buf_tail_idx))++;				\
322 	h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++;	\
323 }
324 
325 #define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do {				\
326 	if (wmi_mgmt_rx_log_max_entry <=				\
327 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\
328 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\
329 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
330 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\
331 					.event = a;			\
332 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
333 				wmi_mgmt_event_log_buf_info.buf)	\
334 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
335 			data, b, wmi_record_max_length);		\
336 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
337 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
338 			time = qdf_get_log_timestamp();			\
339 	(*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++;	\
340 	h->log_info.wmi_mgmt_event_log_buf_info.length++;		\
341 } while (0);
342 
343 #define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do {                             \
344 	if (wmi_diag_log_max_entry <=                                   \
345 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\
346 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\
347 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
348 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\
349 					.event = a;                     \
350 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.            \
351 				wmi_diag_event_log_buf_info.buf)        \
352 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
353 			data, b, wmi_record_max_length);                \
354 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
355 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
356 			time = qdf_get_log_timestamp();                 \
357 	(*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++;  \
358 	h->log_info.wmi_diag_event_log_buf_info.length++;               \
359 } while (0);
360 
361 /* These are defined so that they can be configured as module parameters */
362 /* WMI Commands */
363 uint32_t wmi_cmd_log_max_entry = WMI_CMD_DEBUG_MAX_ENTRY;
364 uint32_t wmi_cmd_cmpl_log_max_entry = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
365 /* WMI Events */
366 uint32_t wmi_event_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY;
367 /* WMI MGMT Tx */
368 uint32_t wmi_mgmt_tx_log_max_entry = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
369 uint32_t wmi_mgmt_tx_cmpl_log_max_entry = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
370 /* WMI MGMT Rx */
371 uint32_t wmi_mgmt_rx_log_max_entry = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
372 /* WMI Diag Event */
373 uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
374 /* WMI capture size */
375 uint32_t wmi_record_max_length = WMI_DEBUG_ENTRY_MAX_LENGTH;
376 uint32_t wmi_display_size = 100;
377 
378 /**
379  * wmi_log_init() - Initialize WMI event logging
380  * @wmi_handle: WMI handle.
381  *
382  * Return: Initialization status
383  */
384 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
385 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
386 {
387 	struct wmi_log_buf_t *cmd_log_buf =
388 			&wmi_handle->log_info.wmi_command_log_buf_info;
389 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
390 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
391 
392 	struct wmi_log_buf_t *event_log_buf =
393 			&wmi_handle->log_info.wmi_event_log_buf_info;
394 	struct wmi_log_buf_t *rx_event_log_buf =
395 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
396 
397 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
398 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
399 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
400 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
401 	struct wmi_log_buf_t *mgmt_event_log_buf =
402 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
403 	struct wmi_log_buf_t *diag_event_log_buf =
404 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
405 
406 	/* WMI commands */
407 	cmd_log_buf->length = 0;
408 	cmd_log_buf->buf_tail_idx = 0;
409 	cmd_log_buf->buf = wmi_command_log_buffer;
410 	cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx;
411 	cmd_log_buf->size = WMI_CMD_DEBUG_MAX_ENTRY;
412 
413 	/* WMI commands TX completed */
414 	cmd_tx_cmpl_log_buf->length = 0;
415 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
416 	cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer;
417 	cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx;
418 	cmd_tx_cmpl_log_buf->size = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
419 
420 	/* WMI events when processed */
421 	event_log_buf->length = 0;
422 	event_log_buf->buf_tail_idx = 0;
423 	event_log_buf->buf = wmi_event_log_buffer;
424 	event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx;
425 	event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
426 
427 	/* WMI events when queued */
428 	rx_event_log_buf->length = 0;
429 	rx_event_log_buf->buf_tail_idx = 0;
430 	rx_event_log_buf->buf = wmi_rx_event_log_buffer;
431 	rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx;
432 	rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
433 
434 	/* WMI Management commands */
435 	mgmt_cmd_log_buf->length = 0;
436 	mgmt_cmd_log_buf->buf_tail_idx = 0;
437 	mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer;
438 	mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx;
439 	mgmt_cmd_log_buf->size = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
440 
441 	/* WMI Management commands Tx completed*/
442 	mgmt_cmd_tx_cmp_log_buf->length = 0;
443 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
444 	mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer;
445 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
446 		&g_wmi_mgmt_command_tx_cmp_buf_idx;
447 	mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
448 
449 	/* WMI Management events when received */
450 	mgmt_event_log_buf->length = 0;
451 	mgmt_event_log_buf->buf_tail_idx = 0;
452 	mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer;
453 	mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx;
454 	mgmt_event_log_buf->size = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
455 
456 	/* WMI diag events when received */
457 	diag_event_log_buf->length = 0;
458 	diag_event_log_buf->buf_tail_idx = 0;
459 	diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer;
460 	diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx;
461 	diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
462 
463 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
464 	wmi_handle->log_info.wmi_logging_enable = 1;
465 
466 	return QDF_STATUS_SUCCESS;
467 }
468 #else
469 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
470 {
471 	struct wmi_log_buf_t *cmd_log_buf =
472 			&wmi_handle->log_info.wmi_command_log_buf_info;
473 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
474 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
475 
476 	struct wmi_log_buf_t *event_log_buf =
477 			&wmi_handle->log_info.wmi_event_log_buf_info;
478 	struct wmi_log_buf_t *rx_event_log_buf =
479 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
480 
481 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
482 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
483 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
484 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
485 	struct wmi_log_buf_t *mgmt_event_log_buf =
486 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
487 	struct wmi_log_buf_t *diag_event_log_buf =
488 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
489 
490 	wmi_handle->log_info.wmi_logging_enable = 0;
491 
492 	/* WMI commands */
493 	cmd_log_buf->length = 0;
494 	cmd_log_buf->buf_tail_idx = 0;
495 	cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
496 		wmi_cmd_log_max_entry * sizeof(struct wmi_command_debug));
497 	cmd_log_buf->size = wmi_cmd_log_max_entry;
498 
499 	if (!cmd_log_buf->buf)
500 		return QDF_STATUS_E_NOMEM;
501 
502 	cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx;
503 
504 	/* WMI commands TX completed */
505 	cmd_tx_cmpl_log_buf->length = 0;
506 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
507 	cmd_tx_cmpl_log_buf->buf = (struct wmi_command_cmp_debug *) qdf_mem_malloc(
508 		wmi_cmd_cmpl_log_max_entry * sizeof(struct wmi_command_cmp_debug));
509 	cmd_tx_cmpl_log_buf->size = wmi_cmd_cmpl_log_max_entry;
510 
511 	if (!cmd_tx_cmpl_log_buf->buf)
512 		return QDF_STATUS_E_NOMEM;
513 
514 	cmd_tx_cmpl_log_buf->p_buf_tail_idx =
515 		&cmd_tx_cmpl_log_buf->buf_tail_idx;
516 
517 	/* WMI events when processed */
518 	event_log_buf->length = 0;
519 	event_log_buf->buf_tail_idx = 0;
520 	event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
521 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
522 	event_log_buf->size = wmi_event_log_max_entry;
523 
524 	if (!event_log_buf->buf)
525 		return QDF_STATUS_E_NOMEM;
526 
527 	event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx;
528 
529 	/* WMI events when queued */
530 	rx_event_log_buf->length = 0;
531 	rx_event_log_buf->buf_tail_idx = 0;
532 	rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
533 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
534 	rx_event_log_buf->size = wmi_event_log_max_entry;
535 
536 	if (!rx_event_log_buf->buf)
537 		return QDF_STATUS_E_NOMEM;
538 
539 	rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx;
540 
541 	/* WMI Management commands */
542 	mgmt_cmd_log_buf->length = 0;
543 	mgmt_cmd_log_buf->buf_tail_idx = 0;
544 	mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
545 		wmi_mgmt_tx_log_max_entry * sizeof(struct wmi_command_debug));
546 	mgmt_cmd_log_buf->size = wmi_mgmt_tx_log_max_entry;
547 
548 	if (!mgmt_cmd_log_buf->buf)
549 		return QDF_STATUS_E_NOMEM;
550 
551 	mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx;
552 
553 	/* WMI Management commands Tx completed*/
554 	mgmt_cmd_tx_cmp_log_buf->length = 0;
555 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
556 	mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *)
557 		qdf_mem_malloc(
558 		wmi_mgmt_tx_cmpl_log_max_entry *
559 		sizeof(struct wmi_command_debug));
560 	mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_tx_cmpl_log_max_entry;
561 
562 	if (!mgmt_cmd_tx_cmp_log_buf->buf)
563 		return QDF_STATUS_E_NOMEM;
564 
565 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
566 		&mgmt_cmd_tx_cmp_log_buf->buf_tail_idx;
567 
568 	/* WMI Management events when received */
569 	mgmt_event_log_buf->length = 0;
570 	mgmt_event_log_buf->buf_tail_idx = 0;
571 
572 	mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
573 		wmi_mgmt_rx_log_max_entry *
574 		sizeof(struct wmi_event_debug));
575 	mgmt_event_log_buf->size = wmi_mgmt_rx_log_max_entry;
576 
577 	if (!mgmt_event_log_buf->buf)
578 		return QDF_STATUS_E_NOMEM;
579 
580 	mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx;
581 
582 	/* WMI diag events when received */
583 	diag_event_log_buf->length = 0;
584 	diag_event_log_buf->buf_tail_idx = 0;
585 
586 	diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
587 		wmi_diag_log_max_entry *
588 		sizeof(struct wmi_event_debug));
589 	diag_event_log_buf->size = wmi_diag_log_max_entry;
590 
591 	if (!diag_event_log_buf->buf)
592 		return QDF_STATUS_E_NOMEM;
593 
594 	diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx;
595 
596 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
597 	wmi_handle->log_info.wmi_logging_enable = 1;
598 
599 	wmi_filtered_logging_init(wmi_handle);
600 
601 	return QDF_STATUS_SUCCESS;
602 }
603 #endif
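
/*
 * Note: the two wmi_log_init() variants above differ mainly in where the ring
 * storage lives. Without WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC the rings
 * are the fixed global arrays defined earlier, sized by the
 * WMI_*_DEBUG_MAX_ENTRY constants; with it they are allocated per handle via
 * qdf_mem_malloc(), sized by the configurable wmi_*_max_entry globals, and
 * must later be released through wmi_log_buffer_free().
 */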
604 
605 /**
606  * wmi_log_buffer_free() - Free all dynamically allocated buffer memory for
607  * event logging
608  * @wmi_handle: WMI handle.
609  *
610  * Return: None
611  */
612 #ifdef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
613 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
614 {
615 	wmi_filtered_logging_free(wmi_handle);
616 
617 	if (wmi_handle->log_info.wmi_command_log_buf_info.buf)
618 		qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf);
619 	if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf)
620 		qdf_mem_free(
621 		wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf);
622 	if (wmi_handle->log_info.wmi_event_log_buf_info.buf)
623 		qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf);
624 	if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf)
625 		qdf_mem_free(
626 			wmi_handle->log_info.wmi_rx_event_log_buf_info.buf);
627 	if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf)
628 		qdf_mem_free(
629 			wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf);
630 	if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf)
631 		qdf_mem_free(
632 		wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf);
633 	if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf)
634 		qdf_mem_free(
635 			wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf);
636 	if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf)
637 		qdf_mem_free(
638 			wmi_handle->log_info.wmi_diag_event_log_buf_info.buf);
639 	wmi_handle->log_info.wmi_logging_enable = 0;
640 
641 	qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock);
642 }
643 #else
644 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
645 {
646 	/* Do Nothing */
647 }
648 #endif
649 
650 /**
651  * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer
652  * @log_buffer: the command log buffer metadata of the buffer to print
653  * @count: the maximum number of entries to print
654  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
655  * @print_priv: any data required by the print method, e.g. a file handle
656  *
657  * Return: None
658  */
659 static void
660 wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
661 			 qdf_abstract_print *print, void *print_priv)
662 {
663 	static const int data_len =
664 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
665 	char str[128];
666 	uint32_t idx;
667 
668 	if (count > log_buffer->size)
669 		count = log_buffer->size;
670 	if (count > log_buffer->length)
671 		count = log_buffer->length;
672 
673 	/* subtract count from index, and wrap if necessary */
674 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
675 	idx %= log_buffer->size;
676 
677 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
678 	while (count) {
679 		struct wmi_command_debug *cmd_log = (struct wmi_command_debug *)
680 			&((struct wmi_command_debug *)log_buffer->buf)[idx];
681 		uint64_t secs, usecs;
682 		int len = 0;
683 		int i;
684 
685 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
686 		len += scnprintf(str + len, sizeof(str) - len,
687 				 "% 8lld.%06lld    %6u (0x%06x)    ",
688 				 secs, usecs,
689 				 cmd_log->command, cmd_log->command);
690 		for (i = 0; i < data_len; ++i) {
691 			len += scnprintf(str + len, sizeof(str) - len,
692 					 "0x%08x ", cmd_log->data[i]);
693 		}
694 
695 		print(print_priv, str);
696 
697 		--count;
698 		++idx;
699 		if (idx >= log_buffer->size)
700 			idx = 0;
701 	}
702 }
703 
704 /**
705  * wmi_print_cmd_cmp_log_buffer() - wmi command completion log printer
706  * @log_buffer: the command completion log buffer metadata of the buffer to print
707  * @count: the maximum number of entries to print
708  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
709  * @print_priv: any data required by the print method, e.g. a file handle
710  *
711  * Return: None
712  */
713 static void
714 wmi_print_cmd_cmp_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
715 			 qdf_abstract_print *print, void *print_priv)
716 {
717 	static const int data_len =
718 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
719 	char str[128];
720 	uint32_t idx;
721 
722 	if (count > log_buffer->size)
723 		count = log_buffer->size;
724 	if (count > log_buffer->length)
725 		count = log_buffer->length;
726 
727 	/* subtract count from index, and wrap if necessary */
728 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
729 	idx %= log_buffer->size;
730 
731 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
732 	while (count) {
733 		struct wmi_command_cmp_debug *cmd_log = (struct wmi_command_cmp_debug *)
734 			&((struct wmi_command_cmp_debug *)log_buffer->buf)[idx];
735 		uint64_t secs, usecs;
736 		int len = 0;
737 		int i;
738 
739 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
740 		len += scnprintf(str + len, sizeof(str) - len,
741 				 "% 8lld.%06lld    %6u (0x%06x)    ",
742 				 secs, usecs,
743 				 cmd_log->command, cmd_log->command);
744 		for (i = 0; i < data_len; ++i) {
745 			len += scnprintf(str + len, sizeof(str) - len,
746 					 "0x%08x ", cmd_log->data[i]);
747 		}
748 
749 		print(print_priv, str);
750 
751 		--count;
752 		++idx;
753 		if (idx >= log_buffer->size)
754 			idx = 0;
755 	}
756 }
757 
758 /**
759  * wmi_print_event_log_buffer() - an output agnostic wmi event log printer
760  * @log_buffer: the event log buffer metadata of the buffer to print
761  * @count: the maximum number of entries to print
762  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
763  * @print_priv: any data required by the print method, e.g. a file handle
764  *
765  * Return: None
766  */
767 static void
768 wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
769 			   qdf_abstract_print *print, void *print_priv)
770 {
771 	static const int data_len =
772 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
773 	char str[128];
774 	uint32_t idx;
775 
776 	if (count > log_buffer->size)
777 		count = log_buffer->size;
778 	if (count > log_buffer->length)
779 		count = log_buffer->length;
780 
781 	/* subtract count from index, and wrap if necessary */
782 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
783 	idx %= log_buffer->size;
784 
785 	print(print_priv, "Time (seconds)      Event Id             Payload");
786 	while (count) {
787 		struct wmi_event_debug *event_log = (struct wmi_event_debug *)
788 			&((struct wmi_event_debug *)log_buffer->buf)[idx];
789 		uint64_t secs, usecs;
790 		int len = 0;
791 		int i;
792 
793 		qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs);
794 		len += scnprintf(str + len, sizeof(str) - len,
795 				 "% 8lld.%06lld    %6u (0x%06x)    ",
796 				 secs, usecs,
797 				 event_log->event, event_log->event);
798 		for (i = 0; i < data_len; ++i) {
799 			len += scnprintf(str + len, sizeof(str) - len,
800 					 "0x%08x ", event_log->data[i]);
801 		}
802 
803 		print(print_priv, str);
804 
805 		--count;
806 		++idx;
807 		if (idx >= log_buffer->size)
808 			idx = 0;
809 	}
810 }
811 
812 inline void
813 wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count,
814 		  qdf_abstract_print *print, void *print_priv)
815 {
816 	wmi_print_cmd_log_buffer(
817 		&wmi->log_info.wmi_command_log_buf_info,
818 		count, print, print_priv);
819 }
820 
821 inline void
822 wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
823 			 qdf_abstract_print *print, void *print_priv)
824 {
825 	wmi_print_cmd_cmp_log_buffer(
826 		&wmi->log_info.wmi_command_tx_cmp_log_buf_info,
827 		count, print, print_priv);
828 }
829 
830 inline void
831 wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count,
832 		       qdf_abstract_print *print, void *print_priv)
833 {
834 	wmi_print_cmd_log_buffer(
835 		&wmi->log_info.wmi_mgmt_command_log_buf_info,
836 		count, print, print_priv);
837 }
838 
839 inline void
840 wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
841 			      qdf_abstract_print *print, void *print_priv)
842 {
843 	wmi_print_cmd_log_buffer(
844 		&wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info,
845 		count, print, print_priv);
846 }
847 
848 inline void
849 wmi_print_event_log(wmi_unified_t wmi, uint32_t count,
850 		    qdf_abstract_print *print, void *print_priv)
851 {
852 	wmi_print_event_log_buffer(
853 		&wmi->log_info.wmi_event_log_buf_info,
854 		count, print, print_priv);
855 }
856 
857 inline void
858 wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count,
859 		       qdf_abstract_print *print, void *print_priv)
860 {
861 	wmi_print_event_log_buffer(
862 		&wmi->log_info.wmi_rx_event_log_buf_info,
863 		count, print, print_priv);
864 }
865 
866 inline void
867 wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count,
868 			 qdf_abstract_print *print, void *print_priv)
869 {
870 	wmi_print_event_log_buffer(
871 		&wmi->log_info.wmi_mgmt_event_log_buf_info,
872 		count, print, print_priv);
873 }
874 
875 
876 /* debugfs routines*/
877 
878 /**
879  * debug_wmi_##func_base##_show() - debugfs functions to display the contents
880  * of the command and event buffers. The macro uses the maximum buffer length
881  * to display the buffer once it has wrapped around.
882  *
883  * @m: debugfs handler to access wmi_handle
884  * @v: Variable arguments (not used)
885  *
886  * Return: Length of characters printed
887  */
888 #define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
889 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
890 						void *v)		\
891 	{								\
892 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
893 		struct wmi_log_buf_t *wmi_log =				\
894 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
895 		int pos, nread, outlen;					\
896 		int i;							\
897 		uint64_t secs, usecs;					\
898 									\
899 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
900 		if (!wmi_log->length) {					\
901 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
902 			return wmi_bp_seq_printf(m,			\
903 			"no elements to read from ring buffer!\n");	\
904 		}							\
905 									\
906 		if (wmi_log->length <= wmi_ring_size)			\
907 			nread = wmi_log->length;			\
908 		else							\
909 			nread = wmi_ring_size;				\
910 									\
911 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
912 			/* tail can be 0 after wrap-around */		\
913 			pos = wmi_ring_size - 1;			\
914 		else							\
915 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
916 									\
917 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
918 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
919 		while (nread--) {					\
920 			struct wmi_record_type *wmi_record;		\
921 									\
922 			wmi_record = (struct wmi_record_type *)	\
923 			&(((struct wmi_record_type *)wmi_log->buf)[pos]);\
924 			outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n",	\
925 				(wmi_record->command));			\
926 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
927 				&usecs);				\
928 			outlen +=					\
929 			wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\
930 				secs, usecs);				\
931 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
932 			for (i = 0; i < (wmi_record_max_length/		\
933 					sizeof(uint32_t)); i++)		\
934 				outlen += wmi_bp_seq_printf(m, "%x ",	\
935 					wmi_record->data[i]);		\
936 			outlen += wmi_bp_seq_printf(m, "\n");		\
937 									\
938 			if (pos == 0)					\
939 				pos = wmi_ring_size - 1;		\
940 			else						\
941 				pos--;					\
942 		}							\
943 		return outlen;						\
944 	}								\
945 
946 #define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size)	\
947 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
948 						void *v)		\
949 	{								\
950 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
951 		struct wmi_log_buf_t *wmi_log =				\
952 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
953 		int pos, nread, outlen;					\
954 		int i;							\
955 		uint64_t secs, usecs;					\
956 									\
957 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
958 		if (!wmi_log->length) {					\
959 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
960 			return wmi_bp_seq_printf(m,			\
961 			"no elements to read from ring buffer!\n");	\
962 		}							\
963 									\
964 		if (wmi_log->length <= wmi_ring_size)			\
965 			nread = wmi_log->length;			\
966 		else							\
967 			nread = wmi_ring_size;				\
968 									\
969 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
970 			/* tail can be 0 after wrap-around */		\
971 			pos = wmi_ring_size - 1;			\
972 		else							\
973 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
974 									\
975 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
976 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
977 		while (nread--) {					\
978 			struct wmi_event_debug *wmi_record;		\
979 									\
980 			wmi_record = (struct wmi_event_debug *)		\
981 			&(((struct wmi_event_debug *)wmi_log->buf)[pos]);\
982 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
983 				&usecs);				\
984 			outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\
985 				(wmi_record->event));			\
986 			outlen +=					\
987 			wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\
988 				secs, usecs);				\
989 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
990 			for (i = 0; i < (wmi_record_max_length/		\
991 					sizeof(uint32_t)); i++)		\
992 				outlen += wmi_bp_seq_printf(m, "%x ",	\
993 					wmi_record->data[i]);		\
994 			outlen += wmi_bp_seq_printf(m, "\n");		\
995 									\
996 			if (pos == 0)					\
997 				pos = wmi_ring_size - 1;		\
998 			else						\
999 				pos--;					\
1000 		}							\
1001 		return outlen;						\
1002 	}
1003 
1004 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size,
1005 				  wmi_command_debug);
1006 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size,
1007 				  wmi_command_cmp_debug);
1008 GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size);
1009 GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size);
1010 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size,
1011 				  wmi_command_debug);
1012 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log,
1013 					wmi_display_size,
1014 					wmi_command_debug);
1015 GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size);
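
/*
 * For reference, each instantiation above expands into a seq_file show
 * handler named after its ring. For example,
 * GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size,
 * wmi_command_debug) defines:
 *
 *   static int debug_wmi_command_log_show(struct seq_file *m, void *v);
 *
 * which walks log_info.wmi_command_log_buf_info backwards from the tail and
 * prints at most wmi_display_size entries. These handlers are hooked into
 * debugfs through GENERATE_DEBUG_STRUCTS() and wmi_debugfs_infos[] below.
 */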
1016 
1017 /**
1018  * debug_wmi_enable_show() - debugfs function to display the enable state of
1019  * the wmi logging feature.
1020  *
1021  * @m: debugfs handler to access wmi_handle
1022  * @v: Variable arguments (not used)
1023  *
1024  * Return: always 1
1025  */
1026 static int debug_wmi_enable_show(struct seq_file *m, void *v)
1027 {
1028 	wmi_unified_t wmi_handle = (wmi_unified_t) m->private;
1029 
1030 	return wmi_bp_seq_printf(m, "%d\n",
1031 			wmi_handle->log_info.wmi_logging_enable);
1032 }
1033 
1034 /**
1035  * debug_wmi_log_size_show() - debugfs function to display the configured
1036  * sizes of the wmi command/event and management command/event log buffers.
1037  *
1038  * @m: debugfs handler to access wmi_handle
1039  * @v: Variable arguments (not used)
1040  *
1041  * Return: Length of characters printed
1042  */
1043 static int debug_wmi_log_size_show(struct seq_file *m, void *v)
1044 {
1045 
1046 	wmi_bp_seq_printf(m, "WMI command/cmpl log max size:%d/%d\n",
1047 			  wmi_cmd_log_max_entry, wmi_cmd_cmpl_log_max_entry);
1048 	wmi_bp_seq_printf(m, "WMI management Tx/cmpl log max size:%d/%d\n",
1049 			  wmi_mgmt_tx_log_max_entry,
1050 			  wmi_mgmt_tx_cmpl_log_max_entry);
1051 	wmi_bp_seq_printf(m, "WMI event log max size:%d\n",
1052 			  wmi_event_log_max_entry);
1053 	wmi_bp_seq_printf(m, "WMI management Rx log max size:%d\n",
1054 			  wmi_mgmt_rx_log_max_entry);
1055 	return wmi_bp_seq_printf(m,
1056 				 "WMI diag log max size:%d\n",
1057 				 wmi_diag_log_max_entry);
1058 }
1059 
1060 /**
1061  * debug_wmi_##func_base##_write() - debugfs functions to clear
1062  * wmi logging command/event buffer and management command/event buffer.
1063  *
1064  * @file: file handler to access wmi_handle
1065  * @buf: received data buffer
1066  * @count: length of received buffer
1067  * @ppos: Not used
1068  *
1069  * Return: count
1070  */
1071 #define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
1072 	static ssize_t debug_wmi_##func_base##_write(struct file *file,	\
1073 				const char __user *buf,			\
1074 				size_t count, loff_t *ppos)		\
1075 	{								\
1076 		int k, ret;						\
1077 		wmi_unified_t wmi_handle =				\
1078 			((struct seq_file *)file->private_data)->private;\
1079 		struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info.	\
1080 				wmi_##func_base##_buf_info;		\
1081 		char locbuf[50];					\
1082 									\
1083 		if ((!buf) || (count > 50))				\
1084 			return -EFAULT;					\
1085 									\
1086 		if (copy_from_user(locbuf, buf, count))			\
1087 			return -EFAULT;					\
1088 									\
1089 		ret = sscanf(locbuf, "%d", &k);				\
1090 		if ((ret != 1) || (k != 0)) {                           \
1091 			wmi_err("Wrong input, echo 0 to clear the wmi buffer");\
1092 			return -EINVAL;					\
1093 		}							\
1094 									\
1095 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
1096 		qdf_mem_zero(wmi_log->buf, wmi_ring_size *		\
1097 				sizeof(struct wmi_record_type));	\
1098 		wmi_log->length = 0;					\
1099 		*(wmi_log->p_buf_tail_idx) = 0;				\
1100 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1101 									\
1102 		return count;						\
1103 	}
1104 
1105 GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_cmd_log_max_entry,
1106 			   wmi_command_debug);
1107 GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_cmd_cmpl_log_max_entry,
1108 			   wmi_command_cmp_debug);
1109 GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_event_log_max_entry,
1110 			   wmi_event_debug);
1111 GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_event_log_max_entry,
1112 			   wmi_event_debug);
1113 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_tx_log_max_entry,
1114 			   wmi_command_debug);
1115 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log,
1116 			   wmi_mgmt_tx_cmpl_log_max_entry, wmi_command_debug);
1117 GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_rx_log_max_entry,
1118 			   wmi_event_debug);
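
/*
 * Usage sketch: the generated write handlers accept only "0" and use it to
 * clear the corresponding ring. Assuming debugfs is mounted at the usual
 * /sys/kernel/debug and the per-pdev directory created by wmi_debugfs_init()
 * below, for example:
 *
 *   echo 0 > /sys/kernel/debug/WMI_SOC0_PDEV0/wmi_command_log
 *
 * zeroes the command log buffer and resets its length and tail index; any
 * other input is rejected with -EINVAL.
 */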
1119 
1120 /**
1121  * debug_wmi_enable_write() - debugfs function to enable/disable the
1122  * wmi logging feature.
1123  *
1124  * @file: file handler to access wmi_handle
1125  * @buf: received data buffer
1126  * @count: length of received buffer
1127  * @ppos: Not used
1128  *
1129  * Return: count
1130  */
1131 static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf,
1132 					size_t count, loff_t *ppos)
1133 {
1134 	wmi_unified_t wmi_handle =
1135 		((struct seq_file *)file->private_data)->private;
1136 	int k, ret;
1137 	char locbuf[50];
1138 
1139 	if ((!buf) || (count > 50))
1140 		return -EFAULT;
1141 
1142 	if (copy_from_user(locbuf, buf, count))
1143 		return -EFAULT;
1144 
1145 	ret = sscanf(locbuf, "%d", &k);
1146 	if ((ret != 1) || ((k != 0) && (k != 1)))
1147 		return -EINVAL;
1148 
1149 	wmi_handle->log_info.wmi_logging_enable = k;
1150 	return count;
1151 }
1152 
1153 /**
1154  * debug_wmi_log_size_write() - reserved.
1155  *
1156  * @file: file handler to access wmi_handle
1157  * @buf: received data buffer
1158  * @count: length of received buffer
1159  * @ppos: Not used
1160  *
1161  * Return: count
1162  */
1163 static ssize_t debug_wmi_log_size_write(struct file *file,
1164 		const char __user *buf, size_t count, loff_t *ppos)
1165 {
1166 	return -EINVAL;
1167 }
1168 
1169 /* Structure to maintain debug information */
1170 struct wmi_debugfs_info {
1171 	const char *name;
1172 	const struct file_operations *ops;
1173 };
1174 
1175 #define DEBUG_FOO(func_base) { .name = #func_base,			\
1176 	.ops = &debug_##func_base##_ops }
1177 
1178 /**
1179  * debug_##func_base##_open() - Open debugfs entry for respective command
1180  * and event buffer.
1181  *
1182  * @inode: node for debug dir entry
1183  * @file: file handler
1184  *
1185  * Return: open status
1186  */
1187 #define GENERATE_DEBUG_STRUCTS(func_base)				\
1188 	static int debug_##func_base##_open(struct inode *inode,	\
1189 						struct file *file)	\
1190 	{								\
1191 		return single_open(file, debug_##func_base##_show,	\
1192 				inode->i_private);			\
1193 	}								\
1194 									\
1195 									\
1196 	static struct file_operations debug_##func_base##_ops = {	\
1197 		.open		= debug_##func_base##_open,		\
1198 		.read		= seq_read,				\
1199 		.llseek		= seq_lseek,				\
1200 		.write		= debug_##func_base##_write,		\
1201 		.release	= single_release,			\
1202 	};
1203 
1204 GENERATE_DEBUG_STRUCTS(wmi_command_log);
1205 GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log);
1206 GENERATE_DEBUG_STRUCTS(wmi_event_log);
1207 GENERATE_DEBUG_STRUCTS(wmi_rx_event_log);
1208 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log);
1209 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log);
1210 GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log);
1211 GENERATE_DEBUG_STRUCTS(wmi_enable);
1212 GENERATE_DEBUG_STRUCTS(wmi_log_size);
1213 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1214 GENERATE_DEBUG_STRUCTS(filtered_wmi_cmds);
1215 GENERATE_DEBUG_STRUCTS(filtered_wmi_evts);
1216 GENERATE_DEBUG_STRUCTS(wmi_filtered_command_log);
1217 GENERATE_DEBUG_STRUCTS(wmi_filtered_event_log);
1218 #endif
1219 
1220 struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = {
1221 	DEBUG_FOO(wmi_command_log),
1222 	DEBUG_FOO(wmi_command_tx_cmp_log),
1223 	DEBUG_FOO(wmi_event_log),
1224 	DEBUG_FOO(wmi_rx_event_log),
1225 	DEBUG_FOO(wmi_mgmt_command_log),
1226 	DEBUG_FOO(wmi_mgmt_command_tx_cmp_log),
1227 	DEBUG_FOO(wmi_mgmt_event_log),
1228 	DEBUG_FOO(wmi_enable),
1229 	DEBUG_FOO(wmi_log_size),
1230 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1231 	DEBUG_FOO(filtered_wmi_cmds),
1232 	DEBUG_FOO(filtered_wmi_evts),
1233 	DEBUG_FOO(wmi_filtered_command_log),
1234 	DEBUG_FOO(wmi_filtered_event_log),
1235 #endif
1236 };
1237 
1238 /**
1239  * wmi_debugfs_create() - Create debugfs entries for wmi logging.
1240  *
1241  * @wmi_handle: wmi handle
1242  * @par_entry: debug directory entry
1244  *
1245  * Return: none
1246  */
1247 static void wmi_debugfs_create(wmi_unified_t wmi_handle,
1248 			       struct dentry *par_entry)
1249 {
1250 	int i;
1251 
1252 	if (!par_entry)
1253 		goto out;
1254 
1255 	for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1256 		wmi_handle->debugfs_de[i] = qdf_debugfs_create_entry(
1257 						wmi_debugfs_infos[i].name,
1258 						WMI_INFOS_DBG_FILE_PERM,
1259 						par_entry,
1260 						wmi_handle,
1261 						wmi_debugfs_infos[i].ops);
1262 
1263 		if (!wmi_handle->debugfs_de[i]) {
1264 			wmi_err("debug Entry creation failed!");
1265 			goto out;
1266 		}
1267 	}
1268 
1269 	return;
1270 
1271 out:
1272 	wmi_err("debug Entry creation failed!");
1273 	wmi_log_buffer_free(wmi_handle);
1274 	return;
1275 }
1276 
1277 /**
1278  * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
1279  * @wmi_handle: wmi handle
1282  *
1283  * Return: none
1284  */
1285 static void wmi_debugfs_remove(wmi_unified_t wmi_handle)
1286 {
1287 	int i;
1288 	struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir;
1289 
1290 	if (dentry) {
1291 		for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1292 			if (wmi_handle->debugfs_de[i])
1293 				wmi_handle->debugfs_de[i] = NULL;
1294 		}
1295 	}
1296 
1297 	if (dentry)
1298 		qdf_debugfs_remove_dir_recursive(dentry);
1299 }
1300 
1301 /**
1302  * wmi_debugfs_init() - debugfs function to create the debugfs directory
1303  * and entries for wmi logging.
1304  *
1305  * @wmi_handle: wmi handle
 * @pdev_idx: pdev index
1306  *
1307  * Return: init status
1308  */
1309 static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx)
1310 {
1311 	char buf[32];
1312 
1313 	snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u",
1314 		 wmi_handle->soc->soc_idx, pdev_idx);
1315 
1316 	wmi_handle->log_info.wmi_log_debugfs_dir =
1317 		qdf_debugfs_create_dir(buf, NULL);
1318 
1319 	if (!wmi_handle->log_info.wmi_log_debugfs_dir) {
1320 		wmi_err("error while creating debugfs dir for %s", buf);
1321 		return QDF_STATUS_E_FAILURE;
1322 	}
1323 	wmi_debugfs_create(wmi_handle,
1324 			   wmi_handle->log_info.wmi_log_debugfs_dir);
1325 
1326 	return QDF_STATUS_SUCCESS;
1327 }
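
/*
 * Resulting layout (sketch, assuming the default /sys/kernel/debug mount):
 * wmi_debugfs_init() creates one directory per SOC/PDEV pair and
 * wmi_debugfs_create() populates it with one file per wmi_debugfs_infos[]
 * entry, for example:
 *
 *   /sys/kernel/debug/WMI_SOC0_PDEV0/wmi_command_log
 *   /sys/kernel/debug/WMI_SOC0_PDEV0/wmi_enable
 *
 * Reading a file dumps the corresponding ring; writing clears the ring for
 * the log entries, toggles logging for wmi_enable, and is rejected for
 * wmi_log_size.
 */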
1328 
1329 /**
1330  * wmi_mgmt_cmd_record() - Wrapper function for mgmt command logging macro
1331  *
1332  * @wmi_handle: wmi handle
1333  * @cmd: mgmt command
1334  * @header: pointer to 802.11 header
1335  * @vdev_id: vdev id
1336  * @chanfreq: channel frequency
1337  *
1338  * Return: none
1339  */
1340 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1341 			void *header, uint32_t vdev_id, uint32_t chanfreq)
1342 {
1343 
1344 	uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE];
1345 
1346 	data[0] = ((struct wmi_command_header *)header)->type;
1347 	data[1] = ((struct wmi_command_header *)header)->sub_type;
1348 	data[2] = vdev_id;
1349 	data[3] = chanfreq;
1350 
1351 	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
1352 
1353 	WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data);
1354 	wmi_specific_cmd_record(wmi_handle, cmd, (uint8_t *)data);
1355 	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
1356 }
1357 #else
1358 /**
1359  * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
1360  * @wmi_handle: wmi handle
1363  *
1364  * Return: none
1365  */
1366 static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { }
1367 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1368 			void *header, uint32_t vdev_id, uint32_t chanfreq) { }
1369 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { }
1370 static void wmi_minidump_detach(struct wmi_unified *wmi_handle) { }
1371 static void wmi_minidump_attach(struct wmi_unified *wmi_handle) { }
1372 #endif /*WMI_INTERFACE_EVENT_LOGGING */
1373 qdf_export_symbol(wmi_mgmt_cmd_record);
1374 
1375 #ifdef WMI_EXT_DBG
1376 
1377 /**
1378  * wmi_ext_dbg_msg_enqueue() - enqueue wmi message
1379  * @wmi_handle: wmi handler
 * @msg: wmi message to enqueue
1380  *
1381  * Return: size of wmi message queue after enqueue
1382  */
1383 static uint32_t wmi_ext_dbg_msg_enqueue(struct wmi_unified *wmi_handle,
1384 					struct wmi_ext_dbg_msg *msg)
1385 {
1386 	uint32_t list_size;
1387 
1388 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1389 	qdf_list_insert_back_size(&wmi_handle->wmi_ext_dbg_msg_queue,
1390 				  &msg->node, &list_size);
1391 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1392 
1393 	return list_size;
1394 }
1395 
1396 /**
1397  * wmi_ext_dbg_msg_dequeue() - dequeue wmi message
1398  * @wmi_handle: wmi handler
1399  *
1400  * Return: wmi msg on success else NULL
1401  */
1402 static struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_dequeue(struct wmi_unified
1403 						       *wmi_handle)
1404 {
1405 	qdf_list_node_t *list_node = NULL;
1406 
1407 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1408 	qdf_list_remove_front(&wmi_handle->wmi_ext_dbg_msg_queue, &list_node);
1409 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1410 
1411 	if (!list_node)
1412 		return NULL;
1413 
1414 	return qdf_container_of(list_node, struct wmi_ext_dbg_msg, node);
1415 }
1416 
1417 /**
1418  * wmi_ext_dbg_msg_record() - record wmi messages
1419  * @wmi_handle: wmi handler
1420  * @buf: wmi message buffer
1421  * @len: wmi message length
1422  * @type: wmi message type
1423  *
1424  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1425  */
1426 static QDF_STATUS wmi_ext_dbg_msg_record(struct wmi_unified *wmi_handle,
1427 					 uint8_t *buf, uint32_t len,
1428 					 enum WMI_MSG_TYPE type)
1429 {
1430 	struct wmi_ext_dbg_msg *msg;
1431 	uint32_t list_size;
1432 
1433 	msg = wmi_ext_dbg_msg_get(len);
1434 	if (!msg)
1435 		return QDF_STATUS_E_NOMEM;
1436 
1437 	msg->len = len;
1438 	msg->type = type;
1439 	qdf_mem_copy(msg->buf, buf, len);
1440 	msg->ts = qdf_get_log_timestamp();
1441 	list_size = wmi_ext_dbg_msg_enqueue(wmi_handle, msg);
1442 
1443 	if (list_size >= wmi_handle->wmi_ext_dbg_msg_queue_size) {
1444 		msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1445 		wmi_ext_dbg_msg_put(msg);
1446 	}
1447 
1448 	return QDF_STATUS_SUCCESS;
1449 }
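
/*
 * Note: the queue stays bounded. Once the enqueue above pushes the depth to
 * wmi_ext_dbg_msg_queue_size (set to WMI_EXT_DBG_QUEUE_SIZE in
 * wmi_ext_dbgfs_init()), the oldest message is dequeued and freed, so the
 * extended-debug dump always holds the most recent traffic.
 */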
1450 
1451 /**
1452  * wmi_ext_dbg_msg_cmd_record() - record wmi command messages
1453  * @wmi_handle: wmi handler
1454  * @buf: wmi command buffer
1455  * @len: wmi command message length
1456  *
1457  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1458  */
1459 static QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified *wmi_handle,
1460 					     uint8_t *buf, uint32_t len)
1461 {
1462 	return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1463 				      WMI_MSG_TYPE_CMD);
1464 }
1465 
1466 /**
1467  * wmi_ext_dbg_msg_event_record() - record wmi event messages
1468  * @wmi_handle: wmi handler
1469  * @buf: wmi event buffer
1470  * @len: wmi event message length
1471  *
1472  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1473  */
1474 static QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified *wmi_handle,
1475 					       uint8_t *buf, uint32_t len)
1476 {
1477 	uint32_t id;
1478 
1479 	id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
1480 	if (id != wmi_handle->wmi_events[wmi_diag_event_id])
1481 		return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1482 					      WMI_MSG_TYPE_EVENT);
1483 
1484 	return QDF_STATUS_SUCCESS;
1485 }
1486 
1487 /**
1488  * wmi_ext_dbg_msg_queue_init() - create the wmi message queue and its lock
1489  * @wmi_handle: wmi handler
1490  *
1491  * Return: none
1492  */
1493 static void wmi_ext_dbg_msg_queue_init(struct wmi_unified *wmi_handle)
1494 {
1495 	qdf_list_create(&wmi_handle->wmi_ext_dbg_msg_queue,
1496 			wmi_handle->wmi_ext_dbg_msg_queue_size);
1497 	qdf_spinlock_create(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1498 }
1499 
1500 /**
1501  * wmi_ext_dbg_msg_queue_deinit() - destroy the wmi message queue and its lock
1502  * @wmi_handle: wmi handler
1503  *
1504  * Return: none
1505  */
1506 static void wmi_ext_dbg_msg_queue_deinit(struct wmi_unified *wmi_handle)
1507 {
1508 	qdf_list_destroy(&wmi_handle->wmi_ext_dbg_msg_queue);
1509 	qdf_spinlock_destroy(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1510 }
1511 
1512 /**
1513  * wmi_ext_dbg_msg_show() - debugfs function to display whole content of
1514  * wmi command/event messages including headers.
1515  * @file: qdf debugfs file handler
1516  * @arg: pointer to wmi handler
1517  *
1518  * Return: QDF_STATUS_SUCCESS if all the messages are shown successfully,
1519  * else QDF_STATUS_E_AGAIN if more data to show.
1520  * else QDF_STATUS_E_AGAIN if there is more data to show.
1521 static QDF_STATUS wmi_ext_dbg_msg_show(qdf_debugfs_file_t file, void *arg)
1522 {
1523 	struct wmi_unified *wmi_handle = (struct wmi_unified *)arg;
1524 	struct wmi_ext_dbg_msg *msg;
1525 	uint64_t secs, usecs;
1526 
1527 	msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1528 	if (!msg)
1529 		return QDF_STATUS_SUCCESS;
1530 
1531 	qdf_debugfs_printf(file, "%s: 0x%x\n",
1532 			   msg->type == WMI_MSG_TYPE_CMD ? "COMMAND" :
1533 			   "EVENT", WMI_GET_FIELD(msg->buf, WMI_CMD_HDR,
1534 						  COMMANDID));
1535 	qdf_log_timestamp_to_secs(msg->ts, &secs, &usecs);
1536 	qdf_debugfs_printf(file, "Time: %llu.%llu\n", secs, usecs);
1537 	qdf_debugfs_printf(file, "Length:%d\n", msg->len);
1538 	qdf_debugfs_hexdump(file, msg->buf, msg->len,
1539 			    WMI_EXT_DBG_DUMP_ROW_SIZE,
1540 			    WMI_EXT_DBG_DUMP_GROUP_SIZE);
1541 	qdf_debugfs_printf(file, "\n");
1542 
1543 	if (qdf_debugfs_overflow(file)) {
1544 		qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1545 		qdf_list_insert_front(&wmi_handle->wmi_ext_dbg_msg_queue,
1546 				      &msg->node);
1547 		qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1548 
1549 	} else {
1550 		wmi_ext_dbg_msg_put(msg);
1551 	}
1552 
1553 	return QDF_STATUS_E_AGAIN;
1554 }
1555 
1556 /**
1557  * wmi_ext_dbg_msg_write() - debugfs write not supported
1558  * @priv: private data
1559  * @buf: received data buffer
1560  * @len: length of received buffer
1561  *
1562  * Return: QDF_STATUS_E_NOSUPPORT.
1563  */
1564 static QDF_STATUS wmi_ext_dbg_msg_write(void *priv, const char *buf,
1565 					qdf_size_t len)
1566 {
1567 	return QDF_STATUS_E_NOSUPPORT;
1568 }
1569 
1570 static struct qdf_debugfs_fops wmi_ext_dbgfs_ops[WMI_MAX_RADIOS];
1571 
1572 /**
1573  * wmi_ext_dbgfs_init() - init debugfs items for extended wmi dump.
1574  * @wmi_handle: wmi handle
1575  * @pdev_idx: pdev index
1576  *
1577  * Return: QDF_STATUS_SUCCESS if debugfs is initialized, else
1578  * QDF_STATUS_E_FAILURE
1579  */
1580 static QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1581 				     uint32_t pdev_idx)
1582 {
1583 	qdf_dentry_t dentry;
1584 	char buf[32];
1585 
1586 	/* To maintain backward compatibility, the naming convention for the
1587 	 * PDEV 0 dentry is kept the same as before. For more than one PDEV,
1588 	 * dentry names are suffixed with the SOC and PDEV index.
1589 	 */
1590 	if (wmi_handle->soc->soc_idx == 0 && pdev_idx == 0) {
1591 		dentry  = qdf_debugfs_create_dir(WMI_EXT_DBG_DIR, NULL);
1592 	} else {
1593 		snprintf(buf, sizeof(buf), "WMI_EXT_DBG_SOC%u_PDEV%u",
1594 			 wmi_handle->soc->soc_idx, pdev_idx);
1595 		dentry  = qdf_debugfs_create_dir(buf, NULL);
1596 	}
1597 
1598 	if (!dentry) {
1599 		wmi_err("error while creating extended wmi debugfs dir");
1600 		return QDF_STATUS_E_FAILURE;
1601 	}
1602 
1603 	wmi_ext_dbgfs_ops[pdev_idx].show = wmi_ext_dbg_msg_show;
1604 	wmi_ext_dbgfs_ops[pdev_idx].write = wmi_ext_dbg_msg_write;
1605 	wmi_ext_dbgfs_ops[pdev_idx].priv = wmi_handle;
1606 	if (!qdf_debugfs_create_file(WMI_EXT_DBG_FILE, WMI_EXT_DBG_FILE_PERM,
1607 				     dentry, &wmi_ext_dbgfs_ops[pdev_idx])) {
1608 		qdf_debugfs_remove_dir(dentry);
1609 		wmi_err("Error while creating extended wmi debugfs file");
1610 		return QDF_STATUS_E_FAILURE;
1611 	}
1612 
1613 	wmi_handle->wmi_ext_dbg_dentry = dentry;
1614 	wmi_handle->wmi_ext_dbg_msg_queue_size = WMI_EXT_DBG_QUEUE_SIZE;
1615 	wmi_ext_dbg_msg_queue_init(wmi_handle);
1616 
1617 	return QDF_STATUS_SUCCESS;
1618 }
1619 
1620 /**
1621  * wmi_ext_dbgfs_deinit() - cleanup/deinit debugfs items of extended wmi dump.
1622  * @wmi_handle: wmi handle
1623  *
1624  * Return: QDF_STATUS_SUCCESS if cleanup is successful
1625  */
1626 static QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1627 {
1628 	struct wmi_ext_dbg_msg *msg;
1629 
1630 	while ((msg = wmi_ext_dbg_msg_dequeue(wmi_handle)))
1631 		wmi_ext_dbg_msg_put(msg);
1632 
1633 	wmi_ext_dbg_msg_queue_deinit(wmi_handle);
1634 	qdf_debugfs_remove_dir_recursive(wmi_handle->wmi_ext_dbg_dentry);
1635 
1636 	return QDF_STATUS_SUCCESS;
1637 }
1638 
1639 #else
1640 
1641 static inline QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified
1642 						    *wmi_handle,
1643 						    uint8_t *buf, uint32_t len)
1644 {
1645 	return QDF_STATUS_SUCCESS;
1646 }
1647 
1648 static inline QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified
1649 						      *wmi_handle,
1650 						      uint8_t *buf, uint32_t len)
1651 {
1652 	return QDF_STATUS_SUCCESS;
1653 }
1654 
1655 static inline QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1656 					    uint32_t pdev_idx)
1657 {
1658 	return QDF_STATUS_SUCCESS;
1659 }
1660 
1661 static inline QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1662 {
1663 	return QDF_STATUS_SUCCESS;
1664 }
1665 
1666 #endif /*WMI_EXT_DBG */
1667 
1668 int wmi_get_host_credits(wmi_unified_t wmi_handle);
1669 /* WMI buffer APIs */
1670 
1671 #ifdef NBUF_MEMORY_DEBUG
1672 wmi_buf_t
1673 wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len,
1674 		    const char *func_name,
1675 		    uint32_t line_num)
1676 {
1677 	wmi_buf_t wmi_buf;
1678 
1679 	if (roundup(len + sizeof(WMI_CMD_HDR), 4) > wmi_handle->max_msg_len) {
1680 		QDF_ASSERT(0);
1681 		return NULL;
1682 	}
1683 
1684 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, func_name,
1685 				 line_num);
1686 	if (!wmi_buf)
1687 		wmi_buf = qdf_nbuf_alloc_debug(NULL,
1688 					       roundup(len + WMI_MIN_HEAD_ROOM,
1689 						       4),
1690 					       WMI_MIN_HEAD_ROOM, 4, false,
1691 					       func_name, line_num);
1692 	if (!wmi_buf)
1693 		return NULL;
1694 
1695 	/* Clear the wmi buffer */
1696 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1697 
1698 	/*
1699 	 * Set the length of the buffer to match the allocation size.
1700 	 */
1701 	qdf_nbuf_set_pktlen(wmi_buf, len);
1702 
1703 	return wmi_buf;
1704 }
1705 qdf_export_symbol(wmi_buf_alloc_debug);
1706 
1707 void wmi_buf_free(wmi_buf_t net_buf)
1708 {
1709 	net_buf = wbuff_buff_put(net_buf);
1710 	if (net_buf)
1711 		qdf_nbuf_free(net_buf);
1712 }
1713 qdf_export_symbol(wmi_buf_free);
1714 #else
1715 wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len,
1716 			   const char *func, uint32_t line)
1717 {
1718 	wmi_buf_t wmi_buf;
1719 
1720 	if (roundup(len + sizeof(WMI_CMD_HDR), 4) > wmi_handle->max_msg_len) {
1721 		QDF_DEBUG_PANIC("Invalid length %u (via %s:%u)",
1722 				len, func, line);
1723 		return NULL;
1724 	}
1725 
1726 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, __func__,
1727 				 __LINE__);
1728 	if (!wmi_buf)
1729 		wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len +
1730 				WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4,
1731 				false, func, line);
1732 
1733 	if (!wmi_buf) {
1734 		wmi_nofl_err("%s:%d, failed to alloc len:%d", func, line, len);
1735 		return NULL;
1736 	}
1737 
1738 	/* Clear the wmi buffer */
1739 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1740 
1741 	/*
1742 	 * Set the length of the buffer to match the allocation size.
1743 	 */
1744 	qdf_nbuf_set_pktlen(wmi_buf, len);
1745 
1746 	return wmi_buf;
1747 }
1748 qdf_export_symbol(wmi_buf_alloc_fl);
1749 
1750 void wmi_buf_free(wmi_buf_t net_buf)
1751 {
1752 	net_buf = wbuff_buff_put(net_buf);
1753 	if (net_buf)
1754 		qdf_nbuf_free(net_buf);
1755 }
1756 qdf_export_symbol(wmi_buf_free);
1757 #endif
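
/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * allocates a command buffer with wmi_buf_alloc() (assumed here to be the
 * convenience wrapper around the _fl/_debug allocators above), fills the
 * payload through wmi_buf_data(), and releases it with wmi_buf_free() on any
 * path where the buffer was not handed off to the send API.  'param' and
 * 'param_len' are hypothetical placeholders.
 *
 *	wmi_buf_t buf;
 *
 *	if (param_len > wmi_get_max_msg_len(wmi_handle))
 *		return QDF_STATUS_E_INVAL;
 *
 *	buf = wmi_buf_alloc(wmi_handle, param_len);
 *	if (!buf)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	qdf_mem_copy(wmi_buf_data(buf), param, param_len);
 *	...
 *	wmi_buf_free(buf);
 */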
1758 
1759 /**
1760  * wmi_get_max_msg_len() - get maximum WMI message length
1761  * @wmi_handle: WMI handle.
1762  *
1763  * This function returns the maximum WMI message length
1764  *
1765  * Return: maximum WMI message length
1766  */
1767 uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle)
1768 {
1769 	return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM;
1770 }
1771 qdf_export_symbol(wmi_get_max_msg_len);
1772 
1773 #ifndef WMI_CMD_STRINGS
1774 static uint8_t *wmi_id_to_name(uint32_t wmi_command)
1775 {
1776 	return "Invalid WMI cmd";
1777 }
1778 #endif
1779 
1780 static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag)
1781 {
1782 	wmi_debug("Send WMI command:%s command_id:%d htc_tag:%d",
1783 		 wmi_id_to_name(cmd_id), cmd_id, tag);
1784 }
1785 
1786 /**
1787  * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence
1788  * @cmd_id: command to check
1789  *
1790  * Return: true if the command is part of the resume sequence.
1791  */
1792 #ifdef WLAN_POWER_MANAGEMENT_OFFLOAD
1793 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1794 {
1795 	switch (cmd_id) {
1796 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
1797 	case WMI_PDEV_RESUME_CMDID:
1798 		return true;
1799 
1800 	default:
1801 		return false;
1802 	}
1803 }
1804 
1805 #else
1806 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1807 {
1808 	return false;
1809 }
1810 
1811 #endif
1812 
1813 #ifdef FEATURE_WLAN_D0WOW
1814 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1815 {
1816 	wmi_d0_wow_enable_disable_cmd_fixed_param *cmd;
1817 
1818 	if (cmd_id == WMI_D0_WOW_ENABLE_DISABLE_CMDID) {
1819 		cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *)
1820 			wmi_buf_data(buf);
1821 		if (!cmd->enable)
1822 			return true;
1823 		else
1824 			return false;
1825 	}
1826 
1827 	return false;
1828 }
1829 #else
1830 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1831 {
1832 	return false;
1833 }
1834 
1835 #endif
1836 
1837 #ifdef WMI_INTERFACE_SEQUENCE_CHECK
1838 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1839 {
1840 	wmi_handle->wmi_sequence = 0;
1841 	wmi_handle->wmi_exp_sequence = 0;
1842 	wmi_handle->wmi_sequence_stop = false;
1843 }
1844 
1845 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1846 {
1847 	qdf_spinlock_create(&wmi_handle->wmi_seq_lock);
1848 	wmi_interface_sequence_reset(wmi_handle);
1849 }
1850 
1851 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1852 {
1853 	qdf_spinlock_destroy(&wmi_handle->wmi_seq_lock);
1854 }
1855 
1856 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1857 {
1858 	wmi_handle->wmi_sequence_stop = true;
1859 }
1860 
1861 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1862 					  HTC_PACKET *pkt,
1863 					  const char *func, uint32_t line)
1864 {
1865 	wmi_buf_t buf = GET_HTC_PACKET_NET_BUF_CONTEXT(pkt);
1866 	QDF_STATUS status;
1867 
1868 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1869 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1870 	if (QDF_STATUS_SUCCESS != status) {
1871 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1872 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1873 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1874 			     func, line, status);
1875 		qdf_mem_free(pkt);
1876 		return status;
1877 	}
1878 	/* Record the sequence number in the SKB */
1879 	qdf_nbuf_set_mark(buf, wmi_handle->wmi_sequence);
1880 	/* Increment the sequence number */
1881 	wmi_handle->wmi_sequence = (wmi_handle->wmi_sequence + 1)
1882 				   & (wmi_handle->wmi_max_cmds - 1);
1883 	qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1884 
1885 	return status;
1886 }
1887 
1888 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1889 						wmi_buf_t buf)
1890 {
1891 	/* Skip sequence check when wmi sequence stop is set */
1892 	if (wmi_handle->wmi_sequence_stop)
1893 		return;
1894 
1895 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1896 	/* Match the completion sequence and expected sequence number */
1897 	if (qdf_nbuf_get_mark(buf) != wmi_handle->wmi_exp_sequence) {
1898 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1899 		wmi_nofl_err("WMI Tx Completion Sequence number mismatch");
1900 		wmi_nofl_err("Expected %d Received %d",
1901 			     wmi_handle->wmi_exp_sequence,
1902 			     qdf_nbuf_get_mark(buf));
1903 		/* Trigger Recovery */
1904 		qdf_trigger_self_recovery(wmi_handle->soc,
1905 					  QDF_WMI_BUF_SEQUENCE_MISMATCH);
1906 	} else {
1907 		/* Increment the expected sequence number */
1908 		wmi_handle->wmi_exp_sequence =
1909 				(wmi_handle->wmi_exp_sequence + 1)
1910 				& (wmi_handle->wmi_max_cmds - 1);
1911 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1912 	}
1913 }
1914 #else
1915 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1916 {
1917 }
1918 
1919 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1920 {
1921 }
1922 
1923 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1924 {
1925 }
1926 
1927 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1928 {
1929 }
1930 
1931 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1932 					  HTC_PACKET *pkt,
1933 					  const char *func, uint32_t line)
1934 {
1935 	QDF_STATUS status;
1936 
1937 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1938 	if (QDF_STATUS_SUCCESS != status) {
1939 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1940 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1941 			     func, line, status);
1942 		qdf_mem_free(pkt);
1943 		return status;
1944 	}
1945 
1946 	return status;
1947 }
1948 
1949 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1950 						wmi_buf_t buf)
1951 {
1952 }
1953 #endif
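
/*
 * Note on the sequence-check scheme above (descriptive sketch, not
 * normative): the send path stamps the current wmi_sequence into the nbuf
 * mark of every command, and the tx-completion path compares the completed
 * buffer's mark against wmi_exp_sequence.  Both counters wrap with
 *
 *	next = (seq + 1) & (wmi_max_cmds - 1);
 *
 * which assumes wmi_max_cmds is a power of two; e.g. with wmi_max_cmds of
 * 1024, sequence 1023 wraps back to 0.  A mismatch triggers self recovery
 * with reason QDF_WMI_BUF_SEQUENCE_MISMATCH, unless the check was stopped
 * via wmi_interface_sequence_stop().
 */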
1954 
1955 static inline void wmi_unified_debug_dump(wmi_unified_t wmi_handle)
1956 {
1957 	wmi_nofl_err("Endpoint ID = %d, Tx Queue Depth = %d, soc_id = %u, target type = %s",
1958 		     wmi_handle->wmi_endpoint_id,
1959 		     htc_get_tx_queue_depth(wmi_handle->htc_handle,
1960 					    wmi_handle->wmi_endpoint_id),
1961 		     wmi_handle->soc->soc_idx,
1962 		     (wmi_handle->target_type ==
1963 		      WMI_TLV_TARGET ? "WMI_TLV_TARGET" :
1964 						"WMI_NON_TLV_TARGET"));
1965 }
1966 
1967 #ifdef SYSTEM_PM_CHECK
1968 /**
1969  * wmi_set_system_pm_pkt_tag() - API to set tag for system pm packets
1970  * @htc_tag: HTC tag
1971  * @buf: wmi cmd buffer
1972  * @cmd_id: cmd id
1973  *
1974  * Return: None
1975  */
1976 static void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
1977 				      uint32_t cmd_id)
1978 {
1979 	switch (cmd_id) {
1980 	case WMI_WOW_ENABLE_CMDID:
1981 	case WMI_PDEV_SUSPEND_CMDID:
1982 		*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
1983 		break;
1984 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
1985 	case WMI_PDEV_RESUME_CMDID:
1986 		*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
1987 		break;
1988 	case WMI_D0_WOW_ENABLE_DISABLE_CMDID:
1989 		if (wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id))
1990 			*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
1991 		else
1992 			*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
1993 		break;
1994 	default:
1995 		break;
1996 	}
1997 }
1998 #else
1999 static inline void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
2000 					     uint32_t cmd_id)
2001 {
2002 }
2003 #endif
2004 
2005 QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf,
2006 				   uint32_t len, uint32_t cmd_id,
2007 				   const char *func, uint32_t line)
2008 {
2009 	HTC_PACKET *pkt;
2010 	uint16_t htc_tag = 0;
2011 	bool rtpm_inprogress;
2012 
2013 	rtpm_inprogress = wmi_get_runtime_pm_inprogress(wmi_handle);
2014 	if (rtpm_inprogress) {
2015 		htc_tag = wmi_handle->ops->wmi_set_htc_tx_tag(wmi_handle, buf,
2016 							      cmd_id);
2017 	} else if (qdf_atomic_read(&wmi_handle->is_target_suspended) &&
2018 		   !wmi_is_pm_resume_cmd(cmd_id) &&
2019 		   !wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id)) {
2020 		wmi_nofl_err("Target is suspended (via %s:%u)",
2021 			     func, line);
2022 		return QDF_STATUS_E_BUSY;
2023 	}
2024 
2025 	if (wmi_handle->wmi_stopinprogress) {
2026 		wmi_nofl_err("%s:%d, WMI stop in progress, wmi_handle:%pK",
2027 			     func, line, wmi_handle);
2028 		return QDF_STATUS_E_INVAL;
2029 	}
2030 
2031 #ifndef WMI_NON_TLV_SUPPORT
2032 	/* Do sanity check on the TLV parameter structure */
2033 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2034 		void *buf_ptr = (void *)qdf_nbuf_data(buf);
2035 
2036 		if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id)
2037 			!= 0) {
2038 			wmi_nofl_err("%s:%d, Invalid WMI Param Buffer for Cmd:%d",
2039 				     func, line, cmd_id);
2040 			return QDF_STATUS_E_INVAL;
2041 		}
2042 	}
2043 #endif
2044 
2045 	if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
2046 		wmi_nofl_err("%s:%d, Failed to send cmd %x, no memory",
2047 			     func, line, cmd_id);
2048 		return QDF_STATUS_E_NOMEM;
2049 	}
2050 
2051 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2052 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2053 
2054 	qdf_atomic_inc(&wmi_handle->pending_cmds);
2055 	if (qdf_atomic_read(&wmi_handle->pending_cmds) >=
2056 			wmi_handle->wmi_max_cmds) {
2057 		wmi_nofl_err("hostcredits = %d",
2058 			     wmi_get_host_credits(wmi_handle));
2059 		htc_dump_counter_info(wmi_handle->htc_handle);
2060 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2061 		wmi_nofl_err("%s:%d, MAX %d WMI Pending cmds reached",
2062 			     func, line, wmi_handle->wmi_max_cmds);
2063 		wmi_unified_debug_dump(wmi_handle);
2064 		htc_ce_tasklet_debug_dump(wmi_handle->htc_handle);
2065 		qdf_trigger_self_recovery(wmi_handle->soc->wmi_psoc,
2066 					  QDF_WMI_EXCEED_MAX_PENDING_CMDS);
2067 		return QDF_STATUS_E_BUSY;
2068 	}
2069 
2070 	pkt = qdf_mem_malloc_fl(sizeof(*pkt), func, line);
2071 	if (!pkt) {
2072 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2073 		return QDF_STATUS_E_NOMEM;
2074 	}
2075 
2076 	if (!rtpm_inprogress)
2077 		wmi_set_system_pm_pkt_tag(&htc_tag, buf, cmd_id);
2078 
2079 	SET_HTC_PACKET_INFO_TX(pkt,
2080 			       NULL,
2081 			       qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
2082 			       wmi_handle->wmi_endpoint_id, htc_tag);
2083 
2084 	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);
2085 	wmi_log_cmd_id(cmd_id, htc_tag);
2086 	wmi_ext_dbg_msg_cmd_record(wmi_handle,
2087 				   qdf_nbuf_data(buf), qdf_nbuf_len(buf));
2088 #ifdef WMI_INTERFACE_EVENT_LOGGING
2089 	if (wmi_handle->log_info.wmi_logging_enable) {
2090 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2091 		/*
2092 		 * Record 16 bytes of WMI cmd data -
2093 		 * exclude TLV and WMI headers
2094 		 *
2095 		 * WMI mgmt command already recorded in wmi_mgmt_cmd_record
2096 		 */
2097 		if (wmi_handle->ops->is_management_record(cmd_id) == false) {
2098 			uint8_t *tmpbuf = (uint8_t *)qdf_nbuf_data(buf) +
2099 				wmi_handle->soc->buf_offset_command;
2100 
2101 			WMI_COMMAND_RECORD(wmi_handle, cmd_id, tmpbuf);
2102 			wmi_specific_cmd_record(wmi_handle, cmd_id, tmpbuf);
2103 		}
2104 
2105 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2106 	}
2107 #endif
2108 	return wmi_htc_send_pkt(wmi_handle, pkt, func, line);
2109 }
2110 qdf_export_symbol(wmi_unified_cmd_send_fl);
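
/*
 * Send-path sketch (illustrative only): callers typically reach this function
 * through the wmi_unified_cmd_send() wrapper, which is assumed to pass
 * __func__/__LINE__ so failures can be attributed to the call site.  'buf'
 * holds only the payload; the WMI_CMD_HDR is prepended here.  On error the
 * caller still owns the buffer and frees it; 'fixed_param_len' and
 * 'my_cmd_id' are hypothetical placeholders.
 *
 *	status = wmi_unified_cmd_send_fl(wmi_handle, buf, fixed_param_len,
 *					 my_cmd_id, __func__, __LINE__);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		wmi_buf_free(buf);
 */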
2111 
2112 /**
2113  * wmi_unified_get_event_handler_ix() - gives event handler's index
2114  * @wmi_handle: handle to wmi
2115  * @event_id: wmi event id
2116  *
2117  * Return: event handler's index, or -1 if no handler is registered
2118  */
2119 static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
2120 					    uint32_t event_id)
2121 {
2122 	uint32_t idx = 0;
2123 	int32_t invalid_idx = -1;
2124 	struct wmi_soc *soc = wmi_handle->soc;
2125 
2126 	for (idx = 0; (idx < soc->max_event_idx &&
2127 		       idx < WMI_UNIFIED_MAX_EVENT); ++idx) {
2128 		if (wmi_handle->event_id[idx] == event_id &&
2129 		    wmi_handle->event_handler[idx]) {
2130 			return idx;
2131 		}
2132 	}
2133 
2134 	return invalid_idx;
2135 }
2136 
2137 /**
2138  * wmi_register_event_handler_with_ctx() - register event handler with
2139  * exec ctx and buffer type
2140  * @wmi_handle: handle to wmi
2141  * @event_id: wmi event id
2142  * @handler_func: wmi event handler function
2143  * @rx_ctx: rx execution context for wmi rx events
2144  * @rx_buf_type: rx buffer type for wmi rx events
2145  *
2146  * Return: QDF_STATUS_SUCCESS on successful registration, else failure.
2147  */
2148 static QDF_STATUS
2149 wmi_register_event_handler_with_ctx(wmi_unified_t wmi_handle,
2150 				    uint32_t event_id,
2151 				    wmi_unified_event_handler handler_func,
2152 				    enum wmi_rx_exec_ctx rx_ctx,
2153 				    enum wmi_rx_buff_type rx_buf_type)
2154 {
2155 	uint32_t idx = 0;
2156 	uint32_t evt_id;
2157 	struct wmi_soc *soc;
2158 
2159 	if (!wmi_handle) {
2160 		wmi_err("WMI handle is NULL");
2161 		return QDF_STATUS_E_FAILURE;
2162 	}
2163 
2164 	soc = wmi_handle->soc;
2165 
2166 	if (event_id >= wmi_events_max ||
2167 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2168 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2169 			  "%s: Event id %d is unavailable",
2170 					__func__, event_id);
2171 		return QDF_STATUS_E_FAILURE;
2172 	}
2173 	evt_id = wmi_handle->wmi_events[event_id];
2174 
2175 	if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) {
2176 		wmi_info("event handler already registered 0x%x", evt_id);
2177 		return QDF_STATUS_E_FAILURE;
2178 	}
2179 	if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
2180 		wmi_err("no more event handlers 0x%x",
2181 			 evt_id);
2182 		return QDF_STATUS_E_FAILURE;
2183 	}
2184 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
2185 		  "Registered event handler for event 0x%8x", evt_id);
2186 	idx = soc->max_event_idx;
2187 	wmi_handle->event_handler[idx] = handler_func;
2188 	wmi_handle->event_id[idx] = evt_id;
2189 
2190 	qdf_spin_lock_bh(&soc->ctx_lock);
2191 	wmi_handle->ctx[idx].exec_ctx = rx_ctx;
2192 	wmi_handle->ctx[idx].buff_type = rx_buf_type;
2193 	qdf_spin_unlock_bh(&soc->ctx_lock);
2194 	soc->max_event_idx++;
2195 
2196 	return QDF_STATUS_SUCCESS;
2197 }
2198 
2199 QDF_STATUS
2200 wmi_unified_register_event(wmi_unified_t wmi_handle,
2201 			   uint32_t event_id,
2202 			   wmi_unified_event_handler handler_func)
2203 {
2204 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2205 						   handler_func,
2206 						   WMI_RX_UMAC_CTX,
2207 						   WMI_RX_PROCESSED_BUFF);
2208 }
2209 
2210 QDF_STATUS
2211 wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
2212 				   wmi_conv_event_id event_id,
2213 				   wmi_unified_event_handler handler_func,
2214 				   uint8_t rx_ctx)
2215 {
2216 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2217 						   handler_func, rx_ctx,
2218 						   WMI_RX_PROCESSED_BUFF);
2219 }
2220 
2221 qdf_export_symbol(wmi_unified_register_event_handler);
2222 
2223 QDF_STATUS
2224 wmi_unified_register_raw_event_handler(wmi_unified_t wmi_handle,
2225 				       wmi_conv_event_id event_id,
2226 				       wmi_unified_event_handler handler_func,
2227 				       enum wmi_rx_exec_ctx rx_ctx)
2228 {
2229 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2230 						   handler_func, rx_ctx,
2231 						   WMI_RX_RAW_BUFF);
2232 }
2233 
2234 qdf_export_symbol(wmi_unified_register_raw_event_handler);
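
/*
 * Registration sketch (illustrative only): a component registers a handler
 * for a converted event id and chooses the execution context.  The handler
 * prototype (ol_scn_t scn handle, event data, length), the handler name and
 * the event id below are assumptions for illustration, modelled on the
 * wmi_unified_event_handler usage in __wmi_control_rx().
 *
 *	static int my_stats_event_handler(ol_scn_t scn, uint8_t *data,
 *					  uint32_t len)
 *	{
 *		return 0;
 *	}
 *
 *	wmi_unified_register_event_handler(wmi_handle,
 *					   wmi_update_stats_event_id,
 *					   my_stats_event_handler,
 *					   WMI_RX_WORK_CTX);
 */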
2235 
2236 QDF_STATUS wmi_unified_unregister_event(wmi_unified_t wmi_handle,
2237 					uint32_t event_id)
2238 {
2239 	uint32_t idx = 0;
2240 	uint32_t evt_id;
2241 	struct wmi_soc *soc;
2242 
2243 	if (!wmi_handle) {
2244 		wmi_err("WMI handle is NULL");
2245 		return QDF_STATUS_E_FAILURE;
2246 	}
2247 
2248 	soc = wmi_handle->soc;
2249 	if (event_id >= wmi_events_max ||
2250 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2251 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2252 			  "%s: Event id %d is unavailable",
2253 					__func__, event_id);
2254 		return QDF_STATUS_E_FAILURE;
2255 	}
2256 	evt_id = wmi_handle->wmi_events[event_id];
2257 
2258 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2259 	if (idx == -1) {
2260 		wmi_warn("event handler is not registered: evt id 0x%x",
2261 			 evt_id);
2262 		return QDF_STATUS_E_FAILURE;
2263 	}
2264 	wmi_handle->event_handler[idx] = NULL;
2265 	wmi_handle->event_id[idx] = 0;
2266 	--soc->max_event_idx;
2267 	wmi_handle->event_handler[idx] =
2268 		wmi_handle->event_handler[soc->max_event_idx];
2269 	wmi_handle->event_id[idx] =
2270 		wmi_handle->event_id[soc->max_event_idx];
2271 
2272 	qdf_spin_lock_bh(&soc->ctx_lock);
2273 
2274 	wmi_handle->ctx[idx].exec_ctx =
2275 		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
2276 	wmi_handle->ctx[idx].buff_type =
2277 		wmi_handle->ctx[soc->max_event_idx].buff_type;
2278 
2279 	qdf_spin_unlock_bh(&soc->ctx_lock);
2280 
2281 	return QDF_STATUS_SUCCESS;
2282 }
2283 
2284 QDF_STATUS wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
2285 						wmi_conv_event_id event_id)
2286 {
2287 	uint32_t idx = 0;
2288 	uint32_t evt_id;
2289 	struct wmi_soc *soc;
2290 
2291 	if (!wmi_handle) {
2292 		wmi_err("WMI handle is NULL");
2293 		return QDF_STATUS_E_FAILURE;
2294 	}
2295 
2296 	soc = wmi_handle->soc;
2297 
2298 	if (event_id >= wmi_events_max ||
2299 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2300 		wmi_err("Event id %d is unavailable", event_id);
2301 		return QDF_STATUS_E_FAILURE;
2302 	}
2303 	evt_id = wmi_handle->wmi_events[event_id];
2304 
2305 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2306 	if (idx == -1) {
2307 		wmi_err("event handler is not registered: evt id 0x%x",
2308 			 evt_id);
2309 		return QDF_STATUS_E_FAILURE;
2310 	}
2311 	wmi_handle->event_handler[idx] = NULL;
2312 	wmi_handle->event_id[idx] = 0;
2313 	--soc->max_event_idx;
2314 	wmi_handle->event_handler[idx] =
2315 		wmi_handle->event_handler[soc->max_event_idx];
2316 	wmi_handle->event_id[idx] =
2317 		wmi_handle->event_id[soc->max_event_idx];
2318 
2319 	qdf_spin_lock_bh(&soc->ctx_lock);
2320 
2321 	wmi_handle->ctx[idx].exec_ctx =
2322 		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
2323 	wmi_handle->ctx[idx].buff_type =
2324 		wmi_handle->ctx[soc->max_event_idx].buff_type;
2325 
2326 	qdf_spin_unlock_bh(&soc->ctx_lock);
2327 
2328 	return QDF_STATUS_SUCCESS;
2329 }
2330 qdf_export_symbol(wmi_unified_unregister_event_handler);
2331 
2332 static void
2333 wmi_process_rx_diag_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2334 					    void *evt_buf)
2335 {
2336 	uint32_t num_diag_events_pending;
2337 
2338 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
2339 	if (RX_DIAG_WQ_MAX_SIZE > 0) {
2340 		num_diag_events_pending = qdf_nbuf_queue_len(
2341 						&wmi_handle->diag_event_queue);
2342 
2343 		if (num_diag_events_pending >= RX_DIAG_WQ_MAX_SIZE) {
2344 			qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2345 			wmi_handle->wmi_rx_diag_events_dropped++;
2346 			wmi_debug_rl("Rx diag events dropped count: %d",
2347 				     wmi_handle->wmi_rx_diag_events_dropped);
2348 			qdf_nbuf_free(evt_buf);
2349 			return;
2350 		}
2351 	}
2352 
2353 	qdf_nbuf_queue_add(&wmi_handle->diag_event_queue, evt_buf);
2354 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2355 	qdf_queue_work(0, wmi_handle->wmi_rx_diag_work_queue,
2356 		       &wmi_handle->rx_diag_event_work);
2357 }
2358 
2359 void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2360 					    void *evt_buf)
2361 {
2362 
2363 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
2364 	qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
2365 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
2366 	qdf_queue_work(0, wmi_handle->wmi_rx_work_queue,
2367 			&wmi_handle->rx_event_work);
2368 
2369 	return;
2370 }
2371 
2372 qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx);
2373 
2374 uint32_t wmi_critical_events_in_flight(struct wmi_unified *wmi)
2375 {
2376 	return qdf_atomic_read(&wmi->critical_events_in_flight);
2377 }
2378 
2379 static bool
2380 wmi_is_event_critical(struct wmi_unified *wmi_handle, uint32_t event_id)
2381 {
2382 	if (wmi_handle->wmi_events[wmi_roam_synch_event_id] == event_id)
2383 		return true;
2384 
2385 	return false;
2386 }
2387 
2388 static QDF_STATUS wmi_discard_fw_event(struct scheduler_msg *msg)
2389 {
2390 	struct wmi_process_fw_event_params *event_param;
2391 
2392 	if (!msg->bodyptr)
2393 		return QDF_STATUS_E_INVAL;
2394 
2395 	event_param = (struct wmi_process_fw_event_params *)msg->bodyptr;
2396 	qdf_nbuf_free(event_param->evt_buf);
2397 	qdf_mem_free(msg->bodyptr);
2398 	msg->bodyptr = NULL;
2399 	msg->bodyval = 0;
2400 	msg->type = 0;
2401 
2402 	return QDF_STATUS_SUCCESS;
2403 }
2404 
2405 static QDF_STATUS wmi_process_fw_event_handler(struct scheduler_msg *msg)
2406 {
2407 	struct wmi_process_fw_event_params *params =
2408 		(struct wmi_process_fw_event_params *)msg->bodyptr;
2409 	struct wmi_unified *wmi_handle;
2410 	uint32_t event_id;
2411 
2412 	wmi_handle = (struct wmi_unified *)params->wmi_handle;
2413 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params->evt_buf),
2414 				 WMI_CMD_HDR, COMMANDID);
2415 	wmi_process_fw_event(wmi_handle, params->evt_buf);
2416 
2417 	if (wmi_is_event_critical(wmi_handle, event_id))
2418 		qdf_atomic_dec(&wmi_handle->critical_events_in_flight);
2419 
2420 	qdf_mem_free(msg->bodyptr);
2421 
2422 	return QDF_STATUS_SUCCESS;
2423 }
2424 
2425 /**
2426  * wmi_process_fw_event_sched_thread_ctx() - common event handler to serialize
2427  *                                  event processing through scheduler thread
2428  * @wmi: wmi context
2429  * @ev: event buffer
2430  *
2431  * Return: QDF_STATUS_SUCCESS on success, else a QDF error status
2433  */
2434 static QDF_STATUS
2435 wmi_process_fw_event_sched_thread_ctx(struct wmi_unified *wmi,
2436 				      void *ev)
2437 {
2438 	struct wmi_process_fw_event_params *params_buf;
2439 	struct scheduler_msg msg = { 0 };
2440 	uint32_t event_id;
2441 
2442 	params_buf = qdf_mem_malloc(sizeof(struct wmi_process_fw_event_params));
2443 	if (!params_buf) {
2444 		wmi_err("malloc failed");
2445 		qdf_nbuf_free(ev);
2446 		return QDF_STATUS_E_NOMEM;
2447 	}
2448 
2449 	params_buf->wmi_handle = wmi;
2450 	params_buf->evt_buf = ev;
2451 
2452 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params_buf->evt_buf),
2453 				 WMI_CMD_HDR, COMMANDID);
2454 	if (wmi_is_event_critical(wmi, event_id))
2455 		qdf_atomic_inc(&wmi->critical_events_in_flight);
2456 
2457 	msg.bodyptr = params_buf;
2458 	msg.bodyval = 0;
2459 	msg.callback = wmi_process_fw_event_handler;
2460 	msg.flush_callback = wmi_discard_fw_event;
2461 
2462 	if (QDF_STATUS_SUCCESS !=
2463 		scheduler_post_message(QDF_MODULE_ID_TARGET_IF,
2464 				       QDF_MODULE_ID_TARGET_IF,
2465 				       QDF_MODULE_ID_TARGET_IF, &msg)) {
2466 		qdf_nbuf_free(ev);
2467 		qdf_mem_free(params_buf);
2468 		return QDF_STATUS_E_FAULT;
2469 	}
2470 
2471 	return QDF_STATUS_SUCCESS;
2472 }
2473 
2474 /**
2475  * wmi_get_pdev_ep: Get wmi handle based on endpoint
2476  * @soc: handle to wmi soc
2477  * @ep: endpoint id
2478  *
2479  * Return: wmi handle of the pdev mapped to the endpoint, else NULL
2480  */
2481 static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc,
2482 						HTC_ENDPOINT_ID ep)
2483 {
2484 	uint32_t i;
2485 
2486 	for (i = 0; i < WMI_MAX_RADIOS; i++)
2487 		if (soc->wmi_endpoint_id[i] == ep)
2488 			break;
2489 
2490 	if (i == WMI_MAX_RADIOS)
2491 		return NULL;
2492 
2493 	return soc->wmi_pdev[i];
2494 }
2495 
2496 /**
2497  * wmi_mtrace_rx() - Wrapper function for the qdf_mtrace API
2498  * @message_id: 32-bit WMI message ID
2499  * @vdev_id: Vdev ID
2500  * @data: Actual message contents
2501  *
2502  * This function converts the 32-bit WMI message ID into the 15-bit message
2503  * ID format used by qdf_mtrace, since qdf_mtrace reserves only 15 bits
2504  * for the message ID.
2505  * Of these 15 bits, the lower bits carry the WMI command within its group
2506  * and the upper 8 bits carry the WMI_GRP_ID. With this notation, a maximum
2507  * of 256 groups, each with up to 128 commands, can be supported.
2509  *
2510  * Return: None
2511  */
2512 static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data)
2513 {
2514 	uint16_t mtrace_message_id;
2515 
2516 	mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) |
2517 		(QDF_WMI_MTRACE_GRP_ID(message_id) <<
2518 						QDF_WMI_MTRACE_CMD_NUM_BITS);
2519 	qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA,
2520 		   mtrace_message_id, vdev_id, data);
2521 }
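
/*
 * Worked example (assuming QDF_WMI_MTRACE_CMD_NUM_BITS is 7, i.e. seven
 * command bits below eight group bits): for a message whose group id is 0x3
 * and whose in-group command index is 0x12, the 15-bit mtrace id becomes
 *
 *	0x12 | (0x3 << 7) = 0x192
 *
 * and is recorded by qdf_mtrace() together with the vdev id and the data
 * word.
 */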
2522 
2523 /**
2524  * wmi_process_control_rx() - process fw event callbacks
2525  * @wmi_handle: handle to wmi_unified
2526  * @evt_buf: handle to wmi_buf_t
2527  *
2528  * Return: none
2529  */
2530 static void wmi_process_control_rx(struct wmi_unified *wmi_handle,
2531 				   wmi_buf_t evt_buf)
2532 {
2533 	struct wmi_soc *soc = wmi_handle->soc;
2534 	uint32_t id;
2535 	uint32_t idx;
2536 	enum wmi_rx_exec_ctx exec_ctx;
2537 
2538 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2539 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2540 	if (qdf_unlikely(idx == A_ERROR)) {
2541 		wmi_debug("no handler registered for event id 0x%x", id);
2542 		qdf_nbuf_free(evt_buf);
2543 		return;
2544 	}
2545 	wmi_mtrace_rx(id, 0xFF, idx);
2546 	qdf_spin_lock_bh(&soc->ctx_lock);
2547 	exec_ctx = wmi_handle->ctx[idx].exec_ctx;
2548 	qdf_spin_unlock_bh(&soc->ctx_lock);
2549 
2550 #ifdef WMI_INTERFACE_EVENT_LOGGING
2551 	if (wmi_handle->log_info.wmi_logging_enable) {
2552 		uint8_t *data;
2553 		data = qdf_nbuf_data(evt_buf);
2554 
2555 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2556 		/* Exclude 4 bytes of TLV header */
2557 		if (wmi_handle->ops->is_diag_event(id)) {
2558 			WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id,
2559 				((uint8_t *) data +
2560 				wmi_handle->soc->buf_offset_event));
2561 		} else if (wmi_handle->ops->is_management_record(id)) {
2562 			WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id,
2563 				((uint8_t *) data +
2564 				wmi_handle->soc->buf_offset_event));
2565 		} else {
2566 			WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data +
2567 				wmi_handle->soc->buf_offset_event));
2568 		}
2569 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2570 	}
2571 #endif
2572 
2573 	if (exec_ctx == WMI_RX_WORK_CTX) {
2574 		wmi_process_fw_event_worker_thread_ctx(wmi_handle, evt_buf);
2575 	} else if (exec_ctx == WMI_RX_TASKLET_CTX) {
2576 		wmi_process_fw_event(wmi_handle, evt_buf);
2577 	} else if (exec_ctx == WMI_RX_SERIALIZER_CTX) {
2578 		wmi_process_fw_event_sched_thread_ctx(wmi_handle, evt_buf);
2579 	} else if (exec_ctx == WMI_RX_DIAG_WORK_CTX) {
2580 		wmi_process_rx_diag_event_worker_thread_ctx(wmi_handle,
2581 							    evt_buf);
2582 	} else {
2583 		wmi_err("Invalid event context %d", exec_ctx);
2584 		qdf_nbuf_free(evt_buf);
2585 	}
2586 
2587 }
2588 
2589 /**
2590  * wmi_control_rx() - process fw event callbacks
2591  * @ctx: handle to wmi
2592  * @htc_packet: pointer to htc packet
2593  *
2594  * Return: none
2595  */
2596 static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
2597 {
2598 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2599 	struct wmi_unified *wmi_handle;
2600 	wmi_buf_t evt_buf;
2601 
2602 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2603 
2604 	wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint);
2605 	if (!wmi_handle) {
2606 		wmi_err("unable to get wmi_handle to Endpoint %d",
2607 			htc_packet->Endpoint);
2608 		qdf_nbuf_free(evt_buf);
2609 		return;
2610 	}
2611 
2612 	wmi_process_control_rx(wmi_handle, evt_buf);
2613 }
2614 
2615 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
2616 /**
2617  * wmi_control_diag_rx() - process diag fw event callbacks
2618  * @ctx: handle to wmi
2619  * @htc_packet: pointer to htc packet
2620  *
2621  * Return: none
2622  */
2623 static void wmi_control_diag_rx(void *ctx, HTC_PACKET *htc_packet)
2624 {
2625 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2626 	struct wmi_unified *wmi_handle;
2627 	wmi_buf_t evt_buf;
2628 
2629 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2630 
2631 	wmi_handle = soc->wmi_pdev[0];
2632 	if (!wmi_handle) {
2633 		wmi_err("unable to get wmi_handle for diag event end point id:%d", htc_packet->Endpoint);
2634 		qdf_nbuf_free(evt_buf);
2635 		return;
2636 	}
2637 
2638 	wmi_process_control_rx(wmi_handle, evt_buf);
2639 }
2640 #endif
2641 
2642 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
2643 QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle,
2644 					 wmi_buf_t buf, uint32_t buflen,
2645 					 uint32_t cmd_id)
2646 {
2647 	QDF_STATUS status;
2648 	int32_t ret;
2649 
2650 	if (!qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR))) {
2651 		wmi_err("Failed to send cmd %x, no memory", cmd_id);
2652 		return QDF_STATUS_E_NOMEM;
2653 	}
2654 
2655 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2656 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2657 	wmi_debug("Sending WMI_CMD_ID: 0x%x over qmi", cmd_id);
2658 	status = qdf_wmi_send_recv_qmi(qdf_nbuf_data(buf),
2659 				       buflen + sizeof(WMI_CMD_HDR),
2660 				       wmi_handle,
2661 				       wmi_process_qmi_fw_event);
2662 	if (QDF_IS_STATUS_ERROR(status)) {
2663 		qdf_nbuf_pull_head(buf, sizeof(WMI_CMD_HDR));
2664 		wmi_warn("WMI send on QMI failed. Retrying WMI on HTC");
2665 	} else {
2666 		ret = qdf_atomic_inc_return(&wmi_handle->num_stats_over_qmi);
2667 		wmi_debug("num stats over qmi: %d", ret);
2668 		wmi_buf_free(buf);
2669 	}
2670 
2671 	return status;
2672 }
2673 
2674 static int __wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2675 {
2676 	struct wmi_unified *wmi_handle = wmi_cb_ctx;
2677 	wmi_buf_t evt_buf;
2678 	uint32_t evt_id;
2679 
2680 	if (!wmi_handle || !buf)
2681 		return -EINVAL;
2682 
2683 	evt_buf = wmi_buf_alloc(wmi_handle, len);
2684 	if (!evt_buf)
2685 		return -ENOMEM;
2686 
2687 	qdf_mem_copy(qdf_nbuf_data(evt_buf), buf, len);
2688 	evt_id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2689 	wmi_debug("Received WMI_EVT_ID: %d over qmi", evt_id);
2690 	wmi_process_control_rx(wmi_handle, evt_buf);
2691 
2692 	return 0;
2693 }
2694 
2695 int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2696 {
2697 	struct qdf_op_sync *op_sync;
2698 	int ret;
2699 
2700 	if (qdf_op_protect(&op_sync))
2701 		return -EINVAL;
2702 	ret = __wmi_process_qmi_fw_event(wmi_cb_ctx, buf, len);
2703 	qdf_op_unprotect(op_sync);
2704 
2705 	return ret;
2706 }
2707 #endif
2708 
2709 /**
2710  * wmi_process_fw_event() - process any fw event
2711  * @wmi_handle: wmi handle
2712  * @evt_buf: fw event buffer
2713  *
2714  * This function processes the fw event in the caller's context
2715  *
2716  * Return: none
2717  */
2718 void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2719 {
2720 	__wmi_control_rx(wmi_handle, evt_buf);
2721 }
2722 
2723 /**
2724  * __wmi_control_rx() - process a serialized wmi event callback
2725  * @wmi_handle: wmi handle
2726  * @evt_buf: fw event buffer
2727  *
2728  * Return: none
2729  */
2730 void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2731 {
2732 	uint32_t id;
2733 	uint8_t *data;
2734 	uint32_t len;
2735 	void *wmi_cmd_struct_ptr = NULL;
2736 #ifndef WMI_NON_TLV_SUPPORT
2737 	int tlv_ok_status = 0;
2738 #endif
2739 	uint32_t idx = 0;
2740 	struct wmi_raw_event_buffer ev_buf;
2741 	enum wmi_rx_buff_type ev_buff_type;
2742 
2743 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2744 
2745 	wmi_ext_dbg_msg_event_record(wmi_handle, qdf_nbuf_data(evt_buf),
2746 				     qdf_nbuf_len(evt_buf));
2747 
2748 	if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
2749 		goto end;
2750 
2751 	data = qdf_nbuf_data(evt_buf);
2752 	len = qdf_nbuf_len(evt_buf);
2753 
2754 #ifndef WMI_NON_TLV_SUPPORT
2755 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2756 		/* Validate and pad(if necessary) the TLVs */
2757 		tlv_ok_status =
2758 			wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle,
2759 							data, len, id,
2760 							&wmi_cmd_struct_ptr);
2761 		if (tlv_ok_status != 0) {
2762 			QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2763 				  "%s: Error: id=0x%x, wmitlv check status=%d",
2764 				  __func__, id, tlv_ok_status);
2765 			goto end;
2766 		}
2767 	}
2768 #endif
2769 
2770 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2771 	if (idx == A_ERROR) {
2772 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2773 		   "%s : event handler is not registered: event id 0x%x",
2774 			__func__, id);
2775 		goto end;
2776 	}
2777 #ifdef WMI_INTERFACE_EVENT_LOGGING
2778 	if (wmi_handle->log_info.wmi_logging_enable) {
2779 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2780 		/* Exclude 4 bytes of TLV header */
2781 		if (wmi_handle->ops->is_diag_event(id)) {
2782 			/*
2783 			 * skip diag event logging in WMI event buffer
2784 			 * as its already logged in WMI RX event buffer
2785 			 */
2786 		} else if (wmi_handle->ops->is_management_record(id)) {
2787 			/*
2788 			 * skip wmi mgmt event logging in WMI event buffer
2789 			 * as its already logged in WMI RX event buffer
2790 			 */
2791 		} else {
2792 			uint8_t *tmpbuf = (uint8_t *)data +
2793 					wmi_handle->soc->buf_offset_event;
2794 
2795 			WMI_EVENT_RECORD(wmi_handle, id, tmpbuf);
2796 			wmi_specific_evt_record(wmi_handle, id, tmpbuf);
2797 		}
2798 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2799 	}
2800 #endif
2801 	/* Call the WMI registered event handler */
2802 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2803 		ev_buff_type = wmi_handle->ctx[idx].buff_type;
2804 		if (ev_buff_type == WMI_RX_PROCESSED_BUFF) {
2805 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2806 				wmi_cmd_struct_ptr, len);
2807 		} else if (ev_buff_type == WMI_RX_RAW_BUFF) {
2808 			ev_buf.evt_raw_buf = data;
2809 			ev_buf.evt_processed_buf = wmi_cmd_struct_ptr;
2810 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2811 							(void *)&ev_buf, len);
2812 		}
2813 	} else {
2814 		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2815 						data, len);
2816 	}
2817 
2818 end:
2819 	/* Free event buffer and allocated event tlv */
2820 #ifndef WMI_NON_TLV_SUPPORT
2821 	if (wmi_handle->target_type == WMI_TLV_TARGET)
2822 		wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr);
2823 #endif
2824 
2825 	qdf_nbuf_free(evt_buf);
2826 
2827 }
2828 
2829 #define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */
2830 
2831 static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id)
2832 {
2833 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2834 		  "%s: WLAN_BUG_RCA: Message type %x has exceeded its allotted time of %ds",
2835 		  __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000);
2836 }
2837 
2838 #ifdef CONFIG_SLUB_DEBUG_ON
2839 static void wmi_workqueue_watchdog_bite(void *arg)
2840 {
2841 	struct wmi_wq_dbg_info *info = arg;
2842 
2843 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
2844 	qdf_print_thread_trace(info->task);
2845 
2846 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2847 		  "%s: Going down for WMI WQ Watchdog Bite!", __func__);
2848 	QDF_BUG(0);
2849 }
2850 #else
2851 static inline void wmi_workqueue_watchdog_bite(void *arg)
2852 {
2853 	struct wmi_wq_dbg_info *info = arg;
2854 
2855 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
2856 
2857 	qdf_print_thread_trace(info->task);
2858 }
2859 #endif
2860 
2861 /**
2862  * wmi_rx_event_work() - process rx event in rx work queue context
2863  * @arg: opaque pointer to wmi handle
2864  *
2865  * This function processes fw events to serialize them through the rx worker thread.
2866  *
2867  * Return: none
2868  */
2869 static void wmi_rx_event_work(void *arg)
2870 {
2871 	wmi_buf_t buf;
2872 	struct wmi_unified *wmi = arg;
2873 	qdf_timer_t wd_timer;
2874 	struct wmi_wq_dbg_info info;
2875 
2876 	/* initialize WMI workqueue watchdog timer */
2877 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
2878 			&info, QDF_TIMER_TYPE_SW);
2879 	qdf_spin_lock_bh(&wmi->eventq_lock);
2880 	buf = qdf_nbuf_queue_remove(&wmi->event_queue);
2881 	qdf_spin_unlock_bh(&wmi->eventq_lock);
2882 	while (buf) {
2883 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
2884 		info.wd_msg_type_id =
2885 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
2886 		info.wmi_wq = wmi->wmi_rx_work_queue;
2887 		info.task = qdf_get_current_task();
2888 		__wmi_control_rx(wmi, buf);
2889 		qdf_timer_stop(&wd_timer);
2890 		qdf_spin_lock_bh(&wmi->eventq_lock);
2891 		buf = qdf_nbuf_queue_remove(&wmi->event_queue);
2892 		qdf_spin_unlock_bh(&wmi->eventq_lock);
2893 	}
2894 	qdf_timer_free(&wd_timer);
2895 }
2896 
2897 /**
2898  * wmi_rx_diag_event_work() - process rx diag event in work queue context
2899  * @arg: opaque pointer to wmi handle
2900  *
2901  * This function processes fw diag events to serialize them through the rx worker thread.
2902  *
2903  * Return: none
2904  */
2905 static void wmi_rx_diag_event_work(void *arg)
2906 {
2907 	wmi_buf_t buf;
2908 	struct wmi_unified *wmi = arg;
2909 	qdf_timer_t wd_timer;
2910 	struct wmi_wq_dbg_info info;
2911 	uint32_t diag_event_process_count = 0;
2912 
2913 	if (!wmi) {
2914 		wmi_err("Invalid WMI handle");
2915 		return;
2916 	}
2917 
2918 	/* initialize WMI workqueue watchdog timer */
2919 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
2920 		       &info, QDF_TIMER_TYPE_SW);
2921 	qdf_spin_lock_bh(&wmi->diag_eventq_lock);
2922 	buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
2923 	qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
2924 	while (buf) {
2925 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
2926 		info.wd_msg_type_id =
2927 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
2928 		info.wmi_wq = NULL;
2929 		info.task = qdf_get_current_task();
2930 		__wmi_control_rx(wmi, buf);
2931 		qdf_timer_stop(&wd_timer);
2932 
2933 		if (diag_event_process_count++ >
2934 		    RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT) {
2935 			qdf_queue_work(0, wmi->wmi_rx_diag_work_queue,
2936 				       &wmi->rx_diag_event_work);
2937 			break;
2938 		}
2939 
2940 		qdf_spin_lock_bh(&wmi->diag_eventq_lock);
2941 		buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
2942 		qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
2943 	}
2944 	qdf_timer_free(&wd_timer);
2945 }
2946 
2947 #ifdef FEATURE_RUNTIME_PM
2948 /**
2949  * wmi_runtime_pm_init() - initialize runtime pm wmi variables
2950  * @wmi_handle: wmi context
2951  */
2952 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
2953 {
2954 	qdf_atomic_init(&wmi_handle->runtime_pm_inprogress);
2955 }
2956 
2957 /**
2958  * wmi_set_runtime_pm_inprogress() - set runtime pm progress flag
2959  * @wmi_handle: wmi context
2960  * @val: runtime pm progress flag
2961  */
2962 void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val)
2963 {
2964 	qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val);
2965 }
2966 
2967 /**
2968  * wmi_get_runtime_pm_inprogress() - get runtime pm progress flag
2969  * @wmi_handle: wmi context
2970  */
2971 inline bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle)
2972 {
2973 	return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress);
2974 }
2975 #else
2976 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
2977 {
2978 }
2979 #endif
2980 
2981 /**
2982  * wmi_unified_get_soc_handle: Get WMI SoC handle
2983  * @param wmi_handle: WMI context obtained from wmi_unified_attach
2984  *
2985  * return: Pointer to Soc handle
2986  */
2987 void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle)
2988 {
2989 	return wmi_handle->soc;
2990 }
2991 
2992 /**
2993  * wmi_interface_logging_init: Interface logging init
2994  * @param wmi_handle: Pointer to wmi handle object
2995  *
2996  * return: None
2997  */
2998 #ifdef WMI_INTERFACE_EVENT_LOGGING
2999 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
3000 					      uint32_t pdev_idx)
3001 {
3002 	if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) {
3003 		qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
3004 		wmi_debugfs_init(wmi_handle, pdev_idx);
3005 	}
3006 }
3007 #else
3008 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
3009 					      uint32_t pdev_idx)
3010 {
3011 }
3012 #endif
3013 
3014 static QDF_STATUS wmi_initialize_worker_context(struct wmi_unified *wmi_handle)
3015 {
3016 	wmi_handle->wmi_rx_work_queue =
3017 		qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue");
3018 	if (!wmi_handle->wmi_rx_work_queue) {
3019 		wmi_err("failed to create wmi_rx_event_work_queue");
3020 		return QDF_STATUS_E_RESOURCES;
3021 	}
3022 
3023 	qdf_spinlock_create(&wmi_handle->eventq_lock);
3024 	qdf_nbuf_queue_init(&wmi_handle->event_queue);
3025 	qdf_create_work(0, &wmi_handle->rx_event_work,
3026 			wmi_rx_event_work, wmi_handle);
3027 
3028 	wmi_handle->wmi_rx_diag_work_queue =
3029 		qdf_alloc_unbound_workqueue("wmi_rx_diag_event_work_queue");
3030 	if (!wmi_handle->wmi_rx_diag_work_queue) {
3031 		wmi_err("failed to create wmi_rx_diag_event_work_queue");
3032 		return QDF_STATUS_E_RESOURCES;
3033 	}
3034 	qdf_spinlock_create(&wmi_handle->diag_eventq_lock);
3035 	qdf_nbuf_queue_init(&wmi_handle->diag_event_queue);
3036 	qdf_create_work(0, &wmi_handle->rx_diag_event_work,
3037 			wmi_rx_diag_event_work, wmi_handle);
3038 	wmi_handle->wmi_rx_diag_events_dropped = 0;
3039 
3040 	return QDF_STATUS_SUCCESS;
3041 }
3042 
3043 /**
3044  * wmi_unified_get_pdev_handle: Get WMI pdev handle
3045  * @param soc: Pointer to wmi soc object
3046  * @param pdev_idx: pdev index
3047  *
3048  * return: Pointer to wmi handle or NULL on failure
3049  */
3050 void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx)
3051 {
3052 	struct wmi_unified *wmi_handle;
3053 	QDF_STATUS status;
3054 
3055 	if (pdev_idx >= WMI_MAX_RADIOS)
3056 		return NULL;
3057 
3058 	if (!soc->wmi_pdev[pdev_idx]) {
3059 		wmi_handle =
3060 			(struct wmi_unified *) qdf_mem_malloc(
3061 					sizeof(struct wmi_unified));
3062 		if (!wmi_handle)
3063 			return NULL;
3064 
3065 		status = wmi_initialize_worker_context(wmi_handle);
3066 		if (QDF_IS_STATUS_ERROR(status))
3067 			goto error;
3068 
3069 		wmi_handle->scn_handle = soc->scn_handle;
3070 		wmi_handle->event_id = soc->event_id;
3071 		wmi_handle->event_handler = soc->event_handler;
3072 		wmi_handle->ctx = soc->ctx;
3073 		wmi_handle->ops = soc->ops;
3074 		wmi_handle->wmi_events = soc->wmi_events;
3075 		wmi_handle->services = soc->services;
3076 		wmi_handle->soc = soc;
3077 		wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3078 		wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3079 		wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3080 		wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3081 		wmi_interface_logging_init(wmi_handle, pdev_idx);
3082 		qdf_atomic_init(&wmi_handle->pending_cmds);
3083 		qdf_atomic_init(&wmi_handle->is_target_suspended);
3084 		wmi_handle->target_type = soc->target_type;
3085 		wmi_handle->wmi_max_cmds = soc->wmi_max_cmds;
3086 
3087 		wmi_interface_sequence_init(wmi_handle);
3088 		if (wmi_ext_dbgfs_init(wmi_handle, pdev_idx) !=
3089 		    QDF_STATUS_SUCCESS)
3090 			wmi_err("Failed to initialize wmi extended debugfs");
3091 
3092 		soc->wmi_pdev[pdev_idx] = wmi_handle;
3093 	} else
3094 		wmi_handle = soc->wmi_pdev[pdev_idx];
3095 
3096 	wmi_handle->wmi_stopinprogress = 0;
3097 	wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx];
3098 	wmi_handle->htc_handle = soc->htc_handle;
3099 	wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx];
3100 	wmi_handle->tag_crash_inject = false;
3101 	wmi_interface_sequence_reset(wmi_handle);
3102 
3103 	return wmi_handle;
3104 
3105 error:
3106 	qdf_mem_free(wmi_handle);
3107 
3108 	return NULL;
3109 }
3110 qdf_export_symbol(wmi_unified_get_pdev_handle);
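
/*
 * Per-radio sketch (illustrative only): on multi-radio SoCs each pdev gets
 * its own wmi handle, created lazily by wmi_unified_get_pdev_handle(); pdev 0
 * reuses the handle created in wmi_unified_attach().  'pdev_idx' is supplied
 * by the caller.
 *
 *	struct wmi_unified *pdev_wmi;
 *
 *	pdev_wmi = wmi_unified_get_pdev_handle(soc, pdev_idx);
 *	if (!pdev_wmi)
 *		return QDF_STATUS_E_FAILURE;
 */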
3111 
3112 static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t);
3113 
3114 void wmi_unified_register_module(enum wmi_target_type target_type,
3115 			void (*wmi_attach)(wmi_unified_t wmi_handle))
3116 {
3117 	if (target_type < WMI_MAX_TARGET_TYPE)
3118 		wmi_attach_register[target_type] = wmi_attach;
3119 
3120 	return;
3121 }
3122 qdf_export_symbol(wmi_unified_register_module);
3123 
3124 /**
3125  * wmi_wbuff_register() - register wmi with wbuff
3126  * @wmi_handle: handle to wmi
3127  *
3128  * @Return: void
3129  */
3130 static void wmi_wbuff_register(struct wmi_unified *wmi_handle)
3131 {
3132 	struct wbuff_alloc_request wbuff_alloc[4];
3133 
3134 	wbuff_alloc[0].slot = WBUFF_POOL_0;
3135 	wbuff_alloc[0].size = WMI_WBUFF_POOL_0_SIZE;
3136 	wbuff_alloc[1].slot = WBUFF_POOL_1;
3137 	wbuff_alloc[1].size = WMI_WBUFF_POOL_1_SIZE;
3138 	wbuff_alloc[2].slot = WBUFF_POOL_2;
3139 	wbuff_alloc[2].size = WMI_WBUFF_POOL_2_SIZE;
3140 	wbuff_alloc[3].slot = WBUFF_POOL_3;
3141 	wbuff_alloc[3].size = WMI_WBUFF_POOL_3_SIZE;
3142 
3143 	wmi_handle->wbuff_handle = wbuff_module_register(wbuff_alloc, 4,
3144 							 WMI_MIN_HEAD_ROOM, 4);
3145 }
3146 
3147 /**
3148  * wmi_wbuff_deregister() - deregister wmi with wbuff
3149  * @wmi_handle: handle to wmi
3150  *
3151  * @Return: void
3152  */
3153 static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle)
3154 {
3155 	wbuff_module_deregister(wmi_handle->wbuff_handle);
3156 	wmi_handle->wbuff_handle = NULL;
3157 }
3158 
3159 /**
3160  * wmi_unified_attach() -  attach for unified WMI
3161  * @scn_handle: handle to SCN
3162  * @param: attach parameters (OS device context, TLV or non-TLV target type,
3163  *         cookie based allocation enabled/disabled, umac rx callbacks,
3164  *         objmgr psoc, etc.)
3167  *
3168  * @Return: wmi handle.
3169  */
3170 void *wmi_unified_attach(void *scn_handle,
3171 			 struct wmi_unified_attach_params *param)
3172 {
3173 	struct wmi_unified *wmi_handle;
3174 	struct wmi_soc *soc;
3175 	QDF_STATUS status;
3176 
3177 	soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc));
3178 	if (!soc)
3179 		return NULL;
3180 
3181 	wmi_handle =
3182 		(struct wmi_unified *) qdf_mem_malloc(
3183 			sizeof(struct wmi_unified));
3184 	if (!wmi_handle) {
3185 		qdf_mem_free(soc);
3186 		return NULL;
3187 	}
3188 
3189 	status = wmi_initialize_worker_context(wmi_handle);
3190 	if (QDF_IS_STATUS_ERROR(status))
3191 		goto error;
3192 
3193 	wmi_handle->soc = soc;
3194 	wmi_handle->soc->soc_idx = param->soc_id;
3195 	wmi_handle->soc->is_async_ep = param->is_async_ep;
3196 	wmi_handle->event_id = soc->event_id;
3197 	wmi_handle->event_handler = soc->event_handler;
3198 	wmi_handle->ctx = soc->ctx;
3199 	wmi_handle->wmi_events = soc->wmi_events;
3200 	wmi_handle->services = soc->services;
3201 	wmi_handle->scn_handle = scn_handle;
3202 	wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3203 	wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3204 	wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3205 	wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3206 	soc->scn_handle = scn_handle;
3207 	wmi_handle->target_type = param->target_type;
3208 	soc->target_type = param->target_type;
3209 
3210 	if (param->target_type >= WMI_MAX_TARGET_TYPE)
3211 		goto error;
3212 
3213 	if (wmi_attach_register[param->target_type]) {
3214 		wmi_attach_register[param->target_type](wmi_handle);
3215 	} else {
3216 		wmi_err("wmi attach is not registered");
3217 		goto error;
3218 	}
3219 
3220 	qdf_atomic_init(&wmi_handle->pending_cmds);
3221 	qdf_atomic_init(&wmi_handle->is_target_suspended);
3222 	qdf_atomic_init(&wmi_handle->is_target_suspend_acked);
3223 	qdf_atomic_init(&wmi_handle->num_stats_over_qmi);
3224 	wmi_runtime_pm_init(wmi_handle);
3225 	wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0);
3226 
3227 	wmi_interface_sequence_init(wmi_handle);
3228 	/* Assign target cookie capability */
3229 	wmi_handle->use_cookie = param->use_cookie;
3230 	wmi_handle->osdev = param->osdev;
3231 	wmi_handle->wmi_stopinprogress = 0;
3232 	wmi_handle->wmi_max_cmds = param->max_commands;
3233 	soc->wmi_max_cmds = param->max_commands;
3234 	/* Increase the ref count once refcount infra is present */
3235 	soc->wmi_psoc = param->psoc;
3236 	qdf_spinlock_create(&soc->ctx_lock);
3237 	soc->ops = wmi_handle->ops;
3238 	soc->wmi_pdev[0] = wmi_handle;
3239 	if (wmi_ext_dbgfs_init(wmi_handle, 0) != QDF_STATUS_SUCCESS)
3240 		wmi_err("Failed to initialize wmi extended debugfs");
3241 
3242 	wmi_wbuff_register(wmi_handle);
3243 
3244 	wmi_hang_event_notifier_register(wmi_handle);
3245 
3246 	wmi_minidump_attach(wmi_handle);
3247 
3248 	return wmi_handle;
3249 
3250 error:
3251 	qdf_mem_free(soc);
3252 	qdf_mem_free(wmi_handle);
3253 
3254 	return NULL;
3255 }
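
/*
 * Attach/detach sketch (illustrative only; field values are placeholders):
 * the caller fills a wmi_unified_attach_params structure and keeps the
 * returned handle for the lifetime of the SoC, releasing it with
 * wmi_unified_detach().
 *
 *	struct wmi_unified_attach_params params = {0};
 *	void *wmi_handle;
 *
 *	params.osdev = osdev;
 *	params.target_type = WMI_TLV_TARGET;
 *	params.use_cookie = false;
 *	params.max_commands = 1024;
 *	params.psoc = psoc;
 *	params.soc_id = 0;
 *	wmi_handle = wmi_unified_attach(scn_handle, &params);
 *	if (!wmi_handle)
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	wmi_unified_detach(wmi_handle);
 */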
3256 
3257 /**
3258  * wmi_unified_detach() -  detach for unified WMI
3259  *
3260  * @wmi_handle  : handle to wmi.
3261  *
3262  * @Return: none.
3263  */
3264 void wmi_unified_detach(struct wmi_unified *wmi_handle)
3265 {
3266 	wmi_buf_t buf;
3267 	struct wmi_soc *soc;
3268 	uint8_t i;
3269 
3270 	wmi_minidump_detach(wmi_handle);
3271 
3272 	wmi_hang_event_notifier_unregister();
3273 
3274 	wmi_wbuff_deregister(wmi_handle);
3275 
3276 	soc = wmi_handle->soc;
3277 	for (i = 0; i < WMI_MAX_RADIOS; i++) {
3278 		if (soc->wmi_pdev[i]) {
3279 			qdf_flush_workqueue(0,
3280 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3281 			qdf_destroy_workqueue(0,
3282 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3283 			wmi_debugfs_remove(soc->wmi_pdev[i]);
3284 			buf = qdf_nbuf_queue_remove(
3285 					&soc->wmi_pdev[i]->event_queue);
3286 			while (buf) {
3287 				qdf_nbuf_free(buf);
3288 				buf = qdf_nbuf_queue_remove(
3289 						&soc->wmi_pdev[i]->event_queue);
3290 			}
3291 
3292 			qdf_flush_work(&soc->wmi_pdev[i]->rx_diag_event_work);
3293 			buf = qdf_nbuf_queue_remove(
3294 					&soc->wmi_pdev[i]->diag_event_queue);
3295 			while (buf) {
3296 				qdf_nbuf_free(buf);
3297 				buf = qdf_nbuf_queue_remove(
3298 					&soc->wmi_pdev[i]->diag_event_queue);
3299 			}
3300 
3301 			wmi_log_buffer_free(soc->wmi_pdev[i]);
3302 
3303 			/* Free events logs list */
3304 			if (soc->wmi_pdev[i]->events_logs_list)
3305 				qdf_mem_free(
3306 					soc->wmi_pdev[i]->events_logs_list);
3307 
3308 			qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock);
3309 			qdf_spinlock_destroy(
3310 					&soc->wmi_pdev[i]->diag_eventq_lock);
3311 
3312 			wmi_interface_sequence_deinit(soc->wmi_pdev[i]);
3313 			wmi_ext_dbgfs_deinit(soc->wmi_pdev[i]);
3314 
3315 			qdf_mem_free(soc->wmi_pdev[i]);
3316 		}
3317 	}
3318 	qdf_spinlock_destroy(&soc->ctx_lock);
3319 
3320 	if (soc->wmi_service_bitmap) {
3321 		qdf_mem_free(soc->wmi_service_bitmap);
3322 		soc->wmi_service_bitmap = NULL;
3323 	}
3324 
3325 	if (soc->wmi_ext_service_bitmap) {
3326 		qdf_mem_free(soc->wmi_ext_service_bitmap);
3327 		soc->wmi_ext_service_bitmap = NULL;
3328 	}
3329 
3330 	if (soc->wmi_ext2_service_bitmap) {
3331 		qdf_mem_free(soc->wmi_ext2_service_bitmap);
3332 		soc->wmi_ext2_service_bitmap = NULL;
3333 	}
3334 
3335 	/* Decrease the ref count once refcount infra is present */
3336 	soc->wmi_psoc = NULL;
3337 	qdf_mem_free(soc);
3338 }
3339 
3340 /**
3341  * wmi_unified_remove_work() - detach for WMI work
3342  * @wmi_handle: handle to WMI
3343  *
3344  * A function that does not fully detach WMI, but just remove work
3345  * queue items associated with it. This is used to make sure that
3346  * before any other processing code that may destroy related contexts
3347  * (HTC, etc), work queue processing on WMI has already been stopped.
3348  *
3349  * Return: None
3350  */
3351 void
3352 wmi_unified_remove_work(struct wmi_unified *wmi_handle)
3353 {
3354 	wmi_buf_t buf;
3355 
3356 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue);
3357 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
3358 	buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3359 	while (buf) {
3360 		qdf_nbuf_free(buf);
3361 		buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3362 	}
3363 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
3364 
3365 	/* Remove diag events work */
3366 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_diag_work_queue);
3367 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
3368 	buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3369 	while (buf) {
3370 		qdf_nbuf_free(buf);
3371 		buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3372 	}
3373 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
3374 }
3375 
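/*
 * Illustrative teardown ordering (sketch only): stop WMI work queue
 * processing before the HTC layer is torn down, then detach WMI. The
 * htc_stop() name is an assumption about the HTC API, shown here only to
 * indicate sequencing.
 *
 *	wmi_unified_remove_work(wmi_handle);
 *	htc_stop(htc_handle);
 *	wmi_unified_detach(wmi_handle);
 */
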
3376 /**
3377  * wmi_htc_tx_complete() - Process htc tx completion
3378  *
3379  * @ctx: handle to the WMI SoC context
3380  * @htc_pkt: pointer to the completed htc packet
3381  *
3382  * @Return: none.
3383  */
3384 static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
3385 {
3386 	struct wmi_soc *soc = (struct wmi_soc *) ctx;
3387 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3388 	u_int8_t *buf_ptr;
3389 	u_int32_t len;
3390 	struct wmi_unified *wmi_handle;
3391 #ifdef WMI_INTERFACE_EVENT_LOGGING
3392 	struct wmi_debug_log_info *log_info;
3393 	uint32_t cmd_id;
3394 	uint8_t *offset_ptr;
3395 	qdf_dma_addr_t dma_addr;
3396 	uint64_t phy_addr;
3397 #endif
3398 
3399 	ASSERT(wmi_cmd_buf);
3400 	wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint);
3401 	if (!wmi_handle) {
3402 		wmi_err("Unable to get wmi handle");
3403 		QDF_ASSERT(0);
3404 		return;
3405 	}
3406 	buf_ptr = (u_int8_t *)wmi_buf_data(wmi_cmd_buf);
3407 #ifdef WMI_INTERFACE_EVENT_LOGGING
3408 	log_info = &wmi_handle->log_info;
3409 
3410 	if (log_info->wmi_logging_enable) {
3411 		cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf),
3412 				WMI_CMD_HDR, COMMANDID);
3413 
3414 		dma_addr = QDF_NBUF_CB_PADDR(wmi_cmd_buf);
3415 		phy_addr = qdf_mem_virt_to_phys(qdf_nbuf_data(wmi_cmd_buf));
3416 
3417 		qdf_spin_lock_bh(&log_info->wmi_record_lock);
3418 		/* Record 16 bytes of WMI cmd tx complete data
3419 		 * - exclude TLV and WMI headers
3420 		 */
3421 		offset_ptr = buf_ptr + wmi_handle->soc->buf_offset_command;
3422 		if (wmi_handle->ops->is_management_record(cmd_id)) {
3423 			WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3424 						       offset_ptr);
3425 		} else {
3426 			WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3427 						  offset_ptr, dma_addr,
3428 						  phy_addr);
3429 		}
3430 
3431 		qdf_spin_unlock_bh(&log_info->wmi_record_lock);
3432 	}
3433 #endif
3434 
3435 	wmi_interface_sequence_check(wmi_handle, wmi_cmd_buf);
3436 
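	/* Scrub the completed command payload before the buffer is freed */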
3437 	len = qdf_nbuf_len(wmi_cmd_buf);
3438 	qdf_mem_zero(buf_ptr, len);
3439 	wmi_buf_free(wmi_cmd_buf);
3440 	qdf_mem_free(htc_pkt);
3441 	qdf_atomic_dec(&wmi_handle->pending_cmds);
3442 }
3443 
3444 #ifdef FEATURE_RUNTIME_PM
3445 /**
3446  * wmi_htc_log_pkt() - Print information of WMI command from HTC packet
3447  *
3448  * @ctx: handle of WMI context
3449  * @htc_pkt: handle of HTC packet
3450  *
3451  * @Return: none
3452  */
3453 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3454 {
3455 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3456 	uint32_t cmd_id;
3457 
3458 	ASSERT(wmi_cmd_buf);
3459 	cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), WMI_CMD_HDR,
3460 			       COMMANDID);
3461 
3462 	wmi_debug("WMI command from HTC packet: %s, ID: %d",
3463 		 wmi_id_to_name(cmd_id), cmd_id);
3464 }
3465 #else
3466 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3467 {
3468 }
3469 #endif
3470 
3471 /**
3472  * wmi_connect_pdev_htc_service() -  WMI API to connect to the HTC service
3473  *
3474  * @soc: handle to WMI SoC
3475  * @pdev_idx: Pdev index
3476  *
3477  * @Return: QDF_STATUS
3478  */
3479 static QDF_STATUS wmi_connect_pdev_htc_service(struct wmi_soc *soc,
3480 					       uint32_t pdev_idx)
3481 {
3482 	QDF_STATUS status;
3483 	struct htc_service_connect_resp response;
3484 	struct htc_service_connect_req connect;
3485 
3486 	OS_MEMZERO(&connect, sizeof(connect));
3487 	OS_MEMZERO(&response, sizeof(response));
3488 
3489 	/* meta data is unused for now */
3490 	connect.pMetaData = NULL;
3491 	connect.MetaDataLength = 0;
3492 	/* these fields are the same for all service endpoints */
3493 	connect.EpCallbacks.pContext = soc;
3494 	connect.EpCallbacks.EpTxCompleteMultiple =
3495 		NULL /* Control path completion ar6000_tx_complete */;
3496 	connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */;
3497 	connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */;
3498 	connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */;
3499 	connect.EpCallbacks.EpTxComplete =
3500 		wmi_htc_tx_complete /* ar6000_tx_complete */;
3501 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3502 
3503 	/* connect to control service */
3504 	connect.service_id = soc->svc_ids[pdev_idx];
3505 	status = htc_connect_service(soc->htc_handle, &connect, &response);
3506 
3507 	if (QDF_IS_STATUS_ERROR(status)) {
3508 		wmi_err("Failed to connect to WMI CONTROL service status:%d",
3509 			 status);
3510 		return status;
3511 	}
3512 
3513 	if (soc->is_async_ep)
3514 		htc_set_async_ep(soc->htc_handle, response.Endpoint, true);
3515 
3516 	soc->wmi_endpoint_id[pdev_idx] = response.Endpoint;
3517 	soc->max_msg_len[pdev_idx] = response.MaxMsgLength;
3518 
3519 	return QDF_STATUS_SUCCESS;
3520 }
3521 
3522 QDF_STATUS
3523 wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle,
3524 				HTC_HANDLE htc_handle)
3525 {
3526 	uint32_t i;
3527 	uint8_t wmi_ep_count;
3528 
3529 	wmi_handle->soc->htc_handle = htc_handle;
3530 
3531 	wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle);
3532 	if (wmi_ep_count > WMI_MAX_RADIOS)
3533 		return QDF_STATUS_E_FAULT;
3534 
3535 	for (i = 0; i < wmi_ep_count; i++)
3536 		wmi_connect_pdev_htc_service(wmi_handle->soc, i);
3537 
3538 	wmi_handle->htc_handle = htc_handle;
3539 	wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0];
3540 	wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0];
3541 
3542 	return QDF_STATUS_SUCCESS;
3543 }
3544 
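/*
 * Illustrative HTC hookup (sketch only): connect the per-pdev WMI control
 * services once the HTC handle is available. The surrounding error handling
 * is hypothetical.
 *
 *	if (QDF_IS_STATUS_ERROR(wmi_unified_connect_htc_service(wmi,
 *								htc_handle)))
 *		return QDF_STATUS_E_FAILURE;
 */
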
3545 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
3546 QDF_STATUS wmi_diag_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
3547 					     HTC_HANDLE htc_handle)
3548 {
3549 	QDF_STATUS status;
3550 	struct htc_service_connect_resp response = {0};
3551 	struct htc_service_connect_req connect = {0};
3552 
3553 	/* meta data is unused for now */
3554 	connect.pMetaData = NULL;
3555 	connect.MetaDataLength = 0;
3556 	connect.EpCallbacks.pContext = wmi_handle->soc;
3557 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3558 	connect.EpCallbacks.EpRecv = wmi_control_diag_rx /* wmi diag rx */;
3559 	connect.EpCallbacks.EpRecvRefill = NULL;
3560 	connect.EpCallbacks.EpSendFull = NULL;
3561 	connect.EpCallbacks.EpTxComplete = NULL;
3562 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3563 
3564 	/* connect to wmi diag service */
3565 	connect.service_id = WMI_CONTROL_DIAG_SVC;
3566 	status = htc_connect_service(htc_handle, &connect, &response);
3567 
3568 	if (QDF_IS_STATUS_ERROR(status)) {
3569 		wmi_err("Failed to connect to WMI DIAG service status:%d",
3570 			 status);
3571 		return status;
3572 	}
3573 
3574 	if (wmi_handle->soc->is_async_ep)
3575 		htc_set_async_ep(htc_handle, response.Endpoint, true);
3576 
3577 	wmi_handle->soc->wmi_diag_endpoint_id = response.Endpoint;
3578 
3579 	return QDF_STATUS_SUCCESS;
3580 }
3581 #endif
3582 
3583 /**
3584  * wmi_get_host_credits() -  WMI API to get updated host_credits
3585  *
3586  * @wmi_handle: handle to WMI.
3587  *
3588  * @Return: updated host_credits.
3589  */
3590 int wmi_get_host_credits(wmi_unified_t wmi_handle)
3591 {
3592 	int host_credits = 0;
3593 
3594 	htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle,
3595 						 &host_credits);
3596 	return host_credits;
3597 }
3598 
3599 /**
3600  * wmi_get_pending_cmds() - WMI API to get WMI Pending Commands in the HTC
3601  *                          queue
3602  *
3603  * @wmi_handle: handle to WMI.
3604  *
3605  * @Return: Pending Commands in the HTC queue.
3606  */
3607 int wmi_get_pending_cmds(wmi_unified_t wmi_handle)
3608 {
3609 	return qdf_atomic_read(&wmi_handle->pending_cmds);
3610 }
3611 
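/*
 * Illustrative flow-control check (sketch only): the starvation heuristic is
 * hypothetical and only shows how credits and pending counts pair up.
 *
 *	if (!wmi_get_host_credits(wmi) &&
 *	    wmi_get_pending_cmds(wmi) >= wmi->wmi_max_cmds)
 *		wmi_err("WMI endpoint appears credit starved");
 */
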
3612 /**
3613  * wmi_set_target_suspend() -  WMI API to set target suspend state
3614  *
3615  * @wmi_handle: handle to WMI.
3616  * @val: suspend state boolean.
3617  *
3618  * @Return: none.
3619  */
3620 void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val)
3621 {
3622 	qdf_atomic_set(&wmi_handle->is_target_suspended, val);
3623 }
3624 
3625 /**
3626  * wmi_set_target_suspend_acked() -  WMI API to set target suspend acked flag
3627  *
3628  * @wmi_handle: handle to WMI.
3629  * @val: target suspend command acked flag.
3630  *
3631  * @Return: none.
3632  */
3633 void wmi_set_target_suspend_acked(wmi_unified_t wmi_handle, A_BOOL val)
3634 {
3635 	qdf_atomic_set(&wmi_handle->is_target_suspend_acked, val);
3636 }
3637 
3638 /**
3639  * wmi_is_target_suspended() - WMI API to check target suspend state
3640  * @wmi_handle: handle to WMI.
3641  *
3642  * WMI API to check target suspend state
3643  *
3644  * Return: true if target is suspended, else false.
3645  */
3646 bool wmi_is_target_suspended(struct wmi_unified *wmi_handle)
3647 {
3648 	return qdf_atomic_read(&wmi_handle->is_target_suspended);
3649 }
3650 qdf_export_symbol(wmi_is_target_suspended);
3651 
3652 /**
3653  * wmi_is_target_suspend_acked() - WMI API to check target suspend command is
3654  *                                 acked or not
3655  * @wmi_handle: handle to WMI.
3656  *
3657  * WMI API to check whether the target suspend command is acked or not
3658  *
3659  * Return: true if target suspend command is acked, else false.
3660  */
3661 bool wmi_is_target_suspend_acked(struct wmi_unified *wmi_handle)
3662 {
3663 	return qdf_atomic_read(&wmi_handle->is_target_suspend_acked);
3664 }
3665 qdf_export_symbol(wmi_is_target_suspend_acked);
3666 
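/*
 * Illustrative suspend bookkeeping (sketch only): the actual suspend command
 * exchange with the firmware happens elsewhere, and the ordering shown here
 * is an assumption used only to relate the flags to each other.
 *
 *	wmi_set_target_suspend_acked(wmi, false);
 *	... send the WMI suspend command and wait for the ack event ...
 *	wmi_set_target_suspend_acked(wmi, true);
 *	wmi_set_target_suspend(wmi, true);
 *	if (wmi_is_target_suspended(wmi))
 *		... avoid sending further non-wake WMI commands ...
 */
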
3667 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
3668 void wmi_set_qmi_stats(wmi_unified_t wmi_handle, bool val)
3669 {
3670 	wmi_handle->is_qmi_stats_enabled = val;
3671 }
3672 
3673 bool wmi_is_qmi_stats_enabled(struct wmi_unified *wmi_handle)
3674 {
3675 	return wmi_handle->is_qmi_stats_enabled;
3676 }
3677 #endif
3678 
3679 /**
3680  * wmi_tag_crash_inject() - WMI API to set crash injection state
3681  * @param wmi_handle:	handle to WMI.
3682  * @param flag:		crash injection state boolean.
3683  */
3684 void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag)
3685 {
3686 	wmi_handle->tag_crash_inject = flag;
3687 }
3688 
3689 /**
3690  * wmi_set_is_wow_bus_suspended() - WMI API to set bus suspend state
3691  * @param wmi_handle:	handle to WMI.
3692  * @param val:		suspend state boolean.
3693  */
3694 void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val)
3695 {
3696 	qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val);
3697 }
3698 
3699 void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val)
3700 {
3701 	wmi_handle->tgt_force_assert_enable = val;
3702 }
3703 
3704 /**
3705  * wmi_stop() - generic function to block unified WMI command
3706  * @wmi_handle: handle to WMI.
3707  *
3708  * @Return: success always.
3709  */
3710 int
3711 wmi_stop(wmi_unified_t wmi_handle)
3712 {
3713 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3714 		  "WMI Stop");
3715 	wmi_handle->wmi_stopinprogress = 1;
3716 	return 0;
3717 }
3718 
3719 /**
3720  * wmi_start() - generic function to allow unified WMI command
3721  * @wmi_handle: handle to WMI.
3722  *
3723  * @Return: success always.
3724  */
3725 int
3726 wmi_start(wmi_unified_t wmi_handle)
3727 {
3728 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3729 		  "WMI Start");
3730 	wmi_handle->wmi_stopinprogress = 0;
3731 	return 0;
3732 }
3733 
3734 /**
3735  * wmi_is_blocked() - generic function to check if WMI is blocked
3736  * @wmi_handle: handle to WMI.
3737  *
3738  * @Return: true, if blocked, false if not blocked
3739  */
3740 bool
3741 wmi_is_blocked(wmi_unified_t wmi_handle)
3742 {
3743 	return wmi_handle->wmi_stopinprogress != 0;
3744 }
3745 
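/*
 * Illustrative guard in a command-send path (sketch only): a caller that
 * honours the stop flag might bail out like this.
 *
 *	if (wmi_is_blocked(wmi))
 *		return QDF_STATUS_E_BUSY;
 */
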
3746 /**
3747  * wmi_flush_endpoint() - API to flush all the previous packets
3748  *                        associated with the wmi endpoint
3749  * @param wmi_handle      : handle to WMI.
3750  */
3751 void
3752 wmi_flush_endpoint(wmi_unified_t wmi_handle)
3753 {
3754 	htc_flush_endpoint(wmi_handle->htc_handle,
3755 		wmi_handle->wmi_endpoint_id, 0);
3756 }
3757 qdf_export_symbol(wmi_flush_endpoint);
3758 
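/*
 * Illustrative shutdown sequence (sketch only): block new commands first,
 * then drop whatever is still queued on the endpoint.
 *
 *	wmi_stop(wmi);
 *	wmi_flush_endpoint(wmi);
 */
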
3759 /**
3760  * wmi_pdev_id_conversion_enable() - API to enable pdev_id/phy_id conversion
3761  *                     in WMI. By default pdev_id conversion is not done in WMI.
3762  *                     This API can be used to enable conversion in WMI.
3763  * @param wmi_handle   : handle to WMI
3764  * @param pdev_id_map  : pointer to pdev_id_map
3765  * @param size         : size of pdev_id_map
3766  * Return: none
3767  */
3768 void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle,
3769 				   uint32_t *pdev_id_map,
3770 				   uint8_t size)
3771 {
3772 	if (wmi_handle->target_type == WMI_TLV_TARGET)
3773 		wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle,
3774 							       pdev_id_map,
3775 							       size);
3776 }
3777 
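/*
 * Illustrative host-to-target pdev id map (sketch only): the mapping values
 * below are hypothetical and target specific.
 *
 *	static uint32_t pdev_id_map[] = {1, 2, 3};
 *
 *	wmi_pdev_id_conversion_enable(wmi, pdev_id_map,
 *				      QDF_ARRAY_SIZE(pdev_id_map));
 */
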
3778 int __wmi_validate_handle(wmi_unified_t wmi_handle, const char *func)
3779 {
3780 	if (!wmi_handle) {
3781 		wmi_err("Invalid WMI handle (via %s)", func);
3782 		return -EINVAL;
3783 	}
3784 
3785 	return 0;
3786 }
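
/*
 * Illustrative use of the handle check (sketch only): callers typically pass
 * __func__ so that the error log identifies the offending call site.
 *
 *	if (__wmi_validate_handle(wmi, __func__))
 *		return QDF_STATUS_E_INVAL;
 */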
3787