xref: /wlan-dirver/qca-wifi-host-cmn/wmi/src/wmi_unified.c (revision cdcad2eab912e0b481bf1c0872ab940ace144232)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*
21  * Host WMI unified implementation
22  */
23 #include "htc_api.h"
25 #include "wmi_unified_priv.h"
26 #include "wmi_unified_api.h"
27 #include "qdf_module.h"
28 #include "qdf_platform.h"
29 #ifdef WMI_EXT_DBG
30 #include "qdf_list.h"
31 #include "qdf_atomic.h"
32 #endif
33 
34 #ifndef WMI_NON_TLV_SUPPORT
35 #include "wmi_tlv_helper.h"
36 #endif
37 
38 #include <linux/debugfs.h>
39 #include <target_if.h>
40 #include <qdf_debugfs.h>
41 #include "wmi_filtered_logging.h"
42 #include <wmi_hang_event.h>
43 
44 /* This check for CONFIG_WIN was temporarily added due to a redeclaration
45 compilation error in MCL. The error is caused by the inclusion of wmi.h in
46 wmi_unified_api.h, which gets included here through ol_if_athvar.h. Eventually
47 wmi.h is expected to be removed from wmi_unified_api.h after cleanup, which
48 will require WMI_CMD_HDR to be defined here. */
49 /* Copied from wmi.h */
50 #undef MS
51 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
52 #undef SM
53 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
54 #undef WO
55 #define WO(_f)      ((_f##_OFFSET) >> 2)
56 
57 #undef GET_FIELD
58 #define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f)
59 #undef SET_FIELD
60 #define SET_FIELD(_addr, _f, _val)  \
61 	    (*((uint32_t *)(_addr) + WO(_f)) = \
62 		(*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f))
63 
64 #define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \
65 	    GET_FIELD(_msg_buf, _msg_type ## _ ## _f)
66 
67 #define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \
68 	    SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val)
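/*
 * Example (matching the usage further below in this file):
 *   id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
 * performs a masked and shifted read of the 24-bit command/event ID from the
 * first 32-bit word of the header.
 */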
69 
70 #define WMI_EP_APASS           0x0
71 #define WMI_EP_LPASS           0x1
72 #define WMI_EP_SENSOR          0x2
73 
74 #define WMI_INFOS_DBG_FILE_PERM (QDF_FILE_USR_READ | \
75 				 QDF_FILE_USR_WRITE | \
76 				 QDF_FILE_GRP_READ | \
77 				 QDF_FILE_OTH_READ)
78 
79 /*
80  * Control Path
81  */
82 typedef PREPACK struct {
83 	uint32_t	commandId:24,
84 			reserved:2, /* used for WMI endpoint ID */
85 			plt_priv:6; /* platform private */
86 } POSTPACK WMI_CMD_HDR;        /* used for commands and events */
87 
88 #define WMI_CMD_HDR_COMMANDID_LSB           0
89 #define WMI_CMD_HDR_COMMANDID_MASK          0x00ffffff
90 #define WMI_CMD_HDR_COMMANDID_OFFSET        0x00000000
91 #define WMI_CMD_HDR_WMI_ENDPOINTID_MASK        0x03000000
92 #define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET      24
93 #define WMI_CMD_HDR_PLT_PRIV_LSB               24
94 #define WMI_CMD_HDR_PLT_PRIV_MASK              0xff000000
95 #define WMI_CMD_HDR_PLT_PRIV_OFFSET            0x00000000
96 /* end of copy wmi.h */
97 
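/* Minimum headroom reserved when WMI command buffers are allocated, leaving
 * room for the command/HTC headers that are pushed in front of the payload.
 */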
98 #define WMI_MIN_HEAD_ROOM 64
99 
100 /* WBUFF pool sizes for WMI */
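/* Each pool size below is the number of pre-allocated buffers of the
 * allocation size noted in the comment preceding it.
 */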
101 /* Allocation of size 256 bytes */
102 #define WMI_WBUFF_POOL_0_SIZE 128
103 /* Allocation of size 512 bytes */
104 #define WMI_WBUFF_POOL_1_SIZE 16
105 /* Allocation of size 1024 bytes */
106 #define WMI_WBUFF_POOL_2_SIZE 8
107 /* Allocation of size 2048 bytes */
108 #define WMI_WBUFF_POOL_3_SIZE 8
109 
110 #define RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT 500
111 
112 #ifdef WMI_INTERFACE_EVENT_LOGGING
113 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
114 /* TODO Cleanup this backported function */
115 static int wmi_bp_seq_printf(qdf_debugfs_file_t m, const char *f, ...)
116 {
117 	va_list args;
118 
119 	va_start(args, f);
120 	seq_vprintf(m, f, args);
121 	va_end(args);
122 
123 	return 0;
124 }
125 #else
126 #define wmi_bp_seq_printf(m, fmt, ...) seq_printf((m), fmt, ##__VA_ARGS__)
127 #endif
128 
129 #ifndef MAX_WMI_INSTANCES
130 #define CUSTOM_MGMT_CMD_DATA_SIZE 4
131 #endif
132 
133 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
134 /* WMI commands */
135 uint32_t g_wmi_command_buf_idx = 0;
136 struct wmi_command_debug wmi_command_log_buffer[WMI_CMD_DEBUG_MAX_ENTRY];
137 
138 /* WMI commands TX completed */
139 uint32_t g_wmi_command_tx_cmp_buf_idx = 0;
140 struct wmi_command_cmp_debug
141 	wmi_command_tx_cmp_log_buffer[WMI_CMD_CMPL_DEBUG_MAX_ENTRY];
142 
143 /* WMI events when processed */
144 uint32_t g_wmi_event_buf_idx = 0;
145 struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
146 
147 /* WMI events when queued */
148 uint32_t g_wmi_rx_event_buf_idx = 0;
149 struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
150 #endif
151 
152 static void wmi_minidump_detach(struct wmi_unified *wmi_handle)
153 {
154 	struct wmi_log_buf_t *info =
155 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
156 	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
157 
158 	qdf_minidump_remove(info->buf, buf_size, "wmi_tx_cmp");
159 }
160 
161 static void wmi_minidump_attach(struct wmi_unified *wmi_handle)
162 {
163 	struct wmi_log_buf_t *info =
164 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
165 	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
166 
167 	qdf_minidump_log(info->buf, buf_size, "wmi_tx_cmp");
168 }
169 
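/*
 * The *_RECORD macros below implement circular debug logs: the tail index
 * wraps to 0 once it reaches the configured maximum, the command/event ID,
 * wmi_record_max_length bytes of payload and a timestamp are stored at the
 * tail, and then the tail index and total length are advanced.
 */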
170 #define WMI_COMMAND_RECORD(h, a, b) {					\
171 	if (wmi_cmd_log_max_entry <=					\
172 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))	\
173 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\
174 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
175 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\
176 						.command = a;		\
177 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
178 				wmi_command_log_buf_info.buf)		\
179 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\
180 			b, wmi_record_max_length);			\
181 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
182 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\
183 		time = qdf_get_log_timestamp();			\
184 	(*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++;	\
185 	h->log_info.wmi_command_log_buf_info.length++;			\
186 }
187 
188 #define WMI_COMMAND_TX_CMP_RECORD(h, a, b, da, pa) {			\
189 	if (wmi_cmd_cmpl_log_max_entry <=				\
190 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\
191 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
192 				p_buf_tail_idx) = 0;			\
193 	((struct wmi_command_cmp_debug *)h->log_info.			\
194 		wmi_command_tx_cmp_log_buf_info.buf)			\
195 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
196 				p_buf_tail_idx)].			\
197 							command	= a;	\
198 	qdf_mem_copy(((struct wmi_command_cmp_debug *)h->log_info.	\
199 				wmi_command_tx_cmp_log_buf_info.buf)	\
200 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
201 			p_buf_tail_idx)].				\
202 		data, b, wmi_record_max_length);			\
203 	((struct wmi_command_cmp_debug *)h->log_info.			\
204 		wmi_command_tx_cmp_log_buf_info.buf)			\
205 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
206 				p_buf_tail_idx)].			\
207 		time = qdf_get_log_timestamp();				\
208 	((struct wmi_command_cmp_debug *)h->log_info.			\
209 		wmi_command_tx_cmp_log_buf_info.buf)			\
210 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
211 				p_buf_tail_idx)].			\
212 		dma_addr = da;						\
213 	((struct wmi_command_cmp_debug *)h->log_info.			\
214 		wmi_command_tx_cmp_log_buf_info.buf)			\
215 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
216 				p_buf_tail_idx)].			\
217 		phy_addr = pa;						\
218 	(*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\
219 	h->log_info.wmi_command_tx_cmp_log_buf_info.length++;		\
220 }
221 
222 #define WMI_EVENT_RECORD(h, a, b) {					\
223 	if (wmi_event_log_max_entry <=					\
224 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))	\
225 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\
226 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
227 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].	\
228 		event = a;						\
229 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
230 				wmi_event_log_buf_info.buf)		\
231 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\
232 		wmi_record_max_length);					\
233 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
234 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\
235 		qdf_get_log_timestamp();				\
236 	(*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++;	\
237 	h->log_info.wmi_event_log_buf_info.length++;			\
238 }
239 
240 #define WMI_RX_EVENT_RECORD(h, a, b) {					\
241 	if (wmi_event_log_max_entry <=					\
242 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\
243 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\
244 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
245 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
246 		event = a;						\
247 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
248 				wmi_rx_event_log_buf_info.buf)		\
249 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
250 			data, b, wmi_record_max_length);		\
251 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
252 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
253 		time =	qdf_get_log_timestamp();			\
254 	(*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++;	\
255 	h->log_info.wmi_rx_event_log_buf_info.length++;			\
256 }
257 
258 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
259 uint32_t g_wmi_mgmt_command_buf_idx = 0;
260 struct
261 wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_TX_DEBUG_MAX_ENTRY];
262 
263 /* wmi_mgmt commands TX completed */
264 uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0;
265 struct wmi_command_debug
266 wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY];
267 
268 /* wmi_mgmt events when received */
269 uint32_t g_wmi_mgmt_rx_event_buf_idx = 0;
270 struct wmi_event_debug
271 wmi_mgmt_rx_event_log_buffer[WMI_MGMT_RX_DEBUG_MAX_ENTRY];
272 
273 /* wmi_diag events when received */
274 uint32_t g_wmi_diag_rx_event_buf_idx = 0;
275 struct wmi_event_debug
276 wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY];
277 #endif
278 
279 #define WMI_MGMT_COMMAND_RECORD(h, a, b) {                              \
280 	if (wmi_mgmt_tx_log_max_entry <=                                   \
281 		*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \
282 		*(h->log_info.wmi_mgmt_command_log_buf_info.		\
283 				p_buf_tail_idx) = 0;			\
284 	((struct wmi_command_debug *)h->log_info.                       \
285 		 wmi_mgmt_command_log_buf_info.buf)                     \
286 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
287 			command = a;                                    \
288 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.          \
289 				wmi_mgmt_command_log_buf_info.buf)      \
290 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
291 		data, b,                                                \
292 		wmi_record_max_length);                                	\
293 	((struct wmi_command_debug *)h->log_info.                       \
294 		 wmi_mgmt_command_log_buf_info.buf)                     \
295 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
296 			time =        qdf_get_log_timestamp();          \
297 	(*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\
298 	h->log_info.wmi_mgmt_command_log_buf_info.length++;             \
299 }
300 
301 #define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) {			\
302 	if (wmi_mgmt_tx_cmpl_log_max_entry <=				\
303 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
304 			p_buf_tail_idx))				\
305 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
306 			p_buf_tail_idx) = 0;				\
307 	((struct wmi_command_debug *)h->log_info.			\
308 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
309 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
310 				p_buf_tail_idx)].command = a;		\
311 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
312 				wmi_mgmt_command_tx_cmp_log_buf_info.buf)\
313 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
314 			p_buf_tail_idx)].data, b,			\
315 			wmi_record_max_length);				\
316 	((struct wmi_command_debug *)h->log_info.			\
317 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
318 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
319 				p_buf_tail_idx)].time =			\
320 		qdf_get_log_timestamp();				\
321 	(*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.		\
322 			p_buf_tail_idx))++;				\
323 	h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++;	\
324 }
325 
326 #define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do {				\
327 	if (wmi_mgmt_rx_log_max_entry <=				\
328 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\
329 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\
330 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
331 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\
332 					.event = a;			\
333 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
334 				wmi_mgmt_event_log_buf_info.buf)	\
335 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
336 			data, b, wmi_record_max_length);		\
337 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
338 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
339 			time = qdf_get_log_timestamp();			\
340 	(*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++;	\
341 	h->log_info.wmi_mgmt_event_log_buf_info.length++;		\
342 } while (0)
343 
344 #define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do {                             \
345 	if (wmi_diag_log_max_entry <=                                   \
346 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\
347 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\
348 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
349 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\
350 					.event = a;                     \
351 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.            \
352 				wmi_diag_event_log_buf_info.buf)        \
353 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
354 			data, b, wmi_record_max_length);                \
355 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
356 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
357 			time = qdf_get_log_timestamp();                 \
358 	(*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++;  \
359 	h->log_info.wmi_diag_event_log_buf_info.length++;               \
360 } while (0)
361 
362 /* These are defined as variables so they can be configured as module params */
363 /* WMI Commands */
364 uint32_t wmi_cmd_log_max_entry = WMI_CMD_DEBUG_MAX_ENTRY;
365 uint32_t wmi_cmd_cmpl_log_max_entry = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
366 /* WMI Events */
367 uint32_t wmi_event_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY;
368 /* WMI MGMT Tx */
369 uint32_t wmi_mgmt_tx_log_max_entry = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
370 uint32_t wmi_mgmt_tx_cmpl_log_max_entry = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
371 /* WMI MGMT Rx */
372 uint32_t wmi_mgmt_rx_log_max_entry = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
373 /* WMI Diag Event */
374 uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
375 /* WMI capture size */
376 uint32_t wmi_record_max_length = WMI_DEBUG_ENTRY_MAX_LENGTH;
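/* Number of most recent entries dumped by the debugfs show handlers below */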
377 uint32_t wmi_display_size = 100;
378 
379 /**
380  * wmi_log_init() - Initialize WMI event logging
381  * @wmi_handle: WMI handle.
382  *
383  * Return: Initialization status
384  */
385 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
386 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
387 {
388 	struct wmi_log_buf_t *cmd_log_buf =
389 			&wmi_handle->log_info.wmi_command_log_buf_info;
390 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
391 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
392 
393 	struct wmi_log_buf_t *event_log_buf =
394 			&wmi_handle->log_info.wmi_event_log_buf_info;
395 	struct wmi_log_buf_t *rx_event_log_buf =
396 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
397 
398 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
399 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
400 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
401 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
402 	struct wmi_log_buf_t *mgmt_event_log_buf =
403 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
404 	struct wmi_log_buf_t *diag_event_log_buf =
405 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
406 
407 	/* WMI commands */
408 	cmd_log_buf->length = 0;
409 	cmd_log_buf->buf_tail_idx = 0;
410 	cmd_log_buf->buf = wmi_command_log_buffer;
411 	cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx;
412 	cmd_log_buf->size = WMI_CMD_DEBUG_MAX_ENTRY;
413 
414 	/* WMI commands TX completed */
415 	cmd_tx_cmpl_log_buf->length = 0;
416 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
417 	cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer;
418 	cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx;
419 	cmd_tx_cmpl_log_buf->size = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
420 
421 	/* WMI events when processed */
422 	event_log_buf->length = 0;
423 	event_log_buf->buf_tail_idx = 0;
424 	event_log_buf->buf = wmi_event_log_buffer;
425 	event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx;
426 	event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
427 
428 	/* WMI events when queued */
429 	rx_event_log_buf->length = 0;
430 	rx_event_log_buf->buf_tail_idx = 0;
431 	rx_event_log_buf->buf = wmi_rx_event_log_buffer;
432 	rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx;
433 	rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
434 
435 	/* WMI Management commands */
436 	mgmt_cmd_log_buf->length = 0;
437 	mgmt_cmd_log_buf->buf_tail_idx = 0;
438 	mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer;
439 	mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx;
440 	mgmt_cmd_log_buf->size = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
441 
442 	/* WMI Management commands Tx completed */
443 	mgmt_cmd_tx_cmp_log_buf->length = 0;
444 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
445 	mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer;
446 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
447 		&g_wmi_mgmt_command_tx_cmp_buf_idx;
448 	mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
449 
450 	/* WMI Management events when received */
451 	mgmt_event_log_buf->length = 0;
452 	mgmt_event_log_buf->buf_tail_idx = 0;
453 	mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer;
454 	mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx;
455 	mgmt_event_log_buf->size = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
456 
457 	/* WMI diag events when received */
458 	diag_event_log_buf->length = 0;
459 	diag_event_log_buf->buf_tail_idx = 0;
460 	diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer;
461 	diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx;
462 	diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
463 
464 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
465 	wmi_handle->log_info.wmi_logging_enable = 1;
466 
467 	return QDF_STATUS_SUCCESS;
468 }
469 #else
470 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
471 {
472 	struct wmi_log_buf_t *cmd_log_buf =
473 			&wmi_handle->log_info.wmi_command_log_buf_info;
474 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
475 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
476 
477 	struct wmi_log_buf_t *event_log_buf =
478 			&wmi_handle->log_info.wmi_event_log_buf_info;
479 	struct wmi_log_buf_t *rx_event_log_buf =
480 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
481 
482 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
483 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
484 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
485 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
486 	struct wmi_log_buf_t *mgmt_event_log_buf =
487 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
488 	struct wmi_log_buf_t *diag_event_log_buf =
489 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
490 
491 	wmi_handle->log_info.wmi_logging_enable = 0;
492 
493 	/* WMI commands */
494 	cmd_log_buf->length = 0;
495 	cmd_log_buf->buf_tail_idx = 0;
496 	cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
497 		wmi_cmd_log_max_entry * sizeof(struct wmi_command_debug));
498 	cmd_log_buf->size = wmi_cmd_log_max_entry;
499 
500 	if (!cmd_log_buf->buf)
501 		return QDF_STATUS_E_NOMEM;
502 
503 	cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx;
504 
505 	/* WMI commands TX completed */
506 	cmd_tx_cmpl_log_buf->length = 0;
507 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
508 	cmd_tx_cmpl_log_buf->buf = (struct wmi_command_cmp_debug *) qdf_mem_malloc(
509 		wmi_cmd_cmpl_log_max_entry * sizeof(struct wmi_command_cmp_debug));
510 	cmd_tx_cmpl_log_buf->size = wmi_cmd_cmpl_log_max_entry;
511 
512 	if (!cmd_tx_cmpl_log_buf->buf)
513 		return QDF_STATUS_E_NOMEM;
514 
515 	cmd_tx_cmpl_log_buf->p_buf_tail_idx =
516 		&cmd_tx_cmpl_log_buf->buf_tail_idx;
517 
518 	/* WMI events when processed */
519 	event_log_buf->length = 0;
520 	event_log_buf->buf_tail_idx = 0;
521 	event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
522 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
523 	event_log_buf->size = wmi_event_log_max_entry;
524 
525 	if (!event_log_buf->buf)
526 		return QDF_STATUS_E_NOMEM;
527 
528 	event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx;
529 
530 	/* WMI events when queued */
531 	rx_event_log_buf->length = 0;
532 	rx_event_log_buf->buf_tail_idx = 0;
533 	rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
534 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
535 	rx_event_log_buf->size = wmi_event_log_max_entry;
536 
537 	if (!rx_event_log_buf->buf)
538 		return QDF_STATUS_E_NOMEM;
539 
540 	rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx;
541 
542 	/* WMI Management commands */
543 	mgmt_cmd_log_buf->length = 0;
544 	mgmt_cmd_log_buf->buf_tail_idx = 0;
545 	mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
546 		wmi_mgmt_tx_log_max_entry * sizeof(struct wmi_command_debug));
547 	mgmt_cmd_log_buf->size = wmi_mgmt_tx_log_max_entry;
548 
549 	if (!mgmt_cmd_log_buf->buf)
550 		return QDF_STATUS_E_NOMEM;
551 
552 	mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx;
553 
554 	/* WMI Management commands Tx completed */
555 	mgmt_cmd_tx_cmp_log_buf->length = 0;
556 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
557 	mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *)
558 		qdf_mem_malloc(
559 		wmi_mgmt_tx_cmpl_log_max_entry *
560 		sizeof(struct wmi_command_debug));
561 	mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_tx_cmpl_log_max_entry;
562 
563 	if (!mgmt_cmd_tx_cmp_log_buf->buf)
564 		return QDF_STATUS_E_NOMEM;
565 
566 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
567 		&mgmt_cmd_tx_cmp_log_buf->buf_tail_idx;
568 
569 	/* WMI Management events when received */
570 	mgmt_event_log_buf->length = 0;
571 	mgmt_event_log_buf->buf_tail_idx = 0;
572 
573 	mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
574 		wmi_mgmt_rx_log_max_entry *
575 		sizeof(struct wmi_event_debug));
576 	mgmt_event_log_buf->size = wmi_mgmt_rx_log_max_entry;
577 
578 	if (!mgmt_event_log_buf->buf)
579 		return QDF_STATUS_E_NOMEM;
580 
581 	mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx;
582 
583 	/* WMI diag events when received */
584 	diag_event_log_buf->length = 0;
585 	diag_event_log_buf->buf_tail_idx = 0;
586 
587 	diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
588 		wmi_diag_log_max_entry *
589 		sizeof(struct wmi_event_debug));
590 	diag_event_log_buf->size = wmi_diag_log_max_entry;
591 
592 	if (!diag_event_log_buf->buf)
593 		return QDF_STATUS_E_NOMEM;
594 
595 	diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx;
596 
597 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
598 	wmi_handle->log_info.wmi_logging_enable = 1;
599 
600 	wmi_filtered_logging_init(wmi_handle);
601 
602 	return QDF_STATUS_SUCCESS;
603 }
604 #endif
605 
606 /**
607  * wmi_log_buffer_free() - Free all dynamically allocated buffer memory for
608  * event logging
609  * @wmi_handle: WMI handle.
610  *
611  * Return: None
612  */
613 #ifdef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
614 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
615 {
616 	wmi_filtered_logging_free(wmi_handle);
617 
618 	if (wmi_handle->log_info.wmi_command_log_buf_info.buf)
619 		qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf);
620 	if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf)
621 		qdf_mem_free(
622 		wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf);
623 	if (wmi_handle->log_info.wmi_event_log_buf_info.buf)
624 		qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf);
625 	if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf)
626 		qdf_mem_free(
627 			wmi_handle->log_info.wmi_rx_event_log_buf_info.buf);
628 	if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf)
629 		qdf_mem_free(
630 			wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf);
631 	if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf)
632 		qdf_mem_free(
633 		wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf);
634 	if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf)
635 		qdf_mem_free(
636 			wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf);
637 	if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf)
638 		qdf_mem_free(
639 			wmi_handle->log_info.wmi_diag_event_log_buf_info.buf);
640 	wmi_handle->log_info.wmi_logging_enable = 0;
641 
642 	qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock);
643 }
644 #else
645 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
646 {
647 	/* Do Nothing */
648 }
649 #endif
650 
651 /**
652  * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer
653  * @log_buffer: the command log buffer metadata of the buffer to print
654  * @count: the maximum number of entries to print
655  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
656  * @print_priv: any data required by the print method, e.g. a file handle
657  *
658  * Return: None
659  */
660 static void
661 wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
662 			 qdf_abstract_print *print, void *print_priv)
663 {
664 	static const int data_len =
665 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
666 	char str[128];
667 	uint32_t idx;
668 
669 	if (count > log_buffer->size)
670 		count = log_buffer->size;
671 	if (count > log_buffer->length)
672 		count = log_buffer->length;
673 
674 	/* subtract count from index, and wrap if necessary */
675 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
676 	idx %= log_buffer->size;
677 
678 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
679 	while (count) {
680 		struct wmi_command_debug *cmd_log = (struct wmi_command_debug *)
681 			&((struct wmi_command_debug *)log_buffer->buf)[idx];
682 		uint64_t secs, usecs;
683 		int len = 0;
684 		int i;
685 
686 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
687 		len += scnprintf(str + len, sizeof(str) - len,
688 				 "% 8lld.%06lld    %6u (0x%06x)    ",
689 				 secs, usecs,
690 				 cmd_log->command, cmd_log->command);
691 		for (i = 0; i < data_len; ++i) {
692 			len += scnprintf(str + len, sizeof(str) - len,
693 					 "0x%08x ", cmd_log->data[i]);
694 		}
695 
696 		print(print_priv, str);
697 
698 		--count;
699 		++idx;
700 		if (idx >= log_buffer->size)
701 			idx = 0;
702 	}
703 }
704 
705 /**
706  * wmi_print_cmd_cmp_log_buffer() - wmi command completion log printer
707  * @log_buffer: the command completion log buffer metadata of the buffer to print
708  * @count: the maximum number of entries to print
709  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
710  * @print_priv: any data required by the print method, e.g. a file handle
711  *
712  * Return: None
713  */
714 static void
715 wmi_print_cmd_cmp_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
716 			 qdf_abstract_print *print, void *print_priv)
717 {
718 	static const int data_len =
719 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
720 	char str[128];
721 	uint32_t idx;
722 
723 	if (count > log_buffer->size)
724 		count = log_buffer->size;
725 	if (count > log_buffer->length)
726 		count = log_buffer->length;
727 
728 	/* subtract count from index, and wrap if necessary */
729 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
730 	idx %= log_buffer->size;
731 
732 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
733 	while (count) {
734 		struct wmi_command_cmp_debug *cmd_log = (struct wmi_command_cmp_debug *)
735 			&((struct wmi_command_cmp_debug *)log_buffer->buf)[idx];
736 		uint64_t secs, usecs;
737 		int len = 0;
738 		int i;
739 
740 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
741 		len += scnprintf(str + len, sizeof(str) - len,
742 				 "% 8lld.%06lld    %6u (0x%06x)    ",
743 				 secs, usecs,
744 				 cmd_log->command, cmd_log->command);
745 		for (i = 0; i < data_len; ++i) {
746 			len += scnprintf(str + len, sizeof(str) - len,
747 					 "0x%08x ", cmd_log->data[i]);
748 		}
749 
750 		print(print_priv, str);
751 
752 		--count;
753 		++idx;
754 		if (idx >= log_buffer->size)
755 			idx = 0;
756 	}
757 }
758 
759 /**
760  * wmi_print_event_log_buffer() - an output agnostic wmi event log printer
761  * @log_buffer: the event log buffer metadata of the buffer to print
762  * @count: the maximum number of entries to print
763  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
764  * @print_priv: any data required by the print method, e.g. a file handle
765  *
766  * Return: None
767  */
768 static void
769 wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
770 			   qdf_abstract_print *print, void *print_priv)
771 {
772 	static const int data_len =
773 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
774 	char str[128];
775 	uint32_t idx;
776 
777 	if (count > log_buffer->size)
778 		count = log_buffer->size;
779 	if (count > log_buffer->length)
780 		count = log_buffer->length;
781 
782 	/* subtract count from index, and wrap if necessary */
783 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
784 	idx %= log_buffer->size;
785 
786 	print(print_priv, "Time (seconds)      Event Id             Payload");
787 	while (count) {
788 		struct wmi_event_debug *event_log = (struct wmi_event_debug *)
789 			&((struct wmi_event_debug *)log_buffer->buf)[idx];
790 		uint64_t secs, usecs;
791 		int len = 0;
792 		int i;
793 
794 		qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs);
795 		len += scnprintf(str + len, sizeof(str) - len,
796 				 "% 8lld.%06lld    %6u (0x%06x)    ",
797 				 secs, usecs,
798 				 event_log->event, event_log->event);
799 		for (i = 0; i < data_len; ++i) {
800 			len += scnprintf(str + len, sizeof(str) - len,
801 					 "0x%08x ", event_log->data[i]);
802 		}
803 
804 		print(print_priv, str);
805 
806 		--count;
807 		++idx;
808 		if (idx >= log_buffer->size)
809 			idx = 0;
810 	}
811 }
812 
813 inline void
814 wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count,
815 		  qdf_abstract_print *print, void *print_priv)
816 {
817 	wmi_print_cmd_log_buffer(
818 		&wmi->log_info.wmi_command_log_buf_info,
819 		count, print, print_priv);
820 }
821 
822 inline void
823 wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
824 			 qdf_abstract_print *print, void *print_priv)
825 {
826 	wmi_print_cmd_cmp_log_buffer(
827 		&wmi->log_info.wmi_command_tx_cmp_log_buf_info,
828 		count, print, print_priv);
829 }
830 
831 inline void
832 wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count,
833 		       qdf_abstract_print *print, void *print_priv)
834 {
835 	wmi_print_cmd_log_buffer(
836 		&wmi->log_info.wmi_mgmt_command_log_buf_info,
837 		count, print, print_priv);
838 }
839 
840 inline void
841 wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
842 			      qdf_abstract_print *print, void *print_priv)
843 {
844 	wmi_print_cmd_log_buffer(
845 		&wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info,
846 		count, print, print_priv);
847 }
848 
849 inline void
850 wmi_print_event_log(wmi_unified_t wmi, uint32_t count,
851 		    qdf_abstract_print *print, void *print_priv)
852 {
853 	wmi_print_event_log_buffer(
854 		&wmi->log_info.wmi_event_log_buf_info,
855 		count, print, print_priv);
856 }
857 
858 inline void
859 wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count,
860 		       qdf_abstract_print *print, void *print_priv)
861 {
862 	wmi_print_event_log_buffer(
863 		&wmi->log_info.wmi_rx_event_log_buf_info,
864 		count, print, print_priv);
865 }
866 
867 inline void
868 wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count,
869 			 qdf_abstract_print *print, void *print_priv)
870 {
871 	wmi_print_event_log_buffer(
872 		&wmi->log_info.wmi_mgmt_event_log_buf_info,
873 		count, print, print_priv);
874 }
875 
876 
877 /* debugfs routines*/
878 
879 /*
880  * debug_wmi_##func_base##_show() - debugfs functions to display content of
881  * command and event buffers. The macro uses the maximum ring size to
882  * display the buffer once it has wrapped around.
883  *
884  * @m: debugfs handler to access wmi_handle
885  * @v: Variable arguments (not used)
886  *
887  * Return: Length of characters printed
888  */
889 #define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
890 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
891 						void *v)		\
892 	{								\
893 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
894 		struct wmi_log_buf_t *wmi_log =				\
895 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
896 		int pos, nread, outlen;					\
897 		int i;							\
898 		uint64_t secs, usecs;					\
899 									\
900 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
901 		if (!wmi_log->length) {					\
902 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
903 			return wmi_bp_seq_printf(m,			\
904 			"no elements to read from ring buffer!\n");	\
905 		}							\
906 									\
907 		if (wmi_log->length <= wmi_ring_size)			\
908 			nread = wmi_log->length;			\
909 		else							\
910 			nread = wmi_ring_size;				\
911 									\
912 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
913 			/* tail can be 0 after wrap-around */		\
914 			pos = wmi_ring_size - 1;			\
915 		else							\
916 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
917 									\
918 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
919 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
920 		while (nread--) {					\
921 			struct wmi_record_type *wmi_record;		\
922 									\
923 			wmi_record = (struct wmi_record_type *)	\
924 			&(((struct wmi_record_type *)wmi_log->buf)[pos]);\
925 			outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n",	\
926 				(wmi_record->command));			\
927 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
928 				&usecs);				\
929 			outlen +=					\
930 			wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\
931 				secs, usecs);				\
932 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
933 			for (i = 0; i < (wmi_record_max_length/		\
934 					sizeof(uint32_t)); i++)		\
935 				outlen += wmi_bp_seq_printf(m, "%x ",	\
936 					wmi_record->data[i]);		\
937 			outlen += wmi_bp_seq_printf(m, "\n");		\
938 									\
939 			if (pos == 0)					\
940 				pos = wmi_ring_size - 1;		\
941 			else						\
942 				pos--;					\
943 		}							\
944 		return outlen;						\
945 	}								\
946 
947 #define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size)	\
948 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
949 						void *v)		\
950 	{								\
951 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
952 		struct wmi_log_buf_t *wmi_log =				\
953 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
954 		int pos, nread, outlen;					\
955 		int i;							\
956 		uint64_t secs, usecs;					\
957 									\
958 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
959 		if (!wmi_log->length) {					\
960 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
961 			return wmi_bp_seq_printf(m,			\
962 			"no elements to read from ring buffer!\n");	\
963 		}							\
964 									\
965 		if (wmi_log->length <= wmi_ring_size)			\
966 			nread = wmi_log->length;			\
967 		else							\
968 			nread = wmi_ring_size;				\
969 									\
970 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
971 			/* tail can be 0 after wrap-around */		\
972 			pos = wmi_ring_size - 1;			\
973 		else							\
974 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
975 									\
976 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
977 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
978 		while (nread--) {					\
979 			struct wmi_event_debug *wmi_record;		\
980 									\
981 			wmi_record = (struct wmi_event_debug *)		\
982 			&(((struct wmi_event_debug *)wmi_log->buf)[pos]);\
983 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
984 				&usecs);				\
985 			outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\
986 				(wmi_record->event));			\
987 			outlen +=					\
988 			wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\
989 				secs, usecs);				\
990 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
991 			for (i = 0; i < (wmi_record_max_length/		\
992 					sizeof(uint32_t)); i++)		\
993 				outlen += wmi_bp_seq_printf(m, "%x ",	\
994 					wmi_record->data[i]);		\
995 			outlen += wmi_bp_seq_printf(m, "\n");		\
996 									\
997 			if (pos == 0)					\
998 				pos = wmi_ring_size - 1;		\
999 			else						\
1000 				pos--;					\
1001 		}							\
1002 		return outlen;						\
1003 	}
1004 
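/*
 * Each invocation below generates a debug_wmi_<name>_show() handler that
 * dumps the corresponding wmi_<name>_buf_info ring through the seq_file
 * interface, newest entry first.
 */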
1005 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size,
1006 				  wmi_command_debug);
1007 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size,
1008 				  wmi_command_cmp_debug);
1009 GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size);
1010 GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size);
1011 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size,
1012 				  wmi_command_debug);
1013 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log,
1014 					wmi_display_size,
1015 					wmi_command_debug);
1016 GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size);
1017 
1018 /**
1019  * debug_wmi_enable_show() - debugfs function to display the enable state of
1020  * the wmi logging feature.
1021  *
1022  * @m: debugfs handler to access wmi_handle
1023  * @v: Variable arguments (not used)
1024  *
1025  * Return: always 1
1026  */
1027 static int debug_wmi_enable_show(struct seq_file *m, void *v)
1028 {
1029 	wmi_unified_t wmi_handle = (wmi_unified_t) m->private;
1030 
1031 	return wmi_bp_seq_printf(m, "%d\n",
1032 			wmi_handle->log_info.wmi_logging_enable);
1033 }
1034 
1035 /**
1036  * debug_wmi_log_size_show() - debugfs function to display the configured sizes
1037  * of the wmi logging command/event buffers and management command/event buffers.
1038  *
1039  * @m: debugfs handler to access wmi_handle
1040  * @v: Variable arguments (not used)
1041  *
1042  * Return: Length of characters printed
1043  */
1044 static int debug_wmi_log_size_show(struct seq_file *m, void *v)
1045 {
1046 
1047 	wmi_bp_seq_printf(m, "WMI command/cmpl log max size:%d/%d\n",
1048 			  wmi_cmd_log_max_entry, wmi_cmd_cmpl_log_max_entry);
1049 	wmi_bp_seq_printf(m, "WMI management Tx/cmpl log max size:%d/%d\n",
1050 			  wmi_mgmt_tx_log_max_entry,
1051 			  wmi_mgmt_tx_cmpl_log_max_entry);
1052 	wmi_bp_seq_printf(m, "WMI event log max size:%d\n",
1053 			  wmi_event_log_max_entry);
1054 	wmi_bp_seq_printf(m, "WMI management Rx log max size:%d\n",
1055 			  wmi_mgmt_rx_log_max_entry);
1056 	return wmi_bp_seq_printf(m,
1057 				 "WMI diag log max size:%d\n",
1058 				 wmi_diag_log_max_entry);
1059 }
1060 
1061 /*
1062  * debug_wmi_##func_base##_write() - debugfs functions to clear
1063  * wmi logging command/event buffer and management command/event buffer.
1064  *
1065  * @file: file handler to access wmi_handle
1066  * @buf: received data buffer
1067  * @count: length of received buffer
1068  * @ppos: Not used
1069  *
1070  * Return: count
1071  */
1072 #define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
1073 	static ssize_t debug_wmi_##func_base##_write(struct file *file,	\
1074 				const char __user *buf,			\
1075 				size_t count, loff_t *ppos)		\
1076 	{								\
1077 		int k, ret;						\
1078 		wmi_unified_t wmi_handle =				\
1079 			((struct seq_file *)file->private_data)->private;\
1080 		struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info.	\
1081 				wmi_##func_base##_buf_info;		\
1082 		char locbuf[50];					\
1083 									\
1084 		if ((!buf) || (count >= sizeof(locbuf)))		\
1085 			return -EFAULT;					\
1086 									\
1087 		if (copy_from_user(locbuf, buf, count))			\
1088 			return -EFAULT;					\
		locbuf[count] = '\0';					\
1089 									\
1090 		ret = sscanf(locbuf, "%d", &k);				\
1091 		if ((ret != 1) || (k != 0)) {                           \
1092 			wmi_err("Wrong input, echo 0 to clear the wmi buffer");\
1093 			return -EINVAL;					\
1094 		}							\
1095 									\
1096 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
1097 		qdf_mem_zero(wmi_log->buf, wmi_ring_size *		\
1098 				sizeof(struct wmi_record_type));	\
1099 		wmi_log->length = 0;					\
1100 		*(wmi_log->p_buf_tail_idx) = 0;				\
1101 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1102 									\
1103 		return count;						\
1104 	}
1105 
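/* Writing "0" to any of the debugfs entries below clears the matching ring */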
1106 GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_cmd_log_max_entry,
1107 			   wmi_command_debug);
1108 GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_cmd_cmpl_log_max_entry,
1109 			   wmi_command_cmp_debug);
1110 GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_event_log_max_entry,
1111 			   wmi_event_debug);
1112 GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_event_log_max_entry,
1113 			   wmi_event_debug);
1114 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_tx_log_max_entry,
1115 			   wmi_command_debug);
1116 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log,
1117 			   wmi_mgmt_tx_cmpl_log_max_entry, wmi_command_debug);
1118 GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_rx_log_max_entry,
1119 			   wmi_event_debug);
1120 
1121 /**
1122  * debug_wmi_enable_write() - debugfs function to enable/disable
1123  * the wmi logging feature.
1124  *
1125  * @file: file handler to access wmi_handle
1126  * @buf: received data buffer
1127  * @count: length of received buffer
1128  * @ppos: Not used
1129  *
1130  * Return: count
1131  */
1132 static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf,
1133 					size_t count, loff_t *ppos)
1134 {
1135 	wmi_unified_t wmi_handle =
1136 		((struct seq_file *)file->private_data)->private;
1137 	int k, ret;
1138 	char locbuf[50];
1139 
1140 	if ((!buf) || (count >= sizeof(locbuf)))
1141 		return -EFAULT;
1142 
1143 	if (copy_from_user(locbuf, buf, count))
1144 		return -EFAULT;
	locbuf[count] = '\0';
1145 
1146 	ret = sscanf(locbuf, "%d", &k);
1147 	if ((ret != 1) || ((k != 0) && (k != 1)))
1148 		return -EINVAL;
1149 
1150 	wmi_handle->log_info.wmi_logging_enable = k;
1151 	return count;
1152 }
1153 
1154 /**
1155  * debug_wmi_log_size_write() - reserved.
1156  *
1157  * @file: file handler to access wmi_handle
1158  * @buf: received data buffer
1159  * @count: length of received buffer
1160  * @ppos: Not used
1161  *
1162  * Return: count
1163  */
1164 static ssize_t debug_wmi_log_size_write(struct file *file,
1165 		const char __user *buf, size_t count, loff_t *ppos)
1166 {
1167 	return -EINVAL;
1168 }
1169 
1170 /* Structure to maintain debug information */
1171 struct wmi_debugfs_info {
1172 	const char *name;
1173 	const struct file_operations *ops;
1174 };
1175 
1176 #define DEBUG_FOO(func_base) { .name = #func_base,			\
1177 	.ops = &debug_##func_base##_ops }
1178 
1179 /*
1180  * debug_##func_base##_open() - Open debugfs entry for respective command
1181  * and event buffer.
1182  *
1183  * @inode: node for debug dir entry
1184  * @file: file handler
1185  *
1186  * Return: open status
1187  */
1188 #define GENERATE_DEBUG_STRUCTS(func_base)				\
1189 	static int debug_##func_base##_open(struct inode *inode,	\
1190 						struct file *file)	\
1191 	{								\
1192 		return single_open(file, debug_##func_base##_show,	\
1193 				inode->i_private);			\
1194 	}								\
1195 									\
1196 									\
1197 	static struct file_operations debug_##func_base##_ops = {	\
1198 		.open		= debug_##func_base##_open,		\
1199 		.read		= seq_read,				\
1200 		.llseek		= seq_lseek,				\
1201 		.write		= debug_##func_base##_write,		\
1202 		.release	= single_release,			\
1203 	};
1204 
1205 GENERATE_DEBUG_STRUCTS(wmi_command_log);
1206 GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log);
1207 GENERATE_DEBUG_STRUCTS(wmi_event_log);
1208 GENERATE_DEBUG_STRUCTS(wmi_rx_event_log);
1209 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log);
1210 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log);
1211 GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log);
1212 GENERATE_DEBUG_STRUCTS(wmi_enable);
1213 GENERATE_DEBUG_STRUCTS(wmi_log_size);
1214 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1215 GENERATE_DEBUG_STRUCTS(filtered_wmi_cmds);
1216 GENERATE_DEBUG_STRUCTS(filtered_wmi_evts);
1217 GENERATE_DEBUG_STRUCTS(wmi_filtered_command_log);
1218 GENERATE_DEBUG_STRUCTS(wmi_filtered_event_log);
1219 #endif
1220 
1221 struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = {
1222 	DEBUG_FOO(wmi_command_log),
1223 	DEBUG_FOO(wmi_command_tx_cmp_log),
1224 	DEBUG_FOO(wmi_event_log),
1225 	DEBUG_FOO(wmi_rx_event_log),
1226 	DEBUG_FOO(wmi_mgmt_command_log),
1227 	DEBUG_FOO(wmi_mgmt_command_tx_cmp_log),
1228 	DEBUG_FOO(wmi_mgmt_event_log),
1229 	DEBUG_FOO(wmi_enable),
1230 	DEBUG_FOO(wmi_log_size),
1231 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1232 	DEBUG_FOO(filtered_wmi_cmds),
1233 	DEBUG_FOO(filtered_wmi_evts),
1234 	DEBUG_FOO(wmi_filtered_command_log),
1235 	DEBUG_FOO(wmi_filtered_event_log),
1236 #endif
1237 };
1238 
1239 /**
1240  * wmi_debugfs_create() - Create debugfs entries for wmi logging.
1241  *
1242  * @wmi_handle: wmi handle
1243  * @par_entry: debug directory entry
1244  *
1245  * Return: none
1246  */
1247 static void wmi_debugfs_create(wmi_unified_t wmi_handle,
1248 			       struct dentry *par_entry)
1249 {
1250 	int i;
1251 
1252 	if (!par_entry)
1253 		goto out;
1254 
1255 	for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1256 		wmi_handle->debugfs_de[i] = qdf_debugfs_create_entry(
1257 						wmi_debugfs_infos[i].name,
1258 						WMI_INFOS_DBG_FILE_PERM,
1259 						par_entry,
1260 						wmi_handle,
1261 						wmi_debugfs_infos[i].ops);
1262 
1263 		if (!wmi_handle->debugfs_de[i]) {
1264 			wmi_err("debug Entry creation failed!");
1265 			goto out;
1266 		}
1267 	}
1268 
1269 	return;
1270 
1271 out:
1272 	wmi_err("debug Entry creation failed!");
1273 	wmi_log_buffer_free(wmi_handle);
1274 	return;
1275 }
1276 
1277 /**
1278  * wmi_debugfs_remove() - Remove debugfs entries for wmi logging.
1279  * @wmi_handle: wmi handle
1280  *
1281  * Return: none
1282  */
1283 static void wmi_debugfs_remove(wmi_unified_t wmi_handle)
1284 {
1285 	int i;
1286 	struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir;
1287 
1288 	if (dentry) {
1289 		for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1290 			if (wmi_handle->debugfs_de[i])
1291 				wmi_handle->debugfs_de[i] = NULL;
1292 		}
1293 	}
1294 
1295 	if (dentry)
1296 		qdf_debugfs_remove_dir_recursive(dentry);
1297 }
1298 
1299 /**
1300  * wmi_debugfs_init() - create the debugfs directory for wmi logging and
1301  *                      populate it with entries.
1302  * @wmi_handle: wmi handler
1303  * @pdev_idx: pdev id
1304  *
1305  * Return: init status
1306  */
1307 static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx)
1308 {
1309 	char buf[32];
1310 
1311 	snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u",
1312 		 wmi_handle->soc->soc_idx, pdev_idx);
1313 
1314 	wmi_handle->log_info.wmi_log_debugfs_dir =
1315 		qdf_debugfs_create_dir(buf, NULL);
1316 
1317 	if (!wmi_handle->log_info.wmi_log_debugfs_dir) {
1318 		wmi_err("error while creating debugfs dir for %s", buf);
1319 		return QDF_STATUS_E_FAILURE;
1320 	}
1321 	wmi_debugfs_create(wmi_handle,
1322 			   wmi_handle->log_info.wmi_log_debugfs_dir);
1323 
1324 	return QDF_STATUS_SUCCESS;
1325 }
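/*
 * Example: for SOC 0 / PDEV 0 the above creates a "WMI_SOC0_PDEV0" debugfs
 * directory containing one entry per wmi_debugfs_infos[] name
 * (wmi_command_log, wmi_event_log, wmi_enable, ...).
 */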
1326 
1327 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1328 			void *header, uint32_t vdev_id, uint32_t chanfreq)
1329 {
1330 
1331 	uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE];
1332 
1333 	data[0] = ((struct wmi_command_header *)header)->type;
1334 	data[1] = ((struct wmi_command_header *)header)->sub_type;
1335 	data[2] = vdev_id;
1336 	data[3] = chanfreq;
1337 
1338 	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
1339 
1340 	WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data);
1341 	wmi_specific_cmd_record(wmi_handle, cmd, (uint8_t *)data);
1342 	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
1343 }
1344 #else
1345 static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { }
1346 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1347 			void *header, uint32_t vdev_id, uint32_t chanfreq) { }
1348 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { }
1349 static void wmi_minidump_detach(struct wmi_unified *wmi_handle) { }
1350 static void wmi_minidump_attach(struct wmi_unified *wmi_handle) { }
1351 #endif /* WMI_INTERFACE_EVENT_LOGGING */
1352 qdf_export_symbol(wmi_mgmt_cmd_record);
1353 
1354 #ifdef WMI_EXT_DBG
1355 
1356 /**
1357  * wmi_ext_dbg_msg_enqueue() - enqueue wmi message
1358  * @wmi_handle: wmi handler
1359  * @msg: WMI message
1360  *
1361  * Return: size of wmi message queue after enqueue
1362  */
1363 static uint32_t wmi_ext_dbg_msg_enqueue(struct wmi_unified *wmi_handle,
1364 					struct wmi_ext_dbg_msg *msg)
1365 {
1366 	uint32_t list_size;
1367 
1368 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1369 	qdf_list_insert_back_size(&wmi_handle->wmi_ext_dbg_msg_queue,
1370 				  &msg->node, &list_size);
1371 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1372 
1373 	return list_size;
1374 }
1375 
1376 /**
1377  * wmi_ext_dbg_msg_dequeue() - dequeue wmi message
1378  * @wmi_handle: wmi handler
1379  *
1380  * Return: wmi msg on success else NULL
1381  */
1382 static struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_dequeue(struct wmi_unified
1383 						       *wmi_handle)
1384 {
1385 	qdf_list_node_t *list_node = NULL;
1386 
1387 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1388 	qdf_list_remove_front(&wmi_handle->wmi_ext_dbg_msg_queue, &list_node);
1389 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1390 
1391 	if (!list_node)
1392 		return NULL;
1393 
1394 	return qdf_container_of(list_node, struct wmi_ext_dbg_msg, node);
1395 }
1396 
1397 /**
1398  * wmi_ext_dbg_msg_record() - record wmi messages
1399  * @wmi_handle: wmi handler
1400  * @buf: wmi message buffer
1401  * @len: wmi message length
1402  * @type: wmi message type
1403  *
1404  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1405  */
1406 static QDF_STATUS wmi_ext_dbg_msg_record(struct wmi_unified *wmi_handle,
1407 					 uint8_t *buf, uint32_t len,
1408 					 enum WMI_MSG_TYPE type)
1409 {
1410 	struct wmi_ext_dbg_msg *msg;
1411 	uint32_t list_size;
1412 
1413 	msg = wmi_ext_dbg_msg_get(len);
1414 	if (!msg)
1415 		return QDF_STATUS_E_NOMEM;
1416 
1417 	msg->len = len;
1418 	msg->type = type;
1419 	qdf_mem_copy(msg->buf, buf, len);
1420 	msg->ts = qdf_get_log_timestamp();
1421 	list_size = wmi_ext_dbg_msg_enqueue(wmi_handle, msg);
1422 
1423 	if (list_size >= wmi_handle->wmi_ext_dbg_msg_queue_size) {
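	/* Queue is full: evict and free the oldest recorded message */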
1424 		msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1425 		wmi_ext_dbg_msg_put(msg);
1426 	}
1427 
1428 	return QDF_STATUS_SUCCESS;
1429 }
1430 
1431 /**
1432  * wmi_ext_dbg_msg_cmd_record() - record wmi command messages
1433  * @wmi_handle: wmi handler
1434  * @buf: wmi command buffer
1435  * @len: wmi command message length
1436  *
1437  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1438  */
1439 static QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified *wmi_handle,
1440 					     uint8_t *buf, uint32_t len)
1441 {
1442 	return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1443 				      WMI_MSG_TYPE_CMD);
1444 }
1445 
1446 /**
1447  * wmi_ext_dbg_msg_event_record() - record wmi event messages
1448  * @wmi_handle: wmi handler
1449  * @buf: wmi event buffer
1450  * @len: wmi event message length
1451  *
1452  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1453  */
1454 static QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified *wmi_handle,
1455 					       uint8_t *buf, uint32_t len)
1456 {
1457 	uint32_t id;
1458 
1459 	id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
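	/* Diag events are not recorded here; everything else is */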
1460 	if (id != wmi_handle->wmi_events[wmi_diag_event_id])
1461 		return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1462 					      WMI_MSG_TYPE_EVENT);
1463 
1464 	return QDF_STATUS_SUCCESS;
1465 }
1466 
1467 /**
1468  * wmi_ext_dbg_msg_queue_init() - create debugfs queue and associated lock
1469  * @wmi_handle: wmi handler
1470  *
1471  * Return: none
1472  */
1473 static void wmi_ext_dbg_msg_queue_init(struct wmi_unified *wmi_handle)
1474 {
1475 	qdf_list_create(&wmi_handle->wmi_ext_dbg_msg_queue,
1476 			wmi_handle->wmi_ext_dbg_msg_queue_size);
1477 	qdf_spinlock_create(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1478 }
1479 
1480 /**
1481  * wmi_ext_dbg_msg_queue_deinit() - destroy debugfs queue and associated lock
1482  * @wmi_handle: wmi handler
1483  *
1484  * Return: none
1485  */
1486 static void wmi_ext_dbg_msg_queue_deinit(struct wmi_unified *wmi_handle)
1487 {
1488 	qdf_list_destroy(&wmi_handle->wmi_ext_dbg_msg_queue);
1489 	qdf_spinlock_destroy(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1490 }
1491 
1492 /**
1493  * wmi_ext_dbg_msg_show() - debugfs function to display whole content of
1494  * wmi command/event messages including headers.
1495  * @file: qdf debugfs file handler
1496  * @arg: pointer to wmi handler
1497  *
1498  * Return: QDF_STATUS_SUCCESS if all the messages are shown successfully,
1499  * else QDF_STATUS_E_AGAIN if there is more data to show.
1500  */
1501 static QDF_STATUS wmi_ext_dbg_msg_show(qdf_debugfs_file_t file, void *arg)
1502 {
1503 	struct wmi_unified *wmi_handle = (struct wmi_unified *)arg;
1504 	struct wmi_ext_dbg_msg *msg;
1505 	uint64_t secs, usecs;
1506 
1507 	msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1508 	if (!msg)
1509 		return QDF_STATUS_SUCCESS;
1510 
1511 	qdf_debugfs_printf(file, "%s: 0x%x\n",
1512 			   msg->type == WMI_MSG_TYPE_CMD ? "COMMAND" :
1513 			   "EVENT", WMI_GET_FIELD(msg->buf, WMI_CMD_HDR,
1514 						  COMMANDID));
1515 	qdf_log_timestamp_to_secs(msg->ts, &secs, &usecs);
1516 	qdf_debugfs_printf(file, "Time: %llu.%llu\n", secs, usecs);
1517 	qdf_debugfs_printf(file, "Length:%d\n", msg->len);
1518 	qdf_debugfs_hexdump(file, msg->buf, msg->len,
1519 			    WMI_EXT_DBG_DUMP_ROW_SIZE,
1520 			    WMI_EXT_DBG_DUMP_GROUP_SIZE);
1521 	qdf_debugfs_printf(file, "\n");
1522 
1523 	if (qdf_debugfs_overflow(file)) {
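	/*
	 * If the debugfs buffer overflowed, put the message back at the front
	 * so it is shown again on the next read; otherwise release it.
	 */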
1524 		qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1525 		qdf_list_insert_front(&wmi_handle->wmi_ext_dbg_msg_queue,
1526 				      &msg->node);
1527 		qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1528 
1529 	} else {
1530 		wmi_ext_dbg_msg_put(msg);
1531 	}
1532 
1533 	return QDF_STATUS_E_AGAIN;
1534 }
1535 
1536 /**
1537  * wmi_ext_dbg_msg_write() - debugfs write not supported
1538  * @priv: private data
1539  * @buf: received data buffer
1540  * @len: length of received buffer
1541  *
1542  * Return: QDF_STATUS_E_NOSUPPORT.
1543  */
1544 static QDF_STATUS wmi_ext_dbg_msg_write(void *priv, const char *buf,
1545 					qdf_size_t len)
1546 {
1547 	return QDF_STATUS_E_NOSUPPORT;
1548 }
1549 
1550 static struct qdf_debugfs_fops wmi_ext_dbgfs_ops[WMI_MAX_RADIOS];
1551 
1552 /**
1553  * wmi_ext_dbgfs_init() - init debugfs items for extended wmi dump.
1554  * @wmi_handle: wmi handler
1555  * @pdev_idx: pdev index
1556  *
1557  * Return: QDF_STATUS_SUCCESS if debugfs is initialized else
1558  * QDF_STATUS_E_FAILURE
1559  */
1560 static QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1561 				     uint32_t pdev_idx)
1562 {
1563 	qdf_dentry_t dentry;
1564 	char buf[32];
1565 
1566 	/* To maintain backward compatibility, the naming convention for the
1567 	 * PDEV 0 dentry on SOC 0 is kept the same as before. For any other
1568 	 * SOC/PDEV combination, the dentry name carries the SOC and PDEV indices.
1569 	 */
1570 	if (wmi_handle->soc->soc_idx == 0 && pdev_idx == 0) {
1571 		dentry  = qdf_debugfs_create_dir(WMI_EXT_DBG_DIR, NULL);
1572 	} else {
1573 		snprintf(buf, sizeof(buf), "WMI_EXT_DBG_SOC%u_PDEV%u",
1574 			 wmi_handle->soc->soc_idx, pdev_idx);
1575 		dentry  = qdf_debugfs_create_dir(buf, NULL);
1576 	}
1577 
1578 	if (!dentry) {
1579 		wmi_err("error while creating extended wmi debugfs dir");
1580 		return QDF_STATUS_E_FAILURE;
1581 	}
1582 
1583 	wmi_ext_dbgfs_ops[pdev_idx].show = wmi_ext_dbg_msg_show;
1584 	wmi_ext_dbgfs_ops[pdev_idx].write = wmi_ext_dbg_msg_write;
1585 	wmi_ext_dbgfs_ops[pdev_idx].priv = wmi_handle;
1586 	if (!qdf_debugfs_create_file(WMI_EXT_DBG_FILE, WMI_EXT_DBG_FILE_PERM,
1587 				     dentry, &wmi_ext_dbgfs_ops[pdev_idx])) {
1588 		qdf_debugfs_remove_dir(dentry);
1589 		wmi_err("Error while creating extended wmi debugfs file");
1590 		return QDF_STATUS_E_FAILURE;
1591 	}
1592 
1593 	wmi_handle->wmi_ext_dbg_dentry = dentry;
1594 	wmi_handle->wmi_ext_dbg_msg_queue_size = WMI_EXT_DBG_QUEUE_SIZE;
1595 	wmi_ext_dbg_msg_queue_init(wmi_handle);
1596 
1597 	return QDF_STATUS_SUCCESS;
1598 }
1599 
1600 /**
1601  * wmi_ext_dbgfs_deinit() - cleanup/deinit debugfs items of extended wmi dump.
1602  * @wmi_handle: wmi handler
1603  *
1604  * Return: QDF_STATUS_SUCCESS if cleanup is successful
1605  */
1606 static QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1607 {
1608 	struct wmi_ext_dbg_msg *msg;
1609 
1610 	while ((msg = wmi_ext_dbg_msg_dequeue(wmi_handle)))
1611 		wmi_ext_dbg_msg_put(msg);
1612 
1613 	wmi_ext_dbg_msg_queue_deinit(wmi_handle);
1614 	qdf_debugfs_remove_dir_recursive(wmi_handle->wmi_ext_dbg_dentry);
1615 
1616 	return QDF_STATUS_SUCCESS;
1617 }
1618 
1619 #else
1620 
1621 static inline QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified
1622 						    *wmi_handle,
1623 						    uint8_t *buf, uint32_t len)
1624 {
1625 	return QDF_STATUS_SUCCESS;
1626 }
1627 
1628 static inline QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified
1629 						      *wmi_handle,
1630 						      uint8_t *buf, uint32_t len)
1631 {
1632 	return QDF_STATUS_SUCCESS;
1633 }
1634 
1635 static inline QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1636 					    uint32_t pdev_idx)
1637 {
1638 	return QDF_STATUS_SUCCESS;
1639 }
1640 
1641 static inline QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1642 {
1643 	return QDF_STATUS_SUCCESS;
1644 }
1645 
1646 #endif /*WMI_EXT_DBG */
1647 
1648 int wmi_get_host_credits(wmi_unified_t wmi_handle);
1649 /* WMI buffer APIs */
1650 
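/*
 * Descriptive note on the allocation path below: wmi_buf_alloc first tries
 * to obtain a pre-allocated buffer from the wbuff pools registered in
 * wmi_wbuff_register(); only when no pooled buffer is available does it
 * fall back to a fresh nbuf allocation, with WMI_MIN_HEAD_ROOM reserved
 * for the WMI/HTC headers.
 */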
1651 #ifdef NBUF_MEMORY_DEBUG
1652 wmi_buf_t
1653 wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len,
1654 		    const char *func_name,
1655 		    uint32_t line_num)
1656 {
1657 	wmi_buf_t wmi_buf;
1658 
1659 	if (roundup(len, 4) > wmi_handle->max_msg_len) {
1660 		wmi_err("Invalid length %u (via %s:%u) max size: %u",
1661 			len, func_name, line_num,
1662 			wmi_handle->max_msg_len);
1663 		QDF_ASSERT(0);
1664 		return NULL;
1665 	}
1666 
1667 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, func_name,
1668 				 line_num);
1669 	if (!wmi_buf)
1670 		wmi_buf = qdf_nbuf_alloc_debug(NULL,
1671 					       roundup(len + WMI_MIN_HEAD_ROOM,
1672 						       4),
1673 					       WMI_MIN_HEAD_ROOM, 4, false,
1674 					       func_name, line_num);
1675 	if (!wmi_buf)
1676 		return NULL;
1677 
1678 	/* Clear the wmi buffer */
1679 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1680 
1681 	/*
1682 	 * Set the length of the buffer to match the allocation size.
1683 	 */
1684 	qdf_nbuf_set_pktlen(wmi_buf, len);
1685 
1686 	return wmi_buf;
1687 }
1688 qdf_export_symbol(wmi_buf_alloc_debug);
1689 
1690 void wmi_buf_free(wmi_buf_t net_buf)
1691 {
1692 	net_buf = wbuff_buff_put(net_buf);
1693 	if (net_buf)
1694 		qdf_nbuf_free(net_buf);
1695 }
1696 qdf_export_symbol(wmi_buf_free);
1697 #else
1698 wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len,
1699 			   const char *func, uint32_t line)
1700 {
1701 	wmi_buf_t wmi_buf;
1702 
1703 	if (roundup(len, 4) > wmi_handle->max_msg_len) {
1704 		QDF_DEBUG_PANIC("Invalid length %u (via %s:%u) max size: %u",
1705 				len, func, line, wmi_handle->max_msg_len);
1706 		return NULL;
1707 	}
1708 
1709 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, __func__,
1710 				 __LINE__);
1711 	if (!wmi_buf)
1712 		wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len +
1713 				WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4,
1714 				false, func, line);
1715 
1716 	if (!wmi_buf) {
1717 		wmi_nofl_err("%s:%d, failed to alloc len:%d", func, line, len);
1718 		return NULL;
1719 	}
1720 
1721 	/* Clear the wmi buffer */
1722 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1723 
1724 	/*
1725 	 * Set the length of the buffer to match the allocation size.
1726 	 */
1727 	qdf_nbuf_set_pktlen(wmi_buf, len);
1728 
1729 	return wmi_buf;
1730 }
1731 qdf_export_symbol(wmi_buf_alloc_fl);
1732 
1733 void wmi_buf_free(wmi_buf_t net_buf)
1734 {
1735 	net_buf = wbuff_buff_put(net_buf);
1736 	if (net_buf)
1737 		qdf_nbuf_free(net_buf);
1738 }
1739 qdf_export_symbol(wmi_buf_free);
1740 #endif
1741 
1742 uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle)
1743 {
1744 	return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM;
1745 }
1746 qdf_export_symbol(wmi_get_max_msg_len);
1747 
1748 #ifndef WMI_CMD_STRINGS
1749 static uint8_t *wmi_id_to_name(uint32_t wmi_command)
1750 {
1751 	return "Invalid WMI cmd";
1752 }
1753 #endif
1754 
1755 static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag)
1756 {
1757 	wmi_nofl_debug("Send cmd %s(0x%x) tag:%d",
1758 		       wmi_id_to_name(cmd_id), cmd_id, tag);
1759 }
1760 
1761 /**
1762  * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence
1763  * @cmd_id: command to check
1764  *
1765  * Return: true if the command is part of the resume sequence.
1766  */
1767 #ifdef WLAN_POWER_MANAGEMENT_OFFLOAD
1768 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1769 {
1770 	switch (cmd_id) {
1771 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
1772 	case WMI_PDEV_RESUME_CMDID:
1773 		return true;
1774 
1775 	default:
1776 		return false;
1777 	}
1778 }
1779 
1780 #else
1781 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1782 {
1783 	return false;
1784 }
1785 
1786 #endif
1787 
1788 #ifdef FEATURE_WLAN_D0WOW
1789 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1790 {
1791 	wmi_d0_wow_enable_disable_cmd_fixed_param *cmd;
1792 
1793 	if (cmd_id == WMI_D0_WOW_ENABLE_DISABLE_CMDID) {
1794 		cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *)
1795 			wmi_buf_data(buf);
1796 		return !cmd->enable;
1800 	}
1801 
1802 	return false;
1803 }
1804 #else
1805 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1806 {
1807 	return false;
1808 }
1809 
1810 #endif
1811 
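/*
 * WMI command/completion sequence checking: when
 * WMI_INTERFACE_SEQUENCE_CHECK is enabled, every command handed to HTC is
 * stamped with a monotonically increasing sequence number (stored in the
 * nbuf mark) under wmi_seq_lock, and the tx completion path verifies that
 * completions arrive in the same order. A mismatch triggers self recovery
 * with QDF_WMI_BUF_SEQUENCE_MISMATCH.
 */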
1812 #ifdef WMI_INTERFACE_SEQUENCE_CHECK
1813 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1814 {
1815 	wmi_handle->wmi_sequence = 0;
1816 	wmi_handle->wmi_exp_sequence = 0;
1817 	wmi_handle->wmi_sequence_stop = false;
1818 }
1819 
1820 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1821 {
1822 	qdf_spinlock_create(&wmi_handle->wmi_seq_lock);
1823 	wmi_interface_sequence_reset(wmi_handle);
1824 }
1825 
1826 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1827 {
1828 	qdf_spinlock_destroy(&wmi_handle->wmi_seq_lock);
1829 }
1830 
1831 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1832 {
1833 	wmi_handle->wmi_sequence_stop = true;
1834 }
1835 
1836 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1837 					  HTC_PACKET *pkt,
1838 					  const char *func, uint32_t line)
1839 {
1840 	wmi_buf_t buf = GET_HTC_PACKET_NET_BUF_CONTEXT(pkt);
1841 	QDF_STATUS status;
1842 
1843 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1844 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1845 	if (QDF_STATUS_SUCCESS != status) {
1846 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1847 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1848 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1849 			     func, line, status);
1850 		qdf_mem_free(pkt);
1851 		return status;
1852 	}
1853 	/* Record the sequence number in the SKB */
1854 	qdf_nbuf_set_mark(buf, wmi_handle->wmi_sequence);
1855 	/* Increment the sequence number */
1856 	wmi_handle->wmi_sequence = (wmi_handle->wmi_sequence + 1)
1857 				   & (wmi_handle->wmi_max_cmds - 1);
1858 	qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1859 
1860 	return status;
1861 }
1862 
1863 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1864 						wmi_buf_t buf)
1865 {
1866 	/* Skip sequence check when wmi sequence stop is set */
1867 	if (wmi_handle->wmi_sequence_stop)
1868 		return;
1869 
1870 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1871 	/* Match the completion sequence and expected sequence number */
1872 	if (qdf_nbuf_get_mark(buf) != wmi_handle->wmi_exp_sequence) {
1873 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1874 		wmi_nofl_err("WMI Tx Completion Sequence number mismatch");
1875 		wmi_nofl_err("Expected %d Received %d",
1876 			     wmi_handle->wmi_exp_sequence,
1877 			     qdf_nbuf_get_mark(buf));
1878 		/* Trigger Recovery */
1879 		qdf_trigger_self_recovery(wmi_handle->soc,
1880 					  QDF_WMI_BUF_SEQUENCE_MISMATCH);
1881 	} else {
1882 		/* Increment the expected sequence number */
1883 		wmi_handle->wmi_exp_sequence =
1884 				(wmi_handle->wmi_exp_sequence + 1)
1885 				& (wmi_handle->wmi_max_cmds - 1);
1886 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1887 	}
1888 }
1889 #else
1890 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1891 {
1892 }
1893 
1894 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1895 {
1896 }
1897 
1898 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1899 {
1900 }
1901 
1902 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1903 {
1904 }
1905 
1906 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1907 					  HTC_PACKET *pkt,
1908 					  const char *func, uint32_t line)
1909 {
1910 	QDF_STATUS status;
1911 
1912 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1913 	if (QDF_STATUS_SUCCESS != status) {
1914 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1915 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1916 			     func, line, status);
1917 		qdf_mem_free(pkt);
1918 		return status;
1919 	}
1920 
1921 	return status;
1922 }
1923 
1924 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1925 						wmi_buf_t buf)
1926 {
1927 }
1928 #endif
1929 
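/**
 * wmi_unified_debug_dump() - log endpoint id, tx queue depth, soc id and
 * target type of a wmi handle for debugging
 * @wmi_handle: handle to wmi
 *
 * Return: none
 */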
1930 static inline void wmi_unified_debug_dump(wmi_unified_t wmi_handle)
1931 {
1932 	wmi_nofl_err("Endpoint ID = %d, Tx Queue Depth = %d, soc_id = %u, target type = %s",
1933 		     wmi_handle->wmi_endpoint_id,
1934 		     htc_get_tx_queue_depth(wmi_handle->htc_handle,
1935 					    wmi_handle->wmi_endpoint_id),
1936 		     wmi_handle->soc->soc_idx,
1937 		     (wmi_handle->target_type ==
1938 		      WMI_TLV_TARGET ? "WMI_TLV_TARGET" :
1939 						"WMI_NON_TLV_TARGET"));
1940 }
1941 
1942 #ifdef SYSTEM_PM_CHECK
1943 /**
1944  * wmi_set_system_pm_pkt_tag() - API to set tag for system pm packets
1945  * @htc_tag: HTC tag
1946  * @buf: wmi cmd buffer
1947  * @cmd_id: cmd id
1948  *
1949  * Return: None
1950  */
1951 static void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
1952 				      uint32_t cmd_id)
1953 {
1954 	switch (cmd_id) {
1955 	case WMI_WOW_ENABLE_CMDID:
1956 	case WMI_PDEV_SUSPEND_CMDID:
1957 		*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
1958 		break;
1959 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
1960 	case WMI_PDEV_RESUME_CMDID:
1961 		*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
1962 		break;
1963 	case WMI_D0_WOW_ENABLE_DISABLE_CMDID:
1964 		if (wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id))
1965 			*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
1966 		else
1967 			*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
1968 		break;
1969 	default:
1970 		break;
1971 	}
1972 }
1973 #else
1974 static inline void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
1975 					     uint32_t cmd_id)
1976 {
1977 }
1978 #endif
1979 
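/**
 * wmi_unified_cmd_send_fl() - send a wmi command to the target
 * @wmi_handle: handle to wmi
 * @buf: wmi command buffer
 * @len: length of the command payload, excluding the WMI_CMD_HDR added here
 * @cmd_id: wmi command id
 * @func: caller function name, used in error logs
 * @line: caller line number, used in error logs
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */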
1980 QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf,
1981 				   uint32_t len, uint32_t cmd_id,
1982 				   const char *func, uint32_t line)
1983 {
1984 	HTC_PACKET *pkt;
1985 	uint16_t htc_tag = 0;
1986 	bool rtpm_inprogress;
1987 
1988 	rtpm_inprogress = wmi_get_runtime_pm_inprogress(wmi_handle);
1989 	if (rtpm_inprogress) {
1990 		htc_tag = wmi_handle->ops->wmi_set_htc_tx_tag(wmi_handle, buf,
1991 							      cmd_id);
1992 	} else if (qdf_atomic_read(&wmi_handle->is_target_suspended) &&
1993 		   !wmi_is_pm_resume_cmd(cmd_id) &&
1994 		   !wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id)) {
1995 		wmi_nofl_err("Target is suspended (via %s:%u)",
1996 			     func, line);
1997 		return QDF_STATUS_E_BUSY;
1998 	}
1999 
2000 	if (wmi_handle->wmi_stopinprogress) {
2001 		wmi_nofl_err("%s:%d, WMI stop in progress, wmi_handle:%pK",
2002 			     func, line, wmi_handle);
2003 		return QDF_STATUS_E_INVAL;
2004 	}
2005 
2006 #ifndef WMI_NON_TLV_SUPPORT
2007 	/* Do sanity check on the TLV parameter structure */
2008 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2009 		void *buf_ptr = (void *)qdf_nbuf_data(buf);
2010 
2011 		if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr,
2012 							      len, cmd_id) != 0) {
2013 			wmi_nofl_err("%s:%d, Invalid WMI Param Buffer for Cmd:%d",
2014 				     func, line, cmd_id);
2015 			return QDF_STATUS_E_INVAL;
2016 		}
2017 	}
2018 #endif
2019 
2020 	if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
2021 		wmi_nofl_err("%s:%d, Failed to send cmd %x, no memory",
2022 			     func, line, cmd_id);
2023 		return QDF_STATUS_E_NOMEM;
2024 	}
2025 
2026 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2027 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2028 
2029 	qdf_atomic_inc(&wmi_handle->pending_cmds);
2030 	if (qdf_atomic_read(&wmi_handle->pending_cmds) >=
2031 			wmi_handle->wmi_max_cmds) {
2032 		wmi_nofl_err("hostcredits = %d",
2033 			     wmi_get_host_credits(wmi_handle));
2034 		htc_dump_counter_info(wmi_handle->htc_handle);
2035 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2036 		wmi_nofl_err("%s:%d, MAX %d WMI Pending cmds reached",
2037 			     func, line, wmi_handle->wmi_max_cmds);
2038 		wmi_unified_debug_dump(wmi_handle);
2039 		htc_ce_tasklet_debug_dump(wmi_handle->htc_handle);
2040 		qdf_trigger_self_recovery(wmi_handle->soc->wmi_psoc,
2041 					  QDF_WMI_EXCEED_MAX_PENDING_CMDS);
2042 		return QDF_STATUS_E_BUSY;
2043 	}
2044 
2045 	pkt = qdf_mem_malloc_fl(sizeof(*pkt), func, line);
2046 	if (!pkt) {
2047 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2048 		return QDF_STATUS_E_NOMEM;
2049 	}
2050 
2051 	if (!rtpm_inprogress)
2052 		wmi_set_system_pm_pkt_tag(&htc_tag, buf, cmd_id);
2053 
2054 	SET_HTC_PACKET_INFO_TX(pkt,
2055 			       NULL,
2056 			       qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
2057 			       wmi_handle->wmi_endpoint_id, htc_tag);
2058 
2059 	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);
2060 	wmi_log_cmd_id(cmd_id, htc_tag);
2061 	wmi_ext_dbg_msg_cmd_record(wmi_handle,
2062 				   qdf_nbuf_data(buf), qdf_nbuf_len(buf));
2063 #ifdef WMI_INTERFACE_EVENT_LOGGING
2064 	if (wmi_handle->log_info.wmi_logging_enable) {
2065 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2066 		/*
2067 		 * Record 16 bytes of WMI cmd data -
2068 		 * exclude TLV and WMI headers
2069 		 *
2070 		 * WMI mgmt command already recorded in wmi_mgmt_cmd_record
2071 		 */
2072 		if (wmi_handle->ops->is_management_record(cmd_id) == false) {
2073 			uint8_t *tmpbuf = (uint8_t *)qdf_nbuf_data(buf) +
2074 				wmi_handle->soc->buf_offset_command;
2075 
2076 			WMI_COMMAND_RECORD(wmi_handle, cmd_id, tmpbuf);
2077 			wmi_specific_cmd_record(wmi_handle, cmd_id, tmpbuf);
2078 		}
2079 
2080 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2081 	}
2082 #endif
2083 	return wmi_htc_send_pkt(wmi_handle, pkt, func, line);
2084 }
2085 qdf_export_symbol(wmi_unified_cmd_send_fl);
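/*
 * Illustrative caller sketch (not taken from this file): callers normally
 * go through the wmi_buf_alloc()/wmi_unified_cmd_send() wrappers declared
 * in wmi_unified_api.h, which resolve to the _fl/_debug variants above.
 *
 *	wmi_buf_t buf = wmi_buf_alloc(wmi_handle, len);
 *
 *	if (!buf)
 *		return QDF_STATUS_E_NOMEM;
 *	... populate qdf_nbuf_data(buf) with the command payload ...
 *	if (QDF_IS_STATUS_ERROR(wmi_unified_cmd_send(wmi_handle, buf,
 *						     len, cmd_id)))
 *		wmi_buf_free(buf);
 */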
2086 
2087 /**
2088  * wmi_unified_get_event_handler_ix() - gives event handler's index
2089  * @wmi_handle: handle to wmi
2090  * @event_id: wmi event id
2091  *
2092  * Return: event handler's index, or -1 if no handler is registered
2093  */
2094 static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
2095 					    uint32_t event_id)
2096 {
2097 	uint32_t idx = 0;
2098 	int32_t invalid_idx = -1;
2099 	struct wmi_soc *soc = wmi_handle->soc;
2100 
2101 	for (idx = 0; (idx < soc->max_event_idx &&
2102 		       idx < WMI_UNIFIED_MAX_EVENT); ++idx) {
2103 		if (wmi_handle->event_id[idx] == event_id &&
2104 		    wmi_handle->event_handler[idx]) {
2105 			return idx;
2106 		}
2107 	}
2108 
2109 	return invalid_idx;
2110 }
2111 
2112 /**
2113  * wmi_register_event_handler_with_ctx() - register event handler with
2114  * exec ctx and buffer type
2115  * @wmi_handle: handle to wmi
2116  * @event_id: wmi event id
2117  * @handler_func: wmi event handler function
2118  * @rx_ctx: rx execution context for wmi rx events
2119  * @rx_buf_type: rx buffer type (raw or processed) for wmi rx events
2120  *
2121  * Return: QDF_STATUS_SUCCESS on successful register event else failure.
2122  */
2123 static QDF_STATUS
2124 wmi_register_event_handler_with_ctx(wmi_unified_t wmi_handle,
2125 				    uint32_t event_id,
2126 				    wmi_unified_event_handler handler_func,
2127 				    enum wmi_rx_exec_ctx rx_ctx,
2128 				    enum wmi_rx_buff_type rx_buf_type)
2129 {
2130 	uint32_t idx = 0;
2131 	uint32_t evt_id;
2132 	struct wmi_soc *soc;
2133 
2134 	if (!wmi_handle) {
2135 		wmi_err("WMI handle is NULL");
2136 		return QDF_STATUS_E_FAILURE;
2137 	}
2138 
2139 	soc = wmi_handle->soc;
2140 
2141 	if (event_id >= wmi_events_max) {
2142 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2143 			  "%s: Event id %d is unavailable",
2144 					__func__, event_id);
2145 		return QDF_STATUS_E_FAILURE;
2146 	}
2147 
2148 	if (wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2149 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
2150 			  "%s: Event id %d is not supported",
2151 			  __func__, event_id);
2152 		return QDF_STATUS_E_NOSUPPORT;
2153 	}
2154 	evt_id = wmi_handle->wmi_events[event_id];
2155 
2156 	if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) {
2157 		wmi_info("event handler already registered 0x%x", evt_id);
2158 		return QDF_STATUS_E_FAILURE;
2159 	}
2160 	if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
2161 		wmi_err("no more event handlers 0x%x",
2162 			 evt_id);
2163 		return QDF_STATUS_E_FAILURE;
2164 	}
2165 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
2166 		  "Registered event handler for event 0x%8x", evt_id);
2167 	idx = soc->max_event_idx;
2168 	wmi_handle->event_handler[idx] = handler_func;
2169 	wmi_handle->event_id[idx] = evt_id;
2170 
2171 	qdf_spin_lock_bh(&soc->ctx_lock);
2172 	wmi_handle->ctx[idx].exec_ctx = rx_ctx;
2173 	wmi_handle->ctx[idx].buff_type = rx_buf_type;
2174 	qdf_spin_unlock_bh(&soc->ctx_lock);
2175 	soc->max_event_idx++;
2176 
2177 	return QDF_STATUS_SUCCESS;
2178 }
2179 
2180 QDF_STATUS
2181 wmi_unified_register_event(wmi_unified_t wmi_handle,
2182 			   uint32_t event_id,
2183 			   wmi_unified_event_handler handler_func)
2184 {
2185 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2186 						   handler_func,
2187 						   WMI_RX_UMAC_CTX,
2188 						   WMI_RX_PROCESSED_BUFF);
2189 }
2190 
2191 QDF_STATUS
2192 wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
2193 				   wmi_conv_event_id event_id,
2194 				   wmi_unified_event_handler handler_func,
2195 				   uint8_t rx_ctx)
2196 {
2197 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2198 						   handler_func, rx_ctx,
2199 						   WMI_RX_PROCESSED_BUFF);
2200 }
2201 
2202 qdf_export_symbol(wmi_unified_register_event_handler);
2203 
2204 QDF_STATUS
2205 wmi_unified_register_raw_event_handler(wmi_unified_t wmi_handle,
2206 				       wmi_conv_event_id event_id,
2207 				       wmi_unified_event_handler handler_func,
2208 				       enum wmi_rx_exec_ctx rx_ctx)
2209 {
2210 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2211 						   handler_func, rx_ctx,
2212 						   WMI_RX_RAW_BUFF);
2213 }
2214 
2215 qdf_export_symbol(wmi_unified_register_raw_event_handler);
2216 
2217 QDF_STATUS wmi_unified_unregister_event(wmi_unified_t wmi_handle,
2218 					uint32_t event_id)
2219 {
2220 	uint32_t idx = 0;
2221 	uint32_t evt_id;
2222 	struct wmi_soc *soc;
2223 
2224 	if (!wmi_handle) {
2225 		wmi_err("WMI handle is NULL");
2226 		return QDF_STATUS_E_FAILURE;
2227 	}
2228 
2229 	soc = wmi_handle->soc;
2230 	if (event_id >= wmi_events_max ||
2231 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2232 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2233 			  "%s: Event id %d is unavailable",
2234 					__func__, event_id);
2235 		return QDF_STATUS_E_FAILURE;
2236 	}
2237 	evt_id = wmi_handle->wmi_events[event_id];
2238 
2239 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2240 	if (idx == -1) {
2241 		wmi_warn("event handler is not registered: evt id 0x%x",
2242 			 evt_id);
2243 		return QDF_STATUS_E_FAILURE;
2244 	}
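	/*
	 * Compact the handler table: the last registered handler, its event
	 * id and rx context are moved into the slot being freed so that the
	 * first max_event_idx entries stay contiguous.
	 */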
2245 	wmi_handle->event_handler[idx] = NULL;
2246 	wmi_handle->event_id[idx] = 0;
2247 	--soc->max_event_idx;
2248 	wmi_handle->event_handler[idx] =
2249 		wmi_handle->event_handler[soc->max_event_idx];
2250 	wmi_handle->event_id[idx] =
2251 		wmi_handle->event_id[soc->max_event_idx];
2252 
2253 	qdf_spin_lock_bh(&soc->ctx_lock);
2254 
2255 	wmi_handle->ctx[idx].exec_ctx =
2256 		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
2257 	wmi_handle->ctx[idx].buff_type =
2258 		wmi_handle->ctx[soc->max_event_idx].buff_type;
2259 
2260 	qdf_spin_unlock_bh(&soc->ctx_lock);
2261 
2262 	return QDF_STATUS_SUCCESS;
2263 }
2264 
2265 QDF_STATUS wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
2266 						wmi_conv_event_id event_id)
2267 {
2268 	uint32_t idx = 0;
2269 	uint32_t evt_id;
2270 	struct wmi_soc *soc;
2271 
2272 	if (!wmi_handle) {
2273 		wmi_err("WMI handle is NULL");
2274 		return QDF_STATUS_E_FAILURE;
2275 	}
2276 
2277 	soc = wmi_handle->soc;
2278 
2279 	if (event_id >= wmi_events_max) {
2280 		wmi_err("Event id %d is unavailable", event_id);
2281 		return QDF_STATUS_E_FAILURE;
2282 	}
2283 
2284 	if (wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2285 		wmi_debug("Event id %d is not supported", event_id);
2286 		return QDF_STATUS_E_NOSUPPORT;
2287 	}
2288 
2289 	evt_id = wmi_handle->wmi_events[event_id];
2290 
2291 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2292 	if (idx == -1) {
2293 		wmi_err("event handler is not registered: evt id 0x%x",
2294 			 evt_id);
2295 		return QDF_STATUS_E_FAILURE;
2296 	}
2297 	wmi_handle->event_handler[idx] = NULL;
2298 	wmi_handle->event_id[idx] = 0;
2299 	--soc->max_event_idx;
2300 	wmi_handle->event_handler[idx] =
2301 		wmi_handle->event_handler[soc->max_event_idx];
2302 	wmi_handle->event_id[idx] =
2303 		wmi_handle->event_id[soc->max_event_idx];
2304 
2305 	qdf_spin_lock_bh(&soc->ctx_lock);
2306 
2307 	wmi_handle->ctx[idx].exec_ctx =
2308 		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
2309 	wmi_handle->ctx[idx].buff_type =
2310 		wmi_handle->ctx[soc->max_event_idx].buff_type;
2311 
2312 	qdf_spin_unlock_bh(&soc->ctx_lock);
2313 
2314 	return QDF_STATUS_SUCCESS;
2315 }
2316 qdf_export_symbol(wmi_unified_unregister_event_handler);
2317 
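/**
 * wmi_process_rx_diag_event_worker_thread_ctx() - queue a diag event for
 * processing in the rx diag work queue context
 * @wmi_handle: handle to wmi
 * @evt_buf: diag event buffer
 *
 * The event is dropped (and a drop counter incremented) once the pending
 * queue depth reaches RX_DIAG_WQ_MAX_SIZE.
 *
 * Return: none
 */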
2318 static void
2319 wmi_process_rx_diag_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2320 					    void *evt_buf)
2321 {
2322 	uint32_t num_diag_events_pending;
2323 
2324 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
2325 	if (RX_DIAG_WQ_MAX_SIZE > 0) {
2326 		num_diag_events_pending = qdf_nbuf_queue_len(
2327 						&wmi_handle->diag_event_queue);
2328 
2329 		if (num_diag_events_pending >= RX_DIAG_WQ_MAX_SIZE) {
2330 			qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2331 			wmi_handle->wmi_rx_diag_events_dropped++;
2332 			wmi_debug_rl("Rx diag events dropped count: %d",
2333 				     wmi_handle->wmi_rx_diag_events_dropped);
2334 			qdf_nbuf_free(evt_buf);
2335 			return;
2336 		}
2337 	}
2338 
2339 	qdf_nbuf_queue_add(&wmi_handle->diag_event_queue, evt_buf);
2340 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2341 	qdf_queue_work(0, wmi_handle->wmi_rx_diag_work_queue,
2342 		       &wmi_handle->rx_diag_event_work);
2343 }
2344 
2345 void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2346 					    void *evt_buf)
2347 {
2348 
2349 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
2350 	qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
2351 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
2352 	qdf_queue_work(0, wmi_handle->wmi_rx_work_queue,
2353 			&wmi_handle->rx_event_work);
2354 
2355 	return;
2356 }
2357 
2358 qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx);
2359 
2360 uint32_t wmi_critical_events_in_flight(struct wmi_unified *wmi)
2361 {
2362 	return qdf_atomic_read(&wmi->critical_events_in_flight);
2363 }
2364 
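/**
 * wmi_is_event_critical() - check if the given event is a critical event
 * @wmi_handle: handle to wmi
 * @event_id: event id to check
 *
 * Critical events are counted via critical_events_in_flight while they
 * wait to be processed in the scheduler context.
 *
 * Return: true if the event is critical, false otherwise
 */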
2365 static bool
2366 wmi_is_event_critical(struct wmi_unified *wmi_handle, uint32_t event_id)
2367 {
2368 	if (wmi_handle->wmi_events[wmi_roam_synch_event_id] == event_id)
2369 		return true;
2370 
2371 	return false;
2372 }
2373 
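/**
 * wmi_discard_fw_event() - flush callback to drop a queued fw event
 * @msg: scheduler message carrying the event to discard
 *
 * Frees the event buffer and the message body when the scheduler flushes
 * a pending message instead of delivering it.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL otherwise
 */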
2374 static QDF_STATUS wmi_discard_fw_event(struct scheduler_msg *msg)
2375 {
2376 	struct wmi_process_fw_event_params *event_param;
2377 
2378 	if (!msg->bodyptr)
2379 		return QDF_STATUS_E_INVAL;
2380 
2381 	event_param = (struct wmi_process_fw_event_params *)msg->bodyptr;
2382 	qdf_nbuf_free(event_param->evt_buf);
2383 	qdf_mem_free(msg->bodyptr);
2384 	msg->bodyptr = NULL;
2385 	msg->bodyval = 0;
2386 	msg->type = 0;
2387 
2388 	return QDF_STATUS_SUCCESS;
2389 }
2390 
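/**
 * wmi_process_fw_event_handler() - scheduler callback to process a fw event
 * @msg: scheduler message carrying the event parameters
 *
 * Return: QDF_STATUS_SUCCESS
 */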
2391 static QDF_STATUS wmi_process_fw_event_handler(struct scheduler_msg *msg)
2392 {
2393 	struct wmi_process_fw_event_params *params =
2394 		(struct wmi_process_fw_event_params *)msg->bodyptr;
2395 	struct wmi_unified *wmi_handle;
2396 	uint32_t event_id;
2397 
2398 	wmi_handle = (struct wmi_unified *)params->wmi_handle;
2399 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params->evt_buf),
2400 				 WMI_CMD_HDR, COMMANDID);
2401 	wmi_process_fw_event(wmi_handle, params->evt_buf);
2402 
2403 	if (wmi_is_event_critical(wmi_handle, event_id))
2404 		qdf_atomic_dec(&wmi_handle->critical_events_in_flight);
2405 
2406 	qdf_mem_free(msg->bodyptr);
2407 
2408 	return QDF_STATUS_SUCCESS;
2409 }
2410 
2411 /**
2412  * wmi_process_fw_event_sched_thread_ctx() - common event handler to serialize
2413  *                                  event processing through scheduler thread
2414  * @wmi: wmi context
2415  * @ev: event buffer
2416  *
2417  * Return: QDF_STATUS_SUCCESS on success, error status on failure
2418  */
2419 static QDF_STATUS
2420 wmi_process_fw_event_sched_thread_ctx(struct wmi_unified *wmi,
2421 				      void *ev)
2422 {
2423 	struct wmi_process_fw_event_params *params_buf;
2424 	struct scheduler_msg msg = { 0 };
2425 	uint32_t event_id;
2426 
2427 	params_buf = qdf_mem_malloc(sizeof(struct wmi_process_fw_event_params));
2428 	if (!params_buf) {
2429 		wmi_err("malloc failed");
2430 		qdf_nbuf_free(ev);
2431 		return QDF_STATUS_E_NOMEM;
2432 	}
2433 
2434 	params_buf->wmi_handle = wmi;
2435 	params_buf->evt_buf = ev;
2436 
2437 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params_buf->evt_buf),
2438 				 WMI_CMD_HDR, COMMANDID);
2439 	if (wmi_is_event_critical(wmi, event_id))
2440 		qdf_atomic_inc(&wmi->critical_events_in_flight);
2441 
2442 	msg.bodyptr = params_buf;
2443 	msg.bodyval = 0;
2444 	msg.callback = wmi_process_fw_event_handler;
2445 	msg.flush_callback = wmi_discard_fw_event;
2446 
2447 	if (QDF_STATUS_SUCCESS !=
2448 		scheduler_post_message(QDF_MODULE_ID_TARGET_IF,
2449 				       QDF_MODULE_ID_TARGET_IF,
2450 				       QDF_MODULE_ID_TARGET_IF, &msg)) {
2451 		qdf_nbuf_free(ev);
2452 		qdf_mem_free(params_buf);
2453 		return QDF_STATUS_E_FAULT;
2454 	}
2455 
2456 	return QDF_STATUS_SUCCESS;
2457 }
2458 
2459 /**
2460  * wmi_get_pdev_ep() - get wmi handle based on endpoint
2461  * @soc: handle to wmi soc
2462  * @ep: endpoint id
2463  *
2464  * Return: wmi handle for the endpoint, or NULL if none matches
2465  */
2466 static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc,
2467 						HTC_ENDPOINT_ID ep)
2468 {
2469 	uint32_t i;
2470 
2471 	for (i = 0; i < WMI_MAX_RADIOS; i++)
2472 		if (soc->wmi_endpoint_id[i] == ep)
2473 			break;
2474 
2475 	if (i == WMI_MAX_RADIOS)
2476 		return NULL;
2477 
2478 	return soc->wmi_pdev[i];
2479 }
2480 
2481 /**
2482  * wmi_mtrace_rx() - Wrapper function for the qdf_mtrace api
2483  * @message_id: 32-Bit Wmi message ID
2484  * @vdev_id: Vdev ID
2485  * @data: Actual message contents
2486  *
2487  * This function converts the 32-bit WMI message ID in 15-bit message ID
2488  * format for qdf_mtrace as in qdf_mtrace message there are only 15
2489  * bits reserved for message ID.
2490  * out of these 15-bits, 8-bits (From LSB) specifies the WMI_GRP_ID
2491  * and remaining 7-bits specifies the actual WMI command. With this
2492  * notation there can be maximum 256 groups and each group can have
2493  * max 128 commands can be supported.
2494  *
2495  * Return: None
2496  */
2497 static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data)
2498 {
2499 	uint16_t mtrace_message_id;
2500 
2501 	mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) |
2502 		(QDF_WMI_MTRACE_GRP_ID(message_id) <<
2503 						QDF_WMI_MTRACE_CMD_NUM_BITS);
2504 	qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA,
2505 		   mtrace_message_id, vdev_id, data);
2506 }
2507 
2508 /**
2509  * wmi_process_control_rx() - process fw events callbacks
2510  * @wmi_handle: handle to wmi_unified
2511  * @evt_buf: handle to wmi_buf_t
2512  *
2513  * Return: none
2514  */
2515 static void wmi_process_control_rx(struct wmi_unified *wmi_handle,
2516 				   wmi_buf_t evt_buf)
2517 {
2518 	struct wmi_soc *soc = wmi_handle->soc;
2519 	uint32_t id;
2520 	uint32_t idx;
2521 	enum wmi_rx_exec_ctx exec_ctx;
2522 
2523 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2524 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2525 	if (qdf_unlikely(idx == A_ERROR)) {
2526 		wmi_debug("no handler registered for event id 0x%x", id);
2527 		qdf_nbuf_free(evt_buf);
2528 		return;
2529 	}
2530 	wmi_mtrace_rx(id, 0xFF, idx);
2531 	qdf_spin_lock_bh(&soc->ctx_lock);
2532 	exec_ctx = wmi_handle->ctx[idx].exec_ctx;
2533 	qdf_spin_unlock_bh(&soc->ctx_lock);
2534 
2535 #ifdef WMI_INTERFACE_EVENT_LOGGING
2536 	if (wmi_handle->log_info.wmi_logging_enable) {
2537 		uint8_t *data = qdf_nbuf_data(evt_buf);
2539 
2540 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2541 		/* Exclude 4 bytes of TLV header */
2542 		if (wmi_handle->ops->is_diag_event(id)) {
2543 			WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id,
2544 				((uint8_t *) data +
2545 				wmi_handle->soc->buf_offset_event));
2546 		} else if (wmi_handle->ops->is_management_record(id)) {
2547 			WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id,
2548 				((uint8_t *) data +
2549 				wmi_handle->soc->buf_offset_event));
2550 		} else {
2551 			WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data +
2552 				wmi_handle->soc->buf_offset_event));
2553 		}
2554 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2555 	}
2556 #endif
2557 
2558 	if (exec_ctx == WMI_RX_WORK_CTX) {
2559 		wmi_process_fw_event_worker_thread_ctx(wmi_handle, evt_buf);
2560 	} else if (exec_ctx == WMI_RX_TASKLET_CTX) {
2561 		wmi_process_fw_event(wmi_handle, evt_buf);
2562 	} else if (exec_ctx == WMI_RX_SERIALIZER_CTX) {
2563 		wmi_process_fw_event_sched_thread_ctx(wmi_handle, evt_buf);
2564 	} else if (exec_ctx == WMI_RX_DIAG_WORK_CTX) {
2565 		wmi_process_rx_diag_event_worker_thread_ctx(wmi_handle,
2566 							    evt_buf);
2567 	} else {
2568 		wmi_err("Invalid event context %d", exec_ctx);
2569 		qdf_nbuf_free(evt_buf);
2570 	}
2571 
2572 }
2573 
2574 /**
2575  * wmi_control_rx() - process fw events callbacks
2576  * @ctx: handle to wmi
2577  * @htc_packet: pointer to htc packet
2578  *
2579  * Return: none
2580  */
2581 static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
2582 {
2583 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2584 	struct wmi_unified *wmi_handle;
2585 	wmi_buf_t evt_buf;
2586 
2587 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2588 
2589 	wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint);
2590 	if (!wmi_handle) {
2591 		wmi_err("unable to get wmi_handle for endpoint %d",
2592 			htc_packet->Endpoint);
2593 		qdf_nbuf_free(evt_buf);
2594 		return;
2595 	}
2596 
2597 	wmi_process_control_rx(wmi_handle, evt_buf);
2598 }
2599 
2600 #if defined(WLAN_FEATURE_WMI_DIAG_OVER_CE7) || \
2601 	defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
2602 /**
2603  * wmi_control_diag_rx() - process diag fw events callbacks
2604  * @ctx: handle to wmi
2605  * @htc_packet: pointer to htc packet
2606  *
2607  * Return: none
2608  */
2609 static void wmi_control_diag_rx(void *ctx, HTC_PACKET *htc_packet)
2610 {
2611 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2612 	struct wmi_unified *wmi_handle;
2613 	wmi_buf_t evt_buf;
2614 
2615 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2616 
2617 	wmi_handle = soc->wmi_pdev[0];
2618 
2619 	if (!wmi_handle) {
2620 		wmi_err("unable to get wmi_handle for diag event endpoint id:%d",
			htc_packet->Endpoint);
2621 		qdf_nbuf_free(evt_buf);
2622 		return;
2623 	}
2624 
2625 	wmi_process_control_rx(wmi_handle, evt_buf);
2626 }
2627 #endif
2628 
2629 #if defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
2630 /**
2631  * wmi_control_dbr_rx() - process dbr fw events callbacks
2632  * @ctx: handle to wmi
2633  * @htc_packet: pointer to htc packet
2634  *
2635  * Return: none
2636  */
2637 static void wmi_control_dbr_rx(void *ctx, HTC_PACKET *htc_packet)
2638 {
2639 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2640 	struct wmi_unified *wmi_handle;
2641 	wmi_buf_t evt_buf;
2642 
2643 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2644 	wmi_handle = soc->wmi_pdev[0];
2645 
2646 	if (!wmi_handle) {
2647 		wmi_err("unable to get wmi_handle for dbr event endpoint id:%d",
2648 			htc_packet->Endpoint);
2649 		qdf_nbuf_free(evt_buf);
2650 		return;
2651 	}
2652 
2653 	wmi_process_control_rx(wmi_handle, evt_buf);
2654 }
2655 #endif
2656 
2657 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
2658 QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle,
2659 					 wmi_buf_t buf, uint32_t buflen,
2660 					 uint32_t cmd_id)
2661 {
2662 	QDF_STATUS status;
2663 	int32_t ret;
2664 
2665 	if (!qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR))) {
2666 		wmi_err("Failed to send cmd %x, no memory", cmd_id);
2667 		return QDF_STATUS_E_NOMEM;
2668 	}
2669 
2670 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2671 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2672 	wmi_debug("Sending WMI_CMD_ID: 0x%x over qmi", cmd_id);
2673 	status = qdf_wmi_send_recv_qmi(qdf_nbuf_data(buf),
2674 				       buflen + sizeof(WMI_CMD_HDR),
2675 				       wmi_handle,
2676 				       wmi_process_qmi_fw_event);
2677 	if (QDF_IS_STATUS_ERROR(status)) {
2678 		qdf_nbuf_pull_head(buf, sizeof(WMI_CMD_HDR));
2679 		wmi_warn("WMI send on QMI failed. Retrying WMI on HTC");
2680 	} else {
2681 		ret = qdf_atomic_inc_return(&wmi_handle->num_stats_over_qmi);
2682 		wmi_debug("num stats over qmi: %d", ret);
2683 		wmi_buf_free(buf);
2684 	}
2685 
2686 	return status;
2687 }
2688 
2689 static int __wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2690 {
2691 	struct wmi_unified *wmi_handle = wmi_cb_ctx;
2692 	wmi_buf_t evt_buf;
2693 	uint32_t evt_id;
2694 
2695 	if (!wmi_handle || !buf || !len) {
2696 		wmi_err_rl("%s is invalid", !wmi_handle ?
2697 				"wmi_handle" : !buf ? "buf" : "length");
2698 		return -EINVAL;
2699 	}
2700 
2701 	evt_buf = wmi_buf_alloc(wmi_handle, len);
2702 	if (!evt_buf)
2703 		return -ENOMEM;
2704 
2705 	qdf_mem_copy(qdf_nbuf_data(evt_buf), buf, len);
2706 	evt_id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2707 	wmi_debug("Received WMI_EVT_ID: 0x%x over qmi", evt_id);
2708 	wmi_process_control_rx(wmi_handle, evt_buf);
2709 
2710 	return 0;
2711 }
2712 
2713 int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2714 {
2715 	struct qdf_op_sync *op_sync;
2716 	int ret;
2717 
2718 	if (qdf_op_protect(&op_sync))
2719 		return -EINVAL;
2720 	ret = __wmi_process_qmi_fw_event(wmi_cb_ctx, buf, len);
2721 	qdf_op_unprotect(op_sync);
2722 
2723 	return ret;
2724 }
2725 #endif
2726 
2727 void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2728 {
2729 	__wmi_control_rx(wmi_handle, evt_buf);
2730 }
2731 
2732 void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2733 {
2734 	uint32_t id;
2735 	uint8_t *data;
2736 	uint32_t len;
2737 	void *wmi_cmd_struct_ptr = NULL;
2738 #ifndef WMI_NON_TLV_SUPPORT
2739 	int tlv_ok_status = 0;
2740 #endif
2741 	uint32_t idx = 0;
2742 	struct wmi_raw_event_buffer ev_buf;
2743 	enum wmi_rx_buff_type ev_buff_type;
2744 
2745 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2746 
2747 	wmi_ext_dbg_msg_event_record(wmi_handle, qdf_nbuf_data(evt_buf),
2748 				     qdf_nbuf_len(evt_buf));
2749 
2750 	if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
2751 		goto end;
2752 
2753 	data = qdf_nbuf_data(evt_buf);
2754 	len = qdf_nbuf_len(evt_buf);
2755 
2756 #ifndef WMI_NON_TLV_SUPPORT
2757 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2758 		/* Validate and pad(if necessary) the TLVs */
2759 		tlv_ok_status =
2760 			wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle,
2761 							data, len, id,
2762 							&wmi_cmd_struct_ptr);
2763 		if (tlv_ok_status != 0) {
2764 			QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2765 				  "%s: Error: id=0x%x, wmitlv check status=%d",
2766 				  __func__, id, tlv_ok_status);
2767 			goto end;
2768 		}
2769 	}
2770 #endif
2771 
2772 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2773 	if (idx == A_ERROR) {
2774 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2775 		   "%s : event handler is not registered: event id 0x%x",
2776 			__func__, id);
2777 		goto end;
2778 	}
2779 #ifdef WMI_INTERFACE_EVENT_LOGGING
2780 	if (wmi_handle->log_info.wmi_logging_enable) {
2781 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2782 		/* Exclude 4 bytes of TLV header */
2783 		if (wmi_handle->ops->is_diag_event(id)) {
2784 			/*
2785 			 * skip diag event logging in WMI event buffer
2786 			 * as its already logged in WMI RX event buffer
2787 			 */
2788 		} else if (wmi_handle->ops->is_management_record(id)) {
2789 			/*
2790 			 * skip wmi mgmt event logging in WMI event buffer
2791 			 * as its already logged in WMI RX event buffer
2792 			 */
2793 		} else {
2794 			uint8_t *tmpbuf = (uint8_t *)data +
2795 					wmi_handle->soc->buf_offset_event;
2796 
2797 			WMI_EVENT_RECORD(wmi_handle, id, tmpbuf);
2798 			wmi_specific_evt_record(wmi_handle, id, tmpbuf);
2799 		}
2800 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2801 	}
2802 #endif
2803 	/* Call the WMI registered event handler */
2804 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2805 		ev_buff_type = wmi_handle->ctx[idx].buff_type;
2806 		if (ev_buff_type == WMI_RX_PROCESSED_BUFF) {
2807 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2808 				wmi_cmd_struct_ptr, len);
2809 		} else if (ev_buff_type == WMI_RX_RAW_BUFF) {
2810 			ev_buf.evt_raw_buf = data;
2811 			ev_buf.evt_processed_buf = wmi_cmd_struct_ptr;
2812 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2813 							(void *)&ev_buf, len);
2814 		}
2815 	} else {
2816 		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2817 						data, len);
2818 	}
2819 
2820 end:
2821 	/* Free event buffer and allocated event tlv */
2822 #ifndef WMI_NON_TLV_SUPPORT
2823 	if (wmi_handle->target_type == WMI_TLV_TARGET)
2824 		wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr);
2825 #endif
2826 
2827 	qdf_nbuf_free(evt_buf);
2828 
2829 }
2830 
2831 #define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */
2832 
2833 static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id)
2834 {
2835 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2836 		  "%s: WLAN_BUG_RCA: Message type %x has exceeded its allotted time of %ds",
2837 		  __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000);
2838 }
2839 
2840 #ifdef CONFIG_SLUB_DEBUG_ON
2841 static void wmi_workqueue_watchdog_bite(void *arg)
2842 {
2843 	struct wmi_wq_dbg_info *info = arg;
2844 
2845 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
2846 	qdf_print_thread_trace(info->task);
2847 
2848 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2849 		  "%s: Going down for WMI WQ Watchdog Bite!", __func__);
2850 	QDF_BUG(0);
2851 }
2852 #else
2853 static inline void wmi_workqueue_watchdog_bite(void *arg)
2854 {
2855 	struct wmi_wq_dbg_info *info = arg;
2856 
2857 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
2858 
2859 	qdf_print_thread_trace(info->task);
2860 }
2861 #endif
2862 
2863 /**
2864  * wmi_rx_event_work() - process rx event in rx work queue context
2865  * @arg: opaque pointer to wmi handle
2866  *
2867  * This function processes any fw event, serializing it through the rx worker thread.
2868  *
2869  * Return: none
2870  */
2871 static void wmi_rx_event_work(void *arg)
2872 {
2873 	wmi_buf_t buf;
2874 	struct wmi_unified *wmi = arg;
2875 	qdf_timer_t wd_timer;
2876 	struct wmi_wq_dbg_info info;
2877 
2878 	/* initialize WMI workqueue watchdog timer */
2879 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
2880 			&info, QDF_TIMER_TYPE_SW);
2881 	qdf_spin_lock_bh(&wmi->eventq_lock);
2882 	buf = qdf_nbuf_queue_remove(&wmi->event_queue);
2883 	qdf_spin_unlock_bh(&wmi->eventq_lock);
2884 	while (buf) {
2885 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
2886 		info.wd_msg_type_id =
2887 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
2888 		info.wmi_wq = wmi->wmi_rx_work_queue;
2889 		info.task = qdf_get_current_task();
2890 		__wmi_control_rx(wmi, buf);
2891 		qdf_timer_stop(&wd_timer);
2892 		qdf_spin_lock_bh(&wmi->eventq_lock);
2893 		buf = qdf_nbuf_queue_remove(&wmi->event_queue);
2894 		qdf_spin_unlock_bh(&wmi->eventq_lock);
2895 	}
2896 	qdf_timer_free(&wd_timer);
2897 }
2898 
2899 /**
2900  * wmi_rx_diag_event_work() - process rx diag event in work queue context
2901  * @arg: opaque pointer to wmi handle
2902  *
2903  * This function processes fw diag events, serializing them through the rx worker thread.
2904  *
2905  * Return: none
2906  */
2907 static void wmi_rx_diag_event_work(void *arg)
2908 {
2909 	wmi_buf_t buf;
2910 	struct wmi_unified *wmi = arg;
2911 	qdf_timer_t wd_timer;
2912 	struct wmi_wq_dbg_info info;
2913 	uint32_t diag_event_process_count = 0;
2914 
2915 	if (!wmi) {
2916 		wmi_err("Invalid WMI handle");
2917 		return;
2918 	}
2919 
2920 	/* initialize WMI workqueue watchdog timer */
2921 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
2922 		       &info, QDF_TIMER_TYPE_SW);
2923 	qdf_spin_lock_bh(&wmi->diag_eventq_lock);
2924 	buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
2925 	qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
2926 	while (buf) {
2927 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
2928 		info.wd_msg_type_id =
2929 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
2930 		info.wmi_wq = NULL;
2931 		info.task = qdf_get_current_task();
2932 		__wmi_control_rx(wmi, buf);
2933 		qdf_timer_stop(&wd_timer);
2934 
2935 		if (diag_event_process_count++ >
2936 		    RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT) {
2937 			qdf_queue_work(0, wmi->wmi_rx_diag_work_queue,
2938 				       &wmi->rx_diag_event_work);
2939 			break;
2940 		}
2941 
2942 		qdf_spin_lock_bh(&wmi->diag_eventq_lock);
2943 		buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
2944 		qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
2945 	}
2946 	qdf_timer_free(&wd_timer);
2947 }
2948 
2949 #ifdef FEATURE_RUNTIME_PM
2950 /**
2951  * wmi_runtime_pm_init() - initialize runtime pm wmi variables
2952  * @wmi_handle: wmi context
2953  */
2954 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
2955 {
2956 	qdf_atomic_init(&wmi_handle->runtime_pm_inprogress);
2957 }
2958 
2959 void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val)
2960 {
2961 	qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val);
2962 }
2963 
2964 bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle)
2965 {
2966 	return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress);
2967 }
2968 #else
2969 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
2970 {
2971 }
2972 #endif
2973 
2974 void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle)
2975 {
2976 	return wmi_handle->soc;
2977 }
2978 
2979 /**
2980  * wmi_interface_logging_init() - Interface logging init
2981  * @wmi_handle: Pointer to wmi handle object
2982  * @pdev_idx: pdev index
2983  *
2984  * Return: None
2985  */
2986 #ifdef WMI_INTERFACE_EVENT_LOGGING
2987 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
2988 					      uint32_t pdev_idx)
2989 {
2990 	if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) {
2991 		qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
2992 		wmi_debugfs_init(wmi_handle, pdev_idx);
2993 	}
2994 }
2995 #else
2996 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
2997 					      uint32_t pdev_idx)
2998 {
2999 }
3000 #endif
3001 
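/**
 * wmi_initialize_worker_context() - create the rx event and rx diag event
 * work queues, along with their locks and nbuf queues
 * @wmi_handle: handle to wmi
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_RESOURCES if a work
 * queue could not be allocated
 */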
3002 static QDF_STATUS wmi_initialize_worker_context(struct wmi_unified *wmi_handle)
3003 {
3004 	wmi_handle->wmi_rx_work_queue =
3005 		qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue");
3006 	if (!wmi_handle->wmi_rx_work_queue) {
3007 		wmi_err("failed to create wmi_rx_event_work_queue");
3008 		return QDF_STATUS_E_RESOURCES;
3009 	}
3010 
3011 	qdf_spinlock_create(&wmi_handle->eventq_lock);
3012 	qdf_nbuf_queue_init(&wmi_handle->event_queue);
3013 	qdf_create_work(0, &wmi_handle->rx_event_work,
3014 			wmi_rx_event_work, wmi_handle);
3015 
3016 	wmi_handle->wmi_rx_diag_work_queue =
3017 		qdf_alloc_unbound_workqueue("wmi_rx_diag_event_work_queue");
3018 	if (!wmi_handle->wmi_rx_diag_work_queue) {
3019 		wmi_err("failed to create wmi_rx_diag_event_work_queue");
3020 		return QDF_STATUS_E_RESOURCES;
3021 	}
3022 	qdf_spinlock_create(&wmi_handle->diag_eventq_lock);
3023 	qdf_nbuf_queue_init(&wmi_handle->diag_event_queue);
3024 	qdf_create_work(0, &wmi_handle->rx_diag_event_work,
3025 			wmi_rx_diag_event_work, wmi_handle);
3026 	wmi_handle->wmi_rx_diag_events_dropped = 0;
3027 
3028 	return QDF_STATUS_SUCCESS;
3029 }
3030 
3031 void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx)
3032 {
3033 	struct wmi_unified *wmi_handle;
3034 	QDF_STATUS status;
3035 
3036 	if (pdev_idx >= WMI_MAX_RADIOS)
3037 		return NULL;
3038 
3039 	if (!soc->wmi_pdev[pdev_idx]) {
3040 		wmi_handle =
3041 			(struct wmi_unified *) qdf_mem_malloc(
3042 					sizeof(struct wmi_unified));
3043 		if (!wmi_handle)
3044 			return NULL;
3045 
3046 		status = wmi_initialize_worker_context(wmi_handle);
3047 		if (QDF_IS_STATUS_ERROR(status))
3048 			goto error;
3049 
3050 		wmi_handle->scn_handle = soc->scn_handle;
3051 		wmi_handle->event_id = soc->event_id;
3052 		wmi_handle->event_handler = soc->event_handler;
3053 		wmi_handle->ctx = soc->ctx;
3054 		wmi_handle->ops = soc->ops;
3055 		wmi_handle->wmi_events = soc->wmi_events;
3056 		wmi_handle->services = soc->services;
3057 		wmi_handle->soc = soc;
3058 		wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3059 		wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3060 		wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3061 		wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3062 		wmi_interface_logging_init(wmi_handle, pdev_idx);
3063 		qdf_atomic_init(&wmi_handle->pending_cmds);
3064 		qdf_atomic_init(&wmi_handle->is_target_suspended);
3065 		wmi_handle->target_type = soc->target_type;
3066 		wmi_handle->wmi_max_cmds = soc->wmi_max_cmds;
3067 
3068 		wmi_interface_sequence_init(wmi_handle);
3069 		if (wmi_ext_dbgfs_init(wmi_handle, pdev_idx) !=
3070 		    QDF_STATUS_SUCCESS)
3071 			wmi_err("Failed to initialize wmi extended debugfs");
3072 
3073 		soc->wmi_pdev[pdev_idx] = wmi_handle;
3074 	} else
3075 		wmi_handle = soc->wmi_pdev[pdev_idx];
3076 
3077 	wmi_handle->wmi_stopinprogress = 0;
3078 	wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx];
3079 	wmi_handle->htc_handle = soc->htc_handle;
3080 	wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx];
3081 	wmi_handle->tag_crash_inject = false;
3082 	wmi_interface_sequence_reset(wmi_handle);
3083 
3084 	return wmi_handle;
3085 
3086 error:
3087 	qdf_mem_free(wmi_handle);
3088 
3089 	return NULL;
3090 }
3091 qdf_export_symbol(wmi_unified_get_pdev_handle);
3092 
3093 static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t);
3094 
3095 void wmi_unified_register_module(enum wmi_target_type target_type,
3096 			void (*wmi_attach)(wmi_unified_t wmi_handle))
3097 {
3098 	if (target_type < WMI_MAX_TARGET_TYPE)
3099 		wmi_attach_register[target_type] = wmi_attach;
3100 
3101 	return;
3102 }
3103 qdf_export_symbol(wmi_unified_register_module);
3104 
3105 /**
3106  * wmi_wbuff_register() - register wmi with wbuff
3107  * @wmi_handle: handle to wmi
3108  *
3109  * Return: void
3110  */
3111 static void wmi_wbuff_register(struct wmi_unified *wmi_handle)
3112 {
3113 	struct wbuff_alloc_request wbuff_alloc[4];
3114 
3115 	wbuff_alloc[0].slot = WBUFF_POOL_0;
3116 	wbuff_alloc[0].size = WMI_WBUFF_POOL_0_SIZE;
3117 	wbuff_alloc[1].slot = WBUFF_POOL_1;
3118 	wbuff_alloc[1].size = WMI_WBUFF_POOL_1_SIZE;
3119 	wbuff_alloc[2].slot = WBUFF_POOL_2;
3120 	wbuff_alloc[2].size = WMI_WBUFF_POOL_2_SIZE;
3121 	wbuff_alloc[3].slot = WBUFF_POOL_3;
3122 	wbuff_alloc[3].size = WMI_WBUFF_POOL_3_SIZE;
3123 
3124 	wmi_handle->wbuff_handle = wbuff_module_register(wbuff_alloc, 4,
3125 							 WMI_MIN_HEAD_ROOM, 4);
3126 }
3127 
3128 /**
3129  * wmi_wbuff_deregister() - deregister wmi with wbuff
3130  * @wmi_handle: handle to wmi
3131  *
3132  * Return: void
3133  */
3134 static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle)
3135 {
3136 	wbuff_module_deregister(wmi_handle->wbuff_handle);
3137 	wmi_handle->wbuff_handle = NULL;
3138 }
3139 
3140 void *wmi_unified_attach(void *scn_handle,
3141 			 struct wmi_unified_attach_params *param)
3142 {
3143 	struct wmi_unified *wmi_handle;
3144 	struct wmi_soc *soc;
3145 	QDF_STATUS status;
3146 
3147 	soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc));
3148 	if (!soc)
3149 		return NULL;
3150 
3151 	wmi_handle =
3152 		(struct wmi_unified *) qdf_mem_malloc(
3153 			sizeof(struct wmi_unified));
3154 	if (!wmi_handle) {
3155 		qdf_mem_free(soc);
3156 		return NULL;
3157 	}
3158 
3159 	status = wmi_initialize_worker_context(wmi_handle);
3160 	if (QDF_IS_STATUS_ERROR(status))
3161 		goto error;
3162 
3163 	wmi_handle->soc = soc;
3164 	wmi_handle->soc->soc_idx = param->soc_id;
3165 	wmi_handle->soc->is_async_ep = param->is_async_ep;
3166 	wmi_handle->event_id = soc->event_id;
3167 	wmi_handle->event_handler = soc->event_handler;
3168 	wmi_handle->ctx = soc->ctx;
3169 	wmi_handle->wmi_events = soc->wmi_events;
3170 	wmi_handle->services = soc->services;
3171 	wmi_handle->scn_handle = scn_handle;
3172 	wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3173 	wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3174 	wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3175 	wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3176 	soc->scn_handle = scn_handle;
3177 	wmi_handle->target_type = param->target_type;
3178 	soc->target_type = param->target_type;
3179 
3180 	if (param->target_type >= WMI_MAX_TARGET_TYPE)
3181 		goto error;
3182 
3183 	if (wmi_attach_register[param->target_type]) {
3184 		wmi_attach_register[param->target_type](wmi_handle);
3185 	} else {
3186 		wmi_err("wmi attach is not registered");
3187 		goto error;
3188 	}
3189 
3190 	qdf_atomic_init(&wmi_handle->pending_cmds);
3191 	qdf_atomic_init(&wmi_handle->is_target_suspended);
3192 	qdf_atomic_init(&wmi_handle->is_target_suspend_acked);
3193 	qdf_atomic_init(&wmi_handle->num_stats_over_qmi);
3194 	wmi_runtime_pm_init(wmi_handle);
3195 	wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0);
3196 
3197 	wmi_interface_sequence_init(wmi_handle);
3198 	/* Assign target cookie capability */
3199 	wmi_handle->use_cookie = param->use_cookie;
3200 	wmi_handle->osdev = param->osdev;
3201 	wmi_handle->wmi_stopinprogress = 0;
3202 	wmi_handle->wmi_max_cmds = param->max_commands;
3203 	soc->wmi_max_cmds = param->max_commands;
3204 	/* Increase the ref count once refcount infra is present */
3205 	soc->wmi_psoc = param->psoc;
3206 	qdf_spinlock_create(&soc->ctx_lock);
3207 	soc->ops = wmi_handle->ops;
3208 	soc->wmi_pdev[0] = wmi_handle;
3209 	if (wmi_ext_dbgfs_init(wmi_handle, 0) != QDF_STATUS_SUCCESS)
3210 		wmi_err("Failed to initialize wmi extended debugfs");
3211 
3212 	wmi_wbuff_register(wmi_handle);
3213 
3214 	wmi_hang_event_notifier_register(wmi_handle);
3215 
3216 	wmi_minidump_attach(wmi_handle);
3217 
3218 	return wmi_handle;
3219 
3220 error:
3221 	qdf_mem_free(soc);
3222 	qdf_mem_free(wmi_handle);
3223 
3224 	return NULL;
3225 }
3226 
3227 void wmi_unified_detach(struct wmi_unified *wmi_handle)
3228 {
3229 	wmi_buf_t buf;
3230 	struct wmi_soc *soc;
3231 	uint8_t i;
3232 
3233 	wmi_minidump_detach(wmi_handle);
3234 
3235 	wmi_hang_event_notifier_unregister();
3236 
3237 	wmi_wbuff_deregister(wmi_handle);
3238 
3239 	soc = wmi_handle->soc;
3240 	for (i = 0; i < WMI_MAX_RADIOS; i++) {
3241 		if (soc->wmi_pdev[i]) {
3242 			qdf_flush_workqueue(0,
3243 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3244 			qdf_destroy_workqueue(0,
3245 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3246 			wmi_debugfs_remove(soc->wmi_pdev[i]);
3247 			buf = qdf_nbuf_queue_remove(
3248 					&soc->wmi_pdev[i]->event_queue);
3249 			while (buf) {
3250 				qdf_nbuf_free(buf);
3251 				buf = qdf_nbuf_queue_remove(
3252 						&soc->wmi_pdev[i]->event_queue);
3253 			}
3254 
3255 			qdf_flush_work(&soc->wmi_pdev[i]->rx_diag_event_work);
3256 			buf = qdf_nbuf_queue_remove(
3257 					&soc->wmi_pdev[i]->diag_event_queue);
3258 			while (buf) {
3259 				qdf_nbuf_free(buf);
3260 				buf = qdf_nbuf_queue_remove(
3261 					&soc->wmi_pdev[i]->diag_event_queue);
3262 			}
3263 
3264 			wmi_log_buffer_free(soc->wmi_pdev[i]);
3265 
3266 			/* Free events logs list */
3267 			if (soc->wmi_pdev[i]->events_logs_list)
3268 				qdf_mem_free(
3269 					soc->wmi_pdev[i]->events_logs_list);
3270 
3271 			qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock);
3272 			qdf_spinlock_destroy(
3273 					&soc->wmi_pdev[i]->diag_eventq_lock);
3274 
3275 			wmi_interface_sequence_deinit(soc->wmi_pdev[i]);
3276 			wmi_ext_dbgfs_deinit(soc->wmi_pdev[i]);
3277 
3278 			qdf_mem_free(soc->wmi_pdev[i]);
3279 		}
3280 	}
3281 	qdf_spinlock_destroy(&soc->ctx_lock);
3282 
3283 	if (soc->wmi_service_bitmap) {
3284 		qdf_mem_free(soc->wmi_service_bitmap);
3285 		soc->wmi_service_bitmap = NULL;
3286 	}
3287 
3288 	if (soc->wmi_ext_service_bitmap) {
3289 		qdf_mem_free(soc->wmi_ext_service_bitmap);
3290 		soc->wmi_ext_service_bitmap = NULL;
3291 	}
3292 
3293 	if (soc->wmi_ext2_service_bitmap) {
3294 		qdf_mem_free(soc->wmi_ext2_service_bitmap);
3295 		soc->wmi_ext2_service_bitmap = NULL;
3296 	}
3297 
3298 	/* Decrease the ref count once refcount infra is present */
3299 	soc->wmi_psoc = NULL;
3300 	qdf_mem_free(soc);
3301 }
3302 
3303 void
3304 wmi_unified_remove_work(struct wmi_unified *wmi_handle)
3305 {
3306 	wmi_buf_t buf;
3307 
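	/* Drain pending events from the regular WMI rx path */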
3308 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue);
3309 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
3310 	buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3311 	while (buf) {
3312 		qdf_nbuf_free(buf);
3313 		buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3314 	}
3315 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
3316 
3317 	/* Remove diag events work */
3318 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_diag_work_queue);
3319 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
3320 	buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3321 	while (buf) {
3322 		qdf_nbuf_free(buf);
3323 		buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3324 	}
3325 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
3326 }
3327 
3328 /**
3329  * wmi_htc_tx_complete() - Process HTC TX completion for a WMI command
3330  *
3331  * @ctx: handle to the WMI SoC
3332  * @htc_pkt: pointer to the completed HTC packet
3333  *
3334  * Return: none.
3335  */
3336 static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
3337 {
3338 	struct wmi_soc *soc = (struct wmi_soc *) ctx;
3339 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3340 	u_int8_t *buf_ptr;
3341 	u_int32_t len;
3342 	struct wmi_unified *wmi_handle;
3343 #ifdef WMI_INTERFACE_EVENT_LOGGING
3344 	struct wmi_debug_log_info *log_info;
3345 	uint32_t cmd_id;
3346 	uint8_t *offset_ptr;
3347 	qdf_dma_addr_t dma_addr;
3348 	uint64_t phy_addr;
3349 #endif
3350 
3351 	ASSERT(wmi_cmd_buf);
3352 	wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint);
3353 	if (!wmi_handle) {
3354 		wmi_err("Unable to get wmi handle");
3355 		QDF_ASSERT(0);
3356 		return;
3357 	}
3358 	buf_ptr = (u_int8_t *)wmi_buf_data(wmi_cmd_buf);
3359 #ifdef WMI_INTERFACE_EVENT_LOGGING
3360 	log_info = &wmi_handle->log_info;
3361 
3362 	if (log_info->wmi_logging_enable) {
3363 		cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf),
3364 				WMI_CMD_HDR, COMMANDID);
3365 
3366 		dma_addr = QDF_NBUF_CB_PADDR(wmi_cmd_buf);
3367 		phy_addr = qdf_mem_virt_to_phys(qdf_nbuf_data(wmi_cmd_buf));
3368 
3369 		qdf_spin_lock_bh(&log_info->wmi_record_lock);
3370 		/* Record 16 bytes of WMI cmd tx complete data
3371 		 * - exclude TLV and WMI headers
3372 		 */
3373 		offset_ptr = buf_ptr + wmi_handle->soc->buf_offset_command;
3374 		if (wmi_handle->ops->is_management_record(cmd_id)) {
3375 			WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3376 						       offset_ptr);
3377 		} else {
3378 			WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3379 						  offset_ptr, dma_addr,
3380 						  phy_addr);
3381 		}
3382 
3383 		qdf_spin_unlock_bh(&log_info->wmi_record_lock);
3384 	}
3385 #endif
3386 
3387 	wmi_interface_sequence_check(wmi_handle, wmi_cmd_buf);
3388 
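	/* Scrub the command payload, then free the buffer and the HTC packet */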
3389 	len = qdf_nbuf_len(wmi_cmd_buf);
3390 	qdf_mem_zero(buf_ptr, len);
3391 	wmi_buf_free(wmi_cmd_buf);
3392 	qdf_mem_free(htc_pkt);
3393 	qdf_atomic_dec(&wmi_handle->pending_cmds);
3394 }
3395 
3396 #ifdef FEATURE_RUNTIME_PM
3397 /**
3398  * wmi_htc_log_pkt() - Log the WMI command carried by an HTC packet
3399  *
3400  * @ctx: handle of WMI context (unused here)
3401  * @htc_pkt: handle of HTC packet
3402  *
3403  * Return: none
3404  */
3405 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3406 {
3407 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3408 	uint32_t cmd_id;
3409 
3410 	ASSERT(wmi_cmd_buf);
3411 	cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), WMI_CMD_HDR,
3412 			       COMMANDID);
3413 
3414 	wmi_debug("WMI command from HTC packet: %s, ID: %d",
3415 		 wmi_id_to_name(cmd_id), cmd_id);
3416 }
3417 #else
3418 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3419 {
3420 }
3421 #endif
3422 
3423 /**
3424  * wmi_connect_pdev_htc_service() - WMI API to connect a pdev to the HTC service
3425  * @soc: handle to WMI SoC
3426  * @pdev_idx: Pdev index
3427  *
3428  * Return: QDF_STATUS
3429  */
3430 static QDF_STATUS wmi_connect_pdev_htc_service(struct wmi_soc *soc,
3431 					       uint32_t pdev_idx)
3432 {
3433 	QDF_STATUS status;
3434 	struct htc_service_connect_resp response;
3435 	struct htc_service_connect_req connect;
3436 
3437 	OS_MEMZERO(&connect, sizeof(connect));
3438 	OS_MEMZERO(&response, sizeof(response));
3439 
3440 	/* meta data is unused for now */
3441 	connect.pMetaData = NULL;
3442 	connect.MetaDataLength = 0;
3443 	/* these fields are the same for all service endpoints */
3444 	connect.EpCallbacks.pContext = soc;
3445 	connect.EpCallbacks.EpTxCompleteMultiple =
3446 		NULL /* Control path completion ar6000_tx_complete */;
3447 	connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */;
3448 	connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */;
3449 	connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */;
3450 	connect.EpCallbacks.EpTxComplete =
3451 		wmi_htc_tx_complete /* Control path completion */;
3452 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3453 
3454 	/* connect to control service */
3455 	connect.service_id = soc->svc_ids[pdev_idx];
3456 	status = htc_connect_service(soc->htc_handle, &connect, &response);
3457 
3458 	if (QDF_IS_STATUS_ERROR(status)) {
3459 		wmi_err("Failed to connect to WMI CONTROL service status:%d",
3460 			 status);
3461 		return status;
3462 	}
3463 
3464 	if (soc->is_async_ep)
3465 		htc_set_async_ep(soc->htc_handle, response.Endpoint, true);
3466 
3467 	soc->wmi_endpoint_id[pdev_idx] = response.Endpoint;
3468 	soc->max_msg_len[pdev_idx] = response.MaxMsgLength;
3469 
3470 	return QDF_STATUS_SUCCESS;
3471 }
3472 
3473 QDF_STATUS
3474 wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle,
3475 				HTC_HANDLE htc_handle)
3476 {
3477 	uint32_t i;
3478 	uint8_t wmi_ep_count;
	QDF_STATUS status;
3479 
3480 	wmi_handle->soc->htc_handle = htc_handle;
3481 
3482 	wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle);
3483 	if (wmi_ep_count > WMI_MAX_RADIOS)
3484 		return QDF_STATUS_E_FAULT;
3485 
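	/* Connect one WMI control service endpoint per reported pdev */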
3486 	for (i = 0; i < wmi_ep_count; i++) {
3487 		status = wmi_connect_pdev_htc_service(wmi_handle->soc, i);
		if (QDF_IS_STATUS_ERROR(status))
			return status;
	}
3488 
3489 	wmi_handle->htc_handle = htc_handle;
3490 	wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0];
3491 	wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0];
3492 
3493 	return QDF_STATUS_SUCCESS;
3494 }
3495 
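
/*
 * Illustrative call flow (a sketch, not a prescription): the HTC handle
 * must exist before the WMI endpoints can be connected, e.g.
 *
 *	wmi_handle = wmi_unified_attach(scn_handle, &attach_params);
 *	...
 *	status = wmi_unified_connect_htc_service(wmi_handle, htc_handle);
 *
 * Per-pdev handles later derived from the shared wmi_soc reuse the
 * endpoint IDs and max message lengths recorded here.
 */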
3496 #if defined(WLAN_FEATURE_WMI_DIAG_OVER_CE7) || \
3497 	defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
3498 QDF_STATUS wmi_diag_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
3499 					     HTC_HANDLE htc_handle)
3500 {
3501 	QDF_STATUS status;
3502 	struct htc_service_connect_resp response = {0};
3503 	struct htc_service_connect_req connect = {0};
3504 
3505 	/* meta data is unused for now */
3506 	connect.pMetaData = NULL;
3507 	connect.MetaDataLength = 0;
3508 	connect.EpCallbacks.pContext = wmi_handle->soc;
3509 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3510 	connect.EpCallbacks.EpRecv = wmi_control_diag_rx /* wmi diag rx */;
3511 	connect.EpCallbacks.EpRecvRefill = NULL;
3512 	connect.EpCallbacks.EpSendFull = NULL;
3513 	connect.EpCallbacks.EpTxComplete = NULL;
3514 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3515 
3516 	/* connect to wmi diag service */
3517 	connect.service_id = WMI_CONTROL_DIAG_SVC;
3518 	status = htc_connect_service(htc_handle, &connect, &response);
3519 
3520 	if (QDF_IS_STATUS_ERROR(status)) {
3521 		wmi_err("Failed to connect to WMI DIAG service status:%d",
3522 			status);
3523 		return status;
3524 	}
3525 
3526 	if (wmi_handle->soc->is_async_ep)
3527 		htc_set_async_ep(htc_handle, response.Endpoint, true);
3528 
3529 	wmi_handle->soc->wmi_diag_endpoint_id = response.Endpoint;
3530 
3531 	return QDF_STATUS_SUCCESS;
3532 }
3533 #endif
3534 
3535 #if defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
3536 QDF_STATUS wmi_dbr_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
3537 					    HTC_HANDLE htc_handle)
3538 {
3539 	QDF_STATUS status;
3540 	struct htc_service_connect_resp response = {0};
3541 	struct htc_service_connect_req connect = {0};
3542 
3543 	/* meta data is unused for now */
3544 	connect.pMetaData = NULL;
3545 	connect.MetaDataLength = 0;
3546 	connect.EpCallbacks.pContext = wmi_handle->soc;
3547 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3548 	connect.EpCallbacks.EpRecv = wmi_control_dbr_rx /* wmi dbr rx */;
3549 	connect.EpCallbacks.EpRecvRefill = NULL;
3550 	connect.EpCallbacks.EpSendFull = NULL;
3551 	connect.EpCallbacks.EpTxComplete = NULL;
3552 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3553 
3554 	/* connect to wmi dbr service */
3555 	connect.service_id = WMI_CONTROL_DBR_SVC;
3556 	status = htc_connect_service(htc_handle, &connect, &response);
3557 
3558 	if (QDF_IS_STATUS_ERROR(status)) {
3559 		wmi_err("Failed to connect to WMI DBR service status:%d",
3560 			status);
3561 		return status;
3562 	}
3563 
3564 	if (wmi_handle->soc->is_async_ep)
3565 		htc_set_async_ep(htc_handle, response.Endpoint, true);
3566 
3567 	wmi_handle->soc->wmi_dbr_endpoint_id = response.Endpoint;
3568 
3569 	return QDF_STATUS_SUCCESS;
3570 }
3571 #endif
3572 
3573 int wmi_get_host_credits(wmi_unified_t wmi_handle)
3574 {
3575 	int host_credits = 0;
3576 
3577 	htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle,
3578 						 &host_credits);
3579 	return host_credits;
3580 }
3581 
3582 int wmi_get_pending_cmds(wmi_unified_t wmi_handle)
3583 {
3584 	return qdf_atomic_read(&wmi_handle->pending_cmds);
3585 }
3586 
3587 void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val)
3588 {
3589 	qdf_atomic_set(&wmi_handle->is_target_suspended, val);
3590 }
3591 
3592 void wmi_set_target_suspend_acked(wmi_unified_t wmi_handle, A_BOOL val)
3593 {
3594 	qdf_atomic_set(&wmi_handle->is_target_suspend_acked, val);
3595 	qdf_atomic_set(&wmi_handle->num_stats_over_qmi, 0);
3596 }
3597 
3598 bool wmi_is_target_suspended(struct wmi_unified *wmi_handle)
3599 {
3600 	return qdf_atomic_read(&wmi_handle->is_target_suspended);
3601 }
3602 qdf_export_symbol(wmi_is_target_suspended);
3603 
3604 bool wmi_is_target_suspend_acked(struct wmi_unified *wmi_handle)
3605 {
3606 	return qdf_atomic_read(&wmi_handle->is_target_suspend_acked);
3607 }
3608 qdf_export_symbol(wmi_is_target_suspend_acked);
3609 
3610 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
3611 void wmi_set_qmi_stats(wmi_unified_t wmi_handle, bool val)
3612 {
3613 	wmi_handle->is_qmi_stats_enabled = val;
3614 }
3615 
3616 bool wmi_is_qmi_stats_enabled(struct wmi_unified *wmi_handle)
3617 {
3618 	return wmi_handle->is_qmi_stats_enabled;
3619 }
3620 #endif
3621 
3622 void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag)
3623 {
3624 	wmi_handle->tag_crash_inject = flag;
3625 }
3626 
3627 void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val)
3628 {
3629 	qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val);
3630 }
3631 
3632 void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val)
3633 {
3634 	wmi_handle->tgt_force_assert_enable = val;
3635 }
3636 
3637 int
3638 wmi_stop(wmi_unified_t wmi_handle)
3639 {
3640 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3641 		  "WMI Stop");
3642 	wmi_handle->wmi_stopinprogress = 1;
3643 	return 0;
3644 }
3645 
3646 int
3647 wmi_start(wmi_unified_t wmi_handle)
3648 {
3649 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3650 		  "WMI Start");
3651 	wmi_handle->wmi_stopinprogress = 0;
3652 	return 0;
3653 }
3654 
3655 bool
3656 wmi_is_blocked(wmi_unified_t wmi_handle)
3657 {
3658 	return wmi_handle->wmi_stopinprogress != 0;
3659 }
3660 
3661 void
3662 wmi_flush_endpoint(wmi_unified_t wmi_handle)
3663 {
3664 	htc_flush_endpoint(wmi_handle->htc_handle,
3665 		wmi_handle->wmi_endpoint_id, 0);
3666 }
3667 qdf_export_symbol(wmi_flush_endpoint);
3668 
3669 void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle,
3670 				   uint32_t *pdev_id_map,
3671 				   uint8_t size)
3672 {
3673 	if (wmi_handle->target_type == WMI_TLV_TARGET)
3674 		wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle,
3675 							       pdev_id_map,
3676 							       size);
3677 }
3678 
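/**
 * __wmi_validate_handle() - check that a WMI handle is non-NULL
 * @wmi_handle: handle to WMI
 * @func: name of the calling function, used in the error log
 *
 * Callers typically reach this through a wrapper macro that supplies
 * __func__ rather than invoking it directly.
 *
 * Return: 0 if the handle is valid, -EINVAL otherwise
 */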
3679 int __wmi_validate_handle(wmi_unified_t wmi_handle, const char *func)
3680 {
3681 	if (!wmi_handle) {
3682 		wmi_err("Invalid WMI handle (via %s)", func);
3683 		return -EINVAL;
3684 	}
3685 
3686 	return 0;
3687 }
3688