xref: /wlan-dirver/qca-wifi-host-cmn/wmi/src/wmi_unified.c (revision e9dba9646bfd1954b96d80bae0adc757244cbde8)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Host WMI unified implementation
21  */
22 #include "htc_api.h"
24 #include "wmi_unified_priv.h"
25 #include "wmi_unified_api.h"
26 #include "qdf_module.h"
27 #include "qdf_platform.h"
28 #ifdef WMI_EXT_DBG
29 #include "qdf_list.h"
30 #include "qdf_atomic.h"
31 #endif
32 
33 #ifndef WMI_NON_TLV_SUPPORT
34 #include "wmi_tlv_helper.h"
35 #endif
36 
37 #include <linux/debugfs.h>
38 #include <target_if.h>
39 #include <qdf_debugfs.h>
40 #include "wmi_filtered_logging.h"
41 #include <wmi_hang_event.h>
42 
43 /* This check for CONFIG_WIN was temporarily added due to a redeclaration
44 compilation error in MCL. The error is caused by the inclusion of wmi.h in
45 wmi_unified_api.h, which gets included here through ol_if_athvar.h. Once wmi.h
46 is eventually removed from wmi_unified_api.h after cleanup, WMI_CMD_HDR will
47 need to be defined here. */
48 /* Copied from wmi.h */
49 #undef MS
50 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
51 #undef SM
52 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
53 #undef WO
54 #define WO(_f)      ((_f##_OFFSET) >> 2)
55 
56 #undef GET_FIELD
57 #define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f)
58 #undef SET_FIELD
59 #define SET_FIELD(_addr, _f, _val)  \
60 	    (*((uint32_t *)(_addr) + WO(_f)) = \
61 		(*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f))
62 
63 #define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \
64 	    GET_FIELD(_msg_buf, _msg_type ## _ ## _f)
65 
66 #define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \
67 	    SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val)
68 
69 #define WMI_EP_APASS           0x0
70 #define WMI_EP_LPASS           0x1
71 #define WMI_EP_SENSOR          0x2
72 
73 #define WMI_INFOS_DBG_FILE_PERM (QDF_FILE_USR_READ | \
74 				 QDF_FILE_USR_WRITE | \
75 				 QDF_FILE_GRP_READ | \
76 				 QDF_FILE_OTH_READ)
77 
78 /*
79  * Control Path
80  */
81 typedef PREPACK struct {
82 	uint32_t	commandId:24,
83 			reserved:2, /* used for WMI endpoint ID */
84 			plt_priv:6; /* platform private */
85 } POSTPACK WMI_CMD_HDR;        /* used for commands and events */
86 
87 #define WMI_CMD_HDR_COMMANDID_LSB           0
88 #define WMI_CMD_HDR_COMMANDID_MASK          0x00ffffff
89 #define WMI_CMD_HDR_COMMANDID_OFFSET        0x00000000
90 #define WMI_CMD_HDR_WMI_ENDPOINTID_MASK        0x03000000
91 #define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET      24
92 #define WMI_CMD_HDR_PLT_PRIV_LSB               24
93 #define WMI_CMD_HDR_PLT_PRIV_MASK              0xff000000
94 #define WMI_CMD_HDR_PLT_PRIV_OFFSET            0x00000000
95 /* end of copy wmi.h */
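
/*
 * Illustrative sketch of the copied field macros: they operate on the 32-bit
 * word that carries WMI_CMD_HDR.  With a hypothetical scratch buffer "hdr"
 * whose first word is the header, the command id can be written and read
 * back as follows:
 *
 *	uint32_t hdr[2] = { 0 };
 *	uint32_t id;
 *
 *	WMI_SET_FIELD(hdr, WMI_CMD_HDR, COMMANDID, 0x1234);
 *	id = WMI_GET_FIELD(hdr, WMI_CMD_HDR, COMMANDID);   (id == 0x1234)
 *
 * SET_FIELD clears the field's mask bits before OR-ing in the shifted value,
 * so the remaining bits of the same word (e.g. plt_priv) are preserved.
 */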
96 
97 #define WMI_MIN_HEAD_ROOM 64
98 
99 /* WBUFF pool sizes for WMI */
100 /* Allocation of size 256 bytes */
101 #define WMI_WBUFF_POOL_0_SIZE 128
102 /* Allocation of size 512 bytes */
103 #define WMI_WBUFF_POOL_1_SIZE 16
104 /* Allocation of size 1024 bytes */
105 #define WMI_WBUFF_POOL_2_SIZE 8
106 /* Allocation of size 2048 bytes */
107 #define WMI_WBUFF_POOL_3_SIZE 8
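
/*
 * Illustrative arithmetic: with the pool counts above, the four WBUFF pools
 * together pre-allocate roughly
 *	128 * 256 + 16 * 512 + 8 * 1024 + 8 * 2048 = 65536 bytes (64 KiB)
 * of WMI buffer memory, excluding any per-buffer bookkeeping overhead.
 */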
108 
109 #ifdef WMI_INTERFACE_EVENT_LOGGING
110 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
111 /* TODO Cleanup this backported function */
112 static int wmi_bp_seq_printf(qdf_debugfs_file_t m, const char *f, ...)
113 {
114 	va_list args;
115 
116 	va_start(args, f);
117 	seq_vprintf(m, f, args);
118 	va_end(args);
119 
120 	return 0;
121 }
122 #else
123 #define wmi_bp_seq_printf(m, fmt, ...) seq_printf((m), fmt, ##__VA_ARGS__)
124 #endif
125 
126 #ifndef MAX_WMI_INSTANCES
127 #define CUSTOM_MGMT_CMD_DATA_SIZE 4
128 #endif
129 
130 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
131 /* WMI commands */
132 uint32_t g_wmi_command_buf_idx = 0;
133 struct wmi_command_debug wmi_command_log_buffer[WMI_CMD_DEBUG_MAX_ENTRY];
134 
135 /* WMI commands TX completed */
136 uint32_t g_wmi_command_tx_cmp_buf_idx = 0;
137 struct wmi_command_cmp_debug
138 	wmi_command_tx_cmp_log_buffer[WMI_CMD_CMPL_DEBUG_MAX_ENTRY];
139 
140 /* WMI events when processed */
141 uint32_t g_wmi_event_buf_idx = 0;
142 struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
143 
144 /* WMI events when queued */
145 uint32_t g_wmi_rx_event_buf_idx = 0;
146 struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
147 #endif
148 
149 #define WMI_COMMAND_RECORD(h, a, b) {					\
150 	if (wmi_cmd_log_max_entry <=					\
151 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))	\
152 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\
153 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
154 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\
155 						.command = a;		\
156 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
157 				wmi_command_log_buf_info.buf)		\
158 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\
159 			b, wmi_record_max_length);			\
160 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
161 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\
162 		time = qdf_get_log_timestamp();			\
163 	(*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++;	\
164 	h->log_info.wmi_command_log_buf_info.length++;			\
165 }
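
/*
 * Worked example of the ring behaviour shared by WMI_COMMAND_RECORD() above
 * and the other recording macros below: each record is written at
 * *p_buf_tail_idx, the tail index is then incremented, and it is reset to 0
 * once it reaches the configured maximum.  With, say,
 * wmi_cmd_log_max_entry == 4, successive WMI_COMMAND_RECORD() calls land at
 * indices 0, 1, 2, 3, 0, 1, ... while the 'length' counter keeps growing so
 * readers can tell how much of the ring has ever been filled.
 */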
166 
167 #define WMI_COMMAND_TX_CMP_RECORD(h, a, b, da, pa) {			\
168 	if (wmi_cmd_cmpl_log_max_entry <=				\
169 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\
170 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
171 				p_buf_tail_idx) = 0;			\
172 	((struct wmi_command_cmp_debug *)h->log_info.			\
173 		wmi_command_tx_cmp_log_buf_info.buf)			\
174 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
175 				p_buf_tail_idx)].			\
176 							command	= a;	\
177 	qdf_mem_copy(((struct wmi_command_cmp_debug *)h->log_info.	\
178 				wmi_command_tx_cmp_log_buf_info.buf)	\
179 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
180 			p_buf_tail_idx)].				\
181 		data, b, wmi_record_max_length);			\
182 	((struct wmi_command_cmp_debug *)h->log_info.			\
183 		wmi_command_tx_cmp_log_buf_info.buf)			\
184 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
185 				p_buf_tail_idx)].			\
186 		time = qdf_get_log_timestamp();				\
187 	((struct wmi_command_cmp_debug *)h->log_info.			\
188 		wmi_command_tx_cmp_log_buf_info.buf)			\
189 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
190 				p_buf_tail_idx)].			\
191 		dma_addr = da;						\
192 	((struct wmi_command_cmp_debug *)h->log_info.			\
193 		wmi_command_tx_cmp_log_buf_info.buf)			\
194 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
195 				p_buf_tail_idx)].			\
196 		phy_addr = pa;						\
197 	(*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\
198 	h->log_info.wmi_command_tx_cmp_log_buf_info.length++;		\
199 }
200 
201 #define WMI_EVENT_RECORD(h, a, b) {					\
202 	if (wmi_event_log_max_entry <=					\
203 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))	\
204 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\
205 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
206 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].	\
207 		event = a;						\
208 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
209 				wmi_event_log_buf_info.buf)		\
210 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\
211 		wmi_record_max_length);					\
212 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
213 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\
214 		qdf_get_log_timestamp();				\
215 	(*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++;	\
216 	h->log_info.wmi_event_log_buf_info.length++;			\
217 }
218 
219 #define WMI_RX_EVENT_RECORD(h, a, b) {					\
220 	if (wmi_event_log_max_entry <=					\
221 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\
222 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\
223 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
224 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
225 		event = a;						\
226 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
227 				wmi_rx_event_log_buf_info.buf)		\
228 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
229 			data, b, wmi_record_max_length);		\
230 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
231 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
232 		time =	qdf_get_log_timestamp();			\
233 	(*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++;	\
234 	h->log_info.wmi_rx_event_log_buf_info.length++;			\
235 }
236 
237 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
238 uint32_t g_wmi_mgmt_command_buf_idx = 0;
239 struct
240 wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_TX_DEBUG_MAX_ENTRY];
241 
242 /* wmi_mgmt commands TX completed */
243 uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0;
244 struct wmi_command_debug
245 wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY];
246 
247 /* wmi_mgmt events when received */
248 uint32_t g_wmi_mgmt_rx_event_buf_idx = 0;
249 struct wmi_event_debug
250 wmi_mgmt_rx_event_log_buffer[WMI_MGMT_RX_DEBUG_MAX_ENTRY];
251 
252 /* wmi_diag events when received */
253 uint32_t g_wmi_diag_rx_event_buf_idx = 0;
254 struct wmi_event_debug
255 wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY];
256 #endif
257 
258 #define WMI_MGMT_COMMAND_RECORD(h, a, b) {                              \
259 	if (wmi_mgmt_tx_log_max_entry <=                                   \
260 		*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \
261 		*(h->log_info.wmi_mgmt_command_log_buf_info.		\
262 				p_buf_tail_idx) = 0;			\
263 	((struct wmi_command_debug *)h->log_info.                       \
264 		 wmi_mgmt_command_log_buf_info.buf)                     \
265 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
266 			command = a;                                    \
267 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.          \
268 				wmi_mgmt_command_log_buf_info.buf)      \
269 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
270 		data, b,                                                \
271 		wmi_record_max_length);                                	\
272 	((struct wmi_command_debug *)h->log_info.                       \
273 		 wmi_mgmt_command_log_buf_info.buf)                     \
274 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
275 			time =        qdf_get_log_timestamp();          \
276 	(*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\
277 	h->log_info.wmi_mgmt_command_log_buf_info.length++;             \
278 }
279 
280 #define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) {			\
281 	if (wmi_mgmt_tx_cmpl_log_max_entry <=				\
282 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
283 			p_buf_tail_idx))				\
284 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
285 			p_buf_tail_idx) = 0;				\
286 	((struct wmi_command_debug *)h->log_info.			\
287 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
288 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
289 				p_buf_tail_idx)].command = a;		\
290 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
291 				wmi_mgmt_command_tx_cmp_log_buf_info.buf)\
292 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
293 			p_buf_tail_idx)].data, b,			\
294 			wmi_record_max_length);				\
295 	((struct wmi_command_debug *)h->log_info.			\
296 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
297 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
298 				p_buf_tail_idx)].time =			\
299 		qdf_get_log_timestamp();				\
300 	(*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.		\
301 			p_buf_tail_idx))++;				\
302 	h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++;	\
303 }
304 
305 #define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do {				\
306 	if (wmi_mgmt_rx_log_max_entry <=				\
307 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\
308 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\
309 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
310 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\
311 					.event = a;			\
312 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
313 				wmi_mgmt_event_log_buf_info.buf)	\
314 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
315 			data, b, wmi_record_max_length);		\
316 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
317 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
318 			time = qdf_get_log_timestamp();			\
319 	(*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++;	\
320 	h->log_info.wmi_mgmt_event_log_buf_info.length++;		\
321 } while (0);
322 
323 #define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do {                             \
324 	if (wmi_diag_log_max_entry <=                                   \
325 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\
326 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\
327 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
328 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\
329 					.event = a;                     \
330 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.            \
331 				wmi_diag_event_log_buf_info.buf)        \
332 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
333 			data, b, wmi_record_max_length);                \
334 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
335 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
336 			time = qdf_get_log_timestamp();                 \
337 	(*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++;  \
338 	h->log_info.wmi_diag_event_log_buf_info.length++;               \
339 } while (0);
340 
341 /* These are defined as module params so that they can be configured */
342 /* WMI Commands */
343 uint32_t wmi_cmd_log_max_entry = WMI_CMD_DEBUG_MAX_ENTRY;
344 uint32_t wmi_cmd_cmpl_log_max_entry = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
345 /* WMI Events */
346 uint32_t wmi_event_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY;
347 /* WMI MGMT Tx */
348 uint32_t wmi_mgmt_tx_log_max_entry = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
349 uint32_t wmi_mgmt_tx_cmpl_log_max_entry = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
350 /* WMI MGMT Rx */
351 uint32_t wmi_mgmt_rx_log_max_entry = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
352 /* WMI Diag Event */
353 uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
354 /* WMI capture size */
355 uint32_t wmi_record_max_length = WMI_DEBUG_ENTRY_MAX_LENGTH;
356 uint32_t wmi_display_size = 100;
357 
358 /**
359  * wmi_log_init() - Initialize WMI event logging
360  * @wmi_handle: WMI handle.
361  *
362  * Return: Initialization status
363  */
364 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
365 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
366 {
367 	struct wmi_log_buf_t *cmd_log_buf =
368 			&wmi_handle->log_info.wmi_command_log_buf_info;
369 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
370 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
371 
372 	struct wmi_log_buf_t *event_log_buf =
373 			&wmi_handle->log_info.wmi_event_log_buf_info;
374 	struct wmi_log_buf_t *rx_event_log_buf =
375 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
376 
377 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
378 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
379 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
380 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
381 	struct wmi_log_buf_t *mgmt_event_log_buf =
382 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
383 	struct wmi_log_buf_t *diag_event_log_buf =
384 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
385 
386 	/* WMI commands */
387 	cmd_log_buf->length = 0;
388 	cmd_log_buf->buf_tail_idx = 0;
389 	cmd_log_buf->buf = wmi_command_log_buffer;
390 	cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx;
391 	cmd_log_buf->size = WMI_CMD_DEBUG_MAX_ENTRY;
392 
393 	/* WMI commands TX completed */
394 	cmd_tx_cmpl_log_buf->length = 0;
395 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
396 	cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer;
397 	cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx;
398 	cmd_tx_cmpl_log_buf->size = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
399 
400 	/* WMI events when processed */
401 	event_log_buf->length = 0;
402 	event_log_buf->buf_tail_idx = 0;
403 	event_log_buf->buf = wmi_event_log_buffer;
404 	event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx;
405 	event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
406 
407 	/* WMI events when queued */
408 	rx_event_log_buf->length = 0;
409 	rx_event_log_buf->buf_tail_idx = 0;
410 	rx_event_log_buf->buf = wmi_rx_event_log_buffer;
411 	rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx;
412 	rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
413 
414 	/* WMI Management commands */
415 	mgmt_cmd_log_buf->length = 0;
416 	mgmt_cmd_log_buf->buf_tail_idx = 0;
417 	mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer;
418 	mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx;
419 	mgmt_cmd_log_buf->size = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
420 
421 	/* WMI Management commands Tx completed*/
422 	mgmt_cmd_tx_cmp_log_buf->length = 0;
423 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
424 	mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer;
425 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
426 		&g_wmi_mgmt_command_tx_cmp_buf_idx;
427 	mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
428 
429 	/* WMI Management events when received */
430 	mgmt_event_log_buf->length = 0;
431 	mgmt_event_log_buf->buf_tail_idx = 0;
432 	mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer;
433 	mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx;
434 	mgmt_event_log_buf->size = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
435 
436 	/* WMI diag events when received */
437 	diag_event_log_buf->length = 0;
438 	diag_event_log_buf->buf_tail_idx = 0;
439 	diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer;
440 	diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx;
441 	diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
442 
443 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
444 	wmi_handle->log_info.wmi_logging_enable = 1;
445 
446 	return QDF_STATUS_SUCCESS;
447 }
448 #else
449 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
450 {
451 	struct wmi_log_buf_t *cmd_log_buf =
452 			&wmi_handle->log_info.wmi_command_log_buf_info;
453 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
454 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
455 
456 	struct wmi_log_buf_t *event_log_buf =
457 			&wmi_handle->log_info.wmi_event_log_buf_info;
458 	struct wmi_log_buf_t *rx_event_log_buf =
459 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
460 
461 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
462 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
463 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
464 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
465 	struct wmi_log_buf_t *mgmt_event_log_buf =
466 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
467 	struct wmi_log_buf_t *diag_event_log_buf =
468 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
469 
470 	wmi_handle->log_info.wmi_logging_enable = 0;
471 
472 	/* WMI commands */
473 	cmd_log_buf->length = 0;
474 	cmd_log_buf->buf_tail_idx = 0;
475 	cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
476 		wmi_cmd_log_max_entry * sizeof(struct wmi_command_debug));
477 	cmd_log_buf->size = wmi_cmd_log_max_entry;
478 
479 	if (!cmd_log_buf->buf)
480 		return QDF_STATUS_E_NOMEM;
481 
482 	cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx;
483 
484 	/* WMI commands TX completed */
485 	cmd_tx_cmpl_log_buf->length = 0;
486 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
487 	cmd_tx_cmpl_log_buf->buf = (struct wmi_command_cmp_debug *) qdf_mem_malloc(
488 		wmi_cmd_cmpl_log_max_entry * sizeof(struct wmi_command_cmp_debug));
489 	cmd_tx_cmpl_log_buf->size = wmi_cmd_cmpl_log_max_entry;
490 
491 	if (!cmd_tx_cmpl_log_buf->buf)
492 		return QDF_STATUS_E_NOMEM;
493 
494 	cmd_tx_cmpl_log_buf->p_buf_tail_idx =
495 		&cmd_tx_cmpl_log_buf->buf_tail_idx;
496 
497 	/* WMI events when processed */
498 	event_log_buf->length = 0;
499 	event_log_buf->buf_tail_idx = 0;
500 	event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
501 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
502 	event_log_buf->size = wmi_event_log_max_entry;
503 
504 	if (!event_log_buf->buf)
505 		return QDF_STATUS_E_NOMEM;
506 
507 	event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx;
508 
509 	/* WMI events when queued */
510 	rx_event_log_buf->length = 0;
511 	rx_event_log_buf->buf_tail_idx = 0;
512 	rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
513 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
514 	rx_event_log_buf->size = wmi_event_log_max_entry;
515 
516 	if (!rx_event_log_buf->buf)
517 		return QDF_STATUS_E_NOMEM;
518 
519 	rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx;
520 
521 	/* WMI Management commands */
522 	mgmt_cmd_log_buf->length = 0;
523 	mgmt_cmd_log_buf->buf_tail_idx = 0;
524 	mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
525 		wmi_mgmt_tx_log_max_entry * sizeof(struct wmi_command_debug));
526 	mgmt_cmd_log_buf->size = wmi_mgmt_tx_log_max_entry;
527 
528 	if (!mgmt_cmd_log_buf->buf)
529 		return QDF_STATUS_E_NOMEM;
530 
531 	mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx;
532 
533 	/* WMI Management commands Tx completed*/
534 	mgmt_cmd_tx_cmp_log_buf->length = 0;
535 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
536 	mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *)
537 		qdf_mem_malloc(
538 		wmi_mgmt_tx_cmpl_log_max_entry *
539 		sizeof(struct wmi_command_debug));
540 	mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_tx_cmpl_log_max_entry;
541 
542 	if (!mgmt_cmd_tx_cmp_log_buf->buf)
543 		return QDF_STATUS_E_NOMEM;
544 
545 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
546 		&mgmt_cmd_tx_cmp_log_buf->buf_tail_idx;
547 
548 	/* WMI Management events when received */
549 	mgmt_event_log_buf->length = 0;
550 	mgmt_event_log_buf->buf_tail_idx = 0;
551 
552 	mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
553 		wmi_mgmt_rx_log_max_entry *
554 		sizeof(struct wmi_event_debug));
555 	mgmt_event_log_buf->size = wmi_mgmt_rx_log_max_entry;
556 
557 	if (!mgmt_event_log_buf->buf)
558 		return QDF_STATUS_E_NOMEM;
559 
560 	mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx;
561 
562 	/* WMI diag events when received */
563 	diag_event_log_buf->length = 0;
564 	diag_event_log_buf->buf_tail_idx = 0;
565 
566 	diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
567 		wmi_diag_log_max_entry *
568 		sizeof(struct wmi_event_debug));
569 	diag_event_log_buf->size = wmi_diag_log_max_entry;
570 
571 	if (!diag_event_log_buf->buf)
572 		return QDF_STATUS_E_NOMEM;
573 
574 	diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx;
575 
576 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
577 	wmi_handle->log_info.wmi_logging_enable = 1;
578 
579 	wmi_filtered_logging_init(wmi_handle);
580 
581 	return QDF_STATUS_SUCCESS;
582 }
583 #endif
584 
585 /**
586  * wmi_log_buffer_free() - Free all dynamically allocated buffer memory for
587  * event logging
588  * @wmi_handle: WMI handle.
589  *
590  * Return: None
591  */
592 #ifdef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
593 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
594 {
595 	wmi_filtered_logging_free(wmi_handle);
596 
597 	if (wmi_handle->log_info.wmi_command_log_buf_info.buf)
598 		qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf);
599 	if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf)
600 		qdf_mem_free(
601 		wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf);
602 	if (wmi_handle->log_info.wmi_event_log_buf_info.buf)
603 		qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf);
604 	if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf)
605 		qdf_mem_free(
606 			wmi_handle->log_info.wmi_rx_event_log_buf_info.buf);
607 	if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf)
608 		qdf_mem_free(
609 			wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf);
610 	if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf)
611 		qdf_mem_free(
612 		wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf);
613 	if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf)
614 		qdf_mem_free(
615 			wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf);
616 	if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf)
617 		qdf_mem_free(
618 			wmi_handle->log_info.wmi_diag_event_log_buf_info.buf);
619 	wmi_handle->log_info.wmi_logging_enable = 0;
620 
621 	qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock);
622 }
623 #else
624 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
625 {
626 	/* Do Nothing */
627 }
628 #endif
629 
630 /**
631  * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer
632  * @log_buffer: the command log buffer metadata of the buffer to print
633  * @count: the maximum number of entries to print
634  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
635  * @print_priv: any data required by the print method, e.g. a file handle
636  *
637  * Return: None
638  */
639 static void
640 wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
641 			 qdf_abstract_print *print, void *print_priv)
642 {
643 	static const int data_len =
644 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
645 	char str[128];
646 	uint32_t idx;
647 
648 	if (count > log_buffer->size)
649 		count = log_buffer->size;
650 	if (count > log_buffer->length)
651 		count = log_buffer->length;
652 
653 	/* subtract count from index, and wrap if necessary */
654 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
655 	idx %= log_buffer->size;
656 
657 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
658 	while (count) {
659 		struct wmi_command_debug *cmd_log = (struct wmi_command_debug *)
660 			&((struct wmi_command_debug *)log_buffer->buf)[idx];
661 		uint64_t secs, usecs;
662 		int len = 0;
663 		int i;
664 
665 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
666 		len += scnprintf(str + len, sizeof(str) - len,
667 				 "% 8lld.%06lld    %6u (0x%06x)    ",
668 				 secs, usecs,
669 				 cmd_log->command, cmd_log->command);
670 		for (i = 0; i < data_len; ++i) {
671 			len += scnprintf(str + len, sizeof(str) - len,
672 					 "0x%08x ", cmd_log->data[i]);
673 		}
674 
675 		print(print_priv, str);
676 
677 		--count;
678 		++idx;
679 		if (idx >= log_buffer->size)
680 			idx = 0;
681 	}
682 }
683 
684 /**
685  * wmi_print_cmd_cmp_log_buffer() - wmi command completion log printer
686  * @log_buffer: the command completion log buffer metadata of the buffer to print
687  * @count: the maximum number of entries to print
688  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
689  * @print_priv: any data required by the print method, e.g. a file handle
690  *
691  * Return: None
692  */
693 static void
694 wmi_print_cmd_cmp_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
695 			 qdf_abstract_print *print, void *print_priv)
696 {
697 	static const int data_len =
698 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
699 	char str[128];
700 	uint32_t idx;
701 
702 	if (count > log_buffer->size)
703 		count = log_buffer->size;
704 	if (count > log_buffer->length)
705 		count = log_buffer->length;
706 
707 	/* subtract count from index, and wrap if necessary */
708 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
709 	idx %= log_buffer->size;
710 
711 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
712 	while (count) {
713 		struct wmi_command_cmp_debug *cmd_log = (struct wmi_command_cmp_debug *)
714 			&((struct wmi_command_cmp_debug *)log_buffer->buf)[idx];
715 		uint64_t secs, usecs;
716 		int len = 0;
717 		int i;
718 
719 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
720 		len += scnprintf(str + len, sizeof(str) - len,
721 				 "% 8lld.%06lld    %6u (0x%06x)    ",
722 				 secs, usecs,
723 				 cmd_log->command, cmd_log->command);
724 		for (i = 0; i < data_len; ++i) {
725 			len += scnprintf(str + len, sizeof(str) - len,
726 					 "0x%08x ", cmd_log->data[i]);
727 		}
728 
729 		print(print_priv, str);
730 
731 		--count;
732 		++idx;
733 		if (idx >= log_buffer->size)
734 			idx = 0;
735 	}
736 }
737 
738 /**
739  * wmi_print_event_log_buffer() - an output agnostic wmi event log printer
740  * @log_buffer: the event log buffer metadata of the buffer to print
741  * @count: the maximum number of entries to print
742  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
743  * @print_priv: any data required by the print method, e.g. a file handle
744  *
745  * Return: None
746  */
747 static void
748 wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
749 			   qdf_abstract_print *print, void *print_priv)
750 {
751 	static const int data_len =
752 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
753 	char str[128];
754 	uint32_t idx;
755 
756 	if (count > log_buffer->size)
757 		count = log_buffer->size;
758 	if (count > log_buffer->length)
759 		count = log_buffer->length;
760 
761 	/* subtract count from index, and wrap if necessary */
762 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
763 	idx %= log_buffer->size;
764 
765 	print(print_priv, "Time (seconds)      Event Id             Payload");
766 	while (count) {
767 		struct wmi_event_debug *event_log = (struct wmi_event_debug *)
768 			&((struct wmi_event_debug *)log_buffer->buf)[idx];
769 		uint64_t secs, usecs;
770 		int len = 0;
771 		int i;
772 
773 		qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs);
774 		len += scnprintf(str + len, sizeof(str) - len,
775 				 "% 8lld.%06lld    %6u (0x%06x)    ",
776 				 secs, usecs,
777 				 event_log->event, event_log->event);
778 		for (i = 0; i < data_len; ++i) {
779 			len += scnprintf(str + len, sizeof(str) - len,
780 					 "0x%08x ", event_log->data[i]);
781 		}
782 
783 		print(print_priv, str);
784 
785 		--count;
786 		++idx;
787 		if (idx >= log_buffer->size)
788 			idx = 0;
789 	}
790 }
791 
792 inline void
793 wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count,
794 		  qdf_abstract_print *print, void *print_priv)
795 {
796 	wmi_print_cmd_log_buffer(
797 		&wmi->log_info.wmi_command_log_buf_info,
798 		count, print, print_priv);
799 }
800 
801 inline void
802 wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
803 			 qdf_abstract_print *print, void *print_priv)
804 {
805 	wmi_print_cmd_cmp_log_buffer(
806 		&wmi->log_info.wmi_command_tx_cmp_log_buf_info,
807 		count, print, print_priv);
808 }
809 
810 inline void
811 wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count,
812 		       qdf_abstract_print *print, void *print_priv)
813 {
814 	wmi_print_cmd_log_buffer(
815 		&wmi->log_info.wmi_mgmt_command_log_buf_info,
816 		count, print, print_priv);
817 }
818 
819 inline void
820 wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
821 			      qdf_abstract_print *print, void *print_priv)
822 {
823 	wmi_print_cmd_log_buffer(
824 		&wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info,
825 		count, print, print_priv);
826 }
827 
828 inline void
829 wmi_print_event_log(wmi_unified_t wmi, uint32_t count,
830 		    qdf_abstract_print *print, void *print_priv)
831 {
832 	wmi_print_event_log_buffer(
833 		&wmi->log_info.wmi_event_log_buf_info,
834 		count, print, print_priv);
835 }
836 
837 inline void
838 wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count,
839 		       qdf_abstract_print *print, void *print_priv)
840 {
841 	wmi_print_event_log_buffer(
842 		&wmi->log_info.wmi_rx_event_log_buf_info,
843 		count, print, print_priv);
844 }
845 
846 inline void
847 wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count,
848 			 qdf_abstract_print *print, void *print_priv)
849 {
850 	wmi_print_event_log_buffer(
851 		&wmi->log_info.wmi_mgmt_event_log_buf_info,
852 		count, print, print_priv);
853 }
854 
855 
856 /* debugfs routines */
857 
858 /**
859  * debug_wmi_##func_base##_show() - debugfs functions to display content of
860  * command and event buffers. The macro uses the maximum ring size to
861  * display the buffer contents once the log has wrapped around.
862  *
863  * @m: debugfs handler to access wmi_handle
864  * @v: Variable arguments (not used)
865  *
866  * Return: Length of characters printed
867  */
868 #define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
869 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
870 						void *v)		\
871 	{								\
872 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
873 		struct wmi_log_buf_t *wmi_log =				\
874 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
875 		int pos, nread, outlen;					\
876 		int i;							\
877 		uint64_t secs, usecs;					\
878 									\
879 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
880 		if (!wmi_log->length) {					\
881 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
882 			return wmi_bp_seq_printf(m,			\
883 			"no elements to read from ring buffer!\n");	\
884 		}							\
885 									\
886 		if (wmi_log->length <= wmi_ring_size)			\
887 			nread = wmi_log->length;			\
888 		else							\
889 			nread = wmi_ring_size;				\
890 									\
891 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
892 			/* tail can be 0 after wrap-around */		\
893 			pos = wmi_ring_size - 1;			\
894 		else							\
895 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
896 									\
897 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
898 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
899 		while (nread--) {					\
900 			struct wmi_record_type *wmi_record;		\
901 									\
902 			wmi_record = (struct wmi_record_type *)	\
903 			&(((struct wmi_record_type *)wmi_log->buf)[pos]);\
904 			outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n",	\
905 				(wmi_record->command));			\
906 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
907 				&usecs);				\
908 			outlen +=					\
909 			wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\
910 				secs, usecs);				\
911 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
912 			for (i = 0; i < (wmi_record_max_length/		\
913 					sizeof(uint32_t)); i++)		\
914 				outlen += wmi_bp_seq_printf(m, "%x ",	\
915 					wmi_record->data[i]);		\
916 			outlen += wmi_bp_seq_printf(m, "\n");		\
917 									\
918 			if (pos == 0)					\
919 				pos = wmi_ring_size - 1;		\
920 			else						\
921 				pos--;					\
922 		}							\
923 		return outlen;						\
924 	}								\
925 
926 #define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size)	\
927 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
928 						void *v)		\
929 	{								\
930 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
931 		struct wmi_log_buf_t *wmi_log =				\
932 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
933 		int pos, nread, outlen;					\
934 		int i;							\
935 		uint64_t secs, usecs;					\
936 									\
937 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
938 		if (!wmi_log->length) {					\
939 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
940 			return wmi_bp_seq_printf(m,			\
941 			"no elements to read from ring buffer!\n");	\
942 		}							\
943 									\
944 		if (wmi_log->length <= wmi_ring_size)			\
945 			nread = wmi_log->length;			\
946 		else							\
947 			nread = wmi_ring_size;				\
948 									\
949 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
950 			/* tail can be 0 after wrap-around */		\
951 			pos = wmi_ring_size - 1;			\
952 		else							\
953 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
954 									\
955 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
956 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
957 		while (nread--) {					\
958 			struct wmi_event_debug *wmi_record;		\
959 									\
960 			wmi_record = (struct wmi_event_debug *)		\
961 			&(((struct wmi_event_debug *)wmi_log->buf)[pos]);\
962 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
963 				&usecs);				\
964 			outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\
965 				(wmi_record->event));			\
966 			outlen +=					\
967 			wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\
968 				secs, usecs);				\
969 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
970 			for (i = 0; i < (wmi_record_max_length/		\
971 					sizeof(uint32_t)); i++)		\
972 				outlen += wmi_bp_seq_printf(m, "%x ",	\
973 					wmi_record->data[i]);		\
974 			outlen += wmi_bp_seq_printf(m, "\n");		\
975 									\
976 			if (pos == 0)					\
977 				pos = wmi_ring_size - 1;		\
978 			else						\
979 				pos--;					\
980 		}							\
981 		return outlen;						\
982 	}
983 
984 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size,
985 				  wmi_command_debug);
986 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size,
987 				  wmi_command_cmp_debug);
988 GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size);
989 GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size);
990 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size,
991 				  wmi_command_debug);
992 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log,
993 					wmi_display_size,
994 					wmi_command_debug);
995 GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size);
996 
997 /**
998  * debug_wmi_enable_show() - debugfs functions to display enable state of
999  * wmi logging feature.
1000  *
1001  * @m: debugfs handler to access wmi_handle
1002  * @v: Variable arguments (not used)
1003  *
1004  * Return: return value of the underlying seq print call
1005  */
1006 static int debug_wmi_enable_show(struct seq_file *m, void *v)
1007 {
1008 	wmi_unified_t wmi_handle = (wmi_unified_t) m->private;
1009 
1010 	return wmi_bp_seq_printf(m, "%d\n",
1011 			wmi_handle->log_info.wmi_logging_enable);
1012 }
1013 
1014 /**
1015  * debug_wmi_log_size_show() - debugfs functions to display configured size of
1016  * wmi logging command/event buffer and management command/event buffer.
1017  *
1018  * @m: debugfs handler to access wmi_handle
1019  * @v: Variable arguments (not used)
1020  *
1021  * Return: Length of characters printed
1022  */
1023 static int debug_wmi_log_size_show(struct seq_file *m, void *v)
1024 {
1025 
1026 	wmi_bp_seq_printf(m, "WMI command/cmpl log max size:%d/%d\n",
1027 			  wmi_cmd_log_max_entry, wmi_cmd_cmpl_log_max_entry);
1028 	wmi_bp_seq_printf(m, "WMI management Tx/cmpl log max size:%d/%d\n",
1029 			  wmi_mgmt_tx_log_max_entry,
1030 			  wmi_mgmt_tx_cmpl_log_max_entry);
1031 	wmi_bp_seq_printf(m, "WMI event log max size:%d\n",
1032 			  wmi_event_log_max_entry);
1033 	wmi_bp_seq_printf(m, "WMI management Rx log max size:%d\n",
1034 			  wmi_mgmt_rx_log_max_entry);
1035 	return wmi_bp_seq_printf(m,
1036 				 "WMI diag log max size:%d\n",
1037 				 wmi_diag_log_max_entry);
1038 }
1039 
1040 /**
1041  * debug_wmi_##func_base##_write() - debugfs functions to clear
1042  * wmi logging command/event buffer and management command/event buffer.
1043  *
1044  * @file: file handler to access wmi_handle
1045  * @buf: received data buffer
1046  * @count: length of received buffer
1047  * @ppos: Not used
1048  *
1049  * Return: count
1050  */
1051 #define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
1052 	static ssize_t debug_wmi_##func_base##_write(struct file *file,	\
1053 				const char __user *buf,			\
1054 				size_t count, loff_t *ppos)		\
1055 	{								\
1056 		int k, ret;						\
1057 		wmi_unified_t wmi_handle =				\
1058 			((struct seq_file *)file->private_data)->private;\
1059 		struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info.	\
1060 				wmi_##func_base##_buf_info;		\
1061 		char locbuf[50] = {0};					\
1062 									\
1063 		if ((!buf) || (count >= sizeof(locbuf)))		\
1064 			return -EFAULT;					\
1065 									\
1066 		if (copy_from_user(locbuf, buf, count))			\
1067 			return -EFAULT;					\
1068 									\
1069 		ret = sscanf(locbuf, "%d", &k);				\
1070 		if ((ret != 1) || (k != 0)) {                           \
1071 			wmi_err("Wrong input, echo 0 to clear the wmi buffer");\
1072 			return -EINVAL;					\
1073 		}							\
1074 									\
1075 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
1076 		qdf_mem_zero(wmi_log->buf, wmi_ring_size *		\
1077 				sizeof(struct wmi_record_type));	\
1078 		wmi_log->length = 0;					\
1079 		*(wmi_log->p_buf_tail_idx) = 0;				\
1080 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1081 									\
1082 		return count;						\
1083 	}
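
/*
 * Usage sketch for the generated write handlers: writing "0" to the matching
 * debugfs entry clears that ring, for example (illustrative path; the actual
 * parent directory depends on where the qdf debugfs root lives):
 *
 *	echo 0 > /sys/kernel/debug/WMI_SOC0_PDEV0/wmi_command_log
 *
 * Any other input is rejected with -EINVAL.
 */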
1084 
1085 GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_cmd_log_max_entry,
1086 			   wmi_command_debug);
1087 GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_cmd_cmpl_log_max_entry,
1088 			   wmi_command_cmp_debug);
1089 GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_event_log_max_entry,
1090 			   wmi_event_debug);
1091 GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_event_log_max_entry,
1092 			   wmi_event_debug);
1093 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_tx_log_max_entry,
1094 			   wmi_command_debug);
1095 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log,
1096 			   wmi_mgmt_tx_cmpl_log_max_entry, wmi_command_debug);
1097 GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_rx_log_max_entry,
1098 			   wmi_event_debug);
1099 
1100 /**
1101  * debug_wmi_enable_write() - debugfs functions to enable/disable
1102  * wmi logging feature.
1103  *
1104  * @file: file handler to access wmi_handle
1105  * @buf: received data buffer
1106  * @count: length of received buffer
1107  * @ppos: Not used
1108  *
1109  * Return: count
1110  */
1111 static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf,
1112 					size_t count, loff_t *ppos)
1113 {
1114 	wmi_unified_t wmi_handle =
1115 		((struct seq_file *)file->private_data)->private;
1116 	int k, ret;
1117 	char locbuf[50] = {0};
1118 
1119 	if ((!buf) || (count >= sizeof(locbuf)))
1120 		return -EFAULT;
1121 
1122 	if (copy_from_user(locbuf, buf, count))
1123 		return -EFAULT;
1124 
1125 	ret = sscanf(locbuf, "%d", &k);
1126 	if ((ret != 1) || ((k != 0) && (k != 1)))
1127 		return -EINVAL;
1128 
1129 	wmi_handle->log_info.wmi_logging_enable = k;
1130 	return count;
1131 }
1132 
1133 /**
1134  * debug_wmi_log_size_write() - reserved.
1135  *
1136  * @file: file handler to access wmi_handle
1137  * @buf: received data buffer
1138  * @count: length of received buffer
1139  * @ppos: Not used
1140  *
1141  * Return: count
1142  */
1143 static ssize_t debug_wmi_log_size_write(struct file *file,
1144 		const char __user *buf, size_t count, loff_t *ppos)
1145 {
1146 	return -EINVAL;
1147 }
1148 
1149 /* Structure to maintain debug information */
1150 struct wmi_debugfs_info {
1151 	const char *name;
1152 	const struct file_operations *ops;
1153 };
1154 
1155 #define DEBUG_FOO(func_base) { .name = #func_base,			\
1156 	.ops = &debug_##func_base##_ops }
1157 
1158 /**
1159  * debug_##func_base##_open() - Open debugfs entry for respective command
1160  * and event buffer.
1161  *
1162  * @inode: node for debug dir entry
1163  * @file: file handler
1164  *
1165  * Return: open status
1166  */
1167 #define GENERATE_DEBUG_STRUCTS(func_base)				\
1168 	static int debug_##func_base##_open(struct inode *inode,	\
1169 						struct file *file)	\
1170 	{								\
1171 		return single_open(file, debug_##func_base##_show,	\
1172 				inode->i_private);			\
1173 	}								\
1174 									\
1175 									\
1176 	static struct file_operations debug_##func_base##_ops = {	\
1177 		.open		= debug_##func_base##_open,		\
1178 		.read		= seq_read,				\
1179 		.llseek		= seq_lseek,				\
1180 		.write		= debug_##func_base##_write,		\
1181 		.release	= single_release,			\
1182 	};
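
/*
 * Expansion sketch: GENERATE_DEBUG_STRUCTS(wmi_command_log) defines
 * debug_wmi_command_log_open() and debug_wmi_command_log_ops, wiring the
 * file_operations to the debug_wmi_command_log_show()/_write() handlers
 * generated by the SHOW/WRITE macros above.  DEBUG_FOO(wmi_command_log)
 * then yields the table entry { .name = "wmi_command_log",
 * .ops = &debug_wmi_command_log_ops } used in wmi_debugfs_infos[] below.
 */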
1183 
1184 GENERATE_DEBUG_STRUCTS(wmi_command_log);
1185 GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log);
1186 GENERATE_DEBUG_STRUCTS(wmi_event_log);
1187 GENERATE_DEBUG_STRUCTS(wmi_rx_event_log);
1188 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log);
1189 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log);
1190 GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log);
1191 GENERATE_DEBUG_STRUCTS(wmi_enable);
1192 GENERATE_DEBUG_STRUCTS(wmi_log_size);
1193 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1194 GENERATE_DEBUG_STRUCTS(filtered_wmi_cmds);
1195 GENERATE_DEBUG_STRUCTS(filtered_wmi_evts);
1196 GENERATE_DEBUG_STRUCTS(wmi_filtered_command_log);
1197 GENERATE_DEBUG_STRUCTS(wmi_filtered_event_log);
1198 #endif
1199 
1200 struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = {
1201 	DEBUG_FOO(wmi_command_log),
1202 	DEBUG_FOO(wmi_command_tx_cmp_log),
1203 	DEBUG_FOO(wmi_event_log),
1204 	DEBUG_FOO(wmi_rx_event_log),
1205 	DEBUG_FOO(wmi_mgmt_command_log),
1206 	DEBUG_FOO(wmi_mgmt_command_tx_cmp_log),
1207 	DEBUG_FOO(wmi_mgmt_event_log),
1208 	DEBUG_FOO(wmi_enable),
1209 	DEBUG_FOO(wmi_log_size),
1210 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1211 	DEBUG_FOO(filtered_wmi_cmds),
1212 	DEBUG_FOO(filtered_wmi_evts),
1213 	DEBUG_FOO(wmi_filtered_command_log),
1214 	DEBUG_FOO(wmi_filtered_event_log),
1215 #endif
1216 };
1217 
1218 /**
1219  * wmi_debugfs_create() - Create debug_fs entry for wmi logging.
1220  *
1221  * @wmi_handle: wmi handle
1222  * @par_entry: debug directory entry
1224  *
1225  * Return: none
1226  */
1227 static void wmi_debugfs_create(wmi_unified_t wmi_handle,
1228 			       struct dentry *par_entry)
1229 {
1230 	int i;
1231 
1232 	if (!par_entry)
1233 		goto out;
1234 
1235 	for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1236 		wmi_handle->debugfs_de[i] = qdf_debugfs_create_entry(
1237 						wmi_debugfs_infos[i].name,
1238 						WMI_INFOS_DBG_FILE_PERM,
1239 						par_entry,
1240 						wmi_handle,
1241 						wmi_debugfs_infos[i].ops);
1242 
1243 		if (!wmi_handle->debugfs_de[i]) {
1244 			wmi_err("debug Entry creation failed!");
1245 			goto out;
1246 		}
1247 	}
1248 
1249 	return;
1250 
1251 out:
1252 	wmi_err("debug Entry creation failed!");
1253 	wmi_log_buffer_free(wmi_handle);
1254 	return;
1255 }
1256 
1257 /**
1258  * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
1259  * @wmi_handle: wmi handle
1262  *
1263  * Return: none
1264  */
1265 static void wmi_debugfs_remove(wmi_unified_t wmi_handle)
1266 {
1267 	int i;
1268 	struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir;
1269 
1270 	if (dentry) {
1271 		for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1272 			if (wmi_handle->debugfs_de[i])
1273 				wmi_handle->debugfs_de[i] = NULL;
1274 		}
1275 	}
1276 
1277 	if (dentry)
1278 		qdf_debugfs_remove_dir_recursive(dentry);
1279 }
1280 
1281 /**
1282  * wmi_debugfs_init() - debugfs function to create the debugfs directory and
1283  * entries for wmi logging.
1284  *
1285  * @wmi_handle: wmi handle
 * @pdev_idx: pdev index
1286  *
1287  * Return: init status
1288  */
1289 static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx)
1290 {
1291 	char buf[32];
1292 
1293 	snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u",
1294 		 wmi_handle->soc->soc_idx, pdev_idx);
1295 
1296 	wmi_handle->log_info.wmi_log_debugfs_dir =
1297 		qdf_debugfs_create_dir(buf, NULL);
1298 
1299 	if (!wmi_handle->log_info.wmi_log_debugfs_dir) {
1300 		wmi_err("error while creating debugfs dir for %s", buf);
1301 		return QDF_STATUS_E_FAILURE;
1302 	}
1303 	wmi_debugfs_create(wmi_handle,
1304 			   wmi_handle->log_info.wmi_log_debugfs_dir);
1305 
1306 	return QDF_STATUS_SUCCESS;
1307 }
1308 
1309 /**
1310  * wmi_mgmt_cmd_record() - Wrapper function for mgmt command logging macro
1311  *
1312  * @wmi_handle: wmi handle
1313  * @cmd: mgmt command
1314  * @header: pointer to 802.11 header
1315  * @vdev_id: vdev id
1316  * @chanfreq: channel frequency
1317  *
1318  * Return: none
1319  */
1320 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1321 			void *header, uint32_t vdev_id, uint32_t chanfreq)
1322 {
1323 
1324 	uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE];
1325 
1326 	data[0] = ((struct wmi_command_header *)header)->type;
1327 	data[1] = ((struct wmi_command_header *)header)->sub_type;
1328 	data[2] = vdev_id;
1329 	data[3] = chanfreq;
1330 
1331 	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
1332 
1333 	WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data);
1334 	wmi_specific_cmd_record(wmi_handle, cmd, (uint8_t *)data);
1335 	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
1336 }
1337 #else
1338 /**
1339  * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
1340  * @wmi_handle: wmi handle
1343  *
1344  * Return: none
1345  */
1346 static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { }
1347 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1348 			void *header, uint32_t vdev_id, uint32_t chanfreq) { }
1349 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { }
1350 #endif /*WMI_INTERFACE_EVENT_LOGGING */
1351 qdf_export_symbol(wmi_mgmt_cmd_record);
1352 
1353 #ifdef WMI_EXT_DBG
1354 
1355 /**
1356  * wmi_ext_dbg_msg_enqueue() - enqueue wmi message
1357  * @wmi_handle: wmi handler
 * @msg: wmi message to be enqueued
1358  *
1359  * Return: size of wmi message queue after enqueue
1360  */
1361 static uint32_t wmi_ext_dbg_msg_enqueue(struct wmi_unified *wmi_handle,
1362 					struct wmi_ext_dbg_msg *msg)
1363 {
1364 	uint32_t list_size;
1365 
1366 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1367 	qdf_list_insert_back_size(&wmi_handle->wmi_ext_dbg_msg_queue,
1368 				  &msg->node, &list_size);
1369 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1370 
1371 	return list_size;
1372 }
1373 
1374 /**
1375  * wmi_ext_dbg_msg_dequeue() - dequeue wmi message
1376  * @wmi_handle: wmi handler
1377  *
1378  * Return: wmi msg on success else NULL
1379  */
1380 static struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_dequeue(struct wmi_unified
1381 						       *wmi_handle)
1382 {
1383 	qdf_list_node_t *list_node = NULL;
1384 
1385 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1386 	qdf_list_remove_front(&wmi_handle->wmi_ext_dbg_msg_queue, &list_node);
1387 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1388 
1389 	if (!list_node)
1390 		return NULL;
1391 
1392 	return qdf_container_of(list_node, struct wmi_ext_dbg_msg, node);
1393 }
1394 
1395 /**
1396  * wmi_ext_dbg_msg_record() - record wmi messages
1397  * @wmi_handle: wmi handler
1398  * @buf: wmi message buffer
1399  * @len: wmi message length
1400  * @type: wmi message type
1401  *
1402  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1403  */
1404 static QDF_STATUS wmi_ext_dbg_msg_record(struct wmi_unified *wmi_handle,
1405 					 uint8_t *buf, uint32_t len,
1406 					 enum WMI_MSG_TYPE type)
1407 {
1408 	struct wmi_ext_dbg_msg *msg;
1409 	uint32_t list_size;
1410 
1411 	msg = wmi_ext_dbg_msg_get(len);
1412 	if (!msg)
1413 		return QDF_STATUS_E_NOMEM;
1414 
1415 	msg->len = len;
1416 	msg->type = type;
1417 	qdf_mem_copy(msg->buf, buf, len);
1418 	msg->ts = qdf_get_log_timestamp();
1419 	list_size = wmi_ext_dbg_msg_enqueue(wmi_handle, msg);
1420 
1421 	if (list_size >= wmi_handle->wmi_ext_dbg_msg_queue_size) {
1422 		msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1423 		wmi_ext_dbg_msg_put(msg);
1424 	}
1425 
1426 	return QDF_STATUS_SUCCESS;
1427 }
1428 
1429 /**
1430  * wmi_ext_dbg_msg_cmd_record() - record wmi command messages
1431  * @wmi_handle: wmi handler
1432  * @buf: wmi command buffer
1433  * @len: wmi command message length
1434  *
1435  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1436  */
1437 static QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified *wmi_handle,
1438 					     uint8_t *buf, uint32_t len)
1439 {
1440 	return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1441 				      WMI_MSG_TYPE_CMD);
1442 }
1443 
1444 /**
1445  * wmi_ext_dbg_msg_event_record() - record wmi event messages
1446  * @wmi_handle: wmi handler
1447  * @buf: wmi event buffer
1448  * @len: wmi event message length
1449  *
1450  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1451  */
1452 static QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified *wmi_handle,
1453 					       uint8_t *buf, uint32_t len)
1454 {
1455 	uint32_t id;
1456 
1457 	id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
1458 	if (id != wmi_handle->wmi_events[wmi_diag_event_id])
1459 		return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1460 					      WMI_MSG_TYPE_EVENT);
1461 
1462 	return QDF_STATUS_SUCCESS;
1463 }
1464 
1465 /**
1466  * wmi_ext_dbg_msg_queue_init() - create debugfs queue and associated lock
1467  * @wmi_handle: wmi handler
1468  *
1469  * Return: none
1470  */
1471 static void wmi_ext_dbg_msg_queue_init(struct wmi_unified *wmi_handle)
1472 {
1473 	qdf_list_create(&wmi_handle->wmi_ext_dbg_msg_queue,
1474 			wmi_handle->wmi_ext_dbg_msg_queue_size);
1475 	qdf_spinlock_create(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1476 }
1477 
1478 /**
1479  * wmi_ext_dbg_msg_queue_deinit() - destroy debugfs queue and associated lock
1480  * @wmi_handle: wmi handler
1481  *
1482  * Return: none
1483  */
1484 static void wmi_ext_dbg_msg_queue_deinit(struct wmi_unified *wmi_handle)
1485 {
1486 	qdf_list_destroy(&wmi_handle->wmi_ext_dbg_msg_queue);
1487 	qdf_spinlock_destroy(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1488 }
1489 
1490 /**
1491  * wmi_ext_dbg_msg_show() - debugfs function to display whole content of
1492  * wmi command/event messages including headers.
1493  * @file: qdf debugfs file handler
1494  * @arg: pointer to wmi handler
1495  *
1496  * Return: QDF_STATUS_SUCCESS if all the messages are shown successfully,
1497  * else QDF_STATUS_E_AGAIN if more data to show.
1498  */
1499 static QDF_STATUS wmi_ext_dbg_msg_show(qdf_debugfs_file_t file, void *arg)
1500 {
1501 	struct wmi_unified *wmi_handle = (struct wmi_unified *)arg;
1502 	struct wmi_ext_dbg_msg *msg;
1503 	uint64_t secs, usecs;
1504 
1505 	msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1506 	if (!msg)
1507 		return QDF_STATUS_SUCCESS;
1508 
1509 	qdf_debugfs_printf(file, "%s: 0x%x\n",
1510 			   msg->type == WMI_MSG_TYPE_CMD ? "COMMAND" :
1511 			   "EVENT", WMI_GET_FIELD(msg->buf, WMI_CMD_HDR,
1512 						  COMMANDID));
1513 	qdf_log_timestamp_to_secs(msg->ts, &secs, &usecs);
1514 	qdf_debugfs_printf(file, "Time: %llu.%llu\n", secs, usecs);
1515 	qdf_debugfs_printf(file, "Length:%d\n", msg->len);
1516 	qdf_debugfs_hexdump(file, msg->buf, msg->len,
1517 			    WMI_EXT_DBG_DUMP_ROW_SIZE,
1518 			    WMI_EXT_DBG_DUMP_GROUP_SIZE);
1519 	qdf_debugfs_printf(file, "\n");
1520 
1521 	if (qdf_debugfs_overflow(file)) {
1522 		qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1523 		qdf_list_insert_front(&wmi_handle->wmi_ext_dbg_msg_queue,
1524 				      &msg->node);
1525 		qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1526 
1527 	} else {
1528 		wmi_ext_dbg_msg_put(msg);
1529 	}
1530 
1531 	return QDF_STATUS_E_AGAIN;
1532 }
1533 
1534 /**
1535  * wmi_ext_dbg_msg_write() - debugfs write not supported
1536  * @priv: private data
1537  * @buf: received data buffer
1538  * @len: length of received buffer
1539  *
1540  * Return: QDF_STATUS_E_NOSUPPORT.
1541  */
1542 static QDF_STATUS wmi_ext_dbg_msg_write(void *priv, const char *buf,
1543 					qdf_size_t len)
1544 {
1545 	return QDF_STATUS_E_NOSUPPORT;
1546 }
1547 
1548 static struct qdf_debugfs_fops wmi_ext_dbgfs_ops[WMI_MAX_RADIOS];
1549 
1550 /**
1551  * wmi_ext_dbgfs_init() - init debugfs items for extended wmi dump.
1552  * @wmi_handle: wmi handle
1553  * @pdev_idx: pdev index
1554  *
1555  * Return: QDF_STATUS_SUCCESS if debugfs is initialized else
1556  * QDF_STATUS_E_FAILURE
1557  */
1558 static QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1559 				     uint32_t pdev_idx)
1560 {
1561 	qdf_dentry_t dentry;
1562 	char buf[32];
1563 
1564 	/* To maintain backward compatibility, naming convention for PDEV 0
1565 	 * dentry is kept same as before. For more than 1 PDEV, dentry
1566 	 * names will be appended with PDEVx.
1567 	 */
1568 	if (wmi_handle->soc->soc_idx == 0 && pdev_idx == 0) {
1569 		dentry  = qdf_debugfs_create_dir(WMI_EXT_DBG_DIR, NULL);
1570 	} else {
1571 		snprintf(buf, sizeof(buf), "WMI_EXT_DBG_SOC%u_PDEV%u",
1572 			 wmi_handle->soc->soc_idx, pdev_idx);
1573 		dentry  = qdf_debugfs_create_dir(buf, NULL);
1574 	}
1575 
1576 	if (!dentry) {
1577 		wmi_err("error while creating extended wmi debugfs dir");
1578 		return QDF_STATUS_E_FAILURE;
1579 	}
1580 
1581 	wmi_ext_dbgfs_ops[pdev_idx].show = wmi_ext_dbg_msg_show;
1582 	wmi_ext_dbgfs_ops[pdev_idx].write = wmi_ext_dbg_msg_write;
1583 	wmi_ext_dbgfs_ops[pdev_idx].priv = wmi_handle;
1584 	if (!qdf_debugfs_create_file(WMI_EXT_DBG_FILE, WMI_EXT_DBG_FILE_PERM,
1585 				     dentry, &wmi_ext_dbgfs_ops[pdev_idx])) {
1586 		qdf_debugfs_remove_dir(dentry);
1587 		wmi_err("Error while creating extended wmi debugfs file");
1588 		return QDF_STATUS_E_FAILURE;
1589 	}
1590 
1591 	wmi_handle->wmi_ext_dbg_dentry = dentry;
1592 	wmi_handle->wmi_ext_dbg_msg_queue_size = WMI_EXT_DBG_QUEUE_SIZE;
1593 	wmi_ext_dbg_msg_queue_init(wmi_handle);
1594 
1595 	return QDF_STATUS_SUCCESS;
1596 }
1597 
1598 /**
1599  * wmi_ext_dbgfs_deinit() - cleanup/deinit debugfs items of extended wmi dump.
1600  * @wmi_handle: wmi handle
1601  *
1602  * Return: QDF_STATUS_SUCCESS if cleanup is successful
1603  */
1604 static QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1605 {
1606 	struct wmi_ext_dbg_msg *msg;
1607 
1608 	while ((msg = wmi_ext_dbg_msg_dequeue(wmi_handle)))
1609 		wmi_ext_dbg_msg_put(msg);
1610 
1611 	wmi_ext_dbg_msg_queue_deinit(wmi_handle);
1612 	qdf_debugfs_remove_dir_recursive(wmi_handle->wmi_ext_dbg_dentry);
1613 
1614 	return QDF_STATUS_SUCCESS;
1615 }
1616 
1617 #else
1618 
1619 static inline QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified
1620 						    *wmi_handle,
1621 						    uint8_t *buf, uint32_t len)
1622 {
1623 	return QDF_STATUS_SUCCESS;
1624 }
1625 
1626 static inline QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified
1627 						      *wmi_handle,
1628 						      uint8_t *buf, uint32_t len)
1629 {
1630 	return QDF_STATUS_SUCCESS;
1631 }
1632 
1633 static inline QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1634 					    uint32_t pdev_idx)
1635 {
1636 	return QDF_STATUS_SUCCESS;
1637 }
1638 
1639 static inline QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1640 {
1641 	return QDF_STATUS_SUCCESS;
1642 }
1643 
1644 #endif /* WMI_EXT_DBG */
1645 
1646 int wmi_get_host_credits(wmi_unified_t wmi_handle);
1647 /* WMI buffer APIs */
1648 
1649 #ifdef NBUF_MEMORY_DEBUG
1650 wmi_buf_t
1651 wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len,
1652 		    const char *func_name,
1653 		    uint32_t line_num)
1654 {
1655 	wmi_buf_t wmi_buf;
1656 
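	/*
	 * Reject requests which, after adding the WMI command header and
	 * rounding up to a 4-byte multiple, would exceed the endpoint's
	 * maximum message size.
	 */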
1657 	if (roundup(len + sizeof(WMI_CMD_HDR), 4) > wmi_handle->max_msg_len) {
1658 		QDF_ASSERT(0);
1659 		return NULL;
1660 	}
1661 
1662 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, func_name,
1663 				 line_num);
1664 	if (!wmi_buf)
1665 		wmi_buf = qdf_nbuf_alloc_debug(NULL,
1666 					       roundup(len + WMI_MIN_HEAD_ROOM,
1667 						       4),
1668 					       WMI_MIN_HEAD_ROOM, 4, false,
1669 					       func_name, line_num);
1670 	if (!wmi_buf)
1671 		return NULL;
1672 
1673 	/* Clear the wmi buffer */
1674 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1675 
1676 	/*
1677 	 * Set the length of the buffer to match the allocation size.
1678 	 */
1679 	qdf_nbuf_set_pktlen(wmi_buf, len);
1680 
1681 	return wmi_buf;
1682 }
1683 qdf_export_symbol(wmi_buf_alloc_debug);
1684 
1685 void wmi_buf_free(wmi_buf_t net_buf)
1686 {
1687 	net_buf = wbuff_buff_put(net_buf);
1688 	if (net_buf)
1689 		qdf_nbuf_free(net_buf);
1690 }
1691 qdf_export_symbol(wmi_buf_free);
1692 #else
1693 wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len,
1694 			   const char *func, uint32_t line)
1695 {
1696 	wmi_buf_t wmi_buf;
1697 
1698 	if (roundup(len + sizeof(WMI_CMD_HDR), 4) > wmi_handle->max_msg_len) {
1699 		QDF_DEBUG_PANIC("Invalid length %u (via %s:%u)",
1700 				len, func, line);
1701 		return NULL;
1702 	}
1703 
1704 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, func,
1705 				 line);
1706 	if (!wmi_buf)
1707 		wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len +
1708 				WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4,
1709 				false, func, line);
1710 
1711 	if (!wmi_buf) {
1712 		wmi_nofl_err("%s:%d, failed to alloc len:%d", func, line, len);
1713 		return NULL;
1714 	}
1715 
1716 	/* Clear the wmi buffer */
1717 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1718 
1719 	/*
1720 	 * Set the length of the buffer to match the allocation size.
1721 	 */
1722 	qdf_nbuf_set_pktlen(wmi_buf, len);
1723 
1724 	return wmi_buf;
1725 }
1726 qdf_export_symbol(wmi_buf_alloc_fl);
1727 
1728 void wmi_buf_free(wmi_buf_t net_buf)
1729 {
1730 	net_buf = wbuff_buff_put(net_buf);
1731 	if (net_buf)
1732 		qdf_nbuf_free(net_buf);
1733 }
1734 qdf_export_symbol(wmi_buf_free);
1735 #endif
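
/*
 * Illustrative command-buffer usage (sketch only; payload/TLV population is
 * command specific and payload_len/cmd_id below are placeholders):
 *
 *	wmi_buf_t buf = wmi_buf_alloc(wmi_handle, payload_len);
 *
 *	if (!buf)
 *		return QDF_STATUS_E_NOMEM;
 *	... fill wmi_buf_data(buf) with payload_len bytes of payload ...
 *	if (QDF_IS_STATUS_ERROR(wmi_unified_cmd_send(wmi_handle, buf,
 *						     payload_len, cmd_id)))
 *		wmi_buf_free(buf);
 */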
1736 
1737 /**
1738  * wmi_get_max_msg_len() - get maximum WMI message length
1739  * @wmi_handle: WMI handle.
1740  *
1741  * This function returns the maximum usable WMI message length.
1742  *
1743  * Return: the endpoint's maximum message size minus the WMI headroom
1744  */
1745 uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle)
1746 {
1747 	return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM;
1748 }
1749 qdf_export_symbol(wmi_get_max_msg_len);
1750 
1751 #ifndef WMI_CMD_STRINGS
1752 static uint8_t *wmi_id_to_name(uint32_t wmi_command)
1753 {
1754 	return "Invalid WMI cmd";
1755 }
1756 #endif
1757 
1758 static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag)
1759 {
1760 	wmi_debug("Send WMI command:%s command_id:%d htc_tag:%d",
1761 		 wmi_id_to_name(cmd_id), cmd_id, tag);
1762 }
1763 
1764 /**
1765  * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence
1766  * @cmd_id: command to check
1767  *
1768  * Return: true if the command is part of the resume sequence.
1769  */
1770 #ifdef WLAN_POWER_MANAGEMENT_OFFLOAD
1771 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1772 {
1773 	switch (cmd_id) {
1774 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
1775 	case WMI_PDEV_RESUME_CMDID:
1776 		return true;
1777 
1778 	default:
1779 		return false;
1780 	}
1781 }
1782 
1783 #else
1784 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1785 {
1786 	return false;
1787 }
1788 
1789 #endif
1790 
1791 #ifdef FEATURE_WLAN_D0WOW
1792 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1793 {
1794 	wmi_d0_wow_enable_disable_cmd_fixed_param *cmd;
1795 
1796 	if (cmd_id == WMI_D0_WOW_ENABLE_DISABLE_CMDID) {
1797 		cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *)
1798 			wmi_buf_data(buf);
1799 		if (!cmd->enable)
1800 			return true;
1801 		else
1802 			return false;
1803 	}
1804 
1805 	return false;
1806 }
1807 #else
1808 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1809 {
1810 	return false;
1811 }
1812 
1813 #endif
1814 
1815 #ifdef WMI_INTERFACE_SEQUENCE_CHECK
1816 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1817 {
1818 	wmi_handle->wmi_sequence = 0;
1819 	wmi_handle->wmi_exp_sequence = 0;
1820 	wmi_handle->wmi_sequence_stop = false;
1821 }
1822 
1823 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1824 {
1825 	qdf_spinlock_create(&wmi_handle->wmi_seq_lock);
1826 	wmi_interface_sequence_reset(wmi_handle);
1827 }
1828 
1829 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1830 {
1831 	qdf_spinlock_destroy(&wmi_handle->wmi_seq_lock);
1832 }
1833 
1834 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1835 {
1836 	wmi_handle->wmi_sequence_stop = true;
1837 }
1838 
1839 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1840 					  HTC_PACKET *pkt,
1841 					  const char *func, uint32_t line)
1842 {
1843 	wmi_buf_t buf = GET_HTC_PACKET_NET_BUF_CONTEXT(pkt);
1844 	QDF_STATUS status;
1845 
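	/*
	 * Hold the sequence lock across htc_send_pkt() and the mark update so
	 * the sequence number recorded in the SKB matches the order in which
	 * commands are handed to HTC.
	 */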
1846 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1847 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1848 	if (QDF_STATUS_SUCCESS != status) {
1849 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1850 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1851 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1852 			     func, line, status);
1853 		qdf_mem_free(pkt);
1854 		return status;
1855 	}
1856 	/* Record the sequence number in the SKB */
1857 	qdf_nbuf_set_mark(buf, wmi_handle->wmi_sequence);
1858 	/* Increment the sequence number */
1859 	wmi_handle->wmi_sequence = (wmi_handle->wmi_sequence + 1)
1860 				   & (wmi_handle->wmi_max_cmds - 1);
1861 	qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1862 
1863 	return status;
1864 }
1865 
1866 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1867 						wmi_buf_t buf)
1868 {
1869 	/* Skip sequence check when wmi sequence stop is set */
1870 	if (wmi_handle->wmi_sequence_stop)
1871 		return;
1872 
1873 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1874 	/* Match the completion sequence and expected sequence number */
1875 	if (qdf_nbuf_get_mark(buf) != wmi_handle->wmi_exp_sequence) {
1876 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1877 		wmi_nofl_err("WMI Tx Completion Sequence number mismatch");
1878 		wmi_nofl_err("Expected %d Received %d",
1879 			     wmi_handle->wmi_exp_sequence,
1880 			     qdf_nbuf_get_mark(buf));
1881 		/* Trigger Recovery */
1882 		qdf_trigger_self_recovery(wmi_handle->soc,
1883 					  QDF_WMI_BUF_SEQUENCE_MISMATCH);
1884 	} else {
1885 		/* Increment the expected sequence number */
1886 		wmi_handle->wmi_exp_sequence =
1887 				(wmi_handle->wmi_exp_sequence + 1)
1888 				& (wmi_handle->wmi_max_cmds - 1);
1889 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1890 	}
1891 }
1892 #else
1893 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1894 {
1895 }
1896 
1897 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1898 {
1899 }
1900 
1901 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1902 {
1903 }
1904 
1905 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1906 {
1907 }
1908 
1909 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1910 					  HTC_PACKET *pkt,
1911 					  const char *func, uint32_t line)
1912 {
1913 	QDF_STATUS status;
1914 
1915 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1916 	if (QDF_STATUS_SUCCESS != status) {
1917 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1918 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1919 			     func, line, status);
1920 		qdf_mem_free(pkt);
1921 		return status;
1922 	}
1923 
1924 	return status;
1925 }
1926 
1927 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1928 						wmi_buf_t buf)
1929 {
1930 }
1931 #endif
1932 
1933 static inline void wmi_unified_debug_dump(wmi_unified_t wmi_handle)
1934 {
1935 	wmi_nofl_err("Endpoint ID = %d, Tx Queue Depth = %d, soc_id = %u, target type = %s",
1936 		     wmi_handle->wmi_endpoint_id,
1937 		     htc_get_tx_queue_depth(wmi_handle->htc_handle,
1938 					    wmi_handle->wmi_endpoint_id),
1939 		     wmi_handle->soc->soc_idx,
1940 		     (wmi_handle->target_type ==
1941 		      WMI_TLV_TARGET ? "WMI_TLV_TARGET" :
1942 						"WMI_NON_TLV_TARGET"));
1943 }
1944 
1945 QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf,
1946 				   uint32_t len, uint32_t cmd_id,
1947 				   const char *func, uint32_t line)
1948 {
1949 	HTC_PACKET *pkt;
1950 	uint16_t htc_tag = 0;
1951 
1952 	if (wmi_get_runtime_pm_inprogress(wmi_handle)) {
1953 		htc_tag = wmi_handle->ops->wmi_set_htc_tx_tag(wmi_handle, buf,
1954 							      cmd_id);
1955 	} else if (qdf_atomic_read(&wmi_handle->is_target_suspended) &&
1956 		   !wmi_is_pm_resume_cmd(cmd_id) &&
1957 		   !wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id)) {
1958 		wmi_nofl_err("Target is suspended (via %s:%u)",
1959 			     func, line);
1960 		return QDF_STATUS_E_BUSY;
1961 	}
1962 
1963 	if (wmi_handle->wmi_stopinprogress) {
1964 		wmi_nofl_err("%s:%d, WMI stop in progress, wmi_handle:%pK",
1965 			     func, line, wmi_handle);
1966 		return QDF_STATUS_E_INVAL;
1967 	}
1968 
1969 #ifndef WMI_NON_TLV_SUPPORT
1970 	/* Do sanity check on the TLV parameter structure */
1971 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
1972 		void *buf_ptr = (void *)qdf_nbuf_data(buf);
1973 
1974 		if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id)
1975 			!= 0) {
1976 			wmi_nofl_err("%s:%d, Invalid WMI Param Buffer for Cmd:%d",
1977 				     func, line, cmd_id);
1978 			return QDF_STATUS_E_INVAL;
1979 		}
1980 	}
1981 #endif
1982 
1983 	if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
1984 		wmi_nofl_err("%s:%d, Failed to send cmd %x, no memory",
1985 			     func, line, cmd_id);
1986 		return QDF_STATUS_E_NOMEM;
1987 	}
1988 
1989 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
1990 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
1991 
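	/*
	 * pending_cmds is incremented here and decremented either in the HTC
	 * tx completion handler (wmi_htc_tx_complete) or on the error paths
	 * below.
	 */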
1992 	qdf_atomic_inc(&wmi_handle->pending_cmds);
1993 	if (qdf_atomic_read(&wmi_handle->pending_cmds) >=
1994 			wmi_handle->wmi_max_cmds) {
1995 		wmi_nofl_err("hostcredits = %d",
1996 			     wmi_get_host_credits(wmi_handle));
1997 		htc_dump_counter_info(wmi_handle->htc_handle);
1998 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1999 		wmi_nofl_err("%s:%d, MAX %d WMI Pending cmds reached",
2000 			     func, line, wmi_handle->wmi_max_cmds);
2001 		wmi_unified_debug_dump(wmi_handle);
2002 		htc_ce_tasklet_debug_dump(wmi_handle->htc_handle);
2003 		qdf_trigger_self_recovery(wmi_handle->soc->wmi_psoc,
2004 					  QDF_WMI_EXCEED_MAX_PENDING_CMDS);
2005 		return QDF_STATUS_E_BUSY;
2006 	}
2007 
2008 	pkt = qdf_mem_malloc_fl(sizeof(*pkt), func, line);
2009 	if (!pkt) {
2010 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2011 		return QDF_STATUS_E_NOMEM;
2012 	}
2013 
2014 	SET_HTC_PACKET_INFO_TX(pkt,
2015 			       NULL,
2016 			       qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
2017 			       wmi_handle->wmi_endpoint_id, htc_tag);
2018 
2019 	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);
2020 	wmi_log_cmd_id(cmd_id, htc_tag);
2021 	wmi_ext_dbg_msg_cmd_record(wmi_handle,
2022 				   qdf_nbuf_data(buf), qdf_nbuf_len(buf));
2023 #ifdef WMI_INTERFACE_EVENT_LOGGING
2024 	if (wmi_handle->log_info.wmi_logging_enable) {
2025 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2026 		/*
2027 		 * Record 16 bytes of WMI cmd data -
2028 		 * exclude TLV and WMI headers
2029 		 *
2030 		 * WMI mgmt command already recorded in wmi_mgmt_cmd_record
2031 		 */
2032 		if (wmi_handle->ops->is_management_record(cmd_id) == false) {
2033 			uint8_t *tmpbuf = (uint8_t *)qdf_nbuf_data(buf) +
2034 				wmi_handle->soc->buf_offset_command;
2035 
2036 			WMI_COMMAND_RECORD(wmi_handle, cmd_id, tmpbuf);
2037 			wmi_specific_cmd_record(wmi_handle, cmd_id, tmpbuf);
2038 		}
2039 
2040 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2041 	}
2042 #endif
2043 	return wmi_htc_send_pkt(wmi_handle, pkt, func, line);
2044 }
2045 qdf_export_symbol(wmi_unified_cmd_send_fl);
2046 
2047 /**
2048  * wmi_unified_get_event_handler_ix() - gives event handler's index
2049  * @wmi_handle: handle to wmi
2050  * @event_id: wmi event id
2051  *
2052  * Return: event handler's index, or -1 if no handler is registered
2053  */
2054 static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
2055 					    uint32_t event_id)
2056 {
2057 	uint32_t idx = 0;
2058 	int32_t invalid_idx = -1;
2059 	struct wmi_soc *soc = wmi_handle->soc;
2060 
2061 	for (idx = 0; (idx < soc->max_event_idx &&
2062 		       idx < WMI_UNIFIED_MAX_EVENT); ++idx) {
2063 		if (wmi_handle->event_id[idx] == event_id &&
2064 		    wmi_handle->event_handler[idx]) {
2065 			return idx;
2066 		}
2067 	}
2068 
2069 	return invalid_idx;
2070 }
2071 
2072 /**
2073  * wmi_register_event_handler_with_ctx() - register event handler with
2074  * exec ctx and buffer type
2075  * @wmi_handle: handle to wmi
2076  * @event_id: wmi event id
2077  * @handler_func: wmi event handler function
2078  * @rx_ctx: rx execution context for wmi rx events
2079  * @rx_buf_type: rx buffer type for wmi rx events
2080  *
2081  * Return: QDF_STATUS_SUCCESS on successful register event else failure.
2082  */
2083 static QDF_STATUS
2084 wmi_register_event_handler_with_ctx(wmi_unified_t wmi_handle,
2085 				    uint32_t event_id,
2086 				    wmi_unified_event_handler handler_func,
2087 				    enum wmi_rx_exec_ctx rx_ctx,
2088 				    enum wmi_rx_buff_type rx_buf_type)
2089 {
2090 	uint32_t idx = 0;
2091 	uint32_t evt_id;
2092 	struct wmi_soc *soc;
2093 
2094 	if (!wmi_handle) {
2095 		wmi_err("WMI handle is NULL");
2096 		return QDF_STATUS_E_FAILURE;
2097 	}
2098 
2099 	soc = wmi_handle->soc;
2100 
2101 	if (event_id >= wmi_events_max ||
2102 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2103 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2104 			  "%s: Event id %d is unavailable",
2105 					__func__, event_id);
2106 		return QDF_STATUS_E_FAILURE;
2107 	}
2108 	evt_id = wmi_handle->wmi_events[event_id];
2109 
2110 	if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) {
2111 		wmi_info("event handler already registered 0x%x", evt_id);
2112 		return QDF_STATUS_E_FAILURE;
2113 	}
2114 	if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
2115 		wmi_err("no more event handlers 0x%x",
2116 			 evt_id);
2117 		return QDF_STATUS_E_FAILURE;
2118 	}
2119 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
2120 		  "Registered event handler for event 0x%8x", evt_id);
2121 	idx = soc->max_event_idx;
2122 	wmi_handle->event_handler[idx] = handler_func;
2123 	wmi_handle->event_id[idx] = evt_id;
2124 
2125 	qdf_spin_lock_bh(&soc->ctx_lock);
2126 	wmi_handle->ctx[idx].exec_ctx = rx_ctx;
2127 	wmi_handle->ctx[idx].buff_type = rx_buf_type;
2128 	qdf_spin_unlock_bh(&soc->ctx_lock);
2129 	soc->max_event_idx++;
2130 
2131 	return QDF_STATUS_SUCCESS;
2132 }
2133 
2134 QDF_STATUS
2135 wmi_unified_register_event(wmi_unified_t wmi_handle,
2136 			   uint32_t event_id,
2137 			   wmi_unified_event_handler handler_func)
2138 {
2139 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2140 						   handler_func,
2141 						   WMI_RX_UMAC_CTX,
2142 						   WMI_RX_PROCESSED_BUFF);
2143 }
2144 
2145 QDF_STATUS
2146 wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
2147 				   wmi_conv_event_id event_id,
2148 				   wmi_unified_event_handler handler_func,
2149 				   uint8_t rx_ctx)
2150 {
2151 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2152 						   handler_func, rx_ctx,
2153 						   WMI_RX_PROCESSED_BUFF);
2154 }
2155 
2156 qdf_export_symbol(wmi_unified_register_event_handler);
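
/*
 * Illustrative registration (sketch; my_ready_handler is a hypothetical
 * wmi_unified_event_handler implemented by the caller):
 *
 *	status = wmi_unified_register_event_handler(wmi_handle,
 *						    wmi_ready_event_id,
 *						    my_ready_handler,
 *						    WMI_RX_UMAC_CTX);
 */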
2157 
2158 QDF_STATUS
2159 wmi_unified_register_raw_event_handler(wmi_unified_t wmi_handle,
2160 				       wmi_conv_event_id event_id,
2161 				       wmi_unified_event_handler handler_func,
2162 				       enum wmi_rx_exec_ctx rx_ctx)
2163 {
2164 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2165 						   handler_func, rx_ctx,
2166 						   WMI_RX_RAW_BUFF);
2167 }
2168 
2169 qdf_export_symbol(wmi_unified_register_raw_event_handler);
2170 
2171 QDF_STATUS wmi_unified_unregister_event(wmi_unified_t wmi_handle,
2172 					uint32_t event_id)
2173 {
2174 	uint32_t idx = 0;
2175 	uint32_t evt_id;
2176 	struct wmi_soc *soc = wmi_handle->soc;
2177 
2178 	if (event_id >= wmi_events_max ||
2179 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2180 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2181 			  "%s: Event id %d is unavailable",
2182 					__func__, event_id);
2183 		return QDF_STATUS_E_FAILURE;
2184 	}
2185 	evt_id = wmi_handle->wmi_events[event_id];
2186 
2187 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2188 	if (idx == -1) {
2189 		wmi_warn("event handler is not registered: evt id 0x%x",
2190 			 evt_id);
2191 		return QDF_STATUS_E_FAILURE;
2192 	}
2193 	wmi_handle->event_handler[idx] = NULL;
2194 	wmi_handle->event_id[idx] = 0;
2195 	--soc->max_event_idx;
2196 	wmi_handle->event_handler[idx] =
2197 		wmi_handle->event_handler[soc->max_event_idx];
2198 	wmi_handle->event_id[idx] =
2199 		wmi_handle->event_id[soc->max_event_idx];
2200 
2201 	return QDF_STATUS_SUCCESS;
2202 }
2203 
2204 QDF_STATUS wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
2205 						wmi_conv_event_id event_id)
2206 {
2207 	uint32_t idx = 0;
2208 	uint32_t evt_id;
2209 	struct wmi_soc *soc;
2210 
2211 	if (!wmi_handle) {
2212 		wmi_err("WMI handle is NULL");
2213 		return QDF_STATUS_E_FAILURE;
2214 	}
2215 
2216 	soc = wmi_handle->soc;
2217 
2218 	if (event_id >= wmi_events_max ||
2219 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2220 		wmi_err("Event id %d is unavailable", event_id);
2221 		return QDF_STATUS_E_FAILURE;
2222 	}
2223 	evt_id = wmi_handle->wmi_events[event_id];
2224 
2225 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2226 	if (idx == -1) {
2227 		wmi_err("event handler is not registered: evt id 0x%x",
2228 			 evt_id);
2229 		return QDF_STATUS_E_FAILURE;
2230 	}
2231 	wmi_handle->event_handler[idx] = NULL;
2232 	wmi_handle->event_id[idx] = 0;
2233 	--soc->max_event_idx;
2234 	wmi_handle->event_handler[idx] =
2235 		wmi_handle->event_handler[soc->max_event_idx];
2236 	wmi_handle->event_id[idx] =
2237 		wmi_handle->event_id[soc->max_event_idx];
2238 
2239 	return QDF_STATUS_SUCCESS;
2240 }
2241 qdf_export_symbol(wmi_unified_unregister_event_handler);
2242 
2243 static void
2244 wmi_process_rx_diag_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2245 					    void *evt_buf)
2246 {
2247 	uint32_t num_diag_events_pending;
2248 
2249 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
2250 	if (RX_DIAG_WQ_MAX_SIZE > 0) {
2251 		num_diag_events_pending = qdf_nbuf_queue_len(
2252 						&wmi_handle->diag_event_queue);
2253 
2254 		if (num_diag_events_pending == RX_DIAG_WQ_MAX_SIZE) {
2255 			qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2256 			wmi_handle->wmi_rx_diag_events_dropped++;
2257 			wmi_debug_rl("Rx diag events dropped count: %d",
2258 				     wmi_handle->wmi_rx_diag_events_dropped);
2259 			qdf_nbuf_free(evt_buf);
2260 			return;
2261 		}
2262 	}
2263 
2264 	qdf_nbuf_queue_add(&wmi_handle->diag_event_queue, evt_buf);
2265 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2266 	qdf_sched_work(0, &wmi_handle->rx_diag_event_work);
2267 }
2268 
2269 void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2270 					    void *evt_buf)
2271 {
2272 
2273 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
2274 	qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
2275 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
2276 	qdf_queue_work(0, wmi_handle->wmi_rx_work_queue,
2277 			&wmi_handle->rx_event_work);
2278 
2279 	return;
2280 }
2281 
2282 qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx);
2283 
2284 uint32_t wmi_critical_events_in_flight(struct wmi_unified *wmi)
2285 {
2286 	return qdf_atomic_read(&wmi->critical_events_in_flight);
2287 }
2288 
2289 static bool
2290 wmi_is_event_critical(struct wmi_unified *wmi_handle, uint32_t event_id)
2291 {
2292 	if (wmi_handle->wmi_events[wmi_roam_synch_event_id] == event_id)
2293 		return true;
2294 
2295 	return false;
2296 }
2297 
2298 static QDF_STATUS wmi_discard_fw_event(struct scheduler_msg *msg)
2299 {
2300 	struct wmi_process_fw_event_params *event_param;
2301 
2302 	if (!msg->bodyptr)
2303 		return QDF_STATUS_E_INVAL;
2304 
2305 	event_param = (struct wmi_process_fw_event_params *)msg->bodyptr;
2306 	qdf_nbuf_free(event_param->evt_buf);
2307 	qdf_mem_free(msg->bodyptr);
2308 	msg->bodyptr = NULL;
2309 	msg->bodyval = 0;
2310 	msg->type = 0;
2311 
2312 	return QDF_STATUS_SUCCESS;
2313 }
2314 
2315 static QDF_STATUS wmi_process_fw_event_handler(struct scheduler_msg *msg)
2316 {
2317 	struct wmi_process_fw_event_params *params =
2318 		(struct wmi_process_fw_event_params *)msg->bodyptr;
2319 	struct wmi_unified *wmi_handle;
2320 	uint32_t event_id;
2321 
2322 	wmi_handle = (struct wmi_unified *)params->wmi_handle;
2323 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params->evt_buf),
2324 				 WMI_CMD_HDR, COMMANDID);
2325 	wmi_process_fw_event(wmi_handle, params->evt_buf);
2326 
2327 	if (wmi_is_event_critical(wmi_handle, event_id))
2328 		qdf_atomic_dec(&wmi_handle->critical_events_in_flight);
2329 
2330 	qdf_mem_free(msg->bodyptr);
2331 
2332 	return QDF_STATUS_SUCCESS;
2333 }
2334 
2335 /**
2336  * wmi_process_fw_event_sched_thread_ctx() - common event handler to serialize
2337  *                                  event processing through scheduler thread
2338  * @wmi: wmi handle
2339  * @ev: event buffer
2340  *
2341  * Return: QDF_STATUS_SUCCESS on successful post to the scheduler,
2342  * error status otherwise
2343  */
2344 static QDF_STATUS
2345 wmi_process_fw_event_sched_thread_ctx(struct wmi_unified *wmi,
2346 				      void *ev)
2347 {
2348 	struct wmi_process_fw_event_params *params_buf;
2349 	struct scheduler_msg msg = { 0 };
2350 	uint32_t event_id;
2351 
2352 	params_buf = qdf_mem_malloc(sizeof(struct wmi_process_fw_event_params));
2353 	if (!params_buf) {
2354 		wmi_err("malloc failed");
2355 		qdf_nbuf_free(ev);
2356 		return QDF_STATUS_E_NOMEM;
2357 	}
2358 
2359 	params_buf->wmi_handle = wmi;
2360 	params_buf->evt_buf = ev;
2361 
2362 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params_buf->evt_buf),
2363 				 WMI_CMD_HDR, COMMANDID);
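	/*
	 * Critical events (e.g. roam synch) are counted while queued to the
	 * scheduler so callers of wmi_critical_events_in_flight() can wait
	 * for them to drain; the count is decremented after the event is
	 * processed.
	 */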
2364 	if (wmi_is_event_critical(wmi, event_id))
2365 		qdf_atomic_inc(&wmi->critical_events_in_flight);
2366 
2367 	msg.bodyptr = params_buf;
2368 	msg.bodyval = 0;
2369 	msg.callback = wmi_process_fw_event_handler;
2370 	msg.flush_callback = wmi_discard_fw_event;
2371 
2372 	if (QDF_STATUS_SUCCESS !=
2373 		scheduler_post_message(QDF_MODULE_ID_TARGET_IF,
2374 				       QDF_MODULE_ID_TARGET_IF,
2375 				       QDF_MODULE_ID_TARGET_IF, &msg)) {
2376 		qdf_nbuf_free(ev);
2377 		qdf_mem_free(params_buf);
2378 		return QDF_STATUS_E_FAULT;
2379 	}
2380 
2381 	return QDF_STATUS_SUCCESS;
2382 }
2383 
2384 /**
2385  * wmi_get_pdev_ep: Get wmi handle based on endpoint
2386  * @soc: handle to wmi soc
2387  * @ep: endpoint id
2388  *
2389  * Return: wmi handle of the pdev mapped to the endpoint, or NULL
2390  */
2391 static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc,
2392 						HTC_ENDPOINT_ID ep)
2393 {
2394 	uint32_t i;
2395 
2396 	for (i = 0; i < WMI_MAX_RADIOS; i++)
2397 		if (soc->wmi_endpoint_id[i] == ep)
2398 			break;
2399 
2400 	if (i == WMI_MAX_RADIOS)
2401 		return NULL;
2402 
2403 	return soc->wmi_pdev[i];
2404 }
2405 
2406 /**
2407  * wmi_mtrace_rx() - Wrapper function for the qdf_mtrace api
2408  * @message_id: 32-bit WMI message ID
2409  * @vdev_id: Vdev ID
2410  * @data: Actual message contents
2411  *
2412  * This function converts the 32-bit WMI message ID into the 15-bit message
2413  * ID format used by qdf_mtrace, since a qdf_mtrace message reserves only
2414  * 15 bits for the message ID.
2415  * As packed below, the lower bits carry the command number within its
2416  * group and the upper bits carry the WMI_GRP_ID, so up to 256 groups
2417  * with a maximum of 128 commands per group can be represented.
2419  *
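 * For example, assuming QDF_WMI_MTRACE_CMD_NUM_BITS is 7, a message in
 * group 3 with command number 5 within that group would be traced as
 * (3 << 7) | 5 = 0x185.
 *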
2420  * Return: None
2421  */
2422 static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data)
2423 {
2424 	uint16_t mtrace_message_id;
2425 
2426 	mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) |
2427 		(QDF_WMI_MTRACE_GRP_ID(message_id) <<
2428 						QDF_WMI_MTRACE_CMD_NUM_BITS);
2429 	qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA,
2430 		   mtrace_message_id, vdev_id, data);
2431 }
2432 
2433 /**
2434  * wmi_process_control_rx() - process fw events callbacks
2435  * @wmi_handle: handle to wmi_unified
2436  * @evt_buf: handle to wmi_buf_t
2437  *
2438  * Return: none
2439  */
2440 static void wmi_process_control_rx(struct wmi_unified *wmi_handle,
2441 				   wmi_buf_t evt_buf)
2442 {
2443 	struct wmi_soc *soc = wmi_handle->soc;
2444 	uint32_t id;
2445 	uint32_t idx;
2446 	enum wmi_rx_exec_ctx exec_ctx;
2447 
2448 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2449 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2450 	if (qdf_unlikely(idx == A_ERROR)) {
2451 		wmi_debug("no handler registered for event id 0x%x", id);
2452 		qdf_nbuf_free(evt_buf);
2453 		return;
2454 	}
2455 	wmi_mtrace_rx(id, 0xFF, idx);
2456 	qdf_spin_lock_bh(&soc->ctx_lock);
2457 	exec_ctx = wmi_handle->ctx[idx].exec_ctx;
2458 	qdf_spin_unlock_bh(&soc->ctx_lock);
2459 
2460 #ifdef WMI_INTERFACE_EVENT_LOGGING
2461 	if (wmi_handle->log_info.wmi_logging_enable) {
2462 		uint8_t *data;
2463 		data = qdf_nbuf_data(evt_buf);
2464 
2465 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2466 		/* Exclude 4 bytes of TLV header */
2467 		if (wmi_handle->ops->is_diag_event(id)) {
2468 			WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id,
2469 				((uint8_t *) data +
2470 				wmi_handle->soc->buf_offset_event));
2471 		} else if (wmi_handle->ops->is_management_record(id)) {
2472 			WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id,
2473 				((uint8_t *) data +
2474 				wmi_handle->soc->buf_offset_event));
2475 		} else {
2476 			WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data +
2477 				wmi_handle->soc->buf_offset_event));
2478 		}
2479 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2480 	}
2481 #endif
2482 
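	/*
	 * Dispatch the event to the execution context chosen when the handler
	 * was registered: the WMI rx workqueue, the caller (tasklet) context,
	 * the scheduler thread, or the dedicated diag workqueue.
	 */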
2483 	if (exec_ctx == WMI_RX_WORK_CTX) {
2484 		wmi_process_fw_event_worker_thread_ctx(wmi_handle, evt_buf);
2485 	} else if (exec_ctx == WMI_RX_TASKLET_CTX) {
2486 		wmi_process_fw_event(wmi_handle, evt_buf);
2487 	} else if (exec_ctx == WMI_RX_SERIALIZER_CTX) {
2488 		wmi_process_fw_event_sched_thread_ctx(wmi_handle, evt_buf);
2489 	} else if (exec_ctx == WMI_RX_DIAG_WORK_CTX) {
2490 		wmi_process_rx_diag_event_worker_thread_ctx(wmi_handle,
2491 							    evt_buf);
2492 	} else {
2493 		wmi_err("Invalid event context %d", exec_ctx);
2494 		qdf_nbuf_free(evt_buf);
2495 	}
2496 
2497 }
2498 
2499 /**
2500  * wmi_control_rx() - process fw events callbacks
2501  * @ctx: handle to wmi
2502  * @htc_packet: pointer to htc packet
2503  *
2504  * Return: none
2505  */
2506 static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
2507 {
2508 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2509 	struct wmi_unified *wmi_handle;
2510 	wmi_buf_t evt_buf;
2511 
2512 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2513 
2514 	wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint);
2515 	if (!wmi_handle) {
2516 		wmi_err("unable to get wmi_handle to Endpoint %d",
2517 			htc_packet->Endpoint);
2518 		qdf_nbuf_free(evt_buf);
2519 		return;
2520 	}
2521 
2522 	wmi_process_control_rx(wmi_handle, evt_buf);
2523 }
2524 
2525 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
2526 /**
2527  * wmi_control_diag_rx() - process diag fw events callbacks
2528  * @ctx: handle to wmi
2529  * @htc_packet: pointer to htc packet
2530  *
2531  * Return: none
2532  */
2533 static void wmi_control_diag_rx(void *ctx, HTC_PACKET *htc_packet)
2534 {
2535 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2536 	struct wmi_unified *wmi_handle;
2537 	wmi_buf_t evt_buf;
2538 
2539 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2540 
2541 	wmi_handle = soc->wmi_pdev[0];
2542 	if (!wmi_handle) {
2543 		wmi_err("unable to get wmi_handle for diag event end point id:%d", htc_packet->Endpoint);
2544 		qdf_nbuf_free(evt_buf);
2545 		return;
2546 	}
2547 
2548 	wmi_process_control_rx(wmi_handle, evt_buf);
2549 }
2550 #endif
2551 
2552 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
2553 QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle,
2554 					 wmi_buf_t buf, uint32_t buflen,
2555 					 uint32_t cmd_id)
2556 {
2557 	QDF_STATUS status;
2558 	int32_t ret;
2559 
2560 	if (!qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR))) {
2561 		wmi_err("Failed to send cmd %x, no memory", cmd_id);
2562 		return QDF_STATUS_E_NOMEM;
2563 	}
2564 
2565 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2566 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2567 	wmi_debug("Sending WMI_CMD_ID: 0x%x over qmi", cmd_id);
2568 	status = qdf_wmi_send_recv_qmi(qdf_nbuf_data(buf),
2569 				       buflen + sizeof(WMI_CMD_HDR),
2570 				       wmi_handle,
2571 				       wmi_process_qmi_fw_event);
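	/*
	 * On QMI send failure, strip the WMI header that was just pushed so
	 * the caller can retry the same buffer over the regular HTC path.
	 */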
2572 	if (QDF_IS_STATUS_ERROR(status)) {
2573 		qdf_nbuf_pull_head(buf, sizeof(WMI_CMD_HDR));
2574 		wmi_warn("WMI send on QMI failed. Retrying WMI on HTC");
2575 	} else {
2576 		ret = qdf_atomic_inc_return(&wmi_handle->num_stats_over_qmi);
2577 		wmi_debug("num stats over qmi: %d", ret);
2578 		wmi_buf_free(buf);
2579 	}
2580 
2581 	return status;
2582 }
2583 
2584 static int __wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2585 {
2586 	struct wmi_unified *wmi_handle = wmi_cb_ctx;
2587 	wmi_buf_t evt_buf;
2588 	uint32_t evt_id;
2589 
2590 	if (!wmi_handle || !buf)
2591 		return -EINVAL;
2592 
2593 	evt_buf = wmi_buf_alloc(wmi_handle, len);
2594 	if (!evt_buf)
2595 		return -ENOMEM;
2596 
2597 	qdf_mem_copy(qdf_nbuf_data(evt_buf), buf, len);
2598 	evt_id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2599 	wmi_debug("Received WMI_EVT_ID: %d over qmi", evt_id);
2600 	wmi_process_control_rx(wmi_handle, evt_buf);
2601 
2602 	return 0;
2603 }
2604 
2605 int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2606 {
2607 	struct qdf_op_sync *op_sync;
2608 	int ret;
2609 
2610 	if (qdf_op_protect(&op_sync))
2611 		return -EINVAL;
2612 	ret = __wmi_process_qmi_fw_event(wmi_cb_ctx, buf, len);
2613 	qdf_op_unprotect(op_sync);
2614 
2615 	return ret;
2616 }
2617 #endif
2618 
2619 /**
2620  * wmi_process_fw_event() - process any fw event
2621  * @wmi_handle: wmi handle
2622  * @evt_buf: fw event buffer
2623  *
2624  * This function processes the fw event in the caller's context
2625  *
2626  * Return: none
2627  */
2628 void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2629 {
2630 	__wmi_control_rx(wmi_handle, evt_buf);
2631 }
2632 
2633 /**
2634  * __wmi_control_rx() - process serialize wmi event callback
2635  * @wmi_handle: wmi handle
2636  * @evt_buf: fw event buffer
2637  *
2638  * Return: none
2639  */
2640 void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2641 {
2642 	uint32_t id;
2643 	uint8_t *data;
2644 	uint32_t len;
2645 	void *wmi_cmd_struct_ptr = NULL;
2646 #ifndef WMI_NON_TLV_SUPPORT
2647 	int tlv_ok_status = 0;
2648 #endif
2649 	uint32_t idx = 0;
2650 	struct wmi_raw_event_buffer ev_buf;
2651 	enum wmi_rx_buff_type ev_buff_type;
2652 
2653 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2654 
2655 	wmi_ext_dbg_msg_event_record(wmi_handle, qdf_nbuf_data(evt_buf),
2656 				     qdf_nbuf_len(evt_buf));
2657 
2658 	if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
2659 		goto end;
2660 
2661 	data = qdf_nbuf_data(evt_buf);
2662 	len = qdf_nbuf_len(evt_buf);
2663 
2664 #ifndef WMI_NON_TLV_SUPPORT
2665 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2666 		/* Validate and pad(if necessary) the TLVs */
2667 		tlv_ok_status =
2668 			wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle,
2669 							data, len, id,
2670 							&wmi_cmd_struct_ptr);
2671 		if (tlv_ok_status != 0) {
2672 			QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2673 				  "%s: Error: id=0x%x, wmitlv check status=%d",
2674 				  __func__, id, tlv_ok_status);
2675 			goto end;
2676 		}
2677 	}
2678 #endif
2679 
2680 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2681 	if (idx == A_ERROR) {
2682 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2683 		   "%s : event handler is not registered: event id 0x%x",
2684 			__func__, id);
2685 		goto end;
2686 	}
2687 #ifdef WMI_INTERFACE_EVENT_LOGGING
2688 	if (wmi_handle->log_info.wmi_logging_enable) {
2689 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2690 		/* Exclude 4 bytes of TLV header */
2691 		if (wmi_handle->ops->is_diag_event(id)) {
2692 			/*
2693 			 * skip diag event logging in WMI event buffer
2694 			 * as its already logged in WMI RX event buffer
2695 			 */
2696 		} else if (wmi_handle->ops->is_management_record(id)) {
2697 			/*
2698 			 * skip wmi mgmt event logging in WMI event buffer
2699 			 * as its already logged in WMI RX event buffer
2700 			 */
2701 		} else {
2702 			uint8_t *tmpbuf = (uint8_t *)data +
2703 					wmi_handle->soc->buf_offset_event;
2704 
2705 			WMI_EVENT_RECORD(wmi_handle, id, tmpbuf);
2706 			wmi_specific_evt_record(wmi_handle, id, tmpbuf);
2707 		}
2708 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2709 	}
2710 #endif
2711 	/* Call the WMI registered event handler */
2712 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2713 		ev_buff_type = wmi_handle->ctx[idx].buff_type;
2714 		if (ev_buff_type == WMI_RX_PROCESSED_BUFF) {
2715 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2716 				wmi_cmd_struct_ptr, len);
2717 		} else if (ev_buff_type == WMI_RX_RAW_BUFF) {
2718 			ev_buf.evt_raw_buf = data;
2719 			ev_buf.evt_processed_buf = wmi_cmd_struct_ptr;
2720 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2721 							(void *)&ev_buf, len);
2722 		}
2723 	} else
2725 		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2726 			data, len);
2727 
2728 end:
2729 	/* Free event buffer and allocated event tlv */
2730 #ifndef WMI_NON_TLV_SUPPORT
2731 	if (wmi_handle->target_type == WMI_TLV_TARGET)
2732 		wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr);
2733 #endif
2734 
2735 	qdf_nbuf_free(evt_buf);
2736 
2737 }
2738 
2739 #define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */
2740 
2741 static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id)
2742 {
2743 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2744 		  "%s: WLAN_BUG_RCA: Message type %x has exceeded its allotted time of %ds",
2745 		  __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000);
2746 }
2747 
2748 #ifdef CONFIG_SLUB_DEBUG_ON
2749 static void wmi_workqueue_watchdog_bite(void *arg)
2750 {
2751 	struct wmi_wq_dbg_info *info = arg;
2752 
2753 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
2754 	qdf_print_thread_trace(info->task);
2755 
2756 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2757 		  "%s: Going down for WMI WQ Watchdog Bite!", __func__);
2758 	QDF_BUG(0);
2759 }
2760 #else
2761 static inline void wmi_workqueue_watchdog_bite(void *arg)
2762 {
2763 	struct wmi_wq_dbg_info *info = arg;
2764 
2765 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
2766 
2767 	qdf_print_thread_trace(info->task);
2768 }
2769 #endif
2770 
2771 /**
2772  * wmi_rx_event_work() - process rx event in rx work queue context
2773  * @arg: opaque pointer to wmi handle
2774  *
2775  * This function process any fw event to serialize it through rx worker thread.
2776  * This function processes fw events serialized through the rx worker thread.
2777  * Return: none
2778  */
2779 static void wmi_rx_event_work(void *arg)
2780 {
2781 	wmi_buf_t buf;
2782 	struct wmi_unified *wmi = arg;
2783 	qdf_timer_t wd_timer;
2784 	struct wmi_wq_dbg_info info;
2785 
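	/*
	 * Each dequeued event is processed under a watchdog: if a single
	 * handler runs longer than WMI_WQ_WD_TIMEOUT, the watchdog warns and
	 * dumps the stuck thread (and asserts on CONFIG_SLUB_DEBUG_ON builds).
	 */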
2786 	/* initialize WMI workqueue watchdog timer */
2787 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
2788 			&info, QDF_TIMER_TYPE_SW);
2789 	qdf_spin_lock_bh(&wmi->eventq_lock);
2790 	buf = qdf_nbuf_queue_remove(&wmi->event_queue);
2791 	qdf_spin_unlock_bh(&wmi->eventq_lock);
2792 	while (buf) {
2793 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
2794 		info.wd_msg_type_id =
2795 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
2796 		info.wmi_wq = wmi->wmi_rx_work_queue;
2797 		info.task = qdf_get_current_task();
2798 		__wmi_control_rx(wmi, buf);
2799 		qdf_timer_stop(&wd_timer);
2800 		qdf_spin_lock_bh(&wmi->eventq_lock);
2801 		buf = qdf_nbuf_queue_remove(&wmi->event_queue);
2802 		qdf_spin_unlock_bh(&wmi->eventq_lock);
2803 	}
2804 	qdf_timer_free(&wd_timer);
2805 }
2806 
2807 /**
2808  * wmi_rx_diag_event_work() - process rx diag event in work queue context
2809  * @arg: opaque pointer to wmi handle
2810  *
2811  * This function process fw diag event to serialize it through rx worker thread.
2812  * This function processes fw diag events serialized through the rx worker thread.
2813  * Return: none
2814  */
2815 static void wmi_rx_diag_event_work(void *arg)
2816 {
2817 	wmi_buf_t buf;
2818 	struct wmi_unified *wmi = arg;
2819 	qdf_timer_t wd_timer;
2820 	struct wmi_wq_dbg_info info;
2821 
2822 	if (!wmi) {
2823 		wmi_err("Invalid WMI handle");
2824 		return;
2825 	}
2826 
2827 	/* initialize WMI workqueue watchdog timer */
2828 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
2829 		       &info, QDF_TIMER_TYPE_SW);
2830 	qdf_spin_lock_bh(&wmi->diag_eventq_lock);
2831 	buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
2832 	qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
2833 	while (buf) {
2834 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
2835 		info.wd_msg_type_id =
2836 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
2837 		info.wmi_wq = NULL;
2838 		info.task = qdf_get_current_task();
2839 		__wmi_control_rx(wmi, buf);
2840 		qdf_timer_stop(&wd_timer);
2841 		qdf_spin_lock_bh(&wmi->diag_eventq_lock);
2842 		buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
2843 		qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
2844 	}
2845 	qdf_timer_free(&wd_timer);
2846 }
2847 
2848 #ifdef FEATURE_RUNTIME_PM
2849 /**
2850  * wmi_runtime_pm_init() - initialize runtime pm wmi variables
2851  * @wmi_handle: wmi context
2852  */
2853 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
2854 {
2855 	qdf_atomic_init(&wmi_handle->runtime_pm_inprogress);
2856 }
2857 
2858 /**
2859  * wmi_set_runtime_pm_inprogress() - set runtime pm progress flag
2860  * @wmi_handle: wmi context
2861  * @val: runtime pm progress flag
2862  */
2863 void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val)
2864 {
2865 	qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val);
2866 }
2867 
2868 /**
2869  * wmi_get_runtime_pm_inprogress() - get runtime pm progress flag
2870  * @wmi_handle: wmi context
2871  */
2872 inline bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle)
2873 {
2874 	return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress);
2875 }
2876 #else
2877 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
2878 {
2879 }
2880 #endif
2881 
2882 /**
2883  * wmi_unified_get_soc_handle: Get WMI SoC handle
2884  * @param wmi_handle: WMI context obtained from wmi_unified_attach()
2885  *
2886  * return: Pointer to Soc handle
2887  */
2888 void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle)
2889 {
2890 	return wmi_handle->soc;
2891 }
2892 
2893 /**
2894  * wmi_interface_logging_init: Interface logging init
2895  * @param wmi_handle: Pointer to wmi handle object
2896  *
2897  * return: None
2898  */
2899 #ifdef WMI_INTERFACE_EVENT_LOGGING
2900 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
2901 					      uint32_t pdev_idx)
2902 {
2903 	if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) {
2904 		qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
2905 		wmi_debugfs_init(wmi_handle, pdev_idx);
2906 	}
2907 }
2908 #else
2909 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
2910 					      uint32_t pdev_idx)
2911 {
2912 }
2913 #endif
2914 
2915 static QDF_STATUS wmi_initialize_worker_context(struct wmi_unified *wmi_handle)
2916 {
2917 	wmi_handle->wmi_rx_work_queue =
2918 		qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue");
2919 	if (!wmi_handle->wmi_rx_work_queue) {
2920 		wmi_err("failed to create wmi_rx_event_work_queue");
2921 		return QDF_STATUS_E_RESOURCES;
2922 	}
2923 
2924 	qdf_spinlock_create(&wmi_handle->eventq_lock);
2925 	qdf_nbuf_queue_init(&wmi_handle->event_queue);
2926 	qdf_create_work(0, &wmi_handle->rx_event_work,
2927 			wmi_rx_event_work, wmi_handle);
2928 	qdf_spinlock_create(&wmi_handle->diag_eventq_lock);
2929 	qdf_nbuf_queue_init(&wmi_handle->diag_event_queue);
2930 	qdf_create_work(0, &wmi_handle->rx_diag_event_work,
2931 			wmi_rx_diag_event_work, wmi_handle);
2932 	wmi_handle->wmi_rx_diag_events_dropped = 0;
2933 
2934 	return QDF_STATUS_SUCCESS;
2935 }
2936 
2937 /**
2938  * wmi_unified_get_pdev_handle: Get WMI pdev handle
2939  * @param wmi_soc: Pointer to wmi soc object
2940  * @param pdev_idx: pdev index
2941  *
2942  * return: Pointer to wmi handle or NULL on failure
2943  */
2944 void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx)
2945 {
2946 	struct wmi_unified *wmi_handle;
2947 	QDF_STATUS status;
2948 
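	/*
	 * The per-pdev wmi handle is created lazily on first request and
	 * cached in soc->wmi_pdev[]; endpoint, HTC handle and message-size
	 * parameters are refreshed from the soc on every call.
	 */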
2949 	if (pdev_idx >= WMI_MAX_RADIOS)
2950 		return NULL;
2951 
2952 	if (!soc->wmi_pdev[pdev_idx]) {
2953 		wmi_handle =
2954 			(struct wmi_unified *) qdf_mem_malloc(
2955 					sizeof(struct wmi_unified));
2956 		if (!wmi_handle)
2957 			return NULL;
2958 
2959 		status = wmi_initialize_worker_context(wmi_handle);
2960 		if (QDF_IS_STATUS_ERROR(status))
2961 			goto error;
2962 
2963 		wmi_handle->scn_handle = soc->scn_handle;
2964 		wmi_handle->event_id = soc->event_id;
2965 		wmi_handle->event_handler = soc->event_handler;
2966 		wmi_handle->ctx = soc->ctx;
2967 		wmi_handle->ops = soc->ops;
2968 		wmi_handle->wmi_events = soc->wmi_events;
2969 		wmi_handle->services = soc->services;
2970 		wmi_handle->soc = soc;
2971 		wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
2972 		wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
2973 		wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
2974 		wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
2975 		wmi_interface_logging_init(wmi_handle, pdev_idx);
2976 		qdf_atomic_init(&wmi_handle->pending_cmds);
2977 		qdf_atomic_init(&wmi_handle->is_target_suspended);
2978 		wmi_handle->target_type = soc->target_type;
2979 		wmi_handle->wmi_max_cmds = soc->wmi_max_cmds;
2980 
2981 		wmi_interface_sequence_init(wmi_handle);
2982 		if (wmi_ext_dbgfs_init(wmi_handle, pdev_idx) !=
2983 		    QDF_STATUS_SUCCESS)
2984 			wmi_err("Failed to initialize wmi extended debugfs");
2985 
2986 		soc->wmi_pdev[pdev_idx] = wmi_handle;
2987 	} else
2988 		wmi_handle = soc->wmi_pdev[pdev_idx];
2989 
2990 	wmi_handle->wmi_stopinprogress = 0;
2991 	wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx];
2992 	wmi_handle->htc_handle = soc->htc_handle;
2993 	wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx];
2994 	wmi_handle->tag_crash_inject = false;
2995 	wmi_interface_sequence_reset(wmi_handle);
2996 
2997 	return wmi_handle;
2998 
2999 error:
3000 	qdf_mem_free(wmi_handle);
3001 
3002 	return NULL;
3003 }
3004 qdf_export_symbol(wmi_unified_get_pdev_handle);
3005 
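/*
 * Per-target attach routines (e.g. the TLV implementation) register
 * themselves here via wmi_unified_register_module() and are invoked from
 * wmi_unified_attach() based on the target type.
 */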
3006 static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t);
3007 
3008 void wmi_unified_register_module(enum wmi_target_type target_type,
3009 			void (*wmi_attach)(wmi_unified_t wmi_handle))
3010 {
3011 	if (target_type < WMI_MAX_TARGET_TYPE)
3012 		wmi_attach_register[target_type] = wmi_attach;
3013 
3014 	return;
3015 }
3016 qdf_export_symbol(wmi_unified_register_module);
3017 
3018 /**
3019  * wmi_wbuff_register() - register wmi with wbuff
3020  * @wmi_handle: handle to wmi
3021  *
3022  * @Return: void
3023  */
3024 static void wmi_wbuff_register(struct wmi_unified *wmi_handle)
3025 {
3026 	struct wbuff_alloc_request wbuff_alloc[4];
3027 
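	/*
	 * Pre-register fixed-size buffer pools with the wbuff module so that
	 * wmi_buf_alloc() can serve common command sizes from wbuff_buff_get()
	 * before falling back to the regular nbuf allocator.
	 */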
3028 	wbuff_alloc[0].slot = WBUFF_POOL_0;
3029 	wbuff_alloc[0].size = WMI_WBUFF_POOL_0_SIZE;
3030 	wbuff_alloc[1].slot = WBUFF_POOL_1;
3031 	wbuff_alloc[1].size = WMI_WBUFF_POOL_1_SIZE;
3032 	wbuff_alloc[2].slot = WBUFF_POOL_2;
3033 	wbuff_alloc[2].size = WMI_WBUFF_POOL_2_SIZE;
3034 	wbuff_alloc[3].slot = WBUFF_POOL_3;
3035 	wbuff_alloc[3].size = WMI_WBUFF_POOL_3_SIZE;
3036 
3037 	wmi_handle->wbuff_handle = wbuff_module_register(wbuff_alloc, 4,
3038 							 WMI_MIN_HEAD_ROOM, 4);
3039 }
3040 
3041 /**
3042  * wmi_wbuff_deregister() - deregister wmi with wbuff
3043  * @wmi_handle: handle to wmi
3044  *
3045  * @Return: void
3046  */
3047 static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle)
3048 {
3049 	wbuff_module_deregister(wmi_handle->wbuff_handle);
3050 	wmi_handle->wbuff_handle = NULL;
3051 }
3052 
3053 /**
3054  * wmi_unified_attach() -  attach for unified WMI
3055  * @scn_handle: handle to SCN
3056  * @param: attach parameters (OS device context, TLV or non-TLV target type,
3057  *         cookie based allocation enabled/disabled, umac rx callbacks and
3058  *         objmgr psoc)
3061  *
3062  * @Return: wmi handle.
3063  */
3064 void *wmi_unified_attach(void *scn_handle,
3065 			 struct wmi_unified_attach_params *param)
3066 {
3067 	struct wmi_unified *wmi_handle;
3068 	struct wmi_soc *soc;
3069 	QDF_STATUS status;
3070 
3071 	soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc));
3072 	if (!soc)
3073 		return NULL;
3074 
3075 	wmi_handle =
3076 		(struct wmi_unified *) qdf_mem_malloc(
3077 			sizeof(struct wmi_unified));
3078 	if (!wmi_handle) {
3079 		qdf_mem_free(soc);
3080 		return NULL;
3081 	}
3082 
3083 	status = wmi_initialize_worker_context(wmi_handle);
3084 	if (QDF_IS_STATUS_ERROR(status))
3085 		goto error;
3086 
3087 	wmi_handle->soc = soc;
3088 	wmi_handle->soc->soc_idx = param->soc_id;
3089 	wmi_handle->soc->is_async_ep = param->is_async_ep;
3090 	wmi_handle->event_id = soc->event_id;
3091 	wmi_handle->event_handler = soc->event_handler;
3092 	wmi_handle->ctx = soc->ctx;
3093 	wmi_handle->wmi_events = soc->wmi_events;
3094 	wmi_handle->services = soc->services;
3095 	wmi_handle->scn_handle = scn_handle;
3096 	wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3097 	wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3098 	wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3099 	wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3100 	soc->scn_handle = scn_handle;
3101 	wmi_handle->target_type = param->target_type;
3102 	soc->target_type = param->target_type;
3103 
3104 	if (param->target_type >= WMI_MAX_TARGET_TYPE)
3105 		goto error;
3106 
3107 	if (wmi_attach_register[param->target_type]) {
3108 		wmi_attach_register[param->target_type](wmi_handle);
3109 	} else {
3110 		wmi_err("wmi attach is not registered");
3111 		goto error;
3112 	}
3113 
3114 	qdf_atomic_init(&wmi_handle->pending_cmds);
3115 	qdf_atomic_init(&wmi_handle->is_target_suspended);
3116 	qdf_atomic_init(&wmi_handle->is_target_suspend_acked);
3117 	qdf_atomic_init(&wmi_handle->num_stats_over_qmi);
3118 	wmi_runtime_pm_init(wmi_handle);
3119 	wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0);
3120 
3121 	wmi_interface_sequence_init(wmi_handle);
3122 	/* Assign target cookie capability */
3123 	wmi_handle->use_cookie = param->use_cookie;
3124 	wmi_handle->osdev = param->osdev;
3125 	wmi_handle->wmi_stopinprogress = 0;
3126 	wmi_handle->wmi_max_cmds = param->max_commands;
3127 	soc->wmi_max_cmds = param->max_commands;
3128 	/* Increase the ref count once refcount infra is present */
3129 	soc->wmi_psoc = param->psoc;
3130 	qdf_spinlock_create(&soc->ctx_lock);
3131 	soc->ops = wmi_handle->ops;
3132 	soc->wmi_pdev[0] = wmi_handle;
3133 	if (wmi_ext_dbgfs_init(wmi_handle, 0) != QDF_STATUS_SUCCESS)
3134 		wmi_err("Failed to initialize wmi extended debugfs");
3135 
3136 	wmi_wbuff_register(wmi_handle);
3137 
3138 	wmi_hang_event_notifier_register(wmi_handle);
3139 
3140 	return wmi_handle;
3141 
3142 error:
3143 	qdf_mem_free(soc);
3144 	qdf_mem_free(wmi_handle);
3145 
3146 	return NULL;
3147 }
3148 
3149 /**
3150  * wmi_unified_detach() -  detach for unified WMI
3151  *
3152  * @wmi_handle  : handle to wmi.
3153  *
3154  * @Return: none.
3155  */
3156 void wmi_unified_detach(struct wmi_unified *wmi_handle)
3157 {
3158 	wmi_buf_t buf;
3159 	struct wmi_soc *soc;
3160 	uint8_t i;
3161 
3162 	wmi_hang_event_notifier_unregister();
3163 
3164 	wmi_wbuff_deregister(wmi_handle);
3165 
3166 	soc = wmi_handle->soc;
3167 	for (i = 0; i < WMI_MAX_RADIOS; i++) {
3168 		if (soc->wmi_pdev[i]) {
3169 			qdf_flush_workqueue(0,
3170 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3171 			qdf_destroy_workqueue(0,
3172 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3173 			wmi_debugfs_remove(soc->wmi_pdev[i]);
3174 			buf = qdf_nbuf_queue_remove(
3175 					&soc->wmi_pdev[i]->event_queue);
3176 			while (buf) {
3177 				qdf_nbuf_free(buf);
3178 				buf = qdf_nbuf_queue_remove(
3179 						&soc->wmi_pdev[i]->event_queue);
3180 			}
3181 
3182 			qdf_flush_work(&soc->wmi_pdev[i]->rx_diag_event_work);
3183 			buf = qdf_nbuf_queue_remove(
3184 					&soc->wmi_pdev[i]->diag_event_queue);
3185 			while (buf) {
3186 				qdf_nbuf_free(buf);
3187 				buf = qdf_nbuf_queue_remove(
3188 					&soc->wmi_pdev[i]->diag_event_queue);
3189 			}
3190 
3191 			wmi_log_buffer_free(soc->wmi_pdev[i]);
3192 
3193 			/* Free events logs list */
3194 			if (soc->wmi_pdev[i]->events_logs_list)
3195 				qdf_mem_free(
3196 					soc->wmi_pdev[i]->events_logs_list);
3197 
3198 			qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock);
3199 			qdf_spinlock_destroy(
3200 					&soc->wmi_pdev[i]->diag_eventq_lock);
3201 
3202 			wmi_interface_sequence_deinit(soc->wmi_pdev[i]);
3203 			wmi_ext_dbgfs_deinit(soc->wmi_pdev[i]);
3204 
3205 			qdf_mem_free(soc->wmi_pdev[i]);
3206 		}
3207 	}
3208 	qdf_spinlock_destroy(&soc->ctx_lock);
3209 
3210 	if (soc->wmi_service_bitmap) {
3211 		qdf_mem_free(soc->wmi_service_bitmap);
3212 		soc->wmi_service_bitmap = NULL;
3213 	}
3214 
3215 	if (soc->wmi_ext_service_bitmap) {
3216 		qdf_mem_free(soc->wmi_ext_service_bitmap);
3217 		soc->wmi_ext_service_bitmap = NULL;
3218 	}
3219 
3220 	if (soc->wmi_ext2_service_bitmap) {
3221 		qdf_mem_free(soc->wmi_ext2_service_bitmap);
3222 		soc->wmi_ext2_service_bitmap = NULL;
3223 	}
3224 
3225 	/* Decrease the ref count once refcount infra is present */
3226 	soc->wmi_psoc = NULL;
3227 	qdf_mem_free(soc);
3228 }
3229 
3230 /**
3231  * wmi_unified_remove_work() - detach for WMI work
3232  * @wmi_handle: handle to WMI
3233  *
3234  * A function that does not fully detach WMI, but just removes work
3235  * queue items associated with it. This is used to make sure that
3236  * before any other processing code that may destroy related contexts
3237  * (HTC, etc), work queue processing on WMI has already been stopped.
3238  *
3239  * Return: None
3240  */
3241 void
3242 wmi_unified_remove_work(struct wmi_unified *wmi_handle)
3243 {
3244 	wmi_buf_t buf;
3245 
3246 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue);
3247 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
3248 	buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3249 	while (buf) {
3250 		qdf_nbuf_free(buf);
3251 		buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3252 	}
3253 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
3254 
3255 	/* Remove diag events work */
3256 	qdf_flush_work(&wmi_handle->rx_diag_event_work);
3257 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
3258 	buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3259 	while (buf) {
3260 		qdf_nbuf_free(buf);
3261 		buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3262 	}
3263 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
3264 }
3265 
3266 /**
3267  * wmi_htc_tx_complete() - Process htc tx completion
3268  *
3269  * @ctx: handle to wmi
3270  * @htc_pkt: pointer to htc packet
3271  *
3272  * @Return: none.
3273  */
3274 static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
3275 {
3276 	struct wmi_soc *soc = (struct wmi_soc *) ctx;
3277 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3278 	u_int8_t *buf_ptr;
3279 	u_int32_t len;
3280 	struct wmi_unified *wmi_handle;
3281 #ifdef WMI_INTERFACE_EVENT_LOGGING
3282 	struct wmi_debug_log_info *log_info;
3283 	uint32_t cmd_id;
3284 	uint8_t *offset_ptr;
3285 	qdf_dma_addr_t dma_addr;
3286 	uint64_t phy_addr;
3287 #endif
3288 
3289 	ASSERT(wmi_cmd_buf);
3290 	wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint);
3291 	if (!wmi_handle) {
3292 		wmi_err("Unable to get wmi handle");
3293 		QDF_ASSERT(0);
3294 		return;
3295 	}
3296 	buf_ptr = (u_int8_t *)wmi_buf_data(wmi_cmd_buf);
3297 #ifdef WMI_INTERFACE_EVENT_LOGGING
3298 	log_info = &wmi_handle->log_info;
3299 
3300 	if (wmi_handle && log_info->wmi_logging_enable) {
3301 		cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf),
3302 				WMI_CMD_HDR, COMMANDID);
3303 
3304 		dma_addr = QDF_NBUF_CB_PADDR(wmi_cmd_buf);
3305 		phy_addr = qdf_mem_virt_to_phys(qdf_nbuf_data(wmi_cmd_buf));
3306 
3307 		qdf_spin_lock_bh(&log_info->wmi_record_lock);
3308 		/* Record 16 bytes of WMI cmd tx complete data
3309 		 * - exclude TLV and WMI headers
3310 		 */
3311 		offset_ptr = buf_ptr + wmi_handle->soc->buf_offset_command;
3312 		if (wmi_handle->ops->is_management_record(cmd_id)) {
3313 			WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3314 						       offset_ptr);
3315 		} else {
3316 			WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3317 						  offset_ptr, dma_addr,
3318 						  phy_addr);
3319 		}
3320 
3321 		qdf_spin_unlock_bh(&log_info->wmi_record_lock);
3322 	}
3323 #endif
3324 
3325 	wmi_interface_sequence_check(wmi_handle, wmi_cmd_buf);
3326 
3327 	len = qdf_nbuf_len(wmi_cmd_buf);
3328 	qdf_mem_zero(buf_ptr, len);
3329 	wmi_buf_free(wmi_cmd_buf);
3330 	qdf_mem_free(htc_pkt);
3331 	qdf_atomic_dec(&wmi_handle->pending_cmds);
3332 }
3333 
3334 #ifdef FEATURE_RUNTIME_PM
3335 /**
3336  * wmi_htc_log_pkt() - Print information of WMI command from HTC packet
3337  *
3338  * @ctx: handle of WMI context
3339  * @htc_pkt: handle of HTC packet
3340  *
3341  * Return: None
3342  */
3343 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3344 {
3345 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3346 	uint32_t cmd_id;
3347 
3348 	ASSERT(wmi_cmd_buf);
3349 	cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), WMI_CMD_HDR,
3350 			       COMMANDID);
3351 
3352 	wmi_debug("WMI command from HTC packet: %s, ID: %d",
3353 		 wmi_id_to_name(cmd_id), cmd_id);
3354 }
3355 #else
3356 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3357 {
3358 }
3359 #endif
3360 
3361 /**
3362  * wmi_connect_pdev_htc_service() - WMI API to connect a pdev to the HTC
3363  *                                  control service
3364  * @soc: handle to the WMI SoC
3365  * @pdev_idx: pdev index
3366  *
3367  * Return: QDF_STATUS
3368  */
3369 static QDF_STATUS wmi_connect_pdev_htc_service(struct wmi_soc *soc,
3370 					       uint32_t pdev_idx)
3371 {
3372 	QDF_STATUS status;
3373 	struct htc_service_connect_resp response;
3374 	struct htc_service_connect_req connect;
3375 
3376 	OS_MEMZERO(&connect, sizeof(connect));
3377 	OS_MEMZERO(&response, sizeof(response));
3378 
3379 	/* meta data is unused for now */
3380 	connect.pMetaData = NULL;
3381 	connect.MetaDataLength = 0;
3382 	/* these fields are the same for all service endpoints */
3383 	connect.EpCallbacks.pContext = soc;
3384 	connect.EpCallbacks.EpTxCompleteMultiple =
3385 		NULL /* Control path completion ar6000_tx_complete */;
3386 	connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */;
3387 	connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */;
3388 	connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */;
3389 	connect.EpCallbacks.EpTxComplete =
3390 		wmi_htc_tx_complete /* Control path tx complete */;
3391 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3392 
3393 	/* connect to control service */
3394 	connect.service_id = soc->svc_ids[pdev_idx];
3395 	status = htc_connect_service(soc->htc_handle, &connect, &response);
3396 
3397 	if (QDF_IS_STATUS_ERROR(status)) {
3398 		wmi_err("Failed to connect to WMI CONTROL service status:%d",
3399 			 status);
3400 		return status;
3401 	}
3402 
3403 	if (soc->is_async_ep)
3404 		htc_set_async_ep(soc->htc_handle, response.Endpoint, true);
3405 
3406 	soc->wmi_endpoint_id[pdev_idx] = response.Endpoint;
3407 	soc->max_msg_len[pdev_idx] = response.MaxMsgLength;
3408 
3409 	return QDF_STATUS_SUCCESS;
3410 }
3411 
3412 QDF_STATUS
3413 wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle,
3414 				HTC_HANDLE htc_handle)
3415 {
3416 	uint32_t i;
3417 	uint8_t wmi_ep_count;
3418 
3419 	wmi_handle->soc->htc_handle = htc_handle;
3420 
3421 	wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle);
3422 	if (wmi_ep_count > WMI_MAX_RADIOS)
3423 		return QDF_STATUS_E_FAULT;
3424 
3425 	for (i = 0; i < wmi_ep_count; i++)
3426 		wmi_connect_pdev_htc_service(wmi_handle->soc, i);
3427 
3428 	wmi_handle->htc_handle = htc_handle;
3429 	wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0];
3430 	wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0];
3431 
3432 	return QDF_STATUS_SUCCESS;
3433 }
3434 
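/*
 * Illustrative only (not part of the driver): a minimal sketch of how a
 * caller might hook WMI up to HTC once both handles exist. The helper and
 * handle names are hypothetical; only wmi_unified_connect_htc_service()
 * above is real.
 *
 *	QDF_STATUS example_wmi_htc_bringup(struct wmi_unified *wmi_hdl,
 *					   HTC_HANDLE htc_hdl)
 *	{
 *		QDF_STATUS status;
 *
 *		status = wmi_unified_connect_htc_service(wmi_hdl, htc_hdl);
 *		if (QDF_IS_STATUS_ERROR(status))
 *			wmi_err("WMI HTC service connect failed: %d", status);
 *
 *		return status;
 *	}
 */
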
3435 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
3436 QDF_STATUS wmi_diag_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
3437 					     HTC_HANDLE htc_handle)
3438 {
3439 	QDF_STATUS status;
3440 	struct htc_service_connect_resp response = {0};
3441 	struct htc_service_connect_req connect = {0};
3442 
3443 	/* meta data is unused for now */
3444 	connect.pMetaData = NULL;
3445 	connect.MetaDataLength = 0;
3446 	connect.EpCallbacks.pContext = wmi_handle->soc;
3447 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3448 	connect.EpCallbacks.EpRecv = wmi_control_diag_rx /* wmi diag rx */;
3449 	connect.EpCallbacks.EpRecvRefill = NULL;
3450 	connect.EpCallbacks.EpSendFull = NULL;
3451 	connect.EpCallbacks.EpTxComplete = NULL;
3452 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3453 
3454 	/* connect to wmi diag service */
3455 	connect.service_id = WMI_CONTROL_DIAG_SVC;
3456 	status = htc_connect_service(htc_handle, &connect, &response);
3457 
3458 	if (QDF_IS_STATUS_ERROR(status)) {
3459 		wmi_err("Failed to connect to WMI DIAG service status:%d",
3460 			 status);
3461 		return status;
3462 	}
3463 
3464 	if (wmi_handle->soc->is_async_ep)
3465 		htc_set_async_ep(htc_handle, response.Endpoint, true);
3466 
3467 	wmi_handle->soc->wmi_diag_endpoint_id = response.Endpoint;
3468 
3469 	return QDF_STATUS_SUCCESS;
3470 }
3471 #endif
3472 
3473 /**
3474  * wmi_get_host_credits() - WMI API to get updated host_credits
3475  *
3476  * @wmi_handle: handle to WMI.
3477  *
3478  * Return: updated host_credits.
3479  */
3480 int wmi_get_host_credits(wmi_unified_t wmi_handle)
3481 {
3482 	int host_credits = 0;
3483 
3484 	htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle,
3485 						 &host_credits);
3486 	return host_credits;
3487 }
3488 
3489 /**
3490  * wmi_get_pending_cmds() - WMI API to get WMI Pending Commands in the HTC
3491  *                          queue
3492  *
3493  * @wmi_handle: handle to WMI.
3494  *
3495  * Return: Pending commands in the HTC queue.
3496  */
3497 int wmi_get_pending_cmds(wmi_unified_t wmi_handle)
3498 {
3499 	return qdf_atomic_read(&wmi_handle->pending_cmds);
3500 }
3501 
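/*
 * Illustrative only (not part of the driver): a hypothetical helper that
 * uses the two accessors above to decide whether posting another command
 * is reasonable. The threshold is an assumption chosen for the example.
 *
 *	static bool example_wmi_can_send(wmi_unified_t wmi_hdl)
 *	{
 *		if (wmi_get_host_credits(wmi_hdl) <= 0)
 *			return false;              (no HTC tx credits left)
 *
 *		if (wmi_get_pending_cmds(wmi_hdl) >= 1024)
 *			return false;              (example pending-cmd cap)
 *
 *		return true;
 *	}
 */
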
3502 /**
3503  * wmi_set_target_suspend() - WMI API to set target suspend state
3504  *
3505  * @wmi_handle: handle to WMI.
3506  * @val: suspend state boolean.
3507  *
3508  * Return: None
3509  */
3510 void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val)
3511 {
3512 	qdf_atomic_set(&wmi_handle->is_target_suspended, val);
3513 }
3514 
3515 /**
3516  * wmi_set_target_suspend_acked() - WMI API to set target suspend acked flag
3517  *
3518  * @wmi_handle: handle to WMI.
3519  * @val: target suspend command acked flag.
3520  *
3521  * Return: None
3522  */
3523 void wmi_set_target_suspend_acked(wmi_unified_t wmi_handle, A_BOOL val)
3524 {
3525 	qdf_atomic_set(&wmi_handle->is_target_suspend_acked, val);
3526 }
3527 
3528 /**
3529  * wmi_is_target_suspended() - WMI API to check target suspend state
3530  * @wmi_handle: handle to WMI.
3531  *
3532  * WMI API to check target suspend state
3533  *
3534  * Return: true if target is suspended, else false.
3535  */
3536 bool wmi_is_target_suspended(struct wmi_unified *wmi_handle)
3537 {
3538 	return qdf_atomic_read(&wmi_handle->is_target_suspended);
3539 }
3540 qdf_export_symbol(wmi_is_target_suspended);
3541 
3542 /**
3543  * wmi_is_target_suspend_acked() - WMI API to check target suspend command is
3544  *                                 acked or not
3545  * @wmi_handle: handle to WMI.
3546  *
3547  * WMI API to check whether the target suspend command is acked or not
3548  *
3549  * Return: true if target suspend command is acked, else false.
3550  */
3551 bool wmi_is_target_suspend_acked(struct wmi_unified *wmi_handle)
3552 {
3553 	return qdf_atomic_read(&wmi_handle->is_target_suspend_acked);
3554 }
3555 qdf_export_symbol(wmi_is_target_suspend_acked);
3556 
3557 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
3558 void wmi_set_qmi_stats(wmi_unified_t wmi_handle, bool val)
3559 {
3560 	wmi_handle->is_qmi_stats_enabled = val;
3561 }
3562 
3563 bool wmi_is_qmi_stats_enabled(struct wmi_unified *wmi_handle)
3564 {
3565 	return wmi_handle->is_qmi_stats_enabled;
3566 }
3567 #endif
3568 
3569 /**
3570  * wmi_tag_crash_inject() - WMI API to set crash injection state
3571  * @wmi_handle: handle to WMI.
3572  * @flag: crash injection state boolean.
3573  */
3574 void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag)
3575 {
3576 	wmi_handle->tag_crash_inject = flag;
3577 }
3578 
3579 /**
3580  * wmi_set_is_wow_bus_suspended() - WMI API to set WoW bus suspend state
3581  * @wmi_handle: handle to WMI.
3582  * @val: suspend state boolean.
3583  */
3584 void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val)
3585 {
3586 	qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val);
3587 }
3588 
3589 void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val)
3590 {
3591 	wmi_handle->tgt_force_assert_enable = val;
3592 }
3593 
3594 /**
3595  * wmi_stop() - generic function to block unified WMI command
3596  * @wmi_handle: handle to WMI.
3597  *
3598  * Return: 0 (success) always.
3599  */
3600 int
3601 wmi_stop(wmi_unified_t wmi_handle)
3602 {
3603 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3604 		  "WMI Stop");
3605 	wmi_handle->wmi_stopinprogress = 1;
3606 	return 0;
3607 }
3608 
3609 /**
3610  * wmi_start() - generic function to allow unified WMI command
3611  * @wmi_handle: handle to WMI.
3612  *
3613  * Return: 0 (success) always.
3614  */
3615 int
3616 wmi_start(wmi_unified_t wmi_handle)
3617 {
3618 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3619 		  "WMI Start");
3620 	wmi_handle->wmi_stopinprogress = 0;
3621 	return 0;
3622 }
3623 
3624 /**
3625  * wmi_is_blocked() - generic function to check if WMI is blocked
3626  * @wmi_handle: handle to WMI.
3627  *
3628  * Return: true if blocked, false otherwise
3629  */
3630 bool
3631 wmi_is_blocked(wmi_unified_t wmi_handle)
3632 {
3633 	return !!wmi_handle->wmi_stopinprogress;
3634 }
3635 
3636 /**
3637  * wmi_flush_endpoint() - API to flush all previous packets associated
3638  *                        with the WMI endpoint
3639  * @wmi_handle: handle to WMI.
3640  */
3641 void
3642 wmi_flush_endpoint(wmi_unified_t wmi_handle)
3643 {
3644 	htc_flush_endpoint(wmi_handle->htc_handle,
3645 		wmi_handle->wmi_endpoint_id, 0);
3646 }
3647 qdf_export_symbol(wmi_flush_endpoint);
3648 
3649 /**
3650  * wmi_pdev_id_conversion_enable() - API to enable pdev_id/phy_id conversion
3651  *                     in WMI. By default pdev_id conversion is not done in
3652  *                     WMI. This API can be used to enable conversion in WMI.
3653  * @wmi_handle: handle to WMI
3654  * @pdev_id_map: pointer to the pdev_id map
3655  * @size: size of the pdev_id map
3656  * Return: None
3657  */
3658 void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle,
3659 				   uint32_t *pdev_id_map,
3660 				   uint8_t size)
3661 {
3662 	if (wmi_handle->target_type == WMI_TLV_TARGET)
3663 		wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle,
3664 							       pdev_id_map,
3665 							       size);
3666 }
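
/*
 * Illustrative only (not part of the driver): a hypothetical host-to-target
 * pdev_id map passed to wmi_pdev_id_conversion_enable(). The values are
 * examples; real maps come from the target capability exchange.
 *
 *	static uint32_t example_pdev_id_map[] = { 2, 1, 3 };
 *
 *	wmi_pdev_id_conversion_enable(wmi_hdl, example_pdev_id_map,
 *				      QDF_ARRAY_SIZE(example_pdev_id_map));
 */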
3667 
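/**
 * __wmi_validate_handle() - validate a WMI handle on behalf of a caller
 * @wmi_handle: handle to WMI
 * @func: name of the calling function, used in the error log
 *
 * Return: 0 if the handle is valid, -EINVAL otherwise
 */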
3668 int __wmi_validate_handle(wmi_unified_t wmi_handle, const char *func)
3669 {
3670         if (!wmi_handle) {
3671 	if (!wmi_handle) {
3672 		wmi_err("Invalid WMI handle (via %s)", func);
3673 		return -EINVAL;
3674 	}
3675 
3676 	return 0;
3677
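
/*
 * Illustrative only (not part of this file): callers typically reach
 * __wmi_validate_handle() through a wrapper that supplies the caller name.
 * The wrapper below is a sketch of that pattern, not the real macro from
 * the WMI headers.
 *
 *	#define example_wmi_validate_handle(wmi_handle) \
 *		__wmi_validate_handle(wmi_handle, __func__)
 *
 *	int example_send(wmi_unified_t wmi_hdl)
 *	{
 *		if (example_wmi_validate_handle(wmi_hdl))
 *			return -EINVAL;
 *		return 0;                  (safe to use wmi_hdl from here)
 *	}
 */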