xref: /wlan-dirver/qca-wifi-host-cmn/wmi/src/wmi_unified.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Host WMI unified implementation
21  */
22 #include "htc_api.h"
24 #include "wmi_unified_priv.h"
25 #include "wmi_unified_api.h"
26 #include "qdf_module.h"
27 #include "qdf_platform.h"
28 #ifdef WMI_EXT_DBG
29 #include "qdf_list.h"
30 #include "qdf_atomic.h"
31 #endif
32 
33 #ifndef WMI_NON_TLV_SUPPORT
34 #include "wmi_tlv_helper.h"
35 #endif
36 
37 #include <linux/debugfs.h>
38 #include <target_if.h>
39 #include <qdf_debugfs.h>
40 #include "wmi_filtered_logging.h"
41 #include <wmi_hang_event.h>
42 
43 /* This check for CONFIG_WIN was temporarily added due to a redeclaration
44 compilation error in MCL. The error is caused by the inclusion of wmi.h in
45 wmi_unified_api.h, which gets included here through ol_if_athvar.h. Once wmi.h
46 is removed from wmi_unified_api.h as part of cleanup, WMI_CMD_HDR will need to
47 be defined here. */
48 /* Copied from wmi.h */
49 #undef MS
50 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
51 #undef SM
52 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
53 #undef WO
54 #define WO(_f)      ((_f##_OFFSET) >> 2)
55 
56 #undef GET_FIELD
57 #define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f)
58 #undef SET_FIELD
59 #define SET_FIELD(_addr, _f, _val)  \
60 	    (*((uint32_t *)(_addr) + WO(_f)) = \
61 		(*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f))
62 
63 #define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \
64 	    GET_FIELD(_msg_buf, _msg_type ## _ ## _f)
65 
66 #define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \
67 	    SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val)
68 
69 #define WMI_EP_APASS           0x0
70 #define WMI_EP_LPASS           0x1
71 #define WMI_EP_SENSOR          0x2
72 
73 /*
74  * Control Path
75  */
76 typedef PREPACK struct {
77 	uint32_t	commandId:24,
78 			reserved:2, /* used for WMI endpoint ID */
79 			plt_priv:6; /* platform private */
80 } POSTPACK WMI_CMD_HDR;        /* used for commands and events */
81 
82 #define WMI_CMD_HDR_COMMANDID_LSB           0
83 #define WMI_CMD_HDR_COMMANDID_MASK          0x00ffffff
84 #define WMI_CMD_HDR_COMMANDID_OFFSET        0x00000000
85 #define WMI_CMD_HDR_WMI_ENDPOINTID_MASK        0x03000000
86 #define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET      24
87 #define WMI_CMD_HDR_PLT_PRIV_LSB               24
88 #define WMI_CMD_HDR_PLT_PRIV_MASK              0xff000000
89 #define WMI_CMD_HDR_PLT_PRIV_OFFSET            0x00000000
90 /* end of copy wmi.h */
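
/*
 * Usage sketch for the field accessors above: the command id occupies the
 * low 24 bits of the first 32-bit word of a WMI message, so it can be read
 * or updated directly on a raw message buffer, e.g.:
 *
 *   uint32_t cmd_id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
 *   WMI_SET_FIELD(buf, WMI_CMD_HDR, COMMANDID, cmd_id);
 *
 * where buf points to the start of the message (see
 * wmi_ext_dbg_msg_event_record() below for the same pattern).
 */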
91 
92 #define WMI_MIN_HEAD_ROOM 64
93 
94 /* WBUFF pool sizes for WMI (number of buffers in each pool) */
95 /* Pool 0: buffers of 256 bytes */
96 #define WMI_WBUFF_POOL_0_SIZE 128
97 /* Pool 1: buffers of 512 bytes */
98 #define WMI_WBUFF_POOL_1_SIZE 16
99 /* Pool 2: buffers of 1024 bytes */
100 #define WMI_WBUFF_POOL_2_SIZE 8
101 /* Pool 3: buffers of 2048 bytes */
102 #define WMI_WBUFF_POOL_3_SIZE 8
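
/*
 * Rough footprint sketch, assuming the per-pool buffer sizes noted above:
 * 128 * 256 + 16 * 512 + 8 * 1024 + 8 * 2048 = 64 KB is pre-allocated for
 * WMI buffers served through wbuff before the WMI buffer allocator falls
 * back to a regular qdf_nbuf allocation (see wmi_buf_alloc_debug() below).
 */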
103 
104 #ifdef WMI_INTERFACE_EVENT_LOGGING
105 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
106 /* TODO Cleanup this backported function */
107 static int wmi_bp_seq_printf(qdf_debugfs_file_t m, const char *f, ...)
108 {
109 	va_list args;
110 
111 	va_start(args, f);
112 	seq_vprintf(m, f, args);
113 	va_end(args);
114 
115 	return 0;
116 }
117 #else
118 #define wmi_bp_seq_printf(m, fmt, ...) seq_printf((m), fmt, ##__VA_ARGS__)
119 #endif
120 
121 #ifndef MAX_WMI_INSTANCES
122 #define CUSTOM_MGMT_CMD_DATA_SIZE 4
123 #endif
124 
125 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
126 /* WMI commands */
127 uint32_t g_wmi_command_buf_idx = 0;
128 struct wmi_command_debug wmi_command_log_buffer[WMI_CMD_DEBUG_MAX_ENTRY];
129 
130 /* WMI commands TX completed */
131 uint32_t g_wmi_command_tx_cmp_buf_idx = 0;
132 struct wmi_command_cmp_debug
133 	wmi_command_tx_cmp_log_buffer[WMI_CMD_CMPL_DEBUG_MAX_ENTRY];
134 
135 /* WMI events when processed */
136 uint32_t g_wmi_event_buf_idx = 0;
137 struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
138 
139 /* WMI events when queued */
140 uint32_t g_wmi_rx_event_buf_idx = 0;
141 struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
142 #endif
143 
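/*
 * The WMI_*_RECORD() macros below share one circular-buffer scheme: the tail
 * index wraps to 0 once it reaches the configured maximum, the entry at the
 * tail gets the command/event id, a truncated payload copy of
 * wmi_record_max_length bytes and a timestamp, and the running 'length'
 * counter is incremented. Callers are expected to hold
 * log_info.wmi_record_lock, e.g. (sketch):
 *
 *   qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
 *   WMI_COMMAND_RECORD(wmi_handle, cmd_id, payload);
 *   qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
 */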
144 #define WMI_COMMAND_RECORD(h, a, b) {					\
145 	if (wmi_cmd_log_max_entry <=					\
146 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))	\
147 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\
148 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
149 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\
150 						.command = a;		\
151 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
152 				wmi_command_log_buf_info.buf)		\
153 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\
154 			b, wmi_record_max_length);			\
155 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
156 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\
157 		time = qdf_get_log_timestamp();			\
158 	(*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++;	\
159 	h->log_info.wmi_command_log_buf_info.length++;			\
160 }
161 
162 #define WMI_COMMAND_TX_CMP_RECORD(h, a, b, da, pa) {			\
163 	if (wmi_cmd_cmpl_log_max_entry <=				\
164 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\
165 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
166 				p_buf_tail_idx) = 0;			\
167 	((struct wmi_command_cmp_debug *)h->log_info.			\
168 		wmi_command_tx_cmp_log_buf_info.buf)			\
169 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
170 				p_buf_tail_idx)].			\
171 							command	= a;	\
172 	qdf_mem_copy(((struct wmi_command_cmp_debug *)h->log_info.	\
173 				wmi_command_tx_cmp_log_buf_info.buf)	\
174 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
175 			p_buf_tail_idx)].				\
176 		data, b, wmi_record_max_length);			\
177 	((struct wmi_command_cmp_debug *)h->log_info.			\
178 		wmi_command_tx_cmp_log_buf_info.buf)			\
179 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
180 				p_buf_tail_idx)].			\
181 		time = qdf_get_log_timestamp();				\
182 	((struct wmi_command_cmp_debug *)h->log_info.			\
183 		wmi_command_tx_cmp_log_buf_info.buf)			\
184 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
185 				p_buf_tail_idx)].			\
186 		dma_addr = da;						\
187 	((struct wmi_command_cmp_debug *)h->log_info.			\
188 		wmi_command_tx_cmp_log_buf_info.buf)			\
189 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
190 				p_buf_tail_idx)].			\
191 		phy_addr = pa;						\
192 	(*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\
193 	h->log_info.wmi_command_tx_cmp_log_buf_info.length++;		\
194 }
195 
196 #define WMI_EVENT_RECORD(h, a, b) {					\
197 	if (wmi_event_log_max_entry <=					\
198 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))	\
199 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\
200 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
201 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].	\
202 		event = a;						\
203 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
204 				wmi_event_log_buf_info.buf)		\
205 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\
206 		wmi_record_max_length);					\
207 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
208 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\
209 		qdf_get_log_timestamp();				\
210 	(*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++;	\
211 	h->log_info.wmi_event_log_buf_info.length++;			\
212 }
213 
214 #define WMI_RX_EVENT_RECORD(h, a, b) {					\
215 	if (wmi_event_log_max_entry <=					\
216 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\
217 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\
218 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
219 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
220 		event = a;						\
221 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
222 				wmi_rx_event_log_buf_info.buf)		\
223 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
224 			data, b, wmi_record_max_length);		\
225 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
226 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
227 		time =	qdf_get_log_timestamp();			\
228 	(*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++;	\
229 	h->log_info.wmi_rx_event_log_buf_info.length++;			\
230 }
231 
232 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
233 uint32_t g_wmi_mgmt_command_buf_idx = 0;
234 struct wmi_command_debug
235 wmi_mgmt_command_log_buffer[WMI_MGMT_TX_DEBUG_MAX_ENTRY];
236 
237 /* wmi_mgmt commands TX completed */
238 uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0;
239 struct wmi_command_debug
240 wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY];
241 
242 /* wmi_mgmt events when received */
243 uint32_t g_wmi_mgmt_rx_event_buf_idx = 0;
244 struct wmi_event_debug
245 wmi_mgmt_rx_event_log_buffer[WMI_MGMT_RX_DEBUG_MAX_ENTRY];
246 
247 /* wmi_diag events when received */
248 uint32_t g_wmi_diag_rx_event_buf_idx = 0;
249 struct wmi_event_debug
250 wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY];
251 #endif
252 
253 #define WMI_MGMT_COMMAND_RECORD(h, a, b) {                              \
254 	if (wmi_mgmt_tx_log_max_entry <=                                   \
255 		*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \
256 		*(h->log_info.wmi_mgmt_command_log_buf_info.		\
257 				p_buf_tail_idx) = 0;			\
258 	((struct wmi_command_debug *)h->log_info.                       \
259 		 wmi_mgmt_command_log_buf_info.buf)                     \
260 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
261 			command = a;                                    \
262 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.          \
263 				wmi_mgmt_command_log_buf_info.buf)      \
264 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
265 		data, b,                                                \
266 		wmi_record_max_length);                                	\
267 	((struct wmi_command_debug *)h->log_info.                       \
268 		 wmi_mgmt_command_log_buf_info.buf)                     \
269 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
270 			time =        qdf_get_log_timestamp();          \
271 	(*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\
272 	h->log_info.wmi_mgmt_command_log_buf_info.length++;             \
273 }
274 
275 #define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) {			\
276 	if (wmi_mgmt_tx_cmpl_log_max_entry <=				\
277 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
278 			p_buf_tail_idx))				\
279 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
280 			p_buf_tail_idx) = 0;				\
281 	((struct wmi_command_debug *)h->log_info.			\
282 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
283 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
284 				p_buf_tail_idx)].command = a;		\
285 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
286 				wmi_mgmt_command_tx_cmp_log_buf_info.buf)\
287 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
288 			p_buf_tail_idx)].data, b,			\
289 			wmi_record_max_length);				\
290 	((struct wmi_command_debug *)h->log_info.			\
291 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
292 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
293 				p_buf_tail_idx)].time =			\
294 		qdf_get_log_timestamp();				\
295 	(*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.		\
296 			p_buf_tail_idx))++;				\
297 	h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++;	\
298 }
299 
300 #define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do {				\
301 	if (wmi_mgmt_rx_log_max_entry <=				\
302 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\
303 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\
304 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
305 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\
306 					.event = a;			\
307 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
308 				wmi_mgmt_event_log_buf_info.buf)	\
309 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
310 			data, b, wmi_record_max_length);		\
311 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
312 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
313 			time = qdf_get_log_timestamp();			\
314 	(*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++;	\
315 	h->log_info.wmi_mgmt_event_log_buf_info.length++;		\
316 } while (0);
317 
318 #define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do {                             \
319 	if (wmi_diag_log_max_entry <=                                   \
320 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\
321 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\
322 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
323 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\
324 					.event = a;                     \
325 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.            \
326 				wmi_diag_event_log_buf_info.buf)        \
327 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
328 			data, b, wmi_record_max_length);                \
329 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
330 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
331 			time = qdf_get_log_timestamp();                 \
332 	(*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++;  \
333 	h->log_info.wmi_diag_event_log_buf_info.length++;               \
334 } while (0);
335 
336 /* These are defined as module parameters so that they can be configured */
337 /* WMI Commands */
338 uint32_t wmi_cmd_log_max_entry = WMI_CMD_DEBUG_MAX_ENTRY;
339 uint32_t wmi_cmd_cmpl_log_max_entry = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
340 /* WMI Events */
341 uint32_t wmi_event_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY;
342 /* WMI MGMT Tx */
343 uint32_t wmi_mgmt_tx_log_max_entry = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
344 uint32_t wmi_mgmt_tx_cmpl_log_max_entry = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
345 /* WMI MGMT Rx */
346 uint32_t wmi_mgmt_rx_log_max_entry = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
347 /* WMI Diag Event */
348 uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
349 /* WMI capture size */
350 uint32_t wmi_record_max_length = WMI_DEBUG_ENTRY_MAX_LENGTH;
351 uint32_t wmi_display_size = 100;
352 
353 /**
354  * wmi_log_init() - Initialize WMI event logging
355  * @wmi_handle: WMI handle.
356  *
357  * Return: Initialization status
358  */
359 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
360 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
361 {
362 	struct wmi_log_buf_t *cmd_log_buf =
363 			&wmi_handle->log_info.wmi_command_log_buf_info;
364 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
365 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
366 
367 	struct wmi_log_buf_t *event_log_buf =
368 			&wmi_handle->log_info.wmi_event_log_buf_info;
369 	struct wmi_log_buf_t *rx_event_log_buf =
370 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
371 
372 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
373 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
374 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
375 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
376 	struct wmi_log_buf_t *mgmt_event_log_buf =
377 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
378 	struct wmi_log_buf_t *diag_event_log_buf =
379 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
380 
381 	/* WMI commands */
382 	cmd_log_buf->length = 0;
383 	cmd_log_buf->buf_tail_idx = 0;
384 	cmd_log_buf->buf = wmi_command_log_buffer;
385 	cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx;
386 	cmd_log_buf->size = WMI_CMD_DEBUG_MAX_ENTRY;
387 
388 	/* WMI commands TX completed */
389 	cmd_tx_cmpl_log_buf->length = 0;
390 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
391 	cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer;
392 	cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx;
393 	cmd_tx_cmpl_log_buf->size = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
394 
395 	/* WMI events when processed */
396 	event_log_buf->length = 0;
397 	event_log_buf->buf_tail_idx = 0;
398 	event_log_buf->buf = wmi_event_log_buffer;
399 	event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx;
400 	event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
401 
402 	/* WMI events when queued */
403 	rx_event_log_buf->length = 0;
404 	rx_event_log_buf->buf_tail_idx = 0;
405 	rx_event_log_buf->buf = wmi_rx_event_log_buffer;
406 	rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx;
407 	rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
408 
409 	/* WMI Management commands */
410 	mgmt_cmd_log_buf->length = 0;
411 	mgmt_cmd_log_buf->buf_tail_idx = 0;
412 	mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer;
413 	mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx;
414 	mgmt_cmd_log_buf->size = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
415 
416 	/* WMI Management commands Tx completed*/
417 	mgmt_cmd_tx_cmp_log_buf->length = 0;
418 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
419 	mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer;
420 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
421 		&g_wmi_mgmt_command_tx_cmp_buf_idx;
422 	mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
423 
424 	/* WMI Management events when received */
425 	mgmt_event_log_buf->length = 0;
426 	mgmt_event_log_buf->buf_tail_idx = 0;
427 	mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer;
428 	mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx;
429 	mgmt_event_log_buf->size = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
430 
431 	/* WMI diag events when received */
432 	diag_event_log_buf->length = 0;
433 	diag_event_log_buf->buf_tail_idx = 0;
434 	diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer;
435 	diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx;
436 	diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
437 
438 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
439 	wmi_handle->log_info.wmi_logging_enable = 1;
440 
441 	return QDF_STATUS_SUCCESS;
442 }
443 #else
444 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
445 {
446 	struct wmi_log_buf_t *cmd_log_buf =
447 			&wmi_handle->log_info.wmi_command_log_buf_info;
448 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
449 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
450 
451 	struct wmi_log_buf_t *event_log_buf =
452 			&wmi_handle->log_info.wmi_event_log_buf_info;
453 	struct wmi_log_buf_t *rx_event_log_buf =
454 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
455 
456 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
457 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
458 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
459 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
460 	struct wmi_log_buf_t *mgmt_event_log_buf =
461 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
462 	struct wmi_log_buf_t *diag_event_log_buf =
463 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
464 
465 	wmi_handle->log_info.wmi_logging_enable = 0;
466 
467 	/* WMI commands */
468 	cmd_log_buf->length = 0;
469 	cmd_log_buf->buf_tail_idx = 0;
470 	cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
471 		wmi_cmd_log_max_entry * sizeof(struct wmi_command_debug));
472 	cmd_log_buf->size = wmi_cmd_log_max_entry;
473 
474 	if (!cmd_log_buf->buf)
475 		return QDF_STATUS_E_NOMEM;
476 
477 	cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx;
478 
479 	/* WMI commands TX completed */
480 	cmd_tx_cmpl_log_buf->length = 0;
481 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
482 	cmd_tx_cmpl_log_buf->buf = (struct wmi_command_cmp_debug *) qdf_mem_malloc(
483 		wmi_cmd_cmpl_log_max_entry * sizeof(struct wmi_command_cmp_debug));
484 	cmd_tx_cmpl_log_buf->size = wmi_cmd_cmpl_log_max_entry;
485 
486 	if (!cmd_tx_cmpl_log_buf->buf)
487 		return QDF_STATUS_E_NOMEM;
488 
489 	cmd_tx_cmpl_log_buf->p_buf_tail_idx =
490 		&cmd_tx_cmpl_log_buf->buf_tail_idx;
491 
492 	/* WMI events when processed */
493 	event_log_buf->length = 0;
494 	event_log_buf->buf_tail_idx = 0;
495 	event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
496 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
497 	event_log_buf->size = wmi_event_log_max_entry;
498 
499 	if (!event_log_buf->buf)
500 		return QDF_STATUS_E_NOMEM;
501 
502 	event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx;
503 
504 	/* WMI events when queued */
505 	rx_event_log_buf->length = 0;
506 	rx_event_log_buf->buf_tail_idx = 0;
507 	rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
508 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
509 	rx_event_log_buf->size = wmi_event_log_max_entry;
510 
511 	if (!rx_event_log_buf->buf)
512 		return QDF_STATUS_E_NOMEM;
513 
514 	rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx;
515 
516 	/* WMI Management commands */
517 	mgmt_cmd_log_buf->length = 0;
518 	mgmt_cmd_log_buf->buf_tail_idx = 0;
519 	mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
520 		wmi_mgmt_tx_log_max_entry * sizeof(struct wmi_command_debug));
521 	mgmt_cmd_log_buf->size = wmi_mgmt_tx_log_max_entry;
522 
523 	if (!mgmt_cmd_log_buf->buf)
524 		return QDF_STATUS_E_NOMEM;
525 
526 	mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx;
527 
528 	/* WMI Management commands Tx completed*/
529 	mgmt_cmd_tx_cmp_log_buf->length = 0;
530 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
531 	mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *)
532 		qdf_mem_malloc(
533 		wmi_mgmt_tx_cmpl_log_max_entry *
534 		sizeof(struct wmi_command_debug));
535 	mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_tx_cmpl_log_max_entry;
536 
537 	if (!mgmt_cmd_tx_cmp_log_buf->buf)
538 		return QDF_STATUS_E_NOMEM;
539 
540 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
541 		&mgmt_cmd_tx_cmp_log_buf->buf_tail_idx;
542 
543 	/* WMI Management events when received */
544 	mgmt_event_log_buf->length = 0;
545 	mgmt_event_log_buf->buf_tail_idx = 0;
546 
547 	mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
548 		wmi_mgmt_rx_log_max_entry *
549 		sizeof(struct wmi_event_debug));
550 	mgmt_event_log_buf->size = wmi_mgmt_rx_log_max_entry;
551 
552 	if (!mgmt_event_log_buf->buf)
553 		return QDF_STATUS_E_NOMEM;
554 
555 	mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx;
556 
557 	/* WMI diag events when received */
558 	diag_event_log_buf->length = 0;
559 	diag_event_log_buf->buf_tail_idx = 0;
560 
561 	diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
562 		wmi_diag_log_max_entry *
563 		sizeof(struct wmi_event_debug));
564 	diag_event_log_buf->size = wmi_diag_log_max_entry;
565 
566 	if (!diag_event_log_buf->buf)
567 		return QDF_STATUS_E_NOMEM;
568 
569 	diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx;
570 
571 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
572 	wmi_handle->log_info.wmi_logging_enable = 1;
573 
574 	wmi_filtered_logging_init(wmi_handle);
575 
576 	return QDF_STATUS_SUCCESS;
577 }
578 #endif
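
/*
 * Note: with WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC the ring buffers are
 * allocated at init time (the #else branch above) and sized by the
 * wmi_*_max_entry variables, so those sizes are tunable; the static variant
 * always uses the global arrays with their compile-time
 * WMI_*_DEBUG_MAX_ENTRY sizes.
 */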
579 
580 /**
581  * wmi_log_buffer_free() - Free all dynamically allocated buffer memory for
582  * event logging
583  * @wmi_handle: WMI handle.
584  *
585  * Return: None
586  */
587 #ifdef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
588 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
589 {
590 	wmi_filtered_logging_free(wmi_handle);
591 
592 	if (wmi_handle->log_info.wmi_command_log_buf_info.buf)
593 		qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf);
594 	if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf)
595 		qdf_mem_free(
596 		wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf);
597 	if (wmi_handle->log_info.wmi_event_log_buf_info.buf)
598 		qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf);
599 	if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf)
600 		qdf_mem_free(
601 			wmi_handle->log_info.wmi_rx_event_log_buf_info.buf);
602 	if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf)
603 		qdf_mem_free(
604 			wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf);
605 	if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf)
606 		qdf_mem_free(
607 		wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf);
608 	if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf)
609 		qdf_mem_free(
610 			wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf);
611 	if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf)
612 		qdf_mem_free(
613 			wmi_handle->log_info.wmi_diag_event_log_buf_info.buf);
614 	wmi_handle->log_info.wmi_logging_enable = 0;
615 
616 	qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock);
617 }
618 #else
619 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
620 {
621 	/* Do Nothing */
622 }
623 #endif
624 
625 /**
626  * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer
627  * @log_buffer: the command log buffer metadata of the buffer to print
628  * @count: the maximum number of entries to print
629  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
630  * @print_priv: any data required by the print method, e.g. a file handle
631  *
632  * Return: None
633  */
634 static void
635 wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
636 			 qdf_abstract_print *print, void *print_priv)
637 {
638 	static const int data_len =
639 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
640 	char str[128];
641 	uint32_t idx;
642 
643 	if (count > log_buffer->size)
644 		count = log_buffer->size;
645 	if (count > log_buffer->length)
646 		count = log_buffer->length;
647 
648 	/* subtract count from index, and wrap if necessary */
649 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
650 	idx %= log_buffer->size;
651 
652 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
653 	while (count) {
654 		struct wmi_command_debug *cmd_log = (struct wmi_command_debug *)
655 			&((struct wmi_command_debug *)log_buffer->buf)[idx];
656 		uint64_t secs, usecs;
657 		int len = 0;
658 		int i;
659 
660 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
661 		len += scnprintf(str + len, sizeof(str) - len,
662 				 "% 8lld.%06lld    %6u (0x%06x)    ",
663 				 secs, usecs,
664 				 cmd_log->command, cmd_log->command);
665 		for (i = 0; i < data_len; ++i) {
666 			len += scnprintf(str + len, sizeof(str) - len,
667 					 "0x%08x ", cmd_log->data[i]);
668 		}
669 
670 		print(print_priv, str);
671 
672 		--count;
673 		++idx;
674 		if (idx >= log_buffer->size)
675 			idx = 0;
676 	}
677 }
678 
679 /**
680  * wmi_print_cmd_cmp_log_buffer() - wmi command completion log printer
681  * @log_buffer: the command completion log buffer metadata of the buffer to print
682  * @count: the maximum number of entries to print
683  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
684  * @print_priv: any data required by the print method, e.g. a file handle
685  *
686  * Return: None
687  */
688 static void
689 wmi_print_cmd_cmp_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
690 			 qdf_abstract_print *print, void *print_priv)
691 {
692 	static const int data_len =
693 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
694 	char str[128];
695 	uint32_t idx;
696 
697 	if (count > log_buffer->size)
698 		count = log_buffer->size;
699 	if (count > log_buffer->length)
700 		count = log_buffer->length;
701 
702 	/* subtract count from index, and wrap if necessary */
703 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
704 	idx %= log_buffer->size;
705 
706 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
707 	while (count) {
708 		struct wmi_command_cmp_debug *cmd_log = (struct wmi_command_cmp_debug *)
709 			&((struct wmi_command_cmp_debug *)log_buffer->buf)[idx];
710 		uint64_t secs, usecs;
711 		int len = 0;
712 		int i;
713 
714 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
715 		len += scnprintf(str + len, sizeof(str) - len,
716 				 "% 8lld.%06lld    %6u (0x%06x)    ",
717 				 secs, usecs,
718 				 cmd_log->command, cmd_log->command);
719 		for (i = 0; i < data_len; ++i) {
720 			len += scnprintf(str + len, sizeof(str) - len,
721 					 "0x%08x ", cmd_log->data[i]);
722 		}
723 
724 		print(print_priv, str);
725 
726 		--count;
727 		++idx;
728 		if (idx >= log_buffer->size)
729 			idx = 0;
730 	}
731 }
732 
733 /**
734  * wmi_print_event_log_buffer() - an output agnostic wmi event log printer
735  * @log_buffer: the event log buffer metadata of the buffer to print
736  * @count: the maximum number of entries to print
737  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
738  * @print_priv: any data required by the print method, e.g. a file handle
739  *
740  * Return: None
741  */
742 static void
743 wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
744 			   qdf_abstract_print *print, void *print_priv)
745 {
746 	static const int data_len =
747 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
748 	char str[128];
749 	uint32_t idx;
750 
751 	if (count > log_buffer->size)
752 		count = log_buffer->size;
753 	if (count > log_buffer->length)
754 		count = log_buffer->length;
755 
756 	/* subtract count from index, and wrap if necessary */
757 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
758 	idx %= log_buffer->size;
759 
760 	print(print_priv, "Time (seconds)      Event Id             Payload");
761 	while (count) {
762 		struct wmi_event_debug *event_log = (struct wmi_event_debug *)
763 			&((struct wmi_event_debug *)log_buffer->buf)[idx];
764 		uint64_t secs, usecs;
765 		int len = 0;
766 		int i;
767 
768 		qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs);
769 		len += scnprintf(str + len, sizeof(str) - len,
770 				 "% 8lld.%06lld    %6u (0x%06x)    ",
771 				 secs, usecs,
772 				 event_log->event, event_log->event);
773 		for (i = 0; i < data_len; ++i) {
774 			len += scnprintf(str + len, sizeof(str) - len,
775 					 "0x%08x ", event_log->data[i]);
776 		}
777 
778 		print(print_priv, str);
779 
780 		--count;
781 		++idx;
782 		if (idx >= log_buffer->size)
783 			idx = 0;
784 	}
785 }
786 
787 inline void
788 wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count,
789 		  qdf_abstract_print *print, void *print_priv)
790 {
791 	wmi_print_cmd_log_buffer(
792 		&wmi->log_info.wmi_command_log_buf_info,
793 		count, print, print_priv);
794 }
795 
796 inline void
797 wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
798 			 qdf_abstract_print *print, void *print_priv)
799 {
800 	wmi_print_cmd_cmp_log_buffer(
801 		&wmi->log_info.wmi_command_tx_cmp_log_buf_info,
802 		count, print, print_priv);
803 }
804 
805 inline void
806 wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count,
807 		       qdf_abstract_print *print, void *print_priv)
808 {
809 	wmi_print_cmd_log_buffer(
810 		&wmi->log_info.wmi_mgmt_command_log_buf_info,
811 		count, print, print_priv);
812 }
813 
814 inline void
815 wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
816 			      qdf_abstract_print *print, void *print_priv)
817 {
818 	wmi_print_cmd_log_buffer(
819 		&wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info,
820 		count, print, print_priv);
821 }
822 
823 inline void
824 wmi_print_event_log(wmi_unified_t wmi, uint32_t count,
825 		    qdf_abstract_print *print, void *print_priv)
826 {
827 	wmi_print_event_log_buffer(
828 		&wmi->log_info.wmi_event_log_buf_info,
829 		count, print, print_priv);
830 }
831 
832 inline void
833 wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count,
834 		       qdf_abstract_print *print, void *print_priv)
835 {
836 	wmi_print_event_log_buffer(
837 		&wmi->log_info.wmi_rx_event_log_buf_info,
838 		count, print, print_priv);
839 }
840 
841 inline void
842 wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count,
843 			 qdf_abstract_print *print, void *print_priv)
844 {
845 	wmi_print_event_log_buffer(
846 		&wmi->log_info.wmi_mgmt_event_log_buf_info,
847 		count, print, print_priv);
848 }
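
/*
 * The wrappers above accept any print method matching qdf_abstract_print
 * (a printf-style callback that also takes a private pointer). A minimal
 * usage sketch with a hypothetical wrapper around the host log:
 *
 *   static int my_wmi_print(void *priv, const char *fmt, ...);
 *
 *   wmi_print_cmd_log(wmi_handle, wmi_display_size, my_wmi_print, NULL);
 *   wmi_print_rx_event_log(wmi_handle, wmi_display_size, my_wmi_print, NULL);
 */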
849 
850 
851 /* debugfs routines*/
852 
853 /**
854  * debug_wmi_##func_base##_show() - debugfs function to display the content
855  * of a command or event ring buffer. The macro displays up to the ring size
856  * entries once the buffer has wrapped around.
857  *
858  * @m: debugfs handler to access wmi_handle
859  * @v: Variable arguments (not used)
860  *
861  * Return: Length of characters printed
862  */
863 #define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
864 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
865 						void *v)		\
866 	{								\
867 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
868 		struct wmi_log_buf_t *wmi_log =				\
869 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
870 		int pos, nread, outlen;					\
871 		int i;							\
872 		uint64_t secs, usecs;					\
873 									\
874 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
875 		if (!wmi_log->length) {					\
876 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
877 			return wmi_bp_seq_printf(m,			\
878 			"no elements to read from ring buffer!\n");	\
879 		}							\
880 									\
881 		if (wmi_log->length <= wmi_ring_size)			\
882 			nread = wmi_log->length;			\
883 		else							\
884 			nread = wmi_ring_size;				\
885 									\
886 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
887 			/* tail can be 0 after wrap-around */		\
888 			pos = wmi_ring_size - 1;			\
889 		else							\
890 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
891 									\
892 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
893 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
894 		while (nread--) {					\
895 			struct wmi_record_type *wmi_record;		\
896 									\
897 			wmi_record = (struct wmi_record_type *)	\
898 			&(((struct wmi_record_type *)wmi_log->buf)[pos]);\
899 			outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n",	\
900 				(wmi_record->command));			\
901 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
902 				&usecs);				\
903 			outlen +=					\
904 			wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\
905 				secs, usecs);				\
906 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
907 			for (i = 0; i < (wmi_record_max_length/		\
908 					sizeof(uint32_t)); i++)		\
909 				outlen += wmi_bp_seq_printf(m, "%x ",	\
910 					wmi_record->data[i]);		\
911 			outlen += wmi_bp_seq_printf(m, "\n");		\
912 									\
913 			if (pos == 0)					\
914 				pos = wmi_ring_size - 1;		\
915 			else						\
916 				pos--;					\
917 		}							\
918 		return outlen;						\
919 	}
920 
921 #define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size)	\
922 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
923 						void *v)		\
924 	{								\
925 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
926 		struct wmi_log_buf_t *wmi_log =				\
927 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
928 		int pos, nread, outlen;					\
929 		int i;							\
930 		uint64_t secs, usecs;					\
931 									\
932 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
933 		if (!wmi_log->length) {					\
934 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
935 			return wmi_bp_seq_printf(m,			\
936 			"no elements to read from ring buffer!\n");	\
937 		}							\
938 									\
939 		if (wmi_log->length <= wmi_ring_size)			\
940 			nread = wmi_log->length;			\
941 		else							\
942 			nread = wmi_ring_size;				\
943 									\
944 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
945 			/* tail can be 0 after wrap-around */		\
946 			pos = wmi_ring_size - 1;			\
947 		else							\
948 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
949 									\
950 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
951 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
952 		while (nread--) {					\
953 			struct wmi_event_debug *wmi_record;		\
954 									\
955 			wmi_record = (struct wmi_event_debug *)		\
956 			&(((struct wmi_event_debug *)wmi_log->buf)[pos]);\
957 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
958 				&usecs);				\
959 			outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\
960 				(wmi_record->event));			\
961 			outlen +=					\
962 			wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\
963 				secs, usecs);				\
964 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
965 			for (i = 0; i < (wmi_record_max_length/		\
966 					sizeof(uint32_t)); i++)		\
967 				outlen += wmi_bp_seq_printf(m, "%x ",	\
968 					wmi_record->data[i]);		\
969 			outlen += wmi_bp_seq_printf(m, "\n");		\
970 									\
971 			if (pos == 0)					\
972 				pos = wmi_ring_size - 1;		\
973 			else						\
974 				pos--;					\
975 		}							\
976 		return outlen;						\
977 	}
978 
979 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size,
980 				  wmi_command_debug);
981 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size,
982 				  wmi_command_cmp_debug);
983 GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size);
984 GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size);
985 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size,
986 				  wmi_command_debug);
987 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log,
988 					wmi_display_size,
989 					wmi_command_debug);
990 GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size);
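
/*
 * The instantiations above paste func_base into the function name, producing
 * debug_wmi_command_log_show(), debug_wmi_event_log_show(),
 * debug_wmi_mgmt_event_log_show(), and so on; GENERATE_DEBUG_STRUCTS() below
 * wires each of them into a corresponding file_operations structure.
 */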
991 
992 /**
993  * debug_wmi_enable_show() - debugfs functions to display enable state of
994  * wmi logging feature.
995  *
996  * @m: debugfs handler to access wmi_handle
997  * @v: Variable arguments (not used)
998  *
999  * Return: Length of characters printed
1000  */
1001 static int debug_wmi_enable_show(struct seq_file *m, void *v)
1002 {
1003 	wmi_unified_t wmi_handle = (wmi_unified_t) m->private;
1004 
1005 	return wmi_bp_seq_printf(m, "%d\n",
1006 			wmi_handle->log_info.wmi_logging_enable);
1007 }
1008 
1009 /**
1010  * debug_wmi_log_size_show() - debugfs functions to display configured size of
1011  * wmi logging command/event buffer and management command/event buffer.
1012  *
1013  * @m: debugfs handler to access wmi_handle
1014  * @v: Variable arguments (not used)
1015  *
1016  * Return: Length of characters printed
1017  */
1018 static int debug_wmi_log_size_show(struct seq_file *m, void *v)
1019 {
1020 
1021 	wmi_bp_seq_printf(m, "WMI command/cmpl log max size:%d/%d\n",
1022 			  wmi_cmd_log_max_entry, wmi_cmd_cmpl_log_max_entry);
1023 	wmi_bp_seq_printf(m, "WMI management Tx/cmpl log max size:%d/%d\n",
1024 			  wmi_mgmt_tx_log_max_entry,
1025 			  wmi_mgmt_tx_cmpl_log_max_entry);
1026 	wmi_bp_seq_printf(m, "WMI event log max size:%d\n",
1027 			  wmi_event_log_max_entry);
1028 	wmi_bp_seq_printf(m, "WMI management Rx log max size:%d\n",
1029 			  wmi_mgmt_rx_log_max_entry);
1030 	return wmi_bp_seq_printf(m,
1031 				 "WMI diag log max size:%d\n",
1032 				 wmi_diag_log_max_entry);
1033 }
1034 
1035 /**
1036  * debug_wmi_##func_base##_write() - debugfs functions to clear
1037  * wmi logging command/event buffer and management command/event buffer.
1038  *
1039  * @file: file handler to access wmi_handle
1040  * @buf: received data buffer
1041  * @count: length of received buffer
1042  * @ppos: Not used
1043  *
1044  * Return: count
1045  */
1046 #define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
1047 	static ssize_t debug_wmi_##func_base##_write(struct file *file,	\
1048 				const char __user *buf,			\
1049 				size_t count, loff_t *ppos)		\
1050 	{								\
1051 		int k, ret;						\
1052 		wmi_unified_t wmi_handle =				\
1053 			((struct seq_file *)file->private_data)->private;\
1054 		struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info.	\
1055 				wmi_##func_base##_buf_info;		\
1056 		char locbuf[50];					\
1057 									\
1058 		if ((!buf) || (count > 50))				\
1059 			return -EFAULT;					\
1060 									\
1061 		if (copy_from_user(locbuf, buf, count))			\
1062 			return -EFAULT;					\
1063 									\
1064 		ret = sscanf(locbuf, "%d", &k);				\
1065 		if ((ret != 1) || (k != 0)) {                           \
1066 			wmi_err("Wrong input, echo 0 to clear the wmi buffer");\
1067 			return -EINVAL;					\
1068 		}							\
1069 									\
1070 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
1071 		qdf_mem_zero(wmi_log->buf, wmi_ring_size *		\
1072 				sizeof(struct wmi_record_type));	\
1073 		wmi_log->length = 0;					\
1074 		*(wmi_log->p_buf_tail_idx) = 0;				\
1075 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1076 									\
1077 		return count;						\
1078 	}
1079 
1080 GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_cmd_log_max_entry,
1081 			   wmi_command_debug);
1082 GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_cmd_cmpl_log_max_entry,
1083 			   wmi_command_cmp_debug);
1084 GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_event_log_max_entry,
1085 			   wmi_event_debug);
1086 GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_event_log_max_entry,
1087 			   wmi_event_debug);
1088 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_tx_log_max_entry,
1089 			   wmi_command_debug);
1090 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log,
1091 			   wmi_mgmt_tx_cmpl_log_max_entry, wmi_command_debug);
1092 GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_rx_log_max_entry,
1093 			   wmi_event_debug);
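
/*
 * The generated write handlers clear the corresponding ring buffer from
 * userspace; only the value 0 is accepted. Assuming debugfs is mounted at
 * /sys/kernel/debug, e.g.:
 *
 *   echo 0 > /sys/kernel/debug/WMI_SOC0_PDEV0/wmi_command_log
 *
 * Any other value is rejected with -EINVAL.
 */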
1094 
1095 /**
1096  * debug_wmi_enable_write() - debugfs functions to enable/disable
1097  * wmi logging feature.
1098  *
1099  * @file: file handler to access wmi_handle
1100  * @buf: received data buffer
1101  * @count: length of received buffer
1102  * @ppos: Not used
1103  *
1104  * Return: count
1105  */
1106 static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf,
1107 					size_t count, loff_t *ppos)
1108 {
1109 	wmi_unified_t wmi_handle =
1110 		((struct seq_file *)file->private_data)->private;
1111 	int k, ret;
1112 	char locbuf[50];
1113 
1114 	if ((!buf) || (count > 50))
1115 		return -EFAULT;
1116 
1117 	if (copy_from_user(locbuf, buf, count))
1118 		return -EFAULT;
1119 
1120 	ret = sscanf(locbuf, "%d", &k);
1121 	if ((ret != 1) || ((k != 0) && (k != 1)))
1122 		return -EINVAL;
1123 
1124 	wmi_handle->log_info.wmi_logging_enable = k;
1125 	return count;
1126 }
1127 
1128 /**
1129  * debug_wmi_log_size_write() - reserved.
1130  *
1131  * @file: file handler to access wmi_handle
1132  * @buf: received data buffer
1133  * @count: length of received buffer
1134  * @ppos: Not used
1135  *
1136  * Return: -EINVAL (the log size is not writable)
1137  */
1138 static ssize_t debug_wmi_log_size_write(struct file *file,
1139 		const char __user *buf, size_t count, loff_t *ppos)
1140 {
1141 	return -EINVAL;
1142 }
1143 
1144 /* Structure to maintain debug information */
1145 struct wmi_debugfs_info {
1146 	const char *name;
1147 	const struct file_operations *ops;
1148 };
1149 
1150 #define DEBUG_FOO(func_base) { .name = #func_base,			\
1151 	.ops = &debug_##func_base##_ops }
1152 
1153 /**
1154  * debug_##func_base##_open() - Open debugfs entry for respective command
1155  * and event buffer.
1156  *
1157  * @inode: node for debug dir entry
1158  * @file: file handler
1159  *
1160  * Return: open status
1161  */
1162 #define GENERATE_DEBUG_STRUCTS(func_base)				\
1163 	static int debug_##func_base##_open(struct inode *inode,	\
1164 						struct file *file)	\
1165 	{								\
1166 		return single_open(file, debug_##func_base##_show,	\
1167 				inode->i_private);			\
1168 	}								\
1169 									\
1170 									\
1171 	static struct file_operations debug_##func_base##_ops = {	\
1172 		.open		= debug_##func_base##_open,		\
1173 		.read		= seq_read,				\
1174 		.llseek		= seq_lseek,				\
1175 		.write		= debug_##func_base##_write,		\
1176 		.release	= single_release,			\
1177 	};
1178 
1179 GENERATE_DEBUG_STRUCTS(wmi_command_log);
1180 GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log);
1181 GENERATE_DEBUG_STRUCTS(wmi_event_log);
1182 GENERATE_DEBUG_STRUCTS(wmi_rx_event_log);
1183 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log);
1184 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log);
1185 GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log);
1186 GENERATE_DEBUG_STRUCTS(wmi_enable);
1187 GENERATE_DEBUG_STRUCTS(wmi_log_size);
1188 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1189 GENERATE_DEBUG_STRUCTS(filtered_wmi_cmds);
1190 GENERATE_DEBUG_STRUCTS(filtered_wmi_evts);
1191 GENERATE_DEBUG_STRUCTS(wmi_filtered_command_log);
1192 GENERATE_DEBUG_STRUCTS(wmi_filtered_event_log);
1193 #endif
1194 
1195 struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = {
1196 	DEBUG_FOO(wmi_command_log),
1197 	DEBUG_FOO(wmi_command_tx_cmp_log),
1198 	DEBUG_FOO(wmi_event_log),
1199 	DEBUG_FOO(wmi_rx_event_log),
1200 	DEBUG_FOO(wmi_mgmt_command_log),
1201 	DEBUG_FOO(wmi_mgmt_command_tx_cmp_log),
1202 	DEBUG_FOO(wmi_mgmt_event_log),
1203 	DEBUG_FOO(wmi_enable),
1204 	DEBUG_FOO(wmi_log_size),
1205 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1206 	DEBUG_FOO(filtered_wmi_cmds),
1207 	DEBUG_FOO(filtered_wmi_evts),
1208 	DEBUG_FOO(wmi_filtered_command_log),
1209 	DEBUG_FOO(wmi_filtered_event_log),
1210 #endif
1211 };
1212 
1213 /**
1214  * wmi_debugfs_create() - Create debugfs entries for wmi logging.
1215  *
1216  * @wmi_handle: wmi handle
1217  * @par_entry: parent debugfs directory entry under which the files listed
1218  * in wmi_debugfs_infos[] are created
1219  *
1220  * Return: none
1221  */
1222 static void wmi_debugfs_create(wmi_unified_t wmi_handle,
1223 			       struct dentry *par_entry)
1224 {
1225 	int i;
1226 
1227 	if (!par_entry)
1228 		goto out;
1229 
1230 	for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1231 		wmi_handle->debugfs_de[i] = debugfs_create_file(
1232 				wmi_debugfs_infos[i].name, 0644, par_entry,
1233 				wmi_handle, wmi_debugfs_infos[i].ops);
1234 
1235 		if (!wmi_handle->debugfs_de[i]) {
1236 			wmi_err("debug Entry creation failed!");
1237 			goto out;
1238 		}
1239 	}
1240 
1241 	return;
1242 
1243 out:
1244 	wmi_err("debug Entry creation failed!");
1245 	wmi_log_buffer_free(wmi_handle);
1246 	return;
1247 }
1248 
1249 /**
1250  * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
1251  * @wmi_handle: wmi handle
1252  *
1253  * Removes the per-handle debugfs files and their parent directory.
1254  *
1255  * Return: none
1256  */
1257 static void wmi_debugfs_remove(wmi_unified_t wmi_handle)
1258 {
1259 	int i;
1260 	struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir;
1261 
1262 	if (dentry) {
1263 		for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1264 			if (wmi_handle->debugfs_de[i])
1265 				wmi_handle->debugfs_de[i] = NULL;
1266 		}
1267 	}
1268 
1269 	if (dentry)
1270 		debugfs_remove_recursive(dentry);
1271 }
1272 
1273 /**
1274  * wmi_debugfs_init() - create the wmi logging debugfs directory and its
1275  * entries.
1276  * @wmi_handle: wmi handle
1277  * @pdev_idx: pdev index used in the directory name
1278  *
1279  * Return: init status
1280  */
1281 static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx)
1282 {
1283 	char buf[32];
1284 
1285 	snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u",
1286 		 wmi_handle->soc->soc_idx, pdev_idx);
1287 
1288 	wmi_handle->log_info.wmi_log_debugfs_dir =
1289 		debugfs_create_dir(buf, NULL);
1290 
1291 	if (!wmi_handle->log_info.wmi_log_debugfs_dir) {
1292 		wmi_err("error while creating debugfs dir for %s", buf);
1293 		return QDF_STATUS_E_FAILURE;
1294 	}
1295 	wmi_debugfs_create(wmi_handle,
1296 			   wmi_handle->log_info.wmi_log_debugfs_dir);
1297 
1298 	return QDF_STATUS_SUCCESS;
1299 }
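
/*
 * Resulting layout, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   /sys/kernel/debug/WMI_SOC0_PDEV0/
 *       wmi_command_log, wmi_command_tx_cmp_log, wmi_event_log,
 *       wmi_rx_event_log, wmi_mgmt_command_log, wmi_mgmt_event_log,
 *       wmi_enable, wmi_log_size, ...
 *
 * Reading a *_log file dumps the recorded entries; writing 0 clears it, and
 * writing 0/1 to wmi_enable toggles WMI logging at runtime.
 */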
1300 
1301 /**
1302  * wmi_mgmt_cmd_record() - Wrapper function for mgmt command logging macro
1303  *
1304  * @wmi_handle: wmi handle
1305  * @cmd: mgmt command
1306  * @header: pointer to 802.11 header
1307  * @vdev_id: vdev id
1308  * @chanfreq: channel frequency
1309  *
1310  * Return: none
1311  */
1312 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1313 			void *header, uint32_t vdev_id, uint32_t chanfreq)
1314 {
1315 
1316 	uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE];
1317 
1318 	data[0] = ((struct wmi_command_header *)header)->type;
1319 	data[1] = ((struct wmi_command_header *)header)->sub_type;
1320 	data[2] = vdev_id;
1321 	data[3] = chanfreq;
1322 
1323 	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
1324 
1325 	WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data);
1326 	wmi_specific_cmd_record(wmi_handle, cmd, (uint8_t *)data);
1327 	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
1328 }
1329 #else
1330 /**
1331  * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
1332  * @wmi_handle: wmi handle
1333  *
1334  * No-op stub when WMI_INTERFACE_EVENT_LOGGING is disabled.
1335  *
1336  * Return: none
1337  */
1338 static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { }
1339 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1340 			void *header, uint32_t vdev_id, uint32_t chanfreq) { }
1341 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { }
1342 #endif /*WMI_INTERFACE_EVENT_LOGGING */
1343 qdf_export_symbol(wmi_mgmt_cmd_record);
1344 
1345 #ifdef WMI_EXT_DBG
1346 
1347 /**
1348  * wmi_ext_dbg_msg_enqueue() - enqueue wmi message
1349  * @wmi_handle: wmi handler
1350  *
1351  * Return: size of wmi message queue after enqueue
1352  */
1353 static uint32_t wmi_ext_dbg_msg_enqueue(struct wmi_unified *wmi_handle,
1354 					struct wmi_ext_dbg_msg *msg)
1355 {
1356 	uint32_t list_size;
1357 
1358 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1359 	qdf_list_insert_back_size(&wmi_handle->wmi_ext_dbg_msg_queue,
1360 				  &msg->node, &list_size);
1361 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1362 
1363 	return list_size;
1364 }
1365 
1366 /**
1367  * wmi_ext_dbg_msg_dequeue() - dequeue wmi message
1368  * @wmi_handle: wmi handler
1369  *
1370  * Return: wmi msg on success else NULL
1371  */
1372 static struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_dequeue(struct wmi_unified
1373 						       *wmi_handle)
1374 {
1375 	qdf_list_node_t *list_node = NULL;
1376 
1377 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1378 	qdf_list_remove_front(&wmi_handle->wmi_ext_dbg_msg_queue, &list_node);
1379 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1380 
1381 	if (!list_node)
1382 		return NULL;
1383 
1384 	return qdf_container_of(list_node, struct wmi_ext_dbg_msg, node);
1385 }
1386 
1387 /**
1388  * wmi_ext_dbg_msg_record() - record wmi messages
1389  * @wmi_handle: wmi handler
1390  * @buf: wmi message buffer
1391  * @len: wmi message length
1392  * @type: wmi message type
1393  *
1394  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1395  */
1396 static QDF_STATUS wmi_ext_dbg_msg_record(struct wmi_unified *wmi_handle,
1397 					 uint8_t *buf, uint32_t len,
1398 					 enum WMI_MSG_TYPE type)
1399 {
1400 	struct wmi_ext_dbg_msg *msg;
1401 	uint32_t list_size;
1402 
1403 	msg = wmi_ext_dbg_msg_get(len);
1404 	if (!msg)
1405 		return QDF_STATUS_E_NOMEM;
1406 
1407 	msg->len = len;
1408 	msg->type = type;
1409 	qdf_mem_copy(msg->buf, buf, len);
1410 	msg->ts = qdf_get_log_timestamp();
1411 	list_size = wmi_ext_dbg_msg_enqueue(wmi_handle, msg);
1412 
1413 	if (list_size >= wmi_handle->wmi_ext_dbg_msg_queue_size) {
1414 		msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1415 		wmi_ext_dbg_msg_put(msg);
1416 	}
1417 
1418 	return QDF_STATUS_SUCCESS;
1419 }
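
/*
 * The queue behaves as a bounded FIFO: once list_size reaches
 * wmi_ext_dbg_msg_queue_size (WMI_EXT_DBG_QUEUE_SIZE, set in
 * wmi_ext_dbgfs_init()), the oldest message is dequeued and freed, so only
 * the most recent messages are retained for the debugfs dump.
 */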
1420 
1421 /**
1422  * wmi_ext_dbg_msg_cmd_record() - record wmi command messages
1423  * @wmi_handle: wmi handler
1424  * @buf: wmi command buffer
1425  * @len: wmi command message length
1426  *
1427  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1428  */
1429 static QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified *wmi_handle,
1430 					     uint8_t *buf, uint32_t len)
1431 {
1432 	return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1433 				      WMI_MSG_TYPE_CMD);
1434 }
1435 
1436 /**
1437  * wmi_ext_dbg_msg_event_record() - record wmi event messages
1438  * @wmi_handle: wmi handler
1439  * @buf: wmi event buffer
1440  * @len: wmi event message length
1441  *
1442  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1443  */
1444 static QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified *wmi_handle,
1445 					       uint8_t *buf, uint32_t len)
1446 {
1447 	uint32_t id;
1448 
1449 	id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
1450 	if (id != wmi_handle->wmi_events[wmi_diag_event_id])
1451 		return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1452 					      WMI_MSG_TYPE_EVENT);
1453 
1454 	return QDF_STATUS_SUCCESS;
1455 }
1456 
1457 /**
1458  * wmi_ext_dbg_msg_queue_init() - create debugfs queue and associated lock
1459  * @wmi_handle: wmi handler
1460  *
1461  * Return: none
1462  */
1463 static void wmi_ext_dbg_msg_queue_init(struct wmi_unified *wmi_handle)
1464 {
1465 	qdf_list_create(&wmi_handle->wmi_ext_dbg_msg_queue,
1466 			wmi_handle->wmi_ext_dbg_msg_queue_size);
1467 	qdf_spinlock_create(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1468 }
1469 
1470 /**
1471  * wmi_ext_dbg_msg_queue_deinit() - destroy debugfs queue and associated lock
1472  * @wmi_handle: wmi handler
1473  *
1474  * Return: none
1475  */
1476 static void wmi_ext_dbg_msg_queue_deinit(struct wmi_unified *wmi_handle)
1477 {
1478 	qdf_list_destroy(&wmi_handle->wmi_ext_dbg_msg_queue);
1479 	qdf_spinlock_destroy(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1480 }
1481 
1482 /**
1483  * wmi_ext_dbg_msg_show() - debugfs function to display whole content of
1484  * wmi command/event messages including headers.
1485  * @file: qdf debugfs file handler
1486  * @arg: pointer to wmi handler
1487  *
1488  * Return: QDF_STATUS_SUCCESS if all the messages are shown successfully,
1489  * else QDF_STATUS_E_AGAIN if more data to show.
1490  */
1491 static QDF_STATUS wmi_ext_dbg_msg_show(qdf_debugfs_file_t file, void *arg)
1492 {
1493 	struct wmi_unified *wmi_handle = (struct wmi_unified *)arg;
1494 	struct wmi_ext_dbg_msg *msg;
1495 	uint64_t secs, usecs;
1496 
1497 	msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1498 	if (!msg)
1499 		return QDF_STATUS_SUCCESS;
1500 
1501 	qdf_debugfs_printf(file, "%s: 0x%x\n",
1502 			   msg->type == WMI_MSG_TYPE_CMD ? "COMMAND" :
1503 			   "EVENT", WMI_GET_FIELD(msg->buf, WMI_CMD_HDR,
1504 						  COMMANDID));
1505 	qdf_log_timestamp_to_secs(msg->ts, &secs, &usecs);
1506 	qdf_debugfs_printf(file, "Time: %llu.%llu\n", secs, usecs);
1507 	qdf_debugfs_printf(file, "Length:%d\n", msg->len);
1508 	qdf_debugfs_hexdump(file, msg->buf, msg->len,
1509 			    WMI_EXT_DBG_DUMP_ROW_SIZE,
1510 			    WMI_EXT_DBG_DUMP_GROUP_SIZE);
1511 	qdf_debugfs_printf(file, "\n");
1512 
1513 	if (qdf_debugfs_overflow(file)) {
1514 		qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1515 		qdf_list_insert_front(&wmi_handle->wmi_ext_dbg_msg_queue,
1516 				      &msg->node);
1517 		qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1518 
1519 	} else {
1520 		wmi_ext_dbg_msg_put(msg);
1521 	}
1522 
1523 	return QDF_STATUS_E_AGAIN;
1524 }
1525 
1526 /**
1527  * wmi_ext_dbg_msg_write() - debugfs write not supported
1528  * @priv: private data
1529  * @buf: received data buffer
1530  * @len: length of received buffer
1531  *
1532  * Return: QDF_STATUS_E_NOSUPPORT.
1533  */
1534 static QDF_STATUS wmi_ext_dbg_msg_write(void *priv, const char *buf,
1535 					qdf_size_t len)
1536 {
1537 	return QDF_STATUS_E_NOSUPPORT;
1538 }
1539 
1540 static struct qdf_debugfs_fops wmi_ext_dbgfs_ops[WMI_MAX_RADIOS];
1541 
1542 /**
1543  * wmi_ext_dbgfs_init() - init debugfs items for extended wmi dump.
1544  * @wmi_handle: wmi handler
1545  * @pdev_idx: pdev index
1546  *
1547  * Return: QDF_STATUS_SUCCESS if debugfs is initialized else
1548  * QDF_STATUS_E_FAILURE
1549  */
1550 static QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1551 				     uint32_t pdev_idx)
1552 {
1553 	qdf_dentry_t dentry;
1554 	char buf[32];
1555 
1556 	/* To maintain backward compatibility, the naming convention for the
1557 	 * PDEV 0 dentry is kept the same as before. For more than one PDEV,
1558 	 * the dentry name is suffixed with the SOC and PDEV indices.
1559 	 */
1560 	if (wmi_handle->soc->soc_idx == 0 && pdev_idx == 0) {
1561 		dentry  = qdf_debugfs_create_dir(WMI_EXT_DBG_DIR, NULL);
1562 	} else {
1563 		snprintf(buf, sizeof(buf), "WMI_EXT_DBG_SOC%u_PDEV%u",
1564 			 wmi_handle->soc->soc_idx, pdev_idx);
1565 		dentry  = qdf_debugfs_create_dir(buf, NULL);
1566 	}
1567 
1568 	if (!dentry) {
1569 		wmi_err("error while creating extended wmi debugfs dir");
1570 		return QDF_STATUS_E_FAILURE;
1571 	}
1572 
1573 	wmi_ext_dbgfs_ops[pdev_idx].show = wmi_ext_dbg_msg_show;
1574 	wmi_ext_dbgfs_ops[pdev_idx].write = wmi_ext_dbg_msg_write;
1575 	wmi_ext_dbgfs_ops[pdev_idx].priv = wmi_handle;
1576 	if (!qdf_debugfs_create_file(WMI_EXT_DBG_FILE, WMI_EXT_DBG_FILE_PERM,
1577 				     dentry, &wmi_ext_dbgfs_ops[pdev_idx])) {
1578 		qdf_debugfs_remove_dir(dentry);
1579 		wmi_err("Error while creating extended wmi debugfs file");
1580 		return QDF_STATUS_E_FAILURE;
1581 	}
1582 
1583 	wmi_handle->wmi_ext_dbg_dentry = dentry;
1584 	wmi_handle->wmi_ext_dbg_msg_queue_size = WMI_EXT_DBG_QUEUE_SIZE;
1585 	wmi_ext_dbg_msg_queue_init(wmi_handle);
1586 
1587 	return QDF_STATUS_SUCCESS;
1588 }
1589 
1590 /**
1591  * wmi_ext_dbgfs_deinit() - cleanup/deinit debugfs items of extended wmi dump.
1592  * @wmi_handle: wmi handle
1593  *
1594  * Return: QDF_STATUS_SUCCESS if cleanup is successful
1595  */
1596 static QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1597 {
1598 	struct wmi_ext_dbg_msg *msg;
1599 
1600 	while ((msg = wmi_ext_dbg_msg_dequeue(wmi_handle)))
1601 		wmi_ext_dbg_msg_put(msg);
1602 
1603 	wmi_ext_dbg_msg_queue_deinit(wmi_handle);
1604 	qdf_debugfs_remove_dir_recursive(wmi_handle->wmi_ext_dbg_dentry);
1605 
1606 	return QDF_STATUS_SUCCESS;
1607 }
1608 
1609 #else
1610 
1611 static inline QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified
1612 						    *wmi_handle,
1613 						    uint8_t *buf, uint32_t len)
1614 {
1615 	return QDF_STATUS_SUCCESS;
1616 }
1617 
1618 static inline QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified
1619 						      *wmi_handle,
1620 						      uint8_t *buf, uint32_t len)
1621 {
1622 	return QDF_STATUS_SUCCESS;
1623 }
1624 
1625 static inline QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1626 					    uint32_t pdev_idx)
1627 {
1628 	return QDF_STATUS_SUCCESS;
1629 }
1630 
1631 static inline QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1632 {
1633 	return QDF_STATUS_SUCCESS;
1634 }
1635 
1636 #endif /*WMI_EXT_DBG */
1637 
1638 int wmi_get_host_credits(wmi_unified_t wmi_handle);
1639 /* WMI buffer APIs */
1640 
1641 #ifdef NBUF_MEMORY_DEBUG
1642 wmi_buf_t
1643 wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len,
1644 		    const char *func_name,
1645 		    uint32_t line_num)
1646 {
1647 	wmi_buf_t wmi_buf;
1648 
1649 	if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) {
1650 		QDF_ASSERT(0);
1651 		return NULL;
1652 	}
1653 
1654 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, func_name,
1655 				 line_num);
1656 	if (!wmi_buf)
1657 		wmi_buf = qdf_nbuf_alloc_debug(NULL,
1658 					       roundup(len + WMI_MIN_HEAD_ROOM,
1659 						       4),
1660 					       WMI_MIN_HEAD_ROOM, 4, false,
1661 					       func_name, line_num);
1662 	if (!wmi_buf)
1663 		return NULL;
1664 
1665 	/* Clear the wmi buffer */
1666 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1667 
1668 	/*
1669 	 * Set the length of the buffer to match the allocation size.
1670 	 */
1671 	qdf_nbuf_set_pktlen(wmi_buf, len);
1672 
1673 	return wmi_buf;
1674 }
1675 qdf_export_symbol(wmi_buf_alloc_debug);
1676 
1677 void wmi_buf_free(wmi_buf_t net_buf)
1678 {
1679 	net_buf = wbuff_buff_put(net_buf);
1680 	if (net_buf)
1681 		qdf_nbuf_free(net_buf);
1682 }
1683 qdf_export_symbol(wmi_buf_free);
1684 #else
1685 wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len,
1686 			   const char *func, uint32_t line)
1687 {
1688 	wmi_buf_t wmi_buf;
1689 
1690 	if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) {
1691 		QDF_DEBUG_PANIC("Invalid length %u (via %s:%u)",
1692 				len, func, line);
1693 		return NULL;
1694 	}
1695 
1696 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, __func__,
1697 				 __LINE__);
1698 	if (!wmi_buf)
1699 		wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len +
1700 				WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4,
1701 				false, func, line);
1702 
1703 	if (!wmi_buf) {
1704 		wmi_nofl_err("%s:%d, failed to alloc len:%d", func, line, len);
1705 		return NULL;
1706 	}
1707 
1708 	/* Clear the wmi buffer */
1709 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1710 
1711 	/*
1712 	 * Set the length of the buffer to match the allocation size.
1713 	 */
1714 	qdf_nbuf_set_pktlen(wmi_buf, len);
1715 
1716 	return wmi_buf;
1717 }
1718 qdf_export_symbol(wmi_buf_alloc_fl);
1719 
1720 void wmi_buf_free(wmi_buf_t net_buf)
1721 {
1722 	net_buf = wbuff_buff_put(net_buf);
1723 	if (net_buf)
1724 		qdf_nbuf_free(net_buf);
1725 }
1726 qdf_export_symbol(wmi_buf_free);
1727 #endif
1728 
1729 /**
1730  * wmi_get_max_msg_len() - get maximum WMI message length
1731  * @wmi_handle: WMI handle.
1732  *
1733  * This function returns the maximum WMI message length
1734  *
1735  * Return: maximum WMI message length
1736  */
1737 uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle)
1738 {
1739 	return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM;
1740 }
1741 qdf_export_symbol(wmi_get_max_msg_len);
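/*
 * Illustrative allocation sketch (an assumption-laden example, not driver
 * code): callers typically allocate through the wmi_buf_alloc() wrapper,
 * which is assumed here to resolve to wmi_buf_alloc_fl()/wmi_buf_alloc_debug()
 * above, size the payload against wmi_get_max_msg_len(), and release the
 * buffer with wmi_buf_free() on any error path:
 *
 *	uint32_t payload_len = sizeof(struct my_cmd_params);
 *	wmi_buf_t buf;
 *
 *	if (payload_len > wmi_get_max_msg_len(wmi_handle))
 *		return QDF_STATUS_E_INVAL;
 *
 *	buf = wmi_buf_alloc(wmi_handle, payload_len);
 *	if (!buf)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	qdf_mem_copy(wmi_buf_data(buf), &params, payload_len);
 *
 * 'struct my_cmd_params' and 'params' are placeholders for a command-specific
 * payload. The allocator already zeroes the buffer and reserves
 * WMI_MIN_HEAD_ROOM bytes of headroom for the WMI/HTC headers, and it may
 * satisfy the request from the wbuff pool registered further below in
 * wmi_wbuff_register(), falling back to a fresh nbuf otherwise.
 */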
1742 
1743 #ifndef WMI_CMD_STRINGS
1744 static uint8_t *wmi_id_to_name(uint32_t wmi_command)
1745 {
1746 	return "Invalid WMI cmd";
1747 }
1748 #endif
1749 
1750 static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag)
1751 {
1752 	wmi_debug("Send WMI command:%s command_id:%d htc_tag:%d",
1753 		 wmi_id_to_name(cmd_id), cmd_id, tag);
1754 }
1755 
1756 /**
1757  * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence
1758  * @cmd_id: command to check
1759  *
1760  * Return: true if the command is part of the resume sequence.
1761  */
1762 #ifdef WLAN_POWER_MANAGEMENT_OFFLOAD
1763 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1764 {
1765 	switch (cmd_id) {
1766 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
1767 	case WMI_PDEV_RESUME_CMDID:
1768 		return true;
1769 
1770 	default:
1771 		return false;
1772 	}
1773 }
1774 
1775 #else
1776 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1777 {
1778 	return false;
1779 }
1780 
1781 #endif
1782 
1783 #ifdef FEATURE_WLAN_D0WOW
1784 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1785 {
1786 	wmi_d0_wow_enable_disable_cmd_fixed_param *cmd;
1787 
1788 	if (cmd_id == WMI_D0_WOW_ENABLE_DISABLE_CMDID) {
1789 		cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *)
1790 			wmi_buf_data(buf);
1791 		if (!cmd->enable)
1792 			return true;
1793 		else
1794 			return false;
1795 	}
1796 
1797 	return false;
1798 }
1799 #else
1800 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1801 {
1802 	return false;
1803 }
1804 
1805 #endif
1806 
1807 #ifdef WMI_INTERFACE_SEQUENCE_CHECK
1808 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1809 {
1810 	wmi_handle->wmi_sequence = 0;
1811 	wmi_handle->wmi_exp_sequence = 0;
1812 	wmi_handle->wmi_sequence_stop = false;
1813 }
1814 
1815 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1816 {
1817 	qdf_spinlock_create(&wmi_handle->wmi_seq_lock);
1818 	wmi_interface_sequence_reset(wmi_handle);
1819 }
1820 
1821 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1822 {
1823 	qdf_spinlock_destroy(&wmi_handle->wmi_seq_lock);
1824 }
1825 
1826 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1827 {
1828 	wmi_handle->wmi_sequence_stop = true;
1829 }
1830 
1831 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1832 					  HTC_PACKET *pkt,
1833 					  const char *func, uint32_t line)
1834 {
1835 	wmi_buf_t buf = GET_HTC_PACKET_NET_BUF_CONTEXT(pkt);
1836 	QDF_STATUS status;
1837 
1838 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1839 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1840 	if (QDF_STATUS_SUCCESS != status) {
1841 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1842 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1843 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1844 			     func, line, status);
1845 		qdf_mem_free(pkt);
1846 		return status;
1847 	}
1848 	/* Record the sequence number in the SKB */
1849 	qdf_nbuf_set_mark(buf, wmi_handle->wmi_sequence);
1850 	/* Increment the sequence number */
1851 	wmi_handle->wmi_sequence = (wmi_handle->wmi_sequence + 1)
1852 				   & (wmi_handle->wmi_max_cmds - 1);
1853 	qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1854 
1855 	return status;
1856 }
1857 
1858 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1859 						wmi_buf_t buf)
1860 {
1861 	/* Skip sequence check when wmi sequence stop is set */
1862 	if (wmi_handle->wmi_sequence_stop)
1863 		return;
1864 
1865 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1866 	/* Match the completion sequence and expected sequence number */
1867 	if (qdf_nbuf_get_mark(buf) != wmi_handle->wmi_exp_sequence) {
1868 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1869 		wmi_nofl_err("WMI Tx Completion Sequence number mismatch");
1870 		wmi_nofl_err("Expected %d Received %d",
1871 			     wmi_handle->wmi_exp_sequence,
1872 			     qdf_nbuf_get_mark(buf));
1873 		/* Trigger Recovery */
1874 		qdf_trigger_self_recovery(wmi_handle->soc,
1875 					  QDF_WMI_BUF_SEQUENCE_MISMATCH);
1876 	} else {
1877 		/* Increment the expected sequence number */
1878 		wmi_handle->wmi_exp_sequence =
1879 				(wmi_handle->wmi_exp_sequence + 1)
1880 				& (wmi_handle->wmi_max_cmds - 1);
1881 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1882 	}
1883 }
1884 #else
1885 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1886 {
1887 }
1888 
1889 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1890 {
1891 }
1892 
1893 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1894 {
1895 }
1896 
1897 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1898 {
1899 }
1900 
1901 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1902 					  HTC_PACKET *pkt,
1903 					  const char *func, uint32_t line)
1904 {
1905 	QDF_STATUS status;
1906 
1907 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1908 	if (QDF_STATUS_SUCCESS != status) {
1909 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1910 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1911 			     func, line, status);
1912 		qdf_mem_free(pkt);
1913 		return status;
1914 	}
1915 
1916 	return status;
1917 }
1918 
1919 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1920 						wmi_buf_t buf)
1921 {
1922 }
1923 #endif
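/*
 * Note on the sequence-check scheme above (descriptive, with one worked
 * example): when WMI_INTERFACE_SEQUENCE_CHECK is enabled, every command sent
 * under wmi_seq_lock gets the current wmi_sequence stored in its nbuf mark,
 * and the tx-completion path (wmi_interface_sequence_check(), called from
 * wmi_htc_tx_complete()) compares that mark against wmi_exp_sequence; a
 * mismatch triggers self recovery with QDF_WMI_BUF_SEQUENCE_MISMATCH. Both
 * counters advance as (seq + 1) & (wmi_max_cmds - 1), which implicitly
 * assumes wmi_max_cmds is a power of two: with wmi_max_cmds == 1024 (an
 * illustrative value), the sequence runs 0, 1, ..., 1023 and then wraps back
 * to 0, so an out-of-order HTC completion is detected as soon as it occurs.
 */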
1924 
1925 static inline void wmi_unified_debug_dump(wmi_unified_t wmi_handle)
1926 {
1927 	wmi_nofl_err("Endpoint ID = %d, Tx Queue Depth = %d, soc_id = %u, target type = %s",
1928 		     wmi_handle->wmi_endpoint_id,
1929 		     htc_get_tx_queue_depth(wmi_handle->htc_handle,
1930 					    wmi_handle->wmi_endpoint_id),
1931 		     wmi_handle->soc->soc_idx,
1932 		     (wmi_handle->target_type ==
1933 		      WMI_TLV_TARGET ? "WMI_TLV_TARGET" :
1934 						"WMI_NON_TLV_TARGET"));
1935 }
1936 
1937 QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf,
1938 				   uint32_t len, uint32_t cmd_id,
1939 				   const char *func, uint32_t line)
1940 {
1941 	HTC_PACKET *pkt;
1942 	uint16_t htc_tag = 0;
1943 
1944 	if (wmi_get_runtime_pm_inprogress(wmi_handle)) {
1945 		htc_tag = wmi_handle->ops->wmi_set_htc_tx_tag(wmi_handle, buf,
1946 							      cmd_id);
1947 	} else if (qdf_atomic_read(&wmi_handle->is_target_suspended) &&
1948 		   !wmi_is_pm_resume_cmd(cmd_id) &&
1949 		   !wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id)) {
1950 		wmi_nofl_err("Target is suspended (via %s:%u)",
1951 			     func, line);
1952 		return QDF_STATUS_E_BUSY;
1953 	}
1954 
1955 	if (wmi_handle->wmi_stopinprogress) {
1956 		wmi_nofl_err("%s:%d, WMI stop in progress, wmi_handle:%pK",
1957 			     func, line, wmi_handle);
1958 		return QDF_STATUS_E_INVAL;
1959 	}
1960 
1961 #ifndef WMI_NON_TLV_SUPPORT
1962 	/* Do sanity check on the TLV parameter structure */
1963 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
1964 		void *buf_ptr = (void *)qdf_nbuf_data(buf);
1965 
1966 		if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id)
1967 			!= 0) {
1968 			wmi_nofl_err("%s:%d, Invalid WMI Param Buffer for Cmd:%d",
1969 				     func, line, cmd_id);
1970 			return QDF_STATUS_E_INVAL;
1971 		}
1972 	}
1973 #endif
1974 
1975 	if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
1976 		wmi_nofl_err("%s:%d, Failed to send cmd %x, no memory",
1977 			     func, line, cmd_id);
1978 		return QDF_STATUS_E_NOMEM;
1979 	}
1980 
1981 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
1982 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
1983 
1984 	qdf_atomic_inc(&wmi_handle->pending_cmds);
1985 	if (qdf_atomic_read(&wmi_handle->pending_cmds) >=
1986 			wmi_handle->wmi_max_cmds) {
1987 		wmi_nofl_err("hostcredits = %d",
1988 			     wmi_get_host_credits(wmi_handle));
1989 		htc_dump_counter_info(wmi_handle->htc_handle);
1990 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1991 		wmi_nofl_err("%s:%d, MAX %d WMI Pending cmds reached",
1992 			     func, line, wmi_handle->wmi_max_cmds);
1993 		wmi_unified_debug_dump(wmi_handle);
1994 		htc_ce_tasklet_debug_dump(wmi_handle->htc_handle);
1995 		qdf_trigger_self_recovery(wmi_handle->soc->wmi_psoc,
1996 					  QDF_WMI_EXCEED_MAX_PENDING_CMDS);
1997 		return QDF_STATUS_E_BUSY;
1998 	}
1999 
2000 	pkt = qdf_mem_malloc_fl(sizeof(*pkt), func, line);
2001 	if (!pkt) {
2002 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2003 		return QDF_STATUS_E_NOMEM;
2004 	}
2005 
2006 	SET_HTC_PACKET_INFO_TX(pkt,
2007 			       NULL,
2008 			       qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
2009 			       wmi_handle->wmi_endpoint_id, htc_tag);
2010 
2011 	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);
2012 	wmi_log_cmd_id(cmd_id, htc_tag);
2013 	wmi_ext_dbg_msg_cmd_record(wmi_handle,
2014 				   qdf_nbuf_data(buf), qdf_nbuf_len(buf));
2015 #ifdef WMI_INTERFACE_EVENT_LOGGING
2016 	if (wmi_handle->log_info.wmi_logging_enable) {
2017 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2018 		/*
2019 		 * Record 16 bytes of WMI cmd data -
2020 		 * exclude TLV and WMI headers
2021 		 *
2022 		 * WMI mgmt command already recorded in wmi_mgmt_cmd_record
2023 		 */
2024 		if (wmi_handle->ops->is_management_record(cmd_id) == false) {
2025 			uint8_t *tmpbuf = (uint8_t *)qdf_nbuf_data(buf) +
2026 				wmi_handle->soc->buf_offset_command;
2027 
2028 			WMI_COMMAND_RECORD(wmi_handle, cmd_id, tmpbuf);
2029 			wmi_specific_cmd_record(wmi_handle, cmd_id, tmpbuf);
2030 		}
2031 
2032 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2033 	}
2034 #endif
2035 	return wmi_htc_send_pkt(wmi_handle, pkt, func, line);
2036 }
2037 qdf_export_symbol(wmi_unified_cmd_send_fl);
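/*
 * Illustrative caller sketch for the send path above (assumptions flagged):
 * wmi_unified_cmd_send() is assumed to be the wrapper that supplies
 * __func__/__LINE__ to wmi_unified_cmd_send_fl(). On success the buffer is
 * owned by HTC and is freed in wmi_htc_tx_complete(); on failure the caller
 * still owns it and must free it:
 *
 *	QDF_STATUS status;
 *
 *	status = wmi_unified_cmd_send(wmi_handle, buf, payload_len, cmd_id);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		wmi_buf_free(buf);
 *
 * 'buf', 'payload_len' and 'cmd_id' are placeholders carried over from the
 * allocation sketch earlier in this file. Note that the function itself
 * pushes the WMI_CMD_HDR, accounts the command in pending_cmds, and rejects
 * sends while the target is suspended or a WMI stop is in progress.
 */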
2038 
2039 /**
2040  * wmi_unified_get_event_handler_ix() - gives event handler's index
2041  * @wmi_handle: handle to wmi
2042  * @event_id: wmi event id
2043  *
2044  * Return: event handler's index
2045  */
2046 static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
2047 					    uint32_t event_id)
2048 {
2049 	uint32_t idx = 0;
2050 	int32_t invalid_idx = -1;
2051 	struct wmi_soc *soc = wmi_handle->soc;
2052 
2053 	for (idx = 0; (idx < soc->max_event_idx &&
2054 		       idx < WMI_UNIFIED_MAX_EVENT); ++idx) {
2055 		if (wmi_handle->event_id[idx] == event_id &&
2056 		    wmi_handle->event_handler[idx]) {
2057 			return idx;
2058 		}
2059 	}
2060 
2061 	return invalid_idx;
2062 }
2063 
2064 /**
2065  * wmi_register_event_handler_with_ctx() - register event handler with
2066  * exec ctx and buffer type
2067  * @wmi_handle: handle to wmi
2068  * @event_id: wmi event id
2069  * @handler_func: wmi event handler function
2070  * @rx_ctx: rx execution context for wmi rx events
2071  * @rx_buf_type: rx buffer type for wmi rx events
2072  *
2073  * Return: QDF_STATUS_SUCCESS on successful register event else failure.
2074  */
2075 static QDF_STATUS
2076 wmi_register_event_handler_with_ctx(wmi_unified_t wmi_handle,
2077 				    uint32_t event_id,
2078 				    wmi_unified_event_handler handler_func,
2079 				    enum wmi_rx_exec_ctx rx_ctx,
2080 				    enum wmi_rx_buff_type rx_buf_type)
2081 {
2082 	uint32_t idx = 0;
2083 	uint32_t evt_id;
2084 	struct wmi_soc *soc;
2085 
2086 	if (!wmi_handle) {
2087 		wmi_err("WMI handle is NULL");
2088 		return QDF_STATUS_E_FAILURE;
2089 	}
2090 
2091 	soc = wmi_handle->soc;
2092 
2093 	if (event_id >= wmi_events_max ||
2094 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2095 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2096 			  "%s: Event id %d is unavailable",
2097 					__func__, event_id);
2098 		return QDF_STATUS_E_FAILURE;
2099 	}
2100 	evt_id = wmi_handle->wmi_events[event_id];
2101 
2102 	if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) {
2103 		wmi_info("event handler already registered 0x%x", evt_id);
2104 		return QDF_STATUS_E_FAILURE;
2105 	}
2106 	if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
2107 		wmi_err("no more event handlers 0x%x",
2108 			 evt_id);
2109 		return QDF_STATUS_E_FAILURE;
2110 	}
2111 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
2112 		  "Registered event handler for event 0x%8x", evt_id);
2113 	idx = soc->max_event_idx;
2114 	wmi_handle->event_handler[idx] = handler_func;
2115 	wmi_handle->event_id[idx] = evt_id;
2116 
2117 	qdf_spin_lock_bh(&soc->ctx_lock);
2118 	wmi_handle->ctx[idx].exec_ctx = rx_ctx;
2119 	wmi_handle->ctx[idx].buff_type = rx_buf_type;
2120 	qdf_spin_unlock_bh(&soc->ctx_lock);
2121 	soc->max_event_idx++;
2122 
2123 	return QDF_STATUS_SUCCESS;
2124 }
2125 
2126 QDF_STATUS
2127 wmi_unified_register_event(wmi_unified_t wmi_handle,
2128 			   uint32_t event_id,
2129 			   wmi_unified_event_handler handler_func)
2130 {
2131 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2132 						   handler_func,
2133 						   WMI_RX_UMAC_CTX,
2134 						   WMI_RX_PROCESSED_BUFF);
2135 }
2136 
2137 QDF_STATUS
2138 wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
2139 				   wmi_conv_event_id event_id,
2140 				   wmi_unified_event_handler handler_func,
2141 				   uint8_t rx_ctx)
2142 {
2143 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2144 						   handler_func, rx_ctx,
2145 						   WMI_RX_PROCESSED_BUFF);
2146 }
2147 
2148 qdf_export_symbol(wmi_unified_register_event_handler);
2149 
2150 QDF_STATUS
2151 wmi_unified_register_raw_event_handler(wmi_unified_t wmi_handle,
2152 				       wmi_conv_event_id event_id,
2153 				       wmi_unified_event_handler handler_func,
2154 				       enum wmi_rx_exec_ctx rx_ctx)
2155 {
2156 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2157 						   handler_func, rx_ctx,
2158 						   WMI_RX_RAW_BUFF);
2159 }
2160 
2161 qdf_export_symbol(wmi_unified_register_raw_event_handler);
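/*
 * Illustrative registration sketch (placeholder names, not driver code):
 * assuming the wmi_unified_event_handler prototype of
 * (ol_scn_t scn, uint8_t *data, uint32_t len), a component registers its
 * handler against an abstract wmi_conv_event_id and picks the execution
 * context, e.g.
 *
 *	static int my_evt_handler(ol_scn_t scn, uint8_t *data, uint32_t len)
 *	{
 *		return 0;
 *	}
 *
 *	wmi_unified_register_event_handler(wmi_handle,
 *					   wmi_roam_synch_event_id,
 *					   my_evt_handler, WMI_RX_WORK_CTX);
 *
 * 'my_evt_handler' is hypothetical and wmi_roam_synch_event_id stands in for
 * any valid event id. Raw-buffer consumers use
 * wmi_unified_register_raw_event_handler() instead and receive a
 * struct wmi_raw_event_buffer holding both the raw and the processed buffer.
 */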
2162 
2163 QDF_STATUS wmi_unified_unregister_event(wmi_unified_t wmi_handle,
2164 					uint32_t event_id)
2165 {
2166 	uint32_t idx = 0;
2167 	uint32_t evt_id;
2168 	struct wmi_soc *soc = wmi_handle->soc;
2169 
2170 	if (event_id >= wmi_events_max ||
2171 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2172 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2173 			  "%s: Event id %d is unavailable",
2174 					__func__, event_id);
2175 		return QDF_STATUS_E_FAILURE;
2176 	}
2177 	evt_id = wmi_handle->wmi_events[event_id];
2178 
2179 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2180 	if (idx == -1) {
2181 		wmi_warn("event handler is not registered: evt id 0x%x",
2182 			 evt_id);
2183 		return QDF_STATUS_E_FAILURE;
2184 	}
2185 	wmi_handle->event_handler[idx] = NULL;
2186 	wmi_handle->event_id[idx] = 0;
2187 	--soc->max_event_idx;
2188 	wmi_handle->event_handler[idx] =
2189 		wmi_handle->event_handler[soc->max_event_idx];
2190 	wmi_handle->event_id[idx] =
2191 		wmi_handle->event_id[soc->max_event_idx];
2192 
2193 	return QDF_STATUS_SUCCESS;
2194 }
2195 
2196 QDF_STATUS wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
2197 						wmi_conv_event_id event_id)
2198 {
2199 	uint32_t idx = 0;
2200 	uint32_t evt_id;
2201 	struct wmi_soc *soc;
2202 
2203 	if (!wmi_handle) {
2204 		wmi_err("WMI handle is NULL");
2205 		return QDF_STATUS_E_FAILURE;
2206 	}
2207 
2208 	soc = wmi_handle->soc;
2209 
2210 	if (event_id >= wmi_events_max ||
2211 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2212 		wmi_err("Event id %d is unavailable", event_id);
2213 		return QDF_STATUS_E_FAILURE;
2214 	}
2215 	evt_id = wmi_handle->wmi_events[event_id];
2216 
2217 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2218 	if (idx == -1) {
2219 		wmi_err("event handler is not registered: evt id 0x%x",
2220 			 evt_id);
2221 		return QDF_STATUS_E_FAILURE;
2222 	}
2223 	wmi_handle->event_handler[idx] = NULL;
2224 	wmi_handle->event_id[idx] = 0;
2225 	--soc->max_event_idx;
2226 	wmi_handle->event_handler[idx] =
2227 		wmi_handle->event_handler[soc->max_event_idx];
2228 	wmi_handle->event_id[idx] =
2229 		wmi_handle->event_id[soc->max_event_idx];
2230 
2231 	return QDF_STATUS_SUCCESS;
2232 }
2233 qdf_export_symbol(wmi_unified_unregister_event_handler);
2234 
2235 static void
2236 wmi_process_rx_diag_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2237 					    void *evt_buf)
2238 {
2239 	uint32_t num_diag_events_pending;
2240 
2241 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
2242 	if (RX_DIAG_WQ_MAX_SIZE > 0) {
2243 		num_diag_events_pending = qdf_nbuf_queue_len(
2244 						&wmi_handle->diag_event_queue);
2245 
2246 		if (num_diag_events_pending == RX_DIAG_WQ_MAX_SIZE) {
2247 			qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2248 			wmi_handle->wmi_rx_diag_events_dropped++;
2249 			wmi_debug_rl("Rx diag events dropped count: %d",
2250 				     wmi_handle->wmi_rx_diag_events_dropped);
2251 			qdf_nbuf_free(evt_buf);
2252 			return;
2253 		}
2254 	}
2255 
2256 	qdf_nbuf_queue_add(&wmi_handle->diag_event_queue, evt_buf);
2257 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2258 	qdf_sched_work(0, &wmi_handle->rx_diag_event_work);
2259 }
2260 
2261 void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2262 					    void *evt_buf)
2263 {
2264 
2265 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
2266 	qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
2267 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
2268 	qdf_queue_work(0, wmi_handle->wmi_rx_work_queue,
2269 			&wmi_handle->rx_event_work);
2270 
2271 	return;
2272 }
2273 
2274 qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx);
2275 
2276 uint32_t wmi_critical_events_in_flight(struct wmi_unified *wmi)
2277 {
2278 	return qdf_atomic_read(&wmi->critical_events_in_flight);
2279 }
2280 
2281 static bool
2282 wmi_is_event_critical(struct wmi_unified *wmi_handle, uint32_t event_id)
2283 {
2284 	if (wmi_handle->wmi_events[wmi_roam_synch_event_id] == event_id)
2285 		return true;
2286 
2287 	return false;
2288 }
2289 
2290 static QDF_STATUS wmi_discard_fw_event(struct scheduler_msg *msg)
2291 {
2292 	struct wmi_process_fw_event_params *event_param;
2293 
2294 	if (!msg->bodyptr)
2295 		return QDF_STATUS_E_INVAL;
2296 
2297 	event_param = (struct wmi_process_fw_event_params *)msg->bodyptr;
2298 	qdf_nbuf_free(event_param->evt_buf);
2299 	qdf_mem_free(msg->bodyptr);
2300 	msg->bodyptr = NULL;
2301 	msg->bodyval = 0;
2302 	msg->type = 0;
2303 
2304 	return QDF_STATUS_SUCCESS;
2305 }
2306 
2307 static QDF_STATUS wmi_process_fw_event_handler(struct scheduler_msg *msg)
2308 {
2309 	struct wmi_process_fw_event_params *params =
2310 		(struct wmi_process_fw_event_params *)msg->bodyptr;
2311 	struct wmi_unified *wmi_handle;
2312 	uint32_t event_id;
2313 
2314 	wmi_handle = (struct wmi_unified *)params->wmi_handle;
2315 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params->evt_buf),
2316 				 WMI_CMD_HDR, COMMANDID);
2317 	wmi_process_fw_event(wmi_handle, params->evt_buf);
2318 
2319 	if (wmi_is_event_critical(wmi_handle, event_id))
2320 		qdf_atomic_dec(&wmi_handle->critical_events_in_flight);
2321 
2322 	qdf_mem_free(msg->bodyptr);
2323 
2324 	return QDF_STATUS_SUCCESS;
2325 }
2326 
2327 /**
2328  * wmi_process_fw_event_sched_thread_ctx() - common event handler to serialize
2329  *                                  event processing through scheduler thread
2330  * @wmi: wmi handle
2331  * @ev: event buffer
2332  *
2333  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS error code on
2334  * failure
2335  */
2336 static QDF_STATUS
2337 wmi_process_fw_event_sched_thread_ctx(struct wmi_unified *wmi,
2338 				      void *ev)
2339 {
2340 	struct wmi_process_fw_event_params *params_buf;
2341 	struct scheduler_msg msg = { 0 };
2342 	uint32_t event_id;
2343 
2344 	params_buf = qdf_mem_malloc(sizeof(struct wmi_process_fw_event_params));
2345 	if (!params_buf) {
2346 		wmi_err("malloc failed");
2347 		qdf_nbuf_free(ev);
2348 		return QDF_STATUS_E_NOMEM;
2349 	}
2350 
2351 	params_buf->wmi_handle = wmi;
2352 	params_buf->evt_buf = ev;
2353 
2354 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params_buf->evt_buf),
2355 				 WMI_CMD_HDR, COMMANDID);
2356 	if (wmi_is_event_critical(wmi, event_id))
2357 		qdf_atomic_inc(&wmi->critical_events_in_flight);
2358 
2359 	msg.bodyptr = params_buf;
2360 	msg.bodyval = 0;
2361 	msg.callback = wmi_process_fw_event_handler;
2362 	msg.flush_callback = wmi_discard_fw_event;
2363 
2364 	if (QDF_STATUS_SUCCESS !=
2365 		scheduler_post_message(QDF_MODULE_ID_TARGET_IF,
2366 				       QDF_MODULE_ID_TARGET_IF,
2367 				       QDF_MODULE_ID_TARGET_IF, &msg)) {
2368 		qdf_nbuf_free(ev);
2369 		qdf_mem_free(params_buf);
2370 		return QDF_STATUS_E_FAULT;
2371 	}
2372 
2373 	return QDF_STATUS_SUCCESS;
2374 }
2375 
2376 /**
2377  * wmi_get_pdev_ep: Get wmi handle based on endpoint
2378  * @soc: handle to wmi soc
2379  * @ep: endpoint id
2380  *
2381  * Return: wmi handle for the endpoint, or NULL if no pdev matches
2382  */
2383 static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc,
2384 						HTC_ENDPOINT_ID ep)
2385 {
2386 	uint32_t i;
2387 
2388 	for (i = 0; i < WMI_MAX_RADIOS; i++)
2389 		if (soc->wmi_endpoint_id[i] == ep)
2390 			break;
2391 
2392 	if (i == WMI_MAX_RADIOS)
2393 		return NULL;
2394 
2395 	return soc->wmi_pdev[i];
2396 }
2397 
2398 /**
2399  * wmi_mtrace_rx() - Wrapper function for qdf_mtrace api
2400  * @message_id: 32-bit WMI message ID
2401  * @vdev_id: Vdev ID
2402  * @data: Actual message contents
2403  *
2404  * This function converts the 32-bit WMI message ID into the 15-bit message
2405  * ID format used by qdf_mtrace, since the qdf_mtrace message reserves
2406  * only 15 bits for the message ID.
2407  * Of these 15 bits, the lower 7 bits (from the LSB) carry the WMI command
2408  * within its group and the upper 8 bits carry the WMI_GRP_ID. With this
2409  * notation a maximum of 256 groups can be represented, and each group
2410  * can support up to 128 commands.
2411  *
2412  * Return: None
2413  */
2414 static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data)
2415 {
2416 	uint16_t mtrace_message_id;
2417 
2418 	mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) |
2419 		(QDF_WMI_MTRACE_GRP_ID(message_id) <<
2420 						QDF_WMI_MTRACE_CMD_NUM_BITS);
2421 	qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA,
2422 		   mtrace_message_id, vdev_id, data);
2423 }
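/*
 * Worked example for the packing above (assuming QDF_WMI_MTRACE_CMD_NUM_BITS
 * is 7, matching the 7-bit command / 8-bit group split described in the
 * comment): for a message whose group id extracts to 0x1E and whose in-group
 * command id extracts to 0x05, the traced id becomes
 * 0x05 | (0x1E << 7) = 0x0f05, which fits within the 15 bits that qdf_mtrace
 * reserves for message IDs.
 */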
2424 
2425 /**
2426  * wmi_process_control_rx() - process fw events callbacks
2427  * @wmi_handle: handle to wmi_unified
2428  * @evt_buf: handle to wmi_buf_t
2429  *
2430  * Return: none
2431  */
2432 static void wmi_process_control_rx(struct wmi_unified *wmi_handle,
2433 				   wmi_buf_t evt_buf)
2434 {
2435 	struct wmi_soc *soc = wmi_handle->soc;
2436 	uint32_t id;
2437 	uint32_t idx;
2438 	enum wmi_rx_exec_ctx exec_ctx;
2439 
2440 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2441 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2442 	if (qdf_unlikely(idx == A_ERROR)) {
2443 		wmi_debug("no handler registered for event id 0x%x", id);
2444 		qdf_nbuf_free(evt_buf);
2445 		return;
2446 	}
2447 	wmi_mtrace_rx(id, 0xFF, idx);
2448 	qdf_spin_lock_bh(&soc->ctx_lock);
2449 	exec_ctx = wmi_handle->ctx[idx].exec_ctx;
2450 	qdf_spin_unlock_bh(&soc->ctx_lock);
2451 
2452 #ifdef WMI_INTERFACE_EVENT_LOGGING
2453 	if (wmi_handle->log_info.wmi_logging_enable) {
2454 		uint8_t *data;
2455 		data = qdf_nbuf_data(evt_buf);
2456 
2457 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2458 		/* Exclude 4 bytes of TLV header */
2459 		if (wmi_handle->ops->is_diag_event(id)) {
2460 			WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id,
2461 				((uint8_t *) data +
2462 				wmi_handle->soc->buf_offset_event));
2463 		} else if (wmi_handle->ops->is_management_record(id)) {
2464 			WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id,
2465 				((uint8_t *) data +
2466 				wmi_handle->soc->buf_offset_event));
2467 		} else {
2468 			WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data +
2469 				wmi_handle->soc->buf_offset_event));
2470 		}
2471 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2472 	}
2473 #endif
2474 
2475 	if (exec_ctx == WMI_RX_WORK_CTX) {
2476 		wmi_process_fw_event_worker_thread_ctx(wmi_handle, evt_buf);
2477 	} else if (exec_ctx == WMI_RX_TASKLET_CTX) {
2478 		wmi_process_fw_event(wmi_handle, evt_buf);
2479 	} else if (exec_ctx == WMI_RX_SERIALIZER_CTX) {
2480 		wmi_process_fw_event_sched_thread_ctx(wmi_handle, evt_buf);
2481 	} else if (exec_ctx == WMI_RX_DIAG_WORK_CTX) {
2482 		wmi_process_rx_diag_event_worker_thread_ctx(wmi_handle,
2483 							    evt_buf);
2484 	} else {
2485 		wmi_err("Invalid event context %d", exec_ctx);
2486 		qdf_nbuf_free(evt_buf);
2487 	}
2488 
2489 }
2490 
2491 /**
2492  * wmi_control_rx() - process fw events callbacks
2493  * @ctx: handle to wmi
2494  * @htc_packet: pointer to htc packet
2495  *
2496  * Return: none
2497  */
2498 static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
2499 {
2500 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2501 	struct wmi_unified *wmi_handle;
2502 	wmi_buf_t evt_buf;
2503 
2504 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2505 
2506 	wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint);
2507 	if (!wmi_handle) {
2508 		wmi_err("unable to get wmi_handle to Endpoint %d",
2509 			htc_packet->Endpoint);
2510 		qdf_nbuf_free(evt_buf);
2511 		return;
2512 	}
2513 
2514 	wmi_process_control_rx(wmi_handle, evt_buf);
2515 }
2516 
2517 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
2518 /**
2519  * wmi_control_diag_rx() - process diag fw events callbacks
2520  * @ctx: handle to wmi
2521  * @htc_packet: pointer to htc packet
2522  *
2523  * Return: none
2524  */
2525 static void wmi_control_diag_rx(void *ctx, HTC_PACKET *htc_packet)
2526 {
2527 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2528 	struct wmi_unified *wmi_handle;
2529 	wmi_buf_t evt_buf;
2530 
2531 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2532 
2533 	wmi_handle = soc->wmi_pdev[0];
2534 	if (!wmi_handle) {
2535 		wmi_err("unable to get wmi_handle for diag event end point id:%d", htc_packet->Endpoint);
2536 		qdf_nbuf_free(evt_buf);
2537 		return;
2538 	}
2539 
2540 	wmi_process_control_rx(wmi_handle, evt_buf);
2541 }
2542 #endif
2543 
2544 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
2545 QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle,
2546 					 wmi_buf_t buf, uint32_t buflen,
2547 					 uint32_t cmd_id)
2548 {
2549 	QDF_STATUS status;
2550 	int32_t ret;
2551 
2552 	if (!qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR))) {
2553 		wmi_err("Failed to send cmd %x, no memory", cmd_id);
2554 		return QDF_STATUS_E_NOMEM;
2555 	}
2556 
2557 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2558 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2559 	wmi_debug("Sending WMI_CMD_ID: 0x%x over qmi", cmd_id);
2560 	status = qdf_wmi_send_recv_qmi(qdf_nbuf_data(buf),
2561 				       buflen + sizeof(WMI_CMD_HDR),
2562 				       wmi_handle,
2563 				       wmi_process_qmi_fw_event);
2564 	if (QDF_IS_STATUS_ERROR(status)) {
2565 		qdf_nbuf_pull_head(buf, sizeof(WMI_CMD_HDR));
2566 		wmi_warn("WMI send on QMI failed. Retrying WMI on HTC");
2567 	} else {
2568 		ret = qdf_atomic_inc_return(&wmi_handle->num_stats_over_qmi);
2569 		wmi_debug("num stats over qmi: %d", ret);
2570 		wmi_buf_free(buf);
2571 	}
2572 
2573 	return status;
2574 }
2575 
2576 static int __wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2577 {
2578 	struct wmi_unified *wmi_handle = wmi_cb_ctx;
2579 	wmi_buf_t evt_buf;
2580 	uint32_t evt_id;
2581 	int wmi_msg_len;
2582 
2583 	if (!wmi_handle || !buf)
2584 		return -EINVAL;
2585 
2586 	/*
2587 	 * Subtract WMI_MIN_HEAD_ROOM from received QMI event length to get
2588 	 * wmi message length
2589 	 */
2590 	wmi_msg_len = len - WMI_MIN_HEAD_ROOM;
2591 
2592 	evt_buf = wmi_buf_alloc(wmi_handle, wmi_msg_len);
2593 	if (!evt_buf)
2594 		return -ENOMEM;
2595 
2596 	/*
2597 	 * Set the length of the buffer to match the allocation size.
2598 	 */
2599 	qdf_nbuf_set_pktlen(evt_buf, len);
2600 
2601 	qdf_mem_copy(qdf_nbuf_data(evt_buf), buf, len);
2602 	evt_id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2603 	wmi_debug("Received WMI_EVT_ID: %d over qmi", evt_id);
2604 	wmi_process_control_rx(wmi_handle, evt_buf);
2605 
2606 	return 0;
2607 }
2608 
2609 int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2610 {
2611 	struct qdf_op_sync *op_sync;
2612 	int ret;
2613 
2614 	if (qdf_op_protect(&op_sync))
2615 		return -EINVAL;
2616 	ret = __wmi_process_qmi_fw_event(wmi_cb_ctx, buf, len);
2617 	qdf_op_unprotect(op_sync);
2618 
2619 	return ret;
2620 }
2621 #endif
2622 
2623 /**
2624  * wmi_process_fw_event() - process any fw event
2625  * @wmi_handle: wmi handle
2626  * @evt_buf: fw event buffer
2627  *
2628  * This function process fw event in caller context
2629  *
2630  * Return: none
2631  */
2632 void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2633 {
2634 	__wmi_control_rx(wmi_handle, evt_buf);
2635 }
2636 
2637 /**
2638  * __wmi_control_rx() - process serialize wmi event callback
2639  * @wmi_handle: wmi handle
2640  * @evt_buf: fw event buffer
2641  *
2642  * Return: none
2643  */
2644 void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2645 {
2646 	uint32_t id;
2647 	uint8_t *data;
2648 	uint32_t len;
2649 	void *wmi_cmd_struct_ptr = NULL;
2650 #ifndef WMI_NON_TLV_SUPPORT
2651 	int tlv_ok_status = 0;
2652 #endif
2653 	uint32_t idx = 0;
2654 	struct wmi_raw_event_buffer ev_buf;
2655 	enum wmi_rx_buff_type ev_buff_type;
2656 
2657 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2658 
2659 	wmi_ext_dbg_msg_event_record(wmi_handle, qdf_nbuf_data(evt_buf),
2660 				     qdf_nbuf_len(evt_buf));
2661 
2662 	if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
2663 		goto end;
2664 
2665 	data = qdf_nbuf_data(evt_buf);
2666 	len = qdf_nbuf_len(evt_buf);
2667 
2668 #ifndef WMI_NON_TLV_SUPPORT
2669 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2670 		/* Validate and pad(if necessary) the TLVs */
2671 		tlv_ok_status =
2672 			wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle,
2673 							data, len, id,
2674 							&wmi_cmd_struct_ptr);
2675 		if (tlv_ok_status != 0) {
2676 			QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2677 				  "%s: Error: id=0x%x, wmitlv check status=%d",
2678 				  __func__, id, tlv_ok_status);
2679 			goto end;
2680 		}
2681 	}
2682 #endif
2683 
2684 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2685 	if (idx == A_ERROR) {
2686 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2687 		   "%s : event handler is not registered: event id 0x%x",
2688 			__func__, id);
2689 		goto end;
2690 	}
2691 #ifdef WMI_INTERFACE_EVENT_LOGGING
2692 	if (wmi_handle->log_info.wmi_logging_enable) {
2693 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2694 		/* Exclude 4 bytes of TLV header */
2695 		if (wmi_handle->ops->is_diag_event(id)) {
2696 			/*
2697 			 * skip diag event logging in WMI event buffer
2698 			 * as its already logged in WMI RX event buffer
2699 			 */
2700 		} else if (wmi_handle->ops->is_management_record(id)) {
2701 			/*
2702 			 * skip wmi mgmt event logging in WMI event buffer
2703 			 * as its already logged in WMI RX event buffer
2704 			 */
2705 		} else {
2706 			uint8_t *tmpbuf = (uint8_t *)data +
2707 					wmi_handle->soc->buf_offset_event;
2708 
2709 			WMI_EVENT_RECORD(wmi_handle, id, tmpbuf);
2710 			wmi_specific_evt_record(wmi_handle, id, tmpbuf);
2711 		}
2712 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2713 	}
2714 #endif
2715 	/* Call the WMI registered event handler */
2716 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2717 		ev_buff_type = wmi_handle->ctx[idx].buff_type;
2718 		if (ev_buff_type == WMI_RX_PROCESSED_BUFF) {
2719 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2720 				wmi_cmd_struct_ptr, len);
2721 		} else if (ev_buff_type == WMI_RX_RAW_BUFF) {
2722 			ev_buf.evt_raw_buf = data;
2723 			ev_buf.evt_processed_buf = wmi_cmd_struct_ptr;
2724 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2725 							(void *)&ev_buf, len);
2726 		}
2727 	}
2728 	else
2729 		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2730 			data, len);
2731 
2732 end:
2733 	/* Free event buffer and allocated event tlv */
2734 #ifndef WMI_NON_TLV_SUPPORT
2735 	if (wmi_handle->target_type == WMI_TLV_TARGET)
2736 		wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr);
2737 #endif
2738 
2739 	qdf_nbuf_free(evt_buf);
2740 
2741 }
2742 
2743 #define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */
2744 
2745 static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id)
2746 {
2747 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2748 		  "%s: WLAN_BUG_RCA: Message type %x has exceeded its alloted time of %ds",
2749 		  "%s: WLAN_BUG_RCA: Message type %x has exceeded its allotted time of %ds",
2750 }
2751 
2752 #ifdef CONFIG_SLUB_DEBUG_ON
2753 static void wmi_workqueue_watchdog_bite(void *arg)
2754 {
2755 	struct wmi_wq_dbg_info *info = arg;
2756 
2757 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
2758 	qdf_print_thread_trace(info->task);
2759 
2760 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2761 		  "%s: Going down for WMI WQ Watchdog Bite!", __func__);
2762 	QDF_BUG(0);
2763 }
2764 #else
2765 static inline void wmi_workqueue_watchdog_bite(void *arg)
2766 {
2767 	struct wmi_wq_dbg_info *info = arg;
2768 
2769 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
2770 
2771 	qdf_print_thread_trace(info->task);
2772 }
2773 #endif
2774 
2775 /**
2776  * wmi_rx_event_work() - process rx event in rx work queue context
2777  * @arg: opaque pointer to wmi handle
2778  *
2779  * This function process any fw event to serialize it through rx worker thread.
2780  *
2781  * Return: none
2782  */
2783 static void wmi_rx_event_work(void *arg)
2784 {
2785 	wmi_buf_t buf;
2786 	struct wmi_unified *wmi = arg;
2787 	qdf_timer_t wd_timer;
2788 	struct wmi_wq_dbg_info info;
2789 
2790 	/* initialize WMI workqueue watchdog timer */
2791 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
2792 			&info, QDF_TIMER_TYPE_SW);
2793 	qdf_spin_lock_bh(&wmi->eventq_lock);
2794 	buf = qdf_nbuf_queue_remove(&wmi->event_queue);
2795 	qdf_spin_unlock_bh(&wmi->eventq_lock);
2796 	while (buf) {
2797 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
2798 		info.wd_msg_type_id =
2799 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
2800 		info.wmi_wq = wmi->wmi_rx_work_queue;
2801 		info.task = qdf_get_current_task();
2802 		__wmi_control_rx(wmi, buf);
2803 		qdf_timer_stop(&wd_timer);
2804 		qdf_spin_lock_bh(&wmi->eventq_lock);
2805 		buf = qdf_nbuf_queue_remove(&wmi->event_queue);
2806 		qdf_spin_unlock_bh(&wmi->eventq_lock);
2807 	}
2808 	qdf_timer_free(&wd_timer);
2809 }
2810 
2811 /**
2812  * wmi_rx_diag_event_work() - process rx diag event in work queue context
2813  * @arg: opaque pointer to wmi handle
2814  *
2815  * This function process fw diag event to serialize it through rx worker thread.
2816  *
2817  * Return: none
2818  */
2819 static void wmi_rx_diag_event_work(void *arg)
2820 {
2821 	wmi_buf_t buf;
2822 	struct wmi_unified *wmi = arg;
2823 	qdf_timer_t wd_timer;
2824 	struct wmi_wq_dbg_info info;
2825 
2826 	if (!wmi) {
2827 		wmi_err("Invalid WMI handle");
2828 		return;
2829 	}
2830 
2831 	/* initialize WMI workqueue watchdog timer */
2832 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
2833 		       &info, QDF_TIMER_TYPE_SW);
2834 	qdf_spin_lock_bh(&wmi->diag_eventq_lock);
2835 	buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
2836 	qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
2837 	while (buf) {
2838 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
2839 		info.wd_msg_type_id =
2840 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
2841 		info.wmi_wq = NULL;
2842 		info.task = qdf_get_current_task();
2843 		__wmi_control_rx(wmi, buf);
2844 		qdf_timer_stop(&wd_timer);
2845 		qdf_spin_lock_bh(&wmi->diag_eventq_lock);
2846 		buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
2847 		qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
2848 	}
2849 	qdf_timer_free(&wd_timer);
2850 }
2851 
2852 #ifdef FEATURE_RUNTIME_PM
2853 /**
2854  * wmi_runtime_pm_init() - initialize runtime pm wmi variables
2855  * @wmi_handle: wmi context
2856  */
2857 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
2858 {
2859 	qdf_atomic_init(&wmi_handle->runtime_pm_inprogress);
2860 }
2861 
2862 /**
2863  * wmi_set_runtime_pm_inprogress() - set runtime pm progress flag
2864  * @wmi_handle: wmi context
2865  * @val: runtime pm progress flag
2866  */
2867 void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val)
2868 {
2869 	qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val);
2870 }
2871 
2872 /**
2873  * wmi_get_runtime_pm_inprogress() - get runtime pm progress flag
2874  * @wmi_handle: wmi context
2875  */
2876 inline bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle)
2877 {
2878 	return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress);
2879 }
2880 #else
2881 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
2882 {
2883 }
2884 #endif
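/*
 * Usage note for the runtime-PM flag above (descriptive; the exact caller is
 * an assumption): the PM layer is expected to call
 * wmi_set_runtime_pm_inprogress(wmi_handle, true) while a runtime
 * suspend/resume transaction is in flight and clear it again afterwards.
 * wmi_unified_cmd_send_fl() consumes the flag: when it is set, commands are
 * tagged via ops->wmi_set_htc_tx_tag() instead of being rejected outright
 * while is_target_suspended is set, so resume-related commands can still
 * reach the target.
 */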
2885 
2886 /**
2887  * wmi_unified_get_soc_handle: Get WMI SoC handle
2888  * @param wmi_handle: WMI context obtained from wmi_unified_attach()
2889  *
2890  * return: Pointer to Soc handle
2891  */
2892 void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle)
2893 {
2894 	return wmi_handle->soc;
2895 }
2896 
2897 /**
2898  * wmi_interface_logging_init: Interface logging init
2899  * @param wmi_handle: Pointer to wmi handle object
2900  *
2901  * return: None
2902  */
2903 #ifdef WMI_INTERFACE_EVENT_LOGGING
2904 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
2905 					      uint32_t pdev_idx)
2906 {
2907 	if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) {
2908 		qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
2909 		wmi_debugfs_init(wmi_handle, pdev_idx);
2910 	}
2911 }
2912 #else
2913 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
2914 					      uint32_t pdev_idx)
2915 {
2916 }
2917 #endif
2918 
2919 static QDF_STATUS wmi_initialize_worker_context(struct wmi_unified *wmi_handle)
2920 {
2921 	wmi_handle->wmi_rx_work_queue =
2922 		qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue");
2923 	if (!wmi_handle->wmi_rx_work_queue) {
2924 		wmi_err("failed to create wmi_rx_event_work_queue");
2925 		return QDF_STATUS_E_RESOURCES;
2926 	}
2927 
2928 	qdf_spinlock_create(&wmi_handle->eventq_lock);
2929 	qdf_nbuf_queue_init(&wmi_handle->event_queue);
2930 	qdf_create_work(0, &wmi_handle->rx_event_work,
2931 			wmi_rx_event_work, wmi_handle);
2932 	qdf_spinlock_create(&wmi_handle->diag_eventq_lock);
2933 	qdf_nbuf_queue_init(&wmi_handle->diag_event_queue);
2934 	qdf_create_work(0, &wmi_handle->rx_diag_event_work,
2935 			wmi_rx_diag_event_work, wmi_handle);
2936 	wmi_handle->wmi_rx_diag_events_dropped = 0;
2937 
2938 	return QDF_STATUS_SUCCESS;
2939 }
2940 
2941 /**
2942  * wmi_unified_get_pdev_handle: Get WMI pdev handle
2943  * @param wmi_soc: Pointer to wmi soc object
2944  * @param pdev_idx: pdev index
2945  *
2946  * return: Pointer to wmi handle or NULL on failure
2947  */
2948 void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx)
2949 {
2950 	struct wmi_unified *wmi_handle;
2951 	QDF_STATUS status;
2952 
2953 	if (pdev_idx >= WMI_MAX_RADIOS)
2954 		return NULL;
2955 
2956 	if (!soc->wmi_pdev[pdev_idx]) {
2957 		wmi_handle =
2958 			(struct wmi_unified *) qdf_mem_malloc(
2959 					sizeof(struct wmi_unified));
2960 		if (!wmi_handle)
2961 			return NULL;
2962 
2963 		status = wmi_initialize_worker_context(wmi_handle);
2964 		if (QDF_IS_STATUS_ERROR(status))
2965 			goto error;
2966 
2967 		wmi_handle->scn_handle = soc->scn_handle;
2968 		wmi_handle->event_id = soc->event_id;
2969 		wmi_handle->event_handler = soc->event_handler;
2970 		wmi_handle->ctx = soc->ctx;
2971 		wmi_handle->ops = soc->ops;
2972 		wmi_handle->wmi_events = soc->wmi_events;
2973 		wmi_handle->services = soc->services;
2974 		wmi_handle->soc = soc;
2975 		wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
2976 		wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
2977 		wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
2978 		wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
2979 		wmi_interface_logging_init(wmi_handle, pdev_idx);
2980 		qdf_atomic_init(&wmi_handle->pending_cmds);
2981 		qdf_atomic_init(&wmi_handle->is_target_suspended);
2982 		wmi_handle->target_type = soc->target_type;
2983 		wmi_handle->wmi_max_cmds = soc->wmi_max_cmds;
2984 
2985 		wmi_interface_sequence_init(wmi_handle);
2986 		if (wmi_ext_dbgfs_init(wmi_handle, pdev_idx) !=
2987 		    QDF_STATUS_SUCCESS)
2988 			wmi_err("Failed to initialize wmi extended debugfs");
2989 
2990 		soc->wmi_pdev[pdev_idx] = wmi_handle;
2991 	} else
2992 		wmi_handle = soc->wmi_pdev[pdev_idx];
2993 
2994 	wmi_handle->wmi_stopinprogress = 0;
2995 	wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx];
2996 	wmi_handle->htc_handle = soc->htc_handle;
2997 	wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx];
2998 	wmi_handle->tag_crash_inject = false;
2999 	wmi_interface_sequence_reset(wmi_handle);
3000 
3001 	return wmi_handle;
3002 
3003 error:
3004 	qdf_mem_free(wmi_handle);
3005 
3006 	return NULL;
3007 }
3008 qdf_export_symbol(wmi_unified_get_pdev_handle);
3009 
3010 static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t);
3011 
3012 void wmi_unified_register_module(enum wmi_target_type target_type,
3013 			void (*wmi_attach)(wmi_unified_t wmi_handle))
3014 {
3015 	if (target_type < WMI_MAX_TARGET_TYPE)
3016 		wmi_attach_register[target_type] = wmi_attach;
3017 
3018 	return;
3019 }
3020 qdf_export_symbol(wmi_unified_register_module);
3021 
3022 /**
3023  * wmi_wbuff_register() - register wmi with wbuff
3024  * @wmi_handle: handle to wmi
3025  *
3026  * @Return: void
3027  */
3028 static void wmi_wbuff_register(struct wmi_unified *wmi_handle)
3029 {
3030 	struct wbuff_alloc_request wbuff_alloc[4];
3031 
3032 	wbuff_alloc[0].slot = WBUFF_POOL_0;
3033 	wbuff_alloc[0].size = WMI_WBUFF_POOL_0_SIZE;
3034 	wbuff_alloc[1].slot = WBUFF_POOL_1;
3035 	wbuff_alloc[1].size = WMI_WBUFF_POOL_1_SIZE;
3036 	wbuff_alloc[2].slot = WBUFF_POOL_2;
3037 	wbuff_alloc[2].size = WMI_WBUFF_POOL_2_SIZE;
3038 	wbuff_alloc[3].slot = WBUFF_POOL_3;
3039 	wbuff_alloc[3].size = WMI_WBUFF_POOL_3_SIZE;
3040 
3041 	wmi_handle->wbuff_handle = wbuff_module_register(wbuff_alloc, 4,
3042 							 WMI_MIN_HEAD_ROOM, 4);
3043 }
3044 
3045 /**
3046  * wmi_wbuff_deregister() - deregister wmi with wbuff
3047  * @wmi_handle: handle to wmi
3048  *
3049  * @Return: void
3050  */
3051 static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle)
3052 {
3053 	wbuff_module_deregister(wmi_handle->wbuff_handle);
3054 	wmi_handle->wbuff_handle = NULL;
3055 }
3056 
3057 /**
3058  * wmi_unified_attach() -  attach for unified WMI
3059  * @scn_handle: handle to SCN
3060  * @param: attach parameters, including:
3061  *         - OS device context and TLV or non-TLV based target type
3062  *         - cookie based allocation enabled/disabled
3063  *         - umac rx callbacks
3064  *         - objmgr psoc
3065  *
3066  * @Return: wmi handle.
3067  */
3068 void *wmi_unified_attach(void *scn_handle,
3069 			 struct wmi_unified_attach_params *param)
3070 {
3071 	struct wmi_unified *wmi_handle;
3072 	struct wmi_soc *soc;
3073 	QDF_STATUS status;
3074 
3075 	soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc));
3076 	if (!soc)
3077 		return NULL;
3078 
3079 	wmi_handle =
3080 		(struct wmi_unified *) qdf_mem_malloc(
3081 			sizeof(struct wmi_unified));
3082 	if (!wmi_handle) {
3083 		qdf_mem_free(soc);
3084 		return NULL;
3085 	}
3086 
3087 	status = wmi_initialize_worker_context(wmi_handle);
3088 	if (QDF_IS_STATUS_ERROR(status))
3089 		goto error;
3090 
3091 	wmi_handle->soc = soc;
3092 	wmi_handle->soc->soc_idx = param->soc_id;
3093 	wmi_handle->soc->is_async_ep = param->is_async_ep;
3094 	wmi_handle->event_id = soc->event_id;
3095 	wmi_handle->event_handler = soc->event_handler;
3096 	wmi_handle->ctx = soc->ctx;
3097 	wmi_handle->wmi_events = soc->wmi_events;
3098 	wmi_handle->services = soc->services;
3099 	wmi_handle->scn_handle = scn_handle;
3100 	wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3101 	wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3102 	wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3103 	wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3104 	soc->scn_handle = scn_handle;
3105 	wmi_handle->target_type = param->target_type;
3106 	soc->target_type = param->target_type;
3107 
3108 	if (param->target_type >= WMI_MAX_TARGET_TYPE)
3109 		goto error;
3110 
3111 	if (wmi_attach_register[param->target_type]) {
3112 		wmi_attach_register[param->target_type](wmi_handle);
3113 	} else {
3114 		wmi_err("wmi attach is not registered");
3115 		goto error;
3116 	}
3117 
3118 	qdf_atomic_init(&wmi_handle->pending_cmds);
3119 	qdf_atomic_init(&wmi_handle->is_target_suspended);
3120 	qdf_atomic_init(&wmi_handle->num_stats_over_qmi);
3121 	wmi_runtime_pm_init(wmi_handle);
3122 	wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0);
3123 
3124 	wmi_interface_sequence_init(wmi_handle);
3125 	/* Assign target cookie capability */
3126 	wmi_handle->use_cookie = param->use_cookie;
3127 	wmi_handle->osdev = param->osdev;
3128 	wmi_handle->wmi_stopinprogress = 0;
3129 	wmi_handle->wmi_max_cmds = param->max_commands;
3130 	soc->wmi_max_cmds = param->max_commands;
3131 	/* Increase the ref count once refcount infra is present */
3132 	soc->wmi_psoc = param->psoc;
3133 	qdf_spinlock_create(&soc->ctx_lock);
3134 	soc->ops = wmi_handle->ops;
3135 	soc->wmi_pdev[0] = wmi_handle;
3136 	if (wmi_ext_dbgfs_init(wmi_handle, 0) != QDF_STATUS_SUCCESS)
3137 		wmi_err("Failed to initialize wmi extended debugfs");
3138 
3139 	wmi_wbuff_register(wmi_handle);
3140 
3141 	wmi_hang_event_notifier_register(wmi_handle);
3142 
3143 	return wmi_handle;
3144 
3145 error:
3146 	qdf_mem_free(soc);
3147 	qdf_mem_free(wmi_handle);
3148 
3149 	return NULL;
3150 }
3151 
3152 /**
3153  * wmi_unified_detach() -  detach for unified WMI
3154  *
3155  * @wmi_handle  : handle to wmi.
3156  *
3157  * @Return: none.
3158  */
3159 void wmi_unified_detach(struct wmi_unified *wmi_handle)
3160 {
3161 	wmi_buf_t buf;
3162 	struct wmi_soc *soc;
3163 	uint8_t i;
3164 
3165 	wmi_hang_event_notifier_unregister();
3166 
3167 	wmi_wbuff_deregister(wmi_handle);
3168 
3169 	soc = wmi_handle->soc;
3170 	for (i = 0; i < WMI_MAX_RADIOS; i++) {
3171 		if (soc->wmi_pdev[i]) {
3172 			qdf_flush_workqueue(0,
3173 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3174 			qdf_destroy_workqueue(0,
3175 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3176 			wmi_debugfs_remove(soc->wmi_pdev[i]);
3177 			buf = qdf_nbuf_queue_remove(
3178 					&soc->wmi_pdev[i]->event_queue);
3179 			while (buf) {
3180 				qdf_nbuf_free(buf);
3181 				buf = qdf_nbuf_queue_remove(
3182 						&soc->wmi_pdev[i]->event_queue);
3183 			}
3184 
3185 			buf = qdf_nbuf_queue_remove(
3186 					&soc->wmi_pdev[i]->diag_event_queue);
3187 			while (buf) {
3188 				qdf_nbuf_free(buf);
3189 				buf = qdf_nbuf_queue_remove(
3190 					&soc->wmi_pdev[i]->diag_event_queue);
3191 			}
3192 
3193 			wmi_log_buffer_free(soc->wmi_pdev[i]);
3194 
3195 			/* Free events logs list */
3196 			if (soc->wmi_pdev[i]->events_logs_list)
3197 				qdf_mem_free(
3198 					soc->wmi_pdev[i]->events_logs_list);
3199 
3200 			qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock);
3201 			qdf_spinlock_destroy(
3202 					&soc->wmi_pdev[i]->diag_eventq_lock);
3203 
3204 			wmi_interface_sequence_deinit(soc->wmi_pdev[i]);
3205 			wmi_ext_dbgfs_deinit(soc->wmi_pdev[i]);
3206 
3207 			qdf_mem_free(soc->wmi_pdev[i]);
3208 		}
3209 	}
3210 	qdf_spinlock_destroy(&soc->ctx_lock);
3211 
3212 	if (soc->wmi_service_bitmap) {
3213 		qdf_mem_free(soc->wmi_service_bitmap);
3214 		soc->wmi_service_bitmap = NULL;
3215 	}
3216 
3217 	if (soc->wmi_ext_service_bitmap) {
3218 		qdf_mem_free(soc->wmi_ext_service_bitmap);
3219 		soc->wmi_ext_service_bitmap = NULL;
3220 	}
3221 
3222 	if (soc->wmi_ext2_service_bitmap) {
3223 		qdf_mem_free(soc->wmi_ext2_service_bitmap);
3224 		soc->wmi_ext2_service_bitmap = NULL;
3225 	}
3226 
3227 	/* Decrease the ref count once refcount infra is present */
3228 	soc->wmi_psoc = NULL;
3229 	qdf_mem_free(soc);
3230 }
3231 
3232 /**
3233  * wmi_unified_remove_work() - detach for WMI work
3234  * @wmi_handle: handle to WMI
3235  *
3236  * A function that does not fully detach WMI, but just remove work
3237  * queue items associated with it. This is used to make sure that
3238  * before any other processing code that may destroy related contexts
3239  * (HTC, etc), work queue processing on WMI has already been stopped.
3240  *
3241  * Return: None
3242  */
3243 void
3244 wmi_unified_remove_work(struct wmi_unified *wmi_handle)
3245 {
3246 	wmi_buf_t buf;
3247 
3248 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue);
3249 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
3250 	buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3251 	while (buf) {
3252 		qdf_nbuf_free(buf);
3253 		buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3254 	}
3255 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
3256 }
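/*
 * Illustrative teardown ordering (a sketch based on the comment above, not a
 * mandated sequence): stop WMI work-queue processing before the contexts it
 * depends on are destroyed, then perform the full detach, e.g.
 *
 *	wmi_unified_remove_work(wmi_handle);
 *	// ... tear down HTC / CE and other dependent contexts ...
 *	wmi_unified_detach(wmi_handle);
 *
 * The middle step stands in for whatever HTC/CE teardown the attach/detach
 * caller normally performs; it is shown only as a placeholder.
 */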
3257 
3258 /**
3259  * wmi_htc_tx_complete() - Process htc tx completion
3260  *
3261  * @ctx: handle to wmi
3262  * @htc_pkt: pointer to htc packet
3263  *
3264  * @Return: none.
3265  */
3266 static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
3267 {
3268 	struct wmi_soc *soc = (struct wmi_soc *) ctx;
3269 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3270 	u_int8_t *buf_ptr;
3271 	u_int32_t len;
3272 	struct wmi_unified *wmi_handle;
3273 #ifdef WMI_INTERFACE_EVENT_LOGGING
3274 	struct wmi_debug_log_info *log_info;
3275 	uint32_t cmd_id;
3276 	uint8_t *offset_ptr;
3277 	qdf_dma_addr_t dma_addr;
3278 	uint64_t phy_addr;
3279 #endif
3280 
3281 	ASSERT(wmi_cmd_buf);
3282 	wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint);
3283 	if (!wmi_handle) {
3284 		wmi_err("Unable to get wmi handle");
3285 		QDF_ASSERT(0);
3286 		return;
3287 	}
3288 	buf_ptr = (u_int8_t *)wmi_buf_data(wmi_cmd_buf);
3289 #ifdef WMI_INTERFACE_EVENT_LOGGING
3290 	log_info = &wmi_handle->log_info;
3291 
3292 	if (wmi_handle && log_info->wmi_logging_enable) {
3293 		cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf),
3294 				WMI_CMD_HDR, COMMANDID);
3295 
3296 		dma_addr = QDF_NBUF_CB_PADDR(wmi_cmd_buf);
3297 		phy_addr = qdf_mem_virt_to_phys(qdf_nbuf_data(wmi_cmd_buf));
3298 
3299 		qdf_spin_lock_bh(&log_info->wmi_record_lock);
3300 		/* Record 16 bytes of WMI cmd tx complete data
3301 		 * - exclude TLV and WMI headers
3302 		 */
3303 		offset_ptr = buf_ptr + wmi_handle->soc->buf_offset_command;
3304 		if (wmi_handle->ops->is_management_record(cmd_id)) {
3305 			WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3306 						       offset_ptr);
3307 		} else {
3308 			WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3309 						  offset_ptr, dma_addr,
3310 						  phy_addr);
3311 		}
3312 
3313 		qdf_spin_unlock_bh(&log_info->wmi_record_lock);
3314 	}
3315 #endif
3316 
3317 	wmi_interface_sequence_check(wmi_handle, wmi_cmd_buf);
3318 
3319 	len = qdf_nbuf_len(wmi_cmd_buf);
3320 	qdf_mem_zero(buf_ptr, len);
3321 	wmi_buf_free(wmi_cmd_buf);
3322 	qdf_mem_free(htc_pkt);
3323 	qdf_atomic_dec(&wmi_handle->pending_cmds);
3324 }
3325 
3326 #ifdef FEATURE_RUNTIME_PM
3327 /**
3328  * wmi_htc_log_pkt() - Print information of WMI command from HTC packet
3329  *
3330  * @ctx: handle of WMI context
3331  * @htc_pkt: handle of HTC packet
3332  *
3333  * @Return: none
3334  */
3335 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3336 {
3337 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3338 	uint32_t cmd_id;
3339 
3340 	ASSERT(wmi_cmd_buf);
3341 	cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), WMI_CMD_HDR,
3342 			       COMMANDID);
3343 
3344 	wmi_debug("WMI command from HTC packet: %s, ID: %d",
3345 		 wmi_id_to_name(cmd_id), cmd_id);
3346 }
3347 #else
3348 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3349 {
3350 }
3351 #endif
3352 
3353 /**
3354  * wmi_connect_pdev_htc_service() - WMI API to connect a pdev to HTC service
3355  *
3356  * @soc: handle to WMI SoC
3357  * @pdev_idx: Pdev index
3358  *
3359  * @Return: QDF_STATUS
3360  */
3361 static QDF_STATUS wmi_connect_pdev_htc_service(struct wmi_soc *soc,
3362 					       uint32_t pdev_idx)
3363 {
3364 	QDF_STATUS status;
3365 	struct htc_service_connect_resp response;
3366 	struct htc_service_connect_req connect;
3367 
3368 	OS_MEMZERO(&connect, sizeof(connect));
3369 	OS_MEMZERO(&response, sizeof(response));
3370 
3371 	/* meta data is unused for now */
3372 	connect.pMetaData = NULL;
3373 	connect.MetaDataLength = 0;
3374 	/* these fields are the same for all service endpoints */
3375 	connect.EpCallbacks.pContext = soc;
3376 	connect.EpCallbacks.EpTxCompleteMultiple =
3377 		NULL /* Control path completion ar6000_tx_complete */;
3378 	connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */;
3379 	connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */;
3380 	connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */;
3381 	connect.EpCallbacks.EpTxComplete =
3382 		wmi_htc_tx_complete /* WMI command tx completion */;
3383 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3384 
3385 	/* connect to control service */
3386 	connect.service_id = soc->svc_ids[pdev_idx];
3387 	status = htc_connect_service(soc->htc_handle, &connect, &response);
3388 
3389 	if (QDF_IS_STATUS_ERROR(status)) {
3390 		wmi_err("Failed to connect to WMI CONTROL service status:%d",
3391 			 status);
3392 		return status;
3393 	}
3394 
3395 	if (soc->is_async_ep)
3396 		htc_set_async_ep(soc->htc_handle, response.Endpoint, true);
3397 
3398 	soc->wmi_endpoint_id[pdev_idx] = response.Endpoint;
3399 	soc->max_msg_len[pdev_idx] = response.MaxMsgLength;
3400 
3401 	return QDF_STATUS_SUCCESS;
3402 }
3403 
3404 QDF_STATUS
3405 wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle,
3406 				HTC_HANDLE htc_handle)
3407 {
3408 	uint32_t i;
3409 	uint8_t wmi_ep_count;
3410 
3411 	wmi_handle->soc->htc_handle = htc_handle;
3412 
3413 	wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle);
3414 	if (wmi_ep_count > WMI_MAX_RADIOS)
3415 		return QDF_STATUS_E_FAULT;
3416 
3417 	for (i = 0; i < wmi_ep_count; i++)
3418 		wmi_connect_pdev_htc_service(wmi_handle->soc, i);
3419 
3420 	wmi_handle->htc_handle = htc_handle;
3421 	wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0];
3422 	wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0];
3423 
3424 	return QDF_STATUS_SUCCESS;
3425 }
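/*
 * Typical attach-time usage (a minimal sketch, assuming the caller already
 * holds a valid wmi_handle and HTC handle; the wrapper name is hypothetical):
 *
 *	static QDF_STATUS example_wmi_htc_attach(struct wmi_unified *wmi_handle,
 *						 HTC_HANDLE htc_handle)
 *	{
 *		QDF_STATUS status;
 *
 *		status = wmi_unified_connect_htc_service(wmi_handle, htc_handle);
 *		if (QDF_IS_STATUS_ERROR(status))
 *			return status;
 *
 *		// wmi_handle->wmi_endpoint_id / max_msg_len now reflect the
 *		// first (pdev 0) control service connection.
 *		return QDF_STATUS_SUCCESS;
 *	}
 */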
3426 
3427 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
3428 QDF_STATUS wmi_diag_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
3429 					     HTC_HANDLE htc_handle)
3430 {
3431 	QDF_STATUS status;
3432 	struct htc_service_connect_resp response = {0};
3433 	struct htc_service_connect_req connect = {0};
3434 
3435 	/* meta data is unused for now */
3436 	connect.pMetaData = NULL;
3437 	connect.MetaDataLength = 0;
3438 	connect.EpCallbacks.pContext = wmi_handle->soc;
3439 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3440 	connect.EpCallbacks.EpRecv = wmi_control_diag_rx /* wmi diag rx */;
3441 	connect.EpCallbacks.EpRecvRefill = NULL;
3442 	connect.EpCallbacks.EpSendFull = NULL;
3443 	connect.EpCallbacks.EpTxComplete = NULL;
3444 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3445 
3446 	/* connect to wmi diag service */
3447 	connect.service_id = WMI_CONTROL_DIAG_SVC;
3448 	status = htc_connect_service(htc_handle, &connect, &response);
3449 
3450 	if (QDF_IS_STATUS_ERROR(status)) {
3451 		wmi_err("Failed to connect to WMI DIAG service status:%d",
3452 			 status);
3453 		return status;
3454 	}
3455 
3456 	if (wmi_handle->soc->is_async_ep)
3457 		htc_set_async_ep(htc_handle, response.Endpoint, true);
3458 
3459 	wmi_handle->soc->wmi_diag_endpoint_id = response.Endpoint;
3460 
3461 	return QDF_STATUS_SUCCESS;
3462 }
3463 #endif
3464 
3465 /**
3466  * wmi_get_host_credits() -  WMI API to get updated host_credits
3467  *
3468  * @wmi_handle: handle to WMI.
3469  *
3470  * @Return: updated host_credits.
3471  */
3472 int wmi_get_host_credits(wmi_unified_t wmi_handle)
3473 {
3474 	int host_credits = 0;
3475 
3476 	htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle,
3477 						 &host_credits);
3478 	return host_credits;
3479 }
3480 
3481 /**
3482  * wmi_get_pending_cmds() - WMI API to get WMI Pending Commands in the HTC
3483  *                          queue
3484  *
3485  * @wmi_handle: handle to WMI.
3486  *
3487  * @Return: Pending Commands in the HTC queue.
3488  */
3489 int wmi_get_pending_cmds(wmi_unified_t wmi_handle)
3490 {
3491 	return qdf_atomic_read(&wmi_handle->pending_cmds);
3492 }
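/*
 * Example of checking that WMI has drained before a suspend request (a
 * minimal sketch; the helper name and the exact drain criteria used by
 * callers are assumptions for illustration):
 *
 *	static bool example_wmi_is_drained(wmi_unified_t wmi_handle)
 *	{
 *		// no commands pending in HTC and at least one TX credit left
 *		return wmi_get_pending_cmds(wmi_handle) == 0 &&
 *		       wmi_get_host_credits(wmi_handle) > 0;
 *	}
 */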
3493 
3494 /**
3495  * wmi_set_target_suspend() -  WMI API to set target suspend state
3496  *
3497  * @wmi_handle: handle to WMI.
3498  * @val: suspend state boolean.
3499  *
3500  * @Return: none.
3501  */
3502 void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val)
3503 {
3504 	qdf_atomic_set(&wmi_handle->is_target_suspended, val);
3505 }
3506 
3507 /**
3508  * wmi_is_target_suspended() - WMI API to check target suspend state
3509  * @wmi_handle: handle to WMI.
3510  *
3511  * WMI API to check target suspend state
3512  *
3513  * Return: true if target is suspended, else false.
3514  */
3515 bool wmi_is_target_suspended(struct wmi_unified *wmi_handle)
3516 {
3517 	return qdf_atomic_read(&wmi_handle->is_target_suspended);
3518 }
3519 qdf_export_symbol(wmi_is_target_suspended);
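/*
 * Illustrative suspend bookkeeping (sketch only; the surrounding suspend /
 * resume and command-send paths are hypothetical). The flag is typically
 * set once the target acknowledges suspend, checked before sending new
 * commands, and cleared on resume:
 *
 *	wmi_set_target_suspend(wmi_handle, true);	// on suspend ack
 *
 *	if (wmi_is_target_suspended(wmi_handle))	// in a send path
 *		return QDF_STATUS_E_BUSY;
 *
 *	wmi_set_target_suspend(wmi_handle, false);	// on resume
 */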
3520 
3521 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
3522 void wmi_set_qmi_stats(wmi_unified_t wmi_handle, bool val)
3523 {
3524 	wmi_handle->is_qmi_stats_enabled = val;
3525 }
3526 
3527 bool wmi_is_qmi_stats_enabled(struct wmi_unified *wmi_handle)
3528 {
3529 	return wmi_handle->is_qmi_stats_enabled;
3530 }
3531 #endif
3532 
3533 /**
3534  * wmi_tag_crash_inject() - WMI API to set crash injection state
3535  * @param wmi_handle:	handle to WMI.
3536  * @param flag:	crash injection state boolean.
3537  */
3538 void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag)
3539 {
3540 	wmi_handle->tag_crash_inject = flag;
3541 }
3542 
3543 /**
3544  * wmi_set_is_wow_bus_suspended() - WMI API to set WoW bus suspend state
3545  * @param wmi_handle:	handle to WMI.
3546  * @param val:		suspend state boolean.
3547  */
3548 void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val)
3549 {
3550 	qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val);
3551 }
3552 
3553 void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val)
3554 {
3555 	wmi_handle->tgt_force_assert_enable = val;
3556 }
3557 
3558 /**
3559  * wmi_stop() - generic function to block unified WMI command
3560  * @wmi_handle: handle to WMI.
3561  *
3562  * @Return: success always.
3563  */
3564 int
3565 wmi_stop(wmi_unified_t wmi_handle)
3566 {
3567 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3568 		  "WMI Stop");
3569 	wmi_handle->wmi_stopinprogress = 1;
3570 	return 0;
3571 }
3572 
3573 /**
3574  * wmi_start() - generic function to allow unified WMI command
3575  * @wmi_handle: handle to WMI.
3576  *
3577  * @Return: success always.
3578  */
3579 int
3580 wmi_start(wmi_unified_t wmi_handle)
3581 {
3582 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3583 		  "WMI Start");
3584 	wmi_handle->wmi_stopinprogress = 0;
3585 	return 0;
3586 }
3587 
3588 /**
3589  * wmi_is_blocked() - generic function to check if WMI is blocked
3590  * @wmi_handle: handle to WMI.
3591  *
3592  * @Return: true, if blocked, false if not blocked
3593  */
3594 bool
3595 wmi_is_blocked(wmi_unified_t wmi_handle)
3596 {
3597 	return wmi_handle->wmi_stopinprogress != 0;
3598 }
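/*
 * Illustrative use of the stop/start gate (a sketch; the recovery routine
 * is hypothetical). wmi_stop() only raises wmi_stopinprogress, so command
 * senders are expected to honour wmi_is_blocked() (or the equivalent check
 * in the send path):
 *
 *	static void example_wmi_recovery(wmi_unified_t wmi_handle)
 *	{
 *		wmi_stop(wmi_handle);		// block new WMI commands
 *		// ... recover target / HTC state ...
 *		wmi_start(wmi_handle);		// allow WMI commands again
 *	}
 */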
3599 
3600 /**
3601  * wmi_flush_endpoint() - API to flush all previous packets associated
3602  *                        with the wmi endpoint
3603  * @param wmi_handle: handle to WMI.
3604  */
3605 void
3606 wmi_flush_endpoint(wmi_unified_t wmi_handle)
3607 {
3608 	htc_flush_endpoint(wmi_handle->htc_handle,
3609 		wmi_handle->wmi_endpoint_id, 0);
3610 }
3611 qdf_export_symbol(wmi_flush_endpoint);
3612 
3613 /**
3614  * wmi_pdev_id_conversion_enable() - API to enable pdev_id/phy_id conversion
3615  *                     in WMI. By default pdev_id conversion is not done in WMI.
3616  *                     This API can be used to enable conversion in WMI.
3617  * @param wmi_handle   : handle to WMI
3618  * @param pdev_id_map  : pointer to pdev_id map
3619  * @param size         : size of pdev_id_map
3620  * Return: none
3621  */
3622 void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle,
3623 				   uint32_t *pdev_id_map,
3624 				   uint8_t size)
3625 {
3626 	if (wmi_handle->target_type == WMI_TLV_TARGET)
3627 		wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle,
3628 							       pdev_id_map,
3629 							       size);
3630 }
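/*
 * Illustrative enablement of host<->target pdev_id conversion (a sketch;
 * the two-entry map below is an assumed example for a TLV target, not a
 * value taken from this file). Each entry maps a host pdev index to the
 * corresponding target pdev_id:
 *
 *	static void example_enable_pdev_map(wmi_unified_t wmi_handle)
 *	{
 *		uint32_t pdev_id_map[] = { 2, 1 };	// hypothetical mapping
 *
 *		wmi_pdev_id_conversion_enable(wmi_handle, pdev_id_map,
 *					      QDF_ARRAY_SIZE(pdev_id_map));
 *	}
 */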
3631