xref: /wlan-dirver/qca-wifi-host-cmn/wmi/src/wmi_unified.c (revision f7586e623efa756e484a12a6c70ae6864eb1c1a2)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*
21  * Host WMI unified implementation
22  */
23 #include "htc_api.h"
25 #include "wmi_unified_priv.h"
26 #include "wmi_unified_api.h"
27 #include "qdf_module.h"
28 #include "qdf_platform.h"
29 #ifdef WMI_EXT_DBG
30 #include "qdf_list.h"
31 #include "qdf_atomic.h"
32 #endif
33 
34 #ifndef WMI_NON_TLV_SUPPORT
35 #include "wmi_tlv_helper.h"
36 #endif
37 
38 #include <linux/debugfs.h>
39 #include <target_if.h>
40 #include <qdf_debugfs.h>
41 #include "wmi_filtered_logging.h"
42 #include <wmi_hang_event.h>
43 
44 /* This check for CONFIG_WIN was temporarily added due to a redeclaration
45  * compilation error in MCL, caused by the inclusion of wmi.h in
46  * wmi_unified_api.h, which gets included here through ol_if_athvar.h.
47  * Eventually wmi.h is expected to be removed from wmi_unified_api.h after
48  * cleanup, which will require WMI_CMD_HDR to be defined here. */
49 /* Copied from wmi.h */
50 #undef MS
51 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
52 #undef SM
53 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
54 #undef WO
55 #define WO(_f)      ((_f##_OFFSET) >> 2)
56 
57 #undef GET_FIELD
58 #define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f)
59 #undef SET_FIELD
60 #define SET_FIELD(_addr, _f, _val)  \
61 	    (*((uint32_t *)(_addr) + WO(_f)) = \
62 		(*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f))
63 
64 #define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \
65 	    GET_FIELD(_msg_buf, _msg_type ## _ ## _f)
66 
67 #define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \
68 	    SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val)
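
/*
 * For example, WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID) expands to
 * GET_FIELD(buf, WMI_CMD_HDR_COMMANDID): it reads the 32-bit word at
 * offset WO(WMI_CMD_HDR_COMMANDID) and applies the COMMANDID mask and
 * shift values defined below.
 */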
69 
70 #define WMI_EP_APASS           0x0
71 #define WMI_EP_LPASS           0x1
72 #define WMI_EP_SENSOR          0x2
73 
74 #define WMI_INFOS_DBG_FILE_PERM (QDF_FILE_USR_READ | \
75 				 QDF_FILE_USR_WRITE | \
76 				 QDF_FILE_GRP_READ | \
77 				 QDF_FILE_OTH_READ)
78 
79 /*
80  * Control Path
81  */
82 typedef PREPACK struct {
83 	uint32_t	commandId:24,
84 			reserved:2, /* used for WMI endpoint ID */
85 			plt_priv:6; /* platform private */
86 } POSTPACK WMI_CMD_HDR;        /* used for commands and events */
87 
88 #define WMI_CMD_HDR_COMMANDID_LSB           0
89 #define WMI_CMD_HDR_COMMANDID_MASK          0x00ffffff
90 #define WMI_CMD_HDR_COMMANDID_OFFSET        0x00000000
91 #define WMI_CMD_HDR_WMI_ENDPOINTID_MASK        0x03000000
92 #define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET      24
93 #define WMI_CMD_HDR_PLT_PRIV_LSB               24
94 #define WMI_CMD_HDR_PLT_PRIV_MASK              0xff000000
95 #define WMI_CMD_HDR_PLT_PRIV_OFFSET            0x00000000
96 /* end of copy from wmi.h */
97 
98 #define WMI_MIN_HEAD_ROOM 64
99 
100 /* WBUFF pool sizes (number of buffers per pool) for WMI */
101 /* Number of buffers of size 256 bytes */
102 #define WMI_WBUFF_POOL_0_SIZE 128
103 /* Number of buffers of size 512 bytes */
104 #define WMI_WBUFF_POOL_1_SIZE 16
105 /* Number of buffers of size 1024 bytes */
106 #define WMI_WBUFF_POOL_2_SIZE 8
107 /* Number of buffers of size 2048 bytes */
108 #define WMI_WBUFF_POOL_3_SIZE 8
109 
110 #define RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT 500
111 
112 #ifdef WMI_INTERFACE_EVENT_LOGGING
113 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
114 /* TODO Cleanup this backported function */
115 static int wmi_bp_seq_printf(qdf_debugfs_file_t m, const char *f, ...)
116 {
117 	va_list args;
118 
119 	va_start(args, f);
120 	seq_vprintf(m, f, args);
121 	va_end(args);
122 
123 	return 0;
124 }
125 #else
126 #define wmi_bp_seq_printf(m, fmt, ...) seq_printf((m), fmt, ##__VA_ARGS__)
127 #endif
128 
129 #ifndef MAX_WMI_INSTANCES
130 #define CUSTOM_MGMT_CMD_DATA_SIZE 4
131 #endif
132 
133 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
134 /* WMI commands */
135 uint32_t g_wmi_command_buf_idx = 0;
136 struct wmi_command_debug wmi_command_log_buffer[WMI_CMD_DEBUG_MAX_ENTRY];
137 
138 /* WMI commands TX completed */
139 uint32_t g_wmi_command_tx_cmp_buf_idx = 0;
140 struct wmi_command_cmp_debug
141 	wmi_command_tx_cmp_log_buffer[WMI_CMD_CMPL_DEBUG_MAX_ENTRY];
142 
143 /* WMI events when processed */
144 uint32_t g_wmi_event_buf_idx = 0;
145 struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
146 
147 /* WMI events when queued */
148 uint32_t g_wmi_rx_event_buf_idx = 0;
149 struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
150 #endif
151 
152 static void wmi_minidump_detach(struct wmi_unified *wmi_handle)
153 {
154 	struct wmi_log_buf_t *info =
155 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
156 	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
157 
158 	qdf_minidump_remove(info->buf, buf_size, "wmi_tx_cmp");
159 }
160 
161 static void wmi_minidump_attach(struct wmi_unified *wmi_handle)
162 {
163 	struct wmi_log_buf_t *info =
164 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
165 	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
166 
167 	qdf_minidump_log(info->buf, buf_size, "wmi_tx_cmp");
168 }
169 
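/*
 * The *_RECORD macros below implement simple ring buffers: when the tail
 * index reaches the configured *_max_entry limit it wraps to zero and the
 * oldest slot is overwritten with the new command/event id, the first
 * wmi_record_max_length bytes of the payload, and a timestamp (the TX
 * completion variant also records the DMA and physical addresses). The
 * length counter only grows until the buffer is cleared via debugfs;
 * callers record under log_info.wmi_record_lock (see wmi_mgmt_cmd_record()).
 */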
170 #define WMI_COMMAND_RECORD(h, a, b) {					\
171 	if (wmi_cmd_log_max_entry <=					\
172 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))	\
173 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\
174 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
175 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\
176 						.command = a;		\
177 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
178 				wmi_command_log_buf_info.buf)		\
179 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\
180 			b, wmi_record_max_length);			\
181 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
182 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\
183 		time = qdf_get_log_timestamp();			\
184 	(*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++;	\
185 	h->log_info.wmi_command_log_buf_info.length++;			\
186 }
187 
188 #define WMI_COMMAND_TX_CMP_RECORD(h, a, b, da, pa) {			\
189 	if (wmi_cmd_cmpl_log_max_entry <=				\
190 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\
191 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
192 				p_buf_tail_idx) = 0;			\
193 	((struct wmi_command_cmp_debug *)h->log_info.			\
194 		wmi_command_tx_cmp_log_buf_info.buf)			\
195 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
196 				p_buf_tail_idx)].			\
197 							command	= a;	\
198 	qdf_mem_copy(((struct wmi_command_cmp_debug *)h->log_info.	\
199 				wmi_command_tx_cmp_log_buf_info.buf)	\
200 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
201 			p_buf_tail_idx)].				\
202 		data, b, wmi_record_max_length);			\
203 	((struct wmi_command_cmp_debug *)h->log_info.			\
204 		wmi_command_tx_cmp_log_buf_info.buf)			\
205 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
206 				p_buf_tail_idx)].			\
207 		time = qdf_get_log_timestamp();				\
208 	((struct wmi_command_cmp_debug *)h->log_info.			\
209 		wmi_command_tx_cmp_log_buf_info.buf)			\
210 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
211 				p_buf_tail_idx)].			\
212 		dma_addr = da;						\
213 	((struct wmi_command_cmp_debug *)h->log_info.			\
214 		wmi_command_tx_cmp_log_buf_info.buf)			\
215 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
216 				p_buf_tail_idx)].			\
217 		phy_addr = pa;						\
218 	(*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\
219 	h->log_info.wmi_command_tx_cmp_log_buf_info.length++;		\
220 }
221 
222 #define WMI_EVENT_RECORD(h, a, b) {					\
223 	if (wmi_event_log_max_entry <=					\
224 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))	\
225 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\
226 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
227 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].	\
228 		event = a;						\
229 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
230 				wmi_event_log_buf_info.buf)		\
231 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\
232 		wmi_record_max_length);					\
233 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
234 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\
235 		qdf_get_log_timestamp();				\
236 	(*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++;	\
237 	h->log_info.wmi_event_log_buf_info.length++;			\
238 }
239 
240 #define WMI_RX_EVENT_RECORD(h, a, b) {					\
241 	if (wmi_event_log_max_entry <=					\
242 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\
243 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\
244 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
245 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
246 		event = a;						\
247 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
248 				wmi_rx_event_log_buf_info.buf)		\
249 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
250 			data, b, wmi_record_max_length);		\
251 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
252 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
253 		time =	qdf_get_log_timestamp();			\
254 	(*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++;	\
255 	h->log_info.wmi_rx_event_log_buf_info.length++;			\
256 }
257 
258 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
259 uint32_t g_wmi_mgmt_command_buf_idx = 0;
260 struct
261 wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_TX_DEBUG_MAX_ENTRY];
262 
263 /* wmi_mgmt commands TX completed */
264 uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0;
265 struct wmi_command_debug
266 wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY];
267 
268 /* wmi_mgmt events when received */
269 uint32_t g_wmi_mgmt_rx_event_buf_idx = 0;
270 struct wmi_event_debug
271 wmi_mgmt_rx_event_log_buffer[WMI_MGMT_RX_DEBUG_MAX_ENTRY];
272 
273 /* wmi_diag events when received */
274 uint32_t g_wmi_diag_rx_event_buf_idx = 0;
275 struct wmi_event_debug
276 wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY];
277 #endif
278 
279 #define WMI_MGMT_COMMAND_RECORD(h, a, b) {                              \
280 	if (wmi_mgmt_tx_log_max_entry <=                                   \
281 		*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \
282 		*(h->log_info.wmi_mgmt_command_log_buf_info.		\
283 				p_buf_tail_idx) = 0;			\
284 	((struct wmi_command_debug *)h->log_info.                       \
285 		 wmi_mgmt_command_log_buf_info.buf)                     \
286 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
287 			command = a;                                    \
288 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.          \
289 				wmi_mgmt_command_log_buf_info.buf)      \
290 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
291 		data, b,                                                \
292 		wmi_record_max_length);                                	\
293 	((struct wmi_command_debug *)h->log_info.                       \
294 		 wmi_mgmt_command_log_buf_info.buf)                     \
295 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
296 			time =        qdf_get_log_timestamp();          \
297 	(*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\
298 	h->log_info.wmi_mgmt_command_log_buf_info.length++;             \
299 }
300 
301 #define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) {			\
302 	if (wmi_mgmt_tx_cmpl_log_max_entry <=				\
303 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
304 			p_buf_tail_idx))				\
305 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
306 			p_buf_tail_idx) = 0;				\
307 	((struct wmi_command_debug *)h->log_info.			\
308 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
309 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
310 				p_buf_tail_idx)].command = a;		\
311 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
312 				wmi_mgmt_command_tx_cmp_log_buf_info.buf)\
313 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
314 			p_buf_tail_idx)].data, b,			\
315 			wmi_record_max_length);				\
316 	((struct wmi_command_debug *)h->log_info.			\
317 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
318 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
319 				p_buf_tail_idx)].time =			\
320 		qdf_get_log_timestamp();				\
321 	(*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.		\
322 			p_buf_tail_idx))++;				\
323 	h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++;	\
324 }
325 
326 #define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do {				\
327 	if (wmi_mgmt_rx_log_max_entry <=				\
328 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\
329 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\
330 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
331 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\
332 					.event = a;			\
333 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
334 				wmi_mgmt_event_log_buf_info.buf)	\
335 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
336 			data, b, wmi_record_max_length);		\
337 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
338 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
339 			time = qdf_get_log_timestamp();			\
340 	(*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++;	\
341 	h->log_info.wmi_mgmt_event_log_buf_info.length++;		\
342 } while (0)
343 
344 #define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do {                             \
345 	if (wmi_diag_log_max_entry <=                                   \
346 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\
347 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\
348 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
349 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\
350 					.event = a;                     \
351 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.            \
352 				wmi_diag_event_log_buf_info.buf)        \
353 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
354 			data, b, wmi_record_max_length);                \
355 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
356 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
357 			time = qdf_get_log_timestamp();                 \
358 	(*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++;  \
359 	h->log_info.wmi_diag_event_log_buf_info.length++;               \
360 } while (0)
361 
362 /* These are defined as variables so they can be configured as module parameters */
363 /* WMI Commands */
364 uint32_t wmi_cmd_log_max_entry = WMI_CMD_DEBUG_MAX_ENTRY;
365 uint32_t wmi_cmd_cmpl_log_max_entry = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
366 /* WMI Events */
367 uint32_t wmi_event_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY;
368 /* WMI MGMT Tx */
369 uint32_t wmi_mgmt_tx_log_max_entry = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
370 uint32_t wmi_mgmt_tx_cmpl_log_max_entry = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
371 /* WMI MGMT Rx */
372 uint32_t wmi_mgmt_rx_log_max_entry = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
373 /* WMI Diag Event */
374 uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
375 /* WMI capture size */
376 uint32_t wmi_record_max_length = WMI_DEBUG_ENTRY_MAX_LENGTH;
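/*
 * Number of most recent entries that the generated debugfs "show" handlers
 * below dump from each ring buffer; the buffers themselves hold up to the
 * corresponding *_max_entry records.
 */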
377 uint32_t wmi_display_size = 100;
378 
379 /**
380  * wmi_log_init() - Initialize WMI event logging
381  * @wmi_handle: WMI handle.
382  *
383  * Return: Initialization status
384  */
385 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
386 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
387 {
388 	struct wmi_log_buf_t *cmd_log_buf =
389 			&wmi_handle->log_info.wmi_command_log_buf_info;
390 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
391 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
392 
393 	struct wmi_log_buf_t *event_log_buf =
394 			&wmi_handle->log_info.wmi_event_log_buf_info;
395 	struct wmi_log_buf_t *rx_event_log_buf =
396 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
397 
398 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
399 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
400 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
401 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
402 	struct wmi_log_buf_t *mgmt_event_log_buf =
403 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
404 	struct wmi_log_buf_t *diag_event_log_buf =
405 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
406 
407 	/* WMI commands */
408 	cmd_log_buf->length = 0;
409 	cmd_log_buf->buf_tail_idx = 0;
410 	cmd_log_buf->buf = wmi_command_log_buffer;
411 	cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx;
412 	cmd_log_buf->size = WMI_CMD_DEBUG_MAX_ENTRY;
413 
414 	/* WMI commands TX completed */
415 	cmd_tx_cmpl_log_buf->length = 0;
416 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
417 	cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer;
418 	cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx;
419 	cmd_tx_cmpl_log_buf->size = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
420 
421 	/* WMI events when processed */
422 	event_log_buf->length = 0;
423 	event_log_buf->buf_tail_idx = 0;
424 	event_log_buf->buf = wmi_event_log_buffer;
425 	event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx;
426 	event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
427 
428 	/* WMI events when queued */
429 	rx_event_log_buf->length = 0;
430 	rx_event_log_buf->buf_tail_idx = 0;
431 	rx_event_log_buf->buf = wmi_rx_event_log_buffer;
432 	rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx;
433 	rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
434 
435 	/* WMI Management commands */
436 	mgmt_cmd_log_buf->length = 0;
437 	mgmt_cmd_log_buf->buf_tail_idx = 0;
438 	mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer;
439 	mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx;
440 	mgmt_cmd_log_buf->size = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
441 
442 	/* WMI Management commands Tx completed*/
443 	mgmt_cmd_tx_cmp_log_buf->length = 0;
444 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
445 	mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer;
446 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
447 		&g_wmi_mgmt_command_tx_cmp_buf_idx;
448 	mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
449 
450 	/* WMI Management events when received */
451 	mgmt_event_log_buf->length = 0;
452 	mgmt_event_log_buf->buf_tail_idx = 0;
453 	mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer;
454 	mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx;
455 	mgmt_event_log_buf->size = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
456 
457 	/* WMI diag events when received */
458 	diag_event_log_buf->length = 0;
459 	diag_event_log_buf->buf_tail_idx = 0;
460 	diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer;
461 	diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx;
462 	diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
463 
464 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
465 	wmi_handle->log_info.wmi_logging_enable = 1;
466 
467 	return QDF_STATUS_SUCCESS;
468 }
469 #else
470 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
471 {
472 	struct wmi_log_buf_t *cmd_log_buf =
473 			&wmi_handle->log_info.wmi_command_log_buf_info;
474 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
475 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
476 
477 	struct wmi_log_buf_t *event_log_buf =
478 			&wmi_handle->log_info.wmi_event_log_buf_info;
479 	struct wmi_log_buf_t *rx_event_log_buf =
480 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
481 
482 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
483 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
484 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
485 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
486 	struct wmi_log_buf_t *mgmt_event_log_buf =
487 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
488 	struct wmi_log_buf_t *diag_event_log_buf =
489 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
490 
491 	wmi_handle->log_info.wmi_logging_enable = 0;
492 
493 	/* WMI commands */
494 	cmd_log_buf->length = 0;
495 	cmd_log_buf->buf_tail_idx = 0;
496 	cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
497 		wmi_cmd_log_max_entry * sizeof(struct wmi_command_debug));
498 	cmd_log_buf->size = wmi_cmd_log_max_entry;
499 
500 	if (!cmd_log_buf->buf)
501 		return QDF_STATUS_E_NOMEM;
502 
503 	cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx;
504 
505 	/* WMI commands TX completed */
506 	cmd_tx_cmpl_log_buf->length = 0;
507 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
508 	cmd_tx_cmpl_log_buf->buf = (struct wmi_command_cmp_debug *) qdf_mem_malloc(
509 		wmi_cmd_cmpl_log_max_entry * sizeof(struct wmi_command_cmp_debug));
510 	cmd_tx_cmpl_log_buf->size = wmi_cmd_cmpl_log_max_entry;
511 
512 	if (!cmd_tx_cmpl_log_buf->buf)
513 		return QDF_STATUS_E_NOMEM;
514 
515 	cmd_tx_cmpl_log_buf->p_buf_tail_idx =
516 		&cmd_tx_cmpl_log_buf->buf_tail_idx;
517 
518 	/* WMI events when processed */
519 	event_log_buf->length = 0;
520 	event_log_buf->buf_tail_idx = 0;
521 	event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
522 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
523 	event_log_buf->size = wmi_event_log_max_entry;
524 
525 	if (!event_log_buf->buf)
526 		return QDF_STATUS_E_NOMEM;
527 
528 	event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx;
529 
530 	/* WMI events when queued */
531 	rx_event_log_buf->length = 0;
532 	rx_event_log_buf->buf_tail_idx = 0;
533 	rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
534 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
535 	rx_event_log_buf->size = wmi_event_log_max_entry;
536 
537 	if (!rx_event_log_buf->buf)
538 		return QDF_STATUS_E_NOMEM;
539 
540 	rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx;
541 
542 	/* WMI Management commands */
543 	mgmt_cmd_log_buf->length = 0;
544 	mgmt_cmd_log_buf->buf_tail_idx = 0;
545 	mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
546 		wmi_mgmt_tx_log_max_entry * sizeof(struct wmi_command_debug));
547 	mgmt_cmd_log_buf->size = wmi_mgmt_tx_log_max_entry;
548 
549 	if (!mgmt_cmd_log_buf->buf)
550 		return QDF_STATUS_E_NOMEM;
551 
552 	mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx;
553 
554 	/* WMI Management commands Tx completed*/
555 	mgmt_cmd_tx_cmp_log_buf->length = 0;
556 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
557 	mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *)
558 		qdf_mem_malloc(
559 		wmi_mgmt_tx_cmpl_log_max_entry *
560 		sizeof(struct wmi_command_debug));
561 	mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_tx_cmpl_log_max_entry;
562 
563 	if (!mgmt_cmd_tx_cmp_log_buf->buf)
564 		return QDF_STATUS_E_NOMEM;
565 
566 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
567 		&mgmt_cmd_tx_cmp_log_buf->buf_tail_idx;
568 
569 	/* WMI Management events when received */
570 	mgmt_event_log_buf->length = 0;
571 	mgmt_event_log_buf->buf_tail_idx = 0;
572 
573 	mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
574 		wmi_mgmt_rx_log_max_entry *
575 		sizeof(struct wmi_event_debug));
576 	mgmt_event_log_buf->size = wmi_mgmt_rx_log_max_entry;
577 
578 	if (!mgmt_event_log_buf->buf)
579 		return QDF_STATUS_E_NOMEM;
580 
581 	mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx;
582 
583 	/* WMI diag events when received */
584 	diag_event_log_buf->length = 0;
585 	diag_event_log_buf->buf_tail_idx = 0;
586 
587 	diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
588 		wmi_diag_log_max_entry *
589 		sizeof(struct wmi_event_debug));
590 	diag_event_log_buf->size = wmi_diag_log_max_entry;
591 
592 	if (!diag_event_log_buf->buf)
593 		return QDF_STATUS_E_NOMEM;
594 
595 	diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx;
596 
597 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
598 	wmi_handle->log_info.wmi_logging_enable = 1;
599 
600 	wmi_filtered_logging_init(wmi_handle);
601 
602 	return QDF_STATUS_SUCCESS;
603 }
604 #endif
605 
606 /**
607  * wmi_log_buffer_free() - Free all dynamically allocated buffer memory for
608  * event logging
609  * @wmi_handle: WMI handle.
610  *
611  * Return: None
612  */
613 #ifdef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
614 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
615 {
616 	wmi_filtered_logging_free(wmi_handle);
617 
618 	if (wmi_handle->log_info.wmi_command_log_buf_info.buf)
619 		qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf);
620 	if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf)
621 		qdf_mem_free(
622 		wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf);
623 	if (wmi_handle->log_info.wmi_event_log_buf_info.buf)
624 		qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf);
625 	if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf)
626 		qdf_mem_free(
627 			wmi_handle->log_info.wmi_rx_event_log_buf_info.buf);
628 	if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf)
629 		qdf_mem_free(
630 			wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf);
631 	if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf)
632 		qdf_mem_free(
633 		wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf);
634 	if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf)
635 		qdf_mem_free(
636 			wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf);
637 	if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf)
638 		qdf_mem_free(
639 			wmi_handle->log_info.wmi_diag_event_log_buf_info.buf);
640 	wmi_handle->log_info.wmi_logging_enable = 0;
641 
642 	qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock);
643 }
644 #else
645 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
646 {
647 	/* Do Nothing */
648 }
649 #endif
650 
651 /**
652  * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer
653  * @log_buffer: the command log buffer metadata of the buffer to print
654  * @count: the maximum number of entries to print
655  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
656  * @print_priv: any data required by the print method, e.g. a file handle
657  *
658  * Return: None
659  */
660 static void
661 wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
662 			 qdf_abstract_print *print, void *print_priv)
663 {
664 	static const int data_len =
665 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
666 	char str[128];
667 	uint32_t idx;
668 
669 	if (count > log_buffer->size)
670 		count = log_buffer->size;
671 	if (count > log_buffer->length)
672 		count = log_buffer->length;
673 
674 	/* subtract count from index, and wrap if necessary */
675 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
676 	idx %= log_buffer->size;
677 
678 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
679 	while (count) {
680 		struct wmi_command_debug *cmd_log = (struct wmi_command_debug *)
681 			&((struct wmi_command_debug *)log_buffer->buf)[idx];
682 		uint64_t secs, usecs;
683 		int len = 0;
684 		int i;
685 
686 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
687 		len += scnprintf(str + len, sizeof(str) - len,
688 				 "% 8lld.%06lld    %6u (0x%06x)    ",
689 				 secs, usecs,
690 				 cmd_log->command, cmd_log->command);
691 		for (i = 0; i < data_len; ++i) {
692 			len += scnprintf(str + len, sizeof(str) - len,
693 					 "0x%08x ", cmd_log->data[i]);
694 		}
695 
696 		print(print_priv, str);
697 
698 		--count;
699 		++idx;
700 		if (idx >= log_buffer->size)
701 			idx = 0;
702 	}
703 }
704 
705 /**
706  * wmi_print_cmd_cmp_log_buffer() - wmi command completion log printer
707  * @log_buffer: the command completion log buffer metadata of the buffer to print
708  * @count: the maximum number of entries to print
709  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
710  * @print_priv: any data required by the print method, e.g. a file handle
711  *
712  * Return: None
713  */
714 static void
715 wmi_print_cmd_cmp_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
716 			 qdf_abstract_print *print, void *print_priv)
717 {
718 	static const int data_len =
719 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
720 	char str[128];
721 	uint32_t idx;
722 
723 	if (count > log_buffer->size)
724 		count = log_buffer->size;
725 	if (count > log_buffer->length)
726 		count = log_buffer->length;
727 
728 	/* subtract count from index, and wrap if necessary */
729 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
730 	idx %= log_buffer->size;
731 
732 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
733 	while (count) {
734 		struct wmi_command_cmp_debug *cmd_log = (struct wmi_command_cmp_debug *)
735 			&((struct wmi_command_cmp_debug *)log_buffer->buf)[idx];
736 		uint64_t secs, usecs;
737 		int len = 0;
738 		int i;
739 
740 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
741 		len += scnprintf(str + len, sizeof(str) - len,
742 				 "% 8lld.%06lld    %6u (0x%06x)    ",
743 				 secs, usecs,
744 				 cmd_log->command, cmd_log->command);
745 		for (i = 0; i < data_len; ++i) {
746 			len += scnprintf(str + len, sizeof(str) - len,
747 					 "0x%08x ", cmd_log->data[i]);
748 		}
749 
750 		print(print_priv, str);
751 
752 		--count;
753 		++idx;
754 		if (idx >= log_buffer->size)
755 			idx = 0;
756 	}
757 }
758 
759 /**
760  * wmi_print_event_log_buffer() - an output agnostic wmi event log printer
761  * @log_buffer: the event log buffer metadata of the buffer to print
762  * @count: the maximum number of entries to print
763  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
764  * @print_priv: any data required by the print method, e.g. a file handle
765  *
766  * Return: None
767  */
768 static void
769 wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
770 			   qdf_abstract_print *print, void *print_priv)
771 {
772 	static const int data_len =
773 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
774 	char str[128];
775 	uint32_t idx;
776 
777 	if (count > log_buffer->size)
778 		count = log_buffer->size;
779 	if (count > log_buffer->length)
780 		count = log_buffer->length;
781 
782 	/* subtract count from index, and wrap if necessary */
783 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
784 	idx %= log_buffer->size;
785 
786 	print(print_priv, "Time (seconds)      Event Id             Payload");
787 	while (count) {
788 		struct wmi_event_debug *event_log = (struct wmi_event_debug *)
789 			&((struct wmi_event_debug *)log_buffer->buf)[idx];
790 		uint64_t secs, usecs;
791 		int len = 0;
792 		int i;
793 
794 		qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs);
795 		len += scnprintf(str + len, sizeof(str) - len,
796 				 "% 8lld.%06lld    %6u (0x%06x)    ",
797 				 secs, usecs,
798 				 event_log->event, event_log->event);
799 		for (i = 0; i < data_len; ++i) {
800 			len += scnprintf(str + len, sizeof(str) - len,
801 					 "0x%08x ", event_log->data[i]);
802 		}
803 
804 		print(print_priv, str);
805 
806 		--count;
807 		++idx;
808 		if (idx >= log_buffer->size)
809 			idx = 0;
810 	}
811 }
812 
813 inline void
814 wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count,
815 		  qdf_abstract_print *print, void *print_priv)
816 {
817 	wmi_print_cmd_log_buffer(
818 		&wmi->log_info.wmi_command_log_buf_info,
819 		count, print, print_priv);
820 }
821 
822 inline void
823 wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
824 			 qdf_abstract_print *print, void *print_priv)
825 {
826 	wmi_print_cmd_cmp_log_buffer(
827 		&wmi->log_info.wmi_command_tx_cmp_log_buf_info,
828 		count, print, print_priv);
829 }
830 
831 inline void
832 wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count,
833 		       qdf_abstract_print *print, void *print_priv)
834 {
835 	wmi_print_cmd_log_buffer(
836 		&wmi->log_info.wmi_mgmt_command_log_buf_info,
837 		count, print, print_priv);
838 }
839 
840 inline void
841 wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
842 			      qdf_abstract_print *print, void *print_priv)
843 {
844 	wmi_print_cmd_log_buffer(
845 		&wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info,
846 		count, print, print_priv);
847 }
848 
849 inline void
850 wmi_print_event_log(wmi_unified_t wmi, uint32_t count,
851 		    qdf_abstract_print *print, void *print_priv)
852 {
853 	wmi_print_event_log_buffer(
854 		&wmi->log_info.wmi_event_log_buf_info,
855 		count, print, print_priv);
856 }
857 
858 inline void
859 wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count,
860 		       qdf_abstract_print *print, void *print_priv)
861 {
862 	wmi_print_event_log_buffer(
863 		&wmi->log_info.wmi_rx_event_log_buf_info,
864 		count, print, print_priv);
865 }
866 
867 inline void
868 wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count,
869 			 qdf_abstract_print *print, void *print_priv)
870 {
871 	wmi_print_event_log_buffer(
872 		&wmi->log_info.wmi_mgmt_event_log_buf_info,
873 		count, print, print_priv);
874 }
875 
876 
877 /* debugfs routines*/
878 
879 /**
880  * debug_wmi_##func_base##_show() - debugfs function to display the contents
881  * of the command and event buffers. The macro uses the maximum buffer length
882  * to display the buffer once it has wrapped around.
883  *
884  * @m: debugfs handler to access wmi_handle
885  * @v: Variable arguments (not used)
886  *
887  * Return: Length of characters printed
888  */
889 #define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
890 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
891 						void *v)		\
892 	{								\
893 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
894 		struct wmi_log_buf_t *wmi_log =				\
895 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
896 		int pos, nread, outlen;					\
897 		int i;							\
898 		uint64_t secs, usecs;					\
899 									\
900 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
901 		if (!wmi_log->length) {					\
902 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
903 			return wmi_bp_seq_printf(m,			\
904 			"no elements to read from ring buffer!\n");	\
905 		}							\
906 									\
907 		if (wmi_log->length <= wmi_ring_size)			\
908 			nread = wmi_log->length;			\
909 		else							\
910 			nread = wmi_ring_size;				\
911 									\
912 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
913 			/* tail can be 0 after wrap-around */		\
914 			pos = wmi_ring_size - 1;			\
915 		else							\
916 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
917 									\
918 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
919 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
920 		while (nread--) {					\
921 			struct wmi_record_type *wmi_record;		\
922 									\
923 			wmi_record = (struct wmi_record_type *)	\
924 			&(((struct wmi_record_type *)wmi_log->buf)[pos]);\
925 			outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n",	\
926 				(wmi_record->command));			\
927 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
928 				&usecs);				\
929 			outlen +=					\
930 			wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\
931 				secs, usecs);				\
932 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
933 			for (i = 0; i < (wmi_record_max_length/		\
934 					sizeof(uint32_t)); i++)		\
935 				outlen += wmi_bp_seq_printf(m, "%x ",	\
936 					wmi_record->data[i]);		\
937 			outlen += wmi_bp_seq_printf(m, "\n");		\
938 									\
939 			if (pos == 0)					\
940 				pos = wmi_ring_size - 1;		\
941 			else						\
942 				pos--;					\
943 		}							\
944 		return outlen;						\
945 	}
946 
947 #define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size)	\
948 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
949 						void *v)		\
950 	{								\
951 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
952 		struct wmi_log_buf_t *wmi_log =				\
953 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
954 		int pos, nread, outlen;					\
955 		int i;							\
956 		uint64_t secs, usecs;					\
957 									\
958 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
959 		if (!wmi_log->length) {					\
960 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
961 			return wmi_bp_seq_printf(m,			\
962 			"no elements to read from ring buffer!\n");	\
963 		}							\
964 									\
965 		if (wmi_log->length <= wmi_ring_size)			\
966 			nread = wmi_log->length;			\
967 		else							\
968 			nread = wmi_ring_size;				\
969 									\
970 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
971 			/* tail can be 0 after wrap-around */		\
972 			pos = wmi_ring_size - 1;			\
973 		else							\
974 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
975 									\
976 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
977 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
978 		while (nread--) {					\
979 			struct wmi_event_debug *wmi_record;		\
980 									\
981 			wmi_record = (struct wmi_event_debug *)		\
982 			&(((struct wmi_event_debug *)wmi_log->buf)[pos]);\
983 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
984 				&usecs);				\
985 			outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\
986 				(wmi_record->event));			\
987 			outlen +=					\
988 			wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\
989 				secs, usecs);				\
990 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
991 			for (i = 0; i < (wmi_record_max_length/		\
992 					sizeof(uint32_t)); i++)		\
993 				outlen += wmi_bp_seq_printf(m, "%x ",	\
994 					wmi_record->data[i]);		\
995 			outlen += wmi_bp_seq_printf(m, "\n");		\
996 									\
997 			if (pos == 0)					\
998 				pos = wmi_ring_size - 1;		\
999 			else						\
1000 				pos--;					\
1001 		}							\
1002 		return outlen;						\
1003 	}
1004 
1005 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size,
1006 				  wmi_command_debug);
1007 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size,
1008 				  wmi_command_cmp_debug);
1009 GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size);
1010 GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size);
1011 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size,
1012 				  wmi_command_debug);
1013 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log,
1014 					wmi_display_size,
1015 					wmi_command_debug);
1016 GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size);
1017 
1018 /**
1019  * debug_wmi_enable_show() - debugfs function to display the enable state of
1020  * the wmi logging feature.
1021  *
1022  * @m: debugfs handler to access wmi_handle
1023  * @v: Variable arguments (not used)
1024  *
1025  * Return: result of wmi_bp_seq_printf()
1026  */
1027 static int debug_wmi_enable_show(struct seq_file *m, void *v)
1028 {
1029 	wmi_unified_t wmi_handle = (wmi_unified_t) m->private;
1030 
1031 	return wmi_bp_seq_printf(m, "%d\n",
1032 			wmi_handle->log_info.wmi_logging_enable);
1033 }
1034 
1035 /**
1036  * debug_wmi_log_size_show() - debugfs function to display the configured sizes
1037  * of the wmi logging command/event buffers and management command/event buffers.
1038  *
1039  * @m: debugfs handler to access wmi_handle
1040  * @v: Variable arguments (not used)
1041  *
1042  * Return: Length of characters printed
1043  */
1044 static int debug_wmi_log_size_show(struct seq_file *m, void *v)
1045 {
1046 
1047 	wmi_bp_seq_printf(m, "WMI command/cmpl log max size:%d/%d\n",
1048 			  wmi_cmd_log_max_entry, wmi_cmd_cmpl_log_max_entry);
1049 	wmi_bp_seq_printf(m, "WMI management Tx/cmpl log max size:%d/%d\n",
1050 			  wmi_mgmt_tx_log_max_entry,
1051 			  wmi_mgmt_tx_cmpl_log_max_entry);
1052 	wmi_bp_seq_printf(m, "WMI event log max size:%d\n",
1053 			  wmi_event_log_max_entry);
1054 	wmi_bp_seq_printf(m, "WMI management Rx log max size:%d\n",
1055 			  wmi_mgmt_rx_log_max_entry);
1056 	return wmi_bp_seq_printf(m,
1057 				 "WMI diag log max size:%d\n",
1058 				 wmi_diag_log_max_entry);
1059 }
1060 
1061 /**
1062  * debug_wmi_##func_base##_write() - debugfs function to clear the
1063  * wmi logging command/event buffers and management command/event buffers.
1064  *
1065  * @file: file handler to access wmi_handle
1066  * @buf: received data buffer
1067  * @count: length of received buffer
1068  * @ppos: Not used
1069  *
1070  * Return: count
1071  */
1072 #define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
1073 	static ssize_t debug_wmi_##func_base##_write(struct file *file,	\
1074 				const char __user *buf,			\
1075 				size_t count, loff_t *ppos)		\
1076 	{								\
1077 		int k, ret;						\
1078 		wmi_unified_t wmi_handle =				\
1079 			((struct seq_file *)file->private_data)->private;\
1080 		struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info.	\
1081 				wmi_##func_base##_buf_info;		\
1082 		char locbuf[50];					\
1083 									\
1084 		if ((!buf) || (count >= sizeof(locbuf)))		\
1085 			return -EFAULT;					\
1086 									\
1087 		if (copy_from_user(locbuf, buf, count))			\
1088 			return -EFAULT;					\
1089 		locbuf[count] = '\0';					\
1090 		ret = sscanf(locbuf, "%d", &k);				\
1091 		if ((ret != 1) || (k != 0)) {                           \
1092 			wmi_err("Wrong input, echo 0 to clear the wmi buffer");\
1093 			return -EINVAL;					\
1094 		}							\
1095 									\
1096 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
1097 		qdf_mem_zero(wmi_log->buf, wmi_ring_size *		\
1098 				sizeof(struct wmi_record_type));	\
1099 		wmi_log->length = 0;					\
1100 		*(wmi_log->p_buf_tail_idx) = 0;				\
1101 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1102 									\
1103 		return count;						\
1104 	}
1105 
1106 GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_cmd_log_max_entry,
1107 			   wmi_command_debug);
1108 GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_cmd_cmpl_log_max_entry,
1109 			   wmi_command_cmp_debug);
1110 GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_event_log_max_entry,
1111 			   wmi_event_debug);
1112 GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_event_log_max_entry,
1113 			   wmi_event_debug);
1114 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_tx_log_max_entry,
1115 			   wmi_command_debug);
1116 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log,
1117 			   wmi_mgmt_tx_cmpl_log_max_entry, wmi_command_debug);
1118 GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_rx_log_max_entry,
1119 			   wmi_event_debug);
1120 
1121 /**
1122  * debug_wmi_enable_write() - debugfs function to enable/disable the
1123  * wmi logging feature.
1124  *
1125  * @file: file handler to access wmi_handle
1126  * @buf: received data buffer
1127  * @count: length of received buffer
1128  * @ppos: Not used
1129  *
1130  * Return: count
1131  */
1132 static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf,
1133 					size_t count, loff_t *ppos)
1134 {
1135 	wmi_unified_t wmi_handle =
1136 		((struct seq_file *)file->private_data)->private;
1137 	int k, ret;
1138 	char locbuf[50];
1139 
1140 	if ((!buf) || (count >= sizeof(locbuf)))
1141 		return -EFAULT;
1142 
1143 	if (copy_from_user(locbuf, buf, count))
1144 		return -EFAULT;
1145 	locbuf[count] = '\0';
1146 	ret = sscanf(locbuf, "%d", &k);
1147 	if ((ret != 1) || ((k != 0) && (k != 1)))
1148 		return -EINVAL;
1149 
1150 	wmi_handle->log_info.wmi_logging_enable = k;
1151 	return count;
1152 }
1153 
1154 /**
1155  * debug_wmi_log_size_write() - reserved.
1156  *
1157  * @file: file handler to access wmi_handle
1158  * @buf: received data buffer
1159  * @count: length of received buffer
1160  * @ppos: Not used
1161  *
1162  * Return: -EINVAL, as writing the log size is not supported
1163  */
1164 static ssize_t debug_wmi_log_size_write(struct file *file,
1165 		const char __user *buf, size_t count, loff_t *ppos)
1166 {
1167 	return -EINVAL;
1168 }
1169 
1170 /* Structure to maintain debug information */
1171 struct wmi_debugfs_info {
1172 	const char *name;
1173 	const struct file_operations *ops;
1174 };
1175 
1176 #define DEBUG_FOO(func_base) { .name = #func_base,			\
1177 	.ops = &debug_##func_base##_ops }
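/*
 * For example, DEBUG_FOO(wmi_command_log) expands to
 * { .name = "wmi_command_log", .ops = &debug_wmi_command_log_ops }.
 */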
1178 
1179 /**
1180  * debug_##func_base##_open() - Open debugfs entry for respective command
1181  * and event buffer.
1182  *
1183  * @inode: node for debug dir entry
1184  * @file: file handler
1185  *
1186  * Return: open status
1187  */
1188 #define GENERATE_DEBUG_STRUCTS(func_base)				\
1189 	static int debug_##func_base##_open(struct inode *inode,	\
1190 						struct file *file)	\
1191 	{								\
1192 		return single_open(file, debug_##func_base##_show,	\
1193 				inode->i_private);			\
1194 	}								\
1195 									\
1196 									\
1197 	static struct file_operations debug_##func_base##_ops = {	\
1198 		.open		= debug_##func_base##_open,		\
1199 		.read		= seq_read,				\
1200 		.llseek		= seq_lseek,				\
1201 		.write		= debug_##func_base##_write,		\
1202 		.release	= single_release,			\
1203 	};
1204 
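/*
 * Each GENERATE_DEBUG_STRUCTS(x) invocation below binds matching
 * debug_x_show() and debug_x_write() handlers into a file_operations
 * instance named debug_x_ops, referenced by wmi_debugfs_infos[].
 */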
1205 GENERATE_DEBUG_STRUCTS(wmi_command_log);
1206 GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log);
1207 GENERATE_DEBUG_STRUCTS(wmi_event_log);
1208 GENERATE_DEBUG_STRUCTS(wmi_rx_event_log);
1209 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log);
1210 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log);
1211 GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log);
1212 GENERATE_DEBUG_STRUCTS(wmi_enable);
1213 GENERATE_DEBUG_STRUCTS(wmi_log_size);
1214 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1215 GENERATE_DEBUG_STRUCTS(filtered_wmi_cmds);
1216 GENERATE_DEBUG_STRUCTS(filtered_wmi_evts);
1217 GENERATE_DEBUG_STRUCTS(wmi_filtered_command_log);
1218 GENERATE_DEBUG_STRUCTS(wmi_filtered_event_log);
1219 #endif
1220 
1221 struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = {
1222 	DEBUG_FOO(wmi_command_log),
1223 	DEBUG_FOO(wmi_command_tx_cmp_log),
1224 	DEBUG_FOO(wmi_event_log),
1225 	DEBUG_FOO(wmi_rx_event_log),
1226 	DEBUG_FOO(wmi_mgmt_command_log),
1227 	DEBUG_FOO(wmi_mgmt_command_tx_cmp_log),
1228 	DEBUG_FOO(wmi_mgmt_event_log),
1229 	DEBUG_FOO(wmi_enable),
1230 	DEBUG_FOO(wmi_log_size),
1231 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1232 	DEBUG_FOO(filtered_wmi_cmds),
1233 	DEBUG_FOO(filtered_wmi_evts),
1234 	DEBUG_FOO(wmi_filtered_command_log),
1235 	DEBUG_FOO(wmi_filtered_event_log),
1236 #endif
1237 };
1238 
1239 /**
1240  * wmi_debugfs_create() - Create debugfs entries for wmi logging.
1241  *
1242  * @wmi_handle: wmi handle
1243  * @par_entry: debug directory entry
1245  *
1246  * Return: none
1247  */
1248 static void wmi_debugfs_create(wmi_unified_t wmi_handle,
1249 			       struct dentry *par_entry)
1250 {
1251 	int i;
1252 
1253 	if (!par_entry)
1254 		goto out;
1255 
1256 	for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1257 		wmi_handle->debugfs_de[i] = qdf_debugfs_create_entry(
1258 						wmi_debugfs_infos[i].name,
1259 						WMI_INFOS_DBG_FILE_PERM,
1260 						par_entry,
1261 						wmi_handle,
1262 						wmi_debugfs_infos[i].ops);
1263 
1264 		if (!wmi_handle->debugfs_de[i]) {
1265 			wmi_err("debug Entry creation failed!");
1266 			goto out;
1267 		}
1268 	}
1269 
1270 	return;
1271 
1272 out:
1273 	wmi_err("debug Entry creation failed!");
1274 	wmi_log_buffer_free(wmi_handle);
1275 	return;
1276 }
1277 
1278 /**
1279  * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
1280  * @wmi_handle: wmi handle
1283  *
1284  * Return: none
1285  */
1286 static void wmi_debugfs_remove(wmi_unified_t wmi_handle)
1287 {
1288 	int i;
1289 	struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir;
1290 
1291 	if (!dentry)
1292 		return;
1293 
1294 	for (i = 0; i < NUM_DEBUG_INFOS; ++i)
1295 		wmi_handle->debugfs_de[i] = NULL;
1296 
1297 	qdf_debugfs_remove_dir_recursive(dentry);
1300 }
1301 
1302 /**
1303  * wmi_debugfs_init() - create the debugfs directory and entries for wmi logging
1304  *
1305  * @wmi_handle: wmi handle
1306  * @pdev_idx: pdev index
1307  *
1308  * Return: init status
1309  */
1310 static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx)
1311 {
1312 	char buf[32];
1313 
1314 	snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u",
1315 		 wmi_handle->soc->soc_idx, pdev_idx);
1316 
1317 	wmi_handle->log_info.wmi_log_debugfs_dir =
1318 		qdf_debugfs_create_dir(buf, NULL);
1319 
1320 	if (!wmi_handle->log_info.wmi_log_debugfs_dir) {
1321 		wmi_err("error while creating debugfs dir for %s", buf);
1322 		return QDF_STATUS_E_FAILURE;
1323 	}
1324 	wmi_debugfs_create(wmi_handle,
1325 			   wmi_handle->log_info.wmi_log_debugfs_dir);
1326 
1327 	return QDF_STATUS_SUCCESS;
1328 }
1329 
1330 /**
1331  * wmi_mgmt_cmd_record() - Wrapper function for mgmt command logging macro
1332  *
1333  * @wmi_handle: wmi handle
1334  * @cmd: mgmt command
1335  * @header: pointer to 802.11 header
1336  * @vdev_id: vdev id
1337  * @chanfreq: channel frequency
1338  *
1339  * Return: none
1340  */
1341 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1342 			void *header, uint32_t vdev_id, uint32_t chanfreq)
1343 {
1344 
1345 	uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE];
1346 
1347 	data[0] = ((struct wmi_command_header *)header)->type;
1348 	data[1] = ((struct wmi_command_header *)header)->sub_type;
1349 	data[2] = vdev_id;
1350 	data[3] = chanfreq;
1351 
1352 	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
1353 
1354 	WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data);
1355 	wmi_specific_cmd_record(wmi_handle, cmd, (uint8_t *)data);
1356 	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
1357 }
1358 #else
1359 /**
1360  * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
1361  * @wmi_handle: wmi handle
1364  *
1365  * Return: none
1366  */
1367 static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { }
1368 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1369 			void *header, uint32_t vdev_id, uint32_t chanfreq) { }
1370 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { }
1371 static void wmi_minidump_detach(struct wmi_unified *wmi_handle) { }
1372 static void wmi_minidump_attach(struct wmi_unified *wmi_handle) { }
1373 #endif /*WMI_INTERFACE_EVENT_LOGGING */
1374 qdf_export_symbol(wmi_mgmt_cmd_record);
1375 
1376 #ifdef WMI_EXT_DBG
1377 
1378 /**
1379  * wmi_ext_dbg_msg_enqueue() - enqueue wmi message
1380  * @wmi_handle: wmi handler
1381  * @msg: wmi message to enqueue
1382  * Return: size of wmi message queue after enqueue
1383  */
1384 static uint32_t wmi_ext_dbg_msg_enqueue(struct wmi_unified *wmi_handle,
1385 					struct wmi_ext_dbg_msg *msg)
1386 {
1387 	uint32_t list_size;
1388 
1389 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1390 	qdf_list_insert_back_size(&wmi_handle->wmi_ext_dbg_msg_queue,
1391 				  &msg->node, &list_size);
1392 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1393 
1394 	return list_size;
1395 }
1396 
1397 /**
1398  * wmi_ext_dbg_msg_dequeue() - dequeue wmi message
1399  * @wmi_handle: wmi handler
1400  *
1401  * Return: wmi msg on success else NULL
1402  */
1403 static struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_dequeue(struct wmi_unified
1404 						       *wmi_handle)
1405 {
1406 	qdf_list_node_t *list_node = NULL;
1407 
1408 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1409 	qdf_list_remove_front(&wmi_handle->wmi_ext_dbg_msg_queue, &list_node);
1410 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1411 
1412 	if (!list_node)
1413 		return NULL;
1414 
1415 	return qdf_container_of(list_node, struct wmi_ext_dbg_msg, node);
1416 }
1417 
1418 /**
1419  * wmi_ext_dbg_msg_record() - record wmi messages
1420  * @wmi_handle: wmi handler
1421  * @buf: wmi message buffer
1422  * @len: wmi message length
1423  * @type: wmi message type
1424  *
1425  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1426  */
1427 static QDF_STATUS wmi_ext_dbg_msg_record(struct wmi_unified *wmi_handle,
1428 					 uint8_t *buf, uint32_t len,
1429 					 enum WMI_MSG_TYPE type)
1430 {
1431 	struct wmi_ext_dbg_msg *msg;
1432 	uint32_t list_size;
1433 
1434 	msg = wmi_ext_dbg_msg_get(len);
1435 	if (!msg)
1436 		return QDF_STATUS_E_NOMEM;
1437 
1438 	msg->len = len;
1439 	msg->type = type;
1440 	qdf_mem_copy(msg->buf, buf, len);
1441 	msg->ts = qdf_get_log_timestamp();
1442 	list_size = wmi_ext_dbg_msg_enqueue(wmi_handle, msg);
1443 
1444 	if (list_size >= wmi_handle->wmi_ext_dbg_msg_queue_size) {
1445 		msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1446 		wmi_ext_dbg_msg_put(msg);
1447 	}
1448 
1449 	return QDF_STATUS_SUCCESS;
1450 }
1451 
1452 /**
1453  * wmi_ext_dbg_msg_cmd_record() - record wmi command messages
1454  * @wmi_handle: wmi handler
1455  * @buf: wmi command buffer
1456  * @len: wmi command message length
1457  *
1458  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1459  */
1460 static QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified *wmi_handle,
1461 					     uint8_t *buf, uint32_t len)
1462 {
1463 	return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1464 				      WMI_MSG_TYPE_CMD);
1465 }
1466 
1467 /**
1468  * wmi_ext_dbg_msg_event_record() - record wmi event messages
1469  * @wmi_handle: wmi handler
1470  * @buf: wmi event buffer
1471  * @len: wmi event message length
1472  *
1473  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1474  */
1475 static QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified *wmi_handle,
1476 					       uint8_t *buf, uint32_t len)
1477 {
1478 	uint32_t id;
1479 
1480 	id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
1481 	if (id != wmi_handle->wmi_events[wmi_diag_event_id])
1482 		return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1483 					      WMI_MSG_TYPE_EVENT);
1484 
1485 	return QDF_STATUS_SUCCESS;
1486 }
1487 
1488 /**
1489  * wmi_ext_dbg_msg_queue_init() - create the wmi message queue and its lock
1490  * @wmi_handle: wmi handler
1491  *
1492  * Return: none
1493  */
1494 static void wmi_ext_dbg_msg_queue_init(struct wmi_unified *wmi_handle)
1495 {
1496 	qdf_list_create(&wmi_handle->wmi_ext_dbg_msg_queue,
1497 			wmi_handle->wmi_ext_dbg_msg_queue_size);
1498 	qdf_spinlock_create(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1499 }
1500 
1501 /**
1502  * wmi_ext_dbg_msg_queue_deinit() - destroy the wmi message queue and its lock
1503  * @wmi_handle: wmi handler
1504  *
1505  * Return: none
1506  */
1507 static void wmi_ext_dbg_msg_queue_deinit(struct wmi_unified *wmi_handle)
1508 {
1509 	qdf_list_destroy(&wmi_handle->wmi_ext_dbg_msg_queue);
1510 	qdf_spinlock_destroy(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1511 }
1512 
1513 /**
1514  * wmi_ext_dbg_msg_show() - debugfs function to display whole content of
1515  * wmi command/event messages including headers.
1516  * @file: qdf debugfs file handler
1517  * @arg: pointer to wmi handler
1518  *
1519  * Return: QDF_STATUS_SUCCESS if all the messages are shown successfully,
1520  * else QDF_STATUS_E_AGAIN if more data to show.
1521  */
1522 static QDF_STATUS wmi_ext_dbg_msg_show(qdf_debugfs_file_t file, void *arg)
1523 {
1524 	struct wmi_unified *wmi_handle = (struct wmi_unified *)arg;
1525 	struct wmi_ext_dbg_msg *msg;
1526 	uint64_t secs, usecs;
1527 
1528 	msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1529 	if (!msg)
1530 		return QDF_STATUS_SUCCESS;
1531 
1532 	qdf_debugfs_printf(file, "%s: 0x%x\n",
1533 			   msg->type == WMI_MSG_TYPE_CMD ? "COMMAND" :
1534 			   "EVENT", WMI_GET_FIELD(msg->buf, WMI_CMD_HDR,
1535 						  COMMANDID));
1536 	qdf_log_timestamp_to_secs(msg->ts, &secs, &usecs);
1537 	qdf_debugfs_printf(file, "Time: %llu.%06llu\n", secs, usecs);
1538 	qdf_debugfs_printf(file, "Length:%d\n", msg->len);
1539 	qdf_debugfs_hexdump(file, msg->buf, msg->len,
1540 			    WMI_EXT_DBG_DUMP_ROW_SIZE,
1541 			    WMI_EXT_DBG_DUMP_GROUP_SIZE);
1542 	qdf_debugfs_printf(file, "\n");
1543 
1544 	if (qdf_debugfs_overflow(file)) {
1545 		qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1546 		qdf_list_insert_front(&wmi_handle->wmi_ext_dbg_msg_queue,
1547 				      &msg->node);
1548 		qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1549 
1550 	} else {
1551 		wmi_ext_dbg_msg_put(msg);
1552 	}
1553 
1554 	return QDF_STATUS_E_AGAIN;
1555 }
1556 
1557 /**
1558  * wmi_ext_dbg_msg_write() - debugfs write not supported
1559  * @priv: private data
1560  * @buf: received data buffer
1561  * @len: length of received buffer
1562  *
1563  * Return: QDF_STATUS_E_NOSUPPORT.
1564  */
1565 static QDF_STATUS wmi_ext_dbg_msg_write(void *priv, const char *buf,
1566 					qdf_size_t len)
1567 {
1568 	return QDF_STATUS_E_NOSUPPORT;
1569 }
1570 
1571 static struct qdf_debugfs_fops wmi_ext_dbgfs_ops[WMI_MAX_RADIOS];
1572 
1573 /**
1574  * wmi_ext_dbgfs_init() - init debugfs items for extended wmi dump.
1575  * @wmi_handle: wmi handle
1576  * @pdev_idx: pdev index
1577  *
1578  * Return: QDF_STATUS_SUCCESS if debugfs is initialized else
1579  * QDF_STATUS_E_FAILURE
1580  */
1581 static QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1582 				     uint32_t pdev_idx)
1583 {
1584 	qdf_dentry_t dentry;
1585 	char buf[32];
1586 
1587 	/* To maintain backward compatibility, the naming convention for the
1588 	 * PDEV 0 dentry is kept the same as before. For more than one PDEV,
1589 	 * dentry names are suffixed with the SOC and PDEV indices.
1590 	 */
1591 	if (wmi_handle->soc->soc_idx == 0 && pdev_idx == 0) {
1592 		dentry  = qdf_debugfs_create_dir(WMI_EXT_DBG_DIR, NULL);
1593 	} else {
1594 		snprintf(buf, sizeof(buf), "WMI_EXT_DBG_SOC%u_PDEV%u",
1595 			 wmi_handle->soc->soc_idx, pdev_idx);
1596 		dentry  = qdf_debugfs_create_dir(buf, NULL);
1597 	}
1598 
1599 	if (!dentry) {
1600 		wmi_err("error while creating extended wmi debugfs dir");
1601 		return QDF_STATUS_E_FAILURE;
1602 	}
1603 
1604 	wmi_ext_dbgfs_ops[pdev_idx].show = wmi_ext_dbg_msg_show;
1605 	wmi_ext_dbgfs_ops[pdev_idx].write = wmi_ext_dbg_msg_write;
1606 	wmi_ext_dbgfs_ops[pdev_idx].priv = wmi_handle;
1607 	if (!qdf_debugfs_create_file(WMI_EXT_DBG_FILE, WMI_EXT_DBG_FILE_PERM,
1608 				     dentry, &wmi_ext_dbgfs_ops[pdev_idx])) {
1609 		qdf_debugfs_remove_dir(dentry);
1610 		wmi_err("Error while creating extended wmi debugfs file");
1611 		return QDF_STATUS_E_FAILURE;
1612 	}
1613 
1614 	wmi_handle->wmi_ext_dbg_dentry = dentry;
1615 	wmi_handle->wmi_ext_dbg_msg_queue_size = WMI_EXT_DBG_QUEUE_SIZE;
1616 	wmi_ext_dbg_msg_queue_init(wmi_handle);
1617 
1618 	return QDF_STATUS_SUCCESS;
1619 }
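/*
 * Illustrative note (editorial, not compiled into the driver): with the
 * naming scheme above, SOC0/PDEV0 keeps the legacy directory named by
 * WMI_EXT_DBG_DIR, while any additional radio gets its own directory built
 * from the snprintf() format string, e.g. "WMI_EXT_DBG_SOC0_PDEV1" for
 * pdev 1 of soc 0. Reading the file named by WMI_EXT_DBG_FILE in that
 * directory replays the queued command/event records through
 * wmi_ext_dbg_msg_show(). The example directory name is derived from the
 * format string above and is not an authoritative path.
 */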
1620 
1621 /**
1622  * wmi_ext_dbgfs_deinit() - cleanup/deinit debugfs items of extended wmi dump.
1623  * @wmi_handle: wmi handle
1624  *
1625  * Return: QDF_STATUS_SUCCESS if cleanup is successful
1626  */
1627 static QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1628 {
1629 	struct wmi_ext_dbg_msg *msg;
1630 
1631 	while ((msg = wmi_ext_dbg_msg_dequeue(wmi_handle)))
1632 		wmi_ext_dbg_msg_put(msg);
1633 
1634 	wmi_ext_dbg_msg_queue_deinit(wmi_handle);
1635 	qdf_debugfs_remove_dir_recursive(wmi_handle->wmi_ext_dbg_dentry);
1636 
1637 	return QDF_STATUS_SUCCESS;
1638 }
1639 
1640 #else
1641 
1642 static inline QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified
1643 						    *wmi_handle,
1644 						    uint8_t *buf, uint32_t len)
1645 {
1646 	return QDF_STATUS_SUCCESS;
1647 }
1648 
1649 static inline QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified
1650 						      *wmi_handle,
1651 						      uint8_t *buf, uint32_t len)
1652 {
1653 	return QDF_STATUS_SUCCESS;
1654 }
1655 
1656 static inline QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1657 					    uint32_t pdev_idx)
1658 {
1659 	return QDF_STATUS_SUCCESS;
1660 }
1661 
1662 static inline QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1663 {
1664 	return QDF_STATUS_SUCCESS;
1665 }
1666 
1667 #endif /* WMI_EXT_DBG */
1668 
1669 int wmi_get_host_credits(wmi_unified_t wmi_handle);
1670 /* WMI buffer APIs */
1671 
1672 #ifdef NBUF_MEMORY_DEBUG
1673 wmi_buf_t
1674 wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len,
1675 		    const char *func_name,
1676 		    uint32_t line_num)
1677 {
1678 	wmi_buf_t wmi_buf;
1679 
1680 	if (roundup(len, 4) > wmi_handle->max_msg_len) {
1681 		wmi_err("Invalid length %u (via %s:%u) max size: %u",
1682 			len, func_name, line_num,
1683 			wmi_handle->max_msg_len);
1684 		QDF_ASSERT(0);
1685 		return NULL;
1686 	}
1687 
1688 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, func_name,
1689 				 line_num);
1690 	if (!wmi_buf)
1691 		wmi_buf = qdf_nbuf_alloc_debug(NULL,
1692 					       roundup(len + WMI_MIN_HEAD_ROOM,
1693 						       4),
1694 					       WMI_MIN_HEAD_ROOM, 4, false,
1695 					       func_name, line_num);
1696 	if (!wmi_buf)
1697 		return NULL;
1698 
1699 	/* Clear the wmi buffer */
1700 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1701 
1702 	/*
1703 	 * Set the length of the buffer to match the allocation size.
1704 	 */
1705 	qdf_nbuf_set_pktlen(wmi_buf, len);
1706 
1707 	return wmi_buf;
1708 }
1709 qdf_export_symbol(wmi_buf_alloc_debug);
1710 
1711 void wmi_buf_free(wmi_buf_t net_buf)
1712 {
1713 	net_buf = wbuff_buff_put(net_buf);
1714 	if (net_buf)
1715 		qdf_nbuf_free(net_buf);
1716 }
1717 qdf_export_symbol(wmi_buf_free);
1718 #else
1719 wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len,
1720 			   const char *func, uint32_t line)
1721 {
1722 	wmi_buf_t wmi_buf;
1723 
1724 	if (roundup(len, 4) > wmi_handle->max_msg_len) {
1725 		QDF_DEBUG_PANIC("Invalid length %u (via %s:%u) max size: %u",
1726 				len, func, line, wmi_handle->max_msg_len);
1727 		return NULL;
1728 	}
1729 
1730 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, __func__,
1731 				 __LINE__);
1732 	if (!wmi_buf)
1733 		wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len +
1734 				WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4,
1735 				false, func, line);
1736 
1737 	if (!wmi_buf) {
1738 		wmi_nofl_err("%s:%d, failed to alloc len:%d", func, line, len);
1739 		return NULL;
1740 	}
1741 
1742 	/* Clear the wmi buffer */
1743 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1744 
1745 	/*
1746 	 * Set the length of the buffer to match the allocation size.
1747 	 */
1748 	qdf_nbuf_set_pktlen(wmi_buf, len);
1749 
1750 	return wmi_buf;
1751 }
1752 qdf_export_symbol(wmi_buf_alloc_fl);
1753 
1754 void wmi_buf_free(wmi_buf_t net_buf)
1755 {
1756 	net_buf = wbuff_buff_put(net_buf);
1757 	if (net_buf)
1758 		qdf_nbuf_free(net_buf);
1759 }
1760 qdf_export_symbol(wmi_buf_free);
1761 #endif
1762 
1763 /**
1764  * wmi_get_max_msg_len() - get maximum WMI message length
1765  * @wmi_handle: WMI handle.
1766  *
1767  * This function returns the maximum WMI message length
1768  *
1769  * Return: maximum WMI message length
1770  */
1771 uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle)
1772 {
1773 	return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM;
1774 }
1775 qdf_export_symbol(wmi_get_max_msg_len);
1776 
1777 #ifndef WMI_CMD_STRINGS
1778 static uint8_t *wmi_id_to_name(uint32_t wmi_command)
1779 {
1780 	return "Invalid WMI cmd";
1781 }
1782 #endif
1783 
1784 static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag)
1785 {
1786 	wmi_debug("Send WMI command:%s command_id:%d htc_tag:%d",
1787 		 wmi_id_to_name(cmd_id), cmd_id, tag);
1788 }
1789 
1790 /**
1791  * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence
1792  * @cmd_id: command to check
1793  *
1794  * Return: true if the command is part of the resume sequence.
1795  */
1796 #ifdef WLAN_POWER_MANAGEMENT_OFFLOAD
1797 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1798 {
1799 	switch (cmd_id) {
1800 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
1801 	case WMI_PDEV_RESUME_CMDID:
1802 		return true;
1803 
1804 	default:
1805 		return false;
1806 	}
1807 }
1808 
1809 #else
1810 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1811 {
1812 	return false;
1813 }
1814 
1815 #endif
1816 
1817 #ifdef FEATURE_WLAN_D0WOW
1818 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1819 {
1820 	wmi_d0_wow_enable_disable_cmd_fixed_param *cmd;
1821 
1822 	if (cmd_id == WMI_D0_WOW_ENABLE_DISABLE_CMDID) {
1823 		cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *)
1824 			wmi_buf_data(buf);
1825 		return !cmd->enable;
1829 	}
1830 
1831 	return false;
1832 }
1833 #else
1834 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1835 {
1836 	return false;
1837 }
1838 
1839 #endif
1840 
1841 #ifdef WMI_INTERFACE_SEQUENCE_CHECK
1842 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1843 {
1844 	wmi_handle->wmi_sequence = 0;
1845 	wmi_handle->wmi_exp_sequence = 0;
1846 	wmi_handle->wmi_sequence_stop = false;
1847 }
1848 
1849 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1850 {
1851 	qdf_spinlock_create(&wmi_handle->wmi_seq_lock);
1852 	wmi_interface_sequence_reset(wmi_handle);
1853 }
1854 
1855 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1856 {
1857 	qdf_spinlock_destroy(&wmi_handle->wmi_seq_lock);
1858 }
1859 
1860 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1861 {
1862 	wmi_handle->wmi_sequence_stop = true;
1863 }
1864 
1865 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1866 					  HTC_PACKET *pkt,
1867 					  const char *func, uint32_t line)
1868 {
1869 	wmi_buf_t buf = GET_HTC_PACKET_NET_BUF_CONTEXT(pkt);
1870 	QDF_STATUS status;
1871 
1872 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1873 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1874 	if (QDF_STATUS_SUCCESS != status) {
1875 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1876 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1877 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1878 			     func, line, status);
1879 		qdf_mem_free(pkt);
1880 		return status;
1881 	}
1882 	/* Record the sequence number in the SKB */
1883 	qdf_nbuf_set_mark(buf, wmi_handle->wmi_sequence);
1884 	/* Increment the sequence number */
1885 	wmi_handle->wmi_sequence = (wmi_handle->wmi_sequence + 1)
1886 				   & (wmi_handle->wmi_max_cmds - 1);
1887 	qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1888 
1889 	return status;
1890 }
1891 
1892 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1893 						wmi_buf_t buf)
1894 {
1895 	/* Skip sequence check when wmi sequence stop is set */
1896 	if (wmi_handle->wmi_sequence_stop)
1897 		return;
1898 
1899 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1900 	/* Match the completion sequence and expected sequence number */
1901 	if (qdf_nbuf_get_mark(buf) != wmi_handle->wmi_exp_sequence) {
1902 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1903 		wmi_nofl_err("WMI Tx Completion Sequence number mismatch");
1904 		wmi_nofl_err("Expected %d Received %d",
1905 			     wmi_handle->wmi_exp_sequence,
1906 			     qdf_nbuf_get_mark(buf));
1907 		/* Trigger Recovery */
1908 		qdf_trigger_self_recovery(wmi_handle->soc,
1909 					  QDF_WMI_BUF_SEQUENCE_MISMATCH);
1910 	} else {
1911 		/* Increment the expected sequence number */
1912 		wmi_handle->wmi_exp_sequence =
1913 				(wmi_handle->wmi_exp_sequence + 1)
1914 				& (wmi_handle->wmi_max_cmds - 1);
1915 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1916 	}
1917 }
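/*
 * Illustrative note (editorial, not compiled): both wmi_sequence and
 * wmi_exp_sequence wrap with "& (wmi_max_cmds - 1)", which behaves as a
 * modulo only when wmi_max_cmds is a power of two. Assuming, purely for
 * illustration, wmi_max_cmds == 1024, the sequence counts 0..1023 and then
 * wraps to 0; a tx completion whose nbuf mark is 5 while wmi_exp_sequence
 * is 7 would trigger self recovery with QDF_WMI_BUF_SEQUENCE_MISMATCH.
 * The value 1024 is only an assumed example, not the configured maximum.
 */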
1918 #else
1919 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1920 {
1921 }
1922 
1923 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1924 {
1925 }
1926 
1927 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1928 {
1929 }
1930 
1931 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1932 {
1933 }
1934 
1935 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1936 					  HTC_PACKET *pkt,
1937 					  const char *func, uint32_t line)
1938 {
1939 	QDF_STATUS status;
1940 
1941 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1942 	if (QDF_STATUS_SUCCESS != status) {
1943 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1944 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1945 			     func, line, status);
1946 		qdf_mem_free(pkt);
1947 		return status;
1948 	}
1949 
1950 	return status;
1951 }
1952 
1953 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1954 						wmi_buf_t buf)
1955 {
1956 }
1957 #endif
1958 
1959 static inline void wmi_unified_debug_dump(wmi_unified_t wmi_handle)
1960 {
1961 	wmi_nofl_err("Endpoint ID = %d, Tx Queue Depth = %d, soc_id = %u, target type = %s",
1962 		     wmi_handle->wmi_endpoint_id,
1963 		     htc_get_tx_queue_depth(wmi_handle->htc_handle,
1964 					    wmi_handle->wmi_endpoint_id),
1965 		     wmi_handle->soc->soc_idx,
1966 		     (wmi_handle->target_type ==
1967 		      WMI_TLV_TARGET ? "WMI_TLV_TARGET" :
1968 						"WMI_NON_TLV_TARGET"));
1969 }
1970 
1971 #ifdef SYSTEM_PM_CHECK
1972 /**
1973  * wmi_set_system_pm_pkt_tag() - API to set tag for system pm packets
1974  * @htc_tag: HTC tag
1975  * @buf: wmi cmd buffer
1976  * @cmd_id: cmd id
1977  *
1978  * Return: None
1979  */
1980 static void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
1981 				      uint32_t cmd_id)
1982 {
1983 	switch (cmd_id) {
1984 	case WMI_WOW_ENABLE_CMDID:
1985 	case WMI_PDEV_SUSPEND_CMDID:
1986 		*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
1987 		break;
1988 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
1989 	case WMI_PDEV_RESUME_CMDID:
1990 		*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
1991 		break;
1992 	case WMI_D0_WOW_ENABLE_DISABLE_CMDID:
1993 		if (wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id))
1994 			*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
1995 		else
1996 			*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
1997 		break;
1998 	default:
1999 		break;
2000 	}
2001 }
2002 #else
2003 static inline void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
2004 					     uint32_t cmd_id)
2005 {
2006 }
2007 #endif
2008 
2009 QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf,
2010 				   uint32_t len, uint32_t cmd_id,
2011 				   const char *func, uint32_t line)
2012 {
2013 	HTC_PACKET *pkt;
2014 	uint16_t htc_tag = 0;
2015 	bool rtpm_inprogress;
2016 
2017 	rtpm_inprogress = wmi_get_runtime_pm_inprogress(wmi_handle);
2018 	if (rtpm_inprogress) {
2019 		htc_tag = wmi_handle->ops->wmi_set_htc_tx_tag(wmi_handle, buf,
2020 							      cmd_id);
2021 	} else if (qdf_atomic_read(&wmi_handle->is_target_suspended) &&
2022 		   !wmi_is_pm_resume_cmd(cmd_id) &&
2023 		   !wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id)) {
2024 		wmi_nofl_err("Target is suspended (via %s:%u)",
2025 			     func, line);
2026 		return QDF_STATUS_E_BUSY;
2027 	}
2028 
2029 	if (wmi_handle->wmi_stopinprogress) {
2030 		wmi_nofl_err("%s:%d, WMI stop in progress, wmi_handle:%pK",
2031 			     func, line, wmi_handle);
2032 		return QDF_STATUS_E_INVAL;
2033 	}
2034 
2035 #ifndef WMI_NON_TLV_SUPPORT
2036 	/* Do sanity check on the TLV parameter structure */
2037 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2038 		void *buf_ptr = (void *)qdf_nbuf_data(buf);
2039 
2040 		if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id)
2041 			!= 0) {
2042 			wmi_nofl_err("%s:%d, Invalid WMI Param Buffer for Cmd:%d",
2043 				     func, line, cmd_id);
2044 			return QDF_STATUS_E_INVAL;
2045 		}
2046 	}
2047 #endif
2048 
2049 	if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
2050 		wmi_nofl_err("%s:%d, Failed to send cmd %x, no memory",
2051 			     func, line, cmd_id);
2052 		return QDF_STATUS_E_NOMEM;
2053 	}
2054 
2055 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2056 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2057 
2058 	qdf_atomic_inc(&wmi_handle->pending_cmds);
2059 	if (qdf_atomic_read(&wmi_handle->pending_cmds) >=
2060 			wmi_handle->wmi_max_cmds) {
2061 		wmi_nofl_err("hostcredits = %d",
2062 			     wmi_get_host_credits(wmi_handle));
2063 		htc_dump_counter_info(wmi_handle->htc_handle);
2064 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2065 		wmi_nofl_err("%s:%d, MAX %d WMI Pending cmds reached",
2066 			     func, line, wmi_handle->wmi_max_cmds);
2067 		wmi_unified_debug_dump(wmi_handle);
2068 		htc_ce_tasklet_debug_dump(wmi_handle->htc_handle);
2069 		qdf_trigger_self_recovery(wmi_handle->soc->wmi_psoc,
2070 					  QDF_WMI_EXCEED_MAX_PENDING_CMDS);
2071 		return QDF_STATUS_E_BUSY;
2072 	}
2073 
2074 	pkt = qdf_mem_malloc_fl(sizeof(*pkt), func, line);
2075 	if (!pkt) {
2076 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2077 		return QDF_STATUS_E_NOMEM;
2078 	}
2079 
2080 	if (!rtpm_inprogress)
2081 		wmi_set_system_pm_pkt_tag(&htc_tag, buf, cmd_id);
2082 
2083 	SET_HTC_PACKET_INFO_TX(pkt,
2084 			       NULL,
2085 			       qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
2086 			       wmi_handle->wmi_endpoint_id, htc_tag);
2087 
2088 	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);
2089 	wmi_log_cmd_id(cmd_id, htc_tag);
2090 	wmi_ext_dbg_msg_cmd_record(wmi_handle,
2091 				   qdf_nbuf_data(buf), qdf_nbuf_len(buf));
2092 #ifdef WMI_INTERFACE_EVENT_LOGGING
2093 	if (wmi_handle->log_info.wmi_logging_enable) {
2094 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2095 		/*
2096 		 * Record 16 bytes of WMI cmd data -
2097 		 * exclude TLV and WMI headers
2098 		 *
2099 		 * WMI mgmt command already recorded in wmi_mgmt_cmd_record
2100 		 */
2101 		if (wmi_handle->ops->is_management_record(cmd_id) == false) {
2102 			uint8_t *tmpbuf = (uint8_t *)qdf_nbuf_data(buf) +
2103 				wmi_handle->soc->buf_offset_command;
2104 
2105 			WMI_COMMAND_RECORD(wmi_handle, cmd_id, tmpbuf);
2106 			wmi_specific_cmd_record(wmi_handle, cmd_id, tmpbuf);
2107 		}
2108 
2109 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2110 	}
2111 #endif
2112 	return wmi_htc_send_pkt(wmi_handle, pkt, func, line);
2113 }
2114 qdf_export_symbol(wmi_unified_cmd_send_fl);
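/*
 * Illustrative usage sketch (editorial, not compiled): callers normally
 * pair wmi_buf_alloc() with wmi_unified_cmd_send() and free the buffer
 * themselves only when the send fails, since none of the error paths above
 * free the caller's buffer. The structure and command id below are
 * placeholders, not real definitions:
 *
 *	buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd));
 *	if (!buf)
 *		return QDF_STATUS_E_NOMEM;
 *	cmd = (some_fixed_param *)wmi_buf_data(buf);
 *	... populate cmd ...
 *	status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd),
 *				      SOME_CMDID);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		wmi_buf_free(buf);
 */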
2115 
2116 /**
2117  * wmi_unified_get_event_handler_ix() - gives event handler's index
2118  * @wmi_handle: handle to wmi
2119  * @event_id: wmi event id
2120  *
2121  * Return: event handler's index, or -1 if no handler is registered
2122  */
2123 static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
2124 					    uint32_t event_id)
2125 {
2126 	uint32_t idx = 0;
2127 	int32_t invalid_idx = -1;
2128 	struct wmi_soc *soc = wmi_handle->soc;
2129 
2130 	for (idx = 0; (idx < soc->max_event_idx &&
2131 		       idx < WMI_UNIFIED_MAX_EVENT); ++idx) {
2132 		if (wmi_handle->event_id[idx] == event_id &&
2133 		    wmi_handle->event_handler[idx]) {
2134 			return idx;
2135 		}
2136 	}
2137 
2138 	return invalid_idx;
2139 }
2140 
2141 /**
2142  * wmi_register_event_handler_with_ctx() - register event handler with
2143  * exec ctx and buffer type
2144  * @wmi_handle: handle to wmi
2145  * @event_id: wmi event id
2146  * @handler_func: wmi event handler function
2147  * @rx_ctx: rx execution context for wmi rx events
2148  * @rx_buf_type: rx buffer type for wmi rx events
2149  *
2150  * Return: QDF_STATUS_SUCCESS on successful register event else failure.
2151  */
2152 static QDF_STATUS
2153 wmi_register_event_handler_with_ctx(wmi_unified_t wmi_handle,
2154 				    uint32_t event_id,
2155 				    wmi_unified_event_handler handler_func,
2156 				    enum wmi_rx_exec_ctx rx_ctx,
2157 				    enum wmi_rx_buff_type rx_buf_type)
2158 {
2159 	uint32_t idx = 0;
2160 	uint32_t evt_id;
2161 	struct wmi_soc *soc;
2162 
2163 	if (!wmi_handle) {
2164 		wmi_err("WMI handle is NULL");
2165 		return QDF_STATUS_E_FAILURE;
2166 	}
2167 
2168 	soc = wmi_handle->soc;
2169 
2170 	if (event_id >= wmi_events_max) {
2171 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2172 			  "%s: Event id %d is unavailable",
2173 					__func__, event_id);
2174 		return QDF_STATUS_E_FAILURE;
2175 	}
2176 
2177 	if (wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2178 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
2179 			  "%s: Event id %d is not supported",
2180 			  __func__, event_id);
2181 		return QDF_STATUS_E_NOSUPPORT;
2182 	}
2183 	evt_id = wmi_handle->wmi_events[event_id];
2184 
2185 	if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) {
2186 		wmi_info("event handler already registered 0x%x", evt_id);
2187 		return QDF_STATUS_E_FAILURE;
2188 	}
2189 	if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
2190 		wmi_err("no more event handlers 0x%x",
2191 			 evt_id);
2192 		return QDF_STATUS_E_FAILURE;
2193 	}
2194 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
2195 		  "Registered event handler for event 0x%8x", evt_id);
2196 	idx = soc->max_event_idx;
2197 	wmi_handle->event_handler[idx] = handler_func;
2198 	wmi_handle->event_id[idx] = evt_id;
2199 
2200 	qdf_spin_lock_bh(&soc->ctx_lock);
2201 	wmi_handle->ctx[idx].exec_ctx = rx_ctx;
2202 	wmi_handle->ctx[idx].buff_type = rx_buf_type;
2203 	qdf_spin_unlock_bh(&soc->ctx_lock);
2204 	soc->max_event_idx++;
2205 
2206 	return QDF_STATUS_SUCCESS;
2207 }
2208 
2209 QDF_STATUS
2210 wmi_unified_register_event(wmi_unified_t wmi_handle,
2211 			   uint32_t event_id,
2212 			   wmi_unified_event_handler handler_func)
2213 {
2214 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2215 						   handler_func,
2216 						   WMI_RX_UMAC_CTX,
2217 						   WMI_RX_PROCESSED_BUFF);
2218 }
2219 
2220 QDF_STATUS
2221 wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
2222 				   wmi_conv_event_id event_id,
2223 				   wmi_unified_event_handler handler_func,
2224 				   uint8_t rx_ctx)
2225 {
2226 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2227 						   handler_func, rx_ctx,
2228 						   WMI_RX_PROCESSED_BUFF);
2229 }
2230 
2231 qdf_export_symbol(wmi_unified_register_event_handler);
2232 
2233 QDF_STATUS
2234 wmi_unified_register_raw_event_handler(wmi_unified_t wmi_handle,
2235 				       wmi_conv_event_id event_id,
2236 				       wmi_unified_event_handler handler_func,
2237 				       enum wmi_rx_exec_ctx rx_ctx)
2238 {
2239 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2240 						   handler_func, rx_ctx,
2241 						   WMI_RX_RAW_BUFF);
2242 }
2243 
2244 qdf_export_symbol(wmi_unified_register_raw_event_handler);
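/*
 * Illustrative usage sketch (editorial, not compiled): handlers are
 * registered against the abstract unified event id, which
 * wmi_register_event_handler_with_ctx() translates to the target specific
 * id through wmi_handle->wmi_events[]. The event id and handler names
 * below are placeholders:
 *
 *	status = wmi_unified_register_event_handler(wmi_handle,
 *						    wmi_some_event_id,
 *						    my_event_handler,
 *						    WMI_RX_UMAC_CTX);
 */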
2245 
2246 QDF_STATUS wmi_unified_unregister_event(wmi_unified_t wmi_handle,
2247 					uint32_t event_id)
2248 {
2249 	uint32_t idx = 0;
2250 	uint32_t evt_id;
2251 	struct wmi_soc *soc;
2252 
2253 	if (!wmi_handle) {
2254 		wmi_err("WMI handle is NULL");
2255 		return QDF_STATUS_E_FAILURE;
2256 	}
2257 
2258 	soc = wmi_handle->soc;
2259 	if (event_id >= wmi_events_max ||
2260 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2261 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2262 			  "%s: Event id %d is unavailable",
2263 					__func__, event_id);
2264 		return QDF_STATUS_E_FAILURE;
2265 	}
2266 	evt_id = wmi_handle->wmi_events[event_id];
2267 
2268 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2269 	if (idx == -1) {
2270 		wmi_warn("event handler is not registered: evt id 0x%x",
2271 			 evt_id);
2272 		return QDF_STATUS_E_FAILURE;
2273 	}
2274 	wmi_handle->event_handler[idx] = NULL;
2275 	wmi_handle->event_id[idx] = 0;
2276 	--soc->max_event_idx;
2277 	wmi_handle->event_handler[idx] =
2278 		wmi_handle->event_handler[soc->max_event_idx];
2279 	wmi_handle->event_id[idx] =
2280 		wmi_handle->event_id[soc->max_event_idx];
2281 
2282 	qdf_spin_lock_bh(&soc->ctx_lock);
2283 
2284 	wmi_handle->ctx[idx].exec_ctx =
2285 		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
2286 	wmi_handle->ctx[idx].buff_type =
2287 		wmi_handle->ctx[soc->max_event_idx].buff_type;
2288 
2289 	qdf_spin_unlock_bh(&soc->ctx_lock);
2290 
2291 	return QDF_STATUS_SUCCESS;
2292 }
2293 
2294 QDF_STATUS wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
2295 						wmi_conv_event_id event_id)
2296 {
2297 	uint32_t idx = 0;
2298 	uint32_t evt_id;
2299 	struct wmi_soc *soc;
2300 
2301 	if (!wmi_handle) {
2302 		wmi_err("WMI handle is NULL");
2303 		return QDF_STATUS_E_FAILURE;
2304 	}
2305 
2306 	soc = wmi_handle->soc;
2307 
2308 	if (event_id >= wmi_events_max) {
2309 		wmi_err("Event id %d is unavailable", event_id);
2310 		return QDF_STATUS_E_FAILURE;
2311 	}
2312 
2313 	if (wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2314 		wmi_debug("Event id %d is not supported", event_id);
2315 		return QDF_STATUS_E_NOSUPPORT;
2316 	}
2317 
2318 	evt_id = wmi_handle->wmi_events[event_id];
2319 
2320 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2321 	if (idx == -1) {
2322 		wmi_err("event handler is not registered: evt id 0x%x",
2323 			 evt_id);
2324 		return QDF_STATUS_E_FAILURE;
2325 	}
2326 	wmi_handle->event_handler[idx] = NULL;
2327 	wmi_handle->event_id[idx] = 0;
2328 	--soc->max_event_idx;
2329 	wmi_handle->event_handler[idx] =
2330 		wmi_handle->event_handler[soc->max_event_idx];
2331 	wmi_handle->event_id[idx] =
2332 		wmi_handle->event_id[soc->max_event_idx];
2333 
2334 	qdf_spin_lock_bh(&soc->ctx_lock);
2335 
2336 	wmi_handle->ctx[idx].exec_ctx =
2337 		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
2338 	wmi_handle->ctx[idx].buff_type =
2339 		wmi_handle->ctx[soc->max_event_idx].buff_type;
2340 
2341 	qdf_spin_unlock_bh(&soc->ctx_lock);
2342 
2343 	return QDF_STATUS_SUCCESS;
2344 }
2345 qdf_export_symbol(wmi_unified_unregister_event_handler);
2346 
2347 static void
2348 wmi_process_rx_diag_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2349 					    void *evt_buf)
2350 {
2351 	uint32_t num_diag_events_pending;
2352 
2353 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
2354 	if (RX_DIAG_WQ_MAX_SIZE > 0) {
2355 		num_diag_events_pending = qdf_nbuf_queue_len(
2356 						&wmi_handle->diag_event_queue);
2357 
2358 		if (num_diag_events_pending >= RX_DIAG_WQ_MAX_SIZE) {
2359 			qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2360 			wmi_handle->wmi_rx_diag_events_dropped++;
2361 			wmi_debug_rl("Rx diag events dropped count: %d",
2362 				     wmi_handle->wmi_rx_diag_events_dropped);
2363 			qdf_nbuf_free(evt_buf);
2364 			return;
2365 		}
2366 	}
2367 
2368 	qdf_nbuf_queue_add(&wmi_handle->diag_event_queue, evt_buf);
2369 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2370 	qdf_queue_work(0, wmi_handle->wmi_rx_diag_work_queue,
2371 		       &wmi_handle->rx_diag_event_work);
2372 }
2373 
2374 void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2375 					    void *evt_buf)
2376 {
2377 
2378 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
2379 	qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
2380 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
2381 	qdf_queue_work(0, wmi_handle->wmi_rx_work_queue,
2382 			&wmi_handle->rx_event_work);
2383 
2384 	return;
2385 }
2386 
2387 qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx);
2388 
2389 uint32_t wmi_critical_events_in_flight(struct wmi_unified *wmi)
2390 {
2391 	return qdf_atomic_read(&wmi->critical_events_in_flight);
2392 }
2393 
2394 static bool
2395 wmi_is_event_critical(struct wmi_unified *wmi_handle, uint32_t event_id)
2396 {
2397 	if (wmi_handle->wmi_events[wmi_roam_synch_event_id] == event_id)
2398 		return true;
2399 
2400 	return false;
2401 }
2402 
2403 static QDF_STATUS wmi_discard_fw_event(struct scheduler_msg *msg)
2404 {
2405 	struct wmi_process_fw_event_params *event_param;
2406 
2407 	if (!msg->bodyptr)
2408 		return QDF_STATUS_E_INVAL;
2409 
2410 	event_param = (struct wmi_process_fw_event_params *)msg->bodyptr;
2411 	qdf_nbuf_free(event_param->evt_buf);
2412 	qdf_mem_free(msg->bodyptr);
2413 	msg->bodyptr = NULL;
2414 	msg->bodyval = 0;
2415 	msg->type = 0;
2416 
2417 	return QDF_STATUS_SUCCESS;
2418 }
2419 
2420 static QDF_STATUS wmi_process_fw_event_handler(struct scheduler_msg *msg)
2421 {
2422 	struct wmi_process_fw_event_params *params =
2423 		(struct wmi_process_fw_event_params *)msg->bodyptr;
2424 	struct wmi_unified *wmi_handle;
2425 	uint32_t event_id;
2426 
2427 	wmi_handle = (struct wmi_unified *)params->wmi_handle;
2428 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params->evt_buf),
2429 				 WMI_CMD_HDR, COMMANDID);
2430 	wmi_process_fw_event(wmi_handle, params->evt_buf);
2431 
2432 	if (wmi_is_event_critical(wmi_handle, event_id))
2433 		qdf_atomic_dec(&wmi_handle->critical_events_in_flight);
2434 
2435 	qdf_mem_free(msg->bodyptr);
2436 
2437 	return QDF_STATUS_SUCCESS;
2438 }
2439 
2440 /**
2441  * wmi_process_fw_event_sched_thread_ctx() - common event handler to serialize
2442  *                                  event processing through scheduler thread
2443  * @wmi: wmi handle
2444  * @ev: event buffer
2445  *
2446  * Return: QDF_STATUS_SUCCESS on success, or a QDF error status on
2447  * failure
2448  */
2449 static QDF_STATUS
2450 wmi_process_fw_event_sched_thread_ctx(struct wmi_unified *wmi,
2451 				      void *ev)
2452 {
2453 	struct wmi_process_fw_event_params *params_buf;
2454 	struct scheduler_msg msg = { 0 };
2455 	uint32_t event_id;
2456 
2457 	params_buf = qdf_mem_malloc(sizeof(struct wmi_process_fw_event_params));
2458 	if (!params_buf) {
2459 		wmi_err("malloc failed");
2460 		qdf_nbuf_free(ev);
2461 		return QDF_STATUS_E_NOMEM;
2462 	}
2463 
2464 	params_buf->wmi_handle = wmi;
2465 	params_buf->evt_buf = ev;
2466 
2467 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params_buf->evt_buf),
2468 				 WMI_CMD_HDR, COMMANDID);
2469 	if (wmi_is_event_critical(wmi, event_id))
2470 		qdf_atomic_inc(&wmi->critical_events_in_flight);
2471 
2472 	msg.bodyptr = params_buf;
2473 	msg.bodyval = 0;
2474 	msg.callback = wmi_process_fw_event_handler;
2475 	msg.flush_callback = wmi_discard_fw_event;
2476 
2477 	if (QDF_STATUS_SUCCESS !=
2478 		scheduler_post_message(QDF_MODULE_ID_TARGET_IF,
2479 				       QDF_MODULE_ID_TARGET_IF,
2480 				       QDF_MODULE_ID_TARGET_IF, &msg)) {
2481 		qdf_nbuf_free(ev);
2482 		qdf_mem_free(params_buf);
2483 		return QDF_STATUS_E_FAULT;
2484 	}
2485 
2486 	return QDF_STATUS_SUCCESS;
2487 }
2488 
2489 /**
2490  * wmi_get_pdev_ep: Get wmi handle based on endpoint
2491  * @soc: handle to wmi soc
2492  * @ep: endpoint id
2493  *
2494  * Return: wmi handle of the pdev mapped to @ep, or NULL if none matches
2495  */
2496 static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc,
2497 						HTC_ENDPOINT_ID ep)
2498 {
2499 	uint32_t i;
2500 
2501 	for (i = 0; i < WMI_MAX_RADIOS; i++)
2502 		if (soc->wmi_endpoint_id[i] == ep)
2503 			break;
2504 
2505 	if (i == WMI_MAX_RADIOS)
2506 		return NULL;
2507 
2508 	return soc->wmi_pdev[i];
2509 }
2510 
2511 /**
2512  * wmi_mtrace_rx() - Wrapper function for the qdf_mtrace api
2513  * @message_id: 32-bit WMI message ID
2514  * @vdev_id: Vdev ID
2515  * @data: Actual message contents
2516  *
2517  * This function converts the 32-bit WMI message ID into the 15-bit
2518  * message ID format used by qdf_mtrace, since qdf_mtrace reserves
2519  * only 15 bits for the message ID.
2520  * Of these 15 bits, the lower 7 bits carry the command index within
2521  * its group and the upper 8 bits carry the WMI_GRP_ID, so up to 256
2522  * groups with at most 128 commands each can be represented with this
2523  * notation.
2524  *
2525  * Return: None
2526  */
2527 static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data)
2528 {
2529 	uint16_t mtrace_message_id;
2530 
2531 	mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) |
2532 		(QDF_WMI_MTRACE_GRP_ID(message_id) <<
2533 						QDF_WMI_MTRACE_CMD_NUM_BITS);
2534 	qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA,
2535 		   mtrace_message_id, vdev_id, data);
2536 }
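/*
 * Illustrative note (editorial, not compiled): assuming
 * QDF_WMI_MTRACE_CMD_NUM_BITS is 7, a message whose in-group command index
 * is 5 and whose group id is 3 is compressed to (5 | (3 << 7)) = 0x185,
 * which fits in the 15 bits available to qdf_mtrace(). The bit width is
 * inferred from the macro names above and is stated here as an assumption,
 * not an authoritative value.
 */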
2537 
2538 /**
2539  * wmi_process_control_rx() - process fw event callbacks
2540  * @wmi_handle: handle to wmi_unified
2541  * @evt_buf: handle to wmi_buf_t
2542  *
2543  * Return: none
2544  */
2545 static void wmi_process_control_rx(struct wmi_unified *wmi_handle,
2546 				   wmi_buf_t evt_buf)
2547 {
2548 	struct wmi_soc *soc = wmi_handle->soc;
2549 	uint32_t id;
2550 	uint32_t idx;
2551 	enum wmi_rx_exec_ctx exec_ctx;
2552 
2553 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2554 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2555 	if (qdf_unlikely(idx == A_ERROR)) {
2556 		wmi_debug("no handler registered for event id 0x%x", id);
2557 		qdf_nbuf_free(evt_buf);
2558 		return;
2559 	}
2560 	wmi_mtrace_rx(id, 0xFF, idx);
2561 	qdf_spin_lock_bh(&soc->ctx_lock);
2562 	exec_ctx = wmi_handle->ctx[idx].exec_ctx;
2563 	qdf_spin_unlock_bh(&soc->ctx_lock);
2564 
2565 #ifdef WMI_INTERFACE_EVENT_LOGGING
2566 	if (wmi_handle->log_info.wmi_logging_enable) {
2567 		uint8_t *data;
2568 		data = qdf_nbuf_data(evt_buf);
2569 
2570 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2571 		/* Exclude 4 bytes of TLV header */
2572 		if (wmi_handle->ops->is_diag_event(id)) {
2573 			WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id,
2574 				((uint8_t *) data +
2575 				wmi_handle->soc->buf_offset_event));
2576 		} else if (wmi_handle->ops->is_management_record(id)) {
2577 			WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id,
2578 				((uint8_t *) data +
2579 				wmi_handle->soc->buf_offset_event));
2580 		} else {
2581 			WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data +
2582 				wmi_handle->soc->buf_offset_event));
2583 		}
2584 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2585 	}
2586 #endif
2587 
2588 	if (exec_ctx == WMI_RX_WORK_CTX) {
2589 		wmi_process_fw_event_worker_thread_ctx(wmi_handle, evt_buf);
2590 	} else if (exec_ctx == WMI_RX_TASKLET_CTX) {
2591 		wmi_process_fw_event(wmi_handle, evt_buf);
2592 	} else if (exec_ctx == WMI_RX_SERIALIZER_CTX) {
2593 		wmi_process_fw_event_sched_thread_ctx(wmi_handle, evt_buf);
2594 	} else if (exec_ctx == WMI_RX_DIAG_WORK_CTX) {
2595 		wmi_process_rx_diag_event_worker_thread_ctx(wmi_handle,
2596 							    evt_buf);
2597 	} else {
2598 		wmi_err("Invalid event context %d", exec_ctx);
2599 		qdf_nbuf_free(evt_buf);
2600 	}
2601 
2602 }
2603 
2604 /**
2605  * wmi_control_rx() - process fw event callbacks
2606  * @ctx: handle to wmi
2607  * @htc_packet: pointer to htc packet
2608  *
2609  * Return: none
2610  */
2611 static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
2612 {
2613 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2614 	struct wmi_unified *wmi_handle;
2615 	wmi_buf_t evt_buf;
2616 
2617 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2618 
2619 	wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint);
2620 	if (!wmi_handle) {
2621 		wmi_err("unable to get wmi_handle to Endpoint %d",
2622 			htc_packet->Endpoint);
2623 		qdf_nbuf_free(evt_buf);
2624 		return;
2625 	}
2626 
2627 	wmi_process_control_rx(wmi_handle, evt_buf);
2628 }
2629 
2630 #if defined(WLAN_FEATURE_WMI_DIAG_OVER_CE7) || \
2631 	defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
2632 /**
2633  * wmi_control_diag_rx() - process diag fw event callbacks
2634  * @ctx: handle to wmi
2635  * @htc_packet: pointer to htc packet
2636  *
2637  * Return: none
2638  */
2639 static void wmi_control_diag_rx(void *ctx, HTC_PACKET *htc_packet)
2640 {
2641 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2642 	struct wmi_unified *wmi_handle;
2643 	wmi_buf_t evt_buf;
2644 
2645 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2646 
2647 	wmi_handle = soc->wmi_pdev[0];
2648 
2649 	if (!wmi_handle) {
2650 		wmi_err("unable to get wmi_handle for diag event end point id:%d", htc_packet->Endpoint);
2651 		qdf_nbuf_free(evt_buf);
2652 		return;
2653 	}
2654 
2655 	wmi_process_control_rx(wmi_handle, evt_buf);
2656 }
2657 #endif
2658 
2659 #if defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
2660 /**
2661  * wmi_control_dbr_rx() - process dbr fw event callbacks
2662  * @ctx: handle to wmi
2663  * @htc_packet: pointer to htc packet
2664  *
2665  * Return: none
2666  */
2667 static void wmi_control_dbr_rx(void *ctx, HTC_PACKET *htc_packet)
2668 {
2669 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2670 	struct wmi_unified *wmi_handle;
2671 	wmi_buf_t evt_buf;
2672 
2673 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2674 	wmi_handle = soc->wmi_pdev[0];
2675 
2676 	if (!wmi_handle) {
2677 		wmi_err("unable to get wmi_handle for dbr event endpoint id:%d",
2678 			htc_packet->Endpoint);
2679 		qdf_nbuf_free(evt_buf);
2680 		return;
2681 	}
2682 
2683 	wmi_process_control_rx(wmi_handle, evt_buf);
2684 }
2685 #endif
2686 
2687 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
2688 QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle,
2689 					 wmi_buf_t buf, uint32_t buflen,
2690 					 uint32_t cmd_id)
2691 {
2692 	QDF_STATUS status;
2693 	int32_t ret;
2694 
2695 	if (!qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR))) {
2696 		wmi_err("Failed to send cmd %x, no memory", cmd_id);
2697 		return QDF_STATUS_E_NOMEM;
2698 	}
2699 
2700 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2701 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2702 	wmi_debug("Sending WMI_CMD_ID: 0x%x over qmi", cmd_id);
2703 	status = qdf_wmi_send_recv_qmi(qdf_nbuf_data(buf),
2704 				       buflen + sizeof(WMI_CMD_HDR),
2705 				       wmi_handle,
2706 				       wmi_process_qmi_fw_event);
2707 	if (QDF_IS_STATUS_ERROR(status)) {
2708 		qdf_nbuf_pull_head(buf, sizeof(WMI_CMD_HDR));
2709 		wmi_warn("WMI send on QMI failed. Retrying WMI on HTC");
2710 	} else {
2711 		ret = qdf_atomic_inc_return(&wmi_handle->num_stats_over_qmi);
2712 		wmi_debug("num stats over qmi: %d", ret);
2713 		wmi_buf_free(buf);
2714 	}
2715 
2716 	return status;
2717 }
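/*
 * Illustrative note (editorial, not compiled): on a QMI transport error
 * the WMI_CMD_HDR pushed above is popped again, leaving the buffer in its
 * original state so the caller can retry the same buffer over HTC (e.g.
 * via wmi_unified_cmd_send()); on success the buffer is consumed here with
 * wmi_buf_free(), so the caller must not free it again.
 */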
2718 
2719 static int __wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2720 {
2721 	struct wmi_unified *wmi_handle = wmi_cb_ctx;
2722 	wmi_buf_t evt_buf;
2723 	uint32_t evt_id;
2724 
2725 	if (!wmi_handle || !buf || !len) {
2726 		wmi_err_rl("%s is invalid", !wmi_handle ?
2727 				"wmi_handle" : !buf ? "buf" : "length");
2728 		return -EINVAL;
2729 	}
2730 
2731 	evt_buf = wmi_buf_alloc(wmi_handle, len);
2732 	if (!evt_buf)
2733 		return -ENOMEM;
2734 
2735 	qdf_mem_copy(qdf_nbuf_data(evt_buf), buf, len);
2736 	evt_id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2737 	wmi_debug("Received WMI_EVT_ID: 0x%x over qmi", evt_id);
2738 	wmi_process_control_rx(wmi_handle, evt_buf);
2739 
2740 	return 0;
2741 }
2742 
2743 int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2744 {
2745 	struct qdf_op_sync *op_sync;
2746 	int ret;
2747 
2748 	if (qdf_op_protect(&op_sync))
2749 		return -EINVAL;
2750 	ret = __wmi_process_qmi_fw_event(wmi_cb_ctx, buf, len);
2751 	qdf_op_unprotect(op_sync);
2752 
2753 	return ret;
2754 }
2755 #endif
2756 
2757 /**
2758  * wmi_process_fw_event() - process any fw event
2759  * @wmi_handle: wmi handle
2760  * @evt_buf: fw event buffer
2761  *
2762  * This function processes the fw event in the caller's context
2763  *
2764  * Return: none
2765  */
2766 void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2767 {
2768 	__wmi_control_rx(wmi_handle, evt_buf);
2769 }
2770 
2771 /**
2772  * __wmi_control_rx() - process a wmi event and call its registered handler
2773  * @wmi_handle: wmi handle
2774  * @evt_buf: fw event buffer
2775  *
2776  * Return: none
2777  */
2778 void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2779 {
2780 	uint32_t id;
2781 	uint8_t *data;
2782 	uint32_t len;
2783 	void *wmi_cmd_struct_ptr = NULL;
2784 #ifndef WMI_NON_TLV_SUPPORT
2785 	int tlv_ok_status = 0;
2786 #endif
2787 	uint32_t idx = 0;
2788 	struct wmi_raw_event_buffer ev_buf;
2789 	enum wmi_rx_buff_type ev_buff_type;
2790 
2791 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2792 
2793 	wmi_ext_dbg_msg_event_record(wmi_handle, qdf_nbuf_data(evt_buf),
2794 				     qdf_nbuf_len(evt_buf));
2795 
2796 	if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
2797 		goto end;
2798 
2799 	data = qdf_nbuf_data(evt_buf);
2800 	len = qdf_nbuf_len(evt_buf);
2801 
2802 #ifndef WMI_NON_TLV_SUPPORT
2803 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2804 		/* Validate and pad(if necessary) the TLVs */
2805 		tlv_ok_status =
2806 			wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle,
2807 							data, len, id,
2808 							&wmi_cmd_struct_ptr);
2809 		if (tlv_ok_status != 0) {
2810 			QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2811 				  "%s: Error: id=0x%x, wmitlv check status=%d",
2812 				  __func__, id, tlv_ok_status);
2813 			goto end;
2814 		}
2815 	}
2816 #endif
2817 
2818 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2819 	if (idx == A_ERROR) {
2820 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2821 		   "%s : event handler is not registered: event id 0x%x",
2822 			__func__, id);
2823 		goto end;
2824 	}
2825 #ifdef WMI_INTERFACE_EVENT_LOGGING
2826 	if (wmi_handle->log_info.wmi_logging_enable) {
2827 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2828 		/* Exclude 4 bytes of TLV header */
2829 		if (wmi_handle->ops->is_diag_event(id)) {
2830 			/*
2831 			 * skip diag event logging in WMI event buffer
2832 			 * as its already logged in WMI RX event buffer
2833 			 */
2834 		} else if (wmi_handle->ops->is_management_record(id)) {
2835 			/*
2836 			 * skip wmi mgmt event logging in WMI event buffer
2837 			 * as its already logged in WMI RX event buffer
2838 			 */
2839 		} else {
2840 			uint8_t *tmpbuf = (uint8_t *)data +
2841 					wmi_handle->soc->buf_offset_event;
2842 
2843 			WMI_EVENT_RECORD(wmi_handle, id, tmpbuf);
2844 			wmi_specific_evt_record(wmi_handle, id, tmpbuf);
2845 		}
2846 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2847 	}
2848 #endif
2849 	/* Call the WMI registered event handler */
2850 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2851 		ev_buff_type = wmi_handle->ctx[idx].buff_type;
2852 		if (ev_buff_type == WMI_RX_PROCESSED_BUFF) {
2853 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2854 				wmi_cmd_struct_ptr, len);
2855 		} else if (ev_buff_type == WMI_RX_RAW_BUFF) {
2856 			ev_buf.evt_raw_buf = data;
2857 			ev_buf.evt_processed_buf = wmi_cmd_struct_ptr;
2858 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2859 							(void *)&ev_buf, len);
2860 		}
2861 	} else {
2862 		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2863 			data, len);
2864 	}
2865 
2866 end:
2867 	/* Free event buffer and allocated event tlv */
2868 #ifndef WMI_NON_TLV_SUPPORT
2869 	if (wmi_handle->target_type == WMI_TLV_TARGET)
2870 		wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr);
2871 #endif
2872 
2873 	qdf_nbuf_free(evt_buf);
2874 
2875 }
2876 
2877 #define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */
2878 
2879 static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id)
2880 {
2881 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2882 		  "%s: WLAN_BUG_RCA: Message type %x has exceeded its allotted time of %ds",
2883 		  __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000);
2884 }
2885 
2886 #ifdef CONFIG_SLUB_DEBUG_ON
2887 static void wmi_workqueue_watchdog_bite(void *arg)
2888 {
2889 	struct wmi_wq_dbg_info *info = arg;
2890 
2891 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
2892 	qdf_print_thread_trace(info->task);
2893 
2894 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2895 		  "%s: Going down for WMI WQ Watchdog Bite!", __func__);
2896 	QDF_BUG(0);
2897 }
2898 #else
2899 static inline void wmi_workqueue_watchdog_bite(void *arg)
2900 {
2901 	struct wmi_wq_dbg_info *info = arg;
2902 
2903 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
2904 
2905 	qdf_print_thread_trace(info->task);
2906 }
2907 #endif
2908 
2909 /**
2910  * wmi_rx_event_work() - process rx event in rx work queue context
2911  * @arg: opaque pointer to wmi handle
2912  *
2913  * This function processes fw events serialized through the rx worker thread.
2914  *
2915  * Return: none
2916  */
2917 static void wmi_rx_event_work(void *arg)
2918 {
2919 	wmi_buf_t buf;
2920 	struct wmi_unified *wmi = arg;
2921 	qdf_timer_t wd_timer;
2922 	struct wmi_wq_dbg_info info;
2923 
2924 	/* initialize WMI workqueue watchdog timer */
2925 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
2926 			&info, QDF_TIMER_TYPE_SW);
2927 	qdf_spin_lock_bh(&wmi->eventq_lock);
2928 	buf = qdf_nbuf_queue_remove(&wmi->event_queue);
2929 	qdf_spin_unlock_bh(&wmi->eventq_lock);
2930 	while (buf) {
2931 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
2932 		info.wd_msg_type_id =
2933 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
2934 		info.wmi_wq = wmi->wmi_rx_work_queue;
2935 		info.task = qdf_get_current_task();
2936 		__wmi_control_rx(wmi, buf);
2937 		qdf_timer_stop(&wd_timer);
2938 		qdf_spin_lock_bh(&wmi->eventq_lock);
2939 		buf = qdf_nbuf_queue_remove(&wmi->event_queue);
2940 		qdf_spin_unlock_bh(&wmi->eventq_lock);
2941 	}
2942 	qdf_timer_free(&wd_timer);
2943 }
2944 
2945 /**
2946  * wmi_rx_diag_event_work() - process rx diag event in work queue context
2947  * @arg: opaque pointer to wmi handle
2948  *
2949  * This function processes fw diag events serialized through the rx worker thread.
2950  *
2951  * Return: none
2952  */
2953 static void wmi_rx_diag_event_work(void *arg)
2954 {
2955 	wmi_buf_t buf;
2956 	struct wmi_unified *wmi = arg;
2957 	qdf_timer_t wd_timer;
2958 	struct wmi_wq_dbg_info info;
2959 	uint32_t diag_event_process_count = 0;
2960 
2961 	if (!wmi) {
2962 		wmi_err("Invalid WMI handle");
2963 		return;
2964 	}
2965 
2966 	/* initialize WMI workqueue watchdog timer */
2967 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
2968 		       &info, QDF_TIMER_TYPE_SW);
2969 	qdf_spin_lock_bh(&wmi->diag_eventq_lock);
2970 	buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
2971 	qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
2972 	while (buf) {
2973 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
2974 		info.wd_msg_type_id =
2975 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
2976 		info.wmi_wq = NULL;
2977 		info.task = qdf_get_current_task();
2978 		__wmi_control_rx(wmi, buf);
2979 		qdf_timer_stop(&wd_timer);
2980 
2981 		if (diag_event_process_count++ >
2982 		    RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT) {
2983 			qdf_queue_work(0, wmi->wmi_rx_diag_work_queue,
2984 				       &wmi->rx_diag_event_work);
2985 			break;
2986 		}
2987 
2988 		qdf_spin_lock_bh(&wmi->diag_eventq_lock);
2989 		buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
2990 		qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
2991 	}
2992 	qdf_timer_free(&wd_timer);
2993 }
2994 
2995 #ifdef FEATURE_RUNTIME_PM
2996 /**
2997  * wmi_runtime_pm_init() - initialize runtime pm wmi variables
2998  * @wmi_handle: wmi context
2999  */
3000 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
3001 {
3002 	qdf_atomic_init(&wmi_handle->runtime_pm_inprogress);
3003 }
3004 
3005 /**
3006  * wmi_set_runtime_pm_inprogress() - set runtime pm progress flag
3007  * @wmi_handle: wmi context
3008  * @val: runtime pm progress flag
3009  */
3010 void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val)
3011 {
3012 	qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val);
3013 }
3014 
3015 /**
3016  * wmi_get_runtime_pm_inprogress() - get runtime pm progress flag
3017  * @wmi_handle: wmi context
3018  */
3019 inline bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle)
3020 {
3021 	return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress);
3022 }
3023 #else
3024 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
3025 {
3026 }
3027 #endif
3028 
3029 /**
3030  * wmi_unified_get_soc_handle: Get WMI SoC handle
3031  * @param wmi_handle: WMI context obtained from wmi_unified_attach()
3032  *
3033  * return: Pointer to Soc handle
3034  */
3035 void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle)
3036 {
3037 	return wmi_handle->soc;
3038 }
3039 
3040 /**
3041  * wmi_interface_logging_init: Interface logging init
3042  * @param wmi_handle: Pointer to wmi handle object
3043  *
3044  * return: None
3045  */
3046 #ifdef WMI_INTERFACE_EVENT_LOGGING
3047 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
3048 					      uint32_t pdev_idx)
3049 {
3050 	if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) {
3051 		qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
3052 		wmi_debugfs_init(wmi_handle, pdev_idx);
3053 	}
3054 }
3055 #else
3056 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
3057 					      uint32_t pdev_idx)
3058 {
3059 }
3060 #endif
3061 
3062 static QDF_STATUS wmi_initialize_worker_context(struct wmi_unified *wmi_handle)
3063 {
3064 	wmi_handle->wmi_rx_work_queue =
3065 		qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue");
3066 	if (!wmi_handle->wmi_rx_work_queue) {
3067 		wmi_err("failed to create wmi_rx_event_work_queue");
3068 		return QDF_STATUS_E_RESOURCES;
3069 	}
3070 
3071 	qdf_spinlock_create(&wmi_handle->eventq_lock);
3072 	qdf_nbuf_queue_init(&wmi_handle->event_queue);
3073 	qdf_create_work(0, &wmi_handle->rx_event_work,
3074 			wmi_rx_event_work, wmi_handle);
3075 
3076 	wmi_handle->wmi_rx_diag_work_queue =
3077 		qdf_alloc_unbound_workqueue("wmi_rx_diag_event_work_queue");
3078 	if (!wmi_handle->wmi_rx_diag_work_queue) {
3079 		wmi_err("failed to create wmi_rx_diag_event_work_queue");
3080 		return QDF_STATUS_E_RESOURCES;
3081 	}
3082 	qdf_spinlock_create(&wmi_handle->diag_eventq_lock);
3083 	qdf_nbuf_queue_init(&wmi_handle->diag_event_queue);
3084 	qdf_create_work(0, &wmi_handle->rx_diag_event_work,
3085 			wmi_rx_diag_event_work, wmi_handle);
3086 	wmi_handle->wmi_rx_diag_events_dropped = 0;
3087 
3088 	return QDF_STATUS_SUCCESS;
3089 }
3090 
3091 /**
3092  * wmi_unified_get_pdev_handle: Get WMI pdev handle
3093  * @param soc: Pointer to wmi soc object
3094  * @param pdev_idx: pdev index
3095  *
3096  * return: Pointer to wmi handle or NULL on failure
3097  */
3098 void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx)
3099 {
3100 	struct wmi_unified *wmi_handle;
3101 	QDF_STATUS status;
3102 
3103 	if (pdev_idx >= WMI_MAX_RADIOS)
3104 		return NULL;
3105 
3106 	if (!soc->wmi_pdev[pdev_idx]) {
3107 		wmi_handle =
3108 			(struct wmi_unified *) qdf_mem_malloc(
3109 					sizeof(struct wmi_unified));
3110 		if (!wmi_handle)
3111 			return NULL;
3112 
3113 		status = wmi_initialize_worker_context(wmi_handle);
3114 		if (QDF_IS_STATUS_ERROR(status))
3115 			goto error;
3116 
3117 		wmi_handle->scn_handle = soc->scn_handle;
3118 		wmi_handle->event_id = soc->event_id;
3119 		wmi_handle->event_handler = soc->event_handler;
3120 		wmi_handle->ctx = soc->ctx;
3121 		wmi_handle->ops = soc->ops;
3122 		wmi_handle->wmi_events = soc->wmi_events;
3123 		wmi_handle->services = soc->services;
3124 		wmi_handle->soc = soc;
3125 		wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3126 		wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3127 		wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3128 		wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3129 		wmi_interface_logging_init(wmi_handle, pdev_idx);
3130 		qdf_atomic_init(&wmi_handle->pending_cmds);
3131 		qdf_atomic_init(&wmi_handle->is_target_suspended);
3132 		wmi_handle->target_type = soc->target_type;
3133 		wmi_handle->wmi_max_cmds = soc->wmi_max_cmds;
3134 
3135 		wmi_interface_sequence_init(wmi_handle);
3136 		if (wmi_ext_dbgfs_init(wmi_handle, pdev_idx) !=
3137 		    QDF_STATUS_SUCCESS)
3138 			wmi_err("Failed to initialize wmi extended debugfs");
3139 
3140 		soc->wmi_pdev[pdev_idx] = wmi_handle;
3141 	} else
3142 		wmi_handle = soc->wmi_pdev[pdev_idx];
3143 
3144 	wmi_handle->wmi_stopinprogress = 0;
3145 	wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx];
3146 	wmi_handle->htc_handle = soc->htc_handle;
3147 	wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx];
3148 	wmi_handle->tag_crash_inject = false;
3149 	wmi_interface_sequence_reset(wmi_handle);
3150 
3151 	return wmi_handle;
3152 
3153 error:
3154 	qdf_mem_free(wmi_handle);
3155 
3156 	return NULL;
3157 }
3158 qdf_export_symbol(wmi_unified_get_pdev_handle);
3159 
3160 static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t);
3161 
3162 void wmi_unified_register_module(enum wmi_target_type target_type,
3163 			void (*wmi_attach)(wmi_unified_t wmi_handle))
3164 {
3165 	if (target_type < WMI_MAX_TARGET_TYPE)
3166 		wmi_attach_register[target_type] = wmi_attach;
3167 
3168 	return;
3169 }
3170 qdf_export_symbol(wmi_unified_register_module);
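/*
 * Illustrative usage sketch (editorial, not compiled): each target
 * implementation registers its attach callback once, and
 * wmi_unified_attach() later dispatches to it through wmi_attach_register[].
 * The callback name below is a placeholder:
 *
 *	wmi_unified_register_module(WMI_TLV_TARGET, &wmi_some_tlv_attach);
 */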
3171 
3172 /**
3173  * wmi_wbuff_register() - register wmi with wbuff
3174  * @wmi_handle: handle to wmi
3175  *
3176  * @Return: void
3177  */
3178 static void wmi_wbuff_register(struct wmi_unified *wmi_handle)
3179 {
3180 	struct wbuff_alloc_request wbuff_alloc[4];
3181 
3182 	wbuff_alloc[0].slot = WBUFF_POOL_0;
3183 	wbuff_alloc[0].size = WMI_WBUFF_POOL_0_SIZE;
3184 	wbuff_alloc[1].slot = WBUFF_POOL_1;
3185 	wbuff_alloc[1].size = WMI_WBUFF_POOL_1_SIZE;
3186 	wbuff_alloc[2].slot = WBUFF_POOL_2;
3187 	wbuff_alloc[2].size = WMI_WBUFF_POOL_2_SIZE;
3188 	wbuff_alloc[3].slot = WBUFF_POOL_3;
3189 	wbuff_alloc[3].size = WMI_WBUFF_POOL_3_SIZE;
3190 
3191 	wmi_handle->wbuff_handle = wbuff_module_register(wbuff_alloc, 4,
3192 							 WMI_MIN_HEAD_ROOM, 4);
3193 }
3194 
3195 /**
3196  * wmi_wbuff_deregister() - deregister wmi with wbuff
3197  * @wmi_handle: handle to wmi
3198  *
3199  * @Return: void
3200  */
3201 static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle)
3202 {
3203 	wbuff_module_deregister(wmi_handle->wbuff_handle);
3204 	wmi_handle->wbuff_handle = NULL;
3205 }
3206 
3207 /**
3208  * wmi_unified_attach() - attach for unified WMI
3209  * @scn_handle: handle to SCN
3210  * @param: attach parameters carrying the OS device context, the target
3211  *         type (TLV or non-TLV), the cookie allocation flag, umac rx
3212  *         callbacks and the objmgr psoc
3215  *
3216  * @Return: wmi handle.
3217  */
3218 void *wmi_unified_attach(void *scn_handle,
3219 			 struct wmi_unified_attach_params *param)
3220 {
3221 	struct wmi_unified *wmi_handle;
3222 	struct wmi_soc *soc;
3223 	QDF_STATUS status;
3224 
3225 	soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc));
3226 	if (!soc)
3227 		return NULL;
3228 
3229 	wmi_handle =
3230 		(struct wmi_unified *) qdf_mem_malloc(
3231 			sizeof(struct wmi_unified));
3232 	if (!wmi_handle) {
3233 		qdf_mem_free(soc);
3234 		return NULL;
3235 	}
3236 
3237 	status = wmi_initialize_worker_context(wmi_handle);
3238 	if (QDF_IS_STATUS_ERROR(status))
3239 		goto error;
3240 
3241 	wmi_handle->soc = soc;
3242 	wmi_handle->soc->soc_idx = param->soc_id;
3243 	wmi_handle->soc->is_async_ep = param->is_async_ep;
3244 	wmi_handle->event_id = soc->event_id;
3245 	wmi_handle->event_handler = soc->event_handler;
3246 	wmi_handle->ctx = soc->ctx;
3247 	wmi_handle->wmi_events = soc->wmi_events;
3248 	wmi_handle->services = soc->services;
3249 	wmi_handle->scn_handle = scn_handle;
3250 	wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3251 	wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3252 	wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3253 	wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3254 	soc->scn_handle = scn_handle;
3255 	wmi_handle->target_type = param->target_type;
3256 	soc->target_type = param->target_type;
3257 
3258 	if (param->target_type >= WMI_MAX_TARGET_TYPE)
3259 		goto error;
3260 
3261 	if (wmi_attach_register[param->target_type]) {
3262 		wmi_attach_register[param->target_type](wmi_handle);
3263 	} else {
3264 		wmi_err("wmi attach is not registered");
3265 		goto error;
3266 	}
3267 
3268 	qdf_atomic_init(&wmi_handle->pending_cmds);
3269 	qdf_atomic_init(&wmi_handle->is_target_suspended);
3270 	qdf_atomic_init(&wmi_handle->is_target_suspend_acked);
3271 	qdf_atomic_init(&wmi_handle->num_stats_over_qmi);
3272 	wmi_runtime_pm_init(wmi_handle);
3273 	wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0);
3274 
3275 	wmi_interface_sequence_init(wmi_handle);
3276 	/* Assign target cookie capability */
3277 	wmi_handle->use_cookie = param->use_cookie;
3278 	wmi_handle->osdev = param->osdev;
3279 	wmi_handle->wmi_stopinprogress = 0;
3280 	wmi_handle->wmi_max_cmds = param->max_commands;
3281 	soc->wmi_max_cmds = param->max_commands;
3282 	/* Increase the ref count once refcount infra is present */
3283 	soc->wmi_psoc = param->psoc;
3284 	qdf_spinlock_create(&soc->ctx_lock);
3285 	soc->ops = wmi_handle->ops;
3286 	soc->wmi_pdev[0] = wmi_handle;
3287 	if (wmi_ext_dbgfs_init(wmi_handle, 0) != QDF_STATUS_SUCCESS)
3288 		wmi_err("Failed to initialize wmi extended debugfs");
3289 
3290 	wmi_wbuff_register(wmi_handle);
3291 
3292 	wmi_hang_event_notifier_register(wmi_handle);
3293 
3294 	wmi_minidump_attach(wmi_handle);
3295 
3296 	return wmi_handle;
3297 
3298 error:
3299 	qdf_mem_free(soc);
3300 	qdf_mem_free(wmi_handle);
3301 
3302 	return NULL;
3303 }
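
/*
 * Illustrative attach sequence (editorial sketch, not driver code): the
 * caller fills a wmi_unified_attach_params structure and keeps the
 * returned opaque handle for all later WMI calls.  The surrounding
 * variables (scn_handle, osdev, psoc) and the max_commands value are
 * assumed to come from the caller.
 *
 *	struct wmi_unified_attach_params params = {0};
 *	void *wmi_hdl;
 *
 *	params.osdev = osdev;
 *	params.target_type = WMI_TLV_TARGET;
 *	params.use_cookie = false;
 *	params.max_commands = 1024;
 *	params.soc_id = 0;
 *	params.psoc = psoc;
 *
 *	wmi_hdl = wmi_unified_attach(scn_handle, &params);
 *	if (!wmi_hdl)
 *		return QDF_STATUS_E_FAILURE;
 */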
3304 
3305 /**
3306  * wmi_unified_detach() - detach for unified WMI
3307  * @wmi_handle: handle to wmi
3308  *
3309  * Return: None
3311  */
3312 void wmi_unified_detach(struct wmi_unified *wmi_handle)
3313 {
3314 	wmi_buf_t buf;
3315 	struct wmi_soc *soc;
3316 	uint8_t i;
3317 
3318 	wmi_minidump_detach(wmi_handle);
3319 
3320 	wmi_hang_event_notifier_unregister();
3321 
3322 	wmi_wbuff_deregister(wmi_handle);
3323 
3324 	soc = wmi_handle->soc;
3325 	for (i = 0; i < WMI_MAX_RADIOS; i++) {
3326 		if (soc->wmi_pdev[i]) {
3327 			qdf_flush_workqueue(0,
3328 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3329 			qdf_destroy_workqueue(0,
3330 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3331 			wmi_debugfs_remove(soc->wmi_pdev[i]);
3332 			buf = qdf_nbuf_queue_remove(
3333 					&soc->wmi_pdev[i]->event_queue);
3334 			while (buf) {
3335 				qdf_nbuf_free(buf);
3336 				buf = qdf_nbuf_queue_remove(
3337 						&soc->wmi_pdev[i]->event_queue);
3338 			}
3339 
3340 			qdf_flush_work(&soc->wmi_pdev[i]->rx_diag_event_work);
3341 			buf = qdf_nbuf_queue_remove(
3342 					&soc->wmi_pdev[i]->diag_event_queue);
3343 			while (buf) {
3344 				qdf_nbuf_free(buf);
3345 				buf = qdf_nbuf_queue_remove(
3346 					&soc->wmi_pdev[i]->diag_event_queue);
3347 			}
3348 
3349 			wmi_log_buffer_free(soc->wmi_pdev[i]);
3350 
3351 			/* Free events logs list */
3352 			if (soc->wmi_pdev[i]->events_logs_list)
3353 				qdf_mem_free(
3354 					soc->wmi_pdev[i]->events_logs_list);
3355 
3356 			qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock);
3357 			qdf_spinlock_destroy(
3358 					&soc->wmi_pdev[i]->diag_eventq_lock);
3359 
3360 			wmi_interface_sequence_deinit(soc->wmi_pdev[i]);
3361 			wmi_ext_dbgfs_deinit(soc->wmi_pdev[i]);
3362 
3363 			qdf_mem_free(soc->wmi_pdev[i]);
3364 		}
3365 	}
3366 	qdf_spinlock_destroy(&soc->ctx_lock);
3367 
3368 	if (soc->wmi_service_bitmap) {
3369 		qdf_mem_free(soc->wmi_service_bitmap);
3370 		soc->wmi_service_bitmap = NULL;
3371 	}
3372 
3373 	if (soc->wmi_ext_service_bitmap) {
3374 		qdf_mem_free(soc->wmi_ext_service_bitmap);
3375 		soc->wmi_ext_service_bitmap = NULL;
3376 	}
3377 
3378 	if (soc->wmi_ext2_service_bitmap) {
3379 		qdf_mem_free(soc->wmi_ext2_service_bitmap);
3380 		soc->wmi_ext2_service_bitmap = NULL;
3381 	}
3382 
3383 	/* Decrease the ref count once refcount infra is present */
3384 	soc->wmi_psoc = NULL;
3385 	qdf_mem_free(soc);
3386 }
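
/*
 * Illustrative teardown (editorial sketch, not driver code): callers are
 * expected to block further commands and drain the WMI work queues before
 * releasing the handle, roughly:
 *
 *	wmi_stop(wmi_hdl);
 *	wmi_unified_remove_work(wmi_hdl);
 *	wmi_unified_detach(wmi_hdl);
 *
 * The exact ordering relative to HTC shutdown is owned by the caller.
 */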
3387 
3388 /**
3389  * wmi_unified_remove_work() - detach for WMI work
3390  * @wmi_handle: handle to WMI
3391  *
3392  * A function that does not fully detach WMI, but just removes the
3393  * work queue items associated with it. This is used to ensure that
3394  * WMI work queue processing has already been stopped before any other
3395  * code that may destroy related contexts (HTC, etc.) is run.
3396  *
3397  * Return: None
3398  */
3399 void
3400 wmi_unified_remove_work(struct wmi_unified *wmi_handle)
3401 {
3402 	wmi_buf_t buf;
3403 
3404 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue);
3405 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
3406 	buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3407 	while (buf) {
3408 		qdf_nbuf_free(buf);
3409 		buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3410 	}
3411 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
3412 
3413 	/* Remove diag events work */
3414 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_diag_work_queue);
3415 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
3416 	buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3417 	while (buf) {
3418 		qdf_nbuf_free(buf);
3419 		buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3420 	}
3421 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
3422 }
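
/*
 * Illustrative call site (editorial sketch, not driver code): a shutdown
 * path would drain WMI work before tearing down HTC so that no queued rx
 * work item can run against a destroyed HTC context, e.g.
 *
 *	wmi_unified_remove_work(wmi_hdl);
 *	htc_stop(htc_handle);
 */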
3423 
3424 /**
3425  * wmi_htc_tx_complete() - Process htc tx completion
3426  * @ctx: opaque pointer to the wmi soc
3427  * @htc_pkt: pointer to the completed htc packet
3428  *
3429  * Return: None
3431  */
3432 static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
3433 {
3434 	struct wmi_soc *soc = (struct wmi_soc *) ctx;
3435 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3436 	u_int8_t *buf_ptr;
3437 	u_int32_t len;
3438 	struct wmi_unified *wmi_handle;
3439 #ifdef WMI_INTERFACE_EVENT_LOGGING
3440 	struct wmi_debug_log_info *log_info;
3441 	uint32_t cmd_id;
3442 	uint8_t *offset_ptr;
3443 	qdf_dma_addr_t dma_addr;
3444 	uint64_t phy_addr;
3445 #endif
3446 
3447 	ASSERT(wmi_cmd_buf);
3448 	wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint);
3449 	if (!wmi_handle) {
3450 		wmi_err("Unable to get wmi handle");
3451 		QDF_ASSERT(0);
3452 		return;
3453 	}
3454 	buf_ptr = (u_int8_t *)wmi_buf_data(wmi_cmd_buf);
3455 #ifdef WMI_INTERFACE_EVENT_LOGGING
3456 	log_info = &wmi_handle->log_info;
3457 
3458 	if (wmi_handle && log_info->wmi_logging_enable) {
3459 		cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf),
3460 				WMI_CMD_HDR, COMMANDID);
3461 
3462 		dma_addr = QDF_NBUF_CB_PADDR(wmi_cmd_buf);
3463 		phy_addr = qdf_mem_virt_to_phys(qdf_nbuf_data(wmi_cmd_buf));
3464 
3465 		qdf_spin_lock_bh(&log_info->wmi_record_lock);
3466 		/* Record 16 bytes of WMI cmd tx complete data
3467 		 * - exclude TLV and WMI headers
3468 		 */
3469 		offset_ptr = buf_ptr + wmi_handle->soc->buf_offset_command;
3470 		if (wmi_handle->ops->is_management_record(cmd_id)) {
3471 			WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3472 						       offset_ptr);
3473 		} else {
3474 			WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3475 						  offset_ptr, dma_addr,
3476 						  phy_addr);
3477 		}
3478 
3479 		qdf_spin_unlock_bh(&log_info->wmi_record_lock);
3480 	}
3481 #endif
3482 
3483 	wmi_interface_sequence_check(wmi_handle, wmi_cmd_buf);
3484 
3485 	len = qdf_nbuf_len(wmi_cmd_buf);
3486 	qdf_mem_zero(buf_ptr, len);
3487 	wmi_buf_free(wmi_cmd_buf);
3488 	qdf_mem_free(htc_pkt);
3489 	qdf_atomic_dec(&wmi_handle->pending_cmds);
3490 }
3491 
3492 #ifdef FEATURE_RUNTIME_PM
3493 /**
3494  * wmi_htc_log_pkt() - Print information of WMI command from HTC packet
3495  *
3496  * @ctx: handle of WMI context
3497  * @htc_pkt: handle of HTC packet
3498  *
3499  * Return: None
3500  */
3501 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3502 {
3503 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3504 	uint32_t cmd_id;
3505 
3506 	ASSERT(wmi_cmd_buf);
3507 	cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), WMI_CMD_HDR,
3508 			       COMMANDID);
3509 
3510 	wmi_debug("WMI command from HTC packet: %s, ID: %d",
3511 		 wmi_id_to_name(cmd_id), cmd_id);
3512 }
3513 #else
3514 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3515 {
3516 }
3517 #endif
3518 
3519 /**
3520  * wmi_connect_pdev_htc_service() - connect a pdev to the WMI HTC service
3521  * @soc: handle to the WMI soc
3522  * @pdev_idx: pdev index
3523  *
3524  * Return: QDF_STATUS
3526  */
3527 static QDF_STATUS wmi_connect_pdev_htc_service(struct wmi_soc *soc,
3528 					       uint32_t pdev_idx)
3529 {
3530 	QDF_STATUS status;
3531 	struct htc_service_connect_resp response;
3532 	struct htc_service_connect_req connect;
3533 
3534 	OS_MEMZERO(&connect, sizeof(connect));
3535 	OS_MEMZERO(&response, sizeof(response));
3536 
3537 	/* meta data is unused for now */
3538 	connect.pMetaData = NULL;
3539 	connect.MetaDataLength = 0;
3540 	/* these fields are the same for all service endpoints */
3541 	connect.EpCallbacks.pContext = soc;
3542 	connect.EpCallbacks.EpTxCompleteMultiple =
3543 		NULL /* Control path completion ar6000_tx_complete */;
3544 	connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */;
3545 	connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */;
3546 	connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */;
3547 	connect.EpCallbacks.EpTxComplete =
3548 		wmi_htc_tx_complete /* Control path completion */;
3549 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3550 
3551 	/* connect to control service */
3552 	connect.service_id = soc->svc_ids[pdev_idx];
3553 	status = htc_connect_service(soc->htc_handle, &connect, &response);
3554 
3555 	if (QDF_IS_STATUS_ERROR(status)) {
3556 		wmi_err("Failed to connect to WMI CONTROL service status:%d",
3557 			 status);
3558 		return status;
3559 	}
3560 
3561 	if (soc->is_async_ep)
3562 		htc_set_async_ep(soc->htc_handle, response.Endpoint, true);
3563 
3564 	soc->wmi_endpoint_id[pdev_idx] = response.Endpoint;
3565 	soc->max_msg_len[pdev_idx] = response.MaxMsgLength;
3566 
3567 	return QDF_STATUS_SUCCESS;
3568 }
3569 
3570 QDF_STATUS
3571 wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle,
3572 				HTC_HANDLE htc_handle)
3573 {
3574 	uint32_t i;
3575 	uint8_t wmi_ep_count;
3576 
3577 	wmi_handle->soc->htc_handle = htc_handle;
3578 
3579 	wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle);
3580 	if (wmi_ep_count > WMI_MAX_RADIOS)
3581 		return QDF_STATUS_E_FAULT;
3582 
3583 	for (i = 0; i < wmi_ep_count; i++)
3584 		wmi_connect_pdev_htc_service(wmi_handle->soc, i);
3585 
3586 	wmi_handle->htc_handle = htc_handle;
3587 	wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0];
3588 	wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0];
3589 
3590 	return QDF_STATUS_SUCCESS;
3591 }
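
/*
 * Illustrative usage (editorial sketch, not driver code): once HTC is up,
 * the HTC handle is handed to WMI so that a control endpoint is connected
 * for every WMI capable pdev, e.g.
 *
 *	if (QDF_IS_STATUS_ERROR(
 *		wmi_unified_connect_htc_service(wmi_hdl, htc_handle)))
 *		return QDF_STATUS_E_FAILURE;
 */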
3592 
3593 #if defined(WLAN_FEATURE_WMI_DIAG_OVER_CE7) || \
3594 	defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
3595 QDF_STATUS wmi_diag_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
3596 					     HTC_HANDLE htc_handle)
3597 {
3598 	QDF_STATUS status;
3599 	struct htc_service_connect_resp response = {0};
3600 	struct htc_service_connect_req connect = {0};
3601 
3602 	/* meta data is unused for now */
3603 	connect.pMetaData = NULL;
3604 	connect.MetaDataLength = 0;
3605 	connect.EpCallbacks.pContext = wmi_handle->soc;
3606 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3607 	connect.EpCallbacks.EpRecv = wmi_control_diag_rx /* wmi diag rx */;
3608 	connect.EpCallbacks.EpRecvRefill = NULL;
3609 	connect.EpCallbacks.EpSendFull = NULL;
3610 	connect.EpCallbacks.EpTxComplete = NULL;
3611 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3612 
3613 	/* connect to wmi diag service */
3614 	connect.service_id = WMI_CONTROL_DIAG_SVC;
3615 	status = htc_connect_service(htc_handle, &connect, &response);
3616 
3617 	if (QDF_IS_STATUS_ERROR(status)) {
3618 		wmi_err("Failed to connect to WMI DIAG service status:%d",
3619 			status);
3620 		return status;
3621 	}
3622 
3623 	if (wmi_handle->soc->is_async_ep)
3624 		htc_set_async_ep(htc_handle, response.Endpoint, true);
3625 
3626 	wmi_handle->soc->wmi_diag_endpoint_id = response.Endpoint;
3627 
3628 	return QDF_STATUS_SUCCESS;
3629 }
3630 #endif
3631 
3632 #if defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
3633 QDF_STATUS wmi_dbr_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
3634 					    HTC_HANDLE htc_handle)
3635 {
3636 	QDF_STATUS status;
3637 	struct htc_service_connect_resp response = {0};
3638 	struct htc_service_connect_req connect = {0};
3639 
3640 	/* meta data is unused for now */
3641 	connect.pMetaData = NULL;
3642 	connect.MetaDataLength = 0;
3643 	connect.EpCallbacks.pContext = wmi_handle->soc;
3644 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3645 	connect.EpCallbacks.EpRecv = wmi_control_dbr_rx /* wmi dbr rx */;
3646 	connect.EpCallbacks.EpRecvRefill = NULL;
3647 	connect.EpCallbacks.EpSendFull = NULL;
3648 	connect.EpCallbacks.EpTxComplete = NULL;
3649 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3650 
3651 	/* connect to wmi dbr service */
3652 	connect.service_id = WMI_CONTROL_DBR_SVC;
3653 	status = htc_connect_service(htc_handle, &connect, &response);
3654 
3655 	if (QDF_IS_STATUS_ERROR(status)) {
3656 		wmi_err("Failed to connect to WMI DBR service status:%d",
3657 			status);
3658 		return status;
3659 	}
3660 
3661 	if (wmi_handle->soc->is_async_ep)
3662 		htc_set_async_ep(htc_handle, response.Endpoint, true);
3663 
3664 	wmi_handle->soc->wmi_dbr_endpoint_id = response.Endpoint;
3665 
3666 	return QDF_STATUS_SUCCESS;
3667 }
3668 #endif
3669 
3670 /**
3671  * wmi_get_host_credits() - WMI API to get updated host credits
3672  * @wmi_handle: handle to WMI
3673  *
3674  * Return: updated number of host credits on the WMI control endpoint.
3676  */
3677 int wmi_get_host_credits(wmi_unified_t wmi_handle)
3678 {
3679 	int host_credits = 0;
3680 
3681 	htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle,
3682 						 &host_credits);
3683 	return host_credits;
3684 }
3685 
3686 /**
3687  * wmi_get_pending_cmds() - WMI API to get WMI Pending Commands in the HTC
3688  *                          queue
3689  *
3690  * @wmi_handle: handle to WMI.
3691  *
3692  * Return: Pending Commands in the HTC queue.
3693  */
3694 int wmi_get_pending_cmds(wmi_unified_t wmi_handle)
3695 {
3696 	return qdf_atomic_read(&wmi_handle->pending_cmds);
3697 }
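
/*
 * Illustrative use (editorial sketch, not driver code): the two accessors
 * above are typically read together when diagnosing a stalled control
 * path, e.g.
 *
 *	wmi_err("host credits %d, pending cmds %d",
 *		wmi_get_host_credits(wmi_hdl),
 *		wmi_get_pending_cmds(wmi_hdl));
 */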
3698 
3699 /**
3700  * wmi_set_target_suspend() -  WMI API to set target suspend state
3701  *
3702  * @wmi_handle: handle to WMI.
3703  * @val: suspend state boolean.
3704  *
3705  * Return: None
3706  */
3707 void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val)
3708 {
3709 	qdf_atomic_set(&wmi_handle->is_target_suspended, val);
3710 }
3711 
3712 /**
3713  * wmi_set_target_suspend_acked() -  WMI API to set target suspend acked flag
3714  *
3715  * @wmi_handle: handle to WMI.
3716  * @val: target suspend command acked flag.
3717  *
3718  * Return: None
3719  */
3720 void wmi_set_target_suspend_acked(wmi_unified_t wmi_handle, A_BOOL val)
3721 {
3722 	qdf_atomic_set(&wmi_handle->is_target_suspend_acked, val);
3723 	qdf_atomic_set(&wmi_handle->num_stats_over_qmi, 0);
3724 }
3725 
3726 /**
3727  * wmi_is_target_suspended() - WMI API to check target suspend state
3728  * @wmi_handle: handle to WMI.
3729  *
3730  * WMI API to check target suspend state
3731  *
3732  * Return: true if target is suspended, else false.
3733  */
3734 bool wmi_is_target_suspended(struct wmi_unified *wmi_handle)
3735 {
3736 	return qdf_atomic_read(&wmi_handle->is_target_suspended);
3737 }
3738 qdf_export_symbol(wmi_is_target_suspended);
3739 
3740 /**
3741  * wmi_is_target_suspend_acked() - WMI API to check target suspend command is
3742  *                                 acked or not
3743  * @wmi_handle: handle to WMI.
3744  *
3745  * WMI API to check whether the target suspend command is acked or not
3746  *
3747  * Return: true if target suspend command is acked, else false.
3748  */
3749 bool wmi_is_target_suspend_acked(struct wmi_unified *wmi_handle)
3750 {
3751 	return qdf_atomic_read(&wmi_handle->is_target_suspend_acked);
3752 }
3753 qdf_export_symbol(wmi_is_target_suspend_acked);
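
/*
 * Illustrative suspend flow (editorial sketch, not driver code): the
 * suspend layer marks the target suspended once the suspend command has
 * been sent and acked, and command senders consult the flags before
 * issuing further messages, e.g.
 *
 *	wmi_set_target_suspend(wmi_hdl, true);
 *	wmi_set_target_suspend_acked(wmi_hdl, true);
 *
 *	if (wmi_is_target_suspended(wmi_hdl))
 *		return QDF_STATUS_E_BUSY;
 */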
3754 
3755 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
3756 void wmi_set_qmi_stats(wmi_unified_t wmi_handle, bool val)
3757 {
3758 	wmi_handle->is_qmi_stats_enabled = val;
3759 }
3760 
3761 bool wmi_is_qmi_stats_enabled(struct wmi_unified *wmi_handle)
3762 {
3763 	return wmi_handle->is_qmi_stats_enabled;
3764 }
3765 #endif
3766 
3767 /**
3768  * wmi_tag_crash_inject() - WMI API to set crash injection state
3769  * @wmi_handle: handle to WMI
3770  * @flag: crash injection state boolean
3771  */
3772 void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag)
3773 {
3774 	wmi_handle->tag_crash_inject = flag;
3775 }
3776 
3777 /**
3778  * wmi_set_is_wow_bus_suspended() - WMI API to set bus suspend state
3779  * @wmi_handle: handle to WMI
3780  * @val: suspend state boolean
3781  */
3782 void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val)
3783 {
3784 	qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val);
3785 }
3786 
3787 void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val)
3788 {
3789 	wmi_handle->tgt_force_assert_enable = val;
3790 }
3791 
3792 /**
3793  * wmi_stop() - generic function to block unified WMI command
3794  * @wmi_handle: handle to WMI.
3795  *
3796  * Return: success always.
3797  */
3798 int
3799 wmi_stop(wmi_unified_t wmi_handle)
3800 {
3801 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3802 		  "WMI Stop");
3803 	wmi_handle->wmi_stopinprogress = 1;
3804 	return 0;
3805 }
3806 
3807 /**
3808  * wmi_start() - generic function to allow unified WMI command
3809  * @wmi_handle: handle to WMI.
3810  *
3811  * Return: success always.
3812  */
3813 int
3814 wmi_start(wmi_unified_t wmi_handle)
3815 {
3816 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3817 		  "WMI Start");
3818 	wmi_handle->wmi_stopinprogress = 0;
3819 	return 0;
3820 }
3821 
3822 /**
3823  * wmi_is_blocked() - generic function to check if WMI is blocked
3824  * @wmi_handle: handle to WMI.
3825  *
3826  * Return: true if blocked, false if not blocked
3827  */
3828 bool
3829 wmi_is_blocked(wmi_unified_t wmi_handle)
3830 {
3831 	return wmi_handle->wmi_stopinprogress != 0;
3832 }
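
/*
 * Illustrative use (editorial sketch, not driver code): wmi_stop() and
 * wmi_start() bracket code paths (e.g. recovery) during which no new WMI
 * command should be queued, and wmi_is_blocked() lets senders check the
 * current state:
 *
 *	wmi_stop(wmi_hdl);
 *	if (wmi_is_blocked(wmi_hdl))
 *		wmi_debug("WMI command path is blocked");
 *	wmi_start(wmi_hdl);
 */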
3833 
3834 /**
3835  * wmi_flush_endpoint() - flush all the previous packets associated
3836  *                        with the wmi endpoint
3837  * @wmi_handle: handle to WMI
3838  */
3839 void
3840 wmi_flush_endpoint(wmi_unified_t wmi_handle)
3841 {
3842 	htc_flush_endpoint(wmi_handle->htc_handle,
3843 		wmi_handle->wmi_endpoint_id, 0);
3844 }
3845 qdf_export_symbol(wmi_flush_endpoint);
3846 
3847 /**
3848  * wmi_pdev_id_conversion_enable() - API to enable pdev_id/phy_id conversion
3849  *                     in WMI. By default pdev_id conversion is not done in
3850  *                     WMI. This API can be used to enable conversion in WMI.
3851  * @wmi_handle: handle to WMI
3852  * @pdev_id_map: pointer to the pdev_id map
3853  * @size: size of the pdev_id map
3854  * Return: None
3855  */
3856 void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle,
3857 				   uint32_t *pdev_id_map,
3858 				   uint8_t size)
3859 {
3860 	if (wmi_handle->target_type == WMI_TLV_TARGET)
3861 		wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle,
3862 							       pdev_id_map,
3863 							       size);
3864 }
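
/*
 * Illustrative use (editorial sketch, not driver code): a TLV target whose
 * host pdev ids 0/1/2 map to target pdev ids 1/2/3 would enable conversion
 * with a mapping table like the following (values purely illustrative):
 *
 *	uint32_t pdev_id_map[] = {1, 2, 3};
 *
 *	wmi_pdev_id_conversion_enable(wmi_hdl, pdev_id_map,
 *				      QDF_ARRAY_SIZE(pdev_id_map));
 */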
3865 
3866 int __wmi_validate_handle(wmi_unified_t wmi_handle, const char *func)
3867 {
3868 	if (!wmi_handle) {
3869 		wmi_err("Invalid WMI handle (via %s)", func);
3870 		return -EINVAL;
3871 	}
3872 
3873 	return 0;
3874 }
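
/*
 * Illustrative use (editorial sketch, not driver code): WMI entry points
 * validate the handle before dereferencing it, passing the caller name so
 * that the error log identifies the offending API, e.g.
 *
 *	if (__wmi_validate_handle(wmi_handle, __func__))
 *		return QDF_STATUS_E_INVAL;
 */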
3875