xref: /wlan-dirver/qca-wifi-host-cmn/wmi/src/wmi_unified.c (revision 7be08f15775e0e24440f836dfb6edcd5fe601d80)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*
21  * Host WMI unified implementation
22  */
23 #include "htc_api.h"
24 #include "htc_api.h"
25 #include "wmi_unified_priv.h"
26 #include "wmi_unified_api.h"
27 #include "qdf_module.h"
28 #include "qdf_platform.h"
29 #ifdef WMI_EXT_DBG
30 #include "qdf_list.h"
31 #include "qdf_atomic.h"
32 #endif
33 
34 #ifndef WMI_NON_TLV_SUPPORT
35 #include "wmi_tlv_helper.h"
36 #endif
37 
38 #include <linux/debugfs.h>
39 #include <target_if.h>
40 #include <qdf_debugfs.h>
41 #include "wmi_filtered_logging.h"
42 #include <wmi_hang_event.h>
43 
44 /* This check for CONFIG_WIN temporary added due to redeclaration compilation
45 error in MCL. Error is caused due to inclusion of wmi.h in wmi_unified_api.h
46 which gets included here through ol_if_athvar.h. Eventually it is expected that
47 wmi.h will be removed from wmi_unified_api.h after cleanup, which will need
48 WMI_CMD_HDR to be defined here. */
49 /* Copied from wmi.h */
50 #undef MS
51 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
52 #undef SM
53 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
54 #undef WO
55 #define WO(_f)      ((_f##_OFFSET) >> 2)
56 
57 #undef GET_FIELD
58 #define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f)
59 #undef SET_FIELD
60 #define SET_FIELD(_addr, _f, _val)  \
61 	    (*((uint32_t *)(_addr) + WO(_f)) = \
62 		(*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f))
63 
64 #define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \
65 	    GET_FIELD(_msg_buf, _msg_type ## _ ## _f)
66 
67 #define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \
68 	    SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val)
69 
70 #define WMI_EP_APASS           0x0
71 #define WMI_EP_LPASS           0x1
72 #define WMI_EP_SENSOR          0x2
73 
74 #define WMI_INFOS_DBG_FILE_PERM (QDF_FILE_USR_READ | \
75 				 QDF_FILE_USR_WRITE | \
76 				 QDF_FILE_GRP_READ | \
77 				 QDF_FILE_OTH_READ)
78 
79 /*
80  *  * Control Path
81  *   */
82 typedef PREPACK struct {
83 	uint32_t	commandId:24,
84 			reserved:2, /* used for WMI endpoint ID */
85 			plt_priv:6; /* platform private */
86 } POSTPACK WMI_CMD_HDR;        /* used for commands and events */
87 
88 #define WMI_CMD_HDR_COMMANDID_LSB           0
89 #define WMI_CMD_HDR_COMMANDID_MASK          0x00ffffff
90 #define WMI_CMD_HDR_COMMANDID_OFFSET        0x00000000
91 #define WMI_CMD_HDR_WMI_ENDPOINTID_MASK        0x03000000
92 #define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET      24
93 #define WMI_CMD_HDR_PLT_PRIV_LSB               24
94 #define WMI_CMD_HDR_PLT_PRIV_MASK              0xff000000
95 #define WMI_CMD_HDR_PLT_PRIV_OFFSET            0x00000000
96 /* end of copy wmi.h */
97 
98 #define WMI_MIN_HEAD_ROOM 64
99 
100 /* WBUFF pool sizes for WMI */
101 /* Allocation of size 256 bytes */
102 #define WMI_WBUFF_POOL_0_SIZE 128
103 /* Allocation of size 512 bytes */
104 #define WMI_WBUFF_POOL_1_SIZE 16
105 /* Allocation of size 1024 bytes */
106 #define WMI_WBUFF_POOL_2_SIZE 8
107 /* Allocation of size 2048 bytes */
108 #define WMI_WBUFF_POOL_3_SIZE 8
109 
110 #define RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT 500
111 
112 #ifdef WMI_INTERFACE_EVENT_LOGGING
113 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
114 /* TODO Cleanup this backported function */
115 static int wmi_bp_seq_printf(qdf_debugfs_file_t m, const char *f, ...)
116 {
117 	va_list args;
118 
119 	va_start(args, f);
120 	seq_vprintf(m, f, args);
121 	va_end(args);
122 
123 	return 0;
124 }
125 #else
126 #define wmi_bp_seq_printf(m, fmt, ...) seq_printf((m), fmt, ##__VA_ARGS__)
127 #endif
128 
129 #ifndef MAX_WMI_INSTANCES
130 #define CUSTOM_MGMT_CMD_DATA_SIZE 4
131 #endif
132 
133 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
134 /* WMI commands */
135 uint32_t g_wmi_command_buf_idx = 0;
136 struct wmi_command_debug wmi_command_log_buffer[WMI_CMD_DEBUG_MAX_ENTRY];
137 
138 /* WMI commands TX completed */
139 uint32_t g_wmi_command_tx_cmp_buf_idx = 0;
140 struct wmi_command_cmp_debug
141 	wmi_command_tx_cmp_log_buffer[WMI_CMD_CMPL_DEBUG_MAX_ENTRY];
142 
143 /* WMI events when processed */
144 uint32_t g_wmi_event_buf_idx = 0;
145 struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
146 
147 /* WMI events when queued */
148 uint32_t g_wmi_rx_event_buf_idx = 0;
149 struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
150 #endif
151 
152 static void wmi_minidump_detach(struct wmi_unified *wmi_handle)
153 {
154 	struct wmi_log_buf_t *info =
155 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
156 	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
157 
158 	qdf_minidump_remove(info->buf, buf_size, "wmi_tx_cmp");
159 }
160 
161 static void wmi_minidump_attach(struct wmi_unified *wmi_handle)
162 {
163 	struct wmi_log_buf_t *info =
164 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
165 	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
166 
167 	qdf_minidump_log(info->buf, buf_size, "wmi_tx_cmp");
168 }
169 
170 #define WMI_COMMAND_RECORD(h, a, b) {					\
171 	if (wmi_cmd_log_max_entry <=					\
172 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))	\
173 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\
174 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
175 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\
176 						.command = a;		\
177 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
178 				wmi_command_log_buf_info.buf)		\
179 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\
180 			b, wmi_record_max_length);			\
181 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
182 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\
183 		time = qdf_get_log_timestamp();			\
184 	(*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++;	\
185 	h->log_info.wmi_command_log_buf_info.length++;			\
186 }
187 
188 #define WMI_COMMAND_TX_CMP_RECORD(h, a, b, da, pa) {			\
189 	if (wmi_cmd_cmpl_log_max_entry <=				\
190 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\
191 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
192 				p_buf_tail_idx) = 0;			\
193 	((struct wmi_command_cmp_debug *)h->log_info.			\
194 		wmi_command_tx_cmp_log_buf_info.buf)			\
195 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
196 				p_buf_tail_idx)].			\
197 							command	= a;	\
198 	qdf_mem_copy(((struct wmi_command_cmp_debug *)h->log_info.	\
199 				wmi_command_tx_cmp_log_buf_info.buf)	\
200 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
201 			p_buf_tail_idx)].				\
202 		data, b, wmi_record_max_length);			\
203 	((struct wmi_command_cmp_debug *)h->log_info.			\
204 		wmi_command_tx_cmp_log_buf_info.buf)			\
205 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
206 				p_buf_tail_idx)].			\
207 		time = qdf_get_log_timestamp();				\
208 	((struct wmi_command_cmp_debug *)h->log_info.			\
209 		wmi_command_tx_cmp_log_buf_info.buf)			\
210 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
211 				p_buf_tail_idx)].			\
212 		dma_addr = da;						\
213 	((struct wmi_command_cmp_debug *)h->log_info.			\
214 		wmi_command_tx_cmp_log_buf_info.buf)			\
215 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
216 				p_buf_tail_idx)].			\
217 		phy_addr = pa;						\
218 	(*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\
219 	h->log_info.wmi_command_tx_cmp_log_buf_info.length++;		\
220 }
221 
222 #define WMI_EVENT_RECORD(h, a, b) {					\
223 	if (wmi_event_log_max_entry <=					\
224 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))	\
225 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\
226 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
227 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].	\
228 		event = a;						\
229 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
230 				wmi_event_log_buf_info.buf)		\
231 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\
232 		wmi_record_max_length);					\
233 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
234 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\
235 		qdf_get_log_timestamp();				\
236 	(*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++;	\
237 	h->log_info.wmi_event_log_buf_info.length++;			\
238 }
239 
240 #define WMI_RX_EVENT_RECORD(h, a, b) {					\
241 	if (wmi_event_log_max_entry <=					\
242 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\
243 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\
244 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
245 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
246 		event = a;						\
247 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
248 				wmi_rx_event_log_buf_info.buf)		\
249 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
250 			data, b, wmi_record_max_length);		\
251 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
252 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
253 		time =	qdf_get_log_timestamp();			\
254 	(*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++;	\
255 	h->log_info.wmi_rx_event_log_buf_info.length++;			\
256 }
257 
258 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
259 uint32_t g_wmi_mgmt_command_buf_idx = 0;
260 struct
261 wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_TX_DEBUG_MAX_ENTRY];
262 
263 /* wmi_mgmt commands TX completed */
264 uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0;
265 struct wmi_command_debug
266 wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY];
267 
268 /* wmi_mgmt events when received */
269 uint32_t g_wmi_mgmt_rx_event_buf_idx = 0;
270 struct wmi_event_debug
271 wmi_mgmt_rx_event_log_buffer[WMI_MGMT_RX_DEBUG_MAX_ENTRY];
272 
273 /* wmi_diag events when received */
274 uint32_t g_wmi_diag_rx_event_buf_idx = 0;
275 struct wmi_event_debug
276 wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY];
277 #endif
278 
279 #define WMI_MGMT_COMMAND_RECORD(h, a, b) {                              \
280 	if (wmi_mgmt_tx_log_max_entry <=                                   \
281 		*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \
282 		*(h->log_info.wmi_mgmt_command_log_buf_info.		\
283 				p_buf_tail_idx) = 0;			\
284 	((struct wmi_command_debug *)h->log_info.                       \
285 		 wmi_mgmt_command_log_buf_info.buf)                     \
286 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
287 			command = a;                                    \
288 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.          \
289 				wmi_mgmt_command_log_buf_info.buf)      \
290 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
291 		data, b,                                                \
292 		wmi_record_max_length);                                	\
293 	((struct wmi_command_debug *)h->log_info.                       \
294 		 wmi_mgmt_command_log_buf_info.buf)                     \
295 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
296 			time =        qdf_get_log_timestamp();          \
297 	(*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\
298 	h->log_info.wmi_mgmt_command_log_buf_info.length++;             \
299 }
300 
301 #define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) {			\
302 	if (wmi_mgmt_tx_cmpl_log_max_entry <=				\
303 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
304 			p_buf_tail_idx))				\
305 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
306 			p_buf_tail_idx) = 0;				\
307 	((struct wmi_command_debug *)h->log_info.			\
308 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
309 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
310 				p_buf_tail_idx)].command = a;		\
311 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
312 				wmi_mgmt_command_tx_cmp_log_buf_info.buf)\
313 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
314 			p_buf_tail_idx)].data, b,			\
315 			wmi_record_max_length);				\
316 	((struct wmi_command_debug *)h->log_info.			\
317 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
318 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
319 				p_buf_tail_idx)].time =			\
320 		qdf_get_log_timestamp();				\
321 	(*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.		\
322 			p_buf_tail_idx))++;				\
323 	h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++;	\
324 }
325 
326 #define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do {				\
327 	if (wmi_mgmt_rx_log_max_entry <=				\
328 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\
329 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\
330 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
331 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\
332 					.event = a;			\
333 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
334 				wmi_mgmt_event_log_buf_info.buf)	\
335 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
336 			data, b, wmi_record_max_length);		\
337 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
338 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
339 			time = qdf_get_log_timestamp();			\
340 	(*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++;	\
341 	h->log_info.wmi_mgmt_event_log_buf_info.length++;		\
342 } while (0);
343 
344 #define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do {                             \
345 	if (wmi_diag_log_max_entry <=                                   \
346 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\
347 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\
348 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
349 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\
350 					.event = a;                     \
351 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.            \
352 				wmi_diag_event_log_buf_info.buf)        \
353 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
354 			data, b, wmi_record_max_length);                \
355 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
356 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
357 			time = qdf_get_log_timestamp();                 \
358 	(*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++;  \
359 	h->log_info.wmi_diag_event_log_buf_info.length++;               \
360 } while (0);
361 
362 /* These are defined to made it as module param, which can be configured */
363 /* WMI Commands */
364 uint32_t wmi_cmd_log_max_entry = WMI_CMD_DEBUG_MAX_ENTRY;
365 uint32_t wmi_cmd_cmpl_log_max_entry = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
366 /* WMI Events */
367 uint32_t wmi_event_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY;
368 /* WMI MGMT Tx */
369 uint32_t wmi_mgmt_tx_log_max_entry = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
370 uint32_t wmi_mgmt_tx_cmpl_log_max_entry = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
371 /* WMI MGMT Rx */
372 uint32_t wmi_mgmt_rx_log_max_entry = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
373 /* WMI Diag Event */
374 uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
375 /* WMI capture size */
376 uint32_t wmi_record_max_length = WMI_DEBUG_ENTRY_MAX_LENGTH;
377 uint32_t wmi_display_size = 100;
378 
379 /**
380  * wmi_log_init() - Initialize WMI event logging
381  * @wmi_handle: WMI handle.
382  *
383  * Return: Initialization status
384  */
385 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
386 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
387 {
388 	struct wmi_log_buf_t *cmd_log_buf =
389 			&wmi_handle->log_info.wmi_command_log_buf_info;
390 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
391 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
392 
393 	struct wmi_log_buf_t *event_log_buf =
394 			&wmi_handle->log_info.wmi_event_log_buf_info;
395 	struct wmi_log_buf_t *rx_event_log_buf =
396 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
397 
398 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
399 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
400 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
401 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
402 	struct wmi_log_buf_t *mgmt_event_log_buf =
403 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
404 	struct wmi_log_buf_t *diag_event_log_buf =
405 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
406 
407 	/* WMI commands */
408 	cmd_log_buf->length = 0;
409 	cmd_log_buf->buf_tail_idx = 0;
410 	cmd_log_buf->buf = wmi_command_log_buffer;
411 	cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx;
412 	cmd_log_buf->size = WMI_CMD_DEBUG_MAX_ENTRY;
413 
414 	/* WMI commands TX completed */
415 	cmd_tx_cmpl_log_buf->length = 0;
416 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
417 	cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer;
418 	cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx;
419 	cmd_tx_cmpl_log_buf->size = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
420 
421 	/* WMI events when processed */
422 	event_log_buf->length = 0;
423 	event_log_buf->buf_tail_idx = 0;
424 	event_log_buf->buf = wmi_event_log_buffer;
425 	event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx;
426 	event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
427 
428 	/* WMI events when queued */
429 	rx_event_log_buf->length = 0;
430 	rx_event_log_buf->buf_tail_idx = 0;
431 	rx_event_log_buf->buf = wmi_rx_event_log_buffer;
432 	rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx;
433 	rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
434 
435 	/* WMI Management commands */
436 	mgmt_cmd_log_buf->length = 0;
437 	mgmt_cmd_log_buf->buf_tail_idx = 0;
438 	mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer;
439 	mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx;
440 	mgmt_cmd_log_buf->size = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
441 
442 	/* WMI Management commands Tx completed*/
443 	mgmt_cmd_tx_cmp_log_buf->length = 0;
444 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
445 	mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer;
446 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
447 		&g_wmi_mgmt_command_tx_cmp_buf_idx;
448 	mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
449 
450 	/* WMI Management events when received */
451 	mgmt_event_log_buf->length = 0;
452 	mgmt_event_log_buf->buf_tail_idx = 0;
453 	mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer;
454 	mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx;
455 	mgmt_event_log_buf->size = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
456 
457 	/* WMI diag events when received */
458 	diag_event_log_buf->length = 0;
459 	diag_event_log_buf->buf_tail_idx = 0;
460 	diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer;
461 	diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx;
462 	diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
463 
464 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
465 	wmi_handle->log_info.wmi_logging_enable = 1;
466 
467 	return QDF_STATUS_SUCCESS;
468 }
469 #else
470 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
471 {
472 	struct wmi_log_buf_t *cmd_log_buf =
473 			&wmi_handle->log_info.wmi_command_log_buf_info;
474 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
475 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
476 
477 	struct wmi_log_buf_t *event_log_buf =
478 			&wmi_handle->log_info.wmi_event_log_buf_info;
479 	struct wmi_log_buf_t *rx_event_log_buf =
480 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
481 
482 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
483 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
484 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
485 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
486 	struct wmi_log_buf_t *mgmt_event_log_buf =
487 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
488 	struct wmi_log_buf_t *diag_event_log_buf =
489 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
490 
491 	wmi_handle->log_info.wmi_logging_enable = 0;
492 
493 	/* WMI commands */
494 	cmd_log_buf->length = 0;
495 	cmd_log_buf->buf_tail_idx = 0;
496 	cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
497 		wmi_cmd_log_max_entry * sizeof(struct wmi_command_debug));
498 	cmd_log_buf->size = wmi_cmd_log_max_entry;
499 
500 	if (!cmd_log_buf->buf)
501 		return QDF_STATUS_E_NOMEM;
502 
503 	cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx;
504 
505 	/* WMI commands TX completed */
506 	cmd_tx_cmpl_log_buf->length = 0;
507 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
508 	cmd_tx_cmpl_log_buf->buf = (struct wmi_command_cmp_debug *) qdf_mem_malloc(
509 		wmi_cmd_cmpl_log_max_entry * sizeof(struct wmi_command_cmp_debug));
510 	cmd_tx_cmpl_log_buf->size = wmi_cmd_cmpl_log_max_entry;
511 
512 	if (!cmd_tx_cmpl_log_buf->buf)
513 		return QDF_STATUS_E_NOMEM;
514 
515 	cmd_tx_cmpl_log_buf->p_buf_tail_idx =
516 		&cmd_tx_cmpl_log_buf->buf_tail_idx;
517 
518 	/* WMI events when processed */
519 	event_log_buf->length = 0;
520 	event_log_buf->buf_tail_idx = 0;
521 	event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
522 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
523 	event_log_buf->size = wmi_event_log_max_entry;
524 
525 	if (!event_log_buf->buf)
526 		return QDF_STATUS_E_NOMEM;
527 
528 	event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx;
529 
530 	/* WMI events when queued */
531 	rx_event_log_buf->length = 0;
532 	rx_event_log_buf->buf_tail_idx = 0;
533 	rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
534 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
535 	rx_event_log_buf->size = wmi_event_log_max_entry;
536 
537 	if (!rx_event_log_buf->buf)
538 		return QDF_STATUS_E_NOMEM;
539 
540 	rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx;
541 
542 	/* WMI Management commands */
543 	mgmt_cmd_log_buf->length = 0;
544 	mgmt_cmd_log_buf->buf_tail_idx = 0;
545 	mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
546 		wmi_mgmt_tx_log_max_entry * sizeof(struct wmi_command_debug));
547 	mgmt_cmd_log_buf->size = wmi_mgmt_tx_log_max_entry;
548 
549 	if (!mgmt_cmd_log_buf->buf)
550 		return QDF_STATUS_E_NOMEM;
551 
552 	mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx;
553 
554 	/* WMI Management commands Tx completed*/
555 	mgmt_cmd_tx_cmp_log_buf->length = 0;
556 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
557 	mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *)
558 		qdf_mem_malloc(
559 		wmi_mgmt_tx_cmpl_log_max_entry *
560 		sizeof(struct wmi_command_debug));
561 	mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_tx_cmpl_log_max_entry;
562 
563 	if (!mgmt_cmd_tx_cmp_log_buf->buf)
564 		return QDF_STATUS_E_NOMEM;
565 
566 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
567 		&mgmt_cmd_tx_cmp_log_buf->buf_tail_idx;
568 
569 	/* WMI Management events when received */
570 	mgmt_event_log_buf->length = 0;
571 	mgmt_event_log_buf->buf_tail_idx = 0;
572 
573 	mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
574 		wmi_mgmt_rx_log_max_entry *
575 		sizeof(struct wmi_event_debug));
576 	mgmt_event_log_buf->size = wmi_mgmt_rx_log_max_entry;
577 
578 	if (!mgmt_event_log_buf->buf)
579 		return QDF_STATUS_E_NOMEM;
580 
581 	mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx;
582 
583 	/* WMI diag events when received */
584 	diag_event_log_buf->length = 0;
585 	diag_event_log_buf->buf_tail_idx = 0;
586 
587 	diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
588 		wmi_diag_log_max_entry *
589 		sizeof(struct wmi_event_debug));
590 	diag_event_log_buf->size = wmi_diag_log_max_entry;
591 
592 	if (!diag_event_log_buf->buf)
593 		return QDF_STATUS_E_NOMEM;
594 
595 	diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx;
596 
597 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
598 	wmi_handle->log_info.wmi_logging_enable = 1;
599 
600 	wmi_filtered_logging_init(wmi_handle);
601 
602 	return QDF_STATUS_SUCCESS;
603 }
604 #endif
605 
606 /**
607  * wmi_log_buffer_free() - Free all dynamic allocated buffer memory for
608  * event logging
609  * @wmi_handle: WMI handle.
610  *
611  * Return: None
612  */
613 #ifdef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
614 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
615 {
616 	wmi_filtered_logging_free(wmi_handle);
617 
618 	if (wmi_handle->log_info.wmi_command_log_buf_info.buf)
619 		qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf);
620 	if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf)
621 		qdf_mem_free(
622 		wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf);
623 	if (wmi_handle->log_info.wmi_event_log_buf_info.buf)
624 		qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf);
625 	if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf)
626 		qdf_mem_free(
627 			wmi_handle->log_info.wmi_rx_event_log_buf_info.buf);
628 	if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf)
629 		qdf_mem_free(
630 			wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf);
631 	if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf)
632 		qdf_mem_free(
633 		wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf);
634 	if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf)
635 		qdf_mem_free(
636 			wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf);
637 	if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf)
638 		qdf_mem_free(
639 			wmi_handle->log_info.wmi_diag_event_log_buf_info.buf);
640 	wmi_handle->log_info.wmi_logging_enable = 0;
641 
642 	qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock);
643 }
644 #else
645 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
646 {
647 	/* Do Nothing */
648 }
649 #endif
650 
651 /**
652  * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer
653  * @log_buffer: the command log buffer metadata of the buffer to print
654  * @count: the maximum number of entries to print
655  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
656  * @print_priv: any data required by the print method, e.g. a file handle
657  *
658  * Return: None
659  */
660 static void
661 wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
662 			 qdf_abstract_print *print, void *print_priv)
663 {
664 	static const int data_len =
665 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
666 	char str[128];
667 	uint32_t idx;
668 
669 	if (count > log_buffer->size)
670 		count = log_buffer->size;
671 	if (count > log_buffer->length)
672 		count = log_buffer->length;
673 
674 	/* subtract count from index, and wrap if necessary */
675 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
676 	idx %= log_buffer->size;
677 
678 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
679 	while (count) {
680 		struct wmi_command_debug *cmd_log = (struct wmi_command_debug *)
681 			&((struct wmi_command_debug *)log_buffer->buf)[idx];
682 		uint64_t secs, usecs;
683 		int len = 0;
684 		int i;
685 
686 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
687 		len += scnprintf(str + len, sizeof(str) - len,
688 				 "% 8lld.%06lld    %6u (0x%06x)    ",
689 				 secs, usecs,
690 				 cmd_log->command, cmd_log->command);
691 		for (i = 0; i < data_len; ++i) {
692 			len += scnprintf(str + len, sizeof(str) - len,
693 					 "0x%08x ", cmd_log->data[i]);
694 		}
695 
696 		print(print_priv, str);
697 
698 		--count;
699 		++idx;
700 		if (idx >= log_buffer->size)
701 			idx = 0;
702 	}
703 }
704 
705 /**
706  * wmi_print_cmd_cmp_log_buffer() - wmi command completion log printer
707  * @log_buffer: the command completion log buffer metadata of the buffer to print
708  * @count: the maximum number of entries to print
709  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
710  * @print_priv: any data required by the print method, e.g. a file handle
711  *
712  * Return: None
713  */
714 static void
715 wmi_print_cmd_cmp_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
716 			 qdf_abstract_print *print, void *print_priv)
717 {
718 	static const int data_len =
719 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
720 	char str[128];
721 	uint32_t idx;
722 
723 	if (count > log_buffer->size)
724 		count = log_buffer->size;
725 	if (count > log_buffer->length)
726 		count = log_buffer->length;
727 
728 	/* subtract count from index, and wrap if necessary */
729 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
730 	idx %= log_buffer->size;
731 
732 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
733 	while (count) {
734 		struct wmi_command_cmp_debug *cmd_log = (struct wmi_command_cmp_debug *)
735 			&((struct wmi_command_cmp_debug *)log_buffer->buf)[idx];
736 		uint64_t secs, usecs;
737 		int len = 0;
738 		int i;
739 
740 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
741 		len += scnprintf(str + len, sizeof(str) - len,
742 				 "% 8lld.%06lld    %6u (0x%06x)    ",
743 				 secs, usecs,
744 				 cmd_log->command, cmd_log->command);
745 		for (i = 0; i < data_len; ++i) {
746 			len += scnprintf(str + len, sizeof(str) - len,
747 					 "0x%08x ", cmd_log->data[i]);
748 		}
749 
750 		print(print_priv, str);
751 
752 		--count;
753 		++idx;
754 		if (idx >= log_buffer->size)
755 			idx = 0;
756 	}
757 }
758 
759 /**
760  * wmi_print_event_log_buffer() - an output agnostic wmi event log printer
761  * @log_buffer: the event log buffer metadata of the buffer to print
762  * @count: the maximum number of entries to print
763  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
764  * @print_priv: any data required by the print method, e.g. a file handle
765  *
766  * Return: None
767  */
768 static void
769 wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
770 			   qdf_abstract_print *print, void *print_priv)
771 {
772 	static const int data_len =
773 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
774 	char str[128];
775 	uint32_t idx;
776 
777 	if (count > log_buffer->size)
778 		count = log_buffer->size;
779 	if (count > log_buffer->length)
780 		count = log_buffer->length;
781 
782 	/* subtract count from index, and wrap if necessary */
783 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
784 	idx %= log_buffer->size;
785 
786 	print(print_priv, "Time (seconds)      Event Id             Payload");
787 	while (count) {
788 		struct wmi_event_debug *event_log = (struct wmi_event_debug *)
789 			&((struct wmi_event_debug *)log_buffer->buf)[idx];
790 		uint64_t secs, usecs;
791 		int len = 0;
792 		int i;
793 
794 		qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs);
795 		len += scnprintf(str + len, sizeof(str) - len,
796 				 "% 8lld.%06lld    %6u (0x%06x)    ",
797 				 secs, usecs,
798 				 event_log->event, event_log->event);
799 		for (i = 0; i < data_len; ++i) {
800 			len += scnprintf(str + len, sizeof(str) - len,
801 					 "0x%08x ", event_log->data[i]);
802 		}
803 
804 		print(print_priv, str);
805 
806 		--count;
807 		++idx;
808 		if (idx >= log_buffer->size)
809 			idx = 0;
810 	}
811 }
812 
813 inline void
814 wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count,
815 		  qdf_abstract_print *print, void *print_priv)
816 {
817 	wmi_print_cmd_log_buffer(
818 		&wmi->log_info.wmi_command_log_buf_info,
819 		count, print, print_priv);
820 }
821 
822 inline void
823 wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
824 			 qdf_abstract_print *print, void *print_priv)
825 {
826 	wmi_print_cmd_cmp_log_buffer(
827 		&wmi->log_info.wmi_command_tx_cmp_log_buf_info,
828 		count, print, print_priv);
829 }
830 
831 inline void
832 wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count,
833 		       qdf_abstract_print *print, void *print_priv)
834 {
835 	wmi_print_cmd_log_buffer(
836 		&wmi->log_info.wmi_mgmt_command_log_buf_info,
837 		count, print, print_priv);
838 }
839 
840 inline void
841 wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
842 			      qdf_abstract_print *print, void *print_priv)
843 {
844 	wmi_print_cmd_log_buffer(
845 		&wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info,
846 		count, print, print_priv);
847 }
848 
849 inline void
850 wmi_print_event_log(wmi_unified_t wmi, uint32_t count,
851 		    qdf_abstract_print *print, void *print_priv)
852 {
853 	wmi_print_event_log_buffer(
854 		&wmi->log_info.wmi_event_log_buf_info,
855 		count, print, print_priv);
856 }
857 
858 inline void
859 wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count,
860 		       qdf_abstract_print *print, void *print_priv)
861 {
862 	wmi_print_event_log_buffer(
863 		&wmi->log_info.wmi_rx_event_log_buf_info,
864 		count, print, print_priv);
865 }
866 
867 inline void
868 wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count,
869 			 qdf_abstract_print *print, void *print_priv)
870 {
871 	wmi_print_event_log_buffer(
872 		&wmi->log_info.wmi_mgmt_event_log_buf_info,
873 		count, print, print_priv);
874 }
875 
876 
877 /* debugfs routines*/
878 
879 /**
880  * debug_wmi_##func_base##_show() - debugfs functions to display content of
881  * command and event buffers. Macro uses max buffer length to display
882  * buffer when it is wraparound.
883  *
884  * @m: debugfs handler to access wmi_handle
885  * @v: Variable arguments (not used)
886  *
887  * Return: Length of characters printed
888  */
889 #define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
890 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
891 						void *v)		\
892 	{								\
893 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
894 		struct wmi_log_buf_t *wmi_log =				\
895 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
896 		int pos, nread, outlen;					\
897 		int i;							\
898 		uint64_t secs, usecs;					\
899 									\
900 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
901 		if (!wmi_log->length) {					\
902 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
903 			return wmi_bp_seq_printf(m,			\
904 			"no elements to read from ring buffer!\n");	\
905 		}							\
906 									\
907 		if (wmi_log->length <= wmi_ring_size)			\
908 			nread = wmi_log->length;			\
909 		else							\
910 			nread = wmi_ring_size;				\
911 									\
912 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
913 			/* tail can be 0 after wrap-around */		\
914 			pos = wmi_ring_size - 1;			\
915 		else							\
916 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
917 									\
918 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
919 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
920 		while (nread--) {					\
921 			struct wmi_record_type *wmi_record;		\
922 									\
923 			wmi_record = (struct wmi_record_type *)	\
924 			&(((struct wmi_record_type *)wmi_log->buf)[pos]);\
925 			outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n",	\
926 				(wmi_record->command));			\
927 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
928 				&usecs);				\
929 			outlen +=					\
930 			wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\
931 				secs, usecs);				\
932 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
933 			for (i = 0; i < (wmi_record_max_length/		\
934 					sizeof(uint32_t)); i++)		\
935 				outlen += wmi_bp_seq_printf(m, "%x ",	\
936 					wmi_record->data[i]);		\
937 			outlen += wmi_bp_seq_printf(m, "\n");		\
938 									\
939 			if (pos == 0)					\
940 				pos = wmi_ring_size - 1;		\
941 			else						\
942 				pos--;					\
943 		}							\
944 		return outlen;						\
945 	}								\
946 
947 #define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size)	\
948 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
949 						void *v)		\
950 	{								\
951 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
952 		struct wmi_log_buf_t *wmi_log =				\
953 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
954 		int pos, nread, outlen;					\
955 		int i;							\
956 		uint64_t secs, usecs;					\
957 									\
958 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
959 		if (!wmi_log->length) {					\
960 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
961 			return wmi_bp_seq_printf(m,			\
962 			"no elements to read from ring buffer!\n");	\
963 		}							\
964 									\
965 		if (wmi_log->length <= wmi_ring_size)			\
966 			nread = wmi_log->length;			\
967 		else							\
968 			nread = wmi_ring_size;				\
969 									\
970 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
971 			/* tail can be 0 after wrap-around */		\
972 			pos = wmi_ring_size - 1;			\
973 		else							\
974 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
975 									\
976 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
977 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
978 		while (nread--) {					\
979 			struct wmi_event_debug *wmi_record;		\
980 									\
981 			wmi_record = (struct wmi_event_debug *)		\
982 			&(((struct wmi_event_debug *)wmi_log->buf)[pos]);\
983 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
984 				&usecs);				\
985 			outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\
986 				(wmi_record->event));			\
987 			outlen +=					\
988 			wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\
989 				secs, usecs);				\
990 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
991 			for (i = 0; i < (wmi_record_max_length/		\
992 					sizeof(uint32_t)); i++)		\
993 				outlen += wmi_bp_seq_printf(m, "%x ",	\
994 					wmi_record->data[i]);		\
995 			outlen += wmi_bp_seq_printf(m, "\n");		\
996 									\
997 			if (pos == 0)					\
998 				pos = wmi_ring_size - 1;		\
999 			else						\
1000 				pos--;					\
1001 		}							\
1002 		return outlen;						\
1003 	}
1004 
1005 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size,
1006 				  wmi_command_debug);
1007 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size,
1008 				  wmi_command_cmp_debug);
1009 GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size);
1010 GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size);
1011 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size,
1012 				  wmi_command_debug);
1013 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log,
1014 					wmi_display_size,
1015 					wmi_command_debug);
1016 GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size);
1017 
1018 /**
1019  * debug_wmi_enable_show() - debugfs functions to display enable state of
1020  * wmi logging feature.
1021  *
1022  * @m: debugfs handler to access wmi_handle
1023  * @v: Variable arguments (not used)
1024  *
1025  * Return: always 1
1026  */
1027 static int debug_wmi_enable_show(struct seq_file *m, void *v)
1028 {
1029 	wmi_unified_t wmi_handle = (wmi_unified_t) m->private;
1030 
1031 	return wmi_bp_seq_printf(m, "%d\n",
1032 			wmi_handle->log_info.wmi_logging_enable);
1033 }
1034 
1035 /**
1036  * debug_wmi_log_size_show() - debugfs functions to display configured size of
1037  * wmi logging command/event buffer and management command/event buffer.
1038  *
1039  * @m: debugfs handler to access wmi_handle
1040  * @v: Variable arguments (not used)
1041  *
1042  * Return: Length of characters printed
1043  */
1044 static int debug_wmi_log_size_show(struct seq_file *m, void *v)
1045 {
1046 
1047 	wmi_bp_seq_printf(m, "WMI command/cmpl log max size:%d/%d\n",
1048 			  wmi_cmd_log_max_entry, wmi_cmd_cmpl_log_max_entry);
1049 	wmi_bp_seq_printf(m, "WMI management Tx/cmpl log max size:%d/%d\n",
1050 			  wmi_mgmt_tx_log_max_entry,
1051 			  wmi_mgmt_tx_cmpl_log_max_entry);
1052 	wmi_bp_seq_printf(m, "WMI event log max size:%d\n",
1053 			  wmi_event_log_max_entry);
1054 	wmi_bp_seq_printf(m, "WMI management Rx log max size:%d\n",
1055 			  wmi_mgmt_rx_log_max_entry);
1056 	return wmi_bp_seq_printf(m,
1057 				 "WMI diag log max size:%d\n",
1058 				 wmi_diag_log_max_entry);
1059 }
1060 
1061 /**
1062  * debug_wmi_##func_base##_write() - debugfs functions to clear
1063  * wmi logging command/event buffer and management command/event buffer.
1064  *
1065  * @file: file handler to access wmi_handle
1066  * @buf: received data buffer
1067  * @count: length of received buffer
1068  * @ppos: Not used
1069  *
1070  * Return: count
1071  */
1072 #define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
1073 	static ssize_t debug_wmi_##func_base##_write(struct file *file,	\
1074 				const char __user *buf,			\
1075 				size_t count, loff_t *ppos)		\
1076 	{								\
1077 		int k, ret;						\
1078 		wmi_unified_t wmi_handle =				\
1079 			((struct seq_file *)file->private_data)->private;\
1080 		struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info.	\
1081 				wmi_##func_base##_buf_info;		\
1082 		char locbuf[50];					\
1083 									\
1084 		if ((!buf) || (count > 50))				\
1085 			return -EFAULT;					\
1086 									\
1087 		if (copy_from_user(locbuf, buf, count))			\
1088 			return -EFAULT;					\
1089 									\
1090 		ret = sscanf(locbuf, "%d", &k);				\
1091 		if ((ret != 1) || (k != 0)) {                           \
1092 			wmi_err("Wrong input, echo 0 to clear the wmi buffer");\
1093 			return -EINVAL;					\
1094 		}							\
1095 									\
1096 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
1097 		qdf_mem_zero(wmi_log->buf, wmi_ring_size *		\
1098 				sizeof(struct wmi_record_type));	\
1099 		wmi_log->length = 0;					\
1100 		*(wmi_log->p_buf_tail_idx) = 0;				\
1101 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1102 									\
1103 		return count;						\
1104 	}
1105 
1106 GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_cmd_log_max_entry,
1107 			   wmi_command_debug);
1108 GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_cmd_cmpl_log_max_entry,
1109 			   wmi_command_cmp_debug);
1110 GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_event_log_max_entry,
1111 			   wmi_event_debug);
1112 GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_event_log_max_entry,
1113 			   wmi_event_debug);
1114 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_tx_log_max_entry,
1115 			   wmi_command_debug);
1116 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log,
1117 			   wmi_mgmt_tx_cmpl_log_max_entry, wmi_command_debug);
1118 GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_rx_log_max_entry,
1119 			   wmi_event_debug);
1120 
1121 /**
1122  * debug_wmi_enable_write() - debugfs functions to enable/disable
1123  * wmi logging feature.
1124  *
1125  * @file: file handler to access wmi_handle
1126  * @buf: received data buffer
1127  * @count: length of received buffer
1128  * @ppos: Not used
1129  *
1130  * Return: count
1131  */
1132 static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf,
1133 					size_t count, loff_t *ppos)
1134 {
1135 	wmi_unified_t wmi_handle =
1136 		((struct seq_file *)file->private_data)->private;
1137 	int k, ret;
1138 	char locbuf[50];
1139 
1140 	if ((!buf) || (count > 50))
1141 		return -EFAULT;
1142 
1143 	if (copy_from_user(locbuf, buf, count))
1144 		return -EFAULT;
1145 
1146 	ret = sscanf(locbuf, "%d", &k);
1147 	if ((ret != 1) || ((k != 0) && (k != 1)))
1148 		return -EINVAL;
1149 
1150 	wmi_handle->log_info.wmi_logging_enable = k;
1151 	return count;
1152 }
1153 
1154 /**
1155  * debug_wmi_log_size_write() - reserved.
1156  *
1157  * @file: file handler to access wmi_handle
1158  * @buf: received data buffer
1159  * @count: length of received buffer
1160  * @ppos: Not used
1161  *
1162  * Return: count
1163  */
1164 static ssize_t debug_wmi_log_size_write(struct file *file,
1165 		const char __user *buf, size_t count, loff_t *ppos)
1166 {
1167 	return -EINVAL;
1168 }
1169 
1170 /* Structure to maintain debug information */
1171 struct wmi_debugfs_info {
1172 	const char *name;
1173 	const struct file_operations *ops;
1174 };
1175 
1176 #define DEBUG_FOO(func_base) { .name = #func_base,			\
1177 	.ops = &debug_##func_base##_ops }
1178 
1179 /**
1180  * debug_##func_base##_open() - Open debugfs entry for respective command
1181  * and event buffer.
1182  *
1183  * @inode: node for debug dir entry
1184  * @file: file handler
1185  *
1186  * Return: open status
1187  */
1188 #define GENERATE_DEBUG_STRUCTS(func_base)				\
1189 	static int debug_##func_base##_open(struct inode *inode,	\
1190 						struct file *file)	\
1191 	{								\
1192 		return single_open(file, debug_##func_base##_show,	\
1193 				inode->i_private);			\
1194 	}								\
1195 									\
1196 									\
1197 	static struct file_operations debug_##func_base##_ops = {	\
1198 		.open		= debug_##func_base##_open,		\
1199 		.read		= seq_read,				\
1200 		.llseek		= seq_lseek,				\
1201 		.write		= debug_##func_base##_write,		\
1202 		.release	= single_release,			\
1203 	};
1204 
1205 GENERATE_DEBUG_STRUCTS(wmi_command_log);
1206 GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log);
1207 GENERATE_DEBUG_STRUCTS(wmi_event_log);
1208 GENERATE_DEBUG_STRUCTS(wmi_rx_event_log);
1209 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log);
1210 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log);
1211 GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log);
1212 GENERATE_DEBUG_STRUCTS(wmi_enable);
1213 GENERATE_DEBUG_STRUCTS(wmi_log_size);
1214 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1215 GENERATE_DEBUG_STRUCTS(filtered_wmi_cmds);
1216 GENERATE_DEBUG_STRUCTS(filtered_wmi_evts);
1217 GENERATE_DEBUG_STRUCTS(wmi_filtered_command_log);
1218 GENERATE_DEBUG_STRUCTS(wmi_filtered_event_log);
1219 #endif
1220 
1221 struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = {
1222 	DEBUG_FOO(wmi_command_log),
1223 	DEBUG_FOO(wmi_command_tx_cmp_log),
1224 	DEBUG_FOO(wmi_event_log),
1225 	DEBUG_FOO(wmi_rx_event_log),
1226 	DEBUG_FOO(wmi_mgmt_command_log),
1227 	DEBUG_FOO(wmi_mgmt_command_tx_cmp_log),
1228 	DEBUG_FOO(wmi_mgmt_event_log),
1229 	DEBUG_FOO(wmi_enable),
1230 	DEBUG_FOO(wmi_log_size),
1231 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1232 	DEBUG_FOO(filtered_wmi_cmds),
1233 	DEBUG_FOO(filtered_wmi_evts),
1234 	DEBUG_FOO(wmi_filtered_command_log),
1235 	DEBUG_FOO(wmi_filtered_event_log),
1236 #endif
1237 };
1238 
1239 /**
1240  * wmi_debugfs_create() - Create debug_fs entry for wmi logging.
1241  *
1242  * @wmi_handle: wmi handle
1243  * @par_entry: debug directory entry
1244  * @id: Index to debug info data array
1245  *
1246  * Return: none
1247  */
1248 static void wmi_debugfs_create(wmi_unified_t wmi_handle,
1249 			       struct dentry *par_entry)
1250 {
1251 	int i;
1252 
1253 	if (!par_entry)
1254 		goto out;
1255 
1256 	for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1257 		wmi_handle->debugfs_de[i] = qdf_debugfs_create_entry(
1258 						wmi_debugfs_infos[i].name,
1259 						WMI_INFOS_DBG_FILE_PERM,
1260 						par_entry,
1261 						wmi_handle,
1262 						wmi_debugfs_infos[i].ops);
1263 
1264 		if (!wmi_handle->debugfs_de[i]) {
1265 			wmi_err("debug Entry creation failed!");
1266 			goto out;
1267 		}
1268 	}
1269 
1270 	return;
1271 
1272 out:
1273 	wmi_err("debug Entry creation failed!");
1274 	wmi_log_buffer_free(wmi_handle);
1275 	return;
1276 }
1277 
1278 /**
1279  * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
1280  * @wmi_handle: wmi handle
1281  * @dentry: debugfs directory entry
1282  * @id: Index to debug info data array
1283  *
1284  * Return: none
1285  */
1286 static void wmi_debugfs_remove(wmi_unified_t wmi_handle)
1287 {
1288 	int i;
1289 	struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir;
1290 
1291 	if (dentry) {
1292 		for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1293 			if (wmi_handle->debugfs_de[i])
1294 				wmi_handle->debugfs_de[i] = NULL;
1295 		}
1296 	}
1297 
1298 	if (dentry)
1299 		qdf_debugfs_remove_dir_recursive(dentry);
1300 }
1301 
1302 /**
1303  * wmi_debugfs_init() - debugfs functions to create debugfs directory and to
1304  * create debugfs enteries.
1305  *
1306  * @h: wmi handler
1307  *
1308  * Return: init status
1309  */
1310 static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx)
1311 {
1312 	char buf[32];
1313 
1314 	snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u",
1315 		 wmi_handle->soc->soc_idx, pdev_idx);
1316 
1317 	wmi_handle->log_info.wmi_log_debugfs_dir =
1318 		qdf_debugfs_create_dir(buf, NULL);
1319 
1320 	if (!wmi_handle->log_info.wmi_log_debugfs_dir) {
1321 		wmi_err("error while creating debugfs dir for %s", buf);
1322 		return QDF_STATUS_E_FAILURE;
1323 	}
1324 	wmi_debugfs_create(wmi_handle,
1325 			   wmi_handle->log_info.wmi_log_debugfs_dir);
1326 
1327 	return QDF_STATUS_SUCCESS;
1328 }
1329 
1330 /**
1331  * wmi_mgmt_cmd_record() - Wrapper function for mgmt command logging macro
1332  *
1333  * @wmi_handle: wmi handle
1334  * @cmd: mgmt command
1335  * @header: pointer to 802.11 header
1336  * @vdev_id: vdev id
1337  * @chanfreq: channel frequency
1338  *
1339  * Return: none
1340  */
1341 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1342 			void *header, uint32_t vdev_id, uint32_t chanfreq)
1343 {
1344 
1345 	uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE];
1346 
1347 	data[0] = ((struct wmi_command_header *)header)->type;
1348 	data[1] = ((struct wmi_command_header *)header)->sub_type;
1349 	data[2] = vdev_id;
1350 	data[3] = chanfreq;
1351 
1352 	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
1353 
1354 	WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data);
1355 	wmi_specific_cmd_record(wmi_handle, cmd, (uint8_t *)data);
1356 	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
1357 }
1358 #else
1359 /**
1360  * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
1361  * @wmi_handle: wmi handle
1362  * @dentry: debugfs directory entry
1363  * @id: Index to debug info data array
1364  *
1365  * Return: none
1366  */
1367 static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { }
1368 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1369 			void *header, uint32_t vdev_id, uint32_t chanfreq) { }
1370 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { }
1371 static void wmi_minidump_detach(struct wmi_unified *wmi_handle) { }
1372 static void wmi_minidump_attach(struct wmi_unified *wmi_handle) { }
1373 #endif /*WMI_INTERFACE_EVENT_LOGGING */
1374 qdf_export_symbol(wmi_mgmt_cmd_record);
1375 
1376 #ifdef WMI_EXT_DBG
1377 
1378 /**
1379  * wmi_ext_dbg_msg_enqueue() - enqueue wmi message
1380  * @wmi_handle: wmi handler
1381  *
1382  * Return: size of wmi message queue after enqueue
1383  */
1384 static uint32_t wmi_ext_dbg_msg_enqueue(struct wmi_unified *wmi_handle,
1385 					struct wmi_ext_dbg_msg *msg)
1386 {
1387 	uint32_t list_size;
1388 
1389 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1390 	qdf_list_insert_back_size(&wmi_handle->wmi_ext_dbg_msg_queue,
1391 				  &msg->node, &list_size);
1392 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1393 
1394 	return list_size;
1395 }
1396 
1397 /**
1398  * wmi_ext_dbg_msg_dequeue() - dequeue wmi message
1399  * @wmi_handle: wmi handler
1400  *
1401  * Return: wmi msg on success else NULL
1402  */
1403 static struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_dequeue(struct wmi_unified
1404 						       *wmi_handle)
1405 {
1406 	qdf_list_node_t *list_node = NULL;
1407 
1408 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1409 	qdf_list_remove_front(&wmi_handle->wmi_ext_dbg_msg_queue, &list_node);
1410 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1411 
1412 	if (!list_node)
1413 		return NULL;
1414 
1415 	return qdf_container_of(list_node, struct wmi_ext_dbg_msg, node);
1416 }
1417 
1418 /**
1419  * wmi_ext_dbg_msg_record() - record wmi messages
1420  * @wmi_handle: wmi handler
1421  * @buf: wmi message buffer
1422  * @len: wmi message length
1423  * @type: wmi message type
1424  *
1425  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1426  */
1427 static QDF_STATUS wmi_ext_dbg_msg_record(struct wmi_unified *wmi_handle,
1428 					 uint8_t *buf, uint32_t len,
1429 					 enum WMI_MSG_TYPE type)
1430 {
1431 	struct wmi_ext_dbg_msg *msg;
1432 	uint32_t list_size;
1433 
1434 	msg = wmi_ext_dbg_msg_get(len);
1435 	if (!msg)
1436 		return QDF_STATUS_E_NOMEM;
1437 
1438 	msg->len = len;
1439 	msg->type = type;
1440 	qdf_mem_copy(msg->buf, buf, len);
1441 	msg->ts = qdf_get_log_timestamp();
1442 	list_size = wmi_ext_dbg_msg_enqueue(wmi_handle, msg);
1443 
1444 	if (list_size >= wmi_handle->wmi_ext_dbg_msg_queue_size) {
1445 		msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1446 		wmi_ext_dbg_msg_put(msg);
1447 	}
1448 
1449 	return QDF_STATUS_SUCCESS;
1450 }
1451 
1452 /**
1453  * wmi_ext_dbg_msg_cmd_record() - record wmi command messages
1454  * @wmi_handle: wmi handler
1455  * @buf: wmi command buffer
1456  * @len: wmi command message length
1457  *
1458  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1459  */
1460 static QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified *wmi_handle,
1461 					     uint8_t *buf, uint32_t len)
1462 {
1463 	return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1464 				      WMI_MSG_TYPE_CMD);
1465 }
1466 
1467 /**
1468  * wmi_ext_dbg_msg_event_record() - record wmi event messages
1469  * @wmi_handle: wmi handler
1470  * @buf: wmi event buffer
1471  * @len: wmi event message length
1472  *
1473  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1474  */
1475 static QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified *wmi_handle,
1476 					       uint8_t *buf, uint32_t len)
1477 {
1478 	uint32_t id;
1479 
1480 	id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
1481 	if (id != wmi_handle->wmi_events[wmi_diag_event_id])
1482 		return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1483 					      WMI_MSG_TYPE_EVENT);
1484 
1485 	return QDF_STATUS_SUCCESS;
1486 }
1487 
1488 /**
1489  * wmi_ext_dbg_msg_queue_init() - create debugfs queue and associated lock
1490  * @wmi_handle: wmi handler
1491  *
1492  * Return: none
1493  */
1494 static void wmi_ext_dbg_msg_queue_init(struct wmi_unified *wmi_handle)
1495 {
1496 	qdf_list_create(&wmi_handle->wmi_ext_dbg_msg_queue,
1497 			wmi_handle->wmi_ext_dbg_msg_queue_size);
1498 	qdf_spinlock_create(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1499 }
1500 
1501 /**
1502  * wmi_ext_dbg_msg_queue_deinit() - destroy debugfs queue and associated lock
1503  * @wmi_handle: wmi handler
1504  *
1505  * Return: none
1506  */
1507 static void wmi_ext_dbg_msg_queue_deinit(struct wmi_unified *wmi_handle)
1508 {
1509 	qdf_list_destroy(&wmi_handle->wmi_ext_dbg_msg_queue);
1510 	qdf_spinlock_destroy(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1511 }
1512 
1513 /**
1514  * wmi_ext_dbg_msg_show() - debugfs function to display whole content of
1515  * wmi command/event messages including headers.
1516  * @file: qdf debugfs file handler
1517  * @arg: pointer to wmi handler
1518  *
1519  * Return: QDF_STATUS_SUCCESS if all the messages are shown successfully,
1520  * else QDF_STATUS_E_AGAIN if more data to show.
1521  */
1522 static QDF_STATUS wmi_ext_dbg_msg_show(qdf_debugfs_file_t file, void *arg)
1523 {
1524 	struct wmi_unified *wmi_handle = (struct wmi_unified *)arg;
1525 	struct wmi_ext_dbg_msg *msg;
1526 	uint64_t secs, usecs;
1527 
1528 	msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1529 	if (!msg)
1530 		return QDF_STATUS_SUCCESS;
1531 
1532 	qdf_debugfs_printf(file, "%s: 0x%x\n",
1533 			   msg->type == WMI_MSG_TYPE_CMD ? "COMMAND" :
1534 			   "EVENT", WMI_GET_FIELD(msg->buf, WMI_CMD_HDR,
1535 						  COMMANDID));
1536 	qdf_log_timestamp_to_secs(msg->ts, &secs, &usecs);
1537 	qdf_debugfs_printf(file, "Time: %llu.%llu\n", secs, usecs);
1538 	qdf_debugfs_printf(file, "Length:%d\n", msg->len);
1539 	qdf_debugfs_hexdump(file, msg->buf, msg->len,
1540 			    WMI_EXT_DBG_DUMP_ROW_SIZE,
1541 			    WMI_EXT_DBG_DUMP_GROUP_SIZE);
1542 	qdf_debugfs_printf(file, "\n");
1543 
1544 	if (qdf_debugfs_overflow(file)) {
1545 		qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1546 		qdf_list_insert_front(&wmi_handle->wmi_ext_dbg_msg_queue,
1547 				      &msg->node);
1548 		qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1549 
1550 	} else {
1551 		wmi_ext_dbg_msg_put(msg);
1552 	}
1553 
1554 	return QDF_STATUS_E_AGAIN;
1555 }
1556 
1557 /**
1558  * wmi_ext_dbg_msg_write() - debugfs write not supported
1559  * @priv: private data
1560  * @buf: received data buffer
1561  * @len: length of received buffer
1562  *
1563  * Return: QDF_STATUS_E_NOSUPPORT.
1564  */
1565 static QDF_STATUS wmi_ext_dbg_msg_write(void *priv, const char *buf,
1566 					qdf_size_t len)
1567 {
1568 	return QDF_STATUS_E_NOSUPPORT;
1569 }
1570 
1571 static struct qdf_debugfs_fops wmi_ext_dbgfs_ops[WMI_MAX_RADIOS];
1572 
1573 /**
1574  * wmi_ext_debugfs_init() - init debugfs items for extended wmi dump.
1575  * @wmi_handle: wmi handler
1576  * @pdev_idx: pdev index
1577  *
1578  * Return: QDF_STATUS_SUCCESS if debugfs is initialized else
1579  * QDF_STATUS_E_FAILURE
1580  */
1581 static QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1582 				     uint32_t pdev_idx)
1583 {
1584 	qdf_dentry_t dentry;
1585 	char buf[32];
1586 
1587 	/* To maintain backward compatibility, naming convention for PDEV 0
1588 	 * dentry is kept same as before. For more than 1 PDEV, dentry
1589 	 * names will be appended with PDEVx.
1590 	*/
1591 	if (wmi_handle->soc->soc_idx == 0 && pdev_idx == 0) {
1592 		dentry  = qdf_debugfs_create_dir(WMI_EXT_DBG_DIR, NULL);
1593 	} else {
1594 		snprintf(buf, sizeof(buf), "WMI_EXT_DBG_SOC%u_PDEV%u",
1595 			 wmi_handle->soc->soc_idx, pdev_idx);
1596 		dentry  = qdf_debugfs_create_dir(buf, NULL);
1597 	}
1598 
1599 	if (!dentry) {
1600 		wmi_err("error while creating extended wmi debugfs dir");
1601 		return QDF_STATUS_E_FAILURE;
1602 	}
1603 
1604 	wmi_ext_dbgfs_ops[pdev_idx].show = wmi_ext_dbg_msg_show;
1605 	wmi_ext_dbgfs_ops[pdev_idx].write = wmi_ext_dbg_msg_write;
1606 	wmi_ext_dbgfs_ops[pdev_idx].priv = wmi_handle;
1607 	if (!qdf_debugfs_create_file(WMI_EXT_DBG_FILE, WMI_EXT_DBG_FILE_PERM,
1608 				     dentry, &wmi_ext_dbgfs_ops[pdev_idx])) {
1609 		qdf_debugfs_remove_dir(dentry);
1610 		wmi_err("Error while creating extended wmi debugfs file");
1611 		return QDF_STATUS_E_FAILURE;
1612 	}
1613 
1614 	wmi_handle->wmi_ext_dbg_dentry = dentry;
1615 	wmi_handle->wmi_ext_dbg_msg_queue_size = WMI_EXT_DBG_QUEUE_SIZE;
1616 	wmi_ext_dbg_msg_queue_init(wmi_handle);
1617 
1618 	return QDF_STATUS_SUCCESS;
1619 }
1620 
1621 /**
1622  * wmi_ext_debugfs_deinit() - cleanup/deinit debugfs items of extended wmi dump.
1623  * @wmi_handle: wmi handler
1624  *
1625  * Return: QDF_STATUS_SUCCESS if cleanup is successful
1626  */
1627 static QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1628 {
1629 	struct wmi_ext_dbg_msg *msg;
1630 
1631 	while ((msg = wmi_ext_dbg_msg_dequeue(wmi_handle)))
1632 		wmi_ext_dbg_msg_put(msg);
1633 
1634 	wmi_ext_dbg_msg_queue_deinit(wmi_handle);
1635 	qdf_debugfs_remove_dir_recursive(wmi_handle->wmi_ext_dbg_dentry);
1636 
1637 	return QDF_STATUS_SUCCESS;
1638 }
1639 
1640 #else
1641 
1642 static inline QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified
1643 						    *wmi_handle,
1644 						    uint8_t *buf, uint32_t len)
1645 {
1646 		return QDF_STATUS_SUCCESS;
1647 }
1648 
1649 static inline QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified
1650 						      *wmi_handle,
1651 						      uint8_t *buf, uint32_t len)
1652 {
1653 		return QDF_STATUS_SUCCESS;
1654 }
1655 
1656 static inline QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1657 					    uint32_t pdev_idx)
1658 {
1659 		return QDF_STATUS_SUCCESS;
1660 }
1661 
1662 static inline QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1663 {
1664 		return QDF_STATUS_SUCCESS;
1665 }
1666 
1667 #endif /*WMI_EXT_DBG */
1668 
1669 int wmi_get_host_credits(wmi_unified_t wmi_handle);
1670 /* WMI buffer APIs */
1671 
1672 #ifdef NBUF_MEMORY_DEBUG
1673 wmi_buf_t
1674 wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len,
1675 		    const char *func_name,
1676 		    uint32_t line_num)
1677 {
1678 	wmi_buf_t wmi_buf;
1679 
1680 	if (roundup(len + sizeof(WMI_CMD_HDR), 4) > wmi_handle->max_msg_len) {
1681 		QDF_ASSERT(0);
1682 		return NULL;
1683 	}
1684 
1685 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, func_name,
1686 				 line_num);
1687 	if (!wmi_buf)
1688 		wmi_buf = qdf_nbuf_alloc_debug(NULL,
1689 					       roundup(len + WMI_MIN_HEAD_ROOM,
1690 						       4),
1691 					       WMI_MIN_HEAD_ROOM, 4, false,
1692 					       func_name, line_num);
1693 	if (!wmi_buf)
1694 		return NULL;
1695 
1696 	/* Clear the wmi buffer */
1697 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1698 
1699 	/*
1700 	 * Set the length of the buffer to match the allocation size.
1701 	 */
1702 	qdf_nbuf_set_pktlen(wmi_buf, len);
1703 
1704 	return wmi_buf;
1705 }
1706 qdf_export_symbol(wmi_buf_alloc_debug);
1707 
1708 void wmi_buf_free(wmi_buf_t net_buf)
1709 {
1710 	net_buf = wbuff_buff_put(net_buf);
1711 	if (net_buf)
1712 		qdf_nbuf_free(net_buf);
1713 }
1714 qdf_export_symbol(wmi_buf_free);
1715 #else
1716 wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len,
1717 			   const char *func, uint32_t line)
1718 {
1719 	wmi_buf_t wmi_buf;
1720 
1721 	if (roundup(len + sizeof(WMI_CMD_HDR), 4) > wmi_handle->max_msg_len) {
1722 		QDF_DEBUG_PANIC("Invalid length %u (via %s:%u)",
1723 				len, func, line);
1724 		return NULL;
1725 	}
1726 
1727 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, __func__,
1728 				 __LINE__);
1729 	if (!wmi_buf)
1730 		wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len +
1731 				WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4,
1732 				false, func, line);
1733 
1734 	if (!wmi_buf) {
1735 		wmi_nofl_err("%s:%d, failed to alloc len:%d", func, line, len);
1736 		return NULL;
1737 	}
1738 
1739 	/* Clear the wmi buffer */
1740 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1741 
1742 	/*
1743 	 * Set the length of the buffer to match the allocation size.
1744 	 */
1745 	qdf_nbuf_set_pktlen(wmi_buf, len);
1746 
1747 	return wmi_buf;
1748 }
1749 qdf_export_symbol(wmi_buf_alloc_fl);
1750 
1751 void wmi_buf_free(wmi_buf_t net_buf)
1752 {
1753 	net_buf = wbuff_buff_put(net_buf);
1754 	if (net_buf)
1755 		qdf_nbuf_free(net_buf);
1756 }
1757 qdf_export_symbol(wmi_buf_free);
1758 #endif
1759 
1760 /**
1761  * wmi_get_max_msg_len() - get maximum WMI message length
1762  * @wmi_handle: WMI handle.
1763  *
1764  * This function returns the maximum WMI message length
1765  *
1766  * Return: maximum WMI message length
1767  */
1768 uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle)
1769 {
1770 	return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM;
1771 }
1772 qdf_export_symbol(wmi_get_max_msg_len);
1773 
1774 #ifndef WMI_CMD_STRINGS
1775 static uint8_t *wmi_id_to_name(uint32_t wmi_command)
1776 {
1777 	return "Invalid WMI cmd";
1778 }
1779 #endif
1780 
1781 static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag)
1782 {
1783 	wmi_debug("Send WMI command:%s command_id:%d htc_tag:%d",
1784 		 wmi_id_to_name(cmd_id), cmd_id, tag);
1785 }
1786 
1787 /**
1788  * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence
1789  * @cmd_id: command to check
1790  *
1791  * Return: true if the command is part of the resume sequence.
1792  */
1793 #ifdef WLAN_POWER_MANAGEMENT_OFFLOAD
1794 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1795 {
1796 	switch (cmd_id) {
1797 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
1798 	case WMI_PDEV_RESUME_CMDID:
1799 		return true;
1800 
1801 	default:
1802 		return false;
1803 	}
1804 }
1805 
1806 #else
1807 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1808 {
1809 	return false;
1810 }
1811 
1812 #endif
1813 
1814 #ifdef FEATURE_WLAN_D0WOW
1815 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1816 {
1817 	wmi_d0_wow_enable_disable_cmd_fixed_param *cmd;
1818 
1819 	if (cmd_id == WMI_D0_WOW_ENABLE_DISABLE_CMDID) {
1820 		cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *)
1821 			wmi_buf_data(buf);
1822 		if (!cmd->enable)
1823 			return true;
1824 		else
1825 			return false;
1826 	}
1827 
1828 	return false;
1829 }
1830 #else
1831 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1832 {
1833 	return false;
1834 }
1835 
1836 #endif
1837 
1838 #ifdef WMI_INTERFACE_SEQUENCE_CHECK
1839 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1840 {
1841 	wmi_handle->wmi_sequence = 0;
1842 	wmi_handle->wmi_exp_sequence = 0;
1843 	wmi_handle->wmi_sequence_stop = false;
1844 }
1845 
1846 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1847 {
1848 	qdf_spinlock_create(&wmi_handle->wmi_seq_lock);
1849 	wmi_interface_sequence_reset(wmi_handle);
1850 }
1851 
1852 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1853 {
1854 	qdf_spinlock_destroy(&wmi_handle->wmi_seq_lock);
1855 }
1856 
1857 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1858 {
1859 	wmi_handle->wmi_sequence_stop = true;
1860 }
1861 
1862 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1863 					  HTC_PACKET *pkt,
1864 					  const char *func, uint32_t line)
1865 {
1866 	wmi_buf_t buf = GET_HTC_PACKET_NET_BUF_CONTEXT(pkt);
1867 	QDF_STATUS status;
1868 
1869 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1870 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1871 	if (QDF_STATUS_SUCCESS != status) {
1872 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1873 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1874 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1875 			     func, line, status);
1876 		qdf_mem_free(pkt);
1877 		return status;
1878 	}
1879 	/* Record the sequence number in the SKB */
1880 	qdf_nbuf_set_mark(buf, wmi_handle->wmi_sequence);
1881 	/* Increment the sequence number */
1882 	wmi_handle->wmi_sequence = (wmi_handle->wmi_sequence + 1)
1883 				   & (wmi_handle->wmi_max_cmds - 1);
1884 	qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1885 
1886 	return status;
1887 }
1888 
1889 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1890 						wmi_buf_t buf)
1891 {
1892 	/* Skip sequence check when wmi sequence stop is set */
1893 	if (wmi_handle->wmi_sequence_stop)
1894 		return;
1895 
1896 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1897 	/* Match the completion sequence and expected sequence number */
1898 	if (qdf_nbuf_get_mark(buf) != wmi_handle->wmi_exp_sequence) {
1899 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1900 		wmi_nofl_err("WMI Tx Completion Sequence number mismatch");
1901 		wmi_nofl_err("Expected %d Received %d",
1902 			     wmi_handle->wmi_exp_sequence,
1903 			     qdf_nbuf_get_mark(buf));
1904 		/* Trigger Recovery */
1905 		qdf_trigger_self_recovery(wmi_handle->soc,
1906 					  QDF_WMI_BUF_SEQUENCE_MISMATCH);
1907 	} else {
1908 		/* Increment the expected sequence number */
1909 		wmi_handle->wmi_exp_sequence =
1910 				(wmi_handle->wmi_exp_sequence + 1)
1911 				& (wmi_handle->wmi_max_cmds - 1);
1912 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1913 	}
1914 }
1915 #else
1916 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1917 {
1918 }
1919 
1920 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1921 {
1922 }
1923 
1924 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1925 {
1926 }
1927 
1928 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1929 {
1930 }
1931 
1932 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1933 					  HTC_PACKET *pkt,
1934 					  const char *func, uint32_t line)
1935 {
1936 	QDF_STATUS status;
1937 
1938 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1939 	if (QDF_STATUS_SUCCESS != status) {
1940 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1941 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1942 			     func, line, status);
1943 		qdf_mem_free(pkt);
1944 		return status;
1945 	}
1946 
1947 	return status;
1948 }
1949 
1950 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1951 						wmi_buf_t buf)
1952 {
1953 }
1954 #endif
1955 
1956 static inline void wmi_unified_debug_dump(wmi_unified_t wmi_handle)
1957 {
1958 	wmi_nofl_err("Endpoint ID = %d, Tx Queue Depth = %d, soc_id = %u, target type = %s",
1959 		     wmi_handle->wmi_endpoint_id,
1960 		     htc_get_tx_queue_depth(wmi_handle->htc_handle,
1961 					    wmi_handle->wmi_endpoint_id),
1962 		     wmi_handle->soc->soc_idx,
1963 		     (wmi_handle->target_type ==
1964 		      WMI_TLV_TARGET ? "WMI_TLV_TARGET" :
1965 						"WMI_NON_TLV_TARGET"));
1966 }
1967 
1968 #ifdef SYSTEM_PM_CHECK
1969 /**
1970  * wmi_set_system_pm_pkt_tag() - API to set tag for system pm packets
1971  * @htc_tag: HTC tag
1972  * @buf: wmi cmd buffer
1973  * @cmd_id: cmd id
1974  *
1975  * Return: None
1976  */
1977 static void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
1978 				      uint32_t cmd_id)
1979 {
1980 	switch (cmd_id) {
1981 	case WMI_WOW_ENABLE_CMDID:
1982 	case WMI_PDEV_SUSPEND_CMDID:
1983 		*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
1984 		break;
1985 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
1986 	case WMI_PDEV_RESUME_CMDID:
1987 		*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
1988 		break;
1989 	case WMI_D0_WOW_ENABLE_DISABLE_CMDID:
1990 		if (wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id))
1991 			*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
1992 		else
1993 			*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
1994 		break;
1995 	default:
1996 		break;
1997 	}
1998 }
1999 #else
2000 static inline void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
2001 					     uint32_t cmd_id)
2002 {
2003 }
2004 #endif
2005 
2006 QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf,
2007 				   uint32_t len, uint32_t cmd_id,
2008 				   const char *func, uint32_t line)
2009 {
2010 	HTC_PACKET *pkt;
2011 	uint16_t htc_tag = 0;
2012 	bool rtpm_inprogress;
2013 
2014 	rtpm_inprogress = wmi_get_runtime_pm_inprogress(wmi_handle);
2015 	if (rtpm_inprogress) {
2016 		htc_tag = wmi_handle->ops->wmi_set_htc_tx_tag(wmi_handle, buf,
2017 							      cmd_id);
2018 	} else if (qdf_atomic_read(&wmi_handle->is_target_suspended) &&
2019 		   !wmi_is_pm_resume_cmd(cmd_id) &&
2020 		   !wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id)) {
2021 			wmi_nofl_err("Target is suspended (via %s:%u)",
2022 					func, line);
2023 		return QDF_STATUS_E_BUSY;
2024 	}
2025 
2026 	if (wmi_handle->wmi_stopinprogress) {
2027 		wmi_nofl_err("%s:%d, WMI stop in progress, wmi_handle:%pK",
2028 			     func, line, wmi_handle);
2029 		return QDF_STATUS_E_INVAL;
2030 	}
2031 
2032 #ifndef WMI_NON_TLV_SUPPORT
2033 	/* Do sanity check on the TLV parameter structure */
2034 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2035 		void *buf_ptr = (void *)qdf_nbuf_data(buf);
2036 
2037 		if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id)
2038 			!= 0) {
2039 			wmi_nofl_err("%s:%d, Invalid WMI Param Buffer for Cmd:%d",
2040 				     func, line, cmd_id);
2041 			return QDF_STATUS_E_INVAL;
2042 		}
2043 	}
2044 #endif
2045 
2046 	if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
2047 		wmi_nofl_err("%s:%d, Failed to send cmd %x, no memory",
2048 			     func, line, cmd_id);
2049 		return QDF_STATUS_E_NOMEM;
2050 	}
2051 
2052 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2053 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2054 
2055 	qdf_atomic_inc(&wmi_handle->pending_cmds);
2056 	if (qdf_atomic_read(&wmi_handle->pending_cmds) >=
2057 			wmi_handle->wmi_max_cmds) {
2058 		wmi_nofl_err("hostcredits = %d",
2059 			     wmi_get_host_credits(wmi_handle));
2060 		htc_dump_counter_info(wmi_handle->htc_handle);
2061 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2062 		wmi_nofl_err("%s:%d, MAX %d WMI Pending cmds reached",
2063 			     func, line, wmi_handle->wmi_max_cmds);
2064 		wmi_unified_debug_dump(wmi_handle);
2065 		htc_ce_tasklet_debug_dump(wmi_handle->htc_handle);
2066 		qdf_trigger_self_recovery(wmi_handle->soc->wmi_psoc,
2067 					  QDF_WMI_EXCEED_MAX_PENDING_CMDS);
2068 		return QDF_STATUS_E_BUSY;
2069 	}
2070 
2071 	pkt = qdf_mem_malloc_fl(sizeof(*pkt), func, line);
2072 	if (!pkt) {
2073 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2074 		return QDF_STATUS_E_NOMEM;
2075 	}
2076 
2077 	if (!rtpm_inprogress)
2078 		wmi_set_system_pm_pkt_tag(&htc_tag, buf, cmd_id);
2079 
2080 	SET_HTC_PACKET_INFO_TX(pkt,
2081 			       NULL,
2082 			       qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
2083 			       wmi_handle->wmi_endpoint_id, htc_tag);
2084 
2085 	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);
2086 	wmi_log_cmd_id(cmd_id, htc_tag);
2087 	wmi_ext_dbg_msg_cmd_record(wmi_handle,
2088 				   qdf_nbuf_data(buf), qdf_nbuf_len(buf));
2089 #ifdef WMI_INTERFACE_EVENT_LOGGING
2090 	if (wmi_handle->log_info.wmi_logging_enable) {
2091 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2092 		/*
2093 		 * Record 16 bytes of WMI cmd data -
2094 		 * exclude TLV and WMI headers
2095 		 *
2096 		 * WMI mgmt command already recorded in wmi_mgmt_cmd_record
2097 		 */
2098 		if (wmi_handle->ops->is_management_record(cmd_id) == false) {
2099 			uint8_t *tmpbuf = (uint8_t *)qdf_nbuf_data(buf) +
2100 				wmi_handle->soc->buf_offset_command;
2101 
2102 			WMI_COMMAND_RECORD(wmi_handle, cmd_id, tmpbuf);
2103 			wmi_specific_cmd_record(wmi_handle, cmd_id, tmpbuf);
2104 		}
2105 
2106 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2107 	}
2108 #endif
2109 	return wmi_htc_send_pkt(wmi_handle, pkt, func, line);
2110 }
2111 qdf_export_symbol(wmi_unified_cmd_send_fl);
2112 
2113 /**
2114  * wmi_unified_get_event_handler_ix() - gives event handler's index
2115  * @wmi_handle: handle to wmi
2116  * @event_id: wmi  event id
2117  *
2118  * Return: event handler's index
2119  */
2120 static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
2121 					    uint32_t event_id)
2122 {
2123 	uint32_t idx = 0;
2124 	int32_t invalid_idx = -1;
2125 	struct wmi_soc *soc = wmi_handle->soc;
2126 
2127 	for (idx = 0; (idx < soc->max_event_idx &&
2128 		       idx < WMI_UNIFIED_MAX_EVENT); ++idx) {
2129 		if (wmi_handle->event_id[idx] == event_id &&
2130 		    wmi_handle->event_handler[idx]) {
2131 			return idx;
2132 		}
2133 	}
2134 
2135 	return invalid_idx;
2136 }
2137 
2138 /**
2139  * wmi_register_event_handler_with_ctx() - register event handler with
2140  * exec ctx and buffer type
2141  * @wmi_handle: handle to wmi
2142  * @event_id: wmi event id
2143  * @handler_func: wmi event handler function
2144  * @rx_ctx: rx execution context for wmi rx events
2145  * @rx_buf_type: rx execution context for wmi rx events
2146  *
2147  * Return: QDF_STATUS_SUCCESS on successful register event else failure.
2148  */
2149 static QDF_STATUS
2150 wmi_register_event_handler_with_ctx(wmi_unified_t wmi_handle,
2151 				    uint32_t event_id,
2152 				    wmi_unified_event_handler handler_func,
2153 				    enum wmi_rx_exec_ctx rx_ctx,
2154 				    enum wmi_rx_buff_type rx_buf_type)
2155 {
2156 	uint32_t idx = 0;
2157 	uint32_t evt_id;
2158 	struct wmi_soc *soc;
2159 
2160 	if (!wmi_handle) {
2161 		wmi_err("WMI handle is NULL");
2162 		return QDF_STATUS_E_FAILURE;
2163 	}
2164 
2165 	soc = wmi_handle->soc;
2166 
2167 	if (event_id >= wmi_events_max ||
2168 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2169 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2170 			  "%s: Event id %d is unavailable",
2171 					__func__, event_id);
2172 		return QDF_STATUS_E_FAILURE;
2173 	}
2174 	evt_id = wmi_handle->wmi_events[event_id];
2175 
2176 	if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) {
2177 		wmi_info("event handler already registered 0x%x", evt_id);
2178 		return QDF_STATUS_E_FAILURE;
2179 	}
2180 	if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
2181 		wmi_err("no more event handlers 0x%x",
2182 			 evt_id);
2183 		return QDF_STATUS_E_FAILURE;
2184 	}
2185 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
2186 		  "Registered event handler for event 0x%8x", evt_id);
2187 	idx = soc->max_event_idx;
2188 	wmi_handle->event_handler[idx] = handler_func;
2189 	wmi_handle->event_id[idx] = evt_id;
2190 
2191 	qdf_spin_lock_bh(&soc->ctx_lock);
2192 	wmi_handle->ctx[idx].exec_ctx = rx_ctx;
2193 	wmi_handle->ctx[idx].buff_type = rx_buf_type;
2194 	qdf_spin_unlock_bh(&soc->ctx_lock);
2195 	soc->max_event_idx++;
2196 
2197 	return QDF_STATUS_SUCCESS;
2198 }
2199 
2200 QDF_STATUS
2201 wmi_unified_register_event(wmi_unified_t wmi_handle,
2202 			   uint32_t event_id,
2203 			   wmi_unified_event_handler handler_func)
2204 {
2205 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2206 						   handler_func,
2207 						   WMI_RX_UMAC_CTX,
2208 						   WMI_RX_PROCESSED_BUFF);
2209 }
2210 
2211 QDF_STATUS
2212 wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
2213 				   wmi_conv_event_id event_id,
2214 				   wmi_unified_event_handler handler_func,
2215 				   uint8_t rx_ctx)
2216 {
2217 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2218 						   handler_func, rx_ctx,
2219 						   WMI_RX_PROCESSED_BUFF);
2220 }
2221 
2222 qdf_export_symbol(wmi_unified_register_event_handler);
2223 
2224 QDF_STATUS
2225 wmi_unified_register_raw_event_handler(wmi_unified_t wmi_handle,
2226 				       wmi_conv_event_id event_id,
2227 				       wmi_unified_event_handler handler_func,
2228 				       enum wmi_rx_exec_ctx rx_ctx)
2229 {
2230 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2231 						   handler_func, rx_ctx,
2232 						   WMI_RX_RAW_BUFF);
2233 }
2234 
2235 qdf_export_symbol(wmi_unified_register_raw_event_handler);
2236 
2237 QDF_STATUS wmi_unified_unregister_event(wmi_unified_t wmi_handle,
2238 					uint32_t event_id)
2239 {
2240 	uint32_t idx = 0;
2241 	uint32_t evt_id;
2242 	struct wmi_soc *soc;
2243 
2244 	if (!wmi_handle) {
2245 		wmi_err("WMI handle is NULL");
2246 		return QDF_STATUS_E_FAILURE;
2247 	}
2248 
2249 	soc = wmi_handle->soc;
2250 	if (event_id >= wmi_events_max ||
2251 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2252 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2253 			  "%s: Event id %d is unavailable",
2254 					__func__, event_id);
2255 		return QDF_STATUS_E_FAILURE;
2256 	}
2257 	evt_id = wmi_handle->wmi_events[event_id];
2258 
2259 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2260 	if (idx == -1) {
2261 		wmi_warn("event handler is not registered: evt id 0x%x",
2262 			 evt_id);
2263 		return QDF_STATUS_E_FAILURE;
2264 	}
2265 	wmi_handle->event_handler[idx] = NULL;
2266 	wmi_handle->event_id[idx] = 0;
2267 	--soc->max_event_idx;
2268 	wmi_handle->event_handler[idx] =
2269 		wmi_handle->event_handler[soc->max_event_idx];
2270 	wmi_handle->event_id[idx] =
2271 		wmi_handle->event_id[soc->max_event_idx];
2272 
2273 	qdf_spin_lock_bh(&soc->ctx_lock);
2274 
2275 	wmi_handle->ctx[idx].exec_ctx =
2276 		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
2277 	wmi_handle->ctx[idx].buff_type =
2278 		wmi_handle->ctx[soc->max_event_idx].buff_type;
2279 
2280 	qdf_spin_unlock_bh(&soc->ctx_lock);
2281 
2282 	return QDF_STATUS_SUCCESS;
2283 }
2284 
2285 QDF_STATUS wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
2286 						wmi_conv_event_id event_id)
2287 {
2288 	uint32_t idx = 0;
2289 	uint32_t evt_id;
2290 	struct wmi_soc *soc;
2291 
2292 	if (!wmi_handle) {
2293 		wmi_err("WMI handle is NULL");
2294 		return QDF_STATUS_E_FAILURE;
2295 	}
2296 
2297 	soc = wmi_handle->soc;
2298 
2299 	if (event_id >= wmi_events_max ||
2300 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2301 		wmi_err("Event id %d is unavailable", event_id);
2302 		return QDF_STATUS_E_FAILURE;
2303 	}
2304 	evt_id = wmi_handle->wmi_events[event_id];
2305 
2306 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2307 	if (idx == -1) {
2308 		wmi_err("event handler is not registered: evt id 0x%x",
2309 			 evt_id);
2310 		return QDF_STATUS_E_FAILURE;
2311 	}
2312 	wmi_handle->event_handler[idx] = NULL;
2313 	wmi_handle->event_id[idx] = 0;
2314 	--soc->max_event_idx;
2315 	wmi_handle->event_handler[idx] =
2316 		wmi_handle->event_handler[soc->max_event_idx];
2317 	wmi_handle->event_id[idx] =
2318 		wmi_handle->event_id[soc->max_event_idx];
2319 
2320 	qdf_spin_lock_bh(&soc->ctx_lock);
2321 
2322 	wmi_handle->ctx[idx].exec_ctx =
2323 		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
2324 	wmi_handle->ctx[idx].buff_type =
2325 		wmi_handle->ctx[soc->max_event_idx].buff_type;
2326 
2327 	qdf_spin_unlock_bh(&soc->ctx_lock);
2328 
2329 	return QDF_STATUS_SUCCESS;
2330 }
2331 qdf_export_symbol(wmi_unified_unregister_event_handler);
2332 
2333 static void
2334 wmi_process_rx_diag_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2335 					    void *evt_buf)
2336 {
2337 	uint32_t num_diag_events_pending;
2338 
2339 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
2340 	if (RX_DIAG_WQ_MAX_SIZE > 0) {
2341 		num_diag_events_pending = qdf_nbuf_queue_len(
2342 						&wmi_handle->diag_event_queue);
2343 
2344 		if (num_diag_events_pending >= RX_DIAG_WQ_MAX_SIZE) {
2345 			qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2346 			wmi_handle->wmi_rx_diag_events_dropped++;
2347 			wmi_debug_rl("Rx diag events dropped count: %d",
2348 				     wmi_handle->wmi_rx_diag_events_dropped);
2349 			qdf_nbuf_free(evt_buf);
2350 			return;
2351 		}
2352 	}
2353 
2354 	qdf_nbuf_queue_add(&wmi_handle->diag_event_queue, evt_buf);
2355 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2356 	qdf_queue_work(0, wmi_handle->wmi_rx_diag_work_queue,
2357 		       &wmi_handle->rx_diag_event_work);
2358 }
2359 
2360 void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2361 					    void *evt_buf)
2362 {
2363 
2364 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
2365 	qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
2366 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
2367 	qdf_queue_work(0, wmi_handle->wmi_rx_work_queue,
2368 			&wmi_handle->rx_event_work);
2369 
2370 	return;
2371 }
2372 
2373 qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx);
2374 
2375 uint32_t wmi_critical_events_in_flight(struct wmi_unified *wmi)
2376 {
2377 	return qdf_atomic_read(&wmi->critical_events_in_flight);
2378 }
2379 
2380 static bool
2381 wmi_is_event_critical(struct wmi_unified *wmi_handle, uint32_t event_id)
2382 {
2383 	if (wmi_handle->wmi_events[wmi_roam_synch_event_id] == event_id)
2384 		return true;
2385 
2386 	return false;
2387 }
2388 
2389 static QDF_STATUS wmi_discard_fw_event(struct scheduler_msg *msg)
2390 {
2391 	struct wmi_process_fw_event_params *event_param;
2392 
2393 	if (!msg->bodyptr)
2394 		return QDF_STATUS_E_INVAL;
2395 
2396 	event_param = (struct wmi_process_fw_event_params *)msg->bodyptr;
2397 	qdf_nbuf_free(event_param->evt_buf);
2398 	qdf_mem_free(msg->bodyptr);
2399 	msg->bodyptr = NULL;
2400 	msg->bodyval = 0;
2401 	msg->type = 0;
2402 
2403 	return QDF_STATUS_SUCCESS;
2404 }
2405 
2406 static QDF_STATUS wmi_process_fw_event_handler(struct scheduler_msg *msg)
2407 {
2408 	struct wmi_process_fw_event_params *params =
2409 		(struct wmi_process_fw_event_params *)msg->bodyptr;
2410 	struct wmi_unified *wmi_handle;
2411 	uint32_t event_id;
2412 
2413 	wmi_handle = (struct wmi_unified *)params->wmi_handle;
2414 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params->evt_buf),
2415 				 WMI_CMD_HDR, COMMANDID);
2416 	wmi_process_fw_event(wmi_handle, params->evt_buf);
2417 
2418 	if (wmi_is_event_critical(wmi_handle, event_id))
2419 		qdf_atomic_dec(&wmi_handle->critical_events_in_flight);
2420 
2421 	qdf_mem_free(msg->bodyptr);
2422 
2423 	return QDF_STATUS_SUCCESS;
2424 }
2425 
2426 /**
2427  * wmi_process_fw_event_sched_thread_ctx() - common event handler to serialize
2428  *                                  event processing through scheduler thread
2429  * @ctx: wmi context
2430  * @ev: event buffer
2431  * @rx_ctx: rx execution context
2432  *
2433  * Return: 0 on success, errno on failure
2434  */
2435 static QDF_STATUS
2436 wmi_process_fw_event_sched_thread_ctx(struct wmi_unified *wmi,
2437 				      void *ev)
2438 {
2439 	struct wmi_process_fw_event_params *params_buf;
2440 	struct scheduler_msg msg = { 0 };
2441 	uint32_t event_id;
2442 
2443 	params_buf = qdf_mem_malloc(sizeof(struct wmi_process_fw_event_params));
2444 	if (!params_buf) {
2445 		wmi_err("malloc failed");
2446 		qdf_nbuf_free(ev);
2447 		return QDF_STATUS_E_NOMEM;
2448 	}
2449 
2450 	params_buf->wmi_handle = wmi;
2451 	params_buf->evt_buf = ev;
2452 
2453 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params_buf->evt_buf),
2454 				 WMI_CMD_HDR, COMMANDID);
2455 	if (wmi_is_event_critical(wmi, event_id))
2456 		qdf_atomic_inc(&wmi->critical_events_in_flight);
2457 
2458 	msg.bodyptr = params_buf;
2459 	msg.bodyval = 0;
2460 	msg.callback = wmi_process_fw_event_handler;
2461 	msg.flush_callback = wmi_discard_fw_event;
2462 
2463 	if (QDF_STATUS_SUCCESS !=
2464 		scheduler_post_message(QDF_MODULE_ID_TARGET_IF,
2465 				       QDF_MODULE_ID_TARGET_IF,
2466 				       QDF_MODULE_ID_TARGET_IF, &msg)) {
2467 		qdf_nbuf_free(ev);
2468 		qdf_mem_free(params_buf);
2469 		return QDF_STATUS_E_FAULT;
2470 	}
2471 
2472 	return QDF_STATUS_SUCCESS;
2473 }
2474 
2475 /**
2476  * wmi_get_pdev_ep: Get wmi handle based on endpoint
2477  * @soc: handle to wmi soc
2478  * @ep: endpoint id
2479  *
2480  * Return: none
2481  */
2482 static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc,
2483 						HTC_ENDPOINT_ID ep)
2484 {
2485 	uint32_t i;
2486 
2487 	for (i = 0; i < WMI_MAX_RADIOS; i++)
2488 		if (soc->wmi_endpoint_id[i] == ep)
2489 			break;
2490 
2491 	if (i == WMI_MAX_RADIOS)
2492 		return NULL;
2493 
2494 	return soc->wmi_pdev[i];
2495 }
2496 
2497 /**
2498  * wmi_mtrace_rx() - Wrappper function for qdf_mtrace api
2499  * @message_id: 32-Bit Wmi message ID
2500  * @vdev_id: Vdev ID
2501  * @data: Actual message contents
2502  *
2503  * This function converts the 32-bit WMI message ID in 15-bit message ID
2504  * format for qdf_mtrace as in qdf_mtrace message there are only 15
2505  * bits reserved for message ID.
2506  * out of these 15-bits, 8-bits (From LSB) specifies the WMI_GRP_ID
2507  * and remaining 7-bits specifies the actual WMI command. With this
2508  * notation there can be maximum 256 groups and each group can have
2509  * max 128 commands can be supported.
2510  *
2511  * Return: None
2512  */
2513 static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data)
2514 {
2515 	uint16_t mtrace_message_id;
2516 
2517 	mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) |
2518 		(QDF_WMI_MTRACE_GRP_ID(message_id) <<
2519 						QDF_WMI_MTRACE_CMD_NUM_BITS);
2520 	qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA,
2521 		   mtrace_message_id, vdev_id, data);
2522 }
2523 
2524 /**
2525  * wmi_process_control_rx() - process fw events callbacks
2526  * @wmi_handle: handle to wmi_unified
2527  * @evt_buf: handle to wmi_buf_t
2528  *
2529  * Return: none
2530  */
2531 static void wmi_process_control_rx(struct wmi_unified *wmi_handle,
2532 				   wmi_buf_t evt_buf)
2533 {
2534 	struct wmi_soc *soc = wmi_handle->soc;
2535 	uint32_t id;
2536 	uint32_t idx;
2537 	enum wmi_rx_exec_ctx exec_ctx;
2538 
2539 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2540 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2541 	if (qdf_unlikely(idx == A_ERROR)) {
2542 		wmi_debug("no handler registered for event id 0x%x", id);
2543 		qdf_nbuf_free(evt_buf);
2544 		return;
2545 	}
2546 	wmi_mtrace_rx(id, 0xFF, idx);
2547 	qdf_spin_lock_bh(&soc->ctx_lock);
2548 	exec_ctx = wmi_handle->ctx[idx].exec_ctx;
2549 	qdf_spin_unlock_bh(&soc->ctx_lock);
2550 
2551 #ifdef WMI_INTERFACE_EVENT_LOGGING
2552 	if (wmi_handle->log_info.wmi_logging_enable) {
2553 		uint8_t *data;
2554 		data = qdf_nbuf_data(evt_buf);
2555 
2556 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2557 		/* Exclude 4 bytes of TLV header */
2558 		if (wmi_handle->ops->is_diag_event(id)) {
2559 			WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id,
2560 				((uint8_t *) data +
2561 				wmi_handle->soc->buf_offset_event));
2562 		} else if (wmi_handle->ops->is_management_record(id)) {
2563 			WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id,
2564 				((uint8_t *) data +
2565 				wmi_handle->soc->buf_offset_event));
2566 		} else {
2567 			WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data +
2568 				wmi_handle->soc->buf_offset_event));
2569 		}
2570 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2571 	}
2572 #endif
2573 
2574 	if (exec_ctx == WMI_RX_WORK_CTX) {
2575 		wmi_process_fw_event_worker_thread_ctx(wmi_handle, evt_buf);
2576 	} else if (exec_ctx == WMI_RX_TASKLET_CTX) {
2577 		wmi_process_fw_event(wmi_handle, evt_buf);
2578 	} else if (exec_ctx == WMI_RX_SERIALIZER_CTX) {
2579 		wmi_process_fw_event_sched_thread_ctx(wmi_handle, evt_buf);
2580 	} else if (exec_ctx == WMI_RX_DIAG_WORK_CTX) {
2581 		wmi_process_rx_diag_event_worker_thread_ctx(wmi_handle,
2582 							    evt_buf);
2583 	} else {
2584 		wmi_err("Invalid event context %d", exec_ctx);
2585 		qdf_nbuf_free(evt_buf);
2586 	}
2587 
2588 }
2589 
2590 /**
2591  * wmi_control_rx() - process fw events callbacks
2592  * @ctx: handle to wmi
2593  * @htc_packet: pointer to htc packet
2594  *
2595  * Return: none
2596  */
2597 static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
2598 {
2599 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2600 	struct wmi_unified *wmi_handle;
2601 	wmi_buf_t evt_buf;
2602 
2603 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2604 
2605 	wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint);
2606 	if (!wmi_handle) {
2607 		wmi_err("unable to get wmi_handle to Endpoint %d",
2608 			htc_packet->Endpoint);
2609 		qdf_nbuf_free(evt_buf);
2610 		return;
2611 	}
2612 
2613 	wmi_process_control_rx(wmi_handle, evt_buf);
2614 }
2615 
2616 #if defined(WLAN_FEATURE_WMI_DIAG_OVER_CE7)
2617 /**
2618  * wmi_control_diag_rx() - process diag fw events callbacks
2619  * @ctx: handle to wmi
2620  * @htc_packet: pointer to htc packet
2621  *
2622  * Return: none
2623  */
2624 static void wmi_control_diag_rx(void *ctx, HTC_PACKET *htc_packet)
2625 {
2626 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2627 	struct wmi_unified *wmi_handle;
2628 	wmi_buf_t evt_buf;
2629 
2630 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2631 
2632 	wmi_handle = soc->wmi_pdev[0];
2633 
2634 	if (!wmi_handle) {
2635 		wmi_err("unable to get wmi_handle for diag event end point id:%d", htc_packet->Endpoint);
2636 		qdf_nbuf_free(evt_buf);
2637 		return;
2638 	}
2639 
2640 	wmi_process_control_rx(wmi_handle, evt_buf);
2641 }
2642 
2643 #elif defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
2644 static void wmi_control_diag_rx(void *ctx, HTC_PACKET *htc_packet)
2645 {
2646 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2647 	struct wmi_unified *wmi_handle;
2648 	wmi_buf_t evt_buf;
2649 
2650 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2651 
2652 	wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint);
2653 
2654 	if (!wmi_handle) {
2655 		wmi_err("unable to get wmi_handle for diag event end point id:%d", htc_packet->Endpoint);
2656 		qdf_nbuf_free(evt_buf);
2657 		return;
2658 	}
2659 
2660 	wmi_process_control_rx(wmi_handle, evt_buf);
2661 }
2662 
2663 #endif
2664 
2665 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
2666 QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle,
2667 					 wmi_buf_t buf, uint32_t buflen,
2668 					 uint32_t cmd_id)
2669 {
2670 	QDF_STATUS status;
2671 	int32_t ret;
2672 
2673 	if (!qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR))) {
2674 		wmi_err("Failed to send cmd %x, no memory", cmd_id);
2675 		return QDF_STATUS_E_NOMEM;
2676 	}
2677 
2678 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2679 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2680 	wmi_debug("Sending WMI_CMD_ID: 0x%x over qmi", cmd_id);
2681 	status = qdf_wmi_send_recv_qmi(qdf_nbuf_data(buf),
2682 				       buflen + sizeof(WMI_CMD_HDR),
2683 				       wmi_handle,
2684 				       wmi_process_qmi_fw_event);
2685 	if (QDF_IS_STATUS_ERROR(status)) {
2686 		qdf_nbuf_pull_head(buf, sizeof(WMI_CMD_HDR));
2687 		wmi_warn("WMI send on QMI failed. Retrying WMI on HTC");
2688 	} else {
2689 		ret = qdf_atomic_inc_return(&wmi_handle->num_stats_over_qmi);
2690 		wmi_debug("num stats over qmi: %d", ret);
2691 		wmi_buf_free(buf);
2692 	}
2693 
2694 	return status;
2695 }
2696 
2697 static int __wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2698 {
2699 	struct wmi_unified *wmi_handle = wmi_cb_ctx;
2700 	wmi_buf_t evt_buf;
2701 	uint32_t evt_id;
2702 
2703 	if (!wmi_handle || !buf)
2704 		return -EINVAL;
2705 
2706 	evt_buf = wmi_buf_alloc(wmi_handle, len);
2707 	if (!evt_buf)
2708 		return -ENOMEM;
2709 
2710 	qdf_mem_copy(qdf_nbuf_data(evt_buf), buf, len);
2711 	evt_id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2712 	wmi_debug("Received WMI_EVT_ID: %d over qmi", evt_id);
2713 	wmi_process_control_rx(wmi_handle, evt_buf);
2714 
2715 	return 0;
2716 }
2717 
2718 int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2719 {
2720 	struct qdf_op_sync *op_sync;
2721 	int ret;
2722 
2723 	if (qdf_op_protect(&op_sync))
2724 		return -EINVAL;
2725 	ret = __wmi_process_qmi_fw_event(wmi_cb_ctx, buf, len);
2726 	qdf_op_unprotect(op_sync);
2727 
2728 	return ret;
2729 }
2730 #endif
2731 
2732 /**
2733  * wmi_process_fw_event() - process any fw event
2734  * @wmi_handle: wmi handle
2735  * @evt_buf: fw event buffer
2736  *
2737  * This function process fw event in caller context
2738  *
2739  * Return: none
2740  */
2741 void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2742 {
2743 	__wmi_control_rx(wmi_handle, evt_buf);
2744 }
2745 
2746 /**
2747  * __wmi_control_rx() - process serialize wmi event callback
2748  * @wmi_handle: wmi handle
2749  * @evt_buf: fw event buffer
2750  *
2751  * Return: none
2752  */
2753 void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2754 {
2755 	uint32_t id;
2756 	uint8_t *data;
2757 	uint32_t len;
2758 	void *wmi_cmd_struct_ptr = NULL;
2759 #ifndef WMI_NON_TLV_SUPPORT
2760 	int tlv_ok_status = 0;
2761 #endif
2762 	uint32_t idx = 0;
2763 	struct wmi_raw_event_buffer ev_buf;
2764 	enum wmi_rx_buff_type ev_buff_type;
2765 
2766 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2767 
2768 	wmi_ext_dbg_msg_event_record(wmi_handle, qdf_nbuf_data(evt_buf),
2769 				     qdf_nbuf_len(evt_buf));
2770 
2771 	if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
2772 		goto end;
2773 
2774 	data = qdf_nbuf_data(evt_buf);
2775 	len = qdf_nbuf_len(evt_buf);
2776 
2777 #ifndef WMI_NON_TLV_SUPPORT
2778 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2779 		/* Validate and pad(if necessary) the TLVs */
2780 		tlv_ok_status =
2781 			wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle,
2782 							data, len, id,
2783 							&wmi_cmd_struct_ptr);
2784 		if (tlv_ok_status != 0) {
2785 			QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2786 				  "%s: Error: id=0x%x, wmitlv check status=%d",
2787 				  __func__, id, tlv_ok_status);
2788 			goto end;
2789 		}
2790 	}
2791 #endif
2792 
2793 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2794 	if (idx == A_ERROR) {
2795 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2796 		   "%s : event handler is not registered: event id 0x%x",
2797 			__func__, id);
2798 		goto end;
2799 	}
2800 #ifdef WMI_INTERFACE_EVENT_LOGGING
2801 	if (wmi_handle->log_info.wmi_logging_enable) {
2802 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2803 		/* Exclude 4 bytes of TLV header */
2804 		if (wmi_handle->ops->is_diag_event(id)) {
2805 			/*
2806 			 * skip diag event logging in WMI event buffer
2807 			 * as its already logged in WMI RX event buffer
2808 			 */
2809 		} else if (wmi_handle->ops->is_management_record(id)) {
2810 			/*
2811 			 * skip wmi mgmt event logging in WMI event buffer
2812 			 * as its already logged in WMI RX event buffer
2813 			 */
2814 		} else {
2815 			uint8_t *tmpbuf = (uint8_t *)data +
2816 					wmi_handle->soc->buf_offset_event;
2817 
2818 			WMI_EVENT_RECORD(wmi_handle, id, tmpbuf);
2819 			wmi_specific_evt_record(wmi_handle, id, tmpbuf);
2820 		}
2821 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2822 	}
2823 #endif
2824 	/* Call the WMI registered event handler */
2825 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2826 		ev_buff_type = wmi_handle->ctx[idx].buff_type;
2827 		if (ev_buff_type == WMI_RX_PROCESSED_BUFF) {
2828 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2829 				wmi_cmd_struct_ptr, len);
2830 		} else if (ev_buff_type == WMI_RX_RAW_BUFF) {
2831 			ev_buf.evt_raw_buf = data;
2832 			ev_buf.evt_processed_buf = wmi_cmd_struct_ptr;
2833 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2834 							(void *)&ev_buf, len);
2835 		}
2836 	}
2837 	else
2838 		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2839 			data, len);
2840 
2841 end:
2842 	/* Free event buffer and allocated event tlv */
2843 #ifndef WMI_NON_TLV_SUPPORT
2844 	if (wmi_handle->target_type == WMI_TLV_TARGET)
2845 		wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr);
2846 #endif
2847 
2848 	qdf_nbuf_free(evt_buf);
2849 
2850 }
2851 
2852 #define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */
2853 
2854 static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id)
2855 {
2856 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2857 		  "%s: WLAN_BUG_RCA: Message type %x has exceeded its alloted time of %ds",
2858 		  __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000);
2859 }
2860 
2861 #ifdef CONFIG_SLUB_DEBUG_ON
2862 static void wmi_workqueue_watchdog_bite(void *arg)
2863 {
2864 	struct wmi_wq_dbg_info *info = arg;
2865 
2866 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
2867 	qdf_print_thread_trace(info->task);
2868 
2869 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2870 		  "%s: Going down for WMI WQ Watchdog Bite!", __func__);
2871 	QDF_BUG(0);
2872 }
2873 #else
2874 static inline void wmi_workqueue_watchdog_bite(void *arg)
2875 {
2876 	struct wmi_wq_dbg_info *info = arg;
2877 
2878 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
2879 
2880 	qdf_print_thread_trace(info->task);
2881 }
2882 #endif
2883 
2884 /**
2885  * wmi_rx_event_work() - process rx event in rx work queue context
2886  * @arg: opaque pointer to wmi handle
2887  *
2888  * This function process any fw event to serialize it through rx worker thread.
2889  *
2890  * Return: none
2891  */
2892 static void wmi_rx_event_work(void *arg)
2893 {
2894 	wmi_buf_t buf;
2895 	struct wmi_unified *wmi = arg;
2896 	qdf_timer_t wd_timer;
2897 	struct wmi_wq_dbg_info info;
2898 
2899 	/* initialize WMI workqueue watchdog timer */
2900 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
2901 			&info, QDF_TIMER_TYPE_SW);
2902 	qdf_spin_lock_bh(&wmi->eventq_lock);
2903 	buf = qdf_nbuf_queue_remove(&wmi->event_queue);
2904 	qdf_spin_unlock_bh(&wmi->eventq_lock);
2905 	while (buf) {
2906 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
2907 		info.wd_msg_type_id =
2908 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
2909 		info.wmi_wq = wmi->wmi_rx_work_queue;
2910 		info.task = qdf_get_current_task();
2911 		__wmi_control_rx(wmi, buf);
2912 		qdf_timer_stop(&wd_timer);
2913 		qdf_spin_lock_bh(&wmi->eventq_lock);
2914 		buf = qdf_nbuf_queue_remove(&wmi->event_queue);
2915 		qdf_spin_unlock_bh(&wmi->eventq_lock);
2916 	}
2917 	qdf_timer_free(&wd_timer);
2918 }
2919 
2920 /**
2921  * wmi_rx_diag_event_work() - process rx diag event in work queue context
2922  * @arg: opaque pointer to wmi handle
2923  *
2924  * This function process fw diag event to serialize it through rx worker thread.
2925  *
2926  * Return: none
2927  */
2928 static void wmi_rx_diag_event_work(void *arg)
2929 {
2930 	wmi_buf_t buf;
2931 	struct wmi_unified *wmi = arg;
2932 	qdf_timer_t wd_timer;
2933 	struct wmi_wq_dbg_info info;
2934 	uint32_t diag_event_process_count = 0;
2935 
2936 	if (!wmi) {
2937 		wmi_err("Invalid WMI handle");
2938 		return;
2939 	}
2940 
2941 	/* initialize WMI workqueue watchdog timer */
2942 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
2943 		       &info, QDF_TIMER_TYPE_SW);
2944 	qdf_spin_lock_bh(&wmi->diag_eventq_lock);
2945 	buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
2946 	qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
2947 	while (buf) {
2948 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
2949 		info.wd_msg_type_id =
2950 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
2951 		info.wmi_wq = NULL;
2952 		info.task = qdf_get_current_task();
2953 		__wmi_control_rx(wmi, buf);
2954 		qdf_timer_stop(&wd_timer);
2955 
2956 		if (diag_event_process_count++ >
2957 		    RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT) {
2958 			qdf_queue_work(0, wmi->wmi_rx_diag_work_queue,
2959 				       &wmi->rx_diag_event_work);
2960 			break;
2961 		}
2962 
2963 		qdf_spin_lock_bh(&wmi->diag_eventq_lock);
2964 		buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
2965 		qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
2966 	}
2967 	qdf_timer_free(&wd_timer);
2968 }
2969 
2970 #ifdef FEATURE_RUNTIME_PM
2971 /**
2972  * wmi_runtime_pm_init() - initialize runtime pm wmi variables
2973  * @wmi_handle: wmi context
2974  */
2975 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
2976 {
2977 	qdf_atomic_init(&wmi_handle->runtime_pm_inprogress);
2978 }
2979 
2980 /**
2981  * wmi_set_runtime_pm_inprogress() - set runtime pm progress flag
2982  * @wmi_handle: wmi context
2983  * @val: runtime pm progress flag
2984  */
2985 void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val)
2986 {
2987 	qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val);
2988 }
2989 
2990 /**
2991  * wmi_get_runtime_pm_inprogress() - get runtime pm progress flag
2992  * @wmi_handle: wmi context
2993  */
2994 inline bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle)
2995 {
2996 	return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress);
2997 }
2998 #else
2999 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
3000 {
3001 }
3002 #endif
3003 
3004 /**
3005  * wmi_unified_get_soc_handle: Get WMI SoC handle
3006  * @param wmi_handle: WMI context got from wmi_attach
3007  *
3008  * return: Pointer to Soc handle
3009  */
3010 void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle)
3011 {
3012 	return wmi_handle->soc;
3013 }
3014 
3015 /**
3016  * wmi_interface_logging_init: Interface looging init
3017  * @param wmi_handle: Pointer to wmi handle object
3018  *
3019  * return: None
3020  */
3021 #ifdef WMI_INTERFACE_EVENT_LOGGING
3022 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
3023 					      uint32_t pdev_idx)
3024 {
3025 	if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) {
3026 		qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
3027 		wmi_debugfs_init(wmi_handle, pdev_idx);
3028 	}
3029 }
3030 #else
3031 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
3032 					      uint32_t pdev_idx)
3033 {
3034 }
3035 #endif
3036 
3037 static QDF_STATUS wmi_initialize_worker_context(struct wmi_unified *wmi_handle)
3038 {
3039 	wmi_handle->wmi_rx_work_queue =
3040 		qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue");
3041 	if (!wmi_handle->wmi_rx_work_queue) {
3042 		wmi_err("failed to create wmi_rx_event_work_queue");
3043 		return QDF_STATUS_E_RESOURCES;
3044 	}
3045 
3046 	qdf_spinlock_create(&wmi_handle->eventq_lock);
3047 	qdf_nbuf_queue_init(&wmi_handle->event_queue);
3048 	qdf_create_work(0, &wmi_handle->rx_event_work,
3049 			wmi_rx_event_work, wmi_handle);
3050 
3051 	wmi_handle->wmi_rx_diag_work_queue =
3052 		qdf_alloc_unbound_workqueue("wmi_rx_diag_event_work_queue");
3053 	if (!wmi_handle->wmi_rx_diag_work_queue) {
3054 		wmi_err("failed to create wmi_rx_diag_event_work_queue");
3055 		return QDF_STATUS_E_RESOURCES;
3056 	}
3057 	qdf_spinlock_create(&wmi_handle->diag_eventq_lock);
3058 	qdf_nbuf_queue_init(&wmi_handle->diag_event_queue);
3059 	qdf_create_work(0, &wmi_handle->rx_diag_event_work,
3060 			wmi_rx_diag_event_work, wmi_handle);
3061 	wmi_handle->wmi_rx_diag_events_dropped = 0;
3062 
3063 	return QDF_STATUS_SUCCESS;
3064 }
3065 
3066 /**
3067  * wmi_unified_get_pdev_handle: Get WMI SoC handle
3068  * @param wmi_soc: Pointer to wmi soc object
3069  * @param pdev_idx: pdev index
3070  *
3071  * return: Pointer to wmi handle or NULL on failure
3072  */
3073 void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx)
3074 {
3075 	struct wmi_unified *wmi_handle;
3076 	QDF_STATUS status;
3077 
3078 	if (pdev_idx >= WMI_MAX_RADIOS)
3079 		return NULL;
3080 
3081 	if (!soc->wmi_pdev[pdev_idx]) {
3082 		wmi_handle =
3083 			(struct wmi_unified *) qdf_mem_malloc(
3084 					sizeof(struct wmi_unified));
3085 		if (!wmi_handle)
3086 			return NULL;
3087 
3088 		status = wmi_initialize_worker_context(wmi_handle);
3089 		if (QDF_IS_STATUS_ERROR(status))
3090 			goto error;
3091 
3092 		wmi_handle->scn_handle = soc->scn_handle;
3093 		wmi_handle->event_id = soc->event_id;
3094 		wmi_handle->event_handler = soc->event_handler;
3095 		wmi_handle->ctx = soc->ctx;
3096 		wmi_handle->ops = soc->ops;
3097 		wmi_handle->wmi_events = soc->wmi_events;
3098 		wmi_handle->services = soc->services;
3099 		wmi_handle->soc = soc;
3100 		wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3101 		wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3102 		wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3103 		wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3104 		wmi_interface_logging_init(wmi_handle, pdev_idx);
3105 		qdf_atomic_init(&wmi_handle->pending_cmds);
3106 		qdf_atomic_init(&wmi_handle->is_target_suspended);
3107 		wmi_handle->target_type = soc->target_type;
3108 		wmi_handle->wmi_max_cmds = soc->wmi_max_cmds;
3109 
3110 		wmi_interface_sequence_init(wmi_handle);
3111 		if (wmi_ext_dbgfs_init(wmi_handle, pdev_idx) !=
3112 		    QDF_STATUS_SUCCESS)
3113 			wmi_err("Failed to initialize wmi extended debugfs");
3114 
3115 		soc->wmi_pdev[pdev_idx] = wmi_handle;
3116 	} else
3117 		wmi_handle = soc->wmi_pdev[pdev_idx];
3118 
3119 	wmi_handle->wmi_stopinprogress = 0;
3120 	wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx];
3121 	wmi_handle->htc_handle = soc->htc_handle;
3122 	wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx];
3123 	wmi_handle->tag_crash_inject = false;
3124 	wmi_interface_sequence_reset(wmi_handle);
3125 
3126 	return wmi_handle;
3127 
3128 error:
3129 	qdf_mem_free(wmi_handle);
3130 
3131 	return NULL;
3132 }
3133 qdf_export_symbol(wmi_unified_get_pdev_handle);
3134 
3135 static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t);
3136 
3137 void wmi_unified_register_module(enum wmi_target_type target_type,
3138 			void (*wmi_attach)(wmi_unified_t wmi_handle))
3139 {
3140 	if (target_type < WMI_MAX_TARGET_TYPE)
3141 		wmi_attach_register[target_type] = wmi_attach;
3142 
3143 	return;
3144 }
3145 qdf_export_symbol(wmi_unified_register_module);
3146 
3147 /**
3148  * wmi_wbuff_register() - register wmi with wbuff
3149  * @wmi_handle: handle to wmi
3150  *
3151  * @Return: void
3152  */
3153 static void wmi_wbuff_register(struct wmi_unified *wmi_handle)
3154 {
3155 	struct wbuff_alloc_request wbuff_alloc[4];
3156 
3157 	wbuff_alloc[0].slot = WBUFF_POOL_0;
3158 	wbuff_alloc[0].size = WMI_WBUFF_POOL_0_SIZE;
3159 	wbuff_alloc[1].slot = WBUFF_POOL_1;
3160 	wbuff_alloc[1].size = WMI_WBUFF_POOL_1_SIZE;
3161 	wbuff_alloc[2].slot = WBUFF_POOL_2;
3162 	wbuff_alloc[2].size = WMI_WBUFF_POOL_2_SIZE;
3163 	wbuff_alloc[3].slot = WBUFF_POOL_3;
3164 	wbuff_alloc[3].size = WMI_WBUFF_POOL_3_SIZE;
3165 
3166 	wmi_handle->wbuff_handle = wbuff_module_register(wbuff_alloc, 4,
3167 							 WMI_MIN_HEAD_ROOM, 4);
3168 }
3169 
3170 /**
3171  * wmi_wbuff_deregister() - deregister wmi with wbuff
3172  * @wmi_handle: handle to wmi
3173  *
3174  * @Return: void
3175  */
3176 static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle)
3177 {
3178 	wbuff_module_deregister(wmi_handle->wbuff_handle);
3179 	wmi_handle->wbuff_handle = NULL;
3180 }
3181 
3182 /**
3183  * wmi_unified_attach() -  attach for unified WMI
3184  * @scn_handle: handle to SCN
3185  * @osdev: OS device context
3186  * @target_type: TLV or not-TLV based target
3187  * @use_cookie: cookie based allocation enabled/disabled
3188  * @ops: umac rx callbacks
3189  * @psoc: objmgr psoc
3190  *
3191  * @Return: wmi handle.
3192  */
3193 void *wmi_unified_attach(void *scn_handle,
3194 			 struct wmi_unified_attach_params *param)
3195 {
3196 	struct wmi_unified *wmi_handle;
3197 	struct wmi_soc *soc;
3198 	QDF_STATUS status;
3199 
3200 	soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc));
3201 	if (!soc)
3202 		return NULL;
3203 
3204 	wmi_handle =
3205 		(struct wmi_unified *) qdf_mem_malloc(
3206 			sizeof(struct wmi_unified));
3207 	if (!wmi_handle) {
3208 		qdf_mem_free(soc);
3209 		return NULL;
3210 	}
3211 
3212 	status = wmi_initialize_worker_context(wmi_handle);
3213 	if (QDF_IS_STATUS_ERROR(status))
3214 		goto error;
3215 
3216 	wmi_handle->soc = soc;
3217 	wmi_handle->soc->soc_idx = param->soc_id;
3218 	wmi_handle->soc->is_async_ep = param->is_async_ep;
3219 	wmi_handle->event_id = soc->event_id;
3220 	wmi_handle->event_handler = soc->event_handler;
3221 	wmi_handle->ctx = soc->ctx;
3222 	wmi_handle->wmi_events = soc->wmi_events;
3223 	wmi_handle->services = soc->services;
3224 	wmi_handle->scn_handle = scn_handle;
3225 	wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3226 	wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3227 	wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3228 	wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3229 	soc->scn_handle = scn_handle;
3230 	wmi_handle->target_type = param->target_type;
3231 	soc->target_type = param->target_type;
3232 
3233 	if (param->target_type >= WMI_MAX_TARGET_TYPE)
3234 		goto error;
3235 
3236 	if (wmi_attach_register[param->target_type]) {
3237 		wmi_attach_register[param->target_type](wmi_handle);
3238 	} else {
3239 		wmi_err("wmi attach is not registered");
3240 		goto error;
3241 	}
3242 
3243 	qdf_atomic_init(&wmi_handle->pending_cmds);
3244 	qdf_atomic_init(&wmi_handle->is_target_suspended);
3245 	qdf_atomic_init(&wmi_handle->is_target_suspend_acked);
3246 	qdf_atomic_init(&wmi_handle->num_stats_over_qmi);
3247 	wmi_runtime_pm_init(wmi_handle);
3248 	wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0);
3249 
3250 	wmi_interface_sequence_init(wmi_handle);
3251 	/* Assign target cookie capablity */
3252 	wmi_handle->use_cookie = param->use_cookie;
3253 	wmi_handle->osdev = param->osdev;
3254 	wmi_handle->wmi_stopinprogress = 0;
3255 	wmi_handle->wmi_max_cmds = param->max_commands;
3256 	soc->wmi_max_cmds = param->max_commands;
3257 	/* Increase the ref count once refcount infra is present */
3258 	soc->wmi_psoc = param->psoc;
3259 	qdf_spinlock_create(&soc->ctx_lock);
3260 	soc->ops = wmi_handle->ops;
3261 	soc->wmi_pdev[0] = wmi_handle;
3262 	if (wmi_ext_dbgfs_init(wmi_handle, 0) != QDF_STATUS_SUCCESS)
3263 		wmi_err("Failed to initialize wmi extended debugfs");
3264 
3265 	wmi_wbuff_register(wmi_handle);
3266 
3267 	wmi_hang_event_notifier_register(wmi_handle);
3268 
3269 	wmi_minidump_attach(wmi_handle);
3270 
3271 	return wmi_handle;
3272 
3273 error:
3274 	qdf_mem_free(soc);
3275 	qdf_mem_free(wmi_handle);
3276 
3277 	return NULL;
3278 }
3279 
3280 /**
3281  * wmi_unified_detach() -  detach for unified WMI
3282  *
3283  * @wmi_handle  : handle to wmi.
3284  *
3285  * @Return: none.
3286  */
3287 void wmi_unified_detach(struct wmi_unified *wmi_handle)
3288 {
3289 	wmi_buf_t buf;
3290 	struct wmi_soc *soc;
3291 	uint8_t i;
3292 
3293 	wmi_minidump_detach(wmi_handle);
3294 
3295 	wmi_hang_event_notifier_unregister();
3296 
3297 	wmi_wbuff_deregister(wmi_handle);
3298 
3299 	soc = wmi_handle->soc;
3300 	for (i = 0; i < WMI_MAX_RADIOS; i++) {
3301 		if (soc->wmi_pdev[i]) {
3302 			qdf_flush_workqueue(0,
3303 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3304 			qdf_destroy_workqueue(0,
3305 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3306 			wmi_debugfs_remove(soc->wmi_pdev[i]);
3307 			buf = qdf_nbuf_queue_remove(
3308 					&soc->wmi_pdev[i]->event_queue);
3309 			while (buf) {
3310 				qdf_nbuf_free(buf);
3311 				buf = qdf_nbuf_queue_remove(
3312 						&soc->wmi_pdev[i]->event_queue);
3313 			}
3314 
3315 			qdf_flush_work(&soc->wmi_pdev[i]->rx_diag_event_work);
3316 			buf = qdf_nbuf_queue_remove(
3317 					&soc->wmi_pdev[i]->diag_event_queue);
3318 			while (buf) {
3319 				qdf_nbuf_free(buf);
3320 				buf = qdf_nbuf_queue_remove(
3321 					&soc->wmi_pdev[i]->diag_event_queue);
3322 			}
3323 
3324 			wmi_log_buffer_free(soc->wmi_pdev[i]);
3325 
3326 			/* Free events logs list */
3327 			if (soc->wmi_pdev[i]->events_logs_list)
3328 				qdf_mem_free(
3329 					soc->wmi_pdev[i]->events_logs_list);
3330 
3331 			qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock);
3332 			qdf_spinlock_destroy(
3333 					&soc->wmi_pdev[i]->diag_eventq_lock);
3334 
3335 			wmi_interface_sequence_deinit(soc->wmi_pdev[i]);
3336 			wmi_ext_dbgfs_deinit(soc->wmi_pdev[i]);
3337 
3338 			qdf_mem_free(soc->wmi_pdev[i]);
3339 		}
3340 	}
3341 	qdf_spinlock_destroy(&soc->ctx_lock);
3342 
3343 	if (soc->wmi_service_bitmap) {
3344 		qdf_mem_free(soc->wmi_service_bitmap);
3345 		soc->wmi_service_bitmap = NULL;
3346 	}
3347 
3348 	if (soc->wmi_ext_service_bitmap) {
3349 		qdf_mem_free(soc->wmi_ext_service_bitmap);
3350 		soc->wmi_ext_service_bitmap = NULL;
3351 	}
3352 
3353 	if (soc->wmi_ext2_service_bitmap) {
3354 		qdf_mem_free(soc->wmi_ext2_service_bitmap);
3355 		soc->wmi_ext2_service_bitmap = NULL;
3356 	}
3357 
3358 	/* Decrease the ref count once refcount infra is present */
3359 	soc->wmi_psoc = NULL;
3360 	qdf_mem_free(soc);
3361 }
3362 
3363 /**
3364  * wmi_unified_remove_work() - detach for WMI work
3365  * @wmi_handle: handle to WMI
3366  *
3367  * A function that does not fully detach WMI, but just remove work
3368  * queue items associated with it. This is used to make sure that
3369  * before any other processing code that may destroy related contexts
3370  * (HTC, etc), work queue processing on WMI has already been stopped.
3371  *
3372  * Return: None
3373  */
3374 void
3375 wmi_unified_remove_work(struct wmi_unified *wmi_handle)
3376 {
3377 	wmi_buf_t buf;
3378 
3379 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue);
3380 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
3381 	buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3382 	while (buf) {
3383 		qdf_nbuf_free(buf);
3384 		buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3385 	}
3386 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
3387 
3388 	/* Remove diag events work */
3389 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_diag_work_queue);
3390 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
3391 	buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3392 	while (buf) {
3393 		qdf_nbuf_free(buf);
3394 		buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3395 	}
3396 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
3397 }
3398 
3399 /**
3400  * wmi_htc_tx_complete() - Process htc tx completion
3401  *
3402  * @ctx: handle to wmi
3403  * @htc_packet: pointer to htc packet
3404  *
3405  * @Return: none.
3406  */
3407 static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
3408 {
3409 	struct wmi_soc *soc = (struct wmi_soc *) ctx;
3410 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3411 	u_int8_t *buf_ptr;
3412 	u_int32_t len;
3413 	struct wmi_unified *wmi_handle;
3414 #ifdef WMI_INTERFACE_EVENT_LOGGING
3415 	struct wmi_debug_log_info *log_info;
3416 	uint32_t cmd_id;
3417 	uint8_t *offset_ptr;
3418 	qdf_dma_addr_t dma_addr;
3419 	uint64_t phy_addr;
3420 #endif
3421 
3422 	ASSERT(wmi_cmd_buf);
3423 	wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint);
3424 	if (!wmi_handle) {
3425 		wmi_err("Unable to get wmi handle");
3426 		QDF_ASSERT(0);
3427 		return;
3428 	}
3429 	buf_ptr = (u_int8_t *)wmi_buf_data(wmi_cmd_buf);
3430 #ifdef WMI_INTERFACE_EVENT_LOGGING
3431 	log_info = &wmi_handle->log_info;
3432 
3433 	if (wmi_handle && log_info->wmi_logging_enable) {
3434 		cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf),
3435 				WMI_CMD_HDR, COMMANDID);
3436 
3437 		dma_addr = QDF_NBUF_CB_PADDR(wmi_cmd_buf);
3438 		phy_addr = qdf_mem_virt_to_phys(qdf_nbuf_data(wmi_cmd_buf));
3439 
3440 		qdf_spin_lock_bh(&log_info->wmi_record_lock);
3441 		/* Record 16 bytes of WMI cmd tx complete data
3442 		 * - exclude TLV and WMI headers
3443 		 */
3444 		offset_ptr = buf_ptr + wmi_handle->soc->buf_offset_command;
3445 		if (wmi_handle->ops->is_management_record(cmd_id)) {
3446 			WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3447 						       offset_ptr);
3448 		} else {
3449 			WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3450 						  offset_ptr, dma_addr,
3451 						  phy_addr);
3452 		}
3453 
3454 		qdf_spin_unlock_bh(&log_info->wmi_record_lock);
3455 	}
3456 #endif
3457 
3458 	wmi_interface_sequence_check(wmi_handle, wmi_cmd_buf);
3459 
3460 	len = qdf_nbuf_len(wmi_cmd_buf);
3461 	qdf_mem_zero(buf_ptr, len);
3462 	wmi_buf_free(wmi_cmd_buf);
3463 	qdf_mem_free(htc_pkt);
3464 	qdf_atomic_dec(&wmi_handle->pending_cmds);
3465 }
3466 
3467 #ifdef FEATURE_RUNTIME_PM
3468 /**
3469  * wmi_htc_log_pkt() - Print information of WMI command from HTC packet
3470  *
3471  * @ctx: handle of WMI context
3472  * @htc_pkt: handle of HTC packet
3473  *
3474  * @Return: none
3475  */
3476 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3477 {
3478 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3479 	uint32_t cmd_id;
3480 
3481 	ASSERT(wmi_cmd_buf);
3482 	cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), WMI_CMD_HDR,
3483 			       COMMANDID);
3484 
3485 	wmi_debug("WMI command from HTC packet: %s, ID: %d",
3486 		 wmi_id_to_name(cmd_id), cmd_id);
3487 }
3488 #else
3489 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3490 {
3491 }
3492 #endif
3493 
3494 /**
3495  * wmi_connect_pdev_htc_service() -  WMI API to get connect to HTC service
3496  *
3497  * @wmi_handle: handle to WMI.
3498  * @pdev_idx: Pdev index
3499  *
3500  * @Return: QDF_STATUS
3501  */
3502 static QDF_STATUS wmi_connect_pdev_htc_service(struct wmi_soc *soc,
3503 					       uint32_t pdev_idx)
3504 {
3505 	QDF_STATUS status;
3506 	struct htc_service_connect_resp response;
3507 	struct htc_service_connect_req connect;
3508 
3509 	OS_MEMZERO(&connect, sizeof(connect));
3510 	OS_MEMZERO(&response, sizeof(response));
3511 
3512 	/* meta data is unused for now */
3513 	connect.pMetaData = NULL;
3514 	connect.MetaDataLength = 0;
3515 	/* these fields are the same for all service endpoints */
3516 	connect.EpCallbacks.pContext = soc;
3517 	connect.EpCallbacks.EpTxCompleteMultiple =
3518 		NULL /* Control path completion ar6000_tx_complete */;
3519 	connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */;
3520 	connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */;
3521 	connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */;
3522 	connect.EpCallbacks.EpTxComplete =
3523 		wmi_htc_tx_complete /* ar6000_tx_queue_full */;
3524 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3525 
3526 	/* connect to control service */
3527 	connect.service_id = soc->svc_ids[pdev_idx];
3528 	status = htc_connect_service(soc->htc_handle, &connect, &response);
3529 
3530 	if (QDF_IS_STATUS_ERROR(status)) {
3531 		wmi_err("Failed to connect to WMI CONTROL service status:%d",
3532 			 status);
3533 		return status;
3534 	}
3535 
3536 	if (soc->is_async_ep)
3537 		htc_set_async_ep(soc->htc_handle, response.Endpoint, true);
3538 
3539 	soc->wmi_endpoint_id[pdev_idx] = response.Endpoint;
3540 	soc->max_msg_len[pdev_idx] = response.MaxMsgLength;
3541 
3542 	return QDF_STATUS_SUCCESS;
3543 }
3544 
3545 QDF_STATUS
3546 wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle,
3547 				HTC_HANDLE htc_handle)
3548 {
3549 	uint32_t i;
3550 	uint8_t wmi_ep_count;
3551 
3552 	wmi_handle->soc->htc_handle = htc_handle;
3553 
3554 	wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle);
3555 	if (wmi_ep_count > WMI_MAX_RADIOS)
3556 		return QDF_STATUS_E_FAULT;
3557 
3558 	for (i = 0; i < wmi_ep_count; i++)
3559 		wmi_connect_pdev_htc_service(wmi_handle->soc, i);
3560 
3561 	wmi_handle->htc_handle = htc_handle;
3562 	wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0];
3563 	wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0];
3564 
3565 	return QDF_STATUS_SUCCESS;
3566 }
3567 
3568 #if defined(WLAN_FEATURE_WMI_DIAG_OVER_CE7) || \
3569 	defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
3570 QDF_STATUS wmi_diag_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
3571 					     HTC_HANDLE htc_handle)
3572 {
3573 	QDF_STATUS status;
3574 	struct htc_service_connect_resp response = {0};
3575 	struct htc_service_connect_req connect = {0};
3576 
3577 	/* meta data is unused for now */
3578 	connect.pMetaData = NULL;
3579 	connect.MetaDataLength = 0;
3580 	connect.EpCallbacks.pContext = wmi_handle->soc;
3581 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3582 	connect.EpCallbacks.EpRecv = wmi_control_diag_rx /* wmi diag rx */;
3583 	connect.EpCallbacks.EpRecvRefill = NULL;
3584 	connect.EpCallbacks.EpSendFull = NULL;
3585 	connect.EpCallbacks.EpTxComplete = NULL;
3586 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3587 
3588 	/* connect to wmi diag service */
3589 	connect.service_id = WMI_CONTROL_DIAG_SVC;
3590 	status = htc_connect_service(htc_handle, &connect, &response);
3591 
3592 	if (QDF_IS_STATUS_ERROR(status)) {
3593 		wmi_err("Failed to connect to WMI DIAG service status:%d",
3594 			status);
3595 		return status;
3596 	}
3597 
3598 	if (wmi_handle->soc->is_async_ep)
3599 		htc_set_async_ep(htc_handle, response.Endpoint, true);
3600 
3601 	wmi_handle->soc->wmi_diag_endpoint_id = response.Endpoint;
3602 
3603 	return QDF_STATUS_SUCCESS;
3604 }
3605 #endif
3606 
3607 /**
3608  * wmi_get_host_credits() -  WMI API to get updated host_credits
3609  *
3610  * @wmi_handle: handle to WMI.
3611  *
3612  * @Return: updated host_credits.
3613  */
3614 int wmi_get_host_credits(wmi_unified_t wmi_handle)
3615 {
3616 	int host_credits = 0;
3617 
3618 	htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle,
3619 						 &host_credits);
3620 	return host_credits;
3621 }
3622 
3623 /**
3624  * wmi_get_pending_cmds() - WMI API to get WMI Pending Commands in the HTC
3625  *                          queue
3626  *
3627  * @wmi_handle: handle to WMI.
3628  *
3629  * @Return: Pending Commands in the HTC queue.
3630  */
3631 int wmi_get_pending_cmds(wmi_unified_t wmi_handle)
3632 {
3633 	return qdf_atomic_read(&wmi_handle->pending_cmds);
3634 }
3635 
3636 /**
3637  * wmi_set_target_suspend() -  WMI API to set target suspend state
3638  *
3639  * @wmi_handle: handle to WMI.
3640  * @val: suspend state boolean.
3641  *
3642  * @Return: none.
3643  */
3644 void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val)
3645 {
3646 	qdf_atomic_set(&wmi_handle->is_target_suspended, val);
3647 }
3648 
3649 /**
3650  * wmi_set_target_suspend_acked() -  WMI API to set target suspend acked flag
3651  *
3652  * @wmi_handle: handle to WMI.
3653  * @val: target suspend command acked flag.
3654  *
3655  * @Return: none.
3656  */
3657 void wmi_set_target_suspend_acked(wmi_unified_t wmi_handle, A_BOOL val)
3658 {
3659 	qdf_atomic_set(&wmi_handle->is_target_suspend_acked, val);
3660 }
3661 
3662 /**
3663  * wmi_is_target_suspended() - WMI API to check target suspend state
3664  * @wmi_handle: handle to WMI.
3665  *
3666  * WMI API to check target suspend state
3667  *
3668  * Return: true if target is suspended, else false.
3669  */
3670 bool wmi_is_target_suspended(struct wmi_unified *wmi_handle)
3671 {
3672 	return qdf_atomic_read(&wmi_handle->is_target_suspended);
3673 }
3674 qdf_export_symbol(wmi_is_target_suspended);
3675 
3676 /**
3677  * wmi_is_target_suspend_acked() - WMI API to check target suspend command is
3678  *                                 acked or not
3679  * @wmi_handle: handle to WMI.
3680  *
3681  * WMI API to check whether the target suspend command is acked or not
3682  *
3683  * Return: true if target suspend command is acked, else false.
3684  */
3685 bool wmi_is_target_suspend_acked(struct wmi_unified *wmi_handle)
3686 {
3687 	return qdf_atomic_read(&wmi_handle->is_target_suspend_acked);
3688 }
3689 qdf_export_symbol(wmi_is_target_suspend_acked);
3690 
3691 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
3692 void wmi_set_qmi_stats(wmi_unified_t wmi_handle, bool val)
3693 {
3694 	wmi_handle->is_qmi_stats_enabled = val;
3695 }
3696 
3697 bool wmi_is_qmi_stats_enabled(struct wmi_unified *wmi_handle)
3698 {
3699 	return wmi_handle->is_qmi_stats_enabled;
3700 }
3701 #endif
3702 
3703 /**
3704  * WMI API to set crash injection state
3705  * @param wmi_handle:	handle to WMI.
3706  * @param val:		crash injection state boolean.
3707  */
3708 void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag)
3709 {
3710 	wmi_handle->tag_crash_inject = flag;
3711 }
3712 
3713 /**
3714  * WMI API to set bus suspend state
3715  * @param wmi_handle:	handle to WMI.
3716  * @param val:		suspend state boolean.
3717  */
3718 void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val)
3719 {
3720 	qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val);
3721 }
3722 
3723 void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val)
3724 {
3725 	wmi_handle->tgt_force_assert_enable = val;
3726 }
3727 
3728 /**
3729  * wmi_stop() - generic function to block unified WMI command
3730  * @wmi_handle: handle to WMI.
3731  *
3732  * @Return: success always.
3733  */
3734 int
3735 wmi_stop(wmi_unified_t wmi_handle)
3736 {
3737 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3738 		  "WMI Stop");
3739 	wmi_handle->wmi_stopinprogress = 1;
3740 	return 0;
3741 }
3742 
3743 /**
3744  * wmi_start() - generic function to allow unified WMI command
3745  * @wmi_handle: handle to WMI.
3746  *
3747  * @Return: success always.
3748  */
3749 int
3750 wmi_start(wmi_unified_t wmi_handle)
3751 {
3752 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3753 		  "WMI Start");
3754 	wmi_handle->wmi_stopinprogress = 0;
3755 	return 0;
3756 }
3757 
3758 /**
3759  * wmi_is_blocked() - generic function to check if WMI is blocked
3760  * @wmi_handle: handle to WMI.
3761  *
3762  * @Return: true, if blocked, false if not blocked
3763  */
3764 bool
3765 wmi_is_blocked(wmi_unified_t wmi_handle)
3766 {
3767 	return (!(!wmi_handle->wmi_stopinprogress));
3768 }
3769 
3770 /**
3771  * API to flush all the previous packets  associated with the wmi endpoint
3772  *
3773  * @param wmi_handle      : handle to WMI.
3774  */
3775 void
3776 wmi_flush_endpoint(wmi_unified_t wmi_handle)
3777 {
3778 	htc_flush_endpoint(wmi_handle->htc_handle,
3779 		wmi_handle->wmi_endpoint_id, 0);
3780 }
3781 qdf_export_symbol(wmi_flush_endpoint);
3782 
3783 /**
3784  * wmi_pdev_id_conversion_enable() - API to enable pdev_id/phy_id conversion
3785  *                     in WMI. By default pdev_id conversion is not done in WMI.
3786  *                     This API can be used enable conversion in WMI.
3787  * @param wmi_handle   : handle to WMI
3788  * @param pdev_map     : pointer to pdev_map
3789  * @size               : size of pdev_id_map
3790  * Return none
3791  */
3792 void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle,
3793 				   uint32_t *pdev_id_map,
3794 				   uint8_t size)
3795 {
3796 	if (wmi_handle->target_type == WMI_TLV_TARGET)
3797 		wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle,
3798 							       pdev_id_map,
3799 							       size);
3800 }
3801 
3802 int __wmi_validate_handle(wmi_unified_t wmi_handle, const char *func)
3803 {
3804         if (!wmi_handle) {
3805                 wmi_err("Invalid WMI handle (via %s)", func);
3806                 return -EINVAL;
3807         }
3808 
3809         return 0;
3810 }
3811