1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*
21  * Host WMI unified implementation
22  */
23 #include "htc_api.h"
25 #include "wmi_unified_priv.h"
26 #include "wmi_unified_api.h"
27 #include "qdf_module.h"
28 #include "qdf_platform.h"
29 #include "qdf_ssr_driver_dump.h"
30 #ifdef WMI_EXT_DBG
31 #include "qdf_list.h"
32 #include "qdf_atomic.h"
33 #endif
34 
35 #ifndef WMI_NON_TLV_SUPPORT
36 #include "wmi_tlv_helper.h"
37 #endif
38 
39 #include <linux/debugfs.h>
40 #include <target_if.h>
41 #include <qdf_debugfs.h>
42 #include "wmi_filtered_logging.h"
43 #include <wmi_hang_event.h>
44 
45 #ifdef DP_UMAC_HW_RESET_SUPPORT
46 #include <cdp_txrx_ctrl.h>
47 #endif
48 
/*
 * This CONFIG_WIN check was temporarily added due to a redeclaration
 * compilation error in MCL. The error is caused by the inclusion of wmi.h in
 * wmi_unified_api.h, which gets included here through ol_if_athvar.h.
 * Eventually wmi.h is expected to be removed from wmi_unified_api.h after
 * cleanup, at which point WMI_CMD_HDR will need to be defined here.
 */
54 /* Copied from wmi.h */
55 #undef MS
56 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
57 #undef SM
58 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
59 #undef WO
60 #define WO(_f)      ((_f##_OFFSET) >> 2)
61 
62 #undef GET_FIELD
63 #define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f)
64 #undef SET_FIELD
65 #define SET_FIELD(_addr, _f, _val)  \
66 	    (*((uint32_t *)(_addr) + WO(_f)) = \
67 		(*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f))
68 
69 #define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \
70 	    GET_FIELD(_msg_buf, _msg_type ## _ ## _f)
71 
72 #define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \
73 	    SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val)
74 
75 #define WMI_EP_APASS           0x0
76 #define WMI_EP_LPASS           0x1
77 #define WMI_EP_SENSOR          0x2
78 
79 #define WMI_INFOS_DBG_FILE_PERM (QDF_FILE_USR_READ | \
80 				 QDF_FILE_USR_WRITE | \
81 				 QDF_FILE_GRP_READ | \
82 				 QDF_FILE_OTH_READ)
83 
/*
 * Control Path
 */
87 typedef PREPACK struct {
88 	uint32_t	commandId:24,
89 			reserved:2, /* used for WMI endpoint ID */
90 			plt_priv:6; /* platform private */
91 } POSTPACK WMI_CMD_HDR;        /* used for commands and events */
92 
93 #define WMI_CMD_HDR_COMMANDID_LSB           0
94 #define WMI_CMD_HDR_COMMANDID_MASK          0x00ffffff
95 #define WMI_CMD_HDR_COMMANDID_OFFSET        0x00000000
96 #define WMI_CMD_HDR_WMI_ENDPOINTID_MASK        0x03000000
97 #define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET      24
98 #define WMI_CMD_HDR_PLT_PRIV_LSB               24
99 #define WMI_CMD_HDR_PLT_PRIV_MASK              0xff000000
100 #define WMI_CMD_HDR_PLT_PRIV_OFFSET            0x00000000
101 /* end of copy wmi.h */
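
/*
 * Illustrative sketch (added note, not part of the copied wmi.h block): the
 * accessor macros above combine the _LSB, _MASK and _OFFSET definitions, so
 * reading or updating the 24-bit command id of a WMI_CMD_HDR at the start of
 * a message buffer could look like the following, where "msg_buf" is a
 * hypothetical pointer to the serialized header:
 *
 *   uint32_t cmd_id = WMI_GET_FIELD(msg_buf, WMI_CMD_HDR, COMMANDID);
 *   WMI_SET_FIELD(msg_buf, WMI_CMD_HDR, COMMANDID, cmd_id);
 */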
102 
103 #define WMI_MIN_HEAD_ROOM 64
104 
105 /* WBUFF pool sizes for WMI */
106 /* Allocation of size 256 bytes */
107 #define WMI_WBUFF_POOL_0_SIZE 128
108 /* Allocation of size 512 bytes */
109 #define WMI_WBUFF_POOL_1_SIZE 16
110 /* Allocation of size 1024 bytes */
111 #define WMI_WBUFF_POOL_2_SIZE 8
112 /* Allocation of size 2048 bytes */
113 #define WMI_WBUFF_POOL_3_SIZE 8
114 
/* wbuff pool buffer lengths in bytes for WMI */
116 #define WMI_WBUFF_LEN_POOL0 256
117 #define WMI_WBUFF_LEN_POOL1 512
118 #define WMI_WBUFF_LEN_POOL2 1024
119 #define WMI_WBUFF_LEN_POOL3 2048
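
/*
 * Illustration (assumption about wbuff pool behaviour): taken together, the
 * two sets of defines above describe 128 buffers of 256 bytes, 16 of 512
 * bytes, 8 of 1024 bytes and 8 of 2048 bytes, so a WMI command whose total
 * length fits within 256 bytes would typically be served from pool 0 rather
 * than a fresh allocation.
 */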
120 
121 #define RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT 500
122 
123 #ifdef WMI_INTERFACE_EVENT_LOGGING
124 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
125 /* TODO Cleanup this backported function */
static int wmi_bp_seq_printf(qdf_debugfs_file_t m, const char *f, ...)
127 {
128 	va_list args;
129 
130 	va_start(args, f);
131 	seq_vprintf(m, f, args);
132 	va_end(args);
133 
134 	return 0;
135 }
136 #else
137 #define wmi_bp_seq_printf(m, fmt, ...) seq_printf((m), fmt, ##__VA_ARGS__)
138 #endif
139 
140 #ifndef MAX_WMI_INSTANCES
141 #define CUSTOM_MGMT_CMD_DATA_SIZE 4
142 #endif
143 
144 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
145 /* WMI commands */
146 uint32_t g_wmi_command_buf_idx = 0;
147 struct wmi_command_debug wmi_command_log_buffer[WMI_CMD_DEBUG_MAX_ENTRY];
148 
149 /* WMI commands TX completed */
150 uint32_t g_wmi_command_tx_cmp_buf_idx = 0;
151 struct wmi_command_cmp_debug
152 	wmi_command_tx_cmp_log_buffer[WMI_CMD_CMPL_DEBUG_MAX_ENTRY];
153 
154 /* WMI events when processed */
155 uint32_t g_wmi_event_buf_idx = 0;
156 struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
157 
158 /* WMI events when queued */
159 uint32_t g_wmi_rx_event_buf_idx = 0;
160 struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
161 #endif
162 
static void wmi_minidump_detach(struct wmi_unified *wmi_handle)
164 {
165 	struct wmi_log_buf_t *info =
166 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
167 	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
168 
169 	qdf_ssr_driver_dump_unregister_region("wmi_debug_log_info");
170 	qdf_ssr_driver_dump_unregister_region("wmi_rx_event_idx");
171 	qdf_ssr_driver_dump_unregister_region("wmi_rx_event");
172 	qdf_ssr_driver_dump_unregister_region("wmi_event_log_idx");
173 	qdf_ssr_driver_dump_unregister_region("wmi_event_log");
174 	qdf_ssr_driver_dump_unregister_region("wmi_command_log_idx");
175 	qdf_ssr_driver_dump_unregister_region("wmi_command_log");
176 	qdf_ssr_driver_dump_unregister_region("wmi_tx_cmp_idx");
177 	qdf_ssr_driver_dump_unregister_region("wmi_tx_cmp");
178 	qdf_minidump_remove(info->buf, buf_size, "wmi_tx_cmp");
179 }
180 
static void wmi_minidump_attach(struct wmi_unified *wmi_handle)
182 {
183 	struct wmi_log_buf_t *info =
184 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
185 	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
186 
187 	qdf_minidump_log(info->buf, buf_size, "wmi_tx_cmp");
188 
189 	qdf_ssr_driver_dump_register_region("wmi_tx_cmp", info->buf, buf_size);
190 	qdf_ssr_driver_dump_register_region("wmi_tx_cmp_idx",
191 					    info->p_buf_tail_idx,
192 					    sizeof(*info->p_buf_tail_idx));
193 
194 	info = &wmi_handle->log_info.wmi_command_log_buf_info;
195 	buf_size = info->size * sizeof(struct wmi_command_debug);
196 
197 	qdf_ssr_driver_dump_register_region("wmi_command_log", info->buf,
198 					    buf_size);
199 	qdf_ssr_driver_dump_register_region("wmi_command_log_idx",
200 					    info->p_buf_tail_idx,
201 					    sizeof(*info->p_buf_tail_idx));
202 
203 	info = &wmi_handle->log_info.wmi_event_log_buf_info;
204 	buf_size = info->size * sizeof(struct wmi_event_debug);
205 
206 	qdf_ssr_driver_dump_register_region("wmi_event_log", info->buf,
207 					    buf_size);
208 	qdf_ssr_driver_dump_register_region("wmi_event_log_idx",
209 					    info->p_buf_tail_idx,
210 					    sizeof(*info->p_buf_tail_idx));
211 
212 	info = &wmi_handle->log_info.wmi_rx_event_log_buf_info;
213 	buf_size = info->size * sizeof(struct wmi_event_debug);
214 
215 	qdf_ssr_driver_dump_register_region("wmi_rx_event", info->buf,
216 					    buf_size);
217 	qdf_ssr_driver_dump_register_region("wmi_rx_event_idx",
218 					    info->p_buf_tail_idx,
219 					    sizeof(*info->p_buf_tail_idx));
220 
221 	qdf_ssr_driver_dump_register_region("wmi_debug_log_info",
222 					    &wmi_handle->log_info,
223 					    sizeof(wmi_handle->log_info));
224 }
225 
226 #define WMI_COMMAND_RECORD(h, a, b) {					\
227 	if (wmi_cmd_log_max_entry <=					\
228 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))	\
229 		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\
230 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
231 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\
232 						.command = a;		\
233 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
234 				wmi_command_log_buf_info.buf)		\
235 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\
236 			b, wmi_record_max_length);			\
237 	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
238 		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\
239 		time = qdf_get_log_timestamp();			\
240 	(*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++;	\
241 	h->log_info.wmi_command_log_buf_info.length++;			\
242 }
243 
244 #define WMI_COMMAND_TX_CMP_RECORD(h, a, b, da, pa) {			\
245 	if (wmi_cmd_cmpl_log_max_entry <=				\
246 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\
247 		*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
248 				p_buf_tail_idx) = 0;			\
249 	((struct wmi_command_cmp_debug *)h->log_info.			\
250 		wmi_command_tx_cmp_log_buf_info.buf)			\
251 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
252 				p_buf_tail_idx)].			\
253 							command	= a;	\
254 	qdf_mem_copy(((struct wmi_command_cmp_debug *)h->log_info.	\
255 				wmi_command_tx_cmp_log_buf_info.buf)	\
256 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
257 			p_buf_tail_idx)].				\
258 		data, b, wmi_record_max_length);			\
259 	((struct wmi_command_cmp_debug *)h->log_info.			\
260 		wmi_command_tx_cmp_log_buf_info.buf)			\
261 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
262 				p_buf_tail_idx)].			\
263 		time = qdf_get_log_timestamp();				\
264 	((struct wmi_command_cmp_debug *)h->log_info.			\
265 		wmi_command_tx_cmp_log_buf_info.buf)			\
266 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
267 				p_buf_tail_idx)].			\
268 		dma_addr = da;						\
269 	((struct wmi_command_cmp_debug *)h->log_info.			\
270 		wmi_command_tx_cmp_log_buf_info.buf)			\
271 		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
272 				p_buf_tail_idx)].			\
273 		phy_addr = pa;						\
274 	(*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\
275 	h->log_info.wmi_command_tx_cmp_log_buf_info.length++;		\
276 }
277 
278 #define WMI_EVENT_RECORD(h, a, b) {					\
279 	if (wmi_event_log_max_entry <=					\
280 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))	\
281 		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\
282 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
283 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].	\
284 		event = a;						\
285 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
286 				wmi_event_log_buf_info.buf)		\
287 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\
288 		wmi_record_max_length);					\
289 	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
290 		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\
291 		qdf_get_log_timestamp();				\
292 	(*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++;	\
293 	h->log_info.wmi_event_log_buf_info.length++;			\
294 }
295 
296 #define WMI_RX_EVENT_RECORD(h, a, b) {					\
297 	if (wmi_event_log_max_entry <=					\
298 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\
299 		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\
300 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
301 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
302 		event = a;						\
303 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
304 				wmi_rx_event_log_buf_info.buf)		\
305 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
306 			data, b, wmi_record_max_length);		\
307 	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
308 		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
309 		time =	qdf_get_log_timestamp();			\
310 	(*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++;	\
311 	h->log_info.wmi_rx_event_log_buf_info.length++;			\
312 }
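
/*
 * Illustrative sketch of how the record macros above are typically invoked;
 * "payload" is a hypothetical pointer to the command TLV bytes being logged,
 * and callers serialize access with wmi_record_lock:
 *
 *   qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
 *   WMI_COMMAND_RECORD(wmi_handle, cmd_id, payload);
 *   qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
 */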
313 
314 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
315 uint32_t g_wmi_mgmt_command_buf_idx = 0;
316 struct
317 wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_TX_DEBUG_MAX_ENTRY];
318 
319 /* wmi_mgmt commands TX completed */
320 uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0;
321 struct wmi_command_debug
322 wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY];
323 
324 /* wmi_mgmt events when received */
325 uint32_t g_wmi_mgmt_rx_event_buf_idx = 0;
326 struct wmi_event_debug
327 wmi_mgmt_rx_event_log_buffer[WMI_MGMT_RX_DEBUG_MAX_ENTRY];
328 
329 /* wmi_diag events when received */
330 uint32_t g_wmi_diag_rx_event_buf_idx = 0;
331 struct wmi_event_debug
332 wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY];
333 #endif
334 
335 #define WMI_MGMT_COMMAND_RECORD(h, a, b) {                              \
336 	if (wmi_mgmt_tx_log_max_entry <=                                   \
337 		*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \
338 		*(h->log_info.wmi_mgmt_command_log_buf_info.		\
339 				p_buf_tail_idx) = 0;			\
340 	((struct wmi_command_debug *)h->log_info.                       \
341 		 wmi_mgmt_command_log_buf_info.buf)                     \
342 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
343 			command = a;                                    \
344 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.          \
345 				wmi_mgmt_command_log_buf_info.buf)      \
346 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
347 		data, b,                                                \
348 		wmi_record_max_length);                                	\
349 	((struct wmi_command_debug *)h->log_info.                       \
350 		 wmi_mgmt_command_log_buf_info.buf)                     \
351 		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
352 			time =        qdf_get_log_timestamp();          \
353 	(*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\
354 	h->log_info.wmi_mgmt_command_log_buf_info.length++;             \
355 }
356 
357 #define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) {			\
358 	if (wmi_mgmt_tx_cmpl_log_max_entry <=				\
359 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
360 			p_buf_tail_idx))				\
361 		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
362 			p_buf_tail_idx) = 0;				\
363 	((struct wmi_command_debug *)h->log_info.			\
364 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
365 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
366 				p_buf_tail_idx)].command = a;		\
367 	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
368 				wmi_mgmt_command_tx_cmp_log_buf_info.buf)\
369 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
370 			p_buf_tail_idx)].data, b,			\
371 			wmi_record_max_length);				\
372 	((struct wmi_command_debug *)h->log_info.			\
373 			wmi_mgmt_command_tx_cmp_log_buf_info.buf)	\
374 		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
375 				p_buf_tail_idx)].time =			\
376 		qdf_get_log_timestamp();				\
377 	(*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.		\
378 			p_buf_tail_idx))++;				\
379 	h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++;	\
380 }
381 
382 #define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do {				\
383 	if (wmi_mgmt_rx_log_max_entry <=				\
384 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\
385 		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\
386 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
387 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\
388 					.event = a;			\
389 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
390 				wmi_mgmt_event_log_buf_info.buf)	\
391 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
392 			data, b, wmi_record_max_length);		\
393 	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
394 		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
395 			time = qdf_get_log_timestamp();			\
396 	(*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++;	\
397 	h->log_info.wmi_mgmt_event_log_buf_info.length++;		\
398 } while (0);
399 
400 #define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do {                             \
401 	if (wmi_diag_log_max_entry <=                                   \
402 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\
403 		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\
404 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
405 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\
406 					.event = a;                     \
407 	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.            \
408 				wmi_diag_event_log_buf_info.buf)        \
409 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
410 			data, b, wmi_record_max_length);                \
411 	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
412 		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
413 			time = qdf_get_log_timestamp();                 \
414 	(*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++;  \
415 	h->log_info.wmi_diag_event_log_buf_info.length++;               \
416 } while (0);
417 
/* These are defined so that they can be configured as module parameters */
419 /* WMI Commands */
420 uint32_t wmi_cmd_log_max_entry = WMI_CMD_DEBUG_MAX_ENTRY;
421 uint32_t wmi_cmd_cmpl_log_max_entry = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
422 /* WMI Events */
423 uint32_t wmi_event_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY;
424 /* WMI MGMT Tx */
425 uint32_t wmi_mgmt_tx_log_max_entry = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
426 uint32_t wmi_mgmt_tx_cmpl_log_max_entry = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
427 /* WMI MGMT Rx */
428 uint32_t wmi_mgmt_rx_log_max_entry = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
429 /* WMI Diag Event */
430 uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
431 /* WMI capture size */
432 uint32_t wmi_record_max_length = WMI_DEBUG_ENTRY_MAX_LENGTH;
433 uint32_t wmi_display_size = 100;
434 
435 /**
436  * wmi_log_init() - Initialize WMI event logging
437  * @wmi_handle: WMI handle.
438  *
439  * Return: Initialization status
440  */
441 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
443 {
444 	struct wmi_log_buf_t *cmd_log_buf =
445 			&wmi_handle->log_info.wmi_command_log_buf_info;
446 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
447 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
448 
449 	struct wmi_log_buf_t *event_log_buf =
450 			&wmi_handle->log_info.wmi_event_log_buf_info;
451 	struct wmi_log_buf_t *rx_event_log_buf =
452 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
453 
454 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
455 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
456 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
457 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
458 	struct wmi_log_buf_t *mgmt_event_log_buf =
459 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
460 	struct wmi_log_buf_t *diag_event_log_buf =
461 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
462 
463 	/* WMI commands */
464 	cmd_log_buf->length = 0;
465 	cmd_log_buf->buf_tail_idx = 0;
466 	cmd_log_buf->buf = wmi_command_log_buffer;
467 	cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx;
468 	cmd_log_buf->size = WMI_CMD_DEBUG_MAX_ENTRY;
469 
470 	/* WMI commands TX completed */
471 	cmd_tx_cmpl_log_buf->length = 0;
472 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
473 	cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer;
474 	cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx;
475 	cmd_tx_cmpl_log_buf->size = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
476 
477 	/* WMI events when processed */
478 	event_log_buf->length = 0;
479 	event_log_buf->buf_tail_idx = 0;
480 	event_log_buf->buf = wmi_event_log_buffer;
481 	event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx;
482 	event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
483 
484 	/* WMI events when queued */
485 	rx_event_log_buf->length = 0;
486 	rx_event_log_buf->buf_tail_idx = 0;
487 	rx_event_log_buf->buf = wmi_rx_event_log_buffer;
488 	rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx;
489 	rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
490 
491 	/* WMI Management commands */
492 	mgmt_cmd_log_buf->length = 0;
493 	mgmt_cmd_log_buf->buf_tail_idx = 0;
494 	mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer;
495 	mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx;
496 	mgmt_cmd_log_buf->size = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
497 
498 	/* WMI Management commands Tx completed*/
499 	mgmt_cmd_tx_cmp_log_buf->length = 0;
500 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
501 	mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer;
502 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
503 		&g_wmi_mgmt_command_tx_cmp_buf_idx;
504 	mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
505 
506 	/* WMI Management events when received */
507 	mgmt_event_log_buf->length = 0;
508 	mgmt_event_log_buf->buf_tail_idx = 0;
509 	mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer;
510 	mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx;
511 	mgmt_event_log_buf->size = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
512 
513 	/* WMI diag events when received */
514 	diag_event_log_buf->length = 0;
515 	diag_event_log_buf->buf_tail_idx = 0;
516 	diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer;
517 	diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx;
518 	diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
519 
520 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
521 	wmi_handle->log_info.wmi_logging_enable = 1;
522 
523 	return QDF_STATUS_SUCCESS;
524 }
525 #else
static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
527 {
528 	struct wmi_log_buf_t *cmd_log_buf =
529 			&wmi_handle->log_info.wmi_command_log_buf_info;
530 	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
531 			&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
532 
533 	struct wmi_log_buf_t *event_log_buf =
534 			&wmi_handle->log_info.wmi_event_log_buf_info;
535 	struct wmi_log_buf_t *rx_event_log_buf =
536 			&wmi_handle->log_info.wmi_rx_event_log_buf_info;
537 
538 	struct wmi_log_buf_t *mgmt_cmd_log_buf =
539 			&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
540 	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
541 		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
542 	struct wmi_log_buf_t *mgmt_event_log_buf =
543 			&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
544 	struct wmi_log_buf_t *diag_event_log_buf =
545 			&wmi_handle->log_info.wmi_diag_event_log_buf_info;
546 
547 	wmi_handle->log_info.wmi_logging_enable = 0;
548 
549 	/* WMI commands */
550 	cmd_log_buf->length = 0;
551 	cmd_log_buf->buf_tail_idx = 0;
552 	cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
553 		wmi_cmd_log_max_entry * sizeof(struct wmi_command_debug));
554 	cmd_log_buf->size = wmi_cmd_log_max_entry;
555 
556 	if (!cmd_log_buf->buf)
557 		return QDF_STATUS_E_NOMEM;
558 
559 	cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx;
560 
561 	/* WMI commands TX completed */
562 	cmd_tx_cmpl_log_buf->length = 0;
563 	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
564 	cmd_tx_cmpl_log_buf->buf = (struct wmi_command_cmp_debug *) qdf_mem_malloc(
565 		wmi_cmd_cmpl_log_max_entry * sizeof(struct wmi_command_cmp_debug));
566 	cmd_tx_cmpl_log_buf->size = wmi_cmd_cmpl_log_max_entry;
567 
568 	if (!cmd_tx_cmpl_log_buf->buf)
569 		return QDF_STATUS_E_NOMEM;
570 
571 	cmd_tx_cmpl_log_buf->p_buf_tail_idx =
572 		&cmd_tx_cmpl_log_buf->buf_tail_idx;
573 
574 	/* WMI events when processed */
575 	event_log_buf->length = 0;
576 	event_log_buf->buf_tail_idx = 0;
577 	event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
578 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
579 	event_log_buf->size = wmi_event_log_max_entry;
580 
581 	if (!event_log_buf->buf)
582 		return QDF_STATUS_E_NOMEM;
583 
584 	event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx;
585 
586 	/* WMI events when queued */
587 	rx_event_log_buf->length = 0;
588 	rx_event_log_buf->buf_tail_idx = 0;
589 	rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
590 		wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
591 	rx_event_log_buf->size = wmi_event_log_max_entry;
592 
593 	if (!rx_event_log_buf->buf)
594 		return QDF_STATUS_E_NOMEM;
595 
596 	rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx;
597 
598 	/* WMI Management commands */
599 	mgmt_cmd_log_buf->length = 0;
600 	mgmt_cmd_log_buf->buf_tail_idx = 0;
601 	mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
602 		wmi_mgmt_tx_log_max_entry * sizeof(struct wmi_command_debug));
603 	mgmt_cmd_log_buf->size = wmi_mgmt_tx_log_max_entry;
604 
605 	if (!mgmt_cmd_log_buf->buf)
606 		return QDF_STATUS_E_NOMEM;
607 
608 	mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx;
609 
610 	/* WMI Management commands Tx completed*/
611 	mgmt_cmd_tx_cmp_log_buf->length = 0;
612 	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
613 	mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *)
614 		qdf_mem_malloc(
615 		wmi_mgmt_tx_cmpl_log_max_entry *
616 		sizeof(struct wmi_command_debug));
617 	mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_tx_cmpl_log_max_entry;
618 
619 	if (!mgmt_cmd_tx_cmp_log_buf->buf)
620 		return QDF_STATUS_E_NOMEM;
621 
622 	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
623 		&mgmt_cmd_tx_cmp_log_buf->buf_tail_idx;
624 
625 	/* WMI Management events when received */
626 	mgmt_event_log_buf->length = 0;
627 	mgmt_event_log_buf->buf_tail_idx = 0;
628 
629 	mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
630 		wmi_mgmt_rx_log_max_entry *
631 		sizeof(struct wmi_event_debug));
632 	mgmt_event_log_buf->size = wmi_mgmt_rx_log_max_entry;
633 
634 	if (!mgmt_event_log_buf->buf)
635 		return QDF_STATUS_E_NOMEM;
636 
637 	mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx;
638 
639 	/* WMI diag events when received */
640 	diag_event_log_buf->length = 0;
641 	diag_event_log_buf->buf_tail_idx = 0;
642 
643 	diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
644 		wmi_diag_log_max_entry *
645 		sizeof(struct wmi_event_debug));
646 	diag_event_log_buf->size = wmi_diag_log_max_entry;
647 
648 	if (!diag_event_log_buf->buf)
649 		return QDF_STATUS_E_NOMEM;
650 
651 	diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx;
652 
653 	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
654 	wmi_handle->log_info.wmi_logging_enable = 1;
655 
656 	wmi_filtered_logging_init(wmi_handle);
657 
658 	return QDF_STATUS_SUCCESS;
659 }
660 #endif
661 
662 /**
 * wmi_log_buffer_free() - Free all dynamically allocated buffer memory for
664  * event logging
665  * @wmi_handle: WMI handle.
666  *
667  * Return: None
668  */
669 #ifdef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
671 {
672 	wmi_filtered_logging_free(wmi_handle);
673 
674 	if (wmi_handle->log_info.wmi_command_log_buf_info.buf)
675 		qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf);
676 	if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf)
677 		qdf_mem_free(
678 		wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf);
679 	if (wmi_handle->log_info.wmi_event_log_buf_info.buf)
680 		qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf);
681 	if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf)
682 		qdf_mem_free(
683 			wmi_handle->log_info.wmi_rx_event_log_buf_info.buf);
684 	if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf)
685 		qdf_mem_free(
686 			wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf);
687 	if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf)
688 		qdf_mem_free(
689 		wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf);
690 	if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf)
691 		qdf_mem_free(
692 			wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf);
693 	if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf)
694 		qdf_mem_free(
695 			wmi_handle->log_info.wmi_diag_event_log_buf_info.buf);
696 	wmi_handle->log_info.wmi_logging_enable = 0;
697 
698 	qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock);
699 }
700 #else
static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
702 {
703 	/* Do Nothing */
704 }
705 #endif
706 
707 /**
708  * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer
709  * @log_buffer: the command log buffer metadata of the buffer to print
710  * @count: the maximum number of entries to print
711  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
712  * @print_priv: any data required by the print method, e.g. a file handle
713  *
714  * Return: None
715  */
716 static void
wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
718 			 qdf_abstract_print *print, void *print_priv)
719 {
720 	static const int data_len =
721 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
722 	char str[128];
723 	uint32_t idx;
724 
725 	if (count > log_buffer->size)
726 		count = log_buffer->size;
727 	if (count > log_buffer->length)
728 		count = log_buffer->length;
729 
730 	/* subtract count from index, and wrap if necessary */
731 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
732 	idx %= log_buffer->size;
733 
734 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
735 	while (count) {
736 		struct wmi_command_debug *cmd_log = (struct wmi_command_debug *)
737 			&((struct wmi_command_debug *)log_buffer->buf)[idx];
738 		uint64_t secs, usecs;
739 		int len = 0;
740 		int i;
741 
742 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
743 		len += scnprintf(str + len, sizeof(str) - len,
744 				 "% 8lld.%06lld    %6u (0x%06x)    ",
745 				 secs, usecs,
746 				 cmd_log->command, cmd_log->command);
747 		for (i = 0; i < data_len; ++i) {
748 			len += scnprintf(str + len, sizeof(str) - len,
749 					 "0x%08x ", cmd_log->data[i]);
750 		}
751 
752 		print(print_priv, str);
753 
754 		--count;
755 		++idx;
756 		if (idx >= log_buffer->size)
757 			idx = 0;
758 	}
759 }
760 
761 /**
 * wmi_dump_last_cmd_rec_info() - print last wmi command and tx completion time
763  * @wmi_handle: wmi handle
764  *
765  * Return: None
766  */
767 static void
wmi_dump_last_cmd_rec_info(wmi_unified_t wmi_handle) {
769 	uint32_t idx, idx_tx_cmp, cmd_tmp_log, cmd_tmp_tx_cmp;
770 	uint64_t secs, secs_tx_cmp, usecs, usecs_tx_cmp;
771 	struct wmi_command_debug *cmd_log;
772 	struct wmi_command_debug *cmd_log_tx_cmp;
773 	struct wmi_log_buf_t *log_buf =
774 		&wmi_handle->log_info.wmi_command_log_buf_info;
775 	struct wmi_log_buf_t *log_buf_tx_cmp =
776 		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
777 
778 	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
779 
780 	(*log_buf->p_buf_tail_idx == 0) ? (idx = log_buf->size) :
781 		(idx = *log_buf->p_buf_tail_idx - 1);
782 	idx %= log_buf->size;
783 
784 	(*log_buf_tx_cmp->p_buf_tail_idx == 0) ? (idx_tx_cmp =
785 		log_buf_tx_cmp->size) : (idx_tx_cmp =
786 		*log_buf_tx_cmp->p_buf_tail_idx - 1);
787 	idx_tx_cmp %= log_buf_tx_cmp->size;
788 	cmd_log = &((struct wmi_command_debug *)log_buf->buf)[idx];
789 	cmd_log_tx_cmp = &((struct wmi_command_debug *)log_buf_tx_cmp->buf)
790 		[idx_tx_cmp];
791 	cmd_tmp_log = cmd_log->command;
792 	cmd_tmp_tx_cmp = cmd_log_tx_cmp->command;
793 	qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
794 	qdf_log_timestamp_to_secs(cmd_log_tx_cmp->time, &secs_tx_cmp,
795 				  &usecs_tx_cmp);
796 
797 	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
798 
799 	wmi_nofl_err("Last wmi command Time (s) = % 8lld.%06lld ",
800 		     secs, usecs);
801 	wmi_nofl_err("Last wmi Cmd_Id = (0x%06x) ", cmd_tmp_log);
802 	wmi_nofl_err("Last wmi command tx completion Time (s) = % 8lld.%06lld",
803 		     secs_tx_cmp, usecs_tx_cmp);
804 	wmi_nofl_err("Last wmi command tx completion Cmd_Id = (0x%06x) ",
805 		     cmd_tmp_tx_cmp);
806 }
807 
808 /**
809  * wmi_print_cmd_cmp_log_buffer() - wmi command completion log printer
810  * @log_buffer: the command completion log buffer metadata of the buffer to print
811  * @count: the maximum number of entries to print
812  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
813  * @print_priv: any data required by the print method, e.g. a file handle
814  *
815  * Return: None
816  */
817 static void
wmi_print_cmd_cmp_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
819 			 qdf_abstract_print *print, void *print_priv)
820 {
821 	static const int data_len =
822 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
823 	char str[128];
824 	uint32_t idx;
825 
826 	if (count > log_buffer->size)
827 		count = log_buffer->size;
828 	if (count > log_buffer->length)
829 		count = log_buffer->length;
830 
831 	/* subtract count from index, and wrap if necessary */
832 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
833 	idx %= log_buffer->size;
834 
835 	print(print_priv, "Time (seconds)      Cmd Id              Payload");
836 	while (count) {
837 		struct wmi_command_cmp_debug *cmd_log = (struct wmi_command_cmp_debug *)
838 			&((struct wmi_command_cmp_debug *)log_buffer->buf)[idx];
839 		uint64_t secs, usecs;
840 		int len = 0;
841 		int i;
842 
843 		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
844 		len += scnprintf(str + len, sizeof(str) - len,
845 				 "% 8lld.%06lld    %6u (0x%06x)    ",
846 				 secs, usecs,
847 				 cmd_log->command, cmd_log->command);
848 		for (i = 0; i < data_len; ++i) {
849 			len += scnprintf(str + len, sizeof(str) - len,
850 					 "0x%08x ", cmd_log->data[i]);
851 		}
852 
853 		print(print_priv, str);
854 
855 		--count;
856 		++idx;
857 		if (idx >= log_buffer->size)
858 			idx = 0;
859 	}
860 }
861 
862 /**
863  * wmi_print_event_log_buffer() - an output agnostic wmi event log printer
864  * @log_buffer: the event log buffer metadata of the buffer to print
865  * @count: the maximum number of entries to print
866  * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
867  * @print_priv: any data required by the print method, e.g. a file handle
868  *
869  * Return: None
870  */
871 static void
wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
873 			   qdf_abstract_print *print, void *print_priv)
874 {
875 	static const int data_len =
876 		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
877 	char str[128];
878 	uint32_t idx;
879 
880 	if (count > log_buffer->size)
881 		count = log_buffer->size;
882 	if (count > log_buffer->length)
883 		count = log_buffer->length;
884 
885 	/* subtract count from index, and wrap if necessary */
886 	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
887 	idx %= log_buffer->size;
888 
889 	print(print_priv, "Time (seconds)      Event Id             Payload");
890 	while (count) {
891 		struct wmi_event_debug *event_log = (struct wmi_event_debug *)
892 			&((struct wmi_event_debug *)log_buffer->buf)[idx];
893 		uint64_t secs, usecs;
894 		int len = 0;
895 		int i;
896 
897 		qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs);
898 		len += scnprintf(str + len, sizeof(str) - len,
899 				 "% 8lld.%06lld    %6u (0x%06x)    ",
900 				 secs, usecs,
901 				 event_log->event, event_log->event);
902 		for (i = 0; i < data_len; ++i) {
903 			len += scnprintf(str + len, sizeof(str) - len,
904 					 "0x%08x ", event_log->data[i]);
905 		}
906 
907 		print(print_priv, str);
908 
909 		--count;
910 		++idx;
911 		if (idx >= log_buffer->size)
912 			idx = 0;
913 	}
914 }
915 
916 inline void
wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count,
918 		  qdf_abstract_print *print, void *print_priv)
919 {
920 	wmi_print_cmd_log_buffer(
921 		&wmi->log_info.wmi_command_log_buf_info,
922 		count, print, print_priv);
923 }
924 
925 inline void
wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
927 			 qdf_abstract_print *print, void *print_priv)
928 {
929 	wmi_print_cmd_cmp_log_buffer(
930 		&wmi->log_info.wmi_command_tx_cmp_log_buf_info,
931 		count, print, print_priv);
932 }
933 
934 inline void
wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count,
936 		       qdf_abstract_print *print, void *print_priv)
937 {
938 	wmi_print_cmd_log_buffer(
939 		&wmi->log_info.wmi_mgmt_command_log_buf_info,
940 		count, print, print_priv);
941 }
942 
943 inline void
wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
945 			      qdf_abstract_print *print, void *print_priv)
946 {
947 	wmi_print_cmd_log_buffer(
948 		&wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info,
949 		count, print, print_priv);
950 }
951 
952 inline void
wmi_print_event_log(wmi_unified_t wmi, uint32_t count,
954 		    qdf_abstract_print *print, void *print_priv)
955 {
956 	wmi_print_event_log_buffer(
957 		&wmi->log_info.wmi_event_log_buf_info,
958 		count, print, print_priv);
959 }
960 
961 inline void
wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count,
963 		       qdf_abstract_print *print, void *print_priv)
964 {
965 	wmi_print_event_log_buffer(
966 		&wmi->log_info.wmi_rx_event_log_buf_info,
967 		count, print, print_priv);
968 }
969 
970 inline void
wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count,
972 			 qdf_abstract_print *print, void *print_priv)
973 {
974 	wmi_print_event_log_buffer(
975 		&wmi->log_info.wmi_mgmt_event_log_buf_info,
976 		count, print, print_priv);
977 }
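
/*
 * Illustrative sketch: the wrappers above take a qdf_abstract_print callback
 * plus an opaque print_priv cookie, so a debug path could dump the most
 * recent entries with something like the following, where "my_print" is a
 * hypothetical qdf_abstract_print compatible function:
 *
 *   wmi_print_cmd_log(wmi_handle, wmi_display_size, my_print, NULL);
 *   wmi_print_event_log(wmi_handle, wmi_display_size, my_print, NULL);
 */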
978 
979 
/* debugfs routines */
981 
982 /*
983  * debug_wmi_##func_base##_show() - debugfs functions to display content of
 * command and event buffers. The macro uses the maximum buffer length to
 * display the buffer when it has wrapped around.
986  *
987  * @m: debugfs handler to access wmi_handle
988  * @v: Variable arguments (not used)
989  *
990  * Return: Length of characters printed
991  */
992 #define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
993 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
994 						void *v)		\
995 	{								\
996 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
997 		struct wmi_log_buf_t *wmi_log =				\
998 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
999 		int pos, nread, outlen;					\
1000 		int i;							\
1001 		uint64_t secs, usecs;					\
1002 									\
1003 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
1004 		if (!wmi_log->length) {					\
1005 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1006 			return wmi_bp_seq_printf(m,			\
1007 			"no elements to read from ring buffer!\n");	\
1008 		}							\
1009 									\
1010 		if (wmi_log->length <= wmi_ring_size)			\
1011 			nread = wmi_log->length;			\
1012 		else							\
1013 			nread = wmi_ring_size;				\
1014 									\
1015 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
1016 			/* tail can be 0 after wrap-around */		\
1017 			pos = wmi_ring_size - 1;			\
1018 		else							\
1019 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
1020 									\
1021 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
1022 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1023 		while (nread--) {					\
1024 			struct wmi_record_type *wmi_record;		\
1025 									\
1026 			wmi_record = (struct wmi_record_type *)	\
1027 			&(((struct wmi_record_type *)wmi_log->buf)[pos]);\
1028 			outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n",	\
1029 				(wmi_record->command));			\
1030 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
1031 				&usecs);				\
1032 			outlen +=					\
1033 			wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\
1034 				secs, usecs);				\
1035 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
1036 			for (i = 0; i < (wmi_record_max_length/		\
1037 					sizeof(uint32_t)); i++)		\
1038 				outlen += wmi_bp_seq_printf(m, "%x ",	\
1039 					wmi_record->data[i]);		\
1040 			outlen += wmi_bp_seq_printf(m, "\n");		\
1041 									\
1042 			if (pos == 0)					\
1043 				pos = wmi_ring_size - 1;		\
1044 			else						\
1045 				pos--;					\
1046 		}							\
1047 		return outlen;						\
1048 	}								\
1049 
1050 #define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size)	\
1051 	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
1052 						void *v)		\
1053 	{								\
1054 		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
1055 		struct wmi_log_buf_t *wmi_log =				\
1056 			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
1057 		int pos, nread, outlen;					\
1058 		int i;							\
1059 		uint64_t secs, usecs;					\
1060 									\
1061 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
1062 		if (!wmi_log->length) {					\
1063 			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1064 			return wmi_bp_seq_printf(m,			\
1065 			"no elements to read from ring buffer!\n");	\
1066 		}							\
1067 									\
1068 		if (wmi_log->length <= wmi_ring_size)			\
1069 			nread = wmi_log->length;			\
1070 		else							\
1071 			nread = wmi_ring_size;				\
1072 									\
1073 		if (*(wmi_log->p_buf_tail_idx) == 0)			\
1074 			/* tail can be 0 after wrap-around */		\
1075 			pos = wmi_ring_size - 1;			\
1076 		else							\
1077 			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
1078 									\
1079 		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
1080 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1081 		while (nread--) {					\
1082 			struct wmi_event_debug *wmi_record;		\
1083 									\
1084 			wmi_record = (struct wmi_event_debug *)		\
1085 			&(((struct wmi_event_debug *)wmi_log->buf)[pos]);\
1086 			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
1087 				&usecs);				\
1088 			outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\
1089 				(wmi_record->event));			\
1090 			outlen +=					\
1091 			wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\
1092 				secs, usecs);				\
1093 			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
1094 			for (i = 0; i < (wmi_record_max_length/		\
1095 					sizeof(uint32_t)); i++)		\
1096 				outlen += wmi_bp_seq_printf(m, "%x ",	\
1097 					wmi_record->data[i]);		\
1098 			outlen += wmi_bp_seq_printf(m, "\n");		\
1099 									\
1100 			if (pos == 0)					\
1101 				pos = wmi_ring_size - 1;		\
1102 			else						\
1103 				pos--;					\
1104 		}							\
1105 		return outlen;						\
1106 	}
1107 
1108 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size,
1109 				  wmi_command_debug);
1110 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size,
1111 				  wmi_command_cmp_debug);
1112 GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size);
1113 GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size);
1114 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size,
1115 				  wmi_command_debug);
1116 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log,
1117 					wmi_display_size,
1118 					wmi_command_debug);
1119 GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size);
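
/*
 * For reference (assuming standard token pasting): the instantiations above
 * generate functions such as debug_wmi_command_log_show(), which the
 * GENERATE_DEBUG_STRUCTS() macro below wires into the corresponding
 * debug_wmi_command_log_ops file_operations for debugfs.
 */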
1120 
1121 /**
1122  * debug_wmi_enable_show() - debugfs functions to display enable state of
1123  * wmi logging feature.
1124  *
1125  * @m: debugfs handler to access wmi_handle
1126  * @v: Variable arguments (not used)
1127  *
1128  * Return: always 1
1129  */
static int debug_wmi_enable_show(struct seq_file *m, void *v)
1131 {
1132 	wmi_unified_t wmi_handle = (wmi_unified_t) m->private;
1133 
1134 	return wmi_bp_seq_printf(m, "%d\n",
1135 			wmi_handle->log_info.wmi_logging_enable);
1136 }
1137 
1138 /**
1139  * debug_wmi_log_size_show() - debugfs functions to display configured size of
1140  * wmi logging command/event buffer and management command/event buffer.
1141  *
1142  * @m: debugfs handler to access wmi_handle
1143  * @v: Variable arguments (not used)
1144  *
1145  * Return: Length of characters printed
1146  */
static int debug_wmi_log_size_show(struct seq_file *m, void *v)
1148 {
1149 
1150 	wmi_bp_seq_printf(m, "WMI command/cmpl log max size:%d/%d\n",
1151 			  wmi_cmd_log_max_entry, wmi_cmd_cmpl_log_max_entry);
1152 	wmi_bp_seq_printf(m, "WMI management Tx/cmpl log max size:%d/%d\n",
1153 			  wmi_mgmt_tx_log_max_entry,
1154 			  wmi_mgmt_tx_cmpl_log_max_entry);
1155 	wmi_bp_seq_printf(m, "WMI event log max size:%d\n",
1156 			  wmi_event_log_max_entry);
1157 	wmi_bp_seq_printf(m, "WMI management Rx log max size:%d\n",
1158 			  wmi_mgmt_rx_log_max_entry);
1159 	return wmi_bp_seq_printf(m,
1160 				 "WMI diag log max size:%d\n",
1161 				 wmi_diag_log_max_entry);
1162 }
1163 
1164 /*
1165  * debug_wmi_##func_base##_write() - debugfs functions to clear
1166  * wmi logging command/event buffer and management command/event buffer.
1167  *
1168  * @file: file handler to access wmi_handle
1169  * @buf: received data buffer
1170  * @count: length of received buffer
1171  * @ppos: Not used
1172  *
1173  * Return: count
1174  */
1175 #define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
1176 	static ssize_t debug_wmi_##func_base##_write(struct file *file,	\
1177 				const char __user *buf,			\
1178 				size_t count, loff_t *ppos)		\
1179 	{								\
1180 		int k, ret;						\
1181 		wmi_unified_t wmi_handle =				\
1182 			((struct seq_file *)file->private_data)->private;\
1183 		struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info.	\
1184 				wmi_##func_base##_buf_info;		\
1185 		char locbuf[50] = {0x00};				\
1186 									\
1187 		if ((!buf) || (count > 50))				\
1188 			return -EFAULT;					\
1189 									\
1190 		if (copy_from_user(locbuf, buf, count))			\
1191 			return -EFAULT;					\
1192 									\
1193 		ret = sscanf(locbuf, "%d", &k);				\
1194 		if ((ret != 1) || (k != 0)) {                           \
1195 			wmi_err("Wrong input, echo 0 to clear the wmi buffer");\
1196 			return -EINVAL;					\
1197 		}							\
1198 									\
1199 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
1200 		qdf_mem_zero(wmi_log->buf, wmi_ring_size *		\
1201 				sizeof(struct wmi_record_type));	\
1202 		wmi_log->length = 0;					\
1203 		*(wmi_log->p_buf_tail_idx) = 0;				\
1204 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
1205 									\
1206 		return count;						\
1207 	}
1208 
1209 GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_cmd_log_max_entry,
1210 			   wmi_command_debug);
1211 GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_cmd_cmpl_log_max_entry,
1212 			   wmi_command_cmp_debug);
1213 GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_event_log_max_entry,
1214 			   wmi_event_debug);
1215 GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_event_log_max_entry,
1216 			   wmi_event_debug);
1217 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_tx_log_max_entry,
1218 			   wmi_command_debug);
1219 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log,
1220 			   wmi_mgmt_tx_cmpl_log_max_entry, wmi_command_debug);
1221 GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_rx_log_max_entry,
1222 			   wmi_event_debug);
1223 
1224 /**
1225  * debug_wmi_enable_write() - debugfs functions to enable/disable
1226  * wmi logging feature.
1227  *
1228  * @file: file handler to access wmi_handle
1229  * @buf: received data buffer
1230  * @count: length of received buffer
1231  * @ppos: Not used
1232  *
1233  * Return: count
1234  */
static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf,
1236 					size_t count, loff_t *ppos)
1237 {
1238 	wmi_unified_t wmi_handle =
1239 		((struct seq_file *)file->private_data)->private;
1240 	int k, ret;
1241 	char locbuf[50] = {0x00};
1242 
1243 	if ((!buf) || (count > 50))
1244 		return -EFAULT;
1245 
1246 	if (copy_from_user(locbuf, buf, count))
1247 		return -EFAULT;
1248 
1249 	ret = sscanf(locbuf, "%d", &k);
1250 	if ((ret != 1) || ((k != 0) && (k != 1)))
1251 		return -EINVAL;
1252 
1253 	wmi_handle->log_info.wmi_logging_enable = k;
1254 	return count;
1255 }
1256 
1257 /**
1258  * debug_wmi_log_size_write() - reserved.
1259  *
1260  * @file: file handler to access wmi_handle
1261  * @buf: received data buffer
1262  * @count: length of received buffer
1263  * @ppos: Not used
1264  *
1265  * Return: count
1266  */
static ssize_t debug_wmi_log_size_write(struct file *file,
1268 		const char __user *buf, size_t count, loff_t *ppos)
1269 {
1270 	return -EINVAL;
1271 }
1272 
1273 /* Structure to maintain debug information */
1274 struct wmi_debugfs_info {
1275 	const char *name;
1276 	const struct file_operations *ops;
1277 };
1278 
1279 #define DEBUG_FOO(func_base) { .name = #func_base,			\
1280 	.ops = &debug_##func_base##_ops }
1281 
1282 /*
1283  * debug_##func_base##_open() - Open debugfs entry for respective command
1284  * and event buffer.
1285  *
1286  * @inode: node for debug dir entry
1287  * @file: file handler
1288  *
1289  * Return: open status
1290  */
1291 #define GENERATE_DEBUG_STRUCTS(func_base)				\
1292 	static int debug_##func_base##_open(struct inode *inode,	\
1293 						struct file *file)	\
1294 	{								\
1295 		return single_open(file, debug_##func_base##_show,	\
1296 				inode->i_private);			\
1297 	}								\
1298 									\
1299 									\
1300 	static struct file_operations debug_##func_base##_ops = {	\
1301 		.open		= debug_##func_base##_open,		\
1302 		.read		= seq_read,				\
1303 		.llseek		= seq_lseek,				\
1304 		.write		= debug_##func_base##_write,		\
1305 		.release	= single_release,			\
1306 	};
1307 
1308 GENERATE_DEBUG_STRUCTS(wmi_command_log);
1309 GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log);
1310 GENERATE_DEBUG_STRUCTS(wmi_event_log);
1311 GENERATE_DEBUG_STRUCTS(wmi_rx_event_log);
1312 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log);
1313 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log);
1314 GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log);
1315 GENERATE_DEBUG_STRUCTS(wmi_enable);
1316 GENERATE_DEBUG_STRUCTS(wmi_log_size);
1317 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1318 GENERATE_DEBUG_STRUCTS(filtered_wmi_cmds);
1319 GENERATE_DEBUG_STRUCTS(filtered_wmi_evts);
1320 GENERATE_DEBUG_STRUCTS(wmi_filtered_command_log);
1321 GENERATE_DEBUG_STRUCTS(wmi_filtered_event_log);
1322 #endif
1323 
1324 struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = {
1325 	DEBUG_FOO(wmi_command_log),
1326 	DEBUG_FOO(wmi_command_tx_cmp_log),
1327 	DEBUG_FOO(wmi_event_log),
1328 	DEBUG_FOO(wmi_rx_event_log),
1329 	DEBUG_FOO(wmi_mgmt_command_log),
1330 	DEBUG_FOO(wmi_mgmt_command_tx_cmp_log),
1331 	DEBUG_FOO(wmi_mgmt_event_log),
1332 	DEBUG_FOO(wmi_enable),
1333 	DEBUG_FOO(wmi_log_size),
1334 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
1335 	DEBUG_FOO(filtered_wmi_cmds),
1336 	DEBUG_FOO(filtered_wmi_evts),
1337 	DEBUG_FOO(wmi_filtered_command_log),
1338 	DEBUG_FOO(wmi_filtered_event_log),
1339 #endif
1340 };
1341 
1342 /**
1343  * wmi_debugfs_create() - Create debug_fs entry for wmi logging.
1344  *
1345  * @wmi_handle: wmi handle
1346  * @par_entry: debug directory entry
1347  *
1348  * Return: none
1349  */
static void wmi_debugfs_create(wmi_unified_t wmi_handle,
1351 			       struct dentry *par_entry)
1352 {
1353 	int i;
1354 
1355 	if (!par_entry)
1356 		goto out;
1357 
1358 	for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1359 		wmi_handle->debugfs_de[i] = qdf_debugfs_create_entry(
1360 						wmi_debugfs_infos[i].name,
1361 						WMI_INFOS_DBG_FILE_PERM,
1362 						par_entry,
1363 						wmi_handle,
1364 						wmi_debugfs_infos[i].ops);
1365 
1366 		if (!wmi_handle->debugfs_de[i]) {
1367 			wmi_err("debug Entry creation failed!");
1368 			goto out;
1369 		}
1370 	}
1371 
1372 	return;
1373 
1374 out:
1375 	wmi_err("debug Entry creation failed!");
1376 	wmi_log_buffer_free(wmi_handle);
1377 	return;
1378 }
1379 
1380 /**
1381  * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
1382  * @wmi_handle: wmi handle
1383  *
1384  * Return: none
1385  */
static void wmi_debugfs_remove(wmi_unified_t wmi_handle)
1387 {
1388 	int i;
1389 	struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir;
1390 
1391 	if (dentry) {
1392 		for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
1393 			if (wmi_handle->debugfs_de[i])
1394 				wmi_handle->debugfs_de[i] = NULL;
1395 		}
1396 	}
1397 
1398 	if (dentry)
1399 		qdf_debugfs_remove_dir_recursive(dentry);
1400 }
1401 
1402 /**
1403  * wmi_debugfs_init() - debugfs functions to create debugfs directory and to
1404  *                      create debugfs entries.
1405  * @wmi_handle: wmi handler
1406  * @pdev_idx: pdev id
1407  *
1408  * Return: init status
1409  */
static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx)
1411 {
1412 	char buf[32];
1413 
1414 	snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u",
1415 		 wmi_handle->soc->soc_idx, pdev_idx);
1416 
1417 	wmi_handle->log_info.wmi_log_debugfs_dir =
1418 		qdf_debugfs_create_dir(buf, NULL);
1419 
1420 	if (!wmi_handle->log_info.wmi_log_debugfs_dir) {
1421 		wmi_err("error while creating debugfs dir for %s", buf);
1422 		return QDF_STATUS_E_FAILURE;
1423 	}
1424 	wmi_debugfs_create(wmi_handle,
1425 			   wmi_handle->log_info.wmi_log_debugfs_dir);
1426 
1427 	return QDF_STATUS_SUCCESS;
1428 }
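
/*
 * Illustrative debugfs layout (assuming debugfs is mounted at
 * /sys/kernel/debug): wmi_debugfs_init() creates a per-pdev directory named
 * from the "WMI_SOC%u_PDEV%u" format above, and wmi_debugfs_create()
 * populates it with one entry per wmi_debugfs_infos[] element, e.g.:
 *
 *   cat /sys/kernel/debug/WMI_SOC0_PDEV0/wmi_command_log      # dump entries
 *   echo 0 > /sys/kernel/debug/WMI_SOC0_PDEV0/wmi_command_log # clear buffer
 *   echo 1 > /sys/kernel/debug/WMI_SOC0_PDEV0/wmi_enable      # enable logging
 */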
1429 
void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
1431 			void *header, uint32_t vdev_id, uint32_t chanfreq)
1432 {
1433 
1434 	uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE];
1435 
1436 	data[0] = ((struct wmi_command_header *)header)->type;
1437 	data[1] = ((struct wmi_command_header *)header)->sub_type;
1438 	data[2] = vdev_id;
1439 	data[3] = chanfreq;
1440 
1441 	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
1442 
1443 	WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data);
1444 	wmi_specific_cmd_record(wmi_handle, cmd, (uint8_t *)data);
1445 	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
1446 }
1447 #else
static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { }
void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
			void *header, uint32_t vdev_id, uint32_t chanfreq) { }
static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { }
static void wmi_minidump_detach(struct wmi_unified *wmi_handle) { }
static void wmi_minidump_attach(struct wmi_unified *wmi_handle) { }
static void wmi_dump_last_cmd_rec_info(wmi_unified_t wmi_handle) { }
1455 #endif /*WMI_INTERFACE_EVENT_LOGGING */
1456 qdf_export_symbol(wmi_mgmt_cmd_record);
1457 
1458 #ifdef WMI_EXT_DBG
1459 
1460 /**
1461  * wmi_ext_dbg_msg_enqueue() - enqueue wmi message
1462  * @wmi_handle: wmi handler
1463  * @msg: WMI message
1464  *
1465  * Return: size of wmi message queue after enqueue
1466  */
1467 static uint32_t wmi_ext_dbg_msg_enqueue(struct wmi_unified *wmi_handle,
1468 					struct wmi_ext_dbg_msg *msg)
1469 {
1470 	uint32_t list_size;
1471 
1472 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1473 	qdf_list_insert_back_size(&wmi_handle->wmi_ext_dbg_msg_queue,
1474 				  &msg->node, &list_size);
1475 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1476 
1477 	return list_size;
1478 }
1479 
1480 /**
1481  * wmi_ext_dbg_msg_dequeue() - dequeue wmi message
1482  * @wmi_handle: wmi handler
1483  *
1484  * Return: wmi msg on success else NULL
1485  */
1486 static struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_dequeue(struct wmi_unified
1487 						       *wmi_handle)
1488 {
1489 	qdf_list_node_t *list_node = NULL;
1490 
1491 	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1492 	qdf_list_remove_front(&wmi_handle->wmi_ext_dbg_msg_queue, &list_node);
1493 	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1494 
1495 	if (!list_node)
1496 		return NULL;
1497 
1498 	return qdf_container_of(list_node, struct wmi_ext_dbg_msg, node);
1499 }
1500 
1501 /**
1502  * wmi_ext_dbg_msg_record() - record wmi messages
1503  * @wmi_handle: wmi handler
1504  * @buf: wmi message buffer
1505  * @len: wmi message length
1506  * @type: wmi message type
1507  *
1508  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1509  */
1510 static QDF_STATUS wmi_ext_dbg_msg_record(struct wmi_unified *wmi_handle,
1511 					 uint8_t *buf, uint32_t len,
1512 					 enum WMI_MSG_TYPE type)
1513 {
1514 	struct wmi_ext_dbg_msg *msg;
1515 	uint32_t list_size;
1516 
1517 	msg = wmi_ext_dbg_msg_get(len);
1518 	if (!msg)
1519 		return QDF_STATUS_E_NOMEM;
1520 
1521 	msg->len = len;
1522 	msg->type = type;
1523 	qdf_mem_copy(msg->buf, buf, len);
1524 	msg->ts = qdf_get_log_timestamp();
1525 	list_size = wmi_ext_dbg_msg_enqueue(wmi_handle, msg);
1526 
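	/* When the queue depth limit is reached, evict the oldest message
	 * (FIFO) so that the newly recorded one is retained.
	 */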
1527 	if (list_size >= wmi_handle->wmi_ext_dbg_msg_queue_size) {
1528 		msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1529 		wmi_ext_dbg_msg_put(msg);
1530 	}
1531 
1532 	return QDF_STATUS_SUCCESS;
1533 }
1534 
1535 /**
1536  * wmi_ext_dbg_msg_cmd_record() - record wmi command messages
1537  * @wmi_handle: wmi handler
1538  * @buf: wmi command buffer
1539  * @len: wmi command message length
1540  *
1541  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1542  */
1543 static QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified *wmi_handle,
1544 					     uint8_t *buf, uint32_t len)
1545 {
1546 	return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1547 				      WMI_MSG_TYPE_CMD);
1548 }
1549 
1550 /**
1551  * wmi_ext_dbg_msg_event_record() - record wmi event messages
1552  * @wmi_handle: wmi handler
1553  * @buf: wmi event buffer
1554  * @len: wmi event message length
1555  *
1556  * Return: QDF_STATUS_SUCCESS on successful recording else failure.
1557  */
1558 static QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified *wmi_handle,
1559 					       uint8_t *buf, uint32_t len)
1560 {
1561 	uint32_t id;
1562 
1563 	id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
1564 	if (id != wmi_handle->wmi_events[wmi_diag_event_id])
1565 		return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
1566 					      WMI_MSG_TYPE_EVENT);
1567 
1568 	return QDF_STATUS_SUCCESS;
1569 }
1570 
1571 /**
1572  * wmi_ext_dbg_msg_queue_init() - create debugfs queue and associated lock
1573  * @wmi_handle: wmi handler
1574  *
1575  * Return: none
1576  */
1577 static void wmi_ext_dbg_msg_queue_init(struct wmi_unified *wmi_handle)
1578 {
1579 	qdf_list_create(&wmi_handle->wmi_ext_dbg_msg_queue,
1580 			wmi_handle->wmi_ext_dbg_msg_queue_size);
1581 	qdf_spinlock_create(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1582 }
1583 
1584 /**
1585  * wmi_ext_dbg_msg_queue_deinit() - destroy debugfs queue and associated lock
1586  * @wmi_handle: wmi handler
1587  *
1588  * Return: none
1589  */
1590 static void wmi_ext_dbg_msg_queue_deinit(struct wmi_unified *wmi_handle)
1591 {
1592 	qdf_list_destroy(&wmi_handle->wmi_ext_dbg_msg_queue);
1593 	qdf_spinlock_destroy(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1594 }
1595 
1596 /**
1597  * wmi_ext_dbg_msg_show() - debugfs function to display whole content of
1598  * wmi command/event messages including headers.
1599  * @file: qdf debugfs file handler
1600  * @arg: pointer to wmi handler
1601  *
1602  * Return: QDF_STATUS_SUCCESS if all the messages are shown successfully,
1603  * else QDF_STATUS_E_AGAIN if more data to show.
1604  */
1605 static QDF_STATUS wmi_ext_dbg_msg_show(qdf_debugfs_file_t file, void *arg)
1606 {
1607 	struct wmi_unified *wmi_handle = (struct wmi_unified *)arg;
1608 	struct wmi_ext_dbg_msg *msg;
1609 	uint64_t secs, usecs;
1610 
1611 	msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
1612 	if (!msg)
1613 		return QDF_STATUS_SUCCESS;
1614 
1615 	qdf_debugfs_printf(file, "%s: 0x%x\n",
1616 			   msg->type == WMI_MSG_TYPE_CMD ? "COMMAND" :
1617 			   "EVENT", WMI_GET_FIELD(msg->buf, WMI_CMD_HDR,
1618 						  COMMANDID));
1619 	qdf_log_timestamp_to_secs(msg->ts, &secs, &usecs);
1620 	qdf_debugfs_printf(file, "Time: %llu.%llu\n", secs, usecs);
1621 	qdf_debugfs_printf(file, "Length:%d\n", msg->len);
1622 	qdf_debugfs_hexdump(file, msg->buf, msg->len,
1623 			    WMI_EXT_DBG_DUMP_ROW_SIZE,
1624 			    WMI_EXT_DBG_DUMP_GROUP_SIZE);
1625 	qdf_debugfs_printf(file, "\n");
1626 
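	/* If the debugfs output buffer overflowed, push the message back to
	 * the front of the queue so it is shown on the next read; otherwise
	 * release it.
	 */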
1627 	if (qdf_debugfs_overflow(file)) {
1628 		qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1629 		qdf_list_insert_front(&wmi_handle->wmi_ext_dbg_msg_queue,
1630 				      &msg->node);
1631 		qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
1632 
1633 	} else {
1634 		wmi_ext_dbg_msg_put(msg);
1635 	}
1636 
1637 	return QDF_STATUS_E_AGAIN;
1638 }
1639 
1640 /**
1641  * wmi_ext_dbg_msg_write() - debugfs write not supported
1642  * @priv: private data
1643  * @buf: received data buffer
1644  * @len: length of received buffer
1645  *
1646  * Return: QDF_STATUS_E_NOSUPPORT.
1647  */
1648 static QDF_STATUS wmi_ext_dbg_msg_write(void *priv, const char *buf,
1649 					qdf_size_t len)
1650 {
1651 	return QDF_STATUS_E_NOSUPPORT;
1652 }
1653 
1654 static struct qdf_debugfs_fops wmi_ext_dbgfs_ops[WMI_MAX_RADIOS];
1655 
1656 /**
1657  * wmi_ext_dbgfs_init() - init debugfs items for extended wmi dump.
1658  * @wmi_handle: wmi handler
1659  * @pdev_idx: pdev index
1660  *
1661  * Return: QDF_STATUS_SUCCESS if debugfs is initialized else
1662  * QDF_STATUS_E_FAILURE
1663  */
1664 static QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1665 				     uint32_t pdev_idx)
1666 {
1667 	qdf_dentry_t dentry;
1668 	char buf[32];
1669 
1670 	/* To maintain backward compatibility, the dentry name for SOC 0
1671 	 * PDEV 0 is kept the same as before. For any other SOC or PDEV, the
1672 	 * dentry name carries a SOCx_PDEVx suffix.
1673 	 */
1674 	if (wmi_handle->soc->soc_idx == 0 && pdev_idx == 0) {
1675 		dentry  = qdf_debugfs_create_dir(WMI_EXT_DBG_DIR, NULL);
1676 	} else {
1677 		snprintf(buf, sizeof(buf), "WMI_EXT_DBG_SOC%u_PDEV%u",
1678 			 wmi_handle->soc->soc_idx, pdev_idx);
1679 		dentry  = qdf_debugfs_create_dir(buf, NULL);
1680 	}
1681 
1682 	if (!dentry) {
1683 		wmi_err("error while creating extended wmi debugfs dir");
1684 		return QDF_STATUS_E_FAILURE;
1685 	}
1686 
1687 	wmi_ext_dbgfs_ops[pdev_idx].show = wmi_ext_dbg_msg_show;
1688 	wmi_ext_dbgfs_ops[pdev_idx].write = wmi_ext_dbg_msg_write;
1689 	wmi_ext_dbgfs_ops[pdev_idx].priv = wmi_handle;
1690 	if (!qdf_debugfs_create_file(WMI_EXT_DBG_FILE, WMI_EXT_DBG_FILE_PERM,
1691 				     dentry, &wmi_ext_dbgfs_ops[pdev_idx])) {
1692 		qdf_debugfs_remove_dir(dentry);
1693 		wmi_err("Error while creating extended wmi debugfs file");
1694 		return QDF_STATUS_E_FAILURE;
1695 	}
1696 
1697 	wmi_handle->wmi_ext_dbg_dentry = dentry;
1698 	wmi_handle->wmi_ext_dbg_msg_queue_size = WMI_EXT_DBG_QUEUE_SIZE;
1699 	wmi_ext_dbg_msg_queue_init(wmi_handle);
1700 
1701 	return QDF_STATUS_SUCCESS;
1702 }
1703 
1704 /**
1705  * wmi_ext_dbgfs_deinit() - cleanup/deinit debugfs items of extended wmi dump.
1706  * @wmi_handle: wmi handler
1707  *
1708  * Return: QDF_STATUS_SUCCESS if cleanup is successful
1709  */
1710 static QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1711 {
1712 	struct wmi_ext_dbg_msg *msg;
1713 
1714 	while ((msg = wmi_ext_dbg_msg_dequeue(wmi_handle)))
1715 		wmi_ext_dbg_msg_put(msg);
1716 
1717 	wmi_ext_dbg_msg_queue_deinit(wmi_handle);
1718 	qdf_debugfs_remove_dir_recursive(wmi_handle->wmi_ext_dbg_dentry);
1719 
1720 	return QDF_STATUS_SUCCESS;
1721 }
1722 
1723 #else
1724 
1725 static inline QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified
1726 						    *wmi_handle,
1727 						    uint8_t *buf, uint32_t len)
1728 {
1729 		return QDF_STATUS_SUCCESS;
1730 }
1731 
1732 static inline QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified
1733 						      *wmi_handle,
1734 						      uint8_t *buf, uint32_t len)
1735 {
1736 		return QDF_STATUS_SUCCESS;
1737 }
1738 
1739 static inline QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
1740 					    uint32_t pdev_idx)
1741 {
1742 		return QDF_STATUS_SUCCESS;
1743 }
1744 
1745 static inline QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
1746 {
1747 		return QDF_STATUS_SUCCESS;
1748 }
1749 
1750 #endif /*WMI_EXT_DBG */
1751 
1752 int wmi_get_host_credits(wmi_unified_t wmi_handle);
1753 /* WMI buffer APIs */
1754 
1755 #ifdef NBUF_MEMORY_DEBUG
1756 wmi_buf_t
1757 wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len,
1758 		    const char *func_name,
1759 		    uint32_t line_num)
1760 {
1761 	wmi_buf_t wmi_buf;
1762 
1763 	if (roundup(len, 4) > wmi_handle->max_msg_len) {
1764 		wmi_err("Invalid length %u (via %s:%u) max size: %u",
1765 			len, func_name, line_num,
1766 			wmi_handle->max_msg_len);
1767 		QDF_ASSERT(0);
1768 		return NULL;
1769 	}
1770 
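	/* Try the pre-allocated wbuff pool first; fall back to a fresh nbuf
	 * allocation if no pool buffer is available.
	 */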
1771 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, WBUFF_MAX_POOL_ID,
1772 				 len, func_name, line_num);
1773 	if (!wmi_buf)
1774 		wmi_buf = qdf_nbuf_alloc_debug(NULL,
1775 					       roundup(len + WMI_MIN_HEAD_ROOM,
1776 						       4),
1777 					       WMI_MIN_HEAD_ROOM, 4, false,
1778 					       func_name, line_num);
1779 	if (!wmi_buf)
1780 		return NULL;
1781 
1782 	/* Clear the wmi buffer */
1783 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1784 
1785 	/*
1786 	 * Set the length of the buffer to match the allocation size.
1787 	 */
1788 	qdf_nbuf_set_pktlen(wmi_buf, len);
1789 
1790 	return wmi_buf;
1791 }
1792 qdf_export_symbol(wmi_buf_alloc_debug);
1793 
1794 void wmi_buf_free(wmi_buf_t net_buf)
1795 {
1796 	net_buf = wbuff_buff_put(net_buf);
1797 	if (net_buf)
1798 		qdf_nbuf_free(net_buf);
1799 }
1800 qdf_export_symbol(wmi_buf_free);
1801 #else
1802 wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len,
1803 			   const char *func, uint32_t line)
1804 {
1805 	wmi_buf_t wmi_buf;
1806 
1807 	if (roundup(len, 4) > wmi_handle->max_msg_len) {
1808 		QDF_DEBUG_PANIC("Invalid length %u (via %s:%u) max size: %u",
1809 				len, func, line, wmi_handle->max_msg_len);
1810 		return NULL;
1811 	}
1812 
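	/* Try the pre-allocated wbuff pool first; fall back to a fresh nbuf
	 * allocation if no pool buffer is available.
	 */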
1813 	wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, WBUFF_MAX_POOL_ID,
1814 				 len, __func__, __LINE__);
1815 	if (!wmi_buf)
1816 		wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len +
1817 				WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4,
1818 				false, func, line);
1819 
1820 	if (!wmi_buf) {
1821 		wmi_nofl_err("%s:%d, failed to alloc len:%d", func, line, len);
1822 		return NULL;
1823 	}
1824 
1825 	/* Clear the wmi buffer */
1826 	OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
1827 
1828 	/*
1829 	 * Set the length of the buffer to match the allocation size.
1830 	 */
1831 	qdf_nbuf_set_pktlen(wmi_buf, len);
1832 
1833 	return wmi_buf;
1834 }
1835 qdf_export_symbol(wmi_buf_alloc_fl);
1836 
1837 void wmi_buf_free(wmi_buf_t net_buf)
1838 {
1839 	net_buf = wbuff_buff_put(net_buf);
1840 	if (net_buf)
1841 		qdf_nbuf_free(net_buf);
1842 }
1843 qdf_export_symbol(wmi_buf_free);
1844 #endif
1845 
1846 uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle)
1847 {
1848 	return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM;
1849 }
1850 qdf_export_symbol(wmi_get_max_msg_len);
1851 
1852 #ifndef WMI_CMD_STRINGS
1853 static uint8_t *wmi_id_to_name(uint32_t wmi_command)
1854 {
1855 	return "Invalid WMI cmd";
1856 }
1857 #endif
1858 
1859 static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag)
1860 {
1861 	wmi_nofl_debug("Send cmd %s(0x%x) tag:%d",
1862 		       wmi_id_to_name(cmd_id), cmd_id, tag);
1863 }
1864 
1865 /**
1866  * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence
1867  * @cmd_id: command to check
1868  *
1869  * Return: true if the command is part of the resume sequence.
1870  */
1871 #ifdef WLAN_POWER_MANAGEMENT_OFFLOAD
1872 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1873 {
1874 	switch (cmd_id) {
1875 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
1876 	case WMI_PDEV_RESUME_CMDID:
1877 		return true;
1878 
1879 	default:
1880 		return false;
1881 	}
1882 }
1883 
1884 #else
1885 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
1886 {
1887 	return false;
1888 }
1889 
1890 #endif
1891 
1892 #ifdef FEATURE_WLAN_D0WOW
1893 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1894 {
1895 	wmi_d0_wow_enable_disable_cmd_fixed_param *cmd;
1896 
1897 	if (cmd_id == WMI_D0_WOW_ENABLE_DISABLE_CMDID) {
1898 		cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *)
1899 			wmi_buf_data(buf);
1900 		if (!cmd->enable)
1901 			return true;
1902 		else
1903 			return false;
1904 	}
1905 
1906 	return false;
1907 }
1908 #else
1909 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
1910 {
1911 	return false;
1912 }
1913 
1914 #endif
1915 
1916 #ifdef WMI_INTERFACE_SEQUENCE_CHECK
1917 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1918 {
1919 	wmi_handle->wmi_sequence = 0;
1920 	wmi_handle->wmi_exp_sequence = 0;
1921 	wmi_handle->wmi_sequence_stop = false;
1922 }
1923 
1924 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1925 {
1926 	qdf_spinlock_create(&wmi_handle->wmi_seq_lock);
1927 	wmi_interface_sequence_reset(wmi_handle);
1928 }
1929 
1930 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
1931 {
1932 	qdf_spinlock_destroy(&wmi_handle->wmi_seq_lock);
1933 }
1934 
1935 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
1936 {
1937 	wmi_handle->wmi_sequence_stop = true;
1938 }
1939 
1940 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
1941 					  HTC_PACKET *pkt,
1942 					  const char *func, uint32_t line)
1943 {
1944 	wmi_buf_t buf = GET_HTC_PACKET_NET_BUF_CONTEXT(pkt);
1945 	QDF_STATUS status;
1946 
1947 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1948 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
1949 	if (QDF_STATUS_SUCCESS != status) {
1950 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1951 		qdf_atomic_dec(&wmi_handle->pending_cmds);
1952 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
1953 			     func, line, status);
1954 		qdf_mem_free(pkt);
1955 		return status;
1956 	}
1957 	/* Record the sequence number in the SKB */
1958 	qdf_nbuf_set_mark(buf, wmi_handle->wmi_sequence);
1959 	/* Increment the sequence number */
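	/* (the mask-based wrap assumes wmi_max_cmds is a power of two) */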
1960 	wmi_handle->wmi_sequence = (wmi_handle->wmi_sequence + 1)
1961 				   & (wmi_handle->wmi_max_cmds - 1);
1962 	qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1963 
1964 	return status;
1965 }
1966 
1967 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
1968 						wmi_buf_t buf)
1969 {
1970 	/* Skip sequence check when wmi sequence stop is set */
1971 	if (wmi_handle->wmi_sequence_stop)
1972 		return;
1973 
1974 	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
1975 	/* Match the completion sequence and expected sequence number */
1976 	if (qdf_nbuf_get_mark(buf) != wmi_handle->wmi_exp_sequence) {
1977 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1978 		wmi_nofl_err("WMI Tx Completion Sequence number mismatch");
1979 		wmi_nofl_err("Expected %d Received %d",
1980 			     wmi_handle->wmi_exp_sequence,
1981 			     qdf_nbuf_get_mark(buf));
1982 		/* Trigger Recovery */
1983 		qdf_trigger_self_recovery(wmi_handle->soc,
1984 					  QDF_WMI_BUF_SEQUENCE_MISMATCH);
1985 	} else {
1986 		/* Increment the expected sequence number */
1987 		wmi_handle->wmi_exp_sequence =
1988 				(wmi_handle->wmi_exp_sequence + 1)
1989 				& (wmi_handle->wmi_max_cmds - 1);
1990 		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
1991 	}
1992 }
1993 #else
1994 static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
1995 {
1996 }
1997 
1998 static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
1999 {
2000 }
2001 
2002 static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
2003 {
2004 }
2005 
2006 void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
2007 {
2008 }
2009 
2010 static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
2011 					  HTC_PACKET *pkt,
2012 					  const char *func, uint32_t line)
2013 {
2014 	QDF_STATUS status;
2015 
2016 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
2017 	if (QDF_STATUS_SUCCESS != status) {
2018 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2019 		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
2020 			     func, line, status);
2021 		qdf_mem_free(pkt);
2022 		return status;
2023 	}
2024 
2025 	return status;
2026 }
2027 
2028 static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
2029 						wmi_buf_t buf)
2030 {
2031 }
2032 #endif
2033 
2034 static inline void wmi_unified_debug_dump(wmi_unified_t wmi_handle)
2035 {
2036 	wmi_nofl_err("Endpoint ID = %d, Tx Queue Depth = %d, soc_id = %u, target type = %s",
2037 		     wmi_handle->wmi_endpoint_id,
2038 		     htc_get_tx_queue_depth(wmi_handle->htc_handle,
2039 					    wmi_handle->wmi_endpoint_id),
2040 		     wmi_handle->soc->soc_idx,
2041 		     (wmi_handle->target_type ==
2042 		      WMI_TLV_TARGET ? "WMI_TLV_TARGET" :
2043 						"WMI_NON_TLV_TARGET"));
2044 }
2045 
2046 #ifdef SYSTEM_PM_CHECK
2047 /**
2048  * wmi_set_system_pm_pkt_tag() - API to set tag for system pm packets
2049  * @htc_tag: HTC tag
2050  * @buf: wmi cmd buffer
2051  * @cmd_id: cmd id
2052  *
2053  * Return: None
2054  */
2055 static void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
2056 				      uint32_t cmd_id)
2057 {
2058 	switch (cmd_id) {
2059 	case WMI_WOW_ENABLE_CMDID:
2060 	case WMI_PDEV_SUSPEND_CMDID:
2061 		*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
2062 		break;
2063 	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
2064 	case WMI_PDEV_RESUME_CMDID:
2065 		*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
2066 		break;
2067 	case WMI_D0_WOW_ENABLE_DISABLE_CMDID:
2068 		if (wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id))
2069 			*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
2070 		else
2071 			*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
2072 		break;
2073 	default:
2074 		break;
2075 	}
2076 }
2077 #else
2078 static inline void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
2079 					     uint32_t cmd_id)
2080 {
2081 }
2082 #endif
2083 
2084 #ifdef DP_UMAC_HW_RESET_SUPPORT
2085 /**
2086  * wmi_unified_is_max_pending_commands_reached() - API to check if WMI max
2087  * pending commands are reached.
2088  * @wmi_handle: Pointer to WMI handle
2089  *
2090  * Return: false if umac reset is in progress, even when the maximum number
2091  * of pending WMI commands has been reached, because FW will not reap WMI
2092  * commands from the CE ring during umac reset and the pending commands stay
2093  * queued in the host SW ring; true otherwise once the limit is reached.
2094  */
2095 static inline bool
2096 wmi_unified_is_max_pending_commands_reached(wmi_unified_t wmi_handle)
2097 {
2098 	ol_txrx_soc_handle soc_txrx_handle;
2099 
2100 	soc_txrx_handle = (ol_txrx_soc_handle)wlan_psoc_get_dp_handle(
2101 			wmi_handle->soc->wmi_psoc);
2102 	if (!soc_txrx_handle) {
2103 		wmi_err("psoc handle is NULL");
2104 		return false;
2105 	}
2106 
2107 	return ((qdf_atomic_read(&wmi_handle->pending_cmds) >=
2108 			wmi_handle->wmi_max_cmds) &&
2109 		!cdp_umac_reset_is_inprogress(soc_txrx_handle));
2110 }
2111 #else
2112 static inline bool
2113 wmi_unified_is_max_pending_commands_reached(wmi_unified_t wmi_handle)
2114 {
2115 	return (qdf_atomic_read(&wmi_handle->pending_cmds) >=
2116 			wmi_handle->wmi_max_cmds);
2117 }
2118 #endif
2119 
2120 QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf,
2121 				   uint32_t len, uint32_t cmd_id,
2122 				   const char *func, uint32_t line)
2123 {
2124 	HTC_PACKET *pkt;
2125 	uint16_t htc_tag = 0;
2126 	bool rtpm_inprogress;
2127 
2128 	rtpm_inprogress = wmi_get_runtime_pm_inprogress(wmi_handle);
2129 	if (rtpm_inprogress) {
2130 		htc_tag = wmi_handle->ops->wmi_set_htc_tx_tag(wmi_handle, buf,
2131 							      cmd_id);
2132 	} else if (qdf_atomic_read(&wmi_handle->is_target_suspended) &&
2133 		   !wmi_is_pm_resume_cmd(cmd_id) &&
2134 		   !wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id)) {
2135 		wmi_nofl_err("Target is suspended (via %s:%u)",
2136 			     func, line);
2137 		return QDF_STATUS_E_BUSY;
2138 	}
2139 
2140 	if (wmi_handle->wmi_stopinprogress) {
2141 		wmi_nofl_err("%s:%d, WMI stop in progress, wmi_handle:%pK",
2142 			     func, line, wmi_handle);
2143 		return QDF_STATUS_E_INVAL;
2144 	}
2145 
2146 	if (wmi_has_wow_enable_ack_failed(wmi_handle)) {
2147 		wmi_nofl_err("wow enable ack already failed(via %s:%u)",
2148 			     func, line);
2149 		return QDF_STATUS_E_INVAL;
2150 	}
2151 
2152 #ifndef WMI_NON_TLV_SUPPORT
2153 	/* Do sanity check on the TLV parameter structure */
2154 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2155 		void *buf_ptr = (void *)qdf_nbuf_data(buf);
2156 
2157 		if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id)
2158 			!= 0) {
2159 			wmi_nofl_err("%s:%d, Invalid WMI Param Buffer for Cmd:%d",
2160 				     func, line, cmd_id);
2161 			return QDF_STATUS_E_INVAL;
2162 		}
2163 	}
2164 #endif
2165 
2166 	if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
2167 		wmi_nofl_err("%s:%d, Failed to send cmd %x, no memory",
2168 			     func, line, cmd_id);
2169 		return QDF_STATUS_E_NOMEM;
2170 	}
2171 
2172 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2173 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2174 
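	/* Account for this command before the flow-control check below; the
	 * counter is decremented again on every failure path.
	 */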
2175 	qdf_atomic_inc(&wmi_handle->pending_cmds);
2176 	if (wmi_unified_is_max_pending_commands_reached(wmi_handle)) {
2177 		wmi_dump_last_cmd_rec_info(wmi_handle);
2178 		wmi_nofl_err("hostcredits = %d",
2179 			     wmi_get_host_credits(wmi_handle));
2180 		htc_dump_counter_info(wmi_handle->htc_handle);
2181 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2182 		wmi_nofl_err("%s:%d, MAX %d WMI Pending cmds reached",
2183 			     func, line, wmi_handle->wmi_max_cmds);
2184 		wmi_unified_debug_dump(wmi_handle);
2185 		htc_ce_tasklet_debug_dump(wmi_handle->htc_handle);
2186 		qdf_trigger_self_recovery(wmi_handle->soc->wmi_psoc,
2187 					  QDF_WMI_EXCEED_MAX_PENDING_CMDS);
2188 		return QDF_STATUS_E_BUSY;
2189 	}
2190 
2191 	pkt = qdf_mem_malloc_fl(sizeof(*pkt), func, line);
2192 	if (!pkt) {
2193 		qdf_atomic_dec(&wmi_handle->pending_cmds);
2194 		return QDF_STATUS_E_NOMEM;
2195 	}
2196 
2197 	if (!rtpm_inprogress)
2198 		wmi_set_system_pm_pkt_tag(&htc_tag, buf, cmd_id);
2199 
2200 	SET_HTC_PACKET_INFO_TX(pkt,
2201 			       NULL,
2202 			       qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
2203 			       wmi_handle->wmi_endpoint_id, htc_tag);
2204 
2205 	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);
2206 	wmi_log_cmd_id(cmd_id, htc_tag);
2207 	wmi_ext_dbg_msg_cmd_record(wmi_handle,
2208 				   qdf_nbuf_data(buf), qdf_nbuf_len(buf));
2209 #ifdef WMI_INTERFACE_EVENT_LOGGING
2210 	if (wmi_handle->log_info.wmi_logging_enable) {
2211 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2212 		/*
2213 		 * Record 16 bytes of WMI cmd data -
2214 		 * exclude TLV and WMI headers
2215 		 *
2216 		 * WMI mgmt command already recorded in wmi_mgmt_cmd_record
2217 		 */
2218 		if (wmi_handle->ops->is_management_record(cmd_id) == false) {
2219 			uint8_t *tmpbuf = (uint8_t *)qdf_nbuf_data(buf) +
2220 				wmi_handle->soc->buf_offset_command;
2221 
2222 			WMI_COMMAND_RECORD(wmi_handle, cmd_id, tmpbuf);
2223 			wmi_specific_cmd_record(wmi_handle, cmd_id, tmpbuf);
2224 		}
2225 
2226 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2227 	}
2228 #endif
2229 	return wmi_htc_send_pkt(wmi_handle, pkt, func, line);
2230 }
2231 qdf_export_symbol(wmi_unified_cmd_send_fl);
2232 
2233 /**
2234  * wmi_unified_get_event_handler_ix() - gives event handler's index
2235  * @wmi_handle: handle to wmi
2236  * @event_id: wmi event id
2237  *
2238  * Return: event handler's index
2239  */
2240 static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
2241 					    uint32_t event_id)
2242 {
2243 	uint32_t idx = 0;
2244 	int32_t invalid_idx = -1;
2245 	struct wmi_soc *soc = wmi_handle->soc;
2246 
2247 	for (idx = 0; (idx < soc->max_event_idx &&
2248 		       idx < WMI_UNIFIED_MAX_EVENT); ++idx) {
2249 		if (wmi_handle->event_id[idx] == event_id &&
2250 		    wmi_handle->event_handler[idx]) {
2251 			return idx;
2252 		}
2253 	}
2254 
2255 	return invalid_idx;
2256 }
2257 
2258 /**
2259  * wmi_register_event_handler_with_ctx() - register event handler with
2260  * exec ctx and buffer type
2261  * @wmi_handle: handle to wmi
2262  * @event_id: wmi event id
2263  * @handler_func: wmi event handler function
2264  * @rx_ctx: rx execution context for wmi rx events
2265  * @rx_buf_type: rx buffer type for wmi rx events
2266  *
2267  * Return: QDF_STATUS_SUCCESS on successful register event else failure.
2268  */
2269 static QDF_STATUS
2270 wmi_register_event_handler_with_ctx(wmi_unified_t wmi_handle,
2271 				    uint32_t event_id,
2272 				    wmi_unified_event_handler handler_func,
2273 				    enum wmi_rx_exec_ctx rx_ctx,
2274 				    enum wmi_rx_buff_type rx_buf_type)
2275 {
2276 	uint32_t idx = 0;
2277 	uint32_t evt_id;
2278 	struct wmi_soc *soc;
2279 
2280 	if (!wmi_handle) {
2281 		wmi_err("WMI handle is NULL");
2282 		return QDF_STATUS_E_FAILURE;
2283 	}
2284 
2285 	soc = wmi_handle->soc;
2286 
2287 	if (event_id >= wmi_events_max) {
2288 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2289 			  "%s: Event id %d is unavailable",
2290 					__func__, event_id);
2291 		return QDF_STATUS_E_FAILURE;
2292 	}
2293 
2294 	if (wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2295 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
2296 			  "%s: Event id %d is not supported",
2297 			  __func__, event_id);
2298 		return QDF_STATUS_E_NOSUPPORT;
2299 	}
2300 	evt_id = wmi_handle->wmi_events[event_id];
2301 
2302 	if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) {
2303 		wmi_info("event handler already registered 0x%x", evt_id);
2304 		return QDF_STATUS_E_FAILURE;
2305 	}
2306 	if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
2307 		wmi_err("no more event handlers 0x%x",
2308 			 evt_id);
2309 		return QDF_STATUS_E_FAILURE;
2310 	}
2311 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
2312 		  "Registered event handler for event 0x%8x", evt_id);
2313 	idx = soc->max_event_idx;
2314 	wmi_handle->event_handler[idx] = handler_func;
2315 	wmi_handle->event_id[idx] = evt_id;
2316 
2317 	qdf_spin_lock_bh(&soc->ctx_lock);
2318 	wmi_handle->ctx[idx].exec_ctx = rx_ctx;
2319 	wmi_handle->ctx[idx].buff_type = rx_buf_type;
2320 	qdf_spin_unlock_bh(&soc->ctx_lock);
2321 	soc->max_event_idx++;
2322 
2323 	return QDF_STATUS_SUCCESS;
2324 }
2325 
2326 QDF_STATUS
2327 wmi_unified_register_event(wmi_unified_t wmi_handle,
2328 			   uint32_t event_id,
2329 			   wmi_unified_event_handler handler_func)
2330 {
2331 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2332 						   handler_func,
2333 						   WMI_RX_UMAC_CTX,
2334 						   WMI_RX_PROCESSED_BUFF);
2335 }
2336 
2337 QDF_STATUS
2338 wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
2339 				   wmi_conv_event_id event_id,
2340 				   wmi_unified_event_handler handler_func,
2341 				   uint8_t rx_ctx)
2342 {
2343 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2344 						   handler_func, rx_ctx,
2345 						   WMI_RX_PROCESSED_BUFF);
2346 }
2347 
2348 qdf_export_symbol(wmi_unified_register_event_handler);
2349 
2350 QDF_STATUS
2351 wmi_unified_register_raw_event_handler(wmi_unified_t wmi_handle,
2352 				       wmi_conv_event_id event_id,
2353 				       wmi_unified_event_handler handler_func,
2354 				       enum wmi_rx_exec_ctx rx_ctx)
2355 {
2356 	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
2357 						   handler_func, rx_ctx,
2358 						   WMI_RX_RAW_BUFF);
2359 }
2360 
2361 qdf_export_symbol(wmi_unified_register_raw_event_handler);
2362 
2363 QDF_STATUS wmi_unified_unregister_event(wmi_unified_t wmi_handle,
2364 					uint32_t event_id)
2365 {
2366 	uint32_t idx = 0;
2367 	uint32_t evt_id;
2368 	struct wmi_soc *soc;
2369 
2370 	if (!wmi_handle) {
2371 		wmi_err("WMI handle is NULL");
2372 		return QDF_STATUS_E_FAILURE;
2373 	}
2374 
2375 	soc = wmi_handle->soc;
2376 	if (event_id >= wmi_events_max ||
2377 		wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2378 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
2379 			  "%s: Event id %d is unavailable",
2380 					__func__, event_id);
2381 		return QDF_STATUS_E_FAILURE;
2382 	}
2383 	evt_id = wmi_handle->wmi_events[event_id];
2384 
2385 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2386 	if (idx == -1) {
2387 		wmi_warn("event handler is not registered: evt id 0x%x",
2388 			 evt_id);
2389 		return QDF_STATUS_E_FAILURE;
2390 	}
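	/* Compact the handler table: move the last registered handler and its
	 * execution context into the slot being freed.
	 */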
2391 	wmi_handle->event_handler[idx] = NULL;
2392 	wmi_handle->event_id[idx] = 0;
2393 	--soc->max_event_idx;
2394 	wmi_handle->event_handler[idx] =
2395 		wmi_handle->event_handler[soc->max_event_idx];
2396 	wmi_handle->event_id[idx] =
2397 		wmi_handle->event_id[soc->max_event_idx];
2398 
2399 	qdf_spin_lock_bh(&soc->ctx_lock);
2400 
2401 	wmi_handle->ctx[idx].exec_ctx =
2402 		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
2403 	wmi_handle->ctx[idx].buff_type =
2404 		wmi_handle->ctx[soc->max_event_idx].buff_type;
2405 
2406 	qdf_spin_unlock_bh(&soc->ctx_lock);
2407 
2408 	return QDF_STATUS_SUCCESS;
2409 }
2410 
2411 QDF_STATUS wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
2412 						wmi_conv_event_id event_id)
2413 {
2414 	uint32_t idx = 0;
2415 	uint32_t evt_id;
2416 	struct wmi_soc *soc;
2417 
2418 	if (!wmi_handle) {
2419 		wmi_err("WMI handle is NULL");
2420 		return QDF_STATUS_E_FAILURE;
2421 	}
2422 
2423 	soc = wmi_handle->soc;
2424 
2425 	if (event_id >= wmi_events_max) {
2426 		wmi_err("Event id %d is unavailable", event_id);
2427 		return QDF_STATUS_E_FAILURE;
2428 	}
2429 
2430 	if (wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
2431 		wmi_debug("Event id %d is not supported", event_id);
2432 		return QDF_STATUS_E_NOSUPPORT;
2433 	}
2434 
2435 	evt_id = wmi_handle->wmi_events[event_id];
2436 
2437 	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
2438 	if (idx == -1) {
2439 		wmi_err("event handler is not registered: evt id 0x%x",
2440 			 evt_id);
2441 		return QDF_STATUS_E_FAILURE;
2442 	}
2443 	wmi_handle->event_handler[idx] = NULL;
2444 	wmi_handle->event_id[idx] = 0;
2445 	--soc->max_event_idx;
2446 	wmi_handle->event_handler[idx] =
2447 		wmi_handle->event_handler[soc->max_event_idx];
2448 	wmi_handle->event_id[idx] =
2449 		wmi_handle->event_id[soc->max_event_idx];
2450 
2451 	qdf_spin_lock_bh(&soc->ctx_lock);
2452 
2453 	wmi_handle->ctx[idx].exec_ctx =
2454 		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
2455 	wmi_handle->ctx[idx].buff_type =
2456 		wmi_handle->ctx[soc->max_event_idx].buff_type;
2457 
2458 	qdf_spin_unlock_bh(&soc->ctx_lock);
2459 
2460 	return QDF_STATUS_SUCCESS;
2461 }
2462 qdf_export_symbol(wmi_unified_unregister_event_handler);
2463 
2464 static void
2465 wmi_process_rx_diag_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2466 					    void *evt_buf)
2467 {
2468 	uint32_t num_diag_events_pending;
2469 
2470 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
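	/* A RX_DIAG_WQ_MAX_SIZE of 0 disables the depth check, leaving the
	 * diag event queue unbounded.
	 */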
2471 	if (RX_DIAG_WQ_MAX_SIZE > 0) {
2472 		num_diag_events_pending = qdf_nbuf_queue_len(
2473 						&wmi_handle->diag_event_queue);
2474 
2475 		if (num_diag_events_pending >= RX_DIAG_WQ_MAX_SIZE) {
2476 			qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2477 			wmi_handle->wmi_rx_diag_events_dropped++;
2478 			wmi_debug_rl("Rx diag events dropped count: %d",
2479 				     wmi_handle->wmi_rx_diag_events_dropped);
2480 			qdf_nbuf_free(evt_buf);
2481 			return;
2482 		}
2483 	}
2484 
2485 	qdf_nbuf_queue_add(&wmi_handle->diag_event_queue, evt_buf);
2486 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
2487 	qdf_queue_work(0, wmi_handle->wmi_rx_diag_work_queue,
2488 		       &wmi_handle->rx_diag_event_work);
2489 }
2490 
2491 void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
2492 					    void *evt_buf)
2493 {
2494 
2495 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
2496 	qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
2497 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
2498 	qdf_queue_work(0, wmi_handle->wmi_rx_work_queue,
2499 			&wmi_handle->rx_event_work);
2500 
2501 	return;
2502 }
2503 
2504 qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx);
2505 
2506 uint32_t wmi_critical_events_in_flight(struct wmi_unified *wmi)
2507 {
2508 	return qdf_atomic_read(&wmi->critical_events_in_flight);
2509 }
2510 
2511 static bool
2512 wmi_is_event_critical(struct wmi_unified *wmi_handle, uint32_t event_id)
2513 {
2514 	if (wmi_handle->wmi_events[wmi_roam_synch_event_id] == event_id)
2515 		return true;
2516 
2517 	return false;
2518 }
2519 
2520 static QDF_STATUS wmi_discard_fw_event(struct scheduler_msg *msg)
2521 {
2522 	struct wmi_process_fw_event_params *event_param;
2523 
2524 	if (!msg->bodyptr)
2525 		return QDF_STATUS_E_INVAL;
2526 
2527 	event_param = (struct wmi_process_fw_event_params *)msg->bodyptr;
2528 	qdf_nbuf_free(event_param->evt_buf);
2529 	qdf_mem_free(msg->bodyptr);
2530 	msg->bodyptr = NULL;
2531 	msg->bodyval = 0;
2532 	msg->type = 0;
2533 
2534 	return QDF_STATUS_SUCCESS;
2535 }
2536 
2537 static QDF_STATUS wmi_process_fw_event_handler(struct scheduler_msg *msg)
2538 {
2539 	struct wmi_process_fw_event_params *params =
2540 		(struct wmi_process_fw_event_params *)msg->bodyptr;
2541 	struct wmi_unified *wmi_handle;
2542 	uint32_t event_id;
2543 
2544 	wmi_handle = (struct wmi_unified *)params->wmi_handle;
2545 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params->evt_buf),
2546 				 WMI_CMD_HDR, COMMANDID);
2547 	wmi_process_fw_event(wmi_handle, params->evt_buf);
2548 
2549 	if (wmi_is_event_critical(wmi_handle, event_id))
2550 		qdf_atomic_dec(&wmi_handle->critical_events_in_flight);
2551 
2552 	qdf_mem_free(msg->bodyptr);
2553 
2554 	return QDF_STATUS_SUCCESS;
2555 }
2556 
2557 /**
2558  * wmi_process_fw_event_sched_thread_ctx() - common event handler to serialize
2559  *                                  event processing through scheduler thread
2560  * @wmi: wmi context
2561  * @ev: event buffer
2562  *
2563  * Return: QDF_STATUS_SUCCESS on success, QDF error status on failure
2564  */
2565 static QDF_STATUS
2566 wmi_process_fw_event_sched_thread_ctx(struct wmi_unified *wmi,
2567 				      void *ev)
2568 {
2569 	struct wmi_process_fw_event_params *params_buf;
2570 	struct scheduler_msg msg = { 0 };
2571 	uint32_t event_id;
2572 
2573 	params_buf = qdf_mem_malloc(sizeof(struct wmi_process_fw_event_params));
2574 	if (!params_buf) {
2575 		wmi_err("malloc failed");
2576 		qdf_nbuf_free(ev);
2577 		return QDF_STATUS_E_NOMEM;
2578 	}
2579 
2580 	params_buf->wmi_handle = wmi;
2581 	params_buf->evt_buf = ev;
2582 
2583 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params_buf->evt_buf),
2584 				 WMI_CMD_HDR, COMMANDID);
2585 	if (wmi_is_event_critical(wmi, event_id))
2586 		qdf_atomic_inc(&wmi->critical_events_in_flight);
2587 
2588 	msg.bodyptr = params_buf;
2589 	msg.bodyval = 0;
2590 	msg.callback = wmi_process_fw_event_handler;
2591 	msg.flush_callback = wmi_discard_fw_event;
2592 
2593 	if (QDF_STATUS_SUCCESS !=
2594 		scheduler_post_message(QDF_MODULE_ID_TARGET_IF,
2595 				       QDF_MODULE_ID_TARGET_IF,
2596 				       QDF_MODULE_ID_TARGET_IF, &msg)) {
2597 		qdf_nbuf_free(ev);
2598 		qdf_mem_free(params_buf);
2599 		return QDF_STATUS_E_FAULT;
2600 	}
2601 
2602 	return QDF_STATUS_SUCCESS;
2603 }
2604 
2605 /**
2606  * wmi_get_pdev_ep() - get the wmi handle mapped to an HTC endpoint
2607  * @soc: handle to wmi soc
2608  * @ep: endpoint id
2609  *
2610  * Return: wmi handle of the pdev using the endpoint, or NULL if none matches
2611  */
2612 static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc,
2613 						HTC_ENDPOINT_ID ep)
2614 {
2615 	uint32_t i;
2616 
2617 	for (i = 0; i < WMI_MAX_RADIOS; i++)
2618 		if (soc->wmi_endpoint_id[i] == ep)
2619 			break;
2620 
2621 	if (i == WMI_MAX_RADIOS)
2622 		return NULL;
2623 
2624 	return soc->wmi_pdev[i];
2625 }
2626 
2627 /**
2628  * wmi_mtrace_rx() - Wrapper function for the qdf_mtrace API
2629  * @message_id: 32-bit WMI message ID
2630  * @vdev_id: Vdev ID
2631  * @data: Actual message contents
2632  *
2633  * This function converts the 32-bit WMI message ID into the 15-bit message
2634  * ID format used by qdf_mtrace, since a qdf_mtrace message reserves only
2635  * 15 bits for the message ID.
2636  * Of these 15 bits, the lower 7 bits (from the LSB) carry the WMI command
2637  * number and the upper 8 bits carry the WMI_GRP_ID. With this notation a
2638  * maximum of 256 groups can be supported, with up to 128 commands in each
2639  * group.
2640  *
2641  * Return: None
2642  */
2643 static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data)
2644 {
2645 	uint16_t mtrace_message_id;
2646 
2647 	mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) |
2648 		(QDF_WMI_MTRACE_GRP_ID(message_id) <<
2649 						QDF_WMI_MTRACE_CMD_NUM_BITS);
2650 	qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA,
2651 		   mtrace_message_id, vdev_id, data);
2652 }
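
/*
 * Illustrative example (hypothetical values, assuming
 * QDF_WMI_MTRACE_CMD_NUM_BITS is 7): for a message_id whose group id
 * decodes to 0x3 and whose command number decodes to 0x12, the packed
 * 15-bit mtrace id is (0x3 << 7) | 0x12 = 0x192.
 */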
2653 
2654 #ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE
2655 static void wmi_rx_nbuf_free(qdf_nbuf_t nbuf)
2656 {
2657 	nbuf = wbuff_buff_put(nbuf);
2658 	if (nbuf)
2659 		qdf_nbuf_free(nbuf);
2660 }
2661 #else
2662 static inline void wmi_rx_nbuf_free(qdf_nbuf_t nbuf)
2663 {
2664 	return qdf_nbuf_free(nbuf);
2665 }
2666 #endif
2667 
2668 /**
2669  * wmi_process_control_rx() - process fw event callbacks
2670  * @wmi_handle: handle to wmi_unified
2671  * @evt_buf: handle to wmi_buf_t
2672  *
2673  * Return: none
2674  */
2675 static void wmi_process_control_rx(struct wmi_unified *wmi_handle,
2676 				   wmi_buf_t evt_buf)
2677 {
2678 	struct wmi_soc *soc = wmi_handle->soc;
2679 	uint32_t id;
2680 	uint32_t idx;
2681 	enum wmi_rx_exec_ctx exec_ctx;
2682 
2683 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2684 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2685 	if (qdf_unlikely(idx == A_ERROR)) {
2686 		wmi_debug("no handler registered for event id 0x%x", id);
2687 		wmi_rx_nbuf_free(evt_buf);
2688 		return;
2689 	}
2690 	wmi_mtrace_rx(id, 0xFF, idx);
2691 	qdf_spin_lock_bh(&soc->ctx_lock);
2692 	exec_ctx = wmi_handle->ctx[idx].exec_ctx;
2693 	qdf_spin_unlock_bh(&soc->ctx_lock);
2694 
2695 #ifdef WMI_INTERFACE_EVENT_LOGGING
2696 	if (wmi_handle->log_info.wmi_logging_enable) {
2697 		uint8_t *data;
2698 		data = qdf_nbuf_data(evt_buf);
2699 
2700 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2701 		/* Exclude 4 bytes of TLV header */
2702 		if (wmi_handle->ops->is_diag_event(id)) {
2703 			WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id,
2704 				((uint8_t *) data +
2705 				wmi_handle->soc->buf_offset_event));
2706 		} else if (wmi_handle->ops->is_management_record(id)) {
2707 			WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id,
2708 				((uint8_t *) data +
2709 				wmi_handle->soc->buf_offset_event));
2710 		} else {
2711 			WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data +
2712 				wmi_handle->soc->buf_offset_event));
2713 		}
2714 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2715 	}
2716 #endif
2717 
2718 	if (exec_ctx == WMI_RX_WORK_CTX) {
2719 		wmi_process_fw_event_worker_thread_ctx(wmi_handle, evt_buf);
2720 	} else if (exec_ctx == WMI_RX_TASKLET_CTX) {
2721 		wmi_process_fw_event(wmi_handle, evt_buf);
2722 	} else if (exec_ctx == WMI_RX_SERIALIZER_CTX) {
2723 		wmi_process_fw_event_sched_thread_ctx(wmi_handle, evt_buf);
2724 	} else if (exec_ctx == WMI_RX_DIAG_WORK_CTX) {
2725 		wmi_process_rx_diag_event_worker_thread_ctx(wmi_handle,
2726 							    evt_buf);
2727 	} else {
2728 		wmi_err("Invalid event context %d", exec_ctx);
2729 		wmi_rx_nbuf_free(evt_buf);
2730 	}
2731 
2732 }
2733 
2734 /**
2735  * wmi_control_rx() - process fw event callbacks
2736  * @ctx: handle to wmi
2737  * @htc_packet: pointer to htc packet
2738  *
2739  * Return: none
2740  */
2741 static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
2742 {
2743 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2744 	struct wmi_unified *wmi_handle;
2745 	wmi_buf_t evt_buf;
2746 
2747 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2748 
2749 	wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint);
2750 	if (!wmi_handle) {
2751 		wmi_err("unable to get wmi_handle to Endpoint %d",
2752 			htc_packet->Endpoint);
2753 		wmi_rx_nbuf_free(evt_buf);
2754 		return;
2755 	}
2756 
2757 	wmi_process_control_rx(wmi_handle, evt_buf);
2758 }
2759 
2760 #if defined(WLAN_FEATURE_WMI_DIAG_OVER_CE7) || \
2761 	defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
2762 /**
2763  * wmi_control_diag_rx() - process diag fw event callbacks
2764  * @ctx: handle to wmi
2765  * @htc_packet: pointer to htc packet
2766  *
2767  * Return: none
2768  */
2769 static void wmi_control_diag_rx(void *ctx, HTC_PACKET *htc_packet)
2770 {
2771 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2772 	struct wmi_unified *wmi_handle;
2773 	wmi_buf_t evt_buf;
2774 
2775 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2776 
2777 	wmi_handle = soc->wmi_pdev[0];
2778 
2779 	if (!wmi_handle) {
2780 		wmi_err("unable to get wmi_handle for diag event end point id:%d", htc_packet->Endpoint);
2781 		wmi_rx_nbuf_free(evt_buf);
2782 		return;
2783 	}
2784 
2785 	wmi_process_control_rx(wmi_handle, evt_buf);
2786 }
2787 #endif
2788 
2789 #if defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
2790 /**
2791  * wmi_control_dbr_rx() - process dbr fw event callbacks
2792  * @ctx: handle to wmi
2793  * @htc_packet: pointer to htc packet
2794  *
2795  * Return: none
2796  */
2797 static void wmi_control_dbr_rx(void *ctx, HTC_PACKET *htc_packet)
2798 {
2799 	struct wmi_soc *soc = (struct wmi_soc *)ctx;
2800 	struct wmi_unified *wmi_handle;
2801 	wmi_buf_t evt_buf;
2802 
2803 	evt_buf = (wmi_buf_t)htc_packet->pPktContext;
2804 	wmi_handle = soc->wmi_pdev[0];
2805 
2806 	if (!wmi_handle) {
2807 		wmi_err("unable to get wmi_handle for dbr event endpoint id:%d",
2808 			htc_packet->Endpoint);
2809 		wmi_rx_nbuf_free(evt_buf);
2810 		return;
2811 	}
2812 
2813 	wmi_process_control_rx(wmi_handle, evt_buf);
2814 }
2815 #endif
2816 
2817 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
2818 QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle,
2819 					 wmi_buf_t buf, uint32_t buflen,
2820 					 uint32_t cmd_id)
2821 {
2822 	QDF_STATUS status;
2823 	int32_t ret;
2824 
2825 	if (!qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR))) {
2826 		wmi_err("Failed to send cmd %x, no memory", cmd_id);
2827 		return QDF_STATUS_E_NOMEM;
2828 	}
2829 
2830 	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
2831 	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
2832 	wmi_debug("Sending WMI_CMD_ID: 0x%x over qmi", cmd_id);
2833 	status = qdf_wmi_send_recv_qmi(qdf_nbuf_data(buf),
2834 				       buflen + sizeof(WMI_CMD_HDR),
2835 				       wmi_handle,
2836 				       wmi_process_qmi_fw_event);
2837 	if (QDF_IS_STATUS_ERROR(status)) {
2838 		qdf_nbuf_pull_head(buf, sizeof(WMI_CMD_HDR));
2839 		wmi_warn("WMI send on QMI failed. Retrying WMI on HTC");
2840 	} else {
2841 		ret = qdf_atomic_inc_return(&wmi_handle->num_stats_over_qmi);
2842 		wmi_debug("num stats over qmi: %d", ret);
2843 		wmi_buf_free(buf);
2844 	}
2845 
2846 	return status;
2847 }
2848 
2849 static int __wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2850 {
2851 	struct wmi_unified *wmi_handle = wmi_cb_ctx;
2852 	wmi_buf_t evt_buf;
2853 	uint32_t evt_id;
2854 
2855 	if (!wmi_handle || !buf || !len) {
2856 		wmi_err_rl("%s is invalid", !wmi_handle ?
2857 				"wmi_handle" : !buf ? "buf" : "length");
2858 		return -EINVAL;
2859 	}
2860 
2861 	evt_buf = wmi_buf_alloc(wmi_handle, len);
2862 	if (!evt_buf)
2863 		return -ENOMEM;
2864 
2865 	qdf_mem_copy(qdf_nbuf_data(evt_buf), buf, len);
2866 	evt_id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2867 	wmi_debug("Received WMI_EVT_ID: 0x%x over qmi", evt_id);
2868 	wmi_process_control_rx(wmi_handle, evt_buf);
2869 
2870 	return 0;
2871 }
2872 
2873 int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
2874 {
2875 	struct qdf_op_sync *op_sync;
2876 	int ret;
2877 
2878 	if (qdf_op_protect(&op_sync))
2879 		return -EINVAL;
2880 	ret = __wmi_process_qmi_fw_event(wmi_cb_ctx, buf, len);
2881 	qdf_op_unprotect(op_sync);
2882 
2883 	return ret;
2884 }
2885 #endif
2886 
2887 void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2888 {
2889 	__wmi_control_rx(wmi_handle, evt_buf);
2890 }
2891 
2892 void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
2893 {
2894 	uint32_t id;
2895 	uint8_t *data;
2896 	uint32_t len;
2897 	void *wmi_cmd_struct_ptr = NULL;
2898 #ifndef WMI_NON_TLV_SUPPORT
2899 	int tlv_ok_status = 0;
2900 #endif
2901 	uint32_t idx = 0;
2902 	struct wmi_raw_event_buffer ev_buf;
2903 	enum wmi_rx_buff_type ev_buff_type;
2904 
2905 	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
2906 
2907 	wmi_ext_dbg_msg_event_record(wmi_handle, qdf_nbuf_data(evt_buf),
2908 				     qdf_nbuf_len(evt_buf));
2909 
2910 	if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
2911 		goto end;
2912 
2913 	data = qdf_nbuf_data(evt_buf);
2914 	len = qdf_nbuf_len(evt_buf);
2915 
2916 #ifndef WMI_NON_TLV_SUPPORT
2917 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2918 		/* Validate and pad(if necessary) the TLVs */
2919 		tlv_ok_status =
2920 			wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle,
2921 							data, len, id,
2922 							&wmi_cmd_struct_ptr);
2923 		if (tlv_ok_status != 0) {
2924 			QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2925 				  "%s: Error: id=0x%x, wmitlv check status=%d",
2926 				  __func__, id, tlv_ok_status);
2927 			goto end;
2928 		}
2929 	}
2930 #endif
2931 
2932 	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
2933 	if (idx == A_ERROR) {
2934 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
2935 		   "%s : event handler is not registered: event id 0x%x",
2936 			__func__, id);
2937 		goto end;
2938 	}
2939 #ifdef WMI_INTERFACE_EVENT_LOGGING
2940 	if (wmi_handle->log_info.wmi_logging_enable) {
2941 		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
2942 		/* Exclude 4 bytes of TLV header */
2943 		if (wmi_handle->ops->is_diag_event(id)) {
2944 			/*
2945 			 * skip diag event logging in WMI event buffer
2946 			 * as its already logged in WMI RX event buffer
2947 			 */
2948 		} else if (wmi_handle->ops->is_management_record(id)) {
2949 			/*
2950 			 * skip wmi mgmt event logging in WMI event buffer
2951 			 * as its already logged in WMI RX event buffer
2952 			 */
2953 		} else {
2954 			uint8_t *tmpbuf = (uint8_t *)data +
2955 					wmi_handle->soc->buf_offset_event;
2956 
2957 			WMI_EVENT_RECORD(wmi_handle, id, tmpbuf);
2958 			wmi_specific_evt_record(wmi_handle, id, tmpbuf);
2959 		}
2960 		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
2961 	}
2962 #endif
2963 	/* Call the WMI registered event handler */
2964 	if (wmi_handle->target_type == WMI_TLV_TARGET) {
2965 		ev_buff_type = wmi_handle->ctx[idx].buff_type;
2966 		if (ev_buff_type == WMI_RX_PROCESSED_BUFF) {
2967 			if (qdf_likely(wmi_handle->event_handler[idx]))
2968 				wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2969 								wmi_cmd_struct_ptr, len);
2970 		} else if (ev_buff_type == WMI_RX_RAW_BUFF) {
2971 			ev_buf.evt_raw_buf = data;
2972 			ev_buf.evt_processed_buf = wmi_cmd_struct_ptr;
2973 			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2974 							(void *)&ev_buf, len);
2975 		}
2976 	}
2977 	else
2978 		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
2979 			data, len);
2980 
2981 end:
2982 	/* Free event buffer and allocated event tlv */
2983 #ifndef WMI_NON_TLV_SUPPORT
2984 	if (wmi_handle->target_type == WMI_TLV_TARGET)
2985 		wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr);
2986 #endif
2987 
2988 	wmi_rx_nbuf_free(evt_buf);
2989 
2990 }
2991 
2992 #define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */
2993 
2994 static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id)
2995 {
2996 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2997 		  "%s: WLAN_BUG_RCA: Message type %x has exceeded its allotted time of %ds",
2998 		  __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000);
2999 }
3000 
3001 #ifdef CONFIG_SLUB_DEBUG_ON
3002 static void wmi_workqueue_watchdog_bite(void *arg)
3003 {
3004 	struct wmi_wq_dbg_info *info = arg;
3005 
3006 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
3007 	qdf_print_thread_trace(info->task);
3008 
3009 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
3010 		  "%s: Going down for WMI WQ Watchdog Bite!", __func__);
3011 	QDF_BUG(0);
3012 }
3013 #else
3014 static inline void wmi_workqueue_watchdog_bite(void *arg)
3015 {
3016 	struct wmi_wq_dbg_info *info = arg;
3017 
3018 	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
3019 
3020 	qdf_print_thread_trace(info->task);
3021 }
3022 #endif
3023 
3024 /**
3025  * wmi_rx_event_work() - process rx event in rx work queue context
3026  * @arg: opaque pointer to wmi handle
3027  *
3028  * This function processes any fw event, serializing it through the rx worker thread.
3029  *
3030  * Return: none
3031  */
3032 static void wmi_rx_event_work(void *arg)
3033 {
3034 	wmi_buf_t buf;
3035 	struct wmi_unified *wmi = arg;
3036 	qdf_timer_t wd_timer;
3037 	struct wmi_wq_dbg_info info;
3038 
3039 	/* initialize WMI workqueue watchdog timer */
3040 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
3041 			&info, QDF_TIMER_TYPE_SW);
3042 	qdf_spin_lock_bh(&wmi->eventq_lock);
3043 	buf = qdf_nbuf_queue_remove(&wmi->event_queue);
3044 	qdf_spin_unlock_bh(&wmi->eventq_lock);
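	/*
	 * Arm the watchdog around each event so a handler that runs longer
	 * than WMI_WQ_WD_TIMEOUT is reported via the watchdog bite handler.
	 */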
3045 	while (buf) {
3046 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
3047 		info.wd_msg_type_id =
3048 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
3049 		info.wmi_wq = wmi->wmi_rx_work_queue;
3050 		info.task = qdf_get_current_task();
3051 		__wmi_control_rx(wmi, buf);
3052 		qdf_timer_stop(&wd_timer);
3053 		qdf_spin_lock_bh(&wmi->eventq_lock);
3054 		buf = qdf_nbuf_queue_remove(&wmi->event_queue);
3055 		qdf_spin_unlock_bh(&wmi->eventq_lock);
3056 	}
3057 	qdf_timer_free(&wd_timer);
3058 }
3059 
3060 /**
3061  * wmi_rx_diag_event_work() - process rx diag event in work queue context
3062  * @arg: opaque pointer to wmi handle
3063  *
3064  * This function processes FW diag events, serializing them through the rx worker thread.
3065  *
3066  * Return: none
3067  */
3068 static void wmi_rx_diag_event_work(void *arg)
3069 {
3070 	wmi_buf_t buf;
3071 	struct wmi_unified *wmi = arg;
3072 	qdf_timer_t wd_timer;
3073 	struct wmi_wq_dbg_info info;
3074 	uint32_t diag_event_process_count = 0;
3075 
3076 	if (!wmi) {
3077 		wmi_err("Invalid WMI handle");
3078 		return;
3079 	}
3080 
3081 	/* initialize WMI workqueue watchdog timer */
3082 	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
3083 		       &info, QDF_TIMER_TYPE_SW);
3084 	qdf_spin_lock_bh(&wmi->diag_eventq_lock);
3085 	buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
3086 	qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
3087 	while (buf) {
3088 		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
3089 		info.wd_msg_type_id =
3090 		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
3091 		info.wmi_wq = NULL;
3092 		info.task = qdf_get_current_task();
3093 		__wmi_control_rx(wmi, buf);
3094 		qdf_timer_stop(&wd_timer);
3095 
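		/*
		 * Bound the number of diag events handled per invocation and
		 * re-queue the work so a long burst of diag events cannot
		 * monopolize the workqueue.
		 */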
3096 		if (diag_event_process_count++ >
3097 		    RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT) {
3098 			qdf_queue_work(0, wmi->wmi_rx_diag_work_queue,
3099 				       &wmi->rx_diag_event_work);
3100 			break;
3101 		}
3102 
3103 		qdf_spin_lock_bh(&wmi->diag_eventq_lock);
3104 		buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
3105 		qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
3106 	}
3107 	qdf_timer_free(&wd_timer);
3108 }
3109 
3110 #ifdef FEATURE_RUNTIME_PM
3111 /**
3112  * wmi_runtime_pm_init() - initialize runtime pm wmi variables
3113  * @wmi_handle: wmi context
3114  */
3115 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
3116 {
3117 	qdf_atomic_init(&wmi_handle->runtime_pm_inprogress);
3118 }
3119 
3120 void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val)
3121 {
3122 	qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val);
3123 }
3124 
3125 bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle)
3126 {
3127 	return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress);
3128 }
3129 #else
3130 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
3131 {
3132 }
3133 #endif
3134 
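/*
 * Helpers to record, clear and query whether the firmware failed to ack a
 * WoW enable request (tracked in is_wow_enable_ack_failed).
 */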
3135 void wmi_set_wow_enable_ack_failed(wmi_unified_t wmi_handle)
3136 {
3137 	qdf_atomic_set(&wmi_handle->is_wow_enable_ack_failed, 1);
3138 }
3139 
3140 void wmi_clear_wow_enable_ack_failed(wmi_unified_t wmi_handle)
3141 {
3142 	qdf_atomic_set(&wmi_handle->is_wow_enable_ack_failed, 0);
3143 }
3144 
3145 bool wmi_has_wow_enable_ack_failed(wmi_unified_t wmi_handle)
3146 {
3147 	return qdf_atomic_read(&wmi_handle->is_wow_enable_ack_failed);
3148 }
3149 
3150 void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle)
3151 {
3152 	return wmi_handle->soc;
3153 }
3154 
3155 /**
3156  * wmi_interface_logging_init() - Interface logging init
3157  * @wmi_handle: Pointer to wmi handle object
3158  * @pdev_idx: pdev index
3159  *
3160  * Return: None
3161  */
3162 #ifdef WMI_INTERFACE_EVENT_LOGGING
3163 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
3164 					      uint32_t pdev_idx)
3165 {
3166 	if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) {
3167 		qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
3168 		wmi_debugfs_init(wmi_handle, pdev_idx);
3169 	}
3170 }
3171 #else
3172 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
3173 					      uint32_t pdev_idx)
3174 {
3175 }
3176 #endif
3177 
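/**
 * wmi_initialize_worker_context() - set up WMI rx work queues
 * @wmi_handle: wmi context
 *
 * Allocates the unbound work queues used to process regular and diag rx
 * events, and initializes the corresponding locks, nbuf queues and work
 * items.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_RESOURCES otherwise
 */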
3178 static QDF_STATUS wmi_initialize_worker_context(struct wmi_unified *wmi_handle)
3179 {
3180 	wmi_handle->wmi_rx_work_queue =
3181 		qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue");
3182 	if (!wmi_handle->wmi_rx_work_queue) {
3183 		wmi_err("failed to create wmi_rx_event_work_queue");
3184 		return QDF_STATUS_E_RESOURCES;
3185 	}
3186 
3187 	qdf_spinlock_create(&wmi_handle->eventq_lock);
3188 	qdf_nbuf_queue_init(&wmi_handle->event_queue);
3189 	qdf_create_work(0, &wmi_handle->rx_event_work,
3190 			wmi_rx_event_work, wmi_handle);
3191 
3192 	wmi_handle->wmi_rx_diag_work_queue =
3193 		qdf_alloc_unbound_workqueue("wmi_rx_diag_event_work_queue");
3194 	if (!wmi_handle->wmi_rx_diag_work_queue) {
3195 		wmi_err("failed to create wmi_rx_diag_event_work_queue");
3196 		return QDF_STATUS_E_RESOURCES;
3197 	}
3198 	qdf_spinlock_create(&wmi_handle->diag_eventq_lock);
3199 	qdf_nbuf_queue_init(&wmi_handle->diag_event_queue);
3200 	qdf_create_work(0, &wmi_handle->rx_diag_event_work,
3201 			wmi_rx_diag_event_work, wmi_handle);
3202 	wmi_handle->wmi_rx_diag_events_dropped = 0;
3203 
3204 	return QDF_STATUS_SUCCESS;
3205 }
3206 
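/**
 * wmi_unified_get_pdev_handle() - get (or lazily create) a per-pdev handle
 * @soc: handle to the WMI soc object
 * @pdev_idx: pdev index
 *
 * The per-pdev wmi_unified handle is allocated and initialized from the soc
 * context on first use and reused on subsequent calls; per-call fields such
 * as the endpoint id and max message length are refreshed every time.
 *
 * Return: pointer to the pdev wmi handle, or NULL on failure
 */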
3207 void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx)
3208 {
3209 	struct wmi_unified *wmi_handle;
3210 	QDF_STATUS status;
3211 
3212 	if (pdev_idx >= WMI_MAX_RADIOS)
3213 		return NULL;
3214 
3215 	if (!soc->wmi_pdev[pdev_idx]) {
3216 		wmi_handle =
3217 			(struct wmi_unified *) qdf_mem_malloc(
3218 					sizeof(struct wmi_unified));
3219 		if (!wmi_handle)
3220 			return NULL;
3221 
3222 		status = wmi_initialize_worker_context(wmi_handle);
3223 		if (QDF_IS_STATUS_ERROR(status))
3224 			goto error;
3225 
3226 		wmi_handle->scn_handle = soc->scn_handle;
3227 		wmi_handle->event_id = soc->event_id;
3228 		wmi_handle->event_handler = soc->event_handler;
3229 		wmi_handle->ctx = soc->ctx;
3230 		wmi_handle->ops = soc->ops;
3231 		wmi_handle->wmi_events = soc->wmi_events;
3232 		wmi_handle->services = soc->services;
3233 		wmi_handle->soc = soc;
3234 		wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3235 		wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3236 		wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3237 		wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3238 		wmi_interface_logging_init(wmi_handle, pdev_idx);
3239 		qdf_atomic_init(&wmi_handle->is_target_suspended);
3240 		qdf_atomic_init(&wmi_handle->is_wow_enable_ack_failed);
3241 		wmi_handle->target_type = soc->target_type;
3242 		wmi_handle->wmi_max_cmds = soc->wmi_max_cmds;
3243 
3244 		wmi_interface_sequence_init(wmi_handle);
3245 		if (wmi_ext_dbgfs_init(wmi_handle, pdev_idx) !=
3246 		    QDF_STATUS_SUCCESS)
3247 			wmi_err("Failed to initialize wmi extended debugfs");
3248 
3249 		soc->wmi_pdev[pdev_idx] = wmi_handle;
3250 	} else
3251 		wmi_handle = soc->wmi_pdev[pdev_idx];
3252 
3253 	qdf_atomic_init(&wmi_handle->pending_cmds);
3254 	wmi_handle->wmi_stopinprogress = 0;
3255 	wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx];
3256 	wmi_handle->htc_handle = soc->htc_handle;
3257 	wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx];
3258 	wmi_handle->tag_crash_inject = false;
3259 	wmi_interface_sequence_reset(wmi_handle);
3260 
3261 	return wmi_handle;
3262 
3263 error:
3264 	qdf_mem_free(wmi_handle);
3265 
3266 	return NULL;
3267 }
3268 qdf_export_symbol(wmi_unified_get_pdev_handle);
3269 
3270 static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t);
3271 
3272 void wmi_unified_register_module(enum wmi_target_type target_type,
3273 			void (*wmi_attach)(wmi_unified_t wmi_handle))
3274 {
3275 	if (target_type < WMI_MAX_TARGET_TYPE)
3276 		wmi_attach_register[target_type] = wmi_attach;
3277 
3278 	return;
3279 }
3280 qdf_export_symbol(wmi_unified_register_module);
3281 
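/*
 * Illustrative registration sketch (names are placeholders, not part of this
 * file): a target module registers its attach callback from its own init
 * path, e.g.
 *
 *	static void my_target_attach(wmi_unified_t wmi_handle)
 *	{
 *		...populate wmi_handle->ops for this target...
 *	}
 *
 *	wmi_unified_register_module(WMI_TLV_TARGET, &my_target_attach);
 *
 * The stored callback is later invoked from wmi_unified_attach() for the
 * matching target_type.
 */
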
3282 /**
3283  * wmi_wbuff_register() - register wmi with wbuff
3284  * @wmi_handle: handle to wmi
3285  *
3286  * Return: void
3287  */
3288 static void wmi_wbuff_register(struct wmi_unified *wmi_handle)
3289 {
3290 	struct wbuff_alloc_request wbuff_alloc[4];
3291 	uint8_t reserve = WMI_MIN_HEAD_ROOM;
3292 
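	/*
	 * Register four wbuff pools of increasing buffer size for WMI tx;
	 * every buffer reserves WMI_MIN_HEAD_ROOM bytes of headroom and is
	 * rounded up to a 4-byte aligned length.
	 */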
3293 	wbuff_alloc[0].pool_id = 0;
3294 	wbuff_alloc[0].pool_size = WMI_WBUFF_POOL_0_SIZE;
3295 	wbuff_alloc[0].buffer_size = roundup(WMI_WBUFF_LEN_POOL0 + reserve, 4);
3296 
3297 	wbuff_alloc[1].pool_id = 1;
3298 	wbuff_alloc[1].pool_size = WMI_WBUFF_POOL_1_SIZE;
3299 	wbuff_alloc[1].buffer_size = roundup(WMI_WBUFF_LEN_POOL1 + reserve, 4);
3300 
3301 	wbuff_alloc[2].pool_id = 2;
3302 	wbuff_alloc[2].pool_size = WMI_WBUFF_POOL_2_SIZE;
3303 	wbuff_alloc[2].buffer_size = roundup(WMI_WBUFF_LEN_POOL2 + reserve, 4);
3304 
3305 	wbuff_alloc[3].pool_id = 3;
3306 	wbuff_alloc[3].pool_size = WMI_WBUFF_POOL_3_SIZE;
3307 	wbuff_alloc[3].buffer_size = roundup(WMI_WBUFF_LEN_POOL3 + reserve, 4);
3308 
3309 	wmi_handle->wbuff_handle =
3310 		wbuff_module_register(wbuff_alloc, QDF_ARRAY_SIZE(wbuff_alloc),
3311 				      reserve, 4, WBUFF_MODULE_WMI_TX);
3312 }
3313 
3314 /**
3315  * wmi_wbuff_deregister() - deregister wmi with wbuff
3316  * @wmi_handle: handle to wmi
3317  *
3318  * Return: void
3319  */
3320 static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle)
3321 {
3322 	wbuff_module_deregister(wmi_handle->wbuff_handle);
3323 	wmi_handle->wbuff_handle = NULL;
3324 }
3325 
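/**
 * wmi_unified_attach() - attach to the WMI framework
 * @scn_handle: opaque handle to the scn context
 * @param: attach parameters (target type, osdev, psoc, limits, ...)
 *
 * Allocates the soc context and the pdev-0 wmi handle, invokes the
 * target-specific attach callback registered via
 * wmi_unified_register_module(), and initializes logging, sequence
 * checking, wbuff pools and the hang-event notifier.
 *
 * Return: wmi handle on success, NULL on failure
 */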
3326 void *wmi_unified_attach(void *scn_handle,
3327 			 struct wmi_unified_attach_params *param)
3328 {
3329 	struct wmi_unified *wmi_handle;
3330 	struct wmi_soc *soc;
3331 	QDF_STATUS status;
3332 
3333 	soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc));
3334 	if (!soc)
3335 		return NULL;
3336 
3337 	wmi_handle =
3338 		(struct wmi_unified *) qdf_mem_malloc(
3339 			sizeof(struct wmi_unified));
3340 	if (!wmi_handle) {
3341 		qdf_mem_free(soc);
3342 		return NULL;
3343 	}
3344 
3345 	status = wmi_initialize_worker_context(wmi_handle);
3346 	if (QDF_IS_STATUS_ERROR(status))
3347 		goto error;
3348 
3349 	wmi_handle->soc = soc;
3350 	wmi_handle->soc->soc_idx = param->soc_id;
3351 	wmi_handle->soc->is_async_ep = param->is_async_ep;
3352 	wmi_handle->event_id = soc->event_id;
3353 	wmi_handle->event_handler = soc->event_handler;
3354 	wmi_handle->ctx = soc->ctx;
3355 	wmi_handle->wmi_events = soc->wmi_events;
3356 	wmi_handle->services = soc->services;
3357 	wmi_handle->scn_handle = scn_handle;
3358 	wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
3359 	wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
3360 	wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
3361 	wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
3362 	soc->scn_handle = scn_handle;
3363 	wmi_handle->target_type = param->target_type;
3364 	soc->target_type = param->target_type;
3365 
3366 	if (param->target_type >= WMI_MAX_TARGET_TYPE)
3367 		goto error;
3368 
3369 	if (wmi_attach_register[param->target_type]) {
3370 		wmi_attach_register[param->target_type](wmi_handle);
3371 	} else {
3372 		wmi_err("wmi attach is not registered");
3373 		goto error;
3374 	}
3375 
3376 	qdf_atomic_init(&wmi_handle->pending_cmds);
3377 	qdf_atomic_init(&wmi_handle->is_target_suspended);
3378 	qdf_atomic_init(&wmi_handle->is_target_suspend_acked);
3379 	qdf_atomic_init(&wmi_handle->num_stats_over_qmi);
3380 	qdf_atomic_init(&wmi_handle->is_wow_enable_ack_failed);
3381 	wmi_runtime_pm_init(wmi_handle);
3382 	wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0);
3383 
3384 	wmi_interface_sequence_init(wmi_handle);
3385 	/* Assign target cookie capability */
3386 	wmi_handle->use_cookie = param->use_cookie;
3387 	wmi_handle->osdev = param->osdev;
3388 	wmi_handle->wmi_stopinprogress = 0;
3389 	wmi_handle->wmi_max_cmds = param->max_commands;
3390 	soc->wmi_max_cmds = param->max_commands;
3391 	/* Increase the ref count once refcount infra is present */
3392 	soc->wmi_psoc = param->psoc;
3393 	qdf_spinlock_create(&soc->ctx_lock);
3394 	soc->ops = wmi_handle->ops;
3395 	soc->wmi_pdev[0] = wmi_handle;
3396 	if (wmi_ext_dbgfs_init(wmi_handle, 0) != QDF_STATUS_SUCCESS)
3397 		wmi_err("Failed to initialize wmi extended debugfs");
3398 
3399 	wmi_wbuff_register(wmi_handle);
3400 
3401 	wmi_hang_event_notifier_register(wmi_handle);
3402 
3403 	wmi_minidump_attach(wmi_handle);
3404 
3405 	return wmi_handle;
3406 
3407 error:
3408 	qdf_mem_free(soc);
3409 	qdf_mem_free(wmi_handle);
3410 
3411 	return NULL;
3412 }
3413 
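/**
 * wmi_unified_detach() - detach from the WMI framework
 * @wmi_handle: handle to wmi
 *
 * Tears down in roughly the reverse order of attach: minidump, hang-event
 * notifier and wbuff are released first, then each pdev's work queues,
 * queued rx buffers, logging state and locks, and finally the soc context
 * and service bitmaps.
 *
 * Return: none
 */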
3414 void wmi_unified_detach(struct wmi_unified *wmi_handle)
3415 {
3416 	wmi_buf_t buf;
3417 	struct wmi_soc *soc;
3418 	uint8_t i;
3419 
3420 	wmi_minidump_detach(wmi_handle);
3421 
3422 	wmi_hang_event_notifier_unregister();
3423 
3424 	wmi_wbuff_deregister(wmi_handle);
3425 
3426 	soc = wmi_handle->soc;
3427 	for (i = 0; i < WMI_MAX_RADIOS; i++) {
3428 		if (soc->wmi_pdev[i]) {
3429 			qdf_flush_workqueue(0,
3430 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3431 			qdf_destroy_workqueue(0,
3432 				soc->wmi_pdev[i]->wmi_rx_work_queue);
3433 			wmi_debugfs_remove(soc->wmi_pdev[i]);
3434 			buf = qdf_nbuf_queue_remove(
3435 					&soc->wmi_pdev[i]->event_queue);
3436 			while (buf) {
3437 				qdf_nbuf_free(buf);
3438 				buf = qdf_nbuf_queue_remove(
3439 						&soc->wmi_pdev[i]->event_queue);
3440 			}
3441 
3442 			qdf_flush_workqueue(0,
3443 				soc->wmi_pdev[i]->wmi_rx_diag_work_queue);
3444 			qdf_destroy_workqueue(0,
3445 				soc->wmi_pdev[i]->wmi_rx_diag_work_queue);
3446 			buf = qdf_nbuf_queue_remove(
3447 					&soc->wmi_pdev[i]->diag_event_queue);
3448 			while (buf) {
3449 				qdf_nbuf_free(buf);
3450 				buf = qdf_nbuf_queue_remove(
3451 					&soc->wmi_pdev[i]->diag_event_queue);
3452 			}
3453 
3454 			wmi_log_buffer_free(soc->wmi_pdev[i]);
3455 
3456 			/* Free events logs list */
3457 			if (soc->wmi_pdev[i]->events_logs_list)
3458 				qdf_mem_free(
3459 					soc->wmi_pdev[i]->events_logs_list);
3460 
3461 			qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock);
3462 			qdf_spinlock_destroy(
3463 					&soc->wmi_pdev[i]->diag_eventq_lock);
3464 
3465 			wmi_interface_sequence_deinit(soc->wmi_pdev[i]);
3466 			wmi_ext_dbgfs_deinit(soc->wmi_pdev[i]);
3467 			wmi_clear_wow_enable_ack_failed(soc->wmi_pdev[i]);
3468 
3469 			qdf_mem_free(soc->wmi_pdev[i]);
3470 		}
3471 	}
3472 	qdf_spinlock_destroy(&soc->ctx_lock);
3473 
3474 	if (soc->wmi_service_bitmap) {
3475 		qdf_mem_free(soc->wmi_service_bitmap);
3476 		soc->wmi_service_bitmap = NULL;
3477 	}
3478 
3479 	if (soc->wmi_ext_service_bitmap) {
3480 		qdf_mem_free(soc->wmi_ext_service_bitmap);
3481 		soc->wmi_ext_service_bitmap = NULL;
3482 	}
3483 
3484 	if (soc->wmi_ext2_service_bitmap) {
3485 		qdf_mem_free(soc->wmi_ext2_service_bitmap);
3486 		soc->wmi_ext2_service_bitmap = NULL;
3487 	}
3488 
3489 	/* Decrease the ref count once refcount infra is present */
3490 	soc->wmi_psoc = NULL;
3491 	qdf_mem_free(soc);
3492 }
3493 
3494 void
3495 wmi_unified_remove_work(struct wmi_unified *wmi_handle)
3496 {
3497 	wmi_buf_t buf;
3498 
3499 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue);
3500 	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
3501 	buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3502 	while (buf) {
3503 		qdf_nbuf_free(buf);
3504 		buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
3505 	}
3506 	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
3507 
3508 	/* Remove diag events work */
3509 	qdf_flush_workqueue(0, wmi_handle->wmi_rx_diag_work_queue);
3510 	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
3511 	buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3512 	while (buf) {
3513 		qdf_nbuf_free(buf);
3514 		buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
3515 	}
3516 	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
3517 }
3518 
3519 /**
3520  * wmi_htc_tx_complete() - Process htc tx completion
3521  *
3522  * @ctx: handle to wmi
3523  * @htc_pkt: pointer to htc packet
3524  *
3525  * Return: none.
3526  */
3527 static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
3528 {
3529 	struct wmi_soc *soc = (struct wmi_soc *) ctx;
3530 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3531 	u_int8_t *buf_ptr;
3532 	u_int32_t len;
3533 	struct wmi_unified *wmi_handle;
3534 #ifdef WMI_INTERFACE_EVENT_LOGGING
3535 	struct wmi_debug_log_info *log_info;
3536 	uint32_t cmd_id;
3537 	uint8_t *offset_ptr;
3538 	qdf_dma_addr_t dma_addr;
3539 	uint64_t phy_addr;
3540 #endif
3541 
3542 	ASSERT(wmi_cmd_buf);
3543 	wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint);
3544 	if (!wmi_handle) {
3545 		wmi_err("Unable to get wmi handle");
3546 		QDF_ASSERT(0);
3547 		return;
3548 	}
3549 	buf_ptr = (u_int8_t *)wmi_buf_data(wmi_cmd_buf);
3550 #ifdef WMI_INTERFACE_EVENT_LOGGING
3551 	log_info = &wmi_handle->log_info;
3552 
3553 	if (log_info->wmi_logging_enable) {
3554 		cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf),
3555 				WMI_CMD_HDR, COMMANDID);
3556 
3557 		dma_addr = QDF_NBUF_CB_PADDR(wmi_cmd_buf);
3558 		phy_addr = qdf_mem_virt_to_phys(qdf_nbuf_data(wmi_cmd_buf));
3559 
3560 		qdf_spin_lock_bh(&log_info->wmi_record_lock);
3561 		/* Record 16 bytes of WMI cmd tx complete data
3562 		 * - exclude TLV and WMI headers
3563 		 */
3564 		offset_ptr = buf_ptr + wmi_handle->soc->buf_offset_command;
3565 		if (wmi_handle->ops->is_management_record(cmd_id)) {
3566 			WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3567 						       offset_ptr);
3568 		} else {
3569 			if (wmi_handle->ops->is_force_fw_hang_cmd(cmd_id)) {
3570 				wmi_info("Tx completion received for WMI_FORCE_FW_HANG_CMDID, current_time:%ld",
3571 					 qdf_mc_timer_get_system_time());
3572 			}
3573 
3574 			WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
3575 						  offset_ptr, dma_addr,
3576 						  phy_addr);
3577 		}
3578 
3579 		qdf_spin_unlock_bh(&log_info->wmi_record_lock);
3580 	}
3581 #endif
3582 
3583 	wmi_interface_sequence_check(wmi_handle, wmi_cmd_buf);
3584 
3585 	len = qdf_nbuf_len(wmi_cmd_buf);
3586 	qdf_mem_zero(buf_ptr, len);
3587 	wmi_buf_free(wmi_cmd_buf);
3588 	qdf_mem_free(htc_pkt);
3589 	qdf_atomic_dec(&wmi_handle->pending_cmds);
3590 }
3591 
3592 #ifdef FEATURE_RUNTIME_PM
3593 /**
3594  * wmi_htc_log_pkt() - Print information of WMI command from HTC packet
3595  *
3596  * @ctx: handle of WMI context
3597  * @htc_pkt: handle of HTC packet
3598  *
3599  * Return: none
3600  */
3601 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3602 {
3603 	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
3604 	uint32_t cmd_id;
3605 
3606 	ASSERT(wmi_cmd_buf);
3607 	cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), WMI_CMD_HDR,
3608 			       COMMANDID);
3609 
3610 	wmi_debug("WMI command from HTC packet: %s, ID: %d",
3611 		 wmi_id_to_name(cmd_id), cmd_id);
3612 }
3613 #else
3614 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
3615 {
3616 }
3617 #endif
3618 
3619 /**
3620  * wmi_connect_pdev_htc_service() -  WMI API to get connect to HTC service
3621  * @soc: handle to WMI SoC
3622  * @pdev_idx: Pdev index
3623  *
3624  * Return: QDF_STATUS
3625  */
3626 static QDF_STATUS wmi_connect_pdev_htc_service(struct wmi_soc *soc,
3627 					       uint32_t pdev_idx)
3628 {
3629 	QDF_STATUS status;
3630 	struct htc_service_connect_resp response;
3631 	struct htc_service_connect_req connect;
3632 
3633 	OS_MEMZERO(&connect, sizeof(connect));
3634 	OS_MEMZERO(&response, sizeof(response));
3635 
3636 	/* meta data is unused for now */
3637 	connect.pMetaData = NULL;
3638 	connect.MetaDataLength = 0;
3639 	/* these fields are the same for all service endpoints */
3640 	connect.EpCallbacks.pContext = soc;
3641 	connect.EpCallbacks.EpTxCompleteMultiple =
3642 		NULL /* Control path completion ar6000_tx_complete */;
3643 	connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */;
3644 	connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */;
3645 	connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */;
3646 	connect.EpCallbacks.EpTxComplete =
3647 		wmi_htc_tx_complete /* ar6000_tx_complete */;
3648 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3649 
3650 	/* connect to control service */
3651 	connect.service_id = soc->svc_ids[pdev_idx];
3652 	status = htc_connect_service(soc->htc_handle, &connect, &response);
3653 
3654 	if (QDF_IS_STATUS_ERROR(status)) {
3655 		wmi_err("Failed to connect to WMI CONTROL service status:%d",
3656 			 status);
3657 		return status;
3658 	}
3659 
3660 	if (soc->is_async_ep)
3661 		htc_set_async_ep(soc->htc_handle, response.Endpoint, true);
3662 
3663 	soc->wmi_endpoint_id[pdev_idx] = response.Endpoint;
3664 	soc->max_msg_len[pdev_idx] = response.MaxMsgLength;
3665 
3666 	return QDF_STATUS_SUCCESS;
3667 }
3668 
3669 QDF_STATUS
3670 wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle,
3671 				HTC_HANDLE htc_handle)
3672 {
3673 	uint32_t i;
3674 	uint8_t wmi_ep_count;
3675 
3676 	wmi_handle->soc->htc_handle = htc_handle;
3677 
3678 	wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle);
3679 	if (wmi_ep_count > WMI_MAX_RADIOS)
3680 		return QDF_STATUS_E_FAULT;
3681 
3682 	for (i = 0; i < wmi_ep_count; i++)
3683 		wmi_connect_pdev_htc_service(wmi_handle->soc, i);
3684 
3685 	wmi_handle->htc_handle = htc_handle;
3686 	wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0];
3687 	wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0];
3688 
3689 	return QDF_STATUS_SUCCESS;
3690 }
3691 
3692 #if defined(WLAN_FEATURE_WMI_DIAG_OVER_CE7) || \
3693 	defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
3694 QDF_STATUS wmi_diag_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
3695 					     HTC_HANDLE htc_handle)
3696 {
3697 	QDF_STATUS status;
3698 	struct htc_service_connect_resp response = {0};
3699 	struct htc_service_connect_req connect = {0};
3700 
3701 	/* meta data is unused for now */
3702 	connect.pMetaData = NULL;
3703 	connect.MetaDataLength = 0;
3704 	connect.EpCallbacks.pContext = wmi_handle->soc;
3705 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3706 	connect.EpCallbacks.EpRecv = wmi_control_diag_rx /* wmi diag rx */;
3707 	connect.EpCallbacks.EpRecvRefill = NULL;
3708 	connect.EpCallbacks.EpSendFull = NULL;
3709 	connect.EpCallbacks.EpTxComplete = NULL;
3710 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3711 
3712 	/* connect to wmi diag service */
3713 	connect.service_id = WMI_CONTROL_DIAG_SVC;
3714 	status = htc_connect_service(htc_handle, &connect, &response);
3715 
3716 	if (QDF_IS_STATUS_ERROR(status)) {
3717 		wmi_err("Failed to connect to WMI DIAG service status:%d",
3718 			status);
3719 		return status;
3720 	}
3721 
3722 	if (wmi_handle->soc->is_async_ep)
3723 		htc_set_async_ep(htc_handle, response.Endpoint, true);
3724 
3725 	wmi_handle->soc->wmi_diag_endpoint_id = response.Endpoint;
3726 
3727 	return QDF_STATUS_SUCCESS;
3728 }
3729 #endif
3730 
3731 #if defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
3732 QDF_STATUS wmi_dbr_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
3733 					    HTC_HANDLE htc_handle)
3734 {
3735 	QDF_STATUS status;
3736 	struct htc_service_connect_resp response = {0};
3737 	struct htc_service_connect_req connect = {0};
3738 
3739 	/* meta data is unused for now */
3740 	connect.pMetaData = NULL;
3741 	connect.MetaDataLength = 0;
3742 	connect.EpCallbacks.pContext = wmi_handle->soc;
3743 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3744 	connect.EpCallbacks.EpRecv = wmi_control_dbr_rx /* wmi dbr rx */;
3745 	connect.EpCallbacks.EpRecvRefill = NULL;
3746 	connect.EpCallbacks.EpSendFull = NULL;
3747 	connect.EpCallbacks.EpTxComplete = NULL;
3748 	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;
3749 
3750 	/* connect to wmi dbr service */
3751 	connect.service_id = WMI_CONTROL_DBR_SVC;
3752 	status = htc_connect_service(htc_handle, &connect, &response);
3753 
3754 	if (QDF_IS_STATUS_ERROR(status)) {
3755 		wmi_err("Failed to connect to WMI DBR service status:%d",
3756 			status);
3757 		return status;
3758 	}
3759 
3760 	if (wmi_handle->soc->is_async_ep)
3761 		htc_set_async_ep(htc_handle, response.Endpoint, true);
3762 
3763 	wmi_handle->soc->wmi_dbr_endpoint_id = response.Endpoint;
3764 
3765 	return QDF_STATUS_SUCCESS;
3766 }
3767 #endif
3768 
3769 int wmi_get_host_credits(wmi_unified_t wmi_handle)
3770 {
3771 	int host_credits = 0;
3772 
3773 	htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle,
3774 						 &host_credits);
3775 	return host_credits;
3776 }
3777 
3778 int wmi_get_pending_cmds(wmi_unified_t wmi_handle)
3779 {
3780 	return qdf_atomic_read(&wmi_handle->pending_cmds);
3781 }
3782 
3783 void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val)
3784 {
3785 	qdf_atomic_set(&wmi_handle->is_target_suspended, val);
3786 }
3787 
3788 void wmi_set_target_suspend_acked(wmi_unified_t wmi_handle, A_BOOL val)
3789 {
3790 	qdf_atomic_set(&wmi_handle->is_target_suspend_acked, val);
3791 	qdf_atomic_set(&wmi_handle->num_stats_over_qmi, 0);
3792 }
3793 
3794 bool wmi_is_target_suspended(struct wmi_unified *wmi_handle)
3795 {
3796 	return qdf_atomic_read(&wmi_handle->is_target_suspended);
3797 }
3798 qdf_export_symbol(wmi_is_target_suspended);
3799 
3800 bool wmi_is_target_suspend_acked(struct wmi_unified *wmi_handle)
3801 {
3802 	return qdf_atomic_read(&wmi_handle->is_target_suspend_acked);
3803 }
3804 qdf_export_symbol(wmi_is_target_suspend_acked);
3805 
3806 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
3807 void wmi_set_qmi_stats(wmi_unified_t wmi_handle, bool val)
3808 {
3809 	wmi_handle->is_qmi_stats_enabled = val;
3810 }
3811 
3812 bool wmi_is_qmi_stats_enabled(struct wmi_unified *wmi_handle)
3813 {
3814 	return wmi_handle->is_qmi_stats_enabled;
3815 }
3816 #endif
3817 
3818 void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag)
3819 {
3820 	wmi_handle->tag_crash_inject = flag;
3821 }
3822 
3823 void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val)
3824 {
3825 	qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val);
3826 }
3827 
3828 void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val)
3829 {
3830 	wmi_handle->tgt_force_assert_enable = val;
3831 }
3832 
3833 int
3834 wmi_stop(wmi_unified_t wmi_handle)
3835 {
3836 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3837 		  "WMI Stop");
3838 	wmi_handle->wmi_stopinprogress = 1;
3839 	return 0;
3840 }
3841 
3842 int
3843 wmi_start(wmi_unified_t wmi_handle)
3844 {
3845 	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3846 		  "WMI Start");
3847 	wmi_handle->wmi_stopinprogress = 0;
3848 	return 0;
3849 }
3850 
3851 bool
3852 wmi_is_blocked(wmi_unified_t wmi_handle)
3853 {
3854 	return (!(!wmi_handle->wmi_stopinprogress));
3855 }
3856 
3857 void
3858 wmi_flush_endpoint(wmi_unified_t wmi_handle)
3859 {
3860 	htc_flush_endpoint(wmi_handle->htc_handle,
3861 		wmi_handle->wmi_endpoint_id, 0);
3862 }
3863 qdf_export_symbol(wmi_flush_endpoint);
3864 
3865 HTC_ENDPOINT_ID wmi_get_endpoint(wmi_unified_t wmi_handle)
3866 {
3867 	return wmi_handle->wmi_endpoint_id;
3868 }
3869 
3870 void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle,
3871 				   uint32_t *pdev_id_map,
3872 				   uint8_t size)
3873 {
3874 	if (wmi_handle->target_type == WMI_TLV_TARGET)
3875 		wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle,
3876 							       pdev_id_map,
3877 							       size);
3878 }
3879 
3880 int __wmi_validate_handle(wmi_unified_t wmi_handle, const char *func)
3881 {
3882 	if (!wmi_handle) {
3883 		wmi_err("Invalid WMI handle (via %s)", func);
3884 		return -EINVAL;
3885 	}
3886 
3887 	return 0;
3888 }
3889