1 /* 2 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /* 21 * Host WMI unified implementation 22 */ 23 #include "htc_api.h" 24 #include "htc_api.h" 25 #include "wmi_unified_priv.h" 26 #include "wmi_unified_api.h" 27 #include "qdf_module.h" 28 #include "qdf_platform.h" 29 #include "qdf_ssr_driver_dump.h" 30 #ifdef WMI_EXT_DBG 31 #include "qdf_list.h" 32 #include "qdf_atomic.h" 33 #endif 34 35 #ifndef WMI_NON_TLV_SUPPORT 36 #include "wmi_tlv_helper.h" 37 #endif 38 39 #include <linux/debugfs.h> 40 #include <target_if.h> 41 #include <qdf_debugfs.h> 42 #include "wmi_filtered_logging.h" 43 #include <wmi_hang_event.h> 44 45 /* This check for CONFIG_WIN temporary added due to redeclaration compilation 46 error in MCL. Error is caused due to inclusion of wmi.h in wmi_unified_api.h 47 which gets included here through ol_if_athvar.h. Eventually it is expected that 48 wmi.h will be removed from wmi_unified_api.h after cleanup, which will need 49 WMI_CMD_HDR to be defined here. 
*/
/* Copied from wmi.h */

/* MS: mask-and-shift — extract bit-field _f from word _v using the
 * _f##_MASK / _f##_LSB companion defines.
 */
#undef MS
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
/* SM: shift-and-mask — place value _v into the bit-field position of _f */
#undef SM
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
/* WO: convert the byte offset _f##_OFFSET into a 32-bit word offset */
#undef WO
#define WO(_f) ((_f##_OFFSET) >> 2)

/* GET_FIELD/SET_FIELD: read/modify bit-field _f of the 32-bit word that
 * holds it, addressed relative to _addr.
 */
#undef GET_FIELD
#define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f)
#undef SET_FIELD
#define SET_FIELD(_addr, _f, _val) \
	(*((uint32_t *)(_addr) + WO(_f)) = \
		(*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f))

/* Typed wrappers: access field _f of message type _msg_type in _msg_buf by
 * token-pasting the full _msg_type##_##_f define family.
 */
#define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \
	GET_FIELD(_msg_buf, _msg_type ## _ ## _f)

#define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \
	SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val)

/* WMI endpoint identifiers carried in WMI_CMD_HDR's reserved bits */
#define WMI_EP_APASS           0x0
#define WMI_EP_LPASS           0x1
#define WMI_EP_SENSOR          0x2

/* Permission bits for the WMI "infos" debugfs file */
#define WMI_INFOS_DBG_FILE_PERM (QDF_FILE_USR_READ | \
				 QDF_FILE_USR_WRITE | \
				 QDF_FILE_GRP_READ | \
				 QDF_FILE_OTH_READ)

/*
 * * Control Path
 * */

/* Header prepended to every WMI message; layout must match firmware, hence
 * the PREPACK/POSTPACK packing annotations.
 */
typedef PREPACK struct {
	uint32_t	commandId:24,
			reserved:2, /* used for WMI endpoint ID */
			plt_priv:6; /* platform private */
} POSTPACK WMI_CMD_HDR;         /* used for commands and events */

/* Bit-field layout defines for WMI_CMD_HDR, consumed by MS/SM/WO above */
#define WMI_CMD_HDR_COMMANDID_LSB           0
#define WMI_CMD_HDR_COMMANDID_MASK          0x00ffffff
#define WMI_CMD_HDR_COMMANDID_OFFSET        0x00000000
#define WMI_CMD_HDR_WMI_ENDPOINTID_MASK     0x03000000
#define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET   24
#define WMI_CMD_HDR_PLT_PRIV_LSB            24
#define WMI_CMD_HDR_PLT_PRIV_MASK           0xff000000
#define WMI_CMD_HDR_PLT_PRIV_OFFSET         0x00000000
/* end of copy wmi.h */

/* Minimum headroom reserved in front of a WMI command buffer */
#define WMI_MIN_HEAD_ROOM 64

/* WBUFF pool sizes for WMI */
/* Allocation of size 256 bytes */
#define WMI_WBUFF_POOL_0_SIZE 128
/* Allocation of size 512 bytes */
#define WMI_WBUFF_POOL_1_SIZE 16
/* Allocation of size 1024 bytes */
#define WMI_WBUFF_POOL_2_SIZE 8
/* Allocation of size 2048 bytes */
#define WMI_WBUFF_POOL_3_SIZE 8

/* Upper bound on diag events drained per rx work invocation */
#define RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT 500

#ifdef WMI_INTERFACE_EVENT_LOGGING
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
/* TODO Cleanup this backported function */
/* Varargs shim so wmi_bp_seq_printf() has the same call shape on kernels
 * where seq_printf() stopped returning a value; forwards to seq_vprintf().
 */
static int wmi_bp_seq_printf(qdf_debugfs_file_t m, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	seq_vprintf(m, f, args);
	va_end(args);

	return 0;
}
#else
#define wmi_bp_seq_printf(m, fmt, ...) seq_printf((m), fmt, ##__VA_ARGS__)
#endif

#ifndef MAX_WMI_INSTANCES
#define CUSTOM_MGMT_CMD_DATA_SIZE 4
#endif

#ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
/* Statically allocated log rings and their tail indices, used when dynamic
 * allocation of the debug buffers is not enabled.
 */
/* WMI commands */
uint32_t g_wmi_command_buf_idx = 0;
struct wmi_command_debug wmi_command_log_buffer[WMI_CMD_DEBUG_MAX_ENTRY];

/* WMI commands TX completed */
uint32_t g_wmi_command_tx_cmp_buf_idx = 0;
struct wmi_command_cmp_debug
	wmi_command_tx_cmp_log_buffer[WMI_CMD_CMPL_DEBUG_MAX_ENTRY];

/* WMI events when processed */
uint32_t g_wmi_event_buf_idx = 0;
struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];

/* WMI events when queued */
uint32_t g_wmi_rx_event_buf_idx = 0;
struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
#endif

/**
 * wmi_minidump_detach() - unregister WMI debug log regions from the SSR
 * driver dump and remove the tx-completion ring from the minidump table
 * @wmi_handle: WMI handle
 *
 * Mirror of wmi_minidump_attach(): unregisters every region that attach
 * registered, in reverse order, then removes the minidump entry.
 *
 * Return: None
 */
static void wmi_minidump_detach(struct wmi_unified *wmi_handle)
{
	struct wmi_log_buf_t *info =
		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);

	qdf_ssr_driver_dump_unregister_region("wmi_debug_log_info");
	qdf_ssr_driver_dump_unregister_region("wmi_rx_event_idx");
	qdf_ssr_driver_dump_unregister_region("wmi_rx_event");
	qdf_ssr_driver_dump_unregister_region("wmi_event_log_idx");
	qdf_ssr_driver_dump_unregister_region("wmi_event_log");
	qdf_ssr_driver_dump_unregister_region("wmi_command_log_idx");
	qdf_ssr_driver_dump_unregister_region("wmi_command_log");
	qdf_ssr_driver_dump_unregister_region("wmi_tx_cmp_idx");
	qdf_ssr_driver_dump_unregister_region("wmi_tx_cmp");
	qdf_minidump_remove(info->buf, buf_size, "wmi_tx_cmp");
}

/**
 * wmi_minidump_attach() - register the WMI debug log rings (command,
 * tx-completion, event and rx-event) and their tail indices with the SSR
 * driver dump, and add the tx-completion ring to the minidump table
 * @wmi_handle: WMI handle
 *
 * Return: None
 */
static void wmi_minidump_attach(struct wmi_unified *wmi_handle)
{
	struct wmi_log_buf_t *info =
		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
	uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);

	qdf_minidump_log(info->buf, buf_size, "wmi_tx_cmp");

	qdf_ssr_driver_dump_register_region("wmi_tx_cmp", info->buf, buf_size);
	qdf_ssr_driver_dump_register_region("wmi_tx_cmp_idx",
					    info->p_buf_tail_idx,
					    sizeof(*info->p_buf_tail_idx));

	info = &wmi_handle->log_info.wmi_command_log_buf_info;
	buf_size = info->size * sizeof(struct wmi_command_debug);

	qdf_ssr_driver_dump_register_region("wmi_command_log", info->buf,
					    buf_size);
	qdf_ssr_driver_dump_register_region("wmi_command_log_idx",
					    info->p_buf_tail_idx,
					    sizeof(*info->p_buf_tail_idx));

	info = &wmi_handle->log_info.wmi_event_log_buf_info;
	buf_size = info->size * sizeof(struct wmi_event_debug);

	qdf_ssr_driver_dump_register_region("wmi_event_log", info->buf,
					    buf_size);
	qdf_ssr_driver_dump_register_region("wmi_event_log_idx",
					    info->p_buf_tail_idx,
					    sizeof(*info->p_buf_tail_idx));

	info = &wmi_handle->log_info.wmi_rx_event_log_buf_info;
	buf_size = info->size * sizeof(struct wmi_event_debug);

	qdf_ssr_driver_dump_register_region("wmi_rx_event", info->buf,
					    buf_size);
	qdf_ssr_driver_dump_register_region("wmi_rx_event_idx",
					    info->p_buf_tail_idx,
					    sizeof(*info->p_buf_tail_idx));

	qdf_ssr_driver_dump_register_region("wmi_debug_log_info",
					    &wmi_handle->log_info,
					    sizeof(wmi_handle->log_info));
}

/* WMI_COMMAND_RECORD: append command id (a) and payload (b) to handle h's
 * command log ring. Wraps the tail index to 0 when it reaches
 * wmi_cmd_log_max_entry, writes .command, copies wmi_record_max_length
 * bytes of payload, timestamps the entry, then advances tail and length.
 * Callers are expected to hold log_info.wmi_record_lock (the macro itself
 * takes no lock).
 */
#define WMI_COMMAND_RECORD(h, a, b) {					\
	if (wmi_cmd_log_max_entry <=					\
		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))	\
		*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\
	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\
		.command = a;						\
	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
		wmi_command_log_buf_info.buf)				\
		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\
		b, wmi_record_max_length);				\
	((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
		[*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\
		time = qdf_get_log_timestamp();				\
	(*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++;	\
	h->log_info.wmi_command_log_buf_info.length++;			\
}

/* WMI_COMMAND_TX_CMP_RECORD: same ring-append pattern as
 * WMI_COMMAND_RECORD, but for the tx-completion log; additionally records
 * the DMA address (da) and physical address (pa) of the completed buffer.
 */
#define WMI_COMMAND_TX_CMP_RECORD(h, a, b, da, pa) {			\
	if (wmi_cmd_cmpl_log_max_entry <=				\
		*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\
		*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
			p_buf_tail_idx) = 0;				\
	((struct wmi_command_cmp_debug *)h->log_info.			\
		wmi_command_tx_cmp_log_buf_info.buf)			\
		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
			p_buf_tail_idx)].				\
		command = a;						\
	qdf_mem_copy(((struct wmi_command_cmp_debug *)h->log_info.	\
		wmi_command_tx_cmp_log_buf_info.buf)			\
		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
			p_buf_tail_idx)].				\
		data, b, wmi_record_max_length);			\
	((struct wmi_command_cmp_debug *)h->log_info.			\
		wmi_command_tx_cmp_log_buf_info.buf)			\
		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
			p_buf_tail_idx)].				\
		time = qdf_get_log_timestamp();				\
	((struct wmi_command_cmp_debug *)h->log_info.			\
		wmi_command_tx_cmp_log_buf_info.buf)			\
		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
			p_buf_tail_idx)].				\
		dma_addr = da;						\
	((struct wmi_command_cmp_debug *)h->log_info.			\
		wmi_command_tx_cmp_log_buf_info.buf)			\
		[*(h->log_info.wmi_command_tx_cmp_log_buf_info.		\
			p_buf_tail_idx)].				\
		phy_addr = pa;						\
	(*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\
	h->log_info.wmi_command_tx_cmp_log_buf_info.length++;		\
}

/* WMI_EVENT_RECORD: append event id (a) and payload (b) to the processed-
 * event log ring; wrap-at-max / copy / timestamp / advance, as above.
 */
#define WMI_EVENT_RECORD(h, a, b) {					\
	if (wmi_event_log_max_entry <=					\
		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))	\
		*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\
	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].	\
		event = a;						\
	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
		wmi_event_log_buf_info.buf)				\
		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\
		wmi_record_max_length);					\
	((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
		[*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\
		qdf_get_log_timestamp();				\
	(*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++;	\
	h->log_info.wmi_event_log_buf_info.length++;			\
}

/* WMI_RX_EVENT_RECORD: as WMI_EVENT_RECORD, but logs events at the point
 * they are queued for processing (rx) rather than when processed.
 */
#define WMI_RX_EVENT_RECORD(h, a, b) {					\
	if (wmi_event_log_max_entry <=					\
		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\
		*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\
	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
		event = a;						\
	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
		wmi_rx_event_log_buf_info.buf)				\
		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
		data, b, wmi_record_max_length);			\
	((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
		[*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
		time = qdf_get_log_timestamp();				\
	(*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++;	\
	h->log_info.wmi_rx_event_log_buf_info.length++;			\
}

#ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
/* Static rings for management-path and diag logging (static alloc mode) */
uint32_t g_wmi_mgmt_command_buf_idx = 0;
struct
wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_TX_DEBUG_MAX_ENTRY];

/* wmi_mgmt commands TX completed */
uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0;
struct wmi_command_debug
wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY];

/* wmi_mgmt events when received */
uint32_t g_wmi_mgmt_rx_event_buf_idx = 0;
struct wmi_event_debug
wmi_mgmt_rx_event_log_buffer[WMI_MGMT_RX_DEBUG_MAX_ENTRY];

/* wmi_diag events when received */
uint32_t g_wmi_diag_rx_event_buf_idx = 0;
struct wmi_event_debug
wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY];
#endif

/* WMI_MGMT_COMMAND_RECORDWMI management-command log append; same
 * wrap/copy/timestamp/advance pattern bounded by wmi_mgmt_tx_log_max_entry.
 */
#define WMI_MGMT_COMMAND_RECORD(h, a, b) {				\
	if (wmi_mgmt_tx_log_max_entry <=				\
		*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \
		*(h->log_info.wmi_mgmt_command_log_buf_info.		\
			p_buf_tail_idx) = 0;				\
	((struct wmi_command_debug *)h->log_info.			\
		wmi_mgmt_command_log_buf_info.buf)			\
		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
		command = a;						\
	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
		wmi_mgmt_command_log_buf_info.buf)			\
		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
		data, b,						\
		wmi_record_max_length);					\
	((struct wmi_command_debug *)h->log_info.			\
		wmi_mgmt_command_log_buf_info.buf)			\
		[*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
		time = qdf_get_log_timestamp();				\
	(*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\
	h->log_info.wmi_mgmt_command_log_buf_info.length++;		\
}

/* WMI_MGMT_COMMAND_TX_CMP_RECORD: management-command tx-completion log
 * append, bounded by wmi_mgmt_tx_cmpl_log_max_entry.
 */
#define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) {			\
	if (wmi_mgmt_tx_cmpl_log_max_entry <=				\
		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
			p_buf_tail_idx))				\
		*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
			p_buf_tail_idx) = 0;				\
	((struct wmi_command_debug *)h->log_info.			\
		wmi_mgmt_command_tx_cmp_log_buf_info.buf)		\
		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
			p_buf_tail_idx)].command = a;			\
	qdf_mem_copy(((struct wmi_command_debug *)h->log_info.		\
		mgmt_command_tx_cmp_log_buf_info.buf)			\
		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
			p_buf_tail_idx)].data, b,			\
		wmi_record_max_length);					\
	((struct wmi_command_debug *)h->log_info.			\
		wmi_mgmt_command_tx_cmp_log_buf_info.buf)		\
		[*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.	\
			p_buf_tail_idx)].time =				\
		qdf_get_log_timestamp();				\
	(*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.		\
		p_buf_tail_idx))++;					\
	h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++;	\
}

/* WMI_MGMT_RX_EVENT_RECORD: management rx-event log append.
 * NOTE(review): the trailing ';' after "while (0)" defeats the purpose of
 * the do/while idiom (an extra empty statement is emitted at expansion).
 */
#define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do {				\
	if (wmi_mgmt_rx_log_max_entry <=				\
		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\
		*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\
	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\
		.event = a;						\
	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
		wmi_mgmt_event_log_buf_info.buf)			\
		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
		data, b, wmi_record_max_length);			\
	((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
		[*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
		time = qdf_get_log_timestamp();				\
	(*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++;	\
	h->log_info.wmi_mgmt_event_log_buf_info.length++;		\
} while (0);

/* WMI_DIAG_RX_EVENT_RECORD: diag rx-event log append, bounded by
 * wmi_diag_log_max_entry. Same trailing-';' note as above.
 */
#define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do {				\
	if (wmi_diag_log_max_entry <=					\
		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\
		*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\
	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\
		.event = a;						\
	qdf_mem_copy(((struct wmi_event_debug *)h->log_info.		\
		wmi_diag_event_log_buf_info.buf)			\
		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
		data, b, wmi_record_max_length);			\
	((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
		[*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
		time = qdf_get_log_timestamp();				\
	(*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++;	\
	h->log_info.wmi_diag_event_log_buf_info.length++;		\
} while (0);

/* These are defined to made it as module param, which can be configured */
/* WMI Commands */
uint32_t wmi_cmd_log_max_entry = WMI_CMD_DEBUG_MAX_ENTRY;
uint32_t wmi_cmd_cmpl_log_max_entry = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
/* WMI Events */
uint32_t wmi_event_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY;
/* WMI MGMT Tx */
uint32_t wmi_mgmt_tx_log_max_entry = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
uint32_t wmi_mgmt_tx_cmpl_log_max_entry = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
/* WMI MGMT Rx */
uint32_t wmi_mgmt_rx_log_max_entry = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
/* WMI Diag Event */
420 uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY; 421 /* WMI capture size */ 422 uint32_t wmi_record_max_length = WMI_DEBUG_ENTRY_MAX_LENGTH; 423 uint32_t wmi_display_size = 100; 424 425 /** 426 * wmi_log_init() - Initialize WMI event logging 427 * @wmi_handle: WMI handle. 428 * 429 * Return: Initialization status 430 */ 431 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC 432 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle) 433 { 434 struct wmi_log_buf_t *cmd_log_buf = 435 &wmi_handle->log_info.wmi_command_log_buf_info; 436 struct wmi_log_buf_t *cmd_tx_cmpl_log_buf = 437 &wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info; 438 439 struct wmi_log_buf_t *event_log_buf = 440 &wmi_handle->log_info.wmi_event_log_buf_info; 441 struct wmi_log_buf_t *rx_event_log_buf = 442 &wmi_handle->log_info.wmi_rx_event_log_buf_info; 443 444 struct wmi_log_buf_t *mgmt_cmd_log_buf = 445 &wmi_handle->log_info.wmi_mgmt_command_log_buf_info; 446 struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf = 447 &wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info; 448 struct wmi_log_buf_t *mgmt_event_log_buf = 449 &wmi_handle->log_info.wmi_mgmt_event_log_buf_info; 450 struct wmi_log_buf_t *diag_event_log_buf = 451 &wmi_handle->log_info.wmi_diag_event_log_buf_info; 452 453 /* WMI commands */ 454 cmd_log_buf->length = 0; 455 cmd_log_buf->buf_tail_idx = 0; 456 cmd_log_buf->buf = wmi_command_log_buffer; 457 cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx; 458 cmd_log_buf->size = WMI_CMD_DEBUG_MAX_ENTRY; 459 460 /* WMI commands TX completed */ 461 cmd_tx_cmpl_log_buf->length = 0; 462 cmd_tx_cmpl_log_buf->buf_tail_idx = 0; 463 cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer; 464 cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx; 465 cmd_tx_cmpl_log_buf->size = WMI_CMD_CMPL_DEBUG_MAX_ENTRY; 466 467 /* WMI events when processed */ 468 event_log_buf->length = 0; 469 event_log_buf->buf_tail_idx = 0; 470 event_log_buf->buf = 
wmi_event_log_buffer; 471 event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx; 472 event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY; 473 474 /* WMI events when queued */ 475 rx_event_log_buf->length = 0; 476 rx_event_log_buf->buf_tail_idx = 0; 477 rx_event_log_buf->buf = wmi_rx_event_log_buffer; 478 rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx; 479 rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY; 480 481 /* WMI Management commands */ 482 mgmt_cmd_log_buf->length = 0; 483 mgmt_cmd_log_buf->buf_tail_idx = 0; 484 mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer; 485 mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx; 486 mgmt_cmd_log_buf->size = WMI_MGMT_TX_DEBUG_MAX_ENTRY; 487 488 /* WMI Management commands Tx completed*/ 489 mgmt_cmd_tx_cmp_log_buf->length = 0; 490 mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0; 491 mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer; 492 mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx = 493 &g_wmi_mgmt_command_tx_cmp_buf_idx; 494 mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY; 495 496 /* WMI Management events when received */ 497 mgmt_event_log_buf->length = 0; 498 mgmt_event_log_buf->buf_tail_idx = 0; 499 mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer; 500 mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx; 501 mgmt_event_log_buf->size = WMI_MGMT_RX_DEBUG_MAX_ENTRY; 502 503 /* WMI diag events when received */ 504 diag_event_log_buf->length = 0; 505 diag_event_log_buf->buf_tail_idx = 0; 506 diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer; 507 diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx; 508 diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY; 509 510 qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock); 511 wmi_handle->log_info.wmi_logging_enable = 1; 512 513 return QDF_STATUS_SUCCESS; 514 } 515 #else 516 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle) 517 { 518 struct wmi_log_buf_t *cmd_log_buf = 519 
&wmi_handle->log_info.wmi_command_log_buf_info; 520 struct wmi_log_buf_t *cmd_tx_cmpl_log_buf = 521 &wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info; 522 523 struct wmi_log_buf_t *event_log_buf = 524 &wmi_handle->log_info.wmi_event_log_buf_info; 525 struct wmi_log_buf_t *rx_event_log_buf = 526 &wmi_handle->log_info.wmi_rx_event_log_buf_info; 527 528 struct wmi_log_buf_t *mgmt_cmd_log_buf = 529 &wmi_handle->log_info.wmi_mgmt_command_log_buf_info; 530 struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf = 531 &wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info; 532 struct wmi_log_buf_t *mgmt_event_log_buf = 533 &wmi_handle->log_info.wmi_mgmt_event_log_buf_info; 534 struct wmi_log_buf_t *diag_event_log_buf = 535 &wmi_handle->log_info.wmi_diag_event_log_buf_info; 536 537 wmi_handle->log_info.wmi_logging_enable = 0; 538 539 /* WMI commands */ 540 cmd_log_buf->length = 0; 541 cmd_log_buf->buf_tail_idx = 0; 542 cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc( 543 wmi_cmd_log_max_entry * sizeof(struct wmi_command_debug)); 544 cmd_log_buf->size = wmi_cmd_log_max_entry; 545 546 if (!cmd_log_buf->buf) 547 return QDF_STATUS_E_NOMEM; 548 549 cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx; 550 551 /* WMI commands TX completed */ 552 cmd_tx_cmpl_log_buf->length = 0; 553 cmd_tx_cmpl_log_buf->buf_tail_idx = 0; 554 cmd_tx_cmpl_log_buf->buf = (struct wmi_command_cmp_debug *) qdf_mem_malloc( 555 wmi_cmd_cmpl_log_max_entry * sizeof(struct wmi_command_cmp_debug)); 556 cmd_tx_cmpl_log_buf->size = wmi_cmd_cmpl_log_max_entry; 557 558 if (!cmd_tx_cmpl_log_buf->buf) 559 return QDF_STATUS_E_NOMEM; 560 561 cmd_tx_cmpl_log_buf->p_buf_tail_idx = 562 &cmd_tx_cmpl_log_buf->buf_tail_idx; 563 564 /* WMI events when processed */ 565 event_log_buf->length = 0; 566 event_log_buf->buf_tail_idx = 0; 567 event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc( 568 wmi_event_log_max_entry * sizeof(struct wmi_event_debug)); 569 event_log_buf->size = 
wmi_event_log_max_entry; 570 571 if (!event_log_buf->buf) 572 return QDF_STATUS_E_NOMEM; 573 574 event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx; 575 576 /* WMI events when queued */ 577 rx_event_log_buf->length = 0; 578 rx_event_log_buf->buf_tail_idx = 0; 579 rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc( 580 wmi_event_log_max_entry * sizeof(struct wmi_event_debug)); 581 rx_event_log_buf->size = wmi_event_log_max_entry; 582 583 if (!rx_event_log_buf->buf) 584 return QDF_STATUS_E_NOMEM; 585 586 rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx; 587 588 /* WMI Management commands */ 589 mgmt_cmd_log_buf->length = 0; 590 mgmt_cmd_log_buf->buf_tail_idx = 0; 591 mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc( 592 wmi_mgmt_tx_log_max_entry * sizeof(struct wmi_command_debug)); 593 mgmt_cmd_log_buf->size = wmi_mgmt_tx_log_max_entry; 594 595 if (!mgmt_cmd_log_buf->buf) 596 return QDF_STATUS_E_NOMEM; 597 598 mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx; 599 600 /* WMI Management commands Tx completed*/ 601 mgmt_cmd_tx_cmp_log_buf->length = 0; 602 mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0; 603 mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *) 604 qdf_mem_malloc( 605 wmi_mgmt_tx_cmpl_log_max_entry * 606 sizeof(struct wmi_command_debug)); 607 mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_tx_cmpl_log_max_entry; 608 609 if (!mgmt_cmd_tx_cmp_log_buf->buf) 610 return QDF_STATUS_E_NOMEM; 611 612 mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx = 613 &mgmt_cmd_tx_cmp_log_buf->buf_tail_idx; 614 615 /* WMI Management events when received */ 616 mgmt_event_log_buf->length = 0; 617 mgmt_event_log_buf->buf_tail_idx = 0; 618 619 mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc( 620 wmi_mgmt_rx_log_max_entry * 621 sizeof(struct wmi_event_debug)); 622 mgmt_event_log_buf->size = wmi_mgmt_rx_log_max_entry; 623 624 if (!mgmt_event_log_buf->buf) 625 return QDF_STATUS_E_NOMEM; 626 627 
mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx; 628 629 /* WMI diag events when received */ 630 diag_event_log_buf->length = 0; 631 diag_event_log_buf->buf_tail_idx = 0; 632 633 diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc( 634 wmi_diag_log_max_entry * 635 sizeof(struct wmi_event_debug)); 636 diag_event_log_buf->size = wmi_diag_log_max_entry; 637 638 if (!diag_event_log_buf->buf) 639 return QDF_STATUS_E_NOMEM; 640 641 diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx; 642 643 qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock); 644 wmi_handle->log_info.wmi_logging_enable = 1; 645 646 wmi_filtered_logging_init(wmi_handle); 647 648 return QDF_STATUS_SUCCESS; 649 } 650 #endif 651 652 /** 653 * wmi_log_buffer_free() - Free all dynamic allocated buffer memory for 654 * event logging 655 * @wmi_handle: WMI handle. 656 * 657 * Return: None 658 */ 659 #ifdef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC 660 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) 661 { 662 wmi_filtered_logging_free(wmi_handle); 663 664 if (wmi_handle->log_info.wmi_command_log_buf_info.buf) 665 qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf); 666 if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf) 667 qdf_mem_free( 668 wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf); 669 if (wmi_handle->log_info.wmi_event_log_buf_info.buf) 670 qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf); 671 if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf) 672 qdf_mem_free( 673 wmi_handle->log_info.wmi_rx_event_log_buf_info.buf); 674 if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf) 675 qdf_mem_free( 676 wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf); 677 if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf) 678 qdf_mem_free( 679 wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf); 680 if 
(wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf) 681 qdf_mem_free( 682 wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf); 683 if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf) 684 qdf_mem_free( 685 wmi_handle->log_info.wmi_diag_event_log_buf_info.buf); 686 wmi_handle->log_info.wmi_logging_enable = 0; 687 688 qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock); 689 } 690 #else 691 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) 692 { 693 /* Do Nothing */ 694 } 695 #endif 696 697 /** 698 * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer 699 * @log_buffer: the command log buffer metadata of the buffer to print 700 * @count: the maximum number of entries to print 701 * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper 702 * @print_priv: any data required by the print method, e.g. a file handle 703 * 704 * Return: None 705 */ 706 static void 707 wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count, 708 qdf_abstract_print *print, void *print_priv) 709 { 710 static const int data_len = 711 WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t); 712 char str[128]; 713 uint32_t idx; 714 715 if (count > log_buffer->size) 716 count = log_buffer->size; 717 if (count > log_buffer->length) 718 count = log_buffer->length; 719 720 /* subtract count from index, and wrap if necessary */ 721 idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count; 722 idx %= log_buffer->size; 723 724 print(print_priv, "Time (seconds) Cmd Id Payload"); 725 while (count) { 726 struct wmi_command_debug *cmd_log = (struct wmi_command_debug *) 727 &((struct wmi_command_debug *)log_buffer->buf)[idx]; 728 uint64_t secs, usecs; 729 int len = 0; 730 int i; 731 732 qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs); 733 len += scnprintf(str + len, sizeof(str) - len, 734 "% 8lld.%06lld %6u (0x%06x) ", 735 secs, usecs, 736 cmd_log->command, cmd_log->command); 737 for (i = 0; i < data_len; 
++i) { 738 len += scnprintf(str + len, sizeof(str) - len, 739 "0x%08x ", cmd_log->data[i]); 740 } 741 742 print(print_priv, str); 743 744 --count; 745 ++idx; 746 if (idx >= log_buffer->size) 747 idx = 0; 748 } 749 } 750 751 /** 752 * wmi_dump_last_cmd_rec_info() - last wmi command tx completion time print 753 * @wmi_handle: wmi handle 754 * 755 * Return: None 756 */ 757 static void 758 wmi_dump_last_cmd_rec_info(wmi_unified_t wmi_handle) { 759 uint32_t idx, idx_tx_cmp, cmd_tmp_log, cmd_tmp_tx_cmp; 760 uint64_t secs, secs_tx_cmp, usecs, usecs_tx_cmp; 761 struct wmi_command_debug *cmd_log; 762 struct wmi_command_debug *cmd_log_tx_cmp; 763 struct wmi_log_buf_t *log_buf = 764 &wmi_handle->log_info.wmi_command_log_buf_info; 765 struct wmi_log_buf_t *log_buf_tx_cmp = 766 &wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info; 767 768 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); 769 770 (*log_buf->p_buf_tail_idx == 0) ? (idx = log_buf->size) : 771 (idx = *log_buf->p_buf_tail_idx - 1); 772 idx %= log_buf->size; 773 774 (*log_buf_tx_cmp->p_buf_tail_idx == 0) ? 
(idx_tx_cmp = 775 log_buf_tx_cmp->size) : (idx_tx_cmp = 776 *log_buf_tx_cmp->p_buf_tail_idx - 1); 777 idx_tx_cmp %= log_buf_tx_cmp->size; 778 cmd_log = &((struct wmi_command_debug *)log_buf->buf)[idx]; 779 cmd_log_tx_cmp = &((struct wmi_command_debug *)log_buf_tx_cmp->buf) 780 [idx_tx_cmp]; 781 cmd_tmp_log = cmd_log->command; 782 cmd_tmp_tx_cmp = cmd_log_tx_cmp->command; 783 qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs); 784 qdf_log_timestamp_to_secs(cmd_log_tx_cmp->time, &secs_tx_cmp, 785 &usecs_tx_cmp); 786 787 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); 788 789 wmi_nofl_err("Last wmi command Time (s) = % 8lld.%06lld ", 790 secs, usecs); 791 wmi_nofl_err("Last wmi Cmd_Id = (0x%06x) ", cmd_tmp_log); 792 wmi_nofl_err("Last wmi command tx completion Time (s) = % 8lld.%06lld", 793 secs_tx_cmp, usecs_tx_cmp); 794 wmi_nofl_err("Last wmi command tx completion Cmd_Id = (0x%06x) ", 795 cmd_tmp_tx_cmp); 796 } 797 798 /** 799 * wmi_print_cmd_cmp_log_buffer() - wmi command completion log printer 800 * @log_buffer: the command completion log buffer metadata of the buffer to print 801 * @count: the maximum number of entries to print 802 * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper 803 * @print_priv: any data required by the print method, e.g. 
 *              a file handle
 *
 * Return: None
 */
static void
wmi_print_cmd_cmp_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
			     qdf_abstract_print *print, void *print_priv)
{
	/* number of 32-bit payload words recorded per log entry */
	static const int data_len =
		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
	char str[128];
	uint32_t idx;

	/* clamp to the ring capacity and to the number actually recorded */
	if (count > log_buffer->size)
		count = log_buffer->size;
	if (count > log_buffer->length)
		count = log_buffer->length;

	/* subtract count from index, and wrap if necessary */
	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
	idx %= log_buffer->size;

	print(print_priv, "Time (seconds) Cmd Id Payload");
	while (count) {
		struct wmi_command_cmp_debug *cmd_log = (struct wmi_command_cmp_debug *)
			&((struct wmi_command_cmp_debug *)log_buffer->buf)[idx];
		uint64_t secs, usecs;
		int len = 0;
		int i;

		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
		len += scnprintf(str + len, sizeof(str) - len,
				 "% 8lld.%06lld %6u (0x%06x) ",
				 secs, usecs,
				 cmd_log->command, cmd_log->command);
		for (i = 0; i < data_len; ++i) {
			len += scnprintf(str + len, sizeof(str) - len,
					 "0x%08x ", cmd_log->data[i]);
		}

		print(print_priv, str);

		--count;
		++idx;
		/* wrap around the circular buffer */
		if (idx >= log_buffer->size)
			idx = 0;
	}
}

/**
 * wmi_print_event_log_buffer() - an output agnostic wmi event log printer
 * @log_buffer: the event log buffer metadata of the buffer to print
 * @count: the maximum number of entries to print
 * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
 * @print_priv: any data required by the print method, e.g. a file handle
 *
 * Return: None
 */
static void
wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
			   qdf_abstract_print *print, void *print_priv)
{
	/* number of 32-bit payload words recorded per log entry */
	static const int data_len =
		WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
	char str[128];
	uint32_t idx;

	/* clamp to the ring capacity and to the number actually recorded */
	if (count > log_buffer->size)
		count = log_buffer->size;
	if (count > log_buffer->length)
		count = log_buffer->length;

	/* subtract count from index, and wrap if necessary */
	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
	idx %= log_buffer->size;

	print(print_priv, "Time (seconds) Event Id Payload");
	while (count) {
		struct wmi_event_debug *event_log = (struct wmi_event_debug *)
			&((struct wmi_event_debug *)log_buffer->buf)[idx];
		uint64_t secs, usecs;
		int len = 0;
		int i;

		qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs);
		len += scnprintf(str + len, sizeof(str) - len,
				 "% 8lld.%06lld %6u (0x%06x) ",
				 secs, usecs,
				 event_log->event, event_log->event);
		for (i = 0; i < data_len; ++i) {
			len += scnprintf(str + len, sizeof(str) - len,
					 "0x%08x ", event_log->data[i]);
		}

		print(print_priv, str);

		--count;
		++idx;
		/* wrap around the circular buffer */
		if (idx >= log_buffer->size)
			idx = 0;
	}
}

/* Print the WMI command log ring via the supplied abstract print method */
inline void
wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count,
		  qdf_abstract_print *print, void *print_priv)
{
	wmi_print_cmd_log_buffer(
		&wmi->log_info.wmi_command_log_buf_info,
		count, print, print_priv);
}

/* Print the WMI command tx-completion log ring */
inline void
wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
			 qdf_abstract_print *print, void *print_priv)
{
	wmi_print_cmd_cmp_log_buffer(
		&wmi->log_info.wmi_command_tx_cmp_log_buf_info,
		count, print, print_priv);
}

/* Print the WMI management command log ring */
inline void
wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count,
		       qdf_abstract_print *print, void *print_priv)
{
	wmi_print_cmd_log_buffer(
		&wmi->log_info.wmi_mgmt_command_log_buf_info,
		count, print, print_priv);
}

/* Print the WMI management command tx-completion log ring */
inline void
wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
			      qdf_abstract_print *print, void *print_priv)
{
	wmi_print_cmd_log_buffer(
		&wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info,
		count, print, print_priv);
}

/* Print the WMI event log ring */
inline void
wmi_print_event_log(wmi_unified_t wmi, uint32_t count,
		    qdf_abstract_print *print, void *print_priv)
{
	wmi_print_event_log_buffer(
		&wmi->log_info.wmi_event_log_buf_info,
		count, print, print_priv);
}

/* Print the WMI rx event log ring */
inline void
wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count,
		       qdf_abstract_print *print, void *print_priv)
{
	wmi_print_event_log_buffer(
		&wmi->log_info.wmi_rx_event_log_buf_info,
		count, print, print_priv);
}

/* Print the WMI management event log ring */
inline void
wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count,
			 qdf_abstract_print *print, void *print_priv)
{
	wmi_print_event_log_buffer(
		&wmi->log_info.wmi_mgmt_event_log_buf_info,
		count, print, print_priv);
}


/* debugfs routines*/

/*
 * debug_wmi_##func_base##_show() - debugfs functions to display content of
 * command and event buffers. Macro uses max buffer length to display
 * buffer when it is wraparound.
 *
 * @m: debugfs handler to access wmi_handle
 * @v: Variable arguments (not used)
 *
 * Return: Length of characters printed
 *
 * The ring lock is dropped before the records are formatted, so a record
 * may be overwritten while being printed; this is a debug-only reader.
 */
#define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
						void *v)		\
	{								\
		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
		struct wmi_log_buf_t *wmi_log =				\
			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
		int pos, nread, outlen;					\
		int i;							\
		uint64_t secs, usecs;					\
									\
		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
		if (!wmi_log->length) {					\
			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
			return wmi_bp_seq_printf(m,			\
			"no elements to read from ring buffer!\n");	\
		}							\
									\
		if (wmi_log->length <= wmi_ring_size)			\
			nread = wmi_log->length;			\
		else							\
			nread = wmi_ring_size;				\
									\
		if (*(wmi_log->p_buf_tail_idx) == 0)			\
			/* tail can be 0 after wrap-around */		\
			pos = wmi_ring_size - 1;			\
		else							\
			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
									\
		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
		while (nread--) {					\
			struct wmi_record_type *wmi_record;		\
									\
			wmi_record = (struct wmi_record_type *)		\
			&(((struct wmi_record_type *)wmi_log->buf)[pos]);\
			outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n",	\
				(wmi_record->command));			\
			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
						  &usecs);		\
			outlen +=					\
			wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\
					  secs, usecs);			\
			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
			for (i = 0; i < (wmi_record_max_length/		\
					sizeof(uint32_t)); i++)		\
				outlen += wmi_bp_seq_printf(m, "%x ",	\
					wmi_record->data[i]);		\
			outlen += wmi_bp_seq_printf(m, "\n");		\
									\
			if (pos == 0)					\
				pos = wmi_ring_size - 1;		\
			else						\
				pos--;					\
		}							\
		return outlen;						\
	}								\

/*
 * GENERATE_EVENT_DEBUG_SHOW_FUNCS - same as the command variant above but
 * walks entries of struct wmi_event_debug and prints the event id field.
 */
#define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size)	\
	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
						void *v)		\
	{								\
		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
		struct wmi_log_buf_t *wmi_log =				\
			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
		int pos, nread, outlen;					\
		int i;							\
		uint64_t secs, usecs;					\
									\
		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
		if (!wmi_log->length) {					\
			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
			return wmi_bp_seq_printf(m,			\
			"no elements to read from ring buffer!\n");	\
		}							\
									\
		if (wmi_log->length <= wmi_ring_size)			\
			nread = wmi_log->length;			\
		else							\
			nread = wmi_ring_size;				\
									\
		if (*(wmi_log->p_buf_tail_idx) == 0)			\
			/* tail can be 0 after wrap-around */		\
			pos = wmi_ring_size - 1;			\
		else							\
			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
									\
		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
		while (nread--) {					\
			struct wmi_event_debug *wmi_record;		\
									\
			wmi_record = (struct wmi_event_debug *)		\
			&(((struct wmi_event_debug *)wmi_log->buf)[pos]);\
			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
						  &usecs);		\
			outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\
				(wmi_record->event));			\
			outlen +=					\
			wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\
					  secs, usecs);			\
			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
			for (i = 0; i < (wmi_record_max_length/		\
					sizeof(uint32_t)); i++)		\
				outlen += wmi_bp_seq_printf(m, "%x ",	\
					wmi_record->data[i]);		\
			outlen += wmi_bp_seq_printf(m, "\n");		\
									\
			if (pos == 0)					\
				pos = wmi_ring_size - 1;		\
			else						\
				pos--;					\
		}							\
		return outlen;						\
	}

GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size, 1099 wmi_command_debug); 1100 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size, 1101 wmi_command_cmp_debug); 1102 GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size); 1103 GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size); 1104 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size, 1105 wmi_command_debug); 1106 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log, 1107 wmi_display_size, 1108 wmi_command_debug); 1109 GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size); 1110 1111 /** 1112 * debug_wmi_enable_show() - debugfs functions to display enable state of 1113 * wmi logging feature. 1114 * 1115 * @m: debugfs handler to access wmi_handle 1116 * @v: Variable arguments (not used) 1117 * 1118 * Return: always 1 1119 */ 1120 static int debug_wmi_enable_show(struct seq_file *m, void *v) 1121 { 1122 wmi_unified_t wmi_handle = (wmi_unified_t) m->private; 1123 1124 return wmi_bp_seq_printf(m, "%d\n", 1125 wmi_handle->log_info.wmi_logging_enable); 1126 } 1127 1128 /** 1129 * debug_wmi_log_size_show() - debugfs functions to display configured size of 1130 * wmi logging command/event buffer and management command/event buffer. 
1131 * 1132 * @m: debugfs handler to access wmi_handle 1133 * @v: Variable arguments (not used) 1134 * 1135 * Return: Length of characters printed 1136 */ 1137 static int debug_wmi_log_size_show(struct seq_file *m, void *v) 1138 { 1139 1140 wmi_bp_seq_printf(m, "WMI command/cmpl log max size:%d/%d\n", 1141 wmi_cmd_log_max_entry, wmi_cmd_cmpl_log_max_entry); 1142 wmi_bp_seq_printf(m, "WMI management Tx/cmpl log max size:%d/%d\n", 1143 wmi_mgmt_tx_log_max_entry, 1144 wmi_mgmt_tx_cmpl_log_max_entry); 1145 wmi_bp_seq_printf(m, "WMI event log max size:%d\n", 1146 wmi_event_log_max_entry); 1147 wmi_bp_seq_printf(m, "WMI management Rx log max size:%d\n", 1148 wmi_mgmt_rx_log_max_entry); 1149 return wmi_bp_seq_printf(m, 1150 "WMI diag log max size:%d\n", 1151 wmi_diag_log_max_entry); 1152 } 1153 1154 /* 1155 * debug_wmi_##func_base##_write() - debugfs functions to clear 1156 * wmi logging command/event buffer and management command/event buffer. 1157 * 1158 * @file: file handler to access wmi_handle 1159 * @buf: received data buffer 1160 * @count: length of received buffer 1161 * @ppos: Not used 1162 * 1163 * Return: count 1164 */ 1165 #define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\ 1166 static ssize_t debug_wmi_##func_base##_write(struct file *file, \ 1167 const char __user *buf, \ 1168 size_t count, loff_t *ppos) \ 1169 { \ 1170 int k, ret; \ 1171 wmi_unified_t wmi_handle = \ 1172 ((struct seq_file *)file->private_data)->private;\ 1173 struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info. 
\ 1174 wmi_##func_base##_buf_info; \ 1175 char locbuf[50]; \ 1176 \ 1177 if ((!buf) || (count > 50)) \ 1178 return -EFAULT; \ 1179 \ 1180 if (copy_from_user(locbuf, buf, count)) \ 1181 return -EFAULT; \ 1182 \ 1183 ret = sscanf(locbuf, "%d", &k); \ 1184 if ((ret != 1) || (k != 0)) { \ 1185 wmi_err("Wrong input, echo 0 to clear the wmi buffer");\ 1186 return -EINVAL; \ 1187 } \ 1188 \ 1189 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\ 1190 qdf_mem_zero(wmi_log->buf, wmi_ring_size * \ 1191 sizeof(struct wmi_record_type)); \ 1192 wmi_log->length = 0; \ 1193 *(wmi_log->p_buf_tail_idx) = 0; \ 1194 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\ 1195 \ 1196 return count; \ 1197 } 1198 1199 GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_cmd_log_max_entry, 1200 wmi_command_debug); 1201 GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_cmd_cmpl_log_max_entry, 1202 wmi_command_cmp_debug); 1203 GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_event_log_max_entry, 1204 wmi_event_debug); 1205 GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_event_log_max_entry, 1206 wmi_event_debug); 1207 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_tx_log_max_entry, 1208 wmi_command_debug); 1209 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log, 1210 wmi_mgmt_tx_cmpl_log_max_entry, wmi_command_debug); 1211 GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_rx_log_max_entry, 1212 wmi_event_debug); 1213 1214 /** 1215 * debug_wmi_enable_write() - debugfs functions to enable/disable 1216 * wmi logging feature. 
1217 * 1218 * @file: file handler to access wmi_handle 1219 * @buf: received data buffer 1220 * @count: length of received buffer 1221 * @ppos: Not used 1222 * 1223 * Return: count 1224 */ 1225 static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf, 1226 size_t count, loff_t *ppos) 1227 { 1228 wmi_unified_t wmi_handle = 1229 ((struct seq_file *)file->private_data)->private; 1230 int k, ret; 1231 char locbuf[50]; 1232 1233 if ((!buf) || (count > 50)) 1234 return -EFAULT; 1235 1236 if (copy_from_user(locbuf, buf, count)) 1237 return -EFAULT; 1238 1239 ret = sscanf(locbuf, "%d", &k); 1240 if ((ret != 1) || ((k != 0) && (k != 1))) 1241 return -EINVAL; 1242 1243 wmi_handle->log_info.wmi_logging_enable = k; 1244 return count; 1245 } 1246 1247 /** 1248 * debug_wmi_log_size_write() - reserved. 1249 * 1250 * @file: file handler to access wmi_handle 1251 * @buf: received data buffer 1252 * @count: length of received buffer 1253 * @ppos: Not used 1254 * 1255 * Return: count 1256 */ 1257 static ssize_t debug_wmi_log_size_write(struct file *file, 1258 const char __user *buf, size_t count, loff_t *ppos) 1259 { 1260 return -EINVAL; 1261 } 1262 1263 /* Structure to maintain debug information */ 1264 struct wmi_debugfs_info { 1265 const char *name; 1266 const struct file_operations *ops; 1267 }; 1268 1269 #define DEBUG_FOO(func_base) { .name = #func_base, \ 1270 .ops = &debug_##func_base##_ops } 1271 1272 /* 1273 * debug_##func_base##_open() - Open debugfs entry for respective command 1274 * and event buffer. 
1275 * 1276 * @inode: node for debug dir entry 1277 * @file: file handler 1278 * 1279 * Return: open status 1280 */ 1281 #define GENERATE_DEBUG_STRUCTS(func_base) \ 1282 static int debug_##func_base##_open(struct inode *inode, \ 1283 struct file *file) \ 1284 { \ 1285 return single_open(file, debug_##func_base##_show, \ 1286 inode->i_private); \ 1287 } \ 1288 \ 1289 \ 1290 static struct file_operations debug_##func_base##_ops = { \ 1291 .open = debug_##func_base##_open, \ 1292 .read = seq_read, \ 1293 .llseek = seq_lseek, \ 1294 .write = debug_##func_base##_write, \ 1295 .release = single_release, \ 1296 }; 1297 1298 GENERATE_DEBUG_STRUCTS(wmi_command_log); 1299 GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log); 1300 GENERATE_DEBUG_STRUCTS(wmi_event_log); 1301 GENERATE_DEBUG_STRUCTS(wmi_rx_event_log); 1302 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log); 1303 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log); 1304 GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log); 1305 GENERATE_DEBUG_STRUCTS(wmi_enable); 1306 GENERATE_DEBUG_STRUCTS(wmi_log_size); 1307 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING 1308 GENERATE_DEBUG_STRUCTS(filtered_wmi_cmds); 1309 GENERATE_DEBUG_STRUCTS(filtered_wmi_evts); 1310 GENERATE_DEBUG_STRUCTS(wmi_filtered_command_log); 1311 GENERATE_DEBUG_STRUCTS(wmi_filtered_event_log); 1312 #endif 1313 1314 struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = { 1315 DEBUG_FOO(wmi_command_log), 1316 DEBUG_FOO(wmi_command_tx_cmp_log), 1317 DEBUG_FOO(wmi_event_log), 1318 DEBUG_FOO(wmi_rx_event_log), 1319 DEBUG_FOO(wmi_mgmt_command_log), 1320 DEBUG_FOO(wmi_mgmt_command_tx_cmp_log), 1321 DEBUG_FOO(wmi_mgmt_event_log), 1322 DEBUG_FOO(wmi_enable), 1323 DEBUG_FOO(wmi_log_size), 1324 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING 1325 DEBUG_FOO(filtered_wmi_cmds), 1326 DEBUG_FOO(filtered_wmi_evts), 1327 DEBUG_FOO(wmi_filtered_command_log), 1328 DEBUG_FOO(wmi_filtered_event_log), 1329 #endif 1330 }; 1331 1332 /** 1333 * wmi_debugfs_create() - Create debug_fs entry for wmi 
 * logging.
 *
 * @wmi_handle: wmi handle
 * @par_entry: debug directory entry
 *
 * Return: none
 */
static void wmi_debugfs_create(wmi_unified_t wmi_handle,
			       struct dentry *par_entry)
{
	int i;

	if (!par_entry)
		goto out;

	for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
		wmi_handle->debugfs_de[i] = qdf_debugfs_create_entry(
				wmi_debugfs_infos[i].name,
				WMI_INFOS_DBG_FILE_PERM,
				par_entry,
				wmi_handle,
				wmi_debugfs_infos[i].ops);

		if (!wmi_handle->debugfs_de[i]) {
			wmi_err("debug Entry creation failed!");
			goto out;
		}
	}

	return;

out:
	/* On any failure the log buffers are released; NOTE(review):
	 * entries created before the failure are presumably cleaned up
	 * when the parent debugfs dir is removed - confirm.
	 */
	wmi_err("debug Entry creation failed!");
	wmi_log_buffer_free(wmi_handle);
	return;
}

/**
 * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
 * @wmi_handle: wmi handle
 *
 * Return: none
 */
static void wmi_debugfs_remove(wmi_unified_t wmi_handle)
{
	int i;
	struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir;

	if (dentry) {
		/* only clear the cached entry pointers; the entries
		 * themselves go away with the recursive dir removal below
		 */
		for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
			if (wmi_handle->debugfs_de[i])
				wmi_handle->debugfs_de[i] = NULL;
		}
	}

	if (dentry)
		qdf_debugfs_remove_dir_recursive(dentry);
}

/**
 * wmi_debugfs_init() - debugfs functions to create debugfs directory and to
 * create debugfs entries.
 * @wmi_handle: wmi handler
 * @pdev_idx: pdev id
 *
 * Return: init status
 */
static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx)
{
	char buf[32];

	/* one directory per SOC/PDEV pair */
	snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u",
		 wmi_handle->soc->soc_idx, pdev_idx);

	wmi_handle->log_info.wmi_log_debugfs_dir =
		qdf_debugfs_create_dir(buf, NULL);

	if (!wmi_handle->log_info.wmi_log_debugfs_dir) {
		wmi_err("error while creating debugfs dir for %s", buf);
		return QDF_STATUS_E_FAILURE;
	}
	wmi_debugfs_create(wmi_handle,
			   wmi_handle->log_info.wmi_log_debugfs_dir);

	return QDF_STATUS_SUCCESS;
}

void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
			 void *header, uint32_t vdev_id, uint32_t chanfreq)
{

	uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE];

	/* pack type/subtype/vdev/freq as the recorded payload words */
	data[0] = ((struct wmi_command_header *)header)->type;
	data[1] = ((struct wmi_command_header *)header)->sub_type;
	data[2] = vdev_id;
	data[3] = chanfreq;

	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);

	WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data);
	wmi_specific_cmd_record(wmi_handle, cmd, (uint8_t *)data);
	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
}
#else
/* Stubs used when WMI interface event logging is compiled out */
static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { }
void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
			 void *header, uint32_t vdev_id, uint32_t chanfreq) { }
static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { }
static void wmi_minidump_detach(struct wmi_unified *wmi_handle) { }
static void wmi_minidump_attach(struct wmi_unified *wmi_handle) { }
static void wmi_dump_last_cmd_rec_info(wmi_unified_t wmi_handle) { }
#endif /*WMI_INTERFACE_EVENT_LOGGING */
qdf_export_symbol(wmi_mgmt_cmd_record);

#ifdef WMI_EXT_DBG

/**
 *
 * wmi_ext_dbg_msg_enqueue() - enqueue wmi message
 * @wmi_handle: wmi handler
 * @msg: WMI message
 *
 * Return: size of wmi message queue after enqueue
 */
static uint32_t wmi_ext_dbg_msg_enqueue(struct wmi_unified *wmi_handle,
					struct wmi_ext_dbg_msg *msg)
{
	uint32_t list_size;

	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
	qdf_list_insert_back_size(&wmi_handle->wmi_ext_dbg_msg_queue,
				  &msg->node, &list_size);
	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);

	return list_size;
}

/**
 * wmi_ext_dbg_msg_dequeue() - dequeue wmi message
 * @wmi_handle: wmi handler
 *
 * Return: wmi msg on success else NULL
 */
static struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_dequeue(struct wmi_unified
						       *wmi_handle)
{
	qdf_list_node_t *list_node = NULL;

	qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
	qdf_list_remove_front(&wmi_handle->wmi_ext_dbg_msg_queue, &list_node);
	qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);

	/* queue empty */
	if (!list_node)
		return NULL;

	return qdf_container_of(list_node, struct wmi_ext_dbg_msg, node);
}

/**
 * wmi_ext_dbg_msg_record() - record wmi messages
 * @wmi_handle: wmi handler
 * @buf: wmi message buffer
 * @len: wmi message length
 * @type: wmi message type
 *
 * Return: QDF_STATUS_SUCCESS on successful recording else failure.
 */
static QDF_STATUS wmi_ext_dbg_msg_record(struct wmi_unified *wmi_handle,
					 uint8_t *buf, uint32_t len,
					 enum WMI_MSG_TYPE type)
{
	struct wmi_ext_dbg_msg *msg;
	uint32_t list_size;

	msg = wmi_ext_dbg_msg_get(len);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	msg->len = len;
	msg->type = type;
	qdf_mem_copy(msg->buf, buf, len);
	msg->ts = qdf_get_log_timestamp();
	list_size = wmi_ext_dbg_msg_enqueue(wmi_handle, msg);

	/* bounded queue: drop the oldest message once the limit is hit */
	if (list_size >= wmi_handle->wmi_ext_dbg_msg_queue_size) {
		msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
		wmi_ext_dbg_msg_put(msg);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_ext_dbg_msg_cmd_record() - record wmi command messages
 * @wmi_handle: wmi handler
 * @buf: wmi command buffer
 * @len: wmi command message length
 *
 * Return: QDF_STATUS_SUCCESS on successful recording else failure.
 */
static QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified *wmi_handle,
					     uint8_t *buf, uint32_t len)
{
	return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
				      WMI_MSG_TYPE_CMD);
}

/**
 * wmi_ext_dbg_msg_event_record() - record wmi event messages
 * @wmi_handle: wmi handler
 * @buf: wmi event buffer
 * @len: wmi event message length
 *
 * Return: QDF_STATUS_SUCCESS on successful recording else failure.
 */
static QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified *wmi_handle,
					       uint8_t *buf, uint32_t len)
{
	uint32_t id;

	id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
	/* diag events are too chatty; everything else is recorded */
	if (id != wmi_handle->wmi_events[wmi_diag_event_id])
		return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
					      WMI_MSG_TYPE_EVENT);

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_ext_dbg_msg_queue_init() - create debugfs queue and associated lock
 * @wmi_handle: wmi handler
 *
 * Return: none
 */
static void wmi_ext_dbg_msg_queue_init(struct wmi_unified *wmi_handle)
{
	qdf_list_create(&wmi_handle->wmi_ext_dbg_msg_queue,
			wmi_handle->wmi_ext_dbg_msg_queue_size);
	qdf_spinlock_create(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
}

/**
 * wmi_ext_dbg_msg_queue_deinit() - destroy debugfs queue and associated lock
 * @wmi_handle: wmi handler
 *
 * Return: none
 */
static void wmi_ext_dbg_msg_queue_deinit(struct wmi_unified *wmi_handle)
{
	qdf_list_destroy(&wmi_handle->wmi_ext_dbg_msg_queue);
	qdf_spinlock_destroy(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
}

/**
 * wmi_ext_dbg_msg_show() - debugfs function to display whole content of
 * wmi command/event messages including headers.
 * @file: qdf debugfs file handler
 * @arg: pointer to wmi handler
 *
 * Return: QDF_STATUS_SUCCESS if all the messages are shown successfully,
 * else QDF_STATUS_E_AGAIN if more data to show.
 */
static QDF_STATUS wmi_ext_dbg_msg_show(qdf_debugfs_file_t file, void *arg)
{
	struct wmi_unified *wmi_handle = (struct wmi_unified *)arg;
	struct wmi_ext_dbg_msg *msg;
	uint64_t secs, usecs;

	msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
	if (!msg)
		return QDF_STATUS_SUCCESS;

	qdf_debugfs_printf(file, "%s: 0x%x\n",
			   msg->type == WMI_MSG_TYPE_CMD ? "COMMAND" :
			   "EVENT", WMI_GET_FIELD(msg->buf, WMI_CMD_HDR,
						  COMMANDID));
	qdf_log_timestamp_to_secs(msg->ts, &secs, &usecs);
	qdf_debugfs_printf(file, "Time: %llu.%llu\n", secs, usecs);
	qdf_debugfs_printf(file, "Length:%d\n", msg->len);
	qdf_debugfs_hexdump(file, msg->buf, msg->len,
			    WMI_EXT_DBG_DUMP_ROW_SIZE,
			    WMI_EXT_DBG_DUMP_GROUP_SIZE);
	qdf_debugfs_printf(file, "\n");

	if (qdf_debugfs_overflow(file)) {
		/* output buffer full: push the message back so it is
		 * re-shown on the next read iteration
		 */
		qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
		qdf_list_insert_front(&wmi_handle->wmi_ext_dbg_msg_queue,
				      &msg->node);
		qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);

	} else {
		wmi_ext_dbg_msg_put(msg);
	}

	return QDF_STATUS_E_AGAIN;
}

/**
 * wmi_ext_dbg_msg_write() - debugfs write not supported
 * @priv: private data
 * @buf: received data buffer
 * @len: length of received buffer
 *
 * Return: QDF_STATUS_E_NOSUPPORT.
 */
static QDF_STATUS wmi_ext_dbg_msg_write(void *priv, const char *buf,
					qdf_size_t len)
{
	return QDF_STATUS_E_NOSUPPORT;
}

/* per-radio file operations for the extended wmi debugfs file */
static struct qdf_debugfs_fops wmi_ext_dbgfs_ops[WMI_MAX_RADIOS];

/**
 * wmi_ext_dbgfs_init() - init debugfs items for extended wmi dump.
 * @wmi_handle: wmi handler
 * @pdev_idx: pdev index
 *
 * Return: QDF_STATUS_SUCCESS if debugfs is initialized else
 *	   QDF_STATUS_E_FAILURE
 */
static QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
				     uint32_t pdev_idx)
{
	qdf_dentry_t dentry;
	char buf[32];

	/* To maintain backward compatibility, naming convention for PDEV 0
	 * dentry is kept same as before. For more than 1 PDEV, dentry
	 * names will be appended with PDEVx.
	 */
	if (wmi_handle->soc->soc_idx == 0 && pdev_idx == 0) {
		dentry = qdf_debugfs_create_dir(WMI_EXT_DBG_DIR, NULL);
	} else {
		snprintf(buf, sizeof(buf), "WMI_EXT_DBG_SOC%u_PDEV%u",
			 wmi_handle->soc->soc_idx, pdev_idx);
		dentry = qdf_debugfs_create_dir(buf, NULL);
	}

	if (!dentry) {
		wmi_err("error while creating extended wmi debugfs dir");
		return QDF_STATUS_E_FAILURE;
	}

	wmi_ext_dbgfs_ops[pdev_idx].show = wmi_ext_dbg_msg_show;
	wmi_ext_dbgfs_ops[pdev_idx].write = wmi_ext_dbg_msg_write;
	wmi_ext_dbgfs_ops[pdev_idx].priv = wmi_handle;
	if (!qdf_debugfs_create_file(WMI_EXT_DBG_FILE, WMI_EXT_DBG_FILE_PERM,
				     dentry, &wmi_ext_dbgfs_ops[pdev_idx])) {
		/* undo the directory creation on file creation failure */
		qdf_debugfs_remove_dir(dentry);
		wmi_err("Error while creating extended wmi debugfs file");
		return QDF_STATUS_E_FAILURE;
	}

	wmi_handle->wmi_ext_dbg_dentry = dentry;
	wmi_handle->wmi_ext_dbg_msg_queue_size = WMI_EXT_DBG_QUEUE_SIZE;
	wmi_ext_dbg_msg_queue_init(wmi_handle);

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_ext_dbgfs_deinit() - cleanup/deinit debugfs items of extended wmi dump.
1696 * @wmi_handle: wmi handler 1697 * 1698 * Return: QDF_STATUS_SUCCESS if cleanup is successful 1699 */ 1700 static QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle) 1701 { 1702 struct wmi_ext_dbg_msg *msg; 1703 1704 while ((msg = wmi_ext_dbg_msg_dequeue(wmi_handle))) 1705 wmi_ext_dbg_msg_put(msg); 1706 1707 wmi_ext_dbg_msg_queue_deinit(wmi_handle); 1708 qdf_debugfs_remove_dir_recursive(wmi_handle->wmi_ext_dbg_dentry); 1709 1710 return QDF_STATUS_SUCCESS; 1711 } 1712 1713 #else 1714 1715 static inline QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified 1716 *wmi_handle, 1717 uint8_t *buf, uint32_t len) 1718 { 1719 return QDF_STATUS_SUCCESS; 1720 } 1721 1722 static inline QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified 1723 *wmi_handle, 1724 uint8_t *buf, uint32_t len) 1725 { 1726 return QDF_STATUS_SUCCESS; 1727 } 1728 1729 static inline QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle, 1730 uint32_t pdev_idx) 1731 { 1732 return QDF_STATUS_SUCCESS; 1733 } 1734 1735 static inline QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle) 1736 { 1737 return QDF_STATUS_SUCCESS; 1738 } 1739 1740 #endif /*WMI_EXT_DBG */ 1741 1742 int wmi_get_host_credits(wmi_unified_t wmi_handle); 1743 /* WMI buffer APIs */ 1744 1745 #ifdef NBUF_MEMORY_DEBUG 1746 wmi_buf_t 1747 wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len, 1748 const char *func_name, 1749 uint32_t line_num) 1750 { 1751 wmi_buf_t wmi_buf; 1752 1753 if (roundup(len, 4) > wmi_handle->max_msg_len) { 1754 wmi_err("Invalid length %u (via %s:%u) max size: %u", 1755 len, func_name, line_num, 1756 wmi_handle->max_msg_len); 1757 QDF_ASSERT(0); 1758 return NULL; 1759 } 1760 1761 wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, func_name, 1762 line_num); 1763 if (!wmi_buf) 1764 wmi_buf = qdf_nbuf_alloc_debug(NULL, 1765 roundup(len + WMI_MIN_HEAD_ROOM, 1766 4), 1767 WMI_MIN_HEAD_ROOM, 4, false, 1768 func_name, line_num); 1769 if (!wmi_buf) 1770 return NULL; 1771 
1772 /* Clear the wmi buffer */ 1773 OS_MEMZERO(qdf_nbuf_data(wmi_buf), len); 1774 1775 /* 1776 * Set the length of the buffer to match the allocation size. 1777 */ 1778 qdf_nbuf_set_pktlen(wmi_buf, len); 1779 1780 return wmi_buf; 1781 } 1782 qdf_export_symbol(wmi_buf_alloc_debug); 1783 1784 void wmi_buf_free(wmi_buf_t net_buf) 1785 { 1786 net_buf = wbuff_buff_put(net_buf); 1787 if (net_buf) 1788 qdf_nbuf_free(net_buf); 1789 } 1790 qdf_export_symbol(wmi_buf_free); 1791 #else 1792 wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len, 1793 const char *func, uint32_t line) 1794 { 1795 wmi_buf_t wmi_buf; 1796 1797 if (roundup(len, 4) > wmi_handle->max_msg_len) { 1798 QDF_DEBUG_PANIC("Invalid length %u (via %s:%u) max size: %u", 1799 len, func, line, wmi_handle->max_msg_len); 1800 return NULL; 1801 } 1802 1803 wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, __func__, 1804 __LINE__); 1805 if (!wmi_buf) 1806 wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len + 1807 WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4, 1808 false, func, line); 1809 1810 if (!wmi_buf) { 1811 wmi_nofl_err("%s:%d, failed to alloc len:%d", func, line, len); 1812 return NULL; 1813 } 1814 1815 /* Clear the wmi buffer */ 1816 OS_MEMZERO(qdf_nbuf_data(wmi_buf), len); 1817 1818 /* 1819 * Set the length of the buffer to match the allocation size. 
1820 */ 1821 qdf_nbuf_set_pktlen(wmi_buf, len); 1822 1823 return wmi_buf; 1824 } 1825 qdf_export_symbol(wmi_buf_alloc_fl); 1826 1827 void wmi_buf_free(wmi_buf_t net_buf) 1828 { 1829 net_buf = wbuff_buff_put(net_buf); 1830 if (net_buf) 1831 qdf_nbuf_free(net_buf); 1832 } 1833 qdf_export_symbol(wmi_buf_free); 1834 #endif 1835 1836 uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle) 1837 { 1838 return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM; 1839 } 1840 qdf_export_symbol(wmi_get_max_msg_len); 1841 1842 #ifndef WMI_CMD_STRINGS 1843 static uint8_t *wmi_id_to_name(uint32_t wmi_command) 1844 { 1845 return "Invalid WMI cmd"; 1846 } 1847 #endif 1848 1849 static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag) 1850 { 1851 wmi_nofl_debug("Send cmd %s(0x%x) tag:%d", 1852 wmi_id_to_name(cmd_id), cmd_id, tag); 1853 } 1854 1855 /** 1856 * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence 1857 * @cmd_id: command to check 1858 * 1859 * Return: true if the command is part of the resume sequence. 
 */
#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD
static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
{
	switch (cmd_id) {
	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
	case WMI_PDEV_RESUME_CMDID:
		return true;

	default:
		return false;
	}
}

#else
static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
{
	return false;
}

#endif

#ifdef FEATURE_WLAN_D0WOW
/* true only for a D0-WOW enable/disable command whose enable flag is 0 */
static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
{
	wmi_d0_wow_enable_disable_cmd_fixed_param *cmd;

	if (cmd_id == WMI_D0_WOW_ENABLE_DISABLE_CMDID) {
		cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *)
			wmi_buf_data(buf);
		if (!cmd->enable)
			return true;
		else
			return false;
	}

	return false;
}
#else
static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
{
	return false;
}

#endif

#ifdef WMI_INTERFACE_SEQUENCE_CHECK
/* reset tx/completion sequence tracking state */
static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
{
	wmi_handle->wmi_sequence = 0;
	wmi_handle->wmi_exp_sequence = 0;
	wmi_handle->wmi_sequence_stop = false;
}

static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
{
	qdf_spinlock_create(&wmi_handle->wmi_seq_lock);
	wmi_interface_sequence_reset(wmi_handle);
}

static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
{
	qdf_spinlock_destroy(&wmi_handle->wmi_seq_lock);
}

void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
{
	wmi_handle->wmi_sequence_stop = true;
}

/*
 * Send a packet via HTC and stamp it with the next tx sequence number.
 * The lock spans the send and the stamp so the mark order matches the
 * actual transmit order. On failure the packet is freed and the pending
 * command count is rolled back.
 */
static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
					  HTC_PACKET *pkt,
					  const char *func, uint32_t line)
{
	wmi_buf_t buf = GET_HTC_PACKET_NET_BUF_CONTEXT(pkt);
	QDF_STATUS status;

	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
	if (QDF_STATUS_SUCCESS != status) {
		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
		qdf_atomic_dec(&wmi_handle->pending_cmds);
		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
			     func, line, status);
		qdf_mem_free(pkt);
		return status;
	}
	/* Record the sequence number in the SKB */
	qdf_nbuf_set_mark(buf, wmi_handle->wmi_sequence);
	/* Increment the sequence number; wmi_max_cmds is assumed to be a
	 * power of two so the mask wraps the counter
	 */
	wmi_handle->wmi_sequence = (wmi_handle->wmi_sequence + 1)
			& (wmi_handle->wmi_max_cmds - 1);
	qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);

	return status;
}

/*
 * Verify that tx completions arrive in the same order commands were sent;
 * a mismatch indicates a dropped/reordered completion and triggers recovery.
 */
static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
						wmi_buf_t buf)
{
	/* Skip sequence check when wmi sequence stop is set */
	if (wmi_handle->wmi_sequence_stop)
		return;

	qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
	/* Match the completion sequence and expected sequence number */
	if (qdf_nbuf_get_mark(buf) != wmi_handle->wmi_exp_sequence) {
		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
		wmi_nofl_err("WMI Tx Completion Sequence number mismatch");
		wmi_nofl_err("Expected %d Received %d",
			     wmi_handle->wmi_exp_sequence,
			     qdf_nbuf_get_mark(buf));
		/* Trigger Recovery */
		qdf_trigger_self_recovery(wmi_handle->soc,
					  QDF_WMI_BUF_SEQUENCE_MISMATCH);
	} else {
		/* Increment the expected sequence number */
		wmi_handle->wmi_exp_sequence =
				(wmi_handle->wmi_exp_sequence + 1)
				& (wmi_handle->wmi_max_cmds - 1);
		qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
	}
}
#else
/* No-op stubs when sequence checking is compiled out */
static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
{
}

static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
{
}

static inline void wmi_interface_sequence_deinit(struct
						 wmi_unified *wmi_handle)
{
}

void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
{
}

/* Non-sequence-checking variant: plain htc_send_pkt with error cleanup */
static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
					  HTC_PACKET *pkt,
					  const char *func, uint32_t line)
{
	QDF_STATUS status;

	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
	if (QDF_STATUS_SUCCESS != status) {
		qdf_atomic_dec(&wmi_handle->pending_cmds);
		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
			     func, line, status);
		qdf_mem_free(pkt);
		return status;
	}

	return status;
}

static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
						wmi_buf_t buf)
{
}
#endif

/* Dump endpoint/queue/target diagnostics when the pending-cmd cap is hit */
static inline void wmi_unified_debug_dump(wmi_unified_t wmi_handle)
{
	wmi_nofl_err("Endpoint ID = %d, Tx Queue Depth = %d, soc_id = %u, target type = %s",
		     wmi_handle->wmi_endpoint_id,
		     htc_get_tx_queue_depth(wmi_handle->htc_handle,
					    wmi_handle->wmi_endpoint_id),
		     wmi_handle->soc->soc_idx,
		     (wmi_handle->target_type ==
		      WMI_TLV_TARGET ? "WMI_TLV_TARGET" :
				       "WMI_NON_TLV_TARGET"));
}

#ifdef SYSTEM_PM_CHECK
/**
 * wmi_set_system_pm_pkt_tag() - API to set tag for system pm packets
 * @htc_tag: HTC tag
 * @buf: wmi cmd buffer
 * @cmd_id: cmd id
 *
 * Return: None
 */
static void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
				      uint32_t cmd_id)
{
	switch (cmd_id) {
	case WMI_WOW_ENABLE_CMDID:
	case WMI_PDEV_SUSPEND_CMDID:
		*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
		break;
	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
	case WMI_PDEV_RESUME_CMDID:
		*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
		break;
	case WMI_D0_WOW_ENABLE_DISABLE_CMDID:
		/* D0-WOW disable is effectively a resume, enable a suspend */
		if (wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id))
			*htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
		else
			*htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
		break;
	default:
		break;
	}
}
#else
static inline void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
					     uint32_t cmd_id)
{
}
#endif

/**
 * wmi_unified_cmd_send_fl() - send a WMI command to the target
 * @wmi_handle: wmi context
 * @buf: command payload buffer; a WMI_CMD_HDR is prepended here.
 *       Ownership transfers to HTC only on success; on any error
 *       return the caller still owns and must free @buf.
 * @len: payload length, excluding WMI_CMD_HDR
 * @cmd_id: WMI command id
 * @func: caller function name, for logging
 * @line: caller line number, for logging
 *
 * Rejects sends while the target is suspended (unless the command is
 * part of the resume sequence), while WMI stop is in progress, or
 * after a WOW-enable-ack failure; validates TLV parameters for TLV
 * targets; enforces the wmi_max_cmds pending-command cap (triggering
 * self-recovery when exceeded); then queues the packet via
 * wmi_htc_send_pkt().
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf,
				   uint32_t len, uint32_t cmd_id,
				   const char *func, uint32_t line)
{
	HTC_PACKET *pkt;
	uint16_t htc_tag = 0;
	bool rtpm_inprogress;

	rtpm_inprogress = wmi_get_runtime_pm_inprogress(wmi_handle);
	if (rtpm_inprogress) {
		htc_tag = wmi_handle->ops->wmi_set_htc_tx_tag(wmi_handle, buf,
							      cmd_id);
	} else if (qdf_atomic_read(&wmi_handle->is_target_suspended) &&
		   !wmi_is_pm_resume_cmd(cmd_id) &&
		   !wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id)) {
		wmi_nofl_err("Target is suspended (via %s:%u)",
			     func, line);
		qdf_trigger_self_recovery(wmi_handle->soc->wmi_psoc,
					  QDF_WMI_CMD_SENT_DURING_SUSPEND);
		return QDF_STATUS_E_BUSY;
	}

	if (wmi_handle->wmi_stopinprogress) {
		wmi_nofl_err("%s:%d, WMI stop in progress, wmi_handle:%pK",
			     func, line, wmi_handle);
		return QDF_STATUS_E_INVAL;
	}

	if (wmi_has_wow_enable_ack_failed(wmi_handle)) {
		wmi_nofl_err("wow enable ack already failed(via %s:%u)",
			     func, line);
		return QDF_STATUS_E_INVAL;
	}

#ifndef WMI_NON_TLV_SUPPORT
	/* Do sanity check on the TLV parameter structure */
	if (wmi_handle->target_type == WMI_TLV_TARGET) {
		void *buf_ptr = (void *)qdf_nbuf_data(buf);

		if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id)
			!= 0) {
			wmi_nofl_err("%s:%d, Invalid WMI Param Buffer for Cmd:%d",
				     func, line, cmd_id);
			return QDF_STATUS_E_INVAL;
		}
	}
#endif

	/* Prepend the WMI command header in the buffer's headroom */
	if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
		wmi_nofl_err("%s:%d, Failed to send cmd %x, no memory",
			     func, line, cmd_id);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);

	/* Take a pending-cmd ref; released on error paths below or on
	 * tx failure/completion by wmi_htc_send_pkt()/completion path
	 */
	qdf_atomic_inc(&wmi_handle->pending_cmds);
	if (qdf_atomic_read(&wmi_handle->pending_cmds) >=
			wmi_handle->wmi_max_cmds) {
		wmi_dump_last_cmd_rec_info(wmi_handle);
		wmi_nofl_err("hostcredits = %d",
			     wmi_get_host_credits(wmi_handle));
		htc_dump_counter_info(wmi_handle->htc_handle);
		qdf_atomic_dec(&wmi_handle->pending_cmds);
		wmi_nofl_err("%s:%d, MAX %d WMI Pending cmds reached",
			     func, line, wmi_handle->wmi_max_cmds);
		wmi_unified_debug_dump(wmi_handle);
		htc_ce_tasklet_debug_dump(wmi_handle->htc_handle);
		qdf_trigger_self_recovery(wmi_handle->soc->wmi_psoc,
					  QDF_WMI_EXCEED_MAX_PENDING_CMDS);
		return QDF_STATUS_E_BUSY;
	}

	pkt = qdf_mem_malloc_fl(sizeof(*pkt), func, line);
	if (!pkt) {
		qdf_atomic_dec(&wmi_handle->pending_cmds);
		return QDF_STATUS_E_NOMEM;
	}

	if (!rtpm_inprogress)
		wmi_set_system_pm_pkt_tag(&htc_tag, buf, cmd_id);

	SET_HTC_PACKET_INFO_TX(pkt,
			       NULL,
			       qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
			       wmi_handle->wmi_endpoint_id, htc_tag);

	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);
	wmi_log_cmd_id(cmd_id, htc_tag);
	wmi_ext_dbg_msg_cmd_record(wmi_handle,
				   qdf_nbuf_data(buf), qdf_nbuf_len(buf));
#ifdef WMI_INTERFACE_EVENT_LOGGING
	if (wmi_handle->log_info.wmi_logging_enable) {
		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
		/*
		 * Record 16 bytes of WMI cmd data -
		 * exclude TLV and WMI headers
		 *
		 * WMI mgmt command already recorded in wmi_mgmt_cmd_record
		 */
		if (wmi_handle->ops->is_management_record(cmd_id) == false) {
			uint8_t *tmpbuf = (uint8_t *)qdf_nbuf_data(buf) +
				wmi_handle->soc->buf_offset_command;

			WMI_COMMAND_RECORD(wmi_handle, cmd_id, tmpbuf);
			wmi_specific_cmd_record(wmi_handle, cmd_id, tmpbuf);
		}

		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
	}
#endif
	return wmi_htc_send_pkt(wmi_handle, pkt, func, line);
}
qdf_export_symbol(wmi_unified_cmd_send_fl);

/**
 * wmi_unified_get_event_handler_ix() - gives event handler's index
 * @wmi_handle: handle to wmi
 * @event_id: wmi event id
 *
 * Return: event handler's index, or -1 when not registered
 */
static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
					    uint32_t event_id)
{
	uint32_t idx = 0;
	int32_t invalid_idx = -1;
	struct wmi_soc *soc = wmi_handle->soc;

	for (idx = 0; (idx < soc->max_event_idx &&
		       idx < WMI_UNIFIED_MAX_EVENT); ++idx) {
		if (wmi_handle->event_id[idx] == event_id &&
		    wmi_handle->event_handler[idx]) {
			return idx;
		}
	}

	return invalid_idx;
}

/**
 * wmi_register_event_handler_with_ctx() - register event handler with
 * exec ctx and buffer type
 * @wmi_handle: handle to wmi
 * @event_id: wmi event id
 * @handler_func: wmi event handler function
 * @rx_ctx: rx
 * execution context for wmi rx events
 * @rx_buf_type: rx buffer type (raw or processed) passed to the handler
 *
 * Return: QDF_STATUS_SUCCESS on successful register event else failure.
 */
static QDF_STATUS
wmi_register_event_handler_with_ctx(wmi_unified_t wmi_handle,
				    uint32_t event_id,
				    wmi_unified_event_handler handler_func,
				    enum wmi_rx_exec_ctx rx_ctx,
				    enum wmi_rx_buff_type rx_buf_type)
{
	uint32_t idx = 0;
	uint32_t evt_id;
	struct wmi_soc *soc;

	if (!wmi_handle) {
		wmi_err("WMI handle is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc = wmi_handle->soc;

	/* event_id is the unified (abstract) id; bounds-check it first */
	if (event_id >= wmi_events_max) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
			  "%s: Event id %d is unavailable",
			  __func__, event_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Event id %d is not supported",
			  __func__, event_id);
		return QDF_STATUS_E_NOSUPPORT;
	}
	/* Map the unified id to the target's concrete event id */
	evt_id = wmi_handle->wmi_events[event_id];

	if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) {
		wmi_info("event handler already registered 0x%x", evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
		wmi_err("no more event handlers 0x%x",
			evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
		  "Registered event handler for event 0x%8x", evt_id);
	idx = soc->max_event_idx;
	wmi_handle->event_handler[idx] = handler_func;
	wmi_handle->event_id[idx] = evt_id;

	/* ctx[] entries are read under ctx_lock by the rx path */
	qdf_spin_lock_bh(&soc->ctx_lock);
	wmi_handle->ctx[idx].exec_ctx = rx_ctx;
	wmi_handle->ctx[idx].buff_type = rx_buf_type;
	qdf_spin_unlock_bh(&soc->ctx_lock);
	soc->max_event_idx++;

	return QDF_STATUS_SUCCESS;
}

/* Register with default UMAC exec context and processed-buffer type */
QDF_STATUS
wmi_unified_register_event(wmi_unified_t wmi_handle,
			   uint32_t event_id,
			   wmi_unified_event_handler handler_func)
{
	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
						   handler_func,
						   WMI_RX_UMAC_CTX,
						   WMI_RX_PROCESSED_BUFF);
}

/* Register with caller-chosen exec context, processed-buffer type */
QDF_STATUS
wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
				   wmi_conv_event_id event_id,
				   wmi_unified_event_handler handler_func,
				   uint8_t rx_ctx)
{
	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
						   handler_func, rx_ctx,
						   WMI_RX_PROCESSED_BUFF);
}

qdf_export_symbol(wmi_unified_register_event_handler);

/* Register with caller-chosen exec context, raw-buffer type */
QDF_STATUS
wmi_unified_register_raw_event_handler(wmi_unified_t wmi_handle,
				       wmi_conv_event_id event_id,
				       wmi_unified_event_handler handler_func,
				       enum wmi_rx_exec_ctx rx_ctx)
{
	return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
						   handler_func, rx_ctx,
						   WMI_RX_RAW_BUFF);
}

qdf_export_symbol(wmi_unified_register_raw_event_handler);

/**
 * wmi_unified_unregister_event() - unregister an event handler
 * @wmi_handle: handle to wmi
 * @event_id: unified wmi event id
 *
 * The freed slot is back-filled with the last registered entry so the
 * handler table stays dense.
 *
 * Return: QDF_STATUS_SUCCESS on success else failure
 */
QDF_STATUS wmi_unified_unregister_event(wmi_unified_t wmi_handle,
					uint32_t event_id)
{
	uint32_t idx = 0;
	uint32_t evt_id;
	struct wmi_soc *soc;

	if (!wmi_handle) {
		wmi_err("WMI handle is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc = wmi_handle->soc;
	if (event_id >= wmi_events_max ||
	    wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
			  "%s: Event id %d is unavailable",
			  __func__, event_id);
		return QDF_STATUS_E_FAILURE;
	}
	evt_id = wmi_handle->wmi_events[event_id];

	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
	if (idx == -1) {
		wmi_warn("event handler is not registered: evt id 0x%x",
			 evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	wmi_handle->event_handler[idx] = NULL;
	wmi_handle->event_id[idx] = 0;
	/* Compact the table: move the last entry into the freed slot */
	--soc->max_event_idx;
	wmi_handle->event_handler[idx] =
		wmi_handle->event_handler[soc->max_event_idx];
	wmi_handle->event_id[idx] =
		wmi_handle->event_id[soc->max_event_idx];

	qdf_spin_lock_bh(&soc->ctx_lock);

	wmi_handle->ctx[idx].exec_ctx =
		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
	wmi_handle->ctx[idx].buff_type =
		wmi_handle->ctx[soc->max_event_idx].buff_type;

	qdf_spin_unlock_bh(&soc->ctx_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_unified_unregister_event_handler() - unregister an event handler
 * @wmi_handle: handle to wmi
 * @event_id: unified wmi event id
 *
 * Same compaction scheme as wmi_unified_unregister_event(); kept as a
 * separate exported entry point with distinct logging.
 *
 * Return: QDF_STATUS_SUCCESS on success else failure
 */
QDF_STATUS wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
						wmi_conv_event_id event_id)
{
	uint32_t idx = 0;
	uint32_t evt_id;
	struct wmi_soc *soc;

	if (!wmi_handle) {
		wmi_err("WMI handle is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc = wmi_handle->soc;

	if (event_id >= wmi_events_max) {
		wmi_err("Event id %d is unavailable", event_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
		wmi_debug("Event id %d is not supported", event_id);
		return QDF_STATUS_E_NOSUPPORT;
	}

	evt_id = wmi_handle->wmi_events[event_id];

	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
	if (idx == -1) {
		wmi_err("event handler is not registered: evt id 0x%x",
			evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	wmi_handle->event_handler[idx] = NULL;
	wmi_handle->event_id[idx] = 0;
	/* Compact the table: move the last entry into the freed slot */
	--soc->max_event_idx;
	wmi_handle->event_handler[idx] =
		wmi_handle->event_handler[soc->max_event_idx];
	wmi_handle->event_id[idx] =
		wmi_handle->event_id[soc->max_event_idx];

	qdf_spin_lock_bh(&soc->ctx_lock);

	wmi_handle->ctx[idx].exec_ctx =
		wmi_handle->ctx[soc->max_event_idx].exec_ctx;
	wmi_handle->ctx[idx].buff_type =
		wmi_handle->ctx[soc->max_event_idx].buff_type;

	qdf_spin_unlock_bh(&soc->ctx_lock);

	return
QDF_STATUS_SUCCESS; 2418 } 2419 qdf_export_symbol(wmi_unified_unregister_event_handler); 2420 2421 static void 2422 wmi_process_rx_diag_event_worker_thread_ctx(struct wmi_unified *wmi_handle, 2423 void *evt_buf) 2424 { 2425 uint32_t num_diag_events_pending; 2426 2427 qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock); 2428 if (RX_DIAG_WQ_MAX_SIZE > 0) { 2429 num_diag_events_pending = qdf_nbuf_queue_len( 2430 &wmi_handle->diag_event_queue); 2431 2432 if (num_diag_events_pending >= RX_DIAG_WQ_MAX_SIZE) { 2433 qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock); 2434 wmi_handle->wmi_rx_diag_events_dropped++; 2435 wmi_debug_rl("Rx diag events dropped count: %d", 2436 wmi_handle->wmi_rx_diag_events_dropped); 2437 qdf_nbuf_free(evt_buf); 2438 return; 2439 } 2440 } 2441 2442 qdf_nbuf_queue_add(&wmi_handle->diag_event_queue, evt_buf); 2443 qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock); 2444 qdf_queue_work(0, wmi_handle->wmi_rx_diag_work_queue, 2445 &wmi_handle->rx_diag_event_work); 2446 } 2447 2448 void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle, 2449 void *evt_buf) 2450 { 2451 2452 qdf_spin_lock_bh(&wmi_handle->eventq_lock); 2453 qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf); 2454 qdf_spin_unlock_bh(&wmi_handle->eventq_lock); 2455 qdf_queue_work(0, wmi_handle->wmi_rx_work_queue, 2456 &wmi_handle->rx_event_work); 2457 2458 return; 2459 } 2460 2461 qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx); 2462 2463 uint32_t wmi_critical_events_in_flight(struct wmi_unified *wmi) 2464 { 2465 return qdf_atomic_read(&wmi->critical_events_in_flight); 2466 } 2467 2468 static bool 2469 wmi_is_event_critical(struct wmi_unified *wmi_handle, uint32_t event_id) 2470 { 2471 if (wmi_handle->wmi_events[wmi_roam_synch_event_id] == event_id) 2472 return true; 2473 2474 return false; 2475 } 2476 2477 static QDF_STATUS wmi_discard_fw_event(struct scheduler_msg *msg) 2478 { 2479 struct wmi_process_fw_event_params *event_param; 2480 2481 if (!msg->bodyptr) 
2482 return QDF_STATUS_E_INVAL; 2483 2484 event_param = (struct wmi_process_fw_event_params *)msg->bodyptr; 2485 qdf_nbuf_free(event_param->evt_buf); 2486 qdf_mem_free(msg->bodyptr); 2487 msg->bodyptr = NULL; 2488 msg->bodyval = 0; 2489 msg->type = 0; 2490 2491 return QDF_STATUS_SUCCESS; 2492 } 2493 2494 static QDF_STATUS wmi_process_fw_event_handler(struct scheduler_msg *msg) 2495 { 2496 struct wmi_process_fw_event_params *params = 2497 (struct wmi_process_fw_event_params *)msg->bodyptr; 2498 struct wmi_unified *wmi_handle; 2499 uint32_t event_id; 2500 2501 wmi_handle = (struct wmi_unified *)params->wmi_handle; 2502 event_id = WMI_GET_FIELD(qdf_nbuf_data(params->evt_buf), 2503 WMI_CMD_HDR, COMMANDID); 2504 wmi_process_fw_event(wmi_handle, params->evt_buf); 2505 2506 if (wmi_is_event_critical(wmi_handle, event_id)) 2507 qdf_atomic_dec(&wmi_handle->critical_events_in_flight); 2508 2509 qdf_mem_free(msg->bodyptr); 2510 2511 return QDF_STATUS_SUCCESS; 2512 } 2513 2514 /** 2515 * wmi_process_fw_event_sched_thread_ctx() - common event handler to serialize 2516 * event processing through scheduler thread 2517 * @wmi: wmi context 2518 * @ev: event buffer 2519 * 2520 * Return: 0 on success, errno on failure 2521 */ 2522 static QDF_STATUS 2523 wmi_process_fw_event_sched_thread_ctx(struct wmi_unified *wmi, 2524 void *ev) 2525 { 2526 struct wmi_process_fw_event_params *params_buf; 2527 struct scheduler_msg msg = { 0 }; 2528 uint32_t event_id; 2529 2530 params_buf = qdf_mem_malloc(sizeof(struct wmi_process_fw_event_params)); 2531 if (!params_buf) { 2532 wmi_err("malloc failed"); 2533 qdf_nbuf_free(ev); 2534 return QDF_STATUS_E_NOMEM; 2535 } 2536 2537 params_buf->wmi_handle = wmi; 2538 params_buf->evt_buf = ev; 2539 2540 event_id = WMI_GET_FIELD(qdf_nbuf_data(params_buf->evt_buf), 2541 WMI_CMD_HDR, COMMANDID); 2542 if (wmi_is_event_critical(wmi, event_id)) 2543 qdf_atomic_inc(&wmi->critical_events_in_flight); 2544 2545 msg.bodyptr = params_buf; 2546 msg.bodyval = 0; 2547 
msg.callback = wmi_process_fw_event_handler; 2548 msg.flush_callback = wmi_discard_fw_event; 2549 2550 if (QDF_STATUS_SUCCESS != 2551 scheduler_post_message(QDF_MODULE_ID_TARGET_IF, 2552 QDF_MODULE_ID_TARGET_IF, 2553 QDF_MODULE_ID_TARGET_IF, &msg)) { 2554 qdf_nbuf_free(ev); 2555 qdf_mem_free(params_buf); 2556 return QDF_STATUS_E_FAULT; 2557 } 2558 2559 return QDF_STATUS_SUCCESS; 2560 } 2561 2562 /** 2563 * wmi_get_pdev_ep: Get wmi handle based on endpoint 2564 * @soc: handle to wmi soc 2565 * @ep: endpoint id 2566 * 2567 * Return: none 2568 */ 2569 static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc, 2570 HTC_ENDPOINT_ID ep) 2571 { 2572 uint32_t i; 2573 2574 for (i = 0; i < WMI_MAX_RADIOS; i++) 2575 if (soc->wmi_endpoint_id[i] == ep) 2576 break; 2577 2578 if (i == WMI_MAX_RADIOS) 2579 return NULL; 2580 2581 return soc->wmi_pdev[i]; 2582 } 2583 2584 /** 2585 * wmi_mtrace_rx() - Wrappper function for qdf_mtrace api 2586 * @message_id: 32-Bit Wmi message ID 2587 * @vdev_id: Vdev ID 2588 * @data: Actual message contents 2589 * 2590 * This function converts the 32-bit WMI message ID in 15-bit message ID 2591 * format for qdf_mtrace as in qdf_mtrace message there are only 15 2592 * bits reserved for message ID. 2593 * out of these 15-bits, 8-bits (From LSB) specifies the WMI_GRP_ID 2594 * and remaining 7-bits specifies the actual WMI command. With this 2595 * notation there can be maximum 256 groups and each group can have 2596 * max 128 commands can be supported. 
2597 * 2598 * Return: None 2599 */ 2600 static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data) 2601 { 2602 uint16_t mtrace_message_id; 2603 2604 mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) | 2605 (QDF_WMI_MTRACE_GRP_ID(message_id) << 2606 QDF_WMI_MTRACE_CMD_NUM_BITS); 2607 qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA, 2608 mtrace_message_id, vdev_id, data); 2609 } 2610 2611 /** 2612 * wmi_process_control_rx() - process fw events callbacks 2613 * @wmi_handle: handle to wmi_unified 2614 * @evt_buf: handle to wmi_buf_t 2615 * 2616 * Return: none 2617 */ 2618 static void wmi_process_control_rx(struct wmi_unified *wmi_handle, 2619 wmi_buf_t evt_buf) 2620 { 2621 struct wmi_soc *soc = wmi_handle->soc; 2622 uint32_t id; 2623 uint32_t idx; 2624 enum wmi_rx_exec_ctx exec_ctx; 2625 2626 id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID); 2627 idx = wmi_unified_get_event_handler_ix(wmi_handle, id); 2628 if (qdf_unlikely(idx == A_ERROR)) { 2629 wmi_debug("no handler registered for event id 0x%x", id); 2630 qdf_nbuf_free(evt_buf); 2631 return; 2632 } 2633 wmi_mtrace_rx(id, 0xFF, idx); 2634 qdf_spin_lock_bh(&soc->ctx_lock); 2635 exec_ctx = wmi_handle->ctx[idx].exec_ctx; 2636 qdf_spin_unlock_bh(&soc->ctx_lock); 2637 2638 #ifdef WMI_INTERFACE_EVENT_LOGGING 2639 if (wmi_handle->log_info.wmi_logging_enable) { 2640 uint8_t *data; 2641 data = qdf_nbuf_data(evt_buf); 2642 2643 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); 2644 /* Exclude 4 bytes of TLV header */ 2645 if (wmi_handle->ops->is_diag_event(id)) { 2646 WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id, 2647 ((uint8_t *) data + 2648 wmi_handle->soc->buf_offset_event)); 2649 } else if (wmi_handle->ops->is_management_record(id)) { 2650 WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id, 2651 ((uint8_t *) data + 2652 wmi_handle->soc->buf_offset_event)); 2653 } else { 2654 WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data + 2655 wmi_handle->soc->buf_offset_event)); 2656 } 2657 
qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); 2658 } 2659 #endif 2660 2661 if (exec_ctx == WMI_RX_WORK_CTX) { 2662 wmi_process_fw_event_worker_thread_ctx(wmi_handle, evt_buf); 2663 } else if (exec_ctx == WMI_RX_TASKLET_CTX) { 2664 wmi_process_fw_event(wmi_handle, evt_buf); 2665 } else if (exec_ctx == WMI_RX_SERIALIZER_CTX) { 2666 wmi_process_fw_event_sched_thread_ctx(wmi_handle, evt_buf); 2667 } else if (exec_ctx == WMI_RX_DIAG_WORK_CTX) { 2668 wmi_process_rx_diag_event_worker_thread_ctx(wmi_handle, 2669 evt_buf); 2670 } else { 2671 wmi_err("Invalid event context %d", exec_ctx); 2672 qdf_nbuf_free(evt_buf); 2673 } 2674 2675 } 2676 2677 /** 2678 * wmi_control_rx() - process fw events callbacks 2679 * @ctx: handle to wmi 2680 * @htc_packet: pointer to htc packet 2681 * 2682 * Return: none 2683 */ 2684 static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet) 2685 { 2686 struct wmi_soc *soc = (struct wmi_soc *)ctx; 2687 struct wmi_unified *wmi_handle; 2688 wmi_buf_t evt_buf; 2689 2690 evt_buf = (wmi_buf_t)htc_packet->pPktContext; 2691 2692 wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint); 2693 if (!wmi_handle) { 2694 wmi_err("unable to get wmi_handle to Endpoint %d", 2695 htc_packet->Endpoint); 2696 qdf_nbuf_free(evt_buf); 2697 return; 2698 } 2699 2700 wmi_process_control_rx(wmi_handle, evt_buf); 2701 } 2702 2703 #if defined(WLAN_FEATURE_WMI_DIAG_OVER_CE7) || \ 2704 defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE) 2705 /** 2706 * wmi_control_diag_rx() - process diag fw events callbacks 2707 * @ctx: handle to wmi 2708 * @htc_packet: pointer to htc packet 2709 * 2710 * Return: none 2711 */ 2712 static void wmi_control_diag_rx(void *ctx, HTC_PACKET *htc_packet) 2713 { 2714 struct wmi_soc *soc = (struct wmi_soc *)ctx; 2715 struct wmi_unified *wmi_handle; 2716 wmi_buf_t evt_buf; 2717 2718 evt_buf = (wmi_buf_t)htc_packet->pPktContext; 2719 2720 wmi_handle = soc->wmi_pdev[0]; 2721 2722 if (!wmi_handle) { 2723 wmi_err("unable to get wmi_handle for diag 
event end point id:%d", htc_packet->Endpoint); 2724 qdf_nbuf_free(evt_buf); 2725 return; 2726 } 2727 2728 wmi_process_control_rx(wmi_handle, evt_buf); 2729 } 2730 #endif 2731 2732 #if defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE) 2733 /** 2734 * wmi_control_dbr_rx() - process dbr fw events callbacks 2735 * @ctx: handle to wmi 2736 * @htc_packet: pointer to htc packet 2737 * 2738 * Return: none 2739 */ 2740 static void wmi_control_dbr_rx(void *ctx, HTC_PACKET *htc_packet) 2741 { 2742 struct wmi_soc *soc = (struct wmi_soc *)ctx; 2743 struct wmi_unified *wmi_handle; 2744 wmi_buf_t evt_buf; 2745 2746 evt_buf = (wmi_buf_t)htc_packet->pPktContext; 2747 wmi_handle = soc->wmi_pdev[0]; 2748 2749 if (!wmi_handle) { 2750 wmi_err("unable to get wmi_handle for dbr event endpoint id:%d", 2751 htc_packet->Endpoint); 2752 qdf_nbuf_free(evt_buf); 2753 return; 2754 } 2755 2756 wmi_process_control_rx(wmi_handle, evt_buf); 2757 } 2758 #endif 2759 2760 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI 2761 QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle, 2762 wmi_buf_t buf, uint32_t buflen, 2763 uint32_t cmd_id) 2764 { 2765 QDF_STATUS status; 2766 int32_t ret; 2767 2768 if (!qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR))) { 2769 wmi_err("Failed to send cmd %x, no memory", cmd_id); 2770 return QDF_STATUS_E_NOMEM; 2771 } 2772 2773 qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR)); 2774 WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id); 2775 wmi_debug("Sending WMI_CMD_ID: 0x%x over qmi", cmd_id); 2776 status = qdf_wmi_send_recv_qmi(qdf_nbuf_data(buf), 2777 buflen + sizeof(WMI_CMD_HDR), 2778 wmi_handle, 2779 wmi_process_qmi_fw_event); 2780 if (QDF_IS_STATUS_ERROR(status)) { 2781 qdf_nbuf_pull_head(buf, sizeof(WMI_CMD_HDR)); 2782 wmi_warn("WMI send on QMI failed. 
Retrying WMI on HTC"); 2783 } else { 2784 ret = qdf_atomic_inc_return(&wmi_handle->num_stats_over_qmi); 2785 wmi_debug("num stats over qmi: %d", ret); 2786 wmi_buf_free(buf); 2787 } 2788 2789 return status; 2790 } 2791 2792 static int __wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len) 2793 { 2794 struct wmi_unified *wmi_handle = wmi_cb_ctx; 2795 wmi_buf_t evt_buf; 2796 uint32_t evt_id; 2797 2798 if (!wmi_handle || !buf || !len) { 2799 wmi_err_rl("%s is invalid", !wmi_handle ? 2800 "wmi_buf" : !buf ? "buf" : "length"); 2801 return -EINVAL; 2802 } 2803 2804 evt_buf = wmi_buf_alloc(wmi_handle, len); 2805 if (!evt_buf) 2806 return -ENOMEM; 2807 2808 qdf_mem_copy(qdf_nbuf_data(evt_buf), buf, len); 2809 evt_id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID); 2810 wmi_debug("Received WMI_EVT_ID: 0x%x over qmi", evt_id); 2811 wmi_process_control_rx(wmi_handle, evt_buf); 2812 2813 return 0; 2814 } 2815 2816 int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len) 2817 { 2818 struct qdf_op_sync *op_sync; 2819 int ret; 2820 2821 if (qdf_op_protect(&op_sync)) 2822 return -EINVAL; 2823 ret = __wmi_process_qmi_fw_event(wmi_cb_ctx, buf, len); 2824 qdf_op_unprotect(op_sync); 2825 2826 return ret; 2827 } 2828 #endif 2829 2830 void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf) 2831 { 2832 __wmi_control_rx(wmi_handle, evt_buf); 2833 } 2834 2835 void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf) 2836 { 2837 uint32_t id; 2838 uint8_t *data; 2839 uint32_t len; 2840 void *wmi_cmd_struct_ptr = NULL; 2841 #ifndef WMI_NON_TLV_SUPPORT 2842 int tlv_ok_status = 0; 2843 #endif 2844 uint32_t idx = 0; 2845 struct wmi_raw_event_buffer ev_buf; 2846 enum wmi_rx_buff_type ev_buff_type; 2847 2848 id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID); 2849 2850 wmi_ext_dbg_msg_event_record(wmi_handle, qdf_nbuf_data(evt_buf), 2851 qdf_nbuf_len(evt_buf)); 2852 2853 if (qdf_nbuf_pull_head(evt_buf, 
			       sizeof(WMI_CMD_HDR)) == NULL)
		goto end;

	data = qdf_nbuf_data(evt_buf);
	len = qdf_nbuf_len(evt_buf);

#ifndef WMI_NON_TLV_SUPPORT
	if (wmi_handle->target_type == WMI_TLV_TARGET) {
		/* Validate and pad(if necessary) the TLVs */
		tlv_ok_status =
			wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle,
								 data, len, id,
								 &wmi_cmd_struct_ptr);
		if (tlv_ok_status != 0) {
			QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
				  "%s: Error: id=0x%x, wmitlv check status=%d",
				  __func__, id, tlv_ok_status);
			goto end;
		}
	}
#endif

	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
	if (idx == A_ERROR) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
			  "%s : event handler is not registered: event id 0x%x",
			  __func__, id);
		goto end;
	}
#ifdef WMI_INTERFACE_EVENT_LOGGING
	if (wmi_handle->log_info.wmi_logging_enable) {
		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
		/* Exclude 4 bytes of TLV header */
		if (wmi_handle->ops->is_diag_event(id)) {
			/*
			 * skip diag event logging in WMI event buffer
			 * as its already logged in WMI RX event buffer
			 */
		} else if (wmi_handle->ops->is_management_record(id)) {
			/*
			 * skip wmi mgmt event logging in WMI event buffer
			 * as its already logged in WMI RX event buffer
			 */
		} else {
			uint8_t *tmpbuf = (uint8_t *)data +
				wmi_handle->soc->buf_offset_event;

			WMI_EVENT_RECORD(wmi_handle, id, tmpbuf);
			wmi_specific_evt_record(wmi_handle, id, tmpbuf);
		}
		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
	}
#endif
	/* Call the WMI registered event handler */
	if (wmi_handle->target_type == WMI_TLV_TARGET) {
		/* TLV targets may hand the handler either the parsed
		 * struct or a raw+parsed pair, per registered buff_type
		 */
		ev_buff_type = wmi_handle->ctx[idx].buff_type;
		if (ev_buff_type == WMI_RX_PROCESSED_BUFF) {
			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
				wmi_cmd_struct_ptr, len);
		} else if (ev_buff_type == WMI_RX_RAW_BUFF) {
			ev_buf.evt_raw_buf = data;
			ev_buf.evt_processed_buf = wmi_cmd_struct_ptr;
			wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
				(void *)&ev_buf, len);
		}
	}
	else
		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
						data, len);

end:
	/* Free event buffer and allocated event tlv */
#ifndef WMI_NON_TLV_SUPPORT
	if (wmi_handle->target_type == WMI_TLV_TARGET)
		wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr);
#endif

	qdf_nbuf_free(evt_buf);

}

#define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */

/* Log that a WMI work item exceeded the watchdog budget */
static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id)
{
	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
		  "%s: WLAN_BUG_RCA: Message type %x has exceeded its allotted time of %ds",
		  __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000);
}

#ifdef CONFIG_SLUB_DEBUG_ON
/* Debug builds: dump the stuck task's trace and assert */
static void wmi_workqueue_watchdog_bite(void *arg)
{
	struct wmi_wq_dbg_info *info = arg;

	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
	qdf_print_thread_trace(info->task);

	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
		  "%s: Going down for WMI WQ Watchdog Bite!", __func__);
	QDF_BUG(0);
}
#else
/* Production builds: warn and dump the trace, but do not assert */
static inline void wmi_workqueue_watchdog_bite(void *arg)
{
	struct wmi_wq_dbg_info *info = arg;

	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);

	qdf_print_thread_trace(info->task);
}
#endif

/**
 * wmi_rx_event_work() - process rx event in rx work queue context
 * @arg: opaque pointer to wmi handle
 *
 * This function process any fw event to serialize it through rx worker thread.
 *
 * Return: none
 */
static void wmi_rx_event_work(void *arg)
{
	wmi_buf_t buf;
	struct wmi_unified *wmi = arg;
	qdf_timer_t wd_timer;
	struct wmi_wq_dbg_info info;

	/* initialize WMI workqueue watchdog timer */
	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
		       &info, QDF_TIMER_TYPE_SW);
	/* Drain the event queue one buffer at a time; the lock is held only
	 * around the dequeue so event handlers run without it.
	 */
	qdf_spin_lock_bh(&wmi->eventq_lock);
	buf = qdf_nbuf_queue_remove(&wmi->event_queue);
	qdf_spin_unlock_bh(&wmi->eventq_lock);
	while (buf) {
		/* Arm the watchdog around each handler invocation */
		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
		info.wd_msg_type_id =
		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
		info.wmi_wq = wmi->wmi_rx_work_queue;
		info.task = qdf_get_current_task();
		__wmi_control_rx(wmi, buf);
		qdf_timer_stop(&wd_timer);
		qdf_spin_lock_bh(&wmi->eventq_lock);
		buf = qdf_nbuf_queue_remove(&wmi->event_queue);
		qdf_spin_unlock_bh(&wmi->eventq_lock);
	}
	qdf_timer_free(&wd_timer);
}

/**
 * wmi_rx_diag_event_work() - process rx diag event in work queue context
 * @arg: opaque pointer to wmi handle
 *
 * This function process fw diag event to serialize it through rx worker thread.
 *
 * Return: none
 */
static void wmi_rx_diag_event_work(void *arg)
{
	wmi_buf_t buf;
	struct wmi_unified *wmi = arg;
	qdf_timer_t wd_timer;
	struct wmi_wq_dbg_info info;
	uint32_t diag_event_process_count = 0;

	if (!wmi) {
		wmi_err("Invalid WMI handle");
		return;
	}

	/* initialize WMI workqueue watchdog timer */
	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
		       &info, QDF_TIMER_TYPE_SW);
	qdf_spin_lock_bh(&wmi->diag_eventq_lock);
	buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
	qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
	while (buf) {
		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
		info.wd_msg_type_id =
		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
		info.wmi_wq = NULL;
		info.task = qdf_get_current_task();
		__wmi_control_rx(wmi, buf);
		qdf_timer_stop(&wd_timer);

		/* After processing a bounded batch, re-queue ourselves and
		 * return so this worker does not monopolize the CPU when the
		 * firmware floods diag events.
		 */
		if (diag_event_process_count++ >
		    RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT) {
			qdf_queue_work(0, wmi->wmi_rx_diag_work_queue,
				       &wmi->rx_diag_event_work);
			break;
		}

		qdf_spin_lock_bh(&wmi->diag_eventq_lock);
		buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
		qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
	}
	qdf_timer_free(&wd_timer);
}

#ifdef FEATURE_RUNTIME_PM
/**
 * wmi_runtime_pm_init() - initialize runtime pm wmi variables
 * @wmi_handle: wmi context
 */
static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
{
	qdf_atomic_init(&wmi_handle->runtime_pm_inprogress);
}

/* Record whether a runtime-PM transition is in progress */
void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val)
{
	qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val);
}

/* Query whether a runtime-PM transition is in progress */
bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle)
{
	return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress);
}
#else
/* Stub when runtime PM support is compiled out */
static void wmi_runtime_pm_init(struct wmi_unified
*wmi_handle) 3073 { 3074 } 3075 #endif 3076 3077 void wmi_set_wow_enable_ack_failed(wmi_unified_t wmi_handle) 3078 { 3079 qdf_atomic_set(&wmi_handle->is_wow_enable_ack_failed, 1); 3080 } 3081 3082 void wmi_clear_wow_enable_ack_failed(wmi_unified_t wmi_handle) 3083 { 3084 qdf_atomic_set(&wmi_handle->is_wow_enable_ack_failed, 0); 3085 } 3086 3087 bool wmi_has_wow_enable_ack_failed(wmi_unified_t wmi_handle) 3088 { 3089 return qdf_atomic_read(&wmi_handle->is_wow_enable_ack_failed); 3090 } 3091 3092 void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle) 3093 { 3094 return wmi_handle->soc; 3095 } 3096 3097 /** 3098 * wmi_interface_logging_init: Interface looging init 3099 * @wmi_handle: Pointer to wmi handle object 3100 * @pdev_idx: pdev index 3101 * 3102 * Return: None 3103 */ 3104 #ifdef WMI_INTERFACE_EVENT_LOGGING 3105 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle, 3106 uint32_t pdev_idx) 3107 { 3108 if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) { 3109 qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock); 3110 wmi_debugfs_init(wmi_handle, pdev_idx); 3111 } 3112 } 3113 #else 3114 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle, 3115 uint32_t pdev_idx) 3116 { 3117 } 3118 #endif 3119 3120 static QDF_STATUS wmi_initialize_worker_context(struct wmi_unified *wmi_handle) 3121 { 3122 wmi_handle->wmi_rx_work_queue = 3123 qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue"); 3124 if (!wmi_handle->wmi_rx_work_queue) { 3125 wmi_err("failed to create wmi_rx_event_work_queue"); 3126 return QDF_STATUS_E_RESOURCES; 3127 } 3128 3129 qdf_spinlock_create(&wmi_handle->eventq_lock); 3130 qdf_nbuf_queue_init(&wmi_handle->event_queue); 3131 qdf_create_work(0, &wmi_handle->rx_event_work, 3132 wmi_rx_event_work, wmi_handle); 3133 3134 wmi_handle->wmi_rx_diag_work_queue = 3135 qdf_alloc_unbound_workqueue("wmi_rx_diag_event_work_queue"); 3136 if (!wmi_handle->wmi_rx_diag_work_queue) { 3137 wmi_err("failed to 
create wmi_rx_diag_event_work_queue"); 3138 return QDF_STATUS_E_RESOURCES; 3139 } 3140 qdf_spinlock_create(&wmi_handle->diag_eventq_lock); 3141 qdf_nbuf_queue_init(&wmi_handle->diag_event_queue); 3142 qdf_create_work(0, &wmi_handle->rx_diag_event_work, 3143 wmi_rx_diag_event_work, wmi_handle); 3144 wmi_handle->wmi_rx_diag_events_dropped = 0; 3145 3146 return QDF_STATUS_SUCCESS; 3147 } 3148 3149 void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx) 3150 { 3151 struct wmi_unified *wmi_handle; 3152 QDF_STATUS status; 3153 3154 if (pdev_idx >= WMI_MAX_RADIOS) 3155 return NULL; 3156 3157 if (!soc->wmi_pdev[pdev_idx]) { 3158 wmi_handle = 3159 (struct wmi_unified *) qdf_mem_malloc( 3160 sizeof(struct wmi_unified)); 3161 if (!wmi_handle) 3162 return NULL; 3163 3164 status = wmi_initialize_worker_context(wmi_handle); 3165 if (QDF_IS_STATUS_ERROR(status)) 3166 goto error; 3167 3168 wmi_handle->scn_handle = soc->scn_handle; 3169 wmi_handle->event_id = soc->event_id; 3170 wmi_handle->event_handler = soc->event_handler; 3171 wmi_handle->ctx = soc->ctx; 3172 wmi_handle->ops = soc->ops; 3173 wmi_handle->wmi_events = soc->wmi_events; 3174 wmi_handle->services = soc->services; 3175 wmi_handle->soc = soc; 3176 wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map; 3177 wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map; 3178 wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map; 3179 wmi_handle->evt_phy_id_map = soc->evt_phy_id_map; 3180 wmi_interface_logging_init(wmi_handle, pdev_idx); 3181 qdf_atomic_init(&wmi_handle->pending_cmds); 3182 qdf_atomic_init(&wmi_handle->is_target_suspended); 3183 qdf_atomic_init(&wmi_handle->is_wow_enable_ack_failed); 3184 wmi_handle->target_type = soc->target_type; 3185 wmi_handle->wmi_max_cmds = soc->wmi_max_cmds; 3186 3187 wmi_interface_sequence_init(wmi_handle); 3188 if (wmi_ext_dbgfs_init(wmi_handle, pdev_idx) != 3189 QDF_STATUS_SUCCESS) 3190 wmi_err("Failed to initialize wmi extended debugfs"); 3191 3192 soc->wmi_pdev[pdev_idx] = 
wmi_handle; 3193 } else 3194 wmi_handle = soc->wmi_pdev[pdev_idx]; 3195 3196 wmi_handle->wmi_stopinprogress = 0; 3197 wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx]; 3198 wmi_handle->htc_handle = soc->htc_handle; 3199 wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx]; 3200 wmi_handle->tag_crash_inject = false; 3201 wmi_interface_sequence_reset(wmi_handle); 3202 3203 return wmi_handle; 3204 3205 error: 3206 qdf_mem_free(wmi_handle); 3207 3208 return NULL; 3209 } 3210 qdf_export_symbol(wmi_unified_get_pdev_handle); 3211 3212 static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t); 3213 3214 void wmi_unified_register_module(enum wmi_target_type target_type, 3215 void (*wmi_attach)(wmi_unified_t wmi_handle)) 3216 { 3217 if (target_type < WMI_MAX_TARGET_TYPE) 3218 wmi_attach_register[target_type] = wmi_attach; 3219 3220 return; 3221 } 3222 qdf_export_symbol(wmi_unified_register_module); 3223 3224 /** 3225 * wmi_wbuff_register() - register wmi with wbuff 3226 * @wmi_handle: handle to wmi 3227 * 3228 * Return: void 3229 */ 3230 static void wmi_wbuff_register(struct wmi_unified *wmi_handle) 3231 { 3232 struct wbuff_alloc_request wbuff_alloc[4]; 3233 3234 wbuff_alloc[0].slot = WBUFF_POOL_0; 3235 wbuff_alloc[0].size = WMI_WBUFF_POOL_0_SIZE; 3236 wbuff_alloc[1].slot = WBUFF_POOL_1; 3237 wbuff_alloc[1].size = WMI_WBUFF_POOL_1_SIZE; 3238 wbuff_alloc[2].slot = WBUFF_POOL_2; 3239 wbuff_alloc[2].size = WMI_WBUFF_POOL_2_SIZE; 3240 wbuff_alloc[3].slot = WBUFF_POOL_3; 3241 wbuff_alloc[3].size = WMI_WBUFF_POOL_3_SIZE; 3242 3243 wmi_handle->wbuff_handle = wbuff_module_register(wbuff_alloc, 4, 3244 WMI_MIN_HEAD_ROOM, 4); 3245 } 3246 3247 /** 3248 * wmi_wbuff_deregister() - deregister wmi with wbuff 3249 * @wmi_handle: handle to wmi 3250 * 3251 * Return: void 3252 */ 3253 static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle) 3254 { 3255 wbuff_module_deregister(wmi_handle->wbuff_handle); 3256 wmi_handle->wbuff_handle = NULL; 3257 } 3258 
3259 void *wmi_unified_attach(void *scn_handle, 3260 struct wmi_unified_attach_params *param) 3261 { 3262 struct wmi_unified *wmi_handle; 3263 struct wmi_soc *soc; 3264 QDF_STATUS status; 3265 3266 soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc)); 3267 if (!soc) 3268 return NULL; 3269 3270 wmi_handle = 3271 (struct wmi_unified *) qdf_mem_malloc( 3272 sizeof(struct wmi_unified)); 3273 if (!wmi_handle) { 3274 qdf_mem_free(soc); 3275 return NULL; 3276 } 3277 3278 status = wmi_initialize_worker_context(wmi_handle); 3279 if (QDF_IS_STATUS_ERROR(status)) 3280 goto error; 3281 3282 wmi_handle->soc = soc; 3283 wmi_handle->soc->soc_idx = param->soc_id; 3284 wmi_handle->soc->is_async_ep = param->is_async_ep; 3285 wmi_handle->event_id = soc->event_id; 3286 wmi_handle->event_handler = soc->event_handler; 3287 wmi_handle->ctx = soc->ctx; 3288 wmi_handle->wmi_events = soc->wmi_events; 3289 wmi_handle->services = soc->services; 3290 wmi_handle->scn_handle = scn_handle; 3291 wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map; 3292 wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map; 3293 wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map; 3294 wmi_handle->evt_phy_id_map = soc->evt_phy_id_map; 3295 soc->scn_handle = scn_handle; 3296 wmi_handle->target_type = param->target_type; 3297 soc->target_type = param->target_type; 3298 3299 if (param->target_type >= WMI_MAX_TARGET_TYPE) 3300 goto error; 3301 3302 if (wmi_attach_register[param->target_type]) { 3303 wmi_attach_register[param->target_type](wmi_handle); 3304 } else { 3305 wmi_err("wmi attach is not registered"); 3306 goto error; 3307 } 3308 3309 qdf_atomic_init(&wmi_handle->pending_cmds); 3310 qdf_atomic_init(&wmi_handle->is_target_suspended); 3311 qdf_atomic_init(&wmi_handle->is_target_suspend_acked); 3312 qdf_atomic_init(&wmi_handle->num_stats_over_qmi); 3313 qdf_atomic_init(&wmi_handle->is_wow_enable_ack_failed); 3314 wmi_runtime_pm_init(wmi_handle); 3315 wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0); 
3316 3317 wmi_interface_sequence_init(wmi_handle); 3318 /* Assign target cookie capability */ 3319 wmi_handle->use_cookie = param->use_cookie; 3320 wmi_handle->osdev = param->osdev; 3321 wmi_handle->wmi_stopinprogress = 0; 3322 wmi_handle->wmi_max_cmds = param->max_commands; 3323 soc->wmi_max_cmds = param->max_commands; 3324 /* Increase the ref count once refcount infra is present */ 3325 soc->wmi_psoc = param->psoc; 3326 qdf_spinlock_create(&soc->ctx_lock); 3327 soc->ops = wmi_handle->ops; 3328 soc->wmi_pdev[0] = wmi_handle; 3329 if (wmi_ext_dbgfs_init(wmi_handle, 0) != QDF_STATUS_SUCCESS) 3330 wmi_err("Failed to initialize wmi extended debugfs"); 3331 3332 wmi_wbuff_register(wmi_handle); 3333 3334 wmi_hang_event_notifier_register(wmi_handle); 3335 3336 wmi_minidump_attach(wmi_handle); 3337 3338 return wmi_handle; 3339 3340 error: 3341 qdf_mem_free(soc); 3342 qdf_mem_free(wmi_handle); 3343 3344 return NULL; 3345 } 3346 3347 void wmi_unified_detach(struct wmi_unified *wmi_handle) 3348 { 3349 wmi_buf_t buf; 3350 struct wmi_soc *soc; 3351 uint8_t i; 3352 3353 wmi_minidump_detach(wmi_handle); 3354 3355 wmi_hang_event_notifier_unregister(); 3356 3357 wmi_wbuff_deregister(wmi_handle); 3358 3359 soc = wmi_handle->soc; 3360 for (i = 0; i < WMI_MAX_RADIOS; i++) { 3361 if (soc->wmi_pdev[i]) { 3362 qdf_flush_workqueue(0, 3363 soc->wmi_pdev[i]->wmi_rx_work_queue); 3364 qdf_destroy_workqueue(0, 3365 soc->wmi_pdev[i]->wmi_rx_work_queue); 3366 wmi_debugfs_remove(soc->wmi_pdev[i]); 3367 buf = qdf_nbuf_queue_remove( 3368 &soc->wmi_pdev[i]->event_queue); 3369 while (buf) { 3370 qdf_nbuf_free(buf); 3371 buf = qdf_nbuf_queue_remove( 3372 &soc->wmi_pdev[i]->event_queue); 3373 } 3374 3375 qdf_flush_work(&soc->wmi_pdev[i]->rx_diag_event_work); 3376 buf = qdf_nbuf_queue_remove( 3377 &soc->wmi_pdev[i]->diag_event_queue); 3378 while (buf) { 3379 qdf_nbuf_free(buf); 3380 buf = qdf_nbuf_queue_remove( 3381 &soc->wmi_pdev[i]->diag_event_queue); 3382 } 3383 3384 
			wmi_log_buffer_free(soc->wmi_pdev[i]);

			/* Free events logs list */
			if (soc->wmi_pdev[i]->events_logs_list)
				qdf_mem_free(
					soc->wmi_pdev[i]->events_logs_list);

			qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock);
			qdf_spinlock_destroy(
					&soc->wmi_pdev[i]->diag_eventq_lock);

			wmi_interface_sequence_deinit(soc->wmi_pdev[i]);
			wmi_ext_dbgfs_deinit(soc->wmi_pdev[i]);
			wmi_clear_wow_enable_ack_failed(soc->wmi_pdev[i]);

			qdf_mem_free(soc->wmi_pdev[i]);
		}
	}
	qdf_spinlock_destroy(&soc->ctx_lock);

	/* Service bitmaps are allocated lazily; free whichever exist */
	if (soc->wmi_service_bitmap) {
		qdf_mem_free(soc->wmi_service_bitmap);
		soc->wmi_service_bitmap = NULL;
	}

	if (soc->wmi_ext_service_bitmap) {
		qdf_mem_free(soc->wmi_ext_service_bitmap);
		soc->wmi_ext_service_bitmap = NULL;
	}

	if (soc->wmi_ext2_service_bitmap) {
		qdf_mem_free(soc->wmi_ext2_service_bitmap);
		soc->wmi_ext2_service_bitmap = NULL;
	}

	/* Decrease the ref count once refcount infra is present */
	soc->wmi_psoc = NULL;
	qdf_mem_free(soc);
}

/**
 * wmi_unified_remove_work() - flush the rx workqueues and drop queued events
 * @wmi_handle: wmi context
 *
 * Flushes both the regular and diag rx workqueues, then frees every event
 * buffer still pending on either queue. Unlike wmi_unified_detach(), the
 * workqueues themselves are left usable.
 */
void
wmi_unified_remove_work(struct wmi_unified *wmi_handle)
{
	wmi_buf_t buf;

	qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue);
	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
	buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
	while (buf) {
		qdf_nbuf_free(buf);
		buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
	}
	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);

	/* Remove diag events work */
	qdf_flush_workqueue(0, wmi_handle->wmi_rx_diag_work_queue);
	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
	buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
	while (buf) {
		qdf_nbuf_free(buf);
		buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
	}
	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
}

/**
 *
wmi_htc_tx_complete() - Process htc tx completion 3451 * 3452 * @ctx: handle to wmi 3453 * @htc_pkt: pointer to htc packet 3454 * 3455 * Return: none. 3456 */ 3457 static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt) 3458 { 3459 struct wmi_soc *soc = (struct wmi_soc *) ctx; 3460 wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt); 3461 u_int8_t *buf_ptr; 3462 u_int32_t len; 3463 struct wmi_unified *wmi_handle; 3464 #ifdef WMI_INTERFACE_EVENT_LOGGING 3465 struct wmi_debug_log_info *log_info; 3466 uint32_t cmd_id; 3467 uint8_t *offset_ptr; 3468 qdf_dma_addr_t dma_addr; 3469 uint64_t phy_addr; 3470 #endif 3471 3472 ASSERT(wmi_cmd_buf); 3473 wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint); 3474 if (!wmi_handle) { 3475 wmi_err("Unable to get wmi handle"); 3476 QDF_ASSERT(0); 3477 return; 3478 } 3479 buf_ptr = (u_int8_t *)wmi_buf_data(wmi_cmd_buf); 3480 #ifdef WMI_INTERFACE_EVENT_LOGGING 3481 log_info = &wmi_handle->log_info; 3482 3483 if (wmi_handle && log_info->wmi_logging_enable) { 3484 cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), 3485 WMI_CMD_HDR, COMMANDID); 3486 3487 dma_addr = QDF_NBUF_CB_PADDR(wmi_cmd_buf); 3488 phy_addr = qdf_mem_virt_to_phys(qdf_nbuf_data(wmi_cmd_buf)); 3489 3490 qdf_spin_lock_bh(&log_info->wmi_record_lock); 3491 /* Record 16 bytes of WMI cmd tx complete data 3492 * - exclude TLV and WMI headers 3493 */ 3494 offset_ptr = buf_ptr + wmi_handle->soc->buf_offset_command; 3495 if (wmi_handle->ops->is_management_record(cmd_id)) { 3496 WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id, 3497 offset_ptr); 3498 } else { 3499 WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id, 3500 offset_ptr, dma_addr, 3501 phy_addr); 3502 } 3503 3504 qdf_spin_unlock_bh(&log_info->wmi_record_lock); 3505 } 3506 #endif 3507 3508 wmi_interface_sequence_check(wmi_handle, wmi_cmd_buf); 3509 3510 len = qdf_nbuf_len(wmi_cmd_buf); 3511 qdf_mem_zero(buf_ptr, len); 3512 wmi_buf_free(wmi_cmd_buf); 3513 qdf_mem_free(htc_pkt); 3514 
qdf_atomic_dec(&wmi_handle->pending_cmds); 3515 } 3516 3517 #ifdef FEATURE_RUNTIME_PM 3518 /** 3519 * wmi_htc_log_pkt() - Print information of WMI command from HTC packet 3520 * 3521 * @ctx: handle of WMI context 3522 * @htc_pkt: handle of HTC packet 3523 * 3524 * Return: none 3525 */ 3526 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt) 3527 { 3528 wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt); 3529 uint32_t cmd_id; 3530 3531 ASSERT(wmi_cmd_buf); 3532 cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), WMI_CMD_HDR, 3533 COMMANDID); 3534 3535 wmi_debug("WMI command from HTC packet: %s, ID: %d", 3536 wmi_id_to_name(cmd_id), cmd_id); 3537 } 3538 #else 3539 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt) 3540 { 3541 } 3542 #endif 3543 3544 /** 3545 * wmi_connect_pdev_htc_service() - WMI API to get connect to HTC service 3546 * @soc: handle to WMI SoC 3547 * @pdev_idx: Pdev index 3548 * 3549 * Return: QDF_STATUS 3550 */ 3551 static QDF_STATUS wmi_connect_pdev_htc_service(struct wmi_soc *soc, 3552 uint32_t pdev_idx) 3553 { 3554 QDF_STATUS status; 3555 struct htc_service_connect_resp response; 3556 struct htc_service_connect_req connect; 3557 3558 OS_MEMZERO(&connect, sizeof(connect)); 3559 OS_MEMZERO(&response, sizeof(response)); 3560 3561 /* meta data is unused for now */ 3562 connect.pMetaData = NULL; 3563 connect.MetaDataLength = 0; 3564 /* these fields are the same for all service endpoints */ 3565 connect.EpCallbacks.pContext = soc; 3566 connect.EpCallbacks.EpTxCompleteMultiple = 3567 NULL /* Control path completion ar6000_tx_complete */; 3568 connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */; 3569 connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */; 3570 connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */; 3571 connect.EpCallbacks.EpTxComplete = 3572 wmi_htc_tx_complete /* ar6000_tx_queue_full */; 3573 connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt; 3574 3575 /* connect to control 
service */ 3576 connect.service_id = soc->svc_ids[pdev_idx]; 3577 status = htc_connect_service(soc->htc_handle, &connect, &response); 3578 3579 if (QDF_IS_STATUS_ERROR(status)) { 3580 wmi_err("Failed to connect to WMI CONTROL service status:%d", 3581 status); 3582 return status; 3583 } 3584 3585 if (soc->is_async_ep) 3586 htc_set_async_ep(soc->htc_handle, response.Endpoint, true); 3587 3588 soc->wmi_endpoint_id[pdev_idx] = response.Endpoint; 3589 soc->max_msg_len[pdev_idx] = response.MaxMsgLength; 3590 3591 return QDF_STATUS_SUCCESS; 3592 } 3593 3594 QDF_STATUS 3595 wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle, 3596 HTC_HANDLE htc_handle) 3597 { 3598 uint32_t i; 3599 uint8_t wmi_ep_count; 3600 3601 wmi_handle->soc->htc_handle = htc_handle; 3602 3603 wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle); 3604 if (wmi_ep_count > WMI_MAX_RADIOS) 3605 return QDF_STATUS_E_FAULT; 3606 3607 for (i = 0; i < wmi_ep_count; i++) 3608 wmi_connect_pdev_htc_service(wmi_handle->soc, i); 3609 3610 wmi_handle->htc_handle = htc_handle; 3611 wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0]; 3612 wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0]; 3613 3614 return QDF_STATUS_SUCCESS; 3615 } 3616 3617 #if defined(WLAN_FEATURE_WMI_DIAG_OVER_CE7) || \ 3618 defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE) 3619 QDF_STATUS wmi_diag_connect_pdev_htc_service(struct wmi_unified *wmi_handle, 3620 HTC_HANDLE htc_handle) 3621 { 3622 QDF_STATUS status; 3623 struct htc_service_connect_resp response = {0}; 3624 struct htc_service_connect_req connect = {0}; 3625 3626 /* meta data is unused for now */ 3627 connect.pMetaData = NULL; 3628 connect.MetaDataLength = 0; 3629 connect.EpCallbacks.pContext = wmi_handle->soc; 3630 connect.EpCallbacks.EpTxCompleteMultiple = NULL; 3631 connect.EpCallbacks.EpRecv = wmi_control_diag_rx /* wmi diag rx */; 3632 connect.EpCallbacks.EpRecvRefill = NULL; 3633 connect.EpCallbacks.EpSendFull = NULL; 3634 
	connect.EpCallbacks.EpTxComplete = NULL;
	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;

	/* connect to wmi diag service */
	connect.service_id = WMI_CONTROL_DIAG_SVC;
	status = htc_connect_service(htc_handle, &connect, &response);

	if (QDF_IS_STATUS_ERROR(status)) {
		wmi_err("Failed to connect to WMI DIAG service status:%d",
			status);
		return status;
	}

	if (wmi_handle->soc->is_async_ep)
		htc_set_async_ep(htc_handle, response.Endpoint, true);

	wmi_handle->soc->wmi_diag_endpoint_id = response.Endpoint;

	return QDF_STATUS_SUCCESS;
}
#endif

#if defined(WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE)
/**
 * wmi_dbr_connect_pdev_htc_service() - connect the WMI DBR HTC service
 * @wmi_handle: handle to WMI
 * @htc_handle: handle to HTC
 *
 * Connects the dedicated direct-buffer-rx endpoint (rx only; no tx
 * completion callback) and stores the endpoint id on the SoC.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS wmi_dbr_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
					    HTC_HANDLE htc_handle)
{
	QDF_STATUS status;
	struct htc_service_connect_resp response = {0};
	struct htc_service_connect_req connect = {0};

	/* meta data is unused for now */
	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = wmi_handle->soc;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = wmi_control_dbr_rx /* wmi dbr rx */;
	connect.EpCallbacks.EpRecvRefill = NULL;
	connect.EpCallbacks.EpSendFull = NULL;
	connect.EpCallbacks.EpTxComplete = NULL;
	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;

	/* connect to wmi dbr service */
	connect.service_id = WMI_CONTROL_DBR_SVC;
	status = htc_connect_service(htc_handle, &connect, &response);

	if (QDF_IS_STATUS_ERROR(status)) {
		wmi_err("Failed to connect to WMI DBR service status:%d",
			status);
		return status;
	}

	if (wmi_handle->soc->is_async_ep)
		htc_set_async_ep(htc_handle, response.Endpoint, true);

	wmi_handle->soc->wmi_dbr_endpoint_id = response.Endpoint;

	return QDF_STATUS_SUCCESS;
}
#endif

int wmi_get_host_credits(wmi_unified_t
			 wmi_handle)
{
	int host_credits = 0;

	htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle,
						 &host_credits);
	return host_credits;
}

/* Number of WMI commands sent but not yet completed by the target */
int wmi_get_pending_cmds(wmi_unified_t wmi_handle)
{
	return qdf_atomic_read(&wmi_handle->pending_cmds);
}

void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val)
{
	qdf_atomic_set(&wmi_handle->is_target_suspended, val);
}

/* Record target-suspend ack state; also resets the over-QMI stats count */
void wmi_set_target_suspend_acked(wmi_unified_t wmi_handle, A_BOOL val)
{
	qdf_atomic_set(&wmi_handle->is_target_suspend_acked, val);
	qdf_atomic_set(&wmi_handle->num_stats_over_qmi, 0);
}

bool wmi_is_target_suspended(struct wmi_unified *wmi_handle)
{
	return qdf_atomic_read(&wmi_handle->is_target_suspended);
}
qdf_export_symbol(wmi_is_target_suspended);

bool wmi_is_target_suspend_acked(struct wmi_unified *wmi_handle)
{
	return qdf_atomic_read(&wmi_handle->is_target_suspend_acked);
}
qdf_export_symbol(wmi_is_target_suspend_acked);

#ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
void wmi_set_qmi_stats(wmi_unified_t wmi_handle, bool val)
{
	wmi_handle->is_qmi_stats_enabled = val;
}

bool wmi_is_qmi_stats_enabled(struct wmi_unified *wmi_handle)
{
	return wmi_handle->is_qmi_stats_enabled;
}
#endif

/* Mark that the next crash-inject command is intentional (tag for FW) */
void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag)
{
	wmi_handle->tag_crash_inject = flag;
}

void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val)
{
	qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val);
}

void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val)
{
	wmi_handle->tgt_force_assert_enable = val;
}

/* Block further WMI command submission; see wmi_is_blocked() */
int
wmi_stop(wmi_unified_t wmi_handle)
{
	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
		  "WMI Stop");
	wmi_handle->wmi_stopinprogress = 1;
return 0; 3765 } 3766 3767 int 3768 wmi_start(wmi_unified_t wmi_handle) 3769 { 3770 QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO, 3771 "WMI Start"); 3772 wmi_handle->wmi_stopinprogress = 0; 3773 return 0; 3774 } 3775 3776 bool 3777 wmi_is_blocked(wmi_unified_t wmi_handle) 3778 { 3779 return (!(!wmi_handle->wmi_stopinprogress)); 3780 } 3781 3782 void 3783 wmi_flush_endpoint(wmi_unified_t wmi_handle) 3784 { 3785 htc_flush_endpoint(wmi_handle->htc_handle, 3786 wmi_handle->wmi_endpoint_id, 0); 3787 } 3788 qdf_export_symbol(wmi_flush_endpoint); 3789 3790 HTC_ENDPOINT_ID wmi_get_endpoint(wmi_unified_t wmi_handle) 3791 { 3792 return wmi_handle->wmi_endpoint_id; 3793 } 3794 3795 void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle, 3796 uint32_t *pdev_id_map, 3797 uint8_t size) 3798 { 3799 if (wmi_handle->target_type == WMI_TLV_TARGET) 3800 wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle, 3801 pdev_id_map, 3802 size); 3803 } 3804 3805 int __wmi_validate_handle(wmi_unified_t wmi_handle, const char *func) 3806 { 3807 if (!wmi_handle) { 3808 wmi_err("Invalid WMI handle (via %s)", func); 3809 return -EINVAL; 3810 } 3811 3812 return 0; 3813 } 3814