1 /* 2 * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /* 20 * Host WMI unified implementation 21 */ 22 #include "htc_api.h" 23 #include "htc_api.h" 24 #include "wmi_unified_priv.h" 25 #include "wmi_unified_api.h" 26 #include "qdf_module.h" 27 #include "qdf_platform.h" 28 #ifdef WMI_EXT_DBG 29 #include "qdf_list.h" 30 #endif 31 32 #ifndef WMI_NON_TLV_SUPPORT 33 #include "wmi_tlv_helper.h" 34 #endif 35 36 #include <linux/debugfs.h> 37 #include <target_if.h> 38 #ifdef WMI_EXT_DBG 39 #include "qdf_atomic.h" 40 41 /** 42 * wmi_ext_dbg_msg_enqueue() - enqueue wmi message 43 * 44 * @wmi_handle: wmi handler 45 * 46 * Return: size of wmi message queue after enqueue 47 */ 48 static uint32_t wmi_ext_dbg_msg_enqueue(struct wmi_unified *wmi_handle, 49 struct wmi_ext_dbg_msg *msg) 50 { 51 uint32_t list_size; 52 53 qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock); 54 qdf_list_insert_back_size(&wmi_handle->wmi_ext_dbg_msg_queue, 55 &msg->node, &list_size); 56 qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock); 57 58 return list_size; 59 } 60 61 /** 62 * wmi_ext_dbg_msg_dequeue() - dequeue wmi message 63 * 64 * @wmi_handle: wmi handler 65 * 66 * Return: wmi msg on success else 
NULL 67 */ 68 static struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_dequeue(struct wmi_unified 69 *wmi_handle) 70 { 71 qdf_list_node_t *list_node = NULL; 72 73 qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock); 74 qdf_list_remove_front(&wmi_handle->wmi_ext_dbg_msg_queue, &list_node); 75 qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock); 76 77 if (!list_node) 78 return NULL; 79 80 return qdf_container_of(list_node, struct wmi_ext_dbg_msg, node); 81 } 82 83 /** 84 * wmi_ext_dbg_msg_record() - record wmi messages 85 * 86 * @wmi_handle: wmi handler 87 * @buf: wmi message buffer 88 * @len: wmi message length 89 * @type: wmi message type 90 * 91 * Return: QDF_STATUS_SUCCESS on successful recording else failure. 92 */ 93 static QDF_STATUS wmi_ext_dbg_msg_record(struct wmi_unified *wmi_handle, 94 uint8_t *buf, uint32_t len, 95 enum WMI_MSG_TYPE type) 96 { 97 struct wmi_ext_dbg_msg *msg; 98 uint32_t list_size; 99 100 msg = wmi_ext_dbg_msg_get(len); 101 if (!msg) 102 return QDF_STATUS_E_NOMEM; 103 104 msg->len = len; 105 msg->type = type; 106 qdf_mem_copy(msg->buf, buf, len); 107 msg->ts = qdf_get_log_timestamp(); 108 list_size = wmi_ext_dbg_msg_enqueue(wmi_handle, msg); 109 110 if (list_size >= wmi_handle->wmi_ext_dbg_msg_queue_size) { 111 msg = wmi_ext_dbg_msg_dequeue(wmi_handle); 112 wmi_ext_dbg_msg_put(msg); 113 } 114 115 return QDF_STATUS_SUCCESS; 116 } 117 118 /** 119 * wmi_ext_dbg_msg_cmd_record() - record wmi command messages 120 * 121 * @wmi_handle: wmi handler 122 * @buf: wmi command buffer 123 * @len: wmi command message length 124 * 125 * Return: QDF_STATUS_SUCCESS on successful recording else failure. 
/**
 * wmi_ext_dbg_msg_cmd_record() - record wmi command messages
 *
 * @wmi_handle: wmi handle
 * @buf: wmi command buffer
 * @len: wmi command message length
 *
 * Return: QDF_STATUS_SUCCESS on successful recording else failure.
 */
static QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified *wmi_handle,
					     uint8_t *buf, uint32_t len)
{
	return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
				      WMI_MSG_TYPE_CMD);
}

/**
 * wmi_ext_dbg_msg_event_record() - record wmi event messages
 *
 * @wmi_handle: wmi handle
 * @buf: wmi event buffer
 * @len: wmi event message length
 *
 * Diag events are intentionally not recorded; everything else is.
 *
 * Return: QDF_STATUS_SUCCESS on successful recording else failure.
 */
static QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified *wmi_handle,
					       uint8_t *buf, uint32_t len)
{
	uint32_t id;

	id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
	if (id != wmi_handle->wmi_events[wmi_diag_event_id])
		return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
					      WMI_MSG_TYPE_EVENT);

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_ext_dbg_msg_queue_init() - create debugfs queue and associated lock
 *
 * @wmi_handle: wmi handle
 *
 * Queue depth is bounded by wmi_ext_dbg_msg_queue_size, which must be
 * set before this is called.
 *
 * Return: none
 */
static void wmi_ext_dbg_msg_queue_init(struct wmi_unified *wmi_handle)
{
	qdf_list_create(&wmi_handle->wmi_ext_dbg_msg_queue,
			wmi_handle->wmi_ext_dbg_msg_queue_size);
	qdf_spinlock_create(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
}

/**
 * wmi_ext_dbg_msg_queue_deinit() - destroy debugfs queue and associated lock
 *
 * @wmi_handle: wmi handle
 *
 * Caller is expected to have drained the queue first; this does not free
 * any messages still linked on it.
 *
 * Return: none
 */
static void wmi_ext_dbg_msg_queue_deinit(struct wmi_unified *wmi_handle)
{
	qdf_list_destroy(&wmi_handle->wmi_ext_dbg_msg_queue);
	qdf_spinlock_destroy(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
}

/**
 * wmi_ext_dbg_msg_show() - debugfs function to display whole content of
 * wmi command/event messages including headers.
 *
 * @file: qdf debugfs file handler
 * @arg: pointer to wmi handle
 *
 * One message is consumed and printed per call.  If the debugfs buffer
 * overflowed, the message is pushed back to the queue head so the next
 * call can retry it; otherwise it is freed.
 *
 * Return: QDF_STATUS_SUCCESS if all the messages are shown successfully,
 * else QDF_STATUS_E_AGAIN if more data to show.
 */
static QDF_STATUS wmi_ext_dbg_msg_show(qdf_debugfs_file_t file, void *arg)
{
	struct wmi_unified *wmi_handle = (struct wmi_unified *)arg;
	struct wmi_ext_dbg_msg *msg;
	uint64_t secs, usecs;

	msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
	if (!msg)
		return QDF_STATUS_SUCCESS;

	qdf_debugfs_printf(file, "%s: 0x%x\n",
			   msg->type == WMI_MSG_TYPE_CMD ? "COMMAND" :
			   "EVENT", WMI_GET_FIELD(msg->buf, WMI_CMD_HDR,
						  COMMANDID));
	qdf_log_timestamp_to_secs(msg->ts, &secs, &usecs);
	qdf_debugfs_printf(file, "Time: %llu.%llu\n", secs, usecs);
	qdf_debugfs_printf(file, "Length:%d\n", msg->len);
	qdf_debugfs_hexdump(file, msg->buf, msg->len,
			    WMI_EXT_DBG_DUMP_ROW_SIZE,
			    WMI_EXT_DBG_DUMP_GROUP_SIZE);
	qdf_debugfs_printf(file, "\n");

	if (qdf_debugfs_overflow(file)) {
		/* Output truncated: re-queue at the front for the next read. */
		qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
		qdf_list_insert_front(&wmi_handle->wmi_ext_dbg_msg_queue,
				      &msg->node);
		qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);

	} else {
		wmi_ext_dbg_msg_put(msg);
	}

	return QDF_STATUS_E_AGAIN;
}

/**
 * wmi_ext_dbg_msg_write() - debugfs write not supported
 *
 * @priv: private data
 * @buf: received data buffer
 * @len: length of received buffer
 *
 * Return: QDF_STATUS_E_NOSUPPORT.
 */
static QDF_STATUS wmi_ext_dbg_msg_write(void *priv, const char *buf,
					qdf_size_t len)
{
	return QDF_STATUS_E_NOSUPPORT;
}

/* debugfs file operations; .priv is filled in at init time. */
static struct qdf_debugfs_fops wmi_ext_dbgfs_ops = {
	.show		= wmi_ext_dbg_msg_show,
	.write		= wmi_ext_dbg_msg_write,
	.priv		= NULL,
};
/**
 * wmi_ext_dbgfs_init() - init debugfs items for extended wmi dump.
 *
 * @wmi_handle: wmi handle
 *
 * Creates the WMI_EXT_DBG_DIR directory and WMI_EXT_DBG_FILE entry, then
 * initializes the bounded message queue used to buffer dump data.
 *
 * Return: QDF_STATUS_SUCCESS if debugfs is initialized else
 * QDF_STATUS_E_FAILURE
 */
static QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle)
{
	qdf_dentry_t dentry;

	dentry = qdf_debugfs_create_dir(WMI_EXT_DBG_DIR, NULL);
	if (!dentry) {
		WMI_LOGE("error while creating extended wmi debugfs dir");
		return QDF_STATUS_E_FAILURE;
	}

	wmi_ext_dbgfs_ops.priv = wmi_handle;
	if (!qdf_debugfs_create_file(WMI_EXT_DBG_FILE, WMI_EXT_DBG_FILE_PERM,
				     dentry, &wmi_ext_dbgfs_ops)) {
		/* Roll back the directory so a retry starts clean. */
		qdf_debugfs_remove_dir(dentry);
		WMI_LOGE("error while creating extended wmi debugfs file");
		return QDF_STATUS_E_FAILURE;
	}

	wmi_handle->wmi_ext_dbg_dentry = dentry;
	wmi_handle->wmi_ext_dbg_msg_queue_size = WMI_EXT_DBG_QUEUE_SIZE;
	wmi_ext_dbg_msg_queue_init(wmi_handle);

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_ext_dbgfs_deinit() - cleanup/deinit debugfs items of extended wmi dump.
 *
 * @wmi_handle: wmi handle
 *
 * Frees any still-queued messages before tearing down the queue and
 * removing the debugfs directory tree.
 *
 * Return: QDF_STATUS_SUCCESS if cleanup is successful
 */
static QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
{
	struct wmi_ext_dbg_msg *msg;

	while ((msg = wmi_ext_dbg_msg_dequeue(wmi_handle)))
		wmi_ext_dbg_msg_put(msg);

	wmi_ext_dbg_msg_queue_deinit(wmi_handle);
	qdf_debugfs_remove_dir_recursive(wmi_handle->wmi_ext_dbg_dentry);

	return QDF_STATUS_SUCCESS;
}

#endif /*WMI_EXT_DBG */

/* This check for CONFIG_WIN temporary added due to redeclaration compilation
error in MCL. Error is caused due to inclusion of wmi.h in wmi_unified_api.h
which gets included here through ol_if_athvar.h. Eventually it is expected that
wmi.h will be removed from wmi_unified_api.h after cleanup, which will need
WMI_CMD_HDR to be defined here. */
/* Copied from wmi.h */
#undef MS
/* Extract field _f from word _v using the _f##_MASK/_f##_LSB conventions. */
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#undef SM
/* Shift value _v into field position _f and mask it to the field width. */
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
#undef WO
/* Byte offset _f##_OFFSET expressed as a 32-bit word offset. */
#define WO(_f) ((_f##_OFFSET) >> 2)

#undef GET_FIELD
#define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f)
#undef SET_FIELD
/* Read-modify-write of one field within the addressed 32-bit word. */
#define SET_FIELD(_addr, _f, _val)  \
	    (*((uint32_t *)(_addr) + WO(_f)) = \
		(*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f))

#define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \
	    GET_FIELD(_msg_buf, _msg_type ## _ ## _f)

#define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \
	    SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val)

#define WMI_EP_APASS           0x0
#define WMI_EP_LPASS           0x1
#define WMI_EP_SENSOR          0x2

/*
 *  * Control Path
 *   */
typedef PREPACK struct {
	uint32_t	commandId:24,
			reserved:2, /* used for WMI endpoint ID */
			plt_priv:6; /* platform private */
} POSTPACK WMI_CMD_HDR;         /* used for commands and events */

#define WMI_CMD_HDR_COMMANDID_LSB           0
#define WMI_CMD_HDR_COMMANDID_MASK          0x00ffffff
#define WMI_CMD_HDR_COMMANDID_OFFSET        0x00000000
#define WMI_CMD_HDR_WMI_ENDPOINTID_MASK        0x03000000
#define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET      24
/*
 * NOTE(review): PLT_PRIV mask 0xff000000 with LSB 24 spans bits 24-31 and
 * therefore overlaps the 2-bit endpoint-ID field above, although plt_priv
 * itself is only 6 bits (26-31).  This matches the copied wmi.h values —
 * verify against the firmware header before relying on these accessors.
 */
#define WMI_CMD_HDR_PLT_PRIV_LSB               24
#define WMI_CMD_HDR_PLT_PRIV_MASK              0xff000000
#define WMI_CMD_HDR_PLT_PRIV_OFFSET            0x00000000
/* end of copy wmi.h */

/* Minimum headroom reserved in front of every outgoing WMI command buffer. */
#define WMI_MIN_HEAD_ROOM 64

/* WBUFF pool sizes for WMI */
/* Allocation of size 256 bytes */
#define WMI_WBUFF_POOL_0_SIZE 128
/* Allocation of size 512 bytes */
#define WMI_WBUFF_POOL_1_SIZE 16
/* Allocation of size 1024 bytes */
#define WMI_WBUFF_POOL_2_SIZE 8
/* Allocation of size 2048 bytes */
#define WMI_WBUFF_POOL_3_SIZE 8

#ifdef WMI_INTERFACE_EVENT_LOGGING
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
/* TODO Cleanup this backported function */
static int wmi_bp_seq_printf(struct seq_file *m, const char *f, ...) 369 { 370 va_list args; 371 372 va_start(args, f); 373 seq_vprintf(m, f, args); 374 va_end(args); 375 376 return 0; 377 } 378 #else 379 #define wmi_bp_seq_printf(m, fmt, ...) seq_printf((m), fmt, ##__VA_ARGS__) 380 #endif 381 382 #ifndef MAX_WMI_INSTANCES 383 #define CUSTOM_MGMT_CMD_DATA_SIZE 4 384 #endif 385 386 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC 387 /* WMI commands */ 388 uint32_t g_wmi_command_buf_idx = 0; 389 struct wmi_command_debug wmi_command_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 390 391 /* WMI commands TX completed */ 392 uint32_t g_wmi_command_tx_cmp_buf_idx = 0; 393 struct wmi_command_debug 394 wmi_command_tx_cmp_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 395 396 /* WMI events when processed */ 397 uint32_t g_wmi_event_buf_idx = 0; 398 struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 399 400 /* WMI events when queued */ 401 uint32_t g_wmi_rx_event_buf_idx = 0; 402 struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 403 #endif 404 405 #define WMI_COMMAND_RECORD(h, a, b) { \ 406 if (wmi_log_max_entry <= \ 407 *(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)) \ 408 *(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\ 409 ((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\ 410 [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\ 411 .command = a; \ 412 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. 
\ 413 wmi_command_log_buf_info.buf) \ 414 [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\ 415 b, wmi_record_max_length); \ 416 ((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\ 417 [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\ 418 time = qdf_get_log_timestamp(); \ 419 (*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++; \ 420 h->log_info.wmi_command_log_buf_info.length++; \ 421 } 422 423 #define WMI_COMMAND_TX_CMP_RECORD(h, a, b) { \ 424 if (wmi_log_max_entry <= \ 425 *(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\ 426 *(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 427 p_buf_tail_idx) = 0; \ 428 ((struct wmi_command_debug *)h->log_info. \ 429 wmi_command_tx_cmp_log_buf_info.buf) \ 430 [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 431 p_buf_tail_idx)]. \ 432 command = a; \ 433 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ 434 wmi_command_tx_cmp_log_buf_info.buf) \ 435 [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 436 p_buf_tail_idx)]. \ 437 data, b, wmi_record_max_length); \ 438 ((struct wmi_command_debug *)h->log_info. \ 439 wmi_command_tx_cmp_log_buf_info.buf) \ 440 [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 441 p_buf_tail_idx)]. \ 442 time = qdf_get_log_timestamp(); \ 443 (*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\ 444 h->log_info.wmi_command_tx_cmp_log_buf_info.length++; \ 445 } 446 447 #define WMI_EVENT_RECORD(h, a, b) { \ 448 if (wmi_log_max_entry <= \ 449 *(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)) \ 450 *(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\ 451 ((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\ 452 [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)]. \ 453 event = a; \ 454 qdf_mem_copy(((struct wmi_event_debug *)h->log_info. 
\ 455 wmi_event_log_buf_info.buf) \ 456 [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\ 457 wmi_record_max_length); \ 458 ((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\ 459 [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\ 460 qdf_get_log_timestamp(); \ 461 (*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++; \ 462 h->log_info.wmi_event_log_buf_info.length++; \ 463 } 464 465 #define WMI_RX_EVENT_RECORD(h, a, b) { \ 466 if (wmi_log_max_entry <= \ 467 *(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\ 468 *(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\ 469 ((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\ 470 [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ 471 event = a; \ 472 qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \ 473 wmi_rx_event_log_buf_info.buf) \ 474 [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ 475 data, b, wmi_record_max_length); \ 476 ((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\ 477 [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ 478 time = qdf_get_log_timestamp(); \ 479 (*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++; \ 480 h->log_info.wmi_rx_event_log_buf_info.length++; \ 481 } 482 483 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC 484 uint32_t g_wmi_mgmt_command_buf_idx = 0; 485 struct 486 wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; 487 488 /* wmi_mgmt commands TX completed */ 489 uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0; 490 struct wmi_command_debug 491 wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; 492 493 /* wmi_mgmt events when received */ 494 uint32_t g_wmi_mgmt_rx_event_buf_idx = 0; 495 struct wmi_event_debug 496 wmi_mgmt_rx_event_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; 497 498 /* wmi_diag events when received */ 499 uint32_t g_wmi_diag_rx_event_buf_idx = 0; 500 struct wmi_event_debug 501 
wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY]; 502 #endif 503 504 #define WMI_MGMT_COMMAND_RECORD(h, a, b) { \ 505 if (wmi_mgmt_log_max_entry <= \ 506 *(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \ 507 *(h->log_info.wmi_mgmt_command_log_buf_info. \ 508 p_buf_tail_idx) = 0; \ 509 ((struct wmi_command_debug *)h->log_info. \ 510 wmi_mgmt_command_log_buf_info.buf) \ 511 [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ 512 command = a; \ 513 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ 514 wmi_mgmt_command_log_buf_info.buf) \ 515 [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ 516 data, b, \ 517 wmi_record_max_length); \ 518 ((struct wmi_command_debug *)h->log_info. \ 519 wmi_mgmt_command_log_buf_info.buf) \ 520 [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ 521 time = qdf_get_log_timestamp(); \ 522 (*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\ 523 h->log_info.wmi_mgmt_command_log_buf_info.length++; \ 524 } 525 526 #define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) { \ 527 if (wmi_mgmt_log_max_entry <= \ 528 *(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 529 p_buf_tail_idx)) \ 530 *(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 531 p_buf_tail_idx) = 0; \ 532 ((struct wmi_command_debug *)h->log_info. \ 533 wmi_mgmt_command_tx_cmp_log_buf_info.buf) \ 534 [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 535 p_buf_tail_idx)].command = a; \ 536 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ 537 wmi_mgmt_command_tx_cmp_log_buf_info.buf)\ 538 [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 539 p_buf_tail_idx)].data, b, \ 540 wmi_record_max_length); \ 541 ((struct wmi_command_debug *)h->log_info. \ 542 wmi_mgmt_command_tx_cmp_log_buf_info.buf) \ 543 [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 544 p_buf_tail_idx)].time = \ 545 qdf_get_log_timestamp(); \ 546 (*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. 
\ 547 p_buf_tail_idx))++; \ 548 h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++; \ 549 } 550 551 #define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do { \ 552 if (wmi_mgmt_log_max_entry <= \ 553 *(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\ 554 *(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\ 555 ((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\ 556 [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\ 557 .event = a; \ 558 qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \ 559 wmi_mgmt_event_log_buf_info.buf) \ 560 [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\ 561 data, b, wmi_record_max_length); \ 562 ((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\ 563 [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\ 564 time = qdf_get_log_timestamp(); \ 565 (*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++; \ 566 h->log_info.wmi_mgmt_event_log_buf_info.length++; \ 567 } while (0); 568 569 #define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do { \ 570 if (wmi_diag_log_max_entry <= \ 571 *(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\ 572 *(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\ 573 ((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\ 574 [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\ 575 .event = a; \ 576 qdf_mem_copy(((struct wmi_event_debug *)h->log_info. 
\ 577 wmi_diag_event_log_buf_info.buf) \ 578 [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\ 579 data, b, wmi_record_max_length); \ 580 ((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\ 581 [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\ 582 time = qdf_get_log_timestamp(); \ 583 (*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++; \ 584 h->log_info.wmi_diag_event_log_buf_info.length++; \ 585 } while (0); 586 587 /* These are defined to made it as module param, which can be configured */ 588 uint32_t wmi_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY; 589 uint32_t wmi_mgmt_log_max_entry = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY; 590 uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY; 591 uint32_t wmi_record_max_length = WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH; 592 uint32_t wmi_display_size = 100; 593 594 /** 595 * wmi_log_init() - Initialize WMI event logging 596 * @wmi_handle: WMI handle. 597 * 598 * Return: Initialization status 599 */ 600 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC 601 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle) 602 { 603 struct wmi_log_buf_t *cmd_log_buf = 604 &wmi_handle->log_info.wmi_command_log_buf_info; 605 struct wmi_log_buf_t *cmd_tx_cmpl_log_buf = 606 &wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info; 607 608 struct wmi_log_buf_t *event_log_buf = 609 &wmi_handle->log_info.wmi_event_log_buf_info; 610 struct wmi_log_buf_t *rx_event_log_buf = 611 &wmi_handle->log_info.wmi_rx_event_log_buf_info; 612 613 struct wmi_log_buf_t *mgmt_cmd_log_buf = 614 &wmi_handle->log_info.wmi_mgmt_command_log_buf_info; 615 struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf = 616 &wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info; 617 struct wmi_log_buf_t *mgmt_event_log_buf = 618 &wmi_handle->log_info.wmi_mgmt_event_log_buf_info; 619 struct wmi_log_buf_t *diag_event_log_buf = 620 &wmi_handle->log_info.wmi_diag_event_log_buf_info; 621 622 /* WMI commands */ 623 
cmd_log_buf->length = 0; 624 cmd_log_buf->buf_tail_idx = 0; 625 cmd_log_buf->buf = wmi_command_log_buffer; 626 cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx; 627 cmd_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY; 628 629 /* WMI commands TX completed */ 630 cmd_tx_cmpl_log_buf->length = 0; 631 cmd_tx_cmpl_log_buf->buf_tail_idx = 0; 632 cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer; 633 cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx; 634 cmd_tx_cmpl_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY; 635 636 /* WMI events when processed */ 637 event_log_buf->length = 0; 638 event_log_buf->buf_tail_idx = 0; 639 event_log_buf->buf = wmi_event_log_buffer; 640 event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx; 641 event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY; 642 643 /* WMI events when queued */ 644 rx_event_log_buf->length = 0; 645 rx_event_log_buf->buf_tail_idx = 0; 646 rx_event_log_buf->buf = wmi_rx_event_log_buffer; 647 rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx; 648 rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY; 649 650 /* WMI Management commands */ 651 mgmt_cmd_log_buf->length = 0; 652 mgmt_cmd_log_buf->buf_tail_idx = 0; 653 mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer; 654 mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx; 655 mgmt_cmd_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY; 656 657 /* WMI Management commands Tx completed*/ 658 mgmt_cmd_tx_cmp_log_buf->length = 0; 659 mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0; 660 mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer; 661 mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx = 662 &g_wmi_mgmt_command_tx_cmp_buf_idx; 663 mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY; 664 665 /* WMI Management events when received */ 666 mgmt_event_log_buf->length = 0; 667 mgmt_event_log_buf->buf_tail_idx = 0; 668 mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer; 669 mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx; 
670 mgmt_event_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY; 671 672 /* WMI diag events when received */ 673 diag_event_log_buf->length = 0; 674 diag_event_log_buf->buf_tail_idx = 0; 675 diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer; 676 diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx; 677 diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY; 678 679 qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock); 680 wmi_handle->log_info.wmi_logging_enable = 1; 681 682 return QDF_STATUS_SUCCESS; 683 } 684 #else 685 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle) 686 { 687 struct wmi_log_buf_t *cmd_log_buf = 688 &wmi_handle->log_info.wmi_command_log_buf_info; 689 struct wmi_log_buf_t *cmd_tx_cmpl_log_buf = 690 &wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info; 691 692 struct wmi_log_buf_t *event_log_buf = 693 &wmi_handle->log_info.wmi_event_log_buf_info; 694 struct wmi_log_buf_t *rx_event_log_buf = 695 &wmi_handle->log_info.wmi_rx_event_log_buf_info; 696 697 struct wmi_log_buf_t *mgmt_cmd_log_buf = 698 &wmi_handle->log_info.wmi_mgmt_command_log_buf_info; 699 struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf = 700 &wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info; 701 struct wmi_log_buf_t *mgmt_event_log_buf = 702 &wmi_handle->log_info.wmi_mgmt_event_log_buf_info; 703 struct wmi_log_buf_t *diag_event_log_buf = 704 &wmi_handle->log_info.wmi_diag_event_log_buf_info; 705 706 wmi_handle->log_info.wmi_logging_enable = 0; 707 708 /* WMI commands */ 709 cmd_log_buf->length = 0; 710 cmd_log_buf->buf_tail_idx = 0; 711 cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc( 712 wmi_log_max_entry * sizeof(struct wmi_command_debug)); 713 cmd_log_buf->size = wmi_log_max_entry; 714 715 if (!cmd_log_buf->buf) 716 return QDF_STATUS_E_NOMEM; 717 718 cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx; 719 720 /* WMI commands TX completed */ 721 cmd_tx_cmpl_log_buf->length = 0; 722 cmd_tx_cmpl_log_buf->buf_tail_idx 
= 0; 723 cmd_tx_cmpl_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc( 724 wmi_log_max_entry * sizeof(struct wmi_command_debug)); 725 cmd_tx_cmpl_log_buf->size = wmi_log_max_entry; 726 727 if (!cmd_tx_cmpl_log_buf->buf) 728 return QDF_STATUS_E_NOMEM; 729 730 cmd_tx_cmpl_log_buf->p_buf_tail_idx = 731 &cmd_tx_cmpl_log_buf->buf_tail_idx; 732 733 /* WMI events when processed */ 734 event_log_buf->length = 0; 735 event_log_buf->buf_tail_idx = 0; 736 event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc( 737 wmi_log_max_entry * sizeof(struct wmi_event_debug)); 738 event_log_buf->size = wmi_log_max_entry; 739 740 if (!event_log_buf->buf) 741 return QDF_STATUS_E_NOMEM; 742 743 event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx; 744 745 /* WMI events when queued */ 746 rx_event_log_buf->length = 0; 747 rx_event_log_buf->buf_tail_idx = 0; 748 rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc( 749 wmi_log_max_entry * sizeof(struct wmi_event_debug)); 750 rx_event_log_buf->size = wmi_log_max_entry; 751 752 if (!rx_event_log_buf->buf) 753 return QDF_STATUS_E_NOMEM; 754 755 rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx; 756 757 /* WMI Management commands */ 758 mgmt_cmd_log_buf->length = 0; 759 mgmt_cmd_log_buf->buf_tail_idx = 0; 760 mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc( 761 wmi_mgmt_log_max_entry * sizeof(struct wmi_command_debug)); 762 mgmt_cmd_log_buf->size = wmi_mgmt_log_max_entry; 763 764 if (!mgmt_cmd_log_buf->buf) 765 return QDF_STATUS_E_NOMEM; 766 767 mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx; 768 769 /* WMI Management commands Tx completed*/ 770 mgmt_cmd_tx_cmp_log_buf->length = 0; 771 mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0; 772 mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *) 773 qdf_mem_malloc( 774 wmi_mgmt_log_max_entry * 775 sizeof(struct wmi_command_debug)); 776 mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_log_max_entry; 777 778 if 
(!mgmt_cmd_tx_cmp_log_buf->buf) 779 return QDF_STATUS_E_NOMEM; 780 781 mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx = 782 &mgmt_cmd_tx_cmp_log_buf->buf_tail_idx; 783 784 /* WMI Management events when received */ 785 mgmt_event_log_buf->length = 0; 786 mgmt_event_log_buf->buf_tail_idx = 0; 787 788 mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc( 789 wmi_mgmt_log_max_entry * 790 sizeof(struct wmi_event_debug)); 791 mgmt_event_log_buf->size = wmi_mgmt_log_max_entry; 792 793 if (!mgmt_event_log_buf->buf) 794 return QDF_STATUS_E_NOMEM; 795 796 mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx; 797 798 /* WMI diag events when received */ 799 diag_event_log_buf->length = 0; 800 diag_event_log_buf->buf_tail_idx = 0; 801 802 diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc( 803 wmi_diag_log_max_entry * 804 sizeof(struct wmi_event_debug)); 805 diag_event_log_buf->size = wmi_diag_log_max_entry; 806 807 if (!diag_event_log_buf->buf) 808 return QDF_STATUS_E_NOMEM; 809 810 diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx; 811 812 qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock); 813 wmi_handle->log_info.wmi_logging_enable = 1; 814 815 return QDF_STATUS_SUCCESS; 816 } 817 #endif 818 819 /** 820 * wmi_log_buffer_free() - Free all dynamic allocated buffer memory for 821 * event logging 822 * @wmi_handle: WMI handle. 
823 * 824 * Return: None 825 */ 826 #ifdef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC 827 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) 828 { 829 if (wmi_handle->log_info.wmi_command_log_buf_info.buf) 830 qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf); 831 if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf) 832 qdf_mem_free( 833 wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf); 834 if (wmi_handle->log_info.wmi_event_log_buf_info.buf) 835 qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf); 836 if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf) 837 qdf_mem_free( 838 wmi_handle->log_info.wmi_rx_event_log_buf_info.buf); 839 if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf) 840 qdf_mem_free( 841 wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf); 842 if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf) 843 qdf_mem_free( 844 wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf); 845 if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf) 846 qdf_mem_free( 847 wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf); 848 if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf) 849 qdf_mem_free( 850 wmi_handle->log_info.wmi_diag_event_log_buf_info.buf); 851 wmi_handle->log_info.wmi_logging_enable = 0; 852 qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock); 853 } 854 #else 855 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) 856 { 857 /* Do Nothing */ 858 } 859 #endif 860 861 /** 862 * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer 863 * @log_buffer: the command log buffer metadata of the buffer to print 864 * @count: the maximum number of entries to print 865 * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper 866 * @print_priv: any data required by the print method, e.g. 
a file handle 867 * 868 * Return: None 869 */ 870 static void 871 wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count, 872 qdf_abstract_print *print, void *print_priv) 873 { 874 static const int data_len = 875 WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t); 876 char str[128]; 877 uint32_t idx; 878 879 if (count > log_buffer->size) 880 count = log_buffer->size; 881 if (count > log_buffer->length) 882 count = log_buffer->length; 883 884 /* subtract count from index, and wrap if necessary */ 885 idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count; 886 idx %= log_buffer->size; 887 888 print(print_priv, "Time (seconds) Cmd Id Payload"); 889 while (count) { 890 struct wmi_command_debug *cmd_log = (struct wmi_command_debug *) 891 &((struct wmi_command_debug *)log_buffer->buf)[idx]; 892 uint64_t secs, usecs; 893 int len = 0; 894 int i; 895 896 qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs); 897 len += scnprintf(str + len, sizeof(str) - len, 898 "% 8lld.%06lld %6u (0x%06x) ", 899 secs, usecs, 900 cmd_log->command, cmd_log->command); 901 for (i = 0; i < data_len; ++i) { 902 len += scnprintf(str + len, sizeof(str) - len, 903 "0x%08x ", cmd_log->data[i]); 904 } 905 906 print(print_priv, str); 907 908 --count; 909 ++idx; 910 if (idx >= log_buffer->size) 911 idx = 0; 912 } 913 } 914 915 /** 916 * wmi_print_event_log_buffer() - an output agnostic wmi event log printer 917 * @log_buffer: the event log buffer metadata of the buffer to print 918 * @count: the maximum number of entries to print 919 * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper 920 * @print_priv: any data required by the print method, e.g. 
a file handle 921 * 922 * Return: None 923 */ 924 static void 925 wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count, 926 qdf_abstract_print *print, void *print_priv) 927 { 928 static const int data_len = 929 WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t); 930 char str[128]; 931 uint32_t idx; 932 933 if (count > log_buffer->size) 934 count = log_buffer->size; 935 if (count > log_buffer->length) 936 count = log_buffer->length; 937 938 /* subtract count from index, and wrap if necessary */ 939 idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count; 940 idx %= log_buffer->size; 941 942 print(print_priv, "Time (seconds) Event Id Payload"); 943 while (count) { 944 struct wmi_event_debug *event_log = (struct wmi_event_debug *) 945 &((struct wmi_event_debug *)log_buffer->buf)[idx]; 946 uint64_t secs, usecs; 947 int len = 0; 948 int i; 949 950 qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs); 951 len += scnprintf(str + len, sizeof(str) - len, 952 "% 8lld.%06lld %6u (0x%06x) ", 953 secs, usecs, 954 event_log->event, event_log->event); 955 for (i = 0; i < data_len; ++i) { 956 len += scnprintf(str + len, sizeof(str) - len, 957 "0x%08x ", event_log->data[i]); 958 } 959 960 print(print_priv, str); 961 962 --count; 963 ++idx; 964 if (idx >= log_buffer->size) 965 idx = 0; 966 } 967 } 968 969 inline void 970 wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count, 971 qdf_abstract_print *print, void *print_priv) 972 { 973 wmi_print_cmd_log_buffer( 974 &wmi->log_info.wmi_command_log_buf_info, 975 count, print, print_priv); 976 } 977 978 inline void 979 wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count, 980 qdf_abstract_print *print, void *print_priv) 981 { 982 wmi_print_cmd_log_buffer( 983 &wmi->log_info.wmi_command_tx_cmp_log_buf_info, 984 count, print, print_priv); 985 } 986 987 inline void 988 wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count, 989 qdf_abstract_print *print, void *print_priv) 990 { 991 
wmi_print_cmd_log_buffer( 992 &wmi->log_info.wmi_mgmt_command_log_buf_info, 993 count, print, print_priv); 994 } 995 996 inline void 997 wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count, 998 qdf_abstract_print *print, void *print_priv) 999 { 1000 wmi_print_cmd_log_buffer( 1001 &wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info, 1002 count, print, print_priv); 1003 } 1004 1005 inline void 1006 wmi_print_event_log(wmi_unified_t wmi, uint32_t count, 1007 qdf_abstract_print *print, void *print_priv) 1008 { 1009 wmi_print_event_log_buffer( 1010 &wmi->log_info.wmi_event_log_buf_info, 1011 count, print, print_priv); 1012 } 1013 1014 inline void 1015 wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count, 1016 qdf_abstract_print *print, void *print_priv) 1017 { 1018 wmi_print_event_log_buffer( 1019 &wmi->log_info.wmi_rx_event_log_buf_info, 1020 count, print, print_priv); 1021 } 1022 1023 inline void 1024 wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count, 1025 qdf_abstract_print *print, void *print_priv) 1026 { 1027 wmi_print_event_log_buffer( 1028 &wmi->log_info.wmi_mgmt_event_log_buf_info, 1029 count, print, print_priv); 1030 } 1031 1032 1033 /* debugfs routines*/ 1034 1035 /** 1036 * debug_wmi_##func_base##_show() - debugfs functions to display content of 1037 * command and event buffers. Macro uses max buffer length to display 1038 * buffer when it is wraparound. 
1039 * 1040 * @m: debugfs handler to access wmi_handle 1041 * @v: Variable arguments (not used) 1042 * 1043 * Return: Length of characters printed 1044 */ 1045 #define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size) \ 1046 static int debug_wmi_##func_base##_show(struct seq_file *m, \ 1047 void *v) \ 1048 { \ 1049 wmi_unified_t wmi_handle = (wmi_unified_t) m->private; \ 1050 struct wmi_log_buf_t *wmi_log = \ 1051 &wmi_handle->log_info.wmi_##func_base##_buf_info;\ 1052 int pos, nread, outlen; \ 1053 int i; \ 1054 uint64_t secs, usecs; \ 1055 \ 1056 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\ 1057 if (!wmi_log->length) { \ 1058 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\ 1059 return wmi_bp_seq_printf(m, \ 1060 "no elements to read from ring buffer!\n"); \ 1061 } \ 1062 \ 1063 if (wmi_log->length <= wmi_ring_size) \ 1064 nread = wmi_log->length; \ 1065 else \ 1066 nread = wmi_ring_size; \ 1067 \ 1068 if (*(wmi_log->p_buf_tail_idx) == 0) \ 1069 /* tail can be 0 after wrap-around */ \ 1070 pos = wmi_ring_size - 1; \ 1071 else \ 1072 pos = *(wmi_log->p_buf_tail_idx) - 1; \ 1073 \ 1074 outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\ 1075 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\ 1076 while (nread--) { \ 1077 struct wmi_command_debug *wmi_record; \ 1078 \ 1079 wmi_record = (struct wmi_command_debug *) \ 1080 &(((struct wmi_command_debug *)wmi_log->buf)[pos]);\ 1081 outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n", \ 1082 (wmi_record->command)); \ 1083 qdf_log_timestamp_to_secs(wmi_record->time, &secs,\ 1084 &usecs); \ 1085 outlen += \ 1086 wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\ 1087 secs, usecs); \ 1088 outlen += wmi_bp_seq_printf(m, "CMD = "); \ 1089 for (i = 0; i < (wmi_record_max_length/ \ 1090 sizeof(uint32_t)); i++) \ 1091 outlen += wmi_bp_seq_printf(m, "%x ", \ 1092 wmi_record->data[i]); \ 1093 outlen += wmi_bp_seq_printf(m, "\n"); \ 1094 \ 1095 if (pos == 0) \ 1096 pos = 
wmi_ring_size - 1; \ 1097 else \ 1098 pos--; \ 1099 } \ 1100 return outlen; \ 1101 } \ 1102 1103 #define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size) \ 1104 static int debug_wmi_##func_base##_show(struct seq_file *m, \ 1105 void *v) \ 1106 { \ 1107 wmi_unified_t wmi_handle = (wmi_unified_t) m->private; \ 1108 struct wmi_log_buf_t *wmi_log = \ 1109 &wmi_handle->log_info.wmi_##func_base##_buf_info;\ 1110 int pos, nread, outlen; \ 1111 int i; \ 1112 uint64_t secs, usecs; \ 1113 \ 1114 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\ 1115 if (!wmi_log->length) { \ 1116 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\ 1117 return wmi_bp_seq_printf(m, \ 1118 "no elements to read from ring buffer!\n"); \ 1119 } \ 1120 \ 1121 if (wmi_log->length <= wmi_ring_size) \ 1122 nread = wmi_log->length; \ 1123 else \ 1124 nread = wmi_ring_size; \ 1125 \ 1126 if (*(wmi_log->p_buf_tail_idx) == 0) \ 1127 /* tail can be 0 after wrap-around */ \ 1128 pos = wmi_ring_size - 1; \ 1129 else \ 1130 pos = *(wmi_log->p_buf_tail_idx) - 1; \ 1131 \ 1132 outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\ 1133 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\ 1134 while (nread--) { \ 1135 struct wmi_event_debug *wmi_record; \ 1136 \ 1137 wmi_record = (struct wmi_event_debug *) \ 1138 &(((struct wmi_event_debug *)wmi_log->buf)[pos]);\ 1139 qdf_log_timestamp_to_secs(wmi_record->time, &secs,\ 1140 &usecs); \ 1141 outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\ 1142 (wmi_record->event)); \ 1143 outlen += \ 1144 wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\ 1145 secs, usecs); \ 1146 outlen += wmi_bp_seq_printf(m, "CMD = "); \ 1147 for (i = 0; i < (wmi_record_max_length/ \ 1148 sizeof(uint32_t)); i++) \ 1149 outlen += wmi_bp_seq_printf(m, "%x ", \ 1150 wmi_record->data[i]); \ 1151 outlen += wmi_bp_seq_printf(m, "\n"); \ 1152 \ 1153 if (pos == 0) \ 1154 pos = wmi_ring_size - 1; \ 1155 else \ 1156 pos--; \ 1157 } \ 1158 return outlen; \ 
1159 } 1160 1161 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size); 1162 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size); 1163 GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size); 1164 GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size); 1165 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size); 1166 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log, 1167 wmi_display_size); 1168 GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size); 1169 1170 /** 1171 * debug_wmi_enable_show() - debugfs functions to display enable state of 1172 * wmi logging feature. 1173 * 1174 * @m: debugfs handler to access wmi_handle 1175 * @v: Variable arguments (not used) 1176 * 1177 * Return: always 1 1178 */ 1179 static int debug_wmi_enable_show(struct seq_file *m, void *v) 1180 { 1181 wmi_unified_t wmi_handle = (wmi_unified_t) m->private; 1182 1183 return wmi_bp_seq_printf(m, "%d\n", 1184 wmi_handle->log_info.wmi_logging_enable); 1185 } 1186 1187 /** 1188 * debug_wmi_log_size_show() - debugfs functions to display configured size of 1189 * wmi logging command/event buffer and management command/event buffer. 1190 * 1191 * @m: debugfs handler to access wmi_handle 1192 * @v: Variable arguments (not used) 1193 * 1194 * Return: Length of characters printed 1195 */ 1196 static int debug_wmi_log_size_show(struct seq_file *m, void *v) 1197 { 1198 1199 wmi_bp_seq_printf(m, "WMI command/event log max size:%d\n", 1200 wmi_log_max_entry); 1201 return wmi_bp_seq_printf(m, 1202 "WMI management command/events log max size:%d\n", 1203 wmi_mgmt_log_max_entry); 1204 } 1205 1206 /** 1207 * debug_wmi_##func_base##_write() - debugfs functions to clear 1208 * wmi logging command/event buffer and management command/event buffer. 
1209 * 1210 * @file: file handler to access wmi_handle 1211 * @buf: received data buffer 1212 * @count: length of received buffer 1213 * @ppos: Not used 1214 * 1215 * Return: count 1216 */ 1217 #define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\ 1218 static ssize_t debug_wmi_##func_base##_write(struct file *file, \ 1219 const char __user *buf, \ 1220 size_t count, loff_t *ppos) \ 1221 { \ 1222 int k, ret; \ 1223 wmi_unified_t wmi_handle = \ 1224 ((struct seq_file *)file->private_data)->private;\ 1225 struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info. \ 1226 wmi_##func_base##_buf_info; \ 1227 char locbuf[50]; \ 1228 \ 1229 if ((!buf) || (count > 50)) \ 1230 return -EFAULT; \ 1231 \ 1232 if (copy_from_user(locbuf, buf, count)) \ 1233 return -EFAULT; \ 1234 \ 1235 ret = sscanf(locbuf, "%d", &k); \ 1236 if ((ret != 1) || (k != 0)) { \ 1237 WMI_LOGE("Wrong input, echo 0 to clear the wmi buffer");\ 1238 return -EINVAL; \ 1239 } \ 1240 \ 1241 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\ 1242 qdf_mem_zero(wmi_log->buf, wmi_ring_size * \ 1243 sizeof(struct wmi_record_type)); \ 1244 wmi_log->length = 0; \ 1245 *(wmi_log->p_buf_tail_idx) = 0; \ 1246 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\ 1247 \ 1248 return count; \ 1249 } 1250 1251 GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_log_max_entry, 1252 wmi_command_debug); 1253 GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_log_max_entry, 1254 wmi_command_debug); 1255 GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_log_max_entry, 1256 wmi_event_debug); 1257 GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_log_max_entry, 1258 wmi_event_debug); 1259 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_log_max_entry, 1260 wmi_command_debug); 1261 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log, 1262 wmi_mgmt_log_max_entry, wmi_command_debug); 1263 GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_log_max_entry, 1264 wmi_event_debug); 1265 1266 /** 1267 * 
debug_wmi_enable_write() - debugfs functions to enable/disable 1268 * wmi logging feature. 1269 * 1270 * @file: file handler to access wmi_handle 1271 * @buf: received data buffer 1272 * @count: length of received buffer 1273 * @ppos: Not used 1274 * 1275 * Return: count 1276 */ 1277 static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf, 1278 size_t count, loff_t *ppos) 1279 { 1280 wmi_unified_t wmi_handle = 1281 ((struct seq_file *)file->private_data)->private; 1282 int k, ret; 1283 char locbuf[50]; 1284 1285 if ((!buf) || (count > 50)) 1286 return -EFAULT; 1287 1288 if (copy_from_user(locbuf, buf, count)) 1289 return -EFAULT; 1290 1291 ret = sscanf(locbuf, "%d", &k); 1292 if ((ret != 1) || ((k != 0) && (k != 1))) 1293 return -EINVAL; 1294 1295 wmi_handle->log_info.wmi_logging_enable = k; 1296 return count; 1297 } 1298 1299 /** 1300 * debug_wmi_log_size_write() - reserved. 1301 * 1302 * @file: file handler to access wmi_handle 1303 * @buf: received data buffer 1304 * @count: length of received buffer 1305 * @ppos: Not used 1306 * 1307 * Return: count 1308 */ 1309 static ssize_t debug_wmi_log_size_write(struct file *file, 1310 const char __user *buf, size_t count, loff_t *ppos) 1311 { 1312 return -EINVAL; 1313 } 1314 1315 /* Structure to maintain debug information */ 1316 struct wmi_debugfs_info { 1317 const char *name; 1318 const struct file_operations *ops; 1319 }; 1320 1321 #define DEBUG_FOO(func_base) { .name = #func_base, \ 1322 .ops = &debug_##func_base##_ops } 1323 1324 /** 1325 * debug_##func_base##_open() - Open debugfs entry for respective command 1326 * and event buffer. 
1327 * 1328 * @inode: node for debug dir entry 1329 * @file: file handler 1330 * 1331 * Return: open status 1332 */ 1333 #define GENERATE_DEBUG_STRUCTS(func_base) \ 1334 static int debug_##func_base##_open(struct inode *inode, \ 1335 struct file *file) \ 1336 { \ 1337 return single_open(file, debug_##func_base##_show, \ 1338 inode->i_private); \ 1339 } \ 1340 \ 1341 \ 1342 static struct file_operations debug_##func_base##_ops = { \ 1343 .open = debug_##func_base##_open, \ 1344 .read = seq_read, \ 1345 .llseek = seq_lseek, \ 1346 .write = debug_##func_base##_write, \ 1347 .release = single_release, \ 1348 }; 1349 1350 GENERATE_DEBUG_STRUCTS(wmi_command_log); 1351 GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log); 1352 GENERATE_DEBUG_STRUCTS(wmi_event_log); 1353 GENERATE_DEBUG_STRUCTS(wmi_rx_event_log); 1354 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log); 1355 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log); 1356 GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log); 1357 GENERATE_DEBUG_STRUCTS(wmi_enable); 1358 GENERATE_DEBUG_STRUCTS(wmi_log_size); 1359 1360 struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = { 1361 DEBUG_FOO(wmi_command_log), 1362 DEBUG_FOO(wmi_command_tx_cmp_log), 1363 DEBUG_FOO(wmi_event_log), 1364 DEBUG_FOO(wmi_rx_event_log), 1365 DEBUG_FOO(wmi_mgmt_command_log), 1366 DEBUG_FOO(wmi_mgmt_command_tx_cmp_log), 1367 DEBUG_FOO(wmi_mgmt_event_log), 1368 DEBUG_FOO(wmi_enable), 1369 DEBUG_FOO(wmi_log_size), 1370 }; 1371 1372 1373 /** 1374 * wmi_debugfs_create() - Create debug_fs entry for wmi logging. 
1375 * 1376 * @wmi_handle: wmi handle 1377 * @par_entry: debug directory entry 1378 * @id: Index to debug info data array 1379 * 1380 * Return: none 1381 */ 1382 static void wmi_debugfs_create(wmi_unified_t wmi_handle, 1383 struct dentry *par_entry) 1384 { 1385 int i; 1386 1387 if (!par_entry) 1388 goto out; 1389 1390 for (i = 0; i < NUM_DEBUG_INFOS; ++i) { 1391 wmi_handle->debugfs_de[i] = debugfs_create_file( 1392 wmi_debugfs_infos[i].name, 0644, par_entry, 1393 wmi_handle, wmi_debugfs_infos[i].ops); 1394 1395 if (!wmi_handle->debugfs_de[i]) { 1396 WMI_LOGE("debug Entry creation failed!"); 1397 goto out; 1398 } 1399 } 1400 1401 return; 1402 1403 out: 1404 WMI_LOGE("debug Entry creation failed!"); 1405 wmi_log_buffer_free(wmi_handle); 1406 return; 1407 } 1408 1409 /** 1410 * wmi_debugfs_remove() - Remove debugfs entry for wmi logging. 1411 * @wmi_handle: wmi handle 1412 * @dentry: debugfs directory entry 1413 * @id: Index to debug info data array 1414 * 1415 * Return: none 1416 */ 1417 static void wmi_debugfs_remove(wmi_unified_t wmi_handle) 1418 { 1419 int i; 1420 struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir; 1421 1422 if (dentry) { 1423 for (i = 0; i < NUM_DEBUG_INFOS; ++i) { 1424 if (wmi_handle->debugfs_de[i]) 1425 wmi_handle->debugfs_de[i] = NULL; 1426 } 1427 } 1428 1429 if (dentry) 1430 debugfs_remove_recursive(dentry); 1431 } 1432 1433 /** 1434 * wmi_debugfs_init() - debugfs functions to create debugfs directory and to 1435 * create debugfs enteries. 
1436 * 1437 * @h: wmi handler 1438 * 1439 * Return: init status 1440 */ 1441 static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx) 1442 { 1443 char buf[32]; 1444 1445 snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u", 1446 wmi_handle->soc->soc_idx, pdev_idx); 1447 1448 wmi_handle->log_info.wmi_log_debugfs_dir = 1449 debugfs_create_dir(buf, NULL); 1450 1451 if (!wmi_handle->log_info.wmi_log_debugfs_dir) { 1452 WMI_LOGE("error while creating debugfs dir for %s", buf); 1453 return QDF_STATUS_E_FAILURE; 1454 } 1455 wmi_debugfs_create(wmi_handle, 1456 wmi_handle->log_info.wmi_log_debugfs_dir); 1457 1458 return QDF_STATUS_SUCCESS; 1459 } 1460 1461 /** 1462 * wmi_mgmt_cmd_record() - Wrapper function for mgmt command logging macro 1463 * 1464 * @wmi_handle: wmi handle 1465 * @cmd: mgmt command 1466 * @header: pointer to 802.11 header 1467 * @vdev_id: vdev id 1468 * @chanfreq: channel frequency 1469 * 1470 * Return: none 1471 */ 1472 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd, 1473 void *header, uint32_t vdev_id, uint32_t chanfreq) 1474 { 1475 1476 uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE]; 1477 1478 data[0] = ((struct wmi_command_header *)header)->type; 1479 data[1] = ((struct wmi_command_header *)header)->sub_type; 1480 data[2] = vdev_id; 1481 data[3] = chanfreq; 1482 1483 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); 1484 1485 WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data); 1486 1487 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); 1488 } 1489 #else 1490 /** 1491 * wmi_debugfs_remove() - Remove debugfs entry for wmi logging. 
1492 * @wmi_handle: wmi handle 1493 * @dentry: debugfs directory entry 1494 * @id: Index to debug info data array 1495 * 1496 * Return: none 1497 */ 1498 static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { } 1499 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd, 1500 void *header, uint32_t vdev_id, uint32_t chanfreq) { } 1501 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { } 1502 #endif /*WMI_INTERFACE_EVENT_LOGGING */ 1503 qdf_export_symbol(wmi_mgmt_cmd_record); 1504 1505 int wmi_get_host_credits(wmi_unified_t wmi_handle); 1506 /* WMI buffer APIs */ 1507 1508 #ifdef NBUF_MEMORY_DEBUG 1509 wmi_buf_t 1510 wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len, 1511 const char *func_name, 1512 uint32_t line_num) 1513 { 1514 wmi_buf_t wmi_buf; 1515 1516 if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) { 1517 QDF_ASSERT(0); 1518 return NULL; 1519 } 1520 1521 wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, func_name, 1522 line_num); 1523 if (!wmi_buf) 1524 wmi_buf = qdf_nbuf_alloc_debug(NULL, 1525 roundup(len + WMI_MIN_HEAD_ROOM, 1526 4), 1527 WMI_MIN_HEAD_ROOM, 4, false, 1528 func_name, line_num); 1529 if (!wmi_buf) 1530 return NULL; 1531 1532 /* Clear the wmi buffer */ 1533 OS_MEMZERO(qdf_nbuf_data(wmi_buf), len); 1534 1535 /* 1536 * Set the length of the buffer to match the allocation size. 
1537 */ 1538 qdf_nbuf_set_pktlen(wmi_buf, len); 1539 1540 return wmi_buf; 1541 } 1542 qdf_export_symbol(wmi_buf_alloc_debug); 1543 1544 void wmi_buf_free(wmi_buf_t net_buf) 1545 { 1546 net_buf = wbuff_buff_put(net_buf); 1547 if (net_buf) 1548 qdf_nbuf_free(net_buf); 1549 } 1550 qdf_export_symbol(wmi_buf_free); 1551 #else 1552 wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len, 1553 const char *func, uint32_t line) 1554 { 1555 wmi_buf_t wmi_buf; 1556 1557 if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) { 1558 QDF_DEBUG_PANIC("Invalid length %u (via %s:%u)", 1559 len, func, line); 1560 return NULL; 1561 } 1562 1563 wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, __func__, 1564 __LINE__); 1565 if (!wmi_buf) 1566 wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len + 1567 WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4, 1568 false, func, line); 1569 1570 if (!wmi_buf) { 1571 wmi_nofl_err("%s:%d, failed to alloc len:%d", func, line, len); 1572 return NULL; 1573 } 1574 1575 /* Clear the wmi buffer */ 1576 OS_MEMZERO(qdf_nbuf_data(wmi_buf), len); 1577 1578 /* 1579 * Set the length of the buffer to match the allocation size. 1580 */ 1581 qdf_nbuf_set_pktlen(wmi_buf, len); 1582 1583 return wmi_buf; 1584 } 1585 qdf_export_symbol(wmi_buf_alloc_fl); 1586 1587 void wmi_buf_free(wmi_buf_t net_buf) 1588 { 1589 net_buf = wbuff_buff_put(net_buf); 1590 if (net_buf) 1591 qdf_nbuf_free(net_buf); 1592 } 1593 qdf_export_symbol(wmi_buf_free); 1594 #endif 1595 1596 /** 1597 * wmi_get_max_msg_len() - get maximum WMI message length 1598 * @wmi_handle: WMI handle. 
1599 * 1600 * This function returns the maximum WMI message length 1601 * 1602 * Return: maximum WMI message length 1603 */ 1604 uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle) 1605 { 1606 return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM; 1607 } 1608 qdf_export_symbol(wmi_get_max_msg_len); 1609 1610 #ifndef WMI_CMD_STRINGS 1611 static uint8_t *wmi_id_to_name(uint32_t wmi_command) 1612 { 1613 return "Invalid WMI cmd"; 1614 } 1615 #endif 1616 1617 static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag) 1618 { 1619 WMI_LOGD("Send WMI command:%s command_id:%d htc_tag:%d\n", 1620 wmi_id_to_name(cmd_id), cmd_id, tag); 1621 } 1622 1623 /** 1624 * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence 1625 * @cmd_id: command to check 1626 * 1627 * Return: true if the command is part of the resume sequence. 1628 */ 1629 #ifdef WLAN_POWER_MANAGEMENT_OFFLOAD 1630 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id) 1631 { 1632 switch (cmd_id) { 1633 case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID: 1634 case WMI_PDEV_RESUME_CMDID: 1635 return true; 1636 1637 default: 1638 return false; 1639 } 1640 } 1641 1642 #else 1643 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id) 1644 { 1645 return false; 1646 } 1647 1648 #endif 1649 1650 #ifdef FEATURE_WLAN_D0WOW 1651 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id) 1652 { 1653 wmi_d0_wow_enable_disable_cmd_fixed_param *cmd; 1654 1655 if (cmd_id == WMI_D0_WOW_ENABLE_DISABLE_CMDID) { 1656 cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *) 1657 wmi_buf_data(buf); 1658 if (!cmd->enable) 1659 return true; 1660 else 1661 return false; 1662 } 1663 1664 return false; 1665 } 1666 #else 1667 static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id) 1668 { 1669 return false; 1670 } 1671 1672 #endif 1673 1674 static inline void wmi_unified_debug_dump(wmi_unified_t wmi_handle) 1675 { 1676 wmi_nofl_err("Endpoint ID = %d, Tx Queue Depth = %d, soc_id = %u, target type = %s", 1677 
wmi_handle->wmi_endpoint_id, 1678 htc_get_tx_queue_depth(wmi_handle->htc_handle, 1679 wmi_handle->wmi_endpoint_id), 1680 wmi_handle->soc->soc_idx, 1681 (wmi_handle->target_type == 1682 WMI_TLV_TARGET ? "WMI_TLV_TARGET" : 1683 "WMI_NON_TLV_TARGET")); 1684 } 1685 1686 #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI 1687 QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle, 1688 wmi_buf_t buf, uint32_t buflen, 1689 uint32_t cmd_id) 1690 { 1691 QDF_STATUS status; 1692 int32_t ret; 1693 1694 if (!qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR))) { 1695 wmi_err("Failed to send cmd %x, no memory", cmd_id); 1696 return QDF_STATUS_E_NOMEM; 1697 } 1698 1699 qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR)); 1700 WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id); 1701 wmi_debug("Sending WMI_CMD_ID: %d over qmi", cmd_id); 1702 status = qdf_wmi_send_recv_qmi(qdf_nbuf_data(buf), 1703 buflen + sizeof(WMI_CMD_HDR), 1704 wmi_handle, 1705 wmi_process_qmi_fw_event); 1706 if (QDF_IS_STATUS_ERROR(status)) { 1707 qdf_nbuf_pull_head(buf, sizeof(WMI_CMD_HDR)); 1708 wmi_warn("WMI send on QMI failed. 
Retrying WMI on HTC"); 1709 } else { 1710 ret = qdf_atomic_inc_return(&wmi_handle->num_stats_over_qmi); 1711 wmi_debug("num stats over qmi: %d", ret); 1712 wmi_buf_free(buf); 1713 } 1714 1715 return status; 1716 } 1717 1718 static int __wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len) 1719 { 1720 struct wmi_unified *wmi_handle = wmi_cb_ctx; 1721 wmi_buf_t evt_buf; 1722 uint32_t evt_id; 1723 1724 if (!wmi_handle || !buf) 1725 return -EINVAL; 1726 1727 evt_buf = wmi_buf_alloc(wmi_handle, len); 1728 if (!evt_buf) 1729 return -ENOMEM; 1730 1731 qdf_mem_copy(qdf_nbuf_data(evt_buf), buf, len); 1732 evt_id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID); 1733 wmi_debug("Received WMI_EVT_ID: %d over qmi", evt_id); 1734 wmi_process_fw_event(wmi_handle, evt_buf); 1735 1736 return 0; 1737 } 1738 1739 int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len) 1740 { 1741 struct qdf_op_sync *op_sync; 1742 int ret; 1743 1744 if (qdf_op_protect(&op_sync)) 1745 return -EINVAL; 1746 ret = __wmi_process_qmi_fw_event(wmi_cb_ctx, buf, len); 1747 qdf_op_unprotect(op_sync); 1748 1749 return ret; 1750 } 1751 #endif 1752 1753 QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf, 1754 uint32_t len, uint32_t cmd_id, 1755 const char *func, uint32_t line) 1756 { 1757 HTC_PACKET *pkt; 1758 QDF_STATUS status; 1759 uint16_t htc_tag = 0; 1760 1761 if (wmi_get_runtime_pm_inprogress(wmi_handle)) { 1762 htc_tag = wmi_handle->ops->wmi_set_htc_tx_tag(wmi_handle, buf, 1763 cmd_id); 1764 } else if (qdf_atomic_read(&wmi_handle->is_target_suspended) && 1765 !wmi_is_pm_resume_cmd(cmd_id) && 1766 !wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id)) { 1767 wmi_nofl_err("Target is suspended (via %s:%u)", 1768 func, line); 1769 return QDF_STATUS_E_BUSY; 1770 } 1771 1772 if (wmi_handle->wmi_stopinprogress) { 1773 wmi_nofl_err("%s:%d, WMI stop in progress, wmi_handle:%pK", 1774 func, line, wmi_handle); 1775 return QDF_STATUS_E_INVAL; 1776 } 1777 1778 #ifndef 
WMI_NON_TLV_SUPPORT 1779 /* Do sanity check on the TLV parameter structure */ 1780 if (wmi_handle->target_type == WMI_TLV_TARGET) { 1781 void *buf_ptr = (void *)qdf_nbuf_data(buf); 1782 1783 if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id) 1784 != 0) { 1785 wmi_nofl_err("%s:%d, Invalid WMI Param Buffer for Cmd:%d", 1786 func, line, cmd_id); 1787 return QDF_STATUS_E_INVAL; 1788 } 1789 } 1790 #endif 1791 1792 if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) { 1793 wmi_nofl_err("%s:%d, Failed to send cmd %x, no memory", 1794 func, line, cmd_id); 1795 return QDF_STATUS_E_NOMEM; 1796 } 1797 1798 qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR)); 1799 WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id); 1800 1801 qdf_atomic_inc(&wmi_handle->pending_cmds); 1802 if (qdf_atomic_read(&wmi_handle->pending_cmds) >= 1803 wmi_handle->wmi_max_cmds) { 1804 wmi_nofl_err("hostcredits = %d", 1805 wmi_get_host_credits(wmi_handle)); 1806 htc_dump_counter_info(wmi_handle->htc_handle); 1807 qdf_atomic_dec(&wmi_handle->pending_cmds); 1808 wmi_nofl_err("%s:%d, MAX %d WMI Pending cmds reached", 1809 func, line, wmi_handle->wmi_max_cmds); 1810 wmi_unified_debug_dump(wmi_handle); 1811 qdf_trigger_self_recovery(QDF_WMI_EXCEED_MAX_PENDING_CMDS); 1812 return QDF_STATUS_E_BUSY; 1813 } 1814 1815 pkt = qdf_mem_malloc_fl(sizeof(*pkt), func, line); 1816 if (!pkt) { 1817 qdf_atomic_dec(&wmi_handle->pending_cmds); 1818 return QDF_STATUS_E_NOMEM; 1819 } 1820 1821 SET_HTC_PACKET_INFO_TX(pkt, 1822 NULL, 1823 qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR), 1824 wmi_handle->wmi_endpoint_id, htc_tag); 1825 1826 SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf); 1827 wmi_log_cmd_id(cmd_id, htc_tag); 1828 wmi_ext_dbg_msg_cmd_record(wmi_handle, 1829 qdf_nbuf_data(buf), qdf_nbuf_len(buf)); 1830 #ifdef WMI_INTERFACE_EVENT_LOGGING 1831 if (wmi_handle->log_info.wmi_logging_enable) { 1832 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); 1833 /* 1834 * Record 16 bytes 
of WMI cmd data - 1835 * exclude TLV and WMI headers 1836 * 1837 * WMI mgmt command already recorded in wmi_mgmt_cmd_record 1838 */ 1839 if (wmi_handle->ops->is_management_record(cmd_id) == false) { 1840 WMI_COMMAND_RECORD(wmi_handle, cmd_id, 1841 qdf_nbuf_data(buf) + 1842 wmi_handle->soc->buf_offset_command); 1843 } 1844 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); 1845 } 1846 #endif 1847 1848 status = htc_send_pkt(wmi_handle->htc_handle, pkt); 1849 1850 if (QDF_STATUS_SUCCESS != status) { 1851 qdf_atomic_dec(&wmi_handle->pending_cmds); 1852 wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d", 1853 func, line, status); 1854 qdf_mem_free(pkt); 1855 return status; 1856 } 1857 1858 return QDF_STATUS_SUCCESS; 1859 } 1860 qdf_export_symbol(wmi_unified_cmd_send_fl); 1861 1862 /** 1863 * wmi_unified_get_event_handler_ix() - gives event handler's index 1864 * @wmi_handle: handle to wmi 1865 * @event_id: wmi event id 1866 * 1867 * Return: event handler's index 1868 */ 1869 static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle, 1870 uint32_t event_id) 1871 { 1872 uint32_t idx = 0; 1873 int32_t invalid_idx = -1; 1874 struct wmi_soc *soc = wmi_handle->soc; 1875 1876 for (idx = 0; (idx < soc->max_event_idx && 1877 idx < WMI_UNIFIED_MAX_EVENT); ++idx) { 1878 if (wmi_handle->event_id[idx] == event_id && 1879 wmi_handle->event_handler[idx]) { 1880 return idx; 1881 } 1882 } 1883 1884 return invalid_idx; 1885 } 1886 1887 /** 1888 * wmi_unified_register_event() - register wmi event handler 1889 * @wmi_handle: handle to wmi 1890 * @event_id: wmi event id 1891 * @handler_func: wmi event handler function 1892 * 1893 * Return: 0 on success 1894 */ 1895 int wmi_unified_register_event(wmi_unified_t wmi_handle, 1896 uint32_t event_id, 1897 wmi_unified_event_handler handler_func) 1898 { 1899 uint32_t idx = 0; 1900 uint32_t evt_id; 1901 struct wmi_soc *soc; 1902 1903 if (!wmi_handle) { 1904 WMI_LOGE("WMI handle is NULL"); 1905 return QDF_STATUS_E_FAILURE; 1906 } 
1907 1908 soc = wmi_handle->soc; 1909 1910 if (event_id >= wmi_events_max || 1911 wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { 1912 QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, 1913 "%s: Event id %d is unavailable", 1914 __func__, event_id); 1915 return QDF_STATUS_E_FAILURE; 1916 } 1917 evt_id = wmi_handle->wmi_events[event_id]; 1918 if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) { 1919 QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, 1920 "%s : event handler already registered 0x%x", 1921 __func__, evt_id); 1922 return QDF_STATUS_E_FAILURE; 1923 } 1924 if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) { 1925 QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, 1926 "%s : no more event handlers 0x%x", 1927 __func__, evt_id); 1928 return QDF_STATUS_E_FAILURE; 1929 } 1930 idx = soc->max_event_idx; 1931 wmi_handle->event_handler[idx] = handler_func; 1932 wmi_handle->event_id[idx] = evt_id; 1933 qdf_spin_lock_bh(&soc->ctx_lock); 1934 wmi_handle->ctx[idx] = WMI_RX_UMAC_CTX; 1935 qdf_spin_unlock_bh(&soc->ctx_lock); 1936 soc->max_event_idx++; 1937 1938 return 0; 1939 } 1940 1941 /** 1942 * wmi_unified_register_event_handler() - register wmi event handler 1943 * @wmi_handle: handle to wmi 1944 * @event_id: wmi event id 1945 * @handler_func: wmi event handler function 1946 * @rx_ctx: rx execution context for wmi rx events 1947 * 1948 * This API is to support legacy requirements. Will be deprecated in future. 
1949 * Return: 0 on success 1950 */ 1951 int wmi_unified_register_event_handler(wmi_unified_t wmi_handle, 1952 wmi_conv_event_id event_id, 1953 wmi_unified_event_handler handler_func, 1954 uint8_t rx_ctx) 1955 { 1956 uint32_t idx = 0; 1957 uint32_t evt_id; 1958 struct wmi_soc *soc; 1959 1960 if (!wmi_handle) { 1961 WMI_LOGE("WMI handle is NULL"); 1962 return QDF_STATUS_E_FAILURE; 1963 } 1964 1965 soc = wmi_handle->soc; 1966 1967 if (event_id >= wmi_events_max || 1968 wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { 1969 QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, 1970 "%s: Event id %d is unavailable", 1971 __func__, event_id); 1972 return QDF_STATUS_E_FAILURE; 1973 } 1974 evt_id = wmi_handle->wmi_events[event_id]; 1975 1976 if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) { 1977 WMI_LOGE("event handler already registered 0x%x", 1978 evt_id); 1979 return QDF_STATUS_E_FAILURE; 1980 } 1981 if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) { 1982 WMI_LOGE("no more event handlers 0x%x", 1983 evt_id); 1984 return QDF_STATUS_E_FAILURE; 1985 } 1986 QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, 1987 "Registered event handler for event 0x%8x", evt_id); 1988 idx = soc->max_event_idx; 1989 wmi_handle->event_handler[idx] = handler_func; 1990 wmi_handle->event_id[idx] = evt_id; 1991 qdf_spin_lock_bh(&soc->ctx_lock); 1992 wmi_handle->ctx[idx] = rx_ctx; 1993 qdf_spin_unlock_bh(&soc->ctx_lock); 1994 soc->max_event_idx++; 1995 1996 return 0; 1997 } 1998 qdf_export_symbol(wmi_unified_register_event_handler); 1999 2000 /** 2001 * wmi_unified_unregister_event() - unregister wmi event handler 2002 * @wmi_handle: handle to wmi 2003 * @event_id: wmi event id 2004 * 2005 * Return: 0 on success 2006 */ 2007 int wmi_unified_unregister_event(wmi_unified_t wmi_handle, 2008 uint32_t event_id) 2009 { 2010 uint32_t idx = 0; 2011 uint32_t evt_id; 2012 struct wmi_soc *soc = wmi_handle->soc; 2013 2014 if (event_id >= wmi_events_max || 2015 
wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { 2016 QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, 2017 "%s: Event id %d is unavailable", 2018 __func__, event_id); 2019 return QDF_STATUS_E_FAILURE; 2020 } 2021 evt_id = wmi_handle->wmi_events[event_id]; 2022 2023 idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id); 2024 if (idx == -1) { 2025 WMI_LOGE("event handler is not registered: evt id 0x%x", 2026 evt_id); 2027 return QDF_STATUS_E_FAILURE; 2028 } 2029 wmi_handle->event_handler[idx] = NULL; 2030 wmi_handle->event_id[idx] = 0; 2031 --soc->max_event_idx; 2032 wmi_handle->event_handler[idx] = 2033 wmi_handle->event_handler[soc->max_event_idx]; 2034 wmi_handle->event_id[idx] = 2035 wmi_handle->event_id[soc->max_event_idx]; 2036 2037 return 0; 2038 } 2039 2040 /** 2041 * wmi_unified_unregister_event_handler() - unregister wmi event handler 2042 * @wmi_handle: handle to wmi 2043 * @event_id: wmi event id 2044 * 2045 * Return: 0 on success 2046 */ 2047 int wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle, 2048 wmi_conv_event_id event_id) 2049 { 2050 uint32_t idx = 0; 2051 uint32_t evt_id; 2052 struct wmi_soc *soc; 2053 2054 if (!wmi_handle) { 2055 WMI_LOGE("WMI handle is NULL"); 2056 return QDF_STATUS_E_FAILURE; 2057 } 2058 2059 soc = wmi_handle->soc; 2060 2061 if (event_id >= wmi_events_max || 2062 wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { 2063 WMI_LOGE("Event id %d is unavailable", 2064 event_id); 2065 return QDF_STATUS_E_FAILURE; 2066 } 2067 evt_id = wmi_handle->wmi_events[event_id]; 2068 2069 idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id); 2070 if (idx == -1) { 2071 WMI_LOGE("event handler is not registered: evt id 0x%x", 2072 evt_id); 2073 return QDF_STATUS_E_FAILURE; 2074 } 2075 wmi_handle->event_handler[idx] = NULL; 2076 wmi_handle->event_id[idx] = 0; 2077 --soc->max_event_idx; 2078 wmi_handle->event_handler[idx] = 2079 wmi_handle->event_handler[soc->max_event_idx]; 2080 
wmi_handle->event_id[idx] = 2081 wmi_handle->event_id[soc->max_event_idx]; 2082 2083 return 0; 2084 } 2085 qdf_export_symbol(wmi_unified_unregister_event_handler); 2086 2087 void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle, 2088 void *evt_buf) 2089 { 2090 2091 qdf_spin_lock_bh(&wmi_handle->eventq_lock); 2092 qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf); 2093 qdf_spin_unlock_bh(&wmi_handle->eventq_lock); 2094 qdf_queue_work(0, wmi_handle->wmi_rx_work_queue, 2095 &wmi_handle->rx_event_work); 2096 2097 return; 2098 } 2099 2100 qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx); 2101 2102 uint32_t wmi_critical_events_in_flight(struct wmi_unified *wmi) 2103 { 2104 return qdf_atomic_read(&wmi->critical_events_in_flight); 2105 } 2106 2107 static bool 2108 wmi_is_event_critical(struct wmi_unified *wmi_handle, uint32_t event_id) 2109 { 2110 if (wmi_handle->wmi_events[wmi_roam_synch_event_id] == event_id) 2111 return true; 2112 2113 return false; 2114 } 2115 2116 static void wmi_discard_fw_event(struct scheduler_msg *msg) 2117 { 2118 struct wmi_process_fw_event_params *event_param; 2119 2120 if (!msg->bodyptr) 2121 return; 2122 2123 event_param = (struct wmi_process_fw_event_params *)msg->bodyptr; 2124 qdf_nbuf_free(event_param->evt_buf); 2125 qdf_mem_free(msg->bodyptr); 2126 msg->bodyptr = NULL; 2127 msg->bodyval = 0; 2128 msg->type = 0; 2129 } 2130 2131 static int wmi_process_fw_event_handler(struct scheduler_msg *msg) 2132 { 2133 struct wmi_process_fw_event_params *params = 2134 (struct wmi_process_fw_event_params *)msg->bodyptr; 2135 struct wmi_unified *wmi_handle; 2136 uint32_t event_id; 2137 2138 wmi_handle = (struct wmi_unified *)params->wmi_handle; 2139 event_id = WMI_GET_FIELD(qdf_nbuf_data(params->evt_buf), 2140 WMI_CMD_HDR, COMMANDID); 2141 wmi_process_fw_event(wmi_handle, params->evt_buf); 2142 2143 if (wmi_is_event_critical(wmi_handle, event_id)) 2144 qdf_atomic_dec(&wmi_handle->critical_events_in_flight); 2145 2146 
qdf_mem_free(msg->bodyptr); 2147 2148 return 0; 2149 } 2150 2151 /** 2152 * wmi_process_fw_event_sched_thread_ctx() - common event handler to serialize 2153 * event processing through scheduler thread 2154 * @ctx: wmi context 2155 * @ev: event buffer 2156 * @rx_ctx: rx execution context 2157 * 2158 * Return: 0 on success, errno on failure 2159 */ 2160 static QDF_STATUS 2161 wmi_process_fw_event_sched_thread_ctx(struct wmi_unified *wmi, 2162 void *ev) 2163 { 2164 struct wmi_process_fw_event_params *params_buf; 2165 struct scheduler_msg msg = { 0 }; 2166 uint32_t event_id; 2167 struct target_psoc_info *tgt_hdl; 2168 bool is_wmi_ready = false; 2169 struct wlan_objmgr_psoc *psoc; 2170 2171 psoc = target_if_get_psoc_from_scn_hdl(wmi->scn_handle); 2172 if (!psoc) { 2173 target_if_err("psoc is null"); 2174 qdf_nbuf_free(ev); 2175 return QDF_STATUS_E_INVAL; 2176 } 2177 2178 tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); 2179 if (!tgt_hdl) { 2180 wmi_err("target_psoc_info is null"); 2181 qdf_nbuf_free(ev); 2182 return QDF_STATUS_E_INVAL; 2183 } 2184 2185 is_wmi_ready = target_psoc_get_wmi_ready(tgt_hdl); 2186 if (!is_wmi_ready) { 2187 wmi_debug("fw event recvd before ready event processed"); 2188 wmi_debug("therefore use worker thread"); 2189 wmi_process_fw_event_worker_thread_ctx(wmi, ev); 2190 return QDF_STATUS_E_INVAL; 2191 } 2192 2193 params_buf = qdf_mem_malloc(sizeof(struct wmi_process_fw_event_params)); 2194 if (!params_buf) { 2195 wmi_err("malloc failed"); 2196 qdf_nbuf_free(ev); 2197 return QDF_STATUS_E_NOMEM; 2198 } 2199 2200 params_buf->wmi_handle = wmi; 2201 params_buf->evt_buf = ev; 2202 2203 event_id = WMI_GET_FIELD(qdf_nbuf_data(params_buf->evt_buf), 2204 WMI_CMD_HDR, COMMANDID); 2205 if (wmi_is_event_critical(wmi, event_id)) 2206 qdf_atomic_inc(&wmi->critical_events_in_flight); 2207 2208 msg.bodyptr = params_buf; 2209 msg.bodyval = 0; 2210 msg.callback = wmi_process_fw_event_handler; 2211 msg.flush_callback = wmi_discard_fw_event; 2212 2213 if 
(QDF_STATUS_SUCCESS != 2214 scheduler_post_message(QDF_MODULE_ID_TARGET_IF, 2215 QDF_MODULE_ID_TARGET_IF, 2216 QDF_MODULE_ID_TARGET_IF, &msg)) { 2217 qdf_nbuf_free(ev); 2218 qdf_mem_free(params_buf); 2219 return QDF_STATUS_E_FAULT; 2220 } 2221 2222 return QDF_STATUS_SUCCESS; 2223 } 2224 2225 /** 2226 * wmi_get_pdev_ep: Get wmi handle based on endpoint 2227 * @soc: handle to wmi soc 2228 * @ep: endpoint id 2229 * 2230 * Return: none 2231 */ 2232 static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc, 2233 HTC_ENDPOINT_ID ep) 2234 { 2235 uint32_t i; 2236 2237 for (i = 0; i < WMI_MAX_RADIOS; i++) 2238 if (soc->wmi_endpoint_id[i] == ep) 2239 break; 2240 2241 if (i == WMI_MAX_RADIOS) 2242 return NULL; 2243 2244 return soc->wmi_pdev[i]; 2245 } 2246 2247 /** 2248 * wmi_mtrace_rx() - Wrappper function for qdf_mtrace api 2249 * @message_id: 32-Bit Wmi message ID 2250 * @vdev_id: Vdev ID 2251 * @data: Actual message contents 2252 * 2253 * This function converts the 32-bit WMI message ID in 15-bit message ID 2254 * format for qdf_mtrace as in qdf_mtrace message there are only 15 2255 * bits reserved for message ID. 2256 * out of these 15-bits, 8-bits (From LSB) specifies the WMI_GRP_ID 2257 * and remaining 7-bits specifies the actual WMI command. With this 2258 * notation there can be maximum 256 groups and each group can have 2259 * max 128 commands can be supported. 
 *
 * Return: None
 */
static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data)
{
	uint16_t mtrace_message_id;

	/* Pack: low bits = command within group, high bits = group id */
	mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) |
		(QDF_WMI_MTRACE_GRP_ID(message_id) <<
		 QDF_WMI_MTRACE_CMD_NUM_BITS);
	qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA,
		   mtrace_message_id, vdev_id, data);
}

/**
 * wmi_control_rx() - process fw events callbacks
 * @ctx: handle to wmi soc
 * @htc_packet: pointer to htc packet
 *
 * HTC rx completion callback: resolves the owning pdev wmi handle from
 * the endpoint, looks up the registered handler index, records the
 * event (when interface logging is on) and dispatches the buffer to
 * the execution context the handler registered with.  Ownership of the
 * event nbuf passes to the chosen dispatch path; it is freed here only
 * on error.
 *
 * Return: none
 */
static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
{
	struct wmi_soc *soc = (struct wmi_soc *) ctx;
	struct wmi_unified *wmi_handle;
	wmi_buf_t evt_buf;
	uint32_t id;
	uint32_t idx = 0;
	enum wmi_rx_exec_ctx exec_ctx;

	evt_buf = (wmi_buf_t) htc_packet->pPktContext;

	wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint);
	if (!wmi_handle) {
		WMI_LOGE
		("unable to get wmi_handle to Endpoint %d\n",
		 htc_packet->Endpoint);
		qdf_nbuf_free(evt_buf);
		return;
	}

	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
	if (qdf_unlikely(idx == A_ERROR)) {
		wmi_debug("no handler registered for event id 0x%x", id);
		qdf_nbuf_free(evt_buf);
		return;
	}
	wmi_mtrace_rx(id, 0xFF, idx);
	/* ctx[] is written by (un)register under the same lock */
	qdf_spin_lock_bh(&soc->ctx_lock);
	exec_ctx = wmi_handle->ctx[idx];
	qdf_spin_unlock_bh(&soc->ctx_lock);

#ifdef WMI_INTERFACE_EVENT_LOGGING
	if (wmi_handle->log_info.wmi_logging_enable) {
		uint8_t *data;
		data = qdf_nbuf_data(evt_buf);

		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
		/* Exclude 4 bytes of TLV header */
		if (wmi_handle->ops->is_diag_event(id)) {
			WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id,
						 ((uint8_t *) data +
						  wmi_handle->soc->buf_offset_event));
		} else if (wmi_handle->ops->is_management_record(id)) {
			WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id,
						 ((uint8_t *) data +
						  wmi_handle->soc->buf_offset_event));
		} else {
			WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data +
					    wmi_handle->soc->buf_offset_event));
		}
		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
	}
#endif

	if (exec_ctx == WMI_RX_WORK_CTX) {
		wmi_process_fw_event_worker_thread_ctx
					(wmi_handle, evt_buf);
	} else if (exec_ctx == WMI_RX_TASKLET_CTX) {
		wmi_process_fw_event(wmi_handle, evt_buf);
	} else if (exec_ctx == WMI_RX_SERIALIZER_CTX) {
		wmi_process_fw_event_sched_thread_ctx(wmi_handle, evt_buf);
	} else {
		WMI_LOGE("Invalid event context %d", exec_ctx);
		qdf_nbuf_free(evt_buf);
	}

}

/**
 * wmi_process_fw_event() - process any fw event
 * @wmi_handle: wmi handle
 * @evt_buf: fw event buffer
 *
 * This function process fw event in caller context
 *
 * Return: none
 */
void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
{
	__wmi_control_rx(wmi_handle, evt_buf);
}

/**
 * __wmi_control_rx() - process serialize wmi event callback
 * @wmi_handle: wmi handle
 * @evt_buf: fw event buffer; always consumed (freed) by this function
 *
 * Strips the WMI_CMD_HDR, for TLV targets validates/pads the TLVs into
 * an allocated event structure, then invokes the registered handler.
 * The end: label frees both the TLV allocation (TLV targets) and the
 * event nbuf on all paths.
 *
 * Return: none
 */
void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
{
	uint32_t id;
	uint8_t *data;
	uint32_t len;
	void *wmi_cmd_struct_ptr = NULL;
#ifndef WMI_NON_TLV_SUPPORT
	int tlv_ok_status = 0;
#endif
	uint32_t idx = 0;

	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);

	wmi_ext_dbg_msg_event_record(wmi_handle, qdf_nbuf_data(evt_buf),
				     qdf_nbuf_len(evt_buf));

	if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
		goto end;

	data = qdf_nbuf_data(evt_buf);
	len = qdf_nbuf_len(evt_buf);

#ifndef WMI_NON_TLV_SUPPORT
	if (wmi_handle->target_type == WMI_TLV_TARGET) {
		/* Validate and pad(if necessary) the TLVs */
		tlv_ok_status =
			wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle,
								 data, len, id,
								 &wmi_cmd_struct_ptr);
		if (tlv_ok_status != 0) {
			QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
				  "%s: Error: id=0x%x, wmitlv check status=%d",
				  __func__, id, tlv_ok_status);
			goto end;
		}
	}
#endif

	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
	if (idx == A_ERROR) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
			  "%s : event handler is not registered: event id 0x%x",
			  __func__, id);
		goto end;
	}
#ifdef WMI_INTERFACE_EVENT_LOGGING
	if (wmi_handle->log_info.wmi_logging_enable) {
		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
		/* Exclude 4 bytes of TLV header */
		if (wmi_handle->ops->is_diag_event(id)) {
			/*
			 * skip diag event logging in WMI event buffer
			 * as its already logged in WMI RX event buffer
			 */
		} else if (wmi_handle->ops->is_management_record(id)) {
			/*
			 * skip wmi mgmt event logging in WMI event buffer
			 * as its already logged in WMI RX event buffer
			 */
		} else {
			WMI_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data +
					 wmi_handle->soc->buf_offset_event));
		}
		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
	}
#endif
	/* Call the WMI registered event handler */
	if (wmi_handle->target_type == WMI_TLV_TARGET)
		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
						wmi_cmd_struct_ptr, len);
	else
		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
						data, len);

end:
	/* Free event buffer and allocated event tlv */
#ifndef WMI_NON_TLV_SUPPORT
	if (wmi_handle->target_type == WMI_TLV_TARGET)
		wmi_handle->ops->wmi_free_allocated_event(id,
							  &wmi_cmd_struct_ptr);
#endif

	qdf_nbuf_free(evt_buf);

}

#define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */

/* Log that a work item overran the workqueue watchdog budget */
static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id)
{
	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
		  "%s: WLAN_BUG_RCA: Message type %x has exceeded its alloted time of %ds",
		  __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000);
}

#ifdef CONFIG_SLUB_DEBUG_ON
/* Debug builds: dump the stuck thread and assert */
static void wmi_workqueue_watchdog_bite(void *arg)
{
	struct wmi_wq_dbg_info *info = arg;

	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
	qdf_print_thread_trace(info->task);

	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
		  "%s: Going down for WMI WQ Watchdog Bite!", __func__);
	QDF_BUG(0);
}
#else
/* Production builds: warn only, do not crash */
static inline void wmi_workqueue_watchdog_bite(void *arg)
{
	struct wmi_wq_dbg_info *info = arg;

	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
}
#endif

/**
 * wmi_rx_event_work() - process rx event in rx work queue context
 * @arg: opaque pointer to wmi handle
 *
 * This function process any fw event to serialize it through rx worker thread.
2491 * 2492 * Return: none 2493 */ 2494 static void wmi_rx_event_work(void *arg) 2495 { 2496 wmi_buf_t buf; 2497 struct wmi_unified *wmi = arg; 2498 qdf_timer_t wd_timer; 2499 struct wmi_wq_dbg_info info; 2500 2501 /* initialize WMI workqueue watchdog timer */ 2502 qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite, 2503 &info, QDF_TIMER_TYPE_SW); 2504 qdf_spin_lock_bh(&wmi->eventq_lock); 2505 buf = qdf_nbuf_queue_remove(&wmi->event_queue); 2506 qdf_spin_unlock_bh(&wmi->eventq_lock); 2507 while (buf) { 2508 qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT); 2509 info.wd_msg_type_id = 2510 WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID); 2511 info.wmi_wq = wmi->wmi_rx_work_queue; 2512 info.task = qdf_get_current_task(); 2513 __wmi_control_rx(wmi, buf); 2514 qdf_timer_stop(&wd_timer); 2515 qdf_spin_lock_bh(&wmi->eventq_lock); 2516 buf = qdf_nbuf_queue_remove(&wmi->event_queue); 2517 qdf_spin_unlock_bh(&wmi->eventq_lock); 2518 } 2519 qdf_timer_free(&wd_timer); 2520 } 2521 2522 #ifdef FEATURE_RUNTIME_PM 2523 /** 2524 * wmi_runtime_pm_init() - initialize runtime pm wmi variables 2525 * @wmi_handle: wmi context 2526 */ 2527 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle) 2528 { 2529 qdf_atomic_init(&wmi_handle->runtime_pm_inprogress); 2530 } 2531 2532 /** 2533 * wmi_set_runtime_pm_inprogress() - set runtime pm progress flag 2534 * @wmi_handle: wmi context 2535 * @val: runtime pm progress flag 2536 */ 2537 void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val) 2538 { 2539 qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val); 2540 } 2541 2542 /** 2543 * wmi_get_runtime_pm_inprogress() - get runtime pm progress flag 2544 * @wmi_handle: wmi context 2545 */ 2546 inline bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle) 2547 { 2548 return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress); 2549 } 2550 #else 2551 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle) 2552 { 2553 } 2554 #endif 2555 2556 /** 
2557 * wmi_unified_get_soc_handle: Get WMI SoC handle 2558 * @param wmi_handle: WMI context got from wmi_attach 2559 * 2560 * return: Pointer to Soc handle 2561 */ 2562 void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle) 2563 { 2564 return wmi_handle->soc; 2565 } 2566 2567 /** 2568 * wmi_interface_logging_init: Interface looging init 2569 * @param wmi_handle: Pointer to wmi handle object 2570 * 2571 * return: None 2572 */ 2573 #ifdef WMI_INTERFACE_EVENT_LOGGING 2574 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle, 2575 uint32_t pdev_idx) 2576 { 2577 if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) { 2578 qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock); 2579 wmi_debugfs_init(wmi_handle, pdev_idx); 2580 } 2581 } 2582 #else 2583 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle, 2584 uint32_t pdev_idx) 2585 { 2586 } 2587 #endif 2588 2589 /** 2590 * wmi_unified_get_pdev_handle: Get WMI SoC handle 2591 * @param wmi_soc: Pointer to wmi soc object 2592 * @param pdev_idx: pdev index 2593 * 2594 * return: Pointer to wmi handle or NULL on failure 2595 */ 2596 void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx) 2597 { 2598 struct wmi_unified *wmi_handle; 2599 2600 if (pdev_idx >= WMI_MAX_RADIOS) 2601 return NULL; 2602 2603 if (!soc->wmi_pdev[pdev_idx]) { 2604 wmi_handle = 2605 (struct wmi_unified *) qdf_mem_malloc( 2606 sizeof(struct wmi_unified)); 2607 if (!wmi_handle) 2608 return NULL; 2609 2610 wmi_handle->scn_handle = soc->scn_handle; 2611 wmi_handle->event_id = soc->event_id; 2612 wmi_handle->event_handler = soc->event_handler; 2613 wmi_handle->ctx = soc->ctx; 2614 wmi_handle->ops = soc->ops; 2615 qdf_spinlock_create(&wmi_handle->eventq_lock); 2616 qdf_nbuf_queue_init(&wmi_handle->event_queue); 2617 2618 qdf_create_work(0, &wmi_handle->rx_event_work, 2619 wmi_rx_event_work, wmi_handle); 2620 wmi_handle->wmi_rx_work_queue = 2621 
qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue"); 2622 if (!wmi_handle->wmi_rx_work_queue) { 2623 WMI_LOGE("failed to create wmi_rx_event_work_queue"); 2624 goto error; 2625 } 2626 wmi_handle->wmi_events = soc->wmi_events; 2627 wmi_handle->services = soc->services; 2628 wmi_handle->soc = soc; 2629 wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map; 2630 wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map; 2631 wmi_interface_logging_init(wmi_handle, pdev_idx); 2632 qdf_atomic_init(&wmi_handle->pending_cmds); 2633 qdf_atomic_init(&wmi_handle->is_target_suspended); 2634 wmi_handle->target_type = soc->target_type; 2635 wmi_handle->wmi_max_cmds = soc->wmi_max_cmds; 2636 2637 soc->wmi_pdev[pdev_idx] = wmi_handle; 2638 } else 2639 wmi_handle = soc->wmi_pdev[pdev_idx]; 2640 2641 wmi_handle->wmi_stopinprogress = 0; 2642 wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx]; 2643 wmi_handle->htc_handle = soc->htc_handle; 2644 wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx]; 2645 2646 return wmi_handle; 2647 2648 error: 2649 qdf_mem_free(wmi_handle); 2650 2651 return NULL; 2652 } 2653 qdf_export_symbol(wmi_unified_get_pdev_handle); 2654 2655 static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t); 2656 2657 void wmi_unified_register_module(enum wmi_target_type target_type, 2658 void (*wmi_attach)(wmi_unified_t wmi_handle)) 2659 { 2660 if (target_type < WMI_MAX_TARGET_TYPE) 2661 wmi_attach_register[target_type] = wmi_attach; 2662 2663 return; 2664 } 2665 qdf_export_symbol(wmi_unified_register_module); 2666 2667 /** 2668 * wmi_wbuff_register() - register wmi with wbuff 2669 * @wmi_handle: handle to wmi 2670 * 2671 * @Return: void 2672 */ 2673 static void wmi_wbuff_register(struct wmi_unified *wmi_handle) 2674 { 2675 struct wbuff_alloc_request wbuff_alloc[4]; 2676 2677 wbuff_alloc[0].slot = WBUFF_POOL_0; 2678 wbuff_alloc[0].size = WMI_WBUFF_POOL_0_SIZE; 2679 wbuff_alloc[1].slot = WBUFF_POOL_1; 2680 wbuff_alloc[1].size = WMI_WBUFF_POOL_1_SIZE; 2681 
wbuff_alloc[2].slot = WBUFF_POOL_2; 2682 wbuff_alloc[2].size = WMI_WBUFF_POOL_2_SIZE; 2683 wbuff_alloc[3].slot = WBUFF_POOL_3; 2684 wbuff_alloc[3].size = WMI_WBUFF_POOL_3_SIZE; 2685 2686 wmi_handle->wbuff_handle = wbuff_module_register(wbuff_alloc, 4, 2687 WMI_MIN_HEAD_ROOM, 4); 2688 } 2689 2690 /** 2691 * wmi_wbuff_deregister() - deregister wmi with wbuff 2692 * @wmi_handle: handle to wmi 2693 * 2694 * @Return: void 2695 */ 2696 static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle) 2697 { 2698 wbuff_module_deregister(wmi_handle->wbuff_handle); 2699 wmi_handle->wbuff_handle = NULL; 2700 } 2701 2702 /** 2703 * wmi_unified_attach() - attach for unified WMI 2704 * @scn_handle: handle to SCN 2705 * @osdev: OS device context 2706 * @target_type: TLV or not-TLV based target 2707 * @use_cookie: cookie based allocation enabled/disabled 2708 * @ops: umac rx callbacks 2709 * @psoc: objmgr psoc 2710 * 2711 * @Return: wmi handle. 2712 */ 2713 void *wmi_unified_attach(void *scn_handle, 2714 struct wmi_unified_attach_params *param) 2715 { 2716 struct wmi_unified *wmi_handle; 2717 struct wmi_soc *soc; 2718 2719 soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc)); 2720 if (!soc) 2721 return NULL; 2722 2723 wmi_handle = 2724 (struct wmi_unified *) qdf_mem_malloc( 2725 sizeof(struct wmi_unified)); 2726 if (!wmi_handle) { 2727 qdf_mem_free(soc); 2728 return NULL; 2729 } 2730 wmi_handle->soc = soc; 2731 wmi_handle->soc->soc_idx = param->soc_id; 2732 wmi_handle->soc->is_async_ep = param->is_async_ep; 2733 wmi_handle->event_id = soc->event_id; 2734 wmi_handle->event_handler = soc->event_handler; 2735 wmi_handle->ctx = soc->ctx; 2736 wmi_handle->wmi_events = soc->wmi_events; 2737 wmi_handle->services = soc->services; 2738 wmi_handle->scn_handle = scn_handle; 2739 wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map; 2740 wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map; 2741 soc->scn_handle = scn_handle; 2742 qdf_atomic_init(&wmi_handle->pending_cmds); 2743 
qdf_atomic_init(&wmi_handle->is_target_suspended); 2744 qdf_atomic_init(&wmi_handle->num_stats_over_qmi); 2745 wmi_runtime_pm_init(wmi_handle); 2746 qdf_spinlock_create(&wmi_handle->eventq_lock); 2747 qdf_nbuf_queue_init(&wmi_handle->event_queue); 2748 qdf_create_work(0, &wmi_handle->rx_event_work, 2749 wmi_rx_event_work, wmi_handle); 2750 wmi_handle->wmi_rx_work_queue = 2751 qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue"); 2752 if (!wmi_handle->wmi_rx_work_queue) { 2753 WMI_LOGE("failed to create wmi_rx_event_work_queue"); 2754 goto error; 2755 } 2756 wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0); 2757 wmi_handle->target_type = param->target_type; 2758 soc->target_type = param->target_type; 2759 2760 if (param->target_type >= WMI_MAX_TARGET_TYPE) 2761 goto error; 2762 2763 if (wmi_attach_register[param->target_type]) { 2764 wmi_attach_register[param->target_type](wmi_handle); 2765 } else { 2766 WMI_LOGE("wmi attach is not registered"); 2767 goto error; 2768 } 2769 /* Assign target cookie capablity */ 2770 wmi_handle->use_cookie = param->use_cookie; 2771 wmi_handle->osdev = param->osdev; 2772 wmi_handle->wmi_stopinprogress = 0; 2773 wmi_handle->wmi_max_cmds = param->max_commands; 2774 soc->wmi_max_cmds = param->max_commands; 2775 /* Increase the ref count once refcount infra is present */ 2776 soc->wmi_psoc = param->psoc; 2777 qdf_spinlock_create(&soc->ctx_lock); 2778 2779 soc->ops = wmi_handle->ops; 2780 soc->wmi_pdev[0] = wmi_handle; 2781 if (wmi_ext_dbgfs_init(wmi_handle) != QDF_STATUS_SUCCESS) 2782 WMI_LOGE("failed to initialize wmi extended debugfs"); 2783 2784 wmi_wbuff_register(wmi_handle); 2785 2786 return wmi_handle; 2787 2788 error: 2789 qdf_mem_free(soc); 2790 qdf_mem_free(wmi_handle); 2791 2792 return NULL; 2793 } 2794 2795 /** 2796 * wmi_unified_detach() - detach for unified WMI 2797 * 2798 * @wmi_handle : handle to wmi. 2799 * 2800 * @Return: none. 
 */
void wmi_unified_detach(struct wmi_unified *wmi_handle)
{
	wmi_buf_t buf;
	struct wmi_soc *soc;
	uint8_t i;

	/* stop feeding wbuff before tearing anything else down */
	wmi_wbuff_deregister(wmi_handle);

	wmi_ext_dbgfs_deinit(wmi_handle);

	soc = wmi_handle->soc;
	for (i = 0; i < WMI_MAX_RADIOS; i++) {
		if (soc->wmi_pdev[i]) {
			/* drain and destroy the per-pdev rx workqueue
			 * first so no worker touches the queues below
			 */
			qdf_flush_workqueue(0,
					    soc->wmi_pdev[i]->wmi_rx_work_queue);
			qdf_destroy_workqueue(0,
					      soc->wmi_pdev[i]->wmi_rx_work_queue);
			wmi_debugfs_remove(soc->wmi_pdev[i]);
			/* free any events still pending in the queue */
			buf = qdf_nbuf_queue_remove(
					&soc->wmi_pdev[i]->event_queue);
			while (buf) {
				qdf_nbuf_free(buf);
				buf = qdf_nbuf_queue_remove(
						&soc->wmi_pdev[i]->event_queue);
			}

			wmi_log_buffer_free(soc->wmi_pdev[i]);

			/* Free events logs list */
			if (soc->wmi_pdev[i]->events_logs_list)
				qdf_mem_free(
					soc->wmi_pdev[i]->events_logs_list);

			qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock);
			qdf_mem_free(soc->wmi_pdev[i]);
		}
	}
	qdf_spinlock_destroy(&soc->ctx_lock);

	if (soc->wmi_service_bitmap) {
		qdf_mem_free(soc->wmi_service_bitmap);
		soc->wmi_service_bitmap = NULL;
	}

	if (soc->wmi_ext_service_bitmap) {
		qdf_mem_free(soc->wmi_ext_service_bitmap);
		soc->wmi_ext_service_bitmap = NULL;
	}

	/* Decrease the ref count once refcount infra is present */
	soc->wmi_psoc = NULL;
	qdf_mem_free(soc);
}

/**
 * wmi_unified_remove_work() - detach for WMI work
 * @wmi_handle: handle to WMI
 *
 * A function that does not fully detach WMI, but just remove work
 * queue items associated with it. This is used to make sure that
 * before any other processing code that may destroy related contexts
 * (HTC, etc), work queue processing on WMI has already been stopped.
2864 * 2865 * Return: None 2866 */ 2867 void 2868 wmi_unified_remove_work(struct wmi_unified *wmi_handle) 2869 { 2870 wmi_buf_t buf; 2871 2872 qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue); 2873 qdf_spin_lock_bh(&wmi_handle->eventq_lock); 2874 buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue); 2875 while (buf) { 2876 qdf_nbuf_free(buf); 2877 buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue); 2878 } 2879 qdf_spin_unlock_bh(&wmi_handle->eventq_lock); 2880 } 2881 2882 /** 2883 * wmi_htc_tx_complete() - Process htc tx completion 2884 * 2885 * @ctx: handle to wmi 2886 * @htc_packet: pointer to htc packet 2887 * 2888 * @Return: none. 2889 */ 2890 static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt) 2891 { 2892 struct wmi_soc *soc = (struct wmi_soc *) ctx; 2893 wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt); 2894 u_int8_t *buf_ptr; 2895 u_int32_t len; 2896 struct wmi_unified *wmi_handle; 2897 #ifdef WMI_INTERFACE_EVENT_LOGGING 2898 uint32_t cmd_id; 2899 #endif 2900 2901 ASSERT(wmi_cmd_buf); 2902 wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint); 2903 if (!wmi_handle) { 2904 WMI_LOGE("%s: Unable to get wmi handle\n", __func__); 2905 QDF_ASSERT(0); 2906 return; 2907 } 2908 #ifdef WMI_INTERFACE_EVENT_LOGGING 2909 if (wmi_handle && wmi_handle->log_info.wmi_logging_enable) { 2910 cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), 2911 WMI_CMD_HDR, COMMANDID); 2912 2913 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); 2914 /* Record 16 bytes of WMI cmd tx complete data 2915 - exclude TLV and WMI headers */ 2916 if (wmi_handle->ops->is_management_record(cmd_id)) { 2917 WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id, 2918 qdf_nbuf_data(wmi_cmd_buf) + 2919 wmi_handle->soc->buf_offset_command); 2920 } else { 2921 WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id, 2922 qdf_nbuf_data(wmi_cmd_buf) + 2923 wmi_handle->soc->buf_offset_command); 2924 } 2925 2926 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); 2927 } 2928 
#endif 2929 buf_ptr = (u_int8_t *) wmi_buf_data(wmi_cmd_buf); 2930 len = qdf_nbuf_len(wmi_cmd_buf); 2931 qdf_mem_zero(buf_ptr, len); 2932 wmi_buf_free(wmi_cmd_buf); 2933 qdf_mem_free(htc_pkt); 2934 qdf_atomic_dec(&wmi_handle->pending_cmds); 2935 } 2936 2937 #ifdef FEATURE_RUNTIME_PM 2938 /** 2939 * wmi_htc_log_pkt() - Print information of WMI command from HTC packet 2940 * 2941 * @ctx: handle of WMI context 2942 * @htc_pkt: handle of HTC packet 2943 * 2944 * @Return: none 2945 */ 2946 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt) 2947 { 2948 wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt); 2949 uint32_t cmd_id; 2950 2951 ASSERT(wmi_cmd_buf); 2952 cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), WMI_CMD_HDR, 2953 COMMANDID); 2954 2955 WMI_LOGD("WMI command from HTC packet: %s, ID: %d\n", 2956 wmi_id_to_name(cmd_id), cmd_id); 2957 } 2958 #else 2959 static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt) 2960 { 2961 } 2962 #endif 2963 2964 /** 2965 * wmi_connect_pdev_htc_service() - WMI API to get connect to HTC service 2966 * 2967 * @wmi_handle: handle to WMI. 
2968 * @pdev_idx: Pdev index 2969 * 2970 * @Return: QDF_STATUS 2971 */ 2972 static QDF_STATUS wmi_connect_pdev_htc_service(struct wmi_soc *soc, 2973 uint32_t pdev_idx) 2974 { 2975 QDF_STATUS status; 2976 struct htc_service_connect_resp response; 2977 struct htc_service_connect_req connect; 2978 2979 OS_MEMZERO(&connect, sizeof(connect)); 2980 OS_MEMZERO(&response, sizeof(response)); 2981 2982 /* meta data is unused for now */ 2983 connect.pMetaData = NULL; 2984 connect.MetaDataLength = 0; 2985 /* these fields are the same for all service endpoints */ 2986 connect.EpCallbacks.pContext = soc; 2987 connect.EpCallbacks.EpTxCompleteMultiple = 2988 NULL /* Control path completion ar6000_tx_complete */; 2989 connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */; 2990 connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */; 2991 connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */; 2992 connect.EpCallbacks.EpTxComplete = 2993 wmi_htc_tx_complete /* ar6000_tx_queue_full */; 2994 connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt; 2995 2996 /* connect to control service */ 2997 connect.service_id = soc->svc_ids[pdev_idx]; 2998 status = htc_connect_service(soc->htc_handle, &connect, &response); 2999 3000 if (QDF_IS_STATUS_ERROR(status)) { 3001 WMI_LOGE("Failed to connect to WMI CONTROL service status:%d\n", 3002 status); 3003 return status; 3004 } 3005 3006 if (soc->is_async_ep) 3007 htc_set_async_ep(soc->htc_handle, response.Endpoint, true); 3008 3009 soc->wmi_endpoint_id[pdev_idx] = response.Endpoint; 3010 soc->max_msg_len[pdev_idx] = response.MaxMsgLength; 3011 3012 return QDF_STATUS_SUCCESS; 3013 } 3014 3015 QDF_STATUS 3016 wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle, 3017 HTC_HANDLE htc_handle) 3018 { 3019 uint32_t i; 3020 uint8_t wmi_ep_count; 3021 3022 wmi_handle->soc->htc_handle = htc_handle; 3023 3024 wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle); 3025 if (wmi_ep_count > WMI_MAX_RADIOS) 3026 return 
QDF_STATUS_E_FAULT; 3027 3028 for (i = 0; i < wmi_ep_count; i++) 3029 wmi_connect_pdev_htc_service(wmi_handle->soc, i); 3030 3031 wmi_handle->htc_handle = htc_handle; 3032 wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0]; 3033 wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0]; 3034 3035 return QDF_STATUS_SUCCESS; 3036 } 3037 3038 /** 3039 * wmi_get_host_credits() - WMI API to get updated host_credits 3040 * 3041 * @wmi_handle: handle to WMI. 3042 * 3043 * @Return: updated host_credits. 3044 */ 3045 int wmi_get_host_credits(wmi_unified_t wmi_handle) 3046 { 3047 int host_credits = 0; 3048 3049 htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle, 3050 &host_credits); 3051 return host_credits; 3052 } 3053 3054 /** 3055 * wmi_get_pending_cmds() - WMI API to get WMI Pending Commands in the HTC 3056 * queue 3057 * 3058 * @wmi_handle: handle to WMI. 3059 * 3060 * @Return: Pending Commands in the HTC queue. 3061 */ 3062 int wmi_get_pending_cmds(wmi_unified_t wmi_handle) 3063 { 3064 return qdf_atomic_read(&wmi_handle->pending_cmds); 3065 } 3066 3067 /** 3068 * wmi_set_target_suspend() - WMI API to set target suspend state 3069 * 3070 * @wmi_handle: handle to WMI. 3071 * @val: suspend state boolean. 3072 * 3073 * @Return: none. 3074 */ 3075 void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val) 3076 { 3077 qdf_atomic_set(&wmi_handle->is_target_suspended, val); 3078 } 3079 3080 /** 3081 * wmi_is_target_suspended() - WMI API to check target suspend state 3082 * @wmi_handle: handle to WMI. 3083 * 3084 * WMI API to check target suspend state 3085 * 3086 * Return: true if target is suspended, else false. 3087 */ 3088 bool wmi_is_target_suspended(struct wmi_unified *wmi_handle) 3089 { 3090 return qdf_atomic_read(&wmi_handle->is_target_suspended); 3091 } 3092 3093 /** 3094 * WMI API to set crash injection state 3095 * @param wmi_handle: handle to WMI. 3096 * @param val: crash injection state boolean. 
3097 */ 3098 void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag) 3099 { 3100 wmi_handle->tag_crash_inject = flag; 3101 } 3102 3103 /** 3104 * WMI API to set bus suspend state 3105 * @param wmi_handle: handle to WMI. 3106 * @param val: suspend state boolean. 3107 */ 3108 void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val) 3109 { 3110 qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val); 3111 } 3112 3113 void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val) 3114 { 3115 wmi_handle->tgt_force_assert_enable = val; 3116 } 3117 3118 /** 3119 * wmi_stop() - generic function to block unified WMI command 3120 * @wmi_handle: handle to WMI. 3121 * 3122 * @Return: success always. 3123 */ 3124 int 3125 wmi_stop(wmi_unified_t wmi_handle) 3126 { 3127 QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO, 3128 "WMI Stop"); 3129 wmi_handle->wmi_stopinprogress = 1; 3130 return 0; 3131 } 3132 3133 /** 3134 * wmi_start() - generic function to allow unified WMI command 3135 * @wmi_handle: handle to WMI. 3136 * 3137 * @Return: success always. 3138 */ 3139 int 3140 wmi_start(wmi_unified_t wmi_handle) 3141 { 3142 QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO, 3143 "WMI Start"); 3144 wmi_handle->wmi_stopinprogress = 0; 3145 return 0; 3146 } 3147 3148 /** 3149 * API to flush all the previous packets associated with the wmi endpoint 3150 * 3151 * @param wmi_handle : handle to WMI. 3152 */ 3153 void 3154 wmi_flush_endpoint(wmi_unified_t wmi_handle) 3155 { 3156 htc_flush_endpoint(wmi_handle->htc_handle, 3157 wmi_handle->wmi_endpoint_id, 0); 3158 } 3159 qdf_export_symbol(wmi_flush_endpoint); 3160 3161 /** 3162 * wmi_pdev_id_conversion_enable() - API to enable pdev_id conversion in WMI 3163 * By default pdev_id conversion is not done in WMI. 3164 * This API can be used enable conversion in WMI. 
3165 * @param wmi_handle : handle to WMI 3166 * @param pdev_map : pointer to pdev_map 3167 * @size : size of pdev_id_map 3168 * Return none 3169 */ 3170 void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle, 3171 uint32_t *pdev_id_map, uint8_t size) 3172 { 3173 if (wmi_handle->target_type == WMI_TLV_TARGET) 3174 wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle, 3175 pdev_id_map, 3176 size); 3177 } 3178