1 /* 2 * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /* 20 * Host WMI unified implementation 21 */ 22 #include "htc_api.h" 23 #include "htc_api.h" 24 #include "wmi_unified_priv.h" 25 #include "wmi_unified_api.h" 26 #include "qdf_module.h" 27 #ifdef WMI_EXT_DBG 28 #include "qdf_list.h" 29 #endif 30 31 #ifndef WMI_NON_TLV_SUPPORT 32 #include "wmi_tlv_helper.h" 33 #endif 34 35 #include <linux/debugfs.h> 36 37 #ifdef WMI_EXT_DBG 38 39 /** 40 * wmi_ext_dbg_msg_enqueue() - enqueue wmi message 41 * 42 * @wmi_handle: wmi handler 43 * 44 * Return: size of wmi message queue after enqueue 45 */ 46 static uint32_t wmi_ext_dbg_msg_enqueue(struct wmi_unified *wmi_handle, 47 struct wmi_ext_dbg_msg *msg) 48 { 49 uint32_t list_size; 50 51 qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock); 52 qdf_list_insert_back_size(&wmi_handle->wmi_ext_dbg_msg_queue, 53 &msg->node, &list_size); 54 qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock); 55 56 return list_size; 57 } 58 59 /** 60 * wmi_ext_dbg_msg_dequeue() - dequeue wmi message 61 * 62 * @wmi_handle: wmi handler 63 * 64 * Return: wmi msg on success else NULL 65 */ 66 static struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_dequeue(struct 
wmi_unified 67 *wmi_handle) 68 { 69 qdf_list_node_t *list_node = NULL; 70 71 qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock); 72 qdf_list_remove_front(&wmi_handle->wmi_ext_dbg_msg_queue, &list_node); 73 qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock); 74 75 if (!list_node) 76 return NULL; 77 78 return qdf_container_of(list_node, struct wmi_ext_dbg_msg, node); 79 } 80 81 /** 82 * wmi_ext_dbg_msg_record() - record wmi messages 83 * 84 * @wmi_handle: wmi handler 85 * @buf: wmi message buffer 86 * @len: wmi message length 87 * @type: wmi message type 88 * 89 * Return: QDF_STATUS_SUCCESS on successful recording else failure. 90 */ 91 static QDF_STATUS wmi_ext_dbg_msg_record(struct wmi_unified *wmi_handle, 92 uint8_t *buf, uint32_t len, 93 enum WMI_MSG_TYPE type) 94 { 95 struct wmi_ext_dbg_msg *msg; 96 uint32_t list_size; 97 98 msg = wmi_ext_dbg_msg_get(len); 99 if (!msg) 100 return QDF_STATUS_E_NOMEM; 101 102 msg->len = len; 103 msg->type = type; 104 qdf_mem_copy(msg->buf, buf, len); 105 msg->ts = qdf_get_log_timestamp(); 106 list_size = wmi_ext_dbg_msg_enqueue(wmi_handle, msg); 107 108 if (list_size >= wmi_handle->wmi_ext_dbg_msg_queue_size) { 109 msg = wmi_ext_dbg_msg_dequeue(wmi_handle); 110 wmi_ext_dbg_msg_put(msg); 111 } 112 113 return QDF_STATUS_SUCCESS; 114 } 115 116 /** 117 * wmi_ext_dbg_msg_cmd_record() - record wmi command messages 118 * 119 * @wmi_handle: wmi handler 120 * @buf: wmi command buffer 121 * @len: wmi command message length 122 * 123 * Return: QDF_STATUS_SUCCESS on successful recording else failure. 
 */
static QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified *wmi_handle,
					     uint8_t *buf, uint32_t len)
{
	return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
				      WMI_MSG_TYPE_CMD);
}

/**
 * wmi_ext_dbg_msg_event_record() - record wmi event messages
 *
 * @wmi_handle: wmi handler
 * @buf: wmi event buffer
 * @len: wmi event message length
 *
 * Events whose id matches wmi_diag_event_id are not recorded; everything
 * else is appended to the debug message queue.
 *
 * Return: QDF_STATUS_SUCCESS on successful recording else failure.
 */
static QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified *wmi_handle,
					       uint8_t *buf, uint32_t len)
{
	uint32_t id;

	id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
	/* Skip diag events; only non-diag events are kept in the queue. */
	if (id != wmi_handle->wmi_events[wmi_diag_event_id])
		return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
					      WMI_MSG_TYPE_EVENT);

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_ext_dbg_msg_queue_init() - create debugfs queue and associated lock
 *
 * @wmi_handle: wmi handler
 *
 * Return: none
 */
static void wmi_ext_dbg_msg_queue_init(struct wmi_unified *wmi_handle)
{
	qdf_list_create(&wmi_handle->wmi_ext_dbg_msg_queue,
			wmi_handle->wmi_ext_dbg_msg_queue_size);
	qdf_spinlock_create(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
}

/**
 * wmi_ext_dbg_msg_queue_deinit() - destroy debugfs queue and associated lock
 *
 * @wmi_handle: wmi handler
 *
 * Return: none
 */
static void wmi_ext_dbg_msg_queue_deinit(struct wmi_unified *wmi_handle)
{
	qdf_list_destroy(&wmi_handle->wmi_ext_dbg_msg_queue);
	qdf_spinlock_destroy(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
}

/**
 * wmi_ext_dbg_msg_show() - debugfs function to display whole content of
 * wmi command/event messages including headers.
 *
 * @file: qdf debugfs file handler
 * @arg: pointer to wmi handler
 *
 * Return: QDF_STATUS_SUCCESS if all the messages are shown successfully,
 * else QDF_STATUS_E_AGAIN if more data to show.
 */
static QDF_STATUS wmi_ext_dbg_msg_show(qdf_debugfs_file_t file, void *arg)
{
	struct wmi_unified *wmi_handle = (struct wmi_unified *)arg;
	struct wmi_ext_dbg_msg *msg;
	uint64_t secs, usecs;

	/* An empty queue means everything has been shown. */
	msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
	if (!msg)
		return QDF_STATUS_SUCCESS;

	qdf_debugfs_printf(file, "%s: 0x%x\n",
			   msg->type == WMI_MSG_TYPE_CMD ? "COMMAND" :
			   "EVENT", WMI_GET_FIELD(msg->buf, WMI_CMD_HDR,
						  COMMANDID));
	qdf_log_timestamp_to_secs(msg->ts, &secs, &usecs);
	qdf_debugfs_printf(file, "Time: %llu.%llu\n", secs, usecs);
	qdf_debugfs_printf(file, "Length:%d\n", msg->len);
	qdf_debugfs_hexdump(file, msg->buf, msg->len,
			    WMI_EXT_DBG_DUMP_ROW_SIZE,
			    WMI_EXT_DBG_DUMP_GROUP_SIZE);
	qdf_debugfs_printf(file, "\n");

	if (qdf_debugfs_overflow(file)) {
		/* The debugfs buffer could not hold this message: put it
		 * back at the head of the queue so the next read retries it.
		 */
		qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
		qdf_list_insert_front(&wmi_handle->wmi_ext_dbg_msg_queue,
				      &msg->node);
		qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);

	} else {
		wmi_ext_dbg_msg_put(msg);
	}

	return QDF_STATUS_E_AGAIN;
}

/**
 * wmi_ext_dbg_msg_write() - debugfs write not supported
 *
 * @priv: private data
 * @buf: received data buffer
 * @len: length of received buffer
 *
 * Return: QDF_STATUS_E_NOSUPPORT.
 */
static QDF_STATUS wmi_ext_dbg_msg_write(void *priv, const char *buf,
					qdf_size_t len)
{
	return QDF_STATUS_E_NOSUPPORT;
}

/* Read-only debugfs ops; .priv is filled in by wmi_ext_dbgfs_init(). */
static struct qdf_debugfs_fops wmi_ext_dbgfs_ops = {
	.show		= wmi_ext_dbg_msg_show,
	.write		= wmi_ext_dbg_msg_write,
	.priv		= NULL,
};

/**
 * wmi_ext_dbgfs_init() - init debugfs items for extended wmi dump.
 *
 * @wmi_handle: wmi handler
 *
 * Return: QDF_STATUS_SUCCESS if debugfs is initialized else
 *	   QDF_STATUS_E_FAILURE
 */
static QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle)
{
	qdf_dentry_t dentry;

	dentry = qdf_debugfs_create_dir(WMI_EXT_DBG_DIR, NULL);
	if (!dentry) {
		WMI_LOGE("error while creating extended wmi debugfs dir");
		return QDF_STATUS_E_FAILURE;
	}

	wmi_ext_dbgfs_ops.priv = wmi_handle;
	if (!qdf_debugfs_create_file(WMI_EXT_DBG_FILE, WMI_EXT_DBG_FILE_PERM,
				     dentry, &wmi_ext_dbgfs_ops)) {
		/* Roll back the directory so a retry starts clean. */
		qdf_debugfs_remove_dir(dentry);
		WMI_LOGE("error while creating extended wmi debugfs file");
		return QDF_STATUS_E_FAILURE;
	}

	wmi_handle->wmi_ext_dbg_dentry = dentry;
	wmi_handle->wmi_ext_dbg_msg_queue_size = WMI_EXT_DBG_QUEUE_SIZE;
	wmi_ext_dbg_msg_queue_init(wmi_handle);

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_ext_dbgfs_deinit() - cleanup/deinit debugfs items of extended wmi dump.
 *
 * @wmi_handle: wmi handler
 *
 * Return: QDF_STATUS_SUCCESS if cleanup is successful
 */
static QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
{
	struct wmi_ext_dbg_msg *msg;

	/* Drain and release all recorded messages before destroying queue. */
	while ((msg = wmi_ext_dbg_msg_dequeue(wmi_handle)))
		wmi_ext_dbg_msg_put(msg);

	wmi_ext_dbg_msg_queue_deinit(wmi_handle);
	qdf_debugfs_remove_dir_recursive(wmi_handle->wmi_ext_dbg_dentry);

	return QDF_STATUS_SUCCESS;
}

#endif /*WMI_EXT_DBG */

/* This check for CONFIG_WIN temporary added due to redeclaration compilation
error in MCL. Error is caused due to inclusion of wmi.h in wmi_unified_api.h
which gets included here through ol_if_athvar.h. Eventually it is expected that
wmi.h will be removed from wmi_unified_api.h after cleanup, which will need
WMI_CMD_HDR to be defined here.
 */
#ifdef CONFIG_WIN
/* Copied from wmi.h */
#undef MS
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#undef SM
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
#undef WO
#define WO(_f) ((_f##_OFFSET) >> 2)

#undef GET_FIELD
#define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f)
#undef SET_FIELD
#define SET_FIELD(_addr, _f, _val) \
	(*((uint32_t *)(_addr) + WO(_f)) = \
		(*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f))

#define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \
	GET_FIELD(_msg_buf, _msg_type ## _ ## _f)

#define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \
	SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val)

#define WMI_EP_APASS 0x0
#define WMI_EP_LPASS 0x1
#define WMI_EP_SENSOR 0x2

/*
 * Control Path
 */
typedef PREPACK struct {
	uint32_t	commandId:24,
			reserved:2, /* used for WMI endpoint ID */
			plt_priv:6; /* platform private */
} POSTPACK WMI_CMD_HDR; /* used for commands and events */

#define WMI_CMD_HDR_COMMANDID_LSB 0
#define WMI_CMD_HDR_COMMANDID_MASK 0x00ffffff
#define WMI_CMD_HDR_COMMANDID_OFFSET 0x00000000
#define WMI_CMD_HDR_WMI_ENDPOINTID_MASK 0x03000000
#define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET 24
/*
 * NOTE(review): the bitfield struct above places plt_priv in bits 26..31
 * (6 bits), but the PLT_PRIV LSB/MASK below describe bits 24..31 (8 bits,
 * overlapping the endpoint-id field). These values are copied verbatim
 * from wmi.h -- confirm against the firmware interface before changing.
 */
#define WMI_CMD_HDR_PLT_PRIV_LSB 24
#define WMI_CMD_HDR_PLT_PRIV_MASK 0xff000000
#define WMI_CMD_HDR_PLT_PRIV_OFFSET 0x00000000
/* end of copy wmi.h */
#endif /* CONFIG_WIN */

/* Headroom (bytes) reserved in WMI command buffers */
#define WMI_MIN_HEAD_ROOM 64

/* WBUFF pool sizes for WMI */
/* Allocation of size 256 bytes */
#define WMI_WBUFF_POOL_0_SIZE 128
/* Allocation of size 512 bytes */
#define WMI_WBUFF_POOL_1_SIZE 16
/* Allocation of size 1024 bytes */
#define WMI_WBUFF_POOL_2_SIZE 8
/* Allocation of size 2048 bytes */
#define WMI_WBUFF_POOL_3_SIZE 8

#ifdef WMI_INTERFACE_EVENT_LOGGING
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
367 /* TODO Cleanup this backported function */ 368 static int wmi_bp_seq_printf(struct seq_file *m, const char *f, ...) 369 { 370 va_list args; 371 372 va_start(args, f); 373 seq_vprintf(m, f, args); 374 va_end(args); 375 376 return 0; 377 } 378 #else 379 #define wmi_bp_seq_printf(m, fmt, ...) seq_printf((m), fmt, ##__VA_ARGS__) 380 #endif 381 382 #ifndef MAX_WMI_INSTANCES 383 #define CUSTOM_MGMT_CMD_DATA_SIZE 4 384 #endif 385 386 #ifdef CONFIG_MCL 387 /* WMI commands */ 388 uint32_t g_wmi_command_buf_idx = 0; 389 struct wmi_command_debug wmi_command_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 390 391 /* WMI commands TX completed */ 392 uint32_t g_wmi_command_tx_cmp_buf_idx = 0; 393 struct wmi_command_debug 394 wmi_command_tx_cmp_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 395 396 /* WMI events when processed */ 397 uint32_t g_wmi_event_buf_idx = 0; 398 struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 399 400 /* WMI events when queued */ 401 uint32_t g_wmi_rx_event_buf_idx = 0; 402 struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 403 #endif 404 405 #define WMI_COMMAND_RECORD(h, a, b) { \ 406 if (wmi_log_max_entry <= \ 407 *(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)) \ 408 *(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\ 409 ((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\ 410 [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\ 411 .command = a; \ 412 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. 
\ 413 wmi_command_log_buf_info.buf) \ 414 [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\ 415 b, wmi_record_max_length); \ 416 ((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\ 417 [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\ 418 time = qdf_get_log_timestamp(); \ 419 (*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++; \ 420 h->log_info.wmi_command_log_buf_info.length++; \ 421 } 422 423 #define WMI_COMMAND_TX_CMP_RECORD(h, a, b) { \ 424 if (wmi_log_max_entry <= \ 425 *(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\ 426 *(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 427 p_buf_tail_idx) = 0; \ 428 ((struct wmi_command_debug *)h->log_info. \ 429 wmi_command_tx_cmp_log_buf_info.buf) \ 430 [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 431 p_buf_tail_idx)]. \ 432 command = a; \ 433 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ 434 wmi_command_tx_cmp_log_buf_info.buf) \ 435 [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 436 p_buf_tail_idx)]. \ 437 data, b, wmi_record_max_length); \ 438 ((struct wmi_command_debug *)h->log_info. \ 439 wmi_command_tx_cmp_log_buf_info.buf) \ 440 [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 441 p_buf_tail_idx)]. \ 442 time = qdf_get_log_timestamp(); \ 443 (*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\ 444 h->log_info.wmi_command_tx_cmp_log_buf_info.length++; \ 445 } 446 447 #define WMI_EVENT_RECORD(h, a, b) { \ 448 if (wmi_log_max_entry <= \ 449 *(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)) \ 450 *(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\ 451 ((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\ 452 [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)]. \ 453 event = a; \ 454 qdf_mem_copy(((struct wmi_event_debug *)h->log_info. 
\ 455 wmi_event_log_buf_info.buf) \ 456 [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\ 457 wmi_record_max_length); \ 458 ((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\ 459 [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\ 460 qdf_get_log_timestamp(); \ 461 (*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++; \ 462 h->log_info.wmi_event_log_buf_info.length++; \ 463 } 464 465 #define WMI_RX_EVENT_RECORD(h, a, b) { \ 466 if (wmi_log_max_entry <= \ 467 *(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\ 468 *(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\ 469 ((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\ 470 [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ 471 event = a; \ 472 qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \ 473 wmi_rx_event_log_buf_info.buf) \ 474 [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ 475 data, b, wmi_record_max_length); \ 476 ((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\ 477 [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ 478 time = qdf_get_log_timestamp(); \ 479 (*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++; \ 480 h->log_info.wmi_rx_event_log_buf_info.length++; \ 481 } 482 483 #ifdef CONFIG_MCL 484 uint32_t g_wmi_mgmt_command_buf_idx = 0; 485 struct 486 wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; 487 488 /* wmi_mgmt commands TX completed */ 489 uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0; 490 struct wmi_command_debug 491 wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; 492 493 /* wmi_mgmt events when received */ 494 uint32_t g_wmi_mgmt_rx_event_buf_idx = 0; 495 struct wmi_event_debug 496 wmi_mgmt_rx_event_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; 497 498 /* wmi_diag events when received */ 499 uint32_t g_wmi_diag_rx_event_buf_idx = 0; 500 struct wmi_event_debug 501 
wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY]; 502 #endif 503 504 #define WMI_MGMT_COMMAND_RECORD(h, a, b) { \ 505 if (wmi_mgmt_log_max_entry <= \ 506 *(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \ 507 *(h->log_info.wmi_mgmt_command_log_buf_info. \ 508 p_buf_tail_idx) = 0; \ 509 ((struct wmi_command_debug *)h->log_info. \ 510 wmi_mgmt_command_log_buf_info.buf) \ 511 [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ 512 command = a; \ 513 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ 514 wmi_mgmt_command_log_buf_info.buf) \ 515 [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ 516 data, b, \ 517 wmi_record_max_length); \ 518 ((struct wmi_command_debug *)h->log_info. \ 519 wmi_mgmt_command_log_buf_info.buf) \ 520 [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ 521 time = qdf_get_log_timestamp(); \ 522 (*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\ 523 h->log_info.wmi_mgmt_command_log_buf_info.length++; \ 524 } 525 526 #define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) { \ 527 if (wmi_mgmt_log_max_entry <= \ 528 *(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 529 p_buf_tail_idx)) \ 530 *(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 531 p_buf_tail_idx) = 0; \ 532 ((struct wmi_command_debug *)h->log_info. \ 533 wmi_mgmt_command_tx_cmp_log_buf_info.buf) \ 534 [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 535 p_buf_tail_idx)].command = a; \ 536 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ 537 wmi_mgmt_command_tx_cmp_log_buf_info.buf)\ 538 [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 539 p_buf_tail_idx)].data, b, \ 540 wmi_record_max_length); \ 541 ((struct wmi_command_debug *)h->log_info. \ 542 wmi_mgmt_command_tx_cmp_log_buf_info.buf) \ 543 [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 544 p_buf_tail_idx)].time = \ 545 qdf_get_log_timestamp(); \ 546 (*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. 
\ 547 p_buf_tail_idx))++; \ 548 h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++; \ 549 } 550 551 #define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do { \ 552 if (wmi_mgmt_log_max_entry <= \ 553 *(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\ 554 *(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\ 555 ((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\ 556 [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\ 557 .event = a; \ 558 qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \ 559 wmi_mgmt_event_log_buf_info.buf) \ 560 [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\ 561 data, b, wmi_record_max_length); \ 562 ((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\ 563 [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\ 564 time = qdf_get_log_timestamp(); \ 565 (*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++; \ 566 h->log_info.wmi_mgmt_event_log_buf_info.length++; \ 567 } while (0); 568 569 #define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do { \ 570 if (wmi_mgmt_log_max_entry <= \ 571 *(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\ 572 *(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\ 573 ((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\ 574 [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\ 575 .event = a; \ 576 qdf_mem_copy(((struct wmi_event_debug *)h->log_info. 
\ 577 wmi_diag_event_log_buf_info.buf) \ 578 [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\ 579 data, b, wmi_record_max_length); \ 580 ((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\ 581 [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\ 582 time = qdf_get_log_timestamp(); \ 583 (*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++; \ 584 h->log_info.wmi_diag_event_log_buf_info.length++; \ 585 } while (0); 586 587 /* These are defined to made it as module param, which can be configured */ 588 uint32_t wmi_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY; 589 uint32_t wmi_mgmt_log_max_entry = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY; 590 uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY; 591 uint32_t wmi_record_max_length = WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH; 592 uint32_t wmi_display_size = 100; 593 594 /** 595 * wmi_log_init() - Initialize WMI event logging 596 * @wmi_handle: WMI handle. 597 * 598 * Return: Initialization status 599 */ 600 #ifdef CONFIG_MCL 601 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle) 602 { 603 struct wmi_log_buf_t *cmd_log_buf = 604 &wmi_handle->log_info.wmi_command_log_buf_info; 605 struct wmi_log_buf_t *cmd_tx_cmpl_log_buf = 606 &wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info; 607 608 struct wmi_log_buf_t *event_log_buf = 609 &wmi_handle->log_info.wmi_event_log_buf_info; 610 struct wmi_log_buf_t *rx_event_log_buf = 611 &wmi_handle->log_info.wmi_rx_event_log_buf_info; 612 613 struct wmi_log_buf_t *mgmt_cmd_log_buf = 614 &wmi_handle->log_info.wmi_mgmt_command_log_buf_info; 615 struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf = 616 &wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info; 617 struct wmi_log_buf_t *mgmt_event_log_buf = 618 &wmi_handle->log_info.wmi_mgmt_event_log_buf_info; 619 struct wmi_log_buf_t *diag_event_log_buf = 620 &wmi_handle->log_info.wmi_diag_event_log_buf_info; 621 622 /* WMI commands */ 623 cmd_log_buf->length = 0; 624 
	cmd_log_buf->buf_tail_idx = 0;
	cmd_log_buf->buf = wmi_command_log_buffer;
	cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx;
	cmd_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;

	/* WMI commands TX completed */
	cmd_tx_cmpl_log_buf->length = 0;
	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
	cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer;
	cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx;
	cmd_tx_cmpl_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;

	/* WMI events when processed */
	event_log_buf->length = 0;
	event_log_buf->buf_tail_idx = 0;
	event_log_buf->buf = wmi_event_log_buffer;
	event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx;
	event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;

	/* WMI events when queued */
	rx_event_log_buf->length = 0;
	rx_event_log_buf->buf_tail_idx = 0;
	rx_event_log_buf->buf = wmi_rx_event_log_buffer;
	rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx;
	rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;

	/* WMI Management commands */
	mgmt_cmd_log_buf->length = 0;
	mgmt_cmd_log_buf->buf_tail_idx = 0;
	mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer;
	mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx;
	mgmt_cmd_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY;

	/* WMI Management commands Tx completed*/
	mgmt_cmd_tx_cmp_log_buf->length = 0;
	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
	mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer;
	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
		&g_wmi_mgmt_command_tx_cmp_buf_idx;
	mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY;

	/* WMI Management events when received */
	mgmt_event_log_buf->length = 0;
	mgmt_event_log_buf->buf_tail_idx = 0;
	mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer;
	mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx;
	mgmt_event_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY;

	/* WMI diag events when received */
	diag_event_log_buf->length = 0;
	diag_event_log_buf->buf_tail_idx = 0;
	diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer;
	diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx;
	diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;

	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
	wmi_handle->log_info.wmi_logging_enable = 1;

	return QDF_STATUS_SUCCESS;
}
#else
/*
 * Non-MCL variant: the ring buffers are heap-allocated and sized by the
 * module parameters (wmi_log_max_entry etc.) instead of using the static
 * global arrays above.
 *
 * NOTE(review): on a partial allocation failure this returns E_NOMEM
 * without freeing the buffers already allocated; presumably the caller is
 * expected to invoke wmi_log_buffer_free() on failure -- confirm.
 */
static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
{
	struct wmi_log_buf_t *cmd_log_buf =
		&wmi_handle->log_info.wmi_command_log_buf_info;
	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;

	struct wmi_log_buf_t *event_log_buf =
		&wmi_handle->log_info.wmi_event_log_buf_info;
	struct wmi_log_buf_t *rx_event_log_buf =
		&wmi_handle->log_info.wmi_rx_event_log_buf_info;

	struct wmi_log_buf_t *mgmt_cmd_log_buf =
		&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
	struct wmi_log_buf_t *mgmt_event_log_buf =
		&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
	struct wmi_log_buf_t *diag_event_log_buf =
		&wmi_handle->log_info.wmi_diag_event_log_buf_info;

	/* Disabled until every buffer below has been allocated. */
	wmi_handle->log_info.wmi_logging_enable = 0;

	/* WMI commands */
	cmd_log_buf->length = 0;
	cmd_log_buf->buf_tail_idx = 0;
	cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
		wmi_log_max_entry * sizeof(struct wmi_command_debug));
	cmd_log_buf->size = wmi_log_max_entry;

	if (!cmd_log_buf->buf) {
		WMI_LOGE("no memory for WMI command log buffer..");
		return QDF_STATUS_E_NOMEM;
	}
	cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx;

	/* WMI commands TX completed */
	cmd_tx_cmpl_log_buf->length = 0;
	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
	cmd_tx_cmpl_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
		wmi_log_max_entry * sizeof(struct wmi_command_debug));
	cmd_tx_cmpl_log_buf->size = wmi_log_max_entry;

	if (!cmd_tx_cmpl_log_buf->buf) {
		WMI_LOGE("no memory for WMI Command Tx Complete log buffer..");
		return QDF_STATUS_E_NOMEM;
	}
	cmd_tx_cmpl_log_buf->p_buf_tail_idx =
		&cmd_tx_cmpl_log_buf->buf_tail_idx;

	/* WMI events when processed */
	event_log_buf->length = 0;
	event_log_buf->buf_tail_idx = 0;
	event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
		wmi_log_max_entry * sizeof(struct wmi_event_debug));
	event_log_buf->size = wmi_log_max_entry;

	if (!event_log_buf->buf) {
		WMI_LOGE("no memory for WMI Event log buffer..");
		return QDF_STATUS_E_NOMEM;
	}
	event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx;

	/* WMI events when queued */
	rx_event_log_buf->length = 0;
	rx_event_log_buf->buf_tail_idx = 0;
	rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
		wmi_log_max_entry * sizeof(struct wmi_event_debug));
	rx_event_log_buf->size = wmi_log_max_entry;

	if (!rx_event_log_buf->buf) {
		WMI_LOGE("no memory for WMI Event Rx log buffer..");
		return QDF_STATUS_E_NOMEM;
	}
	rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx;

	/* WMI Management commands */
	mgmt_cmd_log_buf->length = 0;
	mgmt_cmd_log_buf->buf_tail_idx = 0;
	mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
		wmi_mgmt_log_max_entry * sizeof(struct wmi_command_debug));
	mgmt_cmd_log_buf->size = wmi_mgmt_log_max_entry;

	if (!mgmt_cmd_log_buf->buf) {
		WMI_LOGE("no memory for WMI Management Command log buffer..");
		return QDF_STATUS_E_NOMEM;
	}
	mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx;

	/* WMI Management commands Tx completed*/
	mgmt_cmd_tx_cmp_log_buf->length = 0;
	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
	mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *)
		qdf_mem_malloc(
		wmi_mgmt_log_max_entry *
		sizeof(struct wmi_command_debug));
	mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_log_max_entry;

	if (!mgmt_cmd_tx_cmp_log_buf->buf) {
		WMI_LOGE("no memory for WMI Management Command Tx complete log buffer..");
		return QDF_STATUS_E_NOMEM;
	}
	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
		&mgmt_cmd_tx_cmp_log_buf->buf_tail_idx;

	/* WMI Management events when received */
	mgmt_event_log_buf->length = 0;
	mgmt_event_log_buf->buf_tail_idx = 0;

	mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
		wmi_mgmt_log_max_entry *
		sizeof(struct wmi_event_debug));
	mgmt_event_log_buf->size = wmi_mgmt_log_max_entry;

	if (!mgmt_event_log_buf->buf) {
		WMI_LOGE("no memory for WMI Management Event log buffer..");
		return QDF_STATUS_E_NOMEM;
	}
	mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx;

	/* WMI diag events when received */
	diag_event_log_buf->length = 0;
	diag_event_log_buf->buf_tail_idx = 0;

	diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
		wmi_diag_log_max_entry *
		sizeof(struct wmi_event_debug));
	diag_event_log_buf->size = wmi_diag_log_max_entry;

	if (!diag_event_log_buf->buf) {
		qdf_print("no memory for WMI diag event log buffer..\n");
		return QDF_STATUS_E_NOMEM;
	}
	diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx;

	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
	wmi_handle->log_info.wmi_logging_enable = 1;

	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * wmi_log_buffer_free() - Free all dynamic allocated buffer memory for
 * event logging
 * @wmi_handle: WMI handle.
831 * 832 * Return: None 833 */ 834 #ifndef CONFIG_MCL 835 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) 836 { 837 if (wmi_handle->log_info.wmi_command_log_buf_info.buf) 838 qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf); 839 if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf) 840 qdf_mem_free( 841 wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf); 842 if (wmi_handle->log_info.wmi_event_log_buf_info.buf) 843 qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf); 844 if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf) 845 qdf_mem_free( 846 wmi_handle->log_info.wmi_rx_event_log_buf_info.buf); 847 if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf) 848 qdf_mem_free( 849 wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf); 850 if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf) 851 qdf_mem_free( 852 wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf); 853 if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf) 854 qdf_mem_free( 855 wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf); 856 if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf) 857 qdf_mem_free( 858 wmi_handle->log_info.wmi_diag_event_log_buf_info.buf); 859 wmi_handle->log_info.wmi_logging_enable = 0; 860 qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock); 861 } 862 #else 863 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) 864 { 865 /* Do Nothing */ 866 } 867 #endif 868 869 /** 870 * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer 871 * @log_buffer: the command log buffer metadata of the buffer to print 872 * @count: the maximum number of entries to print 873 * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper 874 * @print_priv: any data required by the print method, e.g. 
a file handle 875 * 876 * Return: None 877 */ 878 static void 879 wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count, 880 qdf_abstract_print *print, void *print_priv) 881 { 882 static const int data_len = 883 WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t); 884 char str[128]; 885 uint32_t idx; 886 887 if (count > log_buffer->size) 888 count = log_buffer->size; 889 if (count > log_buffer->length) 890 count = log_buffer->length; 891 892 /* subtract count from index, and wrap if necessary */ 893 idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count; 894 idx %= log_buffer->size; 895 896 print(print_priv, "Time (seconds) Cmd Id Payload"); 897 while (count) { 898 struct wmi_command_debug *cmd_log = (struct wmi_command_debug *) 899 &((struct wmi_command_debug *)log_buffer->buf)[idx]; 900 uint64_t secs, usecs; 901 int len = 0; 902 int i; 903 904 qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs); 905 len += scnprintf(str + len, sizeof(str) - len, 906 "% 8lld.%06lld %6u (0x%06x) ", 907 secs, usecs, 908 cmd_log->command, cmd_log->command); 909 for (i = 0; i < data_len; ++i) { 910 len += scnprintf(str + len, sizeof(str) - len, 911 "0x%08x ", cmd_log->data[i]); 912 } 913 914 print(print_priv, str); 915 916 --count; 917 ++idx; 918 if (idx >= log_buffer->size) 919 idx = 0; 920 } 921 } 922 923 /** 924 * wmi_print_event_log_buffer() - an output agnostic wmi event log printer 925 * @log_buffer: the event log buffer metadata of the buffer to print 926 * @count: the maximum number of entries to print 927 * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper 928 * @print_priv: any data required by the print method, e.g. 
 * a file handle
 *
 * Return: None
 */
static void
wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
			   qdf_abstract_print *print, void *print_priv)
{
	/* number of 32-bit payload words stored per log entry */
	static const int data_len =
		WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
	char str[128];
	uint32_t idx;

	/* clamp to ring capacity and to entries actually recorded */
	if (count > log_buffer->size)
		count = log_buffer->size;
	if (count > log_buffer->length)
		count = log_buffer->length;

	/* subtract count from index, and wrap if necessary */
	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
	idx %= log_buffer->size;

	print(print_priv, "Time (seconds) Event Id Payload");
	while (count) {
		struct wmi_event_debug *event_log = (struct wmi_event_debug *)
			&((struct wmi_event_debug *)log_buffer->buf)[idx];
		uint64_t secs, usecs;
		int len = 0;
		int i;

		qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs);
		len += scnprintf(str + len, sizeof(str) - len,
				 "% 8lld.%06lld %6u (0x%06x) ",
				 secs, usecs,
				 event_log->event, event_log->event);
		for (i = 0; i < data_len; ++i) {
			len += scnprintf(str + len, sizeof(str) - len,
					 "0x%08x ", event_log->data[i]);
		}

		print(print_priv, str);

		--count;
		++idx;
		/* wrap around at the end of the ring */
		if (idx >= log_buffer->size)
			idx = 0;
	}
}

/* Thin wrappers selecting the per-ring metadata for the generic printers */

inline void
wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count,
		  qdf_abstract_print *print, void *print_priv)
{
	wmi_print_cmd_log_buffer(
		&wmi->log_info.wmi_command_log_buf_info,
		count, print, print_priv);
}

inline void
wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
			 qdf_abstract_print *print, void *print_priv)
{
	wmi_print_cmd_log_buffer(
		&wmi->log_info.wmi_command_tx_cmp_log_buf_info,
		count, print, print_priv);
}

inline void
wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count,
		       qdf_abstract_print *print, void *print_priv)
{
	wmi_print_cmd_log_buffer(
		&wmi->log_info.wmi_mgmt_command_log_buf_info,
		count, print, print_priv);
}

inline void
wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
			      qdf_abstract_print *print, void *print_priv)
{
	wmi_print_cmd_log_buffer(
		&wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info,
		count, print, print_priv);
}

inline void
wmi_print_event_log(wmi_unified_t wmi, uint32_t count,
		    qdf_abstract_print *print, void *print_priv)
{
	wmi_print_event_log_buffer(
		&wmi->log_info.wmi_event_log_buf_info,
		count, print, print_priv);
}

inline void
wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count,
		       qdf_abstract_print *print, void *print_priv)
{
	wmi_print_event_log_buffer(
		&wmi->log_info.wmi_rx_event_log_buf_info,
		count, print, print_priv);
}

inline void
wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count,
			 qdf_abstract_print *print, void *print_priv)
{
	wmi_print_event_log_buffer(
		&wmi->log_info.wmi_mgmt_event_log_buf_info,
		count, print, print_priv);
}


/* debugfs routines*/

/**
 * debug_wmi_##func_base##_show() - debugfs functions to display content of
 * command and event buffers. Macro uses max buffer length to display
 * buffer when it is wraparound.
 *
 * @m: debugfs handler to access wmi_handle
 * @v: Variable arguments (not used)
 *
 * Return: Length of characters printed
 */
/* Generates debug_wmi_<func_base>_show(): walks the selected command ring
 * backwards from the tail index, printing up to wmi_ring_size entries.
 * The record lock only protects the snapshot of length/tail; entries are
 * read unlocked afterwards.
 */
#define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size)	\
	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
						void *v)		\
	{								\
		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
		struct wmi_log_buf_t *wmi_log =				\
			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
		int pos, nread, outlen;					\
		int i;							\
		uint64_t secs, usecs;					\
									\
		qdf_spin_lock(&wmi_handle->log_info.wmi_record_lock);	\
		if (!wmi_log->length) {					\
			qdf_spin_unlock(&wmi_handle->log_info.wmi_record_lock);\
			return wmi_bp_seq_printf(m,			\
			"no elements to read from ring buffer!\n");	\
		}							\
									\
		if (wmi_log->length <= wmi_ring_size)			\
			nread = wmi_log->length;			\
		else							\
			nread = wmi_ring_size;				\
									\
		if (*(wmi_log->p_buf_tail_idx) == 0)			\
			/* tail can be 0 after wrap-around */		\
			pos = wmi_ring_size - 1;			\
		else							\
			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
									\
		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
		qdf_spin_unlock(&wmi_handle->log_info.wmi_record_lock);	\
		while (nread--) {					\
			struct wmi_command_debug *wmi_record;		\
									\
			wmi_record = (struct wmi_command_debug *)	\
			&(((struct wmi_command_debug *)wmi_log->buf)[pos]);\
			outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n",	\
				(wmi_record->command));			\
			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
						  &usecs);		\
			outlen +=					\
			wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\
				secs, usecs);				\
			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
			for (i = 0; i < (wmi_record_max_length/		\
					sizeof(uint32_t)); i++)		\
				outlen += wmi_bp_seq_printf(m, "%x ",	\
					wmi_record->data[i]);		\
			outlen += wmi_bp_seq_printf(m, "\n");		\
									\
			if (pos == 0)					\
				pos = wmi_ring_size - 1;		\
			else						\
				pos--;					\
		}							\
		return outlen;						\
	}								\

/* Event-ring variant of the show generator; identical walk over
 * struct wmi_event_debug entries.
 */
#define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size)	\
	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
						void *v)		\
	{								\
		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
		struct wmi_log_buf_t *wmi_log =				\
			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
		int pos, nread, outlen;					\
		int i;							\
		uint64_t secs, usecs;					\
									\
		qdf_spin_lock(&wmi_handle->log_info.wmi_record_lock);	\
		if (!wmi_log->length) {					\
			qdf_spin_unlock(&wmi_handle->log_info.wmi_record_lock);\
			return wmi_bp_seq_printf(m,			\
			"no elements to read from ring buffer!\n");	\
		}							\
									\
		if (wmi_log->length <= wmi_ring_size)			\
			nread = wmi_log->length;			\
		else							\
			nread = wmi_ring_size;				\
									\
		if (*(wmi_log->p_buf_tail_idx) == 0)			\
			/* tail can be 0 after wrap-around */		\
			pos = wmi_ring_size - 1;			\
		else							\
			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
									\
		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
		qdf_spin_unlock(&wmi_handle->log_info.wmi_record_lock);	\
		while (nread--) {					\
			struct wmi_event_debug *wmi_record;		\
									\
			wmi_record = (struct wmi_event_debug *)		\
			&(((struct wmi_event_debug *)wmi_log->buf)[pos]);\
			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
						  &usecs);		\
			outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\
				(wmi_record->event));			\
			outlen +=					\
			wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\
				secs, usecs);				\
			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
			for (i = 0; i < (wmi_record_max_length/		\
					sizeof(uint32_t)); i++)		\
				outlen += wmi_bp_seq_printf(m, "%x ",	\
					wmi_record->data[i]);		\
			outlen += wmi_bp_seq_printf(m, "\n");		\
									\
			if (pos == 0)					\
				pos = wmi_ring_size - 1;		\
			else						\
				pos--;					\
		}							\
		return outlen;						\
	}

GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size); 1170 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size); 1171 GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size); 1172 GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size); 1173 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size); 1174 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log, 1175 wmi_display_size); 1176 GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size); 1177 1178 /** 1179 * debug_wmi_enable_show() - debugfs functions to display enable state of 1180 * wmi logging feature. 1181 * 1182 * @m: debugfs handler to access wmi_handle 1183 * @v: Variable arguments (not used) 1184 * 1185 * Return: always 1 1186 */ 1187 static int debug_wmi_enable_show(struct seq_file *m, void *v) 1188 { 1189 wmi_unified_t wmi_handle = (wmi_unified_t) m->private; 1190 1191 return wmi_bp_seq_printf(m, "%d\n", 1192 wmi_handle->log_info.wmi_logging_enable); 1193 } 1194 1195 /** 1196 * debug_wmi_log_size_show() - debugfs functions to display configured size of 1197 * wmi logging command/event buffer and management command/event buffer. 1198 * 1199 * @m: debugfs handler to access wmi_handle 1200 * @v: Variable arguments (not used) 1201 * 1202 * Return: Length of characters printed 1203 */ 1204 static int debug_wmi_log_size_show(struct seq_file *m, void *v) 1205 { 1206 1207 wmi_bp_seq_printf(m, "WMI command/event log max size:%d\n", 1208 wmi_log_max_entry); 1209 return wmi_bp_seq_printf(m, 1210 "WMI management command/events log max size:%d\n", 1211 wmi_mgmt_log_max_entry); 1212 } 1213 1214 /** 1215 * debug_wmi_##func_base##_write() - debugfs functions to clear 1216 * wmi logging command/event buffer and management command/event buffer. 
1217 * 1218 * @file: file handler to access wmi_handle 1219 * @buf: received data buffer 1220 * @count: length of received buffer 1221 * @ppos: Not used 1222 * 1223 * Return: count 1224 */ 1225 #define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\ 1226 static ssize_t debug_wmi_##func_base##_write(struct file *file, \ 1227 const char __user *buf, \ 1228 size_t count, loff_t *ppos) \ 1229 { \ 1230 int k, ret; \ 1231 wmi_unified_t wmi_handle = \ 1232 ((struct seq_file *)file->private_data)->private;\ 1233 struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info. \ 1234 wmi_##func_base##_buf_info; \ 1235 char locbuf[50]; \ 1236 \ 1237 if ((!buf) || (count > 50)) \ 1238 return -EFAULT; \ 1239 \ 1240 if (copy_from_user(locbuf, buf, count)) \ 1241 return -EFAULT; \ 1242 \ 1243 ret = sscanf(locbuf, "%d", &k); \ 1244 if ((ret != 1) || (k != 0)) { \ 1245 WMI_LOGE("Wrong input, echo 0 to clear the wmi buffer");\ 1246 return -EINVAL; \ 1247 } \ 1248 \ 1249 qdf_spin_lock(&wmi_handle->log_info.wmi_record_lock); \ 1250 qdf_mem_zero(wmi_log->buf, wmi_ring_size * \ 1251 sizeof(struct wmi_record_type)); \ 1252 wmi_log->length = 0; \ 1253 *(wmi_log->p_buf_tail_idx) = 0; \ 1254 qdf_spin_unlock(&wmi_handle->log_info.wmi_record_lock); \ 1255 \ 1256 return count; \ 1257 } 1258 1259 GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_log_max_entry, 1260 wmi_command_debug); 1261 GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_log_max_entry, 1262 wmi_command_debug); 1263 GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_log_max_entry, 1264 wmi_event_debug); 1265 GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_log_max_entry, 1266 wmi_event_debug); 1267 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_log_max_entry, 1268 wmi_command_debug); 1269 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log, 1270 wmi_mgmt_log_max_entry, wmi_command_debug); 1271 GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_log_max_entry, 1272 wmi_event_debug); 1273 1274 /** 1275 * 
debug_wmi_enable_write() - debugfs functions to enable/disable 1276 * wmi logging feature. 1277 * 1278 * @file: file handler to access wmi_handle 1279 * @buf: received data buffer 1280 * @count: length of received buffer 1281 * @ppos: Not used 1282 * 1283 * Return: count 1284 */ 1285 static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf, 1286 size_t count, loff_t *ppos) 1287 { 1288 wmi_unified_t wmi_handle = 1289 ((struct seq_file *)file->private_data)->private; 1290 int k, ret; 1291 char locbuf[50]; 1292 1293 if ((!buf) || (count > 50)) 1294 return -EFAULT; 1295 1296 if (copy_from_user(locbuf, buf, count)) 1297 return -EFAULT; 1298 1299 ret = sscanf(locbuf, "%d", &k); 1300 if ((ret != 1) || ((k != 0) && (k != 1))) 1301 return -EINVAL; 1302 1303 wmi_handle->log_info.wmi_logging_enable = k; 1304 return count; 1305 } 1306 1307 /** 1308 * debug_wmi_log_size_write() - reserved. 1309 * 1310 * @file: file handler to access wmi_handle 1311 * @buf: received data buffer 1312 * @count: length of received buffer 1313 * @ppos: Not used 1314 * 1315 * Return: count 1316 */ 1317 static ssize_t debug_wmi_log_size_write(struct file *file, 1318 const char __user *buf, size_t count, loff_t *ppos) 1319 { 1320 return -EINVAL; 1321 } 1322 1323 /* Structure to maintain debug information */ 1324 struct wmi_debugfs_info { 1325 const char *name; 1326 const struct file_operations *ops; 1327 }; 1328 1329 #define DEBUG_FOO(func_base) { .name = #func_base, \ 1330 .ops = &debug_##func_base##_ops } 1331 1332 /** 1333 * debug_##func_base##_open() - Open debugfs entry for respective command 1334 * and event buffer. 
1335 * 1336 * @inode: node for debug dir entry 1337 * @file: file handler 1338 * 1339 * Return: open status 1340 */ 1341 #define GENERATE_DEBUG_STRUCTS(func_base) \ 1342 static int debug_##func_base##_open(struct inode *inode, \ 1343 struct file *file) \ 1344 { \ 1345 return single_open(file, debug_##func_base##_show, \ 1346 inode->i_private); \ 1347 } \ 1348 \ 1349 \ 1350 static struct file_operations debug_##func_base##_ops = { \ 1351 .open = debug_##func_base##_open, \ 1352 .read = seq_read, \ 1353 .llseek = seq_lseek, \ 1354 .write = debug_##func_base##_write, \ 1355 .release = single_release, \ 1356 }; 1357 1358 GENERATE_DEBUG_STRUCTS(wmi_command_log); 1359 GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log); 1360 GENERATE_DEBUG_STRUCTS(wmi_event_log); 1361 GENERATE_DEBUG_STRUCTS(wmi_rx_event_log); 1362 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log); 1363 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log); 1364 GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log); 1365 GENERATE_DEBUG_STRUCTS(wmi_enable); 1366 GENERATE_DEBUG_STRUCTS(wmi_log_size); 1367 1368 struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = { 1369 DEBUG_FOO(wmi_command_log), 1370 DEBUG_FOO(wmi_command_tx_cmp_log), 1371 DEBUG_FOO(wmi_event_log), 1372 DEBUG_FOO(wmi_rx_event_log), 1373 DEBUG_FOO(wmi_mgmt_command_log), 1374 DEBUG_FOO(wmi_mgmt_command_tx_cmp_log), 1375 DEBUG_FOO(wmi_mgmt_event_log), 1376 DEBUG_FOO(wmi_enable), 1377 DEBUG_FOO(wmi_log_size), 1378 }; 1379 1380 1381 /** 1382 * wmi_debugfs_create() - Create debug_fs entry for wmi logging. 
 *
 * @wmi_handle: wmi handle
 * @par_entry: debug directory entry
 *
 * On any failure the partially initialized logging state is torn down via
 * wmi_log_buffer_free().
 *
 * Return: none
 */
static void wmi_debugfs_create(wmi_unified_t wmi_handle,
			       struct dentry *par_entry)
{
	int i;

	if (!par_entry)
		goto out;

	for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
		wmi_handle->debugfs_de[i] = debugfs_create_file(
				wmi_debugfs_infos[i].name, 0644, par_entry,
				wmi_handle, wmi_debugfs_infos[i].ops);

		if (!wmi_handle->debugfs_de[i]) {
			WMI_LOGE("debug Entry creation failed!");
			goto out;
		}
	}

	return;

out:
	WMI_LOGE("debug Entry creation failed!");
	/* drop the log ring buffers; logging is unusable without debugfs */
	wmi_log_buffer_free(wmi_handle);
	return;
}

/**
 * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
 * @wmi_handle: wmi handle
 *
 * Clears the cached file dentries and removes the whole per-pdev debugfs
 * directory (which deletes the individual files).
 *
 * Return: none
 */
static void wmi_debugfs_remove(wmi_unified_t wmi_handle)
{
	int i;
	struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir;

	if (dentry) {
		for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
			if (wmi_handle->debugfs_de[i])
				wmi_handle->debugfs_de[i] = NULL;
		}
	}

	if (dentry)
		debugfs_remove_recursive(dentry);
}

/**
 * wmi_debugfs_init() - debugfs functions to create debugfs directory and to
 * create debugfs enteries.
 *
 * @wmi_handle: wmi handler
 * @pdev_idx: pdev index used in the directory name
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the
 * directory could not be created
 */
static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx)
{
	char buf[32];

	/* directory name: WMI_SOC<n>_PDEV<m>, unique per soc/pdev pair */
	snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u",
		 wmi_handle->soc->soc_idx, pdev_idx);

	wmi_handle->log_info.wmi_log_debugfs_dir =
		debugfs_create_dir(buf, NULL);

	if (!wmi_handle->log_info.wmi_log_debugfs_dir) {
		WMI_LOGE("error while creating debugfs dir for %s", buf);
		return QDF_STATUS_E_FAILURE;
	}
	wmi_debugfs_create(wmi_handle,
			   wmi_handle->log_info.wmi_log_debugfs_dir);

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_mgmt_cmd_record() - Wrapper function for mgmt command logging macro
 *
 * @wmi_handle: wmi handle
 * @cmd: mgmt command
 * @header: pointer to 802.11 header
 * @vdev_id: vdev id
 * @chanfreq: channel frequency
 *
 * Return: none
 */
void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
			 void *header, uint32_t vdev_id, uint32_t chanfreq)
{

	uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE];

	/* payload recorded with each mgmt command entry:
	 * frame type, subtype, vdev and channel frequency
	 */
	data[0] = ((struct wmi_command_header *)header)->type;
	data[1] = ((struct wmi_command_header *)header)->sub_type;
	data[2] = vdev_id;
	data[3] = chanfreq;

	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);

	WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data);

	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
}
#else
/**
 * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
1500 * @wmi_handle: wmi handle 1501 * @dentry: debugfs directory entry 1502 * @id: Index to debug info data array 1503 * 1504 * Return: none 1505 */ 1506 static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { } 1507 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd, 1508 void *header, uint32_t vdev_id, uint32_t chanfreq) { } 1509 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { } 1510 #endif /*WMI_INTERFACE_EVENT_LOGGING */ 1511 qdf_export_symbol(wmi_mgmt_cmd_record); 1512 1513 int wmi_get_host_credits(wmi_unified_t wmi_handle); 1514 /* WMI buffer APIs */ 1515 1516 #ifdef NBUF_MEMORY_DEBUG 1517 wmi_buf_t 1518 wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len, 1519 const char *func_name, 1520 uint32_t line_num) 1521 { 1522 wmi_buf_t wmi_buf; 1523 1524 if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) { 1525 QDF_ASSERT(0); 1526 return NULL; 1527 } 1528 1529 wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, func_name, 1530 line_num); 1531 if (!wmi_buf) 1532 wmi_buf = qdf_nbuf_alloc_debug(NULL, 1533 roundup(len + WMI_MIN_HEAD_ROOM, 1534 4), 1535 WMI_MIN_HEAD_ROOM, 4, false, 1536 func_name, line_num); 1537 if (!wmi_buf) 1538 return NULL; 1539 1540 /* Clear the wmi buffer */ 1541 OS_MEMZERO(qdf_nbuf_data(wmi_buf), len); 1542 1543 /* 1544 * Set the length of the buffer to match the allocation size. 
1545 */ 1546 qdf_nbuf_set_pktlen(wmi_buf, len); 1547 1548 return wmi_buf; 1549 } 1550 qdf_export_symbol(wmi_buf_alloc_debug); 1551 1552 void wmi_buf_free(wmi_buf_t net_buf) 1553 { 1554 net_buf = wbuff_buff_put(net_buf); 1555 if (net_buf) 1556 qdf_nbuf_free(net_buf); 1557 } 1558 qdf_export_symbol(wmi_buf_free); 1559 #else 1560 wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len, 1561 const char *func, uint32_t line) 1562 { 1563 wmi_buf_t wmi_buf; 1564 1565 if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) { 1566 wmi_nofl_err("%s:%d, Invalid len:%d", func, line, len); 1567 QDF_DEBUG_PANIC(); 1568 return NULL; 1569 } 1570 1571 wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, __func__, 1572 __LINE__); 1573 if (!wmi_buf) 1574 wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len + 1575 WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4, 1576 false, func, line); 1577 1578 if (!wmi_buf) 1579 return NULL; 1580 1581 /* Clear the wmi buffer */ 1582 OS_MEMZERO(qdf_nbuf_data(wmi_buf), len); 1583 1584 /* 1585 * Set the length of the buffer to match the allocation size. 1586 */ 1587 qdf_nbuf_set_pktlen(wmi_buf, len); 1588 return wmi_buf; 1589 } 1590 qdf_export_symbol(wmi_buf_alloc_fl); 1591 1592 void wmi_buf_free(wmi_buf_t net_buf) 1593 { 1594 net_buf = wbuff_buff_put(net_buf); 1595 if (net_buf) 1596 qdf_nbuf_free(net_buf); 1597 } 1598 qdf_export_symbol(wmi_buf_free); 1599 #endif 1600 1601 /** 1602 * wmi_get_max_msg_len() - get maximum WMI message length 1603 * @wmi_handle: WMI handle. 
 *
 * This function returns the maximum WMI message length
 *
 * Return: maximum WMI message length
 */
uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle)
{
	/* headroom is reserved for the WMI/HTC headers, so the usable
	 * payload is max_msg_len minus WMI_MIN_HEAD_ROOM
	 */
	return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM;
}
qdf_export_symbol(wmi_get_max_msg_len);

#ifndef WMI_CMD_STRINGS
/* Fallback when command-name strings are compiled out */
static uint8_t *wmi_id_to_name(uint32_t wmi_command)
{
	return "Invalid WMI cmd";
}

#endif

#ifdef CONFIG_MCL
/* Debug-log the command about to be sent, with its HTC tag */
static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag)
{
	WMI_LOGD("Send WMI command:%s command_id:%d htc_tag:%d\n",
		 wmi_id_to_name(cmd_id), cmd_id, tag);
}

/**
 * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence
 * @cmd_id: command to check
 *
 * Return: true if the command is part of the resume sequence.
 */
static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
{
	switch (cmd_id) {
	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
	case WMI_PDEV_RESUME_CMDID:
		return true;

	default:
		return false;
	}
}
#else
static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
{
	return false;
}
#endif

/**
 * wmi_unified_cmd_send_fl() - send a WMI command to the target
 * @wmi_handle: wmi handle
 * @buf: wmi buffer carrying the command payload (ownership passes to HTC
 *       on success)
 * @len: payload length, excluding the WMI command header
 * @cmd_id: WMI command id
 * @func: caller's function name (from the _fl wrapper macro)
 * @line: caller's line number
 *
 * Return: QDF_STATUS_SUCCESS on success; E_BUSY when the target is
 * suspended or the pending-command limit is hit; E_INVAL on stop in
 * progress or TLV sanity failure; E_NOMEM on allocation failure.
 */
QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf,
				   uint32_t len, uint32_t cmd_id,
				   const char *func, uint32_t line)
{
	HTC_PACKET *pkt;
	QDF_STATUS status;
	uint16_t htc_tag = 0;

	if (wmi_get_runtime_pm_inprogress(wmi_handle)) {
		/* tag the packet so HTC can special-case runtime-PM sends */
		htc_tag =
			(uint16_t)wmi_handle->ops->wmi_set_htc_tx_tag(
						wmi_handle, buf, cmd_id);
	} else if (qdf_atomic_read(&wmi_handle->is_target_suspended) &&
		   (!wmi_is_pm_resume_cmd(cmd_id))) {
		/* only resume-sequence commands may go out while suspended */
		wmi_nofl_err("%s:%d, Target is suspended", func, line);
		QDF_DEBUG_PANIC();
		return QDF_STATUS_E_BUSY;
	}
	if (wmi_handle->wmi_stopinprogress) {
		wmi_nofl_err("%s:%d, WMI stop in progress", func, line);
		return QDF_STATUS_E_INVAL;
	}

#ifndef WMI_NON_TLV_SUPPORT
	/* Do sanity check on the TLV parameter structure */
	if (wmi_handle->target_type == WMI_TLV_TARGET) {
		void *buf_ptr = (void *)qdf_nbuf_data(buf);

		if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id)
			!= 0) {
			wmi_nofl_err("%s:%d, Invalid WMI Param Buffer for Cmd:%d",
				     func, line, cmd_id);
			return QDF_STATUS_E_INVAL;
		}
	}
#endif

	if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
		wmi_nofl_err("%s:%d, Failed to send cmd %x, no memory",
			     func, line, cmd_id);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);

	/* inc first, then test: keeps the counter consistent under
	 * concurrent senders; decremented on every failure path below
	 */
	qdf_atomic_inc(&wmi_handle->pending_cmds);
	if (qdf_atomic_read(&wmi_handle->pending_cmds) >=
			wmi_handle->wmi_max_cmds) {
		wmi_nofl_err("hostcredits = %d",
			     wmi_get_host_credits(wmi_handle));
		htc_dump_counter_info(wmi_handle->htc_handle);
		qdf_atomic_dec(&wmi_handle->pending_cmds);
		wmi_nofl_err("%s:%d, MAX %d WMI Pending cmds reached",
			     func, line, wmi_handle->wmi_max_cmds);
		QDF_BUG(0);
		return QDF_STATUS_E_BUSY;
	}

	pkt = qdf_mem_malloc_fl(sizeof(*pkt), func, line);
	if (!pkt) {
		qdf_atomic_dec(&wmi_handle->pending_cmds);
		return QDF_STATUS_E_NOMEM;
	}

	SET_HTC_PACKET_INFO_TX(pkt,
			       NULL,
			       qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
			       wmi_handle->wmi_endpoint_id, htc_tag);

	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);
#ifdef CONFIG_MCL
	wmi_log_cmd_id(cmd_id, htc_tag);
#endif
	wmi_ext_dbg_msg_cmd_record(wmi_handle,
				   qdf_nbuf_data(buf), qdf_nbuf_len(buf));
#ifdef WMI_INTERFACE_EVENT_LOGGING
	if (wmi_handle->log_info.wmi_logging_enable) {
		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
		/*
		 * Record 16 bytes of WMI cmd data -
		 * exclude TLV and WMI headers
		 *
		 * WMI mgmt command already recorded in wmi_mgmt_cmd_record
		 */
		if (wmi_handle->ops->is_management_record(cmd_id) == false) {
			WMI_COMMAND_RECORD(wmi_handle, cmd_id,
					   qdf_nbuf_data(buf) +
					   wmi_handle->soc->buf_offset_command);
		}
		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
	}
#endif

	/* on success HTC owns pkt and buf; they are reclaimed via the
	 * tx-completion path, not here
	 */
	status = htc_send_pkt(wmi_handle->htc_handle, pkt);

	if (QDF_STATUS_SUCCESS != status) {
		qdf_atomic_dec(&wmi_handle->pending_cmds);
		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
			     func, line, status);
		qdf_mem_free(pkt);
		return status;
	}

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(wmi_unified_cmd_send_fl);

/**
 * wmi_unified_get_event_handler_ix() - gives event handler's index
 * @wmi_handle: handle to wmi
 * @event_id: wmi event id
 *
 * Return: event handler's index, or -1 if no handler is registered
 */
static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
					    uint32_t event_id)
{
	uint32_t idx = 0;
	int32_t invalid_idx = -1;
	struct wmi_soc *soc = wmi_handle->soc;

	/* linear scan of the registered handler table */
	for (idx = 0; (idx < soc->max_event_idx &&
		       idx < WMI_UNIFIED_MAX_EVENT); ++idx) {
		if (wmi_handle->event_id[idx] == event_id &&
		    wmi_handle->event_handler[idx] != NULL) {
			return idx;
		}
	}

	return invalid_idx;
}

/**
 * wmi_unified_register_event() - register wmi event handler
 * @wmi_handle: handle to wmi
 * @event_id: wmi event id
 * @handler_func: wmi event handler function
 *
 * The handler runs in the WMI_RX_UMAC_CTX execution context.
 *
 * Return: 0 on success
 */
int wmi_unified_register_event(wmi_unified_t wmi_handle,
			       uint32_t event_id,
			       wmi_unified_event_handler handler_func)
{
	uint32_t idx = 0;
	uint32_t evt_id;
	struct wmi_soc *soc;

	if (!wmi_handle) {
		WMI_LOGE("WMI handle is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc = wmi_handle->soc;

	/* translate the unified (abstract) event id into the target's
	 * concrete event id; unavailable events map to WMI_EVENT_ID_INVALID
	 */
	if (event_id >= wmi_events_max ||
	    wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
			  "%s: Event id %d is unavailable",
			  __func__, event_id);
		return QDF_STATUS_E_FAILURE;
	}
	evt_id = wmi_handle->wmi_events[event_id];
	if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
			  "%s : event handler already registered 0x%x",
			  __func__, evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
			  "%s : no more event handlers 0x%x",
			  __func__, evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	idx = soc->max_event_idx;
	wmi_handle->event_handler[idx] = handler_func;
	wmi_handle->event_id[idx] = evt_id;
	/* ctx_lock guards the ctx[] table read by the rx path */
	qdf_spin_lock_bh(&soc->ctx_lock);
	wmi_handle->ctx[idx] = WMI_RX_UMAC_CTX;
	qdf_spin_unlock_bh(&soc->ctx_lock);
	soc->max_event_idx++;

	return 0;
}

/**
 * wmi_unified_register_event_handler() - register wmi event handler
 * @wmi_handle: handle to wmi
 * @event_id: wmi event id
 * @handler_func: wmi event handler function
 * @rx_ctx: rx execution context for wmi rx events
 *
 * This API is to support legacy requirements. Will be deprecated in future.
 * Return: 0 on success
 */
int wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
				       wmi_conv_event_id event_id,
				       wmi_unified_event_handler handler_func,
				       uint8_t rx_ctx)
{
	uint32_t idx = 0;
	uint32_t evt_id;
	struct wmi_soc *soc;

	if (!wmi_handle) {
		WMI_LOGE("WMI handle is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc = wmi_handle->soc;

	/* abstract event id -> target's concrete event id */
	if (event_id >= wmi_events_max ||
	    wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
			  "%s: Event id %d is unavailable",
			  __func__, event_id);
		return QDF_STATUS_E_FAILURE;
	}
	evt_id = wmi_handle->wmi_events[event_id];

	if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) {
		WMI_LOGE("event handler already registered 0x%x",
			 evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
		WMI_LOGE("no more event handlers 0x%x",
			 evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
		  "Registered event handler for event 0x%8x", evt_id);
	idx = soc->max_event_idx;
	wmi_handle->event_handler[idx] = handler_func;
	wmi_handle->event_id[idx] = evt_id;
	/* ctx_lock guards the ctx[] table read by the rx path */
	qdf_spin_lock_bh(&soc->ctx_lock);
	wmi_handle->ctx[idx] = rx_ctx;
	qdf_spin_unlock_bh(&soc->ctx_lock);
	soc->max_event_idx++;

	return 0;
}
qdf_export_symbol(wmi_unified_register_event_handler);

/**
 * wmi_unified_unregister_event() - unregister wmi event handler
 * @wmi_handle: handle to wmi
 * @event_id: wmi event id
 *
 * NOTE(review): unlike the _handler variant below, @wmi_handle is not
 * NULL-checked here - callers must pass a valid handle.
 *
 * Return: 0 on success
 */
int wmi_unified_unregister_event(wmi_unified_t wmi_handle,
				 uint32_t event_id)
{
	uint32_t idx = 0;
	uint32_t evt_id;
	struct wmi_soc *soc = wmi_handle->soc;

	if (event_id >= wmi_events_max ||
	    wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
			  "%s: Event id %d is unavailable",
			  __func__, event_id);
		return QDF_STATUS_E_FAILURE;
	}
	evt_id = wmi_handle->wmi_events[event_id];

	/* idx is unsigned; -1 matches via the usual arithmetic conversion */
	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
	if (idx == -1) {
		WMI_LOGE("event handler is not registered: evt id 0x%x",
			 evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	wmi_handle->event_handler[idx] = NULL;
	wmi_handle->event_id[idx] = 0;
	/* keep the table dense: move the last entry into the freed slot */
	--soc->max_event_idx;
	wmi_handle->event_handler[idx] =
		wmi_handle->event_handler[soc->max_event_idx];
	wmi_handle->event_id[idx] =
		wmi_handle->event_id[soc->max_event_idx];

	return 0;
}

/**
 * wmi_unified_unregister_event_handler() - unregister wmi event handler
 * @wmi_handle: handle to wmi
 * @event_id: wmi event id
 *
 * Return: 0 on success
 */
int wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
					 wmi_conv_event_id event_id)
{
	uint32_t idx = 0;
	uint32_t evt_id;
	struct wmi_soc *soc;

	if (!wmi_handle) {
		WMI_LOGE("WMI handle is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc = wmi_handle->soc;

	if (event_id >= wmi_events_max ||
	    wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
		WMI_LOGE("Event id %d is unavailable",
			 event_id);
		return QDF_STATUS_E_FAILURE;
	}
	evt_id = wmi_handle->wmi_events[event_id];

	/* idx is unsigned; -1 matches via the usual arithmetic conversion */
	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
	if (idx == -1) {
		WMI_LOGE("event handler is not registered: evt id 0x%x",
			 evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	wmi_handle->event_handler[idx] = NULL;
	wmi_handle->event_id[idx] = 0;
	/* keep the table dense: move the last entry into the freed slot */
	--soc->max_event_idx;
	wmi_handle->event_handler[idx] =
		wmi_handle->event_handler[soc->max_event_idx];
	wmi_handle->event_id[idx] =
		wmi_handle->event_id[soc->max_event_idx];

	return 0;
}
qdf_export_symbol(wmi_unified_unregister_event_handler);

/**
 * wmi_process_fw_event_default_ctx() - process in default caller context
 * @wmi_handle: handle to wmi
 * @htc_packet: pointer to htc packet
 * @exec_ctx: execution context for wmi fw event
 *
 * Event process by below function will be in default caller context.
 * wmi internally provides rx work thread processing context.
 *
 * Return: none
 */
static void wmi_process_fw_event_default_ctx(struct wmi_unified *wmi_handle,
					     HTC_PACKET *htc_packet,
					     uint8_t exec_ctx)
{
	wmi_buf_t evt_buf;
	evt_buf = (wmi_buf_t) htc_packet->pPktContext;

	/* the callback's argument set differs per build: MCL passes the
	 * whole HTC packet and wmi handle, others pass scn + event buffer
	 */
#ifndef CONFIG_MCL
	wmi_handle->rx_ops.wma_process_fw_event_handler_cbk
		(wmi_handle->scn_handle, evt_buf, exec_ctx);
#else
	wmi_handle->rx_ops.wma_process_fw_event_handler_cbk(wmi_handle,
					htc_packet, exec_ctx);
#endif

	return;
}

/**
 * wmi_process_fw_event_worker_thread_ctx() - process in worker thread context
 * @wmi_handle: handle to wmi
 * @htc_packet: pointer to htc packet
 *
 * Event process by below function will be in worker thread context.
 * Use this method for events which are not critical and not
 * handled in protocol stack.
2023 * 2024 * Return: none 2025 */ 2026 void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle, 2027 HTC_PACKET *htc_packet) 2028 { 2029 wmi_buf_t evt_buf; 2030 2031 evt_buf = (wmi_buf_t) htc_packet->pPktContext; 2032 2033 qdf_spin_lock_bh(&wmi_handle->eventq_lock); 2034 qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf); 2035 qdf_spin_unlock_bh(&wmi_handle->eventq_lock); 2036 qdf_queue_work(0, wmi_handle->wmi_rx_work_queue, 2037 &wmi_handle->rx_event_work); 2038 2039 return; 2040 } 2041 2042 qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx); 2043 2044 /** 2045 * wmi_get_pdev_ep: Get wmi handle based on endpoint 2046 * @soc: handle to wmi soc 2047 * @ep: endpoint id 2048 * 2049 * Return: none 2050 */ 2051 static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc, 2052 HTC_ENDPOINT_ID ep) 2053 { 2054 uint32_t i; 2055 2056 for (i = 0; i < WMI_MAX_RADIOS; i++) 2057 if (soc->wmi_endpoint_id[i] == ep) 2058 break; 2059 2060 if (i == WMI_MAX_RADIOS) 2061 return NULL; 2062 2063 return soc->wmi_pdev[i]; 2064 } 2065 2066 /** 2067 * wmi_mtrace_rx() - Wrappper function for qdf_mtrace api 2068 * @message_id: 32-Bit Wmi message ID 2069 * @vdev_id: Vdev ID 2070 * @data: Actual message contents 2071 * 2072 * This function converts the 32-bit WMI message ID in 15-bit message ID 2073 * format for qdf_mtrace as in qdf_mtrace message there are only 15 2074 * bits reserved for message ID. 2075 * out of these 15-bits, 8-bits (From LSB) specifies the WMI_GRP_ID 2076 * and remaining 7-bits specifies the actual WMI command. With this 2077 * notation there can be maximum 256 groups and each group can have 2078 * max 128 commands can be supported. 
2079 * 2080 * Return: None 2081 */ 2082 static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data) 2083 { 2084 uint16_t mtrace_message_id; 2085 2086 mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) | 2087 (QDF_WMI_MTRACE_GRP_ID(message_id) << 2088 QDF_WMI_MTRACE_CMD_NUM_BITS); 2089 qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA, 2090 mtrace_message_id, vdev_id, data); 2091 } 2092 2093 /** 2094 * wmi_control_rx() - process fw events callbacks 2095 * @ctx: handle to wmi 2096 * @htc_packet: pointer to htc packet 2097 * 2098 * Return: none 2099 */ 2100 static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet) 2101 { 2102 struct wmi_soc *soc = (struct wmi_soc *) ctx; 2103 struct wmi_unified *wmi_handle; 2104 wmi_buf_t evt_buf; 2105 uint32_t id; 2106 uint32_t idx = 0; 2107 enum wmi_rx_exec_ctx exec_ctx; 2108 2109 evt_buf = (wmi_buf_t) htc_packet->pPktContext; 2110 2111 wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint); 2112 if (wmi_handle == NULL) { 2113 WMI_LOGE 2114 ("unable to get wmi_handle to Endpoint %d\n", 2115 htc_packet->Endpoint); 2116 qdf_nbuf_free(evt_buf); 2117 return; 2118 } 2119 2120 id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID); 2121 idx = wmi_unified_get_event_handler_ix(wmi_handle, id); 2122 if (qdf_unlikely(idx == A_ERROR)) { 2123 wmi_debug("no handler registered for event id 0x%x", id); 2124 qdf_nbuf_free(evt_buf); 2125 return; 2126 } 2127 wmi_mtrace_rx(id, 0xFF, idx); 2128 qdf_spin_lock_bh(&soc->ctx_lock); 2129 exec_ctx = wmi_handle->ctx[idx]; 2130 qdf_spin_unlock_bh(&soc->ctx_lock); 2131 2132 #ifdef WMI_INTERFACE_EVENT_LOGGING 2133 if (wmi_handle->log_info.wmi_logging_enable) { 2134 uint8_t *data; 2135 data = qdf_nbuf_data(evt_buf); 2136 2137 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); 2138 /* Exclude 4 bytes of TLV header */ 2139 if (wmi_handle->ops->is_diag_event(id)) { 2140 WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id, 2141 ((uint8_t *) data + 2142 
wmi_handle->soc->buf_offset_event)); 2143 } else if (wmi_handle->ops->is_management_record(id)) { 2144 WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id, 2145 ((uint8_t *) data + 2146 wmi_handle->soc->buf_offset_event)); 2147 } else { 2148 WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data + 2149 wmi_handle->soc->buf_offset_event)); 2150 } 2151 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); 2152 } 2153 #endif 2154 2155 if (exec_ctx == WMI_RX_WORK_CTX) { 2156 wmi_process_fw_event_worker_thread_ctx 2157 (wmi_handle, htc_packet); 2158 } else if (exec_ctx > WMI_RX_WORK_CTX) { 2159 wmi_process_fw_event_default_ctx 2160 (wmi_handle, htc_packet, exec_ctx); 2161 } else { 2162 WMI_LOGE("Invalid event context %d", exec_ctx); 2163 qdf_nbuf_free(evt_buf); 2164 } 2165 2166 } 2167 2168 /** 2169 * wmi_process_fw_event() - process any fw event 2170 * @wmi_handle: wmi handle 2171 * @evt_buf: fw event buffer 2172 * 2173 * This function process fw event in caller context 2174 * 2175 * Return: none 2176 */ 2177 void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf) 2178 { 2179 __wmi_control_rx(wmi_handle, evt_buf); 2180 } 2181 2182 /** 2183 * __wmi_control_rx() - process serialize wmi event callback 2184 * @wmi_handle: wmi handle 2185 * @evt_buf: fw event buffer 2186 * 2187 * Return: none 2188 */ 2189 void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf) 2190 { 2191 uint32_t id; 2192 uint8_t *data; 2193 uint32_t len; 2194 void *wmi_cmd_struct_ptr = NULL; 2195 #ifndef WMI_NON_TLV_SUPPORT 2196 int tlv_ok_status = 0; 2197 #endif 2198 uint32_t idx = 0; 2199 2200 id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID); 2201 2202 wmi_ext_dbg_msg_event_record(wmi_handle, qdf_nbuf_data(evt_buf), 2203 qdf_nbuf_len(evt_buf)); 2204 2205 if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL) 2206 goto end; 2207 2208 data = qdf_nbuf_data(evt_buf); 2209 len = qdf_nbuf_len(evt_buf); 2210 2211 #ifndef WMI_NON_TLV_SUPPORT 2212 if 
(wmi_handle->target_type == WMI_TLV_TARGET) { 2213 /* Validate and pad(if necessary) the TLVs */ 2214 tlv_ok_status = 2215 wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle, 2216 data, len, id, 2217 &wmi_cmd_struct_ptr); 2218 if (tlv_ok_status != 0) { 2219 QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, 2220 "%s: Error: id=0x%x, wmitlv check status=%d", 2221 __func__, id, tlv_ok_status); 2222 goto end; 2223 } 2224 } 2225 #endif 2226 2227 idx = wmi_unified_get_event_handler_ix(wmi_handle, id); 2228 if (idx == A_ERROR) { 2229 QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, 2230 "%s : event handler is not registered: event id 0x%x", 2231 __func__, id); 2232 goto end; 2233 } 2234 #ifdef WMI_INTERFACE_EVENT_LOGGING 2235 if (wmi_handle->log_info.wmi_logging_enable) { 2236 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); 2237 /* Exclude 4 bytes of TLV header */ 2238 if (wmi_handle->ops->is_diag_event(id)) { 2239 /* 2240 * skip diag event logging in WMI event buffer 2241 * as its already logged in WMI RX event buffer 2242 */ 2243 } else if (wmi_handle->ops->is_management_record(id)) { 2244 /* 2245 * skip wmi mgmt event logging in WMI event buffer 2246 * as its already logged in WMI RX event buffer 2247 */ 2248 } else { 2249 WMI_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data + 2250 wmi_handle->soc->buf_offset_event)); 2251 } 2252 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); 2253 } 2254 #endif 2255 /* Call the WMI registered event handler */ 2256 if (wmi_handle->target_type == WMI_TLV_TARGET) 2257 wmi_handle->event_handler[idx] (wmi_handle->scn_handle, 2258 wmi_cmd_struct_ptr, len); 2259 else 2260 wmi_handle->event_handler[idx] (wmi_handle->scn_handle, 2261 data, len); 2262 2263 end: 2264 /* Free event buffer and allocated event tlv */ 2265 #ifndef WMI_NON_TLV_SUPPORT 2266 if (wmi_handle->target_type == WMI_TLV_TARGET) 2267 wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr); 2268 #endif 2269 2270 
qdf_nbuf_free(evt_buf); 2271 2272 } 2273 2274 #define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */ 2275 2276 static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id) 2277 { 2278 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 2279 "%s: WLAN_BUG_RCA: Message type %x has exceeded its alloted time of %ds", 2280 __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000); 2281 } 2282 2283 #ifdef CONFIG_SLUB_DEBUG_ON 2284 static void wmi_workqueue_watchdog_bite(void *arg) 2285 { 2286 struct wmi_wq_dbg_info *info = arg; 2287 2288 wmi_workqueue_watchdog_warn(info->wd_msg_type_id); 2289 qdf_print_thread_trace(info->task); 2290 2291 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 2292 "%s: Going down for WMI WQ Watchdog Bite!", __func__); 2293 QDF_BUG(0); 2294 } 2295 #else 2296 static inline void wmi_workqueue_watchdog_bite(void *arg) 2297 { 2298 struct wmi_wq_dbg_info *info = arg; 2299 2300 wmi_workqueue_watchdog_warn(info->wd_msg_type_id); 2301 } 2302 #endif 2303 2304 /** 2305 * wmi_rx_event_work() - process rx event in rx work queue context 2306 * @arg: opaque pointer to wmi handle 2307 * 2308 * This function process any fw event to serialize it through rx worker thread. 
2309 * 2310 * Return: none 2311 */ 2312 static void wmi_rx_event_work(void *arg) 2313 { 2314 wmi_buf_t buf; 2315 struct wmi_unified *wmi = arg; 2316 qdf_timer_t wd_timer; 2317 struct wmi_wq_dbg_info info; 2318 2319 /* initialize WMI workqueue watchdog timer */ 2320 qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite, 2321 &info, QDF_TIMER_TYPE_SW); 2322 qdf_spin_lock_bh(&wmi->eventq_lock); 2323 buf = qdf_nbuf_queue_remove(&wmi->event_queue); 2324 qdf_spin_unlock_bh(&wmi->eventq_lock); 2325 while (buf) { 2326 qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT); 2327 info.wd_msg_type_id = 2328 WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID); 2329 info.wmi_wq = wmi->wmi_rx_work_queue; 2330 info.task = qdf_get_current_task(); 2331 __wmi_control_rx(wmi, buf); 2332 qdf_timer_stop(&wd_timer); 2333 qdf_spin_lock_bh(&wmi->eventq_lock); 2334 buf = qdf_nbuf_queue_remove(&wmi->event_queue); 2335 qdf_spin_unlock_bh(&wmi->eventq_lock); 2336 } 2337 qdf_timer_free(&wd_timer); 2338 } 2339 2340 #ifdef FEATURE_RUNTIME_PM 2341 /** 2342 * wmi_runtime_pm_init() - initialize runtime pm wmi variables 2343 * @wmi_handle: wmi context 2344 */ 2345 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle) 2346 { 2347 qdf_atomic_init(&wmi_handle->runtime_pm_inprogress); 2348 } 2349 2350 /** 2351 * wmi_set_runtime_pm_inprogress() - set runtime pm progress flag 2352 * @wmi_handle: wmi context 2353 * @val: runtime pm progress flag 2354 */ 2355 void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val) 2356 { 2357 qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val); 2358 } 2359 2360 /** 2361 * wmi_get_runtime_pm_inprogress() - get runtime pm progress flag 2362 * @wmi_handle: wmi context 2363 */ 2364 inline bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle) 2365 { 2366 return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress); 2367 } 2368 #else 2369 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle) 2370 { 2371 } 2372 #endif 2373 2374 /** 
2375 * wmi_unified_get_soc_handle: Get WMI SoC handle 2376 * @param wmi_handle: WMI context got from wmi_attach 2377 * 2378 * return: Pointer to Soc handle 2379 */ 2380 void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle) 2381 { 2382 return wmi_handle->soc; 2383 } 2384 2385 /** 2386 * wmi_interface_logging_init: Interface looging init 2387 * @param wmi_handle: Pointer to wmi handle object 2388 * 2389 * return: None 2390 */ 2391 #ifdef WMI_INTERFACE_EVENT_LOGGING 2392 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle, 2393 uint32_t pdev_idx) 2394 { 2395 if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) { 2396 qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock); 2397 wmi_debugfs_init(wmi_handle, pdev_idx); 2398 } 2399 } 2400 #else 2401 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle, 2402 uint32_t pdev_idx) 2403 { 2404 } 2405 #endif 2406 2407 /** 2408 * wmi_target_params_init: Target specific params init 2409 * @param wmi_soc: Pointer to wmi soc object 2410 * @param wmi_handle: Pointer to wmi handle object 2411 * 2412 * return: None 2413 */ 2414 #ifndef CONFIG_MCL 2415 static inline void wmi_target_params_init(struct wmi_soc *soc, 2416 struct wmi_unified *wmi_handle) 2417 { 2418 wmi_handle->pdev_param = soc->pdev_param; 2419 wmi_handle->vdev_param = soc->vdev_param; 2420 wmi_handle->services = soc->services; 2421 } 2422 #else 2423 static inline void wmi_target_params_init(struct wmi_soc *soc, 2424 struct wmi_unified *wmi_handle) 2425 { 2426 wmi_handle->services = soc->services; 2427 } 2428 #endif 2429 2430 /** 2431 * wmi_unified_get_pdev_handle: Get WMI SoC handle 2432 * @param wmi_soc: Pointer to wmi soc object 2433 * @param pdev_idx: pdev index 2434 * 2435 * return: Pointer to wmi handle or NULL on failure 2436 */ 2437 void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx) 2438 { 2439 struct wmi_unified *wmi_handle; 2440 2441 if (pdev_idx >= WMI_MAX_RADIOS) 2442 return 
NULL; 2443 2444 if (soc->wmi_pdev[pdev_idx] == NULL) { 2445 wmi_handle = 2446 (struct wmi_unified *) qdf_mem_malloc( 2447 sizeof(struct wmi_unified)); 2448 if (wmi_handle == NULL) { 2449 WMI_LOGE("allocation of wmi handle failed %zu", 2450 sizeof(struct wmi_unified)); 2451 return NULL; 2452 } 2453 wmi_handle->scn_handle = soc->scn_handle; 2454 wmi_handle->event_id = soc->event_id; 2455 wmi_handle->event_handler = soc->event_handler; 2456 wmi_handle->ctx = soc->ctx; 2457 wmi_handle->ops = soc->ops; 2458 qdf_spinlock_create(&wmi_handle->eventq_lock); 2459 qdf_nbuf_queue_init(&wmi_handle->event_queue); 2460 2461 qdf_create_work(0, &wmi_handle->rx_event_work, 2462 wmi_rx_event_work, wmi_handle); 2463 wmi_handle->wmi_rx_work_queue = 2464 qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue"); 2465 if (NULL == wmi_handle->wmi_rx_work_queue) { 2466 WMI_LOGE("failed to create wmi_rx_event_work_queue"); 2467 goto error; 2468 } 2469 wmi_handle->wmi_events = soc->wmi_events; 2470 wmi_target_params_init(soc, wmi_handle); 2471 wmi_handle->soc = soc; 2472 wmi_interface_logging_init(wmi_handle, pdev_idx); 2473 qdf_atomic_init(&wmi_handle->pending_cmds); 2474 qdf_atomic_init(&wmi_handle->is_target_suspended); 2475 wmi_handle->target_type = soc->target_type; 2476 wmi_handle->wmi_max_cmds = soc->wmi_max_cmds; 2477 2478 soc->wmi_pdev[pdev_idx] = wmi_handle; 2479 } else 2480 wmi_handle = soc->wmi_pdev[pdev_idx]; 2481 2482 wmi_handle->wmi_stopinprogress = 0; 2483 wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx]; 2484 wmi_handle->htc_handle = soc->htc_handle; 2485 wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx]; 2486 2487 return wmi_handle; 2488 2489 error: 2490 qdf_mem_free(wmi_handle); 2491 2492 return NULL; 2493 } 2494 qdf_export_symbol(wmi_unified_get_pdev_handle); 2495 2496 static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t); 2497 2498 void wmi_unified_register_module(enum wmi_target_type target_type, 2499 void (*wmi_attach)(wmi_unified_t 
wmi_handle)) 2500 { 2501 if (target_type < WMI_MAX_TARGET_TYPE) 2502 wmi_attach_register[target_type] = wmi_attach; 2503 2504 return; 2505 } 2506 qdf_export_symbol(wmi_unified_register_module); 2507 2508 /** 2509 * wmi_wbuff_register() - register wmi with wbuff 2510 * @wmi_handle: handle to wmi 2511 * 2512 * @Return: void 2513 */ 2514 static void wmi_wbuff_register(struct wmi_unified *wmi_handle) 2515 { 2516 struct wbuff_alloc_request wbuff_alloc[4]; 2517 2518 wbuff_alloc[0].slot = WBUFF_POOL_0; 2519 wbuff_alloc[0].size = WMI_WBUFF_POOL_0_SIZE; 2520 wbuff_alloc[1].slot = WBUFF_POOL_1; 2521 wbuff_alloc[1].size = WMI_WBUFF_POOL_1_SIZE; 2522 wbuff_alloc[2].slot = WBUFF_POOL_2; 2523 wbuff_alloc[2].size = WMI_WBUFF_POOL_2_SIZE; 2524 wbuff_alloc[3].slot = WBUFF_POOL_3; 2525 wbuff_alloc[3].size = WMI_WBUFF_POOL_3_SIZE; 2526 2527 wmi_handle->wbuff_handle = wbuff_module_register(wbuff_alloc, 4, 2528 WMI_MIN_HEAD_ROOM, 4); 2529 } 2530 2531 /** 2532 * wmi_wbuff_deregister() - deregister wmi with wbuff 2533 * @wmi_handle: handle to wmi 2534 * 2535 * @Return: void 2536 */ 2537 static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle) 2538 { 2539 wbuff_module_deregister(wmi_handle->wbuff_handle); 2540 wmi_handle->wbuff_handle = NULL; 2541 } 2542 2543 /** 2544 * wmi_unified_attach() - attach for unified WMI 2545 * @scn_handle: handle to SCN 2546 * @osdev: OS device context 2547 * @target_type: TLV or not-TLV based target 2548 * @use_cookie: cookie based allocation enabled/disabled 2549 * @ops: umac rx callbacks 2550 * @psoc: objmgr psoc 2551 * 2552 * @Return: wmi handle. 
2553 */ 2554 void *wmi_unified_attach(void *scn_handle, 2555 struct wmi_unified_attach_params *param) 2556 { 2557 struct wmi_unified *wmi_handle; 2558 struct wmi_soc *soc; 2559 2560 soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc)); 2561 if (soc == NULL) { 2562 WMI_LOGE("Allocation of wmi_soc failed %zu", 2563 sizeof(struct wmi_soc)); 2564 return NULL; 2565 } 2566 2567 wmi_handle = 2568 (struct wmi_unified *) qdf_mem_malloc( 2569 sizeof(struct wmi_unified)); 2570 if (wmi_handle == NULL) { 2571 qdf_mem_free(soc); 2572 WMI_LOGE("allocation of wmi handle failed %zu", 2573 sizeof(struct wmi_unified)); 2574 return NULL; 2575 } 2576 wmi_handle->soc = soc; 2577 wmi_handle->soc->soc_idx = param->soc_id; 2578 wmi_handle->soc->is_async_ep = param->is_async_ep; 2579 wmi_handle->event_id = soc->event_id; 2580 wmi_handle->event_handler = soc->event_handler; 2581 wmi_handle->ctx = soc->ctx; 2582 wmi_handle->wmi_events = soc->wmi_events; 2583 wmi_target_params_init(soc, wmi_handle); 2584 wmi_handle->scn_handle = scn_handle; 2585 soc->scn_handle = scn_handle; 2586 qdf_atomic_init(&wmi_handle->pending_cmds); 2587 qdf_atomic_init(&wmi_handle->is_target_suspended); 2588 wmi_runtime_pm_init(wmi_handle); 2589 qdf_spinlock_create(&wmi_handle->eventq_lock); 2590 qdf_nbuf_queue_init(&wmi_handle->event_queue); 2591 qdf_create_work(0, &wmi_handle->rx_event_work, 2592 wmi_rx_event_work, wmi_handle); 2593 wmi_handle->wmi_rx_work_queue = 2594 qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue"); 2595 if (NULL == wmi_handle->wmi_rx_work_queue) { 2596 WMI_LOGE("failed to create wmi_rx_event_work_queue"); 2597 goto error; 2598 } 2599 wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0); 2600 /* Attach mc_thread context processing function */ 2601 wmi_handle->rx_ops.wma_process_fw_event_handler_cbk = 2602 param->rx_ops->wma_process_fw_event_handler_cbk; 2603 wmi_handle->target_type = param->target_type; 2604 soc->target_type = param->target_type; 2605 2606 if 
(param->target_type >= WMI_MAX_TARGET_TYPE) 2607 goto error; 2608 2609 if (wmi_attach_register[param->target_type]) { 2610 wmi_attach_register[param->target_type](wmi_handle); 2611 } else { 2612 WMI_LOGE("wmi attach is not registered"); 2613 goto error; 2614 } 2615 /* Assign target cookie capablity */ 2616 wmi_handle->use_cookie = param->use_cookie; 2617 wmi_handle->osdev = param->osdev; 2618 wmi_handle->wmi_stopinprogress = 0; 2619 wmi_handle->wmi_max_cmds = param->max_commands; 2620 soc->wmi_max_cmds = param->max_commands; 2621 /* Increase the ref count once refcount infra is present */ 2622 soc->wmi_psoc = param->psoc; 2623 qdf_spinlock_create(&soc->ctx_lock); 2624 2625 soc->ops = wmi_handle->ops; 2626 soc->wmi_pdev[0] = wmi_handle; 2627 if (wmi_ext_dbgfs_init(wmi_handle) != QDF_STATUS_SUCCESS) 2628 WMI_LOGE("failed to initialize wmi extended debugfs"); 2629 2630 wmi_wbuff_register(wmi_handle); 2631 2632 return wmi_handle; 2633 2634 error: 2635 qdf_mem_free(soc); 2636 qdf_mem_free(wmi_handle); 2637 2638 return NULL; 2639 } 2640 2641 /** 2642 * wmi_unified_detach() - detach for unified WMI 2643 * 2644 * @wmi_handle : handle to wmi. 2645 * 2646 * @Return: none. 
 */
void wmi_unified_detach(struct wmi_unified *wmi_handle)
{
	wmi_buf_t buf;
	struct wmi_soc *soc;
	uint8_t i;

	wmi_wbuff_deregister(wmi_handle);

	wmi_ext_dbgfs_deinit(wmi_handle);

	soc = wmi_handle->soc;
	/* Tear down every pdev handle: stop its rx work queue first so no
	 * worker touches the event queue while it is being drained.
	 */
	for (i = 0; i < WMI_MAX_RADIOS; i++) {
		if (soc->wmi_pdev[i]) {
			qdf_flush_workqueue(0,
					    soc->wmi_pdev[i]->wmi_rx_work_queue);
			qdf_destroy_workqueue(0,
					      soc->wmi_pdev[i]->wmi_rx_work_queue);
			wmi_debugfs_remove(soc->wmi_pdev[i]);
			buf = qdf_nbuf_queue_remove(
					&soc->wmi_pdev[i]->event_queue);
			while (buf) {
				qdf_nbuf_free(buf);
				buf = qdf_nbuf_queue_remove(
						&soc->wmi_pdev[i]->event_queue);
			}

			wmi_log_buffer_free(soc->wmi_pdev[i]);

			/* Free events logs list */
			if (soc->wmi_pdev[i]->events_logs_list)
				qdf_mem_free(
					soc->wmi_pdev[i]->events_logs_list);

			qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock);
			qdf_mem_free(soc->wmi_pdev[i]);
		}
	}
	qdf_spinlock_destroy(&soc->ctx_lock);

	if (soc->wmi_service_bitmap) {
		qdf_mem_free(soc->wmi_service_bitmap);
		soc->wmi_service_bitmap = NULL;
	}

	if (soc->wmi_ext_service_bitmap) {
		qdf_mem_free(soc->wmi_ext_service_bitmap);
		soc->wmi_ext_service_bitmap = NULL;
	}

	/* Decrease the ref count once refcount infra is present */
	soc->wmi_psoc = NULL;
	qdf_mem_free(soc);
}

/**
 * wmi_unified_remove_work() - detach for WMI work
 * @wmi_handle: handle to WMI
 *
 * A function that does not fully detach WMI, but just remove work
 * queue items associated with it. This is used to make sure that
 * before any other processing code that may destroy related contexts
 * (HTC, etc), work queue processing on WMI has already been stopped.
 *
 * Return: None
 */
void
wmi_unified_remove_work(struct wmi_unified *wmi_handle)
{
	wmi_buf_t buf;

	/* Wait for in-flight work, then drop any still-queued events. */
	qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue);
	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
	buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
	while (buf) {
		qdf_nbuf_free(buf);
		buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
	}
	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
}

/**
 * wmi_htc_tx_complete() - Process htc tx completion
 *
 * @ctx: handle to wmi soc (registered as HTC EpTxComplete context)
 * @htc_pkt: pointer to htc packet
 *
 * Records the completion when logging is enabled, zeroes and frees the
 * command buffer and the HTC packet wrapper, and decrements the
 * pending-command counter.
 *
 * @Return: none.
 */
static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
{
	struct wmi_soc *soc = (struct wmi_soc *) ctx;
	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
	u_int8_t *buf_ptr;
	u_int32_t len;
	struct wmi_unified *wmi_handle;
#ifdef WMI_INTERFACE_EVENT_LOGGING
	uint32_t cmd_id;
#endif

	ASSERT(wmi_cmd_buf);
	wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint);
	if (wmi_handle == NULL) {
		WMI_LOGE("%s: Unable to get wmi handle\n", __func__);
		QDF_ASSERT(0);
		return;
	}
#ifdef WMI_INTERFACE_EVENT_LOGGING
	/* NOTE(review): wmi_handle is already known non-NULL here (the
	 * NULL case returned above), so the re-check is redundant.
	 */
	if (wmi_handle && wmi_handle->log_info.wmi_logging_enable) {
		cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf),
				WMI_CMD_HDR, COMMANDID);

	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
	/* Record 16 bytes of WMI cmd tx complete data
	- exclude TLV and WMI headers */
	if (wmi_handle->ops->is_management_record(cmd_id)) {
		WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
			qdf_nbuf_data(wmi_cmd_buf) +
			wmi_handle->soc->buf_offset_command);
	} else {
		WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
			qdf_nbuf_data(wmi_cmd_buf) +
			wmi_handle->soc->buf_offset_command);
	}

	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
	}
#endif
	buf_ptr = (u_int8_t *) wmi_buf_data(wmi_cmd_buf);
	len = qdf_nbuf_len(wmi_cmd_buf);
	/* Scrub command contents before returning the buffer. */
	qdf_mem_zero(buf_ptr, len);
	wmi_buf_free(wmi_cmd_buf);
	qdf_mem_free(htc_pkt);
	qdf_atomic_dec(&wmi_handle->pending_cmds);
}

/**
 * wmi_connect_pdev_htc_service() - WMI API to get connect to HTC service
 *
 * @soc: handle to WMI soc
 * @pdev_idx: Pdev index
 *
 * Connects the pdev's WMI control service endpoint and records the
 * resulting endpoint id and max message length in the soc.
 *
 * @Return: 0 on success, HTC error status otherwise.
 */
static int wmi_connect_pdev_htc_service(struct wmi_soc *soc,
					uint32_t pdev_idx)
{
	int status;
	struct htc_service_connect_resp response;
	struct htc_service_connect_req connect;

	OS_MEMZERO(&connect, sizeof(connect));
	OS_MEMZERO(&response, sizeof(response));

	/* meta data is unused for now */
	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	/* these fields are the same for all service endpoints */
	connect.EpCallbacks.pContext = soc;
	connect.EpCallbacks.EpTxCompleteMultiple =
		NULL /* Control path completion ar6000_tx_complete */;
	connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */;
	connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */;
	connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */;
	connect.EpCallbacks.EpTxComplete =
		wmi_htc_tx_complete /* ar6000_tx_queue_full */;

	/* connect to control service */
	connect.service_id = soc->svc_ids[pdev_idx];
	status = htc_connect_service(soc->htc_handle, &connect,
				     &response);


	if (status != EOK) {
		WMI_LOGE
			("Failed to connect to WMI CONTROL service status:%d\n",
			status);
		return status;
	}

	if (soc->is_async_ep)
		htc_set_async_ep(soc->htc_handle, response.Endpoint, true);

	soc->wmi_endpoint_id[pdev_idx] = response.Endpoint;
	soc->max_msg_len[pdev_idx] = response.MaxMsgLength;

	return 0;
}

/**
wmi_unified_connect_htc_service() - WMI API to get connect to HTC service 2838 * 2839 * @wmi_handle: handle to WMI. 2840 * 2841 * @Return: status. 2842 */ 2843 QDF_STATUS 2844 wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle, 2845 void *htc_handle) 2846 { 2847 uint32_t i; 2848 uint8_t wmi_ep_count; 2849 2850 wmi_handle->soc->htc_handle = htc_handle; 2851 2852 wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle); 2853 if (wmi_ep_count > WMI_MAX_RADIOS) 2854 return QDF_STATUS_E_FAULT; 2855 2856 for (i = 0; i < wmi_ep_count; i++) 2857 wmi_connect_pdev_htc_service(wmi_handle->soc, i); 2858 2859 wmi_handle->htc_handle = htc_handle; 2860 wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0]; 2861 wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0]; 2862 2863 return QDF_STATUS_SUCCESS; 2864 } 2865 2866 /** 2867 * wmi_get_host_credits() - WMI API to get updated host_credits 2868 * 2869 * @wmi_handle: handle to WMI. 2870 * 2871 * @Return: updated host_credits. 2872 */ 2873 int wmi_get_host_credits(wmi_unified_t wmi_handle) 2874 { 2875 int host_credits = 0; 2876 2877 htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle, 2878 &host_credits); 2879 return host_credits; 2880 } 2881 2882 /** 2883 * wmi_get_pending_cmds() - WMI API to get WMI Pending Commands in the HTC 2884 * queue 2885 * 2886 * @wmi_handle: handle to WMI. 2887 * 2888 * @Return: Pending Commands in the HTC queue. 2889 */ 2890 int wmi_get_pending_cmds(wmi_unified_t wmi_handle) 2891 { 2892 return qdf_atomic_read(&wmi_handle->pending_cmds); 2893 } 2894 2895 /** 2896 * wmi_set_target_suspend() - WMI API to set target suspend state 2897 * 2898 * @wmi_handle: handle to WMI. 2899 * @val: suspend state boolean. 2900 * 2901 * @Return: none. 
 */
void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val)
{
	qdf_atomic_set(&wmi_handle->is_target_suspended, val);
}

/**
 * wmi_tag_crash_inject() - WMI API to set crash injection state
 * @param wmi_handle: handle to WMI.
 * @param flag: crash injection state boolean.
 */
void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag)
{
	wmi_handle->tag_crash_inject = flag;
}

/**
 * wmi_set_is_wow_bus_suspended() - WMI API to set bus suspend state
 * @param wmi_handle: handle to WMI.
 * @param val: suspend state boolean.
 */
void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val)
{
	qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val);
}

/* Enable/disable forced target assert on the handle. */
void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val)
{
	wmi_handle->tgt_force_assert_enable = val;
}

/**
 * wmi_stop() - generic function to block unified WMI command
 * @wmi_handle: handle to WMI.
 *
 * Sets the stop-in-progress flag so subsequent command submission is
 * blocked; does not tear anything down.
 *
 * @Return: success always.
 */
int
wmi_stop(wmi_unified_t wmi_handle)
{
	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
		  "WMI Stop");
	wmi_handle->wmi_stopinprogress = 1;
	return 0;
}

#ifndef CONFIG_MCL
/**
 * wmi_flush_endpoint() - API to flush all the previous packets associated
 *                        with the wmi endpoint
 *
 * @param wmi_handle: handle to WMI.
 */
void
wmi_flush_endpoint(wmi_unified_t wmi_handle)
{
	htc_flush_endpoint(wmi_handle->htc_handle,
			   wmi_handle->wmi_endpoint_id, 0);
}
qdf_export_symbol(wmi_flush_endpoint);

/**
 * wmi_pdev_id_conversion_enable() - API to enable pdev_id conversion in WMI
 * By default pdev_id conversion is not done in WMI.
 * This API can be used enable conversion in WMI.
 * @param wmi_handle: handle to WMI
 * Return: none
 */
void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle)
{
	/* Conversion is only implemented for TLV targets. */
	if (wmi_handle->target_type == WMI_TLV_TARGET)
		wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle);
}

#endif