1 /* 2 * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /* 20 * Host WMI unified implementation 21 */ 22 #include "htc_api.h" 23 #include "htc_api.h" 24 #include "wmi_unified_priv.h" 25 #include "wmi_unified_api.h" 26 #include "qdf_module.h" 27 #include "qdf_platform.h" 28 #ifdef WMI_EXT_DBG 29 #include "qdf_list.h" 30 #include "qdf_atomic.h" 31 #endif 32 33 #ifndef WMI_NON_TLV_SUPPORT 34 #include "wmi_tlv_helper.h" 35 #endif 36 37 #include <linux/debugfs.h> 38 #include <target_if.h> 39 #include <qdf_debugfs.h> 40 #include "wmi_filtered_logging.h" 41 #include <wmi_hang_event.h> 42 43 /* This check for CONFIG_WIN temporary added due to redeclaration compilation 44 error in MCL. Error is caused due to inclusion of wmi.h in wmi_unified_api.h 45 which gets included here through ol_if_athvar.h. Eventually it is expected that 46 wmi.h will be removed from wmi_unified_api.h after cleanup, which will need 47 WMI_CMD_HDR to be defined here. 
*/ 48 /* Copied from wmi.h */ 49 #undef MS 50 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) 51 #undef SM 52 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) 53 #undef WO 54 #define WO(_f) ((_f##_OFFSET) >> 2) 55 56 #undef GET_FIELD 57 #define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f) 58 #undef SET_FIELD 59 #define SET_FIELD(_addr, _f, _val) \ 60 (*((uint32_t *)(_addr) + WO(_f)) = \ 61 (*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f)) 62 63 #define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \ 64 GET_FIELD(_msg_buf, _msg_type ## _ ## _f) 65 66 #define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \ 67 SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val) 68 69 #define WMI_EP_APASS 0x0 70 #define WMI_EP_LPASS 0x1 71 #define WMI_EP_SENSOR 0x2 72 73 /* 74 * * Control Path 75 * */ 76 typedef PREPACK struct { 77 uint32_t commandId:24, 78 reserved:2, /* used for WMI endpoint ID */ 79 plt_priv:6; /* platform private */ 80 } POSTPACK WMI_CMD_HDR; /* used for commands and events */ 81 82 #define WMI_CMD_HDR_COMMANDID_LSB 0 83 #define WMI_CMD_HDR_COMMANDID_MASK 0x00ffffff 84 #define WMI_CMD_HDR_COMMANDID_OFFSET 0x00000000 85 #define WMI_CMD_HDR_WMI_ENDPOINTID_MASK 0x03000000 86 #define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET 24 87 #define WMI_CMD_HDR_PLT_PRIV_LSB 24 88 #define WMI_CMD_HDR_PLT_PRIV_MASK 0xff000000 89 #define WMI_CMD_HDR_PLT_PRIV_OFFSET 0x00000000 90 /* end of copy wmi.h */ 91 92 #define WMI_MIN_HEAD_ROOM 64 93 94 /* WBUFF pool sizes for WMI */ 95 /* Allocation of size 256 bytes */ 96 #define WMI_WBUFF_POOL_0_SIZE 128 97 /* Allocation of size 512 bytes */ 98 #define WMI_WBUFF_POOL_1_SIZE 16 99 /* Allocation of size 1024 bytes */ 100 #define WMI_WBUFF_POOL_2_SIZE 8 101 /* Allocation of size 2048 bytes */ 102 #define WMI_WBUFF_POOL_3_SIZE 8 103 104 #ifdef WMI_INTERFACE_EVENT_LOGGING 105 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)) 106 /* TODO Cleanup this backported function */ 107 static int wmi_bp_seq_printf(qdf_debugfs_file_t m, 
const char *f, ...) 108 { 109 va_list args; 110 111 va_start(args, f); 112 seq_vprintf(m, f, args); 113 va_end(args); 114 115 return 0; 116 } 117 #else 118 #define wmi_bp_seq_printf(m, fmt, ...) seq_printf((m), fmt, ##__VA_ARGS__) 119 #endif 120 121 #ifndef MAX_WMI_INSTANCES 122 #define CUSTOM_MGMT_CMD_DATA_SIZE 4 123 #endif 124 125 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC 126 /* WMI commands */ 127 uint32_t g_wmi_command_buf_idx = 0; 128 struct wmi_command_debug wmi_command_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 129 130 /* WMI commands TX completed */ 131 uint32_t g_wmi_command_tx_cmp_buf_idx = 0; 132 struct wmi_command_debug 133 wmi_command_tx_cmp_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 134 135 /* WMI events when processed */ 136 uint32_t g_wmi_event_buf_idx = 0; 137 struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 138 139 /* WMI events when queued */ 140 uint32_t g_wmi_rx_event_buf_idx = 0; 141 struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 142 #endif 143 144 #define WMI_COMMAND_RECORD(h, a, b) { \ 145 if (wmi_log_max_entry <= \ 146 *(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)) \ 147 *(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\ 148 ((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\ 149 [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\ 150 .command = a; \ 151 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. 
\ 152 wmi_command_log_buf_info.buf) \ 153 [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\ 154 b, wmi_record_max_length); \ 155 ((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\ 156 [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\ 157 time = qdf_get_log_timestamp(); \ 158 (*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++; \ 159 h->log_info.wmi_command_log_buf_info.length++; \ 160 } 161 162 #define WMI_COMMAND_TX_CMP_RECORD(h, a, b) { \ 163 if (wmi_log_max_entry <= \ 164 *(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\ 165 *(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 166 p_buf_tail_idx) = 0; \ 167 ((struct wmi_command_debug *)h->log_info. \ 168 wmi_command_tx_cmp_log_buf_info.buf) \ 169 [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 170 p_buf_tail_idx)]. \ 171 command = a; \ 172 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ 173 wmi_command_tx_cmp_log_buf_info.buf) \ 174 [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 175 p_buf_tail_idx)]. \ 176 data, b, wmi_record_max_length); \ 177 ((struct wmi_command_debug *)h->log_info. \ 178 wmi_command_tx_cmp_log_buf_info.buf) \ 179 [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 180 p_buf_tail_idx)]. \ 181 time = qdf_get_log_timestamp(); \ 182 (*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\ 183 h->log_info.wmi_command_tx_cmp_log_buf_info.length++; \ 184 } 185 186 #define WMI_EVENT_RECORD(h, a, b) { \ 187 if (wmi_log_max_entry <= \ 188 *(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)) \ 189 *(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\ 190 ((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\ 191 [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)]. \ 192 event = a; \ 193 qdf_mem_copy(((struct wmi_event_debug *)h->log_info. 
\ 194 wmi_event_log_buf_info.buf) \ 195 [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\ 196 wmi_record_max_length); \ 197 ((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\ 198 [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\ 199 qdf_get_log_timestamp(); \ 200 (*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++; \ 201 h->log_info.wmi_event_log_buf_info.length++; \ 202 } 203 204 #define WMI_RX_EVENT_RECORD(h, a, b) { \ 205 if (wmi_log_max_entry <= \ 206 *(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\ 207 *(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\ 208 ((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\ 209 [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ 210 event = a; \ 211 qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \ 212 wmi_rx_event_log_buf_info.buf) \ 213 [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ 214 data, b, wmi_record_max_length); \ 215 ((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\ 216 [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ 217 time = qdf_get_log_timestamp(); \ 218 (*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++; \ 219 h->log_info.wmi_rx_event_log_buf_info.length++; \ 220 } 221 222 #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC 223 uint32_t g_wmi_mgmt_command_buf_idx = 0; 224 struct 225 wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; 226 227 /* wmi_mgmt commands TX completed */ 228 uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0; 229 struct wmi_command_debug 230 wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; 231 232 /* wmi_mgmt events when received */ 233 uint32_t g_wmi_mgmt_rx_event_buf_idx = 0; 234 struct wmi_event_debug 235 wmi_mgmt_rx_event_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; 236 237 /* wmi_diag events when received */ 238 uint32_t g_wmi_diag_rx_event_buf_idx = 0; 239 struct wmi_event_debug 240 
wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY]; 241 #endif 242 243 #define WMI_MGMT_COMMAND_RECORD(h, a, b) { \ 244 if (wmi_mgmt_log_max_entry <= \ 245 *(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \ 246 *(h->log_info.wmi_mgmt_command_log_buf_info. \ 247 p_buf_tail_idx) = 0; \ 248 ((struct wmi_command_debug *)h->log_info. \ 249 wmi_mgmt_command_log_buf_info.buf) \ 250 [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ 251 command = a; \ 252 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ 253 wmi_mgmt_command_log_buf_info.buf) \ 254 [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ 255 data, b, \ 256 wmi_record_max_length); \ 257 ((struct wmi_command_debug *)h->log_info. \ 258 wmi_mgmt_command_log_buf_info.buf) \ 259 [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ 260 time = qdf_get_log_timestamp(); \ 261 (*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\ 262 h->log_info.wmi_mgmt_command_log_buf_info.length++; \ 263 } 264 265 #define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) { \ 266 if (wmi_mgmt_log_max_entry <= \ 267 *(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 268 p_buf_tail_idx)) \ 269 *(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 270 p_buf_tail_idx) = 0; \ 271 ((struct wmi_command_debug *)h->log_info. \ 272 wmi_mgmt_command_tx_cmp_log_buf_info.buf) \ 273 [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 274 p_buf_tail_idx)].command = a; \ 275 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ 276 wmi_mgmt_command_tx_cmp_log_buf_info.buf)\ 277 [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 278 p_buf_tail_idx)].data, b, \ 279 wmi_record_max_length); \ 280 ((struct wmi_command_debug *)h->log_info. \ 281 wmi_mgmt_command_tx_cmp_log_buf_info.buf) \ 282 [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 283 p_buf_tail_idx)].time = \ 284 qdf_get_log_timestamp(); \ 285 (*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. 
\ 286 p_buf_tail_idx))++; \ 287 h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++; \ 288 } 289 290 #define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do { \ 291 if (wmi_mgmt_log_max_entry <= \ 292 *(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\ 293 *(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\ 294 ((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\ 295 [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\ 296 .event = a; \ 297 qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \ 298 wmi_mgmt_event_log_buf_info.buf) \ 299 [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\ 300 data, b, wmi_record_max_length); \ 301 ((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\ 302 [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\ 303 time = qdf_get_log_timestamp(); \ 304 (*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++; \ 305 h->log_info.wmi_mgmt_event_log_buf_info.length++; \ 306 } while (0); 307 308 #define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do { \ 309 if (wmi_diag_log_max_entry <= \ 310 *(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\ 311 *(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\ 312 ((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\ 313 [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\ 314 .event = a; \ 315 qdf_mem_copy(((struct wmi_event_debug *)h->log_info. 
\ 316 wmi_diag_event_log_buf_info.buf) \ 317 [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\ 318 data, b, wmi_record_max_length); \ 319 ((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\ 320 [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\ 321 time = qdf_get_log_timestamp(); \ 322 (*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++; \ 323 h->log_info.wmi_diag_event_log_buf_info.length++; \ 324 } while (0); 325 326 /* These are defined to made it as module param, which can be configured */ 327 uint32_t wmi_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY; 328 uint32_t wmi_mgmt_log_max_entry = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY; 329 uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY; 330 uint32_t wmi_record_max_length = WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH; 331 uint32_t wmi_display_size = 100; 332 333 #ifdef WMI_EXT_DBG 334 335 /** 336 * wmi_ext_dbg_msg_enqueue() - enqueue wmi message 337 * 338 * @wmi_handle: wmi handler 339 * 340 * Return: size of wmi message queue after enqueue 341 */ 342 static uint32_t wmi_ext_dbg_msg_enqueue(struct wmi_unified *wmi_handle, 343 struct wmi_ext_dbg_msg *msg) 344 { 345 uint32_t list_size; 346 347 qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock); 348 qdf_list_insert_back_size(&wmi_handle->wmi_ext_dbg_msg_queue, 349 &msg->node, &list_size); 350 qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock); 351 352 return list_size; 353 } 354 355 /** 356 * wmi_ext_dbg_msg_dequeue() - dequeue wmi message 357 * 358 * @wmi_handle: wmi handler 359 * 360 * Return: wmi msg on success else NULL 361 */ 362 static struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_dequeue(struct wmi_unified 363 *wmi_handle) 364 { 365 qdf_list_node_t *list_node = NULL; 366 367 qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock); 368 qdf_list_remove_front(&wmi_handle->wmi_ext_dbg_msg_queue, &list_node); 369 qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock); 370 371 if 
(!list_node) 372 return NULL; 373 374 return qdf_container_of(list_node, struct wmi_ext_dbg_msg, node); 375 } 376 377 /** 378 * wmi_ext_dbg_msg_record() - record wmi messages 379 * 380 * @wmi_handle: wmi handler 381 * @buf: wmi message buffer 382 * @len: wmi message length 383 * @type: wmi message type 384 * 385 * Return: QDF_STATUS_SUCCESS on successful recording else failure. 386 */ 387 static QDF_STATUS wmi_ext_dbg_msg_record(struct wmi_unified *wmi_handle, 388 uint8_t *buf, uint32_t len, 389 enum WMI_MSG_TYPE type) 390 { 391 struct wmi_ext_dbg_msg *msg; 392 uint32_t list_size; 393 394 msg = wmi_ext_dbg_msg_get(len); 395 if (!msg) 396 return QDF_STATUS_E_NOMEM; 397 398 msg->len = len; 399 msg->type = type; 400 qdf_mem_copy(msg->buf, buf, len); 401 msg->ts = qdf_get_log_timestamp(); 402 list_size = wmi_ext_dbg_msg_enqueue(wmi_handle, msg); 403 404 if (list_size >= wmi_handle->wmi_ext_dbg_msg_queue_size) { 405 msg = wmi_ext_dbg_msg_dequeue(wmi_handle); 406 wmi_ext_dbg_msg_put(msg); 407 } 408 409 return QDF_STATUS_SUCCESS; 410 } 411 412 /** 413 * wmi_ext_dbg_msg_cmd_record() - record wmi command messages 414 * 415 * @wmi_handle: wmi handler 416 * @buf: wmi command buffer 417 * @len: wmi command message length 418 * 419 * Return: QDF_STATUS_SUCCESS on successful recording else failure. 420 */ 421 static QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified *wmi_handle, 422 uint8_t *buf, uint32_t len) 423 { 424 return wmi_ext_dbg_msg_record(wmi_handle, buf, len, 425 WMI_MSG_TYPE_CMD); 426 } 427 428 /** 429 * wmi_ext_dbg_msg_event_record() - record wmi event messages 430 * 431 * @wmi_handle: wmi handler 432 * @buf: wmi event buffer 433 * @len: wmi event message length 434 * 435 * Return: QDF_STATUS_SUCCESS on successful recording else failure. 
 */
static QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified *wmi_handle,
					       uint8_t *buf, uint32_t len)
{
	uint32_t id;

	/* Diag events are recorded elsewhere; skip them here. */
	id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
	if (id != wmi_handle->wmi_events[wmi_diag_event_id])
		return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
					      WMI_MSG_TYPE_EVENT);

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_ext_dbg_msg_queue_init() - create debugfs queue and associated lock
 *
 * @wmi_handle: wmi handler
 *
 * Return: none
 */
static void wmi_ext_dbg_msg_queue_init(struct wmi_unified *wmi_handle)
{
	qdf_list_create(&wmi_handle->wmi_ext_dbg_msg_queue,
			wmi_handle->wmi_ext_dbg_msg_queue_size);
	qdf_spinlock_create(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
}

/**
 * wmi_ext_dbg_msg_queue_deinit() - destroy debugfs queue and associated lock
 *
 * @wmi_handle: wmi handler
 *
 * Return: none
 */
static void wmi_ext_dbg_msg_queue_deinit(struct wmi_unified *wmi_handle)
{
	qdf_list_destroy(&wmi_handle->wmi_ext_dbg_msg_queue);
	qdf_spinlock_destroy(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
}

/**
 * wmi_ext_dbg_msg_show() - debugfs function to display whole content of
 * wmi command/event messages including headers.
 *
 * @file: qdf debugfs file handler
 * @arg: pointer to wmi handler
 *
 * Dequeues one message per call and prints its header fields plus a
 * hexdump of the raw payload. If the debugfs buffer overflowed, the
 * message is pushed back to the queue front so the next call retries it;
 * otherwise the message is released.
 *
 * Return: QDF_STATUS_SUCCESS if all the messages are shown successfully,
 * else QDF_STATUS_E_AGAIN if more data to show.
 */
static QDF_STATUS wmi_ext_dbg_msg_show(qdf_debugfs_file_t file, void *arg)
{
	struct wmi_unified *wmi_handle = (struct wmi_unified *)arg;
	struct wmi_ext_dbg_msg *msg;
	uint64_t secs, usecs;

	msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
	if (!msg)
		return QDF_STATUS_SUCCESS;

	qdf_debugfs_printf(file, "%s: 0x%x\n",
			   msg->type == WMI_MSG_TYPE_CMD ? "COMMAND" :
			   "EVENT", WMI_GET_FIELD(msg->buf, WMI_CMD_HDR,
						  COMMANDID));
	qdf_log_timestamp_to_secs(msg->ts, &secs, &usecs);
	qdf_debugfs_printf(file, "Time: %llu.%llu\n", secs, usecs);
	qdf_debugfs_printf(file, "Length:%d\n", msg->len);
	qdf_debugfs_hexdump(file, msg->buf, msg->len,
			    WMI_EXT_DBG_DUMP_ROW_SIZE,
			    WMI_EXT_DBG_DUMP_GROUP_SIZE);
	qdf_debugfs_printf(file, "\n");

	if (qdf_debugfs_overflow(file)) {
		/* Output did not fit: re-queue at the front for a retry. */
		qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
		qdf_list_insert_front(&wmi_handle->wmi_ext_dbg_msg_queue,
				      &msg->node);
		qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);

	} else {
		wmi_ext_dbg_msg_put(msg);
	}

	return QDF_STATUS_E_AGAIN;
}

/**
 * wmi_ext_dbg_msg_write() - debugfs write not supported
 *
 * @priv: private data
 * @buf: received data buffer
 * @len: length of received buffer
 *
 * Return: QDF_STATUS_E_NOSUPPORT.
 */
static QDF_STATUS wmi_ext_dbg_msg_write(void *priv, const char *buf,
					qdf_size_t len)
{
	return QDF_STATUS_E_NOSUPPORT;
}

/* File-scope debugfs ops; .priv is filled in at wmi_ext_dbgfs_init(). */
static struct qdf_debugfs_fops wmi_ext_dbgfs_ops = {
	.show = wmi_ext_dbg_msg_show,
	.write = wmi_ext_dbg_msg_write,
	.priv = NULL,
};

/**
 * wmi_ext_debugfs_init() - init debugfs items for extended wmi dump.
 *
 * @wmi_handle: wmi handler
 *
 * Return: QDF_STATUS_SUCCESS if debugfs is initialized else
 *	   QDF_STATUS_E_FAILURE
 */
static QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle)
{
	qdf_dentry_t dentry;

	dentry = qdf_debugfs_create_dir(WMI_EXT_DBG_DIR, NULL);
	if (!dentry) {
		WMI_LOGE("error while creating extended wmi debugfs dir");
		return QDF_STATUS_E_FAILURE;
	}

	/* NOTE(review): wmi_ext_dbgfs_ops is a single file-scope object,
	 * so attaching a second wmi_unified instance overwrites .priv for
	 * the first instance's debugfs file — confirm single-instance use.
	 */
	wmi_ext_dbgfs_ops.priv = wmi_handle;
	if (!qdf_debugfs_create_file(WMI_EXT_DBG_FILE, WMI_EXT_DBG_FILE_PERM,
				     dentry, &wmi_ext_dbgfs_ops)) {
		qdf_debugfs_remove_dir(dentry);
		WMI_LOGE("error while creating extended wmi debugfs file");
		return QDF_STATUS_E_FAILURE;
	}

	wmi_handle->wmi_ext_dbg_dentry = dentry;
	wmi_handle->wmi_ext_dbg_msg_queue_size = WMI_EXT_DBG_QUEUE_SIZE;
	wmi_ext_dbg_msg_queue_init(wmi_handle);

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_ext_debugfs_deinit() - cleanup/deinit debugfs items of extended wmi dump.
 *
 * @wmi_handle: wmi handler
 *
 * Drains and frees all queued messages, destroys the queue and lock, and
 * removes the debugfs directory tree.
 *
 * Return: QDF_STATUS_SUCCESS if cleanup is successful
 */
static QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
{
	struct wmi_ext_dbg_msg *msg;

	while ((msg = wmi_ext_dbg_msg_dequeue(wmi_handle)))
		wmi_ext_dbg_msg_put(msg);

	wmi_ext_dbg_msg_queue_deinit(wmi_handle);
	qdf_debugfs_remove_dir_recursive(wmi_handle->wmi_ext_dbg_dentry);

	return QDF_STATUS_SUCCESS;
}

#endif /*WMI_EXT_DBG */

/**
 * wmi_log_init() - Initialize WMI event logging
 * @wmi_handle: WMI handle.
 *
 * Return: Initialization status
 */
#ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
/* Static-buffer variant: points each log-buffer descriptor at the
 * compile-time arrays and global tail indices declared above. Cannot fail.
 */
static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
{
	struct wmi_log_buf_t *cmd_log_buf =
		&wmi_handle->log_info.wmi_command_log_buf_info;
	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;

	struct wmi_log_buf_t *event_log_buf =
		&wmi_handle->log_info.wmi_event_log_buf_info;
	struct wmi_log_buf_t *rx_event_log_buf =
		&wmi_handle->log_info.wmi_rx_event_log_buf_info;

	struct wmi_log_buf_t *mgmt_cmd_log_buf =
		&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
	struct wmi_log_buf_t *mgmt_event_log_buf =
		&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
	struct wmi_log_buf_t *diag_event_log_buf =
		&wmi_handle->log_info.wmi_diag_event_log_buf_info;

	/* WMI commands */
	cmd_log_buf->length = 0;
	cmd_log_buf->buf_tail_idx = 0;
	cmd_log_buf->buf = wmi_command_log_buffer;
	cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx;
	cmd_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;

	/* WMI commands TX completed */
	cmd_tx_cmpl_log_buf->length = 0;
	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
	cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer;
	cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx;
	cmd_tx_cmpl_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;

	/* WMI events when processed */
	event_log_buf->length = 0;
	event_log_buf->buf_tail_idx = 0;
	event_log_buf->buf = wmi_event_log_buffer;
	event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx;
	event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;

	/* WMI events when queued */
	rx_event_log_buf->length = 0;
	rx_event_log_buf->buf_tail_idx = 0;
	rx_event_log_buf->buf = wmi_rx_event_log_buffer;
	rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx;
	rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;

	/* WMI Management commands */
	mgmt_cmd_log_buf->length = 0;
	mgmt_cmd_log_buf->buf_tail_idx = 0;
	mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer;
	mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx;
	mgmt_cmd_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY;

	/* WMI Management commands Tx completed*/
	mgmt_cmd_tx_cmp_log_buf->length = 0;
	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
	mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer;
	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
		&g_wmi_mgmt_command_tx_cmp_buf_idx;
	mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY;

	/* WMI Management events when received */
	mgmt_event_log_buf->length = 0;
	mgmt_event_log_buf->buf_tail_idx = 0;
	mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer;
	mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx;
	mgmt_event_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY;

	/* WMI diag events when received */
	diag_event_log_buf->length = 0;
	diag_event_log_buf->buf_tail_idx = 0;
	diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer;
	diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx;
	diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;

	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
	wmi_handle->log_info.wmi_logging_enable = 1;

	return QDF_STATUS_SUCCESS;
}
#else
/* Dynamic-alloc variant: allocates each ring buffer at runtime, sized by
 * the wmi_*_log_max_entry module parameters; the tail index lives inside
 * each descriptor. Logging stays disabled until every allocation succeeds.
 *
 * NOTE(review): the early E_NOMEM returns leave previously allocated
 * buffers in place; this is only leak-free if the caller invokes
 * wmi_log_buffer_free() on failure — confirm against the attach path.
 */
static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
{
	struct wmi_log_buf_t *cmd_log_buf =
		&wmi_handle->log_info.wmi_command_log_buf_info;
	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;

	struct wmi_log_buf_t *event_log_buf =
		&wmi_handle->log_info.wmi_event_log_buf_info;
	struct wmi_log_buf_t *rx_event_log_buf =
		&wmi_handle->log_info.wmi_rx_event_log_buf_info;

	struct wmi_log_buf_t *mgmt_cmd_log_buf =
		&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
	struct wmi_log_buf_t *mgmt_event_log_buf =
		&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
	struct wmi_log_buf_t *diag_event_log_buf =
		&wmi_handle->log_info.wmi_diag_event_log_buf_info;

	wmi_handle->log_info.wmi_logging_enable = 0;

	/* WMI commands */
	cmd_log_buf->length = 0;
	cmd_log_buf->buf_tail_idx = 0;
	cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
		wmi_log_max_entry * sizeof(struct wmi_command_debug));
	cmd_log_buf->size = wmi_log_max_entry;

	if (!cmd_log_buf->buf)
		return QDF_STATUS_E_NOMEM;

	cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx;

	/* WMI commands TX completed */
	cmd_tx_cmpl_log_buf->length = 0;
	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
	cmd_tx_cmpl_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
		wmi_log_max_entry * sizeof(struct wmi_command_debug));
	cmd_tx_cmpl_log_buf->size = wmi_log_max_entry;

	if (!cmd_tx_cmpl_log_buf->buf)
		return QDF_STATUS_E_NOMEM;

	cmd_tx_cmpl_log_buf->p_buf_tail_idx =
		&cmd_tx_cmpl_log_buf->buf_tail_idx;

	/* WMI events when processed */
	event_log_buf->length = 0;
	event_log_buf->buf_tail_idx = 0;
	event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
		wmi_log_max_entry * sizeof(struct wmi_event_debug));
	event_log_buf->size = wmi_log_max_entry;

	if (!event_log_buf->buf)
		return QDF_STATUS_E_NOMEM;

	event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx;

	/* WMI events when queued */
	rx_event_log_buf->length = 0;
	rx_event_log_buf->buf_tail_idx = 0;
	rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
		wmi_log_max_entry * sizeof(struct wmi_event_debug));
	rx_event_log_buf->size = wmi_log_max_entry;

	if (!rx_event_log_buf->buf)
		return QDF_STATUS_E_NOMEM;

	rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx;

	/* WMI Management commands */
	mgmt_cmd_log_buf->length = 0;
	mgmt_cmd_log_buf->buf_tail_idx = 0;
	mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
		wmi_mgmt_log_max_entry * sizeof(struct wmi_command_debug));
	mgmt_cmd_log_buf->size = wmi_mgmt_log_max_entry;

	if (!mgmt_cmd_log_buf->buf)
		return QDF_STATUS_E_NOMEM;

	mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx;

	/* WMI Management commands Tx completed*/
	mgmt_cmd_tx_cmp_log_buf->length = 0;
	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
	mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *)
		qdf_mem_malloc(
		wmi_mgmt_log_max_entry *
		sizeof(struct wmi_command_debug));
	mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_log_max_entry;

	if (!mgmt_cmd_tx_cmp_log_buf->buf)
		return QDF_STATUS_E_NOMEM;

	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
		&mgmt_cmd_tx_cmp_log_buf->buf_tail_idx;

	/* WMI Management events when received */
	mgmt_event_log_buf->length = 0;
	mgmt_event_log_buf->buf_tail_idx = 0;

	mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
		wmi_mgmt_log_max_entry *
		sizeof(struct wmi_event_debug));
	mgmt_event_log_buf->size = wmi_mgmt_log_max_entry;

	if (!mgmt_event_log_buf->buf)
		return QDF_STATUS_E_NOMEM;

	mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx;

	/* WMI diag events when received */
	diag_event_log_buf->length = 0;
	diag_event_log_buf->buf_tail_idx = 0;

	diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
		wmi_diag_log_max_entry *
		sizeof(struct wmi_event_debug));
	diag_event_log_buf->size = wmi_diag_log_max_entry;

	if (!diag_event_log_buf->buf)
		return QDF_STATUS_E_NOMEM;

	diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx;

	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
	wmi_handle->log_info.wmi_logging_enable = 1;

	wmi_filtered_logging_init(wmi_handle);

	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * wmi_log_buffer_free() - Free all dynamic allocated buffer memory for
 * event logging
 * @wmi_handle: WMI handle.
 *
 * Return: None
 */
#ifdef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
{
	wmi_filtered_logging_free(wmi_handle);

	/* Safe after a partially failed wmi_log_init(): only non-NULL
	 * buffers are freed.
	 */
	if (wmi_handle->log_info.wmi_command_log_buf_info.buf)
		qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf);
	if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf)
		qdf_mem_free(
		wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf);
	if (wmi_handle->log_info.wmi_event_log_buf_info.buf)
		qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf);
	if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf)
		qdf_mem_free(
			wmi_handle->log_info.wmi_rx_event_log_buf_info.buf);
	if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf)
		qdf_mem_free(
			wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf);
	if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf)
		qdf_mem_free(
		wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf);
	if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf)
		qdf_mem_free(
			wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf);
	if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf)
		qdf_mem_free(
			wmi_handle->log_info.wmi_diag_event_log_buf_info.buf);
	wmi_handle->log_info.wmi_logging_enable = 0;

	qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock);
}
#else
static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
{
	/* Do Nothing */
}
#endif

/**
 * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer
 * @log_buffer: the command log buffer metadata of the buffer to print
 * @count: the maximum number of entries to print
 * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
 * @print_priv: any data required by the print method, e.g. a file handle
 *
 * Return: None
 */
static void
wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
			 qdf_abstract_print *print, void *print_priv)
{
	static const int data_len =
		WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
	char str[128];
	uint32_t idx;

	/* Clamp the request to what the ring actually holds. */
	if (count > log_buffer->size)
		count = log_buffer->size;
	if (count > log_buffer->length)
		count = log_buffer->length;

	/* subtract count from index, and wrap if necessary */
	idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
	idx %= log_buffer->size;

	print(print_priv, "Time (seconds) Cmd Id Payload");
	while (count) {
		struct wmi_command_debug *cmd_log = (struct wmi_command_debug *)
			&((struct wmi_command_debug *)log_buffer->buf)[idx];
		uint64_t secs, usecs;
		int len = 0;
		int i;

		qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
		len += scnprintf(str + len, sizeof(str) - len,
				 "% 8lld.%06lld %6u (0x%06x) ",
				 secs, usecs,
				 cmd_log->command, cmd_log->command);
		for (i = 0; i < data_len; ++i) {
			len += scnprintf(str + len, sizeof(str) - len,
					 "0x%08x ", cmd_log->data[i]);
		}

		print(print_priv, str);

		--count;
		++idx;
		if (idx >= log_buffer->size)
			idx = 0;
	}
}

/**
 * wmi_print_event_log_buffer() - an output agnostic wmi event log printer
 * @log_buffer: the event log buffer metadata of the buffer to print
 * @count: the maximum number of entries to
print 928 * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper 929 * @print_priv: any data required by the print method, e.g. a file handle 930 * 931 * Return: None 932 */ 933 static void 934 wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count, 935 qdf_abstract_print *print, void *print_priv) 936 { 937 static const int data_len = 938 WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t); 939 char str[128]; 940 uint32_t idx; 941 942 if (count > log_buffer->size) 943 count = log_buffer->size; 944 if (count > log_buffer->length) 945 count = log_buffer->length; 946 947 /* subtract count from index, and wrap if necessary */ 948 idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count; 949 idx %= log_buffer->size; 950 951 print(print_priv, "Time (seconds) Event Id Payload"); 952 while (count) { 953 struct wmi_event_debug *event_log = (struct wmi_event_debug *) 954 &((struct wmi_event_debug *)log_buffer->buf)[idx]; 955 uint64_t secs, usecs; 956 int len = 0; 957 int i; 958 959 qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs); 960 len += scnprintf(str + len, sizeof(str) - len, 961 "% 8lld.%06lld %6u (0x%06x) ", 962 secs, usecs, 963 event_log->event, event_log->event); 964 for (i = 0; i < data_len; ++i) { 965 len += scnprintf(str + len, sizeof(str) - len, 966 "0x%08x ", event_log->data[i]); 967 } 968 969 print(print_priv, str); 970 971 --count; 972 ++idx; 973 if (idx >= log_buffer->size) 974 idx = 0; 975 } 976 } 977 978 inline void 979 wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count, 980 qdf_abstract_print *print, void *print_priv) 981 { 982 wmi_print_cmd_log_buffer( 983 &wmi->log_info.wmi_command_log_buf_info, 984 count, print, print_priv); 985 } 986 987 inline void 988 wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count, 989 qdf_abstract_print *print, void *print_priv) 990 { 991 wmi_print_cmd_log_buffer( 992 &wmi->log_info.wmi_command_tx_cmp_log_buf_info, 993 count, print, print_priv); 994 } 995 996 
/* Print the WMI management command log ring via the given printer. */
inline void
wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count,
		       qdf_abstract_print *print, void *print_priv)
{
	wmi_print_cmd_log_buffer(
		&wmi->log_info.wmi_mgmt_command_log_buf_info,
		count, print, print_priv);
}

/* Print the WMI management command tx-completion log ring. */
inline void
wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
			      qdf_abstract_print *print, void *print_priv)
{
	wmi_print_cmd_log_buffer(
		&wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info,
		count, print, print_priv);
}

/* Print the WMI event log ring via the given printer. */
inline void
wmi_print_event_log(wmi_unified_t wmi, uint32_t count,
		    qdf_abstract_print *print, void *print_priv)
{
	wmi_print_event_log_buffer(
		&wmi->log_info.wmi_event_log_buf_info,
		count, print, print_priv);
}

/* Print the WMI rx event log ring via the given printer. */
inline void
wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count,
		       qdf_abstract_print *print, void *print_priv)
{
	wmi_print_event_log_buffer(
		&wmi->log_info.wmi_rx_event_log_buf_info,
		count, print, print_priv);
}

/* Print the WMI management event log ring via the given printer. */
inline void
wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count,
			 qdf_abstract_print *print, void *print_priv)
{
	wmi_print_event_log_buffer(
		&wmi->log_info.wmi_mgmt_event_log_buf_info,
		count, print, print_priv);
}


/* debugfs routines*/

/**
 * debug_wmi_##func_base##_show() - debugfs functions to display content of
 * command and event buffers. Macro uses max buffer length to display
 * buffer when it is wraparound.
 *
 * @m: debugfs handler to access wmi_handle
 * @v: Variable arguments (not used)
 *
 * Return: Length of characters printed
 */
/*
 * NOTE(review): the ring contents are read after wmi_record_lock is
 * dropped, so concurrently recorded entries may overwrite what is being
 * printed — presumably acceptable for a best-effort debug view; confirm.
 */
#define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size)	\
	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
						void *v)		\
	{								\
		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
		struct wmi_log_buf_t *wmi_log =				\
			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
		int pos, nread, outlen;					\
		int i;							\
		uint64_t secs, usecs;					\
									\
		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
		if (!wmi_log->length) {					\
			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
			return wmi_bp_seq_printf(m,			\
			"no elements to read from ring buffer!\n");	\
		}							\
									\
		if (wmi_log->length <= wmi_ring_size)			\
			nread = wmi_log->length;			\
		else							\
			nread = wmi_ring_size;				\
									\
		if (*(wmi_log->p_buf_tail_idx) == 0)			\
			/* tail can be 0 after wrap-around */		\
			pos = wmi_ring_size - 1;			\
		else							\
			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
									\
		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
		while (nread--) {					\
			struct wmi_command_debug *wmi_record;		\
									\
			wmi_record = (struct wmi_command_debug *)	\
			&(((struct wmi_command_debug *)wmi_log->buf)[pos]);\
			outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n",	\
				(wmi_record->command));			\
			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
						  &usecs);		\
			outlen +=					\
			wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\
				secs, usecs);				\
			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
			for (i = 0; i < (wmi_record_max_length/		\
				sizeof(uint32_t)); i++)			\
				outlen += wmi_bp_seq_printf(m, "%x ",	\
					wmi_record->data[i]);		\
			outlen += wmi_bp_seq_printf(m, "\n");		\
									\
			if (pos == 0)					\
				pos = wmi_ring_size - 1;		\
			else						\
				pos--;					\
		}							\
		return outlen;						\
	}								\

/*
 * Same show-function generator as above, but walking struct
 * wmi_event_debug records instead of struct wmi_command_debug.
 */
#define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size)	\
	static int debug_wmi_##func_base##_show(struct seq_file *m,	\
						void *v)		\
	{								\
		wmi_unified_t wmi_handle = (wmi_unified_t) m->private;	\
		struct wmi_log_buf_t *wmi_log =				\
			&wmi_handle->log_info.wmi_##func_base##_buf_info;\
		int pos, nread, outlen;					\
		int i;							\
		uint64_t secs, usecs;					\
									\
		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
		if (!wmi_log->length) {					\
			qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
			return wmi_bp_seq_printf(m,			\
			"no elements to read from ring buffer!\n");	\
		}							\
									\
		if (wmi_log->length <= wmi_ring_size)			\
			nread = wmi_log->length;			\
		else							\
			nread = wmi_ring_size;				\
									\
		if (*(wmi_log->p_buf_tail_idx) == 0)			\
			/* tail can be 0 after wrap-around */		\
			pos = wmi_ring_size - 1;			\
		else							\
			pos = *(wmi_log->p_buf_tail_idx) - 1;		\
									\
		outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
		while (nread--) {					\
			struct wmi_event_debug *wmi_record;		\
									\
			wmi_record = (struct wmi_event_debug *)		\
			&(((struct wmi_event_debug *)wmi_log->buf)[pos]);\
			qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
						  &usecs);		\
			outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\
				(wmi_record->event));			\
			outlen +=					\
			wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\
				secs, usecs);				\
			outlen += wmi_bp_seq_printf(m, "CMD = ");	\
			for (i = 0; i < (wmi_record_max_length/		\
				sizeof(uint32_t)); i++)			\
				outlen += wmi_bp_seq_printf(m, "%x ",	\
					wmi_record->data[i]);		\
			outlen += wmi_bp_seq_printf(m, "\n");		\
									\
			if (pos == 0)					\
				pos = wmi_ring_size - 1;		\
			else						\
				pos--;					\
		}							\
		return outlen;						\
	}

GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size);
GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size);
GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size);
GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size);
GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size);
GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log,
				  wmi_display_size);
GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size);

/**
 * debug_wmi_enable_show() - debugfs functions to display enable state of
 * wmi logging feature.
 *
 * @m: debugfs handler to access wmi_handle
 * @v: Variable arguments (not used)
 *
 * Return: always 1
 */
static int debug_wmi_enable_show(struct seq_file *m, void *v)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) m->private;

	return wmi_bp_seq_printf(m, "%d\n",
			wmi_handle->log_info.wmi_logging_enable);
}

/**
 * debug_wmi_log_size_show() - debugfs functions to display configured size of
 * wmi logging command/event buffer and management command/event buffer.
 *
 * @m: debugfs handler to access wmi_handle
 * @v: Variable arguments (not used)
 *
 * Return: Length of characters printed
 */
static int debug_wmi_log_size_show(struct seq_file *m, void *v)
{

	wmi_bp_seq_printf(m, "WMI command/event log max size:%d\n",
			  wmi_log_max_entry);
	return wmi_bp_seq_printf(m,
			"WMI management command/events log max size:%d\n",
			wmi_mgmt_log_max_entry);
}

/**
 * debug_wmi_##func_base##_write() - debugfs functions to clear
 * wmi logging command/event buffer and management command/event buffer.
1218 * 1219 * @file: file handler to access wmi_handle 1220 * @buf: received data buffer 1221 * @count: length of received buffer 1222 * @ppos: Not used 1223 * 1224 * Return: count 1225 */ 1226 #define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\ 1227 static ssize_t debug_wmi_##func_base##_write(struct file *file, \ 1228 const char __user *buf, \ 1229 size_t count, loff_t *ppos) \ 1230 { \ 1231 int k, ret; \ 1232 wmi_unified_t wmi_handle = \ 1233 ((struct seq_file *)file->private_data)->private;\ 1234 struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info. \ 1235 wmi_##func_base##_buf_info; \ 1236 char locbuf[50]; \ 1237 \ 1238 if ((!buf) || (count > 50)) \ 1239 return -EFAULT; \ 1240 \ 1241 if (copy_from_user(locbuf, buf, count)) \ 1242 return -EFAULT; \ 1243 \ 1244 ret = sscanf(locbuf, "%d", &k); \ 1245 if ((ret != 1) || (k != 0)) { \ 1246 WMI_LOGE("Wrong input, echo 0 to clear the wmi buffer");\ 1247 return -EINVAL; \ 1248 } \ 1249 \ 1250 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\ 1251 qdf_mem_zero(wmi_log->buf, wmi_ring_size * \ 1252 sizeof(struct wmi_record_type)); \ 1253 wmi_log->length = 0; \ 1254 *(wmi_log->p_buf_tail_idx) = 0; \ 1255 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\ 1256 \ 1257 return count; \ 1258 } 1259 1260 GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_log_max_entry, 1261 wmi_command_debug); 1262 GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_log_max_entry, 1263 wmi_command_debug); 1264 GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_log_max_entry, 1265 wmi_event_debug); 1266 GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_log_max_entry, 1267 wmi_event_debug); 1268 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_log_max_entry, 1269 wmi_command_debug); 1270 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log, 1271 wmi_mgmt_log_max_entry, wmi_command_debug); 1272 GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_log_max_entry, 1273 wmi_event_debug); 1274 1275 /** 1276 * 
debug_wmi_enable_write() - debugfs functions to enable/disable 1277 * wmi logging feature. 1278 * 1279 * @file: file handler to access wmi_handle 1280 * @buf: received data buffer 1281 * @count: length of received buffer 1282 * @ppos: Not used 1283 * 1284 * Return: count 1285 */ 1286 static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf, 1287 size_t count, loff_t *ppos) 1288 { 1289 wmi_unified_t wmi_handle = 1290 ((struct seq_file *)file->private_data)->private; 1291 int k, ret; 1292 char locbuf[50]; 1293 1294 if ((!buf) || (count > 50)) 1295 return -EFAULT; 1296 1297 if (copy_from_user(locbuf, buf, count)) 1298 return -EFAULT; 1299 1300 ret = sscanf(locbuf, "%d", &k); 1301 if ((ret != 1) || ((k != 0) && (k != 1))) 1302 return -EINVAL; 1303 1304 wmi_handle->log_info.wmi_logging_enable = k; 1305 return count; 1306 } 1307 1308 /** 1309 * debug_wmi_log_size_write() - reserved. 1310 * 1311 * @file: file handler to access wmi_handle 1312 * @buf: received data buffer 1313 * @count: length of received buffer 1314 * @ppos: Not used 1315 * 1316 * Return: count 1317 */ 1318 static ssize_t debug_wmi_log_size_write(struct file *file, 1319 const char __user *buf, size_t count, loff_t *ppos) 1320 { 1321 return -EINVAL; 1322 } 1323 1324 /* Structure to maintain debug information */ 1325 struct wmi_debugfs_info { 1326 const char *name; 1327 const struct file_operations *ops; 1328 }; 1329 1330 #define DEBUG_FOO(func_base) { .name = #func_base, \ 1331 .ops = &debug_##func_base##_ops } 1332 1333 /** 1334 * debug_##func_base##_open() - Open debugfs entry for respective command 1335 * and event buffer. 
1336 * 1337 * @inode: node for debug dir entry 1338 * @file: file handler 1339 * 1340 * Return: open status 1341 */ 1342 #define GENERATE_DEBUG_STRUCTS(func_base) \ 1343 static int debug_##func_base##_open(struct inode *inode, \ 1344 struct file *file) \ 1345 { \ 1346 return single_open(file, debug_##func_base##_show, \ 1347 inode->i_private); \ 1348 } \ 1349 \ 1350 \ 1351 static struct file_operations debug_##func_base##_ops = { \ 1352 .open = debug_##func_base##_open, \ 1353 .read = seq_read, \ 1354 .llseek = seq_lseek, \ 1355 .write = debug_##func_base##_write, \ 1356 .release = single_release, \ 1357 }; 1358 1359 GENERATE_DEBUG_STRUCTS(wmi_command_log); 1360 GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log); 1361 GENERATE_DEBUG_STRUCTS(wmi_event_log); 1362 GENERATE_DEBUG_STRUCTS(wmi_rx_event_log); 1363 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log); 1364 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log); 1365 GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log); 1366 GENERATE_DEBUG_STRUCTS(wmi_enable); 1367 GENERATE_DEBUG_STRUCTS(wmi_log_size); 1368 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING 1369 GENERATE_DEBUG_STRUCTS(filtered_wmi_cmds); 1370 GENERATE_DEBUG_STRUCTS(filtered_wmi_evts); 1371 GENERATE_DEBUG_STRUCTS(wmi_filtered_command_log); 1372 GENERATE_DEBUG_STRUCTS(wmi_filtered_event_log); 1373 #endif 1374 1375 struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = { 1376 DEBUG_FOO(wmi_command_log), 1377 DEBUG_FOO(wmi_command_tx_cmp_log), 1378 DEBUG_FOO(wmi_event_log), 1379 DEBUG_FOO(wmi_rx_event_log), 1380 DEBUG_FOO(wmi_mgmt_command_log), 1381 DEBUG_FOO(wmi_mgmt_command_tx_cmp_log), 1382 DEBUG_FOO(wmi_mgmt_event_log), 1383 DEBUG_FOO(wmi_enable), 1384 DEBUG_FOO(wmi_log_size), 1385 #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING 1386 DEBUG_FOO(filtered_wmi_cmds), 1387 DEBUG_FOO(filtered_wmi_evts), 1388 DEBUG_FOO(wmi_filtered_command_log), 1389 DEBUG_FOO(wmi_filtered_event_log), 1390 #endif 1391 }; 1392 1393 1394 /** 1395 * wmi_debugfs_create() - Create debug_fs entry 
 * for wmi logging.
 *
 * @wmi_handle: wmi handle
 * @par_entry: debugfs directory entry under which the files are created
 *
 * Return: none
 */
static void wmi_debugfs_create(wmi_unified_t wmi_handle,
			       struct dentry *par_entry)
{
	int i;

	if (!par_entry)
		goto out;

	/* one debugfs file per entry of wmi_debugfs_infos[] */
	for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
		wmi_handle->debugfs_de[i] = debugfs_create_file(
				wmi_debugfs_infos[i].name, 0644, par_entry,
				wmi_handle, wmi_debugfs_infos[i].ops);

		if (!wmi_handle->debugfs_de[i]) {
			WMI_LOGE("debug Entry creation failed!");
			goto out;
		}
	}

	return;

out:
	/* any failure releases the command/event log buffers as well */
	WMI_LOGE("debug Entry creation failed!");
	wmi_log_buffer_free(wmi_handle);
	return;
}

/**
 * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
 * @wmi_handle: wmi handle
 *
 * Return: none
 */
static void wmi_debugfs_remove(wmi_unified_t wmi_handle)
{
	int i;
	struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir;

	if (dentry) {
		/*
		 * Drop the cached file handles; the files themselves are
		 * torn down by debugfs_remove_recursive() below.
		 */
		for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
			if (wmi_handle->debugfs_de[i])
				wmi_handle->debugfs_de[i] = NULL;
		}
	}

	if (dentry)
		debugfs_remove_recursive(dentry);
}

/**
 * wmi_debugfs_init() - debugfs functions to create debugfs directory and to
 * create debugfs entries.
 *
 * @wmi_handle: wmi handle
 * @pdev_idx: pdev index, used to name the per-pdev directory
 *
 * Return: init status
 */
static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx)
{
	char buf[32];

	snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u",
		 wmi_handle->soc->soc_idx, pdev_idx);

	wmi_handle->log_info.wmi_log_debugfs_dir =
		debugfs_create_dir(buf, NULL);

	if (!wmi_handle->log_info.wmi_log_debugfs_dir) {
		WMI_LOGE("error while creating debugfs dir for %s", buf);
		return QDF_STATUS_E_FAILURE;
	}
	wmi_debugfs_create(wmi_handle,
			   wmi_handle->log_info.wmi_log_debugfs_dir);

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_mgmt_cmd_record() - Wrapper function for mgmt command logging macro
 *
 * @wmi_handle: wmi handle
 * @cmd: mgmt command
 * @header: pointer to 802.11 header
 * @vdev_id: vdev id
 * @chanfreq: channel frequency
 *
 * Return: none
 */
void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
			 void *header, uint32_t vdev_id, uint32_t chanfreq)
{

	uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE];

	/* capture frame type/subtype plus vdev and channel context */
	data[0] = ((struct wmi_command_header *)header)->type;
	data[1] = ((struct wmi_command_header *)header)->sub_type;
	data[2] = vdev_id;
	data[3] = chanfreq;

	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);

	WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data);
	wmi_specific_cmd_record(wmi_handle, cmd, (uint8_t *)data);
	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
}
#else
/**
 * wmi_debugfs_remove() - Remove debugfs entry for wmi logging.
1513 * @wmi_handle: wmi handle 1514 * @dentry: debugfs directory entry 1515 * @id: Index to debug info data array 1516 * 1517 * Return: none 1518 */ 1519 static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { } 1520 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd, 1521 void *header, uint32_t vdev_id, uint32_t chanfreq) { } 1522 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { } 1523 #endif /*WMI_INTERFACE_EVENT_LOGGING */ 1524 qdf_export_symbol(wmi_mgmt_cmd_record); 1525 1526 int wmi_get_host_credits(wmi_unified_t wmi_handle); 1527 /* WMI buffer APIs */ 1528 1529 #ifdef NBUF_MEMORY_DEBUG 1530 wmi_buf_t 1531 wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len, 1532 const char *func_name, 1533 uint32_t line_num) 1534 { 1535 wmi_buf_t wmi_buf; 1536 1537 if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) { 1538 QDF_ASSERT(0); 1539 return NULL; 1540 } 1541 1542 wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, func_name, 1543 line_num); 1544 if (!wmi_buf) 1545 wmi_buf = qdf_nbuf_alloc_debug(NULL, 1546 roundup(len + WMI_MIN_HEAD_ROOM, 1547 4), 1548 WMI_MIN_HEAD_ROOM, 4, false, 1549 func_name, line_num); 1550 if (!wmi_buf) 1551 return NULL; 1552 1553 /* Clear the wmi buffer */ 1554 OS_MEMZERO(qdf_nbuf_data(wmi_buf), len); 1555 1556 /* 1557 * Set the length of the buffer to match the allocation size. 
1558 */ 1559 qdf_nbuf_set_pktlen(wmi_buf, len); 1560 1561 return wmi_buf; 1562 } 1563 qdf_export_symbol(wmi_buf_alloc_debug); 1564 1565 void wmi_buf_free(wmi_buf_t net_buf) 1566 { 1567 net_buf = wbuff_buff_put(net_buf); 1568 if (net_buf) 1569 qdf_nbuf_free(net_buf); 1570 } 1571 qdf_export_symbol(wmi_buf_free); 1572 #else 1573 wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len, 1574 const char *func, uint32_t line) 1575 { 1576 wmi_buf_t wmi_buf; 1577 1578 if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) { 1579 QDF_DEBUG_PANIC("Invalid length %u (via %s:%u)", 1580 len, func, line); 1581 return NULL; 1582 } 1583 1584 wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, __func__, 1585 __LINE__); 1586 if (!wmi_buf) 1587 wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len + 1588 WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4, 1589 false, func, line); 1590 1591 if (!wmi_buf) { 1592 wmi_nofl_err("%s:%d, failed to alloc len:%d", func, line, len); 1593 return NULL; 1594 } 1595 1596 /* Clear the wmi buffer */ 1597 OS_MEMZERO(qdf_nbuf_data(wmi_buf), len); 1598 1599 /* 1600 * Set the length of the buffer to match the allocation size. 1601 */ 1602 qdf_nbuf_set_pktlen(wmi_buf, len); 1603 1604 return wmi_buf; 1605 } 1606 qdf_export_symbol(wmi_buf_alloc_fl); 1607 1608 void wmi_buf_free(wmi_buf_t net_buf) 1609 { 1610 net_buf = wbuff_buff_put(net_buf); 1611 if (net_buf) 1612 qdf_nbuf_free(net_buf); 1613 } 1614 qdf_export_symbol(wmi_buf_free); 1615 #endif 1616 1617 /** 1618 * wmi_get_max_msg_len() - get maximum WMI message length 1619 * @wmi_handle: WMI handle. 
 *
 * This function returns the maximum WMI message length
 *
 * Return: maximum WMI message length
 */
uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle)
{
	/* headroom is reserved for the WMI/HTC headers */
	return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM;
}
qdf_export_symbol(wmi_get_max_msg_len);

#ifndef WMI_CMD_STRINGS
/* Fallback when the command-id-to-name string tables are not compiled in. */
static uint8_t *wmi_id_to_name(uint32_t wmi_command)
{
	return "Invalid WMI cmd";
}
#endif

/* Debug-trace the command that is about to be handed to HTC. */
static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag)
{
	WMI_LOGD("Send WMI command:%s command_id:%d htc_tag:%d",
		 wmi_id_to_name(cmd_id), cmd_id, tag);
}

/**
 * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence
 * @cmd_id: command to check
 *
 * Return: true if the command is part of the resume sequence.
 */
#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD
static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
{
	switch (cmd_id) {
	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
	case WMI_PDEV_RESUME_CMDID:
		return true;

	default:
		return false;
	}
}

#else
static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
{
	return false;
}

#endif

#ifdef FEATURE_WLAN_D0WOW
/*
 * Return true only for a D0-WOW *disable* request; such commands are
 * allowed through even while the target is suspended.
 */
static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
{
	wmi_d0_wow_enable_disable_cmd_fixed_param *cmd;

	if (cmd_id == WMI_D0_WOW_ENABLE_DISABLE_CMDID) {
		cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *)
			wmi_buf_data(buf);
		if (!cmd->enable)
			return true;
		else
			return false;
	}

	return false;
}
#else
static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
{
	return false;
}

#endif

/* Dump endpoint and tx-queue state when command submission hits a limit. */
static inline void wmi_unified_debug_dump(wmi_unified_t wmi_handle)
{
	wmi_nofl_err("Endpoint ID = %d, Tx Queue Depth = %d, soc_id = %u, target type = %s",
		     wmi_handle->wmi_endpoint_id,
		     htc_get_tx_queue_depth(wmi_handle->htc_handle,
					    wmi_handle->wmi_endpoint_id),
		     wmi_handle->soc->soc_idx,
		     (wmi_handle->target_type ==
		      WMI_TLV_TARGET ? "WMI_TLV_TARGET" :
		      "WMI_NON_TLV_TARGET"));
}

/**
 * wmi_unified_cmd_send_fl() - validate, wrap and submit a WMI command to HTC
 * @wmi_handle: wmi handle
 * @buf: command buffer (payload only; the WMI_CMD_HDR is pushed here)
 * @len: payload length, excluding the WMI header
 * @cmd_id: WMI command id
 * @func: caller's function name, for diagnostics
 * @line: caller's line number, for diagnostics
 *
 * On success ownership of @buf passes to HTC; on failure the caller keeps it.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf,
				   uint32_t len, uint32_t cmd_id,
				   const char *func, uint32_t line)
{
	HTC_PACKET *pkt;
	QDF_STATUS status;
	uint16_t htc_tag = 0;

	/* reject commands while suspended, except resume/D0WOW-disable */
	if (wmi_get_runtime_pm_inprogress(wmi_handle)) {
		htc_tag = wmi_handle->ops->wmi_set_htc_tx_tag(wmi_handle, buf,
							      cmd_id);
	} else if (qdf_atomic_read(&wmi_handle->is_target_suspended) &&
		   !wmi_is_pm_resume_cmd(cmd_id) &&
		   !wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id)) {
		wmi_nofl_err("Target is suspended (via %s:%u)",
			     func, line);
		return QDF_STATUS_E_BUSY;
	}

	if (wmi_handle->wmi_stopinprogress) {
		wmi_nofl_err("%s:%d, WMI stop in progress, wmi_handle:%pK",
			     func, line, wmi_handle);
		return QDF_STATUS_E_INVAL;
	}

#ifndef WMI_NON_TLV_SUPPORT
	/* Do sanity check on the TLV parameter structure */
	if (wmi_handle->target_type == WMI_TLV_TARGET) {
		void *buf_ptr = (void *)qdf_nbuf_data(buf);

		if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id)
			!= 0) {
			wmi_nofl_err("%s:%d, Invalid WMI Param Buffer for Cmd:%d",
				     func, line, cmd_id);
			return QDF_STATUS_E_INVAL;
		}
	}
#endif

	/* prepend the WMI command header in the buffer's headroom */
	if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
		wmi_nofl_err("%s:%d, Failed to send cmd %x, no memory",
			     func, line, cmd_id);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);

	/* flow control: bound the number of commands in flight */
	qdf_atomic_inc(&wmi_handle->pending_cmds);
	if (qdf_atomic_read(&wmi_handle->pending_cmds) >=
			wmi_handle->wmi_max_cmds) {
		wmi_nofl_err("hostcredits = %d",
			     wmi_get_host_credits(wmi_handle));
		htc_dump_counter_info(wmi_handle->htc_handle);
		qdf_atomic_dec(&wmi_handle->pending_cmds);
		wmi_nofl_err("%s:%d, MAX %d WMI Pending cmds reached",
			     func, line, wmi_handle->wmi_max_cmds);
		wmi_unified_debug_dump(wmi_handle);
		htc_ce_tasklet_debug_dump(wmi_handle->htc_handle);
		qdf_trigger_self_recovery(wmi_handle->soc->wmi_psoc,
					  QDF_WMI_EXCEED_MAX_PENDING_CMDS);
		return QDF_STATUS_E_BUSY;
	}

	pkt = qdf_mem_malloc_fl(sizeof(*pkt), func, line);
	if (!pkt) {
		qdf_atomic_dec(&wmi_handle->pending_cmds);
		return QDF_STATUS_E_NOMEM;
	}

	SET_HTC_PACKET_INFO_TX(pkt,
			       NULL,
			       qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
			       wmi_handle->wmi_endpoint_id, htc_tag);

	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);
	wmi_log_cmd_id(cmd_id, htc_tag);
	wmi_ext_dbg_msg_cmd_record(wmi_handle,
				   qdf_nbuf_data(buf), qdf_nbuf_len(buf));
#ifdef WMI_INTERFACE_EVENT_LOGGING
	if (wmi_handle->log_info.wmi_logging_enable) {
		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
		/*
		 * Record 16 bytes of WMI cmd data -
		 * exclude TLV and WMI headers
		 *
		 * WMI mgmt command already recorded in wmi_mgmt_cmd_record
		 */
		if (wmi_handle->ops->is_management_record(cmd_id) == false) {
			uint8_t *tmpbuf = (uint8_t *)qdf_nbuf_data(buf) +
				wmi_handle->soc->buf_offset_command;

			WMI_COMMAND_RECORD(wmi_handle, cmd_id, tmpbuf);
			wmi_specific_cmd_record(wmi_handle, cmd_id, tmpbuf);
		}
		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
	}
#endif

	status = htc_send_pkt(wmi_handle->htc_handle, pkt);

	if (QDF_STATUS_SUCCESS != status) {
		qdf_atomic_dec(&wmi_handle->pending_cmds);
		wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
			     func, line, status);
		qdf_mem_free(pkt);
		return status;
	}

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(wmi_unified_cmd_send_fl);

/**
 * wmi_unified_get_event_handler_ix() - gives event handler's index
 * @wmi_handle: handle to wmi
 * @event_id: wmi event id
 *
 * Return: event handler's index, or -1 when not registered
 */
static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
					    uint32_t event_id)
{
	uint32_t idx = 0;
	int32_t invalid_idx = -1;
	struct wmi_soc *soc = wmi_handle->soc;

	for (idx = 0; (idx < soc->max_event_idx &&
		       idx < WMI_UNIFIED_MAX_EVENT); ++idx) {
		if (wmi_handle->event_id[idx] == event_id &&
		    wmi_handle->event_handler[idx]) {
			return idx;
		}
	}

	return invalid_idx;
}

/**
 * wmi_unified_register_event() - register wmi event handler
 * @wmi_handle: handle to wmi
 * @event_id: wmi event id
 * @handler_func: wmi event handler function
 *
 * The handler always runs in WMI_RX_UMAC_CTX context.
 *
 * Return: 0 on success
 */
int wmi_unified_register_event(wmi_unified_t wmi_handle,
			       uint32_t event_id,
			       wmi_unified_event_handler handler_func)
{
	uint32_t idx = 0;
	uint32_t evt_id;
	struct wmi_soc *soc;

	if (!wmi_handle) {
		WMI_LOGE("WMI handle is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc = wmi_handle->soc;

	/* map the abstract event id to the target-specific one */
	if (event_id >= wmi_events_max ||
	    wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
			  "%s: Event id %d is unavailable",
			  __func__, event_id);
		return QDF_STATUS_E_FAILURE;
	}
	evt_id = wmi_handle->wmi_events[event_id];
	if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
			  "%s : event handler already registered 0x%x",
			  __func__, evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
			  "%s : no more event handlers 0x%x",
			  __func__, evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	idx = soc->max_event_idx;
	wmi_handle->event_handler[idx] = handler_func;
	wmi_handle->event_id[idx] = evt_id;
	qdf_spin_lock_bh(&soc->ctx_lock);
	wmi_handle->ctx[idx] = WMI_RX_UMAC_CTX;
	qdf_spin_unlock_bh(&soc->ctx_lock);
	soc->max_event_idx++;

	return 0;
}

/**
 * wmi_unified_register_event_handler() - register wmi event handler
 * @wmi_handle: handle to wmi
 * @event_id: wmi event id
 * @handler_func: wmi event handler function
 * @rx_ctx: rx execution context for wmi rx events
 *
 * This API is to support legacy requirements. Will be deprecated in future.
 * Return: 0 on success
 */
int wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
				       wmi_conv_event_id event_id,
				       wmi_unified_event_handler handler_func,
				       uint8_t rx_ctx)
{
	uint32_t idx = 0;
	uint32_t evt_id;
	struct wmi_soc *soc;

	if (!wmi_handle) {
		WMI_LOGE("WMI handle is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc = wmi_handle->soc;

	/* map the abstract event id to the target-specific one */
	if (event_id >= wmi_events_max ||
	    wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
			  "%s: Event id %d is unavailable",
			  __func__, event_id);
		return QDF_STATUS_E_FAILURE;
	}
	evt_id = wmi_handle->wmi_events[event_id];

	if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) {
		WMI_LOGI("event handler already registered 0x%x",
			 evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
		WMI_LOGE("no more event handlers 0x%x",
			 evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
		  "Registered event handler for event 0x%8x", evt_id);
	idx = soc->max_event_idx;
	wmi_handle->event_handler[idx] = handler_func;
	wmi_handle->event_id[idx] = evt_id;
	qdf_spin_lock_bh(&soc->ctx_lock);
	wmi_handle->ctx[idx] = rx_ctx;
	qdf_spin_unlock_bh(&soc->ctx_lock);
	soc->max_event_idx++;

	return 0;
}
qdf_export_symbol(wmi_unified_register_event_handler);

/**
 * wmi_unified_unregister_event() - unregister wmi event handler
 * @wmi_handle: handle to wmi
 * @event_id: wmi event id
 *
 * Return: 0 on success
 */
int wmi_unified_unregister_event(wmi_unified_t wmi_handle,
				 uint32_t event_id)
{
	uint32_t idx = 0;
	uint32_t evt_id;
	struct wmi_soc *soc = wmi_handle->soc;

	if (event_id >= wmi_events_max ||
	    wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
			  "%s: Event id %d is unavailable",
			  __func__, event_id);
		return QDF_STATUS_E_FAILURE;
	}
	evt_id = wmi_handle->wmi_events[event_id];

	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
	if (idx == -1) {
		WMI_LOGI("event handler is not registered: evt id 0x%x",
			 evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	wmi_handle->event_handler[idx] = NULL;
	wmi_handle->event_id[idx] = 0;
	--soc->max_event_idx;
	/* swap-delete: move the last registered handler into the hole */
	wmi_handle->event_handler[idx] =
		wmi_handle->event_handler[soc->max_event_idx];
	wmi_handle->event_id[idx] =
		wmi_handle->event_id[soc->max_event_idx];

	return 0;
}

/**
 * wmi_unified_unregister_event_handler() - unregister wmi event handler
 * @wmi_handle: handle to wmi
 * @event_id: wmi event id
 *
 * Return: 0 on success
 */
int wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
					 wmi_conv_event_id event_id)
{
	uint32_t idx = 0;
	uint32_t evt_id;
	struct wmi_soc *soc;

	if (!wmi_handle) {
		WMI_LOGE("WMI handle is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc = wmi_handle->soc;

	if (event_id >= wmi_events_max ||
	    wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
		WMI_LOGI("Event id %d is unavailable",
			 event_id);
		return QDF_STATUS_E_FAILURE;
	}
	evt_id = wmi_handle->wmi_events[event_id];

	idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
	if (idx == -1) {
		WMI_LOGI("event handler is not registered: evt id 0x%x",
			 evt_id);
		return QDF_STATUS_E_FAILURE;
	}
	wmi_handle->event_handler[idx] = NULL;
	wmi_handle->event_id[idx] = 0;
	--soc->max_event_idx;
	/* swap-delete: move the last registered handler into the hole */
	wmi_handle->event_handler[idx] =
		wmi_handle->event_handler[soc->max_event_idx];
	wmi_handle->event_id[idx] =
		wmi_handle->event_id[soc->max_event_idx];

	return 0;
}
qdf_export_symbol(wmi_unified_unregister_event_handler);

/*
 * Queue a received FW event buffer and kick the rx work queue so it is
 * processed in worker-thread context.
 */
void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
					    void *evt_buf)
{

	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
	qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
	qdf_queue_work(0, wmi_handle->wmi_rx_work_queue,
		       &wmi_handle->rx_event_work);

	return;
}

qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx);

/* Number of critical (roam sync) events currently queued but unprocessed. */
uint32_t wmi_critical_events_in_flight(struct wmi_unified *wmi)
{
	return qdf_atomic_read(&wmi->critical_events_in_flight);
}

/* Only the roam synch event is treated as critical. */
static bool
wmi_is_event_critical(struct wmi_unified *wmi_handle, uint32_t event_id)
{
	if (wmi_handle->wmi_events[wmi_roam_synch_event_id] == event_id)
		return true;

	return false;
}

/* Scheduler flush callback: drop a queued FW event without processing it. */
static QDF_STATUS wmi_discard_fw_event(struct scheduler_msg *msg)
{
	struct wmi_process_fw_event_params *event_param;

	if (!msg->bodyptr)
		return QDF_STATUS_E_INVAL;

	event_param = (struct wmi_process_fw_event_params *)msg->bodyptr;
	qdf_nbuf_free(event_param->evt_buf);
	qdf_mem_free(msg->bodyptr);
	msg->bodyptr = NULL;
	msg->bodyval = 0;
	msg->type = 0;

	return QDF_STATUS_SUCCESS;
}

/* Scheduler callback: process a queued FW event in scheduler context. */
static QDF_STATUS wmi_process_fw_event_handler(struct scheduler_msg *msg)
{
	struct wmi_process_fw_event_params *params =
		(struct wmi_process_fw_event_params *)msg->bodyptr;
	struct wmi_unified *wmi_handle;
	uint32_t event_id;

	wmi_handle = (struct wmi_unified *)params->wmi_handle;
	event_id = WMI_GET_FIELD(qdf_nbuf_data(params->evt_buf),
				 WMI_CMD_HDR, COMMANDID);
	wmi_process_fw_event(wmi_handle, params->evt_buf);

	/* balances the inc done when the critical event was queued */
	if (wmi_is_event_critical(wmi_handle, event_id))
		qdf_atomic_dec(&wmi_handle->critical_events_in_flight);

	qdf_mem_free(msg->bodyptr);

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_process_fw_event_sched_thread_ctx() - common event handler to serialize
 * event processing through scheduler thread
 * @ctx: wmi context
 * @ev: event buffer
 * @rx_ctx: rx execution context
 *
 * Return: 0 on success, errno on failure
 */
static QDF_STATUS
wmi_process_fw_event_sched_thread_ctx(struct wmi_unified *wmi,
				      void *ev)
{
	struct wmi_process_fw_event_params *params_buf;
	struct scheduler_msg msg = { 0 };
	uint32_t event_id;

	params_buf = qdf_mem_malloc(sizeof(struct wmi_process_fw_event_params));
	if (!params_buf) {
		wmi_err("malloc failed");
		qdf_nbuf_free(ev);
		return QDF_STATUS_E_NOMEM;
	}

	params_buf->wmi_handle = wmi;
	params_buf->evt_buf = ev;

	event_id = WMI_GET_FIELD(qdf_nbuf_data(params_buf->evt_buf),
				 WMI_CMD_HDR, COMMANDID);
	if (wmi_is_event_critical(wmi, event_id))
		qdf_atomic_inc(&wmi->critical_events_in_flight);

	msg.bodyptr = params_buf;
	msg.bodyval = 0;
	msg.callback =
wmi_process_fw_event_handler; 2146 msg.flush_callback = wmi_discard_fw_event; 2147 2148 if (QDF_STATUS_SUCCESS != 2149 scheduler_post_message(QDF_MODULE_ID_TARGET_IF, 2150 QDF_MODULE_ID_TARGET_IF, 2151 QDF_MODULE_ID_TARGET_IF, &msg)) { 2152 qdf_nbuf_free(ev); 2153 qdf_mem_free(params_buf); 2154 return QDF_STATUS_E_FAULT; 2155 } 2156 2157 return QDF_STATUS_SUCCESS; 2158 } 2159 2160 /** 2161 * wmi_get_pdev_ep: Get wmi handle based on endpoint 2162 * @soc: handle to wmi soc 2163 * @ep: endpoint id 2164 * 2165 * Return: none 2166 */ 2167 static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc, 2168 HTC_ENDPOINT_ID ep) 2169 { 2170 uint32_t i; 2171 2172 for (i = 0; i < WMI_MAX_RADIOS; i++) 2173 if (soc->wmi_endpoint_id[i] == ep) 2174 break; 2175 2176 if (i == WMI_MAX_RADIOS) 2177 return NULL; 2178 2179 return soc->wmi_pdev[i]; 2180 } 2181 2182 /** 2183 * wmi_mtrace_rx() - Wrappper function for qdf_mtrace api 2184 * @message_id: 32-Bit Wmi message ID 2185 * @vdev_id: Vdev ID 2186 * @data: Actual message contents 2187 * 2188 * This function converts the 32-bit WMI message ID in 15-bit message ID 2189 * format for qdf_mtrace as in qdf_mtrace message there are only 15 2190 * bits reserved for message ID. 2191 * out of these 15-bits, 8-bits (From LSB) specifies the WMI_GRP_ID 2192 * and remaining 7-bits specifies the actual WMI command. With this 2193 * notation there can be maximum 256 groups and each group can have 2194 * max 128 commands can be supported. 
*
 * Return: None
 */
static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data)
{
	uint16_t mtrace_message_id;

	/* 15-bit mtrace id = [14:7] WMI group id | [6:0] command id */
	mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) |
		(QDF_WMI_MTRACE_GRP_ID(message_id) <<
		 QDF_WMI_MTRACE_CMD_NUM_BITS);
	qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA,
		   mtrace_message_id, vdev_id, data);
}

/**
 * wmi_process_control_rx() - process fw events callbacks
 * @wmi_handle: handle to wmi_unified
 * @evt_buf: handle to wmi_buf_t; consumed on all paths
 *
 * Looks up the registered handler for the event and dispatches the buffer
 * to the execution context the handler was registered with.
 *
 * Return: none
 */
static void wmi_process_control_rx(struct wmi_unified *wmi_handle,
				   wmi_buf_t evt_buf)
{
	struct wmi_soc *soc = wmi_handle->soc;
	uint32_t id;
	uint32_t idx;
	enum wmi_rx_exec_ctx exec_ctx;

	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
	if (qdf_unlikely(idx == A_ERROR)) {
		wmi_debug("no handler registered for event id 0x%x", id);
		qdf_nbuf_free(evt_buf);
		return;
	}
	wmi_mtrace_rx(id, 0xFF, idx);
	qdf_spin_lock_bh(&soc->ctx_lock);
	exec_ctx = wmi_handle->ctx[idx];
	qdf_spin_unlock_bh(&soc->ctx_lock);

#ifdef WMI_INTERFACE_EVENT_LOGGING
	if (wmi_handle->log_info.wmi_logging_enable) {
		uint8_t *data;
		data = qdf_nbuf_data(evt_buf);

		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
		/* Exclude 4 bytes of TLV header */
		if (wmi_handle->ops->is_diag_event(id)) {
			WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id,
						 ((uint8_t *) data +
						  wmi_handle->soc->buf_offset_event));
		} else if (wmi_handle->ops->is_management_record(id)) {
			WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id,
						 ((uint8_t *) data +
						  wmi_handle->soc->buf_offset_event));
		} else {
			WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data +
					wmi_handle->soc->buf_offset_event));
		}
qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
	}
#endif

	/* Dispatch to the execution context chosen at registration time */
	if (exec_ctx == WMI_RX_WORK_CTX) {
		wmi_process_fw_event_worker_thread_ctx
					(wmi_handle, evt_buf);
	} else if (exec_ctx == WMI_RX_TASKLET_CTX) {
		wmi_process_fw_event(wmi_handle, evt_buf);
	} else if (exec_ctx == WMI_RX_SERIALIZER_CTX) {
		wmi_process_fw_event_sched_thread_ctx(wmi_handle, evt_buf);
	} else {
		WMI_LOGE("Invalid event context %d", exec_ctx);
		qdf_nbuf_free(evt_buf);
	}

}

/**
 * wmi_control_rx() - process fw events callbacks
 * @ctx: handle to wmi soc (registered as the HTC endpoint context)
 * @htc_packet: pointer to htc packet
 *
 * HTC rx completion callback: routes the packet to the pdev wmi handle
 * that owns the rx endpoint.
 *
 * Return: none
 */
static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
{
	struct wmi_soc *soc = (struct wmi_soc *)ctx;
	struct wmi_unified *wmi_handle;
	wmi_buf_t evt_buf;

	evt_buf = (wmi_buf_t)htc_packet->pPktContext;

	wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint);
	if (!wmi_handle) {
		WMI_LOGE
		("unable to get wmi_handle to Endpoint %d\n",
		 htc_packet->Endpoint);
		qdf_nbuf_free(evt_buf);
		return;
	}

	wmi_process_control_rx(wmi_handle, evt_buf);
}

#ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
/**
 * wmi_unified_cmd_send_over_qmi() - send a wmi command over the QMI channel
 * @wmi_handle: handle to wmi
 * @buf: command buffer (without WMI_CMD_HDR)
 * @buflen: length of @buf in bytes
 * @cmd_id: wmi command id
 *
 * Prepends a WMI_CMD_HDR and hands the message to the QMI transport.
 * On failure the header is stripped again so the caller can retry the same
 * buffer over HTC; on success the buffer is freed here.
 *
 * Return: QDF_STATUS_SUCCESS on success, else a QDF error code
 */
QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle,
					 wmi_buf_t buf, uint32_t buflen,
					 uint32_t cmd_id)
{
	QDF_STATUS status;
	int32_t ret;

	if (!qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR))) {
		wmi_err("Failed to send cmd %x, no memory", cmd_id);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
	wmi_debug("Sending WMI_CMD_ID: 0x%x over qmi", cmd_id);
	status = qdf_wmi_send_recv_qmi(qdf_nbuf_data(buf),
				       buflen + sizeof(WMI_CMD_HDR),
				       wmi_handle,
				       wmi_process_qmi_fw_event);
	if
(QDF_IS_STATUS_ERROR(status)) {
		/* Transport failed: restore the original buffer so the
		 * caller can fall back to sending over HTC.
		 */
		qdf_nbuf_pull_head(buf, sizeof(WMI_CMD_HDR));
		wmi_warn("WMI send on QMI failed. Retrying WMI on HTC");
	} else {
		ret = qdf_atomic_inc_return(&wmi_handle->num_stats_over_qmi);
		wmi_debug("num stats over qmi: %d", ret);
		wmi_buf_free(buf);
	}

	return status;
}

/* Copy a QMI-delivered event into an nbuf and run the normal rx path */
static int __wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
{
	struct wmi_unified *wmi_handle = wmi_cb_ctx;
	wmi_buf_t evt_buf;
	uint32_t evt_id;

	if (!wmi_handle || !buf)
		return -EINVAL;

	evt_buf = wmi_buf_alloc(wmi_handle, len);
	if (!evt_buf)
		return -ENOMEM;

	qdf_mem_copy(qdf_nbuf_data(evt_buf), buf, len);
	evt_id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
	wmi_debug("Received WMI_EVT_ID: %d over qmi", evt_id);
	/* wmi_process_control_rx() consumes evt_buf */
	wmi_process_control_rx(wmi_handle, evt_buf);

	return 0;
}

/* QMI rx callback: run the worker inside an operation-protect section */
int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
{
	struct qdf_op_sync *op_sync;
	int ret;

	if (qdf_op_protect(&op_sync))
		return -EINVAL;
	ret = __wmi_process_qmi_fw_event(wmi_cb_ctx, buf, len);
	qdf_op_unprotect(op_sync);

	return ret;
}
#endif

/**
 * wmi_process_fw_event() - process any fw event
 * @wmi_handle: wmi handle
 * @evt_buf: fw event buffer; consumed by __wmi_control_rx()
 *
 * This function process fw event in caller context
 *
 * Return: none
 */
void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
{
	__wmi_control_rx(wmi_handle, evt_buf);
}

/**
 * __wmi_control_rx() - process serialize wmi event callback
 * @wmi_handle: wmi handle
 * @evt_buf: fw event buffer; consumed (freed) on all paths
 *
 * Return: none
 */
void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
{
	uint32_t id;
	uint8_t *data;
	uint32_t len;
	void *wmi_cmd_struct_ptr = NULL;
#ifndef WMI_NON_TLV_SUPPORT
	int tlv_ok_status = 0;
#endif
	uint32_t idx = 0;

	id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);

	wmi_ext_dbg_msg_event_record(wmi_handle, qdf_nbuf_data(evt_buf),
				     qdf_nbuf_len(evt_buf));

	/* Strip the WMI_CMD_HDR before handing the payload to the handler */
	if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
		goto end;

	data = qdf_nbuf_data(evt_buf);
	len = qdf_nbuf_len(evt_buf);

#ifndef WMI_NON_TLV_SUPPORT
	if (wmi_handle->target_type == WMI_TLV_TARGET) {
		/* Validate and pad(if necessary) the TLVs */
		tlv_ok_status =
			wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle,
								 data, len, id,
								 &wmi_cmd_struct_ptr);
		if (tlv_ok_status != 0) {
			QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
				  "%s: Error: id=0x%x, wmitlv check status=%d",
				  __func__, id, tlv_ok_status);
			goto end;
		}
	}
#endif

	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
	if (idx == A_ERROR) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
			  "%s : event handler is not registered: event id 0x%x",
			  __func__, id);
		goto end;
	}
#ifdef WMI_INTERFACE_EVENT_LOGGING
	if (wmi_handle->log_info.wmi_logging_enable) {
		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
		/* Exclude 4 bytes of TLV header */
		if (wmi_handle->ops->is_diag_event(id)) {
			/*
			 * skip diag event logging in WMI event buffer
			 * as its already logged in WMI RX event buffer
			 */
		} else if (wmi_handle->ops->is_management_record(id)) {
			/*
			 * skip wmi mgmt event logging in WMI event buffer
			 * as its already logged in WMI RX event buffer
			 */
		} else {
			uint8_t *tmpbuf = (uint8_t *)data +
				wmi_handle->soc->buf_offset_event;

			WMI_EVENT_RECORD(wmi_handle, id, tmpbuf);
			wmi_specific_evt_record(wmi_handle, id, tmpbuf);
		}

		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
	}
#endif
	/* Call the WMI registered event handler */
	if (wmi_handle->target_type == WMI_TLV_TARGET)
		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
						wmi_cmd_struct_ptr, len);
	else
		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
						data, len);

end:
	/* Free event buffer and allocated event tlv */
#ifndef WMI_NON_TLV_SUPPORT
	if (wmi_handle->target_type == WMI_TLV_TARGET)
		wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr);
#endif

	qdf_nbuf_free(evt_buf);

}

/* Per-event watchdog budget for the rx work queue */
#define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */

/* Log which message type exceeded its processing budget */
static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id)
{
	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
		  "%s: WLAN_BUG_RCA: Message type %x has exceeded its alloted time of %ds",
		  __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000);
}

#ifdef CONFIG_SLUB_DEBUG_ON
/* Debug builds: dump the stuck thread's trace and deliberately crash */
static void wmi_workqueue_watchdog_bite(void *arg)
{
	struct wmi_wq_dbg_info *info = arg;

	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
	qdf_print_thread_trace(info->task);

	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
		  "%s: Going down for WMI WQ Watchdog Bite!", __func__);
	QDF_BUG(0);
}
#else
/* Production builds: warn only */
static inline void wmi_workqueue_watchdog_bite(void *arg)
{
	struct wmi_wq_dbg_info *info = arg;

	wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
}
#endif

/**
 * wmi_rx_event_work() - process rx event in rx work queue context
 * @arg: opaque pointer to wmi handle
 *
 * This function process any fw event to serialize it through rx worker thread.
*
 * Return: none
 */
static void wmi_rx_event_work(void *arg)
{
	wmi_buf_t buf;
	struct wmi_unified *wmi = arg;
	qdf_timer_t wd_timer;
	struct wmi_wq_dbg_info info;

	/* initialize WMI workqueue watchdog timer */
	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
		       &info, QDF_TIMER_TYPE_SW);
	qdf_spin_lock_bh(&wmi->eventq_lock);
	buf = qdf_nbuf_queue_remove(&wmi->event_queue);
	qdf_spin_unlock_bh(&wmi->eventq_lock);
	while (buf) {
		/* Arm the watchdog around each event; if processing exceeds
		 * WMI_WQ_WD_TIMEOUT the watchdog fires with info filled in.
		 */
		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
		info.wd_msg_type_id =
		   WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
		info.wmi_wq = wmi->wmi_rx_work_queue;
		info.task = qdf_get_current_task();
		__wmi_control_rx(wmi, buf);
		qdf_timer_stop(&wd_timer);
		qdf_spin_lock_bh(&wmi->eventq_lock);
		buf = qdf_nbuf_queue_remove(&wmi->event_queue);
		qdf_spin_unlock_bh(&wmi->eventq_lock);
	}
	qdf_timer_free(&wd_timer);
}

#ifdef FEATURE_RUNTIME_PM
/**
 * wmi_runtime_pm_init() - initialize runtime pm wmi variables
 * @wmi_handle: wmi context
 */
static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
{
	qdf_atomic_init(&wmi_handle->runtime_pm_inprogress);
}

/**
 * wmi_set_runtime_pm_inprogress() - set runtime pm progress flag
 * @wmi_handle: wmi context
 * @val: runtime pm progress flag
 */
void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val)
{
	qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val);
}

/**
 * wmi_get_runtime_pm_inprogress() - get runtime pm progress flag
 * @wmi_handle: wmi context
 */
inline bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle)
{
	return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress);
}
#else
/* Runtime PM disabled: nothing to initialize */
static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
{
}
#endif

/**
* wmi_unified_get_soc_handle: Get WMI SoC handle
 * @param wmi_handle: WMI context got from wmi_attach
 *
 * return: Pointer to Soc handle
 */
void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle)
{
	return wmi_handle->soc;
}

/**
 * wmi_interface_logging_init: Interface looging init
 * @param wmi_handle: Pointer to wmi handle object
 * @param pdev_idx: pdev index used to name the debugfs entries
 *
 * return: None
 */
#ifdef WMI_INTERFACE_EVENT_LOGGING
static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
					      uint32_t pdev_idx)
{
	if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) {
		qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
		wmi_debugfs_init(wmi_handle, pdev_idx);
	}
}
#else
static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
					      uint32_t pdev_idx)
{
}
#endif

/**
 * wmi_unified_get_pdev_handle: Get WMI SoC handle
 * @param wmi_soc: Pointer to wmi soc object
 * @param pdev_idx: pdev index
 *
 * Allocates (on first use) and returns the per-pdev wmi handle; the
 * handle aliases the soc-wide tables (event ids, handlers, id maps).
 *
 * return: Pointer to wmi handle or NULL on failure
 */
void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx)
{
	struct wmi_unified *wmi_handle;

	if (pdev_idx >= WMI_MAX_RADIOS)
		return NULL;

	if (!soc->wmi_pdev[pdev_idx]) {
		wmi_handle =
			(struct wmi_unified *) qdf_mem_malloc(
					sizeof(struct wmi_unified));
		if (!wmi_handle)
			return NULL;

		/* Per-pdev handle shares the soc-wide tables */
		wmi_handle->scn_handle = soc->scn_handle;
		wmi_handle->event_id = soc->event_id;
		wmi_handle->event_handler = soc->event_handler;
		wmi_handle->ctx = soc->ctx;
		wmi_handle->ops = soc->ops;
		qdf_spinlock_create(&wmi_handle->eventq_lock);
		qdf_nbuf_queue_init(&wmi_handle->event_queue);

		qdf_create_work(0, &wmi_handle->rx_event_work,
				wmi_rx_event_work, wmi_handle);
		wmi_handle->wmi_rx_work_queue =
			qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue");
		if (!wmi_handle->wmi_rx_work_queue) {
			/* NOTE(review): this path frees wmi_handle but does
			 * not destroy the eventq_lock created above —
			 * confirm whether qdf_spinlock_destroy is required.
			 */
			WMI_LOGE("failed to create wmi_rx_event_work_queue");
			goto error;
		}
		wmi_handle->wmi_events = soc->wmi_events;
		wmi_handle->services = soc->services;
		wmi_handle->soc = soc;
		wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
		wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
		wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
		wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
		wmi_interface_logging_init(wmi_handle, pdev_idx);
		qdf_atomic_init(&wmi_handle->pending_cmds);
		qdf_atomic_init(&wmi_handle->is_target_suspended);
		wmi_handle->target_type = soc->target_type;
		wmi_handle->wmi_max_cmds = soc->wmi_max_cmds;

		soc->wmi_pdev[pdev_idx] = wmi_handle;
	} else
		wmi_handle = soc->wmi_pdev[pdev_idx];

	/* Refresh per-attach state on every lookup */
	wmi_handle->wmi_stopinprogress = 0;
	wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx];
	wmi_handle->htc_handle = soc->htc_handle;
	wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx];
	wmi_handle->tag_crash_inject = false;

	return wmi_handle;

error:
	qdf_mem_free(wmi_handle);

	return NULL;
}
qdf_export_symbol(wmi_unified_get_pdev_handle);

/* Per-target-type attach hooks, filled by wmi_unified_register_module() */
static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t);

/* Record the attach callback for @target_type (e.g. TLV vs non-TLV) */
void wmi_unified_register_module(enum wmi_target_type target_type,
				 void (*wmi_attach)(wmi_unified_t wmi_handle))
{
	if (target_type < WMI_MAX_TARGET_TYPE)
		wmi_attach_register[target_type] = wmi_attach;

	return;
}
qdf_export_symbol(wmi_unified_register_module);

/**
 * wmi_wbuff_register() - register wmi with wbuff
 * @wmi_handle: handle to wmi
 *
 * @Return: void
 */
static void wmi_wbuff_register(struct wmi_unified *wmi_handle)
{
	struct wbuff_alloc_request wbuff_alloc[4];

	wbuff_alloc[0].slot =
WBUFF_POOL_0;
	wbuff_alloc[0].size = WMI_WBUFF_POOL_0_SIZE;
	wbuff_alloc[1].slot = WBUFF_POOL_1;
	wbuff_alloc[1].size = WMI_WBUFF_POOL_1_SIZE;
	wbuff_alloc[2].slot = WBUFF_POOL_2;
	wbuff_alloc[2].size = WMI_WBUFF_POOL_2_SIZE;
	wbuff_alloc[3].slot = WBUFF_POOL_3;
	wbuff_alloc[3].size = WMI_WBUFF_POOL_3_SIZE;

	/* Pool sizes are counts of buffers per slot (see WMI_WBUFF_POOL_*) */
	wmi_handle->wbuff_handle = wbuff_module_register(wbuff_alloc, 4,
							 WMI_MIN_HEAD_ROOM, 4);
}

/**
 * wmi_wbuff_deregister() - deregister wmi with wbuff
 * @wmi_handle: handle to wmi
 *
 * @Return: void
 */
static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle)
{
	wbuff_module_deregister(wmi_handle->wbuff_handle);
	wmi_handle->wbuff_handle = NULL;
}

/**
 * wmi_unified_attach() - attach for unified WMI
 * @scn_handle: handle to SCN
 * @osdev: OS device context
 * @target_type: TLV or not-TLV based target
 * @use_cookie: cookie based allocation enabled/disabled
 * @ops: umac rx callbacks
 * @psoc: objmgr psoc
 *
 * @Return: wmi handle.
*/
void *wmi_unified_attach(void *scn_handle,
			 struct wmi_unified_attach_params *param)
{
	struct wmi_unified *wmi_handle;
	struct wmi_soc *soc;

	soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc));
	if (!soc)
		return NULL;

	wmi_handle =
		(struct wmi_unified *) qdf_mem_malloc(
			sizeof(struct wmi_unified));
	if (!wmi_handle) {
		qdf_mem_free(soc);
		return NULL;
	}
	/* The pdev0 handle aliases the soc-wide tables */
	wmi_handle->soc = soc;
	wmi_handle->soc->soc_idx = param->soc_id;
	wmi_handle->soc->is_async_ep = param->is_async_ep;
	wmi_handle->event_id = soc->event_id;
	wmi_handle->event_handler = soc->event_handler;
	wmi_handle->ctx = soc->ctx;
	wmi_handle->wmi_events = soc->wmi_events;
	wmi_handle->services = soc->services;
	wmi_handle->scn_handle = scn_handle;
	wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
	wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
	wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
	wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
	soc->scn_handle = scn_handle;
	qdf_atomic_init(&wmi_handle->pending_cmds);
	qdf_atomic_init(&wmi_handle->is_target_suspended);
	qdf_atomic_init(&wmi_handle->num_stats_over_qmi);
	wmi_runtime_pm_init(wmi_handle);
	qdf_spinlock_create(&wmi_handle->eventq_lock);
	qdf_nbuf_queue_init(&wmi_handle->event_queue);
	qdf_create_work(0, &wmi_handle->rx_event_work,
			wmi_rx_event_work, wmi_handle);
	wmi_handle->wmi_rx_work_queue =
		qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue");
	if (!wmi_handle->wmi_rx_work_queue) {
		WMI_LOGE("failed to create wmi_rx_event_work_queue");
		goto error;
	}
	wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0);
	wmi_handle->target_type = param->target_type;
	soc->target_type = param->target_type;

	/* NOTE(review): the error paths below free soc/wmi_handle but do
	 * not destroy the spinlock or work queue created above — confirm
	 * whether explicit cleanup is required on these paths.
	 */
	if (param->target_type >= WMI_MAX_TARGET_TYPE)
		goto error;

	if (wmi_attach_register[param->target_type]) {
		wmi_attach_register[param->target_type](wmi_handle);
	} else {
		WMI_LOGE("wmi attach is not registered");
		goto error;
	}
	/* Assign target cookie capablity */
	wmi_handle->use_cookie = param->use_cookie;
	wmi_handle->osdev = param->osdev;
	wmi_handle->wmi_stopinprogress = 0;
	wmi_handle->wmi_max_cmds = param->max_commands;
	soc->wmi_max_cmds = param->max_commands;
	/* Increase the ref count once refcount infra is present */
	soc->wmi_psoc = param->psoc;
	qdf_spinlock_create(&soc->ctx_lock);

	soc->ops = wmi_handle->ops;
	soc->wmi_pdev[0] = wmi_handle;
	if (wmi_ext_dbgfs_init(wmi_handle) != QDF_STATUS_SUCCESS)
		WMI_LOGE("failed to initialize wmi extended debugfs");

	wmi_wbuff_register(wmi_handle);

	wmi_hang_event_notifier_register(wmi_handle);

	return wmi_handle;

error:
	qdf_mem_free(soc);
	qdf_mem_free(wmi_handle);

	return NULL;
}

/**
 * wmi_unified_detach() - detach for unified WMI
 *
 * @wmi_handle : handle to wmi.
 *
 * @Return: none.
*/
void wmi_unified_detach(struct wmi_unified *wmi_handle)
{
	wmi_buf_t buf;
	struct wmi_soc *soc;
	uint8_t i;

	wmi_hang_event_notifier_unregister();

	wmi_wbuff_deregister(wmi_handle);

	wmi_ext_dbgfs_deinit(wmi_handle);

	soc = wmi_handle->soc;
	/* Tear down every per-pdev handle, then the shared soc state */
	for (i = 0; i < WMI_MAX_RADIOS; i++) {
		if (soc->wmi_pdev[i]) {
			qdf_flush_workqueue(0,
					    soc->wmi_pdev[i]->wmi_rx_work_queue);
			qdf_destroy_workqueue(0,
					      soc->wmi_pdev[i]->wmi_rx_work_queue);
			wmi_debugfs_remove(soc->wmi_pdev[i]);
			/* Drain any events still queued for the worker */
			buf = qdf_nbuf_queue_remove(
					&soc->wmi_pdev[i]->event_queue);
			while (buf) {
				qdf_nbuf_free(buf);
				buf = qdf_nbuf_queue_remove(
						&soc->wmi_pdev[i]->event_queue);
			}

			wmi_log_buffer_free(soc->wmi_pdev[i]);

			/* Free events logs list */
			if (soc->wmi_pdev[i]->events_logs_list)
				qdf_mem_free(
					soc->wmi_pdev[i]->events_logs_list);

			qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock);
			qdf_mem_free(soc->wmi_pdev[i]);
		}
	}
	qdf_spinlock_destroy(&soc->ctx_lock);

	if (soc->wmi_service_bitmap) {
		qdf_mem_free(soc->wmi_service_bitmap);
		soc->wmi_service_bitmap = NULL;
	}

	if (soc->wmi_ext_service_bitmap) {
		qdf_mem_free(soc->wmi_ext_service_bitmap);
		soc->wmi_ext_service_bitmap = NULL;
	}

	if (soc->wmi_ext2_service_bitmap) {
		qdf_mem_free(soc->wmi_ext2_service_bitmap);
		soc->wmi_ext2_service_bitmap = NULL;
	}

	/* Decrease the ref count once refcount infra is present */
	soc->wmi_psoc = NULL;
	qdf_mem_free(soc);
}

/**
 * wmi_unified_remove_work() - detach for WMI work
 * @wmi_handle: handle to WMI
 *
 * A function that does not fully detach WMI, but just remove work
 * queue items associated with it.
This is used to make sure that
 * before any other processing code that may destroy related contexts
 * (HTC, etc), work queue processing on WMI has already been stopped.
 *
 * Return: None
 */
void
wmi_unified_remove_work(struct wmi_unified *wmi_handle)
{
	wmi_buf_t buf;

	qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue);
	/* The worker is idle now; free whatever is still queued */
	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
	buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
	while (buf) {
		qdf_nbuf_free(buf);
		buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
	}
	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
}

/**
 * wmi_htc_tx_complete() - Process htc tx completion
 *
 * @ctx: handle to wmi soc
 * @htc_pkt: pointer to htc packet
 *
 * @Return: none.
 */
static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
{
	struct wmi_soc *soc = (struct wmi_soc *) ctx;
	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
	u_int8_t *buf_ptr;
	u_int32_t len;
	struct wmi_unified *wmi_handle;
#ifdef WMI_INTERFACE_EVENT_LOGGING
	uint32_t cmd_id;
#endif

	ASSERT(wmi_cmd_buf);
	wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint);
	if (!wmi_handle) {
		WMI_LOGE("%s: Unable to get wmi handle\n", __func__);
		QDF_ASSERT(0);
		return;
	}
#ifdef WMI_INTERFACE_EVENT_LOGGING
	/* NOTE(review): wmi_handle cannot be NULL here (checked above) */
	if (wmi_handle && wmi_handle->log_info.wmi_logging_enable) {
		cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf),
				       WMI_CMD_HDR, COMMANDID);

		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
		/* Record 16 bytes of WMI cmd tx complete data
		   - exclude TLV and WMI headers */
		if (wmi_handle->ops->is_management_record(cmd_id)) {
			WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
				qdf_nbuf_data(wmi_cmd_buf) +
				wmi_handle->soc->buf_offset_command);
		} else {
			WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
				qdf_nbuf_data(wmi_cmd_buf) +
				wmi_handle->soc->buf_offset_command);
		}

		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
	}
#endif
	/* Scrub the command contents before returning the buffer */
	buf_ptr = (u_int8_t *) wmi_buf_data(wmi_cmd_buf);
	len = qdf_nbuf_len(wmi_cmd_buf);
	qdf_mem_zero(buf_ptr, len);
	wmi_buf_free(wmi_cmd_buf);
	qdf_mem_free(htc_pkt);
	qdf_atomic_dec(&wmi_handle->pending_cmds);
}

#ifdef FEATURE_RUNTIME_PM
/**
 * wmi_htc_log_pkt() - Print information of WMI command from HTC packet
 *
 * @ctx: handle of WMI context
 * @htc_pkt: handle of HTC packet
 *
 * @Return: none
 */
static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
{
	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
	uint32_t cmd_id;

	ASSERT(wmi_cmd_buf);
	cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), WMI_CMD_HDR,
			       COMMANDID);

	WMI_LOGD("WMI command from HTC packet: %s, ID: %d\n",
		 wmi_id_to_name(cmd_id), cmd_id);
}
#else
static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
{
}
#endif

/**
 * wmi_connect_pdev_htc_service() - WMI API to get connect to HTC service
 *
 * @wmi_handle: handle to WMI.
* @pdev_idx: Pdev index
 *
 * @Return: QDF_STATUS
 */
static QDF_STATUS wmi_connect_pdev_htc_service(struct wmi_soc *soc,
					       uint32_t pdev_idx)
{
	QDF_STATUS status;
	struct htc_service_connect_resp response;
	struct htc_service_connect_req connect;

	OS_MEMZERO(&connect, sizeof(connect));
	OS_MEMZERO(&response, sizeof(response));

	/* meta data is unused for now */
	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	/* these fields are the same for all service endpoints */
	connect.EpCallbacks.pContext = soc;
	connect.EpCallbacks.EpTxCompleteMultiple =
		NULL /* Control path completion ar6000_tx_complete */;
	connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */;
	connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */;
	connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */;
	connect.EpCallbacks.EpTxComplete =
		wmi_htc_tx_complete /* ar6000_tx_queue_full */;
	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;

	/* connect to control service */
	connect.service_id = soc->svc_ids[pdev_idx];
	status = htc_connect_service(soc->htc_handle, &connect, &response);

	if (QDF_IS_STATUS_ERROR(status)) {
		WMI_LOGE("Failed to connect to WMI CONTROL service status:%d\n",
			 status);
		return status;
	}

	if (soc->is_async_ep)
		htc_set_async_ep(soc->htc_handle, response.Endpoint, true);

	/* Remember the endpoint/limits HTC assigned to this pdev */
	soc->wmi_endpoint_id[pdev_idx] = response.Endpoint;
	soc->max_msg_len[pdev_idx] = response.MaxMsgLength;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle,
				HTC_HANDLE htc_handle)
{
	uint32_t i;
	uint8_t wmi_ep_count;

	wmi_handle->soc->htc_handle = htc_handle;

	wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle);
	if (wmi_ep_count > WMI_MAX_RADIOS)
		return
QDF_STATUS_E_FAULT; 3061 3062 for (i = 0; i < wmi_ep_count; i++) 3063 wmi_connect_pdev_htc_service(wmi_handle->soc, i); 3064 3065 wmi_handle->htc_handle = htc_handle; 3066 wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0]; 3067 wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0]; 3068 3069 return QDF_STATUS_SUCCESS; 3070 } 3071 3072 /** 3073 * wmi_get_host_credits() - WMI API to get updated host_credits 3074 * 3075 * @wmi_handle: handle to WMI. 3076 * 3077 * @Return: updated host_credits. 3078 */ 3079 int wmi_get_host_credits(wmi_unified_t wmi_handle) 3080 { 3081 int host_credits = 0; 3082 3083 htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle, 3084 &host_credits); 3085 return host_credits; 3086 } 3087 3088 /** 3089 * wmi_get_pending_cmds() - WMI API to get WMI Pending Commands in the HTC 3090 * queue 3091 * 3092 * @wmi_handle: handle to WMI. 3093 * 3094 * @Return: Pending Commands in the HTC queue. 3095 */ 3096 int wmi_get_pending_cmds(wmi_unified_t wmi_handle) 3097 { 3098 return qdf_atomic_read(&wmi_handle->pending_cmds); 3099 } 3100 3101 /** 3102 * wmi_set_target_suspend() - WMI API to set target suspend state 3103 * 3104 * @wmi_handle: handle to WMI. 3105 * @val: suspend state boolean. 3106 * 3107 * @Return: none. 3108 */ 3109 void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val) 3110 { 3111 qdf_atomic_set(&wmi_handle->is_target_suspended, val); 3112 } 3113 3114 /** 3115 * wmi_is_target_suspended() - WMI API to check target suspend state 3116 * @wmi_handle: handle to WMI. 3117 * 3118 * WMI API to check target suspend state 3119 * 3120 * Return: true if target is suspended, else false. 
 */
bool wmi_is_target_suspended(struct wmi_unified *wmi_handle)
{
	return qdf_atomic_read(&wmi_handle->is_target_suspended);
}

#ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
/* Set the is_qmi_stats_enabled flag on the WMI handle */
void wmi_set_qmi_stats(wmi_unified_t wmi_handle, bool val)
{
	wmi_handle->is_qmi_stats_enabled = val;
}

/* @Return: current value of the is_qmi_stats_enabled flag */
bool wmi_is_qmi_stats_enabled(struct wmi_unified *wmi_handle)
{
	return wmi_handle->is_qmi_stats_enabled;
}
#endif

/**
 * WMI API to set crash injection state
 * @param wmi_handle: handle to WMI.
 * @param flag: crash injection state boolean.
 */
void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag)
{
	wmi_handle->tag_crash_inject = flag;
}

/**
 * WMI API to set bus suspend state
 * @param wmi_handle: handle to WMI.
 * @param val: suspend state boolean.
 */
void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val)
{
	qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val);
}

/* Set the tgt_force_assert_enable flag on the WMI handle */
void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val)
{
	wmi_handle->tgt_force_assert_enable = val;
}

/**
 * wmi_stop() - generic function to block unified WMI command
 * @wmi_handle: handle to WMI.
 *
 * @Return: success always.
 */
int
wmi_stop(wmi_unified_t wmi_handle)
{
	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
		  "WMI Stop");
	wmi_handle->wmi_stopinprogress = 1;
	return 0;
}

/**
 * wmi_start() - generic function to allow unified WMI command
 * @wmi_handle: handle to WMI.
 *
 * @Return: success always.
 */
int
wmi_start(wmi_unified_t wmi_handle)
{
	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
		  "WMI Start");
	wmi_handle->wmi_stopinprogress = 0;
	return 0;
}

/**
 * wmi_is_blocked() - generic function to check if WMI is blocked
 * @wmi_handle: handle to WMI.
 *
 * @Return: true, if blocked, false if not blocked
 */
bool
wmi_is_blocked(wmi_unified_t wmi_handle)
{
	/* double negation normalizes the flag to a strict 0/1 boolean */
	return (!(!wmi_handle->wmi_stopinprogress));
}

/**
 * API to flush all the previous packets associated with the wmi endpoint
 *
 * @param wmi_handle : handle to WMI.
 */
void
wmi_flush_endpoint(wmi_unified_t wmi_handle)
{
	htc_flush_endpoint(wmi_handle->htc_handle,
			   wmi_handle->wmi_endpoint_id, 0);
}
qdf_export_symbol(wmi_flush_endpoint);

/**
 * wmi_pdev_id_conversion_enable() - API to enable pdev_id/phy_id conversion
 *                                   in WMI. By default pdev_id conversion is
 *                                   not done in WMI. This API can be used to
 *                                   enable conversion in WMI.
 * @param wmi_handle : handle to WMI
 * @param pdev_id_map : pointer to pdev_id map
 * @size : size of pdev_id_map
 * Return none
 */
void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle,
				   uint32_t *pdev_id_map,
				   uint8_t size)
{
	/* The conversion hook is only populated for TLV targets */
	if (wmi_handle->target_type == WMI_TLV_TARGET)
		wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle,
							       pdev_id_map,
							       size);
}