1 /* 2 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /* 20 * Host WMI unified implementation 21 */ 22 #include "htc_api.h" 23 #include "htc_api.h" 24 #include "wmi_unified_priv.h" 25 #include "wmi_unified_api.h" 26 #include "qdf_module.h" 27 28 #ifndef WMI_NON_TLV_SUPPORT 29 #include "wmi_tlv_helper.h" 30 #endif 31 32 #include <linux/debugfs.h> 33 34 /* This check for CONFIG_WIN temporary added due to redeclaration compilation 35 error in MCL. Error is caused due to inclusion of wmi.h in wmi_unified_api.h 36 which gets included here through ol_if_athvar.h. Eventually it is expected that 37 wmi.h will be removed from wmi_unified_api.h after cleanup, which will need 38 WMI_CMD_HDR to be defined here. 
*/ 39 #ifdef CONFIG_WIN 40 /* Copied from wmi.h */ 41 #undef MS 42 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) 43 #undef SM 44 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) 45 #undef WO 46 #define WO(_f) ((_f##_OFFSET) >> 2) 47 48 #undef GET_FIELD 49 #define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f) 50 #undef SET_FIELD 51 #define SET_FIELD(_addr, _f, _val) \ 52 (*((uint32_t *)(_addr) + WO(_f)) = \ 53 (*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f)) 54 55 #define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \ 56 GET_FIELD(_msg_buf, _msg_type ## _ ## _f) 57 58 #define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \ 59 SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val) 60 61 #define WMI_EP_APASS 0x0 62 #define WMI_EP_LPASS 0x1 63 #define WMI_EP_SENSOR 0x2 64 65 /* 66 * * Control Path 67 * */ 68 typedef PREPACK struct { 69 uint32_t commandId:24, 70 reserved:2, /* used for WMI endpoint ID */ 71 plt_priv:6; /* platform private */ 72 } POSTPACK WMI_CMD_HDR; /* used for commands and events */ 73 74 #define WMI_CMD_HDR_COMMANDID_LSB 0 75 #define WMI_CMD_HDR_COMMANDID_MASK 0x00ffffff 76 #define WMI_CMD_HDR_COMMANDID_OFFSET 0x00000000 77 #define WMI_CMD_HDR_WMI_ENDPOINTID_MASK 0x03000000 78 #define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET 24 79 #define WMI_CMD_HDR_PLT_PRIV_LSB 24 80 #define WMI_CMD_HDR_PLT_PRIV_MASK 0xff000000 81 #define WMI_CMD_HDR_PLT_PRIV_OFFSET 0x00000000 82 /* end of copy wmi.h */ 83 #endif /* CONFIG_WIN */ 84 85 #define WMI_MIN_HEAD_ROOM 64 86 87 #ifdef WMI_INTERFACE_EVENT_LOGGING 88 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)) 89 /* TODO Cleanup this backported function */ 90 static int wmi_bp_seq_printf(struct seq_file *m, const char *f, ...) 91 { 92 va_list args; 93 94 va_start(args, f); 95 seq_vprintf(m, f, args); 96 va_end(args); 97 98 return 0; 99 } 100 #else 101 #define wmi_bp_seq_printf(m, fmt, ...) 
seq_printf((m), fmt, ##__VA_ARGS__) 102 #endif 103 104 #ifndef MAX_WMI_INSTANCES 105 #define CUSTOM_MGMT_CMD_DATA_SIZE 4 106 #endif 107 108 #ifdef CONFIG_MCL 109 /* WMI commands */ 110 uint32_t g_wmi_command_buf_idx = 0; 111 struct wmi_command_debug wmi_command_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 112 113 /* WMI commands TX completed */ 114 uint32_t g_wmi_command_tx_cmp_buf_idx = 0; 115 struct wmi_command_debug 116 wmi_command_tx_cmp_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 117 118 /* WMI events when processed */ 119 uint32_t g_wmi_event_buf_idx = 0; 120 struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 121 122 /* WMI events when queued */ 123 uint32_t g_wmi_rx_event_buf_idx = 0; 124 struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; 125 #endif 126 127 #define WMI_COMMAND_RECORD(h, a, b) { \ 128 if (wmi_log_max_entry <= \ 129 *(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)) \ 130 *(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\ 131 ((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\ 132 [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\ 133 .command = a; \ 134 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ 135 wmi_command_log_buf_info.buf) \ 136 [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\ 137 b, wmi_record_max_length); \ 138 ((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\ 139 [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\ 140 time = qdf_get_log_timestamp(); \ 141 (*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++; \ 142 h->log_info.wmi_command_log_buf_info.length++; \ 143 } 144 145 #define WMI_COMMAND_TX_CMP_RECORD(h, a, b) { \ 146 if (wmi_log_max_entry <= \ 147 *(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\ 148 *(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 149 p_buf_tail_idx) = 0; \ 150 ((struct wmi_command_debug *)h->log_info. 
\ 151 wmi_command_tx_cmp_log_buf_info.buf) \ 152 [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 153 p_buf_tail_idx)]. \ 154 command = a; \ 155 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ 156 wmi_command_tx_cmp_log_buf_info.buf) \ 157 [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 158 p_buf_tail_idx)]. \ 159 data, b, wmi_record_max_length); \ 160 ((struct wmi_command_debug *)h->log_info. \ 161 wmi_command_tx_cmp_log_buf_info.buf) \ 162 [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ 163 p_buf_tail_idx)]. \ 164 time = qdf_get_log_timestamp(); \ 165 (*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\ 166 h->log_info.wmi_command_tx_cmp_log_buf_info.length++; \ 167 } 168 169 #define WMI_EVENT_RECORD(h, a, b) { \ 170 if (wmi_log_max_entry <= \ 171 *(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)) \ 172 *(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\ 173 ((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\ 174 [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)]. \ 175 event = a; \ 176 qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \ 177 wmi_event_log_buf_info.buf) \ 178 [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\ 179 wmi_record_max_length); \ 180 ((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\ 181 [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\ 182 qdf_get_log_timestamp(); \ 183 (*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++; \ 184 h->log_info.wmi_event_log_buf_info.length++; \ 185 } 186 187 #define WMI_RX_EVENT_RECORD(h, a, b) { \ 188 if (wmi_log_max_entry <= \ 189 *(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\ 190 *(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\ 191 ((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\ 192 [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ 193 event = a; \ 194 qdf_mem_copy(((struct wmi_event_debug *)h->log_info. 
\ 195 wmi_rx_event_log_buf_info.buf) \ 196 [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ 197 data, b, wmi_record_max_length); \ 198 ((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\ 199 [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ 200 time = qdf_get_log_timestamp(); \ 201 (*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++; \ 202 h->log_info.wmi_rx_event_log_buf_info.length++; \ 203 } 204 205 #ifdef CONFIG_MCL 206 uint32_t g_wmi_mgmt_command_buf_idx = 0; 207 struct 208 wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; 209 210 /* wmi_mgmt commands TX completed */ 211 uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0; 212 struct wmi_command_debug 213 wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; 214 215 /* wmi_mgmt events when processed */ 216 uint32_t g_wmi_mgmt_event_buf_idx = 0; 217 struct wmi_event_debug 218 wmi_mgmt_event_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; 219 #endif 220 221 #define WMI_MGMT_COMMAND_RECORD(h, a, b) { \ 222 if (wmi_mgmt_log_max_entry <= \ 223 *(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \ 224 *(h->log_info.wmi_mgmt_command_log_buf_info. \ 225 p_buf_tail_idx) = 0; \ 226 ((struct wmi_command_debug *)h->log_info. \ 227 wmi_mgmt_command_log_buf_info.buf) \ 228 [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ 229 command = a; \ 230 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ 231 wmi_mgmt_command_log_buf_info.buf) \ 232 [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ 233 data, b, \ 234 wmi_record_max_length); \ 235 ((struct wmi_command_debug *)h->log_info. 
\ 236 wmi_mgmt_command_log_buf_info.buf) \ 237 [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ 238 time = qdf_get_log_timestamp(); \ 239 (*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\ 240 h->log_info.wmi_mgmt_command_log_buf_info.length++; \ 241 } 242 243 #define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) { \ 244 if (wmi_mgmt_log_max_entry <= \ 245 *(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 246 p_buf_tail_idx)) \ 247 *(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 248 p_buf_tail_idx) = 0; \ 249 ((struct wmi_command_debug *)h->log_info. \ 250 wmi_mgmt_command_tx_cmp_log_buf_info.buf) \ 251 [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 252 p_buf_tail_idx)].command = a; \ 253 qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ 254 wmi_mgmt_command_tx_cmp_log_buf_info.buf)\ 255 [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 256 p_buf_tail_idx)].data, b, \ 257 wmi_record_max_length); \ 258 ((struct wmi_command_debug *)h->log_info. \ 259 wmi_mgmt_command_tx_cmp_log_buf_info.buf) \ 260 [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 261 p_buf_tail_idx)].time = \ 262 qdf_get_log_timestamp(); \ 263 (*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ 264 p_buf_tail_idx))++; \ 265 h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++; \ 266 } 267 268 #define WMI_MGMT_EVENT_RECORD(h, a, b) { \ 269 if (wmi_mgmt_log_max_entry <= \ 270 *(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\ 271 *(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\ 272 ((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\ 273 [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\ 274 .event = a; \ 275 qdf_mem_copy(((struct wmi_event_debug *)h->log_info. 
\ 276 wmi_mgmt_event_log_buf_info.buf) \ 277 [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\ 278 data, b, wmi_record_max_length); \ 279 ((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\ 280 [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\ 281 time = qdf_get_log_timestamp(); \ 282 (*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++; \ 283 h->log_info.wmi_mgmt_event_log_buf_info.length++; \ 284 } 285 286 /* These are defined to made it as module param, which can be configured */ 287 uint32_t wmi_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY; 288 uint32_t wmi_mgmt_log_max_entry = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY; 289 uint32_t wmi_record_max_length = WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH; 290 uint32_t wmi_display_size = 100; 291 292 /** 293 * wmi_log_init() - Initialize WMI event logging 294 * @wmi_handle: WMI handle. 295 * 296 * Return: Initialization status 297 */ 298 #ifdef CONFIG_MCL 299 static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle) 300 { 301 struct wmi_log_buf_t *cmd_log_buf = 302 &wmi_handle->log_info.wmi_command_log_buf_info; 303 struct wmi_log_buf_t *cmd_tx_cmpl_log_buf = 304 &wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info; 305 306 struct wmi_log_buf_t *event_log_buf = 307 &wmi_handle->log_info.wmi_event_log_buf_info; 308 struct wmi_log_buf_t *rx_event_log_buf = 309 &wmi_handle->log_info.wmi_rx_event_log_buf_info; 310 311 struct wmi_log_buf_t *mgmt_cmd_log_buf = 312 &wmi_handle->log_info.wmi_mgmt_command_log_buf_info; 313 struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf = 314 &wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info; 315 struct wmi_log_buf_t *mgmt_event_log_buf = 316 &wmi_handle->log_info.wmi_mgmt_event_log_buf_info; 317 318 /* WMI commands */ 319 cmd_log_buf->length = 0; 320 cmd_log_buf->buf_tail_idx = 0; 321 cmd_log_buf->buf = wmi_command_log_buffer; 322 cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx; 323 cmd_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY; 324 325 /* WMI 
commands TX completed */ 326 cmd_tx_cmpl_log_buf->length = 0; 327 cmd_tx_cmpl_log_buf->buf_tail_idx = 0; 328 cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer; 329 cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx; 330 cmd_tx_cmpl_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY; 331 332 /* WMI events when processed */ 333 event_log_buf->length = 0; 334 event_log_buf->buf_tail_idx = 0; 335 event_log_buf->buf = wmi_event_log_buffer; 336 event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx; 337 event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY; 338 339 /* WMI events when queued */ 340 rx_event_log_buf->length = 0; 341 rx_event_log_buf->buf_tail_idx = 0; 342 rx_event_log_buf->buf = wmi_rx_event_log_buffer; 343 rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx; 344 rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY; 345 346 /* WMI Management commands */ 347 mgmt_cmd_log_buf->length = 0; 348 mgmt_cmd_log_buf->buf_tail_idx = 0; 349 mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer; 350 mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx; 351 mgmt_cmd_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY; 352 353 /* WMI Management commands Tx completed*/ 354 mgmt_cmd_tx_cmp_log_buf->length = 0; 355 mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0; 356 mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer; 357 mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx = 358 &g_wmi_mgmt_command_tx_cmp_buf_idx; 359 mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY; 360 361 /* WMI Management events when processed*/ 362 mgmt_event_log_buf->length = 0; 363 mgmt_event_log_buf->buf_tail_idx = 0; 364 mgmt_event_log_buf->buf = wmi_mgmt_event_log_buffer; 365 mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_event_buf_idx; 366 mgmt_event_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY; 367 368 qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock); 369 wmi_handle->log_info.wmi_logging_enable = 1; 370 371 return QDF_STATUS_SUCCESS; 372 } 373 #else 374 
static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle) 375 { 376 struct wmi_log_buf_t *cmd_log_buf = 377 &wmi_handle->log_info.wmi_command_log_buf_info; 378 struct wmi_log_buf_t *cmd_tx_cmpl_log_buf = 379 &wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info; 380 381 struct wmi_log_buf_t *event_log_buf = 382 &wmi_handle->log_info.wmi_event_log_buf_info; 383 struct wmi_log_buf_t *rx_event_log_buf = 384 &wmi_handle->log_info.wmi_rx_event_log_buf_info; 385 386 struct wmi_log_buf_t *mgmt_cmd_log_buf = 387 &wmi_handle->log_info.wmi_mgmt_command_log_buf_info; 388 struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf = 389 &wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info; 390 struct wmi_log_buf_t *mgmt_event_log_buf = 391 &wmi_handle->log_info.wmi_mgmt_event_log_buf_info; 392 393 wmi_handle->log_info.wmi_logging_enable = 0; 394 395 /* WMI commands */ 396 cmd_log_buf->length = 0; 397 cmd_log_buf->buf_tail_idx = 0; 398 cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc( 399 wmi_log_max_entry * sizeof(struct wmi_command_debug)); 400 cmd_log_buf->size = wmi_log_max_entry; 401 402 if (!cmd_log_buf->buf) { 403 qdf_print("no memory for WMI command log buffer..\n"); 404 return QDF_STATUS_E_NOMEM; 405 } 406 cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx; 407 408 /* WMI commands TX completed */ 409 cmd_tx_cmpl_log_buf->length = 0; 410 cmd_tx_cmpl_log_buf->buf_tail_idx = 0; 411 cmd_tx_cmpl_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc( 412 wmi_log_max_entry * sizeof(struct wmi_command_debug)); 413 cmd_tx_cmpl_log_buf->size = wmi_log_max_entry; 414 415 if (!cmd_tx_cmpl_log_buf->buf) { 416 qdf_print("no memory for WMI Command Tx Complete log buffer..\n"); 417 return QDF_STATUS_E_NOMEM; 418 } 419 cmd_tx_cmpl_log_buf->p_buf_tail_idx = 420 &cmd_tx_cmpl_log_buf->buf_tail_idx; 421 422 /* WMI events when processed */ 423 event_log_buf->length = 0; 424 event_log_buf->buf_tail_idx = 0; 425 event_log_buf->buf = (struct wmi_event_debug *) 
qdf_mem_malloc( 426 wmi_log_max_entry * sizeof(struct wmi_event_debug)); 427 event_log_buf->size = wmi_log_max_entry; 428 429 if (!event_log_buf->buf) { 430 qdf_print("no memory for WMI Event log buffer..\n"); 431 return QDF_STATUS_E_NOMEM; 432 } 433 event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx; 434 435 /* WMI events when queued */ 436 rx_event_log_buf->length = 0; 437 rx_event_log_buf->buf_tail_idx = 0; 438 rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc( 439 wmi_log_max_entry * sizeof(struct wmi_event_debug)); 440 rx_event_log_buf->size = wmi_log_max_entry; 441 442 if (!rx_event_log_buf->buf) { 443 qdf_print("no memory for WMI Event Rx log buffer..\n"); 444 return QDF_STATUS_E_NOMEM; 445 } 446 rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx; 447 448 /* WMI Management commands */ 449 mgmt_cmd_log_buf->length = 0; 450 mgmt_cmd_log_buf->buf_tail_idx = 0; 451 mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc( 452 wmi_mgmt_log_max_entry * sizeof(struct wmi_command_debug)); 453 mgmt_cmd_log_buf->size = wmi_mgmt_log_max_entry; 454 455 if (!mgmt_cmd_log_buf->buf) { 456 qdf_print("no memory for WMI Management Command log buffer..\n"); 457 return QDF_STATUS_E_NOMEM; 458 } 459 mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx; 460 461 /* WMI Management commands Tx completed*/ 462 mgmt_cmd_tx_cmp_log_buf->length = 0; 463 mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0; 464 mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *) 465 qdf_mem_malloc( 466 wmi_mgmt_log_max_entry * 467 sizeof(struct wmi_command_debug)); 468 mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_log_max_entry; 469 470 if (!mgmt_cmd_tx_cmp_log_buf->buf) { 471 qdf_print("no memory for WMI Management Command Tx complete log buffer..\n"); 472 return QDF_STATUS_E_NOMEM; 473 } 474 mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx = 475 &mgmt_cmd_tx_cmp_log_buf->buf_tail_idx; 476 477 /* WMI Management events when processed*/ 478 
mgmt_event_log_buf->length = 0; 479 mgmt_event_log_buf->buf_tail_idx = 0; 480 481 mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc( 482 wmi_mgmt_log_max_entry * 483 sizeof(struct wmi_event_debug)); 484 mgmt_event_log_buf->size = wmi_mgmt_log_max_entry; 485 486 if (!mgmt_event_log_buf->buf) { 487 qdf_print("no memory for WMI Management Event log buffer..\n"); 488 return QDF_STATUS_E_NOMEM; 489 } 490 mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx; 491 492 qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock); 493 wmi_handle->log_info.wmi_logging_enable = 1; 494 495 return QDF_STATUS_SUCCESS; 496 } 497 #endif 498 499 /** 500 * wmi_log_buffer_free() - Free all dynamic allocated buffer memory for 501 * event logging 502 * @wmi_handle: WMI handle. 503 * 504 * Return: None 505 */ 506 #ifndef CONFIG_MCL 507 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) 508 { 509 if (wmi_handle->log_info.wmi_command_log_buf_info.buf) 510 qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf); 511 if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf) 512 qdf_mem_free( 513 wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf); 514 if (wmi_handle->log_info.wmi_event_log_buf_info.buf) 515 qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf); 516 if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf) 517 qdf_mem_free( 518 wmi_handle->log_info.wmi_rx_event_log_buf_info.buf); 519 if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf) 520 qdf_mem_free( 521 wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf); 522 if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf) 523 qdf_mem_free( 524 wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf); 525 if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf) 526 qdf_mem_free( 527 wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf); 528 wmi_handle->log_info.wmi_logging_enable = 0; 529 
qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock); 530 } 531 #else 532 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) 533 { 534 /* Do Nothing */ 535 } 536 #endif 537 538 /** 539 * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer 540 * @log_buffer: the command log buffer metadata of the buffer to print 541 * @count: the maximum number of entries to print 542 * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper 543 * @print_priv: any data required by the print method, e.g. a file handle 544 * 545 * Return: None 546 */ 547 static void 548 wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count, 549 qdf_abstract_print *print, void *print_priv) 550 { 551 static const int data_len = 552 WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t); 553 char str[128]; 554 uint32_t idx; 555 556 if (count > log_buffer->size) 557 count = log_buffer->size; 558 if (count > log_buffer->length) 559 count = log_buffer->length; 560 561 /* subtract count from index, and wrap if necessary */ 562 idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count; 563 idx %= log_buffer->size; 564 565 print(print_priv, "Time (seconds) Cmd Id Payload"); 566 while (count) { 567 struct wmi_command_debug *cmd_log = (struct wmi_command_debug *) 568 &((struct wmi_command_debug *)log_buffer->buf)[idx]; 569 uint64_t secs, usecs; 570 int len = 0; 571 int i; 572 573 qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs); 574 len += scnprintf(str + len, sizeof(str) - len, 575 "% 8lld.%06lld %6u (0x%06x) ", 576 secs, usecs, 577 cmd_log->command, cmd_log->command); 578 for (i = 0; i < data_len; ++i) { 579 len += scnprintf(str + len, sizeof(str) - len, 580 "0x%08x ", cmd_log->data[i]); 581 } 582 583 print(print_priv, str); 584 585 --count; 586 ++idx; 587 if (idx >= log_buffer->size) 588 idx = 0; 589 } 590 } 591 592 /** 593 * wmi_print_event_log_buffer() - an output agnostic wmi event log printer 594 * @log_buffer: 
the event log buffer metadata of the buffer to print 595 * @count: the maximum number of entries to print 596 * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper 597 * @print_priv: any data required by the print method, e.g. a file handle 598 * 599 * Return: None 600 */ 601 static void 602 wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count, 603 qdf_abstract_print *print, void *print_priv) 604 { 605 static const int data_len = 606 WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t); 607 char str[128]; 608 uint32_t idx; 609 610 if (count > log_buffer->size) 611 count = log_buffer->size; 612 if (count > log_buffer->length) 613 count = log_buffer->length; 614 615 /* subtract count from index, and wrap if necessary */ 616 idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count; 617 idx %= log_buffer->size; 618 619 print(print_priv, "Time (seconds) Event Id Payload"); 620 while (count) { 621 struct wmi_event_debug *event_log = (struct wmi_event_debug *) 622 &((struct wmi_event_debug *)log_buffer->buf)[idx]; 623 uint64_t secs, usecs; 624 int len = 0; 625 int i; 626 627 qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs); 628 len += scnprintf(str + len, sizeof(str) - len, 629 "% 8lld.%06lld %6u (0x%06x) ", 630 secs, usecs, 631 event_log->event, event_log->event); 632 for (i = 0; i < data_len; ++i) { 633 len += scnprintf(str + len, sizeof(str) - len, 634 "0x%08x ", event_log->data[i]); 635 } 636 637 print(print_priv, str); 638 639 --count; 640 ++idx; 641 if (idx >= log_buffer->size) 642 idx = 0; 643 } 644 } 645 646 inline void 647 wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count, 648 qdf_abstract_print *print, void *print_priv) 649 { 650 wmi_print_cmd_log_buffer( 651 &wmi->log_info.wmi_command_log_buf_info, 652 count, print, print_priv); 653 } 654 655 inline void 656 wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count, 657 qdf_abstract_print *print, void *print_priv) 658 { 659 wmi_print_cmd_log_buffer( 
660 &wmi->log_info.wmi_command_tx_cmp_log_buf_info, 661 count, print, print_priv); 662 } 663 664 inline void 665 wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count, 666 qdf_abstract_print *print, void *print_priv) 667 { 668 wmi_print_cmd_log_buffer( 669 &wmi->log_info.wmi_mgmt_command_log_buf_info, 670 count, print, print_priv); 671 } 672 673 inline void 674 wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count, 675 qdf_abstract_print *print, void *print_priv) 676 { 677 wmi_print_cmd_log_buffer( 678 &wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info, 679 count, print, print_priv); 680 } 681 682 inline void 683 wmi_print_event_log(wmi_unified_t wmi, uint32_t count, 684 qdf_abstract_print *print, void *print_priv) 685 { 686 wmi_print_event_log_buffer( 687 &wmi->log_info.wmi_event_log_buf_info, 688 count, print, print_priv); 689 } 690 691 inline void 692 wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count, 693 qdf_abstract_print *print, void *print_priv) 694 { 695 wmi_print_event_log_buffer( 696 &wmi->log_info.wmi_rx_event_log_buf_info, 697 count, print, print_priv); 698 } 699 700 inline void 701 wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count, 702 qdf_abstract_print *print, void *print_priv) 703 { 704 wmi_print_event_log_buffer( 705 &wmi->log_info.wmi_mgmt_event_log_buf_info, 706 count, print, print_priv); 707 } 708 709 710 /* debugfs routines*/ 711 712 /** 713 * debug_wmi_##func_base##_show() - debugfs functions to display content of 714 * command and event buffers. Macro uses max buffer length to display 715 * buffer when it is wraparound. 
716 * 717 * @m: debugfs handler to access wmi_handle 718 * @v: Variable arguments (not used) 719 * 720 * Return: Length of characters printed 721 */ 722 #define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size) \ 723 static int debug_wmi_##func_base##_show(struct seq_file *m, \ 724 void *v) \ 725 { \ 726 wmi_unified_t wmi_handle = (wmi_unified_t) m->private; \ 727 struct wmi_log_buf_t *wmi_log = \ 728 &wmi_handle->log_info.wmi_##func_base##_buf_info;\ 729 int pos, nread, outlen; \ 730 int i; \ 731 uint64_t secs, usecs; \ 732 \ 733 qdf_spin_lock(&wmi_handle->log_info.wmi_record_lock); \ 734 if (!wmi_log->length) { \ 735 qdf_spin_unlock(&wmi_handle->log_info.wmi_record_lock);\ 736 return wmi_bp_seq_printf(m, \ 737 "no elements to read from ring buffer!\n"); \ 738 } \ 739 \ 740 if (wmi_log->length <= wmi_ring_size) \ 741 nread = wmi_log->length; \ 742 else \ 743 nread = wmi_ring_size; \ 744 \ 745 if (*(wmi_log->p_buf_tail_idx) == 0) \ 746 /* tail can be 0 after wrap-around */ \ 747 pos = wmi_ring_size - 1; \ 748 else \ 749 pos = *(wmi_log->p_buf_tail_idx) - 1; \ 750 \ 751 outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\ 752 qdf_spin_unlock(&wmi_handle->log_info.wmi_record_lock); \ 753 while (nread--) { \ 754 struct wmi_command_debug *wmi_record; \ 755 \ 756 wmi_record = (struct wmi_command_debug *) \ 757 &(((struct wmi_command_debug *)wmi_log->buf)[pos]);\ 758 outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n", \ 759 (wmi_record->command)); \ 760 qdf_log_timestamp_to_secs(wmi_record->time, &secs,\ 761 &usecs); \ 762 outlen += \ 763 wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\ 764 secs, usecs); \ 765 outlen += wmi_bp_seq_printf(m, "CMD = "); \ 766 for (i = 0; i < (wmi_record_max_length/ \ 767 sizeof(uint32_t)); i++) \ 768 outlen += wmi_bp_seq_printf(m, "%x ", \ 769 wmi_record->data[i]); \ 770 outlen += wmi_bp_seq_printf(m, "\n"); \ 771 \ 772 if (pos == 0) \ 773 pos = wmi_ring_size - 1; \ 774 else \ 775 pos--; \ 776 } \ 777 return outlen; \ 
778 } \ 779 780 #define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size) \ 781 static int debug_wmi_##func_base##_show(struct seq_file *m, \ 782 void *v) \ 783 { \ 784 wmi_unified_t wmi_handle = (wmi_unified_t) m->private; \ 785 struct wmi_log_buf_t *wmi_log = \ 786 &wmi_handle->log_info.wmi_##func_base##_buf_info;\ 787 int pos, nread, outlen; \ 788 int i; \ 789 uint64_t secs, usecs; \ 790 \ 791 qdf_spin_lock(&wmi_handle->log_info.wmi_record_lock); \ 792 if (!wmi_log->length) { \ 793 qdf_spin_unlock(&wmi_handle->log_info.wmi_record_lock);\ 794 return wmi_bp_seq_printf(m, \ 795 "no elements to read from ring buffer!\n"); \ 796 } \ 797 \ 798 if (wmi_log->length <= wmi_ring_size) \ 799 nread = wmi_log->length; \ 800 else \ 801 nread = wmi_ring_size; \ 802 \ 803 if (*(wmi_log->p_buf_tail_idx) == 0) \ 804 /* tail can be 0 after wrap-around */ \ 805 pos = wmi_ring_size - 1; \ 806 else \ 807 pos = *(wmi_log->p_buf_tail_idx) - 1; \ 808 \ 809 outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\ 810 qdf_spin_unlock(&wmi_handle->log_info.wmi_record_lock); \ 811 while (nread--) { \ 812 struct wmi_event_debug *wmi_record; \ 813 \ 814 wmi_record = (struct wmi_event_debug *) \ 815 &(((struct wmi_event_debug *)wmi_log->buf)[pos]);\ 816 qdf_log_timestamp_to_secs(wmi_record->time, &secs,\ 817 &usecs); \ 818 outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\ 819 (wmi_record->event)); \ 820 outlen += \ 821 wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\ 822 secs, usecs); \ 823 outlen += wmi_bp_seq_printf(m, "CMD = "); \ 824 for (i = 0; i < (wmi_record_max_length/ \ 825 sizeof(uint32_t)); i++) \ 826 outlen += wmi_bp_seq_printf(m, "%x ", \ 827 wmi_record->data[i]); \ 828 outlen += wmi_bp_seq_printf(m, "\n"); \ 829 \ 830 if (pos == 0) \ 831 pos = wmi_ring_size - 1; \ 832 else \ 833 pos--; \ 834 } \ 835 return outlen; \ 836 } 837 838 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size); 839 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, 
wmi_display_size); 840 GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size); 841 GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size); 842 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size); 843 GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log, 844 wmi_display_size); 845 GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size); 846 847 /** 848 * debug_wmi_enable_show() - debugfs functions to display enable state of 849 * wmi logging feature. 850 * 851 * @m: debugfs handler to access wmi_handle 852 * @v: Variable arguments (not used) 853 * 854 * Return: always 1 855 */ 856 static int debug_wmi_enable_show(struct seq_file *m, void *v) 857 { 858 wmi_unified_t wmi_handle = (wmi_unified_t) m->private; 859 860 return wmi_bp_seq_printf(m, "%d\n", 861 wmi_handle->log_info.wmi_logging_enable); 862 } 863 864 /** 865 * debug_wmi_log_size_show() - debugfs functions to display configured size of 866 * wmi logging command/event buffer and management command/event buffer. 867 * 868 * @m: debugfs handler to access wmi_handle 869 * @v: Variable arguments (not used) 870 * 871 * Return: Length of characters printed 872 */ 873 static int debug_wmi_log_size_show(struct seq_file *m, void *v) 874 { 875 876 wmi_bp_seq_printf(m, "WMI command/event log max size:%d\n", 877 wmi_log_max_entry); 878 return wmi_bp_seq_printf(m, 879 "WMI management command/events log max size:%d\n", 880 wmi_mgmt_log_max_entry); 881 } 882 883 /** 884 * debug_wmi_##func_base##_write() - debugfs functions to clear 885 * wmi logging command/event buffer and management command/event buffer. 
886 * 887 * @file: file handler to access wmi_handle 888 * @buf: received data buffer 889 * @count: length of received buffer 890 * @ppos: Not used 891 * 892 * Return: count 893 */ 894 #define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\ 895 static ssize_t debug_wmi_##func_base##_write(struct file *file, \ 896 const char __user *buf, \ 897 size_t count, loff_t *ppos) \ 898 { \ 899 int k, ret; \ 900 wmi_unified_t wmi_handle = \ 901 ((struct seq_file *)file->private_data)->private;\ 902 struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info. \ 903 wmi_##func_base##_buf_info; \ 904 char locbuf[50]; \ 905 \ 906 if ((!buf) || (count > 50)) \ 907 return -EFAULT; \ 908 \ 909 if (copy_from_user(locbuf, buf, count)) \ 910 return -EFAULT; \ 911 \ 912 ret = sscanf(locbuf, "%d", &k); \ 913 if ((ret != 1) || (k != 0)) { \ 914 qdf_print("Wrong input, echo 0 to clear the wmi buffer\n");\ 915 return -EINVAL; \ 916 } \ 917 \ 918 qdf_spin_lock(&wmi_handle->log_info.wmi_record_lock); \ 919 qdf_mem_zero(wmi_log->buf, wmi_ring_size * \ 920 sizeof(struct wmi_record_type)); \ 921 wmi_log->length = 0; \ 922 *(wmi_log->p_buf_tail_idx) = 0; \ 923 qdf_spin_unlock(&wmi_handle->log_info.wmi_record_lock); \ 924 \ 925 return count; \ 926 } 927 928 GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_log_max_entry, 929 wmi_command_debug); 930 GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_log_max_entry, 931 wmi_command_debug); 932 GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_log_max_entry, 933 wmi_event_debug); 934 GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_log_max_entry, 935 wmi_event_debug); 936 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_log_max_entry, 937 wmi_command_debug); 938 GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log, 939 wmi_mgmt_log_max_entry, wmi_command_debug); 940 GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_log_max_entry, 941 wmi_event_debug); 942 943 /** 944 * debug_wmi_enable_write() - debugfs functions to enable/disable 945 * wmi 
logging feature. 946 * 947 * @file: file handler to access wmi_handle 948 * @buf: received data buffer 949 * @count: length of received buffer 950 * @ppos: Not used 951 * 952 * Return: count 953 */ 954 static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf, 955 size_t count, loff_t *ppos) 956 { 957 wmi_unified_t wmi_handle = 958 ((struct seq_file *)file->private_data)->private; 959 int k, ret; 960 char locbuf[50]; 961 962 if ((!buf) || (count > 50)) 963 return -EFAULT; 964 965 if (copy_from_user(locbuf, buf, count)) 966 return -EFAULT; 967 968 ret = sscanf(locbuf, "%d", &k); 969 if ((ret != 1) || ((k != 0) && (k != 1))) 970 return -EINVAL; 971 972 wmi_handle->log_info.wmi_logging_enable = k; 973 return count; 974 } 975 976 /** 977 * debug_wmi_log_size_write() - reserved. 978 * 979 * @file: file handler to access wmi_handle 980 * @buf: received data buffer 981 * @count: length of received buffer 982 * @ppos: Not used 983 * 984 * Return: count 985 */ 986 static ssize_t debug_wmi_log_size_write(struct file *file, 987 const char __user *buf, size_t count, loff_t *ppos) 988 { 989 return -EINVAL; 990 } 991 992 /* Structure to maintain debug information */ 993 struct wmi_debugfs_info { 994 const char *name; 995 const struct file_operations *ops; 996 }; 997 998 #define DEBUG_FOO(func_base) { .name = #func_base, \ 999 .ops = &debug_##func_base##_ops } 1000 1001 /** 1002 * debug_##func_base##_open() - Open debugfs entry for respective command 1003 * and event buffer. 
1004 * 1005 * @inode: node for debug dir entry 1006 * @file: file handler 1007 * 1008 * Return: open status 1009 */ 1010 #define GENERATE_DEBUG_STRUCTS(func_base) \ 1011 static int debug_##func_base##_open(struct inode *inode, \ 1012 struct file *file) \ 1013 { \ 1014 return single_open(file, debug_##func_base##_show, \ 1015 inode->i_private); \ 1016 } \ 1017 \ 1018 \ 1019 static struct file_operations debug_##func_base##_ops = { \ 1020 .open = debug_##func_base##_open, \ 1021 .read = seq_read, \ 1022 .llseek = seq_lseek, \ 1023 .write = debug_##func_base##_write, \ 1024 .release = single_release, \ 1025 }; 1026 1027 GENERATE_DEBUG_STRUCTS(wmi_command_log); 1028 GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log); 1029 GENERATE_DEBUG_STRUCTS(wmi_event_log); 1030 GENERATE_DEBUG_STRUCTS(wmi_rx_event_log); 1031 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log); 1032 GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log); 1033 GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log); 1034 GENERATE_DEBUG_STRUCTS(wmi_enable); 1035 GENERATE_DEBUG_STRUCTS(wmi_log_size); 1036 1037 struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = { 1038 DEBUG_FOO(wmi_command_log), 1039 DEBUG_FOO(wmi_command_tx_cmp_log), 1040 DEBUG_FOO(wmi_event_log), 1041 DEBUG_FOO(wmi_rx_event_log), 1042 DEBUG_FOO(wmi_mgmt_command_log), 1043 DEBUG_FOO(wmi_mgmt_command_tx_cmp_log), 1044 DEBUG_FOO(wmi_mgmt_event_log), 1045 DEBUG_FOO(wmi_enable), 1046 DEBUG_FOO(wmi_log_size), 1047 }; 1048 1049 1050 /** 1051 * wmi_debugfs_create() - Create debug_fs entry for wmi logging. 
1052 * 1053 * @wmi_handle: wmi handle 1054 * @par_entry: debug directory entry 1055 * @id: Index to debug info data array 1056 * 1057 * Return: none 1058 */ 1059 static void wmi_debugfs_create(wmi_unified_t wmi_handle, 1060 struct dentry *par_entry) 1061 { 1062 int i; 1063 1064 if (!par_entry) 1065 goto out; 1066 1067 for (i = 0; i < NUM_DEBUG_INFOS; ++i) { 1068 wmi_handle->debugfs_de[i] = debugfs_create_file( 1069 wmi_debugfs_infos[i].name, 0644, par_entry, 1070 wmi_handle, wmi_debugfs_infos[i].ops); 1071 1072 if (!wmi_handle->debugfs_de[i]) { 1073 qdf_print("%s: debug Entry creation failed!\n", 1074 __func__); 1075 goto out; 1076 } 1077 } 1078 1079 return; 1080 1081 out: 1082 qdf_print("%s: debug Entry creation failed!\n", __func__); 1083 wmi_log_buffer_free(wmi_handle); 1084 return; 1085 } 1086 1087 /** 1088 * wmi_debugfs_remove() - Remove debugfs entry for wmi logging. 1089 * @wmi_handle: wmi handle 1090 * @dentry: debugfs directory entry 1091 * @id: Index to debug info data array 1092 * 1093 * Return: none 1094 */ 1095 static void wmi_debugfs_remove(wmi_unified_t wmi_handle) 1096 { 1097 int i; 1098 struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir; 1099 1100 if (dentry) { 1101 for (i = 0; i < NUM_DEBUG_INFOS; ++i) { 1102 if (wmi_handle->debugfs_de[i]) 1103 wmi_handle->debugfs_de[i] = NULL; 1104 } 1105 } 1106 1107 if (dentry) 1108 debugfs_remove_recursive(dentry); 1109 } 1110 1111 /** 1112 * wmi_debugfs_init() - debugfs functions to create debugfs directory and to 1113 * create debugfs enteries. 
1114 * 1115 * @h: wmi handler 1116 * 1117 * Return: init status 1118 */ 1119 static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx) 1120 { 1121 char buf[32]; 1122 1123 snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u", 1124 wmi_handle->soc->soc_idx, pdev_idx); 1125 1126 wmi_handle->log_info.wmi_log_debugfs_dir = 1127 debugfs_create_dir(buf, NULL); 1128 1129 if (!wmi_handle->log_info.wmi_log_debugfs_dir) { 1130 qdf_print("error while creating debugfs dir for %s\n", buf); 1131 return QDF_STATUS_E_FAILURE; 1132 } 1133 wmi_debugfs_create(wmi_handle, 1134 wmi_handle->log_info.wmi_log_debugfs_dir); 1135 1136 return QDF_STATUS_SUCCESS; 1137 } 1138 1139 /** 1140 * wmi_mgmt_cmd_record() - Wrapper function for mgmt command logging macro 1141 * 1142 * @wmi_handle: wmi handle 1143 * @cmd: mgmt command 1144 * @header: pointer to 802.11 header 1145 * @vdev_id: vdev id 1146 * @chanfreq: channel frequency 1147 * 1148 * Return: none 1149 */ 1150 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd, 1151 void *header, uint32_t vdev_id, uint32_t chanfreq) 1152 { 1153 1154 uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE]; 1155 1156 data[0] = ((struct wmi_command_header *)header)->type; 1157 data[1] = ((struct wmi_command_header *)header)->sub_type; 1158 data[2] = vdev_id; 1159 data[3] = chanfreq; 1160 1161 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); 1162 1163 WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data); 1164 1165 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); 1166 } 1167 #else 1168 /** 1169 * wmi_debugfs_remove() - Remove debugfs entry for wmi logging. 
1170 * @wmi_handle: wmi handle 1171 * @dentry: debugfs directory entry 1172 * @id: Index to debug info data array 1173 * 1174 * Return: none 1175 */ 1176 static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { } 1177 void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd, 1178 void *header, uint32_t vdev_id, uint32_t chanfreq) { } 1179 static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { } 1180 #endif /*WMI_INTERFACE_EVENT_LOGGING */ 1181 qdf_export_symbol(wmi_mgmt_cmd_record); 1182 1183 int wmi_get_host_credits(wmi_unified_t wmi_handle); 1184 /* WMI buffer APIs */ 1185 1186 #ifdef NBUF_MEMORY_DEBUG 1187 wmi_buf_t 1188 wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len, uint8_t *file_name, 1189 uint32_t line_num) 1190 { 1191 wmi_buf_t wmi_buf; 1192 1193 if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) { 1194 QDF_ASSERT(0); 1195 return NULL; 1196 } 1197 1198 wmi_buf = qdf_nbuf_alloc_debug(NULL, 1199 roundup(len + WMI_MIN_HEAD_ROOM, 4), 1200 WMI_MIN_HEAD_ROOM, 4, false, file_name, 1201 line_num); 1202 1203 if (!wmi_buf) 1204 return NULL; 1205 1206 /* Clear the wmi buffer */ 1207 OS_MEMZERO(qdf_nbuf_data(wmi_buf), len); 1208 1209 /* 1210 * Set the length of the buffer to match the allocation size. 
1211 */ 1212 qdf_nbuf_set_pktlen(wmi_buf, len); 1213 1214 return wmi_buf; 1215 } 1216 qdf_export_symbol(wmi_buf_alloc_debug); 1217 1218 void wmi_buf_free(wmi_buf_t net_buf) 1219 { 1220 qdf_nbuf_free(net_buf); 1221 } 1222 qdf_export_symbol(wmi_buf_free); 1223 #else 1224 wmi_buf_t wmi_buf_alloc(wmi_unified_t wmi_handle, uint32_t len) 1225 { 1226 wmi_buf_t wmi_buf; 1227 1228 if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) { 1229 QDF_ASSERT(0); 1230 return NULL; 1231 } 1232 1233 wmi_buf = qdf_nbuf_alloc(NULL, roundup(len + WMI_MIN_HEAD_ROOM, 4), 1234 WMI_MIN_HEAD_ROOM, 4, false); 1235 if (!wmi_buf) 1236 return NULL; 1237 1238 /* Clear the wmi buffer */ 1239 OS_MEMZERO(qdf_nbuf_data(wmi_buf), len); 1240 1241 /* 1242 * Set the length of the buffer to match the allocation size. 1243 */ 1244 qdf_nbuf_set_pktlen(wmi_buf, len); 1245 return wmi_buf; 1246 } 1247 qdf_export_symbol(wmi_buf_alloc); 1248 1249 void wmi_buf_free(wmi_buf_t net_buf) 1250 { 1251 qdf_nbuf_free(net_buf); 1252 } 1253 qdf_export_symbol(wmi_buf_free); 1254 #endif 1255 1256 /** 1257 * wmi_get_max_msg_len() - get maximum WMI message length 1258 * @wmi_handle: WMI handle. 
1259 * 1260 * This function returns the maximum WMI message length 1261 * 1262 * Return: maximum WMI message length 1263 */ 1264 uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle) 1265 { 1266 return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM; 1267 } 1268 qdf_export_symbol(wmi_get_max_msg_len); 1269 1270 #ifndef WMI_CMD_STRINGS 1271 static uint8_t *wmi_id_to_name(uint32_t wmi_command) 1272 { 1273 return "Invalid WMI cmd"; 1274 } 1275 1276 #endif 1277 1278 #ifdef CONFIG_MCL 1279 static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag) 1280 { 1281 WMI_LOGD("Send WMI command:%s command_id:%d htc_tag:%d\n", 1282 wmi_id_to_name(cmd_id), cmd_id, tag); 1283 } 1284 1285 /** 1286 * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence 1287 * @cmd_id: command to check 1288 * 1289 * Return: true if the command is part of the resume sequence. 1290 */ 1291 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id) 1292 { 1293 switch (cmd_id) { 1294 case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID: 1295 case WMI_PDEV_RESUME_CMDID: 1296 return true; 1297 1298 default: 1299 return false; 1300 } 1301 } 1302 #else 1303 static bool wmi_is_pm_resume_cmd(uint32_t cmd_id) 1304 { 1305 return false; 1306 } 1307 #endif 1308 1309 /** 1310 * wmi_unified_cmd_send() - WMI command API 1311 * @wmi_handle: handle to wmi 1312 * @buf: wmi buf 1313 * @len: wmi buffer length 1314 * @cmd_id: wmi command id 1315 * 1316 * Note, it is NOT safe to access buf after calling this function! 
 *
 * Return: 0 on success
 */
QDF_STATUS wmi_unified_cmd_send(wmi_unified_t wmi_handle, wmi_buf_t buf,
				uint32_t len, uint32_t cmd_id)
{
	HTC_PACKET *pkt;
	QDF_STATUS status;
	uint16_t htc_tag = 0;

	/* During runtime-PM the command is tagged via the target ops so the
	 * HTC layer can identify it; otherwise a suspended target rejects
	 * everything except the resume-sequence commands.
	 */
	if (wmi_get_runtime_pm_inprogress(wmi_handle)) {
		htc_tag =
			(uint16_t)wmi_handle->ops->wmi_set_htc_tx_tag(
						wmi_handle, buf, cmd_id);
	} else if (qdf_atomic_read(&wmi_handle->is_target_suspended) &&
		(!wmi_is_pm_resume_cmd(cmd_id))) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
			  "%s: Target is suspended", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_BUSY;
	}
	if (wmi_handle->wmi_stopinprogress) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
			"WMI stop in progress\n");
		return QDF_STATUS_E_INVAL;
	}

#ifndef WMI_NON_TLV_SUPPORT
	/* Do sanity check on the TLV parameter structure */
	if (wmi_handle->target_type == WMI_TLV_TARGET) {
		void *buf_ptr = (void *)qdf_nbuf_data(buf);

		if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id)
			!= 0) {
			QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
				"\nERROR: %s: Invalid WMI Param Buffer for Cmd:%d",
				__func__, cmd_id);
			return QDF_STATUS_E_INVAL;
		}
	}
#endif

	/* Prepend the WMI command header in the nbuf headroom reserved by
	 * wmi_buf_alloc(); failure here means the caller's buffer had no
	 * headroom. NOTE(review): on error paths below, buf is neither freed
	 * nor restored -- presumably the caller owns it on failure; confirm
	 * against callers before changing.
	 */
	if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
			 "%s, Failed to send cmd %x, no memory",
			 __func__, cmd_id);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
	WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);

	/* Back-pressure: bail out when the number of in-flight commands
	 * reaches the configured ceiling (the inc is undone before return).
	 */
	qdf_atomic_inc(&wmi_handle->pending_cmds);
	if (qdf_atomic_read(&wmi_handle->pending_cmds) >=
			wmi_handle->wmi_max_cmds) {
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
		    "\n%s: hostcredits = %d", __func__,
		wmi_get_host_credits(wmi_handle));
		htc_dump_counter_info(wmi_handle->htc_handle);
		qdf_atomic_dec(&wmi_handle->pending_cmds);
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
			"%s: MAX %d WMI Pending cmds reached.", __func__,
			wmi_handle->wmi_max_cmds);
		QDF_BUG(0);
		return QDF_STATUS_E_BUSY;
	}

	pkt = qdf_mem_malloc(sizeof(*pkt));
	if (!pkt) {
		qdf_atomic_dec(&wmi_handle->pending_cmds);
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
			 "%s, Failed to alloc htc packet %x, no memory",
			 __func__, cmd_id);
		return QDF_STATUS_E_NOMEM;
	}

	SET_HTC_PACKET_INFO_TX(pkt,
			       NULL,
			       qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
			       wmi_handle->wmi_endpoint_id, htc_tag);

	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);
#ifdef CONFIG_MCL
	wmi_log_cmd_id(cmd_id, htc_tag);
#endif

#ifdef WMI_INTERFACE_EVENT_LOGGING
	if (wmi_handle->log_info.wmi_logging_enable) {
		qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
		/*
		 * Record 16 bytes of WMI cmd data -
		 * exclude TLV and WMI headers
		 *
		 * WMI mgmt command already recorded in wmi_mgmt_cmd_record
		 */
		if (wmi_handle->ops->is_management_record(cmd_id) == false) {
			WMI_COMMAND_RECORD(wmi_handle, cmd_id,
					qdf_nbuf_data(buf) +
			wmi_handle->log_info.buf_offset_command);
		}
		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
	}
#endif

	/* Hand the packet to HTC; on success, ownership of buf and pkt has
	 * transferred to the HTC completion path.
	 */
	status = htc_send_pkt(wmi_handle->htc_handle, pkt);

	if (QDF_STATUS_SUCCESS != status) {
		qdf_atomic_dec(&wmi_handle->pending_cmds);
		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
		   "%s %d, htc_send_pkt failed", __func__, __LINE__);
		qdf_mem_free(pkt);
		return status;
	}

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(wmi_unified_cmd_send);

/**
 * wmi_unified_get_event_handler_ix() - gives event handler's index
 * @wmi_handle: handle to wmi
 * @event_id: wmi
event id 1439 * 1440 * Return: event handler's index 1441 */ 1442 static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle, 1443 uint32_t event_id) 1444 { 1445 uint32_t idx = 0; 1446 int32_t invalid_idx = -1; 1447 struct wmi_soc *soc = wmi_handle->soc; 1448 1449 for (idx = 0; (idx < soc->max_event_idx && 1450 idx < WMI_UNIFIED_MAX_EVENT); ++idx) { 1451 if (wmi_handle->event_id[idx] == event_id && 1452 wmi_handle->event_handler[idx] != NULL) { 1453 return idx; 1454 } 1455 } 1456 1457 return invalid_idx; 1458 } 1459 1460 /** 1461 * wmi_unified_register_event() - register wmi event handler 1462 * @wmi_handle: handle to wmi 1463 * @event_id: wmi event id 1464 * @handler_func: wmi event handler function 1465 * 1466 * Return: 0 on success 1467 */ 1468 int wmi_unified_register_event(wmi_unified_t wmi_handle, 1469 uint32_t event_id, 1470 wmi_unified_event_handler handler_func) 1471 { 1472 uint32_t idx = 0; 1473 uint32_t evt_id; 1474 struct wmi_soc *soc = wmi_handle->soc; 1475 1476 if (event_id >= wmi_events_max || 1477 wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { 1478 qdf_print("%s: Event id %d is unavailable\n", 1479 __func__, event_id); 1480 return QDF_STATUS_E_FAILURE; 1481 } 1482 evt_id = wmi_handle->wmi_events[event_id]; 1483 if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) { 1484 qdf_print("%s : event handler already registered 0x%x\n", 1485 __func__, evt_id); 1486 return QDF_STATUS_E_FAILURE; 1487 } 1488 if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) { 1489 qdf_print("%s : no more event handlers 0x%x\n", 1490 __func__, evt_id); 1491 return QDF_STATUS_E_FAILURE; 1492 } 1493 idx = soc->max_event_idx; 1494 wmi_handle->event_handler[idx] = handler_func; 1495 wmi_handle->event_id[idx] = evt_id; 1496 qdf_spin_lock_bh(&soc->ctx_lock); 1497 wmi_handle->ctx[idx] = WMI_RX_UMAC_CTX; 1498 qdf_spin_unlock_bh(&soc->ctx_lock); 1499 soc->max_event_idx++; 1500 1501 return 0; 1502 } 1503 1504 /** 1505 * wmi_unified_register_event_handler() 
- register wmi event handler 1506 * @wmi_handle: handle to wmi 1507 * @event_id: wmi event id 1508 * @handler_func: wmi event handler function 1509 * @rx_ctx: rx execution context for wmi rx events 1510 * 1511 * This API is to support legacy requirements. Will be deprecated in future. 1512 * Return: 0 on success 1513 */ 1514 int wmi_unified_register_event_handler(wmi_unified_t wmi_handle, 1515 wmi_conv_event_id event_id, 1516 wmi_unified_event_handler handler_func, 1517 uint8_t rx_ctx) 1518 { 1519 uint32_t idx = 0; 1520 uint32_t evt_id; 1521 struct wmi_soc *soc = wmi_handle->soc; 1522 1523 if (event_id >= wmi_events_max || 1524 wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { 1525 qdf_print("%s: Event id %d is unavailable\n", 1526 __func__, event_id); 1527 return QDF_STATUS_E_FAILURE; 1528 } 1529 evt_id = wmi_handle->wmi_events[event_id]; 1530 1531 if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) { 1532 qdf_print("%s : event handler already registered 0x%x\n", 1533 __func__, evt_id); 1534 return QDF_STATUS_E_FAILURE; 1535 } 1536 if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) { 1537 qdf_print("%s : no more event handlers 0x%x\n", 1538 __func__, evt_id); 1539 return QDF_STATUS_E_FAILURE; 1540 } 1541 QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, 1542 "Registered event handler for event 0x%8x\n", evt_id); 1543 idx = soc->max_event_idx; 1544 wmi_handle->event_handler[idx] = handler_func; 1545 wmi_handle->event_id[idx] = evt_id; 1546 qdf_spin_lock_bh(&soc->ctx_lock); 1547 wmi_handle->ctx[idx] = rx_ctx; 1548 qdf_spin_unlock_bh(&soc->ctx_lock); 1549 soc->max_event_idx++; 1550 1551 return 0; 1552 } 1553 qdf_export_symbol(wmi_unified_register_event_handler); 1554 1555 /** 1556 * wmi_unified_unregister_event() - unregister wmi event handler 1557 * @wmi_handle: handle to wmi 1558 * @event_id: wmi event id 1559 * 1560 * Return: 0 on success 1561 */ 1562 int wmi_unified_unregister_event(wmi_unified_t wmi_handle, 1563 uint32_t event_id) 1564 { 
1565 uint32_t idx = 0; 1566 uint32_t evt_id; 1567 struct wmi_soc *soc = wmi_handle->soc; 1568 1569 if (event_id >= wmi_events_max || 1570 wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { 1571 qdf_print("%s: Event id %d is unavailable\n", 1572 __func__, event_id); 1573 return QDF_STATUS_E_FAILURE; 1574 } 1575 evt_id = wmi_handle->wmi_events[event_id]; 1576 1577 idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id); 1578 if (idx == -1) { 1579 qdf_print("%s : event handler is not registered: evt id 0x%x\n", 1580 __func__, evt_id); 1581 return QDF_STATUS_E_FAILURE; 1582 } 1583 wmi_handle->event_handler[idx] = NULL; 1584 wmi_handle->event_id[idx] = 0; 1585 --soc->max_event_idx; 1586 wmi_handle->event_handler[idx] = 1587 wmi_handle->event_handler[soc->max_event_idx]; 1588 wmi_handle->event_id[idx] = 1589 wmi_handle->event_id[soc->max_event_idx]; 1590 1591 return 0; 1592 } 1593 1594 /** 1595 * wmi_unified_unregister_event_handler() - unregister wmi event handler 1596 * @wmi_handle: handle to wmi 1597 * @event_id: wmi event id 1598 * 1599 * Return: 0 on success 1600 */ 1601 int wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle, 1602 wmi_conv_event_id event_id) 1603 { 1604 uint32_t idx = 0; 1605 uint32_t evt_id; 1606 struct wmi_soc *soc = wmi_handle->soc; 1607 1608 if (event_id >= wmi_events_max || 1609 wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { 1610 qdf_print("%s: Event id %d is unavailable\n", 1611 __func__, event_id); 1612 return QDF_STATUS_E_FAILURE; 1613 } 1614 evt_id = wmi_handle->wmi_events[event_id]; 1615 1616 idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id); 1617 if (idx == -1) { 1618 qdf_print("%s : event handler is not registered: evt id 0x%x\n", 1619 __func__, evt_id); 1620 return QDF_STATUS_E_FAILURE; 1621 } 1622 wmi_handle->event_handler[idx] = NULL; 1623 wmi_handle->event_id[idx] = 0; 1624 --soc->max_event_idx; 1625 wmi_handle->event_handler[idx] = 1626 wmi_handle->event_handler[soc->max_event_idx]; 1627 
wmi_handle->event_id[idx] = 1628 wmi_handle->event_id[soc->max_event_idx]; 1629 1630 return 0; 1631 } 1632 qdf_export_symbol(wmi_unified_unregister_event_handler); 1633 1634 /** 1635 * wmi_process_fw_event_default_ctx() - process in default caller context 1636 * @wmi_handle: handle to wmi 1637 * @htc_packet: pointer to htc packet 1638 * @exec_ctx: execution context for wmi fw event 1639 * 1640 * Event process by below function will be in default caller context. 1641 * wmi internally provides rx work thread processing context. 1642 * 1643 * Return: none 1644 */ 1645 static void wmi_process_fw_event_default_ctx(struct wmi_unified *wmi_handle, 1646 HTC_PACKET *htc_packet, uint8_t exec_ctx) 1647 { 1648 wmi_buf_t evt_buf; 1649 evt_buf = (wmi_buf_t) htc_packet->pPktContext; 1650 1651 #ifndef CONFIG_MCL 1652 wmi_handle->rx_ops.wma_process_fw_event_handler_cbk 1653 (wmi_handle->scn_handle, evt_buf, exec_ctx); 1654 #else 1655 wmi_handle->rx_ops.wma_process_fw_event_handler_cbk(wmi_handle, 1656 htc_packet, exec_ctx); 1657 #endif 1658 1659 return; 1660 } 1661 1662 /** 1663 * wmi_process_fw_event_worker_thread_ctx() - process in worker thread context 1664 * @wmi_handle: handle to wmi 1665 * @htc_packet: pointer to htc packet 1666 * 1667 * Event process by below function will be in worker thread context. 1668 * Use this method for events which are not critical and not 1669 * handled in protocol stack. 
1670 * 1671 * Return: none 1672 */ 1673 void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle, 1674 HTC_PACKET *htc_packet) 1675 { 1676 wmi_buf_t evt_buf; 1677 1678 evt_buf = (wmi_buf_t) htc_packet->pPktContext; 1679 1680 qdf_spin_lock_bh(&wmi_handle->eventq_lock); 1681 qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf); 1682 qdf_spin_unlock_bh(&wmi_handle->eventq_lock); 1683 qdf_queue_work(0, wmi_handle->wmi_rx_work_queue, 1684 &wmi_handle->rx_event_work); 1685 1686 return; 1687 } 1688 1689 qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx); 1690 1691 /** 1692 * wmi_get_pdev_ep: Get wmi handle based on endpoint 1693 * @soc: handle to wmi soc 1694 * @ep: endpoint id 1695 * 1696 * Return: none 1697 */ 1698 static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc, 1699 HTC_ENDPOINT_ID ep) 1700 { 1701 uint32_t i; 1702 1703 for (i = 0; i < WMI_MAX_RADIOS; i++) 1704 if (soc->wmi_endpoint_id[i] == ep) 1705 break; 1706 1707 if (i == WMI_MAX_RADIOS) 1708 return NULL; 1709 1710 return soc->wmi_pdev[i]; 1711 } 1712 1713 /** 1714 * wmi_control_rx() - process fw events callbacks 1715 * @ctx: handle to wmi 1716 * @htc_packet: pointer to htc packet 1717 * 1718 * Return: none 1719 */ 1720 static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet) 1721 { 1722 struct wmi_soc *soc = (struct wmi_soc *) ctx; 1723 struct wmi_unified *wmi_handle; 1724 wmi_buf_t evt_buf; 1725 uint32_t id; 1726 uint32_t idx = 0; 1727 enum wmi_rx_exec_ctx exec_ctx; 1728 1729 evt_buf = (wmi_buf_t) htc_packet->pPktContext; 1730 1731 wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint); 1732 if (wmi_handle == NULL) { 1733 qdf_print 1734 ("%s :unable to get wmi_handle to Endpoint %d\n", 1735 __func__, htc_packet->Endpoint); 1736 qdf_nbuf_free(evt_buf); 1737 return; 1738 } 1739 1740 id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID); 1741 idx = wmi_unified_get_event_handler_ix(wmi_handle, id); 1742 if (qdf_unlikely(idx == A_ERROR)) { 1743 WMI_LOGD("%s 
:event handler is not registered: event id 0x%x\n", 1744 __func__, id); 1745 qdf_nbuf_free(evt_buf); 1746 return; 1747 } 1748 qdf_spin_lock_bh(&soc->ctx_lock); 1749 exec_ctx = wmi_handle->ctx[idx]; 1750 qdf_spin_unlock_bh(&soc->ctx_lock); 1751 1752 #ifdef WMI_INTERFACE_EVENT_LOGGING 1753 if (wmi_handle->log_info.wmi_logging_enable) { 1754 uint8_t *data; 1755 data = qdf_nbuf_data(evt_buf); 1756 1757 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); 1758 /* Exclude 4 bytes of TLV header */ 1759 WMI_RX_EVENT_RECORD(wmi_handle, id, data + 1760 wmi_handle->log_info.buf_offset_event); 1761 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); 1762 } 1763 #endif 1764 1765 if (exec_ctx == WMI_RX_WORK_CTX) { 1766 wmi_process_fw_event_worker_thread_ctx 1767 (wmi_handle, htc_packet); 1768 } else if (exec_ctx > WMI_RX_WORK_CTX) { 1769 wmi_process_fw_event_default_ctx 1770 (wmi_handle, htc_packet, exec_ctx); 1771 } else { 1772 qdf_print("%s :Invalid event context %d\n", __func__, exec_ctx); 1773 qdf_nbuf_free(evt_buf); 1774 } 1775 1776 } 1777 1778 /** 1779 * wmi_process_fw_event() - process any fw event 1780 * @wmi_handle: wmi handle 1781 * @evt_buf: fw event buffer 1782 * 1783 * This function process fw event in caller context 1784 * 1785 * Return: none 1786 */ 1787 void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf) 1788 { 1789 __wmi_control_rx(wmi_handle, evt_buf); 1790 } 1791 1792 /** 1793 * __wmi_control_rx() - process serialize wmi event callback 1794 * @wmi_handle: wmi handle 1795 * @evt_buf: fw event buffer 1796 * 1797 * Return: none 1798 */ 1799 void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf) 1800 { 1801 uint32_t id; 1802 uint8_t *data; 1803 uint32_t len; 1804 void *wmi_cmd_struct_ptr = NULL; 1805 #ifndef WMI_NON_TLV_SUPPORT 1806 int tlv_ok_status = 0; 1807 #endif 1808 uint32_t idx = 0; 1809 1810 id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID); 1811 1812 if (qdf_nbuf_pull_head(evt_buf, 
sizeof(WMI_CMD_HDR)) == NULL) 1813 goto end; 1814 1815 data = qdf_nbuf_data(evt_buf); 1816 len = qdf_nbuf_len(evt_buf); 1817 1818 #ifndef WMI_NON_TLV_SUPPORT 1819 if (wmi_handle->target_type == WMI_TLV_TARGET) { 1820 /* Validate and pad(if necessary) the TLVs */ 1821 tlv_ok_status = 1822 wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle, 1823 data, len, id, 1824 &wmi_cmd_struct_ptr); 1825 if (tlv_ok_status != 0) { 1826 QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, 1827 "%s: Error: id=0x%d, wmitlv check status=%d\n", 1828 __func__, id, tlv_ok_status); 1829 goto end; 1830 } 1831 } 1832 #endif 1833 1834 idx = wmi_unified_get_event_handler_ix(wmi_handle, id); 1835 if (idx == A_ERROR) { 1836 QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, 1837 "%s : event handler is not registered: event id 0x%x\n", 1838 __func__, id); 1839 goto end; 1840 } 1841 #ifdef WMI_INTERFACE_EVENT_LOGGING 1842 if (wmi_handle->log_info.wmi_logging_enable) { 1843 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); 1844 /* Exclude 4 bytes of TLV header */ 1845 if (wmi_handle->ops->is_management_record(id)) { 1846 WMI_MGMT_EVENT_RECORD(wmi_handle, id, data 1847 + wmi_handle->log_info.buf_offset_event); 1848 } else { 1849 WMI_EVENT_RECORD(wmi_handle, id, data + 1850 wmi_handle->log_info.buf_offset_event); 1851 } 1852 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); 1853 } 1854 #endif 1855 /* Call the WMI registered event handler */ 1856 if (wmi_handle->target_type == WMI_TLV_TARGET) 1857 wmi_handle->event_handler[idx] (wmi_handle->scn_handle, 1858 wmi_cmd_struct_ptr, len); 1859 else 1860 wmi_handle->event_handler[idx] (wmi_handle->scn_handle, 1861 data, len); 1862 1863 end: 1864 /* Free event buffer and allocated event tlv */ 1865 #ifndef WMI_NON_TLV_SUPPORT 1866 if (wmi_handle->target_type == WMI_TLV_TARGET) 1867 wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr); 1868 #endif 1869 1870 qdf_nbuf_free(evt_buf); 1871 1872 } 1873 1874 #define 
WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */ 1875 1876 static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id) 1877 { 1878 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 1879 "%s: WLAN_BUG_RCA: Message type %x has exceeded its alloted time of %ds", 1880 __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000); 1881 } 1882 1883 #ifdef CONFIG_SLUB_DEBUG_ON 1884 static void wmi_workqueue_watchdog_bite(void *arg) 1885 { 1886 struct wmi_wq_dbg_info *info = arg; 1887 1888 wmi_workqueue_watchdog_warn(info->wd_msg_type_id); 1889 qdf_print_thread_trace(info->task); 1890 1891 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 1892 "%s: Going down for WMI WQ Watchdog Bite!", __func__); 1893 QDF_BUG(0); 1894 } 1895 #else 1896 static inline void wmi_workqueue_watchdog_bite(void *arg) 1897 { 1898 struct wmi_wq_dbg_info *info = arg; 1899 1900 wmi_workqueue_watchdog_warn(info->wd_msg_type_id); 1901 } 1902 #endif 1903 1904 /** 1905 * wmi_rx_event_work() - process rx event in rx work queue context 1906 * @arg: opaque pointer to wmi handle 1907 * 1908 * This function process any fw event to serialize it through rx worker thread. 
1909 * 1910 * Return: none 1911 */ 1912 static void wmi_rx_event_work(void *arg) 1913 { 1914 wmi_buf_t buf; 1915 struct wmi_unified *wmi = arg; 1916 qdf_timer_t wd_timer; 1917 struct wmi_wq_dbg_info info; 1918 1919 /* initialize WMI workqueue watchdog timer */ 1920 qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite, 1921 &info, QDF_TIMER_TYPE_SW); 1922 qdf_spin_lock_bh(&wmi->eventq_lock); 1923 buf = qdf_nbuf_queue_remove(&wmi->event_queue); 1924 qdf_spin_unlock_bh(&wmi->eventq_lock); 1925 while (buf) { 1926 qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT); 1927 info.wd_msg_type_id = 1928 WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID); 1929 info.wmi_wq = wmi->wmi_rx_work_queue; 1930 info.task = qdf_get_current_task(); 1931 __wmi_control_rx(wmi, buf); 1932 qdf_timer_stop(&wd_timer); 1933 qdf_spin_lock_bh(&wmi->eventq_lock); 1934 buf = qdf_nbuf_queue_remove(&wmi->event_queue); 1935 qdf_spin_unlock_bh(&wmi->eventq_lock); 1936 } 1937 qdf_timer_free(&wd_timer); 1938 } 1939 1940 #ifdef FEATURE_RUNTIME_PM 1941 /** 1942 * wmi_runtime_pm_init() - initialize runtime pm wmi variables 1943 * @wmi_handle: wmi context 1944 */ 1945 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle) 1946 { 1947 qdf_atomic_init(&wmi_handle->runtime_pm_inprogress); 1948 } 1949 1950 /** 1951 * wmi_set_runtime_pm_inprogress() - set runtime pm progress flag 1952 * @wmi_handle: wmi context 1953 * @val: runtime pm progress flag 1954 */ 1955 void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val) 1956 { 1957 qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val); 1958 } 1959 1960 /** 1961 * wmi_get_runtime_pm_inprogress() - get runtime pm progress flag 1962 * @wmi_handle: wmi context 1963 */ 1964 inline bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle) 1965 { 1966 return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress); 1967 } 1968 #else 1969 static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle) 1970 { 1971 } 1972 #endif 1973 1974 /** 
1975 * wmi_unified_get_soc_handle: Get WMI SoC handle 1976 * @param wmi_handle: WMI context got from wmi_attach 1977 * 1978 * return: Pointer to Soc handle 1979 */ 1980 void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle) 1981 { 1982 return wmi_handle->soc; 1983 } 1984 1985 /** 1986 * wmi_interface_logging_init: Interface looging init 1987 * @param wmi_handle: Pointer to wmi handle object 1988 * 1989 * return: None 1990 */ 1991 #ifdef WMI_INTERFACE_EVENT_LOGGING 1992 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle, 1993 uint32_t pdev_idx) 1994 { 1995 if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) { 1996 qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock); 1997 wmi_debugfs_init(wmi_handle, pdev_idx); 1998 } 1999 } 2000 #else 2001 static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle, 2002 uint32_t pdev_idx) 2003 { 2004 } 2005 #endif 2006 2007 /** 2008 * wmi_target_params_init: Target specific params init 2009 * @param wmi_soc: Pointer to wmi soc object 2010 * @param wmi_handle: Pointer to wmi handle object 2011 * 2012 * return: None 2013 */ 2014 #ifndef CONFIG_MCL 2015 static inline void wmi_target_params_init(struct wmi_soc *soc, 2016 struct wmi_unified *wmi_handle) 2017 { 2018 wmi_handle->pdev_param = soc->pdev_param; 2019 wmi_handle->vdev_param = soc->vdev_param; 2020 wmi_handle->services = soc->services; 2021 } 2022 #else 2023 static inline void wmi_target_params_init(struct wmi_soc *soc, 2024 struct wmi_unified *wmi_handle) 2025 { 2026 wmi_handle->services = soc->services; 2027 } 2028 #endif 2029 2030 /** 2031 * wmi_unified_get_pdev_handle: Get WMI SoC handle 2032 * @param wmi_soc: Pointer to wmi soc object 2033 * @param pdev_idx: pdev index 2034 * 2035 * return: Pointer to wmi handle or NULL on failure 2036 */ 2037 void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx) 2038 { 2039 struct wmi_unified *wmi_handle; 2040 2041 if (pdev_idx >= WMI_MAX_RADIOS) 2042 return 
NULL; 2043 2044 if (soc->wmi_pdev[pdev_idx] == NULL) { 2045 wmi_handle = 2046 (struct wmi_unified *) qdf_mem_malloc( 2047 sizeof(struct wmi_unified)); 2048 if (wmi_handle == NULL) { 2049 qdf_print("allocation of wmi handle failed %zu\n", 2050 sizeof(struct wmi_unified)); 2051 return NULL; 2052 } 2053 wmi_handle->scn_handle = soc->scn_handle; 2054 wmi_handle->event_id = soc->event_id; 2055 wmi_handle->event_handler = soc->event_handler; 2056 wmi_handle->ctx = soc->ctx; 2057 wmi_handle->ops = soc->ops; 2058 qdf_spinlock_create(&wmi_handle->eventq_lock); 2059 qdf_nbuf_queue_init(&wmi_handle->event_queue); 2060 2061 qdf_create_work(0, &wmi_handle->rx_event_work, 2062 wmi_rx_event_work, wmi_handle); 2063 wmi_handle->wmi_rx_work_queue = 2064 qdf_create_workqueue("wmi_rx_event_work_queue"); 2065 if (NULL == wmi_handle->wmi_rx_work_queue) { 2066 WMI_LOGE("failed to create wmi_rx_event_work_queue"); 2067 goto error; 2068 } 2069 wmi_handle->wmi_events = soc->wmi_events; 2070 wmi_target_params_init(soc, wmi_handle); 2071 wmi_handle->soc = soc; 2072 wmi_interface_logging_init(wmi_handle, pdev_idx); 2073 qdf_atomic_init(&wmi_handle->pending_cmds); 2074 qdf_atomic_init(&wmi_handle->is_target_suspended); 2075 wmi_handle->target_type = soc->target_type; 2076 wmi_handle->wmi_max_cmds = soc->wmi_max_cmds; 2077 2078 soc->wmi_pdev[pdev_idx] = wmi_handle; 2079 } else 2080 wmi_handle = soc->wmi_pdev[pdev_idx]; 2081 2082 wmi_handle->wmi_stopinprogress = 0; 2083 wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx]; 2084 wmi_handle->htc_handle = soc->htc_handle; 2085 wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx]; 2086 2087 return wmi_handle; 2088 2089 error: 2090 qdf_mem_free(wmi_handle); 2091 2092 return NULL; 2093 } 2094 qdf_export_symbol(wmi_unified_get_pdev_handle); 2095 2096 static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t); 2097 2098 void wmi_unified_register_module(enum wmi_target_type target_type, 2099 void (*wmi_attach)(wmi_unified_t 
wmi_handle)) 2100 { 2101 if (target_type < WMI_MAX_TARGET_TYPE) 2102 wmi_attach_register[target_type] = wmi_attach; 2103 2104 return; 2105 } 2106 qdf_export_symbol(wmi_unified_register_module); 2107 2108 /** 2109 * wmi_unified_attach() - attach for unified WMI 2110 * @scn_handle: handle to SCN 2111 * @osdev: OS device context 2112 * @target_type: TLV or not-TLV based target 2113 * @use_cookie: cookie based allocation enabled/disabled 2114 * @ops: umac rx callbacks 2115 * @psoc: objmgr psoc 2116 * 2117 * @Return: wmi handle. 2118 */ 2119 void *wmi_unified_attach(void *scn_handle, 2120 struct wmi_unified_attach_params *param) 2121 { 2122 struct wmi_unified *wmi_handle; 2123 struct wmi_soc *soc; 2124 2125 soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc)); 2126 if (soc == NULL) { 2127 qdf_print("Allocation of wmi_soc failed %zu\n", 2128 sizeof(struct wmi_soc)); 2129 return NULL; 2130 } 2131 2132 wmi_handle = 2133 (struct wmi_unified *) qdf_mem_malloc( 2134 sizeof(struct wmi_unified)); 2135 if (wmi_handle == NULL) { 2136 qdf_mem_free(soc); 2137 qdf_print("allocation of wmi handle failed %zu\n", 2138 sizeof(struct wmi_unified)); 2139 return NULL; 2140 } 2141 wmi_handle->soc = soc; 2142 wmi_handle->soc->soc_idx = param->soc_id; 2143 wmi_handle->event_id = soc->event_id; 2144 wmi_handle->event_handler = soc->event_handler; 2145 wmi_handle->ctx = soc->ctx; 2146 wmi_handle->wmi_events = soc->wmi_events; 2147 wmi_target_params_init(soc, wmi_handle); 2148 wmi_handle->scn_handle = scn_handle; 2149 soc->scn_handle = scn_handle; 2150 qdf_atomic_init(&wmi_handle->pending_cmds); 2151 qdf_atomic_init(&wmi_handle->is_target_suspended); 2152 wmi_runtime_pm_init(wmi_handle); 2153 qdf_spinlock_create(&wmi_handle->eventq_lock); 2154 qdf_nbuf_queue_init(&wmi_handle->event_queue); 2155 qdf_create_work(0, &wmi_handle->rx_event_work, 2156 wmi_rx_event_work, wmi_handle); 2157 wmi_handle->wmi_rx_work_queue = 2158 qdf_create_workqueue("wmi_rx_event_work_queue"); 2159 if (NULL == 
wmi_handle->wmi_rx_work_queue) { 2160 WMI_LOGE("failed to create wmi_rx_event_work_queue"); 2161 goto error; 2162 } 2163 wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0); 2164 /* Attach mc_thread context processing function */ 2165 wmi_handle->rx_ops.wma_process_fw_event_handler_cbk = 2166 param->rx_ops->wma_process_fw_event_handler_cbk; 2167 wmi_handle->target_type = param->target_type; 2168 soc->target_type = param->target_type; 2169 2170 if (param->target_type >= WMI_MAX_TARGET_TYPE) 2171 goto error; 2172 2173 if (wmi_attach_register[param->target_type]) { 2174 wmi_attach_register[param->target_type](wmi_handle); 2175 } else { 2176 WMI_LOGE("wmi attach is not registered"); 2177 goto error; 2178 } 2179 /* Assign target cookie capablity */ 2180 wmi_handle->use_cookie = param->use_cookie; 2181 wmi_handle->osdev = param->osdev; 2182 wmi_handle->wmi_stopinprogress = 0; 2183 wmi_handle->wmi_max_cmds = param->max_commands; 2184 soc->wmi_max_cmds = param->max_commands; 2185 /* Increase the ref count once refcount infra is present */ 2186 soc->wmi_psoc = param->psoc; 2187 qdf_spinlock_create(&soc->ctx_lock); 2188 2189 soc->ops = wmi_handle->ops; 2190 soc->wmi_pdev[0] = wmi_handle; 2191 2192 return wmi_handle; 2193 2194 error: 2195 qdf_mem_free(soc); 2196 qdf_mem_free(wmi_handle); 2197 2198 return NULL; 2199 } 2200 2201 /** 2202 * wmi_unified_detach() - detach for unified WMI 2203 * 2204 * @wmi_handle : handle to wmi. 2205 * 2206 * @Return: none. 
2207 */ 2208 void wmi_unified_detach(struct wmi_unified *wmi_handle) 2209 { 2210 wmi_buf_t buf; 2211 struct wmi_soc *soc; 2212 uint8_t i; 2213 2214 soc = wmi_handle->soc; 2215 for (i = 0; i < WMI_MAX_RADIOS; i++) { 2216 if (soc->wmi_pdev[i]) { 2217 qdf_flush_workqueue(0, 2218 soc->wmi_pdev[i]->wmi_rx_work_queue); 2219 qdf_destroy_workqueue(0, 2220 soc->wmi_pdev[i]->wmi_rx_work_queue); 2221 wmi_debugfs_remove(soc->wmi_pdev[i]); 2222 buf = qdf_nbuf_queue_remove( 2223 &soc->wmi_pdev[i]->event_queue); 2224 while (buf) { 2225 qdf_nbuf_free(buf); 2226 buf = qdf_nbuf_queue_remove( 2227 &soc->wmi_pdev[i]->event_queue); 2228 } 2229 2230 wmi_log_buffer_free(soc->wmi_pdev[i]); 2231 2232 /* Free events logs list */ 2233 if (soc->wmi_pdev[i]->events_logs_list) 2234 qdf_mem_free( 2235 soc->wmi_pdev[i]->events_logs_list); 2236 2237 qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock); 2238 qdf_mem_free(soc->wmi_pdev[i]); 2239 } 2240 } 2241 qdf_spinlock_destroy(&soc->ctx_lock); 2242 2243 if (soc->wmi_service_bitmap) { 2244 qdf_mem_free(soc->wmi_service_bitmap); 2245 soc->wmi_service_bitmap = NULL; 2246 } 2247 2248 if (soc->wmi_ext_service_bitmap) { 2249 qdf_mem_free(soc->wmi_ext_service_bitmap); 2250 soc->wmi_ext_service_bitmap = NULL; 2251 } 2252 2253 /* Decrease the ref count once refcount infra is present */ 2254 soc->wmi_psoc = NULL; 2255 qdf_mem_free(soc); 2256 } 2257 2258 /** 2259 * wmi_unified_remove_work() - detach for WMI work 2260 * @wmi_handle: handle to WMI 2261 * 2262 * A function that does not fully detach WMI, but just remove work 2263 * queue items associated with it. This is used to make sure that 2264 * before any other processing code that may destroy related contexts 2265 * (HTC, etc), work queue processing on WMI has already been stopped. 
2266 * 2267 * Return: None 2268 */ 2269 void 2270 wmi_unified_remove_work(struct wmi_unified *wmi_handle) 2271 { 2272 wmi_buf_t buf; 2273 2274 qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue); 2275 qdf_spin_lock_bh(&wmi_handle->eventq_lock); 2276 buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue); 2277 while (buf) { 2278 qdf_nbuf_free(buf); 2279 buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue); 2280 } 2281 qdf_spin_unlock_bh(&wmi_handle->eventq_lock); 2282 } 2283 2284 /** 2285 * wmi_htc_tx_complete() - Process htc tx completion 2286 * 2287 * @ctx: handle to wmi 2288 * @htc_packet: pointer to htc packet 2289 * 2290 * @Return: none. 2291 */ 2292 static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt) 2293 { 2294 struct wmi_soc *soc = (struct wmi_soc *) ctx; 2295 wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt); 2296 u_int8_t *buf_ptr; 2297 u_int32_t len; 2298 struct wmi_unified *wmi_handle; 2299 #ifdef WMI_INTERFACE_EVENT_LOGGING 2300 uint32_t cmd_id; 2301 #endif 2302 2303 ASSERT(wmi_cmd_buf); 2304 wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint); 2305 if (wmi_handle == NULL) { 2306 WMI_LOGE("%s: Unable to get wmi handle\n", __func__); 2307 QDF_ASSERT(0); 2308 return; 2309 } 2310 #ifdef WMI_INTERFACE_EVENT_LOGGING 2311 if (wmi_handle && wmi_handle->log_info.wmi_logging_enable) { 2312 cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), 2313 WMI_CMD_HDR, COMMANDID); 2314 2315 qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); 2316 /* Record 16 bytes of WMI cmd tx complete data 2317 - exclude TLV and WMI headers */ 2318 if (wmi_handle->ops->is_management_record(cmd_id)) { 2319 WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id, 2320 qdf_nbuf_data(wmi_cmd_buf) + 2321 wmi_handle->log_info.buf_offset_command); 2322 } else { 2323 WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id, 2324 qdf_nbuf_data(wmi_cmd_buf) + 2325 wmi_handle->log_info.buf_offset_command); 2326 } 2327 2328 qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); 
2329 } 2330 #endif 2331 buf_ptr = (u_int8_t *) wmi_buf_data(wmi_cmd_buf); 2332 len = qdf_nbuf_len(wmi_cmd_buf); 2333 qdf_mem_zero(buf_ptr, len); 2334 qdf_nbuf_free(wmi_cmd_buf); 2335 qdf_mem_free(htc_pkt); 2336 qdf_atomic_dec(&wmi_handle->pending_cmds); 2337 } 2338 2339 /** 2340 * wmi_connect_pdev_htc_service() - WMI API to get connect to HTC service 2341 * 2342 * @wmi_handle: handle to WMI. 2343 * @pdev_idx: Pdev index 2344 * 2345 * @Return: status. 2346 */ 2347 static int wmi_connect_pdev_htc_service(struct wmi_soc *soc, 2348 uint32_t pdev_idx) 2349 { 2350 int status; 2351 struct htc_service_connect_resp response; 2352 struct htc_service_connect_req connect; 2353 2354 OS_MEMZERO(&connect, sizeof(connect)); 2355 OS_MEMZERO(&response, sizeof(response)); 2356 2357 /* meta data is unused for now */ 2358 connect.pMetaData = NULL; 2359 connect.MetaDataLength = 0; 2360 /* these fields are the same for all service endpoints */ 2361 connect.EpCallbacks.pContext = soc; 2362 connect.EpCallbacks.EpTxCompleteMultiple = 2363 NULL /* Control path completion ar6000_tx_complete */; 2364 connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */; 2365 connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */; 2366 connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */; 2367 connect.EpCallbacks.EpTxComplete = 2368 wmi_htc_tx_complete /* ar6000_tx_queue_full */; 2369 2370 /* connect to control service */ 2371 connect.service_id = soc->svc_ids[pdev_idx]; 2372 status = htc_connect_service(soc->htc_handle, &connect, 2373 &response); 2374 2375 2376 if (status != EOK) { 2377 qdf_print 2378 ("Failed to connect to WMI CONTROL service status:%d\n", 2379 status); 2380 return status; 2381 } 2382 2383 soc->wmi_endpoint_id[pdev_idx] = response.Endpoint; 2384 soc->max_msg_len[pdev_idx] = response.MaxMsgLength; 2385 2386 return 0; 2387 } 2388 2389 /** 2390 * wmi_unified_connect_htc_service() - WMI API to get connect to HTC service 2391 * 2392 * @wmi_handle: handle to WMI. 
2393 * 2394 * @Return: status. 2395 */ 2396 QDF_STATUS 2397 wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle, 2398 void *htc_handle) 2399 { 2400 uint32_t i; 2401 uint8_t wmi_ep_count; 2402 2403 wmi_handle->soc->htc_handle = htc_handle; 2404 2405 wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle); 2406 if (wmi_ep_count > WMI_MAX_RADIOS) 2407 return QDF_STATUS_E_FAULT; 2408 2409 for (i = 0; i < wmi_ep_count; i++) 2410 wmi_connect_pdev_htc_service(wmi_handle->soc, i); 2411 2412 wmi_handle->htc_handle = htc_handle; 2413 wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0]; 2414 wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0]; 2415 2416 return QDF_STATUS_SUCCESS; 2417 } 2418 2419 /** 2420 * wmi_get_host_credits() - WMI API to get updated host_credits 2421 * 2422 * @wmi_handle: handle to WMI. 2423 * 2424 * @Return: updated host_credits. 2425 */ 2426 int wmi_get_host_credits(wmi_unified_t wmi_handle) 2427 { 2428 int host_credits = 0; 2429 2430 htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle, 2431 &host_credits); 2432 return host_credits; 2433 } 2434 2435 /** 2436 * wmi_get_pending_cmds() - WMI API to get WMI Pending Commands in the HTC 2437 * queue 2438 * 2439 * @wmi_handle: handle to WMI. 2440 * 2441 * @Return: Pending Commands in the HTC queue. 2442 */ 2443 int wmi_get_pending_cmds(wmi_unified_t wmi_handle) 2444 { 2445 return qdf_atomic_read(&wmi_handle->pending_cmds); 2446 } 2447 2448 /** 2449 * wmi_set_target_suspend() - WMI API to set target suspend state 2450 * 2451 * @wmi_handle: handle to WMI. 2452 * @val: suspend state boolean. 2453 * 2454 * @Return: none. 2455 */ 2456 void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val) 2457 { 2458 qdf_atomic_set(&wmi_handle->is_target_suspended, val); 2459 } 2460 2461 /** 2462 * WMI API to set crash injection state 2463 * @param wmi_handle: handle to WMI. 2464 * @param val: crash injection state boolean. 
2465 */ 2466 void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag) 2467 { 2468 wmi_handle->tag_crash_inject = flag; 2469 } 2470 2471 /** 2472 * WMI API to set bus suspend state 2473 * @param wmi_handle: handle to WMI. 2474 * @param val: suspend state boolean. 2475 */ 2476 void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val) 2477 { 2478 qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val); 2479 } 2480 2481 void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val) 2482 { 2483 wmi_handle->tgt_force_assert_enable = val; 2484 } 2485 2486 /** 2487 * wmi_stop() - generic function to block unified WMI command 2488 * @wmi_handle: handle to WMI. 2489 * 2490 * @Return: success always. 2491 */ 2492 int 2493 wmi_stop(wmi_unified_t wmi_handle) 2494 { 2495 QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO, 2496 "WMI Stop\n"); 2497 wmi_handle->wmi_stopinprogress = 1; 2498 return 0; 2499 } 2500 2501 #ifndef CONFIG_MCL 2502 /** 2503 * API to flush all the previous packets associated with the wmi endpoint 2504 * 2505 * @param wmi_handle : handle to WMI. 2506 */ 2507 void 2508 wmi_flush_endpoint(wmi_unified_t wmi_handle) 2509 { 2510 htc_flush_endpoint(wmi_handle->htc_handle, 2511 wmi_handle->wmi_endpoint_id, 0); 2512 } 2513 qdf_export_symbol(wmi_flush_endpoint); 2514 2515 /** 2516 * wmi_pdev_id_conversion_enable() - API to enable pdev_id conversion in WMI 2517 * By default pdev_id conversion is not done in WMI. 2518 * This API can be used enable conversion in WMI. 2519 * @param wmi_handle : handle to WMI 2520 * Return none 2521 */ 2522 void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle) 2523 { 2524 if (wmi_handle->target_type == WMI_TLV_TARGET) 2525 wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle); 2526 } 2527 2528 #endif 2529