/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "hal_api.h"
#include "hal_reo.h"
#include "target_type.h"
#include "qdf_module.h"
#include "wcss_version.h"
#include <qdf_tracepoint.h>
#include "qdf_ssr_driver_dump.h"

struct tcl_data_cmd gtcl_data_symbol __attribute__((used));

#ifdef QCA_WIFI_QCA6290
void hal_qca6290_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA8074
void hal_qca8074_attach(struct hal_soc *hal);
#endif
#if defined(QCA_WIFI_QCA8074V2) || defined(QCA_WIFI_QCA6018) || \
    defined(QCA_WIFI_QCA9574)
void hal_qca8074v2_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6390
void hal_qca6390_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6490
void hal_qca6490_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN9000
void hal_qcn9000_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN9224
void hal_qcn9224v2_attach(struct hal_soc *hal);
#endif
#if defined(QCA_WIFI_QCN6122) || defined(QCA_WIFI_QCN9160)
void hal_qcn6122_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN6432
void hal_qcn6432_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6750
void hal_qca6750_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA5018
void hal_qca5018_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA5332
void hal_qca5332_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_KIWI
void hal_kiwi_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_WCN6450
void hal_wcn6450_attach(struct hal_soc *hal);
#endif

#ifdef ENABLE_VERBOSE_DEBUG
bool is_hal_verbose_debug_enabled;
#endif

#define HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(x) ((x) + 0x4)
#define HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(x) ((x) + 0x8)
#define HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(x) ((x) + 0xc)
#define HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(x) ((x) + 0x10)

#ifdef ENABLE_HAL_REG_WR_HISTORY
struct hal_reg_write_fail_history hal_reg_wr_hist;

void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
                                 uint32_t offset,
                                 uint32_t wr_val, uint32_t rd_val)
{
    struct hal_reg_write_fail_entry *record;
    int idx;

    idx = hal_history_get_next_index(&hal_soc->reg_wr_fail_hist->index,
                                     HAL_REG_WRITE_HIST_SIZE);

    record = &hal_soc->reg_wr_fail_hist->record[idx];

    record->timestamp = qdf_get_log_timestamp();
    record->reg_offset = offset;
    record->write_val = wr_val;
    record->read_val = rd_val;
}
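/*
 * Illustrative note: the failure history is a circular buffer.
 * hal_history_get_next_index() is assumed to atomically increment the
 * index and mask it with (HAL_REG_WRITE_HIST_SIZE - 1), which is why the
 * index is seeded to -1 below: the first recorded entry then lands in
 * slot 0. A minimal sketch of that indexing, under that assumption:
 *
 *     idx = qdf_atomic_inc_return(&hist->index) &
 *           (HAL_REG_WRITE_HIST_SIZE - 1);
 *
 * This only wraps correctly if HAL_REG_WRITE_HIST_SIZE is a power of two.
 */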
static void hal_reg_write_fail_history_init(struct hal_soc *hal)
{
    hal->reg_wr_fail_hist = &hal_reg_wr_hist;

    qdf_atomic_set(&hal->reg_wr_fail_hist->index, -1);
}
#else
static void hal_reg_write_fail_history_init(struct hal_soc *hal)
{
}
#endif

/**
 * hal_get_srng_ring_id() - get the ring id of a described ring
 * @hal: hal_soc data structure
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings)
 *
 * Return: the ring id or -EINVAL if the ring does not exist.
 */
static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
                                int ring_num, int mac_id)
{
    struct hal_hw_srng_config *ring_config =
        HAL_SRNG_CONFIG(hal, ring_type);
    int ring_id;

    if (ring_num >= ring_config->max_rings) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                  "%s: ring_num exceeded maximum no. of supported rings",
                  __func__);
        /* TODO: This is a programming error. Assert if this happens */
        return -EINVAL;
    }

    /*
     * Some DMAC rings share a common source ring, hence don't provide them
     * with separate ring IDs per LMAC.
     */
    if (ring_config->lmac_ring && !ring_config->dmac_cmn_ring) {
        ring_id = (ring_config->start_ring_id + ring_num +
                   (mac_id * HAL_MAX_RINGS_PER_LMAC));
    } else {
        ring_id = ring_config->start_ring_id + ring_num;
    }

    return ring_id;
}

static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
{
    /* TODO: Should we allocate srng structures dynamically? */
    return &(hal->srng_list[ring_id]);
}

#ifndef SHADOW_REG_CONFIG_DISABLED
#define HP_OFFSET_IN_REG_START 1
#define OFFSET_FROM_HP_TO_TP 4
static void hal_update_srng_hp_tp_address(struct hal_soc *hal_soc,
                                          int shadow_config_index,
                                          int ring_type,
                                          int ring_num)
{
    struct hal_srng *srng;
    int ring_id;
    struct hal_hw_srng_config *ring_config =
        HAL_SRNG_CONFIG(hal_soc, ring_type);

    ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
    if (ring_id < 0)
        return;

    srng = hal_get_srng(hal_soc, ring_id);

    if (ring_config->ring_dir == HAL_SRNG_DST_RING) {
        srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
            + hal_soc->dev_base_addr;
        hal_debug("tp_addr=%pK dev base addr %pK index %u",
                  srng->u.dst_ring.tp_addr, hal_soc->dev_base_addr,
                  shadow_config_index);
    } else {
        srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
            + hal_soc->dev_base_addr;
        hal_debug("hp_addr=%pK dev base addr %pK index %u",
                  srng->u.src_ring.hp_addr,
                  hal_soc->dev_base_addr, shadow_config_index);
    }
}
#endif

#ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
void hal_set_one_target_reg_config(struct hal_soc *hal,
                                   uint32_t target_reg_offset,
                                   int list_index)
{
    int i = list_index;

    qdf_assert_always(i < MAX_GENERIC_SHADOW_REG);
    hal->list_shadow_reg_config[i].target_register =
        target_reg_offset;
    hal->num_generic_shadow_regs_configured++;
}

qdf_export_symbol(hal_set_one_target_reg_config);
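/*
 * Illustrative note: a "shadow register" here is a host-visible offset,
 * SHADOW_REGISTER(i), that the target hardware mirrors onto a configured
 * real register, so a write to the shadow offset does not require waking
 * the target or taking the register-window lock. Under that assumption,
 * a configured entry behaves roughly like (hal_write32 is a hypothetical
 * helper name, not an API in this file):
 *
 *     hal_write32(hal, SHADOW_REGISTER(i), val);
 *     // hardware forwards val to list_shadow_reg_config[i].target_register
 *
 * The functions below choose which target registers get this treatment.
 */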
#define REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET 0x4
#define MAX_REO_REMAP_SHADOW_REGS 4
QDF_STATUS hal_set_shadow_regs(void *hal_soc)
{
    uint32_t target_reg_offset;
    struct hal_soc *hal = (struct hal_soc *)hal_soc;
    int i;
    struct hal_hw_srng_config *srng_config =
        &hal->hw_srng_table[WBM2SW_RELEASE];
    uint32_t reo_reg_base;

    reo_reg_base = hal_get_reo_reg_base_offset(hal_soc);

    target_reg_offset =
        HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(reo_reg_base);

    for (i = 0; i < MAX_REO_REMAP_SHADOW_REGS; i++) {
        hal_set_one_target_reg_config(hal, target_reg_offset, i);
        target_reg_offset += REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET;
    }

    target_reg_offset = srng_config->reg_start[HP_OFFSET_IN_REG_START];
    target_reg_offset += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
                          * HAL_IPA_TX_COMP_RING_IDX);

    hal_set_one_target_reg_config(hal, target_reg_offset, i);
    return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_shadow_regs);

QDF_STATUS hal_construct_shadow_regs(void *hal_soc)
{
    struct hal_soc *hal = (struct hal_soc *)hal_soc;
    int shadow_config_index = hal->num_shadow_registers_configured;
    int i;
    int num_regs = hal->num_generic_shadow_regs_configured;

    for (i = 0; i < num_regs; i++) {
        qdf_assert_always(shadow_config_index < MAX_SHADOW_REGISTERS);
        hal->shadow_config[shadow_config_index].addr =
            hal->list_shadow_reg_config[i].target_register;
        hal->list_shadow_reg_config[i].shadow_config_index =
            shadow_config_index;
        hal->list_shadow_reg_config[i].va =
            SHADOW_REGISTER(shadow_config_index) +
            (uintptr_t)hal->dev_base_addr;
        hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x",
                  hal->shadow_config[shadow_config_index].addr,
                  SHADOW_REGISTER(shadow_config_index),
                  shadow_config_index);
        shadow_config_index++;
        hal->num_shadow_registers_configured++;
    }
    return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_shadow_regs);
#endif

#ifndef SHADOW_REG_CONFIG_DISABLED

QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
                                     int ring_type,
                                     int ring_num)
{
    uint32_t target_register;
    struct hal_soc *hal = (struct hal_soc *)hal_soc;
    struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type];
    int shadow_config_index = hal->num_shadow_registers_configured;

    if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
        QDF_ASSERT(0);
        return QDF_STATUS_E_RESOURCES;
    }

    hal->num_shadow_registers_configured++;

    target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
    target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
                        * ring_num);

    /* if the ring is a dst ring, we need to shadow the tail pointer */
    if (srng_config->ring_dir == HAL_SRNG_DST_RING)
        target_register += OFFSET_FROM_HP_TO_TP;

    hal->shadow_config[shadow_config_index].addr = target_register;

    /* update hp/tp addr in the hal_soc structure */
    hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
                                  ring_num);

    hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x, ring_type %d, ring num %d",
              target_register,
              SHADOW_REGISTER(shadow_config_index),
              shadow_config_index,
              ring_type, ring_num);

    return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_one_shadow_config);
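/*
 * Illustrative note: the target-register computation above leans on the
 * SRNG register layout: each ring's head-pointer (HP) register sits at
 * reg_start[HP_OFFSET_IN_REG_START] + ring_num * reg_size[...], and the
 * tail-pointer (TP) register sits OFFSET_FROM_HP_TO_TP (4) bytes after
 * it. For example, for destination ring 2 (values hypothetical):
 *
 *     target = reg_start[1] + 2 * reg_size[1] + 4;   // TP of ring 2
 */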
QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc)
{
    int ring_type, ring_num;
    struct hal_soc *hal = (struct hal_soc *)hal_soc;

    for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
        struct hal_hw_srng_config *srng_config =
            &hal->hw_srng_table[ring_type];

        if (ring_type == CE_SRC ||
            ring_type == CE_DST ||
            ring_type == CE_DST_STATUS)
            continue;

        if (srng_config->lmac_ring)
            continue;

        for (ring_num = 0; ring_num < srng_config->max_rings;
             ring_num++)
            hal_set_one_shadow_config(hal_soc, ring_type, ring_num);
    }

    return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_srng_shadow_regs);
#else

QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc)
{
    return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_srng_shadow_regs);

QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
                                     int ring_num)
{
    return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hal_set_one_shadow_config);
#endif

void hal_get_shadow_config(void *hal_soc,
                           struct pld_shadow_reg_v2_cfg **shadow_config,
                           int *num_shadow_registers_configured)
{
    struct hal_soc *hal = (struct hal_soc *)hal_soc;

    *shadow_config = &hal->shadow_config[0].v2;
    *num_shadow_registers_configured =
        hal->num_shadow_registers_configured;
}
qdf_export_symbol(hal_get_shadow_config);

#ifdef CONFIG_SHADOW_V3
void hal_get_shadow_v3_config(void *hal_soc,
                              struct pld_shadow_reg_v3_cfg **shadow_config,
                              int *num_shadow_registers_configured)
{
    struct hal_soc *hal = (struct hal_soc *)hal_soc;

    *shadow_config = &hal->shadow_config[0].v3;
    *num_shadow_registers_configured =
        hal->num_shadow_registers_configured;
}
qdf_export_symbol(hal_get_shadow_v3_config);
#endif

static bool hal_validate_shadow_register(struct hal_soc *hal,
                                         uint32_t *destination,
                                         uint32_t *shadow_address)
{
    unsigned int index;
    uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr;
    int destination_ba_offset =
        ((char *)destination) - (char *)hal->dev_base_addr;

    index = shadow_address - shadow_0_offset;

    if (index >= MAX_SHADOW_REGISTERS) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "%s: index %x out of bounds", __func__, index);
        goto error;
    } else if (hal->shadow_config[index].addr != destination_ba_offset) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "%s: sanity check failure, expected %x, found %x",
                  __func__, destination_ba_offset,
                  hal->shadow_config[index].addr);
        goto error;
    }
    return true;
error:
    qdf_print("baddr %pK, destination %pK, shadow_address %pK s0offset %pK index %x",
              hal->dev_base_addr, destination, shadow_address,
              shadow_0_offset, index);
    QDF_BUG(0);
    return false;
}

static void hal_target_based_configure(struct hal_soc *hal)
{
    /*
     * Indicate initialization of srngs to avoid force wake,
     * as umac power collapse is not enabled yet.
     */
    hal->init_phase = true;

    switch (hal->target_type) {
#ifdef QCA_WIFI_QCA6290
    case TARGET_TYPE_QCA6290:
        hal->use_register_windowing = true;
        hal_qca6290_attach(hal);
        break;
#endif
#ifdef QCA_WIFI_QCA6390
    case TARGET_TYPE_QCA6390:
        hal->use_register_windowing = true;
        hal_qca6390_attach(hal);
        break;
#endif
#ifdef QCA_WIFI_QCA6490
    case TARGET_TYPE_QCA6490:
        hal->use_register_windowing = true;
        hal_qca6490_attach(hal);
        break;
#endif
#ifdef QCA_WIFI_QCA6750
    case TARGET_TYPE_QCA6750:
        hal->use_register_windowing = true;
        hal->static_window_map = true;
        hal_qca6750_attach(hal);
        break;
#endif
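    /*
     * Illustrative note: use_register_windowing is set for targets whose
     * register space is larger than the mapped BAR, so accesses go
     * through a sliding window register. static_window_map additionally
     * pins a fixed window layout so most writes avoid the window switch
     * entirely; see the per-target comments in the cases below.
     */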
#ifdef QCA_WIFI_KIWI
    case TARGET_TYPE_KIWI:
    case TARGET_TYPE_MANGO:
    case TARGET_TYPE_PEACH:
        hal->use_register_windowing = true;
        hal_kiwi_attach(hal);
        break;
#endif
#if defined(QCA_WIFI_QCA8074) && defined(WIFI_TARGET_TYPE_3_0)
    case TARGET_TYPE_QCA8074:
        hal_qca8074_attach(hal);
        break;
#endif

#if defined(QCA_WIFI_QCA8074V2)
    case TARGET_TYPE_QCA8074V2:
        hal_qca8074v2_attach(hal);
        break;
#endif

#if defined(QCA_WIFI_QCA6018)
    case TARGET_TYPE_QCA6018:
        hal_qca8074v2_attach(hal);
        break;
#endif

#if defined(QCA_WIFI_QCA9574)
    case TARGET_TYPE_QCA9574:
        hal_qca8074v2_attach(hal);
        break;
#endif

#if defined(QCA_WIFI_QCN6122)
    case TARGET_TYPE_QCN6122:
        hal->use_register_windowing = true;
        /*
         * Static window map is enabled for qcn6122 to use a 2 MB BAR
         * size and use multiple windows to write into registers.
         */
        hal->static_window_map = true;
        hal_qcn6122_attach(hal);
        break;
#endif

#if defined(QCA_WIFI_QCN9160)
    case TARGET_TYPE_QCN9160:
        hal->use_register_windowing = true;
        /*
         * Static window map is enabled for qcn9160 to use a 2 MB BAR
         * size and use multiple windows to write into registers.
         */
        hal->static_window_map = true;
        hal_qcn6122_attach(hal);
        break;
#endif

#if defined(QCA_WIFI_QCN6432)
    case TARGET_TYPE_QCN6432:
        hal->use_register_windowing = true;
        /*
         * Static window map is enabled for qcn6432 to use a 2 MB BAR
         * size and use multiple windows to write into registers.
         */
        hal->static_window_map = true;
        hal_qcn6432_attach(hal);
        break;
#endif

#ifdef QCA_WIFI_QCN9000
    case TARGET_TYPE_QCN9000:
        hal->use_register_windowing = true;
        /*
         * Static window map is enabled for qcn9000 to use a 2 MB BAR
         * size and use multiple windows to write into registers.
         */
        hal->static_window_map = true;
        hal_qcn9000_attach(hal);
        break;
#endif
#ifdef QCA_WIFI_QCA5018
    case TARGET_TYPE_QCA5018:
        hal->use_register_windowing = true;
        hal->static_window_map = true;
        hal_qca5018_attach(hal);
        break;
#endif
#ifdef QCA_WIFI_QCN9224
    case TARGET_TYPE_QCN9224:
        hal->use_register_windowing = true;
        hal->static_window_map = true;
        if (hal->version == 1)
            qdf_assert_always(0);
        else
            hal_qcn9224v2_attach(hal);
        break;
#endif
#ifdef QCA_WIFI_QCA5332
    case TARGET_TYPE_QCA5332:
        hal->use_register_windowing = true;
        hal->static_window_map = true;
        hal_qca5332_attach(hal);
        break;
#endif
#ifdef QCA_WIFI_WCN6450
    case TARGET_TYPE_WCN6450:
        hal->use_register_windowing = true;
        hal->static_window_map = true;
        hal_wcn6450_attach(hal);
        break;
#endif
    default:
        break;
    }
}

uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl)
{
    struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
    struct hif_target_info *tgt_info =
        hif_get_target_info_handle(hal_soc->hif_handle);

    return tgt_info->target_type;
}

qdf_export_symbol(hal_get_target_type);
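/*
 * Overview of FEATURE_HAL_DELAYED_REG_WRITE, as implemented below: SRNG
 * head/tail pointer updates are not always written to hardware from the
 * caller's context. When the bus may be in a low-power state, the write
 * is parked in hal->reg_write_queue and flushed by hal_reg_write_work()
 * on a high-priority workqueue; otherwise it goes out directly. The
 * helpers that follow expose the per-ring and global write statistics
 * (wstats) used to observe and tune that behavior.
 */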
#if defined(FEATURE_HAL_DELAYED_REG_WRITE)
/**
 * hal_is_reg_write_tput_level_high() - throughput level for delayed reg writes
 * @hal: hal_soc pointer
 *
 * Return: true if throughput is high, else false.
 */
static inline bool hal_is_reg_write_tput_level_high(struct hal_soc *hal)
{
    int bw_level = hif_get_bandwidth_level(hal->hif_handle);

    return bw_level >= PLD_BUS_WIDTH_MEDIUM;
}

static inline
char *hal_fill_reg_write_srng_stats(struct hal_srng *srng,
                                    char *buf, qdf_size_t size)
{
    qdf_scnprintf(buf, size, "enq %u deq %u coal %u direct %u",
                  srng->wstats.enqueues, srng->wstats.dequeues,
                  srng->wstats.coalesces, srng->wstats.direct);
    return buf;
}

/* bytes for local buffer */
#define HAL_REG_WRITE_SRNG_STATS_LEN 100

#ifndef WLAN_SOFTUMAC_SUPPORT
void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
{
    struct hal_srng *srng;
    char buf[HAL_REG_WRITE_SRNG_STATS_LEN];
    struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

    srng = hal_get_srng(hal, HAL_SRNG_SW2TCL1);
    hal_debug("SW2TCL1: %s",
              hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

    srng = hal_get_srng(hal, HAL_SRNG_WBM2SW0_RELEASE);
    hal_debug("WBM2SW0: %s",
              hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

    srng = hal_get_srng(hal, HAL_SRNG_REO2SW1);
    hal_debug("REO2SW1: %s",
              hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

    srng = hal_get_srng(hal, HAL_SRNG_REO2SW2);
    hal_debug("REO2SW2: %s",
              hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

    srng = hal_get_srng(hal, HAL_SRNG_REO2SW3);
    hal_debug("REO2SW3: %s",
              hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
}
#else
void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
{
}
#endif

void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
{
    uint32_t *hist;
    struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

    hist = hal->stats.wstats.sched_delay;
    hal_debug("wstats: enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
              qdf_atomic_read(&hal->stats.wstats.enqueues),
              hal->stats.wstats.dequeues,
              qdf_atomic_read(&hal->stats.wstats.coalesces),
              qdf_atomic_read(&hal->stats.wstats.direct),
              qdf_atomic_read(&hal->stats.wstats.q_depth),
              hal->stats.wstats.max_q_depth,
              hist[REG_WRITE_SCHED_DELAY_SUB_100us],
              hist[REG_WRITE_SCHED_DELAY_SUB_1000us],
              hist[REG_WRITE_SCHED_DELAY_SUB_5000us],
              hist[REG_WRITE_SCHED_DELAY_GT_5000us]);
}

int hal_get_reg_write_pending_work(void *hal_soc)
{
    struct hal_soc *hal = (struct hal_soc *)hal_soc;

    return qdf_atomic_read(&hal->active_work_cnt);
}

#endif

#ifdef FEATURE_HAL_DELAYED_REG_WRITE
#ifdef MEMORY_DEBUG
/*
 * Length of the queue (array) used to hold delayed register writes.
 * Must be a power of 2, since indices wrap via a bitmask.
 */
#define HAL_REG_WRITE_QUEUE_LEN 128
#else
#define HAL_REG_WRITE_QUEUE_LEN 32
#endif
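/*
 * Illustrative note: the producer and consumer indices below are wrapped
 * with "& (HAL_REG_WRITE_QUEUE_LEN - 1)" rather than "%", which is why
 * the length must be a power of two. For example, with LEN 32:
 *
 *     write_idx = qdf_atomic_inc_return(&hal->write_idx) & 31;
 *
 * so index 32 wraps back to slot 0.
 */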
/**
 * hal_process_reg_write_q_elem() - process a register write queue element
 * @hal: hal_soc pointer
 * @q_elem: pointer to hal register write queue element
 *
 * Return: The value which was written to the address
 */
static uint32_t
hal_process_reg_write_q_elem(struct hal_soc *hal,
                             struct hal_reg_write_q_elem *q_elem)
{
    struct hal_srng *srng = q_elem->srng;
    uint32_t write_val;

    SRNG_LOCK(&srng->lock);

    srng->reg_write_in_progress = false;
    srng->wstats.dequeues++;

    if (srng->ring_dir == HAL_SRNG_SRC_RING) {
        q_elem->dequeue_val = srng->u.src_ring.hp;
        hal_write_address_32_mb(hal,
                                srng->u.src_ring.hp_addr,
                                srng->u.src_ring.hp, false);
        write_val = srng->u.src_ring.hp;
    } else {
        q_elem->dequeue_val = srng->u.dst_ring.tp;
        hal_write_address_32_mb(hal,
                                srng->u.dst_ring.tp_addr,
                                srng->u.dst_ring.tp, false);
        write_val = srng->u.dst_ring.tp;
    }
    hal_srng_reg_his_add(srng, write_val);

    q_elem->valid = 0;
    srng->last_dequeue_time = q_elem->dequeue_time;
    SRNG_UNLOCK(&srng->lock);

    return write_val;
}

/**
 * hal_reg_write_fill_sched_delay_hist() - fill reg write delay histogram in hal
 * @hal: hal_soc pointer
 * @delay_us: delay in us
 *
 * Return: None
 */
static inline void hal_reg_write_fill_sched_delay_hist(struct hal_soc *hal,
                                                       uint64_t delay_us)
{
    uint32_t *hist;

    hist = hal->stats.wstats.sched_delay;

    if (delay_us < 100)
        hist[REG_WRITE_SCHED_DELAY_SUB_100us]++;
    else if (delay_us < 1000)
        hist[REG_WRITE_SCHED_DELAY_SUB_1000us]++;
    else if (delay_us < 5000)
        hist[REG_WRITE_SCHED_DELAY_SUB_5000us]++;
    else
        hist[REG_WRITE_SCHED_DELAY_GT_5000us]++;
}
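/*
 * Worked example: an element enqueued at time t and dequeued 250 us later
 * falls in the SUB_1000us bin (100 us <= 250 us < 1000 us); a 7 ms delay
 * lands in GT_5000us. These four bins are what
 * hal_dump_reg_write_stats() prints as "sched-delay hist".
 */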
#ifdef SHADOW_WRITE_DELAY

#define SHADOW_WRITE_MIN_DELTA_US 5
#define SHADOW_WRITE_DELAY_US 50

/*
 * Never add those srngs which are performance related.
 * The delay itself will hit performance heavily.
 */
#define IS_SRNG_MATCH(s) ((s)->ring_id == HAL_SRNG_CE_1_DST_STATUS || \
                          (s)->ring_id == HAL_SRNG_CE_1_DST)

static inline bool hal_reg_write_need_delay(struct hal_reg_write_q_elem *elem)
{
    struct hal_srng *srng = elem->srng;
    struct hal_soc *hal;
    qdf_time_t now;
    qdf_iomem_t real_addr;

    if (qdf_unlikely(!srng))
        return false;

    hal = srng->hal_soc;
    if (qdf_unlikely(!hal))
        return false;

    /* Check if it is a target srng, with a valid shadow reg */
    if (qdf_likely(!IS_SRNG_MATCH(srng)))
        return false;

    if (srng->ring_dir == HAL_SRNG_SRC_RING)
        real_addr = SRNG_SRC_ADDR(srng, HP);
    else
        real_addr = SRNG_DST_ADDR(srng, TP);
    if (!hal_validate_shadow_register(hal, real_addr, elem->addr))
        return false;

    /* Check the time delta from the last write of the same srng */
    now = qdf_get_log_timestamp();
    if (qdf_log_timestamp_to_usecs(now - srng->last_dequeue_time) >
        SHADOW_WRITE_MIN_DELTA_US)
        return false;

    /* Delay dequeue, and record */
    qdf_udelay(SHADOW_WRITE_DELAY_US);

    srng->wstats.dequeue_delay++;
    hal->stats.wstats.dequeue_delay++;

    return true;
}
#else
static inline bool hal_reg_write_need_delay(struct hal_reg_write_q_elem *elem)
{
    return false;
}
#endif

/**
 * hal_reg_write_work() - Worker to process delayed writes
 * @arg: hal_soc pointer
 *
 * Return: None
 */
static void hal_reg_write_work(void *arg)
{
    int32_t q_depth, write_val;
    struct hal_soc *hal = arg;
    struct hal_reg_write_q_elem *q_elem;
    uint64_t delta_us;
    uint8_t ring_id;
    uint32_t *addr;
    uint32_t num_processed = 0;

    q_elem = &hal->reg_write_queue[(hal->read_idx)];
    q_elem->work_scheduled_time = qdf_get_log_timestamp();
    q_elem->cpu_id = qdf_get_cpu();

    /* Make sure q_elem is read consistently across cores */
    qdf_rmb();
    if (!q_elem->valid)
        return;

    q_depth = qdf_atomic_read(&hal->stats.wstats.q_depth);
    if (q_depth > hal->stats.wstats.max_q_depth)
        hal->stats.wstats.max_q_depth = q_depth;

    if (hif_prevent_link_low_power_states(hal->hif_handle)) {
        hal->stats.wstats.prevent_l1_fails++;
        return;
    }

    while (true) {
        qdf_rmb();
        if (!q_elem->valid)
            break;

        q_elem->dequeue_time = qdf_get_log_timestamp();
        ring_id = q_elem->srng->ring_id;
        addr = q_elem->addr;
        delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
                                              q_elem->enqueue_time);
        hal_reg_write_fill_sched_delay_hist(hal, delta_us);

        hal->stats.wstats.dequeues++;
        qdf_atomic_dec(&hal->stats.wstats.q_depth);

        if (hal_reg_write_need_delay(q_elem))
            hal_verbose_debug("Delay reg writer for srng 0x%x, addr 0x%pK",
                              q_elem->srng->ring_id, q_elem->addr);

        write_val = hal_process_reg_write_q_elem(hal, q_elem);
        hal_verbose_debug("read_idx %u srng 0x%x, addr 0x%pK dequeue_val %u sched delay %llu us",
                          hal->read_idx, ring_id, addr, write_val, delta_us);

        qdf_trace_dp_del_reg_write(ring_id, q_elem->enqueue_val,
                                   q_elem->dequeue_val,
                                   q_elem->enqueue_time,
                                   q_elem->dequeue_time);

        num_processed++;
        hal->read_idx = (hal->read_idx + 1) &
                        (HAL_REG_WRITE_QUEUE_LEN - 1);
        q_elem = &hal->reg_write_queue[(hal->read_idx)];
    }

    hif_allow_link_low_power_states(hal->hif_handle);
    /*
     * Decrement active_work_cnt by the number of elements dequeued after
     * hif_allow_link_low_power_states.
     * This makes sure that hif_try_complete_tasks will wait till we make
     * the bus access in hif_allow_link_low_power_states. This avoids a
     * race condition between the delayed register worker and bus suspend
     * (system suspend or runtime suspend).
     *
     * The following decrement should be done at the end!
     */
    qdf_atomic_sub(num_processed, &hal->active_work_cnt);
}

static void __hal_flush_reg_write_work(struct hal_soc *hal)
{
    qdf_flush_work(&hal->reg_write_work);
    qdf_disable_work(&hal->reg_write_work);
}

void hal_flush_reg_write_work(hal_soc_handle_t hal_handle)
{
    __hal_flush_reg_write_work((struct hal_soc *)hal_handle);
}

/**
 * hal_reg_write_enqueue() - enqueue register writes into kworker
 * @hal_soc: hal_soc pointer
 * @srng: srng pointer
 * @addr: iomem address of register
 * @value: value to be written to iomem address
 *
 * This function executes from within the SRNG LOCK
 *
 * Return: None
 */
static void hal_reg_write_enqueue(struct hal_soc *hal_soc,
                                  struct hal_srng *srng,
                                  void __iomem *addr,
                                  uint32_t value)
{
    struct hal_reg_write_q_elem *q_elem;
    uint32_t write_idx;

    if (srng->reg_write_in_progress) {
        hal_verbose_debug("Already in progress srng ring id 0x%x addr 0x%pK val %u",
                          srng->ring_id, addr, value);
        qdf_atomic_inc(&hal_soc->stats.wstats.coalesces);
        srng->wstats.coalesces++;
        return;
    }

    write_idx = qdf_atomic_inc_return(&hal_soc->write_idx);

    write_idx = write_idx & (HAL_REG_WRITE_QUEUE_LEN - 1);

    q_elem = &hal_soc->reg_write_queue[write_idx];

    if (q_elem->valid) {
        hal_err("queue full");
        QDF_BUG(0);
        return;
    }

    qdf_atomic_inc(&hal_soc->stats.wstats.enqueues);
    srng->wstats.enqueues++;

    qdf_atomic_inc(&hal_soc->stats.wstats.q_depth);

    q_elem->srng = srng;
    q_elem->addr = addr;
    q_elem->enqueue_val = value;
    q_elem->enqueue_time = qdf_get_log_timestamp();

    /*
     * Before the valid flag is set to true, all the other
     * fields in the q_elem need to be updated in memory.
     * Else there is a chance that the dequeuing worker thread
     * might read stale entries and process an incorrect srng.
     */
    qdf_wmb();
    q_elem->valid = true;

    /*
     * After all other fields in the q_elem have been updated
     * in memory successfully, the valid flag needs to be updated
     * in memory in time too.
     * Else there is a chance that the dequeuing worker thread
     * might read a stale valid flag, and the work will be bypassed
     * for this round. And if there is no other work scheduled
     * later, this hal register write won't be updated any more.
     */
    qdf_wmb();

    srng->reg_write_in_progress = true;
    qdf_atomic_inc(&hal_soc->active_work_cnt);

    hal_verbose_debug("write_idx %u srng ring id 0x%x addr 0x%pK val %u",
                      write_idx, srng->ring_id, addr, value);

    qdf_queue_work(hal_soc->qdf_dev, hal_soc->reg_write_wq,
                   &hal_soc->reg_write_work);
}
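/*
 * Illustrative note on the ordering contract above: the producer
 * publishes an element with
 *
 *     fill q_elem fields;  qdf_wmb();  q_elem->valid = true;
 *
 * and the consumer in hal_reg_write_work() pairs it with
 *
 *     qdf_rmb();  if (q_elem->valid) { read q_elem fields; }
 *
 * so the worker never observes valid == true with stale payload fields.
 */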
/**
 * hal_delayed_reg_write_init() - Initialization function for delayed reg writes
 * @hal: hal_soc pointer
 *
 * Initialize main data structures to process register writes in a delayed
 * workqueue.
 *
 * Return: QDF_STATUS_SUCCESS on success else a QDF error.
 */
static QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
{
    hal->reg_write_wq =
        qdf_alloc_high_prior_ordered_workqueue("hal_register_write_wq");
    qdf_create_work(0, &hal->reg_write_work, hal_reg_write_work, hal);
    hal->reg_write_queue = qdf_mem_malloc(HAL_REG_WRITE_QUEUE_LEN *
                                          sizeof(*hal->reg_write_queue));
    if (!hal->reg_write_queue) {
        hal_err("unable to allocate memory");
        QDF_BUG(0);
        return QDF_STATUS_E_NOMEM;
    }

    /* Initial value of indices */
    hal->read_idx = 0;
    qdf_atomic_set(&hal->write_idx, -1);
    return QDF_STATUS_SUCCESS;
}

/**
 * hal_delayed_reg_write_deinit() - De-initialize delayed reg write processing
 * @hal: hal_soc pointer
 *
 * De-initialize main data structures to process register writes in a delayed
 * workqueue.
 *
 * Return: None
 */
static void hal_delayed_reg_write_deinit(struct hal_soc *hal)
{
    __hal_flush_reg_write_work(hal);

    qdf_flush_workqueue(0, hal->reg_write_wq);
    qdf_destroy_workqueue(0, hal->reg_write_wq);
    qdf_mem_free(hal->reg_write_queue);
}

#else
static inline QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
{
    return QDF_STATUS_SUCCESS;
}

static inline void hal_delayed_reg_write_deinit(struct hal_soc *hal)
{
}
#endif

#ifdef FEATURE_HAL_DELAYED_REG_WRITE
#ifdef HAL_RECORD_SUSPEND_WRITE
static struct hal_suspend_write_history g_hal_suspend_write_history;

static
void hal_event_suspend_record(uint8_t ring_id, uint32_t value, uint32_t count)
{
    uint32_t index = qdf_atomic_read(&g_hal_suspend_write_history.index) &
                     (HAL_SUSPEND_WRITE_HISTORY_MAX - 1);
    struct hal_suspend_write_record *cur_event =
        &g_hal_suspend_write_history.record[index];

    cur_event->ts = qdf_get_log_timestamp();
    cur_event->ring_id = ring_id;
    cur_event->value = value;
    cur_event->direct_wcount = count;
    qdf_atomic_inc(&g_hal_suspend_write_history.index);
}

static inline
void hal_record_suspend_write(uint8_t ring_id, uint32_t value, uint32_t count)
{
    if (hif_rtpm_get_state() >= HIF_RTPM_STATE_SUSPENDING)
        hal_event_suspend_record(ring_id, value, count);
}
#else
static inline
void hal_record_suspend_write(uint8_t ring_id, uint32_t value, uint32_t count)
{
}
#endif
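/*
 * hal_delayed_reg_write() below decides, per write, between two paths:
 * write the register directly (bus awake, high throughput, or endpoint
 * voting disallows deferral) or hand the update to the delayed-write
 * worker via hal_reg_write_enqueue(). Roughly (condition name is a
 * paraphrase, not a real symbol):
 *
 *     if (can_touch_hw_now)
 *         hal_write_address_32_mb(hal, addr, value, false);  // direct
 *     else
 *         hal_reg_write_enqueue(hal, srng, addr, value);     // deferred
 *
 * The QCA6750 variant additionally splits CE rings from DP rings, since
 * they use different HIF endpoint-vote classes.
 */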
#ifdef QCA_WIFI_QCA6750
void hal_delayed_reg_write(struct hal_soc *hal_soc,
                           struct hal_srng *srng,
                           void __iomem *addr,
                           uint32_t value)
{
    uint8_t vote_access;

    switch (srng->ring_type) {
    case CE_SRC:
    case CE_DST:
    case CE_DST_STATUS:
        vote_access = hif_get_ep_vote_access(hal_soc->hif_handle,
                                             HIF_EP_VOTE_NONDP_ACCESS);
        if ((vote_access == HIF_EP_VOTE_ACCESS_DISABLE) ||
            (vote_access == HIF_EP_VOTE_INTERMEDIATE_ACCESS &&
             PLD_MHI_STATE_L0 ==
             pld_get_mhi_state(hal_soc->qdf_dev->dev))) {
            hal_write_address_32_mb(hal_soc, addr, value, false);
            hal_srng_reg_his_add(srng, value);
            qdf_atomic_inc(&hal_soc->stats.wstats.direct);
            srng->wstats.direct++;
        } else {
            hal_reg_write_enqueue(hal_soc, srng, addr, value);
        }
        break;
    default:
        if (hif_get_ep_vote_access(hal_soc->hif_handle,
                                   HIF_EP_VOTE_DP_ACCESS) ==
            HIF_EP_VOTE_ACCESS_DISABLE ||
            hal_is_reg_write_tput_level_high(hal_soc) ||
            PLD_MHI_STATE_L0 ==
            pld_get_mhi_state(hal_soc->qdf_dev->dev)) {
            hal_write_address_32_mb(hal_soc, addr, value, false);
            hal_srng_reg_his_add(srng, value);
            qdf_atomic_inc(&hal_soc->stats.wstats.direct);
            srng->wstats.direct++;
        } else {
            hal_reg_write_enqueue(hal_soc, srng, addr, value);
        }

        break;
    }
}
#else
void hal_delayed_reg_write(struct hal_soc *hal_soc,
                           struct hal_srng *srng,
                           void __iomem *addr,
                           uint32_t value)
{
    if (hal_is_reg_write_tput_level_high(hal_soc) ||
        pld_is_device_awake(hal_soc->qdf_dev->dev)) {
        qdf_atomic_inc(&hal_soc->stats.wstats.direct);
        srng->wstats.direct++;
        hal_write_address_32_mb(hal_soc, addr, value, false);
        hal_srng_reg_his_add(srng, value);
    } else {
        hal_reg_write_enqueue(hal_soc, srng, addr, value);
    }

    hal_record_suspend_write(srng->ring_id, value, srng->wstats.direct);
}
#endif
#endif

#ifdef HAL_SRNG_REG_HIS_DEBUG
inline void hal_free_srng_history(struct hal_soc *hal)
{
    int i;

    for (i = 0; i < HAL_SRNG_ID_MAX; i++)
        qdf_mem_free(hal->srng_list[i].reg_his_ctx);
}

inline bool hal_alloc_srng_history(struct hal_soc *hal)
{
    int i;

    for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
        hal->srng_list[i].reg_his_ctx =
            qdf_mem_malloc(sizeof(struct hal_srng_reg_his_ctx));
        if (!hal->srng_list[i].reg_his_ctx) {
            hal_err("srng_hist alloc failed");
            hal_free_srng_history(hal);
            return false;
        }
    }

    return true;
}
#else
inline void hal_free_srng_history(struct hal_soc *hal)
{
}

inline bool hal_alloc_srng_history(struct hal_soc *hal)
{
    return true;
}
#endif
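/*
 * Typical lifecycle (a sketch; the caller context is hypothetical): the
 * DP layer attaches HAL once per SoC and tears it down in reverse order:
 *
 *     void *hal = hal_attach(hif_handle, qdf_dev);
 *     if (!hal)
 *         return QDF_STATUS_E_FAILURE;
 *     // ... set up SRNGs, run traffic ...
 *     hal_detach(hal);
 *
 * hal_attach() below allocates the hal_soc, maps the shadow read/write
 * pointer memory, and wires up the target-specific ops table.
 */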
void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev)
{
    struct hal_soc *hal;
    int i;

    hal = qdf_mem_common_alloc(sizeof(*hal));

    if (!hal) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "%s: hal_soc allocation failed", __func__);
        goto fail0;
    }
    hal->hif_handle = hif_handle;
    hal->dev_base_addr = hif_get_dev_ba(hif_handle); /* UMAC */
    hal->dev_base_addr_ce = hif_get_dev_ba_ce(hif_handle); /* CE */
    hal->dev_base_addr_cmem = hif_get_dev_ba_cmem(hif_handle); /* CMEM */
    hal->dev_base_addr_pmm = hif_get_dev_ba_pmm(hif_handle); /* PMM */
    hal->qdf_dev = qdf_dev;
    hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent(
        qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
        HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr));
    if (!hal->shadow_rdptr_mem_paddr) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "%s: hal->shadow_rdptr_mem_paddr allocation failed",
                  __func__);
        goto fail1;
    }
    qdf_mem_zero(hal->shadow_rdptr_mem_vaddr,
                 sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX);

    hal->shadow_wrptr_mem_vaddr =
        (uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
            sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
            &(hal->shadow_wrptr_mem_paddr));
    if (!hal->shadow_wrptr_mem_vaddr) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "%s: hal->shadow_wrptr_mem_vaddr allocation failed",
                  __func__);
        goto fail2;
    }
    qdf_mem_zero(hal->shadow_wrptr_mem_vaddr,
                 sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS);

    if (!hal_alloc_srng_history(hal))
        goto fail2;

    for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
        hal->srng_list[i].initialized = 0;
        hal->srng_list[i].ring_id = i;
    }

    qdf_spinlock_create(&hal->register_access_lock);
    hal->register_window = 0;
    hal->target_type = hal_get_target_type(hal_soc_to_hal_soc_handle(hal));
    hal->version = hif_get_soc_version(hif_handle);
    hal->ops = qdf_mem_malloc(sizeof(*hal->ops));

    if (!hal->ops) {
        hal_err("unable to allocate memory for HAL ops");
        goto fail3;
    }

    hal_target_based_configure(hal);

    hal_reg_write_fail_history_init(hal);

    qdf_minidump_log(hal, sizeof(*hal), "hal_soc");

    qdf_ssr_driver_dump_register_region("hal_soc", hal, sizeof(*hal));

    qdf_atomic_init(&hal->active_work_cnt);
    if (hal_delayed_reg_write_init(hal) != QDF_STATUS_SUCCESS) {
        hal_err("unable to initialize delayed reg write");
        goto fail4;
    }

    hif_rtpm_register(HIF_RTPM_ID_HAL_REO_CMD, NULL);

    return (void *)hal;
fail4:
    qdf_ssr_driver_dump_unregister_region("hal_soc");
    qdf_minidump_remove(hal, sizeof(*hal), "hal_soc");
    qdf_mem_free(hal->ops);
fail3:
    qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
                            sizeof(*hal->shadow_wrptr_mem_vaddr) *
                            HAL_MAX_LMAC_RINGS,
                            hal->shadow_wrptr_mem_vaddr,
                            hal->shadow_wrptr_mem_paddr, 0);
fail2:
    qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
                            sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
                            HAL_SRNG_ID_MAX,
                            hal->shadow_rdptr_mem_vaddr,
                            hal->shadow_rdptr_mem_paddr, 0);
fail1:
    qdf_mem_common_free(hal);
fail0:
    return NULL;
}
qdf_export_symbol(hal_attach);

void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem)
{
    struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

    mem->dev_base_addr = (void *)hal->dev_base_addr;
    mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
    mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
    mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
    mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
    hif_read_phy_mem_base((void *)hal->hif_handle,
                          (qdf_dma_addr_t *)&mem->dev_base_paddr);
    mem->lmac_srng_start_id = HAL_SRNG_LMAC1_ID_START;
}
qdf_export_symbol(hal_get_meminfo);

void hal_detach(void *hal_soc)
{
    struct hal_soc *hal = (struct hal_soc *)hal_soc;

    hif_rtpm_deregister(HIF_RTPM_ID_HAL_REO_CMD);
    hal_delayed_reg_write_deinit(hal);
    hal_reo_shared_qaddr_detach((hal_soc_handle_t)hal);
    qdf_ssr_driver_dump_unregister_region("hal_soc");
    qdf_minidump_remove(hal, sizeof(*hal), "hal_soc");
    qdf_mem_free(hal->ops);

    hal_free_srng_history(hal);
    qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
                            sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
                            HAL_SRNG_ID_MAX,
                            hal->shadow_rdptr_mem_vaddr,
                            hal->shadow_rdptr_mem_paddr, 0);
    qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
                            sizeof(*(hal->shadow_wrptr_mem_vaddr)) *
                            HAL_MAX_LMAC_RINGS,
                            hal->shadow_wrptr_mem_vaddr,
                            hal->shadow_wrptr_mem_paddr, 0);
    qdf_mem_common_free(hal);
}
qdf_export_symbol(hal_detach);
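/*
 * CE destination ring registers, per the masks below: DEST_CTRL carries
 * the maximum receive buffer length in its low 16 bits (BMSK 0x0000ffff),
 * and the consumer prefetch timer register carries a 3-bit timer field
 * (RMSK 0x7). hal_ce_dst_setup() does a read-modify-write of exactly
 * those masked fields, leaving the other bits untouched.
 */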
#define HAL_CE_CHANNEL_DST_DEST_CTRL_ADDR(x) ((x) + 0x000000b0)
#define HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK 0x0000ffff
#define HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_ADDR(x) \
        ((x) + 0x00000040)
#define HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_RMSK 0x00000007

/**
 * hal_ce_dst_setup() - Initialize CE destination ring registers
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @ring_num: ring number
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
                                    int ring_num)
{
    uint32_t reg_val = 0;
    uint32_t reg_addr;
    struct hal_hw_srng_config *ring_config =
        HAL_SRNG_CONFIG(hal, CE_DST);

    /* set DEST_MAX_LENGTH according to ce assignment */
    reg_addr = HAL_CE_CHANNEL_DST_DEST_CTRL_ADDR(
                   ring_config->reg_start[R0_INDEX] +
                   (ring_num * ring_config->reg_size[R0_INDEX]));

    reg_val = HAL_REG_READ(hal, reg_addr);
    reg_val &= ~HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
    reg_val |= srng->u.dst_ring.max_buffer_length &
               HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
    HAL_REG_WRITE(hal, reg_addr, reg_val);

    if (srng->prefetch_timer) {
        reg_addr = HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_ADDR(
                       ring_config->reg_start[R0_INDEX] +
                       (ring_num * ring_config->reg_size[R0_INDEX]));

        reg_val = HAL_REG_READ(hal, reg_addr);
        reg_val &= ~HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_RMSK;
        reg_val |= srng->prefetch_timer;
        HAL_REG_WRITE(hal, reg_addr, reg_val);
        reg_val = HAL_REG_READ(hal, reg_addr);
    }
}

void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
                                uint32_t *ix0, uint32_t *ix1,
                                uint32_t *ix2, uint32_t *ix3)
{
    uint32_t reg_offset;
    struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
    uint32_t reo_reg_base;

    reo_reg_base = hal_get_reo_reg_base_offset(hal_soc_hdl);

    if (read) {
        if (ix0) {
            reg_offset =
                HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(reo_reg_base);
            *ix0 = HAL_REG_READ(hal, reg_offset);
        }

        if (ix1) {
            reg_offset =
                HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(reo_reg_base);
            *ix1 = HAL_REG_READ(hal, reg_offset);
        }

        if (ix2) {
            reg_offset =
                HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(reo_reg_base);
            *ix2 = HAL_REG_READ(hal, reg_offset);
        }

        if (ix3) {
            reg_offset =
                HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(reo_reg_base);
            *ix3 = HAL_REG_READ(hal, reg_offset);
        }
    } else {
        if (ix0) {
            reg_offset =
                HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(reo_reg_base);
            HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset, *ix0, true);
        }

        if (ix1) {
            reg_offset =
                HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(reo_reg_base);
            HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset, *ix1, true);
        }

        if (ix2) {
            reg_offset =
                HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(reo_reg_base);
            HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset, *ix2, true);
        }

        if (ix3) {
            reg_offset =
                HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(reo_reg_base);
            HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset, *ix3, true);
        }
    }
}

qdf_export_symbol(hal_reo_read_write_ctrl_ix);

void hal_srng_dst_set_hp_paddr_confirm(struct hal_srng *srng, uint64_t paddr)
{
    SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_LSB, paddr & 0xffffffff);
    SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_MSB, paddr >> 32);
}

qdf_export_symbol(hal_srng_dst_set_hp_paddr_confirm);
void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc,
                          struct hal_srng *srng,
                          uint32_t *vaddr)
{
    uint32_t reg_offset;
    struct hal_soc *hal = (struct hal_soc *)hal_soc;

    if (!srng)
        return;

    srng->u.dst_ring.hp_addr = vaddr;
    reg_offset = SRNG_DST_ADDR(srng, HP) - hal->dev_base_addr;
    HAL_REG_WRITE_CONFIRM_RETRY(
        hal, reg_offset, srng->u.dst_ring.cached_hp, true);

    if (vaddr) {
        *srng->u.dst_ring.hp_addr = srng->u.dst_ring.cached_hp;
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                  "hp_addr=%pK, cached_hp=%d",
                  (void *)srng->u.dst_ring.hp_addr,
                  srng->u.dst_ring.cached_hp);
    }
}

qdf_export_symbol(hal_srng_dst_init_hp);

/**
 * hal_srng_hw_init() - Private function to initialize SRNG HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @idle_check: Check if ring is idle
 * @idx: ring index
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
                                    struct hal_srng *srng, bool idle_check,
                                    uint32_t idx)
{
    if (srng->ring_dir == HAL_SRNG_SRC_RING)
        hal_srng_src_hw_init(hal, srng, idle_check, idx);
    else
        hal_srng_dst_hw_init(hal, srng, idle_check, idx);
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
                                         int ring_type, int ring_num)
{
    struct hal_soc *hal = (struct hal_soc *)hal_soc;
    struct hal_hw_srng_config *ring_config =
        HAL_SRNG_CONFIG(hal, ring_type);

    return ring_config->nf_irq_support;
}

/**
 * hal_srng_set_msi2_params() - Set MSI2 params to SRNG data structure from
 *	ring params
 * @srng: SRNG handle
 * @ring_params: ring params for this SRNG
 *
 * Return: None
 */
static inline void
hal_srng_set_msi2_params(struct hal_srng *srng,
                         struct hal_srng_params *ring_params)
{
    srng->msi2_addr = ring_params->msi2_addr;
    srng->msi2_data = ring_params->msi2_data;
}

/**
 * hal_srng_get_nf_params() - Get the near full MSI2 params from srng
 * @srng: SRNG handle
 * @ring_params: ring params for this SRNG
 *
 * Return: None
 */
static inline void
hal_srng_get_nf_params(struct hal_srng *srng,
                       struct hal_srng_params *ring_params)
{
    ring_params->msi2_addr = srng->msi2_addr;
    ring_params->msi2_data = srng->msi2_data;
}

/**
 * hal_srng_set_nf_thresholds() - Set the near full thresholds in SRNG
 * @srng: SRNG handle where the params are to be set
 * @ring_params: ring params, from where threshold is to be fetched
 *
 * Return: None
 */
static inline void
hal_srng_set_nf_thresholds(struct hal_srng *srng,
                           struct hal_srng_params *ring_params)
{
    srng->u.dst_ring.nf_irq_support = ring_params->nf_irq_support;
    srng->u.dst_ring.high_thresh = ring_params->high_thresh;
}
#else
static inline void
hal_srng_set_msi2_params(struct hal_srng *srng,
                         struct hal_srng_params *ring_params)
{
}

static inline void
hal_srng_get_nf_params(struct hal_srng *srng,
                       struct hal_srng_params *ring_params)
{
}

static inline void
hal_srng_set_nf_thresholds(struct hal_srng *srng,
                           struct hal_srng_params *ring_params)
{
}
#endif
#if defined(CLEAR_SW2TCL_CONSUMED_DESC)
/**
 * hal_srng_last_desc_cleared_init() - Initialize SRNG last_desc_cleared ptr
 * @srng: Source ring pointer
 *
 * Return: None
 */
static inline
void hal_srng_last_desc_cleared_init(struct hal_srng *srng)
{
    srng->last_desc_cleared = srng->ring_size - srng->entry_size;
}

#else
static inline
void hal_srng_last_desc_cleared_init(struct hal_srng *srng)
{
}
#endif /* CLEAR_SW2TCL_CONSUMED_DESC */

#ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
static inline void hal_srng_update_high_wm_thresholds(struct hal_srng *srng)
{
    srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100] =
        ((srng->num_entries * 90) / 100);
    srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90] =
        ((srng->num_entries * 80) / 100);
    srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80] =
        ((srng->num_entries * 70) / 100);
    srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70] =
        ((srng->num_entries * 60) / 100);
    srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60] =
        ((srng->num_entries * 50) / 100);
    /* Below 50% threshold is not needed */
    srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT] = 0;

    hal_info("ring_id: %u, wm_thresh- <50:%u, 50-60:%u, 60-70:%u, 70-80:%u, 80-90:%u, 90-100:%u",
             srng->ring_id,
             srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT],
             srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60],
             srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70],
             srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80],
             srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90],
             srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100]);
}
#else
static inline void hal_srng_update_high_wm_thresholds(struct hal_srng *srng)
{
}
#endif

void *hal_srng_setup_idx(void *hal_soc, int ring_type, int ring_num, int mac_id,
                         struct hal_srng_params *ring_params, bool idle_check,
                         uint32_t idx)
{
    int ring_id;
    struct hal_soc *hal = (struct hal_soc *)hal_soc;
    hal_soc_handle_t hal_hdl = (hal_soc_handle_t)hal;
    struct hal_srng *srng;
    struct hal_hw_srng_config *ring_config =
        HAL_SRNG_CONFIG(hal, ring_type);
    void *dev_base_addr;
    int i;

    ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
    if (ring_id < 0)
        return NULL;

    hal_verbose_debug("mac_id %d ring_id %d", mac_id, ring_id);

    srng = hal_get_srng(hal_soc, ring_id);

    if (srng->initialized) {
        hal_verbose_debug("Ring (ring_type, ring_num) already initialized");
        return NULL;
    }

    hal_srng_reg_his_init(srng);
    dev_base_addr = hal->dev_base_addr;
    srng->ring_id = ring_id;
    srng->ring_type = ring_type;
    srng->ring_dir = ring_config->ring_dir;
    srng->ring_base_paddr = ring_params->ring_base_paddr;
    srng->ring_base_vaddr = ring_params->ring_base_vaddr;
    srng->entry_size = ring_config->entry_size;
    srng->num_entries = ring_params->num_entries;
    srng->ring_size = srng->num_entries * srng->entry_size;
    srng->ring_size_mask = srng->ring_size - 1;
    srng->ring_vaddr_end = srng->ring_base_vaddr + srng->ring_size;
    srng->msi_addr = ring_params->msi_addr;
    srng->msi_data = ring_params->msi_data;
    srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
    srng->intr_batch_cntr_thres_entries =
        ring_params->intr_batch_cntr_thres_entries;
    srng->pointer_timer_threshold =
        ring_params->pointer_timer_threshold;
    srng->pointer_num_threshold =
        ring_params->pointer_num_threshold;

    if (!idle_check)
        srng->prefetch_timer = ring_params->prefetch_timer;
    srng->hal_soc = hal_soc;
    hal_srng_set_msi2_params(srng, ring_params);
    hal_srng_update_high_wm_thresholds(srng);

    for (i = 0; i < MAX_SRNG_REG_GROUPS; i++) {
        srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
            + (ring_num * ring_config->reg_size[i]);
    }

    /* Zero out the entire ring memory */
    qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
                                         srng->num_entries) << 2);

    srng->flags = ring_params->flags;

    /* For cached descriptors, flush and invalidate the memory */
    if (srng->flags & HAL_SRNG_CACHED_DESC) {
        qdf_nbuf_dma_clean_range(
            srng->ring_base_vaddr,
            srng->ring_base_vaddr +
            ((srng->entry_size * srng->num_entries)));
        qdf_nbuf_dma_inv_range(
            srng->ring_base_vaddr,
            srng->ring_base_vaddr +
            ((srng->entry_size * srng->num_entries)));
    }
#ifdef BIG_ENDIAN_HOST
    /* TODO: See if we should get these flags from the caller */
    srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
    srng->flags |= HAL_SRNG_MSI_SWAP;
    srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

    hal_srng_last_desc_cleared_init(srng);

    if (srng->ring_dir == HAL_SRNG_SRC_RING) {
        srng->u.src_ring.hp = 0;
        srng->u.src_ring.reap_hp = srng->ring_size -
            srng->entry_size;
        srng->u.src_ring.tp_addr =
            &(hal->shadow_rdptr_mem_vaddr[ring_id]);
        srng->u.src_ring.low_threshold =
            ring_params->low_threshold * srng->entry_size;

        if (srng->u.src_ring.tp_addr)
            qdf_mem_zero(srng->u.src_ring.tp_addr,
                         sizeof(*hal->shadow_rdptr_mem_vaddr));

        if (ring_config->lmac_ring) {
            /* For LMAC rings, head pointer updates will be done
             * through FW by writing to a shared memory location
             */
            srng->u.src_ring.hp_addr =
                &(hal->shadow_wrptr_mem_vaddr[ring_id -
                                              HAL_SRNG_LMAC1_ID_START]);
            srng->flags |= HAL_SRNG_LMAC_RING;

            if (srng->u.src_ring.hp_addr)
                qdf_mem_zero(srng->u.src_ring.hp_addr,
                             sizeof(*hal->shadow_wrptr_mem_vaddr));

        } else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
            srng->u.src_ring.hp_addr =
                hal_get_window_address(hal,
                                       SRNG_SRC_ADDR(srng, HP));

            if (CHECK_SHADOW_REGISTERS) {
                QDF_TRACE(QDF_MODULE_ID_TXRX,
                          QDF_TRACE_LEVEL_ERROR,
                          "%s: Ring (%d, %d) missing shadow config",
                          __func__, ring_type, ring_num);
            }
        } else {
            hal_validate_shadow_register(hal,
                                         SRNG_SRC_ADDR(srng, HP),
                                         srng->u.src_ring.hp_addr);
        }
    } else {
        /* During initialization the loop count in all the descriptors
         * will be set to zero, and HW will set it to 1 on completing
         * the descriptor update in the first loop, and increments it by
         * 1 on subsequent loops (the loop count wraps around after
         * reaching 0xffff). The 'loop_cnt' in SW ring state is the
         * expected loop count in descriptors updated by HW (to be
         * processed by SW).
         */
        hal_srng_set_nf_thresholds(srng, ring_params);
        srng->u.dst_ring.loop_cnt = 1;
        srng->u.dst_ring.tp = 0;
        srng->u.dst_ring.hp_addr =
            &(hal->shadow_rdptr_mem_vaddr[ring_id]);

        if (srng->u.dst_ring.hp_addr)
            qdf_mem_zero(srng->u.dst_ring.hp_addr,
                         sizeof(*hal->shadow_rdptr_mem_vaddr));

        if (ring_config->lmac_ring) {
            /* For LMAC rings, tail pointer updates will be done
             * through FW by writing to a shared memory location
             */
            srng->u.dst_ring.tp_addr =
                &(hal->shadow_wrptr_mem_vaddr[ring_id -
                                              HAL_SRNG_LMAC1_ID_START]);
            srng->flags |= HAL_SRNG_LMAC_RING;

            if (srng->u.dst_ring.tp_addr)
                qdf_mem_zero(srng->u.dst_ring.tp_addr,
                             sizeof(*hal->shadow_wrptr_mem_vaddr));

        } else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
            srng->u.dst_ring.tp_addr =
                hal_get_window_address(hal,
                                       SRNG_DST_ADDR(srng, TP));

            if (CHECK_SHADOW_REGISTERS) {
                QDF_TRACE(QDF_MODULE_ID_TXRX,
                          QDF_TRACE_LEVEL_ERROR,
                          "%s: Ring (%d, %d) missing shadow config",
                          __func__, ring_type, ring_num);
            }
        } else {
            hal_validate_shadow_register(hal,
                                         SRNG_DST_ADDR(srng, TP),
                                         srng->u.dst_ring.tp_addr);
        }
    }

    if (!(ring_config->lmac_ring)) {
        /*
         * UMAC reset has idle check enabled.
         * During UMAC reset, Tx ring halt is already set
         * by the Wi-Fi FW during the pre-reset stage,
         * so avoid setting the Tx ring halt again.
         */
        if (idle_check && idx) {
            if (!hal->ops->hal_tx_ring_halt_get(hal_hdl)) {
                qdf_print("\nTx ring halt not set:Ring(%d, %d)",
                          ring_type, ring_num);
                qdf_assert_always(0);
            }
            hal_srng_hw_init(hal, srng, idle_check, idx);
            goto ce_setup;
        }

        if (idx) {
            hal->ops->hal_tx_ring_halt_set(hal_hdl);
            do {
                hal_info("Waiting for ring reset");
            } while (!(hal->ops->hal_tx_ring_halt_poll(hal_hdl)));
        }
        hal_srng_hw_init(hal, srng, idle_check, idx);

        if (idx)
            hal->ops->hal_tx_ring_halt_reset(hal_hdl);

ce_setup:
        if (ring_type == CE_DST) {
            srng->u.dst_ring.max_buffer_length =
                ring_params->max_buffer_length;
            hal_ce_dst_setup(hal, srng, ring_num);
        }
    }

    SRNG_LOCK_INIT(&srng->lock);

    srng->srng_event = 0;

    srng->initialized = true;

    return (void *)srng;
}
qdf_export_symbol(hal_srng_setup_idx);
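/*
 * Usage sketch for the wrapper below (values are hypothetical; REO_DST
 * stands in for any hal_ring_type): the caller allocates coherent ring
 * memory, fills hal_srng_params, and lets HAL program the hardware:
 *
 *     struct hal_srng_params params = {0};
 *
 *     params.ring_base_vaddr = vaddr;        // 8-byte aligned
 *     params.ring_base_paddr = paddr;
 *     params.num_entries = 1024;             // entries, not bytes
 *     params.intr_timer_thres_us = 8;
 *     params.intr_batch_cntr_thres_entries = 1;
 *     srng = hal_srng_setup(hal, REO_DST, 0, 0, &params, false);
 *
 * Entry size is fixed per ring type; query it with
 * hal_srng_get_entrysize() when sizing the allocation.
 */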
/**
 * hal_srng_setup() - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 *	from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 * @idle_check: Check if ring is idle
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in the
 * hal_srng_params structure. The ring base address should be 8-byte
 * aligned, and the size of each ring entry should be queried using the
 * API hal_srng_get_entrysize.
 *
 * Return: Opaque pointer to ring on success
 *	   NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
                     int mac_id, struct hal_srng_params *ring_params,
                     bool idle_check)
{
    return hal_srng_setup_idx(hal_soc, ring_type, ring_num, mac_id,
                              ring_params, idle_check, 0);
}
qdf_export_symbol(hal_srng_setup);

void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
                      bool umac_reset_inprogress)
{
    struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

    SRNG_LOCK_DESTROY(&srng->lock);
    srng->initialized = 0;
    if (umac_reset_inprogress)
        hal_srng_hw_disable(hal_soc, srng);
}
qdf_export_symbol(hal_srng_cleanup);

uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type)
{
    struct hal_soc *hal = (struct hal_soc *)hal_soc;
    struct hal_hw_srng_config *ring_config =
        HAL_SRNG_CONFIG(hal, ring_type);

    return ring_config->entry_size << 2;
}
qdf_export_symbol(hal_srng_get_entrysize);

uint32_t hal_srng_max_entries(void *hal_soc, int ring_type)
{
    struct hal_soc *hal = (struct hal_soc *)hal_soc;
    struct hal_hw_srng_config *ring_config =
        HAL_SRNG_CONFIG(hal, ring_type);

    return ring_config->max_size / ring_config->entry_size;
}
qdf_export_symbol(hal_srng_max_entries);

enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type)
{
    struct hal_soc *hal = (struct hal_soc *)hal_soc;
    struct hal_hw_srng_config *ring_config =
        HAL_SRNG_CONFIG(hal, ring_type);

    return ring_config->ring_dir;
}

void hal_srng_dump(struct hal_srng *srng)
{
    if (srng->ring_dir == HAL_SRNG_SRC_RING) {
        hal_debug("=== SRC RING %d ===", srng->ring_id);
        hal_debug("hp %u, reap_hp %u, tp %u, cached tp %u",
                  srng->u.src_ring.hp,
                  srng->u.src_ring.reap_hp,
                  *srng->u.src_ring.tp_addr,
                  srng->u.src_ring.cached_tp);
    } else {
        hal_debug("=== DST RING %d ===", srng->ring_id);
        hal_debug("tp %u, hp %u, cached hp %u, loop_cnt %u",
                  srng->u.dst_ring.tp,
                  *srng->u.dst_ring.hp_addr,
                  srng->u.dst_ring.cached_hp,
                  srng->u.dst_ring.loop_cnt);
    }
}

void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
                         hal_ring_handle_t hal_ring_hdl,
                         struct hal_srng_params *ring_params)
{
    struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
    int i = 0;

    ring_params->ring_id = srng->ring_id;
    ring_params->ring_dir = srng->ring_dir;
    ring_params->entry_size = srng->entry_size;

    ring_params->ring_base_paddr = srng->ring_base_paddr;
    ring_params->ring_base_vaddr = srng->ring_base_vaddr;
    ring_params->num_entries = srng->num_entries;
    ring_params->msi_addr = srng->msi_addr;
    ring_params->msi_data = srng->msi_data;
    ring_params->intr_timer_thres_us = srng->intr_timer_thres_us;
    ring_params->intr_batch_cntr_thres_entries =
        srng->intr_batch_cntr_thres_entries;
    ring_params->low_threshold = srng->u.src_ring.low_threshold;
    ring_params->flags = srng->flags;
    for (i = 0; i < MAX_SRNG_REG_GROUPS; i++)
        ring_params->hwreg_base[i] = srng->hwreg_base[i];

    hal_srng_get_nf_params(srng, ring_params);
}
qdf_export_symbol(hal_get_srng_params);

void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
                           uint32_t low_threshold)
{
    struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

    srng->u.src_ring.low_threshold = low_threshold * srng->entry_size;
}
qdf_export_symbol(hal_set_low_threshold);

#ifdef FEATURE_RUNTIME_PM
void
hal_srng_rtpm_access_end(hal_soc_handle_t hal_soc_hdl,
                         hal_ring_handle_t hal_ring_hdl,
                         uint32_t rtpm_id)
{
    struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

    if (qdf_unlikely(!hal_ring_hdl)) {
        qdf_print("Error: Invalid hal_ring\n");
        return;
    }

    if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, rtpm_id) == 0) {
        if (hif_system_pm_state_check(hal_soc->hif_handle)) {
            hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
            hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
            hal_srng_inc_flush_cnt(hal_ring_hdl);
        } else {
            hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
        }

        hif_rtpm_put(HIF_RTPM_PUT_ASYNC, rtpm_id);
    } else {
        hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
        hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
        hal_srng_inc_flush_cnt(hal_ring_hdl);
    }
}

qdf_export_symbol(hal_srng_rtpm_access_end);
#endif /* FEATURE_RUNTIME_PM */

#ifdef FORCE_WAKE
void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
{
    struct hal_soc *hal_soc = (struct hal_soc *)soc;

    hal_soc->init_phase = init_phase;
}
#endif /* FORCE_WAKE */