1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include "hal_hw_headers.h" 21 #include "hal_api.h" 22 #include "hal_reo.h" 23 #include "target_type.h" 24 #include "qdf_module.h" 25 #include "wcss_version.h" 26 #include <qdf_tracepoint.h> 27 #include "qdf_ssr_driver_dump.h" 28 29 struct tcl_data_cmd gtcl_data_symbol __attribute__((used)); 30 31 #ifdef QCA_WIFI_QCA8074 32 void hal_qca6290_attach(struct hal_soc *hal); 33 #endif 34 #ifdef QCA_WIFI_QCA8074 35 void hal_qca8074_attach(struct hal_soc *hal); 36 #endif 37 #if defined(QCA_WIFI_QCA8074V2) || defined(QCA_WIFI_QCA6018) || \ 38 defined(QCA_WIFI_QCA9574) 39 void hal_qca8074v2_attach(struct hal_soc *hal); 40 #endif 41 #ifdef QCA_WIFI_QCA6390 42 void hal_qca6390_attach(struct hal_soc *hal); 43 #endif 44 #ifdef QCA_WIFI_QCA6490 45 void hal_qca6490_attach(struct hal_soc *hal); 46 #endif 47 #ifdef QCA_WIFI_QCN9000 48 void hal_qcn9000_attach(struct hal_soc *hal); 49 #endif 50 #ifdef QCA_WIFI_QCN9224 51 void hal_qcn9224v1_attach(struct hal_soc *hal); 52 void hal_qcn9224v2_attach(struct hal_soc *hal); 53 #endif 54 #if defined(QCA_WIFI_QCN6122) || defined(QCA_WIFI_QCN9160) 55 void hal_qcn6122_attach(struct hal_soc *hal); 56 #endif 57 #ifdef QCA_WIFI_QCA6750 58 void hal_qca6750_attach(struct hal_soc *hal); 59 #endif 60 #ifdef QCA_WIFI_QCA5018 61 void hal_qca5018_attach(struct hal_soc *hal); 62 #endif 63 #ifdef QCA_WIFI_QCA5332 64 void hal_qca5332_attach(struct hal_soc *hal); 65 #endif 66 #ifdef QCA_WIFI_KIWI 67 void hal_kiwi_attach(struct hal_soc *hal); 68 #endif 69 70 #ifdef ENABLE_VERBOSE_DEBUG 71 bool is_hal_verbose_debug_enabled; 72 #endif 73 74 #define HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(x) ((x) + 0x4) 75 #define HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(x) ((x) + 0x8) 76 #define HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(x) ((x) + 0xc) 77 #define HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(x) ((x) + 0x10) 78 79 #ifdef ENABLE_HAL_REG_WR_HISTORY 80 struct hal_reg_write_fail_history hal_reg_wr_hist; 81 82 void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc, 83 uint32_t offset, 84 uint32_t wr_val, uint32_t rd_val) 85 { 86 struct hal_reg_write_fail_entry *record; 87 int idx; 88 89 idx = hal_history_get_next_index(&hal_soc->reg_wr_fail_hist->index, 90 HAL_REG_WRITE_HIST_SIZE); 91 92 record = &hal_soc->reg_wr_fail_hist->record[idx]; 93 94 record->timestamp = qdf_get_log_timestamp(); 95 record->reg_offset = offset; 96 record->write_val = wr_val; 97 record->read_val = rd_val; 98 } 99 100 static void hal_reg_write_fail_history_init(struct hal_soc *hal) 101 { 102 hal->reg_wr_fail_hist = &hal_reg_wr_hist; 103 104 qdf_atomic_set(&hal->reg_wr_fail_hist->index, -1); 105 } 106 #else 107 static void hal_reg_write_fail_history_init(struct hal_soc *hal) 108 { 109 } 110 #endif 111 112 /** 113 * hal_get_srng_ring_id() - get the ring id of a described ring 114 * @hal: hal_soc data structure 115 * @ring_type: type enum describing the ring 116 * @ring_num: which ring of the ring type 117 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings) 118 * 119 * Return: the ring id or -EINVAL if the ring does not exist. 120 */ 121 static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type, 122 int ring_num, int mac_id) 123 { 124 struct hal_hw_srng_config *ring_config = 125 HAL_SRNG_CONFIG(hal, ring_type); 126 int ring_id; 127 128 if (ring_num >= ring_config->max_rings) { 129 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 130 "%s: ring_num exceeded maximum no. of supported rings", 131 __func__); 132 /* TODO: This is a programming error. Assert if this happens */ 133 return -EINVAL; 134 } 135 136 /* 137 * Some DMAC rings share a common source ring, hence don't provide them 138 * with separate ring IDs per LMAC. 139 */ 140 if (ring_config->lmac_ring && !ring_config->dmac_cmn_ring) { 141 ring_id = (ring_config->start_ring_id + ring_num + 142 (mac_id * HAL_MAX_RINGS_PER_LMAC)); 143 } else { 144 ring_id = ring_config->start_ring_id + ring_num; 145 } 146 147 return ring_id; 148 } 149 150 static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id) 151 { 152 /* TODO: Should we allocate srng structures dynamically? */ 153 return &(hal->srng_list[ring_id]); 154 } 155 156 #ifndef SHADOW_REG_CONFIG_DISABLED 157 #define HP_OFFSET_IN_REG_START 1 158 #define OFFSET_FROM_HP_TO_TP 4 159 static void hal_update_srng_hp_tp_address(struct hal_soc *hal_soc, 160 int shadow_config_index, 161 int ring_type, 162 int ring_num) 163 { 164 struct hal_srng *srng; 165 int ring_id; 166 struct hal_hw_srng_config *ring_config = 167 HAL_SRNG_CONFIG(hal_soc, ring_type); 168 169 ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0); 170 if (ring_id < 0) 171 return; 172 173 srng = hal_get_srng(hal_soc, ring_id); 174 175 if (ring_config->ring_dir == HAL_SRNG_DST_RING) { 176 srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index) 177 + hal_soc->dev_base_addr; 178 hal_debug("tp_addr=%pK dev base addr %pK index %u", 179 srng->u.dst_ring.tp_addr, hal_soc->dev_base_addr, 180 shadow_config_index); 181 } else { 182 srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index) 183 + hal_soc->dev_base_addr; 184 hal_debug("hp_addr=%pK dev base addr %pK index %u", 185 srng->u.src_ring.hp_addr, 186 hal_soc->dev_base_addr, shadow_config_index); 187 } 188 189 } 190 #endif 191 192 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE 193 void hal_set_one_target_reg_config(struct hal_soc *hal, 194 uint32_t target_reg_offset, 195 int list_index) 196 { 197 int i = list_index; 198 199 qdf_assert_always(i < MAX_GENERIC_SHADOW_REG); 200 hal->list_shadow_reg_config[i].target_register = 201 target_reg_offset; 202 hal->num_generic_shadow_regs_configured++; 203 } 204 205 qdf_export_symbol(hal_set_one_target_reg_config); 206 207 #define REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET 0x4 208 #define MAX_REO_REMAP_SHADOW_REGS 4 209 QDF_STATUS hal_set_shadow_regs(void *hal_soc) 210 { 211 uint32_t target_reg_offset; 212 struct hal_soc *hal = (struct hal_soc *)hal_soc; 213 int i; 214 struct hal_hw_srng_config *srng_config = 215 &hal->hw_srng_table[WBM2SW_RELEASE]; 216 uint32_t reo_reg_base; 217 218 reo_reg_base = hal_get_reo_reg_base_offset(hal_soc); 219 220 target_reg_offset = 221 HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(reo_reg_base); 222 223 for (i = 0; i < MAX_REO_REMAP_SHADOW_REGS; i++) { 224 hal_set_one_target_reg_config(hal, target_reg_offset, i); 225 target_reg_offset += REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET; 226 } 227 228 target_reg_offset = srng_config->reg_start[HP_OFFSET_IN_REG_START]; 229 target_reg_offset += (srng_config->reg_size[HP_OFFSET_IN_REG_START] 230 * HAL_IPA_TX_COMP_RING_IDX); 231 232 hal_set_one_target_reg_config(hal, target_reg_offset, i); 233 return QDF_STATUS_SUCCESS; 234 } 235 236 qdf_export_symbol(hal_set_shadow_regs); 237 238 QDF_STATUS hal_construct_shadow_regs(void *hal_soc) 239 { 240 struct hal_soc *hal = (struct hal_soc *)hal_soc; 241 int shadow_config_index = hal->num_shadow_registers_configured; 242 int i; 243 int num_regs = hal->num_generic_shadow_regs_configured; 244 245 for (i = 0; i < num_regs; i++) { 246 qdf_assert_always(shadow_config_index < MAX_SHADOW_REGISTERS); 247 hal->shadow_config[shadow_config_index].addr = 248 hal->list_shadow_reg_config[i].target_register; 249 hal->list_shadow_reg_config[i].shadow_config_index = 250 shadow_config_index; 251 hal->list_shadow_reg_config[i].va = 252 SHADOW_REGISTER(shadow_config_index) + 253 (uintptr_t)hal->dev_base_addr; 254 hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x", 255 hal->shadow_config[shadow_config_index].addr, 256 SHADOW_REGISTER(shadow_config_index), 257 shadow_config_index); 258 shadow_config_index++; 259 hal->num_shadow_registers_configured++; 260 } 261 return QDF_STATUS_SUCCESS; 262 } 263 264 qdf_export_symbol(hal_construct_shadow_regs); 265 #endif 266 267 #ifndef SHADOW_REG_CONFIG_DISABLED 268 269 QDF_STATUS hal_set_one_shadow_config(void *hal_soc, 270 int ring_type, 271 int ring_num) 272 { 273 uint32_t target_register; 274 struct hal_soc *hal = (struct hal_soc *)hal_soc; 275 struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type]; 276 int shadow_config_index = hal->num_shadow_registers_configured; 277 278 if (shadow_config_index >= MAX_SHADOW_REGISTERS) { 279 QDF_ASSERT(0); 280 return QDF_STATUS_E_RESOURCES; 281 } 282 283 hal->num_shadow_registers_configured++; 284 285 target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START]; 286 target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START] 287 *ring_num); 288 289 /* if the ring is a dst ring, we need to shadow the tail pointer */ 290 if (srng_config->ring_dir == HAL_SRNG_DST_RING) 291 target_register += OFFSET_FROM_HP_TO_TP; 292 293 hal->shadow_config[shadow_config_index].addr = target_register; 294 295 /* update hp/tp addr in the hal_soc structure*/ 296 hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type, 297 ring_num); 298 299 hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x, ring_type %d, ring num %d", 300 target_register, 301 SHADOW_REGISTER(shadow_config_index), 302 shadow_config_index, 303 ring_type, ring_num); 304 305 return QDF_STATUS_SUCCESS; 306 } 307 308 qdf_export_symbol(hal_set_one_shadow_config); 309 310 QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc) 311 { 312 int ring_type, ring_num; 313 struct hal_soc *hal = (struct hal_soc *)hal_soc; 314 315 for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) { 316 struct hal_hw_srng_config *srng_config = 317 &hal->hw_srng_table[ring_type]; 318 319 if (ring_type == CE_SRC || 320 ring_type == CE_DST || 321 ring_type == CE_DST_STATUS) 322 continue; 323 324 if (srng_config->lmac_ring) 325 continue; 326 327 for (ring_num = 0; ring_num < srng_config->max_rings; 328 ring_num++) 329 hal_set_one_shadow_config(hal_soc, ring_type, ring_num); 330 } 331 332 return QDF_STATUS_SUCCESS; 333 } 334 335 qdf_export_symbol(hal_construct_srng_shadow_regs); 336 #else 337 338 QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc) 339 { 340 return QDF_STATUS_SUCCESS; 341 } 342 343 qdf_export_symbol(hal_construct_srng_shadow_regs); 344 345 QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type, 346 int ring_num) 347 { 348 return QDF_STATUS_SUCCESS; 349 } 350 qdf_export_symbol(hal_set_one_shadow_config); 351 #endif 352 353 void hal_get_shadow_config(void *hal_soc, 354 struct pld_shadow_reg_v2_cfg **shadow_config, 355 int *num_shadow_registers_configured) 356 { 357 struct hal_soc *hal = (struct hal_soc *)hal_soc; 358 359 *shadow_config = &hal->shadow_config[0].v2; 360 *num_shadow_registers_configured = 361 hal->num_shadow_registers_configured; 362 } 363 qdf_export_symbol(hal_get_shadow_config); 364 365 #ifdef CONFIG_SHADOW_V3 366 void hal_get_shadow_v3_config(void *hal_soc, 367 struct pld_shadow_reg_v3_cfg **shadow_config, 368 int *num_shadow_registers_configured) 369 { 370 struct hal_soc *hal = (struct hal_soc *)hal_soc; 371 372 *shadow_config = &hal->shadow_config[0].v3; 373 *num_shadow_registers_configured = 374 hal->num_shadow_registers_configured; 375 } 376 qdf_export_symbol(hal_get_shadow_v3_config); 377 #endif 378 379 static bool hal_validate_shadow_register(struct hal_soc *hal, 380 uint32_t *destination, 381 uint32_t *shadow_address) 382 { 383 unsigned int index; 384 uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr; 385 int destination_ba_offset = 386 ((char *)destination) - (char *)hal->dev_base_addr; 387 388 index = shadow_address - shadow_0_offset; 389 390 if (index >= MAX_SHADOW_REGISTERS) { 391 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 392 "%s: index %x out of bounds", __func__, index); 393 goto error; 394 } else if (hal->shadow_config[index].addr != destination_ba_offset) { 395 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 396 "%s: sanity check failure, expected %x, found %x", 397 __func__, destination_ba_offset, 398 hal->shadow_config[index].addr); 399 goto error; 400 } 401 return true; 402 error: 403 qdf_print("baddr %pK, destination %pK, shadow_address %pK s0offset %pK index %x", 404 hal->dev_base_addr, destination, shadow_address, 405 shadow_0_offset, index); 406 QDF_BUG(0); 407 return false; 408 } 409 410 static void hal_target_based_configure(struct hal_soc *hal) 411 { 412 /* 413 * Indicate Initialization of srngs to avoid force wake 414 * as umac power collapse is not enabled yet 415 */ 416 hal->init_phase = true; 417 418 switch (hal->target_type) { 419 #ifdef QCA_WIFI_QCA6290 420 case TARGET_TYPE_QCA6290: 421 hal->use_register_windowing = true; 422 hal_qca6290_attach(hal); 423 break; 424 #endif 425 #ifdef QCA_WIFI_QCA6390 426 case TARGET_TYPE_QCA6390: 427 hal->use_register_windowing = true; 428 hal_qca6390_attach(hal); 429 break; 430 #endif 431 #ifdef QCA_WIFI_QCA6490 432 case TARGET_TYPE_QCA6490: 433 hal->use_register_windowing = true; 434 hal_qca6490_attach(hal); 435 break; 436 #endif 437 #ifdef QCA_WIFI_QCA6750 438 case TARGET_TYPE_QCA6750: 439 hal->use_register_windowing = true; 440 hal->static_window_map = true; 441 hal_qca6750_attach(hal); 442 break; 443 #endif 444 #ifdef QCA_WIFI_KIWI 445 case TARGET_TYPE_KIWI: 446 case TARGET_TYPE_MANGO: 447 case TARGET_TYPE_PEACH: 448 hal->use_register_windowing = true; 449 hal_kiwi_attach(hal); 450 break; 451 #endif 452 #if defined(QCA_WIFI_QCA8074) && defined(WIFI_TARGET_TYPE_3_0) 453 case TARGET_TYPE_QCA8074: 454 hal_qca8074_attach(hal); 455 break; 456 #endif 457 458 #if defined(QCA_WIFI_QCA8074V2) 459 case TARGET_TYPE_QCA8074V2: 460 hal_qca8074v2_attach(hal); 461 break; 462 #endif 463 464 #if defined(QCA_WIFI_QCA6018) 465 case TARGET_TYPE_QCA6018: 466 hal_qca8074v2_attach(hal); 467 break; 468 #endif 469 470 #if defined(QCA_WIFI_QCA9574) 471 case TARGET_TYPE_QCA9574: 472 hal_qca8074v2_attach(hal); 473 break; 474 #endif 475 476 #if defined(QCA_WIFI_QCN6122) 477 case TARGET_TYPE_QCN6122: 478 hal->use_register_windowing = true; 479 /* 480 * Static window map is enabled for qcn9000 to use 2mb bar 481 * size and use multiple windows to write into registers. 482 */ 483 hal->static_window_map = true; 484 hal_qcn6122_attach(hal); 485 break; 486 #endif 487 488 #if defined(QCA_WIFI_QCN9160) 489 case TARGET_TYPE_QCN9160: 490 hal->use_register_windowing = true; 491 /* 492 * Static window map is enabled for qcn9160 to use 2mb bar 493 * size and use multiple windows to write into registers. 494 */ 495 hal->static_window_map = true; 496 hal_qcn6122_attach(hal); 497 break; 498 #endif 499 500 #ifdef QCA_WIFI_QCN9000 501 case TARGET_TYPE_QCN9000: 502 hal->use_register_windowing = true; 503 /* 504 * Static window map is enabled for qcn9000 to use 2mb bar 505 * size and use multiple windows to write into registers. 506 */ 507 hal->static_window_map = true; 508 hal_qcn9000_attach(hal); 509 break; 510 #endif 511 #ifdef QCA_WIFI_QCA5018 512 case TARGET_TYPE_QCA5018: 513 hal->use_register_windowing = true; 514 hal->static_window_map = true; 515 hal_qca5018_attach(hal); 516 break; 517 #endif 518 #ifdef QCA_WIFI_QCN9224 519 case TARGET_TYPE_QCN9224: 520 hal->use_register_windowing = true; 521 hal->static_window_map = true; 522 if (hal->version == 1) 523 hal_qcn9224v1_attach(hal); 524 else 525 hal_qcn9224v2_attach(hal); 526 break; 527 #endif 528 #ifdef QCA_WIFI_QCA5332 529 case TARGET_TYPE_QCA5332: 530 hal->use_register_windowing = true; 531 hal->static_window_map = true; 532 hal_qca5332_attach(hal); 533 break; 534 #endif 535 #ifdef QCA_WIFI_WCN6450 536 case TARGET_TYPE_WCN6450: 537 hal->use_register_windowing = true; 538 hal->static_window_map = true; 539 hal_wcn6450_attach(hal); 540 break; 541 #endif 542 default: 543 break; 544 } 545 } 546 547 uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl) 548 { 549 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; 550 struct hif_target_info *tgt_info = 551 hif_get_target_info_handle(hal_soc->hif_handle); 552 553 return tgt_info->target_type; 554 } 555 556 qdf_export_symbol(hal_get_target_type); 557 558 #if defined(FEATURE_HAL_DELAYED_REG_WRITE) 559 /** 560 * hal_is_reg_write_tput_level_high() - throughput level for delayed reg writes 561 * @hal: hal_soc pointer 562 * 563 * Return: true if throughput is high, else false. 564 */ 565 static inline bool hal_is_reg_write_tput_level_high(struct hal_soc *hal) 566 { 567 int bw_level = hif_get_bandwidth_level(hal->hif_handle); 568 569 return (bw_level >= PLD_BUS_WIDTH_MEDIUM) ? true : false; 570 } 571 572 static inline 573 char *hal_fill_reg_write_srng_stats(struct hal_srng *srng, 574 char *buf, qdf_size_t size) 575 { 576 qdf_scnprintf(buf, size, "enq %u deq %u coal %u direct %u", 577 srng->wstats.enqueues, srng->wstats.dequeues, 578 srng->wstats.coalesces, srng->wstats.direct); 579 return buf; 580 } 581 582 /* bytes for local buffer */ 583 #define HAL_REG_WRITE_SRNG_STATS_LEN 100 584 585 #ifndef WLAN_SOFTUMAC_SUPPORT 586 void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl) 587 { 588 struct hal_srng *srng; 589 char buf[HAL_REG_WRITE_SRNG_STATS_LEN]; 590 struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl; 591 592 srng = hal_get_srng(hal, HAL_SRNG_SW2TCL1); 593 hal_debug("SW2TCL1: %s", 594 hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf))); 595 596 srng = hal_get_srng(hal, HAL_SRNG_WBM2SW0_RELEASE); 597 hal_debug("WBM2SW0: %s", 598 hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf))); 599 600 srng = hal_get_srng(hal, HAL_SRNG_REO2SW1); 601 hal_debug("REO2SW1: %s", 602 hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf))); 603 604 srng = hal_get_srng(hal, HAL_SRNG_REO2SW2); 605 hal_debug("REO2SW2: %s", 606 hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf))); 607 608 srng = hal_get_srng(hal, HAL_SRNG_REO2SW3); 609 hal_debug("REO2SW3: %s", 610 hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf))); 611 } 612 #else 613 void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl) 614 { 615 } 616 #endif 617 618 void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl) 619 { 620 uint32_t *hist; 621 struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl; 622 623 hist = hal->stats.wstats.sched_delay; 624 hal_debug("wstats: enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u", 625 qdf_atomic_read(&hal->stats.wstats.enqueues), 626 hal->stats.wstats.dequeues, 627 qdf_atomic_read(&hal->stats.wstats.coalesces), 628 qdf_atomic_read(&hal->stats.wstats.direct), 629 qdf_atomic_read(&hal->stats.wstats.q_depth), 630 hal->stats.wstats.max_q_depth, 631 hist[REG_WRITE_SCHED_DELAY_SUB_100us], 632 hist[REG_WRITE_SCHED_DELAY_SUB_1000us], 633 hist[REG_WRITE_SCHED_DELAY_SUB_5000us], 634 hist[REG_WRITE_SCHED_DELAY_GT_5000us]); 635 } 636 637 int hal_get_reg_write_pending_work(void *hal_soc) 638 { 639 struct hal_soc *hal = (struct hal_soc *)hal_soc; 640 641 return qdf_atomic_read(&hal->active_work_cnt); 642 } 643 644 #endif 645 646 #ifdef FEATURE_HAL_DELAYED_REG_WRITE 647 #ifdef MEMORY_DEBUG 648 /* 649 * Length of the queue(array) used to hold delayed register writes. 650 * Must be a multiple of 2. 651 */ 652 #define HAL_REG_WRITE_QUEUE_LEN 128 653 #else 654 #define HAL_REG_WRITE_QUEUE_LEN 32 655 #endif 656 657 /** 658 * hal_process_reg_write_q_elem() - process a register write queue element 659 * @hal: hal_soc pointer 660 * @q_elem: pointer to hal register write queue element 661 * 662 * Return: The value which was written to the address 663 */ 664 static uint32_t 665 hal_process_reg_write_q_elem(struct hal_soc *hal, 666 struct hal_reg_write_q_elem *q_elem) 667 { 668 struct hal_srng *srng = q_elem->srng; 669 uint32_t write_val; 670 671 SRNG_LOCK(&srng->lock); 672 673 srng->reg_write_in_progress = false; 674 srng->wstats.dequeues++; 675 676 if (srng->ring_dir == HAL_SRNG_SRC_RING) { 677 q_elem->dequeue_val = srng->u.src_ring.hp; 678 hal_write_address_32_mb(hal, 679 srng->u.src_ring.hp_addr, 680 srng->u.src_ring.hp, false); 681 write_val = srng->u.src_ring.hp; 682 } else { 683 q_elem->dequeue_val = srng->u.dst_ring.tp; 684 hal_write_address_32_mb(hal, 685 srng->u.dst_ring.tp_addr, 686 srng->u.dst_ring.tp, false); 687 write_val = srng->u.dst_ring.tp; 688 } 689 690 q_elem->valid = 0; 691 srng->last_dequeue_time = q_elem->dequeue_time; 692 SRNG_UNLOCK(&srng->lock); 693 694 return write_val; 695 } 696 697 /** 698 * hal_reg_write_fill_sched_delay_hist() - fill reg write delay histogram in hal 699 * @hal: hal_soc pointer 700 * @delay_us: delay in us 701 * 702 * Return: None 703 */ 704 static inline void hal_reg_write_fill_sched_delay_hist(struct hal_soc *hal, 705 uint64_t delay_us) 706 { 707 uint32_t *hist; 708 709 hist = hal->stats.wstats.sched_delay; 710 711 if (delay_us < 100) 712 hist[REG_WRITE_SCHED_DELAY_SUB_100us]++; 713 else if (delay_us < 1000) 714 hist[REG_WRITE_SCHED_DELAY_SUB_1000us]++; 715 else if (delay_us < 5000) 716 hist[REG_WRITE_SCHED_DELAY_SUB_5000us]++; 717 else 718 hist[REG_WRITE_SCHED_DELAY_GT_5000us]++; 719 } 720 721 #ifdef SHADOW_WRITE_DELAY 722 723 #define SHADOW_WRITE_MIN_DELTA_US 5 724 #define SHADOW_WRITE_DELAY_US 50 725 726 /* 727 * Never add those srngs which are performance relate. 728 * The delay itself will hit performance heavily. 729 */ 730 #define IS_SRNG_MATCH(s) ((s)->ring_id == HAL_SRNG_CE_1_DST_STATUS || \ 731 (s)->ring_id == HAL_SRNG_CE_1_DST) 732 733 static inline bool hal_reg_write_need_delay(struct hal_reg_write_q_elem *elem) 734 { 735 struct hal_srng *srng = elem->srng; 736 struct hal_soc *hal; 737 qdf_time_t now; 738 qdf_iomem_t real_addr; 739 740 if (qdf_unlikely(!srng)) 741 return false; 742 743 hal = srng->hal_soc; 744 if (qdf_unlikely(!hal)) 745 return false; 746 747 /* Check if it is target srng, and valid shadow reg */ 748 if (qdf_likely(!IS_SRNG_MATCH(srng))) 749 return false; 750 751 if (srng->ring_dir == HAL_SRNG_SRC_RING) 752 real_addr = SRNG_SRC_ADDR(srng, HP); 753 else 754 real_addr = SRNG_DST_ADDR(srng, TP); 755 if (!hal_validate_shadow_register(hal, real_addr, elem->addr)) 756 return false; 757 758 /* Check the time delta from last write of same srng */ 759 now = qdf_get_log_timestamp(); 760 if (qdf_log_timestamp_to_usecs(now - srng->last_dequeue_time) > 761 SHADOW_WRITE_MIN_DELTA_US) 762 return false; 763 764 /* Delay dequeue, and record */ 765 qdf_udelay(SHADOW_WRITE_DELAY_US); 766 767 srng->wstats.dequeue_delay++; 768 hal->stats.wstats.dequeue_delay++; 769 770 return true; 771 } 772 #else 773 static inline bool hal_reg_write_need_delay(struct hal_reg_write_q_elem *elem) 774 { 775 return false; 776 } 777 #endif 778 779 /** 780 * hal_reg_write_work() - Worker to process delayed writes 781 * @arg: hal_soc pointer 782 * 783 * Return: None 784 */ 785 static void hal_reg_write_work(void *arg) 786 { 787 int32_t q_depth, write_val; 788 struct hal_soc *hal = arg; 789 struct hal_reg_write_q_elem *q_elem; 790 uint64_t delta_us; 791 uint8_t ring_id; 792 uint32_t *addr; 793 uint32_t num_processed = 0; 794 795 q_elem = &hal->reg_write_queue[(hal->read_idx)]; 796 q_elem->work_scheduled_time = qdf_get_log_timestamp(); 797 q_elem->cpu_id = qdf_get_cpu(); 798 799 /* Make sure q_elem consistent in the memory for multi-cores */ 800 qdf_rmb(); 801 if (!q_elem->valid) 802 return; 803 804 q_depth = qdf_atomic_read(&hal->stats.wstats.q_depth); 805 if (q_depth > hal->stats.wstats.max_q_depth) 806 hal->stats.wstats.max_q_depth = q_depth; 807 808 if (hif_prevent_link_low_power_states(hal->hif_handle)) { 809 hal->stats.wstats.prevent_l1_fails++; 810 return; 811 } 812 813 while (true) { 814 qdf_rmb(); 815 if (!q_elem->valid) 816 break; 817 818 q_elem->dequeue_time = qdf_get_log_timestamp(); 819 ring_id = q_elem->srng->ring_id; 820 addr = q_elem->addr; 821 delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time - 822 q_elem->enqueue_time); 823 hal_reg_write_fill_sched_delay_hist(hal, delta_us); 824 825 hal->stats.wstats.dequeues++; 826 qdf_atomic_dec(&hal->stats.wstats.q_depth); 827 828 if (hal_reg_write_need_delay(q_elem)) 829 hal_verbose_debug("Delay reg writer for srng 0x%x, addr 0x%pK", 830 q_elem->srng->ring_id, q_elem->addr); 831 832 write_val = hal_process_reg_write_q_elem(hal, q_elem); 833 hal_verbose_debug("read_idx %u srng 0x%x, addr 0x%pK dequeue_val %u sched delay %llu us", 834 hal->read_idx, ring_id, addr, write_val, delta_us); 835 836 qdf_trace_dp_del_reg_write(ring_id, q_elem->enqueue_val, 837 q_elem->dequeue_val, 838 q_elem->enqueue_time, 839 q_elem->dequeue_time); 840 841 num_processed++; 842 hal->read_idx = (hal->read_idx + 1) & 843 (HAL_REG_WRITE_QUEUE_LEN - 1); 844 q_elem = &hal->reg_write_queue[(hal->read_idx)]; 845 } 846 847 hif_allow_link_low_power_states(hal->hif_handle); 848 /* 849 * Decrement active_work_cnt by the number of elements dequeued after 850 * hif_allow_link_low_power_states. 851 * This makes sure that hif_try_complete_tasks will wait till we make 852 * the bus access in hif_allow_link_low_power_states. This will avoid 853 * race condition between delayed register worker and bus suspend 854 * (system suspend or runtime suspend). 855 * 856 * The following decrement should be done at the end! 857 */ 858 qdf_atomic_sub(num_processed, &hal->active_work_cnt); 859 } 860 861 static void __hal_flush_reg_write_work(struct hal_soc *hal) 862 { 863 qdf_flush_work(&hal->reg_write_work); 864 qdf_disable_work(&hal->reg_write_work); 865 } 866 867 void hal_flush_reg_write_work(hal_soc_handle_t hal_handle) 868 { __hal_flush_reg_write_work((struct hal_soc *)hal_handle); 869 } 870 871 /** 872 * hal_reg_write_enqueue() - enqueue register writes into kworker 873 * @hal_soc: hal_soc pointer 874 * @srng: srng pointer 875 * @addr: iomem address of register 876 * @value: value to be written to iomem address 877 * 878 * This function executes from within the SRNG LOCK 879 * 880 * Return: None 881 */ 882 static void hal_reg_write_enqueue(struct hal_soc *hal_soc, 883 struct hal_srng *srng, 884 void __iomem *addr, 885 uint32_t value) 886 { 887 struct hal_reg_write_q_elem *q_elem; 888 uint32_t write_idx; 889 890 if (srng->reg_write_in_progress) { 891 hal_verbose_debug("Already in progress srng ring id 0x%x addr 0x%pK val %u", 892 srng->ring_id, addr, value); 893 qdf_atomic_inc(&hal_soc->stats.wstats.coalesces); 894 srng->wstats.coalesces++; 895 return; 896 } 897 898 write_idx = qdf_atomic_inc_return(&hal_soc->write_idx); 899 900 write_idx = write_idx & (HAL_REG_WRITE_QUEUE_LEN - 1); 901 902 q_elem = &hal_soc->reg_write_queue[write_idx]; 903 904 if (q_elem->valid) { 905 hal_err("queue full"); 906 QDF_BUG(0); 907 return; 908 } 909 910 qdf_atomic_inc(&hal_soc->stats.wstats.enqueues); 911 srng->wstats.enqueues++; 912 913 qdf_atomic_inc(&hal_soc->stats.wstats.q_depth); 914 915 q_elem->srng = srng; 916 q_elem->addr = addr; 917 q_elem->enqueue_val = value; 918 q_elem->enqueue_time = qdf_get_log_timestamp(); 919 920 /* 921 * Before the valid flag is set to true, all the other 922 * fields in the q_elem needs to be updated in memory. 923 * Else there is a chance that the dequeuing worker thread 924 * might read stale entries and process incorrect srng. 925 */ 926 qdf_wmb(); 927 q_elem->valid = true; 928 929 /* 930 * After all other fields in the q_elem has been updated 931 * in memory successfully, the valid flag needs to be updated 932 * in memory in time too. 933 * Else there is a chance that the dequeuing worker thread 934 * might read stale valid flag and the work will be bypassed 935 * for this round. And if there is no other work scheduled 936 * later, this hal register writing won't be updated any more. 937 */ 938 qdf_wmb(); 939 940 srng->reg_write_in_progress = true; 941 qdf_atomic_inc(&hal_soc->active_work_cnt); 942 943 hal_verbose_debug("write_idx %u srng ring id 0x%x addr 0x%pK val %u", 944 write_idx, srng->ring_id, addr, value); 945 946 qdf_queue_work(hal_soc->qdf_dev, hal_soc->reg_write_wq, 947 &hal_soc->reg_write_work); 948 } 949 950 /** 951 * hal_delayed_reg_write_init() - Initialization function for delayed reg writes 952 * @hal: hal_soc pointer 953 * 954 * Initialize main data structures to process register writes in a delayed 955 * workqueue. 956 * 957 * Return: QDF_STATUS_SUCCESS on success else a QDF error. 958 */ 959 static QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal) 960 { 961 hal->reg_write_wq = 962 qdf_alloc_high_prior_ordered_workqueue("hal_register_write_wq"); 963 qdf_create_work(0, &hal->reg_write_work, hal_reg_write_work, hal); 964 hal->reg_write_queue = qdf_mem_malloc(HAL_REG_WRITE_QUEUE_LEN * 965 sizeof(*hal->reg_write_queue)); 966 if (!hal->reg_write_queue) { 967 hal_err("unable to allocate memory"); 968 QDF_BUG(0); 969 return QDF_STATUS_E_NOMEM; 970 } 971 972 /* Initial value of indices */ 973 hal->read_idx = 0; 974 qdf_atomic_set(&hal->write_idx, -1); 975 return QDF_STATUS_SUCCESS; 976 } 977 978 /** 979 * hal_delayed_reg_write_deinit() - De-Initialize delayed reg write processing 980 * @hal: hal_soc pointer 981 * 982 * De-initialize main data structures to process register writes in a delayed 983 * workqueue. 984 * 985 * Return: None 986 */ 987 static void hal_delayed_reg_write_deinit(struct hal_soc *hal) 988 { 989 __hal_flush_reg_write_work(hal); 990 991 qdf_flush_workqueue(0, hal->reg_write_wq); 992 qdf_destroy_workqueue(0, hal->reg_write_wq); 993 qdf_mem_free(hal->reg_write_queue); 994 } 995 996 #else 997 static inline QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal) 998 { 999 return QDF_STATUS_SUCCESS; 1000 } 1001 1002 static inline void hal_delayed_reg_write_deinit(struct hal_soc *hal) 1003 { 1004 } 1005 #endif 1006 1007 #ifdef FEATURE_HAL_DELAYED_REG_WRITE 1008 #ifdef HAL_RECORD_SUSPEND_WRITE 1009 static struct hal_suspend_write_history 1010 g_hal_suspend_write_history[HAL_SUSPEND_WRITE_HISTORY_MAX]; 1011 1012 static 1013 void hal_event_suspend_record(uint8_t ring_id, uint32_t value, uint32_t count) 1014 { 1015 uint32_t index = qdf_atomic_read(g_hal_suspend_write_history.index) & 1016 (HAL_SUSPEND_WRITE_HISTORY_MAX - 1); 1017 struct hal_suspend_write_record *cur_event = 1018 &hal_suspend_write_event.record[index]; 1019 1020 cur_event->ts = qdf_get_log_timestamp(); 1021 cur_event->ring_id = ring_id; 1022 cur_event->value = value; 1023 cur_event->direct_wcount = count; 1024 qdf_atomic_inc(g_hal_suspend_write_history.index); 1025 } 1026 1027 static inline 1028 void hal_record_suspend_write(uint8_t ring_id, uint32_t value, uint32_t count) 1029 { 1030 if (hif_rtpm_get_state() >= HIF_RTPM_STATE_SUSPENDING) 1031 hal_event_suspend_record(ring_id, value, count); 1032 } 1033 #else 1034 static inline 1035 void hal_record_suspend_write(uint8_t ring_id, uint32_t value, uint32_t count) 1036 { 1037 } 1038 #endif 1039 1040 #ifdef QCA_WIFI_QCA6750 1041 void hal_delayed_reg_write(struct hal_soc *hal_soc, 1042 struct hal_srng *srng, 1043 void __iomem *addr, 1044 uint32_t value) 1045 { 1046 uint8_t vote_access; 1047 1048 switch (srng->ring_type) { 1049 case CE_SRC: 1050 case CE_DST: 1051 case CE_DST_STATUS: 1052 vote_access = hif_get_ep_vote_access(hal_soc->hif_handle, 1053 HIF_EP_VOTE_NONDP_ACCESS); 1054 if ((vote_access == HIF_EP_VOTE_ACCESS_DISABLE) || 1055 (vote_access == HIF_EP_VOTE_INTERMEDIATE_ACCESS && 1056 PLD_MHI_STATE_L0 == 1057 pld_get_mhi_state(hal_soc->qdf_dev->dev))) { 1058 hal_write_address_32_mb(hal_soc, addr, value, false); 1059 qdf_atomic_inc(&hal_soc->stats.wstats.direct); 1060 srng->wstats.direct++; 1061 } else { 1062 hal_reg_write_enqueue(hal_soc, srng, addr, value); 1063 } 1064 break; 1065 default: 1066 if (hif_get_ep_vote_access(hal_soc->hif_handle, 1067 HIF_EP_VOTE_DP_ACCESS) == 1068 HIF_EP_VOTE_ACCESS_DISABLE || 1069 hal_is_reg_write_tput_level_high(hal_soc) || 1070 PLD_MHI_STATE_L0 == 1071 pld_get_mhi_state(hal_soc->qdf_dev->dev)) { 1072 hal_write_address_32_mb(hal_soc, addr, value, false); 1073 qdf_atomic_inc(&hal_soc->stats.wstats.direct); 1074 srng->wstats.direct++; 1075 } else { 1076 hal_reg_write_enqueue(hal_soc, srng, addr, value); 1077 } 1078 1079 break; 1080 } 1081 } 1082 #else 1083 void hal_delayed_reg_write(struct hal_soc *hal_soc, 1084 struct hal_srng *srng, 1085 void __iomem *addr, 1086 uint32_t value) 1087 { 1088 if (hal_is_reg_write_tput_level_high(hal_soc) || 1089 pld_is_device_awake(hal_soc->qdf_dev->dev)) { 1090 qdf_atomic_inc(&hal_soc->stats.wstats.direct); 1091 srng->wstats.direct++; 1092 hal_write_address_32_mb(hal_soc, addr, value, false); 1093 } else { 1094 hal_reg_write_enqueue(hal_soc, srng, addr, value); 1095 } 1096 1097 hal_record_suspend_write(srng->ring_id, value, srng->wstats.direct); 1098 } 1099 #endif 1100 #endif 1101 1102 void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev) 1103 { 1104 struct hal_soc *hal; 1105 int i; 1106 1107 hal = qdf_mem_common_alloc(sizeof(*hal)); 1108 1109 if (!hal) { 1110 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1111 "%s: hal_soc allocation failed", __func__); 1112 goto fail0; 1113 } 1114 hal->hif_handle = hif_handle; 1115 hal->dev_base_addr = hif_get_dev_ba(hif_handle); /* UMAC */ 1116 hal->dev_base_addr_ce = hif_get_dev_ba_ce(hif_handle); /* CE */ 1117 hal->dev_base_addr_cmem = hif_get_dev_ba_cmem(hif_handle); /* CMEM */ 1118 hal->dev_base_addr_pmm = hif_get_dev_ba_pmm(hif_handle); /* PMM */ 1119 hal->qdf_dev = qdf_dev; 1120 hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent( 1121 qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) * 1122 HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr)); 1123 if (!hal->shadow_rdptr_mem_paddr) { 1124 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1125 "%s: hal->shadow_rdptr_mem_paddr allocation failed", 1126 __func__); 1127 goto fail1; 1128 } 1129 qdf_mem_zero(hal->shadow_rdptr_mem_vaddr, 1130 sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX); 1131 1132 hal->shadow_wrptr_mem_vaddr = 1133 (uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev, 1134 sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS, 1135 &(hal->shadow_wrptr_mem_paddr)); 1136 if (!hal->shadow_wrptr_mem_vaddr) { 1137 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1138 "%s: hal->shadow_wrptr_mem_vaddr allocation failed", 1139 __func__); 1140 goto fail2; 1141 } 1142 qdf_mem_zero(hal->shadow_wrptr_mem_vaddr, 1143 sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS); 1144 1145 for (i = 0; i < HAL_SRNG_ID_MAX; i++) { 1146 hal->srng_list[i].initialized = 0; 1147 hal->srng_list[i].ring_id = i; 1148 } 1149 1150 qdf_spinlock_create(&hal->register_access_lock); 1151 hal->register_window = 0; 1152 hal->target_type = hal_get_target_type(hal_soc_to_hal_soc_handle(hal)); 1153 hal->version = hif_get_soc_version(hif_handle); 1154 hal->ops = qdf_mem_malloc(sizeof(*hal->ops)); 1155 1156 if (!hal->ops) { 1157 hal_err("unable to allocable memory for HAL ops"); 1158 goto fail3; 1159 } 1160 1161 hal_target_based_configure(hal); 1162 1163 hal_reg_write_fail_history_init(hal); 1164 1165 qdf_minidump_log(hal, sizeof(*hal), "hal_soc"); 1166 1167 qdf_ssr_driver_dump_register_region("hal_soc", hal, sizeof(*hal)); 1168 1169 qdf_atomic_init(&hal->active_work_cnt); 1170 if (hal_delayed_reg_write_init(hal) != QDF_STATUS_SUCCESS) { 1171 hal_err("unable to initialize delayed reg write"); 1172 goto fail4; 1173 } 1174 1175 hif_rtpm_register(HIF_RTPM_ID_HAL_REO_CMD, NULL); 1176 1177 return (void *)hal; 1178 fail4: 1179 qdf_ssr_driver_dump_unregister_region("hal_soc"); 1180 qdf_minidump_remove(hal, sizeof(*hal), "hal_soc"); 1181 qdf_mem_free(hal->ops); 1182 fail3: 1183 qdf_mem_free_consistent(qdf_dev, qdf_dev->dev, 1184 sizeof(*hal->shadow_wrptr_mem_vaddr) * 1185 HAL_MAX_LMAC_RINGS, 1186 hal->shadow_wrptr_mem_vaddr, 1187 hal->shadow_wrptr_mem_paddr, 0); 1188 fail2: 1189 qdf_mem_free_consistent(qdf_dev, qdf_dev->dev, 1190 sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX, 1191 hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0); 1192 fail1: 1193 qdf_mem_common_free(hal); 1194 fail0: 1195 return NULL; 1196 } 1197 qdf_export_symbol(hal_attach); 1198 1199 void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem) 1200 { 1201 struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl; 1202 mem->dev_base_addr = (void *)hal->dev_base_addr; 1203 mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr; 1204 mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr; 1205 mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr; 1206 mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr; 1207 hif_read_phy_mem_base((void *)hal->hif_handle, 1208 (qdf_dma_addr_t *)&mem->dev_base_paddr); 1209 mem->lmac_srng_start_id = HAL_SRNG_LMAC1_ID_START; 1210 return; 1211 } 1212 qdf_export_symbol(hal_get_meminfo); 1213 1214 void hal_detach(void *hal_soc) 1215 { 1216 struct hal_soc *hal = (struct hal_soc *)hal_soc; 1217 1218 hif_rtpm_deregister(HIF_RTPM_ID_HAL_REO_CMD); 1219 hal_delayed_reg_write_deinit(hal); 1220 hal_reo_shared_qaddr_detach((hal_soc_handle_t)hal); 1221 qdf_ssr_driver_dump_unregister_region("hal_soc"); 1222 qdf_minidump_remove(hal, sizeof(*hal), "hal_soc"); 1223 qdf_mem_free(hal->ops); 1224 1225 qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev, 1226 sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX, 1227 hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0); 1228 qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev, 1229 sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS, 1230 hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0); 1231 qdf_mem_common_free(hal); 1232 1233 return; 1234 } 1235 qdf_export_symbol(hal_detach); 1236 1237 #define HAL_CE_CHANNEL_DST_DEST_CTRL_ADDR(x) ((x) + 0x000000b0) 1238 #define HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK 0x0000ffff 1239 #define HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_ADDR(x) ((x) + 0x00000040) 1240 #define HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_RMSK 0x00000007 1241 1242 /** 1243 * hal_ce_dst_setup() - Initialize CE destination ring registers 1244 * @hal: HAL SOC handle 1245 * @srng: SRNG ring pointer 1246 * @ring_num: ring number 1247 */ 1248 static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng, 1249 int ring_num) 1250 { 1251 uint32_t reg_val = 0; 1252 uint32_t reg_addr; 1253 struct hal_hw_srng_config *ring_config = 1254 HAL_SRNG_CONFIG(hal, CE_DST); 1255 1256 /* set DEST_MAX_LENGTH according to ce assignment */ 1257 reg_addr = HAL_CE_CHANNEL_DST_DEST_CTRL_ADDR( 1258 ring_config->reg_start[R0_INDEX] + 1259 (ring_num * ring_config->reg_size[R0_INDEX])); 1260 1261 reg_val = HAL_REG_READ(hal, reg_addr); 1262 reg_val &= ~HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK; 1263 reg_val |= srng->u.dst_ring.max_buffer_length & 1264 HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK; 1265 HAL_REG_WRITE(hal, reg_addr, reg_val); 1266 1267 if (srng->prefetch_timer) { 1268 reg_addr = HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_ADDR( 1269 ring_config->reg_start[R0_INDEX] + 1270 (ring_num * ring_config->reg_size[R0_INDEX])); 1271 1272 reg_val = HAL_REG_READ(hal, reg_addr); 1273 reg_val &= ~HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_RMSK; 1274 reg_val |= srng->prefetch_timer; 1275 HAL_REG_WRITE(hal, reg_addr, reg_val); 1276 reg_val = HAL_REG_READ(hal, reg_addr); 1277 } 1278 1279 } 1280 1281 void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read, 1282 uint32_t *ix0, uint32_t *ix1, 1283 uint32_t *ix2, uint32_t *ix3) 1284 { 1285 uint32_t reg_offset; 1286 struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl; 1287 uint32_t reo_reg_base; 1288 1289 reo_reg_base = hal_get_reo_reg_base_offset(hal_soc_hdl); 1290 1291 if (read) { 1292 if (ix0) { 1293 reg_offset = 1294 HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR( 1295 reo_reg_base); 1296 *ix0 = HAL_REG_READ(hal, reg_offset); 1297 } 1298 1299 if (ix1) { 1300 reg_offset = 1301 HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR( 1302 reo_reg_base); 1303 *ix1 = HAL_REG_READ(hal, reg_offset); 1304 } 1305 1306 if (ix2) { 1307 reg_offset = 1308 HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR( 1309 reo_reg_base); 1310 *ix2 = HAL_REG_READ(hal, reg_offset); 1311 } 1312 1313 if (ix3) { 1314 reg_offset = 1315 HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR( 1316 reo_reg_base); 1317 *ix3 = HAL_REG_READ(hal, reg_offset); 1318 } 1319 } else { 1320 if (ix0) { 1321 reg_offset = 1322 HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR( 1323 reo_reg_base); 1324 HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset, 1325 *ix0, true); 1326 } 1327 1328 if (ix1) { 1329 reg_offset = 1330 HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR( 1331 reo_reg_base); 1332 HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset, 1333 *ix1, true); 1334 } 1335 1336 if (ix2) { 1337 reg_offset = 1338 HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR( 1339 reo_reg_base); 1340 HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset, 1341 *ix2, true); 1342 } 1343 1344 if (ix3) { 1345 reg_offset = 1346 HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR( 1347 reo_reg_base); 1348 HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset, 1349 *ix3, true); 1350 } 1351 } 1352 } 1353 1354 qdf_export_symbol(hal_reo_read_write_ctrl_ix); 1355 1356 void hal_srng_dst_set_hp_paddr_confirm(struct hal_srng *srng, uint64_t paddr) 1357 { 1358 SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_LSB, paddr & 0xffffffff); 1359 SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_MSB, paddr >> 32); 1360 } 1361 1362 qdf_export_symbol(hal_srng_dst_set_hp_paddr_confirm); 1363 1364 void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc, 1365 struct hal_srng *srng, 1366 uint32_t *vaddr) 1367 { 1368 uint32_t reg_offset; 1369 struct hal_soc *hal = (struct hal_soc *)hal_soc; 1370 1371 if (!srng) 1372 return; 1373 1374 srng->u.dst_ring.hp_addr = vaddr; 1375 reg_offset = SRNG_DST_ADDR(srng, HP) - hal->dev_base_addr; 1376 HAL_REG_WRITE_CONFIRM_RETRY( 1377 hal, reg_offset, srng->u.dst_ring.cached_hp, true); 1378 1379 if (vaddr) { 1380 *srng->u.dst_ring.hp_addr = srng->u.dst_ring.cached_hp; 1381 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1382 "hp_addr=%pK, cached_hp=%d", 1383 (void *)srng->u.dst_ring.hp_addr, 1384 srng->u.dst_ring.cached_hp); 1385 } 1386 } 1387 1388 qdf_export_symbol(hal_srng_dst_init_hp); 1389 1390 /** 1391 * hal_srng_hw_init - Private function to initialize SRNG HW 1392 * @hal: HAL SOC handle 1393 * @srng: SRNG ring pointer 1394 * @idle_check: Check if ring is idle 1395 * @idx: ring index 1396 */ 1397 static inline void hal_srng_hw_init(struct hal_soc *hal, 1398 struct hal_srng *srng, bool idle_check, uint32_t idx) 1399 { 1400 if (srng->ring_dir == HAL_SRNG_SRC_RING) 1401 hal_srng_src_hw_init(hal, srng, idle_check, idx); 1402 else 1403 hal_srng_dst_hw_init(hal, srng, idle_check, idx); 1404 } 1405 1406 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ 1407 bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc, 1408 int ring_type, int ring_num) 1409 { 1410 struct hal_soc *hal = (struct hal_soc *)hal_soc; 1411 struct hal_hw_srng_config *ring_config = 1412 HAL_SRNG_CONFIG(hal, ring_type); 1413 1414 return ring_config->nf_irq_support; 1415 } 1416 1417 /** 1418 * hal_srng_set_msi2_params() - Set MSI2 params to SRNG data structure from 1419 * ring params 1420 * @srng: SRNG handle 1421 * @ring_params: ring params for this SRNG 1422 * 1423 * Return: None 1424 */ 1425 static inline void 1426 hal_srng_set_msi2_params(struct hal_srng *srng, 1427 struct hal_srng_params *ring_params) 1428 { 1429 srng->msi2_addr = ring_params->msi2_addr; 1430 srng->msi2_data = ring_params->msi2_data; 1431 } 1432 1433 /** 1434 * hal_srng_get_nf_params() - Get the near full MSI2 params from srng 1435 * @srng: SRNG handle 1436 * @ring_params: ring params for this SRNG 1437 * 1438 * Return: None 1439 */ 1440 static inline void 1441 hal_srng_get_nf_params(struct hal_srng *srng, 1442 struct hal_srng_params *ring_params) 1443 { 1444 ring_params->msi2_addr = srng->msi2_addr; 1445 ring_params->msi2_data = srng->msi2_data; 1446 } 1447 1448 /** 1449 * hal_srng_set_nf_thresholds() - Set the near full thresholds in SRNG 1450 * @srng: SRNG handle where the params are to be set 1451 * @ring_params: ring params, from where threshold is to be fetched 1452 * 1453 * Return: None 1454 */ 1455 static inline void 1456 hal_srng_set_nf_thresholds(struct hal_srng *srng, 1457 struct hal_srng_params *ring_params) 1458 { 1459 srng->u.dst_ring.nf_irq_support = ring_params->nf_irq_support; 1460 srng->u.dst_ring.high_thresh = ring_params->high_thresh; 1461 } 1462 #else 1463 static inline void 1464 hal_srng_set_msi2_params(struct hal_srng *srng, 1465 struct hal_srng_params *ring_params) 1466 { 1467 } 1468 1469 static inline void 1470 hal_srng_get_nf_params(struct hal_srng *srng, 1471 struct hal_srng_params *ring_params) 1472 { 1473 } 1474 1475 static inline void 1476 hal_srng_set_nf_thresholds(struct hal_srng *srng, 1477 struct hal_srng_params *ring_params) 1478 { 1479 } 1480 #endif 1481 1482 #if defined(CLEAR_SW2TCL_CONSUMED_DESC) 1483 /** 1484 * hal_srng_last_desc_cleared_init - Initialize SRNG last_desc_cleared ptr 1485 * @srng: Source ring pointer 1486 * 1487 * Return: None 1488 */ 1489 static inline 1490 void hal_srng_last_desc_cleared_init(struct hal_srng *srng) 1491 { 1492 srng->last_desc_cleared = srng->ring_size - srng->entry_size; 1493 } 1494 1495 #else 1496 static inline 1497 void hal_srng_last_desc_cleared_init(struct hal_srng *srng) 1498 { 1499 } 1500 #endif /* CLEAR_SW2TCL_CONSUMED_DESC */ 1501 1502 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING 1503 static inline void hal_srng_update_high_wm_thresholds(struct hal_srng *srng) 1504 { 1505 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100] = 1506 ((srng->num_entries * 90) / 100); 1507 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90] = 1508 ((srng->num_entries * 80) / 100); 1509 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80] = 1510 ((srng->num_entries * 70) / 100); 1511 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70] = 1512 ((srng->num_entries * 60) / 100); 1513 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60] = 1514 ((srng->num_entries * 50) / 100); 1515 /* Below 50% threshold is not needed */ 1516 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT] = 0; 1517 1518 hal_info("ring_id: %u, wm_thresh- <50:%u, 50-60:%u, 60-70:%u, 70-80:%u, 80-90:%u, 90-100:%u", 1519 srng->ring_id, 1520 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT], 1521 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60], 1522 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70], 1523 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80], 1524 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90], 1525 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100]); 1526 } 1527 #else 1528 static inline void hal_srng_update_high_wm_thresholds(struct hal_srng *srng) 1529 { 1530 } 1531 #endif 1532 1533 void *hal_srng_setup_idx(void *hal_soc, int ring_type, int ring_num, int mac_id, 1534 struct hal_srng_params *ring_params, bool idle_check, 1535 uint32_t idx) 1536 { 1537 int ring_id; 1538 struct hal_soc *hal = (struct hal_soc *)hal_soc; 1539 hal_soc_handle_t hal_hdl = (hal_soc_handle_t)hal; 1540 struct hal_srng *srng; 1541 struct hal_hw_srng_config *ring_config = 1542 HAL_SRNG_CONFIG(hal, ring_type); 1543 void *dev_base_addr; 1544 int i; 1545 1546 ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id); 1547 if (ring_id < 0) 1548 return NULL; 1549 1550 hal_verbose_debug("mac_id %d ring_id %d", mac_id, ring_id); 1551 1552 srng = hal_get_srng(hal_soc, ring_id); 1553 1554 if (srng->initialized) { 1555 hal_verbose_debug("Ring (ring_type, ring_num) already initialized"); 1556 return NULL; 1557 } 1558 1559 dev_base_addr = hal->dev_base_addr; 1560 srng->ring_id = ring_id; 1561 srng->ring_type = ring_type; 1562 srng->ring_dir = ring_config->ring_dir; 1563 srng->ring_base_paddr = ring_params->ring_base_paddr; 1564 srng->ring_base_vaddr = ring_params->ring_base_vaddr; 1565 srng->entry_size = ring_config->entry_size; 1566 srng->num_entries = ring_params->num_entries; 1567 srng->ring_size = srng->num_entries * srng->entry_size; 1568 srng->ring_size_mask = srng->ring_size - 1; 1569 srng->ring_vaddr_end = srng->ring_base_vaddr + srng->ring_size; 1570 srng->msi_addr = ring_params->msi_addr; 1571 srng->msi_data = ring_params->msi_data; 1572 srng->intr_timer_thres_us = ring_params->intr_timer_thres_us; 1573 srng->intr_batch_cntr_thres_entries = 1574 ring_params->intr_batch_cntr_thres_entries; 1575 srng->pointer_timer_threshold = 1576 ring_params->pointer_timer_threshold; 1577 srng->pointer_num_threshold = 1578 ring_params->pointer_num_threshold; 1579 1580 if (!idle_check) 1581 srng->prefetch_timer = ring_params->prefetch_timer; 1582 srng->hal_soc = hal_soc; 1583 hal_srng_set_msi2_params(srng, ring_params); 1584 hal_srng_update_high_wm_thresholds(srng); 1585 1586 for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) { 1587 srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i] 1588 + (ring_num * ring_config->reg_size[i]); 1589 } 1590 1591 /* Zero out the entire ring memory */ 1592 qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size * 1593 srng->num_entries) << 2); 1594 1595 srng->flags = ring_params->flags; 1596 1597 /* For cached descriptors flush and invalidate the memory*/ 1598 if (srng->flags & HAL_SRNG_CACHED_DESC) { 1599 qdf_nbuf_dma_clean_range( 1600 srng->ring_base_vaddr, 1601 srng->ring_base_vaddr + 1602 ((srng->entry_size * srng->num_entries))); 1603 qdf_nbuf_dma_inv_range( 1604 srng->ring_base_vaddr, 1605 srng->ring_base_vaddr + 1606 ((srng->entry_size * srng->num_entries))); 1607 } 1608 #ifdef BIG_ENDIAN_HOST 1609 /* TODO: See if we should we get these flags from caller */ 1610 srng->flags |= HAL_SRNG_DATA_TLV_SWAP; 1611 srng->flags |= HAL_SRNG_MSI_SWAP; 1612 srng->flags |= HAL_SRNG_RING_PTR_SWAP; 1613 #endif 1614 1615 hal_srng_last_desc_cleared_init(srng); 1616 1617 if (srng->ring_dir == HAL_SRNG_SRC_RING) { 1618 srng->u.src_ring.hp = 0; 1619 srng->u.src_ring.reap_hp = srng->ring_size - 1620 srng->entry_size; 1621 srng->u.src_ring.tp_addr = 1622 &(hal->shadow_rdptr_mem_vaddr[ring_id]); 1623 srng->u.src_ring.low_threshold = 1624 ring_params->low_threshold * srng->entry_size; 1625 1626 if (srng->u.src_ring.tp_addr) 1627 qdf_mem_zero(srng->u.src_ring.tp_addr, 1628 sizeof(*hal->shadow_rdptr_mem_vaddr)); 1629 1630 if (ring_config->lmac_ring) { 1631 /* For LMAC rings, head pointer updates will be done 1632 * through FW by writing to a shared memory location 1633 */ 1634 srng->u.src_ring.hp_addr = 1635 &(hal->shadow_wrptr_mem_vaddr[ring_id - 1636 HAL_SRNG_LMAC1_ID_START]); 1637 srng->flags |= HAL_SRNG_LMAC_RING; 1638 1639 if (srng->u.src_ring.hp_addr) 1640 qdf_mem_zero(srng->u.src_ring.hp_addr, 1641 sizeof(*hal->shadow_wrptr_mem_vaddr)); 1642 1643 } else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) { 1644 srng->u.src_ring.hp_addr = 1645 hal_get_window_address(hal, 1646 SRNG_SRC_ADDR(srng, HP)); 1647 1648 if (CHECK_SHADOW_REGISTERS) { 1649 QDF_TRACE(QDF_MODULE_ID_TXRX, 1650 QDF_TRACE_LEVEL_ERROR, 1651 "%s: Ring (%d, %d) missing shadow config", 1652 __func__, ring_type, ring_num); 1653 } 1654 } else { 1655 hal_validate_shadow_register(hal, 1656 SRNG_SRC_ADDR(srng, HP), 1657 srng->u.src_ring.hp_addr); 1658 } 1659 } else { 1660 /* During initialization loop count in all the descriptors 1661 * will be set to zero, and HW will set it to 1 on completing 1662 * descriptor update in first loop, and increments it by 1 on 1663 * subsequent loops (loop count wraps around after reaching 1664 * 0xffff). The 'loop_cnt' in SW ring state is the expected 1665 * loop count in descriptors updated by HW (to be processed 1666 * by SW). 1667 */ 1668 hal_srng_set_nf_thresholds(srng, ring_params); 1669 srng->u.dst_ring.loop_cnt = 1; 1670 srng->u.dst_ring.tp = 0; 1671 srng->u.dst_ring.hp_addr = 1672 &(hal->shadow_rdptr_mem_vaddr[ring_id]); 1673 1674 if (srng->u.dst_ring.hp_addr) 1675 qdf_mem_zero(srng->u.dst_ring.hp_addr, 1676 sizeof(*hal->shadow_rdptr_mem_vaddr)); 1677 1678 if (ring_config->lmac_ring) { 1679 /* For LMAC rings, tail pointer updates will be done 1680 * through FW by writing to a shared memory location 1681 */ 1682 srng->u.dst_ring.tp_addr = 1683 &(hal->shadow_wrptr_mem_vaddr[ring_id - 1684 HAL_SRNG_LMAC1_ID_START]); 1685 srng->flags |= HAL_SRNG_LMAC_RING; 1686 1687 if (srng->u.dst_ring.tp_addr) 1688 qdf_mem_zero(srng->u.dst_ring.tp_addr, 1689 sizeof(*hal->shadow_wrptr_mem_vaddr)); 1690 1691 } else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) { 1692 srng->u.dst_ring.tp_addr = 1693 hal_get_window_address(hal, 1694 SRNG_DST_ADDR(srng, TP)); 1695 1696 if (CHECK_SHADOW_REGISTERS) { 1697 QDF_TRACE(QDF_MODULE_ID_TXRX, 1698 QDF_TRACE_LEVEL_ERROR, 1699 "%s: Ring (%d, %d) missing shadow config", 1700 __func__, ring_type, ring_num); 1701 } 1702 } else { 1703 hal_validate_shadow_register(hal, 1704 SRNG_DST_ADDR(srng, TP), 1705 srng->u.dst_ring.tp_addr); 1706 } 1707 } 1708 1709 if (!(ring_config->lmac_ring)) { 1710 if (idx) { 1711 hal->ops->hal_tx_ring_halt_set(hal_hdl); 1712 do { 1713 hal_info("Waiting for ring reset\n"); 1714 } while (!(hal->ops->hal_tx_ring_halt_poll(hal_hdl))); 1715 } 1716 hal_srng_hw_init(hal, srng, idle_check, idx); 1717 1718 if (idx) { 1719 hal->ops->hal_tx_ring_halt_reset(hal_hdl); 1720 } 1721 1722 1723 if (ring_type == CE_DST) { 1724 srng->u.dst_ring.max_buffer_length = ring_params->max_buffer_length; 1725 hal_ce_dst_setup(hal, srng, ring_num); 1726 } 1727 } 1728 1729 SRNG_LOCK_INIT(&srng->lock); 1730 1731 srng->srng_event = 0; 1732 1733 srng->initialized = true; 1734 1735 return (void *)srng; 1736 } 1737 qdf_export_symbol(hal_srng_setup_idx); 1738 1739 /** 1740 * hal_srng_setup - Initialize HW SRNG ring. 1741 * @hal_soc: Opaque HAL SOC handle 1742 * @ring_type: one of the types from hal_ring_type 1743 * @ring_num: Ring number if there are multiple rings of same type (staring 1744 * from 0) 1745 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings 1746 * @ring_params: SRNG ring params in hal_srng_params structure. 1747 * @idle_check: Check if ring is idle 1748 * 1749 * Callers are expected to allocate contiguous ring memory of size 1750 * 'num_entries * entry_size' bytes and pass the physical and virtual base 1751 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in 1752 * hal_srng_params structure. Ring base address should be 8 byte aligned 1753 * and size of each ring entry should be queried using the API 1754 * hal_srng_get_entrysize 1755 * 1756 * Return: Opaque pointer to ring on success 1757 * NULL on failure (if given ring is not available) 1758 */ 1759 void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num, 1760 int mac_id, struct hal_srng_params *ring_params, 1761 bool idle_check) 1762 { 1763 return hal_srng_setup_idx(hal_soc, ring_type, ring_num, mac_id, 1764 ring_params, idle_check, 0); 1765 } 1766 qdf_export_symbol(hal_srng_setup); 1767 1768 void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl) 1769 { 1770 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; 1771 SRNG_LOCK_DESTROY(&srng->lock); 1772 srng->initialized = 0; 1773 hal_srng_hw_disable(hal_soc, srng); 1774 } 1775 qdf_export_symbol(hal_srng_cleanup); 1776 1777 uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type) 1778 { 1779 struct hal_soc *hal = (struct hal_soc *)hal_soc; 1780 struct hal_hw_srng_config *ring_config = 1781 HAL_SRNG_CONFIG(hal, ring_type); 1782 return ring_config->entry_size << 2; 1783 } 1784 qdf_export_symbol(hal_srng_get_entrysize); 1785 1786 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type) 1787 { 1788 struct hal_soc *hal = (struct hal_soc *)hal_soc; 1789 struct hal_hw_srng_config *ring_config = 1790 HAL_SRNG_CONFIG(hal, ring_type); 1791 1792 return ring_config->max_size / ring_config->entry_size; 1793 } 1794 qdf_export_symbol(hal_srng_max_entries); 1795 1796 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type) 1797 { 1798 struct hal_soc *hal = (struct hal_soc *)hal_soc; 1799 struct hal_hw_srng_config *ring_config = 1800 HAL_SRNG_CONFIG(hal, ring_type); 1801 1802 return ring_config->ring_dir; 1803 } 1804 1805 void hal_srng_dump(struct hal_srng *srng) 1806 { 1807 if (srng->ring_dir == HAL_SRNG_SRC_RING) { 1808 hal_debug("=== SRC RING %d ===", srng->ring_id); 1809 hal_debug("hp %u, reap_hp %u, tp %u, cached tp %u", 1810 srng->u.src_ring.hp, 1811 srng->u.src_ring.reap_hp, 1812 *srng->u.src_ring.tp_addr, 1813 srng->u.src_ring.cached_tp); 1814 } else { 1815 hal_debug("=== DST RING %d ===", srng->ring_id); 1816 hal_debug("tp %u, hp %u, cached tp %u, loop_cnt %u", 1817 srng->u.dst_ring.tp, 1818 *srng->u.dst_ring.hp_addr, 1819 srng->u.dst_ring.cached_hp, 1820 srng->u.dst_ring.loop_cnt); 1821 } 1822 } 1823 1824 void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl, 1825 hal_ring_handle_t hal_ring_hdl, 1826 struct hal_srng_params *ring_params) 1827 { 1828 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; 1829 int i =0; 1830 ring_params->ring_id = srng->ring_id; 1831 ring_params->ring_dir = srng->ring_dir; 1832 ring_params->entry_size = srng->entry_size; 1833 1834 ring_params->ring_base_paddr = srng->ring_base_paddr; 1835 ring_params->ring_base_vaddr = srng->ring_base_vaddr; 1836 ring_params->num_entries = srng->num_entries; 1837 ring_params->msi_addr = srng->msi_addr; 1838 ring_params->msi_data = srng->msi_data; 1839 ring_params->intr_timer_thres_us = srng->intr_timer_thres_us; 1840 ring_params->intr_batch_cntr_thres_entries = 1841 srng->intr_batch_cntr_thres_entries; 1842 ring_params->low_threshold = srng->u.src_ring.low_threshold; 1843 ring_params->flags = srng->flags; 1844 ring_params->ring_id = srng->ring_id; 1845 for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) 1846 ring_params->hwreg_base[i] = srng->hwreg_base[i]; 1847 1848 hal_srng_get_nf_params(srng, ring_params); 1849 } 1850 qdf_export_symbol(hal_get_srng_params); 1851 1852 void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl, 1853 uint32_t low_threshold) 1854 { 1855 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; 1856 srng->u.src_ring.low_threshold = low_threshold * srng->entry_size; 1857 } 1858 qdf_export_symbol(hal_set_low_threshold); 1859 1860 #ifdef FEATURE_RUNTIME_PM 1861 void 1862 hal_srng_rtpm_access_end(hal_soc_handle_t hal_soc_hdl, 1863 hal_ring_handle_t hal_ring_hdl, 1864 uint32_t rtpm_id) 1865 { 1866 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; 1867 1868 if (qdf_unlikely(!hal_ring_hdl)) { 1869 qdf_print("Error: Invalid hal_ring\n"); 1870 return; 1871 } 1872 1873 if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, rtpm_id) == 0) { 1874 if (hif_system_pm_state_check(hal_soc->hif_handle)) { 1875 hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl); 1876 hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); 1877 hal_srng_inc_flush_cnt(hal_ring_hdl); 1878 } else { 1879 hal_srng_access_end(hal_soc_hdl, hal_ring_hdl); 1880 } 1881 1882 hif_rtpm_put(HIF_RTPM_PUT_ASYNC, rtpm_id); 1883 } else { 1884 hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl); 1885 hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); 1886 hal_srng_inc_flush_cnt(hal_ring_hdl); 1887 } 1888 } 1889 1890 qdf_export_symbol(hal_srng_rtpm_access_end); 1891 #endif /* FEATURE_RUNTIME_PM */ 1892 1893 #ifdef FORCE_WAKE 1894 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase) 1895 { 1896 struct hal_soc *hal_soc = (struct hal_soc *)soc; 1897 hal_soc->init_phase = init_phase; 1898 } 1899 #endif /* FORCE_WAKE */ 1900