/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "hal_api.h"
#include "hal_reo.h"
#include "target_type.h"
#include "qdf_module.h"
#include "wcss_version.h"
#include <qdf_tracepoint.h>

struct tcl_data_cmd gtcl_data_symbol __attribute__((used));

#ifdef QCA_WIFI_QCA6290
void hal_qca6290_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA8074
void hal_qca8074_attach(struct hal_soc *hal);
#endif
#if defined(QCA_WIFI_QCA8074V2) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA9574)
void hal_qca8074v2_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6390
void hal_qca6390_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6490
void hal_qca6490_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN9000
void hal_qcn9000_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN9224
void hal_qcn9224v1_attach(struct hal_soc *hal);
void hal_qcn9224v2_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN6122
void hal_qcn6122_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6750
void hal_qca6750_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA5018
void hal_qca5018_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA5332
void hal_qca5332_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_KIWI
void hal_kiwi_attach(struct hal_soc *hal);
#endif

#ifdef ENABLE_VERBOSE_DEBUG
bool is_hal_verbose_debug_enabled;
#endif

#define HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(x)	((x) + 0x4)
#define HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(x)	((x) + 0x8)
#define HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(x)	((x) + 0xc)
#define HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(x)	((x) + 0x10)

#ifdef ENABLE_HAL_REG_WR_HISTORY
struct hal_reg_write_fail_history hal_reg_wr_hist;

void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
				 uint32_t offset,
				 uint32_t wr_val, uint32_t rd_val)
{
	struct hal_reg_write_fail_entry *record;
	int idx;

	idx = hal_history_get_next_index(&hal_soc->reg_wr_fail_hist->index,
					 HAL_REG_WRITE_HIST_SIZE);

	record = &hal_soc->reg_wr_fail_hist->record[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->reg_offset = offset;
	record->write_val = wr_val;
	record->read_val = rd_val;
}

static void hal_reg_write_fail_history_init(struct hal_soc *hal)
{
	hal->reg_wr_fail_hist = &hal_reg_wr_hist;

	qdf_atomic_set(&hal->reg_wr_fail_hist->index, -1);
}
#else
static void hal_reg_write_fail_history_init(struct hal_soc *hal)
{
}
#endif

/**
 * hal_get_srng_ring_id() - get the ring id of a described ring
 * @hal: hal_soc data structure
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings)
 *
 * Return: the ring id or -EINVAL if the ring does not exist.
 */
static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
				int ring_num, int mac_id)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	int ring_id;

	if (ring_num >= ring_config->max_rings) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: ring_num exceeded maximum no. of supported rings",
			  __func__);
		/* TODO: This is a programming error. Assert if this happens */
		return -EINVAL;
	}

	/*
	 * Some DMAC rings share a common source ring, hence don't provide them
	 * with separate ring IDs per LMAC.
	 */
	if (ring_config->lmac_ring && !ring_config->dmac_cmn_ring) {
		ring_id = (ring_config->start_ring_id + ring_num +
			   (mac_id * HAL_MAX_RINGS_PER_LMAC));
	} else {
		ring_id = ring_config->start_ring_id + ring_num;
	}

	return ring_id;
}

static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
{
	/* TODO: Should we allocate srng structures dynamically? */
	return &(hal->srng_list[ring_id]);
}
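/*
 * Worked example (hypothetical numbers) of the ring-id arithmetic in
 * hal_get_srng_ring_id() above: for an LMAC ring whose config has
 * start_ring_id = S, requesting ring_num = 1 on mac_id = 2 yields
 * ring_id = S + 1 + 2 * HAL_MAX_RINGS_PER_LMAC, i.e. each MAC owns a
 * private block of HAL_MAX_RINGS_PER_LMAC ids. Non-LMAC rings (and DMAC
 * common rings) collapse to S + ring_num.
 */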
#ifndef SHADOW_REG_CONFIG_DISABLED
#define HP_OFFSET_IN_REG_START 1
#define OFFSET_FROM_HP_TO_TP 4
static void hal_update_srng_hp_tp_address(struct hal_soc *hal_soc,
					  int shadow_config_index,
					  int ring_type,
					  int ring_num)
{
	struct hal_srng *srng;
	int ring_id;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal_soc, ring_type);

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = hal_get_srng(hal_soc, ring_id);

	if (ring_config->ring_dir == HAL_SRNG_DST_RING) {
		srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal_soc->dev_base_addr;
		hal_debug("tp_addr=%pK dev base addr %pK index %u",
			  srng->u.dst_ring.tp_addr, hal_soc->dev_base_addr,
			  shadow_config_index);
	} else {
		srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal_soc->dev_base_addr;
		hal_debug("hp_addr=%pK dev base addr %pK index %u",
			  srng->u.src_ring.hp_addr,
			  hal_soc->dev_base_addr, shadow_config_index);
	}
}
#endif

#ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
void hal_set_one_target_reg_config(struct hal_soc *hal,
				   uint32_t target_reg_offset,
				   int list_index)
{
	int i = list_index;

	qdf_assert_always(i < MAX_GENERIC_SHADOW_REG);
	hal->list_shadow_reg_config[i].target_register =
		target_reg_offset;
	hal->num_generic_shadow_regs_configured++;
}

qdf_export_symbol(hal_set_one_target_reg_config);

#define REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET 0x4
#define MAX_REO_REMAP_SHADOW_REGS 4
QDF_STATUS hal_set_shadow_regs(void *hal_soc)
{
	uint32_t target_reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	int i;
	struct hal_hw_srng_config *srng_config =
		&hal->hw_srng_table[WBM2SW_RELEASE];
	uint32_t reo_reg_base;

	reo_reg_base = hal_get_reo_reg_base_offset(hal_soc);

	target_reg_offset =
		HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(reo_reg_base);

	for (i = 0; i < MAX_REO_REMAP_SHADOW_REGS; i++) {
		hal_set_one_target_reg_config(hal, target_reg_offset, i);
		target_reg_offset += REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET;
	}

	target_reg_offset = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_reg_offset += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
			      * HAL_IPA_TX_COMP_RING_IDX);

	hal_set_one_target_reg_config(hal, target_reg_offset, i);
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_shadow_regs);

QDF_STATUS hal_construct_shadow_regs(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	int shadow_config_index = hal->num_shadow_registers_configured;
	int i;
	int num_regs = hal->num_generic_shadow_regs_configured;

	for (i = 0; i < num_regs; i++) {
		qdf_assert_always(shadow_config_index < MAX_SHADOW_REGISTERS);
		hal->shadow_config[shadow_config_index].addr =
			hal->list_shadow_reg_config[i].target_register;
		hal->list_shadow_reg_config[i].shadow_config_index =
			shadow_config_index;
		hal->list_shadow_reg_config[i].va =
			SHADOW_REGISTER(shadow_config_index) +
			(uintptr_t)hal->dev_base_addr;
		hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x",
			  hal->shadow_config[shadow_config_index].addr,
			  SHADOW_REGISTER(shadow_config_index),
			  shadow_config_index);
		shadow_config_index++;
		hal->num_shadow_registers_configured++;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_shadow_regs);
#endif
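/*
 * Illustration (not additional driver logic): hal_set_shadow_regs() above
 * builds the generic shadow list as four consecutive REO remap registers
 * followed by the IPA TX completion ring HP register. With the 0x4 stride
 * this walks IX_0 (base + 0x4), IX_1 (base + 0x8), IX_2 (base + 0xc) and
 * IX_3 (base + 0x10), matching the
 * HAL_REO_DESTINATION_RING_CTRL_IX_n_ADDR macros defined earlier in this
 * file.
 */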
#ifndef SHADOW_REG_CONFIG_DISABLED

QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
				     int ring_type,
				     int ring_num)
{
	uint32_t target_register;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type];
	int shadow_config_index = hal->num_shadow_registers_configured;

	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
		QDF_ASSERT(0);
		return QDF_STATUS_E_RESOURCES;
	}

	hal->num_shadow_registers_configured++;

	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
			    * ring_num);

	/* if the ring is a dst ring, we need to shadow the tail pointer */
	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
		target_register += OFFSET_FROM_HP_TO_TP;

	hal->shadow_config[shadow_config_index].addr = target_register;

	/* update hp/tp addr in the hal_soc structure */
	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
				      ring_num);

	hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x, ring_type %d, ring num %d",
		  target_register,
		  SHADOW_REGISTER(shadow_config_index),
		  shadow_config_index,
		  ring_type, ring_num);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_one_shadow_config);

QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc)
{
	int ring_type, ring_num;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
		struct hal_hw_srng_config *srng_config =
			&hal->hw_srng_table[ring_type];

		if (ring_type == CE_SRC ||
		    ring_type == CE_DST ||
		    ring_type == CE_DST_STATUS)
			continue;

		if (srng_config->lmac_ring)
			continue;

		for (ring_num = 0; ring_num < srng_config->max_rings;
		     ring_num++)
			hal_set_one_shadow_config(hal_soc, ring_type,
						  ring_num);
	}

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_srng_shadow_regs);
#else

QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc)
{
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_srng_shadow_regs);

QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
				     int ring_num)
{
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_one_shadow_config);
#endif
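/*
 * Sketch of the expected configuration flow (an assumption drawn from the
 * code above, not an additional API): hal_construct_srng_shadow_regs()
 * walks every ring type and claims one shadow register per ring via
 * hal_set_one_shadow_config(). CE rings are skipped here, and LMAC rings
 * are skipped because their head/tail pointers are exchanged with FW
 * through shared memory rather than through shadow registers (see
 * hal_srng_setup() later in this file).
 */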
void hal_get_shadow_config(void *hal_soc,
			   struct pld_shadow_reg_v2_cfg **shadow_config,
			   int *num_shadow_registers_configured)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	*shadow_config = &hal->shadow_config[0].v2;
	*num_shadow_registers_configured =
		hal->num_shadow_registers_configured;
}

qdf_export_symbol(hal_get_shadow_config);

#ifdef CONFIG_SHADOW_V3
void hal_get_shadow_v3_config(void *hal_soc,
			      struct pld_shadow_reg_v3_cfg **shadow_config,
			      int *num_shadow_registers_configured)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	*shadow_config = &hal->shadow_config[0].v3;
	*num_shadow_registers_configured =
		hal->num_shadow_registers_configured;
}

qdf_export_symbol(hal_get_shadow_v3_config);
#endif

static bool hal_validate_shadow_register(struct hal_soc *hal,
					 uint32_t *destination,
					 uint32_t *shadow_address)
{
	unsigned int index;
	uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr;
	int destination_ba_offset =
		((char *)destination) - (char *)hal->dev_base_addr;

	index = shadow_address - shadow_0_offset;

	if (index >= MAX_SHADOW_REGISTERS) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: index %x out of bounds", __func__, index);
		goto error;
	} else if (hal->shadow_config[index].addr != destination_ba_offset) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: sanity check failure, expected %x, found %x",
			  __func__, destination_ba_offset,
			  hal->shadow_config[index].addr);
		goto error;
	}
	return true;
error:
	qdf_print("baddr %pK, destination %pK, shadow_address %pK s0offset %pK index %x",
		  hal->dev_base_addr, destination, shadow_address,
		  shadow_0_offset, index);
	QDF_BUG(0);
	return false;
}
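/*
 * Index arithmetic note (illustrative): shadow_address and shadow_0_offset
 * are uint32_t pointers, so the subtraction in
 * hal_validate_shadow_register() already divides the byte distance by
 * sizeof(uint32_t). Assuming SHADOW_REGISTER() lays out consecutive 32-bit
 * registers, a shadow_address 8 bytes past SHADOW_REGISTER(0) validates
 * against hal->shadow_config[2].
 */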
static void hal_target_based_configure(struct hal_soc *hal)
{
	/*
	 * Indicate initialization of srngs to avoid force wake
	 * as umac power collapse is not enabled yet
	 */
	hal->init_phase = true;

	switch (hal->target_type) {
#ifdef QCA_WIFI_QCA6290
	case TARGET_TYPE_QCA6290:
		hal->use_register_windowing = true;
		hal_qca6290_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		hal->use_register_windowing = true;
		hal_qca6390_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6490
	case TARGET_TYPE_QCA6490:
		hal->use_register_windowing = true;
		hal_qca6490_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6750
	case TARGET_TYPE_QCA6750:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_qca6750_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_KIWI
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
		hal->use_register_windowing = true;
		hal_kiwi_attach(hal);
		break;
#endif
#if defined(QCA_WIFI_QCA8074) && defined(WIFI_TARGET_TYPE_3_0)
	case TARGET_TYPE_QCA8074:
		hal_qca8074_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA8074V2)
	case TARGET_TYPE_QCA8074V2:
		hal_qca8074v2_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA6018)
	case TARGET_TYPE_QCA6018:
		hal_qca8074v2_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA9574)
	case TARGET_TYPE_QCA9574:
		hal_qca8074v2_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCN6122)
	case TARGET_TYPE_QCN6122:
		hal->use_register_windowing = true;
		/*
		 * Static window map is enabled for qcn6122 to use 2mb bar
		 * size and use multiple windows to write into registers.
		 */
		hal->static_window_map = true;
		hal_qcn6122_attach(hal);
		break;
#endif

#ifdef QCA_WIFI_QCN9000
	case TARGET_TYPE_QCN9000:
		hal->use_register_windowing = true;
		/*
		 * Static window map is enabled for qcn9000 to use 2mb bar
		 * size and use multiple windows to write into registers.
		 */
		hal->static_window_map = true;
		hal_qcn9000_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA5018
	case TARGET_TYPE_QCA5018:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_qca5018_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCN9224
	case TARGET_TYPE_QCN9224:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		if (hal->version == 1)
			hal_qcn9224v1_attach(hal);
		else
			hal_qcn9224v2_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA5332
	case TARGET_TYPE_QCA5332:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_qca5332_attach(hal);
		break;
#endif
	default:
		break;
	}
}

uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hif_target_info *tgt_info =
		hif_get_target_info_handle(hal_soc->hif_handle);

	return tgt_info->target_type;
}

qdf_export_symbol(hal_get_target_type);
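/*
 * Note (assumption, not verified against every per-target file): each
 * hal_<target>_attach() hook selected above is expected to fill in the
 * per-target callback table pointed to by hal->ops (allocated in
 * hal_attach() before this function runs) along with the SRNG register
 * layout; an unrecognized target_type falls through the default case with
 * no per-target hooks installed.
 */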
#if defined(FEATURE_HAL_DELAYED_REG_WRITE)
/**
 * hal_is_reg_write_tput_level_high() - throughput level for delayed reg writes
 * @hal: hal_soc pointer
 *
 * Return: true if throughput is high, else false.
 */
static inline bool hal_is_reg_write_tput_level_high(struct hal_soc *hal)
{
	int bw_level = hif_get_bandwidth_level(hal->hif_handle);

	return bw_level >= PLD_BUS_WIDTH_MEDIUM;
}

static inline
char *hal_fill_reg_write_srng_stats(struct hal_srng *srng,
				    char *buf, qdf_size_t size)
{
	qdf_scnprintf(buf, size, "enq %u deq %u coal %u direct %u",
		      srng->wstats.enqueues, srng->wstats.dequeues,
		      srng->wstats.coalesces, srng->wstats.direct);
	return buf;
}

/* bytes for local buffer */
#define HAL_REG_WRITE_SRNG_STATS_LEN 100

void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_srng *srng;
	char buf[HAL_REG_WRITE_SRNG_STATS_LEN];
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	srng = hal_get_srng(hal, HAL_SRNG_SW2TCL1);
	hal_debug("SW2TCL1: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_WBM2SW0_RELEASE);
	hal_debug("WBM2SW0: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_REO2SW1);
	hal_debug("REO2SW1: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_REO2SW2);
	hal_debug("REO2SW2: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_REO2SW3);
	hal_debug("REO2SW3: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
}

void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
{
	uint32_t *hist;
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	hist = hal->stats.wstats.sched_delay;
	hal_debug("wstats: enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
		  qdf_atomic_read(&hal->stats.wstats.enqueues),
		  hal->stats.wstats.dequeues,
		  qdf_atomic_read(&hal->stats.wstats.coalesces),
		  qdf_atomic_read(&hal->stats.wstats.direct),
		  qdf_atomic_read(&hal->stats.wstats.q_depth),
		  hal->stats.wstats.max_q_depth,
		  hist[REG_WRITE_SCHED_DELAY_SUB_100us],
		  hist[REG_WRITE_SCHED_DELAY_SUB_1000us],
		  hist[REG_WRITE_SCHED_DELAY_SUB_5000us],
		  hist[REG_WRITE_SCHED_DELAY_GT_5000us]);
}

int hal_get_reg_write_pending_work(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	return qdf_atomic_read(&hal->active_work_cnt);
}

#endif
#ifdef FEATURE_HAL_DELAYED_REG_WRITE
#ifdef MEMORY_DEBUG
/*
 * Length of the queue(array) used to hold delayed register writes.
 * Must be a power of 2, since indices are wrapped by masking with
 * (HAL_REG_WRITE_QUEUE_LEN - 1).
 */
#define HAL_REG_WRITE_QUEUE_LEN 128
#else
#define HAL_REG_WRITE_QUEUE_LEN 32
#endif

/**
 * hal_process_reg_write_q_elem() - process a register write queue element
 * @hal: hal_soc pointer
 * @q_elem: pointer to hal register write queue element
 *
 * Return: The value which was written to the address
 */
static uint32_t
hal_process_reg_write_q_elem(struct hal_soc *hal,
			     struct hal_reg_write_q_elem *q_elem)
{
	struct hal_srng *srng = q_elem->srng;
	uint32_t write_val;

	SRNG_LOCK(&srng->lock);

	srng->reg_write_in_progress = false;
	srng->wstats.dequeues++;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		q_elem->dequeue_val = srng->u.src_ring.hp;
		hal_write_address_32_mb(hal,
					srng->u.src_ring.hp_addr,
					srng->u.src_ring.hp, false);
		write_val = srng->u.src_ring.hp;
	} else {
		q_elem->dequeue_val = srng->u.dst_ring.tp;
		hal_write_address_32_mb(hal,
					srng->u.dst_ring.tp_addr,
					srng->u.dst_ring.tp, false);
		write_val = srng->u.dst_ring.tp;
	}

	q_elem->valid = 0;
	srng->last_dequeue_time = q_elem->dequeue_time;
	SRNG_UNLOCK(&srng->lock);

	return write_val;
}

/**
 * hal_reg_write_fill_sched_delay_hist() - fill reg write delay histogram in hal
 * @hal: hal_soc pointer
 * @delay_us: delay in us
 *
 * Return: None
 */
static inline void hal_reg_write_fill_sched_delay_hist(struct hal_soc *hal,
						       uint64_t delay_us)
{
	uint32_t *hist;

	hist = hal->stats.wstats.sched_delay;

	if (delay_us < 100)
		hist[REG_WRITE_SCHED_DELAY_SUB_100us]++;
	else if (delay_us < 1000)
		hist[REG_WRITE_SCHED_DELAY_SUB_1000us]++;
	else if (delay_us < 5000)
		hist[REG_WRITE_SCHED_DELAY_SUB_5000us]++;
	else
		hist[REG_WRITE_SCHED_DELAY_GT_5000us]++;
}
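/*
 * Overview of the delayed register write pipeline implemented below
 * (a restatement of this file's logic for readability, not an additional
 * mechanism):
 *
 *   hal_delayed_reg_write()            producer, called with SRNG lock held
 *     -> hal_reg_write_enqueue()       publish q_elem, kick workqueue
 *       -> hal_reg_write_work()        kworker consumer, drains in order
 *         -> hal_process_reg_write_q_elem()  performs the actual MMIO write
 *
 * The sched_delay histogram above buckets the enqueue-to-dequeue latency
 * as <100us, 100us-1ms, 1ms-5ms and >5ms.
 */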
#ifdef SHADOW_WRITE_DELAY

#define SHADOW_WRITE_MIN_DELTA_US 5
#define SHADOW_WRITE_DELAY_US 50

/*
 * Never add srngs which are performance related here.
 * The delay itself will hit performance heavily.
 */
#define IS_SRNG_MATCH(s)	((s)->ring_id == HAL_SRNG_CE_1_DST_STATUS || \
				 (s)->ring_id == HAL_SRNG_CE_1_DST)

static inline bool hal_reg_write_need_delay(struct hal_reg_write_q_elem *elem)
{
	struct hal_srng *srng = elem->srng;
	struct hal_soc *hal;
	qdf_time_t now;
	qdf_iomem_t real_addr;

	if (qdf_unlikely(!srng))
		return false;

	hal = srng->hal_soc;
	if (qdf_unlikely(!hal))
		return false;

	/* Check if it is target srng, and valid shadow reg */
	if (qdf_likely(!IS_SRNG_MATCH(srng)))
		return false;

	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		real_addr = SRNG_SRC_ADDR(srng, HP);
	else
		real_addr = SRNG_DST_ADDR(srng, TP);
	if (!hal_validate_shadow_register(hal, real_addr, elem->addr))
		return false;

	/* Check the time delta from last write of same srng */
	now = qdf_get_log_timestamp();
	if (qdf_log_timestamp_to_usecs(now - srng->last_dequeue_time) >
	    SHADOW_WRITE_MIN_DELTA_US)
		return false;

	/* Delay dequeue, and record */
	qdf_udelay(SHADOW_WRITE_DELAY_US);

	srng->wstats.dequeue_delay++;
	hal->stats.wstats.dequeue_delay++;

	return true;
}
#else
static inline bool hal_reg_write_need_delay(struct hal_reg_write_q_elem *elem)
{
	return false;
}
#endif
/**
 * hal_reg_write_work() - Worker to process delayed writes
 * @arg: hal_soc pointer
 *
 * Return: None
 */
static void hal_reg_write_work(void *arg)
{
	int32_t q_depth, write_val;
	struct hal_soc *hal = arg;
	struct hal_reg_write_q_elem *q_elem;
	uint64_t delta_us;
	uint8_t ring_id;
	uint32_t *addr;
	uint32_t num_processed = 0;

	q_elem = &hal->reg_write_queue[(hal->read_idx)];
	q_elem->work_scheduled_time = qdf_get_log_timestamp();
	q_elem->cpu_id = qdf_get_cpu();

	/* Make sure q_elem consistent in the memory for multi-cores */
	qdf_rmb();
	if (!q_elem->valid)
		return;

	q_depth = qdf_atomic_read(&hal->stats.wstats.q_depth);
	if (q_depth > hal->stats.wstats.max_q_depth)
		hal->stats.wstats.max_q_depth = q_depth;

	if (hif_prevent_link_low_power_states(hal->hif_handle)) {
		hal->stats.wstats.prevent_l1_fails++;
		return;
	}

	while (true) {
		qdf_rmb();
		if (!q_elem->valid)
			break;

		q_elem->dequeue_time = qdf_get_log_timestamp();
		ring_id = q_elem->srng->ring_id;
		addr = q_elem->addr;
		delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
						      q_elem->enqueue_time);
		hal_reg_write_fill_sched_delay_hist(hal, delta_us);

		hal->stats.wstats.dequeues++;
		qdf_atomic_dec(&hal->stats.wstats.q_depth);

		if (hal_reg_write_need_delay(q_elem))
			hal_verbose_debug("Delay reg writer for srng 0x%x, addr 0x%pK",
					  q_elem->srng->ring_id, q_elem->addr);

		write_val = hal_process_reg_write_q_elem(hal, q_elem);
		hal_verbose_debug("read_idx %u srng 0x%x, addr 0x%pK dequeue_val %u sched delay %llu us",
				  hal->read_idx, ring_id, addr, write_val,
				  delta_us);

		qdf_trace_dp_del_reg_write(ring_id, q_elem->enqueue_val,
					   q_elem->dequeue_val,
					   q_elem->enqueue_time,
					   q_elem->dequeue_time);

		num_processed++;
		hal->read_idx = (hal->read_idx + 1) &
				(HAL_REG_WRITE_QUEUE_LEN - 1);
		q_elem = &hal->reg_write_queue[(hal->read_idx)];
	}

	hif_allow_link_low_power_states(hal->hif_handle);
	/*
	 * Decrement active_work_cnt by the number of elements dequeued after
	 * hif_allow_link_low_power_states.
	 * This makes sure that hif_try_complete_tasks will wait till we make
	 * the bus access in hif_allow_link_low_power_states. This will avoid
	 * race condition between delayed register worker and bus suspend
	 * (system suspend or runtime suspend).
	 *
	 * The following decrement should be done at the end!
	 */
	qdf_atomic_sub(num_processed, &hal->active_work_cnt);
}

static void __hal_flush_reg_write_work(struct hal_soc *hal)
{
	qdf_flush_work(&hal->reg_write_work);
	qdf_disable_work(&hal->reg_write_work);
}

void hal_flush_reg_write_work(hal_soc_handle_t hal_handle)
{
	__hal_flush_reg_write_work((struct hal_soc *)hal_handle);
}
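/*
 * Memory-ordering sketch for the queue drained above (restating the
 * barriers already present in the code): the producer fills all q_elem
 * fields, issues qdf_wmb(), then sets q_elem->valid; the worker issues
 * qdf_rmb() before testing q_elem->valid, so a set valid flag guarantees
 * the other fields it then reads are not stale. There is a single
 * consumer (this kworker), which is why read_idx is a plain integer while
 * write_idx below is atomic.
 */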
/**
 * hal_reg_write_enqueue() - enqueue register writes into kworker
 * @hal_soc: hal_soc pointer
 * @srng: srng pointer
 * @addr: iomem address of register
 * @value: value to be written to iomem address
 *
 * This function executes from within the SRNG LOCK
 *
 * Return: None
 */
static void hal_reg_write_enqueue(struct hal_soc *hal_soc,
				  struct hal_srng *srng,
				  void __iomem *addr,
				  uint32_t value)
{
	struct hal_reg_write_q_elem *q_elem;
	uint32_t write_idx;

	if (srng->reg_write_in_progress) {
		hal_verbose_debug("Already in progress srng ring id 0x%x addr 0x%pK val %u",
				  srng->ring_id, addr, value);
		qdf_atomic_inc(&hal_soc->stats.wstats.coalesces);
		srng->wstats.coalesces++;
		return;
	}

	write_idx = qdf_atomic_inc_return(&hal_soc->write_idx);

	write_idx = write_idx & (HAL_REG_WRITE_QUEUE_LEN - 1);

	q_elem = &hal_soc->reg_write_queue[write_idx];

	if (q_elem->valid) {
		hal_err("queue full");
		QDF_BUG(0);
		return;
	}

	qdf_atomic_inc(&hal_soc->stats.wstats.enqueues);
	srng->wstats.enqueues++;

	qdf_atomic_inc(&hal_soc->stats.wstats.q_depth);

	q_elem->srng = srng;
	q_elem->addr = addr;
	q_elem->enqueue_val = value;
	q_elem->enqueue_time = qdf_get_log_timestamp();

	/*
	 * Before the valid flag is set to true, all the other
	 * fields in the q_elem need to be updated in memory.
	 * Else there is a chance that the dequeuing worker thread
	 * might read stale entries and process an incorrect srng.
	 */
	qdf_wmb();
	q_elem->valid = true;

	/*
	 * After all other fields in the q_elem have been updated
	 * in memory successfully, the valid flag needs to be updated
	 * in memory in time too.
	 * Else there is a chance that the dequeuing worker thread
	 * might read a stale valid flag and the work will be bypassed
	 * for this round. And if there is no other work scheduled
	 * later, this hal register write won't happen any more.
	 */
	qdf_wmb();

	srng->reg_write_in_progress = true;
	qdf_atomic_inc(&hal_soc->active_work_cnt);

	hal_verbose_debug("write_idx %u srng ring id 0x%x addr 0x%pK val %u",
			  write_idx, srng->ring_id, addr, value);

	qdf_queue_work(hal_soc->qdf_dev, hal_soc->reg_write_wq,
		       &hal_soc->reg_write_work);
}
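/*
 * Slot arithmetic example (values from this file): write_idx starts at -1
 * (see hal_delayed_reg_write_init() below), so the first
 * qdf_atomic_inc_return() yields 0. With HAL_REG_WRITE_QUEUE_LEN = 32,
 * masking with (32 - 1) maps enqueues 0..31 to slots 0..31 and enqueue 32
 * back to slot 0; the q_elem->valid check above catches the case where
 * the worker has not yet drained the slot being reused.
 */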
/**
 * hal_delayed_reg_write_init() - Initialization function for delayed reg writes
 * @hal: hal_soc pointer
 *
 * Initialize main data structures to process register writes in a delayed
 * workqueue.
 *
 * Return: QDF_STATUS_SUCCESS on success else a QDF error.
 */
static QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
{
	hal->reg_write_wq =
		qdf_alloc_high_prior_ordered_workqueue("hal_register_write_wq");
	qdf_create_work(0, &hal->reg_write_work, hal_reg_write_work, hal);
	hal->reg_write_queue = qdf_mem_malloc(HAL_REG_WRITE_QUEUE_LEN *
					      sizeof(*hal->reg_write_queue));
	if (!hal->reg_write_queue) {
		hal_err("unable to allocate memory");
		QDF_BUG(0);
		return QDF_STATUS_E_NOMEM;
	}

	/* Initial value of indices */
	hal->read_idx = 0;
	qdf_atomic_set(&hal->write_idx, -1);
	return QDF_STATUS_SUCCESS;
}

/**
 * hal_delayed_reg_write_deinit() - De-initialize delayed reg write processing
 * @hal: hal_soc pointer
 *
 * De-initialize main data structures to process register writes in a delayed
 * workqueue.
 *
 * Return: None
 */
static void hal_delayed_reg_write_deinit(struct hal_soc *hal)
{
	__hal_flush_reg_write_work(hal);

	qdf_flush_workqueue(0, hal->reg_write_wq);
	qdf_destroy_workqueue(0, hal->reg_write_wq);
	qdf_mem_free(hal->reg_write_queue);
}

#else
static inline QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
{
	return QDF_STATUS_SUCCESS;
}

static inline void hal_delayed_reg_write_deinit(struct hal_soc *hal)
{
}
#endif
#ifdef FEATURE_HAL_DELAYED_REG_WRITE
#ifdef HAL_RECORD_SUSPEND_WRITE
/* Single global history; index wraps via the power-of-2 mask below */
static struct hal_suspend_write_history g_hal_suspend_write_history;

static
void hal_event_suspend_record(uint8_t ring_id, uint32_t value, uint32_t count)
{
	uint32_t index =
		qdf_atomic_read(&g_hal_suspend_write_history.index) &
		(HAL_SUSPEND_WRITE_HISTORY_MAX - 1);
	struct hal_suspend_write_record *cur_event =
		&g_hal_suspend_write_history.record[index];

	cur_event->ts = qdf_get_log_timestamp();
	cur_event->ring_id = ring_id;
	cur_event->value = value;
	cur_event->direct_wcount = count;
	qdf_atomic_inc(&g_hal_suspend_write_history.index);
}

static inline
void hal_record_suspend_write(uint8_t ring_id, uint32_t value, uint32_t count)
{
	if (hif_rtpm_get_state() >= HIF_RTPM_STATE_SUSPENDING)
		hal_event_suspend_record(ring_id, value, count);
}
#else
static inline
void hal_record_suspend_write(uint8_t ring_id, uint32_t value, uint32_t count)
{
}
#endif

#ifdef QCA_WIFI_QCA6750
void hal_delayed_reg_write(struct hal_soc *hal_soc,
			   struct hal_srng *srng,
			   void __iomem *addr,
			   uint32_t value)
{
	uint8_t vote_access;

	switch (srng->ring_type) {
	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		vote_access = hif_get_ep_vote_access(hal_soc->hif_handle,
						     HIF_EP_VOTE_NONDP_ACCESS);
		if ((vote_access == HIF_EP_VOTE_ACCESS_DISABLE) ||
		    (vote_access == HIF_EP_VOTE_INTERMEDIATE_ACCESS &&
		     PLD_MHI_STATE_L0 ==
		     pld_get_mhi_state(hal_soc->qdf_dev->dev))) {
			hal_write_address_32_mb(hal_soc, addr, value, false);
			qdf_atomic_inc(&hal_soc->stats.wstats.direct);
			srng->wstats.direct++;
		} else {
			hal_reg_write_enqueue(hal_soc, srng, addr, value);
		}
		break;
	default:
		if (hif_get_ep_vote_access(hal_soc->hif_handle,
		    HIF_EP_VOTE_DP_ACCESS) ==
		    HIF_EP_VOTE_ACCESS_DISABLE ||
		    hal_is_reg_write_tput_level_high(hal_soc) ||
		    PLD_MHI_STATE_L0 ==
		    pld_get_mhi_state(hal_soc->qdf_dev->dev)) {
			hal_write_address_32_mb(hal_soc, addr, value, false);
			qdf_atomic_inc(&hal_soc->stats.wstats.direct);
			srng->wstats.direct++;
		} else {
			hal_reg_write_enqueue(hal_soc, srng, addr, value);
		}

		break;
	}
}
#else
void hal_delayed_reg_write(struct hal_soc *hal_soc,
			   struct hal_srng *srng,
			   void __iomem *addr,
			   uint32_t value)
{
	if (hal_is_reg_write_tput_level_high(hal_soc) ||
	    pld_is_device_awake(hal_soc->qdf_dev->dev)) {
		qdf_atomic_inc(&hal_soc->stats.wstats.direct);
		srng->wstats.direct++;
		hal_write_address_32_mb(hal_soc, addr, value, false);
	} else {
		hal_reg_write_enqueue(hal_soc, srng, addr, value);
	}

	hal_record_suspend_write(srng->ring_id, value, srng->wstats.direct);
}
#endif
#endif
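/*
 * Decision summary for hal_delayed_reg_write() (both variants above,
 * restated for readability): write directly via hal_write_address_32_mb()
 * when the bus is known to be cheaply usable (endpoint vote disabled for
 * that traffic class, throughput level high, MHI already in L0, or the
 * device awake in the generic variant); otherwise defer the write to the
 * kworker through hal_reg_write_enqueue().
 */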
/**
 * hal_attach() - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 *
 * Return: Opaque HAL SOC handle; NULL on failure
 */
void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev)
{
	struct hal_soc *hal;
	int i;

	hal = qdf_mem_malloc(sizeof(*hal));

	if (!hal) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal_soc allocation failed", __func__);
		goto fail0;
	}
	hal->hif_handle = hif_handle;
	hal->dev_base_addr = hif_get_dev_ba(hif_handle); /* UMAC */
	hal->dev_base_addr_ce = hif_get_dev_ba_ce(hif_handle); /* CE */
	hal->dev_base_addr_cmem = hif_get_dev_ba_cmem(hif_handle); /* CMEM */
	hal->qdf_dev = qdf_dev;
	hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent(
		qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
		HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr));
	if (!hal->shadow_rdptr_mem_paddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal->shadow_rdptr_mem_paddr allocation failed",
			  __func__);
		goto fail1;
	}
	qdf_mem_zero(hal->shadow_rdptr_mem_vaddr,
		     sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX);

	hal->shadow_wrptr_mem_vaddr =
		(uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		&(hal->shadow_wrptr_mem_paddr));
	if (!hal->shadow_wrptr_mem_vaddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal->shadow_wrptr_mem_vaddr allocation failed",
			  __func__);
		goto fail2;
	}
	qdf_mem_zero(hal->shadow_wrptr_mem_vaddr,
		     sizeof(*(hal->shadow_wrptr_mem_vaddr)) *
		     HAL_MAX_LMAC_RINGS);

	for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
		hal->srng_list[i].initialized = 0;
		hal->srng_list[i].ring_id = i;
	}

	qdf_spinlock_create(&hal->register_access_lock);
	hal->register_window = 0;
	hal->target_type = hal_get_target_type(hal_soc_to_hal_soc_handle(hal));
	hal->version = hif_get_soc_version(hif_handle);
	hal->ops = qdf_mem_malloc(sizeof(*hal->ops));

	if (!hal->ops) {
		hal_err("unable to allocate memory for HAL ops");
		goto fail3;
	}

	hal_target_based_configure(hal);

	hal_reg_write_fail_history_init(hal);

	qdf_minidump_log(hal, sizeof(*hal), "hal_soc");

	qdf_atomic_init(&hal->active_work_cnt);
	hal_delayed_reg_write_init(hal);

	hal_reo_shared_qaddr_setup((hal_soc_handle_t)hal);

	hif_rtpm_register(HIF_RTPM_ID_HAL_REO_CMD, NULL);

	return (void *)hal;
fail3:
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
				sizeof(*hal->shadow_wrptr_mem_vaddr) *
				HAL_MAX_LMAC_RINGS,
				hal->shadow_wrptr_mem_vaddr,
				hal->shadow_wrptr_mem_paddr, 0);
fail2:
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
				sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
				HAL_SRNG_ID_MAX,
				hal->shadow_rdptr_mem_vaddr,
				hal->shadow_rdptr_mem_paddr, 0);
fail1:
	qdf_mem_free(hal);
fail0:
	return NULL;
}

qdf_export_symbol(hal_attach);
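/*
 * Typical lifecycle (illustrative only; the real callers live in HIF):
 *
 *	void *hal = hal_attach(hif_handle, qdf_dev);
 *	if (!hal)
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	hal_detach(hal);
 *
 * DP obtains the same handle later through hif_get_hal_handle(), as noted
 * in the kernel-doc above.
 */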
/**
 * hal_get_meminfo() - Retrieve hal memory base address
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	mem->dev_base_addr = (void *)hal->dev_base_addr;
	mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
	mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
	mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
	mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
	hif_read_phy_mem_base((void *)hal->hif_handle,
			      (qdf_dma_addr_t *)&mem->dev_base_paddr);
	mem->lmac_srng_start_id = HAL_SRNG_LMAC1_ID_START;
}

qdf_export_symbol(hal_get_meminfo);

/**
 * hal_detach() - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF de-initialization to release
 * the resources allocated in hal_attach().
 *
 * Return: None
 */
extern void hal_detach(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	hif_rtpm_deregister(HIF_RTPM_ID_HAL_REO_CMD);
	hal_delayed_reg_write_deinit(hal);
	hal_reo_shared_qaddr_detach((hal_soc_handle_t)hal);
	qdf_mem_free(hal->ops);

	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
				sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
				HAL_SRNG_ID_MAX,
				hal->shadow_rdptr_mem_vaddr,
				hal->shadow_rdptr_mem_paddr, 0);
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
				sizeof(*(hal->shadow_wrptr_mem_vaddr)) *
				HAL_MAX_LMAC_RINGS,
				hal->shadow_wrptr_mem_vaddr,
				hal->shadow_wrptr_mem_paddr, 0);
	qdf_minidump_remove(hal, sizeof(*hal), "hal_soc");

	qdf_mem_free(hal);
}

qdf_export_symbol(hal_detach);

#define HAL_CE_CHANNEL_DST_DEST_CTRL_ADDR(x)	((x) + 0x000000b0)
#define HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK	0x0000ffff
#define HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_ADDR(x) \
		((x) + 0x00000040)
#define HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_RMSK	0x00000007
/**
 * hal_ce_dst_setup() - Initialize CE destination ring registers
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @ring_num: ring number
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
				    int ring_num)
{
	uint32_t reg_val = 0;
	uint32_t reg_addr;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, CE_DST);

	/* set DEST_MAX_LENGTH according to ce assignment */
	reg_addr = HAL_CE_CHANNEL_DST_DEST_CTRL_ADDR(
			ring_config->reg_start[R0_INDEX] +
			(ring_num * ring_config->reg_size[R0_INDEX]));

	reg_val = HAL_REG_READ(hal, reg_addr);
	reg_val &= ~HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	reg_val |= srng->u.dst_ring.max_buffer_length &
		   HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	HAL_REG_WRITE(hal, reg_addr, reg_val);

	if (srng->prefetch_timer) {
		reg_addr = HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_ADDR(
				ring_config->reg_start[R0_INDEX] +
				(ring_num * ring_config->reg_size[R0_INDEX]));

		reg_val = HAL_REG_READ(hal, reg_addr);
		reg_val &= ~HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_RMSK;
		reg_val |= srng->prefetch_timer;
		HAL_REG_WRITE(hal, reg_addr, reg_val);
		reg_val = HAL_REG_READ(hal, reg_addr);
	}
}
/**
 * hal_reo_read_write_ctrl_ix() - Read or write REO_DESTINATION_RING_CTRL_IX
 * @hal_soc_hdl: HAL SOC handle
 * @read: boolean value to indicate if read or write
 * @ix0: pointer to store IX0 reg value
 * @ix1: pointer to store IX1 reg value
 * @ix2: pointer to store IX2 reg value
 * @ix3: pointer to store IX3 reg value
 */
void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
				uint32_t *ix0, uint32_t *ix1,
				uint32_t *ix2, uint32_t *ix3)
{
	uint32_t reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
	uint32_t reo_reg_base;

	reo_reg_base = hal_get_reo_reg_base_offset(hal_soc_hdl);

	if (read) {
		if (ix0) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(
						reo_reg_base);
			*ix0 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix1) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(
						reo_reg_base);
			*ix1 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix2) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(
						reo_reg_base);
			*ix2 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix3) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(
						reo_reg_base);
			*ix3 = HAL_REG_READ(hal, reg_offset);
		}
	} else {
		if (ix0) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix0, true);
		}

		if (ix1) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix1, true);
		}

		if (ix2) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix2, true);
		}

		if (ix3) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix3, true);
		}
	}
}

qdf_export_symbol(hal_reo_read_write_ctrl_ix);
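/*
 * Usage sketch (illustrative only): NULL pointers skip the corresponding
 * register, so a caller can read back all four remap registers and then
 * rewrite just IX2:
 *
 *	uint32_t ix0, ix1, ix2, ix3;
 *
 *	hal_reo_read_write_ctrl_ix(hal, true, &ix0, &ix1, &ix2, &ix3);
 *	ix2 = new_remap_value;	(new_remap_value is a hypothetical input)
 *	hal_reo_read_write_ctrl_ix(hal, false, NULL, NULL, &ix2, NULL);
 */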
/**
 * hal_srng_dst_set_hp_paddr_confirm() - Set physical address to dest ring head
 * pointer and confirm that write went through by reading back the value
 * @srng: srng pointer
 * @paddr: physical address
 *
 * Return: None
 */
void hal_srng_dst_set_hp_paddr_confirm(struct hal_srng *srng, uint64_t paddr)
{
	SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_LSB, paddr & 0xffffffff);
	SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_MSB, paddr >> 32);
}

qdf_export_symbol(hal_srng_dst_set_hp_paddr_confirm);

/**
 * hal_srng_dst_init_hp() - Initialize destination ring head pointer
 * @hal_soc: hal_soc handle
 * @srng: srng pointer
 * @vaddr: virtual address
 */
void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc,
			  struct hal_srng *srng,
			  uint32_t *vaddr)
{
	uint32_t reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (!srng)
		return;

	srng->u.dst_ring.hp_addr = vaddr;
	reg_offset = SRNG_DST_ADDR(srng, HP) - hal->dev_base_addr;
	HAL_REG_WRITE_CONFIRM_RETRY(
		hal, reg_offset, srng->u.dst_ring.cached_hp, true);

	if (vaddr) {
		*srng->u.dst_ring.hp_addr = srng->u.dst_ring.cached_hp;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "hp_addr=%pK, cached_hp=%d, hp=%d",
			  (void *)srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp,
			  *srng->u.dst_ring.hp_addr);
	}
}

qdf_export_symbol(hal_srng_dst_init_hp);

/**
 * hal_srng_hw_init() - Private function to initialize SRNG HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @idle_check: Check if ring is idle
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
				    struct hal_srng *srng, bool idle_check)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		hal_srng_src_hw_init(hal, srng, idle_check);
	else
		hal_srng_dst_hw_init(hal, srng, idle_check);
}

#if defined(CONFIG_SHADOW_V2) || defined(CONFIG_SHADOW_V3)
#define ignore_shadow false
#define CHECK_SHADOW_REGISTERS true
#else
#define ignore_shadow true
#define CHECK_SHADOW_REGISTERS false
#endif

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * hal_srng_is_near_full_irq_supported() - Check if near full irq is
 *			supported on this SRNG
 * @hal_soc: HAL SoC handle
 * @ring_type: SRNG type
 * @ring_num: ring number
 *
 * Return: true, if near full irq is supported for this SRNG
 *	   false, if near full irq is not supported for this SRNG
 */
bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
					 int ring_type, int ring_num)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->nf_irq_support;
}

/**
 * hal_srng_set_msi2_params() - Set MSI2 params to SRNG data structure from
 *			ring params
 * @srng: SRNG handle
 * @ring_params: ring params for this SRNG
 *
 * Return: None
 */
static inline void
hal_srng_set_msi2_params(struct hal_srng *srng,
			 struct hal_srng_params *ring_params)
{
	srng->msi2_addr = ring_params->msi2_addr;
	srng->msi2_data = ring_params->msi2_data;
}

/**
 * hal_srng_get_nf_params() - Get the near full MSI2 params from srng
 * @srng: SRNG handle
 * @ring_params: ring params for this SRNG
 *
 * Return: None
 */
static inline void
hal_srng_get_nf_params(struct hal_srng *srng,
		       struct hal_srng_params *ring_params)
{
	ring_params->msi2_addr = srng->msi2_addr;
	ring_params->msi2_data = srng->msi2_data;
}

/**
 * hal_srng_set_nf_thresholds() - Set the near full thresholds in SRNG
 * @srng: SRNG handle where the params are to be set
 * @ring_params: ring params, from where threshold is to be fetched
 *
 * Return: None
 */
static inline void
hal_srng_set_nf_thresholds(struct hal_srng *srng,
			   struct hal_srng_params *ring_params)
{
	srng->u.dst_ring.nf_irq_support = ring_params->nf_irq_support;
	srng->u.dst_ring.high_thresh = ring_params->high_thresh;
}
#else
static inline void
hal_srng_set_msi2_params(struct hal_srng *srng,
			 struct hal_srng_params *ring_params)
{
}

static inline void
hal_srng_get_nf_params(struct hal_srng *srng,
		       struct hal_srng_params *ring_params)
{
}

static inline void
hal_srng_set_nf_thresholds(struct hal_srng *srng,
			   struct hal_srng_params *ring_params)
{
}
#endif
#if defined(CLEAR_SW2TCL_CONSUMED_DESC)
/**
 * hal_srng_last_desc_cleared_init() - Initialize SRNG last_desc_cleared ptr
 * @srng: Source ring pointer
 *
 * Return: None
 */
static inline
void hal_srng_last_desc_cleared_init(struct hal_srng *srng)
{
	srng->last_desc_cleared = srng->ring_size - srng->entry_size;
}

#else
static inline
void hal_srng_last_desc_cleared_init(struct hal_srng *srng)
{
}
#endif /* CLEAR_SW2TCL_CONSUMED_DESC */

#ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
static inline void hal_srng_update_high_wm_thresholds(struct hal_srng *srng)
{
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100] =
		((srng->num_entries * 90) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90] =
		((srng->num_entries * 80) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80] =
		((srng->num_entries * 70) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70] =
		((srng->num_entries * 60) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60] =
		((srng->num_entries * 50) / 100);
	/* Below 50% threshold is not needed */
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT] = 0;

	hal_info("ring_id: %u, wm_thresh- <50:%u, 50-60:%u, 60-70:%u, 70-80:%u, 80-90:%u, 90-100:%u",
		 srng->ring_id,
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100]);
}
#else
static inline void hal_srng_update_high_wm_thresholds(struct hal_srng *srng)
{
}
#endif
/**
 * hal_srng_setup() - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 *	      from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 * @idle_check: Check if ring is idle
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure. Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *	   NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
		     int mac_id, struct hal_srng_params *ring_params,
		     bool idle_check)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	hal_verbose_debug("mac_id %d ring_id %d", mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->initialized) {
		hal_verbose_debug("Ring (%d, %d) already initialized",
				  ring_type, ring_num);
		return NULL;
	}

	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_type = ring_type;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	srng->ring_size = srng->num_entries * srng->entry_size;
	srng->ring_size_mask = srng->ring_size - 1;
	srng->ring_vaddr_end = srng->ring_base_vaddr + srng->ring_size;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	if (!idle_check)
		srng->prefetch_timer = ring_params->prefetch_timer;
	srng->hal_soc = hal_soc;
	hal_srng_set_msi2_params(srng, ring_params);
	hal_srng_update_high_wm_thresholds(srng);

	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr +
			ring_config->reg_start[i] +
			(ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
		     srng->num_entries) << 2);

	srng->flags = ring_params->flags;

	/* For cached descriptors, flush and invalidate the memory */
	if (srng->flags & HAL_SRNG_CACHED_DESC) {
		qdf_nbuf_dma_clean_range(
			srng->ring_base_vaddr,
			srng->ring_base_vaddr +
			((srng->entry_size * srng->num_entries)));
		qdf_nbuf_dma_inv_range(
			srng->ring_base_vaddr,
			srng->ring_base_vaddr +
			((srng->entry_size * srng->num_entries)));
	}
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should get these flags from caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	hal_srng_last_desc_cleared_init(srng);

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;

		if (srng->u.src_ring.tp_addr)
			qdf_mem_zero(srng->u.src_ring.tp_addr,
				     sizeof(*hal->shadow_rdptr_mem_vaddr));

		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;

			if (srng->u.src_ring.hp_addr)
				qdf_mem_zero(srng->u.src_ring.hp_addr,
					sizeof(*hal->shadow_wrptr_mem_vaddr));

		} else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
			srng->u.src_ring.hp_addr =
				hal_get_window_address(hal,
						SRNG_SRC_ADDR(srng, HP));

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_SRC_ADDR(srng, HP),
						     srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		hal_srng_set_nf_thresholds(srng, ring_params);
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);

		if (srng->u.dst_ring.hp_addr)
			qdf_mem_zero(srng->u.dst_ring.hp_addr,
				     sizeof(*hal->shadow_rdptr_mem_vaddr));

		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;

			if (srng->u.dst_ring.tp_addr)
				qdf_mem_zero(srng->u.dst_ring.tp_addr,
					sizeof(*hal->shadow_wrptr_mem_vaddr));

		} else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
			srng->u.dst_ring.tp_addr =
				hal_get_window_address(hal,
						SRNG_DST_ADDR(srng, TP));

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_DST_ADDR(srng, TP),
						     srng->u.dst_ring.tp_addr);
		}
	}

	if (!(ring_config->lmac_ring)) {
		hal_srng_hw_init(hal, srng, idle_check);

		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length =
				ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	srng->srng_event = 0;

	srng->initialized = true;

	return (void *)srng;
}

qdf_export_symbol(hal_srng_setup);
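/*
 * Sizing example for the setup above (hypothetical numbers): the internal
 * entry_size is in 4-byte words, so a ring config with entry_size = 8
 * makes hal_srng_get_entrysize() (below) return 8 << 2 = 32 bytes. With
 * num_entries = 1024 the caller allocates 1024 * 32 = 32768 bytes of
 * contiguous, 8-byte-aligned memory; internally ring_size = 8192 words,
 * and the qdf_mem_zero() in hal_srng_setup() clears 8192 << 2 = 32768
 * bytes, the same region.
 */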
/**
 * hal_srng_dump() - Dump ring status
 * @srng: hal srng pointer
 */
void hal_srng_dump(struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		hal_debug("=== SRC RING %d ===", srng->ring_id);
		hal_debug("hp %u, reap_hp %u, tp %u, cached tp %u",
			  srng->u.src_ring.hp,
			  srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr,
			  srng->u.src_ring.cached_tp);
	} else {
		hal_debug("=== DST RING %d ===", srng->ring_id);
		hal_debug("tp %u, hp %u, cached hp %u, loop_cnt %u",
			  srng->u.dst_ring.tp,
			  *srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp,
			  srng->u.dst_ring.loop_cnt);
	}
}

/**
 * hal_get_srng_params() - Retrieve SRNG parameters for a given ring from HAL
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
extern void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
				hal_ring_handle_t hal_ring_hdl,
				struct hal_srng_params *ring_params)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	int i = 0;

	ring_params->ring_id = srng->ring_id;
	ring_params->ring_dir = srng->ring_dir;
	ring_params->entry_size = srng->entry_size;

	ring_params->ring_base_paddr = srng->ring_base_paddr;
	ring_params->ring_base_vaddr = srng->ring_base_vaddr;
	ring_params->num_entries = srng->num_entries;
	ring_params->msi_addr = srng->msi_addr;
	ring_params->msi_data = srng->msi_data;
	ring_params->intr_timer_thres_us = srng->intr_timer_thres_us;
	ring_params->intr_batch_cntr_thres_entries =
		srng->intr_batch_cntr_thres_entries;
	ring_params->low_threshold = srng->u.src_ring.low_threshold;
	ring_params->flags = srng->flags;
	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++)
		ring_params->hwreg_base[i] = srng->hwreg_base[i];

	hal_srng_get_nf_params(srng, ring_params);
}

qdf_export_symbol(hal_get_srng_params);

void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
			   uint32_t low_threshold)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	srng->u.src_ring.low_threshold = low_threshold * srng->entry_size;
}

qdf_export_symbol(hal_set_low_threshold);

#ifdef FEATURE_RUNTIME_PM
void
hal_srng_rtpm_access_end(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl,
			 uint32_t rtpm_id)
{
	if (qdf_unlikely(!hal_ring_hdl)) {
		qdf_print("Error: Invalid hal_ring\n");
		return;
	}

	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, rtpm_id) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, rtpm_id);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}
}

qdf_export_symbol(hal_srng_rtpm_access_end);
#endif /* FEATURE_RUNTIME_PM */

#ifdef FORCE_WAKE
void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc;

	hal_soc->init_phase = init_phase;
}
#endif /* FORCE_WAKE */