/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "hal_api.h"
#include "hal_reo.h"
#include "target_type.h"
#include "qdf_module.h"
#include "wcss_version.h"
#include <qdf_tracepoint.h>
#include "qdf_ssr_driver_dump.h"

struct tcl_data_cmd gtcl_data_symbol __attribute__((used));

#ifdef QCA_WIFI_QCA6290
void hal_qca6290_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA8074
void hal_qca8074_attach(struct hal_soc *hal);
#endif
#if defined(QCA_WIFI_QCA8074V2) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA9574)
void hal_qca8074v2_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6390
void hal_qca6390_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6490
void hal_qca6490_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN9000
void hal_qcn9000_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN9224
void hal_qcn9224v1_attach(struct hal_soc *hal);
void hal_qcn9224v2_attach(struct hal_soc *hal);
#endif
#if defined(QCA_WIFI_QCN6122) || defined(QCA_WIFI_QCN9160)
void hal_qcn6122_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6750
void hal_qca6750_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA5018
void hal_qca5018_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA5332
void hal_qca5332_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_KIWI
void hal_kiwi_attach(struct hal_soc *hal);
#endif

#ifdef ENABLE_VERBOSE_DEBUG
bool is_hal_verbose_debug_enabled;
#endif

#define HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(x)	((x) + 0x4)
#define HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(x)	((x) + 0x8)
#define HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(x)	((x) + 0xc)
#define HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(x)	((x) + 0x10)

#ifdef ENABLE_HAL_REG_WR_HISTORY
struct hal_reg_write_fail_history hal_reg_wr_hist;

void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
				 uint32_t offset,
				 uint32_t wr_val, uint32_t rd_val)
{
	struct hal_reg_write_fail_entry *record;
	int idx;

	idx = hal_history_get_next_index(&hal_soc->reg_wr_fail_hist->index,
					 HAL_REG_WRITE_HIST_SIZE);

	record = &hal_soc->reg_wr_fail_hist->record[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->reg_offset = offset;
	record->write_val = wr_val;
	record->read_val = rd_val;
}

static void hal_reg_write_fail_history_init(struct hal_soc *hal)
{
	hal->reg_wr_fail_hist = &hal_reg_wr_hist;

	qdf_atomic_set(&hal->reg_wr_fail_hist->index, -1);
}
#else
static void hal_reg_write_fail_history_init(struct hal_soc *hal)
{
}
#endif

/**
 * hal_get_srng_ring_id() - get the ring id of a described ring
 * @hal: hal_soc data structure
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings)
 *
 * Return: the ring id or -EINVAL if the ring does not exist.
 */
static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
				int ring_num, int mac_id)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	int ring_id;

	if (ring_num >= ring_config->max_rings) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: ring_num exceeded maximum no. of supported rings",
			  __func__);
		/* TODO: This is a programming error. Assert if this happens */
		return -EINVAL;
	}

	/*
	 * Some DMAC rings share a common source ring, hence don't provide
	 * them with separate ring IDs per LMAC.
	 */
	if (ring_config->lmac_ring && !ring_config->dmac_cmn_ring) {
		ring_id = (ring_config->start_ring_id + ring_num +
			   (mac_id * HAL_MAX_RINGS_PER_LMAC));
	} else {
		ring_id = ring_config->start_ring_id + ring_num;
	}

	return ring_id;
}

static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
{
	/* TODO: Should we allocate srng structures dynamically? */
	return &(hal->srng_list[ring_id]);
}

#ifndef SHADOW_REG_CONFIG_DISABLED
#define HP_OFFSET_IN_REG_START 1
#define OFFSET_FROM_HP_TO_TP 4
static void hal_update_srng_hp_tp_address(struct hal_soc *hal_soc,
					  int shadow_config_index,
					  int ring_type,
					  int ring_num)
{
	struct hal_srng *srng;
	int ring_id;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal_soc, ring_type);

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = hal_get_srng(hal_soc, ring_id);

	if (ring_config->ring_dir == HAL_SRNG_DST_RING) {
		srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal_soc->dev_base_addr;
		hal_debug("tp_addr=%pK dev base addr %pK index %u",
			  srng->u.dst_ring.tp_addr, hal_soc->dev_base_addr,
			  shadow_config_index);
	} else {
		srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal_soc->dev_base_addr;
		hal_debug("hp_addr=%pK dev base addr %pK index %u",
			  srng->u.src_ring.hp_addr,
			  hal_soc->dev_base_addr, shadow_config_index);
	}
}
#endif

#ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
void hal_set_one_target_reg_config(struct hal_soc *hal,
				   uint32_t target_reg_offset,
				   int list_index)
{
	int i = list_index;

	qdf_assert_always(i < MAX_GENERIC_SHADOW_REG);
	hal->list_shadow_reg_config[i].target_register =
		target_reg_offset;
	hal->num_generic_shadow_regs_configured++;
}

qdf_export_symbol(hal_set_one_target_reg_config);

#define REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET 0x4
#define MAX_REO_REMAP_SHADOW_REGS 4
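
/*
 * Worked example for hal_set_shadow_regs() below (illustrative only): with
 * HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(base) == base + 0x4 and a register
 * stride of REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET (0x4), the four REO
 * destination ring control registers occupy list entries 0-3 of the generic
 * shadow configuration:
 *
 *	entry 0: reo_reg_base + 0x4  (IX_0)
 *	entry 1: reo_reg_base + 0x8  (IX_1)
 *	entry 2: reo_reg_base + 0xc  (IX_2)
 *	entry 3: reo_reg_base + 0x10 (IX_3)
 *
 * Entry 4 is then used for the HP register of the WBM2SW_RELEASE ring
 * selected by HAL_IPA_TX_COMP_RING_IDX.
 */
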
QDF_STATUS hal_set_shadow_regs(void *hal_soc)
{
	uint32_t target_reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	int i;
	struct hal_hw_srng_config *srng_config =
		&hal->hw_srng_table[WBM2SW_RELEASE];
	uint32_t reo_reg_base;

	reo_reg_base = hal_get_reo_reg_base_offset(hal_soc);

	target_reg_offset =
		HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(reo_reg_base);

	for (i = 0; i < MAX_REO_REMAP_SHADOW_REGS; i++) {
		hal_set_one_target_reg_config(hal, target_reg_offset, i);
		target_reg_offset += REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET;
	}

	target_reg_offset = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_reg_offset += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
			      * HAL_IPA_TX_COMP_RING_IDX);

	hal_set_one_target_reg_config(hal, target_reg_offset, i);
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_shadow_regs);

QDF_STATUS hal_construct_shadow_regs(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	int shadow_config_index = hal->num_shadow_registers_configured;
	int i;
	int num_regs = hal->num_generic_shadow_regs_configured;

	for (i = 0; i < num_regs; i++) {
		qdf_assert_always(shadow_config_index < MAX_SHADOW_REGISTERS);
		hal->shadow_config[shadow_config_index].addr =
			hal->list_shadow_reg_config[i].target_register;
		hal->list_shadow_reg_config[i].shadow_config_index =
			shadow_config_index;
		hal->list_shadow_reg_config[i].va =
			SHADOW_REGISTER(shadow_config_index) +
			(uintptr_t)hal->dev_base_addr;
		hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x",
			  hal->shadow_config[shadow_config_index].addr,
			  SHADOW_REGISTER(shadow_config_index),
			  shadow_config_index);
		shadow_config_index++;
		hal->num_shadow_registers_configured++;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_shadow_regs);
#endif

#ifndef SHADOW_REG_CONFIG_DISABLED

QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
				     int ring_type,
				     int ring_num)
{
	uint32_t target_register;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type];
	int shadow_config_index = hal->num_shadow_registers_configured;

	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
		QDF_ASSERT(0);
		return QDF_STATUS_E_RESOURCES;
	}

	hal->num_shadow_registers_configured++;

	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
			    * ring_num);

	/* if the ring is a dst ring, we need to shadow the tail pointer */
	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
		target_register += OFFSET_FROM_HP_TO_TP;

	hal->shadow_config[shadow_config_index].addr = target_register;

	/* update hp/tp addr in the hal_soc structure */
	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
				      ring_num);

	hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x, ring_type %d, ring num %d",
		  target_register,
		  SHADOW_REGISTER(shadow_config_index),
		  shadow_config_index,
		  ring_type, ring_num);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_one_shadow_config);
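
/*
 * Worked example for hal_set_one_shadow_config() above (illustrative only):
 * the shadowed target register is the ring's HP register,
 * reg_start[HP_OFFSET_IN_REG_START] + ring_num * reg_size[HP_OFFSET_IN_REG_START],
 * for source rings, and that address plus OFFSET_FROM_HP_TO_TP (4 bytes,
 * i.e. the TP register) for destination rings, since SW updates HP on
 * source rings but TP on destination rings.
 */
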
QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc)
{
	int ring_type, ring_num;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
		struct hal_hw_srng_config *srng_config =
			&hal->hw_srng_table[ring_type];

		if (ring_type == CE_SRC ||
		    ring_type == CE_DST ||
		    ring_type == CE_DST_STATUS)
			continue;

		if (srng_config->lmac_ring)
			continue;

		for (ring_num = 0; ring_num < srng_config->max_rings;
		     ring_num++)
			hal_set_one_shadow_config(hal_soc, ring_type,
						  ring_num);
	}

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_srng_shadow_regs);
#else

QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc)
{
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_srng_shadow_regs);

QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
				     int ring_num)
{
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hal_set_one_shadow_config);
#endif

void hal_get_shadow_config(void *hal_soc,
			   struct pld_shadow_reg_v2_cfg **shadow_config,
			   int *num_shadow_registers_configured)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	*shadow_config = &hal->shadow_config[0].v2;
	*num_shadow_registers_configured =
		hal->num_shadow_registers_configured;
}
qdf_export_symbol(hal_get_shadow_config);

#ifdef CONFIG_SHADOW_V3
void hal_get_shadow_v3_config(void *hal_soc,
			      struct pld_shadow_reg_v3_cfg **shadow_config,
			      int *num_shadow_registers_configured)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	*shadow_config = &hal->shadow_config[0].v3;
	*num_shadow_registers_configured =
		hal->num_shadow_registers_configured;
}
qdf_export_symbol(hal_get_shadow_v3_config);
#endif

static bool hal_validate_shadow_register(struct hal_soc *hal,
					 uint32_t *destination,
					 uint32_t *shadow_address)
{
	unsigned int index;
	uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr;
	int destination_ba_offset =
		((char *)destination) - (char *)hal->dev_base_addr;

	index = shadow_address - shadow_0_offset;

	if (index >= MAX_SHADOW_REGISTERS) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: index %x out of bounds", __func__, index);
		goto error;
	} else if (hal->shadow_config[index].addr != destination_ba_offset) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: sanity check failure, expected %x, found %x",
			  __func__, destination_ba_offset,
			  hal->shadow_config[index].addr);
		goto error;
	}
	return true;
error:
	qdf_print("baddr %pK, destination %pK, shadow_address %pK s0offset %pK index %x",
		  hal->dev_base_addr, destination, shadow_address,
		  shadow_0_offset, index);
	QDF_BUG(0);
	return false;
}
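
/*
 * Sanity-check example for hal_validate_shadow_register() above
 * (illustrative values, assuming the shadow registers form consecutive
 * uint32_t slots starting at SHADOW_REGISTER(0), which the pointer
 * arithmetic above relies on): a shadow_address n slots past
 * shadow_0_offset yields index n, and hal->shadow_config[n].addr must
 * equal the byte offset of 'destination' from dev_base_addr for the
 * mapping to be considered valid.
 */
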
static void hal_target_based_configure(struct hal_soc *hal)
{
	/*
	 * Indicate initialization of SRNGs to avoid force wake,
	 * since UMAC power collapse is not enabled yet.
	 */
	hal->init_phase = true;

	switch (hal->target_type) {
#ifdef QCA_WIFI_QCA6290
	case TARGET_TYPE_QCA6290:
		hal->use_register_windowing = true;
		hal_qca6290_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		hal->use_register_windowing = true;
		hal_qca6390_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6490
	case TARGET_TYPE_QCA6490:
		hal->use_register_windowing = true;
		hal_qca6490_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6750
	case TARGET_TYPE_QCA6750:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_qca6750_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_KIWI
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
		hal->use_register_windowing = true;
		hal_kiwi_attach(hal);
		break;
#endif
#if defined(QCA_WIFI_QCA8074) && defined(WIFI_TARGET_TYPE_3_0)
	case TARGET_TYPE_QCA8074:
		hal_qca8074_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA8074V2)
	case TARGET_TYPE_QCA8074V2:
		hal_qca8074v2_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA6018)
	case TARGET_TYPE_QCA6018:
		hal_qca8074v2_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA9574)
	case TARGET_TYPE_QCA9574:
		hal_qca8074v2_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCN6122)
	case TARGET_TYPE_QCN6122:
		hal->use_register_windowing = true;
		/*
		 * Static window map is enabled for qcn6122 to use 2mb bar
		 * size and use multiple windows to write into registers.
		 */
		hal->static_window_map = true;
		hal_qcn6122_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCN9160)
	case TARGET_TYPE_QCN9160:
		hal->use_register_windowing = true;
		/*
		 * Static window map is enabled for qcn9160 to use 2mb bar
		 * size and use multiple windows to write into registers.
		 */
		hal->static_window_map = true;
		hal_qcn6122_attach(hal);
		break;
#endif

#ifdef QCA_WIFI_QCN9000
	case TARGET_TYPE_QCN9000:
		hal->use_register_windowing = true;
		/*
		 * Static window map is enabled for qcn9000 to use 2mb bar
		 * size and use multiple windows to write into registers.
		 */
		hal->static_window_map = true;
		hal_qcn9000_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA5018
	case TARGET_TYPE_QCA5018:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_qca5018_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCN9224
	case TARGET_TYPE_QCN9224:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		if (hal->version == 1)
			hal_qcn9224v1_attach(hal);
		else
			hal_qcn9224v2_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA5332
	case TARGET_TYPE_QCA5332:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_qca5332_attach(hal);
		break;
#endif
	default:
		break;
	}
}

uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hif_target_info *tgt_info =
		hif_get_target_info_handle(hal_soc->hif_handle);

	return tgt_info->target_type;
}

qdf_export_symbol(hal_get_target_type);
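
/*
 * The helpers below belong to the delayed register write path
 * (FEATURE_HAL_DELAYED_REG_WRITE): HP/TP updates are queued into
 * hal->reg_write_queue by hal_reg_write_enqueue() and drained by the
 * hal_reg_write_work() kworker defined further below, which first brings
 * the bus link out of low-power state before touching the registers. The
 * functions here only report statistics for that path.
 */
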
#if defined(FEATURE_HAL_DELAYED_REG_WRITE)
/**
 * hal_is_reg_write_tput_level_high() - throughput level for delayed reg writes
 * @hal: hal_soc pointer
 *
 * Return: true if throughput is high, else false.
 */
static inline bool hal_is_reg_write_tput_level_high(struct hal_soc *hal)
{
	int bw_level = hif_get_bandwidth_level(hal->hif_handle);

	return bw_level >= PLD_BUS_WIDTH_MEDIUM;
}

static inline
char *hal_fill_reg_write_srng_stats(struct hal_srng *srng,
				    char *buf, qdf_size_t size)
{
	qdf_scnprintf(buf, size, "enq %u deq %u coal %u direct %u",
		      srng->wstats.enqueues, srng->wstats.dequeues,
		      srng->wstats.coalesces, srng->wstats.direct);
	return buf;
}

/* bytes for local buffer */
#define HAL_REG_WRITE_SRNG_STATS_LEN 100

void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_srng *srng;
	char buf[HAL_REG_WRITE_SRNG_STATS_LEN];
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	srng = hal_get_srng(hal, HAL_SRNG_SW2TCL1);
	hal_debug("SW2TCL1: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_WBM2SW0_RELEASE);
	hal_debug("WBM2SW0: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_REO2SW1);
	hal_debug("REO2SW1: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_REO2SW2);
	hal_debug("REO2SW2: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_REO2SW3);
	hal_debug("REO2SW3: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
}

void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
{
	uint32_t *hist;
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	hist = hal->stats.wstats.sched_delay;
	hal_debug("wstats: enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
		  qdf_atomic_read(&hal->stats.wstats.enqueues),
		  hal->stats.wstats.dequeues,
		  qdf_atomic_read(&hal->stats.wstats.coalesces),
		  qdf_atomic_read(&hal->stats.wstats.direct),
		  qdf_atomic_read(&hal->stats.wstats.q_depth),
		  hal->stats.wstats.max_q_depth,
		  hist[REG_WRITE_SCHED_DELAY_SUB_100us],
		  hist[REG_WRITE_SCHED_DELAY_SUB_1000us],
		  hist[REG_WRITE_SCHED_DELAY_SUB_5000us],
		  hist[REG_WRITE_SCHED_DELAY_GT_5000us]);
}

int hal_get_reg_write_pending_work(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	return qdf_atomic_read(&hal->active_work_cnt);
}

#endif

#ifdef FEATURE_HAL_DELAYED_REG_WRITE
#ifdef MEMORY_DEBUG
/*
 * Length of the queue (array) used to hold delayed register writes.
 * Must be a power of 2, since indices are wrapped with '& (len - 1)'.
 */
#define HAL_REG_WRITE_QUEUE_LEN 128
#else
#define HAL_REG_WRITE_QUEUE_LEN 32
#endif
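
/*
 * Both the producer (hal_reg_write_enqueue) and the consumer
 * (hal_reg_write_work) wrap their queue indices with a mask instead of a
 * modulo, for example:
 *
 *	write_idx = qdf_atomic_inc_return(&hal_soc->write_idx);
 *	write_idx &= (HAL_REG_WRITE_QUEUE_LEN - 1);
 *
 * With HAL_REG_WRITE_QUEUE_LEN = 32, a raw index of 33 maps to slot 1
 * (33 & 31 == 1); this only works because the length is a power of 2.
 */
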
/**
 * hal_process_reg_write_q_elem() - process a register write queue element
 * @hal: hal_soc pointer
 * @q_elem: pointer to hal register write queue element
 *
 * Return: The value which was written to the address
 */
static uint32_t
hal_process_reg_write_q_elem(struct hal_soc *hal,
			     struct hal_reg_write_q_elem *q_elem)
{
	struct hal_srng *srng = q_elem->srng;
	uint32_t write_val;

	SRNG_LOCK(&srng->lock);

	srng->reg_write_in_progress = false;
	srng->wstats.dequeues++;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		q_elem->dequeue_val = srng->u.src_ring.hp;
		hal_write_address_32_mb(hal,
					srng->u.src_ring.hp_addr,
					srng->u.src_ring.hp, false);
		write_val = srng->u.src_ring.hp;
	} else {
		q_elem->dequeue_val = srng->u.dst_ring.tp;
		hal_write_address_32_mb(hal,
					srng->u.dst_ring.tp_addr,
					srng->u.dst_ring.tp, false);
		write_val = srng->u.dst_ring.tp;
	}

	q_elem->valid = 0;
	srng->last_dequeue_time = q_elem->dequeue_time;
	SRNG_UNLOCK(&srng->lock);

	return write_val;
}

/**
 * hal_reg_write_fill_sched_delay_hist() - fill reg write delay histogram in hal
 * @hal: hal_soc pointer
 * @delay_us: delay in us
 *
 * Return: None
 */
static inline void hal_reg_write_fill_sched_delay_hist(struct hal_soc *hal,
						       uint64_t delay_us)
{
	uint32_t *hist;

	hist = hal->stats.wstats.sched_delay;

	if (delay_us < 100)
		hist[REG_WRITE_SCHED_DELAY_SUB_100us]++;
	else if (delay_us < 1000)
		hist[REG_WRITE_SCHED_DELAY_SUB_1000us]++;
	else if (delay_us < 5000)
		hist[REG_WRITE_SCHED_DELAY_SUB_5000us]++;
	else
		hist[REG_WRITE_SCHED_DELAY_GT_5000us]++;
}
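
/*
 * For example, a scheduling delay of 250 us is counted in the SUB_1000us
 * bin and one of 7000 us in the GT_5000us bin; hal_dump_reg_write_stats()
 * above prints the four bins in ascending order.
 */
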
#ifdef SHADOW_WRITE_DELAY

#define SHADOW_WRITE_MIN_DELTA_US 5
#define SHADOW_WRITE_DELAY_US 50

/*
 * Never add SRNGs that are performance-critical here;
 * the delay itself will hurt performance heavily.
 */
#define IS_SRNG_MATCH(s)	((s)->ring_id == HAL_SRNG_CE_1_DST_STATUS || \
				 (s)->ring_id == HAL_SRNG_CE_1_DST)

static inline bool hal_reg_write_need_delay(struct hal_reg_write_q_elem *elem)
{
	struct hal_srng *srng = elem->srng;
	struct hal_soc *hal;
	qdf_time_t now;
	qdf_iomem_t real_addr;

	if (qdf_unlikely(!srng))
		return false;

	hal = srng->hal_soc;
	if (qdf_unlikely(!hal))
		return false;

	/* Check if it is target srng, and valid shadow reg */
	if (qdf_likely(!IS_SRNG_MATCH(srng)))
		return false;

	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		real_addr = SRNG_SRC_ADDR(srng, HP);
	else
		real_addr = SRNG_DST_ADDR(srng, TP);
	if (!hal_validate_shadow_register(hal, real_addr, elem->addr))
		return false;

	/* Check the time delta from last write of same srng */
	now = qdf_get_log_timestamp();
	if (qdf_log_timestamp_to_usecs(now - srng->last_dequeue_time) >
		SHADOW_WRITE_MIN_DELTA_US)
		return false;

	/* Delay dequeue, and record */
	qdf_udelay(SHADOW_WRITE_DELAY_US);

	srng->wstats.dequeue_delay++;
	hal->stats.wstats.dequeue_delay++;

	return true;
}
#else
static inline bool hal_reg_write_need_delay(struct hal_reg_write_q_elem *elem)
{
	return false;
}
#endif
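
/*
 * Timing sketch for the SHADOW_WRITE_DELAY path above (illustrative): if the
 * previous dequeue for the same matched srng happened within
 * SHADOW_WRITE_MIN_DELTA_US (5 us), hal_reg_write_need_delay() busy-waits
 * for SHADOW_WRITE_DELAY_US (50 us) before the shadow register is written
 * again, and accounts the event in the dequeue_delay counters.
 */
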
/**
 * hal_reg_write_work() - Worker to process delayed writes
 * @arg: hal_soc pointer
 *
 * Return: None
 */
static void hal_reg_write_work(void *arg)
{
	int32_t q_depth, write_val;
	struct hal_soc *hal = arg;
	struct hal_reg_write_q_elem *q_elem;
	uint64_t delta_us;
	uint8_t ring_id;
	uint32_t *addr;
	uint32_t num_processed = 0;

	q_elem = &hal->reg_write_queue[(hal->read_idx)];
	q_elem->work_scheduled_time = qdf_get_log_timestamp();
	q_elem->cpu_id = qdf_get_cpu();

	/* Make sure q_elem is consistent in memory across cores */
	qdf_rmb();
	if (!q_elem->valid)
		return;

	q_depth = qdf_atomic_read(&hal->stats.wstats.q_depth);
	if (q_depth > hal->stats.wstats.max_q_depth)
		hal->stats.wstats.max_q_depth = q_depth;

	if (hif_prevent_link_low_power_states(hal->hif_handle)) {
		hal->stats.wstats.prevent_l1_fails++;
		return;
	}

	while (true) {
		qdf_rmb();
		if (!q_elem->valid)
			break;

		q_elem->dequeue_time = qdf_get_log_timestamp();
		ring_id = q_elem->srng->ring_id;
		addr = q_elem->addr;
		delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
						      q_elem->enqueue_time);
		hal_reg_write_fill_sched_delay_hist(hal, delta_us);

		hal->stats.wstats.dequeues++;
		qdf_atomic_dec(&hal->stats.wstats.q_depth);

		if (hal_reg_write_need_delay(q_elem))
			hal_verbose_debug("Delay reg writer for srng 0x%x, addr 0x%pK",
					  q_elem->srng->ring_id, q_elem->addr);

		write_val = hal_process_reg_write_q_elem(hal, q_elem);
		hal_verbose_debug("read_idx %u srng 0x%x, addr 0x%pK dequeue_val %u sched delay %llu us",
				  hal->read_idx, ring_id, addr, write_val,
				  delta_us);

		qdf_trace_dp_del_reg_write(ring_id, q_elem->enqueue_val,
					   q_elem->dequeue_val,
					   q_elem->enqueue_time,
					   q_elem->dequeue_time);

		num_processed++;
		hal->read_idx = (hal->read_idx + 1) &
				(HAL_REG_WRITE_QUEUE_LEN - 1);
		q_elem = &hal->reg_write_queue[(hal->read_idx)];
	}

	hif_allow_link_low_power_states(hal->hif_handle);
	/*
	 * Decrement active_work_cnt by the number of elements dequeued after
	 * hif_allow_link_low_power_states.
	 * This makes sure that hif_try_complete_tasks will wait till we make
	 * the bus access in hif_allow_link_low_power_states. This will avoid
	 * race condition between delayed register worker and bus suspend
	 * (system suspend or runtime suspend).
	 *
	 * The following decrement should be done at the end!
	 */
	qdf_atomic_sub(num_processed, &hal->active_work_cnt);
}

static void __hal_flush_reg_write_work(struct hal_soc *hal)
{
	qdf_flush_work(&hal->reg_write_work);
	qdf_disable_work(&hal->reg_write_work);
}

void hal_flush_reg_write_work(hal_soc_handle_t hal_handle)
{
	__hal_flush_reg_write_work((struct hal_soc *)hal_handle);
}

/**
 * hal_reg_write_enqueue() - enqueue register writes into kworker
 * @hal_soc: hal_soc pointer
 * @srng: srng pointer
 * @addr: iomem address of register
 * @value: value to be written to iomem address
 *
 * This function executes from within the SRNG LOCK
 *
 * Return: None
 */
static void hal_reg_write_enqueue(struct hal_soc *hal_soc,
				  struct hal_srng *srng,
				  void __iomem *addr,
				  uint32_t value)
{
	struct hal_reg_write_q_elem *q_elem;
	uint32_t write_idx;

	if (srng->reg_write_in_progress) {
		hal_verbose_debug("Already in progress srng ring id 0x%x addr 0x%pK val %u",
				  srng->ring_id, addr, value);
		qdf_atomic_inc(&hal_soc->stats.wstats.coalesces);
		srng->wstats.coalesces++;
		return;
	}

	write_idx = qdf_atomic_inc_return(&hal_soc->write_idx);

	write_idx = write_idx & (HAL_REG_WRITE_QUEUE_LEN - 1);

	q_elem = &hal_soc->reg_write_queue[write_idx];

	if (q_elem->valid) {
		hal_err("queue full");
		QDF_BUG(0);
		return;
	}

	qdf_atomic_inc(&hal_soc->stats.wstats.enqueues);
	srng->wstats.enqueues++;

	qdf_atomic_inc(&hal_soc->stats.wstats.q_depth);

	q_elem->srng = srng;
	q_elem->addr = addr;
	q_elem->enqueue_val = value;
	q_elem->enqueue_time = qdf_get_log_timestamp();

	/*
	 * Before the valid flag is set to true, all the other
	 * fields in the q_elem need to be updated in memory.
	 * Else there is a chance that the dequeuing worker thread
	 * might read stale entries and process an incorrect srng.
	 */
	qdf_wmb();
	q_elem->valid = true;

	/*
	 * After all other fields in the q_elem have been updated
	 * in memory successfully, the valid flag needs to be updated
	 * in memory in time too.
	 * Else there is a chance that the dequeuing worker thread
	 * might read a stale valid flag and the work will be bypassed
	 * for this round. And if there is no other work scheduled
	 * later, this hal register write won't be updated any more.
	 */
	qdf_wmb();

	srng->reg_write_in_progress = true;
	qdf_atomic_inc(&hal_soc->active_work_cnt);

	hal_verbose_debug("write_idx %u srng ring id 0x%x addr 0x%pK val %u",
			  write_idx, srng->ring_id, addr, value);

	qdf_queue_work(hal_soc->qdf_dev, hal_soc->reg_write_wq,
		       &hal_soc->reg_write_work);
}
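
/*
 * Note on the initial index values chosen below: write_idx starts at -1 so
 * that the first qdf_atomic_inc_return() in hal_reg_write_enqueue() yields
 * slot 0, keeping the producer aligned with the consumer's read_idx = 0.
 */
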
/**
 * hal_delayed_reg_write_init() - Initialization function for delayed reg writes
 * @hal: hal_soc pointer
 *
 * Initialize main data structures to process register writes in a delayed
 * workqueue.
 *
 * Return: QDF_STATUS_SUCCESS on success else a QDF error.
 */
static QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
{
	hal->reg_write_wq =
		qdf_alloc_high_prior_ordered_workqueue("hal_register_write_wq");
	qdf_create_work(0, &hal->reg_write_work, hal_reg_write_work, hal);
	hal->reg_write_queue = qdf_mem_malloc(HAL_REG_WRITE_QUEUE_LEN *
					      sizeof(*hal->reg_write_queue));
	if (!hal->reg_write_queue) {
		hal_err("unable to allocate memory");
		QDF_BUG(0);
		return QDF_STATUS_E_NOMEM;
	}

	/* Initial value of indices */
	hal->read_idx = 0;
	qdf_atomic_set(&hal->write_idx, -1);
	return QDF_STATUS_SUCCESS;
}

/**
 * hal_delayed_reg_write_deinit() - De-Initialize delayed reg write processing
 * @hal: hal_soc pointer
 *
 * De-initialize main data structures to process register writes in a delayed
 * workqueue.
 *
 * Return: None
 */
static void hal_delayed_reg_write_deinit(struct hal_soc *hal)
{
	__hal_flush_reg_write_work(hal);

	qdf_flush_workqueue(0, hal->reg_write_wq);
	qdf_destroy_workqueue(0, hal->reg_write_wq);
	qdf_mem_free(hal->reg_write_queue);
}

#else
static inline QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
{
	return QDF_STATUS_SUCCESS;
}

static inline void hal_delayed_reg_write_deinit(struct hal_soc *hal)
{
}
#endif

#ifdef FEATURE_HAL_DELAYED_REG_WRITE
#ifdef HAL_RECORD_SUSPEND_WRITE
static struct hal_suspend_write_history g_hal_suspend_write_history;

static
void hal_event_suspend_record(uint8_t ring_id, uint32_t value, uint32_t count)
{
	uint32_t index =
		qdf_atomic_read(&g_hal_suspend_write_history.index) &
				(HAL_SUSPEND_WRITE_HISTORY_MAX - 1);
	struct hal_suspend_write_record *cur_event =
		&g_hal_suspend_write_history.record[index];

	cur_event->ts = qdf_get_log_timestamp();
	cur_event->ring_id = ring_id;
	cur_event->value = value;
	cur_event->direct_wcount = count;
	qdf_atomic_inc(&g_hal_suspend_write_history.index);
}

static inline
void hal_record_suspend_write(uint8_t ring_id, uint32_t value, uint32_t count)
{
	if (hif_rtpm_get_state() >= HIF_RTPM_STATE_SUSPENDING)
		hal_event_suspend_record(ring_id, value, count);
}
#else
static inline
void hal_record_suspend_write(uint8_t ring_id, uint32_t value, uint32_t count)
{
}
#endif
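
/*
 * Decision summary for the QCA6750 variant below (descriptive, mirrors the
 * code): CE rings consult the non-DP endpoint vote and write directly when
 * access is disabled, or when access is intermediate while the MHI link is
 * in L0. All other rings write directly when the DP vote is disabled, when
 * throughput is high, or when the MHI link is in L0; every remaining case
 * is queued for the delayed-write worker.
 */
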
#ifdef QCA_WIFI_QCA6750
void hal_delayed_reg_write(struct hal_soc *hal_soc,
			   struct hal_srng *srng,
			   void __iomem *addr,
			   uint32_t value)
{
	uint8_t vote_access;

	switch (srng->ring_type) {
	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		vote_access = hif_get_ep_vote_access(hal_soc->hif_handle,
						     HIF_EP_VOTE_NONDP_ACCESS);
		if ((vote_access == HIF_EP_VOTE_ACCESS_DISABLE) ||
		    (vote_access == HIF_EP_VOTE_INTERMEDIATE_ACCESS &&
		     PLD_MHI_STATE_L0 ==
		     pld_get_mhi_state(hal_soc->qdf_dev->dev))) {
			hal_write_address_32_mb(hal_soc, addr, value, false);
			qdf_atomic_inc(&hal_soc->stats.wstats.direct);
			srng->wstats.direct++;
		} else {
			hal_reg_write_enqueue(hal_soc, srng, addr, value);
		}
		break;
	default:
		if (hif_get_ep_vote_access(hal_soc->hif_handle,
					   HIF_EP_VOTE_DP_ACCESS) ==
		    HIF_EP_VOTE_ACCESS_DISABLE ||
		    hal_is_reg_write_tput_level_high(hal_soc) ||
		    PLD_MHI_STATE_L0 ==
		    pld_get_mhi_state(hal_soc->qdf_dev->dev)) {
			hal_write_address_32_mb(hal_soc, addr, value, false);
			qdf_atomic_inc(&hal_soc->stats.wstats.direct);
			srng->wstats.direct++;
		} else {
			hal_reg_write_enqueue(hal_soc, srng, addr, value);
		}

		break;
	}
}
#else
void hal_delayed_reg_write(struct hal_soc *hal_soc,
			   struct hal_srng *srng,
			   void __iomem *addr,
			   uint32_t value)
{
	if (hal_is_reg_write_tput_level_high(hal_soc) ||
	    pld_is_device_awake(hal_soc->qdf_dev->dev)) {
		qdf_atomic_inc(&hal_soc->stats.wstats.direct);
		srng->wstats.direct++;
		hal_write_address_32_mb(hal_soc, addr, value, false);
	} else {
		hal_reg_write_enqueue(hal_soc, srng, addr, value);
	}

	hal_record_suspend_write(srng->ring_id, value, srng->wstats.direct);
}
#endif
#endif
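
/*
 * Typical lifecycle (illustrative sketch only; error handling elided):
 *
 *	void *hal = hal_attach(hif_handle, qdf_dev);
 *	...
 *	srng = hal_srng_setup(hal, REO_DST, 0, 0, &ring_params, false);
 *	...
 *	hal_detach(hal);
 */
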
/**
 * hal_attach() - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle().
 *
 * Return: Opaque HAL SOC handle
 *	   NULL on failure (if given ring is not available)
 */
void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev)
{
	struct hal_soc *hal;
	int i;

	hal = qdf_mem_malloc(sizeof(*hal));

	if (!hal) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal_soc allocation failed", __func__);
		goto fail0;
	}
	hal->hif_handle = hif_handle;
	hal->dev_base_addr = hif_get_dev_ba(hif_handle); /* UMAC */
	hal->dev_base_addr_ce = hif_get_dev_ba_ce(hif_handle); /* CE */
	hal->dev_base_addr_cmem = hif_get_dev_ba_cmem(hif_handle); /* CMEM */
	hal->dev_base_addr_pmm = hif_get_dev_ba_pmm(hif_handle); /* PMM */
	hal->qdf_dev = qdf_dev;
	hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent(
		qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
		HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr));
	if (!hal->shadow_rdptr_mem_paddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal->shadow_rdptr_mem_paddr allocation failed",
			  __func__);
		goto fail1;
	}
	qdf_mem_zero(hal->shadow_rdptr_mem_vaddr,
		     sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX);

	hal->shadow_wrptr_mem_vaddr =
		(uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		&(hal->shadow_wrptr_mem_paddr));
	if (!hal->shadow_wrptr_mem_vaddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal->shadow_wrptr_mem_vaddr allocation failed",
			  __func__);
		goto fail2;
	}
	qdf_mem_zero(hal->shadow_wrptr_mem_vaddr,
		     sizeof(*(hal->shadow_wrptr_mem_vaddr)) *
		     HAL_MAX_LMAC_RINGS);

	for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
		hal->srng_list[i].initialized = 0;
		hal->srng_list[i].ring_id = i;
	}

	qdf_spinlock_create(&hal->register_access_lock);
	hal->register_window = 0;
	hal->target_type = hal_get_target_type(hal_soc_to_hal_soc_handle(hal));
	hal->version = hif_get_soc_version(hif_handle);
	hal->ops = qdf_mem_malloc(sizeof(*hal->ops));

	if (!hal->ops) {
		hal_err("unable to allocate memory for HAL ops");
		goto fail3;
	}

	hal_target_based_configure(hal);

	hal_reg_write_fail_history_init(hal);

	qdf_minidump_log(hal, sizeof(*hal), "hal_soc");

	qdf_ssr_driver_dump_register_region("hal_soc", hal, sizeof(*hal));

	qdf_atomic_init(&hal->active_work_cnt);
	if (hal_delayed_reg_write_init(hal) != QDF_STATUS_SUCCESS) {
		hal_err("unable to initialize delayed reg write");
		goto fail4;
	}

	if (hal_reo_shared_qaddr_setup((hal_soc_handle_t)hal)
	    != QDF_STATUS_SUCCESS) {
		hal_err("unable to setup reo shared qaddr");
		goto fail5;
	}

	hif_rtpm_register(HIF_RTPM_ID_HAL_REO_CMD, NULL);

	return (void *)hal;
fail5:
	hal_delayed_reg_write_deinit(hal);
fail4:
	qdf_ssr_driver_dump_unregister_region("hal_soc");
	qdf_minidump_remove(hal, sizeof(*hal), "hal_soc");
	qdf_mem_free(hal->ops);
fail3:
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
				sizeof(*hal->shadow_wrptr_mem_vaddr) *
				HAL_MAX_LMAC_RINGS,
				hal->shadow_wrptr_mem_vaddr,
				hal->shadow_wrptr_mem_paddr, 0);
fail2:
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
				sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
				HAL_SRNG_ID_MAX,
				hal->shadow_rdptr_mem_vaddr,
				hal->shadow_rdptr_mem_paddr, 0);
fail1:
	qdf_mem_free(hal);
fail0:
	return NULL;
}
qdf_export_symbol(hal_attach);

/**
 * hal_get_meminfo() - Retrieve hal memory base addresses
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	mem->dev_base_addr = (void *)hal->dev_base_addr;
	mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
	mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
	mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
	mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
	hif_read_phy_mem_base((void *)hal->hif_handle,
			      (qdf_dma_addr_t *)&mem->dev_base_paddr);
	mem->lmac_srng_start_id = HAL_SRNG_LMAC1_ID_START;
}
qdf_export_symbol(hal_get_meminfo);

/**
 * hal_detach() - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF de-initialization.
 *
 * Return: None
 */
void hal_detach(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	hif_rtpm_deregister(HIF_RTPM_ID_HAL_REO_CMD);
	hal_delayed_reg_write_deinit(hal);
	hal_reo_shared_qaddr_detach((hal_soc_handle_t)hal);
	qdf_ssr_driver_dump_unregister_region("hal_soc");
	qdf_minidump_remove(hal, sizeof(*hal), "hal_soc");
	qdf_mem_free(hal->ops);

	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
				sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
				HAL_SRNG_ID_MAX,
				hal->shadow_rdptr_mem_vaddr,
				hal->shadow_rdptr_mem_paddr, 0);
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
				sizeof(*(hal->shadow_wrptr_mem_vaddr)) *
				HAL_MAX_LMAC_RINGS,
				hal->shadow_wrptr_mem_vaddr,
				hal->shadow_wrptr_mem_paddr, 0);
	qdf_mem_free(hal);
}
qdf_export_symbol(hal_detach);

#define HAL_CE_CHANNEL_DST_DEST_CTRL_ADDR(x)	((x) + 0x000000b0)
#define HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK	0x0000ffff
#define HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_ADDR(x) \
						((x) + 0x00000040)
#define HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_RMSK	0x00000007

/**
 * hal_ce_dst_setup() - Initialize CE destination ring registers
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @ring_num: ring number
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
				    int ring_num)
{
	uint32_t reg_val = 0;
	uint32_t reg_addr;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, CE_DST);

	/* set DEST_MAX_LENGTH according to ce assignment */
	reg_addr = HAL_CE_CHANNEL_DST_DEST_CTRL_ADDR(
			ring_config->reg_start[R0_INDEX] +
			(ring_num * ring_config->reg_size[R0_INDEX]));

	reg_val = HAL_REG_READ(hal, reg_addr);
	reg_val &= ~HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	reg_val |= srng->u.dst_ring.max_buffer_length &
		   HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	HAL_REG_WRITE(hal, reg_addr, reg_val);

	if (srng->prefetch_timer) {
		reg_addr = HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_ADDR(
				ring_config->reg_start[R0_INDEX] +
				(ring_num * ring_config->reg_size[R0_INDEX]));

		reg_val = HAL_REG_READ(hal, reg_addr);
		reg_val &= ~HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_RMSK;
		reg_val |= srng->prefetch_timer;
		HAL_REG_WRITE(hal, reg_addr, reg_val);
		reg_val = HAL_REG_READ(hal, reg_addr);
	}
}

/**
 * hal_reo_read_write_ctrl_ix() - Read or write REO_DESTINATION_RING_CTRL_IX
 * @hal_soc_hdl: HAL SOC handle
 * @read: boolean value to indicate if read or write
 * @ix0: pointer to store IX0 reg value
 * @ix1: pointer to store IX1 reg value
 * @ix2: pointer to store IX2 reg value
 * @ix3: pointer to store IX3 reg value
 *
 * Only the non-NULL index pointers are read or written.
 */
void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
				uint32_t *ix0, uint32_t *ix1,
				uint32_t *ix2, uint32_t *ix3)
{
	uint32_t reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
	uint32_t reo_reg_base;

	reo_reg_base = hal_get_reo_reg_base_offset(hal_soc_hdl);

	if (read) {
		if (ix0) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(
						reo_reg_base);
			*ix0 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix1) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(
						reo_reg_base);
			*ix1 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix2) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(
						reo_reg_base);
			*ix2 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix3) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(
						reo_reg_base);
			*ix3 = HAL_REG_READ(hal, reg_offset);
		}
	} else {
		if (ix0) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix0, true);
		}

		if (ix1) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix1, true);
		}

		if (ix2) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix2, true);
		}

		if (ix3) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix3, true);
		}
	}
}

qdf_export_symbol(hal_reo_read_write_ctrl_ix);
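
/*
 * Usage sketch (illustrative only): read-modify-write of a single remap
 * register, leaving the others untouched by passing NULL. 'new_remap_bits'
 * is a hypothetical caller-defined value:
 *
 *	uint32_t ix2;
 *
 *	hal_reo_read_write_ctrl_ix(hal_soc_hdl, true, NULL, NULL, &ix2, NULL);
 *	ix2 |= new_remap_bits;
 *	hal_reo_read_write_ctrl_ix(hal_soc_hdl, false, NULL, NULL, &ix2, NULL);
 */
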
/**
 * hal_srng_dst_set_hp_paddr_confirm() - Set physical address to dest ring head
 *  pointer and confirm that write went through by reading back the value
 * @srng: srng pointer
 * @paddr: physical address
 *
 * Return: None
 */
void hal_srng_dst_set_hp_paddr_confirm(struct hal_srng *srng, uint64_t paddr)
{
	SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_LSB, paddr & 0xffffffff);
	SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_MSB, paddr >> 32);
}

qdf_export_symbol(hal_srng_dst_set_hp_paddr_confirm);

/**
 * hal_srng_dst_init_hp() - Initialize destination ring head pointer
 * @hal_soc: hal_soc handle
 * @srng: srng pointer
 * @vaddr: virtual address
 */
void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc,
			  struct hal_srng *srng,
			  uint32_t *vaddr)
{
	uint32_t reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (!srng)
		return;

	srng->u.dst_ring.hp_addr = vaddr;
	reg_offset = SRNG_DST_ADDR(srng, HP) - hal->dev_base_addr;
	HAL_REG_WRITE_CONFIRM_RETRY(
		hal, reg_offset, srng->u.dst_ring.cached_hp, true);

	if (vaddr) {
		*srng->u.dst_ring.hp_addr = srng->u.dst_ring.cached_hp;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "hp_addr=%pK, cached_hp=%d",
			  (void *)srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp);
	}
}

qdf_export_symbol(hal_srng_dst_init_hp);

/**
 * hal_srng_hw_init() - Private function to initialize SRNG HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @idle_check: Check if ring is idle
 * @idx: ring index
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
				    struct hal_srng *srng, bool idle_check,
				    uint32_t idx)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		hal_srng_src_hw_init(hal, srng, idle_check, idx);
	else
		hal_srng_dst_hw_init(hal, srng, idle_check, idx);
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * hal_srng_is_near_full_irq_supported() - Check if near full irq is
 *  supported on this SRNG
 * @hal_soc: HAL SoC handle
 * @ring_type: SRNG type
 * @ring_num: ring number
 *
 * Return: true, if near full irq is supported for this SRNG
 *	   false, if near full irq is not supported for this SRNG
 */
bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
					 int ring_type, int ring_num)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->nf_irq_support;
}

/**
 * hal_srng_set_msi2_params() - Set MSI2 params to SRNG data structure from
 *  ring params
 * @srng: SRNG handle
 * @ring_params: ring params for this SRNG
 *
 * Return: None
 */
static inline void
hal_srng_set_msi2_params(struct hal_srng *srng,
			 struct hal_srng_params *ring_params)
{
	srng->msi2_addr = ring_params->msi2_addr;
	srng->msi2_data = ring_params->msi2_data;
}

/**
 * hal_srng_get_nf_params() - Get the near full MSI2 params from srng
 * @srng: SRNG handle
 * @ring_params: ring params for this SRNG
 *
 * Return: None
 */
static inline void
hal_srng_get_nf_params(struct hal_srng *srng,
		       struct hal_srng_params *ring_params)
{
	ring_params->msi2_addr = srng->msi2_addr;
	ring_params->msi2_data = srng->msi2_data;
}

/**
 * hal_srng_set_nf_thresholds() - Set the near full thresholds in SRNG
 * @srng: SRNG handle where the params are to be set
 * @ring_params: ring params, from where threshold is to be fetched
 *
 * Return: None
 */
static inline void
hal_srng_set_nf_thresholds(struct hal_srng *srng,
			   struct hal_srng_params *ring_params)
{
	srng->u.dst_ring.nf_irq_support = ring_params->nf_irq_support;
	srng->u.dst_ring.high_thresh = ring_params->high_thresh;
}
#else
static inline void
hal_srng_set_msi2_params(struct hal_srng *srng,
			 struct hal_srng_params *ring_params)
{
}

static inline void
hal_srng_get_nf_params(struct hal_srng *srng,
		       struct hal_srng_params *ring_params)
{
}

static inline void
hal_srng_set_nf_thresholds(struct hal_srng *srng,
			   struct hal_srng_params *ring_params)
{
}
#endif

#if defined(CLEAR_SW2TCL_CONSUMED_DESC)
/**
 * hal_srng_last_desc_cleared_init() - Initialize SRNG last_desc_cleared ptr
 * @srng: Source ring pointer
 *
 * Return: None
 */
static inline
void hal_srng_last_desc_cleared_init(struct hal_srng *srng)
{
	srng->last_desc_cleared = srng->ring_size - srng->entry_size;
}

#else
static inline
void hal_srng_last_desc_cleared_init(struct hal_srng *srng)
{
}
#endif /* CLEAR_SW2TCL_CONSUMED_DESC */
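
/*
 * Worked example for the watermark thresholds computed below (illustrative):
 * with num_entries = 1024 the bin boundaries become 512 (50%), 614 (60%),
 * 716 (70%), 819 (80%) and 921 (90%) entries; anything below 50% falls into
 * the BELOW_50_PERCENT bin, whose threshold stays 0.
 */
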
#ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
static inline void hal_srng_update_high_wm_thresholds(struct hal_srng *srng)
{
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100] =
		((srng->num_entries * 90) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90] =
		((srng->num_entries * 80) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80] =
		((srng->num_entries * 70) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70] =
		((srng->num_entries * 60) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60] =
		((srng->num_entries * 50) / 100);
	/* Below 50% threshold is not needed */
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT] = 0;

	hal_info("ring_id: %u, wm_thresh- <50:%u, 50-60:%u, 60-70:%u, 70-80:%u, 80-90:%u, 90-100:%u",
		 srng->ring_id,
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100]);
}
#else
static inline void hal_srng_update_high_wm_thresholds(struct hal_srng *srng)
{
}
#endif
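
/*
 * Unit note (derived from the code below): ring_config->entry_size and
 * srng->ring_size are kept in 32-bit words, which is why the ring memory is
 * zeroed with a '<< 2' byte conversion and hal_srng_get_entrysize() returns
 * entry_size << 2 bytes.
 */
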
/**
 * hal_srng_setup_idx() - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 *	      from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 * @idle_check: Check if ring is idle
 * @idx: Ring index to be programmed as init value in HP/TP based on srng type
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure. Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *	   NULL on failure (if given ring is not available)
 */
void *hal_srng_setup_idx(void *hal_soc, int ring_type, int ring_num,
			 int mac_id, struct hal_srng_params *ring_params,
			 bool idle_check, uint32_t idx)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	hal_soc_handle_t hal_hdl = (hal_soc_handle_t)hal;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	hal_verbose_debug("mac_id %d ring_id %d", mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->initialized) {
		hal_verbose_debug("Ring (ring_type, ring_num) already initialized");
		return NULL;
	}

	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_type = ring_type;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	srng->ring_size = srng->num_entries * srng->entry_size;
	srng->ring_size_mask = srng->ring_size - 1;
	srng->ring_vaddr_end = srng->ring_base_vaddr + srng->ring_size;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	if (!idle_check)
		srng->prefetch_timer = ring_params->prefetch_timer;
	srng->hal_soc = hal_soc;
	hal_srng_set_msi2_params(srng, ring_params);
	hal_srng_update_high_wm_thresholds(srng);

	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
			+ (ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
					     srng->num_entries) << 2);

	srng->flags = ring_params->flags;

	/* For cached descriptors flush and invalidate the memory */
	if (srng->flags & HAL_SRNG_CACHED_DESC) {
		qdf_nbuf_dma_clean_range(
				srng->ring_base_vaddr,
				srng->ring_base_vaddr +
				((srng->entry_size * srng->num_entries)));
		qdf_nbuf_dma_inv_range(
				srng->ring_base_vaddr,
				srng->ring_base_vaddr +
				((srng->entry_size * srng->num_entries)));
	}
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should get these flags from caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	hal_srng_last_desc_cleared_init(srng);

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;

		if (srng->u.src_ring.tp_addr)
			qdf_mem_zero(srng->u.src_ring.tp_addr,
				     sizeof(*hal->shadow_rdptr_mem_vaddr));

		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;

			if (srng->u.src_ring.hp_addr)
				qdf_mem_zero(srng->u.src_ring.hp_addr,
					     sizeof(*hal->shadow_wrptr_mem_vaddr));

		} else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
			srng->u.src_ring.hp_addr =
				hal_get_window_address(hal,
						SRNG_SRC_ADDR(srng, HP));

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_SRC_ADDR(srng, HP),
						     srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		hal_srng_set_nf_thresholds(srng, ring_params);
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);

		if (srng->u.dst_ring.hp_addr)
			qdf_mem_zero(srng->u.dst_ring.hp_addr,
				     sizeof(*hal->shadow_rdptr_mem_vaddr));

		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;

			if (srng->u.dst_ring.tp_addr)
				qdf_mem_zero(srng->u.dst_ring.tp_addr,
					     sizeof(*hal->shadow_wrptr_mem_vaddr));

		} else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
			srng->u.dst_ring.tp_addr =
				hal_get_window_address(hal,
						SRNG_DST_ADDR(srng, TP));

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_DST_ADDR(srng, TP),
						     srng->u.dst_ring.tp_addr);
		}
	}

	if (!(ring_config->lmac_ring)) {
		if (idx) {
			hal->ops->hal_tx_ring_halt_set(hal_hdl);
			do {
				hal_info("Waiting for ring reset");
			} while (!(hal->ops->hal_tx_ring_halt_poll(hal_hdl)));
		}
		hal_srng_hw_init(hal, srng, idle_check, idx);

		if (idx)
			hal->ops->hal_tx_ring_halt_reset(hal_hdl);

		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length =
				ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	srng->srng_event = 0;

	srng->initialized = true;

	return (void *)srng;
}
qdf_export_symbol(hal_srng_setup_idx);

/**
 * hal_srng_setup() - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 *	      from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 * @idle_check: Check if ring is idle
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure. Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *	   NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
		     int mac_id, struct hal_srng_params *ring_params,
		     bool idle_check)
{
	return hal_srng_setup_idx(hal_soc, ring_type, ring_num, mac_id,
				  ring_params, idle_check, 0);
}
qdf_export_symbol(hal_srng_setup);
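
/*
 * Caller sketch for the setup path (illustrative only; 'my_num_entries' is
 * hypothetical and error handling is elided):
 *
 *	struct hal_srng_params params = {0};
 *	uint32_t entry_size = hal_srng_get_entrysize(hal, REO_DST);
 *
 *	params.num_entries = my_num_entries;
 *	params.ring_base_vaddr = qdf_mem_alloc_consistent(qdf_dev,
 *			qdf_dev->dev, my_num_entries * entry_size,
 *			&params.ring_base_paddr);
 *	srng = hal_srng_setup(hal, REO_DST, 0, 0, &params, false);
 */
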
/**
 * hal_srng_cleanup() - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Opaque HAL SRNG pointer
 */
void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	SRNG_LOCK_DESTROY(&srng->lock);
	srng->initialized = 0;
	hal_srng_hw_disable(hal_soc, srng);
}
qdf_export_symbol(hal_srng_cleanup);

/**
 * hal_srng_get_entrysize() - Returns size of ring entry in bytes
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: ring entry size in bytes
 */
uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->entry_size << 2;
}
qdf_export_symbol(hal_srng_get_entrysize);

/**
 * hal_srng_max_entries() - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->max_size / ring_config->entry_size;
}
qdf_export_symbol(hal_srng_max_entries);

enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->ring_dir;
}

/**
 * hal_srng_dump() - Dump ring status
 * @srng: hal srng pointer
 */
void hal_srng_dump(struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		hal_debug("=== SRC RING %d ===", srng->ring_id);
		hal_debug("hp %u, reap_hp %u, tp %u, cached tp %u",
			  srng->u.src_ring.hp,
			  srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr,
			  srng->u.src_ring.cached_tp);
	} else {
		hal_debug("=== DST RING %d ===", srng->ring_id);
		hal_debug("tp %u, hp %u, cached tp %u, loop_cnt %u",
			  srng->u.dst_ring.tp,
			  *srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp,
			  srng->u.dst_ring.loop_cnt);
	}
}

/**
 * hal_get_srng_params() - Retrieve SRNG parameters for a given ring from HAL
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl,
			 struct hal_srng_params *ring_params)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	int i = 0;

	ring_params->ring_id = srng->ring_id;
	ring_params->ring_dir = srng->ring_dir;
	ring_params->entry_size = srng->entry_size;

	ring_params->ring_base_paddr = srng->ring_base_paddr;
	ring_params->ring_base_vaddr = srng->ring_base_vaddr;
	ring_params->num_entries = srng->num_entries;
	ring_params->msi_addr = srng->msi_addr;
	ring_params->msi_data = srng->msi_data;
	ring_params->intr_timer_thres_us = srng->intr_timer_thres_us;
	ring_params->intr_batch_cntr_thres_entries =
		srng->intr_batch_cntr_thres_entries;
	ring_params->low_threshold = srng->u.src_ring.low_threshold;
	ring_params->flags = srng->flags;
	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++)
		ring_params->hwreg_base[i] = srng->hwreg_base[i];

	hal_srng_get_nf_params(srng, ring_params);
}
qdf_export_symbol(hal_get_srng_params);

void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
			   uint32_t low_threshold)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	srng->u.src_ring.low_threshold = low_threshold * srng->entry_size;
}
qdf_export_symbol(hal_set_low_threshold);

#ifdef FEATURE_RUNTIME_PM
void
hal_srng_rtpm_access_end(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl,
			 uint32_t rtpm_id)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	if (qdf_unlikely(!hal_ring_hdl)) {
		qdf_print("Error: Invalid hal_ring\n");
		return;
	}

	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, rtpm_id) == 0) {
		if (hif_system_pm_state_check(hal_soc->hif_handle)) {
			hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
			hal_srng_inc_flush_cnt(hal_ring_hdl);
		} else {
			hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		}

		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, rtpm_id);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}
}

qdf_export_symbol(hal_srng_rtpm_access_end);
#endif /* FEATURE_RUNTIME_PM */

#ifdef FORCE_WAKE
void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc;

	hal_soc->init_phase = init_phase;
}
#endif /* FORCE_WAKE */