/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "hal_api.h"
#include "hal_reo.h"
#include "target_type.h"
#include "qdf_module.h"
#include "wcss_version.h"
#include <qdf_tracepoint.h>

struct tcl_data_cmd gtcl_data_symbol __attribute__((used));

#ifdef QCA_WIFI_QCA6290
void hal_qca6290_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA8074
void hal_qca8074_attach(struct hal_soc *hal);
#endif
#if defined(QCA_WIFI_QCA8074V2) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA9574)
void hal_qca8074v2_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6390
void hal_qca6390_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6490
void hal_qca6490_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN9000
void hal_qcn9000_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN9224
void hal_qcn9224v1_attach(struct hal_soc *hal);
void hal_qcn9224v2_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN6122
void hal_qcn6122_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6750
void hal_qca6750_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA5018
void hal_qca5018_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA5332
void hal_qca5332_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_KIWI
void hal_kiwi_attach(struct hal_soc *hal);
#endif

#ifdef ENABLE_VERBOSE_DEBUG
bool is_hal_verbose_debug_enabled;
#endif

#define HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(x)	((x) + 0x4)
#define HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(x)	((x) + 0x8)
#define HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(x)	((x) + 0xc)
#define HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(x)	((x) + 0x10)

#ifdef ENABLE_HAL_REG_WR_HISTORY
struct hal_reg_write_fail_history hal_reg_wr_hist;

void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
				 uint32_t offset,
				 uint32_t wr_val, uint32_t rd_val)
{
	struct hal_reg_write_fail_entry *record;
	int idx;

	idx = hal_history_get_next_index(&hal_soc->reg_wr_fail_hist->index,
					 HAL_REG_WRITE_HIST_SIZE);

	record = &hal_soc->reg_wr_fail_hist->record[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->reg_offset = offset;
	record->write_val = wr_val;
	record->read_val = rd_val;
}

static void hal_reg_write_fail_history_init(struct hal_soc *hal)
{
	hal->reg_wr_fail_hist = &hal_reg_wr_hist;

	qdf_atomic_set(&hal->reg_wr_fail_hist->index, -1);
}
#else
static void hal_reg_write_fail_history_init(struct hal_soc *hal)
{
}
#endif
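
/*
 * Illustrative sketch (not part of the driver): with
 * ENABLE_HAL_REG_WR_HISTORY set, a caller that verifies a register write
 * by reading the value back could record a mismatch as below.
 * verify_reg_write() and the read/write helpers are hypothetical; only
 * hal_reg_wr_fail_history_add() is defined above.
 *
 *	static void verify_reg_write(struct hal_soc *hal, uint32_t offset,
 *				     uint32_t wr_val, uint32_t rd_val)
 *	{
 *		if (rd_val != wr_val)
 *			hal_reg_wr_fail_history_add(hal, offset,
 *						    wr_val, rd_val);
 *	}
 */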

/**
 * hal_get_srng_ring_id() - get the ring id of a described ring
 * @hal: hal_soc data structure
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings)
 *
 * Return: the ring id or -EINVAL if the ring does not exist.
 */
static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
				int ring_num, int mac_id)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	int ring_id;

	if (ring_num >= ring_config->max_rings) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: ring_num exceeded maximum no. of supported rings",
			  __func__);
		/* TODO: This is a programming error. Assert if this happens */
		return -EINVAL;
	}

	/*
	 * For BE targets, dmac_cmn_src_rxbuf_ring is set. When it is set,
	 * a per-LMAC ring ID is assigned only to LMAC destination rings;
	 * otherwise every LMAC ring gets a per-LMAC ring ID.
	 */
	if (ring_config->lmac_ring &&
	    (!hal->dmac_cmn_src_rxbuf_ring ||
	     ring_config->ring_dir == HAL_SRNG_DST_RING)) {
		ring_id = (ring_config->start_ring_id + ring_num +
			   (mac_id * HAL_MAX_RINGS_PER_LMAC));
	} else {
		ring_id = ring_config->start_ring_id + ring_num;
	}

	return ring_id;
}

static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
{
	/* TODO: Should we allocate srng structures dynamically? */
	return &(hal->srng_list[ring_id]);
}
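
/*
 * Worked example (the constants are illustrative, not from any real
 * target): for an LMAC ring type with start_ring_id = 128 and
 * HAL_MAX_RINGS_PER_LMAC = 16, ring_num = 2 on mac_id = 1 resolves to
 * ring_id = 128 + 2 + (1 * 16) = 146; a non-LMAC ring with the same
 * start_ring_id and ring_num simply resolves to 130.
 */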

#ifndef SHADOW_REG_CONFIG_DISABLED
#define HP_OFFSET_IN_REG_START 1
#define OFFSET_FROM_HP_TO_TP 4
static void hal_update_srng_hp_tp_address(struct hal_soc *hal_soc,
					  int shadow_config_index,
					  int ring_type,
					  int ring_num)
{
	struct hal_srng *srng;
	int ring_id;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal_soc, ring_type);

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = hal_get_srng(hal_soc, ring_id);

	if (ring_config->ring_dir == HAL_SRNG_DST_RING) {
		srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal_soc->dev_base_addr;
		hal_debug("tp_addr=%pK dev base addr %pK index %u",
			  srng->u.dst_ring.tp_addr, hal_soc->dev_base_addr,
			  shadow_config_index);
	} else {
		srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal_soc->dev_base_addr;
		hal_debug("hp_addr=%pK dev base addr %pK index %u",
			  srng->u.src_ring.hp_addr,
			  hal_soc->dev_base_addr, shadow_config_index);
	}
}
#endif

#ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
void hal_set_one_target_reg_config(struct hal_soc *hal,
				   uint32_t target_reg_offset,
				   int list_index)
{
	int i = list_index;

	qdf_assert_always(i < MAX_GENERIC_SHADOW_REG);
	hal->list_shadow_reg_config[i].target_register =
		target_reg_offset;
	hal->num_generic_shadow_regs_configured++;
}

qdf_export_symbol(hal_set_one_target_reg_config);

#define REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET 0x4
#define MAX_REO_REMAP_SHADOW_REGS 4
QDF_STATUS hal_set_shadow_regs(void *hal_soc)
{
	uint32_t target_reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	int i;
	struct hal_hw_srng_config *srng_config =
		&hal->hw_srng_table[WBM2SW_RELEASE];
	uint32_t reo_reg_base;

	reo_reg_base = hal_get_reo_reg_base_offset(hal_soc);

	target_reg_offset =
		HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(reo_reg_base);

	for (i = 0; i < MAX_REO_REMAP_SHADOW_REGS; i++) {
		hal_set_one_target_reg_config(hal, target_reg_offset, i);
		target_reg_offset += REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET;
	}

	target_reg_offset = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_reg_offset += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
			      * HAL_IPA_TX_COMP_RING_IDX);

	hal_set_one_target_reg_config(hal, target_reg_offset, i);
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_shadow_regs);

QDF_STATUS hal_construct_shadow_regs(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	int shadow_config_index = hal->num_shadow_registers_configured;
	int i;
	int num_regs = hal->num_generic_shadow_regs_configured;

	for (i = 0; i < num_regs; i++) {
		qdf_assert_always(shadow_config_index < MAX_SHADOW_REGISTERS);
		hal->shadow_config[shadow_config_index].addr =
			hal->list_shadow_reg_config[i].target_register;
		hal->list_shadow_reg_config[i].shadow_config_index =
			shadow_config_index;
		hal->list_shadow_reg_config[i].va =
			SHADOW_REGISTER(shadow_config_index) +
			(uintptr_t)hal->dev_base_addr;
		hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x",
			  hal->shadow_config[shadow_config_index].addr,
			  SHADOW_REGISTER(shadow_config_index),
			  shadow_config_index);
		shadow_config_index++;
		hal->num_shadow_registers_configured++;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_shadow_regs);
#endif
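
/*
 * Note (editorial sketch): a shadow register is a host-visible alias of a
 * target register. SHADOW_REGISTER(n) yields the offset of shadow slot n
 * from the device base, so the host-usable virtual address is
 * dev_base_addr + SHADOW_REGISTER(n), while shadow_config[n].addr holds
 * the offset of the real target register that slot n mirrors.
 */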

#ifndef SHADOW_REG_CONFIG_DISABLED

QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
				     int ring_type,
				     int ring_num)
{
	uint32_t target_register;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type];
	int shadow_config_index = hal->num_shadow_registers_configured;

	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
		QDF_ASSERT(0);
		return QDF_STATUS_E_RESOURCES;
	}

	hal->num_shadow_registers_configured++;

	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
			    * ring_num);

	/* if the ring is a dst ring, we need to shadow the tail pointer */
	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
		target_register += OFFSET_FROM_HP_TO_TP;

	hal->shadow_config[shadow_config_index].addr = target_register;

	/* update hp/tp addr in the hal_soc structure */
	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
				      ring_num);

	hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x, ring_type %d, ring num %d",
		  target_register,
		  SHADOW_REGISTER(shadow_config_index),
		  shadow_config_index,
		  ring_type, ring_num);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_one_shadow_config);

QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc)
{
	int ring_type, ring_num;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
		struct hal_hw_srng_config *srng_config =
			&hal->hw_srng_table[ring_type];

		if (ring_type == CE_SRC ||
		    ring_type == CE_DST ||
		    ring_type == CE_DST_STATUS)
			continue;

		if (srng_config->lmac_ring)
			continue;

		for (ring_num = 0; ring_num < srng_config->max_rings;
		     ring_num++)
			hal_set_one_shadow_config(hal_soc, ring_type,
						  ring_num);
	}

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_srng_shadow_regs);
#else

QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc)
{
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_srng_shadow_regs);

QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
				     int ring_num)
{
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_one_shadow_config);
#endif

void hal_get_shadow_config(void *hal_soc,
			   struct pld_shadow_reg_v2_cfg **shadow_config,
			   int *num_shadow_registers_configured)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	*shadow_config = &hal->shadow_config[0].v2;
	*num_shadow_registers_configured =
		hal->num_shadow_registers_configured;
}

qdf_export_symbol(hal_get_shadow_config);

#ifdef CONFIG_SHADOW_V3
void hal_get_shadow_v3_config(void *hal_soc,
			      struct pld_shadow_reg_v3_cfg **shadow_config,
			      int *num_shadow_registers_configured)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	*shadow_config = &hal->shadow_config[0].v3;
	*num_shadow_registers_configured =
		hal->num_shadow_registers_configured;
}

qdf_export_symbol(hal_get_shadow_v3_config);
#endif

static bool hal_validate_shadow_register(struct hal_soc *hal,
					 uint32_t *destination,
					 uint32_t *shadow_address)
{
	unsigned int index;
	uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr;
	int destination_ba_offset =
		((char *)destination) - (char *)hal->dev_base_addr;

	index = shadow_address - shadow_0_offset;

	if (index >= MAX_SHADOW_REGISTERS) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: index %x out of bounds", __func__, index);
		goto error;
	} else if (hal->shadow_config[index].addr != destination_ba_offset) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: sanity check failure, expected %x, found %x",
			  __func__, destination_ba_offset,
			  hal->shadow_config[index].addr);
		goto error;
	}
	return true;
error:
	qdf_print("baddr %pK, destination %pK, shadow_address %pK s0offset %pK index %x",
		  hal->dev_base_addr, destination, shadow_address,
		  shadow_0_offset, index);
	QDF_BUG(0);
	return false;
}
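
/*
 * Note (editorial sketch): hal_validate_shadow_register() recovers the
 * shadow slot from the mapped address by pointer arithmetic on uint32_t
 * pointers, so `shadow_address - shadow_0_offset' counts 4-byte
 * registers, not bytes; e.g. a shadow_address 8 bytes past shadow slot 0
 * validates against shadow_config[2].
 */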

static void hal_target_based_configure(struct hal_soc *hal)
{
	/*
	 * Indicate initialization of srngs to avoid force wake
	 * as umac power collapse is not enabled yet
	 */
	hal->init_phase = true;

	switch (hal->target_type) {
#ifdef QCA_WIFI_QCA6290
	case TARGET_TYPE_QCA6290:
		hal->use_register_windowing = true;
		hal_qca6290_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		hal->use_register_windowing = true;
		hal_qca6390_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6490
	case TARGET_TYPE_QCA6490:
		hal->use_register_windowing = true;
		hal_qca6490_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6750
	case TARGET_TYPE_QCA6750:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_qca6750_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_KIWI
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
		hal->use_register_windowing = true;
		hal_kiwi_attach(hal);
		break;
#endif
#if defined(QCA_WIFI_QCA8074) && defined(WIFI_TARGET_TYPE_3_0)
	case TARGET_TYPE_QCA8074:
		hal_qca8074_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA8074V2)
	case TARGET_TYPE_QCA8074V2:
		hal_qca8074v2_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA6018)
	case TARGET_TYPE_QCA6018:
		hal_qca8074v2_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA9574)
	case TARGET_TYPE_QCA9574:
		hal_qca8074v2_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCN6122)
	case TARGET_TYPE_QCN6122:
		hal->use_register_windowing = true;
		/*
		 * Static window map is enabled for qcn6122 to use 2mb bar
		 * size and use multiple windows to write into registers.
		 */
		hal->static_window_map = true;
		hal_qcn6122_attach(hal);
		break;
#endif

#ifdef QCA_WIFI_QCN9000
	case TARGET_TYPE_QCN9000:
		hal->use_register_windowing = true;
		/*
		 * Static window map is enabled for qcn9000 to use 2mb bar
		 * size and use multiple windows to write into registers.
		 */
		hal->static_window_map = true;
		hal_qcn9000_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA5018
	case TARGET_TYPE_QCA5018:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_qca5018_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCN9224
	case TARGET_TYPE_QCN9224:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		if (hal->version == 1)
			hal_qcn9224v1_attach(hal);
		else
			hal_qcn9224v2_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA5332
	case TARGET_TYPE_QCA5332:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_qca5332_attach(hal);
		break;
#endif
	default:
		break;
	}
}

uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hif_target_info *tgt_info =
		hif_get_target_info_handle(hal_soc->hif_handle);

	return tgt_info->target_type;
}

qdf_export_symbol(hal_get_target_type);

#if defined(FEATURE_HAL_DELAYED_REG_WRITE)
/**
 * hal_is_reg_write_tput_level_high() - throughput level for delayed reg writes
 * @hal: hal_soc pointer
 *
 * Return: true if throughput is high, else false.
 */
static inline bool hal_is_reg_write_tput_level_high(struct hal_soc *hal)
{
	int bw_level = hif_get_bandwidth_level(hal->hif_handle);

	return (bw_level >= PLD_BUS_WIDTH_MEDIUM) ? true : false;
}

static inline
char *hal_fill_reg_write_srng_stats(struct hal_srng *srng,
				    char *buf, qdf_size_t size)
{
	qdf_scnprintf(buf, size, "enq %u deq %u coal %u direct %u",
		      srng->wstats.enqueues, srng->wstats.dequeues,
		      srng->wstats.coalesces, srng->wstats.direct);
	return buf;
}

/* bytes for local buffer */
#define HAL_REG_WRITE_SRNG_STATS_LEN 100

void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_srng *srng;
	char buf[HAL_REG_WRITE_SRNG_STATS_LEN];
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	srng = hal_get_srng(hal, HAL_SRNG_SW2TCL1);
	hal_debug("SW2TCL1: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_WBM2SW0_RELEASE);
	hal_debug("WBM2SW0: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_REO2SW1);
	hal_debug("REO2SW1: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_REO2SW2);
	hal_debug("REO2SW2: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_REO2SW3);
	hal_debug("REO2SW3: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
}

void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
{
	uint32_t *hist;
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	hist = hal->stats.wstats.sched_delay;
	hal_debug("wstats: enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
		  qdf_atomic_read(&hal->stats.wstats.enqueues),
		  hal->stats.wstats.dequeues,
		  qdf_atomic_read(&hal->stats.wstats.coalesces),
		  qdf_atomic_read(&hal->stats.wstats.direct),
		  qdf_atomic_read(&hal->stats.wstats.q_depth),
		  hal->stats.wstats.max_q_depth,
		  hist[REG_WRITE_SCHED_DELAY_SUB_100us],
		  hist[REG_WRITE_SCHED_DELAY_SUB_1000us],
		  hist[REG_WRITE_SCHED_DELAY_SUB_5000us],
		  hist[REG_WRITE_SCHED_DELAY_GT_5000us]);
}

int hal_get_reg_write_pending_work(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	return qdf_atomic_read(&hal->active_work_cnt);
}

#endif

#ifdef FEATURE_HAL_DELAYED_REG_WRITE
#ifdef MEMORY_DEBUG
/*
 * Length of the queue(array) used to hold delayed register writes.
 * Must be a power of 2, since the read/write indices are wrapped by
 * masking with (HAL_REG_WRITE_QUEUE_LEN - 1).
 */
#define HAL_REG_WRITE_QUEUE_LEN 128
#else
#define HAL_REG_WRITE_QUEUE_LEN 32
#endif
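
/*
 * Note: with HAL_REG_WRITE_QUEUE_LEN a power of 2, the cheap wrap
 * `idx & (HAL_REG_WRITE_QUEUE_LEN - 1)' used below is equivalent to
 * `idx % HAL_REG_WRITE_QUEUE_LEN'; e.g. with a length of 32, index 33
 * wraps to 1. A non-power-of-2 length would silently corrupt the queue.
 */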

/**
 * hal_process_reg_write_q_elem() - process a register write queue element
 * @hal: hal_soc pointer
 * @q_elem: pointer to hal register write queue element
 *
 * Return: The value which was written to the address
 */
static uint32_t
hal_process_reg_write_q_elem(struct hal_soc *hal,
			     struct hal_reg_write_q_elem *q_elem)
{
	struct hal_srng *srng = q_elem->srng;
	uint32_t write_val;

	SRNG_LOCK(&srng->lock);

	srng->reg_write_in_progress = false;
	srng->wstats.dequeues++;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		q_elem->dequeue_val = srng->u.src_ring.hp;
		hal_write_address_32_mb(hal,
					srng->u.src_ring.hp_addr,
					srng->u.src_ring.hp, false);
		write_val = srng->u.src_ring.hp;
	} else {
		q_elem->dequeue_val = srng->u.dst_ring.tp;
		hal_write_address_32_mb(hal,
					srng->u.dst_ring.tp_addr,
					srng->u.dst_ring.tp, false);
		write_val = srng->u.dst_ring.tp;
	}

	q_elem->valid = 0;
	srng->last_dequeue_time = q_elem->dequeue_time;
	SRNG_UNLOCK(&srng->lock);

	return write_val;
}

/**
 * hal_reg_write_fill_sched_delay_hist() - fill reg write delay histogram in hal
 * @hal: hal_soc pointer
 * @delay_us: delay in microseconds
 *
 * Return: None
 */
static inline void hal_reg_write_fill_sched_delay_hist(struct hal_soc *hal,
						       uint64_t delay_us)
{
	uint32_t *hist;

	hist = hal->stats.wstats.sched_delay;

	if (delay_us < 100)
		hist[REG_WRITE_SCHED_DELAY_SUB_100us]++;
	else if (delay_us < 1000)
		hist[REG_WRITE_SCHED_DELAY_SUB_1000us]++;
	else if (delay_us < 5000)
		hist[REG_WRITE_SCHED_DELAY_SUB_5000us]++;
	else
		hist[REG_WRITE_SCHED_DELAY_GT_5000us]++;
}
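
/*
 * Example (illustrative): the histogram buckets are upper bounds, so a
 * scheduling delay of 750 us lands in SUB_1000us (not SUB_100us), and
 * anything at or above 5000 us is counted in GT_5000us.
 */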

#ifdef SHADOW_WRITE_DELAY

#define SHADOW_WRITE_MIN_DELTA_US 5
#define SHADOW_WRITE_DELAY_US 50

/*
 * Never add srngs which are performance critical here.
 * The delay itself will hit performance heavily.
 */
#define IS_SRNG_MATCH(s)	((s)->ring_id == HAL_SRNG_CE_1_DST_STATUS || \
				 (s)->ring_id == HAL_SRNG_CE_1_DST)

static inline bool hal_reg_write_need_delay(struct hal_reg_write_q_elem *elem)
{
	struct hal_srng *srng = elem->srng;
	struct hal_soc *hal;
	qdf_time_t now;
	qdf_iomem_t real_addr;

	if (qdf_unlikely(!srng))
		return false;

	hal = srng->hal_soc;
	if (qdf_unlikely(!hal))
		return false;

	/* Check if it is target srng, and valid shadow reg */
	if (qdf_likely(!IS_SRNG_MATCH(srng)))
		return false;

	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		real_addr = SRNG_SRC_ADDR(srng, HP);
	else
		real_addr = SRNG_DST_ADDR(srng, TP);
	if (!hal_validate_shadow_register(hal, real_addr, elem->addr))
		return false;

	/* Check the time delta from last write of same srng */
	now = qdf_get_log_timestamp();
	if (qdf_log_timestamp_to_usecs(now - srng->last_dequeue_time) >
	    SHADOW_WRITE_MIN_DELTA_US)
		return false;

	/* Delay dequeue, and record */
	qdf_udelay(SHADOW_WRITE_DELAY_US);

	srng->wstats.dequeue_delay++;
	hal->stats.wstats.dequeue_delay++;

	return true;
}
#else
static inline bool hal_reg_write_need_delay(struct hal_reg_write_q_elem *elem)
{
	return false;
}
#endif

/**
 * hal_reg_write_work() - Worker to process delayed writes
 * @arg: hal_soc pointer
 *
 * Return: None
 */
static void hal_reg_write_work(void *arg)
{
	int32_t q_depth, write_val;
	struct hal_soc *hal = arg;
	struct hal_reg_write_q_elem *q_elem;
	uint64_t delta_us;
	uint8_t ring_id;
	uint32_t *addr;
	uint32_t num_processed = 0;

	q_elem = &hal->reg_write_queue[(hal->read_idx)];
	q_elem->work_scheduled_time = qdf_get_log_timestamp();
	q_elem->cpu_id = qdf_get_cpu();

	/* Make sure q_elem consistent in the memory for multi-cores */
	qdf_rmb();
	if (!q_elem->valid)
		return;

	q_depth = qdf_atomic_read(&hal->stats.wstats.q_depth);
	if (q_depth > hal->stats.wstats.max_q_depth)
		hal->stats.wstats.max_q_depth = q_depth;

	if (hif_prevent_link_low_power_states(hal->hif_handle)) {
		hal->stats.wstats.prevent_l1_fails++;
		return;
	}

	while (true) {
		qdf_rmb();
		if (!q_elem->valid)
			break;

		q_elem->dequeue_time = qdf_get_log_timestamp();
		ring_id = q_elem->srng->ring_id;
		addr = q_elem->addr;
		delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
						      q_elem->enqueue_time);
		hal_reg_write_fill_sched_delay_hist(hal, delta_us);

		hal->stats.wstats.dequeues++;
		qdf_atomic_dec(&hal->stats.wstats.q_depth);

		if (hal_reg_write_need_delay(q_elem))
			hal_verbose_debug("Delay reg writer for srng 0x%x, addr 0x%pK",
					  q_elem->srng->ring_id, q_elem->addr);

		write_val = hal_process_reg_write_q_elem(hal, q_elem);
		hal_verbose_debug("read_idx %u srng 0x%x, addr 0x%pK dequeue_val %u sched delay %llu us",
				  hal->read_idx, ring_id, addr, write_val,
				  delta_us);

		qdf_trace_dp_del_reg_write(ring_id, q_elem->enqueue_val,
					   q_elem->dequeue_val,
					   q_elem->enqueue_time,
					   q_elem->dequeue_time);

		num_processed++;
		hal->read_idx = (hal->read_idx + 1) &
				(HAL_REG_WRITE_QUEUE_LEN - 1);
		q_elem = &hal->reg_write_queue[(hal->read_idx)];
	}

	hif_allow_link_low_power_states(hal->hif_handle);
	/*
	 * Decrement active_work_cnt by the number of elements dequeued after
	 * hif_allow_link_low_power_states.
	 * This makes sure that hif_try_complete_tasks will wait till we make
	 * the bus access in hif_allow_link_low_power_states. This will avoid
	 * race condition between delayed register worker and bus suspend
	 * (system suspend or runtime suspend).
	 *
	 * The following decrement should be done at the end!
	 */
	qdf_atomic_sub(num_processed, &hal->active_work_cnt);
}
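
/*
 * Overview (editorial, summarizing the code above and below):
 * hal_delayed_reg_write() either writes the register directly or calls
 * hal_reg_write_enqueue(), which fills a queue element and kicks
 * reg_write_work. hal_reg_write_work() then drains valid elements in
 * order, issuing the actual MMIO writes via
 * hal_process_reg_write_q_elem() while the link is held out of low
 * power states.
 */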

static void __hal_flush_reg_write_work(struct hal_soc *hal)
{
	qdf_flush_work(&hal->reg_write_work);
	qdf_disable_work(&hal->reg_write_work);
}

void hal_flush_reg_write_work(hal_soc_handle_t hal_handle)
{
	__hal_flush_reg_write_work((struct hal_soc *)hal_handle);
}

/**
 * hal_reg_write_enqueue() - enqueue register writes into kworker
 * @hal_soc: hal_soc pointer
 * @srng: srng pointer
 * @addr: iomem address of register
 * @value: value to be written to iomem address
 *
 * This function executes from within the SRNG LOCK
 *
 * Return: None
 */
static void hal_reg_write_enqueue(struct hal_soc *hal_soc,
				  struct hal_srng *srng,
				  void __iomem *addr,
				  uint32_t value)
{
	struct hal_reg_write_q_elem *q_elem;
	uint32_t write_idx;

	if (srng->reg_write_in_progress) {
		hal_verbose_debug("Already in progress srng ring id 0x%x addr 0x%pK val %u",
				  srng->ring_id, addr, value);
		qdf_atomic_inc(&hal_soc->stats.wstats.coalesces);
		srng->wstats.coalesces++;
		return;
	}

	write_idx = qdf_atomic_inc_return(&hal_soc->write_idx);

	write_idx = write_idx & (HAL_REG_WRITE_QUEUE_LEN - 1);

	q_elem = &hal_soc->reg_write_queue[write_idx];

	if (q_elem->valid) {
		hal_err("queue full");
		QDF_BUG(0);
		return;
	}

	qdf_atomic_inc(&hal_soc->stats.wstats.enqueues);
	srng->wstats.enqueues++;

	qdf_atomic_inc(&hal_soc->stats.wstats.q_depth);

	q_elem->srng = srng;
	q_elem->addr = addr;
	q_elem->enqueue_val = value;
	q_elem->enqueue_time = qdf_get_log_timestamp();

	/*
	 * Before the valid flag is set to true, all the other
	 * fields in the q_elem need to be updated in memory.
	 * Else there is a chance that the dequeuing worker thread
	 * might read stale entries and process an incorrect srng.
	 */
	qdf_wmb();
	q_elem->valid = true;

	/*
	 * After all other fields in the q_elem have been updated
	 * in memory successfully, the valid flag needs to be updated
	 * in memory in time too.
	 * Else there is a chance that the dequeuing worker thread
	 * might read a stale valid flag and the work will be bypassed
	 * for this round. And if there is no other work scheduled
	 * later, this hal register write won't be updated any more.
	 */
	qdf_wmb();

	srng->reg_write_in_progress = true;
	qdf_atomic_inc(&hal_soc->active_work_cnt);

	hal_verbose_debug("write_idx %u srng ring id 0x%x addr 0x%pK val %u",
			  write_idx, srng->ring_id, addr, value);

	qdf_queue_work(hal_soc->qdf_dev, hal_soc->reg_write_wq,
		       &hal_soc->reg_write_work);
}
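
/*
 * Note: the qdf_wmb() before `q_elem->valid = true' above pairs with the
 * qdf_rmb() in hal_reg_write_work(); together they guarantee the worker
 * never observes valid == true while srng/addr/value are still stale on
 * another CPU.
 */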

/**
 * hal_delayed_reg_write_init() - Initialization function for delayed reg writes
 * @hal: hal_soc pointer
 *
 * Initialize main data structures to process register writes in a delayed
 * workqueue.
 *
 * Return: QDF_STATUS_SUCCESS on success else a QDF error.
 */
static QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
{
	hal->reg_write_wq =
		qdf_alloc_high_prior_ordered_workqueue("hal_register_write_wq");
	qdf_create_work(0, &hal->reg_write_work, hal_reg_write_work, hal);
	hal->reg_write_queue = qdf_mem_malloc(HAL_REG_WRITE_QUEUE_LEN *
					      sizeof(*hal->reg_write_queue));
	if (!hal->reg_write_queue) {
		hal_err("unable to allocate memory");
		QDF_BUG(0);
		return QDF_STATUS_E_NOMEM;
	}

	/* Initial value of indices */
	hal->read_idx = 0;
	qdf_atomic_set(&hal->write_idx, -1);
	return QDF_STATUS_SUCCESS;
}

/**
 * hal_delayed_reg_write_deinit() - De-Initialize delayed reg write processing
 * @hal: hal_soc pointer
 *
 * De-initialize main data structures to process register writes in a delayed
 * workqueue.
 *
 * Return: None
 */
static void hal_delayed_reg_write_deinit(struct hal_soc *hal)
{
	__hal_flush_reg_write_work(hal);

	qdf_flush_workqueue(0, hal->reg_write_wq);
	qdf_destroy_workqueue(0, hal->reg_write_wq);
	qdf_mem_free(hal->reg_write_queue);
}

#else
static inline QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
{
	return QDF_STATUS_SUCCESS;
}

static inline void hal_delayed_reg_write_deinit(struct hal_soc *hal)
{
}
#endif

#ifdef FEATURE_HAL_DELAYED_REG_WRITE
#ifdef QCA_WIFI_QCA6750
void hal_delayed_reg_write(struct hal_soc *hal_soc,
			   struct hal_srng *srng,
			   void __iomem *addr,
			   uint32_t value)
{
	uint8_t vote_access;

	switch (srng->ring_type) {
	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		vote_access = hif_get_ep_vote_access(hal_soc->hif_handle,
						     HIF_EP_VOTE_NONDP_ACCESS);
		if ((vote_access == HIF_EP_VOTE_ACCESS_DISABLE) ||
		    (vote_access == HIF_EP_VOTE_INTERMEDIATE_ACCESS &&
		     PLD_MHI_STATE_L0 ==
		     pld_get_mhi_state(hal_soc->qdf_dev->dev))) {
			hal_write_address_32_mb(hal_soc, addr, value, false);
			qdf_atomic_inc(&hal_soc->stats.wstats.direct);
			srng->wstats.direct++;
		} else {
			hal_reg_write_enqueue(hal_soc, srng, addr, value);
		}
		break;
	default:
		if (hif_get_ep_vote_access(hal_soc->hif_handle,
					   HIF_EP_VOTE_DP_ACCESS) ==
		    HIF_EP_VOTE_ACCESS_DISABLE ||
		    hal_is_reg_write_tput_level_high(hal_soc) ||
		    PLD_MHI_STATE_L0 ==
		    pld_get_mhi_state(hal_soc->qdf_dev->dev)) {
			hal_write_address_32_mb(hal_soc, addr, value, false);
			qdf_atomic_inc(&hal_soc->stats.wstats.direct);
			srng->wstats.direct++;
		} else {
			hal_reg_write_enqueue(hal_soc, srng, addr, value);
		}

		break;
	}
}
#else
void hal_delayed_reg_write(struct hal_soc *hal_soc,
			   struct hal_srng *srng,
			   void __iomem *addr,
			   uint32_t value)
{
	if (hal_is_reg_write_tput_level_high(hal_soc) ||
	    pld_is_device_awake(hal_soc->qdf_dev->dev)) {
		qdf_atomic_inc(&hal_soc->stats.wstats.direct);
		srng->wstats.direct++;
		hal_write_address_32_mb(hal_soc, addr, value, false);
	} else {
		hal_reg_write_enqueue(hal_soc, srng, addr, value);
	}
}
#endif
#endif
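
/*
 * Summary (editorial) of the direct-vs-delayed decision above: a write
 * goes out immediately when the bus is known to be usable (device awake,
 * MHI link in L0, or endpoint vote access disabled) or when throughput
 * is high enough that batching would only add latency; otherwise it is
 * queued so the worker can wake the link once for many writes.
 */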

/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle
 *	   NULL on failure
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev)
{
	struct hal_soc *hal;
	int i;

	hal = qdf_mem_malloc(sizeof(*hal));

	if (!hal) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal_soc allocation failed", __func__);
		goto fail0;
	}
	hal->hif_handle = hif_handle;
	hal->dev_base_addr = hif_get_dev_ba(hif_handle); /* UMAC */
	hal->dev_base_addr_ce = hif_get_dev_ba_ce(hif_handle); /* CE */
	hal->qdf_dev = qdf_dev;
	hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent(
		qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
		HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr));
	if (!hal->shadow_rdptr_mem_paddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal->shadow_rdptr_mem_paddr allocation failed",
			  __func__);
		goto fail1;
	}
	qdf_mem_zero(hal->shadow_rdptr_mem_vaddr,
		     sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX);

	hal->shadow_wrptr_mem_vaddr =
		(uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		&(hal->shadow_wrptr_mem_paddr));
	if (!hal->shadow_wrptr_mem_vaddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal->shadow_wrptr_mem_vaddr allocation failed",
			  __func__);
		goto fail2;
	}
	qdf_mem_zero(hal->shadow_wrptr_mem_vaddr,
		     sizeof(*(hal->shadow_wrptr_mem_vaddr)) *
		     HAL_MAX_LMAC_RINGS);

	for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
		hal->srng_list[i].initialized = 0;
		hal->srng_list[i].ring_id = i;
	}

	qdf_spinlock_create(&hal->register_access_lock);
	hal->register_window = 0;
	hal->target_type = hal_get_target_type(hal_soc_to_hal_soc_handle(hal));
	hal->version = hif_get_soc_version(hif_handle);
	hal->ops = qdf_mem_malloc(sizeof(*hal->ops));

	if (!hal->ops) {
		hal_err("unable to allocate memory for HAL ops");
		goto fail3;
	}

	hal_target_based_configure(hal);

	hal_reg_write_fail_history_init(hal);

	qdf_minidump_log(hal, sizeof(*hal), "hal_soc");

	qdf_atomic_init(&hal->active_work_cnt);
	hal_delayed_reg_write_init(hal);

	hal_reo_shared_qaddr_setup((hal_soc_handle_t)hal);

	hif_rtpm_register(HIF_RTPM_ID_HAL_REO_CMD, NULL);

	return (void *)hal;
fail3:
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
				sizeof(*hal->shadow_wrptr_mem_vaddr) *
				HAL_MAX_LMAC_RINGS,
				hal->shadow_wrptr_mem_vaddr,
				hal->shadow_wrptr_mem_paddr, 0);
fail2:
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
				sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
				HAL_SRNG_ID_MAX,
				hal->shadow_rdptr_mem_vaddr,
				hal->shadow_rdptr_mem_paddr, 0);
fail1:
	qdf_mem_free(hal);
fail0:
	return NULL;
}

qdf_export_symbol(hal_attach);
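
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 *
 *	void *hal = hal_attach(hif_handle, qdf_dev);
 *
 *	if (!hal)
 *		return QDF_STATUS_E_FAILURE;
 *	... use rings via hal_srng_setup()/hal_srng_cleanup() ...
 *	hal_detach(hal);
 *
 * In the driver proper, HIF owns this call and the DP layer retrieves
 * the handle with hif_get_hal_handle().
 */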

/**
 * hal_get_meminfo - Retrieve hal memory base address
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	mem->dev_base_addr = (void *)hal->dev_base_addr;
	mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
	mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
	mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
	mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
	hif_read_phy_mem_base((void *)hal->hif_handle,
			      (qdf_dma_addr_t *)&mem->dev_base_paddr);
	mem->lmac_srng_start_id = HAL_SRNG_LMAC1_ID_START;
}

qdf_export_symbol(hal_get_meminfo);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF de-initialization, after
 * all rings have been cleaned up.
 *
 * Return: None
 */
extern void hal_detach(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	hif_rtpm_deregister(HIF_RTPM_ID_HAL_REO_CMD);
	hal_delayed_reg_write_deinit(hal);
	hal_reo_shared_qaddr_detach((hal_soc_handle_t)hal);
	qdf_mem_free(hal->ops);

	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
				sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
				HAL_SRNG_ID_MAX,
				hal->shadow_rdptr_mem_vaddr,
				hal->shadow_rdptr_mem_paddr, 0);
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
				sizeof(*(hal->shadow_wrptr_mem_vaddr)) *
				HAL_MAX_LMAC_RINGS,
				hal->shadow_wrptr_mem_vaddr,
				hal->shadow_wrptr_mem_paddr, 0);
	qdf_minidump_remove(hal, sizeof(*hal), "hal_soc");

	qdf_mem_free(hal);
}

qdf_export_symbol(hal_detach);

#define HAL_CE_CHANNEL_DST_DEST_CTRL_ADDR(x)	((x) + 0x000000b0)
#define HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK	0x0000ffff
#define HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_ADDR(x) \
	((x) + 0x00000040)
#define HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_RMSK	0x00000007
/**
 * hal_ce_dst_setup - Initialize CE destination ring registers
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @ring_num: ring number
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
				    int ring_num)
{
	uint32_t reg_val = 0;
	uint32_t reg_addr;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, CE_DST);

	/* set DEST_MAX_LENGTH according to ce assignment */
	reg_addr = HAL_CE_CHANNEL_DST_DEST_CTRL_ADDR(
			ring_config->reg_start[R0_INDEX] +
			(ring_num * ring_config->reg_size[R0_INDEX]));

	reg_val = HAL_REG_READ(hal, reg_addr);
	reg_val &= ~HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	reg_val |= srng->u.dst_ring.max_buffer_length &
		   HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	HAL_REG_WRITE(hal, reg_addr, reg_val);

	if (srng->prefetch_timer) {
		reg_addr = HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_ADDR(
				ring_config->reg_start[R0_INDEX] +
				(ring_num * ring_config->reg_size[R0_INDEX]));

		reg_val = HAL_REG_READ(hal, reg_addr);
		reg_val &= ~HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_RMSK;
		reg_val |= srng->prefetch_timer;
		HAL_REG_WRITE(hal, reg_addr, reg_val);
		reg_val = HAL_REG_READ(hal, reg_addr);
	}
}

/**
 * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
 * @hal_soc_hdl: HAL SOC handle
 * @read: boolean value to indicate if read or write
 * @ix0: pointer to store IX0 reg value
 * @ix1: pointer to store IX1 reg value
 * @ix2: pointer to store IX2 reg value
 * @ix3: pointer to store IX3 reg value
 */
void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
				uint32_t *ix0, uint32_t *ix1,
				uint32_t *ix2, uint32_t *ix3)
{
	uint32_t reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
	uint32_t reo_reg_base;

	reo_reg_base = hal_get_reo_reg_base_offset(hal_soc_hdl);

	if (read) {
		if (ix0) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(
						reo_reg_base);
			*ix0 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix1) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(
						reo_reg_base);
			*ix1 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix2) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(
						reo_reg_base);
			*ix2 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix3) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(
						reo_reg_base);
			*ix3 = HAL_REG_READ(hal, reg_offset);
		}
	} else {
		if (ix0) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix0, true);
		}

		if (ix1) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix1, true);
		}

		if (ix2) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix2, true);
		}

		if (ix3) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix3, true);
		}
	}
}

qdf_export_symbol(hal_reo_read_write_ctrl_ix);
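
/*
 * Illustrative read-modify-write sketch (hypothetical caller; the value
 * `new_remap_value' is an assumption for the example): to change only
 * IX0, read it first and write it back, passing NULL to skip the other
 * IX registers:
 *
 *	uint32_t ix0;
 *
 *	hal_reo_read_write_ctrl_ix(hal_soc_hdl, true, &ix0,
 *				   NULL, NULL, NULL);
 *	ix0 = new_remap_value;
 *	hal_reo_read_write_ctrl_ix(hal_soc_hdl, false, &ix0,
 *				   NULL, NULL, NULL);
 */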

/**
 * hal_srng_dst_set_hp_paddr_confirm() - Set physical address to dest ring head
 *	pointer and confirm that write went through by reading back the value
 * @srng: srng pointer
 * @paddr: physical address
 *
 * Return: None
 */
void hal_srng_dst_set_hp_paddr_confirm(struct hal_srng *srng, uint64_t paddr)
{
	SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_LSB, paddr & 0xffffffff);
	SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_MSB, paddr >> 32);
}

qdf_export_symbol(hal_srng_dst_set_hp_paddr_confirm);

/**
 * hal_srng_dst_init_hp() - Initialize destination ring head pointer
 * @hal_soc: hal_soc handle
 * @srng: srng pointer
 * @vaddr: virtual address
 */
void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc,
			  struct hal_srng *srng,
			  uint32_t *vaddr)
{
	uint32_t reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (!srng)
		return;

	srng->u.dst_ring.hp_addr = vaddr;
	reg_offset = SRNG_DST_ADDR(srng, HP) - hal->dev_base_addr;
	HAL_REG_WRITE_CONFIRM_RETRY(
		hal, reg_offset, srng->u.dst_ring.cached_hp, true);

	if (vaddr) {
		*srng->u.dst_ring.hp_addr = srng->u.dst_ring.cached_hp;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "hp_addr=%pK, cached_hp=%d, hp=%d",
			  (void *)srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp,
			  *srng->u.dst_ring.hp_addr);
	}
}

qdf_export_symbol(hal_srng_dst_init_hp);

/**
 * hal_srng_hw_init - Private function to initialize SRNG HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @idle_check: Check if ring is idle
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
				    struct hal_srng *srng, bool idle_check)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		hal_srng_src_hw_init(hal, srng, idle_check);
	else
		hal_srng_dst_hw_init(hal, srng, idle_check);
}

#if defined(CONFIG_SHADOW_V2) || defined(CONFIG_SHADOW_V3)
#define ignore_shadow false
#define CHECK_SHADOW_REGISTERS true
#else
#define ignore_shadow true
#define CHECK_SHADOW_REGISTERS false
#endif

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * hal_srng_is_near_full_irq_supported() - Check if near full irq is
 *	supported on this SRNG
 * @hal_soc: HAL SoC handle
 * @ring_type: SRNG type
 * @ring_num: ring number
 *
 * Return: true, if near full irq is supported for this SRNG
 *	   false, if near full irq is not supported for this SRNG
 */
bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
					 int ring_type, int ring_num)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->nf_irq_support;
}

/**
 * hal_srng_set_msi2_params() - Set MSI2 params to SRNG data structure from
 *	ring params
 * @srng: SRNG handle
 * @ring_params: ring params for this SRNG
 *
 * Return: None
 */
static inline void
hal_srng_set_msi2_params(struct hal_srng *srng,
			 struct hal_srng_params *ring_params)
{
	srng->msi2_addr = ring_params->msi2_addr;
	srng->msi2_data = ring_params->msi2_data;
}

/**
 * hal_srng_get_nf_params() - Get the near full MSI2 params from srng
 * @srng: SRNG handle
 * @ring_params: ring params for this SRNG
 *
 * Return: None
 */
static inline void
hal_srng_get_nf_params(struct hal_srng *srng,
		       struct hal_srng_params *ring_params)
{
	ring_params->msi2_addr = srng->msi2_addr;
	ring_params->msi2_data = srng->msi2_data;
}

/**
 * hal_srng_set_nf_thresholds() - Set the near full thresholds in SRNG
 * @srng: SRNG handle where the params are to be set
 * @ring_params: ring params, from where threshold is to be fetched
 *
 * Return: None
 */
static inline void
hal_srng_set_nf_thresholds(struct hal_srng *srng,
			   struct hal_srng_params *ring_params)
{
	srng->u.dst_ring.nf_irq_support = ring_params->nf_irq_support;
	srng->u.dst_ring.high_thresh = ring_params->high_thresh;
}
#else
static inline void
hal_srng_set_msi2_params(struct hal_srng *srng,
			 struct hal_srng_params *ring_params)
{
}

static inline void
hal_srng_get_nf_params(struct hal_srng *srng,
		       struct hal_srng_params *ring_params)
{
}

static inline void
hal_srng_set_nf_thresholds(struct hal_srng *srng,
			   struct hal_srng_params *ring_params)
{
}
#endif

#if defined(CLEAR_SW2TCL_CONSUMED_DESC)
/**
 * hal_srng_last_desc_cleared_init - Initialize SRNG last_desc_cleared ptr
 * @srng: Source ring pointer
 *
 * Return: None
 */
static inline
void hal_srng_last_desc_cleared_init(struct hal_srng *srng)
{
	srng->last_desc_cleared = srng->ring_size - srng->entry_size;
}

#else
static inline
void hal_srng_last_desc_cleared_init(struct hal_srng *srng)
{
}
#endif /* CLEAR_SW2TCL_CONSUMED_DESC */

#ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
static inline void hal_srng_update_high_wm_thresholds(struct hal_srng *srng)
{
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100] =
		((srng->num_entries * 90) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90] =
		((srng->num_entries * 80) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80] =
		((srng->num_entries * 70) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70] =
		((srng->num_entries * 60) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60] =
		((srng->num_entries * 50) / 100);
	/* Below 50% threshold is not needed */
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT] = 0;

	hal_info("ring_id: %u, wm_thresh- <50:%u, 50-60:%u, 60-70:%u, 70-80:%u, 80-90:%u, 90-100:%u",
		 srng->ring_id,
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100]);
}
#else
static inline void hal_srng_update_high_wm_thresholds(struct hal_srng *srng)
{
}
#endif

/**
 * hal_srng_setup - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 *	from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 * @idle_check: Check if ring is idle
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure. Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *	   NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
		     int mac_id, struct hal_srng_params *ring_params,
		     bool idle_check)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	hal_verbose_debug("mac_id %d ring_id %d", mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->initialized) {
		hal_verbose_debug("Ring (ring_type, ring_num) already initialized");
		return NULL;
	}

	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_type = ring_type;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	srng->ring_size = srng->num_entries * srng->entry_size;
	srng->ring_size_mask = srng->ring_size - 1;
	srng->ring_vaddr_end = srng->ring_base_vaddr + srng->ring_size;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	if (!idle_check)
		srng->prefetch_timer = ring_params->prefetch_timer;
	srng->hal_soc = hal_soc;
	hal_srng_set_msi2_params(srng, ring_params);
	hal_srng_update_high_wm_thresholds(srng);

	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
			+ (ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
		     srng->num_entries) << 2);

	srng->flags = ring_params->flags;

	/* For cached descriptors flush and invalidate the memory */
	if (srng->flags & HAL_SRNG_CACHED_DESC) {
		qdf_nbuf_dma_clean_range(
			srng->ring_base_vaddr,
			srng->ring_base_vaddr +
			((srng->entry_size * srng->num_entries)));
		qdf_nbuf_dma_inv_range(
			srng->ring_base_vaddr,
			srng->ring_base_vaddr +
			((srng->entry_size * srng->num_entries)));
	}
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should get these flags from caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	hal_srng_last_desc_cleared_init(srng);

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;

		if (srng->u.src_ring.tp_addr)
			qdf_mem_zero(srng->u.src_ring.tp_addr,
				     sizeof(*hal->shadow_rdptr_mem_vaddr));

		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;

			if (srng->u.src_ring.hp_addr)
				qdf_mem_zero(srng->u.src_ring.hp_addr,
					     sizeof(*hal->shadow_wrptr_mem_vaddr));

		} else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
			srng->u.src_ring.hp_addr =
				hal_get_window_address(hal,
						       SRNG_SRC_ADDR(srng, HP));

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_SRC_ADDR(srng, HP),
						     srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		hal_srng_set_nf_thresholds(srng, ring_params);
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);

		if (srng->u.dst_ring.hp_addr)
			qdf_mem_zero(srng->u.dst_ring.hp_addr,
				     sizeof(*hal->shadow_rdptr_mem_vaddr));

		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;

			if (srng->u.dst_ring.tp_addr)
				qdf_mem_zero(srng->u.dst_ring.tp_addr,
					     sizeof(*hal->shadow_wrptr_mem_vaddr));

		} else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
			srng->u.dst_ring.tp_addr =
				hal_get_window_address(hal,
						       SRNG_DST_ADDR(srng, TP));

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_DST_ADDR(srng, TP),
						     srng->u.dst_ring.tp_addr);
		}
	}

	if (!(ring_config->lmac_ring)) {
		hal_srng_hw_init(hal, srng, idle_check);

		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length =
				ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	srng->srng_event = 0;

	srng->initialized = true;

	return (void *)srng;
}

qdf_export_symbol(hal_srng_setup);
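
/*
 * Illustrative setup sketch (hypothetical caller; `ring_mem_vaddr',
 * `ring_mem_paddr' and `n_entries' are assumptions for the example):
 *
 *	struct hal_srng_params params = {0};
 *
 *	params.ring_base_vaddr = ring_mem_vaddr;	// 8-byte aligned
 *	params.ring_base_paddr = ring_mem_paddr;
 *	params.num_entries = n_entries;
 *	srng = hal_srng_setup(hal, REO_DST, 0, 0, &params, false);
 *
 * The ring memory must hold n_entries * entry_size bytes; callers size
 * the allocation with hal_srng_get_entrysize() (defined below).
 */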

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Opaque HAL SRNG pointer
 */
void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	SRNG_LOCK_DESTROY(&srng->lock);
	srng->initialized = 0;
	hal_srng_hw_disable(hal_soc, srng);
}

qdf_export_symbol(hal_srng_cleanup);

/**
 * hal_srng_get_entrysize - Returns size of ring entry in bytes
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: ring entry size in bytes
 */
uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->entry_size << 2;
}

qdf_export_symbol(hal_srng_get_entrysize);

/**
 * hal_srng_max_entries - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->max_size / ring_config->entry_size;
}

qdf_export_symbol(hal_srng_max_entries);

enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->ring_dir;
}

/**
 * hal_srng_dump - Dump ring status
 * @srng: hal srng pointer
 */
void hal_srng_dump(struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		hal_debug("=== SRC RING %d ===", srng->ring_id);
		hal_debug("hp %u, reap_hp %u, tp %u, cached tp %u",
			  srng->u.src_ring.hp,
			  srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr,
			  srng->u.src_ring.cached_tp);
	} else {
		hal_debug("=== DST RING %d ===", srng->ring_id);
		hal_debug("tp %u, hp %u, cached hp %u, loop_cnt %u",
			  srng->u.dst_ring.tp,
			  *srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp,
			  srng->u.dst_ring.loop_cnt);
	}
}
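
/*
 * Note: hal_hw_srng_config.entry_size is stored in 4-byte words, which
 * is why hal_srng_get_entrysize() converts with `<< 2' (e.g. an
 * entry_size of 8 words is reported as 32 bytes), and why
 * hal_srng_setup() zeroes `(entry_size * num_entries) << 2' bytes.
 */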

/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
extern void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
				hal_ring_handle_t hal_ring_hdl,
				struct hal_srng_params *ring_params)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	int i;

	ring_params->ring_id = srng->ring_id;
	ring_params->ring_dir = srng->ring_dir;
	ring_params->entry_size = srng->entry_size;

	ring_params->ring_base_paddr = srng->ring_base_paddr;
	ring_params->ring_base_vaddr = srng->ring_base_vaddr;
	ring_params->num_entries = srng->num_entries;
	ring_params->msi_addr = srng->msi_addr;
	ring_params->msi_data = srng->msi_data;
	ring_params->intr_timer_thres_us = srng->intr_timer_thres_us;
	ring_params->intr_batch_cntr_thres_entries =
		srng->intr_batch_cntr_thres_entries;
	ring_params->low_threshold = srng->u.src_ring.low_threshold;
	ring_params->flags = srng->flags;
	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++)
		ring_params->hwreg_base[i] = srng->hwreg_base[i];

	hal_srng_get_nf_params(srng, ring_params);
}

qdf_export_symbol(hal_get_srng_params);

void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
			   uint32_t low_threshold)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	srng->u.src_ring.low_threshold = low_threshold * srng->entry_size;
}

qdf_export_symbol(hal_set_low_threshold);

#ifdef FEATURE_RUNTIME_PM
void
hal_srng_rtpm_access_end(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl,
			 uint32_t rtpm_id)
{
	if (qdf_unlikely(!hal_ring_hdl)) {
		qdf_print("Error: Invalid hal_ring\n");
		return;
	}

	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, rtpm_id) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, rtpm_id);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}
}

qdf_export_symbol(hal_srng_rtpm_access_end);
#endif /* FEATURE_RUNTIME_PM */

#ifdef FORCE_WAKE
void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc;

	hal_soc->init_phase = init_phase;
}
#endif /* FORCE_WAKE */