/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "hal_api.h"
#include "target_type.h"
#include "wcss_version.h"
#include "qdf_module.h"

#ifdef QCA_WIFI_QCA6290
void hal_qca6290_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA8074
void hal_qca8074_attach(struct hal_soc *hal);
#endif
#if defined(QCA_WIFI_QCA8074V2) || defined(QCA_WIFI_QCA6018)
void hal_qca8074v2_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6390
void hal_qca6390_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6490
void hal_qca6490_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN9000
void hal_qcn9000_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6750
void hal_qca6750_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA5018
void hal_qca5018_attach(struct hal_soc *hal);
#endif

#ifdef ENABLE_VERBOSE_DEBUG
bool is_hal_verbose_debug_enabled;
#endif

#ifdef ENABLE_HAL_REG_WR_HISTORY
struct hal_reg_write_fail_history hal_reg_wr_hist;

void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
				 uint32_t offset,
				 uint32_t wr_val, uint32_t rd_val)
{
	struct hal_reg_write_fail_entry *record;
	int idx;

	idx = hal_history_get_next_index(&hal_soc->reg_wr_fail_hist->index,
					 HAL_REG_WRITE_HIST_SIZE);

	record = &hal_soc->reg_wr_fail_hist->record[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->reg_offset = offset;
	record->write_val = wr_val;
	record->read_val = rd_val;
}

static void hal_reg_write_fail_history_init(struct hal_soc *hal)
{
	hal->reg_wr_fail_hist = &hal_reg_wr_hist;

	qdf_atomic_set(&hal->reg_wr_fail_hist->index, -1);
}
#else
static void hal_reg_write_fail_history_init(struct hal_soc *hal)
{
}
#endif

/**
 * hal_get_srng_ring_id() - get the ring id of a described ring
 * @hal: hal_soc data structure
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings)
 *
 * Return: the ring id or -EINVAL if the ring does not exist.
 */
static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
				int ring_num, int mac_id)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	int ring_id;

	if (ring_num >= ring_config->max_rings) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: ring_num exceeded maximum no. of supported rings",
			  __func__);
		/* TODO: This is a programming error. Assert if this happens */
		return -EINVAL;
	}

	if (ring_config->lmac_ring) {
		ring_id = ring_config->start_ring_id + ring_num +
			(mac_id * HAL_MAX_RINGS_PER_LMAC);
	} else {
		ring_id = ring_config->start_ring_id + ring_num;
	}

	return ring_id;
}
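
/*
 * Illustrative note (not part of the driver logic): for an lmac ring the
 * ring id is linear in both ring_num and mac_id. Assuming, purely for the
 * sake of this example, start_ring_id = 32 and HAL_MAX_RINGS_PER_LMAC = 16,
 * a request for ring_num = 2 on mac_id = 1 would resolve to:
 *
 *	ring_id = 32 + 2 + (1 * 16) = 50
 *
 * Non-lmac rings ignore mac_id and simply use start_ring_id + ring_num.
 */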

static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
{
	/* TODO: Should we allocate srng structures dynamically? */
	return &(hal->srng_list[ring_id]);
}

#define HP_OFFSET_IN_REG_START 1
#define OFFSET_FROM_HP_TO_TP 4
static void hal_update_srng_hp_tp_address(struct hal_soc *hal_soc,
					  int shadow_config_index,
					  int ring_type,
					  int ring_num)
{
	struct hal_srng *srng;
	int ring_id;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal_soc, ring_type);

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = hal_get_srng(hal_soc, ring_id);

	if (ring_config->ring_dir == HAL_SRNG_DST_RING) {
		srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal_soc->dev_base_addr;
		hal_debug("tp_addr=%pK dev base addr %pK index %u",
			  srng->u.dst_ring.tp_addr, hal_soc->dev_base_addr,
			  shadow_config_index);
	} else {
		srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal_soc->dev_base_addr;
		hal_debug("hp_addr=%pK dev base addr %pK index %u",
			  srng->u.src_ring.hp_addr,
			  hal_soc->dev_base_addr, shadow_config_index);
	}
}

QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
				     int ring_type,
				     int ring_num)
{
	uint32_t target_register;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type];
	int shadow_config_index = hal->num_shadow_registers_configured;

	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
		QDF_ASSERT(0);
		return QDF_STATUS_E_RESOURCES;
	}

	hal->num_shadow_registers_configured++;

	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START] *
			    ring_num);

	/* if the ring is a dst ring, we need to shadow the tail pointer */
	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
		target_register += OFFSET_FROM_HP_TO_TP;

	hal->shadow_config[shadow_config_index].addr = target_register;

	/* update hp/tp addr in the hal_soc structure */
	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
				      ring_num);

	hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x, ring_type %d, ring num %d",
		  target_register,
		  SHADOW_REGISTER(shadow_config_index),
		  shadow_config_index,
		  ring_type, ring_num);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_one_shadow_config);

QDF_STATUS hal_construct_shadow_config(void *hal_soc)
{
	int ring_type, ring_num;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
		struct hal_hw_srng_config *srng_config =
			&hal->hw_srng_table[ring_type];

		if (ring_type == CE_SRC ||
		    ring_type == CE_DST ||
		    ring_type == CE_DST_STATUS)
			continue;

		if (srng_config->lmac_ring)
			continue;

		for (ring_num = 0; ring_num < srng_config->max_rings;
		     ring_num++)
			hal_set_one_shadow_config(hal_soc, ring_type,
						  ring_num);
	}

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_shadow_config);
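
/*
 * Illustrative usage sketch (an assumption, not code from this file): a
 * HIF/DP bring-up path would typically build the full shadow map first and
 * then hand it to the platform layer so the bus driver can program the
 * shadow registers. The hif_example_* helper below is hypothetical and only
 * shows the intended calling order.
 *
 *	struct pld_shadow_reg_v2_cfg *cfg;
 *	int num_cfg;
 *
 *	hal_construct_shadow_config(hal_soc);
 *	hal_get_shadow_config(hal_soc, &cfg, &num_cfg);
 *	hif_example_register_shadow_config(hif_handle, cfg, num_cfg);
 */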

void hal_get_shadow_config(void *hal_soc,
			   struct pld_shadow_reg_v2_cfg **shadow_config,
			   int *num_shadow_registers_configured)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	*shadow_config = hal->shadow_config;
	*num_shadow_registers_configured =
		hal->num_shadow_registers_configured;
}

qdf_export_symbol(hal_get_shadow_config);

static void hal_validate_shadow_register(struct hal_soc *hal,
					 uint32_t *destination,
					 uint32_t *shadow_address)
{
	unsigned int index;
	uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr;
	int destination_ba_offset =
		((char *)destination) - (char *)hal->dev_base_addr;

	index = shadow_address - shadow_0_offset;

	if (index >= MAX_SHADOW_REGISTERS) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: index %x out of bounds", __func__, index);
		goto error;
	} else if (hal->shadow_config[index].addr != destination_ba_offset) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: sanity check failure, expected %x, found %x",
			  __func__, destination_ba_offset,
			  hal->shadow_config[index].addr);
		goto error;
	}
	return;
error:
	qdf_print("%s: baddr %pK, destination %pK, shadow_address %pK s0offset %pK index %x",
		  __func__, hal->dev_base_addr, destination, shadow_address,
		  shadow_0_offset, index);
	QDF_BUG(0);
}

static void hal_target_based_configure(struct hal_soc *hal)
{
	/*
	 * Indicate initialization of srngs to avoid force wake
	 * as umac power collapse is not enabled yet
	 */
	hal->init_phase = true;

	switch (hal->target_type) {
#ifdef QCA_WIFI_QCA6290
	case TARGET_TYPE_QCA6290:
		hal->use_register_windowing = true;
		hal_qca6290_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		hal->use_register_windowing = true;
		hal_qca6390_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6490
	case TARGET_TYPE_QCA6490:
		hal->use_register_windowing = true;
		hal_qca6490_attach(hal);
		hal->init_phase = false;
		break;
#endif
#ifdef QCA_WIFI_QCA6750
	case TARGET_TYPE_QCA6750:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_qca6750_attach(hal);
		break;
#endif
#if defined(QCA_WIFI_QCA8074) && defined(WIFI_TARGET_TYPE_3_0)
	case TARGET_TYPE_QCA8074:
		hal_qca8074_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA8074V2)
	case TARGET_TYPE_QCA8074V2:
		hal_qca8074v2_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA6018)
	case TARGET_TYPE_QCA6018:
		hal_qca8074v2_attach(hal);
		break;
#endif

#ifdef QCA_WIFI_QCN9000
	case TARGET_TYPE_QCN9000:
		hal->use_register_windowing = true;
		/*
		 * Static window map is enabled for qcn9000 to use 2mb bar
		 * size and use multiple windows to write into registers.
		 */
		hal->static_window_map = true;
		hal_qcn9000_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA5018
	case TARGET_TYPE_QCA5018:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_qca5018_attach(hal);
		break;
#endif
	default:
		break;
	}
}

uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hif_target_info *tgt_info =
		hif_get_target_info_handle(hal_soc->hif_handle);

	return tgt_info->target_type;
}

qdf_export_symbol(hal_get_target_type);

#ifdef FEATURE_HAL_DELAYED_REG_WRITE
#ifdef MEMORY_DEBUG
/*
 * Length of the queue (array) used to hold delayed register writes.
 * Must be a power of 2, since it is indexed with a wrap-around mask.
 */
#define HAL_REG_WRITE_QUEUE_LEN 128
#else
#define HAL_REG_WRITE_QUEUE_LEN 32
#endif

/**
 * hal_is_reg_write_tput_level_high() - throughput level for delayed reg writes
 * @hal: hal_soc pointer
 *
 * Return: true if throughput is high, else false.
 */
static inline bool hal_is_reg_write_tput_level_high(struct hal_soc *hal)
{
	int bw_level = hif_get_bandwidth_level(hal->hif_handle);

	return bw_level >= PLD_BUS_WIDTH_MEDIUM;
}

/**
 * hal_process_reg_write_q_elem() - process a register write queue element
 * @hal: hal_soc pointer
 * @q_elem: pointer to hal register write queue element
 *
 * Return: The value which was written to the address
 */
static uint32_t
hal_process_reg_write_q_elem(struct hal_soc *hal,
			     struct hal_reg_write_q_elem *q_elem)
{
	struct hal_srng *srng = q_elem->srng;
	uint32_t write_val;

	SRNG_LOCK(&srng->lock);

	srng->reg_write_in_progress = false;
	srng->wstats.dequeues++;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		q_elem->dequeue_val = srng->u.src_ring.hp;
		hal_write_address_32_mb(hal,
					srng->u.src_ring.hp_addr,
					srng->u.src_ring.hp, false);
		write_val = srng->u.src_ring.hp;
	} else {
		q_elem->dequeue_val = srng->u.dst_ring.tp;
		hal_write_address_32_mb(hal,
					srng->u.dst_ring.tp_addr,
					srng->u.dst_ring.tp, false);
		write_val = srng->u.dst_ring.tp;
	}

	q_elem->valid = 0;
	SRNG_UNLOCK(&srng->lock);

	return write_val;
}

/**
 * hal_reg_write_fill_sched_delay_hist() - fill reg write delay histogram in hal
 * @hal: hal_soc pointer
 * @delay_us: delay in microseconds
 *
 * Return: None
 */
static inline void hal_reg_write_fill_sched_delay_hist(struct hal_soc *hal,
							uint64_t delay_us)
{
	uint32_t *hist;

	hist = hal->stats.wstats.sched_delay;

	if (delay_us < 100)
		hist[REG_WRITE_SCHED_DELAY_SUB_100us]++;
	else if (delay_us < 1000)
		hist[REG_WRITE_SCHED_DELAY_SUB_1000us]++;
	else if (delay_us < 5000)
		hist[REG_WRITE_SCHED_DELAY_SUB_5000us]++;
	else
		hist[REG_WRITE_SCHED_DELAY_GT_5000us]++;
}
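
/*
 * Worked example of the bucketing above (values chosen only for
 * illustration): a scheduling delay of 40 us lands in SUB_100us,
 * 750 us in SUB_1000us, 3200 us in SUB_5000us, and anything at or
 * above 5000 us is counted in GT_5000us.
 */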

/**
 * hal_reg_write_work() - Worker to process delayed writes
 * @arg: hal_soc pointer
 *
 * Return: None
 */
static void hal_reg_write_work(void *arg)
{
	int32_t q_depth, write_val;
	struct hal_soc *hal = arg;
	struct hal_reg_write_q_elem *q_elem;
	uint64_t delta_us;
	uint8_t ring_id;
	uint32_t *addr;

	q_elem = &hal->reg_write_queue[(hal->read_idx)];
	q_elem->work_scheduled_time = qdf_get_log_timestamp();

	/* Make sure q_elem is consistent in memory across cores */
	qdf_rmb();
	if (!q_elem->valid)
		return;

	q_depth = qdf_atomic_read(&hal->stats.wstats.q_depth);
	if (q_depth > hal->stats.wstats.max_q_depth)
		hal->stats.wstats.max_q_depth = q_depth;

	if (hif_prevent_link_low_power_states(hal->hif_handle)) {
		hal->stats.wstats.prevent_l1_fails++;
		return;
	}

	while (true) {
		qdf_rmb();
		if (!q_elem->valid)
			break;

		q_elem->dequeue_time = qdf_get_log_timestamp();
		ring_id = q_elem->srng->ring_id;
		addr = q_elem->addr;
		delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
						      q_elem->enqueue_time);
		hal_reg_write_fill_sched_delay_hist(hal, delta_us);

		hal->stats.wstats.dequeues++;
		qdf_atomic_dec(&hal->stats.wstats.q_depth);

		write_val = hal_process_reg_write_q_elem(hal, q_elem);
		hal_verbose_debug("read_idx %u srng 0x%x, addr 0x%pK dequeue_val %u sched delay %llu us",
				  hal->read_idx, ring_id, addr, write_val,
				  delta_us);

		qdf_atomic_dec(&hal->active_work_cnt);
		hal->read_idx = (hal->read_idx + 1) &
				(HAL_REG_WRITE_QUEUE_LEN - 1);
		q_elem = &hal->reg_write_queue[(hal->read_idx)];
	}

	hif_allow_link_low_power_states(hal->hif_handle);
}

/**
 * hal_flush_reg_write_work() - flush all writes from register write queue
 * @hal: hal_soc pointer
 *
 * Return: None
 */
static inline void hal_flush_reg_write_work(struct hal_soc *hal)
{
	qdf_cancel_work(&hal->reg_write_work);
	qdf_flush_work(&hal->reg_write_work);
	qdf_flush_workqueue(0, hal->reg_write_wq);
}

/**
 * hal_reg_write_enqueue() - enqueue register writes into kworker
 * @hal_soc: hal_soc pointer
 * @srng: srng pointer
 * @addr: iomem address of register
 * @value: value to be written to iomem address
 *
 * This function executes from within the SRNG LOCK
 *
 * Return: None
 */
static void hal_reg_write_enqueue(struct hal_soc *hal_soc,
				  struct hal_srng *srng,
				  void __iomem *addr,
				  uint32_t value)
{
	struct hal_reg_write_q_elem *q_elem;
	uint32_t write_idx;

	if (srng->reg_write_in_progress) {
		hal_verbose_debug("Already in progress srng ring id 0x%x addr 0x%pK val %u",
				  srng->ring_id, addr, value);
		qdf_atomic_inc(&hal_soc->stats.wstats.coalesces);
		srng->wstats.coalesces++;
		return;
	}

	write_idx = qdf_atomic_inc_return(&hal_soc->write_idx);

	write_idx = write_idx & (HAL_REG_WRITE_QUEUE_LEN - 1);

	q_elem = &hal_soc->reg_write_queue[write_idx];

	if (q_elem->valid) {
		hal_err("queue full");
		QDF_BUG(0);
		return;
	}

	qdf_atomic_inc(&hal_soc->stats.wstats.enqueues);
	srng->wstats.enqueues++;

	qdf_atomic_inc(&hal_soc->stats.wstats.q_depth);

	q_elem->srng = srng;
	q_elem->addr = addr;
	q_elem->enqueue_val = value;
	q_elem->enqueue_time = qdf_get_log_timestamp();

	/*
	 * Before the valid flag is set to true, all the other fields in
	 * the q_elem need to be updated in memory. Otherwise the dequeuing
	 * worker thread might read stale entries and process an incorrect
	 * srng.
	 */
	qdf_wmb();
	q_elem->valid = true;

	/*
	 * After all other fields in the q_elem have been updated in memory
	 * successfully, the valid flag needs to reach memory in time as
	 * well. Otherwise the dequeuing worker thread might read a stale
	 * valid flag and the work will be bypassed for this round. If no
	 * other work is scheduled later, this register write would never
	 * be issued.
	 */
	qdf_wmb();

	srng->reg_write_in_progress = true;
	qdf_atomic_inc(&hal_soc->active_work_cnt);

	hal_verbose_debug("write_idx %u srng ring id 0x%x addr 0x%pK val %u",
			  write_idx, srng->ring_id, addr, value);

	qdf_queue_work(hal_soc->qdf_dev, hal_soc->reg_write_wq,
		       &hal_soc->reg_write_work);
}
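
/*
 * Illustrative note on the queue indexing above (numbers are only an
 * example): write_idx is a free-running atomic counter that is masked
 * with (HAL_REG_WRITE_QUEUE_LEN - 1), which is why the queue length must
 * be a power of two. With HAL_REG_WRITE_QUEUE_LEN = 32, consecutive
 * enqueues map to slots 0, 1, ..., 31, 0, 1, ... while the worker chases
 * them with read_idx using the same mask. The qdf_wmb()/qdf_rmb() pair
 * orders the payload fields against the 'valid' flag between producer
 * and consumer.
 */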

void hal_delayed_reg_write(struct hal_soc *hal_soc,
			   struct hal_srng *srng,
			   void __iomem *addr,
			   uint32_t value)
{
	if (pld_is_device_awake(hal_soc->qdf_dev->dev) ||
	    hal_is_reg_write_tput_level_high(hal_soc)) {
		qdf_atomic_inc(&hal_soc->stats.wstats.direct);
		srng->wstats.direct++;
		hal_write_address_32_mb(hal_soc, addr, value, false);
	} else {
		hal_reg_write_enqueue(hal_soc, srng, addr, value);
	}
}

/**
 * hal_delayed_reg_write_init() - Initialization function for delayed reg writes
 * @hal: hal_soc pointer
 *
 * Initialize main data structures to process register writes in a delayed
 * workqueue.
 *
 * Return: QDF_STATUS_SUCCESS on success else a QDF error.
 */
static QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
{
	hal->reg_write_wq =
		qdf_alloc_high_prior_ordered_workqueue("hal_register_write_wq");
	qdf_create_work(0, &hal->reg_write_work, hal_reg_write_work, hal);
	hal->reg_write_queue = qdf_mem_malloc(HAL_REG_WRITE_QUEUE_LEN *
					      sizeof(*hal->reg_write_queue));
	if (!hal->reg_write_queue) {
		hal_err("unable to allocate memory");
		QDF_BUG(0);
		return QDF_STATUS_E_NOMEM;
	}

	/* Initial value of indices */
	hal->read_idx = 0;
	qdf_atomic_set(&hal->write_idx, -1);
	return QDF_STATUS_SUCCESS;
}

/**
 * hal_delayed_reg_write_deinit() - De-initialize delayed reg write processing
 * @hal: hal_soc pointer
 *
 * De-initialize main data structures to process register writes in a delayed
 * workqueue.
 *
 * Return: None
 */
static void hal_delayed_reg_write_deinit(struct hal_soc *hal)
{
	hal_flush_reg_write_work(hal);
	qdf_destroy_workqueue(0, hal->reg_write_wq);
	qdf_mem_free(hal->reg_write_queue);
}

static inline
char *hal_fill_reg_write_srng_stats(struct hal_srng *srng,
				    char *buf, qdf_size_t size)
{
	qdf_scnprintf(buf, size, "enq %u deq %u coal %u direct %u",
		      srng->wstats.enqueues, srng->wstats.dequeues,
		      srng->wstats.coalesces, srng->wstats.direct);
	return buf;
}

/* bytes for local buffer */
#define HAL_REG_WRITE_SRNG_STATS_LEN 100

void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_srng *srng;
	char buf[HAL_REG_WRITE_SRNG_STATS_LEN];
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	srng = hal_get_srng(hal, HAL_SRNG_SW2TCL1);
	hal_debug("SW2TCL1: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_WBM2SW0_RELEASE);
	hal_debug("WBM2SW0: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_REO2SW1);
	hal_debug("REO2SW1: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_REO2SW2);
	hal_debug("REO2SW2: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_REO2SW3);
	hal_debug("REO2SW3: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
}

void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
{
	uint32_t *hist;
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	hist = hal->stats.wstats.sched_delay;

	hal_debug("enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
		  qdf_atomic_read(&hal->stats.wstats.enqueues),
		  hal->stats.wstats.dequeues,
		  qdf_atomic_read(&hal->stats.wstats.coalesces),
		  qdf_atomic_read(&hal->stats.wstats.direct),
		  qdf_atomic_read(&hal->stats.wstats.q_depth),
		  hal->stats.wstats.max_q_depth,
		  hist[REG_WRITE_SCHED_DELAY_SUB_100us],
		  hist[REG_WRITE_SCHED_DELAY_SUB_1000us],
		  hist[REG_WRITE_SCHED_DELAY_SUB_5000us],
		  hist[REG_WRITE_SCHED_DELAY_GT_5000us]);
}

int hal_get_reg_write_pending_work(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	return qdf_atomic_read(&hal->active_work_cnt);
}

#else
static inline QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
{
	return QDF_STATUS_SUCCESS;
}

static inline void hal_delayed_reg_write_deinit(struct hal_soc *hal)
{
}
#endif

/**
 * hal_attach() - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle
 *	   NULL on failure (if given ring is not available)
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev)
{
	struct hal_soc *hal;
	int i;

	hal = qdf_mem_malloc(sizeof(*hal));

	if (!hal) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal_soc allocation failed", __func__);
		goto fail0;
	}
	hal->hif_handle = hif_handle;
	hal->dev_base_addr = hif_get_dev_ba(hif_handle); /* UMAC */
	hal->dev_base_addr_ce = hif_get_dev_ba_ce(hif_handle); /* CE */
	hal->qdf_dev = qdf_dev;
	hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent(
		qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
		&(hal->shadow_rdptr_mem_paddr));
	if (!hal->shadow_rdptr_mem_paddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal->shadow_rdptr_mem_paddr allocation failed",
			  __func__);
		goto fail1;
	}
	qdf_mem_zero(hal->shadow_rdptr_mem_vaddr,
		     sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX);

	hal->shadow_wrptr_mem_vaddr =
		(uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		&(hal->shadow_wrptr_mem_paddr));
	if (!hal->shadow_wrptr_mem_vaddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal->shadow_wrptr_mem_vaddr allocation failed",
			  __func__);
		goto fail2;
	}
	qdf_mem_zero(hal->shadow_wrptr_mem_vaddr,
		     sizeof(*(hal->shadow_wrptr_mem_vaddr)) *
		     HAL_MAX_LMAC_RINGS);

	for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
		hal->srng_list[i].initialized = 0;
		hal->srng_list[i].ring_id = i;
	}

	qdf_spinlock_create(&hal->register_access_lock);
	hal->register_window = 0;
	hal->target_type = hal_get_target_type(hal_soc_to_hal_soc_handle(hal));

	hal_target_based_configure(hal);

	hal_reg_write_fail_history_init(hal);

	qdf_minidump_log(hal, sizeof(*hal), "hal_soc");

	qdf_atomic_init(&hal->active_work_cnt);
	hal_delayed_reg_write_init(hal);

	return (void *)hal;

fail2:
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
				sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
				HAL_SRNG_ID_MAX,
				hal->shadow_rdptr_mem_vaddr,
				hal->shadow_rdptr_mem_paddr, 0);
fail1:
	qdf_mem_free(hal);
fail0:
	return NULL;
}
qdf_export_symbol(hal_attach);

/**
 * hal_get_meminfo() - Retrieve hal memory base addresses
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	mem->dev_base_addr = (void *)hal->dev_base_addr;
	mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
	mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
	mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
	mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
	hif_read_phy_mem_base((void *)hal->hif_handle,
			      (qdf_dma_addr_t *)&mem->dev_base_paddr);
}
qdf_export_symbol(hal_get_meminfo);
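
/*
 * Illustrative attach/detach usage (a sketch under assumptions, not code
 * from this driver): HIF would typically create the HAL SOC once the
 * register space is mapped and tear it down in the reverse order during
 * de-initialization. The hif_ctx and qdf_dev names are placeholders.
 *
 *	void *hal_soc = hal_attach(hif_ctx, qdf_dev);
 *
 *	if (!hal_soc)
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	hal_detach(hal_soc);
 */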

/**
 * hal_detach() - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * Return: None
 *
 * This function should be called as part of HIF de-initialization. It frees
 * the shadow read/write pointer memory and the hal_soc allocated by
 * hal_attach().
 */
extern void hal_detach(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	hal_delayed_reg_write_deinit(hal);

	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
				sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
				HAL_SRNG_ID_MAX,
				hal->shadow_rdptr_mem_vaddr,
				hal->shadow_rdptr_mem_paddr, 0);
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
				sizeof(*(hal->shadow_wrptr_mem_vaddr)) *
				HAL_MAX_LMAC_RINGS,
				hal->shadow_wrptr_mem_vaddr,
				hal->shadow_wrptr_mem_paddr, 0);
	qdf_minidump_remove(hal);
	qdf_mem_free(hal);
}
qdf_export_symbol(hal_detach);

/**
 * hal_ce_dst_setup() - Initialize CE destination ring registers
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @ring_num: ring number
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
				    int ring_num)
{
	uint32_t reg_val = 0;
	uint32_t reg_addr;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, CE_DST);

	/* set DEST_MAX_LENGTH according to ce assignment */
	reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_ADDR(
			ring_config->reg_start[R0_INDEX] +
			(ring_num * ring_config->reg_size[R0_INDEX]));

	reg_val = HAL_REG_READ(hal, reg_addr);
	reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	reg_val |= srng->u.dst_ring.max_buffer_length &
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	HAL_REG_WRITE(hal, reg_addr, reg_val);

	if (srng->prefetch_timer) {
		reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_CONSUMER_PREFETCH_TIMER_ADDR(
				ring_config->reg_start[R0_INDEX] +
				(ring_num * ring_config->reg_size[R0_INDEX]));

		reg_val = HAL_REG_READ(hal, reg_addr);
		reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_CONSUMER_PREFETCH_TIMER_RMSK;
		reg_val |= srng->prefetch_timer;
		HAL_REG_WRITE(hal, reg_addr, reg_val);
		reg_val = HAL_REG_READ(hal, reg_addr);
	}
}

/**
 * hal_reo_read_write_ctrl_ix() - Read or write REO_DESTINATION_RING_CTRL_IX
 * @hal_soc_hdl: HAL SOC handle
 * @read: boolean value to indicate if read or write
 * @ix0: pointer to store IX0 reg value
 * @ix1: pointer to store IX1 reg value
 * @ix2: pointer to store IX2 reg value
 * @ix3: pointer to store IX3 reg value
 */
void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
				uint32_t *ix0, uint32_t *ix1,
				uint32_t *ix2, uint32_t *ix3)
{
	uint32_t reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	if (read) {
		if (ix0) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix0 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix1) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix1 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix2) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix2 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix3) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix3 = HAL_REG_READ(hal, reg_offset);
		}
	} else {
		if (ix0) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			HAL_REG_WRITE_CONFIRM(hal, reg_offset, *ix0);
		}

		if (ix1) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			HAL_REG_WRITE(hal, reg_offset, *ix1);
		}

		if (ix2) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			HAL_REG_WRITE_CONFIRM(hal, reg_offset, *ix2);
		}

		if (ix3) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			HAL_REG_WRITE_CONFIRM(hal, reg_offset, *ix3);
		}
	}
}
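
/*
 * Illustrative read-modify-write sketch for the IX registers (ix0_val and
 * example_new_remap_value are placeholders, not recommended settings):
 *
 *	uint32_t ix0_val;
 *
 *	hal_reo_read_write_ctrl_ix(hal_soc_hdl, true, &ix0_val,
 *				   NULL, NULL, NULL);
 *	ix0_val = example_new_remap_value;
 *	hal_reo_read_write_ctrl_ix(hal_soc_hdl, false, &ix0_val,
 *				   NULL, NULL, NULL);
 */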

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest ring head pointer
 * @srng: srng pointer
 * @paddr: physical address
 */
void hal_srng_dst_set_hp_paddr(struct hal_srng *srng,
			       uint64_t paddr)
{
	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB,
			   paddr & 0xffffffff);
	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB,
			   paddr >> 32);
}

/**
 * hal_srng_dst_init_hp() - Initialize destination ring head pointer
 * @srng: srng pointer
 * @vaddr: virtual address
 */
void hal_srng_dst_init_hp(struct hal_srng *srng,
			  uint32_t *vaddr)
{
	if (!srng)
		return;

	srng->u.dst_ring.hp_addr = vaddr;
	SRNG_DST_REG_WRITE_CONFIRM(srng, HP, srng->u.dst_ring.cached_hp);

	if (vaddr) {
		*srng->u.dst_ring.hp_addr = srng->u.dst_ring.cached_hp;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "hp_addr=%pK, cached_hp=%d, hp=%d",
			  (void *)srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp,
			  *srng->u.dst_ring.hp_addr);
	}
}

/**
 * hal_srng_hw_init() - Private function to initialize SRNG HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
				    struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		hal_srng_src_hw_init(hal, srng);
	else
		hal_srng_dst_hw_init(hal, srng);
}

#ifdef CONFIG_SHADOW_V2
#define ignore_shadow false
#define CHECK_SHADOW_REGISTERS true
#else
#define ignore_shadow true
#define CHECK_SHADOW_REGISTERS false
#endif

/**
 * hal_srng_setup() - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 * from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure. Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *	   NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
		     int mac_id, struct hal_srng_params *ring_params)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	hal_verbose_debug("mac_id %d ring_id %d", mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->initialized) {
		hal_verbose_debug("Ring (%d, %d) already initialized",
				  ring_type, ring_num);
		return NULL;
	}

	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	srng->ring_size = srng->num_entries * srng->entry_size;
	srng->ring_size_mask = srng->ring_size - 1;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	srng->prefetch_timer = ring_params->prefetch_timer;
	srng->hal_soc = hal_soc;

	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
			+ (ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
					     srng->num_entries) << 2);

	srng->flags = ring_params->flags;
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should get these flags from the caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;
		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || !srng->u.src_ring.hp_addr) {
			srng->u.src_ring.hp_addr =
				hal_get_window_address(hal,
						SRNG_SRC_ADDR(srng, HP));

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_SRC_ADDR(srng, HP),
						     srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || !srng->u.dst_ring.tp_addr) {
			srng->u.dst_ring.tp_addr =
				hal_get_window_address(hal,
						SRNG_DST_ADDR(srng, TP));

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_DST_ADDR(srng, TP),
						     srng->u.dst_ring.tp_addr);
		}
	}

	if (!(ring_config->lmac_ring)) {
		hal_srng_hw_init(hal, srng);

		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length =
				ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	srng->srng_event = 0;

	srng->initialized = true;

	return (void *)srng;
}
qdf_export_symbol(hal_srng_setup);
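
/*
 * Illustrative caller sketch (the REO2SW1 choice and num_entries value are
 * assumptions for the example, not requirements): the caller allocates
 * 'num_entries * entry_size' bytes of contiguous ring memory, fills
 * hal_srng_params, and then hands the ring to hal_srng_setup().
 *
 *	struct hal_srng_params params = {0};
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO2SW1);
 *	uint32_t num_entries = 256;
 *	void *hal_ring;
 *
 *	params.ring_base_vaddr = qdf_mem_alloc_consistent(qdf_dev,
 *				qdf_dev->dev, num_entries * entry_size,
 *				&params.ring_base_paddr);
 *	params.num_entries = num_entries;
 *	hal_ring = hal_srng_setup(hal_soc, REO2SW1, 0, 0, &params);
 */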

/**
 * hal_srng_cleanup() - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Opaque HAL SRNG pointer
 */
void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	SRNG_LOCK_DESTROY(&srng->lock);
	srng->initialized = 0;
}
qdf_export_symbol(hal_srng_cleanup);

/**
 * hal_srng_get_entrysize() - Returns size of ring entry in bytes
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 */
uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->entry_size << 2;
}
qdf_export_symbol(hal_srng_get_entrysize);

/**
 * hal_srng_max_entries() - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->max_size / ring_config->entry_size;
}
qdf_export_symbol(hal_srng_max_entries);

enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->ring_dir;
}

/**
 * hal_srng_dump() - Dump ring status
 * @srng: hal srng pointer
 */
void hal_srng_dump(struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		hal_debug("=== SRC RING %d ===", srng->ring_id);
		hal_debug("hp %u, reap_hp %u, tp %u, cached tp %u",
			  srng->u.src_ring.hp,
			  srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr,
			  srng->u.src_ring.cached_tp);
	} else {
		hal_debug("=== DST RING %d ===", srng->ring_id);
		hal_debug("tp %u, hp %u, cached hp %u, loop_cnt %u",
			  srng->u.dst_ring.tp,
			  *srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp,
			  srng->u.dst_ring.loop_cnt);
	}
}

/**
 * hal_get_srng_params() - Retrieve SRNG parameters for a given ring from HAL
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
extern void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
				hal_ring_handle_t hal_ring_hdl,
				struct hal_srng_params *ring_params)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	int i = 0;

	ring_params->ring_id = srng->ring_id;
	ring_params->ring_dir = srng->ring_dir;
	ring_params->entry_size = srng->entry_size;

	ring_params->ring_base_paddr = srng->ring_base_paddr;
	ring_params->ring_base_vaddr = srng->ring_base_vaddr;
	ring_params->num_entries = srng->num_entries;
	ring_params->msi_addr = srng->msi_addr;
	ring_params->msi_data = srng->msi_data;
	ring_params->intr_timer_thres_us = srng->intr_timer_thres_us;
	ring_params->intr_batch_cntr_thres_entries =
		srng->intr_batch_cntr_thres_entries;
	ring_params->low_threshold = srng->u.src_ring.low_threshold;
	ring_params->flags = srng->flags;
	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++)
		ring_params->hwreg_base[i] = srng->hwreg_base[i];
}
qdf_export_symbol(hal_get_srng_params);

void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
			   uint32_t low_threshold)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	srng->u.src_ring.low_threshold = low_threshold * srng->entry_size;
}
qdf_export_symbol(hal_set_low_threshold);

#ifdef FORCE_WAKE
void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc;

	hal_soc->init_phase = init_phase;
}
#endif /* FORCE_WAKE */