1 /* 2 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "hal_hw_headers.h" 20 #include "hal_api.h" 21 #include "target_type.h" 22 #include "wcss_version.h" 23 #include "qdf_module.h" 24 #ifdef QCA_WIFI_QCA8074 25 void hal_qca6290_attach(struct hal_soc *hal); 26 #endif 27 #ifdef QCA_WIFI_QCA8074 28 void hal_qca8074_attach(struct hal_soc *hal); 29 #endif 30 #if defined(QCA_WIFI_QCA8074V2) || defined(QCA_WIFI_QCA6018) 31 void hal_qca8074v2_attach(struct hal_soc *hal); 32 #endif 33 #ifdef QCA_WIFI_QCA6390 34 void hal_qca6390_attach(struct hal_soc *hal); 35 #endif 36 #ifdef QCA_WIFI_QCA6490 37 void hal_qca6490_attach(struct hal_soc *hal); 38 #endif 39 #ifdef QCA_WIFI_QCN9000 40 void hal_qcn9000_attach(struct hal_soc *hal); 41 #endif 42 #ifdef QCA_WIFI_QCA6750 43 void hal_qca6750_attach(struct hal_soc *hal); 44 #endif 45 46 #ifdef ENABLE_VERBOSE_DEBUG 47 bool is_hal_verbose_debug_enabled; 48 #endif 49 50 /** 51 * hal_get_srng_ring_id() - get the ring id of a descriped ring 52 * @hal: hal_soc data structure 53 * @ring_type: type enum describing the ring 54 * @ring_num: which ring of the ring type 55 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings) 56 * 57 * Return: 
the ring id or -EINVAL if the ring does not exist. 58 */ 59 static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type, 60 int ring_num, int mac_id) 61 { 62 struct hal_hw_srng_config *ring_config = 63 HAL_SRNG_CONFIG(hal, ring_type); 64 int ring_id; 65 66 if (ring_num >= ring_config->max_rings) { 67 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 68 "%s: ring_num exceeded maximum no. of supported rings", 69 __func__); 70 /* TODO: This is a programming error. Assert if this happens */ 71 return -EINVAL; 72 } 73 74 if (ring_config->lmac_ring) { 75 ring_id = ring_config->start_ring_id + ring_num + 76 (mac_id * HAL_MAX_RINGS_PER_LMAC); 77 } else { 78 ring_id = ring_config->start_ring_id + ring_num; 79 } 80 81 return ring_id; 82 } 83 84 static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id) 85 { 86 /* TODO: Should we allocate srng structures dynamically? */ 87 return &(hal->srng_list[ring_id]); 88 } 89 90 #define HP_OFFSET_IN_REG_START 1 91 #define OFFSET_FROM_HP_TO_TP 4 92 static void hal_update_srng_hp_tp_address(struct hal_soc *hal_soc, 93 int shadow_config_index, 94 int ring_type, 95 int ring_num) 96 { 97 struct hal_srng *srng; 98 int ring_id; 99 struct hal_hw_srng_config *ring_config = 100 HAL_SRNG_CONFIG(hal_soc, ring_type); 101 102 ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0); 103 if (ring_id < 0) 104 return; 105 106 srng = hal_get_srng(hal_soc, ring_id); 107 108 if (ring_config->ring_dir == HAL_SRNG_DST_RING) { 109 srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index) 110 + hal_soc->dev_base_addr; 111 hal_debug("tp_addr=%pK dev base addr %pK index %u", 112 srng->u.dst_ring.tp_addr, hal_soc->dev_base_addr, 113 shadow_config_index); 114 } else { 115 srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index) 116 + hal_soc->dev_base_addr; 117 hal_debug("hp_addr=%pK dev base addr %pK index %u", 118 srng->u.src_ring.hp_addr, 119 hal_soc->dev_base_addr, shadow_config_index); 120 } 121 122 } 123 124 
/**
 * hal_set_one_shadow_config() - map one ring's HP (or TP) register to the
 *				 next free shadow register
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 *
 * Consumes one entry of hal->shadow_config and updates the cached hp/tp
 * address in the matching hal_srng via hal_update_srng_hp_tp_address().
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_RESOURCES (with an assert)
 *	   when all MAX_SHADOW_REGISTERS entries are already in use.
 */
QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
				     int ring_type,
				     int ring_num)
{
	uint32_t target_register;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type];
	/* Next free slot; only committed (incremented) after the bound check */
	int shadow_config_index = hal->num_shadow_registers_configured;

	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
		QDF_ASSERT(0);
		return QDF_STATUS_E_RESOURCES;
	}

	hal->num_shadow_registers_configured++;

	/* Register address of this ring's HP register */
	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
			    *ring_num);

	/* if the ring is a dst ring, we need to shadow the tail pointer */
	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
		target_register += OFFSET_FROM_HP_TO_TP;

	hal->shadow_config[shadow_config_index].addr = target_register;

	/* update hp/tp addr in the hal_soc structure*/
	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
				      ring_num);

	hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x, ring_type %d, ring num %d",
		  target_register,
		  SHADOW_REGISTER(shadow_config_index),
		  shadow_config_index,
		  ring_type, ring_num);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_one_shadow_config);

/**
 * hal_construct_shadow_config() - assign shadow registers to all eligible
 *				   rings
 * @hal_soc: Opaque HAL SOC handle
 *
 * Walks every ring of every ring type and maps it to a shadow register,
 * skipping copy-engine rings (CE_SRC/CE_DST/CE_DST_STATUS) and LMAC rings
 * (whose pointers are exchanged with FW through shared memory instead).
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS hal_construct_shadow_config(void *hal_soc)
{
	int ring_type, ring_num;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
		struct hal_hw_srng_config *srng_config =
			&hal->hw_srng_table[ring_type];

		if (ring_type == CE_SRC ||
		    ring_type == CE_DST ||
		    ring_type == CE_DST_STATUS)
			continue;

		if (srng_config->lmac_ring)
			continue;

		for (ring_num = 0; ring_num < srng_config->max_rings;
		     ring_num++)
			hal_set_one_shadow_config(hal_soc, ring_type, ring_num);
	}

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_shadow_config);

/**
 * hal_get_shadow_config() - export the configured shadow register table
 * @hal_soc: Opaque HAL SOC handle
 * @shadow_config: out-param, set to point at the HAL's shadow table
 * @num_shadow_registers_configured: out-param, number of valid entries
 *
 * Returns pointers into hal_soc itself; the caller must not free them.
 */
void hal_get_shadow_config(void *hal_soc,
			   struct pld_shadow_reg_v2_cfg **shadow_config,
			   int *num_shadow_registers_configured)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	*shadow_config = hal->shadow_config;
	*num_shadow_registers_configured =
		hal->num_shadow_registers_configured;
}

qdf_export_symbol(hal_get_shadow_config);


/**
 * hal_validate_shadow_register() - sanity check a shadow register mapping
 * @hal: hal_soc data structure
 * @destination: mapped (virtual) address of the real HP/TP register
 * @shadow_address: mapped (virtual) address of the shadow register in use
 *
 * Recovers the shadow index from @shadow_address and verifies that
 * hal->shadow_config[index] records the register offset matching
 * @destination. On mismatch, logs the details and triggers QDF_BUG.
 */
static void hal_validate_shadow_register(struct hal_soc *hal,
					 uint32_t *destination,
					 uint32_t *shadow_address)
{
	unsigned int index;
	uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr;
	/* Byte offset of the target register from the device base address */
	int destination_ba_offset =
		((char *)destination) - (char *)hal->dev_base_addr;

	/* uint32_t pointer difference == shadow register index */
	index = shadow_address - shadow_0_offset;

	if (index >= MAX_SHADOW_REGISTERS) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: index %x out of bounds", __func__, index);
		goto error;
	} else if (hal->shadow_config[index].addr != destination_ba_offset) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: sanity check failure, expected %x, found %x",
			  __func__, destination_ba_offset,
			  hal->shadow_config[index].addr);
		goto error;
	}
	return;
error:
	/* NOTE(review): "desination" typo is in a runtime log string;
	 * left untouched here to keep behavior byte-identical.
	 */
	qdf_print("%s: baddr %pK, desination %pK, shadow_address %pK s0offset %pK index %x",
		  __func__, hal->dev_base_addr, destination, shadow_address,
		  shadow_0_offset, index);
	QDF_BUG(0);
	return;
}

/**
 * hal_target_based_configure() - per-target HAL setup
 * @hal: hal_soc data structure
 *
 * Dispatches on hal->target_type to the matching hal_<target>_attach()
 * and sets target-specific register access options (register windowing,
 * static window map). Unknown targets are silently left unconfigured.
 */
static void hal_target_based_configure(struct hal_soc *hal)
{
	switch (hal->target_type) {
#ifdef QCA_WIFI_QCA6290
	case TARGET_TYPE_QCA6290:
		hal->use_register_windowing = true;
		hal_qca6290_attach(hal);
	break;
#endif
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		hal->use_register_windowing = true;
		hal_qca6390_attach(hal);
	break;
#endif
#ifdef QCA_WIFI_QCA6490
	case TARGET_TYPE_QCA6490:
		hal->use_register_windowing = true;
		hal_qca6490_attach(hal);
	break;
#endif
#ifdef QCA_WIFI_QCA6750
	case TARGET_TYPE_QCA6750:
		hal->use_register_windowing = true;
		hal_qca6750_attach(hal);
	break;
#endif
#if defined(QCA_WIFI_QCA8074) && defined(WIFI_TARGET_TYPE_3_0)
	case TARGET_TYPE_QCA8074:
		hal_qca8074_attach(hal);
	break;
#endif

#if defined(QCA_WIFI_QCA8074V2)
	case TARGET_TYPE_QCA8074V2:
		hal_qca8074v2_attach(hal);
	break;
#endif

#if defined(QCA_WIFI_QCA6018)
	case TARGET_TYPE_QCA6018:
		/* QCA6018 reuses the 8074v2 HAL ops */
		hal_qca8074v2_attach(hal);
	break;
#endif

#ifdef QCA_WIFI_QCN9000
	case TARGET_TYPE_QCN9000:
		hal->use_register_windowing = true;
		/*
		 * Static window map is enabled for qcn9000 to use 2mb bar
		 * size and use multiple windows to write into registers.
		 */
		hal->static_window_map = true;
		hal_qcn9000_attach(hal);
	break;
#endif
	default:
	break;
	}
}

/**
 * hal_get_target_type() - query the target type from HIF
 * @hal_soc_hdl: Opaque HAL SOC handle
 *
 * Return: target type as reported by hif_get_target_info_handle().
 */
uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hif_target_info *tgt_info =
		hif_get_target_info_handle(hal_soc->hif_handle);

	return tgt_info->target_type;
}

qdf_export_symbol(hal_get_target_type);

/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle
 *		NULL on failure (if given ring is not available)
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines).
 * DP layer will get hal_soc handle using hif_get_hal_handle()
 *
 */
void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev)
{
	struct hal_soc *hal;
	int i;

	hal = qdf_mem_malloc(sizeof(*hal));

	if (!hal) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal_soc allocation failed", __func__);
		goto fail0;
	}
	qdf_minidump_log(hal, sizeof(*hal), "hal_soc");
	hal->hif_handle = hif_handle;
	hal->dev_base_addr = hif_get_dev_ba(hif_handle);
	hal->qdf_dev = qdf_dev;
	/* DMA-consistent area FW reads ring tail pointers from */
	hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent(
		qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
		HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr));
	/* NOTE(review): failure is detected via the paddr here but via the
	 * vaddr for the wrptr allocation below — inconsistent; confirm both
	 * are valid failure indicators for qdf_mem_alloc_consistent().
	 */
	if (!hal->shadow_rdptr_mem_paddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal->shadow_rdptr_mem_paddr allocation failed",
			  __func__);
		goto fail1;
	}
	qdf_mem_zero(hal->shadow_rdptr_mem_vaddr,
		     sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX);

	/* DMA-consistent area FW writes LMAC ring head pointers to */
	hal->shadow_wrptr_mem_vaddr =
		(uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		&(hal->shadow_wrptr_mem_paddr));
	if (!hal->shadow_wrptr_mem_vaddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal->shadow_wrptr_mem_vaddr allocation failed",
			  __func__);
		goto fail2;
	}
	qdf_mem_zero(hal->shadow_wrptr_mem_vaddr,
		     sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS);

	for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
		hal->srng_list[i].initialized = 0;
		hal->srng_list[i].ring_id = i;
	}

	qdf_spinlock_create(&hal->register_access_lock);
	hal->register_window = 0;
	hal->target_type = hal_get_target_type(hal_soc_to_hal_soc_handle(hal));

	hal_target_based_configure(hal);
	/*
	 * Indicate Initialization of srngs to avoid force wake
	 * as umac power collapse is not enabled yet
	 */
	hal->init_phase = true;

	return (void *)hal;

fail2:
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
fail1:
	qdf_mem_free(hal);
fail0:
	return NULL;
}
qdf_export_symbol(hal_attach);

/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
	mem->dev_base_addr = (void *)hal->dev_base_addr;
	mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
	mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
	mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
	mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
	hif_read_phy_mem_base((void *)hal->hif_handle,
			      (qdf_dma_addr_t *)&mem->dev_base_paddr);
	return;
}
qdf_export_symbol(hal_get_meminfo);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * Return: Opaque HAL SOC handle
 *		NULL on failure (if given ring is not available)
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines).
 * DP layer will get hal_soc handle using hif_get_hal_handle()
 *
 */
extern void hal_detach(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	/* Release both DMA-consistent shadow pointer areas from hal_attach */
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0);
	qdf_minidump_remove(hal);
	qdf_mem_free(hal);

	return;
}
qdf_export_symbol(hal_detach);


/**
 * hal_ce_dst_setup - Initialize CE destination ring registers
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 * @ring_num: which CE_DST ring; selects the per-channel register block
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
				    int ring_num)
{
	uint32_t reg_val = 0;
	uint32_t reg_addr;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, CE_DST);

	/* set DEST_MAX_LENGTH according to ce assignment */
	reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_ADDR(
			ring_config->reg_start[R0_INDEX] +
			(ring_num * ring_config->reg_size[R0_INDEX]));

	/* Read-modify-write: only the DEST_MAX_LENGTH field is replaced */
	reg_val = HAL_REG_READ(hal, reg_addr);
	reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	reg_val |= srng->u.dst_ring.max_buffer_length &
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	HAL_REG_WRITE(hal, reg_addr, reg_val);
}

/**
 * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
 * @hal: HAL SOC handle
 * @read: boolean value to indicate if read or write
 * @ix0: pointer to store IX0 reg value
 * @ix1: pointer to store IX1 reg value
 * @ix2: pointer to store IX2 reg value
 * @ix3: pointer to store IX3 reg value
 *
 * Only registers whose pointer argument is non-NULL are accessed.
 */
void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
				uint32_t *ix0, uint32_t *ix1,
				uint32_t *ix2, uint32_t *ix3)
{
	uint32_t reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	if (read) {
		if (ix0) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix0 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix1) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix1 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix2) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix2 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix3) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix3 = HAL_REG_READ(hal, reg_offset);
		}
	} else {
		if (ix0) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			HAL_REG_WRITE_CONFIRM(hal, reg_offset, *ix0);
		}

		if (ix1) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			/* NOTE(review): IX_1 uses HAL_REG_WRITE while
			 * IX_0/2/3 use HAL_REG_WRITE_CONFIRM — confirm
			 * whether this asymmetry is intentional.
			 */
			HAL_REG_WRITE(hal, reg_offset, *ix1);
		}

		if (ix2) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			HAL_REG_WRITE_CONFIRM(hal, reg_offset, *ix2);
		}

		if (ix3) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			HAL_REG_WRITE_CONFIRM(hal, reg_offset, *ix3);
		}
	}
}

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest ring head pointer
 * @srng: sring pointer
 * @paddr: physical address
 *
 * Programs the 64-bit address as two 32-bit LSB/MSB register writes.
 */
void hal_srng_dst_set_hp_paddr(struct hal_srng *srng,
			       uint64_t paddr)
{
	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB,
			   paddr & 0xffffffff);
	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB,
			   paddr >> 32);
}

/**
 * hal_srng_dst_init_hp() - Initialize destination ring
 * head pointer
 * @srng: sring pointer
 * @vaddr: virtual address
 */
void hal_srng_dst_init_hp(struct hal_srng *srng,
			  uint32_t *vaddr)
{
	if (!srng)
		return;

	/* Retarget the cached HP location and sync HW + memory copies */
	srng->u.dst_ring.hp_addr = vaddr;
	SRNG_DST_REG_WRITE(srng, HP, srng->u.dst_ring.cached_hp);

	if (vaddr) {
		*srng->u.dst_ring.hp_addr = srng->u.dst_ring.cached_hp;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "hp_addr=%pK, cached_hp=%d, hp=%d",
			  (void *)srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp,
			  *srng->u.dst_ring.hp_addr);
	}
}

/**
 * hal_srng_hw_init - Private function to initialize SRNG HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
				    struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		hal_srng_src_hw_init(hal, srng);
	else
		hal_srng_dst_hw_init(hal, srng);
}

/* With CONFIG_SHADOW_V2 shadow registers are used and validated;
 * otherwise rings fall back to direct (windowed) register addresses.
 */
#ifdef CONFIG_SHADOW_V2
#define ignore_shadow false
#define CHECK_SHADOW_REGISTERS true
#else
#define ignore_shadow true
#define CHECK_SHADOW_REGISTERS false
#endif

/**
 * hal_srng_setup - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (staring
 * from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.

 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure. Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
		     int mac_id, struct hal_srng_params *ring_params)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	hal_verbose_debug("mac_id %d ring_id %d", mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->initialized) {
		hal_verbose_debug("Ring (ring_type, ring_num) already initialized");
		return NULL;
	}

	/* Cache ring geometry and interrupt moderation parameters */
	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	/* ring_size is in 32-bit words; mask assumes power-of-2 size */
	srng->ring_size = srng->num_entries * srng->entry_size;
	srng->ring_size_mask = srng->ring_size - 1;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	srng->hal_soc = hal_soc;

	for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
			+ (ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
			srng->num_entries) << 2);

	srng->flags = ring_params->flags;
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should we get these flags from caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
		/* reap_hp starts one entry behind hp (ring initially empty) */
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		/* TP of a source ring is consumed from the rdptr shadow area */
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;
		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
			/* No shadow register: write HP register directly
			 * (through the register window if enabled)
			 */
			srng->u.src_ring.hp_addr =
				hal_get_window_address(hal,
						SRNG_SRC_ADDR(srng, HP));

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_SRC_ADDR(srng, HP),
						     srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		/* HP of a destination ring is consumed from the rdptr area */
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
			srng->u.dst_ring.tp_addr =
				hal_get_window_address(hal,
						SRNG_DST_ADDR(srng, TP));

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_DST_ADDR(srng, TP),
						     srng->u.dst_ring.tp_addr);
		}
	}

	/* LMAC ring HW is initialized by FW, not the host */
	if (!(ring_config->lmac_ring)) {
		hal_srng_hw_init(hal, srng);

		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length = ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	srng->srng_event = 0;

	srng->initialized = true;

	return (void *)srng;
}
qdf_export_symbol(hal_srng_setup);

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
768 * @hal_soc: Opaque HAL SOC handle 769 * @hal_srng: Opaque HAL SRNG pointer 770 */ 771 void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl) 772 { 773 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; 774 SRNG_LOCK_DESTROY(&srng->lock); 775 srng->initialized = 0; 776 } 777 qdf_export_symbol(hal_srng_cleanup); 778 779 /** 780 * hal_srng_get_entrysize - Returns size of ring entry in bytes 781 * @hal_soc: Opaque HAL SOC handle 782 * @ring_type: one of the types from hal_ring_type 783 * 784 */ 785 uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type) 786 { 787 struct hal_soc *hal = (struct hal_soc *)hal_soc; 788 struct hal_hw_srng_config *ring_config = 789 HAL_SRNG_CONFIG(hal, ring_type); 790 return ring_config->entry_size << 2; 791 } 792 qdf_export_symbol(hal_srng_get_entrysize); 793 794 /** 795 * hal_srng_max_entries - Returns maximum possible number of ring entries 796 * @hal_soc: Opaque HAL SOC handle 797 * @ring_type: one of the types from hal_ring_type 798 * 799 * Return: Maximum number of entries for the given ring_type 800 */ 801 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type) 802 { 803 struct hal_soc *hal = (struct hal_soc *)hal_soc; 804 struct hal_hw_srng_config *ring_config = 805 HAL_SRNG_CONFIG(hal, ring_type); 806 807 return ring_config->max_size / ring_config->entry_size; 808 } 809 qdf_export_symbol(hal_srng_max_entries); 810 811 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type) 812 { 813 struct hal_soc *hal = (struct hal_soc *)hal_soc; 814 struct hal_hw_srng_config *ring_config = 815 HAL_SRNG_CONFIG(hal, ring_type); 816 817 return ring_config->ring_dir; 818 } 819 820 /** 821 * hal_srng_dump - Dump ring status 822 * @srng: hal srng pointer 823 */ 824 void hal_srng_dump(struct hal_srng *srng) 825 { 826 if (srng->ring_dir == HAL_SRNG_SRC_RING) { 827 hal_debug("=== SRC RING %d ===", srng->ring_id); 828 hal_debug("hp %u, reap_hp %u, tp %u, cached tp %u", 829 srng->u.src_ring.hp, 830 
srng->u.src_ring.reap_hp, 831 *srng->u.src_ring.tp_addr, 832 srng->u.src_ring.cached_tp); 833 } else { 834 hal_debug("=== DST RING %d ===", srng->ring_id); 835 hal_debug("tp %u, hp %u, cached tp %u, loop_cnt %u", 836 srng->u.dst_ring.tp, 837 *srng->u.dst_ring.hp_addr, 838 srng->u.dst_ring.cached_hp, 839 srng->u.dst_ring.loop_cnt); 840 } 841 } 842 843 /** 844 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL 845 * 846 * @hal_soc: Opaque HAL SOC handle 847 * @hal_ring: Ring pointer (Source or Destination ring) 848 * @ring_params: SRNG parameters will be returned through this structure 849 */ 850 extern void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl, 851 hal_ring_handle_t hal_ring_hdl, 852 struct hal_srng_params *ring_params) 853 { 854 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; 855 int i =0; 856 ring_params->ring_id = srng->ring_id; 857 ring_params->ring_dir = srng->ring_dir; 858 ring_params->entry_size = srng->entry_size; 859 860 ring_params->ring_base_paddr = srng->ring_base_paddr; 861 ring_params->ring_base_vaddr = srng->ring_base_vaddr; 862 ring_params->num_entries = srng->num_entries; 863 ring_params->msi_addr = srng->msi_addr; 864 ring_params->msi_data = srng->msi_data; 865 ring_params->intr_timer_thres_us = srng->intr_timer_thres_us; 866 ring_params->intr_batch_cntr_thres_entries = 867 srng->intr_batch_cntr_thres_entries; 868 ring_params->low_threshold = srng->u.src_ring.low_threshold; 869 ring_params->flags = srng->flags; 870 ring_params->ring_id = srng->ring_id; 871 for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) 872 ring_params->hwreg_base[i] = srng->hwreg_base[i]; 873 } 874 qdf_export_symbol(hal_get_srng_params); 875 876 #ifdef FORCE_WAKE 877 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase) 878 { 879 struct hal_soc *hal_soc = (struct hal_soc *)soc; 880 881 hal_soc->init_phase = init_phase; 882 } 883 #endif /* FORCE_WAKE */ 884