1 /* 2 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "hal_hw_headers.h" 20 #include "hal_api.h" 21 #include "target_type.h" 22 #include "wcss_version.h" 23 #include "qdf_module.h" 24 #ifdef QCA_WIFI_QCA8074 25 void hal_qca6290_attach(struct hal_soc *hal); 26 #endif 27 #ifdef QCA_WIFI_QCA8074 28 void hal_qca8074_attach(struct hal_soc *hal); 29 #endif 30 #ifdef QCA_WIFI_QCA8074V2 31 void hal_qca8074v2_attach(struct hal_soc *hal); 32 #endif 33 #ifdef QCA_WIFI_QCA6390 34 void hal_qca6390_attach(struct hal_soc *hal); 35 #endif 36 #ifdef QCA_WIFI_QCA6018 37 void hal_qca6018_attach(struct hal_soc *hal); 38 #endif 39 40 /** 41 * hal_get_srng_ring_id() - get the ring id of a descriped ring 42 * @hal: hal_soc data structure 43 * @ring_type: type enum describing the ring 44 * @ring_num: which ring of the ring type 45 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings) 46 * 47 * Return: the ring id or -EINVAL if the ring does not exist. 
 */
static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
				int ring_num, int mac_id)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	int ring_id;

	if (ring_num >= ring_config->max_rings) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: ring_num exceeded maximum no. of supported rings",
			  __func__);
		/* TODO: This is a programming error. Assert if this happens */
		return -EINVAL;
	}

	if (ring_config->lmac_ring) {
		/* LMAC rings: ids are grouped per mac,
		 * HAL_MAX_RINGS_PER_LMAC ids apart
		 */
		ring_id = ring_config->start_ring_id + ring_num +
			(mac_id * HAL_MAX_RINGS_PER_LMAC);
	} else {
		ring_id = ring_config->start_ring_id + ring_num;
	}

	return ring_id;
}

/* Return the SW ring state (srng_list entry) for a global ring id */
static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
{
	/* TODO: Should we allocate srng structures dynamically? */
	return &(hal->srng_list[ring_id]);
}

/* Index of the HP register within a ring's reg_start[]/reg_size[] arrays */
#define HP_OFFSET_IN_REG_START 1
/* Byte offset from a ring's HP register to its TP register */
#define OFFSET_FROM_HP_TO_TP 4

/**
 * hal_update_srng_hp_tp_address() - point a ring's SW head/tail pointer
 *	address at the shadow register chosen for it
 * @hal_soc: opaque hal_soc handle
 * @shadow_config_index: index of the shadow register backing this ring
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: ring number within the ring type
 *
 * For destination rings the tail pointer is shadowed; for source rings
 * the head pointer is shadowed.
 */
static void hal_update_srng_hp_tp_address(void *hal_soc,
					  int shadow_config_index,
					  int ring_type,
					  int ring_num)
{
	struct hal_srng *srng;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	int ring_id;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	/* mac_id 0: shadow registers are only set up for non-LMAC rings */
	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = hal_get_srng(hal_soc, ring_id);

	if (ring_config->ring_dir == HAL_SRNG_DST_RING) {
		srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal->dev_base_addr;
		hal_debug("tp_addr=%pK dev base addr %pK index %u",
			  srng->u.dst_ring.tp_addr, hal->dev_base_addr,
			  shadow_config_index);
	} else {
		srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal->dev_base_addr;
		hal_debug("hp_addr=%pK dev base addr %pK index %u",
			  srng->u.src_ring.hp_addr,
			  hal->dev_base_addr, shadow_config_index);
	}

}

/**
 * hal_set_one_shadow_config() - claim the next free shadow register and
 *	map it to the HP (src ring) or TP (dst ring) register of one ring
 * @hal_soc: opaque hal_soc handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: ring number within the ring type
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_RESOURCES when all
 *	MAX_SHADOW_REGISTERS shadow registers are already in use.
 */
QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
				     int ring_type,
				     int ring_num)
{
	uint32_t target_register;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type];
	/* next free slot; incremented below once the slot is known good */
	int shadow_config_index = hal->num_shadow_registers_configured;

	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
		QDF_ASSERT(0);
		return QDF_STATUS_E_RESOURCES;
	}

	hal->num_shadow_registers_configured++;

	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
			    *ring_num);

	/* if the ring is a dst ring, we need to shadow the tail pointer */
	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
		target_register += OFFSET_FROM_HP_TO_TP;

	hal->shadow_config[shadow_config_index].addr = target_register;

	/* update hp/tp addr in the hal_soc structure*/
	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
				      ring_num);

	hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x, ring_type %d, ring num %d",
		  target_register,
		  SHADOW_REGISTER(shadow_config_index),
		  shadow_config_index,
		  ring_type, ring_num);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_one_shadow_config);

/**
 * hal_construct_shadow_config() - set up shadow registers for every ring
 *	that needs one
 * @hal_soc: opaque hal_soc handle
 *
 * CE rings are skipped, as are LMAC rings (whose ring pointers are
 * exchanged with FW through shared memory instead — see hal_srng_setup).
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS hal_construct_shadow_config(void *hal_soc)
{
	int ring_type, ring_num;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
		struct hal_hw_srng_config *srng_config =
			&hal->hw_srng_table[ring_type];

		if (ring_type == CE_SRC ||
		    ring_type == CE_DST ||
		    ring_type == CE_DST_STATUS)
			continue;

		if (srng_config->lmac_ring)
			continue;

		for (ring_num = 0; ring_num < srng_config->max_rings;
		     ring_num++)
			hal_set_one_shadow_config(hal_soc, ring_type, ring_num);
	}

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_shadow_config); 182 183 void hal_get_shadow_config(void *hal_soc, 184 struct pld_shadow_reg_v2_cfg **shadow_config, 185 int *num_shadow_registers_configured) 186 { 187 struct hal_soc *hal = (struct hal_soc *)hal_soc; 188 189 *shadow_config = hal->shadow_config; 190 *num_shadow_registers_configured = 191 hal->num_shadow_registers_configured; 192 193 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 194 "%s", __func__); 195 } 196 197 qdf_export_symbol(hal_get_shadow_config); 198 199 200 static void hal_validate_shadow_register(struct hal_soc *hal, 201 uint32_t *destination, 202 uint32_t *shadow_address) 203 { 204 unsigned int index; 205 uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr; 206 int destination_ba_offset = 207 ((char *)destination) - (char *)hal->dev_base_addr; 208 209 index = shadow_address - shadow_0_offset; 210 211 if (index >= MAX_SHADOW_REGISTERS) { 212 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 213 "%s: index %x out of bounds", __func__, index); 214 goto error; 215 } else if (hal->shadow_config[index].addr != destination_ba_offset) { 216 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 217 "%s: sanity check failure, expected %x, found %x", 218 __func__, destination_ba_offset, 219 hal->shadow_config[index].addr); 220 goto error; 221 } 222 return; 223 error: 224 qdf_print("%s: baddr %pK, desination %pK, shadow_address %pK s0offset %pK index %x", 225 __func__, hal->dev_base_addr, destination, shadow_address, 226 shadow_0_offset, index); 227 QDF_BUG(0); 228 return; 229 } 230 231 static void hal_target_based_configure(struct hal_soc *hal) 232 { 233 switch (hal->target_type) { 234 #ifdef QCA_WIFI_QCA6290 235 case TARGET_TYPE_QCA6290: 236 hal->use_register_windowing = true; 237 hal_qca6290_attach(hal); 238 break; 239 #endif 240 #ifdef QCA_WIFI_QCA6390 241 case TARGET_TYPE_QCA6390: 242 hal->use_register_windowing = true; 243 hal_qca6390_attach(hal); 244 break; 245 #endif 246 #if 
defined(QCA_WIFI_QCA8074) && defined(CONFIG_WIN) 247 case TARGET_TYPE_QCA8074: 248 hal_qca8074_attach(hal); 249 break; 250 #endif 251 252 #if defined(QCA_WIFI_QCA8074V2) && defined(CONFIG_WIN) 253 case TARGET_TYPE_QCA8074V2: 254 hal_qca8074v2_attach(hal); 255 break; 256 #endif 257 258 #if defined(QCA_WIFI_QCA6018) && defined(CONFIG_WIN) 259 case TARGET_TYPE_QCA6018: 260 hal_qca6018_attach(hal); 261 break; 262 #endif 263 default: 264 break; 265 } 266 } 267 268 uint32_t hal_get_target_type(struct hal_soc *hal) 269 { 270 struct hif_target_info *tgt_info = 271 hif_get_target_info_handle(hal->hif_handle); 272 273 return tgt_info->target_type; 274 } 275 276 qdf_export_symbol(hal_get_target_type); 277 278 /** 279 * hal_attach - Initialize HAL layer 280 * @hif_handle: Opaque HIF handle 281 * @qdf_dev: QDF device 282 * 283 * Return: Opaque HAL SOC handle 284 * NULL on failure (if given ring is not available) 285 * 286 * This function should be called as part of HIF initialization (for accessing 287 * copy engines). 
DP layer will get hal_soc handle using hif_get_hal_handle() 288 * 289 */ 290 void *hal_attach(void *hif_handle, qdf_device_t qdf_dev) 291 { 292 struct hal_soc *hal; 293 int i; 294 295 hal = qdf_mem_malloc(sizeof(*hal)); 296 297 if (!hal) { 298 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 299 "%s: hal_soc allocation failed", __func__); 300 goto fail0; 301 } 302 hal->hif_handle = hif_handle; 303 hal->dev_base_addr = hif_get_dev_ba(hif_handle); 304 hal->qdf_dev = qdf_dev; 305 hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent( 306 qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) * 307 HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr)); 308 if (!hal->shadow_rdptr_mem_paddr) { 309 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 310 "%s: hal->shadow_rdptr_mem_paddr allocation failed", 311 __func__); 312 goto fail1; 313 } 314 315 hal->shadow_wrptr_mem_vaddr = 316 (uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev, 317 sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS, 318 &(hal->shadow_wrptr_mem_paddr)); 319 if (!hal->shadow_wrptr_mem_vaddr) { 320 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 321 "%s: hal->shadow_wrptr_mem_vaddr allocation failed", 322 __func__); 323 goto fail2; 324 } 325 326 for (i = 0; i < HAL_SRNG_ID_MAX; i++) { 327 hal->srng_list[i].initialized = 0; 328 hal->srng_list[i].ring_id = i; 329 } 330 331 qdf_spinlock_create(&hal->register_access_lock); 332 hal->register_window = 0; 333 hal->target_type = hal_get_target_type(hal); 334 335 hal_target_based_configure(hal); 336 337 return (void *)hal; 338 339 fail2: 340 qdf_mem_free_consistent(qdf_dev, qdf_dev->dev, 341 sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX, 342 hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0); 343 fail1: 344 qdf_mem_free(hal); 345 fail0: 346 return NULL; 347 } 348 qdf_export_symbol(hal_attach); 349 350 /** 351 * hal_mem_info - Retrieve hal memory base address 352 * 353 * @hal_soc: Opaque HAL SOC 
handle
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem )
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	mem->dev_base_addr = (void *)hal->dev_base_addr;
	mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
	mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
	mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
	mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
	/* physical BAR base is fetched from HIF rather than cached here */
	hif_read_phy_mem_base(hal->hif_handle, (qdf_dma_addr_t *)&mem->dev_base_paddr);
	return;
}
qdf_export_symbol(hal_get_meminfo);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * Frees the consistent (DMA) shadow read/write pointer areas allocated
 * by hal_attach() and then the hal_soc itself.
 *
 * Return: None
 */
extern void hal_detach(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0);
	qdf_mem_free(hal);

	return;
}
qdf_export_symbol(hal_detach);


/**
 * hal_ce_dst_setup - Initialize CE destination ring registers
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 * @ring_num: CE_DST ring number, used to locate this ring's registers
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
				    int ring_num)
{
	uint32_t reg_val = 0;
	uint32_t reg_addr;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, CE_DST);

	/* set DEST_MAX_LENGTH according to ce assignment */
	reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_ADDR(
			ring_config->reg_start[R0_INDEX] +
			(ring_num * ring_config->reg_size[R0_INDEX]));

	/* read-modify-write: only the DEST_MAX_LENGTH field is replaced */
	reg_val = HAL_REG_READ(hal, reg_addr);
	reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	reg_val |= srng->u.dst_ring.max_buffer_length &
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	HAL_REG_WRITE(hal, reg_addr, reg_val);
}

/**
 * hal_reo_remap_IX0 - Remap REO ring destination
 * @hal: HAL SOC handle
 * @remap_val: Remap value
 */
void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val)
{
	uint32_t reg_offset = HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET);
	HAL_REG_WRITE(hal, reg_offset, remap_val);
}

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest ring head pointer
 * @srng: sring pointer
 * @paddr: physical address
 */
void hal_srng_dst_set_hp_paddr(struct hal_srng *srng,
			       uint64_t paddr)
{
	/* 64-bit address is programmed as two 32-bit halves */
	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB,
			   paddr & 0xffffffff);
	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB,
			   paddr >> 32);
}

/**
 * hal_srng_dst_init_hp() - Initialize destination ring head pointer
 * @srng: sring pointer
 * @vaddr: virtual address of the new head pointer location (may be NULL)
 */
void hal_srng_dst_init_hp(struct hal_srng *srng,
			  uint32_t *vaddr)
{
	if (!srng)
		return;

	srng->u.dst_ring.hp_addr = vaddr;
	/* re-sync the HW HP register with the SW cached value */
	SRNG_DST_REG_WRITE(srng, HP, srng->u.dst_ring.cached_hp);

	if (vaddr) {
		*srng->u.dst_ring.hp_addr = srng->u.dst_ring.cached_hp;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "hp_addr=%pK, cached_hp=%d, hp=%d",
			  (void *)srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp,
			  *srng->u.dst_ring.hp_addr);
	}
}

/**
 * hal_srng_hw_init - Private function to initialize SRNG HW
 * @hal_soc: HAL SOC handle
 *
@srng: SRNG ring pointer
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
				    struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		hal_srng_src_hw_init(hal, srng);
	else
		hal_srng_dst_hw_init(hal, srng);
}

/* With shadow-v2, ring pointers come from shadow registers and are
 * cross-checked; otherwise the HP/TP registers are used directly.
 */
#ifdef CONFIG_SHADOW_V2
#define ignore_shadow false
#define CHECK_SHADOW_REGISTERS true
#else
#define ignore_shadow true
#define CHECK_SHADOW_REGISTERS false
#endif

/**
 * hal_srng_setup - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (staring
 * from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.

 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure. Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
		     int mac_id, struct hal_srng_params *ring_params)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: mac_id %d ring_id %d",
		  __func__, mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->initialized) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Ring (ring_type, ring_num) already initialized",
			  __func__);
		return NULL;
	}

	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	/* entry_size and ring_size are in 4-byte (dword) units — see the
	 * << 2 conversions below and in hal_srng_get_entrysize()
	 */
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	srng->ring_size = srng->num_entries * srng->entry_size;
	/* NOTE(review): mask arithmetic assumes ring_size is a power of
	 * two — TODO confirm callers guarantee this
	 */
	srng->ring_size_mask = srng->ring_size - 1;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	srng->hal_soc = hal_soc;

	/* per-ring register base for each register group */
	for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
			+ (ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
		     srng->num_entries) << 2);

	srng->flags = ring_params->flags;
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should we get these flags from caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
		/* reap_hp starts one entry behind hp (ring full condition
		 * is reap_hp == tp)
		 */
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		/* HW updates the src ring TP via the rdptr DMA area */
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;
		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
			/* no shadow register configured for this ring:
			 * fall back to the HP register itself
			 */
			srng->u.src_ring.hp_addr = SRNG_SRC_ADDR(srng, HP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_SRC_ADDR(srng, HP),
						     srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		/* HW updates the dst ring HP via the rdptr DMA area */
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
			/* no shadow register configured for this ring:
			 * fall back to the TP register itself
			 */
			srng->u.dst_ring.tp_addr = SRNG_DST_ADDR(srng, TP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_DST_ADDR(srng, TP),
						     srng->u.dst_ring.tp_addr);
		}
	}

	/* LMAC ring HW is initialized by FW; SW only programs host rings */
	if (!(ring_config->lmac_ring)) {
		hal_srng_hw_init(hal, srng);

		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length = ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	srng->initialized = true;

	return (void *)srng;
}
qdf_export_symbol(hal_srng_setup);

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
660 * @hal_soc: Opaque HAL SOC handle 661 * @hal_srng: Opaque HAL SRNG pointer 662 */ 663 void hal_srng_cleanup(void *hal_soc, void *hal_srng) 664 { 665 struct hal_srng *srng = (struct hal_srng *)hal_srng; 666 SRNG_LOCK_DESTROY(&srng->lock); 667 srng->initialized = 0; 668 } 669 qdf_export_symbol(hal_srng_cleanup); 670 671 /** 672 * hal_srng_get_entrysize - Returns size of ring entry in bytes 673 * @hal_soc: Opaque HAL SOC handle 674 * @ring_type: one of the types from hal_ring_type 675 * 676 */ 677 uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type) 678 { 679 struct hal_soc *hal = (struct hal_soc *)hal_soc; 680 struct hal_hw_srng_config *ring_config = 681 HAL_SRNG_CONFIG(hal, ring_type); 682 return ring_config->entry_size << 2; 683 } 684 qdf_export_symbol(hal_srng_get_entrysize); 685 686 /** 687 * hal_srng_max_entries - Returns maximum possible number of ring entries 688 * @hal_soc: Opaque HAL SOC handle 689 * @ring_type: one of the types from hal_ring_type 690 * 691 * Return: Maximum number of entries for the given ring_type 692 */ 693 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type) 694 { 695 struct hal_soc *hal = (struct hal_soc *)hal_soc; 696 struct hal_hw_srng_config *ring_config = 697 HAL_SRNG_CONFIG(hal, ring_type); 698 699 return ring_config->max_size / ring_config->entry_size; 700 } 701 qdf_export_symbol(hal_srng_max_entries); 702 703 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type) 704 { 705 struct hal_soc *hal = (struct hal_soc *)hal_soc; 706 struct hal_hw_srng_config *ring_config = 707 HAL_SRNG_CONFIG(hal, ring_type); 708 709 return ring_config->ring_dir; 710 } 711 712 /** 713 * hal_srng_dump - Dump ring status 714 * @srng: hal srng pointer 715 */ 716 void hal_srng_dump(struct hal_srng *srng) 717 { 718 if (srng->ring_dir == HAL_SRNG_SRC_RING) { 719 qdf_print("=== SRC RING %d ===", srng->ring_id); 720 qdf_print("hp %u, reap_hp %u, tp %u, cached tp %u", 721 srng->u.src_ring.hp, 722 srng->u.src_ring.reap_hp, 723 
*srng->u.src_ring.tp_addr, 724 srng->u.src_ring.cached_tp); 725 } else { 726 qdf_print("=== DST RING %d ===", srng->ring_id); 727 qdf_print("tp %u, hp %u, cached tp %u, loop_cnt %u", 728 srng->u.dst_ring.tp, 729 *srng->u.dst_ring.hp_addr, 730 srng->u.dst_ring.cached_hp, 731 srng->u.dst_ring.loop_cnt); 732 } 733 } 734 735 /** 736 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL 737 * 738 * @hal_soc: Opaque HAL SOC handle 739 * @hal_ring: Ring pointer (Source or Destination ring) 740 * @ring_params: SRNG parameters will be returned through this structure 741 */ 742 extern void hal_get_srng_params(void *hal_soc, void *hal_ring, 743 struct hal_srng_params *ring_params) 744 { 745 struct hal_srng *srng = (struct hal_srng *)hal_ring; 746 int i =0; 747 ring_params->ring_id = srng->ring_id; 748 ring_params->ring_dir = srng->ring_dir; 749 ring_params->entry_size = srng->entry_size; 750 751 ring_params->ring_base_paddr = srng->ring_base_paddr; 752 ring_params->ring_base_vaddr = srng->ring_base_vaddr; 753 ring_params->num_entries = srng->num_entries; 754 ring_params->msi_addr = srng->msi_addr; 755 ring_params->msi_data = srng->msi_data; 756 ring_params->intr_timer_thres_us = srng->intr_timer_thres_us; 757 ring_params->intr_batch_cntr_thres_entries = 758 srng->intr_batch_cntr_thres_entries; 759 ring_params->low_threshold = srng->u.src_ring.low_threshold; 760 ring_params->flags = srng->flags; 761 ring_params->ring_id = srng->ring_id; 762 for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) 763 ring_params->hwreg_base[i] = srng->hwreg_base[i]; 764 } 765 qdf_export_symbol(hal_get_srng_params); 766