1 /* 2 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "hal_hw_headers.h" 20 #include "hal_api.h" 21 #include "target_type.h" 22 #include "wcss_version.h" 23 #include "qdf_module.h" 24 #ifdef QCA_WIFI_QCA8074 25 void hal_qca6290_attach(struct hal_soc *hal); 26 #endif 27 #ifdef QCA_WIFI_QCA8074 28 void hal_qca8074_attach(struct hal_soc *hal); 29 #endif 30 #ifdef QCA_WIFI_QCA8074V2 31 void hal_qca8074v2_attach(struct hal_soc *hal); 32 #endif 33 #ifdef QCA_WIFI_QCA6390 34 void hal_qca6390_attach(struct hal_soc *hal); 35 #endif 36 37 /** 38 * hal_get_srng_ring_id() - get the ring id of a descriped ring 39 * @hal: hal_soc data structure 40 * @ring_type: type enum describing the ring 41 * @ring_num: which ring of the ring type 42 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings) 43 * 44 * Return: the ring id or -EINVAL if the ring does not exist. 
 */
static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
				int ring_num, int mac_id)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	int ring_id;

	/* NOTE(review): negative ring_num is not rejected here — callers are
	 * trusted to pass a valid index; confirm at call sites.
	 */
	if (ring_num >= ring_config->max_rings) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: ring_num exceeded maximum no. of supported rings",
			  __func__);
		/* TODO: This is a programming error. Assert if this happens */
		return -EINVAL;
	}

	/* LMAC rings occupy a contiguous block of HAL_MAX_RINGS_PER_LMAC
	 * ids per mac; non-LMAC rings are indexed directly from the type's
	 * start id.
	 */
	if (ring_config->lmac_ring) {
		ring_id = ring_config->start_ring_id + ring_num +
			(mac_id * HAL_MAX_RINGS_PER_LMAC);
	} else {
		ring_id = ring_config->start_ring_id + ring_num;
	}

	return ring_id;
}

/**
 * hal_get_srng() - map a ring id to its statically allocated SRNG state
 * @hal: hal_soc data structure
 * @ring_id: ring id returned by hal_get_srng_ring_id()
 *
 * Return: pointer into hal->srng_list; the caller must pass a valid id
 * (no bounds check is performed here).
 */
static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
{
	/* TODO: Should we allocate srng structures dynamically? */
	return &(hal->srng_list[ring_id]);
}

/* Index of the head-pointer register within a ring's reg_start[] group,
 * and the fixed byte offset from the HP register to the TP register.
 */
#define HP_OFFSET_IN_REG_START 1
#define OFFSET_FROM_HP_TO_TP 4

/**
 * hal_update_srng_hp_tp_address() - point a ring's HP/TP at a shadow register
 * @hal_soc: opaque HAL SOC handle
 * @shadow_config_index: index of the shadow register being assigned
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 *
 * Destination rings shadow the tail pointer; source rings shadow the
 * head pointer. Silently returns if the ring id cannot be resolved.
 */
static void hal_update_srng_hp_tp_address(void *hal_soc,
					  int shadow_config_index,
					  int ring_type,
					  int ring_num)
{
	struct hal_srng *srng;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	int ring_id;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->ring_dir == HAL_SRNG_DST_RING)
		srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal->dev_base_addr;
	else
		srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal->dev_base_addr;
}

/**
 * hal_set_one_shadow_config() - allocate the next free shadow register for
 *	one ring's HP/TP
 * @hal_soc: opaque HAL SOC handle
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 *
 * Consumes one entry of hal->shadow_config and records the target
 * register offset the shadow maps to, then retargets the ring's
 * in-memory HP/TP pointer at that shadow register.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_RESOURCES once all
 * MAX_SHADOW_REGISTERS entries are in use.
 */
QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
				     int ring_type,
				     int ring_num)
{
	uint32_t target_register;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type];
	/* Capture the index before the counter is advanced below */
	int shadow_config_index = hal->num_shadow_registers_configured;

	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
		QDF_ASSERT(0);
		return QDF_STATUS_E_RESOURCES;
	}

	hal->num_shadow_registers_configured++;

	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
			    *ring_num);

	/* if the ring is a dst ring, we need to shadow the tail pointer */
	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
		target_register += OFFSET_FROM_HP_TO_TP;

	hal->shadow_config[shadow_config_index].addr = target_register;

	/* update hp/tp addr in the hal_soc structure*/
	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
				      ring_num);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: target_reg %x, shadow_index %x, ring_type %d, ring num %d",
		  __func__, target_register, shadow_config_index,
		  ring_type, ring_num);

	return QDF_STATUS_SUCCESS;
}

/**
 * hal_construct_shadow_config() - assign shadow registers to all eligible
 *	rings
 * @hal_soc: opaque HAL SOC handle
 *
 * Walks every ring of every type, skipping CE rings (configured
 * elsewhere) and LMAC rings (FW-updated via shared memory, so no shadow
 * register is needed).
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS hal_construct_shadow_config(void *hal_soc)
{
	int ring_type, ring_num;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
		struct hal_hw_srng_config *srng_config =
			&hal->hw_srng_table[ring_type];

		if (ring_type == CE_SRC ||
		    ring_type == CE_DST ||
		    ring_type == CE_DST_STATUS)
			continue;

		if (srng_config->lmac_ring)
			continue;

		for (ring_num = 0; ring_num < srng_config->max_rings;
		     ring_num++)
			hal_set_one_shadow_config(hal_soc, ring_type, ring_num);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * hal_get_shadow_config() - expose the shadow register table to the caller
 * @hal_soc: opaque HAL SOC handle
 * @shadow_config: out - pointer to the internal shadow config array
 * @num_shadow_registers_configured: out - number of valid entries
 */
void hal_get_shadow_config(void *hal_soc,
			   struct pld_shadow_reg_v2_cfg **shadow_config,
			   int *num_shadow_registers_configured)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	*shadow_config = hal->shadow_config;
	*num_shadow_registers_configured =
		hal->num_shadow_registers_configured;

	/* NOTE(review): this trace carries no information beyond the
	 * function name yet logs at ERROR level — presumably a leftover
	 * debug marker; consider demoting.
	 */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s", __func__);
}

/**
 * hal_validate_shadow_register() - sanity-check a ring's shadow mapping
 * @hal: hal_soc data structure
 * @destination: the real target register address (in mapped BAR space)
 * @shadow_address: the shadow register address the ring currently uses
 *
 * Verifies that @shadow_address falls within the shadow register block
 * and that the recorded shadow_config entry maps back to @destination.
 * On mismatch, dumps the addresses and triggers QDF_BUG().
 */
static void hal_validate_shadow_register(struct hal_soc *hal,
					 uint32_t *destination,
					 uint32_t *shadow_address)
{
	unsigned int index;
	uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr;
	/* Offset of the target register from the device base address */
	int destination_ba_offset =
		((char *)destination) - (char *)hal->dev_base_addr;

	/* uint32_t pointer difference: index in whole shadow registers */
	index = shadow_address - shadow_0_offset;

	if (index >= MAX_SHADOW_REGISTERS) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: index %x out of bounds", __func__, index);
		goto error;
	} else if (hal->shadow_config[index].addr != destination_ba_offset) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: sanity check failure, expected %x, found %x",
			  __func__, destination_ba_offset,
			  hal->shadow_config[index].addr);
		goto error;
	}
	return;
error:
	qdf_print("%s: baddr %pK, desination %pK, shadow_address %pK s0offset %pK index %x",
		  __func__, hal->dev_base_addr, destination, shadow_address,
		  shadow_0_offset, index);
	QDF_BUG(0);
	return;
}

/**
 * hal_target_based_configure() - apply per-target HAL configuration
 * @hal: hal_soc data structure (hal->target_type must already be set)
 *
 * Dispatches to the target-specific attach routine; QCA6290/QCA6390
 * additionally enable register windowing. Unknown targets are left
 * unconfigured (silent default).
 */
static void hal_target_based_configure(struct hal_soc *hal)
{
	switch (hal->target_type) {
#ifdef QCA_WIFI_QCA6290
	case TARGET_TYPE_QCA6290:
		hal->use_register_windowing = true;
		hal_qca6290_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		hal->use_register_windowing = true;
		hal_qca6390_attach(hal);
		break;
#endif
#if defined(QCA_WIFI_QCA8074) && defined(CONFIG_WIN)
	case TARGET_TYPE_QCA8074:
		hal_qca8074_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA8074V2) && defined(CONFIG_WIN)
	case TARGET_TYPE_QCA8074V2:
		hal_qca8074v2_attach(hal);
		break;
#endif
	default:
		break;
	}
}

/**
 * hal_get_target_type() - read the target type from HIF
 * @hal: hal_soc data structure (hal->hif_handle must be valid)
 *
 * Return: target type as reported by the HIF target info
 */
uint32_t hal_get_target_type(struct hal_soc *hal)
{
	struct hif_target_info *tgt_info =
		hif_get_target_info_handle(hal->hif_handle);

	return tgt_info->target_type;
}

qdf_export_symbol(hal_get_target_type);

/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle
 *	   NULL on failure (if given ring is not available)
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
void *hal_attach(void *hif_handle, qdf_device_t qdf_dev)
{
	struct hal_soc *hal;
	int i;

	hal = qdf_mem_malloc(sizeof(*hal));

	if (!hal) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal_soc allocation failed", __func__);
		goto fail0;
	}
	hal->hif_handle = hif_handle;
	hal->dev_base_addr = hif_get_dev_ba(hif_handle);
	hal->qdf_dev = qdf_dev;
	/* DMA-coherent block holding one HW-updated read pointer per ring */
	hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent(
		qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
		HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr));
	/* NOTE(review): failure here is detected via the paddr out-param,
	 * while the wrptr allocation below checks the returned vaddr —
	 * confirm both are equivalent for qdf_mem_alloc_consistent().
	 */
	if (!hal->shadow_rdptr_mem_paddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal->shadow_rdptr_mem_paddr allocation failed",
			  __func__);
		goto fail1;
	}

	/* DMA-coherent block holding one FW-visible write pointer per
	 * LMAC ring
	 */
	hal->shadow_wrptr_mem_vaddr =
		(uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		&(hal->shadow_wrptr_mem_paddr));
	if (!hal->shadow_wrptr_mem_vaddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal->shadow_wrptr_mem_vaddr allocation failed",
			  __func__);
		goto fail2;
	}

	/* Mark every SRNG slot unused; ring_id doubles as the list index */
	for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
		hal->srng_list[i].initialized = 0;
		hal->srng_list[i].ring_id = i;
	}

	qdf_spinlock_create(&hal->register_access_lock);
	hal->register_window = 0;
	hal->target_type = hal_get_target_type(hal);

	hal_target_based_configure(hal);

	return (void *)hal;

fail2:
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
fail1:
	qdf_mem_free(hal);
fail0:
	return NULL;
}
qdf_export_symbol(hal_attach);

/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem )
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	mem->dev_base_addr = (void *)hal->dev_base_addr;
	mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
	mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
	mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
	mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
	/* Physical BAR base is obtained from HIF, not cached in hal_soc */
	hif_read_phy_mem_base(hal->hif_handle, (qdf_dma_addr_t *)&mem->dev_base_paddr);
	return;
}
qdf_export_symbol(hal_get_meminfo);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * Return: Opaque HAL SOC handle
 *	   NULL on failure (if given ring is not available)
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines).
 * DP layer will get hal_soc handle using hif_get_hal_handle()
 */
extern void hal_detach(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	/* Release the DMA-coherent pointer blocks allocated in hal_attach() */
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0);
	qdf_mem_free(hal);

	return;
}
qdf_export_symbol(hal_detach);


/**
 * hal_ce_dst_setup - Initialize CE destination ring registers
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @ring_num: which CE destination ring
 *
 * Programs DEST_MAX_LENGTH for the given CE destination ring from
 * srng->u.dst_ring.max_buffer_length via read-modify-write of the
 * DEST_CTRL register.
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
				    int ring_num)
{
	uint32_t reg_val = 0;
	uint32_t reg_addr;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, CE_DST);

	/* set DEST_MAX_LENGTH according to ce assignment */
	reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_ADDR(
			ring_config->reg_start[R0_INDEX] +
			(ring_num * ring_config->reg_size[R0_INDEX]));

	reg_val = HAL_REG_READ(hal, reg_addr);
	reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	reg_val |= srng->u.dst_ring.max_buffer_length &
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	HAL_REG_WRITE(hal, reg_addr, reg_val);
}

/**
 * hal_reo_remap_IX0 - Remap REO ring destination
 * @hal: HAL SOC handle
 * @remap_val: Remap value to write to the IX_0 destination ring control
 *	       register
 */
void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val)
{
	uint32_t reg_offset = HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET);

	HAL_REG_WRITE(hal, reg_offset, remap_val);
}

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest ring head pointer
 * @srng: srng
 * @paddr: physical address (split into LSB/MSB register writes)
 */
void hal_srng_dst_set_hp_paddr(struct hal_srng *srng,
			       uint64_t paddr)
{
	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB,
			   paddr & 0xffffffff);
	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB,
			   paddr >> 32);
}

/**
 * hal_srng_dst_init_hp() - Initilaize destination ring head pointer
 * @srng: srng pointer
 * @vaddr: virtual address to use as the ring's head-pointer location
 *
 * Retargets hp_addr at @vaddr and seeds both the HW HP register and the
 * new location with the cached head pointer.
 */
void hal_srng_dst_init_hp(struct hal_srng *srng,
			  uint32_t *vaddr)
{
	srng->u.dst_ring.hp_addr = vaddr;
	SRNG_DST_REG_WRITE(srng, HP, srng->u.dst_ring.cached_hp);
	*(srng->u.dst_ring.hp_addr) = srng->u.dst_ring.cached_hp;

	/* NOTE(review): informational values logged at ERROR level —
	 * presumably a debug leftover; consider demoting.
	 */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "hp_addr=%pK, cached_hp=%d, hp=%d",
		  (void *)srng->u.dst_ring.hp_addr, srng->u.dst_ring.cached_hp,
		  *(srng->u.dst_ring.hp_addr));
}

/**
 * hal_srng_hw_init - Private function to initialize SRNG HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 *
 * Dispatches to the source- or destination-ring HW init based on the
 * ring direction.
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
				    struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		hal_srng_src_hw_init(hal, srng);
	else
		hal_srng_dst_hw_init(hal, srng);
}

/* With shadow-register support (CONFIG_SHADOW_V2) shadow mappings are
 * honoured and validated; without it they are ignored and HP/TP fall
 * back to direct register addresses.
 */
#ifdef CONFIG_SHADOW_V2
#define ignore_shadow false
#define CHECK_SHADOW_REGISTERS true
#else
#define ignore_shadow true
#define CHECK_SHADOW_REGISTERS false
#endif

/**
 * hal_srng_setup - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 * from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure. Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: mac_id %d ring_id %d",
		  __func__, mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->initialized) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Ring (ring_type, ring_num) already initialized",
			  __func__);
		return NULL;
	}

	/* Copy caller-supplied parameters into the SW ring state */
	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	srng->ring_size = srng->num_entries * srng->entry_size;
	/* NOTE(review): the mask assumes ring_size is a power of two —
	 * confirm callers guarantee this.
	 */
	srng->ring_size_mask = srng->ring_size - 1;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	srng->hal_soc = hal_soc;

	/* Precompute per-register-group base addresses for this ring */
	for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
			+ (ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory; the "<< 2" suggests entry_size is
	 * counted in 4-byte words (consistent with hal_srng_get_entrysize)
	 */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
		srng->num_entries) << 2);

	srng->flags = ring_params->flags;
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should we get these flags from caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		/* SW tail pointer mirrors the HW-updated read pointer */
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;
		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
			/* No shadow register assigned: write HP directly */
			srng->u.src_ring.hp_addr = SRNG_SRC_ADDR(srng, HP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_SRC_ADDR(srng, HP),
						     srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		/* SW head pointer mirrors the HW-updated read pointer */
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
			/* No shadow register assigned: write TP directly */
			srng->u.dst_ring.tp_addr = SRNG_DST_ADDR(srng, TP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_DST_ADDR(srng, TP),
						     srng->u.dst_ring.tp_addr);
		}
	}

	/* LMAC ring HW init is owned by FW; host initializes the rest */
	if (!(ring_config->lmac_ring)) {
		hal_srng_hw_init(hal, srng);

		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length = ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	srng->initialized = true;

	return (void *)srng;
}
qdf_export_symbol(hal_srng_setup);

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
628 * @hal_soc: Opaque HAL SOC handle 629 * @hal_srng: Opaque HAL SRNG pointer 630 */ 631 void hal_srng_cleanup(void *hal_soc, void *hal_srng) 632 { 633 struct hal_srng *srng = (struct hal_srng *)hal_srng; 634 SRNG_LOCK_DESTROY(&srng->lock); 635 srng->initialized = 0; 636 } 637 qdf_export_symbol(hal_srng_cleanup); 638 639 /** 640 * hal_srng_get_entrysize - Returns size of ring entry in bytes 641 * @hal_soc: Opaque HAL SOC handle 642 * @ring_type: one of the types from hal_ring_type 643 * 644 */ 645 uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type) 646 { 647 struct hal_soc *hal = (struct hal_soc *)hal_soc; 648 struct hal_hw_srng_config *ring_config = 649 HAL_SRNG_CONFIG(hal, ring_type); 650 return ring_config->entry_size << 2; 651 } 652 qdf_export_symbol(hal_srng_get_entrysize); 653 654 /** 655 * hal_srng_max_entries - Returns maximum possible number of ring entries 656 * @hal_soc: Opaque HAL SOC handle 657 * @ring_type: one of the types from hal_ring_type 658 * 659 * Return: Maximum number of entries for the given ring_type 660 */ 661 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type) 662 { 663 struct hal_soc *hal = (struct hal_soc *)hal_soc; 664 struct hal_hw_srng_config *ring_config = 665 HAL_SRNG_CONFIG(hal, ring_type); 666 667 return ring_config->max_size / ring_config->entry_size; 668 } 669 qdf_export_symbol(hal_srng_max_entries); 670 671 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type) 672 { 673 struct hal_soc *hal = (struct hal_soc *)hal_soc; 674 struct hal_hw_srng_config *ring_config = 675 HAL_SRNG_CONFIG(hal, ring_type); 676 677 return ring_config->ring_dir; 678 } 679 680 /** 681 * hal_srng_dump - Dump ring status 682 * @srng: hal srng pointer 683 */ 684 void hal_srng_dump(struct hal_srng *srng) 685 { 686 if (srng->ring_dir == HAL_SRNG_SRC_RING) { 687 qdf_print("=== SRC RING %d ===", srng->ring_id); 688 qdf_print("hp %u, reap_hp %u, tp %u, cached tp %u", 689 srng->u.src_ring.hp, 690 srng->u.src_ring.reap_hp, 691 
*srng->u.src_ring.tp_addr, 692 srng->u.src_ring.cached_tp); 693 } else { 694 qdf_print("=== DST RING %d ===", srng->ring_id); 695 qdf_print("tp %u, hp %u, cached tp %u, loop_cnt %u", 696 srng->u.dst_ring.tp, 697 *srng->u.dst_ring.hp_addr, 698 srng->u.dst_ring.cached_hp, 699 srng->u.dst_ring.loop_cnt); 700 } 701 } 702 703 /** 704 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL 705 * 706 * @hal_soc: Opaque HAL SOC handle 707 * @hal_ring: Ring pointer (Source or Destination ring) 708 * @ring_params: SRNG parameters will be returned through this structure 709 */ 710 extern void hal_get_srng_params(void *hal_soc, void *hal_ring, 711 struct hal_srng_params *ring_params) 712 { 713 struct hal_srng *srng = (struct hal_srng *)hal_ring; 714 int i =0; 715 ring_params->ring_id = srng->ring_id; 716 ring_params->ring_dir = srng->ring_dir; 717 ring_params->entry_size = srng->entry_size; 718 719 ring_params->ring_base_paddr = srng->ring_base_paddr; 720 ring_params->ring_base_vaddr = srng->ring_base_vaddr; 721 ring_params->num_entries = srng->num_entries; 722 ring_params->msi_addr = srng->msi_addr; 723 ring_params->msi_data = srng->msi_data; 724 ring_params->intr_timer_thres_us = srng->intr_timer_thres_us; 725 ring_params->intr_batch_cntr_thres_entries = 726 srng->intr_batch_cntr_thres_entries; 727 ring_params->low_threshold = srng->u.src_ring.low_threshold; 728 ring_params->flags = srng->flags; 729 ring_params->ring_id = srng->ring_id; 730 for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) 731 ring_params->hwreg_base[i] = srng->hwreg_base[i]; 732 } 733 qdf_export_symbol(hal_get_srng_params); 734