1 /* 2 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are 6 * met: 7 * * Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * * Redistributions in binary form must reproduce the above 10 * copyright notice, this list of conditions and the following 11 * disclaimer in the documentation and/or other materials provided 12 * with the distribution. 13 * * Neither the name of The Linux Foundation nor the names of its 14 * contributors may be used to endorse or promote products derived 15 * from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS 21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 24 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 26 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 27 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 */ 29 #include "hal_hw_headers.h" 30 #include "hal_api.h" 31 #include "target_type.h" 32 #include "wcss_version.h" 33 #include "qdf_module.h" 34 #ifdef QCA_WIFI_QCA8074 35 void hal_qca6290_attach(struct hal_soc *hal); 36 #endif 37 #ifdef QCA_WIFI_QCA8074 38 void hal_qca8074_attach(struct hal_soc *hal); 39 #endif 40 #ifdef QCA_WIFI_QCA6390 41 void hal_qca6390_attach(struct hal_soc *hal); 42 #endif 43 44 /** 45 * hal_get_srng_ring_id() - get the ring id of a descriped ring 46 * @hal: hal_soc data structure 47 * @ring_type: type enum describing the ring 48 * @ring_num: which ring of the ring type 49 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings) 50 * 51 * Return: the ring id or -EINVAL if the ring does not exist. 52 */ 53 static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type, 54 int ring_num, int mac_id) 55 { 56 struct hal_hw_srng_config *ring_config = 57 HAL_SRNG_CONFIG(hal, ring_type); 58 int ring_id; 59 60 if (ring_num >= ring_config->max_rings) { 61 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 62 "%s: ring_num exceeded maximum no. of supported rings", 63 __func__); 64 /* TODO: This is a programming error. Assert if this happens */ 65 return -EINVAL; 66 } 67 68 if (ring_config->lmac_ring) { 69 ring_id = ring_config->start_ring_id + ring_num + 70 (mac_id * HAL_MAX_RINGS_PER_LMAC); 71 } else { 72 ring_id = ring_config->start_ring_id + ring_num; 73 } 74 75 return ring_id; 76 } 77 78 static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id) 79 { 80 /* TODO: Should we allocate srng structures dynamically? 
*/ 81 return &(hal->srng_list[ring_id]); 82 } 83 84 #define HP_OFFSET_IN_REG_START 1 85 #define OFFSET_FROM_HP_TO_TP 4 86 static void hal_update_srng_hp_tp_address(void *hal_soc, 87 int shadow_config_index, 88 int ring_type, 89 int ring_num) 90 { 91 struct hal_srng *srng; 92 struct hal_soc *hal = (struct hal_soc *)hal_soc; 93 int ring_id; 94 95 ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0); 96 if (ring_id < 0) 97 return; 98 99 srng = hal_get_srng(hal_soc, ring_id); 100 101 if (srng->ring_dir == HAL_SRNG_DST_RING) 102 srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index) 103 + hal->dev_base_addr; 104 else 105 srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index) 106 + hal->dev_base_addr; 107 } 108 109 QDF_STATUS hal_set_one_shadow_config(void *hal_soc, 110 int ring_type, 111 int ring_num) 112 { 113 uint32_t target_register; 114 struct hal_soc *hal = (struct hal_soc *)hal_soc; 115 struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type]; 116 int shadow_config_index = hal->num_shadow_registers_configured; 117 118 if (shadow_config_index >= MAX_SHADOW_REGISTERS) { 119 QDF_ASSERT(0); 120 return QDF_STATUS_E_RESOURCES; 121 } 122 123 hal->num_shadow_registers_configured++; 124 125 target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START]; 126 target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START] 127 *ring_num); 128 129 /* if the ring is a dst ring, we need to shadow the tail pointer */ 130 if (srng_config->ring_dir == HAL_SRNG_DST_RING) 131 target_register += OFFSET_FROM_HP_TO_TP; 132 133 hal->shadow_config[shadow_config_index].addr = target_register; 134 135 /* update hp/tp addr in the hal_soc structure*/ 136 hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type, 137 ring_num); 138 139 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 140 "%s: target_reg %x, shadow_index %x, ring_type %d, ring num %d", 141 __func__, target_register, shadow_config_index, 142 ring_type, ring_num); 143 
144 return QDF_STATUS_SUCCESS; 145 } 146 147 QDF_STATUS hal_construct_shadow_config(void *hal_soc) 148 { 149 int ring_type, ring_num; 150 struct hal_soc *hal = (struct hal_soc *)hal_soc; 151 152 for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) { 153 struct hal_hw_srng_config *srng_config = 154 &hal->hw_srng_table[ring_type]; 155 156 if (ring_type == CE_SRC || 157 ring_type == CE_DST || 158 ring_type == CE_DST_STATUS) 159 continue; 160 161 if (srng_config->lmac_ring) 162 continue; 163 164 for (ring_num = 0; ring_num < srng_config->max_rings; 165 ring_num++) 166 hal_set_one_shadow_config(hal_soc, ring_type, ring_num); 167 } 168 169 return QDF_STATUS_SUCCESS; 170 } 171 172 void hal_get_shadow_config(void *hal_soc, 173 struct pld_shadow_reg_v2_cfg **shadow_config, 174 int *num_shadow_registers_configured) 175 { 176 struct hal_soc *hal = (struct hal_soc *)hal_soc; 177 178 *shadow_config = hal->shadow_config; 179 *num_shadow_registers_configured = 180 hal->num_shadow_registers_configured; 181 182 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 183 "%s", __func__); 184 } 185 186 187 static void hal_validate_shadow_register(struct hal_soc *hal, 188 uint32_t *destination, 189 uint32_t *shadow_address) 190 { 191 unsigned int index; 192 uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr; 193 int destination_ba_offset = 194 ((char *)destination) - (char *)hal->dev_base_addr; 195 196 index = shadow_address - shadow_0_offset; 197 198 if (index >= MAX_SHADOW_REGISTERS) { 199 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 200 "%s: index %x out of bounds", __func__, index); 201 goto error; 202 } else if (hal->shadow_config[index].addr != destination_ba_offset) { 203 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 204 "%s: sanity check failure, expected %x, found %x", 205 __func__, destination_ba_offset, 206 hal->shadow_config[index].addr); 207 goto error; 208 } 209 return; 210 error: 211 qdf_print("%s: baddr %pK, desination %pK, 
shadow_address %pK s0offset %pK index %x", 212 __func__, hal->dev_base_addr, destination, shadow_address, 213 shadow_0_offset, index); 214 QDF_BUG(0); 215 return; 216 } 217 218 static void hal_target_based_configure(struct hal_soc *hal) 219 { 220 switch (hal->target_type) { 221 #ifdef QCA_WIFI_QCA6290 222 case TARGET_TYPE_QCA6290: 223 case TARGET_TYPE_QCA6390: 224 hal->use_register_windowing = true; 225 hal_qca6290_attach(hal); 226 break; 227 #endif 228 #ifdef QCA_WIFI_QCA6390 229 case TARGET_TYPE_QCA6390: 230 hal->use_register_windowing = true; 231 hal_qca6390_attach(hal); 232 break; 233 #endif 234 #if defined(QCA_WIFI_QCA8074) && defined(CONFIG_WIN) 235 case TARGET_TYPE_QCA8074: 236 hal_qca8074_attach(hal); 237 break; 238 #endif 239 default: 240 break; 241 } 242 } 243 244 uint32_t hal_get_target_type(struct hal_soc *hal) 245 { 246 struct hif_target_info *tgt_info = 247 hif_get_target_info_handle(hal->hif_handle); 248 249 return tgt_info->target_type; 250 } 251 252 qdf_export_symbol(hal_get_target_type); 253 254 /** 255 * hal_attach - Initialize HAL layer 256 * @hif_handle: Opaque HIF handle 257 * @qdf_dev: QDF device 258 * 259 * Return: Opaque HAL SOC handle 260 * NULL on failure (if given ring is not available) 261 * 262 * This function should be called as part of HIF initialization (for accessing 263 * copy engines). 
DP layer will get hal_soc handle using hif_get_hal_handle() 264 * 265 */ 266 void *hal_attach(void *hif_handle, qdf_device_t qdf_dev) 267 { 268 struct hal_soc *hal; 269 int i; 270 271 hal = qdf_mem_malloc(sizeof(*hal)); 272 273 if (!hal) { 274 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 275 "%s: hal_soc allocation failed", __func__); 276 goto fail0; 277 } 278 hal->hif_handle = hif_handle; 279 hal->dev_base_addr = hif_get_dev_ba(hif_handle); 280 hal->qdf_dev = qdf_dev; 281 hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent( 282 qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) * 283 HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr)); 284 if (!hal->shadow_rdptr_mem_paddr) { 285 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 286 "%s: hal->shadow_rdptr_mem_paddr allocation failed", 287 __func__); 288 goto fail1; 289 } 290 291 hal->shadow_wrptr_mem_vaddr = 292 (uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev, 293 sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS, 294 &(hal->shadow_wrptr_mem_paddr)); 295 if (!hal->shadow_wrptr_mem_vaddr) { 296 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 297 "%s: hal->shadow_wrptr_mem_vaddr allocation failed", 298 __func__); 299 goto fail2; 300 } 301 302 for (i = 0; i < HAL_SRNG_ID_MAX; i++) { 303 hal->srng_list[i].initialized = 0; 304 hal->srng_list[i].ring_id = i; 305 } 306 307 qdf_spinlock_create(&hal->register_access_lock); 308 hal->register_window = 0; 309 hal->target_type = hal_get_target_type(hal); 310 311 hal_target_based_configure(hal); 312 313 return (void *)hal; 314 315 fail2: 316 qdf_mem_free_consistent(qdf_dev, qdf_dev->dev, 317 sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX, 318 hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0); 319 fail1: 320 qdf_mem_free(hal); 321 fail0: 322 return NULL; 323 } 324 qdf_export_symbol(hal_attach); 325 326 /** 327 * hal_mem_info - Retrieve hal memory base address 328 * 329 * @hal_soc: Opaque HAL SOC 
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	mem->dev_base_addr = (void *)hal->dev_base_addr;
	mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
	mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
	mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
	mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
	/* Physical device base is queried from HIF rather than cached here */
	hif_read_phy_mem_base(hal->hif_handle, (qdf_dma_addr_t *)&mem->dev_base_paddr);
	return;
}
qdf_export_symbol(hal_get_meminfo);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * Frees the shadow read/write pointer consistent-memory blocks allocated by
 * hal_attach() and the hal_soc structure itself.
 *
 * Return: none
 */
extern void hal_detach(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0);
	qdf_mem_free(hal);

	return;
}
qdf_export_symbol(hal_detach);

/**
 * hal_srng_src_hw_init - Private function to initialize SRNG
 * source ring HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_src_hw_init(struct hal_soc *hal,
					struct hal_srng *srng)
{
	uint32_t reg_val = 0;
	uint64_t tp_addr = 0;

	HIF_DBG("%s: hw_init srng %d", __func__, srng->ring_id);

	/* Program MSI target address/data if MSI interrupts are in use */
	if (srng->flags & HAL_SRNG_MSI_INTR) {
		SRNG_SRC_REG_WRITE(srng, MSI1_BASE_LSB,
			srng->msi_addr & 0xffffffff);
		reg_val = SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB, ADDR),
			(uint64_t)(srng->msi_addr) >> 32) |
			SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB,
			MSI1_ENABLE), 1);
		SRNG_SRC_REG_WRITE(srng, MSI1_BASE_MSB, reg_val);
		SRNG_SRC_REG_WRITE(srng, MSI1_DATA, srng->msi_data);
	}

	/* Ring base address (split across LSB/MSB) and ring size in words */
	SRNG_SRC_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff);
	reg_val = SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_BASE_ADDR_MSB),
		((uint64_t)(srng->ring_base_paddr) >> 32)) |
		SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_SIZE),
		srng->entry_size * srng->num_entries);
	SRNG_SRC_REG_WRITE(srng, BASE_MSB, reg_val);

	/* Newer WCSS versions dropped the RING_ID field from the ID reg */
#if defined(WCSS_VERSION) && \
	((defined(CONFIG_WIN) && (WCSS_VERSION > 81)) || \
	 (defined(CONFIG_MCL) && (WCSS_VERSION >= 72)))
	reg_val = SRNG_SM(SRNG_SRC_FLD(ID, ENTRY_SIZE), srng->entry_size);
#else
	reg_val = SRNG_SM(SRNG_SRC_FLD(ID, RING_ID), srng->ring_id) |
		SRNG_SM(SRNG_SRC_FLD(ID, ENTRY_SIZE), srng->entry_size);
#endif
	SRNG_SRC_REG_WRITE(srng, ID, reg_val);

	/*
	 * Interrupt setup:
	 * Default interrupt mode is 'pulse'. Need to setup SW_INTERRUPT_MODE
	 * if level mode is required
	 */
	reg_val = 0;

	/*
	 * WAR - Hawkeye v1 has a hardware bug which requires timer value to be
	 * programmed in terms of 1us resolution instead of 8us resolution as
	 * given in MLD.
	 */
	if (srng->intr_timer_thres_us) {
		reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0,
			INTERRUPT_TIMER_THRESHOLD),
			srng->intr_timer_thres_us);
		/* For HK v2 this should be (srng->intr_timer_thres_us >> 3) */
	}

	if (srng->intr_batch_cntr_thres_entries) {
		reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0,
			BATCH_COUNTER_THRESHOLD),
			srng->intr_batch_cntr_thres_entries *
			srng->entry_size);
	}
	SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX0, reg_val);

	reg_val = 0;
	if (srng->flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
		reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX1,
			LOW_THRESHOLD), srng->u.src_ring.low_threshold);
	}

	SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX1, reg_val);

	/* As per HW team, TP_ADDR and HP_ADDR for Idle link ring should
	 * remain 0 to avoid some WBM stability issues. Remote head/tail
	 * pointers are not required since this ring is completely managed
	 * by WBM HW
	 */
	if (srng->ring_id != HAL_SRNG_WBM_IDLE_LINK) {
		/* Translate the shadow rdptr slot's vaddr into its paddr so
		 * HW can DMA the tail pointer into host memory.
		 */
		tp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.tp_addr) -
			(unsigned long)(hal->shadow_rdptr_mem_vaddr)));
		SRNG_SRC_REG_WRITE(srng, TP_ADDR_LSB, tp_addr & 0xffffffff);
		SRNG_SRC_REG_WRITE(srng, TP_ADDR_MSB, tp_addr >> 32);
	}

	/* Initialize head and tail pointers to indicate ring is empty */
	SRNG_SRC_REG_WRITE(srng, HP, 0);
	SRNG_SRC_REG_WRITE(srng, TP, 0);
	*(srng->u.src_ring.tp_addr) = 0;

	reg_val = ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ?
			SRNG_SM(SRNG_SRC_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) |
			((srng->flags & HAL_SRNG_RING_PTR_SWAP) ?
			SRNG_SM(SRNG_SRC_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) |
			((srng->flags & HAL_SRNG_MSI_SWAP) ?
			SRNG_SM(SRNG_SRC_FLD(MISC, MSI_SWAP_BIT), 1) : 0);

	/* Loop count is not used for SRC rings */
	reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, LOOPCNT_DISABLE), 1);

	/*
	 * reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, SRNG_ENABLE), 1);
	 * todo: update fw_api and replace with above line
	 * (when SRNG_ENABLE field for the MISC register is available in fw_api)
	 * (WCSS_UMAC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_MISC)
	 */
	reg_val |= 0x40;

	SRNG_SRC_REG_WRITE(srng, MISC, reg_val);

}

/**
 * hal_ce_dst_setup - Initialize CE destination ring registers
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @ring_num: which CE_DST ring to program
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
				    int ring_num)
{
	uint32_t reg_val = 0;
	uint32_t reg_addr;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, CE_DST);

	/* set DEST_MAX_LENGTH according to ce assignment */
	reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_ADDR(
			ring_config->reg_start[R0_INDEX] +
			(ring_num * ring_config->reg_size[R0_INDEX]));

	/* Read-modify-write: only the DEST_MAX_LENGTH field is changed */
	reg_val = HAL_REG_READ(hal, reg_addr);
	reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	reg_val |= srng->u.dst_ring.max_buffer_length &
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	HAL_REG_WRITE(hal, reg_addr, reg_val);
}

/**
 * hal_reo_remap_IX0 - Remap REO ring destination
 * @hal: HAL SOC handle
 * @remap_val: Remap value
 */
void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val)
{
	uint32_t reg_offset = HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET);
	HAL_REG_WRITE(hal, reg_offset, remap_val);
}

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest ring head pointer
 * @srng: srng pointer
 * @paddr: physical address
 */
void hal_srng_dst_set_hp_paddr(struct hal_srng *srng,
			       uint64_t paddr)
{
	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB,
			   paddr & 0xffffffff);
	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB,
			   paddr >> 32);
}

/**
 * hal_srng_dst_init_hp() - Initialize destination ring head pointer
 * @srng: srng pointer
 * @vaddr: virtual address of the new head-pointer location
 */
void hal_srng_dst_init_hp(struct hal_srng *srng,
			  uint32_t *vaddr)
{
	srng->u.dst_ring.hp_addr = vaddr;
	/* Seed both HW and the new location with the cached head pointer */
	SRNG_DST_REG_WRITE(srng, HP, srng->u.dst_ring.cached_hp);
	*(srng->u.dst_ring.hp_addr) = srng->u.dst_ring.cached_hp;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "hp_addr=%pK, cached_hp=%d, hp=%d",
		  (void *)srng->u.dst_ring.hp_addr, srng->u.dst_ring.cached_hp,
		  *(srng->u.dst_ring.hp_addr));
}

/**
 * hal_srng_dst_hw_init - Private function to initialize SRNG
 * destination ring HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
					struct hal_srng *srng)
{
	uint32_t reg_val = 0;
	uint64_t hp_addr = 0;

	HIF_DBG("%s: hw_init srng %d", __func__, srng->ring_id);

	/* Program MSI target address/data if MSI interrupts are in use */
	if (srng->flags & HAL_SRNG_MSI_INTR) {
		SRNG_DST_REG_WRITE(srng, MSI1_BASE_LSB,
			srng->msi_addr & 0xffffffff);
		reg_val = SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB, ADDR),
			(uint64_t)(srng->msi_addr) >> 32) |
			SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB,
			MSI1_ENABLE), 1);
		SRNG_DST_REG_WRITE(srng, MSI1_BASE_MSB, reg_val);
		SRNG_DST_REG_WRITE(srng, MSI1_DATA, srng->msi_data);
	}

	/* Ring base address (split across LSB/MSB) and ring size in words */
	SRNG_DST_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff);
	reg_val = SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_BASE_ADDR_MSB),
		((uint64_t)(srng->ring_base_paddr) >> 32)) |
		SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_SIZE),
		srng->entry_size * srng->num_entries);
	SRNG_DST_REG_WRITE(srng, BASE_MSB, reg_val);

	reg_val = SRNG_SM(SRNG_DST_FLD(ID, RING_ID), srng->ring_id) |
		SRNG_SM(SRNG_DST_FLD(ID, ENTRY_SIZE), srng->entry_size);
	SRNG_DST_REG_WRITE(srng, ID, reg_val);


	/*
	 * Interrupt setup:
	 * Default interrupt mode is 'pulse'. Need to setup SW_INTERRUPT_MODE
	 * if level mode is required
	 */
	reg_val = 0;
	if (srng->intr_timer_thres_us) {
		reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP,
			INTERRUPT_TIMER_THRESHOLD),
			srng->intr_timer_thres_us >> 3);
	}

	if (srng->intr_batch_cntr_thres_entries) {
		reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP,
			BATCH_COUNTER_THRESHOLD),
			srng->intr_batch_cntr_thres_entries *
			srng->entry_size);
	}

	SRNG_DST_REG_WRITE(srng, PRODUCER_INT_SETUP, reg_val);

	/* Translate the shadow rdptr slot's vaddr into its paddr so HW can
	 * DMA the head pointer into host memory.
	 */
	hp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr +
		((unsigned long)(srng->u.dst_ring.hp_addr) -
		(unsigned long)(hal->shadow_rdptr_mem_vaddr)));
	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB, hp_addr & 0xffffffff);
	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB, hp_addr >> 32);

	/* Initialize head and tail pointers to indicate ring is empty */
	SRNG_DST_REG_WRITE(srng, HP, 0);
	SRNG_DST_REG_WRITE(srng, TP, 0);
	*(srng->u.dst_ring.hp_addr) = 0;

	reg_val = ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ?
			SRNG_SM(SRNG_DST_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) |
			((srng->flags & HAL_SRNG_RING_PTR_SWAP) ?
			SRNG_SM(SRNG_DST_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) |
			((srng->flags & HAL_SRNG_MSI_SWAP) ?
			SRNG_SM(SRNG_DST_FLD(MISC, MSI_SWAP_BIT), 1) : 0);

	/*
	 * reg_val |= SRNG_SM(SRNG_DST_FLD(MISC, SRNG_ENABLE), 1);
	 * todo: update fw_api and replace with above line
	 * (when SRNG_ENABLE field for the MISC register is available in fw_api)
	 * (WCSS_UMAC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_MISC)
	 */
	reg_val |= 0x40;

	SRNG_DST_REG_WRITE(srng, MISC, reg_val);

}

/**
 * hal_srng_hw_init - Private function to initialize SRNG HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
				    struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		hal_srng_src_hw_init(hal, srng);
	else
		hal_srng_dst_hw_init(hal, srng);
}

/* With shadow-register support compiled in, rings use shadow hp/tp
 * locations and mismatches are validated; otherwise the real registers
 * are used directly.
 */
#ifdef CONFIG_SHADOW_V2
#define ignore_shadow false
#define CHECK_SHADOW_REGISTERS true
#else
#define ignore_shadow true
#define CHECK_SHADOW_REGISTERS false
#endif

/**
 * hal_srng_setup - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 * from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure.
 * Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: mac_id %d ring_id %d",
		  __func__, mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->initialized) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Ring (ring_type, ring_num) already initialized",
			  __func__);
		return NULL;
	}

	/* Copy caller-supplied parameters into the SW ring state */
	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	/* ring_size/entry_size are in 4-byte words; mask assumes pow-of-2 */
	srng->ring_size = srng->num_entries * srng->entry_size;
	srng->ring_size_mask = srng->ring_size - 1;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	srng->hal_soc = hal_soc;

	/* Per-register-group base addresses for this specific ring */
	for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
			+ (ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory (sizes are in 4-byte words) */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
			srng->num_entries) << 2);

	srng->flags = ring_params->flags;
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should we get these flags from caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		/* HW DMAs the tail pointer into this shadow rdptr slot */
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;
		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
			/* No shadow register assigned: write HP directly */
			srng->u.src_ring.hp_addr = SRNG_SRC_ADDR(srng, HP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"%s: Ring (%d, %d) missing shadow config",
					__func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_SRC_ADDR(srng, HP),
						     srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		/* HW DMAs the head pointer into this shadow rdptr slot */
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
			/* No shadow register assigned: write TP directly */
			srng->u.dst_ring.tp_addr = SRNG_DST_ADDR(srng, TP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"%s: Ring (%d, %d) missing shadow config",
					__func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_DST_ADDR(srng, TP),
						     srng->u.dst_ring.tp_addr);
		}
	}

	/* LMAC ring HW is initialized by FW, not by the host */
	if (!(ring_config->lmac_ring)) {
		hal_srng_hw_init(hal, srng);

		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length = ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	srng->initialized = true;

	return (void *)srng;
}
qdf_export_symbol(hal_srng_setup);

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
832 * @hal_soc: Opaque HAL SOC handle 833 * @hal_srng: Opaque HAL SRNG pointer 834 */ 835 void hal_srng_cleanup(void *hal_soc, void *hal_srng) 836 { 837 struct hal_srng *srng = (struct hal_srng *)hal_srng; 838 SRNG_LOCK_DESTROY(&srng->lock); 839 srng->initialized = 0; 840 } 841 qdf_export_symbol(hal_srng_cleanup); 842 843 /** 844 * hal_srng_get_entrysize - Returns size of ring entry in bytes 845 * @hal_soc: Opaque HAL SOC handle 846 * @ring_type: one of the types from hal_ring_type 847 * 848 */ 849 uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type) 850 { 851 struct hal_soc *hal = (struct hal_soc *)hal_soc; 852 struct hal_hw_srng_config *ring_config = 853 HAL_SRNG_CONFIG(hal, ring_type); 854 return ring_config->entry_size << 2; 855 } 856 qdf_export_symbol(hal_srng_get_entrysize); 857 858 /** 859 * hal_srng_max_entries - Returns maximum possible number of ring entries 860 * @hal_soc: Opaque HAL SOC handle 861 * @ring_type: one of the types from hal_ring_type 862 * 863 * Return: Maximum number of entries for the given ring_type 864 */ 865 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type) 866 { 867 struct hal_soc *hal = (struct hal_soc *)hal_soc; 868 struct hal_hw_srng_config *ring_config = 869 HAL_SRNG_CONFIG(hal, ring_type); 870 871 return ring_config->max_size / ring_config->entry_size; 872 } 873 qdf_export_symbol(hal_srng_max_entries); 874 875 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type) 876 { 877 struct hal_soc *hal = (struct hal_soc *)hal_soc; 878 struct hal_hw_srng_config *ring_config = 879 HAL_SRNG_CONFIG(hal, ring_type); 880 881 return ring_config->ring_dir; 882 } 883 884 /** 885 * hal_srng_dump - Dump ring status 886 * @srng: hal srng pointer 887 */ 888 void hal_srng_dump(struct hal_srng *srng) 889 { 890 if (srng->ring_dir == HAL_SRNG_SRC_RING) { 891 qdf_print("=== SRC RING %d ===", srng->ring_id); 892 qdf_print("hp %u, reap_hp %u, tp %u, cached tp %u", 893 srng->u.src_ring.hp, 894 srng->u.src_ring.reap_hp, 895 
*srng->u.src_ring.tp_addr, 896 srng->u.src_ring.cached_tp); 897 } else { 898 qdf_print("=== DST RING %d ===", srng->ring_id); 899 qdf_print("tp %u, hp %u, cached tp %u, loop_cnt %u", 900 srng->u.dst_ring.tp, 901 *srng->u.dst_ring.hp_addr, 902 srng->u.dst_ring.cached_hp, 903 srng->u.dst_ring.loop_cnt); 904 } 905 } 906 907 /** 908 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL 909 * 910 * @hal_soc: Opaque HAL SOC handle 911 * @hal_ring: Ring pointer (Source or Destination ring) 912 * @ring_params: SRNG parameters will be returned through this structure 913 */ 914 extern void hal_get_srng_params(void *hal_soc, void *hal_ring, 915 struct hal_srng_params *ring_params) 916 { 917 struct hal_srng *srng = (struct hal_srng *)hal_ring; 918 int i =0; 919 ring_params->ring_id = srng->ring_id; 920 ring_params->ring_dir = srng->ring_dir; 921 ring_params->entry_size = srng->entry_size; 922 923 ring_params->ring_base_paddr = srng->ring_base_paddr; 924 ring_params->ring_base_vaddr = srng->ring_base_vaddr; 925 ring_params->num_entries = srng->num_entries; 926 ring_params->msi_addr = srng->msi_addr; 927 ring_params->msi_data = srng->msi_data; 928 ring_params->intr_timer_thres_us = srng->intr_timer_thres_us; 929 ring_params->intr_batch_cntr_thres_entries = 930 srng->intr_batch_cntr_thres_entries; 931 ring_params->low_threshold = srng->u.src_ring.low_threshold; 932 ring_params->flags = srng->flags; 933 ring_params->ring_id = srng->ring_id; 934 for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) 935 ring_params->hwreg_base[i] = srng->hwreg_base[i]; 936 } 937 qdf_export_symbol(hal_get_srng_params); 938