1 /* 2 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 18 #ifdef IPA_OFFLOAD 19 20 #include <wlan_ipa_ucfg_api.h> 21 #include <qdf_ipa_wdi3.h> 22 #include <qdf_types.h> 23 #include <qdf_lock.h> 24 #include <hal_hw_headers.h> 25 #include <hal_api.h> 26 #include <hal_reo.h> 27 #include <hif.h> 28 #include <htt.h> 29 #include <wdi_event.h> 30 #include <queue.h> 31 #include "dp_types.h" 32 #include "dp_htt.h" 33 #include "dp_tx.h" 34 #include "dp_rx.h" 35 #include "dp_ipa.h" 36 #include "dp_internal.h" 37 #ifdef WIFI_MONITOR_SUPPORT 38 #include "dp_mon.h" 39 #endif 40 #ifdef FEATURE_WDS 41 #include "dp_txrx_wds.h" 42 #endif 43 44 /* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */ 45 #define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT (2048) 46 47 /* WAR for IPA_OFFLOAD case. In some cases, its observed that WBM tries to 48 * release a buffer into WBM2SW RELEASE ring for IPA, and the ring is full. 49 * This causes back pressure, resulting in a FW crash. 50 * By leaving some entries with no buffer attached, WBM will be able to write 51 * to the ring, and from dumps we can figure out the buffer which is causing 52 * this issue. 53 */ 54 #define DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES 16 55 /** 56 *struct dp_ipa_reo_remap_record - history for dp ipa reo remaps 57 * @ix0_reg: reo destination ring IX0 value 58 * @ix2_reg: reo destination ring IX2 value 59 * @ix3_reg: reo destination ring IX3 value 60 */ 61 struct dp_ipa_reo_remap_record { 62 uint64_t timestamp; 63 uint32_t ix0_reg; 64 uint32_t ix2_reg; 65 uint32_t ix3_reg; 66 }; 67 68 #ifdef IPA_WDS_EASYMESH_FEATURE 69 #define WLAN_IPA_META_DATA_MASK htonl(0x000000FF) 70 #else 71 #define WLAN_IPA_META_DATA_MASK htonl(0x00FF0000) 72 #endif 73 74 #define REO_REMAP_HISTORY_SIZE 32 75 76 struct dp_ipa_reo_remap_record dp_ipa_reo_remap_history[REO_REMAP_HISTORY_SIZE]; 77 78 static qdf_atomic_t dp_ipa_reo_remap_history_index; 79 static int dp_ipa_reo_remap_record_index_next(qdf_atomic_t *index) 80 { 81 int next = qdf_atomic_inc_return(index); 82 83 if (next == REO_REMAP_HISTORY_SIZE) 84 qdf_atomic_sub(REO_REMAP_HISTORY_SIZE, index); 85 86 return next % REO_REMAP_HISTORY_SIZE; 87 } 88 89 /** 90 * dp_ipa_reo_remap_history_add() - Record dp ipa reo remap values 91 * @ix0_val: reo destination ring IX0 value 92 * @ix2_val: reo destination ring IX2 value 93 * @ix3_val: reo destination ring IX3 value 94 * 95 * Return: None 96 */ 97 static void dp_ipa_reo_remap_history_add(uint32_t ix0_val, uint32_t ix2_val, 98 uint32_t ix3_val) 99 { 100 int idx = dp_ipa_reo_remap_record_index_next( 101 &dp_ipa_reo_remap_history_index); 102 struct dp_ipa_reo_remap_record *record = &dp_ipa_reo_remap_history[idx]; 103 104 record->timestamp = qdf_get_log_timestamp(); 105 record->ix0_reg = ix0_val; 106 record->ix2_reg = ix2_val; 107 record->ix3_reg = ix3_val; 108 } 109 110 static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc, 111 qdf_nbuf_t nbuf, 112 uint32_t size, 113 bool create, 114 const char *func, 115 uint32_t line) 116 { 117 qdf_mem_info_t mem_map_table = {0}; 118 QDF_STATUS ret = QDF_STATUS_SUCCESS; 119 qdf_ipa_wdi_hdl_t hdl; 120 121 /* Need to handle the case when one soc will 122 * have multiple pdev(radio's), Currently passing 123 * pdev_id as 0 assuming 1 soc has only 1 radio. 124 */ 125 hdl = wlan_ipa_get_hdl(soc->ctrl_psoc, 0); 126 if (hdl == DP_IPA_HDL_INVALID) { 127 dp_err("IPA handle is invalid"); 128 return QDF_STATUS_E_INVAL; 129 } 130 qdf_update_mem_map_table(soc->osdev, &mem_map_table, 131 qdf_nbuf_get_frag_paddr(nbuf, 0), 132 size); 133 134 if (create) { 135 /* Assert if PA is zero */ 136 qdf_assert_always(mem_map_table.pa); 137 138 ret = qdf_nbuf_smmu_map_debug(nbuf, hdl, 1, &mem_map_table, 139 func, line); 140 } else { 141 ret = qdf_nbuf_smmu_unmap_debug(nbuf, hdl, 1, &mem_map_table, 142 func, line); 143 } 144 qdf_assert_always(!ret); 145 146 /* Return status of mapping/unmapping is stored in 147 * mem_map_table.result field, assert if the result 148 * is failure 149 */ 150 if (create) 151 qdf_assert_always(!mem_map_table.result); 152 else 153 qdf_assert_always(mem_map_table.result >= mem_map_table.size); 154 155 return ret; 156 } 157 158 QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc, 159 qdf_nbuf_t nbuf, 160 uint32_t size, 161 bool create, const char *func, 162 uint32_t line) 163 { 164 struct dp_pdev *pdev; 165 int i; 166 167 for (i = 0; i < soc->pdev_count; i++) { 168 pdev = soc->pdev_list[i]; 169 if (pdev && dp_monitor_is_configured(pdev)) 170 return QDF_STATUS_SUCCESS; 171 } 172 173 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) || 174 !qdf_mem_smmu_s1_enabled(soc->osdev)) 175 return QDF_STATUS_SUCCESS; 176 177 /** 178 * Even if ipa pipes is disabled, but if it's unmap 179 * operation and nbuf has done ipa smmu map before, 180 * do ipa smmu unmap as well. 181 */ 182 if (!qdf_atomic_read(&soc->ipa_pipes_enabled)) { 183 if (!create && qdf_nbuf_is_rx_ipa_smmu_map(nbuf)) { 184 DP_STATS_INC(soc, rx.err.ipa_unmap_no_pipe, 1); 185 } else { 186 return QDF_STATUS_SUCCESS; 187 } 188 } 189 190 if (qdf_unlikely(create == qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) { 191 if (create) { 192 DP_STATS_INC(soc, rx.err.ipa_smmu_map_dup, 1); 193 } else { 194 DP_STATS_INC(soc, rx.err.ipa_smmu_unmap_dup, 1); 195 } 196 return QDF_STATUS_E_INVAL; 197 } 198 199 qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create); 200 201 return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, size, create, 202 func, line); 203 } 204 205 static QDF_STATUS __dp_ipa_tx_buf_smmu_mapping( 206 struct dp_soc *soc, 207 struct dp_pdev *pdev, 208 bool create, 209 const char *func, 210 uint32_t line) 211 { 212 uint32_t index; 213 QDF_STATUS ret = QDF_STATUS_SUCCESS; 214 uint32_t tx_buffer_cnt = soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; 215 qdf_nbuf_t nbuf; 216 uint32_t buf_len; 217 218 if (!ipa_is_ready()) { 219 dp_info("IPA is not READY"); 220 return 0; 221 } 222 223 for (index = 0; index < tx_buffer_cnt; index++) { 224 nbuf = (qdf_nbuf_t) 225 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[index]; 226 if (!nbuf) 227 continue; 228 buf_len = qdf_nbuf_get_data_len(nbuf); 229 ret = __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, buf_len, 230 create, func, line); 231 } 232 233 return ret; 234 } 235 236 #ifndef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS 237 static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc, 238 bool lock_required) 239 { 240 hal_ring_handle_t hal_ring_hdl; 241 int ring; 242 243 for (ring = 0; ring < soc->num_reo_dest_rings; ring++) { 244 hal_ring_hdl = soc->reo_dest_ring[ring].hal_srng; 245 hal_srng_lock(hal_ring_hdl); 246 soc->ipa_reo_ctx_lock_required[ring] = lock_required; 247 hal_srng_unlock(hal_ring_hdl); 248 } 249 } 250 #else 251 static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc, 252 bool lock_required) 253 { 254 } 255 256 #endif 257 258 #ifdef RX_DESC_MULTI_PAGE_ALLOC 259 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc, 260 struct dp_pdev *pdev, 261 bool create, 262 const char *func, 263 uint32_t line) 264 { 265 struct rx_desc_pool *rx_pool; 266 uint8_t pdev_id; 267 uint32_t num_desc, page_id, offset, i; 268 uint16_t num_desc_per_page; 269 union dp_rx_desc_list_elem_t *rx_desc_elem; 270 struct dp_rx_desc *rx_desc; 271 qdf_nbuf_t nbuf; 272 QDF_STATUS ret = QDF_STATUS_SUCCESS; 273 274 if (!qdf_ipa_is_ready()) 275 return ret; 276 277 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) 278 return ret; 279 280 pdev_id = pdev->pdev_id; 281 rx_pool = &soc->rx_desc_buf[pdev_id]; 282 283 dp_ipa_set_reo_ctx_mapping_lock_required(soc, true); 284 qdf_spin_lock_bh(&rx_pool->lock); 285 dp_ipa_rx_buf_smmu_mapping_lock(soc); 286 num_desc = rx_pool->pool_size; 287 num_desc_per_page = rx_pool->desc_pages.num_element_per_page; 288 for (i = 0; i < num_desc; i++) { 289 page_id = i / num_desc_per_page; 290 offset = i % num_desc_per_page; 291 if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages))) 292 break; 293 rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool); 294 rx_desc = &rx_desc_elem->rx_desc; 295 if ((!(rx_desc->in_use)) || rx_desc->unmapped) 296 continue; 297 nbuf = rx_desc->nbuf; 298 299 if (qdf_unlikely(create == 300 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) { 301 if (create) { 302 DP_STATS_INC(soc, 303 rx.err.ipa_smmu_map_dup, 1); 304 } else { 305 DP_STATS_INC(soc, 306 rx.err.ipa_smmu_unmap_dup, 1); 307 } 308 continue; 309 } 310 qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create); 311 312 ret = __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, 313 rx_pool->buf_size, 314 create, func, line); 315 } 316 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 317 qdf_spin_unlock_bh(&rx_pool->lock); 318 dp_ipa_set_reo_ctx_mapping_lock_required(soc, false); 319 320 return ret; 321 } 322 #else 323 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping( 324 struct dp_soc *soc, 325 struct dp_pdev *pdev, 326 bool create, 327 const char *func, 328 uint32_t line) 329 { 330 struct rx_desc_pool *rx_pool; 331 uint8_t pdev_id; 332 qdf_nbuf_t nbuf; 333 int i; 334 335 if (!qdf_ipa_is_ready()) 336 return QDF_STATUS_SUCCESS; 337 338 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) 339 return QDF_STATUS_SUCCESS; 340 341 pdev_id = pdev->pdev_id; 342 rx_pool = &soc->rx_desc_buf[pdev_id]; 343 344 dp_ipa_set_reo_ctx_mapping_lock_required(soc, true); 345 qdf_spin_lock_bh(&rx_pool->lock); 346 dp_ipa_rx_buf_smmu_mapping_lock(soc); 347 for (i = 0; i < rx_pool->pool_size; i++) { 348 if ((!(rx_pool->array[i].rx_desc.in_use)) || 349 rx_pool->array[i].rx_desc.unmapped) 350 continue; 351 352 nbuf = rx_pool->array[i].rx_desc.nbuf; 353 354 if (qdf_unlikely(create == 355 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) { 356 if (create) { 357 DP_STATS_INC(soc, 358 rx.err.ipa_smmu_map_dup, 1); 359 } else { 360 DP_STATS_INC(soc, 361 rx.err.ipa_smmu_unmap_dup, 1); 362 } 363 continue; 364 } 365 qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create); 366 367 __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, rx_pool->buf_size, 368 create, func, line); 369 } 370 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 371 qdf_spin_unlock_bh(&rx_pool->lock); 372 dp_ipa_set_reo_ctx_mapping_lock_required(soc, false); 373 374 return QDF_STATUS_SUCCESS; 375 } 376 #endif /* RX_DESC_MULTI_PAGE_ALLOC */ 377 378 static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev, 379 qdf_shared_mem_t *shared_mem, 380 void *cpu_addr, 381 qdf_dma_addr_t dma_addr, 382 uint32_t size) 383 { 384 qdf_dma_addr_t paddr; 385 int ret; 386 387 shared_mem->vaddr = cpu_addr; 388 qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size); 389 *qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr; 390 391 paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr); 392 qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr); 393 394 ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable, 395 shared_mem->vaddr, dma_addr, size); 396 if (ret) { 397 dp_err("Unable to get DMA sgtable"); 398 return QDF_STATUS_E_NOMEM; 399 } 400 401 qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable); 402 403 return QDF_STATUS_SUCCESS; 404 } 405 406 /** 407 * dp_ipa_get_tx_bank_id - API to get TCL bank id 408 * @soc: dp_soc handle 409 * @bank_id: out parameter for bank id 410 * 411 * Return: QDF_STATUS 412 */ 413 static QDF_STATUS dp_ipa_get_tx_bank_id(struct dp_soc *soc, uint8_t *bank_id) 414 { 415 if (soc->arch_ops.ipa_get_bank_id) { 416 *bank_id = soc->arch_ops.ipa_get_bank_id(soc); 417 if (*bank_id < 0) { 418 return QDF_STATUS_E_INVAL; 419 } else { 420 dp_info("bank_id %u", *bank_id); 421 return QDF_STATUS_SUCCESS; 422 } 423 } else { 424 return QDF_STATUS_E_NOSUPPORT; 425 } 426 } 427 428 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \ 429 defined(CONFIG_IPA_WDI_UNIFIED_API) 430 static void dp_ipa_setup_tx_params_bank_id(struct dp_soc *soc, 431 qdf_ipa_wdi_pipe_setup_info_t *tx) 432 { 433 uint8_t bank_id; 434 435 if (QDF_IS_STATUS_SUCCESS(dp_ipa_get_tx_bank_id(soc, &bank_id))) 436 QDF_IPA_WDI_SETUP_INFO_RX_BANK_ID(tx, bank_id); 437 } 438 439 static void 440 dp_ipa_setup_tx_smmu_params_bank_id(struct dp_soc *soc, 441 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu) 442 { 443 uint8_t bank_id; 444 445 if (QDF_IS_STATUS_SUCCESS(dp_ipa_get_tx_bank_id(soc, &bank_id))) 446 QDF_IPA_WDI_SETUP_INFO_SMMU_RX_BANK_ID(tx_smmu, bank_id); 447 } 448 #else 449 static inline void 450 dp_ipa_setup_tx_params_bank_id(struct dp_soc *soc, 451 qdf_ipa_wdi_pipe_setup_info_t *tx) 452 { 453 } 454 455 static inline void 456 dp_ipa_setup_tx_smmu_params_bank_id(struct dp_soc *soc, 457 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu) 458 { 459 } 460 #endif 461 462 #ifdef IPA_WDI3_TX_TWO_PIPES 463 static void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev) 464 { 465 struct dp_ipa_resources *ipa_res; 466 qdf_nbuf_t nbuf; 467 int idx; 468 469 for (idx = 0; idx < soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt; idx++) { 470 nbuf = (qdf_nbuf_t) 471 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx]; 472 if (!nbuf) 473 continue; 474 475 qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL); 476 qdf_mem_dp_tx_skb_cnt_dec(); 477 qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf)); 478 qdf_nbuf_free(nbuf); 479 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx] = 480 (void *)NULL; 481 } 482 483 qdf_mem_free(soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned); 484 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL; 485 486 ipa_res = &pdev->ipa_resource; 487 if (!ipa_res->is_db_ddr_mapped) 488 iounmap(ipa_res->tx_alt_comp_doorbell_vaddr); 489 490 qdf_mem_free_sgtable(&ipa_res->tx_alt_ring.sgtable); 491 qdf_mem_free_sgtable(&ipa_res->tx_alt_comp_ring.sgtable); 492 } 493 494 static int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc) 495 { 496 uint32_t tx_buffer_count; 497 uint32_t ring_base_align = 8; 498 qdf_dma_addr_t buffer_paddr; 499 struct hal_srng *wbm_srng = (struct hal_srng *) 500 soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng; 501 struct hal_srng_params srng_params; 502 uint32_t wbm_bm_id; 503 void *ring_entry; 504 int num_entries; 505 qdf_nbuf_t nbuf; 506 int retval = QDF_STATUS_SUCCESS; 507 int max_alloc_count = 0; 508 509 /* 510 * Uncomment when dp_ops_cfg.cfg_attach is implemented 511 * unsigned int uc_tx_buf_sz = 512 * dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev); 513 */ 514 unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT; 515 unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1; 516 517 wbm_bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, 518 IPA_TX_ALT_RING_IDX); 519 520 hal_get_srng_params(soc->hal_soc, 521 hal_srng_to_hal_ring_handle(wbm_srng), 522 &srng_params); 523 num_entries = srng_params.num_entries; 524 525 max_alloc_count = 526 num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES; 527 if (max_alloc_count <= 0) { 528 dp_err("incorrect value for buffer count %u", max_alloc_count); 529 return -EINVAL; 530 } 531 532 dp_info("requested %d buffers to be posted to wbm ring", 533 max_alloc_count); 534 535 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = 536 qdf_mem_malloc(num_entries * 537 sizeof(*soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned)); 538 if (!soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned) { 539 dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail"); 540 return -ENOMEM; 541 } 542 543 hal_srng_access_start_unlocked(soc->hal_soc, 544 hal_srng_to_hal_ring_handle(wbm_srng)); 545 546 /* 547 * Allocate Tx buffers as many as possible. 548 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty 549 * Populate Tx buffers into WBM2IPA ring 550 * This initial buffer population will simulate H/W as source ring, 551 * and update HP 552 */ 553 for (tx_buffer_count = 0; 554 tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) { 555 nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE); 556 if (!nbuf) 557 break; 558 559 ring_entry = hal_srng_dst_get_next_hp( 560 soc->hal_soc, 561 hal_srng_to_hal_ring_handle(wbm_srng)); 562 if (!ring_entry) { 563 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 564 "%s: Failed to get WBM ring entry", 565 __func__); 566 qdf_nbuf_free(nbuf); 567 break; 568 } 569 570 qdf_nbuf_map_single(soc->osdev, nbuf, 571 QDF_DMA_BIDIRECTIONAL); 572 buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); 573 qdf_mem_dp_tx_skb_cnt_inc(); 574 qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf)); 575 576 hal_rxdma_buff_addr_info_set(soc->hal_soc, ring_entry, 577 buffer_paddr, 0, wbm_bm_id); 578 579 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[ 580 tx_buffer_count] = (void *)nbuf; 581 } 582 583 hal_srng_access_end_unlocked(soc->hal_soc, 584 hal_srng_to_hal_ring_handle(wbm_srng)); 585 586 soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt = tx_buffer_count; 587 588 if (tx_buffer_count) { 589 dp_info("IPA TX buffer pool2: %d allocated", tx_buffer_count); 590 } else { 591 dp_err("Failed to allocate IPA TX buffer pool2"); 592 qdf_mem_free( 593 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned); 594 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL; 595 retval = -ENOMEM; 596 } 597 598 return retval; 599 } 600 601 static QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev) 602 { 603 struct dp_soc *soc = pdev->soc; 604 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 605 606 ipa_res->tx_alt_ring_num_alloc_buffer = 607 (uint32_t)soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt; 608 609 dp_ipa_get_shared_mem_info( 610 soc->osdev, &ipa_res->tx_alt_ring, 611 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr, 612 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr, 613 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size); 614 615 dp_ipa_get_shared_mem_info( 616 soc->osdev, &ipa_res->tx_alt_comp_ring, 617 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr, 618 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr, 619 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size); 620 621 if (!qdf_mem_get_dma_addr(soc->osdev, 622 &ipa_res->tx_alt_comp_ring.mem_info)) 623 return QDF_STATUS_E_FAILURE; 624 625 return QDF_STATUS_SUCCESS; 626 } 627 628 static void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc) 629 { 630 struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc; 631 struct hal_srng *hal_srng; 632 struct hal_srng_params srng_params; 633 unsigned long addr_offset, dev_base_paddr; 634 635 /* IPA TCL_DATA Alternative Ring - HAL_SRNG_SW2TCL2 */ 636 hal_srng = (struct hal_srng *) 637 soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng; 638 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 639 hal_srng_to_hal_ring_handle(hal_srng), 640 &srng_params); 641 642 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr = 643 srng_params.ring_base_paddr; 644 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr = 645 srng_params.ring_base_vaddr; 646 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size = 647 (srng_params.num_entries * srng_params.entry_size) << 2; 648 /* 649 * For the register backed memory addresses, use the scn->mem_pa to 650 * calculate the physical address of the shadow registers 651 */ 652 dev_base_paddr = 653 (unsigned long) 654 ((struct hif_softc *)(hal_soc->hif_handle))->mem_pa; 655 addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) - 656 (unsigned long)(hal_soc->dev_base_addr); 657 soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr = 658 (qdf_dma_addr_t)(addr_offset + dev_base_paddr); 659 660 dp_info("IPA TCL_DATA Alt Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", 661 (unsigned int)addr_offset, 662 (unsigned int)dev_base_paddr, 663 (unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr), 664 (void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr, 665 (void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr, 666 srng_params.num_entries, 667 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size); 668 669 /* IPA TX Alternative COMP Ring - HAL_SRNG_WBM2SW4_RELEASE */ 670 hal_srng = (struct hal_srng *) 671 soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng; 672 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 673 hal_srng_to_hal_ring_handle(hal_srng), 674 &srng_params); 675 676 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr = 677 srng_params.ring_base_paddr; 678 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr = 679 srng_params.ring_base_vaddr; 680 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size = 681 (srng_params.num_entries * srng_params.entry_size) << 2; 682 soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr = 683 hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc), 684 hal_srng_to_hal_ring_handle(hal_srng)); 685 addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) - 686 (unsigned long)(hal_soc->dev_base_addr); 687 soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr = 688 (qdf_dma_addr_t)(addr_offset + dev_base_paddr); 689 690 dp_info("IPA TX Alt COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)", 691 (unsigned int)addr_offset, 692 (unsigned int)dev_base_paddr, 693 (unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr), 694 (void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr, 695 (void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr, 696 srng_params.num_entries, 697 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size); 698 } 699 700 static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev) 701 { 702 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 703 uint32_t rx_ready_doorbell_dmaaddr; 704 uint32_t tx_comp_doorbell_dmaaddr; 705 struct dp_soc *soc = pdev->soc; 706 int ret = 0; 707 708 if (ipa_res->is_db_ddr_mapped) 709 ipa_res->tx_comp_doorbell_vaddr = 710 phys_to_virt(ipa_res->tx_comp_doorbell_paddr); 711 else 712 ipa_res->tx_comp_doorbell_vaddr = 713 ioremap(ipa_res->tx_comp_doorbell_paddr, 4); 714 715 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 716 ret = pld_smmu_map(soc->osdev->dev, 717 ipa_res->tx_comp_doorbell_paddr, 718 &tx_comp_doorbell_dmaaddr, 719 sizeof(uint32_t)); 720 ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr; 721 qdf_assert_always(!ret); 722 723 ret = pld_smmu_map(soc->osdev->dev, 724 ipa_res->rx_ready_doorbell_paddr, 725 &rx_ready_doorbell_dmaaddr, 726 sizeof(uint32_t)); 727 ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr; 728 qdf_assert_always(!ret); 729 } 730 731 /* Setup for alternative TX pipe */ 732 if (!ipa_res->tx_alt_comp_doorbell_paddr) 733 return; 734 735 if (ipa_res->is_db_ddr_mapped) 736 ipa_res->tx_alt_comp_doorbell_vaddr = 737 phys_to_virt(ipa_res->tx_alt_comp_doorbell_paddr); 738 else 739 ipa_res->tx_alt_comp_doorbell_vaddr = 740 ioremap(ipa_res->tx_alt_comp_doorbell_paddr, 4); 741 742 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 743 ret = pld_smmu_map(soc->osdev->dev, 744 ipa_res->tx_alt_comp_doorbell_paddr, 745 &tx_comp_doorbell_dmaaddr, 746 sizeof(uint32_t)); 747 ipa_res->tx_alt_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr; 748 qdf_assert_always(!ret); 749 } 750 } 751 752 static void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev) 753 { 754 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 755 struct dp_soc *soc = pdev->soc; 756 int ret = 0; 757 758 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) 759 return; 760 761 /* Unmap must be in reverse order of map */ 762 if (ipa_res->tx_alt_comp_doorbell_paddr) { 763 ret = pld_smmu_unmap(soc->osdev->dev, 764 ipa_res->tx_alt_comp_doorbell_paddr, 765 sizeof(uint32_t)); 766 qdf_assert_always(!ret); 767 } 768 769 ret = pld_smmu_unmap(soc->osdev->dev, 770 ipa_res->rx_ready_doorbell_paddr, 771 sizeof(uint32_t)); 772 qdf_assert_always(!ret); 773 774 ret = pld_smmu_unmap(soc->osdev->dev, 775 ipa_res->tx_comp_doorbell_paddr, 776 sizeof(uint32_t)); 777 qdf_assert_always(!ret); 778 } 779 780 static QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc, 781 struct dp_pdev *pdev, 782 bool create, const char *func, 783 uint32_t line) 784 { 785 QDF_STATUS ret = QDF_STATUS_SUCCESS; 786 struct ipa_dp_tx_rsc *rsc; 787 uint32_t tx_buffer_cnt; 788 uint32_t buf_len; 789 qdf_nbuf_t nbuf; 790 uint32_t index; 791 792 if (!ipa_is_ready()) { 793 dp_info("IPA is not READY"); 794 return QDF_STATUS_SUCCESS; 795 } 796 797 rsc = &soc->ipa_uc_tx_rsc_alt; 798 tx_buffer_cnt = rsc->alloc_tx_buf_cnt; 799 800 for (index = 0; index < tx_buffer_cnt; index++) { 801 nbuf = (qdf_nbuf_t)rsc->tx_buf_pool_vaddr_unaligned[index]; 802 if (!nbuf) 803 continue; 804 805 buf_len = qdf_nbuf_get_data_len(nbuf); 806 ret = __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, buf_len, 807 create, func, line); 808 } 809 810 return ret; 811 } 812 813 static void dp_ipa_wdi_tx_alt_pipe_params(struct dp_soc *soc, 814 struct dp_ipa_resources *ipa_res, 815 qdf_ipa_wdi_pipe_setup_info_t *tx) 816 { 817 QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS1; 818 819 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) = 820 qdf_mem_get_dma_addr(soc->osdev, 821 &ipa_res->tx_alt_comp_ring.mem_info); 822 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) = 823 qdf_mem_get_dma_size(soc->osdev, 824 &ipa_res->tx_alt_comp_ring.mem_info); 825 826 /* WBM Tail Pointer Address */ 827 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) = 828 soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr; 829 QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true; 830 831 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) = 832 qdf_mem_get_dma_addr(soc->osdev, 833 &ipa_res->tx_alt_ring.mem_info); 834 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = 835 qdf_mem_get_dma_size(soc->osdev, 836 &ipa_res->tx_alt_ring.mem_info); 837 838 /* TCL Head Pointer Address */ 839 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) = 840 soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr; 841 QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true; 842 843 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) = 844 ipa_res->tx_alt_ring_num_alloc_buffer; 845 846 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0; 847 848 dp_ipa_setup_tx_params_bank_id(soc, tx); 849 } 850 851 static void 852 dp_ipa_wdi_tx_alt_pipe_smmu_params(struct dp_soc *soc, 853 struct dp_ipa_resources *ipa_res, 854 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu) 855 { 856 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = IPA_CLIENT_WLAN2_CONS1; 857 858 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu), 859 &ipa_res->tx_alt_comp_ring.sgtable, 860 sizeof(sgtable_t)); 861 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) = 862 qdf_mem_get_dma_size(soc->osdev, 863 &ipa_res->tx_alt_comp_ring.mem_info); 864 /* WBM Tail Pointer Address */ 865 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) = 866 soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr; 867 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true; 868 869 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu), 870 &ipa_res->tx_alt_ring.sgtable, 871 sizeof(sgtable_t)); 872 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) = 873 qdf_mem_get_dma_size(soc->osdev, 874 &ipa_res->tx_alt_ring.mem_info); 875 /* TCL Head Pointer Address */ 876 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) = 877 soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr; 878 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true; 879 880 QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) = 881 ipa_res->tx_alt_ring_num_alloc_buffer; 882 QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0; 883 884 dp_ipa_setup_tx_smmu_params_bank_id(soc, tx_smmu); 885 } 886 887 static void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc, 888 struct dp_ipa_resources *res, 889 qdf_ipa_wdi_conn_in_params_t *in) 890 { 891 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu = NULL; 892 qdf_ipa_wdi_pipe_setup_info_t *tx = NULL; 893 qdf_ipa_ep_cfg_t *tx_cfg; 894 895 QDF_IPA_WDI_CONN_IN_PARAMS_IS_TX1_USED(in) = true; 896 897 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 898 tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE_SMMU(in); 899 tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu); 900 dp_ipa_wdi_tx_alt_pipe_smmu_params(soc, res, tx_smmu); 901 } else { 902 tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE(in); 903 tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx); 904 dp_ipa_wdi_tx_alt_pipe_params(soc, res, tx); 905 } 906 907 QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT; 908 QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN; 909 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0; 910 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0; 911 QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0; 912 QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC; 913 QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true; 914 } 915 916 static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res, 917 qdf_ipa_wdi_conn_out_params_t *out) 918 { 919 res->tx_comp_doorbell_paddr = 920 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out); 921 res->rx_ready_doorbell_paddr = 922 QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out); 923 res->tx_alt_comp_doorbell_paddr = 924 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_ALT_DB_PA(out); 925 } 926 927 static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in, 928 uint8_t session_id) 929 { 930 bool is_2g_iface = session_id & IPA_SESSION_ID_SHIFT; 931 932 session_id = session_id >> IPA_SESSION_ID_SHIFT; 933 dp_debug("session_id %u is_2g_iface %d", session_id, is_2g_iface); 934 935 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16); 936 QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_TX1_USED(in) = is_2g_iface; 937 } 938 939 static void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc, 940 struct dp_ipa_resources *res) 941 { 942 struct hal_srng *wbm_srng; 943 944 /* Init first TX comp ring */ 945 wbm_srng = (struct hal_srng *) 946 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 947 948 hal_srng_dst_init_hp(soc->hal_soc, wbm_srng, 949 res->tx_comp_doorbell_vaddr); 950 951 /* Init the alternate TX comp ring */ 952 wbm_srng = (struct hal_srng *) 953 soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng; 954 955 hal_srng_dst_init_hp(soc->hal_soc, wbm_srng, 956 res->tx_alt_comp_doorbell_vaddr); 957 } 958 959 static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc, 960 struct dp_ipa_resources *ipa_res) 961 { 962 struct hal_srng *wbm_srng; 963 964 wbm_srng = (struct hal_srng *) 965 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 966 967 hal_srng_dst_set_hp_paddr_confirm(wbm_srng, 968 ipa_res->tx_comp_doorbell_paddr); 969 970 dp_info("paddr %pK vaddr %pK", 971 (void *)ipa_res->tx_comp_doorbell_paddr, 972 (void *)ipa_res->tx_comp_doorbell_vaddr); 973 974 /* Setup for alternative TX comp ring */ 975 wbm_srng = (struct hal_srng *) 976 soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng; 977 978 hal_srng_dst_set_hp_paddr_confirm(wbm_srng, 979 ipa_res->tx_alt_comp_doorbell_paddr); 980 981 dp_info("paddr %pK vaddr %pK", 982 (void *)ipa_res->tx_alt_comp_doorbell_paddr, 983 (void *)ipa_res->tx_alt_comp_doorbell_vaddr); 984 } 985 986 #ifdef IPA_SET_RESET_TX_DB_PA 987 static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc, 988 struct dp_ipa_resources *ipa_res) 989 { 990 hal_ring_handle_t wbm_srng; 991 qdf_dma_addr_t hp_addr; 992 993 wbm_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 994 if (!wbm_srng) 995 return QDF_STATUS_E_FAILURE; 996 997 hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr; 998 999 hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr); 1000 1001 dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr); 1002 1003 /* Reset alternative TX comp ring */ 1004 wbm_srng = soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng; 1005 if (!wbm_srng) 1006 return QDF_STATUS_E_FAILURE; 1007 1008 hp_addr = soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr; 1009 1010 hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr); 1011 1012 dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr); 1013 1014 return QDF_STATUS_SUCCESS; 1015 } 1016 #endif /* IPA_SET_RESET_TX_DB_PA */ 1017 1018 #else /* !IPA_WDI3_TX_TWO_PIPES */ 1019 1020 static inline 1021 void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev) 1022 { 1023 } 1024 1025 static inline void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc) 1026 { 1027 } 1028 1029 static inline int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc) 1030 { 1031 return 0; 1032 } 1033 1034 static inline QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev) 1035 { 1036 return QDF_STATUS_SUCCESS; 1037 } 1038 1039 static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev) 1040 { 1041 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 1042 uint32_t rx_ready_doorbell_dmaaddr; 1043 uint32_t tx_comp_doorbell_dmaaddr; 1044 struct dp_soc *soc = pdev->soc; 1045 int ret = 0; 1046 1047 if (ipa_res->is_db_ddr_mapped) 1048 ipa_res->tx_comp_doorbell_vaddr = 1049 phys_to_virt(ipa_res->tx_comp_doorbell_paddr); 1050 else 1051 ipa_res->tx_comp_doorbell_vaddr = 1052 ioremap(ipa_res->tx_comp_doorbell_paddr, 4); 1053 1054 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 1055 ret = pld_smmu_map(soc->osdev->dev, 1056 ipa_res->tx_comp_doorbell_paddr, 1057 &tx_comp_doorbell_dmaaddr, 1058 sizeof(uint32_t)); 1059 ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr; 1060 qdf_assert_always(!ret); 1061 1062 ret = pld_smmu_map(soc->osdev->dev, 1063 ipa_res->rx_ready_doorbell_paddr, 1064 &rx_ready_doorbell_dmaaddr, 1065 sizeof(uint32_t)); 1066 ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr; 1067 qdf_assert_always(!ret); 1068 } 1069 } 1070 1071 static inline void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev) 1072 { 1073 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 1074 struct dp_soc *soc = pdev->soc; 1075 int ret = 0; 1076 1077 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) 1078 return; 1079 1080 ret = pld_smmu_unmap(soc->osdev->dev, 1081 ipa_res->rx_ready_doorbell_paddr, 1082 sizeof(uint32_t)); 1083 qdf_assert_always(!ret); 1084 1085 ret = pld_smmu_unmap(soc->osdev->dev, 1086 ipa_res->tx_comp_doorbell_paddr, 1087 sizeof(uint32_t)); 1088 qdf_assert_always(!ret); 1089 } 1090 1091 static inline QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc, 1092 struct dp_pdev *pdev, 1093 bool create, 1094 const char *func, 1095 uint32_t line) 1096 { 1097 return QDF_STATUS_SUCCESS; 1098 } 1099 1100 static inline 1101 void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc, struct dp_ipa_resources *res, 1102 qdf_ipa_wdi_conn_in_params_t *in) 1103 { 1104 } 1105 1106 static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res, 1107 qdf_ipa_wdi_conn_out_params_t *out) 1108 { 1109 res->tx_comp_doorbell_paddr = 1110 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out); 1111 res->rx_ready_doorbell_paddr = 1112 QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out); 1113 } 1114 1115 #ifdef IPA_WDS_EASYMESH_FEATURE 1116 /** 1117 * dp_ipa_setup_iface_session_id - Pass vdev id to IPA 1118 * @in: ipa in params 1119 * @session_id: vdev id 1120 * 1121 * Pass Vdev id to IPA, IPA metadata order is changed and vdev id 1122 * is stored at higher nibble so, no shift is required. 1123 * 1124 * Return: none 1125 */ 1126 static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in, 1127 uint8_t session_id) 1128 { 1129 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id); 1130 } 1131 #else 1132 static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in, 1133 uint8_t session_id) 1134 { 1135 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16); 1136 } 1137 #endif 1138 1139 static inline void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc, 1140 struct dp_ipa_resources *res) 1141 { 1142 struct hal_srng *wbm_srng = (struct hal_srng *) 1143 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 1144 1145 hal_srng_dst_init_hp(soc->hal_soc, wbm_srng, 1146 res->tx_comp_doorbell_vaddr); 1147 } 1148 1149 static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc, 1150 struct dp_ipa_resources *ipa_res) 1151 { 1152 struct hal_srng *wbm_srng = (struct hal_srng *) 1153 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 1154 1155 hal_srng_dst_set_hp_paddr_confirm(wbm_srng, 1156 ipa_res->tx_comp_doorbell_paddr); 1157 1158 dp_info("paddr %pK vaddr %pK", 1159 (void *)ipa_res->tx_comp_doorbell_paddr, 1160 (void *)ipa_res->tx_comp_doorbell_vaddr); 1161 } 1162 1163 #ifdef IPA_SET_RESET_TX_DB_PA 1164 static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc, 1165 struct dp_ipa_resources *ipa_res) 1166 { 1167 hal_ring_handle_t wbm_srng = 1168 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 1169 qdf_dma_addr_t hp_addr; 1170 1171 if (!wbm_srng) 1172 return QDF_STATUS_E_FAILURE; 1173 1174 hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr; 1175 1176 hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr); 1177 1178 dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr); 1179 1180 return QDF_STATUS_SUCCESS; 1181 } 1182 #endif /* IPA_SET_RESET_TX_DB_PA */ 1183 1184 #endif /* IPA_WDI3_TX_TWO_PIPES */ 1185 1186 /** 1187 * dp_tx_ipa_uc_detach - Free autonomy TX resources 1188 * @soc: data path instance 1189 * @pdev: core txrx pdev context 1190 * 1191 * Free allocated TX buffers with WBM SRNG 1192 * 1193 * Return: none 1194 */ 1195 static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) 1196 { 1197 int idx; 1198 qdf_nbuf_t nbuf; 1199 struct dp_ipa_resources *ipa_res; 1200 1201 for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) { 1202 nbuf = (qdf_nbuf_t) 1203 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx]; 1204 if (!nbuf) 1205 continue; 1206 qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL); 1207 qdf_mem_dp_tx_skb_cnt_dec(); 1208 qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf)); 1209 qdf_nbuf_free(nbuf); 1210 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] = 1211 (void *)NULL; 1212 } 1213 1214 qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned); 1215 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL; 1216 1217 ipa_res = &pdev->ipa_resource; 1218 1219 qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable); 1220 qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable); 1221 } 1222 1223 /** 1224 * dp_rx_ipa_uc_detach - free autonomy RX resources 1225 * @soc: data path instance 1226 * @pdev: core txrx pdev context 1227 * 1228 * This function will detach DP RX into main device context 1229 * will free DP Rx resources. 1230 * 1231 * Return: none 1232 */ 1233 static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) 1234 { 1235 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 1236 1237 qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable); 1238 qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable); 1239 } 1240 1241 /* 1242 * dp_rx_alt_ipa_uc_detach - free autonomy RX resources 1243 * @soc: data path instance 1244 * @pdev: core txrx pdev context 1245 * 1246 * This function will detach DP RX into main device context 1247 * will free DP Rx resources. 1248 * 1249 * Return: none 1250 */ 1251 #ifdef IPA_WDI3_VLAN_SUPPORT 1252 static void dp_rx_alt_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) 1253 { 1254 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 1255 1256 if (!wlan_ipa_is_vlan_enabled()) 1257 return; 1258 1259 qdf_mem_free_sgtable(&ipa_res->rx_alt_rdy_ring.sgtable); 1260 qdf_mem_free_sgtable(&ipa_res->rx_alt_refill_ring.sgtable); 1261 } 1262 #else 1263 static inline 1264 void dp_rx_alt_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) 1265 { } 1266 #endif 1267 1268 int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) 1269 { 1270 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 1271 return QDF_STATUS_SUCCESS; 1272 1273 /* TX resource detach */ 1274 dp_tx_ipa_uc_detach(soc, pdev); 1275 1276 /* Cleanup 2nd TX pipe resources */ 1277 dp_ipa_tx_alt_pool_detach(soc, pdev); 1278 1279 /* RX resource detach */ 1280 dp_rx_ipa_uc_detach(soc, pdev); 1281 1282 /* Cleanup 2nd RX pipe resources */ 1283 dp_rx_alt_ipa_uc_detach(soc, pdev); 1284 1285 return QDF_STATUS_SUCCESS; /* success */ 1286 } 1287 1288 /** 1289 * dp_tx_ipa_uc_attach - Allocate autonomy TX resources 1290 * @soc: data path instance 1291 * @pdev: Physical device handle 1292 * 1293 * Allocate TX buffer from non-cacheable memory 1294 * Attache allocated TX buffers with WBM SRNG 1295 * 1296 * Return: int 1297 */ 1298 static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev) 1299 { 1300 uint32_t tx_buffer_count; 1301 uint32_t ring_base_align = 8; 1302 qdf_dma_addr_t buffer_paddr; 1303 struct hal_srng *wbm_srng = (struct hal_srng *) 1304 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 1305 struct hal_srng_params srng_params; 1306 void *ring_entry; 1307 int num_entries; 1308 qdf_nbuf_t nbuf; 1309 int retval = QDF_STATUS_SUCCESS; 1310 int max_alloc_count = 0; 1311 uint32_t wbm_bm_id; 1312 1313 /* 1314 * Uncomment when dp_ops_cfg.cfg_attach is implemented 1315 * unsigned int uc_tx_buf_sz = 1316 * dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev); 1317 */ 1318 unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT; 1319 unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1; 1320 1321 wbm_bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, 1322 IPA_TCL_DATA_RING_IDX); 1323 1324 hal_get_srng_params(soc->hal_soc, hal_srng_to_hal_ring_handle(wbm_srng), 1325 &srng_params); 1326 num_entries = srng_params.num_entries; 1327 1328 max_alloc_count = 1329 num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES; 1330 if (max_alloc_count <= 0) { 1331 dp_err("incorrect value for buffer count %u", max_alloc_count); 1332 return -EINVAL; 1333 } 1334 1335 dp_info("requested %d buffers to be posted to wbm ring", 1336 max_alloc_count); 1337 1338 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = 1339 qdf_mem_malloc(num_entries * 1340 sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned)); 1341 if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) { 1342 dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail"); 1343 return -ENOMEM; 1344 } 1345 1346 hal_srng_access_start_unlocked(soc->hal_soc, 1347 hal_srng_to_hal_ring_handle(wbm_srng)); 1348 1349 /* 1350 * Allocate Tx buffers as many as possible. 1351 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty 1352 * Populate Tx buffers into WBM2IPA ring 1353 * This initial buffer population will simulate H/W as source ring, 1354 * and update HP 1355 */ 1356 for (tx_buffer_count = 0; 1357 tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) { 1358 nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE); 1359 if (!nbuf) 1360 break; 1361 1362 ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc, 1363 hal_srng_to_hal_ring_handle(wbm_srng)); 1364 if (!ring_entry) { 1365 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 1366 "%s: Failed to get WBM ring entry", 1367 __func__); 1368 qdf_nbuf_free(nbuf); 1369 break; 1370 } 1371 1372 qdf_nbuf_map_single(soc->osdev, nbuf, 1373 QDF_DMA_BIDIRECTIONAL); 1374 buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); 1375 qdf_mem_dp_tx_skb_cnt_inc(); 1376 qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf)); 1377 1378 /* 1379 * TODO - KIWI code can directly call the be handler 1380 * instead of hal soc ops. 1381 */ 1382 hal_rxdma_buff_addr_info_set(soc->hal_soc, ring_entry, 1383 buffer_paddr, 0, wbm_bm_id); 1384 1385 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count] 1386 = (void *)nbuf; 1387 } 1388 1389 hal_srng_access_end_unlocked(soc->hal_soc, 1390 hal_srng_to_hal_ring_handle(wbm_srng)); 1391 1392 soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count; 1393 1394 if (tx_buffer_count) { 1395 dp_info("IPA WDI TX buffer: %d allocated", tx_buffer_count); 1396 } else { 1397 dp_err("No IPA WDI TX buffer allocated!"); 1398 qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned); 1399 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL; 1400 retval = -ENOMEM; 1401 } 1402 1403 return retval; 1404 } 1405 1406 /** 1407 * dp_rx_ipa_uc_attach - Allocate autonomy RX resources 1408 * @soc: data path instance 1409 * @pdev: core txrx pdev context 1410 * 1411 * This function will attach a DP RX instance into the main 1412 * device (SOC) context. 1413 * 1414 * Return: QDF_STATUS_SUCCESS: success 1415 * QDF_STATUS_E_RESOURCES: Error return 1416 */ 1417 static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev) 1418 { 1419 return QDF_STATUS_SUCCESS; 1420 } 1421 1422 int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev) 1423 { 1424 int error; 1425 1426 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 1427 return QDF_STATUS_SUCCESS; 1428 1429 /* TX resource attach */ 1430 error = dp_tx_ipa_uc_attach(soc, pdev); 1431 if (error) { 1432 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1433 "%s: DP IPA UC TX attach fail code %d", 1434 __func__, error); 1435 return error; 1436 } 1437 1438 /* Setup 2nd TX pipe */ 1439 error = dp_ipa_tx_alt_pool_attach(soc); 1440 if (error) { 1441 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1442 "%s: DP IPA TX pool2 attach fail code %d", 1443 __func__, error); 1444 dp_tx_ipa_uc_detach(soc, pdev); 1445 return error; 1446 } 1447 1448 /* RX resource attach */ 1449 error = dp_rx_ipa_uc_attach(soc, pdev); 1450 if (error) { 1451 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1452 "%s: DP IPA UC RX attach fail code %d", 1453 __func__, error); 1454 dp_ipa_tx_alt_pool_detach(soc, pdev); 1455 dp_tx_ipa_uc_detach(soc, pdev); 1456 return error; 1457 } 1458 1459 return QDF_STATUS_SUCCESS; /* success */ 1460 } 1461 1462 #ifdef IPA_WDI3_VLAN_SUPPORT 1463 /* 1464 * dp_ipa_rx_alt_ring_resource_setup() - setup IPA 2nd RX ring resources 1465 * @soc: data path SoC handle 1466 * @pdev: data path pdev handle 1467 * 1468 * Return: none 1469 */ 1470 static 1471 void dp_ipa_rx_alt_ring_resource_setup(struct dp_soc *soc, struct dp_pdev *pdev) 1472 { 1473 struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc; 1474 struct hal_srng *hal_srng; 1475 struct hal_srng_params srng_params; 1476 unsigned long addr_offset, dev_base_paddr; 1477 qdf_dma_addr_t hp_addr; 1478 1479 if (!wlan_ipa_is_vlan_enabled()) 1480 return; 1481 1482 dev_base_paddr = 1483 (unsigned long) 1484 ((struct hif_softc *)(hal_soc->hif_handle))->mem_pa; 1485 1486 /* IPA REO_DEST Ring - HAL_SRNG_REO2SW3 */ 1487 hal_srng = (struct hal_srng *) 1488 soc->reo_dest_ring[IPA_ALT_REO_DEST_RING_IDX].hal_srng; 1489 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 1490 hal_srng_to_hal_ring_handle(hal_srng), 1491 &srng_params); 1492 1493 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_paddr = 1494 srng_params.ring_base_paddr; 1495 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_vaddr = 1496 srng_params.ring_base_vaddr; 1497 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_size = 1498 (srng_params.num_entries * srng_params.entry_size) << 2; 1499 addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) - 1500 (unsigned long)(hal_soc->dev_base_addr); 1501 soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr = 1502 (qdf_dma_addr_t)(addr_offset + dev_base_paddr); 1503 1504 dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", 1505 (unsigned int)addr_offset, 1506 (unsigned int)dev_base_paddr, 1507 (unsigned int)(soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr), 1508 (void *)soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_paddr, 1509 (void *)soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_vaddr, 1510 srng_params.num_entries, 1511 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_size); 1512 1513 hal_srng = (struct hal_srng *) 1514 pdev->rx_refill_buf_ring3.hal_srng; 1515 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 1516 hal_srng_to_hal_ring_handle(hal_srng), 1517 &srng_params); 1518 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_paddr = 1519 srng_params.ring_base_paddr; 1520 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_vaddr = 1521 srng_params.ring_base_vaddr; 1522 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_size = 1523 (srng_params.num_entries * srng_params.entry_size) << 2; 1524 hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc), 1525 hal_srng_to_hal_ring_handle(hal_srng)); 1526 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr = 1527 qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr); 1528 1529 dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", 1530 (unsigned int)(soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr), 1531 (void *)soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_paddr, 1532 (void *)soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_vaddr, 1533 srng_params.num_entries, 1534 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_size); 1535 } 1536 #else 1537 static inline 1538 void dp_ipa_rx_alt_ring_resource_setup(struct dp_soc *soc, struct dp_pdev *pdev) 1539 { } 1540 #endif 1541 /* 1542 * dp_ipa_ring_resource_setup() - setup IPA ring resources 1543 * @soc: data path SoC handle 1544 * 1545 * Return: none 1546 */ 1547 int dp_ipa_ring_resource_setup(struct dp_soc *soc, 1548 struct dp_pdev *pdev) 1549 { 1550 struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc; 1551 struct hal_srng *hal_srng; 1552 struct hal_srng_params srng_params; 1553 qdf_dma_addr_t hp_addr; 1554 unsigned long addr_offset, dev_base_paddr; 1555 uint32_t ix0; 1556 uint8_t ix0_map[8]; 1557 1558 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 1559 return QDF_STATUS_SUCCESS; 1560 1561 /* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */ 1562 hal_srng = (struct hal_srng *) 1563 soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng; 1564 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 1565 hal_srng_to_hal_ring_handle(hal_srng), 1566 &srng_params); 1567 1568 soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr = 1569 srng_params.ring_base_paddr; 1570 soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr = 1571 srng_params.ring_base_vaddr; 1572 soc->ipa_uc_tx_rsc.ipa_tcl_ring_size = 1573 (srng_params.num_entries * srng_params.entry_size) << 2; 1574 /* 1575 * For the register backed memory addresses, use the scn->mem_pa to 1576 * calculate the physical address of the shadow registers 1577 */ 1578 dev_base_paddr = 1579 (unsigned long) 1580 ((struct hif_softc *)(hal_soc->hif_handle))->mem_pa; 1581 addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) - 1582 (unsigned long)(hal_soc->dev_base_addr); 1583 soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr = 1584 (qdf_dma_addr_t)(addr_offset + dev_base_paddr); 1585 1586 dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", 1587 (unsigned int)addr_offset, 1588 (unsigned int)dev_base_paddr, 1589 (unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr), 1590 (void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr, 1591 (void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr, 1592 srng_params.num_entries, 1593 soc->ipa_uc_tx_rsc.ipa_tcl_ring_size); 1594 1595 /* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */ 1596 hal_srng = (struct hal_srng *) 1597 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 1598 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 1599 hal_srng_to_hal_ring_handle(hal_srng), 1600 &srng_params); 1601 1602 soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr = 1603 srng_params.ring_base_paddr; 1604 soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr = 1605 srng_params.ring_base_vaddr; 1606 soc->ipa_uc_tx_rsc.ipa_wbm_ring_size = 1607 (srng_params.num_entries * srng_params.entry_size) << 2; 1608 soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr = 1609 hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc), 1610 hal_srng_to_hal_ring_handle(hal_srng)); 1611 addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) - 1612 (unsigned long)(hal_soc->dev_base_addr); 1613 soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr = 1614 (qdf_dma_addr_t)(addr_offset + dev_base_paddr); 1615 1616 dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)", 1617 (unsigned int)addr_offset, 1618 (unsigned int)dev_base_paddr, 1619 (unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr), 1620 (void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr, 1621 (void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr, 1622 srng_params.num_entries, 1623 soc->ipa_uc_tx_rsc.ipa_wbm_ring_size); 1624 1625 dp_ipa_tx_alt_ring_resource_setup(soc); 1626 1627 /* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */ 1628 hal_srng = (struct hal_srng *) 1629 soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng; 1630 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 1631 hal_srng_to_hal_ring_handle(hal_srng), 1632 &srng_params); 1633 1634 soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr = 1635 srng_params.ring_base_paddr; 1636 soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr = 1637 srng_params.ring_base_vaddr; 1638 soc->ipa_uc_rx_rsc.ipa_reo_ring_size = 1639 (srng_params.num_entries * srng_params.entry_size) << 2; 1640 addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) - 1641 (unsigned long)(hal_soc->dev_base_addr); 1642 soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr = 1643 (qdf_dma_addr_t)(addr_offset + dev_base_paddr); 1644 1645 dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", 1646 (unsigned int)addr_offset, 1647 (unsigned int)dev_base_paddr, 1648 (unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr), 1649 (void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr, 1650 (void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr, 1651 srng_params.num_entries, 1652 soc->ipa_uc_rx_rsc.ipa_reo_ring_size); 1653 1654 hal_srng = (struct hal_srng *) 1655 pdev->rx_refill_buf_ring2.hal_srng; 1656 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 1657 hal_srng_to_hal_ring_handle(hal_srng), 1658 &srng_params); 1659 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr = 1660 srng_params.ring_base_paddr; 1661 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr = 1662 srng_params.ring_base_vaddr; 1663 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size = 1664 (srng_params.num_entries * srng_params.entry_size) << 2; 1665 hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc), 1666 hal_srng_to_hal_ring_handle(hal_srng)); 1667 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr = 1668 qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr); 1669 1670 dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", 1671 (unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr), 1672 (void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr, 1673 (void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr, 1674 srng_params.num_entries, 1675 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size); 1676 1677 /* 1678 * Set DEST_RING_MAPPING_4 to SW2 as default value for 1679 * DESTINATION_RING_CTRL_IX_0. 1680 */ 1681 ix0_map[0] = REO_REMAP_SW1; 1682 ix0_map[1] = REO_REMAP_SW1; 1683 ix0_map[2] = REO_REMAP_SW2; 1684 ix0_map[3] = REO_REMAP_SW3; 1685 ix0_map[4] = REO_REMAP_SW2; 1686 ix0_map[5] = REO_REMAP_RELEASE; 1687 ix0_map[6] = REO_REMAP_FW; 1688 ix0_map[7] = REO_REMAP_FW; 1689 1690 ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0, 1691 ix0_map); 1692 1693 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, NULL, NULL); 1694 1695 dp_ipa_rx_alt_ring_resource_setup(soc, pdev); 1696 return 0; 1697 } 1698 1699 #ifdef IPA_WDI3_VLAN_SUPPORT 1700 /* 1701 * dp_ipa_rx_alt_ring_get_resource() - get IPA 2nd RX ring resources 1702 * @pdev: data path pdev handle 1703 * 1704 * Return: Success if resourece is found 1705 */ 1706 static QDF_STATUS dp_ipa_rx_alt_ring_get_resource(struct dp_pdev *pdev) 1707 { 1708 struct dp_soc *soc = pdev->soc; 1709 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 1710 1711 if (!wlan_ipa_is_vlan_enabled()) 1712 return QDF_STATUS_SUCCESS; 1713 1714 dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_alt_rdy_ring, 1715 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_vaddr, 1716 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_paddr, 1717 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_size); 1718 1719 dp_ipa_get_shared_mem_info( 1720 soc->osdev, &ipa_res->rx_alt_refill_ring, 1721 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_vaddr, 1722 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_paddr, 1723 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_size); 1724 1725 if (!qdf_mem_get_dma_addr(soc->osdev, 1726 &ipa_res->rx_alt_rdy_ring.mem_info) || 1727 !qdf_mem_get_dma_addr(soc->osdev, 1728 &ipa_res->rx_alt_refill_ring.mem_info)) 1729 return QDF_STATUS_E_FAILURE; 1730 1731 return QDF_STATUS_SUCCESS; 1732 } 1733 #else 1734 static inline QDF_STATUS dp_ipa_rx_alt_ring_get_resource(struct dp_pdev *pdev) 1735 { 1736 return QDF_STATUS_SUCCESS; 1737 } 1738 #endif 1739 1740 QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 1741 { 1742 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1743 struct dp_pdev *pdev = 1744 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1745 struct dp_ipa_resources *ipa_res; 1746 1747 if (!pdev) { 1748 dp_err("Invalid instance"); 1749 return QDF_STATUS_E_FAILURE; 1750 } 1751 1752 ipa_res = &pdev->ipa_resource; 1753 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 1754 return QDF_STATUS_SUCCESS; 1755 1756 ipa_res->tx_num_alloc_buffer = 1757 (uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; 1758 1759 dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring, 1760 soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr, 1761 soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr, 1762 soc->ipa_uc_tx_rsc.ipa_tcl_ring_size); 1763 1764 dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring, 1765 soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr, 1766 soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr, 1767 soc->ipa_uc_tx_rsc.ipa_wbm_ring_size); 1768 1769 dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring, 1770 soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr, 1771 soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr, 1772 soc->ipa_uc_rx_rsc.ipa_reo_ring_size); 1773 1774 dp_ipa_get_shared_mem_info( 1775 soc->osdev, &ipa_res->rx_refill_ring, 1776 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr, 1777 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr, 1778 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size); 1779 1780 if (!qdf_mem_get_dma_addr(soc->osdev, &ipa_res->tx_ring.mem_info) || 1781 !qdf_mem_get_dma_addr(soc->osdev, 1782 &ipa_res->tx_comp_ring.mem_info) || 1783 !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info) || 1784 !qdf_mem_get_dma_addr(soc->osdev, 1785 &ipa_res->rx_refill_ring.mem_info)) 1786 return QDF_STATUS_E_FAILURE; 1787 1788 if (dp_ipa_tx_alt_ring_get_resource(pdev)) 1789 return QDF_STATUS_E_FAILURE; 1790 1791 if (dp_ipa_rx_alt_ring_get_resource(pdev)) 1792 return QDF_STATUS_E_FAILURE; 1793 1794 return QDF_STATUS_SUCCESS; 1795 } 1796 1797 #ifdef IPA_SET_RESET_TX_DB_PA 1798 #define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res) 1799 #else 1800 #define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res) \ 1801 dp_ipa_set_tx_doorbell_paddr(soc, ipa_res) 1802 #endif 1803 1804 #ifdef IPA_WDI3_VLAN_SUPPORT 1805 /* 1806 * dp_ipa_map_rx_alt_ring_doorbell_paddr() - Map 2nd rx ring doorbell paddr 1807 * @pdev: data path pdev handle 1808 * 1809 * Return: none 1810 */ 1811 static void dp_ipa_map_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev) 1812 { 1813 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 1814 uint32_t rx_ready_doorbell_dmaaddr; 1815 struct dp_soc *soc = pdev->soc; 1816 struct hal_srng *reo_srng = (struct hal_srng *) 1817 soc->reo_dest_ring[IPA_ALT_REO_DEST_RING_IDX].hal_srng; 1818 int ret = 0; 1819 1820 if (!wlan_ipa_is_vlan_enabled()) 1821 return; 1822 1823 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 1824 ret = pld_smmu_map(soc->osdev->dev, 1825 ipa_res->rx_alt_ready_doorbell_paddr, 1826 &rx_ready_doorbell_dmaaddr, 1827 sizeof(uint32_t)); 1828 ipa_res->rx_alt_ready_doorbell_paddr = 1829 rx_ready_doorbell_dmaaddr; 1830 qdf_assert_always(!ret); 1831 } 1832 1833 hal_srng_dst_set_hp_paddr_confirm(reo_srng, 1834 ipa_res->rx_alt_ready_doorbell_paddr); 1835 } 1836 1837 /* 1838 * dp_ipa_unmap_rx_alt_ring_doorbell_paddr() - Unmap 2nd rx ring doorbell paddr 1839 * @pdev: data path pdev handle 1840 * 1841 * Return: none 1842 */ 1843 static void dp_ipa_unmap_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev) 1844 { 1845 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 1846 struct dp_soc *soc = pdev->soc; 1847 int ret = 0; 1848 1849 if (!wlan_ipa_is_vlan_enabled()) 1850 return; 1851 1852 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) 1853 return; 1854 1855 ret = pld_smmu_unmap(soc->osdev->dev, 1856 ipa_res->rx_alt_ready_doorbell_paddr, 1857 sizeof(uint32_t)); 1858 qdf_assert_always(!ret); 1859 } 1860 #else 1861 static inline void dp_ipa_map_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev) 1862 { } 1863 1864 static inline void dp_ipa_unmap_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev) 1865 { } 1866 #endif 1867 1868 QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 1869 { 1870 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1871 struct dp_pdev *pdev = 1872 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1873 struct dp_ipa_resources *ipa_res; 1874 struct hal_srng *reo_srng = (struct hal_srng *) 1875 soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng; 1876 1877 if (!pdev) { 1878 dp_err("Invalid instance"); 1879 return QDF_STATUS_E_FAILURE; 1880 } 1881 1882 ipa_res = &pdev->ipa_resource; 1883 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 1884 return QDF_STATUS_SUCCESS; 1885 1886 dp_ipa_map_ring_doorbell_paddr(pdev); 1887 dp_ipa_map_rx_alt_ring_doorbell_paddr(pdev); 1888 1889 DP_IPA_SET_TX_DB_PADDR(soc, ipa_res); 1890 1891 /* 1892 * For RX, REO module on Napier/Hastings does reordering on incoming 1893 * Ethernet packets and writes one or more descriptors to REO2IPA Rx 1894 * ring.It then updates the ring’s Write/Head ptr and rings a doorbell 1895 * to IPA. 1896 * Set the doorbell addr for the REO ring. 1897 */ 1898 hal_srng_dst_set_hp_paddr_confirm(reo_srng, 1899 ipa_res->rx_ready_doorbell_paddr); 1900 return QDF_STATUS_SUCCESS; 1901 } 1902 1903 QDF_STATUS dp_ipa_iounmap_doorbell_vaddr(struct cdp_soc_t *soc_hdl, 1904 uint8_t pdev_id) 1905 { 1906 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1907 struct dp_pdev *pdev = 1908 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1909 struct dp_ipa_resources *ipa_res; 1910 1911 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 1912 return QDF_STATUS_SUCCESS; 1913 1914 if (!pdev) { 1915 dp_err("Invalid instance"); 1916 return QDF_STATUS_E_FAILURE; 1917 } 1918 1919 ipa_res = &pdev->ipa_resource; 1920 if (!ipa_res->is_db_ddr_mapped) 1921 iounmap(ipa_res->tx_comp_doorbell_vaddr); 1922 1923 return QDF_STATUS_SUCCESS; 1924 } 1925 1926 QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 1927 uint8_t *op_msg) 1928 { 1929 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1930 struct dp_pdev *pdev = 1931 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1932 1933 if (!pdev) { 1934 dp_err("Invalid instance"); 1935 return QDF_STATUS_E_FAILURE; 1936 } 1937 1938 if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx)) 1939 return QDF_STATUS_SUCCESS; 1940 1941 if (pdev->ipa_uc_op_cb) { 1942 pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt); 1943 } else { 1944 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1945 "%s: IPA callback function is not registered", __func__); 1946 qdf_mem_free(op_msg); 1947 return QDF_STATUS_E_FAILURE; 1948 } 1949 1950 return QDF_STATUS_SUCCESS; 1951 } 1952 1953 QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 1954 ipa_uc_op_cb_type op_cb, 1955 void *usr_ctxt) 1956 { 1957 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1958 struct dp_pdev *pdev = 1959 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1960 1961 if (!pdev) { 1962 dp_err("Invalid instance"); 1963 return QDF_STATUS_E_FAILURE; 1964 } 1965 1966 if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx)) 1967 return QDF_STATUS_SUCCESS; 1968 1969 pdev->ipa_uc_op_cb = op_cb; 1970 pdev->usr_ctxt = usr_ctxt; 1971 1972 return QDF_STATUS_SUCCESS; 1973 } 1974 1975 void dp_ipa_deregister_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 1976 { 1977 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1978 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1979 1980 if (!pdev) { 1981 dp_err("Invalid instance"); 1982 return; 1983 } 1984 1985 dp_debug("Deregister OP handler callback"); 1986 pdev->ipa_uc_op_cb = NULL; 1987 pdev->usr_ctxt = NULL; 1988 } 1989 1990 QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 1991 { 1992 /* TBD */ 1993 return QDF_STATUS_SUCCESS; 1994 } 1995 1996 /** 1997 * dp_tx_send_ipa_data_frame() - send IPA data frame 1998 * @soc_hdl: datapath soc handle 1999 * @vdev_id: id of the virtual device 2000 * @skb: skb to transmit 2001 * 2002 * Return: skb/ NULL is for success 2003 */ 2004 qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2005 qdf_nbuf_t skb) 2006 { 2007 qdf_nbuf_t ret; 2008 2009 /* Terminate the (single-element) list of tx frames */ 2010 qdf_nbuf_set_next(skb, NULL); 2011 ret = dp_tx_send(soc_hdl, vdev_id, skb); 2012 if (ret) { 2013 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2014 "%s: Failed to tx", __func__); 2015 return ret; 2016 } 2017 2018 return NULL; 2019 } 2020 2021 #ifdef QCA_IPA_LL_TX_FLOW_CONTROL 2022 /** 2023 * dp_ipa_is_target_ready() - check if target is ready or not 2024 * @soc: datapath soc handle 2025 * 2026 * Return: true if target is ready 2027 */ 2028 static inline 2029 bool dp_ipa_is_target_ready(struct dp_soc *soc) 2030 { 2031 if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET) 2032 return false; 2033 else 2034 return true; 2035 } 2036 #else 2037 static inline 2038 bool dp_ipa_is_target_ready(struct dp_soc *soc) 2039 { 2040 return true; 2041 } 2042 #endif 2043 2044 QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 2045 { 2046 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 2047 struct dp_pdev *pdev = 2048 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2049 uint32_t ix0; 2050 uint32_t ix2; 2051 uint8_t ix_map[8]; 2052 2053 if (!pdev) { 2054 dp_err("Invalid instance"); 2055 return QDF_STATUS_E_FAILURE; 2056 } 2057 2058 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 2059 return QDF_STATUS_SUCCESS; 2060 2061 if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle))) 2062 return QDF_STATUS_E_AGAIN; 2063 2064 if (!dp_ipa_is_target_ready(soc)) 2065 return QDF_STATUS_E_AGAIN; 2066 2067 /* Call HAL API to remap REO rings to REO2IPA ring */ 2068 ix_map[0] = REO_REMAP_SW1; 2069 ix_map[1] = REO_REMAP_SW4; 2070 ix_map[2] = REO_REMAP_SW1; 2071 if (wlan_ipa_is_vlan_enabled()) 2072 ix_map[3] = REO_REMAP_SW3; 2073 else 2074 ix_map[3] = REO_REMAP_SW4; 2075 ix_map[4] = REO_REMAP_SW4; 2076 ix_map[5] = REO_REMAP_RELEASE; 2077 ix_map[6] = REO_REMAP_FW; 2078 ix_map[7] = REO_REMAP_FW; 2079 2080 ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0, 2081 ix_map); 2082 2083 if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) { 2084 ix_map[0] = REO_REMAP_SW4; 2085 ix_map[1] = REO_REMAP_SW4; 2086 ix_map[2] = REO_REMAP_SW4; 2087 ix_map[3] = REO_REMAP_SW4; 2088 ix_map[4] = REO_REMAP_SW4; 2089 ix_map[5] = REO_REMAP_SW4; 2090 ix_map[6] = REO_REMAP_SW4; 2091 ix_map[7] = REO_REMAP_SW4; 2092 2093 ix2 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX2, 2094 ix_map); 2095 2096 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, 2097 &ix2, &ix2); 2098 dp_ipa_reo_remap_history_add(ix0, ix2, ix2); 2099 } else { 2100 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, 2101 NULL, NULL); 2102 dp_ipa_reo_remap_history_add(ix0, 0, 0); 2103 } 2104 2105 return QDF_STATUS_SUCCESS; 2106 } 2107 2108 QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 2109 { 2110 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 2111 struct dp_pdev *pdev = 2112 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2113 uint8_t ix0_map[8]; 2114 uint32_t ix0; 2115 uint32_t ix1; 2116 uint32_t ix2; 2117 uint32_t ix3; 2118 2119 if (!pdev) { 2120 dp_err("Invalid instance"); 2121 return QDF_STATUS_E_FAILURE; 2122 } 2123 2124 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 2125 return QDF_STATUS_SUCCESS; 2126 2127 if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle))) 2128 return QDF_STATUS_E_AGAIN; 2129 2130 if (!dp_ipa_is_target_ready(soc)) 2131 return QDF_STATUS_E_AGAIN; 2132 2133 ix0_map[0] = REO_REMAP_SW1; 2134 ix0_map[1] = REO_REMAP_SW1; 2135 ix0_map[2] = REO_REMAP_SW2; 2136 ix0_map[3] = REO_REMAP_SW3; 2137 ix0_map[4] = REO_REMAP_SW2; 2138 ix0_map[5] = REO_REMAP_RELEASE; 2139 ix0_map[6] = REO_REMAP_FW; 2140 ix0_map[7] = REO_REMAP_FW; 2141 2142 /* Call HAL API to remap REO rings to REO2IPA ring */ 2143 ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0, 2144 ix0_map); 2145 2146 if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) { 2147 dp_reo_remap_config(soc, &ix1, &ix2, &ix3); 2148 2149 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, 2150 &ix2, &ix3); 2151 dp_ipa_reo_remap_history_add(ix0, ix2, ix3); 2152 } else { 2153 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, 2154 NULL, NULL); 2155 dp_ipa_reo_remap_history_add(ix0, 0, 0); 2156 } 2157 2158 return QDF_STATUS_SUCCESS; 2159 } 2160 2161 /* This should be configurable per H/W configuration enable status */ 2162 #define L3_HEADER_PADDING 2 2163 2164 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \ 2165 defined(CONFIG_IPA_WDI_UNIFIED_API) 2166 2167 #if !defined(QCA_LL_TX_FLOW_CONTROL_V2) && !defined(QCA_IPA_LL_TX_FLOW_CONTROL) 2168 static inline void dp_setup_mcc_sys_pipes( 2169 qdf_ipa_sys_connect_params_t *sys_in, 2170 qdf_ipa_wdi_conn_in_params_t *pipe_in) 2171 { 2172 int i = 0; 2173 /* Setup MCC sys pipe */ 2174 QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 2175 DP_IPA_MAX_IFACE; 2176 for (i = 0; i < DP_IPA_MAX_IFACE; i++) 2177 memcpy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[i], 2178 &sys_in[i], sizeof(qdf_ipa_sys_connect_params_t)); 2179 } 2180 #else 2181 static inline void dp_setup_mcc_sys_pipes( 2182 qdf_ipa_sys_connect_params_t *sys_in, 2183 qdf_ipa_wdi_conn_in_params_t *pipe_in) 2184 { 2185 QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0; 2186 } 2187 #endif 2188 2189 static void dp_ipa_wdi_tx_params(struct dp_soc *soc, 2190 struct dp_ipa_resources *ipa_res, 2191 qdf_ipa_wdi_pipe_setup_info_t *tx, 2192 bool over_gsi) 2193 { 2194 if (over_gsi) 2195 QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS; 2196 else 2197 QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS; 2198 2199 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) = 2200 qdf_mem_get_dma_addr(soc->osdev, 2201 &ipa_res->tx_comp_ring.mem_info); 2202 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) = 2203 qdf_mem_get_dma_size(soc->osdev, 2204 &ipa_res->tx_comp_ring.mem_info); 2205 2206 /* WBM Tail Pointer Address */ 2207 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) = 2208 soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr; 2209 QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true; 2210 2211 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) = 2212 qdf_mem_get_dma_addr(soc->osdev, 2213 &ipa_res->tx_ring.mem_info); 2214 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = 2215 qdf_mem_get_dma_size(soc->osdev, 2216 &ipa_res->tx_ring.mem_info); 2217 2218 /* TCL Head Pointer Address */ 2219 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) = 2220 soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr; 2221 QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true; 2222 2223 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) = 2224 ipa_res->tx_num_alloc_buffer; 2225 2226 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0; 2227 2228 dp_ipa_setup_tx_params_bank_id(soc, tx); 2229 } 2230 2231 static void dp_ipa_wdi_rx_params(struct dp_soc *soc, 2232 struct dp_ipa_resources *ipa_res, 2233 qdf_ipa_wdi_pipe_setup_info_t *rx, 2234 bool over_gsi) 2235 { 2236 if (over_gsi) 2237 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = 2238 IPA_CLIENT_WLAN2_PROD; 2239 else 2240 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = 2241 IPA_CLIENT_WLAN1_PROD; 2242 2243 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) = 2244 qdf_mem_get_dma_addr(soc->osdev, 2245 &ipa_res->rx_rdy_ring.mem_info); 2246 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) = 2247 qdf_mem_get_dma_size(soc->osdev, 2248 &ipa_res->rx_rdy_ring.mem_info); 2249 2250 /* REO Tail Pointer Address */ 2251 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) = 2252 soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr; 2253 QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true; 2254 2255 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) = 2256 qdf_mem_get_dma_addr(soc->osdev, 2257 &ipa_res->rx_refill_ring.mem_info); 2258 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) = 2259 qdf_mem_get_dma_size(soc->osdev, 2260 &ipa_res->rx_refill_ring.mem_info); 2261 2262 /* FW Head Pointer Address */ 2263 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) = 2264 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr; 2265 QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false; 2266 2267 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = 2268 soc->rx_pkt_tlv_size + L3_HEADER_PADDING; 2269 } 2270 2271 static void 2272 dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc, 2273 struct dp_ipa_resources *ipa_res, 2274 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu, 2275 bool over_gsi, 2276 qdf_ipa_wdi_hdl_t hdl) 2277 { 2278 if (over_gsi) { 2279 if (hdl == DP_IPA_HDL_FIRST) 2280 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = 2281 IPA_CLIENT_WLAN2_CONS; 2282 else if (hdl == DP_IPA_HDL_SECOND) 2283 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = 2284 IPA_CLIENT_WLAN4_CONS; 2285 } else { 2286 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = 2287 IPA_CLIENT_WLAN1_CONS; 2288 } 2289 2290 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu), 2291 &ipa_res->tx_comp_ring.sgtable, 2292 sizeof(sgtable_t)); 2293 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) = 2294 qdf_mem_get_dma_size(soc->osdev, 2295 &ipa_res->tx_comp_ring.mem_info); 2296 /* WBM Tail Pointer Address */ 2297 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) = 2298 soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr; 2299 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true; 2300 2301 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu), 2302 &ipa_res->tx_ring.sgtable, 2303 sizeof(sgtable_t)); 2304 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) = 2305 qdf_mem_get_dma_size(soc->osdev, 2306 &ipa_res->tx_ring.mem_info); 2307 /* TCL Head Pointer Address */ 2308 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) = 2309 soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr; 2310 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true; 2311 2312 QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) = 2313 ipa_res->tx_num_alloc_buffer; 2314 QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0; 2315 2316 dp_ipa_setup_tx_smmu_params_bank_id(soc, tx_smmu); 2317 } 2318 2319 static void 2320 dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc, 2321 struct dp_ipa_resources *ipa_res, 2322 qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu, 2323 bool over_gsi, 2324 qdf_ipa_wdi_hdl_t hdl) 2325 { 2326 if (over_gsi) { 2327 if (hdl == DP_IPA_HDL_FIRST) 2328 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) = 2329 IPA_CLIENT_WLAN2_PROD; 2330 else if (hdl == DP_IPA_HDL_SECOND) 2331 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) = 2332 IPA_CLIENT_WLAN3_PROD; 2333 } else { 2334 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) = 2335 IPA_CLIENT_WLAN1_PROD; 2336 } 2337 2338 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu), 2339 &ipa_res->rx_rdy_ring.sgtable, 2340 sizeof(sgtable_t)); 2341 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) = 2342 qdf_mem_get_dma_size(soc->osdev, 2343 &ipa_res->rx_rdy_ring.mem_info); 2344 /* REO Tail Pointer Address */ 2345 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) = 2346 soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr; 2347 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true; 2348 2349 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu), 2350 &ipa_res->rx_refill_ring.sgtable, 2351 sizeof(sgtable_t)); 2352 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) = 2353 qdf_mem_get_dma_size(soc->osdev, 2354 &ipa_res->rx_refill_ring.mem_info); 2355 2356 /* FW Head Pointer Address */ 2357 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) = 2358 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr; 2359 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false; 2360 2361 QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) = 2362 soc->rx_pkt_tlv_size + L3_HEADER_PADDING; 2363 } 2364 2365 #ifdef IPA_WDI3_VLAN_SUPPORT 2366 /* 2367 * dp_ipa_wdi_rx_alt_pipe_smmu_params() - Setup 2nd rx pipe smmu params 2368 * @soc: data path soc handle 2369 * @ipa_res: ipa resource pointer 2370 * @rx_smmu: smmu pipe info handle 2371 * @over_gsi: flag for IPA offload over gsi 2372 * @hdl: ipa registered handle 2373 * 2374 * Return: none 2375 */ 2376 static void 2377 dp_ipa_wdi_rx_alt_pipe_smmu_params(struct dp_soc *soc, 2378 struct dp_ipa_resources *ipa_res, 2379 qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu, 2380 bool over_gsi, 2381 qdf_ipa_wdi_hdl_t hdl) 2382 { 2383 if (!wlan_ipa_is_vlan_enabled()) 2384 return; 2385 2386 if (over_gsi) { 2387 if (hdl == DP_IPA_HDL_FIRST) 2388 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) = 2389 IPA_CLIENT_WLAN2_PROD1; 2390 else if (hdl == DP_IPA_HDL_SECOND) 2391 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) = 2392 IPA_CLIENT_WLAN3_PROD1; 2393 } else { 2394 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) = 2395 IPA_CLIENT_WLAN1_PROD; 2396 } 2397 2398 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu), 2399 &ipa_res->rx_alt_rdy_ring.sgtable, 2400 sizeof(sgtable_t)); 2401 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) = 2402 qdf_mem_get_dma_size(soc->osdev, 2403 &ipa_res->rx_alt_rdy_ring.mem_info); 2404 /* REO Tail Pointer Address */ 2405 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) = 2406 soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr; 2407 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true; 2408 2409 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu), 2410 &ipa_res->rx_alt_refill_ring.sgtable, 2411 sizeof(sgtable_t)); 2412 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) = 2413 qdf_mem_get_dma_size(soc->osdev, 2414 &ipa_res->rx_alt_refill_ring.mem_info); 2415 2416 /* FW Head Pointer Address */ 2417 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) = 2418 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr; 2419 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false; 2420 2421 QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) = 2422 soc->rx_pkt_tlv_size + L3_HEADER_PADDING; 2423 } 2424 2425 /* 2426 * dp_ipa_wdi_rx_alt_pipe_smmu_params() - Setup 2nd rx pipe params 2427 * @soc: data path soc handle 2428 * @ipa_res: ipa resource pointer 2429 * @rx: pipe info handle 2430 * @over_gsi: flag for IPA offload over gsi 2431 * @hdl: ipa registered handle 2432 * 2433 * Return: none 2434 */ 2435 static void dp_ipa_wdi_rx_alt_pipe_params(struct dp_soc *soc, 2436 struct dp_ipa_resources *ipa_res, 2437 qdf_ipa_wdi_pipe_setup_info_t *rx, 2438 bool over_gsi, 2439 qdf_ipa_wdi_hdl_t hdl) 2440 { 2441 if (!wlan_ipa_is_vlan_enabled()) 2442 return; 2443 2444 if (over_gsi) { 2445 if (hdl == DP_IPA_HDL_FIRST) 2446 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = 2447 IPA_CLIENT_WLAN2_PROD1; 2448 else if (hdl == DP_IPA_HDL_SECOND) 2449 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = 2450 IPA_CLIENT_WLAN3_PROD1; 2451 } else { 2452 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = 2453 IPA_CLIENT_WLAN1_PROD; 2454 } 2455 2456 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) = 2457 qdf_mem_get_dma_addr(soc->osdev, 2458 &ipa_res->rx_alt_rdy_ring.mem_info); 2459 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) = 2460 qdf_mem_get_dma_size(soc->osdev, 2461 &ipa_res->rx_alt_rdy_ring.mem_info); 2462 2463 /* REO Tail Pointer Address */ 2464 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) = 2465 soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr; 2466 QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true; 2467 2468 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) = 2469 qdf_mem_get_dma_addr(soc->osdev, 2470 &ipa_res->rx_alt_refill_ring.mem_info); 2471 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) = 2472 qdf_mem_get_dma_size(soc->osdev, 2473 &ipa_res->rx_alt_refill_ring.mem_info); 2474 2475 /* FW Head Pointer Address */ 2476 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) = 2477 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr; 2478 QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false; 2479 2480 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = 2481 soc->rx_pkt_tlv_size + L3_HEADER_PADDING; 2482 } 2483 2484 /* 2485 * dp_ipa_setup_rx_alt_pipe() - Setup 2nd rx pipe for IPA offload 2486 * @soc: data path soc handle 2487 * @res: ipa resource pointer 2488 * @in: pipe in handle 2489 * @over_gsi: flag for IPA offload over gsi 2490 * @hdl: ipa registered handle 2491 * 2492 * Return: none 2493 */ 2494 static void dp_ipa_setup_rx_alt_pipe(struct dp_soc *soc, 2495 struct dp_ipa_resources *res, 2496 qdf_ipa_wdi_conn_in_params_t *in, 2497 bool over_gsi, 2498 qdf_ipa_wdi_hdl_t hdl) 2499 { 2500 qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu = NULL; 2501 qdf_ipa_wdi_pipe_setup_info_t *rx = NULL; 2502 qdf_ipa_ep_cfg_t *rx_cfg; 2503 2504 if (!wlan_ipa_is_vlan_enabled()) 2505 return; 2506 2507 QDF_IPA_WDI_CONN_IN_PARAMS_IS_RX1_USED(in) = true; 2508 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 2509 rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_ALT_SMMU(in); 2510 rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu); 2511 dp_ipa_wdi_rx_alt_pipe_smmu_params(soc, res, rx_smmu, 2512 over_gsi, hdl); 2513 } else { 2514 rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_ALT(in); 2515 rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx); 2516 dp_ipa_wdi_rx_alt_pipe_params(soc, res, rx, over_gsi, hdl); 2517 } 2518 2519 QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT; 2520 /* Update with wds len(96) + 4 if wds support is enabled */ 2521 if (ucfg_ipa_is_wds_enabled()) 2522 QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN_AST_VLAN; 2523 else 2524 QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN; 2525 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1; 2526 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0; 2527 QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0; 2528 QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0; 2529 QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1; 2530 QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC; 2531 QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true; 2532 } 2533 2534 /* 2535 * dp_ipa_set_rx_alt_pipe_db() - Setup 2nd rx pipe doorbell 2536 * @res: ipa resource pointer 2537 * @out: pipe out handle 2538 * 2539 * Return: none 2540 */ 2541 static void dp_ipa_set_rx_alt_pipe_db(struct dp_ipa_resources *res, 2542 qdf_ipa_wdi_conn_out_params_t *out) 2543 { 2544 if (!wlan_ipa_is_vlan_enabled()) 2545 return; 2546 2547 res->rx_alt_ready_doorbell_paddr = 2548 QDF_IPA_WDI_CONN_OUT_PARAMS_RX_ALT_UC_DB_PA(out); 2549 dp_debug("Setting DB 0x%x for RX alt pipe", 2550 res->rx_alt_ready_doorbell_paddr); 2551 } 2552 #else 2553 static inline 2554 void dp_ipa_setup_rx_alt_pipe(struct dp_soc *soc, 2555 struct dp_ipa_resources *res, 2556 qdf_ipa_wdi_conn_in_params_t *in, 2557 bool over_gsi, 2558 qdf_ipa_wdi_hdl_t hdl) 2559 { } 2560 2561 static inline 2562 void dp_ipa_set_rx_alt_pipe_db(struct dp_ipa_resources *res, 2563 qdf_ipa_wdi_conn_out_params_t *out) 2564 { } 2565 #endif 2566 2567 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2568 void *ipa_i2w_cb, void *ipa_w2i_cb, 2569 void *ipa_wdi_meter_notifier_cb, 2570 uint32_t ipa_desc_size, void *ipa_priv, 2571 bool is_rm_enabled, uint32_t *tx_pipe_handle, 2572 uint32_t *rx_pipe_handle, bool is_smmu_enabled, 2573 qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi, 2574 qdf_ipa_wdi_hdl_t hdl, qdf_ipa_wdi_hdl_t id, 2575 void *ipa_ast_notify_cb) 2576 { 2577 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 2578 struct dp_pdev *pdev = 2579 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2580 struct dp_ipa_resources *ipa_res; 2581 qdf_ipa_ep_cfg_t *tx_cfg; 2582 qdf_ipa_ep_cfg_t *rx_cfg; 2583 qdf_ipa_wdi_pipe_setup_info_t *tx = NULL; 2584 qdf_ipa_wdi_pipe_setup_info_t *rx = NULL; 2585 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu; 2586 qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu = NULL; 2587 qdf_ipa_wdi_conn_in_params_t *pipe_in = NULL; 2588 qdf_ipa_wdi_conn_out_params_t pipe_out; 2589 int ret; 2590 2591 if (!pdev) { 2592 dp_err("Invalid instance"); 2593 return QDF_STATUS_E_FAILURE; 2594 } 2595 2596 ipa_res = &pdev->ipa_resource; 2597 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 2598 return QDF_STATUS_SUCCESS; 2599 2600 pipe_in = qdf_mem_malloc(sizeof(*pipe_in)); 2601 if (!pipe_in) 2602 return QDF_STATUS_E_NOMEM; 2603 2604 qdf_mem_zero(&pipe_out, sizeof(pipe_out)); 2605 2606 if (is_smmu_enabled) 2607 QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = true; 2608 else 2609 QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = false; 2610 2611 dp_setup_mcc_sys_pipes(sys_in, pipe_in); 2612 2613 /* TX PIPE */ 2614 if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) { 2615 tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(pipe_in); 2616 tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu); 2617 } else { 2618 tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(pipe_in); 2619 tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx); 2620 } 2621 2622 QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT; 2623 QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN; 2624 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0; 2625 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0; 2626 QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0; 2627 QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC; 2628 QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true; 2629 2630 /** 2631 * Transfer Ring: WBM Ring 2632 * Transfer Ring Doorbell PA: WBM Tail Pointer Address 2633 * Event Ring: TCL ring 2634 * Event Ring Doorbell PA: TCL Head Pointer Address 2635 */ 2636 if (is_smmu_enabled) 2637 dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi, id); 2638 else 2639 dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi); 2640 2641 dp_ipa_setup_tx_alt_pipe(soc, ipa_res, pipe_in); 2642 2643 /* RX PIPE */ 2644 if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) { 2645 rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(pipe_in); 2646 rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu); 2647 } else { 2648 rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(pipe_in); 2649 rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx); 2650 } 2651 2652 QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT; 2653 if (ucfg_ipa_is_wds_enabled()) 2654 QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN_AST; 2655 else 2656 QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN; 2657 2658 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1; 2659 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0; 2660 QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0; 2661 QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0; 2662 QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1; 2663 QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC; 2664 QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true; 2665 2666 /** 2667 * Transfer Ring: REO Ring 2668 * Transfer Ring Doorbell PA: REO Tail Pointer Address 2669 * Event Ring: FW ring 2670 * Event Ring Doorbell PA: FW Head Pointer Address 2671 */ 2672 if (is_smmu_enabled) 2673 dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi, id); 2674 else 2675 dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi); 2676 2677 /* setup 2nd rx pipe */ 2678 dp_ipa_setup_rx_alt_pipe(soc, ipa_res, pipe_in, over_gsi, id); 2679 2680 QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(pipe_in) = ipa_w2i_cb; 2681 QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(pipe_in) = ipa_priv; 2682 QDF_IPA_WDI_CONN_IN_PARAMS_HANDLE(pipe_in) = hdl; 2683 dp_ipa_ast_notify_cb(pipe_in, ipa_ast_notify_cb); 2684 2685 /* Connect WDI IPA PIPEs */ 2686 ret = qdf_ipa_wdi_conn_pipes(pipe_in, &pipe_out); 2687 2688 if (ret) { 2689 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2690 "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d", 2691 __func__, ret); 2692 qdf_mem_free(pipe_in); 2693 return QDF_STATUS_E_FAILURE; 2694 } 2695 2696 /* IPA uC Doorbell registers */ 2697 dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x", 2698 (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out), 2699 (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out)); 2700 2701 dp_ipa_set_pipe_db(ipa_res, &pipe_out); 2702 dp_ipa_set_rx_alt_pipe_db(ipa_res, &pipe_out); 2703 2704 ipa_res->is_db_ddr_mapped = 2705 QDF_IPA_WDI_CONN_OUT_PARAMS_IS_DB_DDR_MAPPED(&pipe_out); 2706 2707 soc->ipa_first_tx_db_access = true; 2708 qdf_mem_free(pipe_in); 2709 2710 qdf_spinlock_create(&soc->ipa_rx_buf_map_lock); 2711 soc->ipa_rx_buf_map_lock_initialized = true; 2712 2713 return QDF_STATUS_SUCCESS; 2714 } 2715 2716 #ifdef IPA_WDI3_VLAN_SUPPORT 2717 /* 2718 * dp_ipa_set_rx1_used() - Set rx1 used flag for 2nd rx offload ring 2719 * @in: pipe in handle 2720 * 2721 * Return: none 2722 */ 2723 static inline 2724 void dp_ipa_set_rx1_used(qdf_ipa_wdi_reg_intf_in_params_t *in) 2725 { 2726 QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_RX1_USED(in) = true; 2727 } 2728 2729 /* 2730 * dp_ipa_set_v4_vlan_hdr() - Set v4 vlan hdr 2731 * @in: pipe in handle 2732 * hdr: pointer to hdr 2733 * 2734 * Return: none 2735 */ 2736 static inline 2737 void dp_ipa_set_v4_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in, 2738 qdf_ipa_wdi_hdr_info_t *hdr) 2739 { 2740 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(in)[IPA_IP_v4_VLAN]), 2741 hdr, sizeof(qdf_ipa_wdi_hdr_info_t)); 2742 } 2743 2744 /* 2745 * dp_ipa_set_v6_vlan_hdr() - Set v6 vlan hdr 2746 * @in: pipe in handle 2747 * hdr: pointer to hdr 2748 * 2749 * Return: none 2750 */ 2751 static inline 2752 void dp_ipa_set_v6_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in, 2753 qdf_ipa_wdi_hdr_info_t *hdr) 2754 { 2755 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(in)[IPA_IP_v6_VLAN]), 2756 hdr, sizeof(qdf_ipa_wdi_hdr_info_t)); 2757 } 2758 #else 2759 static inline 2760 void dp_ipa_set_rx1_used(qdf_ipa_wdi_reg_intf_in_params_t *in) 2761 { } 2762 2763 static inline 2764 void dp_ipa_set_v4_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in, 2765 qdf_ipa_wdi_hdr_info_t *hdr) 2766 { } 2767 2768 static inline 2769 void dp_ipa_set_v6_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in, 2770 qdf_ipa_wdi_hdr_info_t *hdr) 2771 { } 2772 #endif 2773 2774 #ifdef IPA_WDS_EASYMESH_FEATURE 2775 /** 2776 * dp_ipa_set_wdi_hdr_type() - Set wdi hdr type for IPA 2777 * @hdr_info: Header info 2778 * 2779 * Return: None 2780 */ 2781 static inline void 2782 dp_ipa_set_wdi_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info) 2783 { 2784 if (ucfg_ipa_is_wds_enabled()) 2785 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) = 2786 IPA_HDR_L2_ETHERNET_II_AST; 2787 else 2788 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) = 2789 IPA_HDR_L2_ETHERNET_II; 2790 } 2791 #else 2792 static inline void 2793 dp_ipa_set_wdi_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info) 2794 { 2795 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) = IPA_HDR_L2_ETHERNET_II; 2796 } 2797 #endif 2798 2799 #ifdef IPA_WDI3_VLAN_SUPPORT 2800 /** 2801 * dp_ipa_set_wdi_vlan_hdr_type() - Set wdi vlan hdr type for IPA 2802 * @hdr_info: Header info 2803 * 2804 * Return: None 2805 */ 2806 static inline void 2807 dp_ipa_set_wdi_vlan_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info) 2808 { 2809 if (ucfg_ipa_is_wds_enabled()) 2810 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) = 2811 IPA_HDR_L2_802_1Q_AST; 2812 else 2813 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) = 2814 IPA_HDR_L2_802_1Q; 2815 } 2816 #else 2817 static inline void 2818 dp_ipa_set_wdi_vlan_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info) 2819 { } 2820 #endif 2821 2822 /** 2823 * dp_ipa_setup_iface() - Setup IPA header and register interface 2824 * @ifname: Interface name 2825 * @mac_addr: Interface MAC address 2826 * @prod_client: IPA prod client type 2827 * @cons_client: IPA cons client type 2828 * @session_id: Session ID 2829 * @is_ipv6_enabled: Is IPV6 enabled or not 2830 * @hdl: IPA handle 2831 * 2832 * Return: QDF_STATUS 2833 */ 2834 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr, 2835 qdf_ipa_client_type_t prod_client, 2836 qdf_ipa_client_type_t cons_client, 2837 uint8_t session_id, bool is_ipv6_enabled, 2838 qdf_ipa_wdi_hdl_t hdl) 2839 { 2840 qdf_ipa_wdi_reg_intf_in_params_t in; 2841 qdf_ipa_wdi_hdr_info_t hdr_info; 2842 struct dp_ipa_uc_tx_hdr uc_tx_hdr; 2843 struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6; 2844 struct dp_ipa_uc_tx_vlan_hdr uc_tx_vlan_hdr; 2845 struct dp_ipa_uc_tx_vlan_hdr uc_tx_vlan_hdr_v6; 2846 int ret = -EINVAL; 2847 2848 qdf_mem_zero(&in, sizeof(qdf_ipa_wdi_reg_intf_in_params_t)); 2849 2850 dp_debug("Add Partial hdr: %s, "QDF_MAC_ADDR_FMT, ifname, 2851 QDF_MAC_ADDR_REF(mac_addr)); 2852 qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 2853 qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr); 2854 2855 /* IPV4 header */ 2856 uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP); 2857 2858 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr; 2859 QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN; 2860 dp_ipa_set_wdi_hdr_type(&hdr_info); 2861 2862 QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) = 2863 DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET; 2864 2865 QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname; 2866 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]), 2867 &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 2868 QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client; 2869 QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1; 2870 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = WLAN_IPA_META_DATA_MASK; 2871 QDF_IPA_WDI_REG_INTF_IN_PARAMS_HANDLE(&in) = hdl; 2872 dp_ipa_setup_iface_session_id(&in, session_id); 2873 dp_debug("registering for session_id: %u", session_id); 2874 2875 /* IPV6 header */ 2876 if (is_ipv6_enabled) { 2877 qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr, 2878 DP_IPA_UC_WLAN_TX_HDR_LEN); 2879 uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6); 2880 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6; 2881 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]), 2882 &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 2883 } 2884 2885 if (wlan_ipa_is_vlan_enabled()) { 2886 /* Add vlan specific headers if vlan supporti is enabled */ 2887 qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 2888 dp_ipa_set_rx1_used(&in); 2889 qdf_ether_addr_copy(uc_tx_vlan_hdr.eth.h_source, mac_addr); 2890 /* IPV4 Vlan header */ 2891 uc_tx_vlan_hdr.eth.h_vlan_proto = qdf_htons(ETH_P_8021Q); 2892 uc_tx_vlan_hdr.eth.h_vlan_encapsulated_proto = qdf_htons(ETH_P_IP); 2893 2894 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = 2895 (uint8_t *)&uc_tx_vlan_hdr; 2896 QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = 2897 DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN; 2898 dp_ipa_set_wdi_vlan_hdr_type(&hdr_info); 2899 2900 QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) = 2901 DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET; 2902 2903 dp_ipa_set_v4_vlan_hdr(&in, &hdr_info); 2904 2905 /* IPV6 Vlan header */ 2906 if (is_ipv6_enabled) { 2907 qdf_mem_copy(&uc_tx_vlan_hdr_v6, &uc_tx_vlan_hdr, 2908 DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN); 2909 uc_tx_vlan_hdr_v6.eth.h_vlan_proto = 2910 qdf_htons(ETH_P_8021Q); 2911 uc_tx_vlan_hdr_v6.eth.h_vlan_encapsulated_proto = 2912 qdf_htons(ETH_P_IPV6); 2913 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = 2914 (uint8_t *)&uc_tx_vlan_hdr_v6; 2915 dp_ipa_set_v6_vlan_hdr(&in, &hdr_info); 2916 } 2917 } 2918 2919 ret = qdf_ipa_wdi_reg_intf(&in); 2920 if (ret) { 2921 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2922 "%s: ipa_wdi_reg_intf: register IPA interface falied: ret=%d", 2923 __func__, ret); 2924 return QDF_STATUS_E_FAILURE; 2925 } 2926 2927 return QDF_STATUS_SUCCESS; 2928 } 2929 2930 #else /* !CONFIG_IPA_WDI_UNIFIED_API */ 2931 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2932 void *ipa_i2w_cb, void *ipa_w2i_cb, 2933 void *ipa_wdi_meter_notifier_cb, 2934 uint32_t ipa_desc_size, void *ipa_priv, 2935 bool is_rm_enabled, uint32_t *tx_pipe_handle, 2936 uint32_t *rx_pipe_handle) 2937 { 2938 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 2939 struct dp_pdev *pdev = 2940 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2941 struct dp_ipa_resources *ipa_res; 2942 qdf_ipa_wdi_pipe_setup_info_t *tx; 2943 qdf_ipa_wdi_pipe_setup_info_t *rx; 2944 qdf_ipa_wdi_conn_in_params_t pipe_in; 2945 qdf_ipa_wdi_conn_out_params_t pipe_out; 2946 struct tcl_data_cmd *tcl_desc_ptr; 2947 uint8_t *desc_addr; 2948 uint32_t desc_size; 2949 int ret; 2950 2951 if (!pdev) { 2952 dp_err("Invalid instance"); 2953 return QDF_STATUS_E_FAILURE; 2954 } 2955 2956 ipa_res = &pdev->ipa_resource; 2957 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 2958 return QDF_STATUS_SUCCESS; 2959 2960 qdf_mem_zero(&tx, sizeof(qdf_ipa_wdi_pipe_setup_info_t)); 2961 qdf_mem_zero(&rx, sizeof(qdf_ipa_wdi_pipe_setup_info_t)); 2962 qdf_mem_zero(&pipe_in, sizeof(pipe_in)); 2963 qdf_mem_zero(&pipe_out, sizeof(pipe_out)); 2964 2965 /* TX PIPE */ 2966 /** 2967 * Transfer Ring: WBM Ring 2968 * Transfer Ring Doorbell PA: WBM Tail Pointer Address 2969 * Event Ring: TCL ring 2970 * Event Ring Doorbell PA: TCL Head Pointer Address 2971 */ 2972 tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in); 2973 QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT; 2974 QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN; 2975 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0; 2976 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0; 2977 QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0; 2978 QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC; 2979 QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true; 2980 QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS; 2981 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) = 2982 ipa_res->tx_comp_ring_base_paddr; 2983 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) = 2984 ipa_res->tx_comp_ring_size; 2985 /* WBM Tail Pointer Address */ 2986 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) = 2987 soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr; 2988 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) = 2989 ipa_res->tx_ring_base_paddr; 2990 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size; 2991 /* TCL Head Pointer Address */ 2992 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) = 2993 soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr; 2994 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) = 2995 ipa_res->tx_num_alloc_buffer; 2996 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0; 2997 2998 /* Preprogram TCL descriptor */ 2999 desc_addr = 3000 (uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx); 3001 desc_size = sizeof(struct tcl_data_cmd); 3002 HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size); 3003 tcl_desc_ptr = (struct tcl_data_cmd *) 3004 (QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1); 3005 tcl_desc_ptr->buf_addr_info.return_buffer_manager = 3006 HAL_RX_BUF_RBM_SW2_BM; 3007 tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */ 3008 tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET; 3009 tcl_desc_ptr->packet_offset = 2; /* padding for alignment */ 3010 3011 /* RX PIPE */ 3012 /** 3013 * Transfer Ring: REO Ring 3014 * Transfer Ring Doorbell PA: REO Tail Pointer Address 3015 * Event Ring: FW ring 3016 * Event Ring Doorbell PA: FW Head Pointer Address 3017 */ 3018 rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in); 3019 QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT; 3020 QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN; 3021 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0; 3022 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0; 3023 QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0; 3024 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0; 3025 QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1; 3026 QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC; 3027 QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true; 3028 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD; 3029 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) = 3030 ipa_res->rx_rdy_ring_base_paddr; 3031 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) = 3032 ipa_res->rx_rdy_ring_size; 3033 /* REO Tail Pointer Address */ 3034 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) = 3035 soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr; 3036 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) = 3037 ipa_res->rx_refill_ring_base_paddr; 3038 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) = 3039 ipa_res->rx_refill_ring_size; 3040 /* FW Head Pointer Address */ 3041 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) = 3042 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr; 3043 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = soc->rx_pkt_tlv_size + 3044 L3_HEADER_PADDING; 3045 QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb; 3046 QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv; 3047 3048 /* Connect WDI IPA PIPE */ 3049 ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out); 3050 if (ret) { 3051 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3052 "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d", 3053 __func__, ret); 3054 return QDF_STATUS_E_FAILURE; 3055 } 3056 3057 /* IPA uC Doorbell registers */ 3058 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 3059 "%s: Tx DB PA=0x%x, Rx DB PA=0x%x", 3060 __func__, 3061 (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out), 3062 (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out)); 3063 3064 ipa_res->tx_comp_doorbell_paddr = 3065 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out); 3066 ipa_res->tx_comp_doorbell_vaddr = 3067 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out); 3068 ipa_res->rx_ready_doorbell_paddr = 3069 QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out); 3070 3071 soc->ipa_first_tx_db_access = true; 3072 3073 qdf_spinlock_create(&soc->ipa_rx_buf_map_lock); 3074 soc->ipa_rx_buf_map_lock_initialized = true; 3075 3076 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 3077 "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK", 3078 __func__, 3079 "transfer_ring_base_pa", 3080 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx), 3081 "transfer_ring_size", 3082 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx), 3083 "transfer_ring_doorbell_pa", 3084 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx), 3085 "event_ring_base_pa", 3086 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx), 3087 "event_ring_size", 3088 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx), 3089 "event_ring_doorbell_pa", 3090 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx), 3091 "num_pkt_buffers", 3092 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx), 3093 "tx_comp_doorbell_paddr", 3094 (void *)ipa_res->tx_comp_doorbell_paddr); 3095 3096 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 3097 "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK", 3098 __func__, 3099 "transfer_ring_base_pa", 3100 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx), 3101 "transfer_ring_size", 3102 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx), 3103 "transfer_ring_doorbell_pa", 3104 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx), 3105 "event_ring_base_pa", 3106 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx), 3107 "event_ring_size", 3108 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx), 3109 "event_ring_doorbell_pa", 3110 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx), 3111 "num_pkt_buffers", 3112 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx), 3113 "tx_comp_doorbell_paddr", 3114 (void *)ipa_res->rx_ready_doorbell_paddr); 3115 3116 return QDF_STATUS_SUCCESS; 3117 } 3118 3119 /** 3120 * dp_ipa_setup_iface() - Setup IPA header and register interface 3121 * @ifname: Interface name 3122 * @mac_addr: Interface MAC address 3123 * @prod_client: IPA prod client type 3124 * @cons_client: IPA cons client type 3125 * @session_id: Session ID 3126 * @is_ipv6_enabled: Is IPV6 enabled or not 3127 * @hdl: IPA handle 3128 * 3129 * Return: QDF_STATUS 3130 */ 3131 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr, 3132 qdf_ipa_client_type_t prod_client, 3133 qdf_ipa_client_type_t cons_client, 3134 uint8_t session_id, bool is_ipv6_enabled, 3135 qdf_ipa_wdi_hdl_t hdl) 3136 { 3137 qdf_ipa_wdi_reg_intf_in_params_t in; 3138 qdf_ipa_wdi_hdr_info_t hdr_info; 3139 struct dp_ipa_uc_tx_hdr uc_tx_hdr; 3140 struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6; 3141 int ret = -EINVAL; 3142 3143 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 3144 "%s: Add Partial hdr: %s, "QDF_MAC_ADDR_FMT, 3145 __func__, ifname, QDF_MAC_ADDR_REF(mac_addr)); 3146 3147 qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 3148 qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr); 3149 3150 /* IPV4 header */ 3151 uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP); 3152 3153 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr; 3154 QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN; 3155 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II; 3156 QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) = 3157 DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET; 3158 3159 QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname; 3160 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]), 3161 &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 3162 QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1; 3163 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) = 3164 htonl(session_id << 16); 3165 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000); 3166 3167 /* IPV6 header */ 3168 if (is_ipv6_enabled) { 3169 qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr, 3170 DP_IPA_UC_WLAN_TX_HDR_LEN); 3171 uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6); 3172 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6; 3173 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]), 3174 &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 3175 } 3176 3177 ret = qdf_ipa_wdi_reg_intf(&in); 3178 if (ret) { 3179 dp_err("ipa_wdi_reg_intf: register IPA interface falied: ret=%d", 3180 ret); 3181 return QDF_STATUS_E_FAILURE; 3182 } 3183 3184 return QDF_STATUS_SUCCESS; 3185 } 3186 3187 #endif /* CONFIG_IPA_WDI_UNIFIED_API */ 3188 3189 /** 3190 * dp_ipa_cleanup() - Disconnect IPA pipes 3191 * @soc_hdl: dp soc handle 3192 * @pdev_id: dp pdev id 3193 * @tx_pipe_handle: Tx pipe handle 3194 * @rx_pipe_handle: Rx pipe handle 3195 * @hdl: IPA handle 3196 * 3197 * Return: QDF_STATUS 3198 */ 3199 QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3200 uint32_t tx_pipe_handle, uint32_t rx_pipe_handle, 3201 qdf_ipa_wdi_hdl_t hdl) 3202 { 3203 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3204 QDF_STATUS status = QDF_STATUS_SUCCESS; 3205 struct dp_pdev *pdev; 3206 int ret; 3207 3208 ret = qdf_ipa_wdi_disconn_pipes(hdl); 3209 if (ret) { 3210 dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d", 3211 ret); 3212 status = QDF_STATUS_E_FAILURE; 3213 } 3214 3215 if (soc->ipa_rx_buf_map_lock_initialized) { 3216 qdf_spinlock_destroy(&soc->ipa_rx_buf_map_lock); 3217 soc->ipa_rx_buf_map_lock_initialized = false; 3218 } 3219 3220 pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 3221 if (qdf_unlikely(!pdev)) { 3222 dp_err_rl("Invalid pdev for pdev_id %d", pdev_id); 3223 status = QDF_STATUS_E_FAILURE; 3224 goto exit; 3225 } 3226 3227 dp_ipa_unmap_ring_doorbell_paddr(pdev); 3228 dp_ipa_unmap_rx_alt_ring_doorbell_paddr(pdev); 3229 exit: 3230 return status; 3231 } 3232 3233 /** 3234 * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface 3235 * @ifname: Interface name 3236 * @is_ipv6_enabled: Is IPV6 enabled or not 3237 * @hdl: IPA handle 3238 * 3239 * Return: QDF_STATUS 3240 */ 3241 QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled, 3242 qdf_ipa_wdi_hdl_t hdl) 3243 { 3244 int ret; 3245 3246 ret = qdf_ipa_wdi_dereg_intf(ifname, hdl); 3247 if (ret) { 3248 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3249 "%s: ipa_wdi_dereg_intf: IPA pipe deregistration failed: ret=%d", 3250 __func__, ret); 3251 return QDF_STATUS_E_FAILURE; 3252 } 3253 3254 return QDF_STATUS_SUCCESS; 3255 } 3256 3257 #ifdef IPA_SET_RESET_TX_DB_PA 3258 #define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res) \ 3259 dp_ipa_set_tx_doorbell_paddr((soc), (ipa_res)) 3260 #define DP_IPA_RESET_TX_DB_PA(soc, ipa_res) \ 3261 dp_ipa_reset_tx_doorbell_pa((soc), (ipa_res)) 3262 #else 3263 #define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res) 3264 #define DP_IPA_RESET_TX_DB_PA(soc, ipa_res) 3265 #endif 3266 3267 QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3268 qdf_ipa_wdi_hdl_t hdl) 3269 { 3270 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3271 struct dp_pdev *pdev = 3272 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 3273 struct dp_ipa_resources *ipa_res; 3274 QDF_STATUS result; 3275 3276 if (!pdev) { 3277 dp_err("Invalid instance"); 3278 return QDF_STATUS_E_FAILURE; 3279 } 3280 3281 ipa_res = &pdev->ipa_resource; 3282 3283 qdf_atomic_set(&soc->ipa_pipes_enabled, 1); 3284 DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res); 3285 dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true, 3286 __func__, __LINE__); 3287 3288 result = qdf_ipa_wdi_enable_pipes(hdl); 3289 if (result) { 3290 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3291 "%s: Enable WDI PIPE fail, code %d", 3292 __func__, result); 3293 qdf_atomic_set(&soc->ipa_pipes_enabled, 0); 3294 DP_IPA_RESET_TX_DB_PA(soc, ipa_res); 3295 dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false, 3296 __func__, __LINE__); 3297 return QDF_STATUS_E_FAILURE; 3298 } 3299 3300 if (soc->ipa_first_tx_db_access) { 3301 dp_ipa_tx_comp_ring_init_hp(soc, ipa_res); 3302 soc->ipa_first_tx_db_access = false; 3303 } 3304 3305 return QDF_STATUS_SUCCESS; 3306 } 3307 3308 QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3309 qdf_ipa_wdi_hdl_t hdl) 3310 { 3311 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3312 struct dp_pdev *pdev = 3313 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 3314 QDF_STATUS result; 3315 struct dp_ipa_resources *ipa_res; 3316 3317 if (!pdev) { 3318 dp_err("Invalid instance"); 3319 return QDF_STATUS_E_FAILURE; 3320 } 3321 3322 ipa_res = &pdev->ipa_resource; 3323 3324 qdf_sleep(TX_COMP_DRAIN_WAIT_TIMEOUT_MS); 3325 /* 3326 * Reset the tx completion doorbell address before invoking IPA disable 3327 * pipes API to ensure that there is no access to IPA tx doorbell 3328 * address post disable pipes. 3329 */ 3330 DP_IPA_RESET_TX_DB_PA(soc, ipa_res); 3331 3332 result = qdf_ipa_wdi_disable_pipes(hdl); 3333 if (result) { 3334 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3335 "%s: Disable WDI PIPE fail, code %d", 3336 __func__, result); 3337 qdf_assert_always(0); 3338 return QDF_STATUS_E_FAILURE; 3339 } 3340 3341 qdf_atomic_set(&soc->ipa_pipes_enabled, 0); 3342 dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false, 3343 __func__, __LINE__); 3344 3345 return result ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS; 3346 } 3347 3348 /** 3349 * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates 3350 * @client: Client type 3351 * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps) 3352 * @hdl: IPA handle 3353 * 3354 * Return: QDF_STATUS 3355 */ 3356 QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps, 3357 qdf_ipa_wdi_hdl_t hdl) 3358 { 3359 qdf_ipa_wdi_perf_profile_t profile; 3360 QDF_STATUS result; 3361 3362 profile.client = client; 3363 profile.max_supported_bw_mbps = max_supported_bw_mbps; 3364 3365 result = qdf_ipa_wdi_set_perf_profile(hdl, &profile); 3366 if (result) { 3367 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3368 "%s: ipa_wdi_set_perf_profile fail, code %d", 3369 __func__, result); 3370 return QDF_STATUS_E_FAILURE; 3371 } 3372 3373 return QDF_STATUS_SUCCESS; 3374 } 3375 3376 /** 3377 * dp_ipa_intrabss_send - send IPA RX intra-bss frames 3378 * @pdev: pdev 3379 * @vdev: vdev 3380 * @nbuf: skb 3381 * 3382 * Return: nbuf if TX fails and NULL if TX succeeds 3383 */ 3384 static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev, 3385 struct dp_vdev *vdev, 3386 qdf_nbuf_t nbuf) 3387 { 3388 struct dp_peer *vdev_peer; 3389 uint16_t len; 3390 3391 vdev_peer = dp_vdev_bss_peer_ref_n_get(pdev->soc, vdev, DP_MOD_ID_IPA); 3392 if (qdf_unlikely(!vdev_peer)) 3393 return nbuf; 3394 3395 if (qdf_unlikely(!vdev_peer->txrx_peer)) { 3396 dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA); 3397 return nbuf; 3398 } 3399 3400 qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb)); 3401 len = qdf_nbuf_len(nbuf); 3402 3403 if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) { 3404 DP_PEER_PER_PKT_STATS_INC_PKT(vdev_peer->txrx_peer, 3405 rx.intra_bss.fail, 1, len); 3406 dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA); 3407 return nbuf; 3408 } 3409 3410 DP_PEER_PER_PKT_STATS_INC_PKT(vdev_peer->txrx_peer, 3411 rx.intra_bss.pkts, 1, len); 3412 dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA); 3413 return NULL; 3414 } 3415 3416 #ifdef IPA_WDS_EASYMESH_FEATURE 3417 /** 3418 * dp_ipa_peer_check() - Check for peer for given mac 3419 * @soc: dp soc object 3420 * @peer_mac_addr: peer mac address 3421 * @vdev_id: vdev id 3422 * 3423 * Return: true if peer is found, else false 3424 */ 3425 static inline bool dp_ipa_peer_check(struct dp_soc *soc, 3426 uint8_t *peer_mac_addr, uint8_t vdev_id) 3427 { 3428 struct dp_ast_entry *ast_entry = NULL; 3429 struct dp_peer *peer = NULL; 3430 3431 qdf_spin_lock_bh(&soc->ast_lock); 3432 ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr); 3433 3434 if ((!ast_entry) || 3435 (ast_entry->delete_in_progress && !ast_entry->callback)) { 3436 qdf_spin_unlock_bh(&soc->ast_lock); 3437 return false; 3438 } 3439 3440 peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id, 3441 DP_MOD_ID_AST); 3442 3443 if (!peer) { 3444 qdf_spin_unlock_bh(&soc->ast_lock); 3445 return false; 3446 } else { 3447 if (peer->vdev->vdev_id == vdev_id) { 3448 dp_peer_unref_delete(peer, DP_MOD_ID_IPA); 3449 qdf_spin_unlock_bh(&soc->ast_lock); 3450 return true; 3451 } 3452 dp_peer_unref_delete(peer, DP_MOD_ID_IPA); 3453 qdf_spin_unlock_bh(&soc->ast_lock); 3454 return false; 3455 } 3456 } 3457 #else 3458 static inline bool dp_ipa_peer_check(struct dp_soc *soc, 3459 uint8_t *peer_mac_addr, uint8_t vdev_id) 3460 { 3461 struct dp_peer *peer = NULL; 3462 3463 peer = dp_peer_find_hash_find(soc, peer_mac_addr, 0, vdev_id, 3464 DP_MOD_ID_IPA); 3465 if (!peer) { 3466 return false; 3467 } else { 3468 dp_peer_unref_delete(peer, DP_MOD_ID_IPA); 3469 return true; 3470 } 3471 } 3472 #endif 3473 3474 bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 3475 qdf_nbuf_t nbuf, bool *fwd_success) 3476 { 3477 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3478 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 3479 DP_MOD_ID_IPA); 3480 struct dp_pdev *pdev; 3481 qdf_nbuf_t nbuf_copy; 3482 uint8_t da_is_bcmc; 3483 struct ethhdr *eh; 3484 bool status = false; 3485 3486 *fwd_success = false; /* set default as failure */ 3487 3488 /* 3489 * WDI 3.0 skb->cb[] info from IPA driver 3490 * skb->cb[0] = vdev_id 3491 * skb->cb[1].bit#1 = da_is_bcmc 3492 */ 3493 da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2; 3494 3495 if (qdf_unlikely(!vdev)) 3496 return false; 3497 3498 pdev = vdev->pdev; 3499 if (qdf_unlikely(!pdev)) 3500 goto out; 3501 3502 /* no fwd for station mode and just pass up to stack */ 3503 if (vdev->opmode == wlan_op_mode_sta) 3504 goto out; 3505 3506 if (da_is_bcmc) { 3507 nbuf_copy = qdf_nbuf_copy(nbuf); 3508 if (!nbuf_copy) 3509 goto out; 3510 3511 if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy)) 3512 qdf_nbuf_free(nbuf_copy); 3513 else 3514 *fwd_success = true; 3515 3516 /* return false to pass original pkt up to stack */ 3517 goto out; 3518 } 3519 3520 eh = (struct ethhdr *)qdf_nbuf_data(nbuf); 3521 3522 if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE)) 3523 goto out; 3524 3525 if (!dp_ipa_peer_check(soc, eh->h_dest, vdev->vdev_id)) 3526 goto out; 3527 3528 if (!dp_ipa_peer_check(soc, eh->h_source, vdev->vdev_id)) 3529 goto out; 3530 3531 /* 3532 * In intra-bss forwarding scenario, skb is allocated by IPA driver. 3533 * Need to add skb to internal tracking table to avoid nbuf memory 3534 * leak check for unallocated skb. 3535 */ 3536 qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__); 3537 3538 if (dp_ipa_intrabss_send(pdev, vdev, nbuf)) 3539 qdf_nbuf_free(nbuf); 3540 else 3541 *fwd_success = true; 3542 3543 status = true; 3544 out: 3545 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_IPA); 3546 return status; 3547 } 3548 3549 #ifdef MDM_PLATFORM 3550 bool dp_ipa_is_mdm_platform(void) 3551 { 3552 return true; 3553 } 3554 #else 3555 bool dp_ipa_is_mdm_platform(void) 3556 { 3557 return false; 3558 } 3559 #endif 3560 3561 /** 3562 * dp_ipa_frag_nbuf_linearize - linearize nbuf for IPA 3563 * @soc: soc 3564 * @nbuf: source skb 3565 * 3566 * Return: new nbuf if success and otherwise NULL 3567 */ 3568 static qdf_nbuf_t dp_ipa_frag_nbuf_linearize(struct dp_soc *soc, 3569 qdf_nbuf_t nbuf) 3570 { 3571 uint8_t *src_nbuf_data; 3572 uint8_t *dst_nbuf_data; 3573 qdf_nbuf_t dst_nbuf; 3574 qdf_nbuf_t temp_nbuf = nbuf; 3575 uint32_t nbuf_len = qdf_nbuf_len(nbuf); 3576 bool is_nbuf_head = true; 3577 uint32_t copy_len = 0; 3578 3579 dst_nbuf = qdf_nbuf_alloc(soc->osdev, RX_DATA_BUFFER_SIZE, 3580 RX_BUFFER_RESERVATION, 3581 RX_DATA_BUFFER_ALIGNMENT, FALSE); 3582 3583 if (!dst_nbuf) { 3584 dp_err_rl("nbuf allocate fail"); 3585 return NULL; 3586 } 3587 3588 if ((nbuf_len + L3_HEADER_PADDING) > RX_DATA_BUFFER_SIZE) { 3589 qdf_nbuf_free(dst_nbuf); 3590 dp_err_rl("nbuf is jumbo data"); 3591 return NULL; 3592 } 3593 3594 /* prepeare to copy all data into new skb */ 3595 dst_nbuf_data = qdf_nbuf_data(dst_nbuf); 3596 while (temp_nbuf) { 3597 src_nbuf_data = qdf_nbuf_data(temp_nbuf); 3598 /* first head nbuf */ 3599 if (is_nbuf_head) { 3600 qdf_mem_copy(dst_nbuf_data, src_nbuf_data, 3601 soc->rx_pkt_tlv_size); 3602 /* leave extra 2 bytes L3_HEADER_PADDING */ 3603 dst_nbuf_data += (soc->rx_pkt_tlv_size + 3604 L3_HEADER_PADDING); 3605 src_nbuf_data += soc->rx_pkt_tlv_size; 3606 copy_len = qdf_nbuf_headlen(temp_nbuf) - 3607 soc->rx_pkt_tlv_size; 3608 temp_nbuf = qdf_nbuf_get_ext_list(temp_nbuf); 3609 is_nbuf_head = false; 3610 } else { 3611 copy_len = qdf_nbuf_len(temp_nbuf); 3612 temp_nbuf = qdf_nbuf_queue_next(temp_nbuf); 3613 } 3614 qdf_mem_copy(dst_nbuf_data, src_nbuf_data, copy_len); 3615 dst_nbuf_data += copy_len; 3616 } 3617 3618 qdf_nbuf_set_len(dst_nbuf, nbuf_len); 3619 /* copy is done, free original nbuf */ 3620 qdf_nbuf_free(nbuf); 3621 3622 return dst_nbuf; 3623 } 3624 3625 /** 3626 * dp_ipa_handle_rx_reo_reinject - Handle RX REO reinject skb buffer 3627 * @soc: soc 3628 * @nbuf: skb 3629 * 3630 * Return: nbuf if success and otherwise NULL 3631 */ 3632 qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf) 3633 { 3634 3635 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 3636 return nbuf; 3637 3638 /* WLAN IPA is run-time disabled */ 3639 if (!qdf_atomic_read(&soc->ipa_pipes_enabled)) 3640 return nbuf; 3641 3642 if (!qdf_nbuf_is_frag(nbuf)) 3643 return nbuf; 3644 3645 /* linearize skb for IPA */ 3646 return dp_ipa_frag_nbuf_linearize(soc, nbuf); 3647 } 3648 3649 QDF_STATUS dp_ipa_tx_buf_smmu_mapping( 3650 struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3651 const char *func, uint32_t line) 3652 { 3653 QDF_STATUS ret; 3654 3655 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3656 struct dp_pdev *pdev = 3657 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 3658 3659 if (!pdev) { 3660 dp_err("%s invalid instance", __func__); 3661 return QDF_STATUS_E_FAILURE; 3662 } 3663 3664 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) { 3665 dp_debug("SMMU S1 disabled"); 3666 return QDF_STATUS_SUCCESS; 3667 } 3668 ret = __dp_ipa_tx_buf_smmu_mapping(soc, pdev, true, func, line); 3669 if (ret) 3670 return ret; 3671 3672 ret = dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, true, func, line); 3673 if (ret) 3674 __dp_ipa_tx_buf_smmu_mapping(soc, pdev, false, func, line); 3675 return ret; 3676 } 3677 3678 QDF_STATUS dp_ipa_tx_buf_smmu_unmapping( 3679 struct cdp_soc_t *soc_hdl, uint8_t pdev_id, const char *func, 3680 uint32_t line) 3681 { 3682 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3683 struct dp_pdev *pdev = 3684 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 3685 3686 if (!pdev) { 3687 dp_err("%s invalid instance", __func__); 3688 return QDF_STATUS_E_FAILURE; 3689 } 3690 3691 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) { 3692 dp_debug("SMMU S1 disabled"); 3693 return QDF_STATUS_SUCCESS; 3694 } 3695 3696 if (__dp_ipa_tx_buf_smmu_mapping(soc, pdev, false, func, line) || 3697 dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, false, func, line)) 3698 return QDF_STATUS_E_FAILURE; 3699 3700 return QDF_STATUS_SUCCESS; 3701 } 3702 3703 #ifdef IPA_WDS_EASYMESH_FEATURE 3704 QDF_STATUS dp_ipa_ast_create(struct cdp_soc_t *soc_hdl, 3705 qdf_ipa_ast_info_type_t *data) 3706 { 3707 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3708 uint8_t *rx_tlv_hdr; 3709 struct dp_peer *peer; 3710 struct hal_rx_msdu_metadata msdu_metadata; 3711 qdf_ipa_ast_info_type_t *ast_info; 3712 3713 if (!data) { 3714 dp_err("Data is NULL !!!"); 3715 return QDF_STATUS_E_FAILURE; 3716 } 3717 ast_info = data; 3718 3719 rx_tlv_hdr = qdf_nbuf_data(ast_info->skb); 3720 peer = dp_peer_get_ref_by_id(soc, ast_info->ta_peer_id, 3721 DP_MOD_ID_IPA); 3722 if (!peer) { 3723 dp_err("Peer is NULL !!!!"); 3724 return QDF_STATUS_E_FAILURE; 3725 } 3726 3727 hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata); 3728 3729 dp_rx_ipa_wds_srcport_learn(soc, peer, ast_info->skb, msdu_metadata, 3730 ast_info->mac_addr_ad4_valid, 3731 ast_info->first_msdu_in_mpdu_flag); 3732 3733 dp_peer_unref_delete(peer, DP_MOD_ID_IPA); 3734 3735 return QDF_STATUS_SUCCESS; 3736 } 3737 #endif 3738 #endif 3739