1 /* 2 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 18 #ifdef IPA_OFFLOAD 19 20 #include <wlan_ipa_ucfg_api.h> 21 #include <wlan_ipa_core.h> 22 #include <qdf_ipa_wdi3.h> 23 #include <qdf_types.h> 24 #include <qdf_lock.h> 25 #include <hal_hw_headers.h> 26 #include <hal_api.h> 27 #include <hal_reo.h> 28 #include <hif.h> 29 #include <htt.h> 30 #include <wdi_event.h> 31 #include <queue.h> 32 #include "dp_types.h" 33 #include "dp_htt.h" 34 #include "dp_tx.h" 35 #include "dp_rx.h" 36 #include "dp_ipa.h" 37 #include "dp_internal.h" 38 #ifdef WIFI_MONITOR_SUPPORT 39 #include "dp_mon.h" 40 #endif 41 #ifdef FEATURE_WDS 42 #include "dp_txrx_wds.h" 43 #endif 44 #ifdef QCA_IPA_LL_TX_FLOW_CONTROL 45 #include <pld_common.h> 46 #endif 47 48 /* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */ 49 #define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT (2048) 50 51 /* WAR for IPA_OFFLOAD case. In some cases, its observed that WBM tries to 52 * release a buffer into WBM2SW RELEASE ring for IPA, and the ring is full. 53 * This causes back pressure, resulting in a FW crash. 
54 * By leaving some entries with no buffer attached, WBM will be able to write 55 * to the ring, and from dumps we can figure out the buffer which is causing 56 * this issue. 57 */ 58 #define DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES 16 59 60 /** 61 * struct dp_ipa_reo_remap_record - history for dp ipa reo remaps 62 * @timestamp: Timestamp when remap occurs 63 * @ix0_reg: reo destination ring IX0 value 64 * @ix2_reg: reo destination ring IX2 value 65 * @ix3_reg: reo destination ring IX3 value 66 */ 67 struct dp_ipa_reo_remap_record { 68 uint64_t timestamp; 69 uint32_t ix0_reg; 70 uint32_t ix2_reg; 71 uint32_t ix3_reg; 72 }; 73 74 #define WLAN_IPA_AST_META_DATA_MASK htonl(0x000000FF) 75 #define WLAN_IPA_META_DATA_MASK htonl(0x00FF0000) 76 77 #define REO_REMAP_HISTORY_SIZE 32 78 79 struct dp_ipa_reo_remap_record dp_ipa_reo_remap_history[REO_REMAP_HISTORY_SIZE]; 80 81 static qdf_atomic_t dp_ipa_reo_remap_history_index; 82 static int dp_ipa_reo_remap_record_index_next(qdf_atomic_t *index) 83 { 84 int next = qdf_atomic_inc_return(index); 85 86 if (next == REO_REMAP_HISTORY_SIZE) 87 qdf_atomic_sub(REO_REMAP_HISTORY_SIZE, index); 88 89 return next % REO_REMAP_HISTORY_SIZE; 90 } 91 92 /** 93 * dp_ipa_reo_remap_history_add() - Record dp ipa reo remap values 94 * @ix0_val: reo destination ring IX0 value 95 * @ix2_val: reo destination ring IX2 value 96 * @ix3_val: reo destination ring IX3 value 97 * 98 * Return: None 99 */ 100 static void dp_ipa_reo_remap_history_add(uint32_t ix0_val, uint32_t ix2_val, 101 uint32_t ix3_val) 102 { 103 int idx = dp_ipa_reo_remap_record_index_next( 104 &dp_ipa_reo_remap_history_index); 105 struct dp_ipa_reo_remap_record *record = &dp_ipa_reo_remap_history[idx]; 106 107 record->timestamp = qdf_get_log_timestamp(); 108 record->ix0_reg = ix0_val; 109 record->ix2_reg = ix2_val; 110 record->ix3_reg = ix3_val; 111 } 112 113 static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc, 114 qdf_nbuf_t nbuf, 115 uint32_t size, 116 bool create, 
117 const char *func, 118 uint32_t line) 119 { 120 qdf_mem_info_t mem_map_table = {0}; 121 QDF_STATUS ret = QDF_STATUS_SUCCESS; 122 qdf_ipa_wdi_hdl_t hdl; 123 124 /* Need to handle the case when one soc will 125 * have multiple pdev(radio's), Currently passing 126 * pdev_id as 0 assuming 1 soc has only 1 radio. 127 */ 128 hdl = wlan_ipa_get_hdl(soc->ctrl_psoc, 0); 129 if (hdl == DP_IPA_HDL_INVALID) { 130 dp_err("IPA handle is invalid"); 131 return QDF_STATUS_E_INVAL; 132 } 133 qdf_update_mem_map_table(soc->osdev, &mem_map_table, 134 qdf_nbuf_get_frag_paddr(nbuf, 0), 135 size); 136 137 if (create) { 138 /* Assert if PA is zero */ 139 qdf_assert_always(mem_map_table.pa); 140 141 ret = qdf_nbuf_smmu_map_debug(nbuf, hdl, 1, &mem_map_table, 142 func, line); 143 } else { 144 ret = qdf_nbuf_smmu_unmap_debug(nbuf, hdl, 1, &mem_map_table, 145 func, line); 146 } 147 qdf_assert_always(!ret); 148 149 /* Return status of mapping/unmapping is stored in 150 * mem_map_table.result field, assert if the result 151 * is failure 152 */ 153 if (create) 154 qdf_assert_always(!mem_map_table.result); 155 else 156 qdf_assert_always(mem_map_table.result >= mem_map_table.size); 157 158 return ret; 159 } 160 161 QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc, 162 qdf_nbuf_t nbuf, 163 uint32_t size, 164 bool create, const char *func, 165 uint32_t line) 166 { 167 struct dp_pdev *pdev; 168 int i; 169 170 for (i = 0; i < soc->pdev_count; i++) { 171 pdev = soc->pdev_list[i]; 172 if (pdev && dp_monitor_is_configured(pdev)) 173 return QDF_STATUS_SUCCESS; 174 } 175 176 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) || 177 !qdf_mem_smmu_s1_enabled(soc->osdev)) 178 return QDF_STATUS_SUCCESS; 179 180 /* 181 * Even if ipa pipes is disabled, but if it's unmap 182 * operation and nbuf has done ipa smmu map before, 183 * do ipa smmu unmap as well. 
184 */ 185 if (!qdf_atomic_read(&soc->ipa_pipes_enabled)) { 186 if (!create && qdf_nbuf_is_rx_ipa_smmu_map(nbuf)) { 187 DP_STATS_INC(soc, rx.err.ipa_unmap_no_pipe, 1); 188 } else { 189 return QDF_STATUS_SUCCESS; 190 } 191 } 192 193 if (qdf_unlikely(create == qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) { 194 if (create) { 195 DP_STATS_INC(soc, rx.err.ipa_smmu_map_dup, 1); 196 } else { 197 DP_STATS_INC(soc, rx.err.ipa_smmu_unmap_dup, 1); 198 } 199 return QDF_STATUS_E_INVAL; 200 } 201 202 qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create); 203 204 return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, size, create, 205 func, line); 206 } 207 208 static QDF_STATUS __dp_ipa_tx_buf_smmu_mapping( 209 struct dp_soc *soc, 210 struct dp_pdev *pdev, 211 bool create, 212 const char *func, 213 uint32_t line) 214 { 215 uint32_t index; 216 QDF_STATUS ret = QDF_STATUS_SUCCESS; 217 uint32_t tx_buffer_cnt = soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; 218 qdf_nbuf_t nbuf; 219 uint32_t buf_len; 220 221 if (!ipa_is_ready()) { 222 dp_info("IPA is not READY"); 223 return 0; 224 } 225 226 for (index = 0; index < tx_buffer_cnt; index++) { 227 nbuf = (qdf_nbuf_t) 228 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[index]; 229 if (!nbuf) 230 continue; 231 buf_len = qdf_nbuf_get_data_len(nbuf); 232 ret = __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, buf_len, 233 create, func, line); 234 } 235 236 return ret; 237 } 238 239 #ifndef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS 240 static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc, 241 bool lock_required) 242 { 243 hal_ring_handle_t hal_ring_hdl; 244 int ring; 245 246 for (ring = 0; ring < soc->num_reo_dest_rings; ring++) { 247 hal_ring_hdl = soc->reo_dest_ring[ring].hal_srng; 248 hal_srng_lock(hal_ring_hdl); 249 soc->ipa_reo_ctx_lock_required[ring] = lock_required; 250 hal_srng_unlock(hal_ring_hdl); 251 } 252 } 253 #else 254 static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc, 255 bool lock_required) 256 { 257 } 258 259 #endif 260 261 #ifdef 
RX_DESC_MULTI_PAGE_ALLOC 262 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc, 263 struct dp_pdev *pdev, 264 bool create, 265 const char *func, 266 uint32_t line) 267 { 268 struct rx_desc_pool *rx_pool; 269 uint8_t pdev_id; 270 uint32_t num_desc, page_id, offset, i; 271 uint16_t num_desc_per_page; 272 union dp_rx_desc_list_elem_t *rx_desc_elem; 273 struct dp_rx_desc *rx_desc; 274 qdf_nbuf_t nbuf; 275 QDF_STATUS ret = QDF_STATUS_SUCCESS; 276 277 if (!qdf_ipa_is_ready()) 278 return ret; 279 280 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) 281 return ret; 282 283 pdev_id = pdev->pdev_id; 284 rx_pool = &soc->rx_desc_buf[pdev_id]; 285 286 dp_ipa_set_reo_ctx_mapping_lock_required(soc, true); 287 qdf_spin_lock_bh(&rx_pool->lock); 288 dp_ipa_rx_buf_smmu_mapping_lock(soc); 289 num_desc = rx_pool->pool_size; 290 num_desc_per_page = rx_pool->desc_pages.num_element_per_page; 291 for (i = 0; i < num_desc; i++) { 292 page_id = i / num_desc_per_page; 293 offset = i % num_desc_per_page; 294 if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages))) 295 break; 296 rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool); 297 rx_desc = &rx_desc_elem->rx_desc; 298 if ((!(rx_desc->in_use)) || rx_desc->unmapped) 299 continue; 300 nbuf = rx_desc->nbuf; 301 302 if (qdf_unlikely(create == 303 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) { 304 if (create) { 305 DP_STATS_INC(soc, 306 rx.err.ipa_smmu_map_dup, 1); 307 } else { 308 DP_STATS_INC(soc, 309 rx.err.ipa_smmu_unmap_dup, 1); 310 } 311 continue; 312 } 313 qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create); 314 315 ret = __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, 316 rx_pool->buf_size, 317 create, func, line); 318 } 319 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 320 qdf_spin_unlock_bh(&rx_pool->lock); 321 dp_ipa_set_reo_ctx_mapping_lock_required(soc, false); 322 323 return ret; 324 } 325 #else 326 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping( 327 struct dp_soc *soc, 328 struct dp_pdev *pdev, 329 bool create, 330 const 
char *func, 331 uint32_t line) 332 { 333 struct rx_desc_pool *rx_pool; 334 uint8_t pdev_id; 335 qdf_nbuf_t nbuf; 336 int i; 337 338 if (!qdf_ipa_is_ready()) 339 return QDF_STATUS_SUCCESS; 340 341 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) 342 return QDF_STATUS_SUCCESS; 343 344 pdev_id = pdev->pdev_id; 345 rx_pool = &soc->rx_desc_buf[pdev_id]; 346 347 dp_ipa_set_reo_ctx_mapping_lock_required(soc, true); 348 qdf_spin_lock_bh(&rx_pool->lock); 349 dp_ipa_rx_buf_smmu_mapping_lock(soc); 350 for (i = 0; i < rx_pool->pool_size; i++) { 351 if ((!(rx_pool->array[i].rx_desc.in_use)) || 352 rx_pool->array[i].rx_desc.unmapped) 353 continue; 354 355 nbuf = rx_pool->array[i].rx_desc.nbuf; 356 357 if (qdf_unlikely(create == 358 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) { 359 if (create) { 360 DP_STATS_INC(soc, 361 rx.err.ipa_smmu_map_dup, 1); 362 } else { 363 DP_STATS_INC(soc, 364 rx.err.ipa_smmu_unmap_dup, 1); 365 } 366 continue; 367 } 368 qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create); 369 370 __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, rx_pool->buf_size, 371 create, func, line); 372 } 373 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 374 qdf_spin_unlock_bh(&rx_pool->lock); 375 dp_ipa_set_reo_ctx_mapping_lock_required(soc, false); 376 377 return QDF_STATUS_SUCCESS; 378 } 379 #endif /* RX_DESC_MULTI_PAGE_ALLOC */ 380 381 QDF_STATUS dp_ipa_set_smmu_mapped(struct cdp_soc_t *soc_hdl, int val) 382 { 383 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 384 385 qdf_atomic_set(&soc->ipa_mapped, val); 386 return QDF_STATUS_SUCCESS; 387 } 388 389 int dp_ipa_get_smmu_mapped(struct cdp_soc_t *soc_hdl) 390 { 391 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 392 393 return qdf_atomic_read(&soc->ipa_mapped); 394 } 395 396 static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev, 397 qdf_shared_mem_t *shared_mem, 398 void *cpu_addr, 399 qdf_dma_addr_t dma_addr, 400 uint32_t size) 401 { 402 qdf_dma_addr_t paddr; 403 int ret; 404 405 shared_mem->vaddr = cpu_addr; 406 qdf_mem_set_dma_size(osdev, 
&shared_mem->mem_info, size); 407 *qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr; 408 409 paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr); 410 qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr); 411 412 ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable, 413 shared_mem->vaddr, dma_addr, size); 414 if (ret) { 415 dp_err("Unable to get DMA sgtable"); 416 return QDF_STATUS_E_NOMEM; 417 } 418 419 qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable); 420 421 return QDF_STATUS_SUCCESS; 422 } 423 424 /** 425 * dp_ipa_get_tx_bank_id() - API to get TCL bank id 426 * @soc: dp_soc handle 427 * @bank_id: out parameter for bank id 428 * 429 * Return: QDF_STATUS 430 */ 431 static QDF_STATUS dp_ipa_get_tx_bank_id(struct dp_soc *soc, uint8_t *bank_id) 432 { 433 if (soc->arch_ops.ipa_get_bank_id) { 434 *bank_id = soc->arch_ops.ipa_get_bank_id(soc); 435 if (*bank_id < 0) { 436 return QDF_STATUS_E_INVAL; 437 } else { 438 dp_info("bank_id %u", *bank_id); 439 return QDF_STATUS_SUCCESS; 440 } 441 } else { 442 return QDF_STATUS_E_NOSUPPORT; 443 } 444 } 445 446 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \ 447 defined(CONFIG_IPA_WDI_UNIFIED_API) 448 static void dp_ipa_setup_tx_params_bank_id(struct dp_soc *soc, 449 qdf_ipa_wdi_pipe_setup_info_t *tx) 450 { 451 uint8_t bank_id; 452 453 if (QDF_IS_STATUS_SUCCESS(dp_ipa_get_tx_bank_id(soc, &bank_id))) 454 QDF_IPA_WDI_SETUP_INFO_RX_BANK_ID(tx, bank_id); 455 } 456 457 static void 458 dp_ipa_setup_tx_smmu_params_bank_id(struct dp_soc *soc, 459 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu) 460 { 461 uint8_t bank_id; 462 463 if (QDF_IS_STATUS_SUCCESS(dp_ipa_get_tx_bank_id(soc, &bank_id))) 464 QDF_IPA_WDI_SETUP_INFO_SMMU_RX_BANK_ID(tx_smmu, bank_id); 465 } 466 #else 467 static inline void 468 dp_ipa_setup_tx_params_bank_id(struct dp_soc *soc, 469 qdf_ipa_wdi_pipe_setup_info_t *tx) 470 { 471 } 472 473 static inline void 474 dp_ipa_setup_tx_smmu_params_bank_id(struct dp_soc *soc, 475 
qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu) 476 { 477 } 478 #endif 479 480 #ifdef QCA_IPA_LL_TX_FLOW_CONTROL 481 static void 482 dp_ipa_setup_tx_alt_params_pmac_id(struct dp_soc *soc, 483 qdf_ipa_wdi_pipe_setup_info_t *tx) 484 { 485 uint8_t pmac_id = 0; 486 487 /* Set Pmac ID, extract pmac_id from second radio for TX_ALT ring */ 488 if (soc->pdev_count > 1) 489 pmac_id = soc->pdev_list[soc->pdev_count - 1]->lmac_id; 490 491 QDF_IPA_WDI_SETUP_INFO_RX_PMAC_ID(tx, pmac_id); 492 } 493 494 static void 495 dp_ipa_setup_tx_alt_smmu_params_pmac_id(struct dp_soc *soc, 496 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu) 497 { 498 uint8_t pmac_id = 0; 499 500 /* Set Pmac ID, extract pmac_id from second radio for TX_ALT ring */ 501 if (soc->pdev_count > 1) 502 pmac_id = soc->pdev_list[soc->pdev_count - 1]->lmac_id; 503 504 QDF_IPA_WDI_SETUP_INFO_SMMU_RX_PMAC_ID(tx_smmu, pmac_id); 505 } 506 507 static void 508 dp_ipa_setup_tx_params_pmac_id(struct dp_soc *soc, 509 qdf_ipa_wdi_pipe_setup_info_t *tx) 510 { 511 uint8_t pmac_id; 512 513 pmac_id = soc->pdev_list[0]->lmac_id; 514 515 QDF_IPA_WDI_SETUP_INFO_RX_PMAC_ID(tx, pmac_id); 516 } 517 518 static void 519 dp_ipa_setup_tx_smmu_params_pmac_id(struct dp_soc *soc, 520 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu) 521 { 522 uint8_t pmac_id; 523 524 pmac_id = soc->pdev_list[0]->lmac_id; 525 526 QDF_IPA_WDI_SETUP_INFO_SMMU_RX_PMAC_ID(tx_smmu, pmac_id); 527 } 528 #else 529 static inline void 530 dp_ipa_setup_tx_alt_params_pmac_id(struct dp_soc *soc, 531 qdf_ipa_wdi_pipe_setup_info_t *tx) 532 { 533 } 534 535 static inline void 536 dp_ipa_setup_tx_alt_smmu_params_pmac_id(struct dp_soc *soc, 537 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu) 538 { 539 } 540 541 static inline void 542 dp_ipa_setup_tx_params_pmac_id(struct dp_soc *soc, 543 qdf_ipa_wdi_pipe_setup_info_t *tx) 544 { 545 } 546 547 static inline void 548 dp_ipa_setup_tx_smmu_params_pmac_id(struct dp_soc *soc, 549 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu) 550 { 551 } 552 #endif 553 
554 #ifdef IPA_WDI3_TX_TWO_PIPES 555 static void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev) 556 { 557 struct dp_ipa_resources *ipa_res; 558 qdf_nbuf_t nbuf; 559 int idx; 560 561 for (idx = 0; idx < soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt; idx++) { 562 nbuf = (qdf_nbuf_t) 563 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx]; 564 if (!nbuf) 565 continue; 566 567 qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL); 568 qdf_mem_dp_tx_skb_cnt_dec(); 569 qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf)); 570 qdf_nbuf_free(nbuf); 571 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx] = 572 (void *)NULL; 573 } 574 575 qdf_mem_free(soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned); 576 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL; 577 578 ipa_res = &pdev->ipa_resource; 579 if (!ipa_res->is_db_ddr_mapped && ipa_res->tx_alt_comp_doorbell_vaddr) 580 iounmap(ipa_res->tx_alt_comp_doorbell_vaddr); 581 582 qdf_mem_free_sgtable(&ipa_res->tx_alt_ring.sgtable); 583 qdf_mem_free_sgtable(&ipa_res->tx_alt_comp_ring.sgtable); 584 } 585 586 static int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc) 587 { 588 uint32_t tx_buffer_count; 589 uint32_t ring_base_align = 8; 590 qdf_dma_addr_t buffer_paddr; 591 struct hal_srng *wbm_srng = (struct hal_srng *) 592 soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng; 593 struct hal_srng_params srng_params; 594 uint32_t wbm_bm_id; 595 void *ring_entry; 596 int num_entries; 597 qdf_nbuf_t nbuf; 598 int retval = QDF_STATUS_SUCCESS; 599 int max_alloc_count = 0; 600 601 /* 602 * Uncomment when dp_ops_cfg.cfg_attach is implemented 603 * unsigned int uc_tx_buf_sz = 604 * dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev); 605 */ 606 unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT; 607 unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1; 608 609 wbm_bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, 610 IPA_TX_ALT_RING_IDX); 611 612 hal_get_srng_params(soc->hal_soc, 613 
hal_srng_to_hal_ring_handle(wbm_srng), 614 &srng_params); 615 num_entries = srng_params.num_entries; 616 617 max_alloc_count = 618 num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES; 619 if (max_alloc_count <= 0) { 620 dp_err("incorrect value for buffer count %u", max_alloc_count); 621 return -EINVAL; 622 } 623 624 dp_info("requested %d buffers to be posted to wbm ring", 625 max_alloc_count); 626 627 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = 628 qdf_mem_malloc(num_entries * 629 sizeof(*soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned)); 630 if (!soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned) { 631 dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail"); 632 return -ENOMEM; 633 } 634 635 hal_srng_access_start_unlocked(soc->hal_soc, 636 hal_srng_to_hal_ring_handle(wbm_srng)); 637 638 /* 639 * Allocate Tx buffers as many as possible. 640 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty 641 * Populate Tx buffers into WBM2IPA ring 642 * This initial buffer population will simulate H/W as source ring, 643 * and update HP 644 */ 645 for (tx_buffer_count = 0; 646 tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) { 647 nbuf = qdf_nbuf_frag_alloc(soc->osdev, alloc_size, 0, 648 256, FALSE); 649 if (!nbuf) 650 break; 651 652 ring_entry = hal_srng_dst_get_next_hp( 653 soc->hal_soc, 654 hal_srng_to_hal_ring_handle(wbm_srng)); 655 if (!ring_entry) { 656 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 657 "%s: Failed to get WBM ring entry", 658 __func__); 659 qdf_nbuf_free(nbuf); 660 break; 661 } 662 663 qdf_nbuf_map_single(soc->osdev, nbuf, 664 QDF_DMA_BIDIRECTIONAL); 665 buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); 666 qdf_mem_dp_tx_skb_cnt_inc(); 667 qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf)); 668 669 hal_rxdma_buff_addr_info_set(soc->hal_soc, ring_entry, 670 buffer_paddr, 0, wbm_bm_id); 671 672 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[ 673 tx_buffer_count] = (void *)nbuf; 674 } 675 676 
hal_srng_access_end_unlocked(soc->hal_soc, 677 hal_srng_to_hal_ring_handle(wbm_srng)); 678 679 soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt = tx_buffer_count; 680 681 if (tx_buffer_count) { 682 dp_info("IPA TX buffer pool2: %d allocated", tx_buffer_count); 683 } else { 684 dp_err("Failed to allocate IPA TX buffer pool2"); 685 qdf_mem_free( 686 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned); 687 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL; 688 retval = -ENOMEM; 689 } 690 691 return retval; 692 } 693 694 static QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev) 695 { 696 struct dp_soc *soc = pdev->soc; 697 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 698 699 ipa_res->tx_alt_ring_num_alloc_buffer = 700 (uint32_t)soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt; 701 702 dp_ipa_get_shared_mem_info( 703 soc->osdev, &ipa_res->tx_alt_ring, 704 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr, 705 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr, 706 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size); 707 708 dp_ipa_get_shared_mem_info( 709 soc->osdev, &ipa_res->tx_alt_comp_ring, 710 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr, 711 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr, 712 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size); 713 714 if (!qdf_mem_get_dma_addr(soc->osdev, 715 &ipa_res->tx_alt_comp_ring.mem_info)) 716 return QDF_STATUS_E_FAILURE; 717 718 return QDF_STATUS_SUCCESS; 719 } 720 721 static void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc) 722 { 723 struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc; 724 struct hal_srng *hal_srng; 725 struct hal_srng_params srng_params; 726 unsigned long addr_offset, dev_base_paddr; 727 728 /* IPA TCL_DATA Alternative Ring - HAL_SRNG_SW2TCL2 */ 729 hal_srng = (struct hal_srng *) 730 soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng; 731 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 732 hal_srng_to_hal_ring_handle(hal_srng), 733 &srng_params); 734 735 
soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr = 736 srng_params.ring_base_paddr; 737 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr = 738 srng_params.ring_base_vaddr; 739 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size = 740 (srng_params.num_entries * srng_params.entry_size) << 2; 741 /* 742 * For the register backed memory addresses, use the scn->mem_pa to 743 * calculate the physical address of the shadow registers 744 */ 745 dev_base_paddr = 746 (unsigned long) 747 ((struct hif_softc *)(hal_soc->hif_handle))->mem_pa; 748 addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) - 749 (unsigned long)(hal_soc->dev_base_addr); 750 soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr = 751 (qdf_dma_addr_t)(addr_offset + dev_base_paddr); 752 753 dp_info("IPA TCL_DATA Alt Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", 754 (unsigned int)addr_offset, 755 (unsigned int)dev_base_paddr, 756 (unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr), 757 (void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr, 758 (void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr, 759 srng_params.num_entries, 760 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size); 761 762 /* IPA TX Alternative COMP Ring - HAL_SRNG_WBM2SW4_RELEASE */ 763 hal_srng = (struct hal_srng *) 764 soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng; 765 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 766 hal_srng_to_hal_ring_handle(hal_srng), 767 &srng_params); 768 769 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr = 770 srng_params.ring_base_paddr; 771 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr = 772 srng_params.ring_base_vaddr; 773 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size = 774 (srng_params.num_entries * srng_params.entry_size) << 2; 775 soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr = 776 hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc), 777 hal_srng_to_hal_ring_handle(hal_srng)); 778 addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) - 779 (unsigned 
long)(hal_soc->dev_base_addr); 780 soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr = 781 (qdf_dma_addr_t)(addr_offset + dev_base_paddr); 782 783 dp_info("IPA TX Alt COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)", 784 (unsigned int)addr_offset, 785 (unsigned int)dev_base_paddr, 786 (unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr), 787 (void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr, 788 (void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr, 789 srng_params.num_entries, 790 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size); 791 } 792 793 static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev) 794 { 795 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 796 uint32_t rx_ready_doorbell_dmaaddr; 797 uint32_t tx_comp_doorbell_dmaaddr; 798 struct dp_soc *soc = pdev->soc; 799 int ret = 0; 800 801 if (ipa_res->is_db_ddr_mapped) 802 ipa_res->tx_comp_doorbell_vaddr = 803 phys_to_virt(ipa_res->tx_comp_doorbell_paddr); 804 else 805 ipa_res->tx_comp_doorbell_vaddr = 806 ioremap(ipa_res->tx_comp_doorbell_paddr, 4); 807 808 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 809 ret = pld_smmu_map(soc->osdev->dev, 810 ipa_res->tx_comp_doorbell_paddr, 811 &tx_comp_doorbell_dmaaddr, 812 sizeof(uint32_t)); 813 ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr; 814 qdf_assert_always(!ret); 815 816 ret = pld_smmu_map(soc->osdev->dev, 817 ipa_res->rx_ready_doorbell_paddr, 818 &rx_ready_doorbell_dmaaddr, 819 sizeof(uint32_t)); 820 ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr; 821 qdf_assert_always(!ret); 822 } 823 824 /* Setup for alternative TX pipe */ 825 if (!ipa_res->tx_alt_comp_doorbell_paddr) 826 return; 827 828 if (ipa_res->is_db_ddr_mapped) 829 ipa_res->tx_alt_comp_doorbell_vaddr = 830 phys_to_virt(ipa_res->tx_alt_comp_doorbell_paddr); 831 else 832 ipa_res->tx_alt_comp_doorbell_vaddr = 833 ioremap(ipa_res->tx_alt_comp_doorbell_paddr, 4); 834 835 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 836 
ret = pld_smmu_map(soc->osdev->dev, 837 ipa_res->tx_alt_comp_doorbell_paddr, 838 &tx_comp_doorbell_dmaaddr, 839 sizeof(uint32_t)); 840 ipa_res->tx_alt_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr; 841 qdf_assert_always(!ret); 842 } 843 } 844 845 static void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev) 846 { 847 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 848 struct dp_soc *soc = pdev->soc; 849 int ret = 0; 850 851 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) 852 return; 853 854 /* Unmap must be in reverse order of map */ 855 if (ipa_res->tx_alt_comp_doorbell_paddr) { 856 ret = pld_smmu_unmap(soc->osdev->dev, 857 ipa_res->tx_alt_comp_doorbell_paddr, 858 sizeof(uint32_t)); 859 qdf_assert_always(!ret); 860 } 861 862 ret = pld_smmu_unmap(soc->osdev->dev, 863 ipa_res->rx_ready_doorbell_paddr, 864 sizeof(uint32_t)); 865 qdf_assert_always(!ret); 866 867 ret = pld_smmu_unmap(soc->osdev->dev, 868 ipa_res->tx_comp_doorbell_paddr, 869 sizeof(uint32_t)); 870 qdf_assert_always(!ret); 871 } 872 873 static QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc, 874 struct dp_pdev *pdev, 875 bool create, const char *func, 876 uint32_t line) 877 { 878 QDF_STATUS ret = QDF_STATUS_SUCCESS; 879 struct ipa_dp_tx_rsc *rsc; 880 uint32_t tx_buffer_cnt; 881 uint32_t buf_len; 882 qdf_nbuf_t nbuf; 883 uint32_t index; 884 885 if (!ipa_is_ready()) { 886 dp_info("IPA is not READY"); 887 return QDF_STATUS_SUCCESS; 888 } 889 890 rsc = &soc->ipa_uc_tx_rsc_alt; 891 tx_buffer_cnt = rsc->alloc_tx_buf_cnt; 892 893 for (index = 0; index < tx_buffer_cnt; index++) { 894 nbuf = (qdf_nbuf_t)rsc->tx_buf_pool_vaddr_unaligned[index]; 895 if (!nbuf) 896 continue; 897 898 buf_len = qdf_nbuf_get_data_len(nbuf); 899 ret = __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, buf_len, 900 create, func, line); 901 } 902 903 return ret; 904 } 905 906 static void dp_ipa_wdi_tx_alt_pipe_params(struct dp_soc *soc, 907 struct dp_ipa_resources *ipa_res, 908 qdf_ipa_wdi_pipe_setup_info_t *tx) 909 { 910 
QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS1; 911 912 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) = 913 qdf_mem_get_dma_addr(soc->osdev, 914 &ipa_res->tx_alt_comp_ring.mem_info); 915 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) = 916 qdf_mem_get_dma_size(soc->osdev, 917 &ipa_res->tx_alt_comp_ring.mem_info); 918 919 /* WBM Tail Pointer Address */ 920 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) = 921 soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr; 922 QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true; 923 924 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) = 925 qdf_mem_get_dma_addr(soc->osdev, 926 &ipa_res->tx_alt_ring.mem_info); 927 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = 928 qdf_mem_get_dma_size(soc->osdev, 929 &ipa_res->tx_alt_ring.mem_info); 930 931 /* TCL Head Pointer Address */ 932 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) = 933 soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr; 934 QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true; 935 936 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) = 937 ipa_res->tx_alt_ring_num_alloc_buffer; 938 939 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0; 940 941 dp_ipa_setup_tx_params_bank_id(soc, tx); 942 943 /* Set Pmac ID, extract pmac_id from second radio for TX_ALT ring */ 944 dp_ipa_setup_tx_alt_params_pmac_id(soc, tx); 945 } 946 947 static void 948 dp_ipa_wdi_tx_alt_pipe_smmu_params(struct dp_soc *soc, 949 struct dp_ipa_resources *ipa_res, 950 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu) 951 { 952 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = IPA_CLIENT_WLAN2_CONS1; 953 954 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu), 955 &ipa_res->tx_alt_comp_ring.sgtable, 956 sizeof(sgtable_t)); 957 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) = 958 qdf_mem_get_dma_size(soc->osdev, 959 &ipa_res->tx_alt_comp_ring.mem_info); 960 /* WBM Tail Pointer Address */ 961 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) = 962 soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr; 963 
QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true; 964 965 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu), 966 &ipa_res->tx_alt_ring.sgtable, 967 sizeof(sgtable_t)); 968 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) = 969 qdf_mem_get_dma_size(soc->osdev, 970 &ipa_res->tx_alt_ring.mem_info); 971 /* TCL Head Pointer Address */ 972 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) = 973 soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr; 974 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true; 975 976 QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) = 977 ipa_res->tx_alt_ring_num_alloc_buffer; 978 QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0; 979 980 dp_ipa_setup_tx_smmu_params_bank_id(soc, tx_smmu); 981 982 /* Set Pmac ID, extract pmac_id from second radio for TX_ALT ring */ 983 dp_ipa_setup_tx_alt_smmu_params_pmac_id(soc, tx_smmu); 984 } 985 986 static void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc, 987 struct dp_ipa_resources *res, 988 qdf_ipa_wdi_conn_in_params_t *in) 989 { 990 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu = NULL; 991 qdf_ipa_wdi_pipe_setup_info_t *tx = NULL; 992 qdf_ipa_ep_cfg_t *tx_cfg; 993 994 QDF_IPA_WDI_CONN_IN_PARAMS_IS_TX1_USED(in) = true; 995 996 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 997 tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE_SMMU(in); 998 tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu); 999 dp_ipa_wdi_tx_alt_pipe_smmu_params(soc, res, tx_smmu); 1000 } else { 1001 tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE(in); 1002 tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx); 1003 dp_ipa_wdi_tx_alt_pipe_params(soc, res, tx); 1004 } 1005 1006 QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT; 1007 QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN; 1008 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0; 1009 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0; 1010 QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0; 1011 QDF_IPA_EP_CFG_MODE(tx_cfg) = 
IPA_BASIC; 1012 QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true; 1013 } 1014 1015 static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res, 1016 qdf_ipa_wdi_conn_out_params_t *out) 1017 { 1018 res->tx_comp_doorbell_paddr = 1019 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out); 1020 res->rx_ready_doorbell_paddr = 1021 QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out); 1022 res->tx_alt_comp_doorbell_paddr = 1023 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_ALT_DB_PA(out); 1024 } 1025 1026 static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in, 1027 uint8_t session_id) 1028 { 1029 bool is_2g_iface = session_id & IPA_SESSION_ID_SHIFT; 1030 1031 session_id = session_id >> IPA_SESSION_ID_SHIFT; 1032 dp_debug("session_id %u is_2g_iface %d", session_id, is_2g_iface); 1033 1034 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16); 1035 QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_TX1_USED(in) = is_2g_iface; 1036 } 1037 1038 static void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc, 1039 struct dp_ipa_resources *res) 1040 { 1041 struct hal_srng *wbm_srng; 1042 1043 /* Init first TX comp ring */ 1044 wbm_srng = (struct hal_srng *) 1045 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 1046 1047 hal_srng_dst_init_hp(soc->hal_soc, wbm_srng, 1048 res->tx_comp_doorbell_vaddr); 1049 1050 /* Init the alternate TX comp ring */ 1051 if (!res->tx_alt_comp_doorbell_paddr) 1052 return; 1053 1054 wbm_srng = (struct hal_srng *) 1055 soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng; 1056 1057 hal_srng_dst_init_hp(soc->hal_soc, wbm_srng, 1058 res->tx_alt_comp_doorbell_vaddr); 1059 } 1060 1061 static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc, 1062 struct dp_ipa_resources *ipa_res) 1063 { 1064 struct hal_srng *wbm_srng; 1065 1066 wbm_srng = (struct hal_srng *) 1067 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 1068 1069 hal_srng_dst_set_hp_paddr_confirm(wbm_srng, 1070 ipa_res->tx_comp_doorbell_paddr); 1071 1072 dp_info("paddr %pK vaddr %pK", 1073 (void 
*)ipa_res->tx_comp_doorbell_paddr, 1074 (void *)ipa_res->tx_comp_doorbell_vaddr); 1075 1076 /* Setup for alternative TX comp ring */ 1077 if (!ipa_res->tx_alt_comp_doorbell_paddr) 1078 return; 1079 1080 wbm_srng = (struct hal_srng *) 1081 soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng; 1082 1083 hal_srng_dst_set_hp_paddr_confirm(wbm_srng, 1084 ipa_res->tx_alt_comp_doorbell_paddr); 1085 1086 dp_info("paddr %pK vaddr %pK", 1087 (void *)ipa_res->tx_alt_comp_doorbell_paddr, 1088 (void *)ipa_res->tx_alt_comp_doorbell_vaddr); 1089 } 1090 1091 #ifdef IPA_SET_RESET_TX_DB_PA 1092 static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc, 1093 struct dp_ipa_resources *ipa_res) 1094 { 1095 hal_ring_handle_t wbm_srng; 1096 qdf_dma_addr_t hp_addr; 1097 1098 wbm_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 1099 if (!wbm_srng) 1100 return QDF_STATUS_E_FAILURE; 1101 1102 hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr; 1103 1104 hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr); 1105 1106 dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr); 1107 1108 /* Reset alternative TX comp ring */ 1109 wbm_srng = soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng; 1110 if (!wbm_srng) 1111 return QDF_STATUS_E_FAILURE; 1112 1113 hp_addr = soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr; 1114 1115 hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr); 1116 1117 dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr); 1118 1119 return QDF_STATUS_SUCCESS; 1120 } 1121 #endif /* IPA_SET_RESET_TX_DB_PA */ 1122 1123 #else /* !IPA_WDI3_TX_TWO_PIPES */ 1124 1125 static inline 1126 void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev) 1127 { 1128 } 1129 1130 static inline void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc) 1131 { 1132 } 1133 1134 static inline int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc) 1135 { 1136 return 0; 1137 } 1138 1139 static inline QDF_STATUS 
dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev)
{
	/* No alternate TX ring without IPA_WDI3_TX_TWO_PIPES; nothing to do */
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_map_ring_doorbell_paddr() - Map TX-comp/RX-ready doorbells for IPA
 * @pdev: data path pdev handle
 *
 * Maps the TX completion doorbell into the kernel VA space (phys_to_virt
 * when the doorbell is DDR backed, ioremap otherwise) and, when stage-1
 * SMMU is enabled, replaces both doorbell paddrs with their SMMU-mapped
 * DMA addresses.
 *
 * Return: none
 */
static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	uint32_t rx_ready_doorbell_dmaaddr;
	uint32_t tx_comp_doorbell_dmaaddr;
	struct dp_soc *soc = pdev->soc;
	int ret = 0;

	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_comp_doorbell_vaddr =
				phys_to_virt(ipa_res->tx_comp_doorbell_paddr);
	else
		/* 4 bytes: a single 32-bit doorbell register */
		ipa_res->tx_comp_doorbell_vaddr =
				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->tx_comp_doorbell_paddr,
				   &tx_comp_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
		qdf_assert_always(!ret);

		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->rx_ready_doorbell_paddr,
				   &rx_ready_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}
}

/**
 * dp_ipa_unmap_ring_doorbell_paddr() - Undo SMMU doorbell mappings
 * @pdev: data path pdev handle
 *
 * Unmaps in reverse order of dp_ipa_map_ring_doorbell_paddr() (rx first,
 * then tx). No-op when stage-1 SMMU is disabled.
 *
 * Return: none
 */
static inline void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	struct dp_soc *soc = pdev->soc;
	int ret = 0;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return;

	ret = pld_smmu_unmap(soc->osdev->dev,
			     ipa_res->rx_ready_doorbell_paddr,
			     sizeof(uint32_t));
	qdf_assert_always(!ret);

	ret = pld_smmu_unmap(soc->osdev->dev,
			     ipa_res->tx_comp_doorbell_paddr,
			     sizeof(uint32_t));
	qdf_assert_always(!ret);
}

/* No alternate TX buffer pool in the single-pipe build: always succeeds */
static inline QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc,
							struct dp_pdev *pdev,
							bool create,
							const char *func,
							uint32_t line)
{
	return QDF_STATUS_SUCCESS;
}

/* Single-pipe build: no second TX pipe to set up */
static inline
void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc, struct dp_ipa_resources *res,
			      qdf_ipa_wdi_conn_in_params_t *in)
{
}

/**
 * dp_ipa_set_pipe_db() - Save doorbell PAs returned by WDI connect
 * @res: IPA resources of the pdev
 * @out: WDI connect-out params filled by IPA driver
 *
 * Return: none
 */
static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res,
			       qdf_ipa_wdi_conn_out_params_t *out)
{
	res->tx_comp_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out);
	res->rx_ready_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out);
}

#ifdef IPA_WDS_EASYMESH_FEATURE
/**
 * dp_ipa_setup_iface_session_id() - Pass vdev id to IPA
 * @in: ipa in params
 * @session_id: vdev id
 *
 * Pass Vdev id to IPA, IPA metadata order is changed and vdev id
 * is stored at higher nibble so, no shift is required.
 *
 * Return: none
 */
static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
					  uint8_t session_id)
{
	if (ucfg_ipa_is_wds_enabled())
		QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id);
	else
		QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16);
}
#else
/* vdev id is carried in bits 16-23 of the interface metadata */
static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
					  uint8_t session_id)
{
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16);
}
#endif

/* Init HP of the (single) IPA TX completion ring */
static inline void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc,
					       struct dp_ipa_resources *res)
{
	struct hal_srng *wbm_srng = (struct hal_srng *)
		soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;

	hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
			     res->tx_comp_doorbell_vaddr);
}

/* Point WBM HP writes at the IPA TX completion doorbell */
static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc,
					 struct dp_ipa_resources *ipa_res)
{
	struct hal_srng *wbm_srng = (struct hal_srng *)
		soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;

	hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
					  ipa_res->tx_comp_doorbell_paddr);

	dp_info("paddr %pK vaddr %pK",
		(void *)ipa_res->tx_comp_doorbell_paddr,
		(void *)ipa_res->tx_comp_doorbell_vaddr);
}

#ifdef IPA_SET_RESET_TX_DB_PA
/* Restore the WBM HP address to its shadow register paddr */
static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc,
					      struct dp_ipa_resources *ipa_res)
{
	hal_ring_handle_t wbm_srng =
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	qdf_dma_addr_t hp_addr;

	if (!wbm_srng)
		return QDF_STATUS_E_FAILURE;

	hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr;

	hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);

	dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);

	return QDF_STATUS_SUCCESS;
}
#endif /* IPA_SET_RESET_TX_DB_PA */

#endif /* IPA_WDI3_TX_TWO_PIPES */

/**
 * dp_tx_ipa_uc_detach() - Free autonomy TX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Free allocated TX buffers with WBM SRNG
 *
 * Return: none
 */
static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int idx;
	qdf_nbuf_t nbuf;
	struct dp_ipa_resources *ipa_res;

	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
		nbuf = (qdf_nbuf_t)
			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx];
		if (!nbuf)
			continue;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
		qdf_mem_dp_tx_skb_cnt_dec();
		qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf));
		qdf_nbuf_free(nbuf);
		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
						(void *)NULL;
	}

	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;

	ipa_res = &pdev->ipa_resource;

	qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable);
	qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable);
}

/**
 * dp_rx_ipa_uc_detach() - free autonomy RX resources
 * @soc: data path instance
 * 
@pdev: core txrx pdev context
 *
 * This function will detach DP RX into main device context
 * will free DP Rx resources.
 *
 * Return: none
 */
static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;

	qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable);
	qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable);
}

/**
 * dp_rx_alt_ipa_uc_detach() - free autonomy RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function will detach DP RX into main device context
 * will free DP Rx resources.
 *
 * Return: none
 */
#ifdef IPA_WDI3_VLAN_SUPPORT
static void dp_rx_alt_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;

	/* Alternate RX ring exists only when the VLAN feature is enabled */
	if (!wlan_ipa_is_vlan_enabled())
		return;

	qdf_mem_free_sgtable(&ipa_res->rx_alt_rdy_ring.sgtable);
	qdf_mem_free_sgtable(&ipa_res->rx_alt_refill_ring.sgtable);
}
#else
static inline
void dp_rx_alt_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{ }
#endif

/**
 * dp_ipa_opt_wifi_dp_cleanup() - Cleanup ipa opt wifi dp filter setup
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function will cleanup filter setup for optional wifi dp.
 *
 * Return: none
 */

#ifdef IPA_OPT_WIFI_DP
static void dp_ipa_opt_wifi_dp_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hif_softc *hif = (struct hif_softc *)(hal_soc->hif_handle);
	int count = qdf_atomic_read(&hif->opt_wifi_dp_rtpm_cnt);
	int i;

	/* Drop every runtime-PM vote still held by opt wifi dp */
	for (i = count; i > 0; i--) {
		dp_info("opt_dp: cleanup call pcie link down");
		dp_ipa_pcie_link_down((struct cdp_soc_t *)soc);
	}
}
#else
static inline
void dp_ipa_opt_wifi_dp_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
#endif

/* Detach all IPA uC TX/RX resources of @pdev; no-op when IPA is disabled */
int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* TX resource detach */
	dp_tx_ipa_uc_detach(soc, pdev);

	/* Cleanup 2nd TX pipe resources */
	dp_ipa_tx_alt_pool_detach(soc, pdev);

	/* RX resource detach */
	dp_rx_ipa_uc_detach(soc, pdev);

	/* Cleanup 2nd RX pipe resources */
	dp_rx_alt_ipa_uc_detach(soc, pdev);

	dp_ipa_opt_wifi_dp_cleanup(soc, pdev);

	return QDF_STATUS_SUCCESS;	/* success */
}

/**
 * dp_tx_ipa_uc_attach() - Allocate autonomy TX resources
 * @soc: data path instance
 * @pdev: Physical device handle
 *
 * Allocate TX buffer from non-cacheable memory
 * Attach allocated TX buffers with WBM SRNG
 *
 * Return: int
 */
static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint32_t tx_buffer_count;
	uint32_t ring_base_align = 8;
	qdf_dma_addr_t buffer_paddr;
	struct hal_srng *wbm_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct hal_srng_params srng_params;
	void *ring_entry;
	int num_entries;
	qdf_nbuf_t nbuf;
	int retval = QDF_STATUS_SUCCESS;
	int
max_alloc_count = 0;
	uint32_t wbm_bm_id;

	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	wbm_bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx,
						  IPA_TCL_DATA_RING_IDX);

	hal_get_srng_params(soc->hal_soc, hal_srng_to_hal_ring_handle(wbm_srng),
			    &srng_params);
	num_entries = srng_params.num_entries;

	/* Leave WAR entries empty so WBM can still write when ring is full */
	max_alloc_count =
		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
	if (max_alloc_count <= 0) {
		dp_err("incorrect value for buffer count %u", max_alloc_count);
		return -EINVAL;
	}

	dp_info("requested %d buffers to be posted to wbm ring",
		max_alloc_count);

	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
		return -ENOMEM;
	}

	hal_srng_access_start_unlocked(soc->hal_soc,
				       hal_srng_to_hal_ring_handle(wbm_srng));

	/*
	 * Allocate Tx buffers as many as possible.
	 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
	 * Populate Tx buffers into WBM2IPA ring
	 * This initial buffer population will simulate H/W as source ring,
	 * and update HP
	 */
	for (tx_buffer_count = 0;
	     tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
		nbuf = qdf_nbuf_frag_alloc(soc->osdev, alloc_size, 0,
					   256, FALSE);
		if (!nbuf)
			break;

		ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
				hal_srng_to_hal_ring_handle(wbm_srng));
		if (!ring_entry) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Failed to get WBM ring entry",
				  __func__);
			qdf_nbuf_free(nbuf);
			break;
		}

		qdf_nbuf_map_single(soc->osdev, nbuf,
				    QDF_DMA_BIDIRECTIONAL);
		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
		qdf_mem_dp_tx_skb_cnt_inc();
		qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf));

		/*
		 * TODO - KIWI code can directly call the be handler
		 * instead of hal soc ops.
		 */
		hal_rxdma_buff_addr_info_set(soc->hal_soc, ring_entry,
					     buffer_paddr, 0, wbm_bm_id);

		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
			= (void *)nbuf;
	}

	hal_srng_access_end_unlocked(soc->hal_soc,
				     hal_srng_to_hal_ring_handle(wbm_srng));

	soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;

	if (tx_buffer_count) {
		dp_info("IPA WDI TX buffer: %d allocated", tx_buffer_count);
	} else {
		dp_err("No IPA WDI TX buffer allocated!");
		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
		retval = -ENOMEM;
	}

	return retval;
}

/**
 * dp_rx_ipa_uc_attach() - Allocate autonomy RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance into the main
 * device (SOC) context.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	/* RX needs no dedicated allocation today; placeholder for symmetry */
	return QDF_STATUS_SUCCESS;
}

/* Attach IPA uC TX then RX resources, unwinding on any failure */
int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int error;

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* TX resource attach */
	error = dp_tx_ipa_uc_attach(soc, pdev);
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: DP IPA UC TX attach fail code %d",
			  __func__, error);
		return error;
	}

	/* Setup 2nd TX pipe */
	error = dp_ipa_tx_alt_pool_attach(soc);
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: DP IPA TX pool2 attach fail code %d",
			  __func__, error);
		dp_tx_ipa_uc_detach(soc, pdev);
		return error;
	}

	/* RX resource attach */
	error = dp_rx_ipa_uc_attach(soc, pdev);
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: DP IPA UC RX attach fail code %d",
			  __func__, error);
		dp_ipa_tx_alt_pool_detach(soc, pdev);
		dp_tx_ipa_uc_detach(soc, pdev);
		return error;
	}

	return QDF_STATUS_SUCCESS;	/* success */
}

#ifdef IPA_WDI3_VLAN_SUPPORT
/**
 * dp_ipa_rx_alt_ring_resource_setup() - setup IPA 2nd RX ring resources
 * @soc: data path SoC handle
 * @pdev: data path pdev handle
 *
 * Return: none
 */
static
void dp_ipa_rx_alt_ring_resource_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	unsigned long addr_offset, dev_base_paddr;
	qdf_dma_addr_t hp_addr;

	if (!wlan_ipa_is_vlan_enabled())
		return;

	dev_base_paddr =
		(unsigned long)
		((struct
hif_softc *)(hal_soc->hif_handle))->mem_pa;

	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW3 */
	hal_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_ALT_REO_DEST_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	/* entry_size is in 4-byte words, hence the << 2 to get bytes */
	soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr),
		(void *)soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_size);

	hal_srng = (struct hal_srng *)
			pdev->rx_refill_buf_ring3.hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);
	soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				       hal_srng_to_hal_ring_handle(hal_srng));
	soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr =
		qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);

	dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)(soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr),
		(void *)soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_size);
}
#else
static inline
void dp_ipa_rx_alt_ring_resource_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{ }
#endif
/**
 * dp_ipa_ring_resource_setup() - Record TCL/WBM/REO/refill ring info for IPA
 * @soc: data path SoC handle
 * @pdev: data path pdev handle
 *
 * Snapshots base addresses, sizes and head/tail pointer shadow-register
 * physical addresses of the rings IPA uses, and programs the REO
 * destination remap (IX0) so IPA-bound traffic lands on the right rings.
 *
 * Return: 0 on success (QDF_STATUS_SUCCESS when IPA is disabled)
 */
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
			       struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr;
	unsigned long addr_offset, dev_base_paddr;
	uint32_t ix0;
	uint8_t ix0_map[8];

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
	hal_srng = (struct hal_srng *)
			soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
	hal_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr =
		hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				     hal_srng_to_hal_ring_handle(hal_srng));
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

	dp_ipa_tx_alt_ring_resource_setup(soc);

	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
	hal_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

	hal_srng = (struct hal_srng *)
			pdev->rx_refill_buf_ring2.hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				       hal_srng_to_hal_ring_handle(hal_srng));
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr =
		qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);

	dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

	/*
	 * Set DEST_RING_MAPPING_4 to SW2 as default value for
	 * DESTINATION_RING_CTRL_IX_0.
	 */
	ix0_map[0] = REO_REMAP_SW1;
	ix0_map[1] = REO_REMAP_SW1;
	ix0_map[2] = REO_REMAP_SW2;
	ix0_map[3] = REO_REMAP_SW3;
	ix0_map[4] = REO_REMAP_SW2;
	ix0_map[5] = REO_REMAP_RELEASE;
	ix0_map[6] = REO_REMAP_FW;
	ix0_map[7] = REO_REMAP_FW;

	dp_ipa_opt_dp_ixo_remap(ix0_map);
	ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0,
				    ix0_map);

	hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, NULL, NULL);

	dp_ipa_rx_alt_ring_resource_setup(soc, pdev);
	return 0;
}

#ifdef IPA_WDI3_VLAN_SUPPORT
/**
 * dp_ipa_rx_alt_ring_get_resource() - get IPA 2nd RX ring resources
 * @pdev: data path pdev handle
 *
 * Return: Success if resourece is found
 */
static QDF_STATUS dp_ipa_rx_alt_ring_get_resource(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;

	if (!wlan_ipa_is_vlan_enabled())
		return QDF_STATUS_SUCCESS;

	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_alt_rdy_ring,
				   soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_vaddr,
				   soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_paddr,
				   soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_size);

	dp_ipa_get_shared_mem_info(
			soc->osdev, &ipa_res->rx_alt_refill_ring,
			soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_vaddr,
			soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_paddr,
			soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_size);

	if (!qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->rx_alt_rdy_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->rx_alt_refill_ring.mem_info))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS dp_ipa_rx_alt_ring_get_resource(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/*
 * Export the shared-memory info (base vaddr/paddr/size) of all IPA rings to
 * the pdev's ipa_resource, failing if any ring lacks a DMA address.
 */
QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	ipa_res->tx_num_alloc_buffer =
		(uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;

	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

	dp_ipa_get_shared_mem_info(
			soc->osdev, &ipa_res->rx_refill_ring,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

	if (!qdf_mem_get_dma_addr(soc->osdev, &ipa_res->tx_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->tx_comp_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->rx_refill_ring.mem_info))
		return QDF_STATUS_E_FAILURE;

	if (dp_ipa_tx_alt_ring_get_resource(pdev))
		return QDF_STATUS_E_FAILURE;

	if (dp_ipa_rx_alt_ring_get_resource(pdev))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/* TX doorbell is set via a separate path when IPA_SET_RESET_TX_DB_PA is on */
#ifdef IPA_SET_RESET_TX_DB_PA
#define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res)
#else
#define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res) \
		dp_ipa_set_tx_doorbell_paddr(soc, ipa_res)
#endif

#ifdef IPA_WDI3_VLAN_SUPPORT
/**
 * dp_ipa_map_rx_alt_ring_doorbell_paddr() - Map 2nd rx ring doorbell paddr
 * @pdev: data path pdev handle
 *
 * Return: none
 */
static void dp_ipa_map_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	uint32_t rx_ready_doorbell_dmaaddr;
	struct dp_soc *soc = pdev->soc;
	struct hal_srng *reo_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_ALT_REO_DEST_RING_IDX].hal_srng;
	int ret = 0;

	if (!wlan_ipa_is_vlan_enabled())
		return;

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->rx_alt_ready_doorbell_paddr,
				   &rx_ready_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->rx_alt_ready_doorbell_paddr =
					rx_ready_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}

	hal_srng_dst_set_hp_paddr_confirm(reo_srng,
					  ipa_res->rx_alt_ready_doorbell_paddr);
}

/**
 * dp_ipa_unmap_rx_alt_ring_doorbell_paddr() - Unmap 2nd rx ring doorbell paddr
 * @pdev: data path pdev handle
 *
 * Return: none
 */
static void dp_ipa_unmap_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	struct dp_soc *soc = pdev->soc;
	int ret = 0;

	if (!wlan_ipa_is_vlan_enabled())
		return;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return;

	ret = pld_smmu_unmap(soc->osdev->dev,
			     ipa_res->rx_alt_ready_doorbell_paddr,
			     sizeof(uint32_t));
	qdf_assert_always(!ret);
}
#else
static inline void dp_ipa_map_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev)
{ }

static inline void dp_ipa_unmap_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev)
{ }
#endif

/* Map all ring doorbells and program the HW head pointers to ring them */
QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;
	struct hal_srng *reo_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	dp_ipa_map_ring_doorbell_paddr(pdev);
	dp_ipa_map_rx_alt_ring_doorbell_paddr(pdev);

	DP_IPA_SET_TX_DB_PADDR(soc, ipa_res);

	/*
	 * For RX, REO module on Napier/Hastings does reordering on incoming
	 * Ethernet packets and writes one or more descriptors to REO2IPA Rx
	 * ring.It then updates the ring's Write/Head ptr and rings a doorbell
	 * to IPA.
	 * Set the doorbell addr for the REO ring.
2033 */ 2034 hal_srng_dst_set_hp_paddr_confirm(reo_srng, 2035 ipa_res->rx_ready_doorbell_paddr); 2036 return QDF_STATUS_SUCCESS; 2037 } 2038 2039 QDF_STATUS dp_ipa_iounmap_doorbell_vaddr(struct cdp_soc_t *soc_hdl, 2040 uint8_t pdev_id) 2041 { 2042 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 2043 struct dp_pdev *pdev = 2044 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2045 struct dp_ipa_resources *ipa_res; 2046 2047 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 2048 return QDF_STATUS_SUCCESS; 2049 2050 if (!pdev) { 2051 dp_err("Invalid instance"); 2052 return QDF_STATUS_E_FAILURE; 2053 } 2054 2055 ipa_res = &pdev->ipa_resource; 2056 if (!ipa_res->is_db_ddr_mapped) 2057 iounmap(ipa_res->tx_comp_doorbell_vaddr); 2058 2059 return QDF_STATUS_SUCCESS; 2060 } 2061 2062 QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2063 uint8_t *op_msg) 2064 { 2065 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 2066 struct dp_pdev *pdev = 2067 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2068 2069 if (!pdev) { 2070 dp_err("Invalid instance"); 2071 return QDF_STATUS_E_FAILURE; 2072 } 2073 2074 if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx)) 2075 return QDF_STATUS_SUCCESS; 2076 2077 if (pdev->ipa_uc_op_cb) { 2078 pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt); 2079 } else { 2080 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2081 "%s: IPA callback function is not registered", __func__); 2082 qdf_mem_free(op_msg); 2083 return QDF_STATUS_E_FAILURE; 2084 } 2085 2086 return QDF_STATUS_SUCCESS; 2087 } 2088 2089 QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2090 ipa_uc_op_cb_type op_cb, 2091 void *usr_ctxt) 2092 { 2093 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 2094 struct dp_pdev *pdev = 2095 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2096 2097 if (!pdev) { 2098 dp_err("Invalid instance"); 2099 return QDF_STATUS_E_FAILURE; 2100 } 2101 2102 if 
(!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	pdev->ipa_uc_op_cb = op_cb;
	pdev->usr_ctxt = usr_ctxt;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_deregister_op_cb() - Drop the IPA uC op-message callback
 * @soc_hdl: data path soc handle
 * @pdev_id: id of the data path pdev handle
 */
void dp_ipa_deregister_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("Invalid instance");
		return;
	}

	dp_debug("Deregister OP handler callback");
	pdev->ipa_uc_op_cb = NULL;
	pdev->usr_ctxt = NULL;
}

/**
 * dp_ipa_get_stat() - Get IPA statistics
 * @soc_hdl: data path soc handle
 * @pdev_id: id of the data path pdev handle
 *
 * Return: QDF_STATUS_SUCCESS unconditionally; implementation is still TBD
 */
QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	/* TBD */
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_send_ipa_data_frame() - Send an IPA-originated frame on the TX path
 * @soc_hdl: data path soc handle
 * @vdev_id: virtual device id
 * @skb: frame to transmit
 *
 * Return: NULL when the frame was consumed by the TX path; otherwise the
 *	   skb that could not be transmitted
 */
qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				     qdf_nbuf_t skb)
{
	qdf_nbuf_t ret;

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = dp_tx_send(soc_hdl, vdev_id, skb);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to tx", __func__);
		return ret;
	}

	return NULL;
}

#ifdef QCA_IPA_LL_TX_FLOW_CONTROL
/**
 * dp_ipa_is_target_ready() - check if target is ready or not
 * @soc: datapath soc handle
 *
 * Return: true if target is ready
 */
static inline
bool dp_ipa_is_target_ready(struct dp_soc *soc)
{
	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
		return false;
	else
		return true;
}

/**
 * dp_ipa_update_txr_db_status() - Indicate transfer ring DB is SMMU mapped or not
 * @dev: Pointer to device
 * @txrx_smmu: WDI TX/RX configuration
 *
 * Return: None
 */
static inline
void dp_ipa_update_txr_db_status(struct device *dev,
				 qdf_ipa_wdi_pipe_setup_info_smmu_t *txrx_smmu)
{
	int pcie_slot =
pld_get_pci_slot(dev); 2177 2178 if (pcie_slot) 2179 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(txrx_smmu) = false; 2180 else 2181 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(txrx_smmu) = true; 2182 } 2183 2184 /** 2185 * dp_ipa_update_evt_db_status() - Indicate evt ring DB is SMMU mapped or not 2186 * @dev: Pointer to device 2187 * @txrx_smmu: WDI TX/RX configuration 2188 * 2189 * Return: None 2190 */ 2191 static inline 2192 void dp_ipa_update_evt_db_status(struct device *dev, 2193 qdf_ipa_wdi_pipe_setup_info_smmu_t *txrx_smmu) 2194 { 2195 int pcie_slot = pld_get_pci_slot(dev); 2196 2197 if (pcie_slot) 2198 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(txrx_smmu) = false; 2199 else 2200 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(txrx_smmu) = true; 2201 } 2202 #else 2203 static inline 2204 bool dp_ipa_is_target_ready(struct dp_soc *soc) 2205 { 2206 return true; 2207 } 2208 2209 static inline 2210 void dp_ipa_update_txr_db_status(struct device *dev, 2211 qdf_ipa_wdi_pipe_setup_info_smmu_t *txrx_smmu) 2212 { 2213 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(txrx_smmu) = true; 2214 } 2215 2216 static inline 2217 void dp_ipa_update_evt_db_status(struct device *dev, 2218 qdf_ipa_wdi_pipe_setup_info_smmu_t *txrx_smmu) 2219 { 2220 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(txrx_smmu) = true; 2221 } 2222 #endif 2223 2224 QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 2225 { 2226 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 2227 struct dp_pdev *pdev = 2228 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2229 uint32_t ix0; 2230 uint32_t ix2; 2231 uint8_t ix_map[8]; 2232 2233 if (!pdev) { 2234 dp_err("Invalid instance"); 2235 return QDF_STATUS_E_FAILURE; 2236 } 2237 2238 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 2239 return QDF_STATUS_SUCCESS; 2240 2241 if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle))) 2242 return QDF_STATUS_E_AGAIN; 2243 2244 if (!dp_ipa_is_target_ready(soc)) 2245 
return QDF_STATUS_E_AGAIN; 2246 2247 /* Call HAL API to remap REO rings to REO2IPA ring */ 2248 ix_map[0] = REO_REMAP_SW1; 2249 ix_map[1] = REO_REMAP_SW4; 2250 ix_map[2] = REO_REMAP_SW1; 2251 if (wlan_ipa_is_vlan_enabled()) 2252 ix_map[3] = REO_REMAP_SW3; 2253 else 2254 ix_map[3] = REO_REMAP_SW4; 2255 ix_map[4] = REO_REMAP_SW4; 2256 ix_map[5] = REO_REMAP_RELEASE; 2257 ix_map[6] = REO_REMAP_FW; 2258 ix_map[7] = REO_REMAP_FW; 2259 2260 ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0, 2261 ix_map); 2262 2263 if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) { 2264 ix_map[0] = REO_REMAP_SW4; 2265 ix_map[1] = REO_REMAP_SW4; 2266 ix_map[2] = REO_REMAP_SW4; 2267 ix_map[3] = REO_REMAP_SW4; 2268 ix_map[4] = REO_REMAP_SW4; 2269 ix_map[5] = REO_REMAP_SW4; 2270 ix_map[6] = REO_REMAP_SW4; 2271 ix_map[7] = REO_REMAP_SW4; 2272 2273 ix2 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX2, 2274 ix_map); 2275 2276 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, 2277 &ix2, &ix2); 2278 dp_ipa_reo_remap_history_add(ix0, ix2, ix2); 2279 } else { 2280 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, 2281 NULL, NULL); 2282 dp_ipa_reo_remap_history_add(ix0, 0, 0); 2283 } 2284 2285 return QDF_STATUS_SUCCESS; 2286 } 2287 2288 QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 2289 { 2290 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 2291 struct dp_pdev *pdev = 2292 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2293 uint8_t ix0_map[8]; 2294 uint32_t ix0; 2295 uint32_t ix1; 2296 uint32_t ix2; 2297 uint32_t ix3; 2298 2299 if (!pdev) { 2300 dp_err("Invalid instance"); 2301 return QDF_STATUS_E_FAILURE; 2302 } 2303 2304 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 2305 return QDF_STATUS_SUCCESS; 2306 2307 if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle))) 2308 return QDF_STATUS_E_AGAIN; 2309 2310 if (!dp_ipa_is_target_ready(soc)) 2311 return QDF_STATUS_E_AGAIN; 2312 2313 ix0_map[0] = REO_REMAP_SW1; 
2314 ix0_map[1] = REO_REMAP_SW1; 2315 ix0_map[2] = REO_REMAP_SW2; 2316 ix0_map[3] = REO_REMAP_SW3; 2317 ix0_map[4] = REO_REMAP_SW2; 2318 ix0_map[5] = REO_REMAP_RELEASE; 2319 ix0_map[6] = REO_REMAP_FW; 2320 ix0_map[7] = REO_REMAP_FW; 2321 2322 /* Call HAL API to remap REO rings to REO2IPA ring */ 2323 ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0, 2324 ix0_map); 2325 2326 if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) { 2327 dp_reo_remap_config(soc, &ix1, &ix2, &ix3); 2328 2329 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, 2330 &ix2, &ix3); 2331 dp_ipa_reo_remap_history_add(ix0, ix2, ix3); 2332 } else { 2333 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, 2334 NULL, NULL); 2335 dp_ipa_reo_remap_history_add(ix0, 0, 0); 2336 } 2337 2338 return QDF_STATUS_SUCCESS; 2339 } 2340 2341 /* This should be configurable per H/W configuration enable status */ 2342 #define L3_HEADER_PADDING 2 2343 2344 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \ 2345 defined(CONFIG_IPA_WDI_UNIFIED_API) 2346 2347 #if !defined(QCA_LL_TX_FLOW_CONTROL_V2) && !defined(QCA_IPA_LL_TX_FLOW_CONTROL) 2348 static inline void dp_setup_mcc_sys_pipes( 2349 qdf_ipa_sys_connect_params_t *sys_in, 2350 qdf_ipa_wdi_conn_in_params_t *pipe_in) 2351 { 2352 int i = 0; 2353 /* Setup MCC sys pipe */ 2354 QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 2355 DP_IPA_MAX_IFACE; 2356 for (i = 0; i < DP_IPA_MAX_IFACE; i++) 2357 memcpy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[i], 2358 &sys_in[i], sizeof(qdf_ipa_sys_connect_params_t)); 2359 } 2360 #else 2361 static inline void dp_setup_mcc_sys_pipes( 2362 qdf_ipa_sys_connect_params_t *sys_in, 2363 qdf_ipa_wdi_conn_in_params_t *pipe_in) 2364 { 2365 QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0; 2366 } 2367 #endif 2368 2369 static void dp_ipa_wdi_tx_params(struct dp_soc *soc, 2370 struct dp_ipa_resources *ipa_res, 2371 qdf_ipa_wdi_pipe_setup_info_t *tx, 2372 bool over_gsi) 2373 { 2374 if 
(over_gsi) 2375 QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS; 2376 else 2377 QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS; 2378 2379 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) = 2380 qdf_mem_get_dma_addr(soc->osdev, 2381 &ipa_res->tx_comp_ring.mem_info); 2382 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) = 2383 qdf_mem_get_dma_size(soc->osdev, 2384 &ipa_res->tx_comp_ring.mem_info); 2385 2386 /* WBM Tail Pointer Address */ 2387 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) = 2388 soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr; 2389 QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true; 2390 2391 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) = 2392 qdf_mem_get_dma_addr(soc->osdev, 2393 &ipa_res->tx_ring.mem_info); 2394 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = 2395 qdf_mem_get_dma_size(soc->osdev, 2396 &ipa_res->tx_ring.mem_info); 2397 2398 /* TCL Head Pointer Address */ 2399 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) = 2400 soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr; 2401 QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true; 2402 2403 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) = 2404 ipa_res->tx_num_alloc_buffer; 2405 2406 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0; 2407 2408 dp_ipa_setup_tx_params_bank_id(soc, tx); 2409 2410 /* Set Pmac ID, extract pmac_id from pdev_id 0 for TX ring */ 2411 dp_ipa_setup_tx_params_pmac_id(soc, tx); 2412 } 2413 2414 static void dp_ipa_wdi_rx_params(struct dp_soc *soc, 2415 struct dp_ipa_resources *ipa_res, 2416 qdf_ipa_wdi_pipe_setup_info_t *rx, 2417 bool over_gsi) 2418 { 2419 if (over_gsi) 2420 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = 2421 IPA_CLIENT_WLAN2_PROD; 2422 else 2423 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = 2424 IPA_CLIENT_WLAN1_PROD; 2425 2426 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) = 2427 qdf_mem_get_dma_addr(soc->osdev, 2428 &ipa_res->rx_rdy_ring.mem_info); 2429 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) = 2430 qdf_mem_get_dma_size(soc->osdev, 2431 &ipa_res->rx_rdy_ring.mem_info); 
2432 2433 /* REO Tail Pointer Address */ 2434 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) = 2435 soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr; 2436 QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true; 2437 2438 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) = 2439 qdf_mem_get_dma_addr(soc->osdev, 2440 &ipa_res->rx_refill_ring.mem_info); 2441 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) = 2442 qdf_mem_get_dma_size(soc->osdev, 2443 &ipa_res->rx_refill_ring.mem_info); 2444 2445 /* FW Head Pointer Address */ 2446 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) = 2447 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr; 2448 QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false; 2449 2450 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = 2451 soc->rx_pkt_tlv_size + L3_HEADER_PADDING; 2452 } 2453 2454 static void 2455 dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc, 2456 struct dp_ipa_resources *ipa_res, 2457 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu, 2458 bool over_gsi, 2459 qdf_ipa_wdi_hdl_t hdl) 2460 { 2461 if (over_gsi) { 2462 if (hdl == DP_IPA_HDL_FIRST) 2463 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = 2464 IPA_CLIENT_WLAN2_CONS; 2465 else if (hdl == DP_IPA_HDL_SECOND) 2466 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = 2467 IPA_CLIENT_WLAN4_CONS; 2468 else if (hdl == DP_IPA_HDL_THIRD) 2469 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = 2470 IPA_CLIENT_WLAN1_CONS; 2471 } else { 2472 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = 2473 IPA_CLIENT_WLAN1_CONS; 2474 } 2475 2476 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu), 2477 &ipa_res->tx_comp_ring.sgtable, 2478 sizeof(sgtable_t)); 2479 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) = 2480 qdf_mem_get_dma_size(soc->osdev, 2481 &ipa_res->tx_comp_ring.mem_info); 2482 /* WBM Tail Pointer Address */ 2483 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) = 2484 soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr; 2485 dp_ipa_update_txr_db_status(soc->osdev->dev, tx_smmu); 2486 2487 
qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu), 2488 &ipa_res->tx_ring.sgtable, 2489 sizeof(sgtable_t)); 2490 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) = 2491 qdf_mem_get_dma_size(soc->osdev, 2492 &ipa_res->tx_ring.mem_info); 2493 /* TCL Head Pointer Address */ 2494 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) = 2495 soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr; 2496 dp_ipa_update_evt_db_status(soc->osdev->dev, tx_smmu); 2497 2498 QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) = 2499 ipa_res->tx_num_alloc_buffer; 2500 QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0; 2501 2502 dp_ipa_setup_tx_smmu_params_bank_id(soc, tx_smmu); 2503 2504 /* Set Pmac ID, extract pmac_id from first pdev for TX ring */ 2505 dp_ipa_setup_tx_smmu_params_pmac_id(soc, tx_smmu); 2506 } 2507 2508 static void 2509 dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc, 2510 struct dp_ipa_resources *ipa_res, 2511 qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu, 2512 bool over_gsi, 2513 qdf_ipa_wdi_hdl_t hdl) 2514 { 2515 if (over_gsi) { 2516 if (hdl == DP_IPA_HDL_FIRST) 2517 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) = 2518 IPA_CLIENT_WLAN2_PROD; 2519 else if (hdl == DP_IPA_HDL_SECOND) 2520 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) = 2521 IPA_CLIENT_WLAN3_PROD; 2522 else if (hdl == DP_IPA_HDL_THIRD) 2523 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) = 2524 IPA_CLIENT_WLAN1_PROD; 2525 } else { 2526 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) = 2527 IPA_CLIENT_WLAN1_PROD; 2528 } 2529 2530 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu), 2531 &ipa_res->rx_rdy_ring.sgtable, 2532 sizeof(sgtable_t)); 2533 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) = 2534 qdf_mem_get_dma_size(soc->osdev, 2535 &ipa_res->rx_rdy_ring.mem_info); 2536 /* REO Tail Pointer Address */ 2537 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) = 2538 soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr; 2539 dp_ipa_update_txr_db_status(soc->osdev->dev, rx_smmu); 2540 
2541 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu), 2542 &ipa_res->rx_refill_ring.sgtable, 2543 sizeof(sgtable_t)); 2544 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) = 2545 qdf_mem_get_dma_size(soc->osdev, 2546 &ipa_res->rx_refill_ring.mem_info); 2547 2548 /* FW Head Pointer Address */ 2549 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) = 2550 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr; 2551 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false; 2552 2553 QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) = 2554 soc->rx_pkt_tlv_size + L3_HEADER_PADDING; 2555 } 2556 2557 #ifdef IPA_WDI3_VLAN_SUPPORT 2558 /** 2559 * dp_ipa_wdi_rx_alt_pipe_smmu_params() - Setup 2nd rx pipe smmu params 2560 * @soc: data path soc handle 2561 * @ipa_res: ipa resource pointer 2562 * @rx_smmu: smmu pipe info handle 2563 * @over_gsi: flag for IPA offload over gsi 2564 * @hdl: ipa registered handle 2565 * 2566 * Return: none 2567 */ 2568 static void 2569 dp_ipa_wdi_rx_alt_pipe_smmu_params(struct dp_soc *soc, 2570 struct dp_ipa_resources *ipa_res, 2571 qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu, 2572 bool over_gsi, 2573 qdf_ipa_wdi_hdl_t hdl) 2574 { 2575 if (!wlan_ipa_is_vlan_enabled()) 2576 return; 2577 2578 if (over_gsi) { 2579 if (hdl == DP_IPA_HDL_FIRST) 2580 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) = 2581 IPA_CLIENT_WLAN2_PROD1; 2582 else if (hdl == DP_IPA_HDL_SECOND) 2583 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) = 2584 IPA_CLIENT_WLAN3_PROD1; 2585 else if (hdl == DP_IPA_HDL_THIRD) 2586 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx_smmu) = 2587 IPA_CLIENT_WLAN1_PROD1; 2588 } else { 2589 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) = 2590 IPA_CLIENT_WLAN1_PROD; 2591 } 2592 2593 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu), 2594 &ipa_res->rx_alt_rdy_ring.sgtable, 2595 sizeof(sgtable_t)); 2596 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) = 2597 qdf_mem_get_dma_size(soc->osdev, 2598 
&ipa_res->rx_alt_rdy_ring.mem_info); 2599 /* REO Tail Pointer Address */ 2600 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) = 2601 soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr; 2602 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true; 2603 2604 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu), 2605 &ipa_res->rx_alt_refill_ring.sgtable, 2606 sizeof(sgtable_t)); 2607 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) = 2608 qdf_mem_get_dma_size(soc->osdev, 2609 &ipa_res->rx_alt_refill_ring.mem_info); 2610 2611 /* FW Head Pointer Address */ 2612 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) = 2613 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr; 2614 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false; 2615 2616 QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) = 2617 soc->rx_pkt_tlv_size + L3_HEADER_PADDING; 2618 } 2619 2620 /** 2621 * dp_ipa_wdi_rx_alt_pipe_params() - Setup 2nd rx pipe params 2622 * @soc: data path soc handle 2623 * @ipa_res: ipa resource pointer 2624 * @rx: pipe info handle 2625 * @over_gsi: flag for IPA offload over gsi 2626 * @hdl: ipa registered handle 2627 * 2628 * Return: none 2629 */ 2630 static void dp_ipa_wdi_rx_alt_pipe_params(struct dp_soc *soc, 2631 struct dp_ipa_resources *ipa_res, 2632 qdf_ipa_wdi_pipe_setup_info_t *rx, 2633 bool over_gsi, 2634 qdf_ipa_wdi_hdl_t hdl) 2635 { 2636 if (!wlan_ipa_is_vlan_enabled()) 2637 return; 2638 2639 if (over_gsi) { 2640 if (hdl == DP_IPA_HDL_FIRST) 2641 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = 2642 IPA_CLIENT_WLAN2_PROD1; 2643 else if (hdl == DP_IPA_HDL_SECOND) 2644 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = 2645 IPA_CLIENT_WLAN3_PROD1; 2646 else if (hdl == DP_IPA_HDL_THIRD) 2647 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = 2648 IPA_CLIENT_WLAN1_PROD1; 2649 } else { 2650 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = 2651 IPA_CLIENT_WLAN1_PROD; 2652 } 2653 2654 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) = 2655 qdf_mem_get_dma_addr(soc->osdev, 2656 
&ipa_res->rx_alt_rdy_ring.mem_info); 2657 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) = 2658 qdf_mem_get_dma_size(soc->osdev, 2659 &ipa_res->rx_alt_rdy_ring.mem_info); 2660 2661 /* REO Tail Pointer Address */ 2662 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) = 2663 soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr; 2664 QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true; 2665 2666 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) = 2667 qdf_mem_get_dma_addr(soc->osdev, 2668 &ipa_res->rx_alt_refill_ring.mem_info); 2669 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) = 2670 qdf_mem_get_dma_size(soc->osdev, 2671 &ipa_res->rx_alt_refill_ring.mem_info); 2672 2673 /* FW Head Pointer Address */ 2674 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) = 2675 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr; 2676 QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false; 2677 2678 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = 2679 soc->rx_pkt_tlv_size + L3_HEADER_PADDING; 2680 } 2681 2682 /** 2683 * dp_ipa_setup_rx_alt_pipe() - Setup 2nd rx pipe for IPA offload 2684 * @soc: data path soc handle 2685 * @res: ipa resource pointer 2686 * @in: pipe in handle 2687 * @over_gsi: flag for IPA offload over gsi 2688 * @hdl: ipa registered handle 2689 * 2690 * Return: none 2691 */ 2692 static void dp_ipa_setup_rx_alt_pipe(struct dp_soc *soc, 2693 struct dp_ipa_resources *res, 2694 qdf_ipa_wdi_conn_in_params_t *in, 2695 bool over_gsi, 2696 qdf_ipa_wdi_hdl_t hdl) 2697 { 2698 qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu = NULL; 2699 qdf_ipa_wdi_pipe_setup_info_t *rx = NULL; 2700 qdf_ipa_ep_cfg_t *rx_cfg; 2701 2702 if (!wlan_ipa_is_vlan_enabled()) 2703 return; 2704 2705 QDF_IPA_WDI_CONN_IN_PARAMS_IS_RX1_USED(in) = true; 2706 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 2707 rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_ALT_SMMU(in); 2708 rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu); 2709 dp_ipa_wdi_rx_alt_pipe_smmu_params(soc, res, rx_smmu, 2710 over_gsi, hdl); 2711 } else { 2712 rx = 
&QDF_IPA_WDI_CONN_IN_PARAMS_RX_ALT(in); 2713 rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx); 2714 dp_ipa_wdi_rx_alt_pipe_params(soc, res, rx, over_gsi, hdl); 2715 } 2716 2717 QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT; 2718 /* Update with wds len(96) + 4 if wds support is enabled */ 2719 if (ucfg_ipa_is_wds_enabled()) 2720 QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN_AST_VLAN; 2721 else 2722 QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN; 2723 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1; 2724 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0; 2725 QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0; 2726 QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0; 2727 QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1; 2728 QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC; 2729 QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true; 2730 } 2731 2732 /** 2733 * dp_ipa_set_rx_alt_pipe_db() - Setup 2nd rx pipe doorbell 2734 * @res: ipa resource pointer 2735 * @out: pipe out handle 2736 * 2737 * Return: none 2738 */ 2739 static void dp_ipa_set_rx_alt_pipe_db(struct dp_ipa_resources *res, 2740 qdf_ipa_wdi_conn_out_params_t *out) 2741 { 2742 if (!wlan_ipa_is_vlan_enabled()) 2743 return; 2744 2745 res->rx_alt_ready_doorbell_paddr = 2746 QDF_IPA_WDI_CONN_OUT_PARAMS_RX_ALT_UC_DB_PA(out); 2747 dp_debug("Setting DB 0x%x for RX alt pipe", 2748 res->rx_alt_ready_doorbell_paddr); 2749 } 2750 #else 2751 static inline 2752 void dp_ipa_setup_rx_alt_pipe(struct dp_soc *soc, 2753 struct dp_ipa_resources *res, 2754 qdf_ipa_wdi_conn_in_params_t *in, 2755 bool over_gsi, 2756 qdf_ipa_wdi_hdl_t hdl) 2757 { } 2758 2759 static inline 2760 void dp_ipa_set_rx_alt_pipe_db(struct dp_ipa_resources *res, 2761 qdf_ipa_wdi_conn_out_params_t *out) 2762 { } 2763 #endif 2764 2765 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2766 void *ipa_i2w_cb, void *ipa_w2i_cb, 2767 void *ipa_wdi_meter_notifier_cb, 2768 uint32_t ipa_desc_size, void *ipa_priv, 2769 bool 
is_rm_enabled, uint32_t *tx_pipe_handle, 2770 uint32_t *rx_pipe_handle, bool is_smmu_enabled, 2771 qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi, 2772 qdf_ipa_wdi_hdl_t hdl, qdf_ipa_wdi_hdl_t id, 2773 void *ipa_ast_notify_cb) 2774 { 2775 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 2776 struct dp_pdev *pdev = 2777 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2778 struct dp_ipa_resources *ipa_res; 2779 qdf_ipa_ep_cfg_t *tx_cfg; 2780 qdf_ipa_ep_cfg_t *rx_cfg; 2781 qdf_ipa_wdi_pipe_setup_info_t *tx = NULL; 2782 qdf_ipa_wdi_pipe_setup_info_t *rx = NULL; 2783 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu; 2784 qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu = NULL; 2785 qdf_ipa_wdi_conn_in_params_t *pipe_in = NULL; 2786 qdf_ipa_wdi_conn_out_params_t pipe_out; 2787 int ret; 2788 2789 if (!pdev) { 2790 dp_err("Invalid instance"); 2791 return QDF_STATUS_E_FAILURE; 2792 } 2793 2794 ipa_res = &pdev->ipa_resource; 2795 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 2796 return QDF_STATUS_SUCCESS; 2797 2798 pipe_in = qdf_mem_malloc(sizeof(*pipe_in)); 2799 if (!pipe_in) 2800 return QDF_STATUS_E_NOMEM; 2801 2802 qdf_mem_zero(&pipe_out, sizeof(pipe_out)); 2803 2804 if (is_smmu_enabled) 2805 QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = true; 2806 else 2807 QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = false; 2808 2809 dp_setup_mcc_sys_pipes(sys_in, pipe_in); 2810 2811 /* TX PIPE */ 2812 if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) { 2813 tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(pipe_in); 2814 tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu); 2815 } else { 2816 tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(pipe_in); 2817 tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx); 2818 } 2819 2820 QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT; 2821 QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN; 2822 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0; 2823 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0; 2824 
QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0; 2825 QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC; 2826 QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true; 2827 2828 /* 2829 * Transfer Ring: WBM Ring 2830 * Transfer Ring Doorbell PA: WBM Tail Pointer Address 2831 * Event Ring: TCL ring 2832 * Event Ring Doorbell PA: TCL Head Pointer Address 2833 */ 2834 if (is_smmu_enabled) 2835 dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi, id); 2836 else 2837 dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi); 2838 2839 dp_ipa_setup_tx_alt_pipe(soc, ipa_res, pipe_in); 2840 2841 /* RX PIPE */ 2842 if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) { 2843 rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(pipe_in); 2844 rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu); 2845 } else { 2846 rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(pipe_in); 2847 rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx); 2848 } 2849 2850 QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT; 2851 if (ucfg_ipa_is_wds_enabled()) 2852 QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN_AST; 2853 else 2854 QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN; 2855 2856 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1; 2857 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0; 2858 QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0; 2859 QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0; 2860 QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1; 2861 QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC; 2862 QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true; 2863 2864 /* 2865 * Transfer Ring: REO Ring 2866 * Transfer Ring Doorbell PA: REO Tail Pointer Address 2867 * Event Ring: FW ring 2868 * Event Ring Doorbell PA: FW Head Pointer Address 2869 */ 2870 if (is_smmu_enabled) 2871 dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi, id); 2872 else 2873 dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi); 2874 2875 /* setup 2nd rx pipe */ 2876 dp_ipa_setup_rx_alt_pipe(soc, ipa_res, pipe_in, over_gsi, id); 2877 2878 
QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(pipe_in) = ipa_w2i_cb; 2879 QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(pipe_in) = ipa_priv; 2880 QDF_IPA_WDI_CONN_IN_PARAMS_HANDLE(pipe_in) = hdl; 2881 dp_ipa_ast_notify_cb(pipe_in, ipa_ast_notify_cb); 2882 2883 /* Connect WDI IPA PIPEs */ 2884 ret = qdf_ipa_wdi_conn_pipes(pipe_in, &pipe_out); 2885 2886 if (ret) { 2887 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2888 "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d", 2889 __func__, ret); 2890 qdf_mem_free(pipe_in); 2891 return QDF_STATUS_E_FAILURE; 2892 } 2893 2894 /* IPA uC Doorbell registers */ 2895 dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x", 2896 (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out), 2897 (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out)); 2898 2899 dp_ipa_set_pipe_db(ipa_res, &pipe_out); 2900 dp_ipa_set_rx_alt_pipe_db(ipa_res, &pipe_out); 2901 2902 ipa_res->is_db_ddr_mapped = 2903 QDF_IPA_WDI_CONN_OUT_PARAMS_IS_DB_DDR_MAPPED(&pipe_out); 2904 2905 soc->ipa_first_tx_db_access = true; 2906 qdf_mem_free(pipe_in); 2907 2908 qdf_spinlock_create(&soc->ipa_rx_buf_map_lock); 2909 soc->ipa_rx_buf_map_lock_initialized = true; 2910 2911 return QDF_STATUS_SUCCESS; 2912 } 2913 2914 #ifdef IPA_WDI3_VLAN_SUPPORT 2915 /** 2916 * dp_ipa_set_rx1_used() - Set rx1 used flag for 2nd rx offload ring 2917 * @in: pipe in handle 2918 * 2919 * Return: none 2920 */ 2921 static inline 2922 void dp_ipa_set_rx1_used(qdf_ipa_wdi_reg_intf_in_params_t *in) 2923 { 2924 QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_RX1_USED(in) = true; 2925 } 2926 2927 /** 2928 * dp_ipa_set_v4_vlan_hdr() - Set v4 vlan hdr 2929 * @in: pipe in handle 2930 * @hdr: pointer to hdr 2931 * 2932 * Return: none 2933 */ 2934 static inline 2935 void dp_ipa_set_v4_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in, 2936 qdf_ipa_wdi_hdr_info_t *hdr) 2937 { 2938 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(in)[IPA_IP_v4_VLAN]), 2939 hdr, sizeof(qdf_ipa_wdi_hdr_info_t)); 2940 } 2941 2942 /** 2943 * 
dp_ipa_set_v6_vlan_hdr() - Set v6 vlan hdr 2944 * @in: pipe in handle 2945 * @hdr: pointer to hdr 2946 * 2947 * Return: none 2948 */ 2949 static inline 2950 void dp_ipa_set_v6_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in, 2951 qdf_ipa_wdi_hdr_info_t *hdr) 2952 { 2953 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(in)[IPA_IP_v6_VLAN]), 2954 hdr, sizeof(qdf_ipa_wdi_hdr_info_t)); 2955 } 2956 #else 2957 static inline 2958 void dp_ipa_set_rx1_used(qdf_ipa_wdi_reg_intf_in_params_t *in) 2959 { } 2960 2961 static inline 2962 void dp_ipa_set_v4_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in, 2963 qdf_ipa_wdi_hdr_info_t *hdr) 2964 { } 2965 2966 static inline 2967 void dp_ipa_set_v6_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in, 2968 qdf_ipa_wdi_hdr_info_t *hdr) 2969 { } 2970 #endif 2971 2972 #ifdef IPA_WDS_EASYMESH_FEATURE 2973 /** 2974 * dp_ipa_set_wdi_hdr_type() - Set wdi hdr type for IPA 2975 * @hdr_info: Header info 2976 * 2977 * Return: None 2978 */ 2979 static inline void 2980 dp_ipa_set_wdi_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info) 2981 { 2982 if (ucfg_ipa_is_wds_enabled()) 2983 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) = 2984 IPA_HDR_L2_ETHERNET_II_AST; 2985 else 2986 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) = 2987 IPA_HDR_L2_ETHERNET_II; 2988 } 2989 2990 /** 2991 * dp_ipa_setup_meta_data_mask() - Pass meta data mask to IPA 2992 * @in: ipa in params 2993 * 2994 * Pass meta data mask to IPA. 
2995 * 2996 * Return: none 2997 */ 2998 static void dp_ipa_setup_meta_data_mask(qdf_ipa_wdi_reg_intf_in_params_t *in) 2999 { 3000 if (ucfg_ipa_is_wds_enabled()) 3001 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(in) = WLAN_IPA_AST_META_DATA_MASK; 3002 else 3003 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(in) = WLAN_IPA_META_DATA_MASK; 3004 } 3005 #else 3006 static inline void 3007 dp_ipa_set_wdi_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info) 3008 { 3009 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) = IPA_HDR_L2_ETHERNET_II; 3010 } 3011 3012 static void dp_ipa_setup_meta_data_mask(qdf_ipa_wdi_reg_intf_in_params_t *in) 3013 { 3014 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(in) = WLAN_IPA_META_DATA_MASK; 3015 } 3016 #endif 3017 3018 #ifdef IPA_WDI3_VLAN_SUPPORT 3019 /** 3020 * dp_ipa_set_wdi_vlan_hdr_type() - Set wdi vlan hdr type for IPA 3021 * @hdr_info: Header info 3022 * 3023 * Return: None 3024 */ 3025 static inline void 3026 dp_ipa_set_wdi_vlan_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info) 3027 { 3028 if (ucfg_ipa_is_wds_enabled()) 3029 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) = 3030 IPA_HDR_L2_802_1Q_AST; 3031 else 3032 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) = 3033 IPA_HDR_L2_802_1Q; 3034 } 3035 #else 3036 static inline void 3037 dp_ipa_set_wdi_vlan_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info) 3038 { } 3039 #endif 3040 3041 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr, 3042 qdf_ipa_client_type_t prod_client, 3043 qdf_ipa_client_type_t cons_client, 3044 uint8_t session_id, bool is_ipv6_enabled, 3045 qdf_ipa_wdi_hdl_t hdl) 3046 { 3047 qdf_ipa_wdi_reg_intf_in_params_t in; 3048 qdf_ipa_wdi_hdr_info_t hdr_info; 3049 struct dp_ipa_uc_tx_hdr uc_tx_hdr; 3050 struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6; 3051 struct dp_ipa_uc_tx_vlan_hdr uc_tx_vlan_hdr; 3052 struct dp_ipa_uc_tx_vlan_hdr uc_tx_vlan_hdr_v6; 3053 int ret = -EINVAL; 3054 3055 qdf_mem_zero(&in, sizeof(qdf_ipa_wdi_reg_intf_in_params_t)); 3056 3057 /* Need to reset the values to 0 as all the fields are not 3058 
* updated in the Header, Unused fields will be set to 0. 3059 */ 3060 qdf_mem_zero(&uc_tx_vlan_hdr, sizeof(struct dp_ipa_uc_tx_vlan_hdr)); 3061 qdf_mem_zero(&uc_tx_vlan_hdr_v6, sizeof(struct dp_ipa_uc_tx_vlan_hdr)); 3062 3063 dp_debug("Add Partial hdr: %s, "QDF_MAC_ADDR_FMT, ifname, 3064 QDF_MAC_ADDR_REF(mac_addr)); 3065 qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 3066 qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr); 3067 3068 /* IPV4 header */ 3069 uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP); 3070 3071 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr; 3072 QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN; 3073 dp_ipa_set_wdi_hdr_type(&hdr_info); 3074 3075 QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) = 3076 DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET; 3077 3078 QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname; 3079 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]), 3080 &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 3081 QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client; 3082 QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1; 3083 dp_ipa_setup_meta_data_mask(&in); 3084 QDF_IPA_WDI_REG_INTF_IN_PARAMS_HANDLE(&in) = hdl; 3085 dp_ipa_setup_iface_session_id(&in, session_id); 3086 dp_debug("registering for session_id: %u", session_id); 3087 3088 /* IPV6 header */ 3089 if (is_ipv6_enabled) { 3090 qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr, 3091 DP_IPA_UC_WLAN_TX_HDR_LEN); 3092 uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6); 3093 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6; 3094 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]), 3095 &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 3096 } 3097 3098 if (wlan_ipa_is_vlan_enabled()) { 3099 /* Add vlan specific headers if vlan supporti is enabled */ 3100 qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 3101 dp_ipa_set_rx1_used(&in); 3102 qdf_ether_addr_copy(uc_tx_vlan_hdr.eth.h_source, 
mac_addr); 3103 /* IPV4 Vlan header */ 3104 uc_tx_vlan_hdr.eth.h_vlan_proto = qdf_htons(ETH_P_8021Q); 3105 uc_tx_vlan_hdr.eth.h_vlan_encapsulated_proto = qdf_htons(ETH_P_IP); 3106 3107 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = 3108 (uint8_t *)&uc_tx_vlan_hdr; 3109 QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = 3110 DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN; 3111 dp_ipa_set_wdi_vlan_hdr_type(&hdr_info); 3112 3113 QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) = 3114 DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET; 3115 3116 dp_ipa_set_v4_vlan_hdr(&in, &hdr_info); 3117 3118 /* IPV6 Vlan header */ 3119 if (is_ipv6_enabled) { 3120 qdf_mem_copy(&uc_tx_vlan_hdr_v6, &uc_tx_vlan_hdr, 3121 DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN); 3122 uc_tx_vlan_hdr_v6.eth.h_vlan_proto = 3123 qdf_htons(ETH_P_8021Q); 3124 uc_tx_vlan_hdr_v6.eth.h_vlan_encapsulated_proto = 3125 qdf_htons(ETH_P_IPV6); 3126 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = 3127 (uint8_t *)&uc_tx_vlan_hdr_v6; 3128 dp_ipa_set_v6_vlan_hdr(&in, &hdr_info); 3129 } 3130 } 3131 3132 ret = qdf_ipa_wdi_reg_intf(&in); 3133 if (ret) { 3134 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3135 "%s: ipa_wdi_reg_intf: register IPA interface failed: ret=%d", 3136 __func__, ret); 3137 return QDF_STATUS_E_FAILURE; 3138 } 3139 3140 return QDF_STATUS_SUCCESS; 3141 } 3142 3143 #else /* !CONFIG_IPA_WDI_UNIFIED_API */ 3144 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3145 void *ipa_i2w_cb, void *ipa_w2i_cb, 3146 void *ipa_wdi_meter_notifier_cb, 3147 uint32_t ipa_desc_size, void *ipa_priv, 3148 bool is_rm_enabled, uint32_t *tx_pipe_handle, 3149 uint32_t *rx_pipe_handle) 3150 { 3151 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3152 struct dp_pdev *pdev = 3153 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 3154 struct dp_ipa_resources *ipa_res; 3155 qdf_ipa_wdi_pipe_setup_info_t *tx; 3156 qdf_ipa_wdi_pipe_setup_info_t *rx; 3157 qdf_ipa_wdi_conn_in_params_t pipe_in; 3158 qdf_ipa_wdi_conn_out_params_t pipe_out; 3159 struct tcl_data_cmd *tcl_desc_ptr; 
3160 uint8_t *desc_addr; 3161 uint32_t desc_size; 3162 int ret; 3163 3164 if (!pdev) { 3165 dp_err("Invalid instance"); 3166 return QDF_STATUS_E_FAILURE; 3167 } 3168 3169 ipa_res = &pdev->ipa_resource; 3170 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 3171 return QDF_STATUS_SUCCESS; 3172 3173 qdf_mem_zero(&tx, sizeof(qdf_ipa_wdi_pipe_setup_info_t)); 3174 qdf_mem_zero(&rx, sizeof(qdf_ipa_wdi_pipe_setup_info_t)); 3175 qdf_mem_zero(&pipe_in, sizeof(pipe_in)); 3176 qdf_mem_zero(&pipe_out, sizeof(pipe_out)); 3177 3178 /* TX PIPE */ 3179 /* 3180 * Transfer Ring: WBM Ring 3181 * Transfer Ring Doorbell PA: WBM Tail Pointer Address 3182 * Event Ring: TCL ring 3183 * Event Ring Doorbell PA: TCL Head Pointer Address 3184 */ 3185 tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in); 3186 QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT; 3187 QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN; 3188 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0; 3189 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0; 3190 QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0; 3191 QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC; 3192 QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true; 3193 QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS; 3194 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) = 3195 ipa_res->tx_comp_ring_base_paddr; 3196 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) = 3197 ipa_res->tx_comp_ring_size; 3198 /* WBM Tail Pointer Address */ 3199 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) = 3200 soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr; 3201 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) = 3202 ipa_res->tx_ring_base_paddr; 3203 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size; 3204 /* TCL Head Pointer Address */ 3205 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) = 3206 soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr; 3207 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) = 3208 ipa_res->tx_num_alloc_buffer; 3209 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 
0; 3210 3211 /* Preprogram TCL descriptor */ 3212 desc_addr = 3213 (uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx); 3214 desc_size = sizeof(struct tcl_data_cmd); 3215 HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size); 3216 tcl_desc_ptr = (struct tcl_data_cmd *) 3217 (QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1); 3218 tcl_desc_ptr->buf_addr_info.return_buffer_manager = 3219 HAL_RX_BUF_RBM_SW2_BM; 3220 tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */ 3221 tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET; 3222 tcl_desc_ptr->packet_offset = 2; /* padding for alignment */ 3223 3224 /* RX PIPE */ 3225 /* 3226 * Transfer Ring: REO Ring 3227 * Transfer Ring Doorbell PA: REO Tail Pointer Address 3228 * Event Ring: FW ring 3229 * Event Ring Doorbell PA: FW Head Pointer Address 3230 */ 3231 rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in); 3232 QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT; 3233 QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN; 3234 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0; 3235 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0; 3236 QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0; 3237 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0; 3238 QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1; 3239 QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC; 3240 QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true; 3241 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD; 3242 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) = 3243 ipa_res->rx_rdy_ring_base_paddr; 3244 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) = 3245 ipa_res->rx_rdy_ring_size; 3246 /* REO Tail Pointer Address */ 3247 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) = 3248 soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr; 3249 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) = 3250 ipa_res->rx_refill_ring_base_paddr; 3251 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) = 3252 ipa_res->rx_refill_ring_size; 3253 /* FW 
Head Pointer Address */ 3254 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) = 3255 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr; 3256 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = soc->rx_pkt_tlv_size + 3257 L3_HEADER_PADDING; 3258 QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb; 3259 QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv; 3260 3261 /* Connect WDI IPA PIPE */ 3262 ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out); 3263 if (ret) { 3264 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3265 "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d", 3266 __func__, ret); 3267 return QDF_STATUS_E_FAILURE; 3268 } 3269 3270 /* IPA uC Doorbell registers */ 3271 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 3272 "%s: Tx DB PA=0x%x, Rx DB PA=0x%x", 3273 __func__, 3274 (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out), 3275 (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out)); 3276 3277 ipa_res->tx_comp_doorbell_paddr = 3278 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out); 3279 ipa_res->tx_comp_doorbell_vaddr = 3280 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out); 3281 ipa_res->rx_ready_doorbell_paddr = 3282 QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out); 3283 3284 soc->ipa_first_tx_db_access = true; 3285 3286 qdf_spinlock_create(&soc->ipa_rx_buf_map_lock); 3287 soc->ipa_rx_buf_map_lock_initialized = true; 3288 3289 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 3290 "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK", 3291 __func__, 3292 "transfer_ring_base_pa", 3293 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx), 3294 "transfer_ring_size", 3295 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx), 3296 "transfer_ring_doorbell_pa", 3297 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx), 3298 "event_ring_base_pa", 3299 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx), 3300 "event_ring_size", 3301 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx), 3302 
"event_ring_doorbell_pa", 3303 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx), 3304 "num_pkt_buffers", 3305 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx), 3306 "tx_comp_doorbell_paddr", 3307 (void *)ipa_res->tx_comp_doorbell_paddr); 3308 3309 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 3310 "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK", 3311 __func__, 3312 "transfer_ring_base_pa", 3313 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx), 3314 "transfer_ring_size", 3315 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx), 3316 "transfer_ring_doorbell_pa", 3317 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx), 3318 "event_ring_base_pa", 3319 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx), 3320 "event_ring_size", 3321 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx), 3322 "event_ring_doorbell_pa", 3323 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx), 3324 "num_pkt_buffers", 3325 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx), 3326 "tx_comp_doorbell_paddr", 3327 (void *)ipa_res->rx_ready_doorbell_paddr); 3328 3329 return QDF_STATUS_SUCCESS; 3330 } 3331 3332 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr, 3333 qdf_ipa_client_type_t prod_client, 3334 qdf_ipa_client_type_t cons_client, 3335 uint8_t session_id, bool is_ipv6_enabled, 3336 qdf_ipa_wdi_hdl_t hdl) 3337 { 3338 qdf_ipa_wdi_reg_intf_in_params_t in; 3339 qdf_ipa_wdi_hdr_info_t hdr_info; 3340 struct dp_ipa_uc_tx_hdr uc_tx_hdr; 3341 struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6; 3342 int ret = -EINVAL; 3343 3344 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 3345 "%s: Add Partial hdr: %s, "QDF_MAC_ADDR_FMT, 3346 __func__, ifname, QDF_MAC_ADDR_REF(mac_addr)); 3347 3348 qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 3349 qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr); 3350 3351 /* IPV4 header */ 3352 uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP); 3353 3354 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr; 3355 
QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN; 3356 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II; 3357 QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) = 3358 DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET; 3359 3360 QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname; 3361 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]), 3362 &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 3363 QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1; 3364 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) = 3365 htonl(session_id << 16); 3366 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000); 3367 3368 /* IPV6 header */ 3369 if (is_ipv6_enabled) { 3370 qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr, 3371 DP_IPA_UC_WLAN_TX_HDR_LEN); 3372 uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6); 3373 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6; 3374 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]), 3375 &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 3376 } 3377 3378 ret = qdf_ipa_wdi_reg_intf(&in); 3379 if (ret) { 3380 dp_err("ipa_wdi_reg_intf: register IPA interface failed: ret=%d", 3381 ret); 3382 return QDF_STATUS_E_FAILURE; 3383 } 3384 3385 return QDF_STATUS_SUCCESS; 3386 } 3387 3388 #endif /* CONFIG_IPA_WDI_UNIFIED_API */ 3389 3390 QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3391 uint32_t tx_pipe_handle, uint32_t rx_pipe_handle, 3392 qdf_ipa_wdi_hdl_t hdl) 3393 { 3394 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3395 QDF_STATUS status = QDF_STATUS_SUCCESS; 3396 struct dp_pdev *pdev; 3397 int ret; 3398 3399 ret = qdf_ipa_wdi_disconn_pipes(hdl); 3400 if (ret) { 3401 dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d", 3402 ret); 3403 status = QDF_STATUS_E_FAILURE; 3404 } 3405 3406 if (soc->ipa_rx_buf_map_lock_initialized) { 3407 qdf_spinlock_destroy(&soc->ipa_rx_buf_map_lock); 3408 soc->ipa_rx_buf_map_lock_initialized = false; 3409 } 3410 
3411 pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 3412 if (qdf_unlikely(!pdev)) { 3413 dp_err_rl("Invalid pdev for pdev_id %d", pdev_id); 3414 status = QDF_STATUS_E_FAILURE; 3415 goto exit; 3416 } 3417 3418 dp_ipa_unmap_ring_doorbell_paddr(pdev); 3419 dp_ipa_unmap_rx_alt_ring_doorbell_paddr(pdev); 3420 exit: 3421 return status; 3422 } 3423 3424 QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled, 3425 qdf_ipa_wdi_hdl_t hdl) 3426 { 3427 int ret; 3428 3429 ret = qdf_ipa_wdi_dereg_intf(ifname, hdl); 3430 if (ret) { 3431 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3432 "%s: ipa_wdi_dereg_intf: IPA pipe deregistration failed: ret=%d", 3433 __func__, ret); 3434 return QDF_STATUS_E_FAILURE; 3435 } 3436 3437 return QDF_STATUS_SUCCESS; 3438 } 3439 3440 #ifdef IPA_SET_RESET_TX_DB_PA 3441 #define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res) \ 3442 dp_ipa_set_tx_doorbell_paddr((soc), (ipa_res)) 3443 #define DP_IPA_RESET_TX_DB_PA(soc, ipa_res) \ 3444 dp_ipa_reset_tx_doorbell_pa((soc), (ipa_res)) 3445 #else 3446 #define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res) 3447 #define DP_IPA_RESET_TX_DB_PA(soc, ipa_res) 3448 #endif 3449 3450 QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3451 qdf_ipa_wdi_hdl_t hdl) 3452 { 3453 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3454 struct dp_pdev *pdev = 3455 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 3456 struct dp_ipa_resources *ipa_res; 3457 QDF_STATUS result; 3458 3459 if (!pdev) { 3460 dp_err("Invalid instance"); 3461 return QDF_STATUS_E_FAILURE; 3462 } 3463 3464 ipa_res = &pdev->ipa_resource; 3465 3466 qdf_atomic_set(&soc->ipa_pipes_enabled, 1); 3467 DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res); 3468 3469 if (!ipa_config_is_opt_wifi_dp_enabled()) 3470 dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true, 3471 __func__, __LINE__); 3472 3473 result = qdf_ipa_wdi_enable_pipes(hdl); 3474 if (result) { 3475 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3476 "%s: Enable WDI PIPE 
fail, code %d", 3477 __func__, result); 3478 qdf_atomic_set(&soc->ipa_pipes_enabled, 0); 3479 DP_IPA_RESET_TX_DB_PA(soc, ipa_res); 3480 if (qdf_atomic_read(&soc->ipa_mapped)) 3481 dp_ipa_handle_rx_buf_pool_smmu_mapping( 3482 soc, pdev, false, __func__, __LINE__); 3483 return QDF_STATUS_E_FAILURE; 3484 } 3485 3486 if (soc->ipa_first_tx_db_access) { 3487 dp_ipa_tx_comp_ring_init_hp(soc, ipa_res); 3488 soc->ipa_first_tx_db_access = false; 3489 } 3490 3491 return QDF_STATUS_SUCCESS; 3492 } 3493 3494 QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3495 qdf_ipa_wdi_hdl_t hdl) 3496 { 3497 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3498 struct dp_pdev *pdev = 3499 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 3500 QDF_STATUS result; 3501 struct dp_ipa_resources *ipa_res; 3502 3503 if (!pdev) { 3504 dp_err("Invalid instance"); 3505 return QDF_STATUS_E_FAILURE; 3506 } 3507 3508 ipa_res = &pdev->ipa_resource; 3509 3510 qdf_sleep(TX_COMP_DRAIN_WAIT_TIMEOUT_MS); 3511 /* 3512 * Reset the tx completion doorbell address before invoking IPA disable 3513 * pipes API to ensure that there is no access to IPA tx doorbell 3514 * address post disable pipes. 3515 */ 3516 DP_IPA_RESET_TX_DB_PA(soc, ipa_res); 3517 3518 result = qdf_ipa_wdi_disable_pipes(hdl); 3519 if (result) { 3520 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3521 "%s: Disable WDI PIPE fail, code %d", 3522 __func__, result); 3523 qdf_assert_always(0); 3524 return QDF_STATUS_E_FAILURE; 3525 } 3526 3527 qdf_atomic_set(&soc->ipa_pipes_enabled, 0); 3528 3529 if (qdf_atomic_read(&soc->ipa_mapped)) 3530 dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false, 3531 __func__, __LINE__); 3532 3533 return result ? 
QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS; 3534 } 3535 3536 QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps, 3537 qdf_ipa_wdi_hdl_t hdl) 3538 { 3539 qdf_ipa_wdi_perf_profile_t profile; 3540 QDF_STATUS result; 3541 3542 profile.client = client; 3543 profile.max_supported_bw_mbps = max_supported_bw_mbps; 3544 3545 result = qdf_ipa_wdi_set_perf_profile(hdl, &profile); 3546 if (result) { 3547 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3548 "%s: ipa_wdi_set_perf_profile fail, code %d", 3549 __func__, result); 3550 return QDF_STATUS_E_FAILURE; 3551 } 3552 3553 return QDF_STATUS_SUCCESS; 3554 } 3555 3556 /** 3557 * dp_ipa_intrabss_send() - send IPA RX intra-bss frames 3558 * @pdev: pdev 3559 * @vdev: vdev 3560 * @nbuf: skb 3561 * 3562 * Return: nbuf if TX fails and NULL if TX succeeds 3563 */ 3564 static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev, 3565 struct dp_vdev *vdev, 3566 qdf_nbuf_t nbuf) 3567 { 3568 struct dp_peer *vdev_peer; 3569 uint16_t len; 3570 3571 vdev_peer = dp_vdev_bss_peer_ref_n_get(pdev->soc, vdev, DP_MOD_ID_IPA); 3572 if (qdf_unlikely(!vdev_peer)) 3573 return nbuf; 3574 3575 if (qdf_unlikely(!vdev_peer->txrx_peer)) { 3576 dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA); 3577 return nbuf; 3578 } 3579 3580 qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb)); 3581 len = qdf_nbuf_len(nbuf); 3582 3583 if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) { 3584 DP_PEER_PER_PKT_STATS_INC_PKT(vdev_peer->txrx_peer, 3585 rx.intra_bss.fail, 1, len, 3586 0); 3587 dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA); 3588 return nbuf; 3589 } 3590 3591 DP_PEER_PER_PKT_STATS_INC_PKT(vdev_peer->txrx_peer, 3592 rx.intra_bss.pkts, 1, len, 0); 3593 dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA); 3594 return NULL; 3595 } 3596 3597 #ifdef IPA_OPT_WIFI_DP 3598 /** 3599 * dp_ipa_rx_super_rule_setup()- pass cce super rule params to fw from ipa 3600 * 3601 * @soc_hdl: cdp soc 3602 * @flt_params: filter tuple 3603 * 3604 * Return: 
QDF_STATUS 3605 */ 3606 QDF_STATUS dp_ipa_rx_super_rule_setup(struct cdp_soc_t *soc_hdl, 3607 void *flt_params) 3608 { 3609 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3610 3611 return htt_h2t_rx_cce_super_rule_setup(soc->htt_handle, flt_params); 3612 } 3613 3614 /** 3615 * dp_ipa_wdi_opt_dpath_notify_flt_add_rem_cb()- send cce super rule filter 3616 * add/remove result to ipa 3617 * 3618 * @flt0_rslt : result for filter0 add/remove 3619 * @flt1_rslt : result for filter1 add/remove 3620 * 3621 * Return: void 3622 */ 3623 void dp_ipa_wdi_opt_dpath_notify_flt_add_rem_cb(int flt0_rslt, int flt1_rslt) 3624 { 3625 wlan_ipa_wdi_opt_dpath_notify_flt_add_rem_cb(flt0_rslt, flt1_rslt); 3626 } 3627 3628 int dp_ipa_pcie_link_up(struct cdp_soc_t *soc_hdl) 3629 { 3630 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3631 struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc; 3632 int response = 0; 3633 3634 response = hif_prevent_l1((hal_soc->hif_handle)); 3635 return response; 3636 } 3637 3638 void dp_ipa_pcie_link_down(struct cdp_soc_t *soc_hdl) 3639 { 3640 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3641 struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc; 3642 3643 hif_allow_l1(hal_soc->hif_handle); 3644 } 3645 3646 /** 3647 * dp_ipa_wdi_opt_dpath_notify_flt_rlsd()- send cce super rule release 3648 * notification to ipa 3649 * 3650 * @flt0_rslt : result for filter0 release 3651 * @flt1_rslt : result for filter1 release 3652 * 3653 *Return: void 3654 */ 3655 void dp_ipa_wdi_opt_dpath_notify_flt_rlsd(int flt0_rslt, int flt1_rslt) 3656 { 3657 wlan_ipa_wdi_opt_dpath_notify_flt_rlsd(flt0_rslt, flt1_rslt); 3658 } 3659 3660 /** 3661 * dp_ipa_wdi_opt_dpath_notify_flt_rsvd()- send cce super rule reserve 3662 * notification to ipa 3663 * 3664 *@is_success : result of filter reservatiom 3665 * 3666 *Return: void 3667 */ 3668 void dp_ipa_wdi_opt_dpath_notify_flt_rsvd(bool is_success) 3669 { 3670 wlan_ipa_wdi_opt_dpath_notify_flt_rsvd(is_success); 3671 } 3672 
#endif /* IPA_OPT_WIFI_DP */

#ifdef IPA_WDS_EASYMESH_FEATURE
/**
 * dp_ipa_peer_check() - Check for peer for given mac
 * @soc: dp soc object
 * @peer_mac_addr: peer mac address
 * @vdev_id: vdev id
 *
 * Looks the MAC up in the soc-wide AST table and confirms the resolved
 * peer belongs to @vdev_id. The ast_lock is held across the whole lookup,
 * presumably so the AST entry cannot be deleted underneath the peer
 * resolution — NOTE(review): confirm lock-ordering vs dp_peer_get_ref_by_id.
 *
 * Return: true if peer is found, else false
 */
static inline bool dp_ipa_peer_check(struct dp_soc *soc,
				     uint8_t *peer_mac_addr, uint8_t vdev_id)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_peer *peer = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);

	/* entry missing, or being torn down with no pending callback */
	if ((!ast_entry) ||
	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				     DP_MOD_ID_IPA);

	if (!peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	} else {
		if (peer->vdev->vdev_id == vdev_id) {
			dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
			qdf_spin_unlock_bh(&soc->ast_lock);
			return true;
		}
		dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
}
#else
/**
 * dp_ipa_peer_check() - Check for peer for given mac (non-easymesh)
 * @soc: dp soc object
 * @peer_mac_addr: peer mac address
 * @vdev_id: vdev id
 *
 * Direct peer-hash lookup variant used when WDS easymesh is not compiled in.
 *
 * Return: true if peer is found, else false
 */
static inline bool dp_ipa_peer_check(struct dp_soc *soc,
				     uint8_t *peer_mac_addr, uint8_t vdev_id)
{
	struct cdp_peer_info peer_info = {0};
	struct dp_peer *peer = NULL;

	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac_addr, false,
				 CDP_WILD_PEER_TYPE);

	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_IPA);
	if (peer) {
		dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
		return true;
	} else {
		return false;
	}
}
#endif

/**
 * dp_ipa_rx_intrabss_fwd() - Forward an IPA RX frame within the BSS
 * @soc_hdl: cdp soc handle
 * @vdev_id: vdev the frame arrived on
 * @nbuf: skb handed up by the IPA driver
 * @fwd_success: out-param, set true when the frame (or its copy) was sent
 *
 * Broadcast/multicast frames are copied and the copy is transmitted while
 * the original is passed up the stack (return false). Unicast frames whose
 * destination peer is on the same vdev are consumed and transmitted
 * (return true).
 *
 * Return: true if nbuf was consumed by intra-bss TX, false if the caller
 * should hand nbuf to the network stack
 */
bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			    qdf_nbuf_t nbuf, bool *fwd_success)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_IPA);
	struct dp_pdev *pdev;
	qdf_nbuf_t nbuf_copy;
	uint8_t da_is_bcmc;
	struct ethhdr *eh;
	bool status = false;

	*fwd_success = false; /* set default as failure */

	/*
	 * WDI 3.0 skb->cb[] info from IPA driver
	 * skb->cb[0] = vdev_id
	 * skb->cb[1].bit#1 = da_is_bcmc
	 */
	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;

	if (qdf_unlikely(!vdev))
		return false;

	pdev = vdev->pdev;
	if (qdf_unlikely(!pdev))
		goto out;

	/* no fwd for station mode and just pass up to stack */
	if (vdev->opmode == wlan_op_mode_sta)
		goto out;

	if (da_is_bcmc) {
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			goto out;

		if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy))
			qdf_nbuf_free(nbuf_copy);
		else
			*fwd_success = true;

		/* return false to pass original pkt up to stack */
		goto out;
	}

	eh = (struct ethhdr *)qdf_nbuf_data(nbuf);

	/* frame addressed to our own vdev MAC: let the stack take it */
	if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
		goto out;

	/* both DA and SA must be known peers on this vdev to forward */
	if (!dp_ipa_peer_check(soc, eh->h_dest, vdev->vdev_id))
		goto out;

	if (!dp_ipa_peer_check(soc, eh->h_source, vdev->vdev_id))
		goto out;

	/*
	 * In intra-bss forwarding scenario, skb is allocated by IPA driver.
	 * Need to add skb to internal tracking table to avoid nbuf memory
	 * leak check for unallocated skb.
	 */
	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);

	if (dp_ipa_intrabss_send(pdev, vdev, nbuf))
		qdf_nbuf_free(nbuf);
	else
		*fwd_success = true;

	status = true;
out:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_IPA);
	return status;
}

#ifdef MDM_PLATFORM
bool dp_ipa_is_mdm_platform(void)
{
	return true;
}
#else
bool dp_ipa_is_mdm_platform(void)
{
	return false;
}
#endif

/**
 * dp_ipa_frag_nbuf_linearize() - linearize nbuf for IPA
 * @soc: soc
 * @nbuf: source skb
 *
 * Copies a fragmented nbuf chain into one freshly allocated RX buffer,
 * keeping the RX pkt TLVs and inserting L3_HEADER_PADDING after them.
 * On success the original nbuf is freed.
 *
 * NOTE(review): the jumbo-length check runs after the allocation; checking
 * (nbuf_len + L3_HEADER_PADDING) > buf_size first would avoid a wasted
 * alloc/free — confirm and reorder separately.
 *
 * Return: new nbuf if success and otherwise NULL
 */
static qdf_nbuf_t dp_ipa_frag_nbuf_linearize(struct dp_soc *soc,
					     qdf_nbuf_t nbuf)
{
	uint8_t *src_nbuf_data;
	uint8_t *dst_nbuf_data;
	qdf_nbuf_t dst_nbuf;
	qdf_nbuf_t temp_nbuf = nbuf;
	uint32_t nbuf_len = qdf_nbuf_len(nbuf);
	bool is_nbuf_head = true;
	uint32_t copy_len = 0;
	uint16_t buf_size;

	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);

	dst_nbuf = qdf_nbuf_alloc(soc->osdev, buf_size,
				  RX_BUFFER_RESERVATION,
				  RX_DATA_BUFFER_ALIGNMENT, FALSE);

	if (!dst_nbuf) {
		dp_err_rl("nbuf allocate fail");
		return NULL;
	}

	if ((nbuf_len + L3_HEADER_PADDING) > buf_size) {
		qdf_nbuf_free(dst_nbuf);
		dp_err_rl("nbuf is jumbo data");
		return NULL;
	}

	/* prepeare to copy all data into new skb */
	dst_nbuf_data = qdf_nbuf_data(dst_nbuf);
	while (temp_nbuf) {
		src_nbuf_data = qdf_nbuf_data(temp_nbuf);
		/* first head nbuf */
		if (is_nbuf_head) {
			qdf_mem_copy(dst_nbuf_data, src_nbuf_data,
				     soc->rx_pkt_tlv_size);
			/* leave extra 2 bytes L3_HEADER_PADDING */
			dst_nbuf_data += (soc->rx_pkt_tlv_size +
					  L3_HEADER_PADDING);
			src_nbuf_data += soc->rx_pkt_tlv_size;
			copy_len = qdf_nbuf_headlen(temp_nbuf) -
						soc->rx_pkt_tlv_size;
			temp_nbuf = qdf_nbuf_get_ext_list(temp_nbuf);
			is_nbuf_head = false;
		} else {
			copy_len = qdf_nbuf_len(temp_nbuf);
			temp_nbuf = qdf_nbuf_queue_next(temp_nbuf);
		}
		qdf_mem_copy(dst_nbuf_data, src_nbuf_data, copy_len);
		dst_nbuf_data += copy_len;
	}

	qdf_nbuf_set_len(dst_nbuf, nbuf_len);
	/* copy is done, free original nbuf */
	qdf_nbuf_free(nbuf);

	return dst_nbuf;
}

/**
 * dp_ipa_handle_rx_reo_reinject() - Prepare an nbuf for REO reinjection
 * @soc: dp soc
 * @nbuf: RX nbuf
 *
 * Fragmented nbufs are linearized (IPA path works on contiguous buffers);
 * otherwise the nbuf is returned untouched. No-op when IPA is disabled.
 *
 * Return: nbuf (possibly a new linearized one), or NULL on linearize failure
 */
qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf)
{

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return nbuf;

	/* WLAN IPA is run-time disabled */
	if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
		return nbuf;

	if (!qdf_nbuf_is_frag(nbuf))
		return nbuf;

	/* linearize skb for IPA */
	return dp_ipa_frag_nbuf_linearize(soc, nbuf);
}

/**
 * dp_ipa_tx_buf_smmu_mapping() - SMMU-map the TX buffer pools for IPA
 * @soc_hdl: cdp soc handle
 * @pdev_id: dp pdev id
 * @func: caller name, for mapping bookkeeping
 * @line: caller line, for mapping bookkeeping
 *
 * Maps the main TX pool and the alternate TX pool; if the alternate pool
 * mapping fails, the main pool mapping is rolled back.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_tx_buf_smmu_mapping(
	struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	const char *func, uint32_t line)
{
	QDF_STATUS ret;

	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
		dp_debug("SMMU S1 disabled");
		return QDF_STATUS_SUCCESS;
	}
	ret = __dp_ipa_tx_buf_smmu_mapping(soc, pdev, true, func, line);
	if (ret)
		return ret;

	ret = dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, true, func, line);
	if (ret)
		__dp_ipa_tx_buf_smmu_mapping(soc, pdev, false, func, line);
	return ret;
}

/**
 * dp_ipa_tx_buf_smmu_unmapping() - SMMU-unmap the TX buffer pools for IPA
 * @soc_hdl: cdp soc handle
 * @pdev_id: dp pdev id
 * @func: caller name, for mapping bookkeeping
 * @line: caller line, for mapping bookkeeping
 *
 * NOTE(review): the SMMU check dereferences soc->osdev before pdev is
 * validated, the reverse of dp_ipa_tx_buf_smmu_mapping()'s order — confirm
 * whether the pdev check should come first for consistency.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(
	struct cdp_soc_t *soc_hdl, uint8_t pdev_id, const char *func,
	uint32_t line)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
		dp_debug("SMMU S1 disabled");
		return QDF_STATUS_SUCCESS;
	}

	if (!pdev) {
		dp_err("Invalid pdev instance pdev_id:%d", pdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (__dp_ipa_tx_buf_smmu_mapping(soc, pdev, false, func, line) ||
	    dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, false, func, line))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_rx_buf_pool_smmu_mapping() - SMMU map/unmap the RX buffer pool
 * @soc_hdl: cdp soc handle
 * @pdev_id: dp pdev id
 * @create: true to map, false to unmap
 * @func: caller name, for mapping bookkeeping
 * @line: caller line, for mapping bookkeeping
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_rx_buf_pool_smmu_mapping(
	struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	bool create, const char *func, uint32_t line)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
		dp_debug("SMMU S1 disabled");
		return QDF_STATUS_SUCCESS;
	}

	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, create, func, line);
	return QDF_STATUS_SUCCESS;
}
#ifdef IPA_WDS_EASYMESH_FEATURE
/**
 * dp_ipa_ast_create() - Trigger WDS source-port learning for an IPA frame
 * @soc_hdl: cdp soc handle
 * @data: AST info (TA peer id, skb with RX TLVs, AD4/first-MSDU flags)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_ast_create(struct cdp_soc_t *soc_hdl,
			     qdf_ipa_ast_info_type_t *data)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	uint8_t *rx_tlv_hdr;
	struct dp_peer *peer;
	struct hal_rx_msdu_metadata msdu_metadata;
	qdf_ipa_ast_info_type_t *ast_info;

	if (!data) {
		dp_err("Data is NULL !!!");
		return QDF_STATUS_E_FAILURE;
	}
	ast_info = data;

	rx_tlv_hdr = qdf_nbuf_data(ast_info->skb);
	peer = dp_peer_get_ref_by_id(soc, ast_info->ta_peer_id,
				     DP_MOD_ID_IPA);
	if (!peer) {
		dp_err("Peer is NULL !!!!");
		return QDF_STATUS_E_FAILURE;
	}

	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);

	dp_rx_ipa_wds_srcport_learn(soc, peer, ast_info->skb, msdu_metadata,
				    ast_info->mac_addr_ad4_valid,
				    ast_info->first_msdu_in_mpdu_flag);

	dp_peer_unref_delete(peer, DP_MOD_ID_IPA);

	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_ipa_update_peer_rx_stats() - Bump mcast/bcast RX stats for an IPA frame
 * @soc: cdp soc handle
 * @vdev_id: vdev id the frame arrived on
 * @peer_mac: transmitting peer MAC
 * @nbuf: received skb (cb[1] bit#1 carries da_is_bcmc from the IPA driver)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_update_peer_rx_stats(struct cdp_soc_t *soc,
				       uint8_t vdev_id, uint8_t *peer_mac,
				       qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      peer_mac, 0, vdev_id,
						      DP_MOD_ID_IPA);
	struct dp_txrx_peer *txrx_peer;
	uint8_t da_is_bcmc;
	qdf_ether_header_t *eh;

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	txrx_peer = dp_get_txrx_peer(peer);

	if (!txrx_peer) {
		dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
		return QDF_STATUS_E_FAILURE;
	}

	/* same cb[] encoding as dp_ipa_rx_intrabss_fwd() */
	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;
	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	if (da_is_bcmc) {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
					      qdf_nbuf_len(nbuf), 0);
		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast,
						      1, qdf_nbuf_len(nbuf), 0);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_IPA);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_aggregate_tid_stats() - Fold per-TID RX MSDU counts into rx_total
 * @peer: dp peer whose TID stats are aggregated
 *
 * Return: void
 */
void
dp_peer_aggregate_tid_stats(struct dp_peer *peer)
{
	uint8_t i = 0;
	struct dp_rx_tid *rx_tid = NULL;
	struct cdp_pkt_info rx_total = {0};
	struct dp_txrx_peer *txrx_peer = NULL;

	if (!peer->rx_tid)
		return;

	txrx_peer = dp_get_txrx_peer(peer);

	if (!txrx_peer)
		return;

	for (i = 0; i < DP_MAX_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];
		rx_total.num += rx_tid->rx_msdu_cnt.num;
		rx_total.bytes += rx_tid->rx_msdu_cnt.bytes;
	}

	DP_PEER_PER_PKT_STATS_UPD(txrx_peer, rx.rx_total.num,
				  rx_total.num, 0);
	DP_PEER_PER_PKT_STATS_UPD(txrx_peer, rx.rx_total.bytes,
				  rx_total.bytes, 0);
}

/**
 * dp_ipa_update_vdev_stats() - update vdev stats
 * @soc: soc handle
 * @srcobj: DP_PEER object
 * @arg: point to vdev stats structure
 *
 * Return: void
 */
static inline
void dp_ipa_update_vdev_stats(struct dp_soc *soc, struct dp_peer *srcobj,
			      void *arg)
{
	dp_peer_aggregate_tid_stats(srcobj);
	dp_update_vdev_stats(soc, srcobj, arg);
}

/**
 * dp_ipa_aggregate_vdev_stats() - Aggregate vdev_stats
 * @vdev: Data path vdev
 * @vdev_stats: buffer to hold vdev stats
 *
 * Iterates the vdev's peers, then derives unicast/to_stack counters from
 * the aggregated totals (unicast = total - multicast, clamped at zero by
 * the >= guards).
 *
 * Return: void
 */
static inline
void dp_ipa_aggregate_vdev_stats(struct dp_vdev *vdev,
				 struct cdp_vdev_stats *vdev_stats)
{
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	soc = vdev->pdev->soc;
	dp_update_vdev_ingress_stats(vdev);
	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
	dp_vdev_iterate_peer(vdev, dp_ipa_update_vdev_stats, vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);
	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);

	vdev_stats->tx.ucast.num = vdev_stats->tx.tx_ucast_total.num;
	vdev_stats->tx.ucast.bytes = vdev_stats->tx.tx_ucast_total.bytes;
	vdev_stats->tx.tx_success.num = vdev_stats->tx.tx_ucast_success.num;
	vdev_stats->tx.tx_success.bytes = vdev_stats->tx.tx_ucast_success.bytes;

	if (vdev_stats->rx.rx_total.num >= vdev_stats->rx.multicast.num)
		vdev_stats->rx.unicast.num = vdev_stats->rx.rx_total.num -
			vdev_stats->rx.multicast.num;
	if (vdev_stats->rx.rx_total.bytes >= vdev_stats->rx.multicast.bytes)
		vdev_stats->rx.unicast.bytes = vdev_stats->rx.rx_total.bytes -
			vdev_stats->rx.multicast.bytes;
	vdev_stats->rx.to_stack.num = vdev_stats->rx.rx_total.num;
	vdev_stats->rx.to_stack.bytes = vdev_stats->rx.rx_total.bytes;
}

/**
 * dp_ipa_aggregate_pdev_stats() - Aggregate pdev stats
 * @pdev: Data path pdev
 *
 * Zeroes the pdev tx/rx counters and re-accumulates them from every vdev
 * under the vdev_list_lock.
 *
 * NOTE(review): the local 'soc' is assigned but apparently never used in
 * this function — candidate for removal.
 *
 * Return: void
 */
static inline
void dp_ipa_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc;
	struct cdp_vdev_stats *vdev_stats =
			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		dp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
		       pdev->soc);
		return;
	}

	soc = pdev->soc;

	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		dp_ipa_aggregate_vdev_stats(vdev, vdev_stats);
		dp_update_pdev_stats(pdev, vdev_stats);
		dp_update_pdev_ingress_stats(pdev, vdev);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_mem_free(vdev_stats);
}

/**
 * dp_ipa_get_peer_stats() - Get peer stats
 * @peer: Data path peer
 * @peer_stats: buffer to hold peer stats
 *
 * Return: void
 */
static
void dp_ipa_get_peer_stats(struct dp_peer *peer,
			   struct cdp_peer_stats *peer_stats)
{
	dp_peer_aggregate_tid_stats(peer);
	dp_get_peer_stats(peer, peer_stats);

	peer_stats->tx.tx_success.num =
				peer_stats->tx.tx_ucast_success.num;
	peer_stats->tx.tx_success.bytes =
				peer_stats->tx.tx_ucast_success.bytes;
	peer_stats->tx.ucast.num =
				peer_stats->tx.tx_ucast_total.num;
	peer_stats->tx.ucast.bytes =
				peer_stats->tx.tx_ucast_total.bytes;

	if (peer_stats->rx.rx_total.num >= peer_stats->rx.multicast.num)
		peer_stats->rx.unicast.num = peer_stats->rx.rx_total.num -
			peer_stats->rx.multicast.num;

	if (peer_stats->rx.rx_total.bytes >= peer_stats->rx.multicast.bytes)
		peer_stats->rx.unicast.bytes = peer_stats->rx.rx_total.bytes -
peer_stats->rx.multicast.bytes; 4204 } 4205 4206 QDF_STATUS 4207 dp_ipa_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id, 4208 struct cdp_pdev_stats *pdev_stats) 4209 { 4210 struct dp_pdev *pdev = 4211 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 4212 pdev_id); 4213 if (!pdev) 4214 return QDF_STATUS_E_FAILURE; 4215 4216 dp_ipa_aggregate_pdev_stats(pdev); 4217 qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats)); 4218 4219 return QDF_STATUS_SUCCESS; 4220 } 4221 4222 int dp_ipa_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 4223 void *buf, bool is_aggregate) 4224 { 4225 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 4226 struct cdp_vdev_stats *vdev_stats; 4227 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 4228 DP_MOD_ID_IPA); 4229 4230 if (!vdev) 4231 return 1; 4232 4233 vdev_stats = (struct cdp_vdev_stats *)buf; 4234 dp_ipa_aggregate_vdev_stats(vdev, buf); 4235 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_IPA); 4236 4237 return 0; 4238 } 4239 4240 QDF_STATUS dp_ipa_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id, 4241 uint8_t *peer_mac, 4242 struct cdp_peer_stats *peer_stats) 4243 { 4244 struct dp_peer *peer = NULL; 4245 struct cdp_peer_info peer_info = { 0 }; 4246 4247 DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false, 4248 CDP_WILD_PEER_TYPE); 4249 4250 peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info, 4251 DP_MOD_ID_IPA); 4252 4253 qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats)); 4254 4255 if (!peer) 4256 return QDF_STATUS_E_FAILURE; 4257 4258 dp_ipa_get_peer_stats(peer, peer_stats); 4259 dp_peer_unref_delete(peer, DP_MOD_ID_IPA); 4260 4261 return QDF_STATUS_SUCCESS; 4262 } 4263 #endif 4264 4265 /** 4266 * dp_ipa_get_wdi_version() - Get WDI version 4267 * @soc_hdl: data path soc handle 4268 * @wdi_ver: Out parameter for wdi version 4269 * 4270 * Get WDI version based on soc arch 4271 * 4272 * Return: None 4273 */ 4274 void 
dp_ipa_get_wdi_version(struct cdp_soc_t *soc_hdl, uint8_t *wdi_ver) 4275 { 4276 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 4277 4278 if (soc->arch_ops.ipa_get_wdi_ver) 4279 soc->arch_ops.ipa_get_wdi_ver(wdi_ver); 4280 else 4281 *wdi_ver = IPA_WDI_3; 4282 } 4283 #endif 4284