1 /* 2 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 18 #ifdef IPA_OFFLOAD 19 20 #include <wlan_ipa_ucfg_api.h> 21 #include <qdf_ipa_wdi3.h> 22 #include <qdf_types.h> 23 #include <qdf_lock.h> 24 #include <hal_hw_headers.h> 25 #include <hal_api.h> 26 #include <hal_reo.h> 27 #include <hif.h> 28 #include <htt.h> 29 #include <wdi_event.h> 30 #include <queue.h> 31 #include "dp_types.h" 32 #include "dp_htt.h" 33 #include "dp_tx.h" 34 #include "dp_rx.h" 35 #include "dp_ipa.h" 36 #include "dp_internal.h" 37 #ifdef WIFI_MONITOR_SUPPORT 38 #include "dp_mon.h" 39 #endif 40 #ifdef FEATURE_WDS 41 #include "dp_txrx_wds.h" 42 #endif 43 44 /* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */ 45 #define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT (2048) 46 47 /* WAR for IPA_OFFLOAD case. In some cases, its observed that WBM tries to 48 * release a buffer into WBM2SW RELEASE ring for IPA, and the ring is full. 49 * This causes back pressure, resulting in a FW crash. 
50 * By leaving some entries with no buffer attached, WBM will be able to write 51 * to the ring, and from dumps we can figure out the buffer which is causing 52 * this issue. 53 */ 54 #define DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES 16 55 /** 56 *struct dp_ipa_reo_remap_record - history for dp ipa reo remaps 57 * @ix0_reg: reo destination ring IX0 value 58 * @ix2_reg: reo destination ring IX2 value 59 * @ix3_reg: reo destination ring IX3 value 60 */ 61 struct dp_ipa_reo_remap_record { 62 uint64_t timestamp; 63 uint32_t ix0_reg; 64 uint32_t ix2_reg; 65 uint32_t ix3_reg; 66 }; 67 68 #ifdef IPA_WDS_EASYMESH_FEATURE 69 #define WLAN_IPA_META_DATA_MASK htonl(0x000000FF) 70 #else 71 #define WLAN_IPA_META_DATA_MASK htonl(0x00FF0000) 72 #endif 73 74 #define REO_REMAP_HISTORY_SIZE 32 75 76 struct dp_ipa_reo_remap_record dp_ipa_reo_remap_history[REO_REMAP_HISTORY_SIZE]; 77 78 static qdf_atomic_t dp_ipa_reo_remap_history_index; 79 static int dp_ipa_reo_remap_record_index_next(qdf_atomic_t *index) 80 { 81 int next = qdf_atomic_inc_return(index); 82 83 if (next == REO_REMAP_HISTORY_SIZE) 84 qdf_atomic_sub(REO_REMAP_HISTORY_SIZE, index); 85 86 return next % REO_REMAP_HISTORY_SIZE; 87 } 88 89 /** 90 * dp_ipa_reo_remap_history_add() - Record dp ipa reo remap values 91 * @ix0_val: reo destination ring IX0 value 92 * @ix2_val: reo destination ring IX2 value 93 * @ix3_val: reo destination ring IX3 value 94 * 95 * Return: None 96 */ 97 static void dp_ipa_reo_remap_history_add(uint32_t ix0_val, uint32_t ix2_val, 98 uint32_t ix3_val) 99 { 100 int idx = dp_ipa_reo_remap_record_index_next( 101 &dp_ipa_reo_remap_history_index); 102 struct dp_ipa_reo_remap_record *record = &dp_ipa_reo_remap_history[idx]; 103 104 record->timestamp = qdf_get_log_timestamp(); 105 record->ix0_reg = ix0_val; 106 record->ix2_reg = ix2_val; 107 record->ix3_reg = ix3_val; 108 } 109 110 static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc, 111 qdf_nbuf_t nbuf, 112 uint32_t size, 113 bool create) 
114 { 115 qdf_mem_info_t mem_map_table = {0}; 116 QDF_STATUS ret = QDF_STATUS_SUCCESS; 117 qdf_ipa_wdi_hdl_t hdl; 118 119 /* Need to handle the case when one soc will 120 * have multiple pdev(radio's), Currently passing 121 * pdev_id as 0 assuming 1 soc has only 1 radio. 122 */ 123 hdl = wlan_ipa_get_hdl(soc->ctrl_psoc, 0); 124 if (hdl == DP_IPA_HDL_INVALID) { 125 dp_err("IPA handle is invalid"); 126 return QDF_STATUS_E_INVAL; 127 } 128 qdf_update_mem_map_table(soc->osdev, &mem_map_table, 129 qdf_nbuf_get_frag_paddr(nbuf, 0), 130 size); 131 132 if (create) { 133 /* Assert if PA is zero */ 134 qdf_assert_always(mem_map_table.pa); 135 136 ret = qdf_ipa_wdi_create_smmu_mapping(hdl, 1, 137 &mem_map_table); 138 } else { 139 ret = qdf_ipa_wdi_release_smmu_mapping(hdl, 1, 140 &mem_map_table); 141 } 142 qdf_assert_always(!ret); 143 144 /* Return status of mapping/unmapping is stored in 145 * mem_map_table.result field, assert if the result 146 * is failure 147 */ 148 if (create) 149 qdf_assert_always(!mem_map_table.result); 150 else 151 qdf_assert_always(mem_map_table.result >= mem_map_table.size); 152 153 return ret; 154 } 155 156 QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc, 157 qdf_nbuf_t nbuf, 158 uint32_t size, 159 bool create) 160 { 161 struct dp_pdev *pdev; 162 int i; 163 164 for (i = 0; i < soc->pdev_count; i++) { 165 pdev = soc->pdev_list[i]; 166 if (pdev && dp_monitor_is_configured(pdev)) 167 return QDF_STATUS_SUCCESS; 168 } 169 170 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) || 171 !qdf_mem_smmu_s1_enabled(soc->osdev)) 172 return QDF_STATUS_SUCCESS; 173 174 /** 175 * Even if ipa pipes is disabled, but if it's unmap 176 * operation and nbuf has done ipa smmu map before, 177 * do ipa smmu unmap as well. 
178 */ 179 if (!qdf_atomic_read(&soc->ipa_pipes_enabled)) { 180 if (!create && qdf_nbuf_is_rx_ipa_smmu_map(nbuf)) { 181 DP_STATS_INC(soc, rx.err.ipa_unmap_no_pipe, 1); 182 } else { 183 return QDF_STATUS_SUCCESS; 184 } 185 } 186 187 if (qdf_unlikely(create == qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) { 188 if (create) { 189 DP_STATS_INC(soc, rx.err.ipa_smmu_map_dup, 1); 190 } else { 191 DP_STATS_INC(soc, rx.err.ipa_smmu_unmap_dup, 1); 192 } 193 return QDF_STATUS_E_INVAL; 194 } 195 196 qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create); 197 198 return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, size, create); 199 } 200 201 static QDF_STATUS __dp_ipa_tx_buf_smmu_mapping( 202 struct dp_soc *soc, 203 struct dp_pdev *pdev, 204 bool create) 205 { 206 uint32_t index; 207 QDF_STATUS ret = QDF_STATUS_SUCCESS; 208 uint32_t tx_buffer_cnt = soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; 209 qdf_nbuf_t nbuf; 210 uint32_t buf_len; 211 212 if (!ipa_is_ready()) { 213 dp_info("IPA is not READY"); 214 return 0; 215 } 216 217 for (index = 0; index < tx_buffer_cnt; index++) { 218 nbuf = (qdf_nbuf_t) 219 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[index]; 220 if (!nbuf) 221 continue; 222 buf_len = qdf_nbuf_get_data_len(nbuf); 223 ret = __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, buf_len, 224 create); 225 } 226 227 return ret; 228 } 229 230 #ifndef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS 231 static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc, 232 bool lock_required) 233 { 234 hal_ring_handle_t hal_ring_hdl; 235 int ring; 236 237 for (ring = 0; ring < soc->num_reo_dest_rings; ring++) { 238 hal_ring_hdl = soc->reo_dest_ring[ring].hal_srng; 239 hal_srng_lock(hal_ring_hdl); 240 soc->ipa_reo_ctx_lock_required[ring] = lock_required; 241 hal_srng_unlock(hal_ring_hdl); 242 } 243 } 244 #else 245 static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc, 246 bool lock_required) 247 { 248 } 249 250 #endif 251 252 #ifdef RX_DESC_MULTI_PAGE_ALLOC 253 static QDF_STATUS 
dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc, 254 struct dp_pdev *pdev, 255 bool create) 256 { 257 struct rx_desc_pool *rx_pool; 258 uint8_t pdev_id; 259 uint32_t num_desc, page_id, offset, i; 260 uint16_t num_desc_per_page; 261 union dp_rx_desc_list_elem_t *rx_desc_elem; 262 struct dp_rx_desc *rx_desc; 263 qdf_nbuf_t nbuf; 264 QDF_STATUS ret = QDF_STATUS_SUCCESS; 265 266 if (!qdf_ipa_is_ready()) 267 return ret; 268 269 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) 270 return ret; 271 272 pdev_id = pdev->pdev_id; 273 rx_pool = &soc->rx_desc_buf[pdev_id]; 274 275 dp_ipa_set_reo_ctx_mapping_lock_required(soc, true); 276 qdf_spin_lock_bh(&rx_pool->lock); 277 dp_ipa_rx_buf_smmu_mapping_lock(soc); 278 num_desc = rx_pool->pool_size; 279 num_desc_per_page = rx_pool->desc_pages.num_element_per_page; 280 for (i = 0; i < num_desc; i++) { 281 page_id = i / num_desc_per_page; 282 offset = i % num_desc_per_page; 283 if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages))) 284 break; 285 rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool); 286 rx_desc = &rx_desc_elem->rx_desc; 287 if ((!(rx_desc->in_use)) || rx_desc->unmapped) 288 continue; 289 nbuf = rx_desc->nbuf; 290 291 if (qdf_unlikely(create == 292 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) { 293 if (create) { 294 DP_STATS_INC(soc, 295 rx.err.ipa_smmu_map_dup, 1); 296 } else { 297 DP_STATS_INC(soc, 298 rx.err.ipa_smmu_unmap_dup, 1); 299 } 300 continue; 301 } 302 qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create); 303 304 ret = __dp_ipa_handle_buf_smmu_mapping( 305 soc, nbuf, rx_pool->buf_size, create); 306 } 307 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 308 qdf_spin_unlock_bh(&rx_pool->lock); 309 dp_ipa_set_reo_ctx_mapping_lock_required(soc, false); 310 311 return ret; 312 } 313 #else 314 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc, 315 struct dp_pdev *pdev, 316 bool create) 317 { 318 struct rx_desc_pool *rx_pool; 319 uint8_t pdev_id; 320 qdf_nbuf_t nbuf; 321 int i; 322 323 if 
(!qdf_ipa_is_ready()) 324 return QDF_STATUS_SUCCESS; 325 326 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) 327 return QDF_STATUS_SUCCESS; 328 329 pdev_id = pdev->pdev_id; 330 rx_pool = &soc->rx_desc_buf[pdev_id]; 331 332 dp_ipa_set_reo_ctx_mapping_lock_required(soc, true); 333 qdf_spin_lock_bh(&rx_pool->lock); 334 dp_ipa_rx_buf_smmu_mapping_lock(soc); 335 for (i = 0; i < rx_pool->pool_size; i++) { 336 if ((!(rx_pool->array[i].rx_desc.in_use)) || 337 rx_pool->array[i].rx_desc.unmapped) 338 continue; 339 340 nbuf = rx_pool->array[i].rx_desc.nbuf; 341 342 if (qdf_unlikely(create == 343 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) { 344 if (create) { 345 DP_STATS_INC(soc, 346 rx.err.ipa_smmu_map_dup, 1); 347 } else { 348 DP_STATS_INC(soc, 349 rx.err.ipa_smmu_unmap_dup, 1); 350 } 351 continue; 352 } 353 qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create); 354 355 __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, 356 rx_pool->buf_size, create); 357 } 358 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 359 qdf_spin_unlock_bh(&rx_pool->lock); 360 dp_ipa_set_reo_ctx_mapping_lock_required(soc, false); 361 362 return QDF_STATUS_SUCCESS; 363 } 364 #endif /* RX_DESC_MULTI_PAGE_ALLOC */ 365 366 static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev, 367 qdf_shared_mem_t *shared_mem, 368 void *cpu_addr, 369 qdf_dma_addr_t dma_addr, 370 uint32_t size) 371 { 372 qdf_dma_addr_t paddr; 373 int ret; 374 375 shared_mem->vaddr = cpu_addr; 376 qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size); 377 *qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr; 378 379 paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr); 380 qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr); 381 382 ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable, 383 shared_mem->vaddr, dma_addr, size); 384 if (ret) { 385 dp_err("Unable to get DMA sgtable"); 386 return QDF_STATUS_E_NOMEM; 387 } 388 389 qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable); 390 391 return QDF_STATUS_SUCCESS; 392 } 393 394 #ifdef 
IPA_WDI3_TX_TWO_PIPES 395 static void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev) 396 { 397 struct dp_ipa_resources *ipa_res; 398 qdf_nbuf_t nbuf; 399 int idx; 400 401 for (idx = 0; idx < soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt; idx++) { 402 nbuf = (qdf_nbuf_t) 403 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx]; 404 if (!nbuf) 405 continue; 406 407 qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL); 408 qdf_mem_dp_tx_skb_cnt_dec(); 409 qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf)); 410 qdf_nbuf_free(nbuf); 411 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx] = 412 (void *)NULL; 413 } 414 415 qdf_mem_free(soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned); 416 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL; 417 418 ipa_res = &pdev->ipa_resource; 419 if (!ipa_res->is_db_ddr_mapped) 420 iounmap(ipa_res->tx_alt_comp_doorbell_vaddr); 421 422 qdf_mem_free_sgtable(&ipa_res->tx_alt_ring.sgtable); 423 qdf_mem_free_sgtable(&ipa_res->tx_alt_comp_ring.sgtable); 424 } 425 426 static int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc) 427 { 428 uint32_t tx_buffer_count; 429 uint32_t ring_base_align = 8; 430 qdf_dma_addr_t buffer_paddr; 431 struct hal_srng *wbm_srng = (struct hal_srng *) 432 soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng; 433 struct hal_srng_params srng_params; 434 uint32_t wbm_bm_id; 435 void *ring_entry; 436 int num_entries; 437 qdf_nbuf_t nbuf; 438 int retval = QDF_STATUS_SUCCESS; 439 int max_alloc_count = 0; 440 441 /* 442 * Uncomment when dp_ops_cfg.cfg_attach is implemented 443 * unsigned int uc_tx_buf_sz = 444 * dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev); 445 */ 446 unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT; 447 unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1; 448 449 wbm_bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, 450 IPA_TX_ALT_RING_IDX); 451 452 hal_get_srng_params(soc->hal_soc, 453 hal_srng_to_hal_ring_handle(wbm_srng), 454 
&srng_params); 455 num_entries = srng_params.num_entries; 456 457 max_alloc_count = 458 num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES; 459 if (max_alloc_count <= 0) { 460 dp_err("incorrect value for buffer count %u", max_alloc_count); 461 return -EINVAL; 462 } 463 464 dp_info("requested %d buffers to be posted to wbm ring", 465 max_alloc_count); 466 467 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = 468 qdf_mem_malloc(num_entries * 469 sizeof(*soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned)); 470 if (!soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned) { 471 dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail"); 472 return -ENOMEM; 473 } 474 475 hal_srng_access_start_unlocked(soc->hal_soc, 476 hal_srng_to_hal_ring_handle(wbm_srng)); 477 478 /* 479 * Allocate Tx buffers as many as possible. 480 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty 481 * Populate Tx buffers into WBM2IPA ring 482 * This initial buffer population will simulate H/W as source ring, 483 * and update HP 484 */ 485 for (tx_buffer_count = 0; 486 tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) { 487 nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE); 488 if (!nbuf) 489 break; 490 491 ring_entry = hal_srng_dst_get_next_hp( 492 soc->hal_soc, 493 hal_srng_to_hal_ring_handle(wbm_srng)); 494 if (!ring_entry) { 495 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 496 "%s: Failed to get WBM ring entry", 497 __func__); 498 qdf_nbuf_free(nbuf); 499 break; 500 } 501 502 qdf_nbuf_map_single(soc->osdev, nbuf, 503 QDF_DMA_BIDIRECTIONAL); 504 buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); 505 qdf_mem_dp_tx_skb_cnt_inc(); 506 qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf)); 507 508 hal_rxdma_buff_addr_info_set(soc->hal_soc, ring_entry, 509 buffer_paddr, 0, wbm_bm_id); 510 511 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[ 512 tx_buffer_count] = (void *)nbuf; 513 } 514 515 hal_srng_access_end_unlocked(soc->hal_soc, 516 
hal_srng_to_hal_ring_handle(wbm_srng)); 517 518 soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt = tx_buffer_count; 519 520 if (tx_buffer_count) { 521 dp_info("IPA TX buffer pool2: %d allocated", tx_buffer_count); 522 } else { 523 dp_err("Failed to allocate IPA TX buffer pool2"); 524 qdf_mem_free( 525 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned); 526 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL; 527 retval = -ENOMEM; 528 } 529 530 return retval; 531 } 532 533 static QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev) 534 { 535 struct dp_soc *soc = pdev->soc; 536 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 537 538 ipa_res->tx_alt_ring_num_alloc_buffer = 539 (uint32_t)soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt; 540 541 dp_ipa_get_shared_mem_info( 542 soc->osdev, &ipa_res->tx_alt_ring, 543 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr, 544 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr, 545 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size); 546 547 dp_ipa_get_shared_mem_info( 548 soc->osdev, &ipa_res->tx_alt_comp_ring, 549 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr, 550 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr, 551 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size); 552 553 if (!qdf_mem_get_dma_addr(soc->osdev, 554 &ipa_res->tx_alt_comp_ring.mem_info)) 555 return QDF_STATUS_E_FAILURE; 556 557 return QDF_STATUS_SUCCESS; 558 } 559 560 static void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc) 561 { 562 struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc; 563 struct hal_srng *hal_srng; 564 struct hal_srng_params srng_params; 565 unsigned long addr_offset, dev_base_paddr; 566 567 /* IPA TCL_DATA Alternative Ring - HAL_SRNG_SW2TCL2 */ 568 hal_srng = (struct hal_srng *) 569 soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng; 570 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 571 hal_srng_to_hal_ring_handle(hal_srng), 572 &srng_params); 573 574 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr = 575 srng_params.ring_base_paddr; 576 
soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr = 577 srng_params.ring_base_vaddr; 578 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size = 579 (srng_params.num_entries * srng_params.entry_size) << 2; 580 /* 581 * For the register backed memory addresses, use the scn->mem_pa to 582 * calculate the physical address of the shadow registers 583 */ 584 dev_base_paddr = 585 (unsigned long) 586 ((struct hif_softc *)(hal_soc->hif_handle))->mem_pa; 587 addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) - 588 (unsigned long)(hal_soc->dev_base_addr); 589 soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr = 590 (qdf_dma_addr_t)(addr_offset + dev_base_paddr); 591 592 dp_info("IPA TCL_DATA Alt Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", 593 (unsigned int)addr_offset, 594 (unsigned int)dev_base_paddr, 595 (unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr), 596 (void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr, 597 (void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr, 598 srng_params.num_entries, 599 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size); 600 601 /* IPA TX Alternative COMP Ring - HAL_SRNG_WBM2SW4_RELEASE */ 602 hal_srng = (struct hal_srng *) 603 soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng; 604 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 605 hal_srng_to_hal_ring_handle(hal_srng), 606 &srng_params); 607 608 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr = 609 srng_params.ring_base_paddr; 610 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr = 611 srng_params.ring_base_vaddr; 612 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size = 613 (srng_params.num_entries * srng_params.entry_size) << 2; 614 soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr = 615 hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc), 616 hal_srng_to_hal_ring_handle(hal_srng)); 617 addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) - 618 (unsigned long)(hal_soc->dev_base_addr); 619 soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr = 620 
(qdf_dma_addr_t)(addr_offset + dev_base_paddr); 621 622 dp_info("IPA TX Alt COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)", 623 (unsigned int)addr_offset, 624 (unsigned int)dev_base_paddr, 625 (unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr), 626 (void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr, 627 (void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr, 628 srng_params.num_entries, 629 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size); 630 } 631 632 static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev) 633 { 634 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 635 uint32_t rx_ready_doorbell_dmaaddr; 636 uint32_t tx_comp_doorbell_dmaaddr; 637 struct dp_soc *soc = pdev->soc; 638 int ret = 0; 639 640 if (ipa_res->is_db_ddr_mapped) 641 ipa_res->tx_comp_doorbell_vaddr = 642 phys_to_virt(ipa_res->tx_comp_doorbell_paddr); 643 else 644 ipa_res->tx_comp_doorbell_vaddr = 645 ioremap(ipa_res->tx_comp_doorbell_paddr, 4); 646 647 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 648 ret = pld_smmu_map(soc->osdev->dev, 649 ipa_res->tx_comp_doorbell_paddr, 650 &tx_comp_doorbell_dmaaddr, 651 sizeof(uint32_t)); 652 ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr; 653 qdf_assert_always(!ret); 654 655 ret = pld_smmu_map(soc->osdev->dev, 656 ipa_res->rx_ready_doorbell_paddr, 657 &rx_ready_doorbell_dmaaddr, 658 sizeof(uint32_t)); 659 ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr; 660 qdf_assert_always(!ret); 661 } 662 663 /* Setup for alternative TX pipe */ 664 if (!ipa_res->tx_alt_comp_doorbell_paddr) 665 return; 666 667 if (ipa_res->is_db_ddr_mapped) 668 ipa_res->tx_alt_comp_doorbell_vaddr = 669 phys_to_virt(ipa_res->tx_alt_comp_doorbell_paddr); 670 else 671 ipa_res->tx_alt_comp_doorbell_vaddr = 672 ioremap(ipa_res->tx_alt_comp_doorbell_paddr, 4); 673 674 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 675 ret = pld_smmu_map(soc->osdev->dev, 676 ipa_res->tx_alt_comp_doorbell_paddr, 677 
&tx_comp_doorbell_dmaaddr, 678 sizeof(uint32_t)); 679 ipa_res->tx_alt_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr; 680 qdf_assert_always(!ret); 681 } 682 } 683 684 static void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev) 685 { 686 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 687 struct dp_soc *soc = pdev->soc; 688 int ret = 0; 689 690 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) 691 return; 692 693 /* Unmap must be in reverse order of map */ 694 if (ipa_res->tx_alt_comp_doorbell_paddr) { 695 ret = pld_smmu_unmap(soc->osdev->dev, 696 ipa_res->tx_alt_comp_doorbell_paddr, 697 sizeof(uint32_t)); 698 qdf_assert_always(!ret); 699 } 700 701 ret = pld_smmu_unmap(soc->osdev->dev, 702 ipa_res->rx_ready_doorbell_paddr, 703 sizeof(uint32_t)); 704 qdf_assert_always(!ret); 705 706 ret = pld_smmu_unmap(soc->osdev->dev, 707 ipa_res->tx_comp_doorbell_paddr, 708 sizeof(uint32_t)); 709 qdf_assert_always(!ret); 710 } 711 712 static QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc, 713 struct dp_pdev *pdev, 714 bool create) 715 { 716 QDF_STATUS ret = QDF_STATUS_SUCCESS; 717 struct ipa_dp_tx_rsc *rsc; 718 uint32_t tx_buffer_cnt; 719 uint32_t buf_len; 720 qdf_nbuf_t nbuf; 721 uint32_t index; 722 723 if (!ipa_is_ready()) { 724 dp_info("IPA is not READY"); 725 return QDF_STATUS_SUCCESS; 726 } 727 728 rsc = &soc->ipa_uc_tx_rsc_alt; 729 tx_buffer_cnt = rsc->alloc_tx_buf_cnt; 730 731 for (index = 0; index < tx_buffer_cnt; index++) { 732 nbuf = (qdf_nbuf_t)rsc->tx_buf_pool_vaddr_unaligned[index]; 733 if (!nbuf) 734 continue; 735 736 buf_len = qdf_nbuf_get_data_len(nbuf); 737 ret = __dp_ipa_handle_buf_smmu_mapping( 738 soc, nbuf, buf_len, create); 739 } 740 741 return ret; 742 } 743 744 static void dp_ipa_wdi_tx_alt_pipe_params(struct dp_soc *soc, 745 struct dp_ipa_resources *ipa_res, 746 qdf_ipa_wdi_pipe_setup_info_t *tx) 747 { 748 QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS1; 749 750 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) = 751 
qdf_mem_get_dma_addr(soc->osdev, 752 &ipa_res->tx_alt_comp_ring.mem_info); 753 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) = 754 qdf_mem_get_dma_size(soc->osdev, 755 &ipa_res->tx_alt_comp_ring.mem_info); 756 757 /* WBM Tail Pointer Address */ 758 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) = 759 soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr; 760 QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true; 761 762 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) = 763 qdf_mem_get_dma_addr(soc->osdev, 764 &ipa_res->tx_alt_ring.mem_info); 765 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = 766 qdf_mem_get_dma_size(soc->osdev, 767 &ipa_res->tx_alt_ring.mem_info); 768 769 /* TCL Head Pointer Address */ 770 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) = 771 soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr; 772 QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true; 773 774 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) = 775 ipa_res->tx_alt_ring_num_alloc_buffer; 776 777 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0; 778 } 779 780 static void 781 dp_ipa_wdi_tx_alt_pipe_smmu_params(struct dp_soc *soc, 782 struct dp_ipa_resources *ipa_res, 783 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu) 784 { 785 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = IPA_CLIENT_WLAN2_CONS1; 786 787 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu), 788 &ipa_res->tx_alt_comp_ring.sgtable, 789 sizeof(sgtable_t)); 790 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) = 791 qdf_mem_get_dma_size(soc->osdev, 792 &ipa_res->tx_alt_comp_ring.mem_info); 793 /* WBM Tail Pointer Address */ 794 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) = 795 soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr; 796 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true; 797 798 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu), 799 &ipa_res->tx_alt_ring.sgtable, 800 sizeof(sgtable_t)); 801 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) = 802 
qdf_mem_get_dma_size(soc->osdev, 803 &ipa_res->tx_alt_ring.mem_info); 804 /* TCL Head Pointer Address */ 805 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) = 806 soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr; 807 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true; 808 809 QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) = 810 ipa_res->tx_alt_ring_num_alloc_buffer; 811 QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0; 812 } 813 814 static void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc, 815 struct dp_ipa_resources *res, 816 qdf_ipa_wdi_conn_in_params_t *in) 817 { 818 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu = NULL; 819 qdf_ipa_wdi_pipe_setup_info_t *tx = NULL; 820 qdf_ipa_ep_cfg_t *tx_cfg; 821 822 QDF_IPA_WDI_CONN_IN_PARAMS_IS_TX1_USED(in) = true; 823 824 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 825 tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE_SMMU(in); 826 tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu); 827 dp_ipa_wdi_tx_alt_pipe_smmu_params(soc, res, tx_smmu); 828 } else { 829 tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE(in); 830 tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx); 831 dp_ipa_wdi_tx_alt_pipe_params(soc, res, tx); 832 } 833 834 QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT; 835 QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN; 836 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0; 837 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0; 838 QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0; 839 QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC; 840 QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true; 841 } 842 843 static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res, 844 qdf_ipa_wdi_conn_out_params_t *out) 845 { 846 res->tx_comp_doorbell_paddr = 847 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out); 848 res->rx_ready_doorbell_paddr = 849 QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out); 850 res->tx_alt_comp_doorbell_paddr = 851 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_ALT_DB_PA(out); 852 } 853 854 static void 
dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in, 855 uint8_t session_id) 856 { 857 bool is_2g_iface = session_id & IPA_SESSION_ID_SHIFT; 858 859 session_id = session_id >> IPA_SESSION_ID_SHIFT; 860 dp_debug("session_id %u is_2g_iface %d", session_id, is_2g_iface); 861 862 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16); 863 QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_TX1_USED(in) = is_2g_iface; 864 } 865 866 static void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc, 867 struct dp_ipa_resources *res) 868 { 869 struct hal_srng *wbm_srng; 870 871 /* Init first TX comp ring */ 872 wbm_srng = (struct hal_srng *) 873 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 874 875 hal_srng_dst_init_hp(soc->hal_soc, wbm_srng, 876 res->tx_comp_doorbell_vaddr); 877 878 /* Init the alternate TX comp ring */ 879 wbm_srng = (struct hal_srng *) 880 soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng; 881 882 hal_srng_dst_init_hp(soc->hal_soc, wbm_srng, 883 res->tx_alt_comp_doorbell_vaddr); 884 } 885 886 static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc, 887 struct dp_ipa_resources *ipa_res) 888 { 889 struct hal_srng *wbm_srng; 890 891 wbm_srng = (struct hal_srng *) 892 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 893 894 hal_srng_dst_set_hp_paddr_confirm(wbm_srng, 895 ipa_res->tx_comp_doorbell_paddr); 896 897 dp_info("paddr %pK vaddr %pK", 898 (void *)ipa_res->tx_comp_doorbell_paddr, 899 (void *)ipa_res->tx_comp_doorbell_vaddr); 900 901 /* Setup for alternative TX comp ring */ 902 wbm_srng = (struct hal_srng *) 903 soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng; 904 905 hal_srng_dst_set_hp_paddr_confirm(wbm_srng, 906 ipa_res->tx_alt_comp_doorbell_paddr); 907 908 dp_info("paddr %pK vaddr %pK", 909 (void *)ipa_res->tx_alt_comp_doorbell_paddr, 910 (void *)ipa_res->tx_alt_comp_doorbell_vaddr); 911 } 912 913 #ifdef IPA_SET_RESET_TX_DB_PA 914 static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc, 915 struct dp_ipa_resources 
*ipa_res) 916 { 917 hal_ring_handle_t wbm_srng; 918 qdf_dma_addr_t hp_addr; 919 920 wbm_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 921 if (!wbm_srng) 922 return QDF_STATUS_E_FAILURE; 923 924 hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr; 925 926 hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr); 927 928 dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr); 929 930 /* Reset alternative TX comp ring */ 931 wbm_srng = soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng; 932 if (!wbm_srng) 933 return QDF_STATUS_E_FAILURE; 934 935 hp_addr = soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr; 936 937 hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr); 938 939 dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr); 940 941 return QDF_STATUS_SUCCESS; 942 } 943 #endif /* IPA_SET_RESET_TX_DB_PA */ 944 945 #else /* !IPA_WDI3_TX_TWO_PIPES */ 946 947 static inline 948 void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev) 949 { 950 } 951 952 static inline void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc) 953 { 954 } 955 956 static inline int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc) 957 { 958 return 0; 959 } 960 961 static inline QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev) 962 { 963 return QDF_STATUS_SUCCESS; 964 } 965 966 static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev) 967 { 968 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 969 uint32_t rx_ready_doorbell_dmaaddr; 970 uint32_t tx_comp_doorbell_dmaaddr; 971 struct dp_soc *soc = pdev->soc; 972 int ret = 0; 973 974 if (ipa_res->is_db_ddr_mapped) 975 ipa_res->tx_comp_doorbell_vaddr = 976 phys_to_virt(ipa_res->tx_comp_doorbell_paddr); 977 else 978 ipa_res->tx_comp_doorbell_vaddr = 979 ioremap(ipa_res->tx_comp_doorbell_paddr, 4); 980 981 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 982 ret = pld_smmu_map(soc->osdev->dev, 983 ipa_res->tx_comp_doorbell_paddr, 984 &tx_comp_doorbell_dmaaddr, 985 
sizeof(uint32_t)); 986 ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr; 987 qdf_assert_always(!ret); 988 989 ret = pld_smmu_map(soc->osdev->dev, 990 ipa_res->rx_ready_doorbell_paddr, 991 &rx_ready_doorbell_dmaaddr, 992 sizeof(uint32_t)); 993 ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr; 994 qdf_assert_always(!ret); 995 } 996 } 997 998 static inline void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev) 999 { 1000 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 1001 struct dp_soc *soc = pdev->soc; 1002 int ret = 0; 1003 1004 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) 1005 return; 1006 1007 ret = pld_smmu_unmap(soc->osdev->dev, 1008 ipa_res->rx_ready_doorbell_paddr, 1009 sizeof(uint32_t)); 1010 qdf_assert_always(!ret); 1011 1012 ret = pld_smmu_unmap(soc->osdev->dev, 1013 ipa_res->tx_comp_doorbell_paddr, 1014 sizeof(uint32_t)); 1015 qdf_assert_always(!ret); 1016 } 1017 1018 static inline QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc, 1019 struct dp_pdev *pdev, 1020 bool create) 1021 { 1022 return QDF_STATUS_SUCCESS; 1023 } 1024 1025 static inline 1026 void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc, struct dp_ipa_resources *res, 1027 qdf_ipa_wdi_conn_in_params_t *in) 1028 { 1029 } 1030 1031 static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res, 1032 qdf_ipa_wdi_conn_out_params_t *out) 1033 { 1034 res->tx_comp_doorbell_paddr = 1035 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out); 1036 res->rx_ready_doorbell_paddr = 1037 QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out); 1038 } 1039 1040 #ifdef IPA_WDS_EASYMESH_FEATURE 1041 /** 1042 * dp_ipa_setup_iface_session_id - Pass vdev id to IPA 1043 * @in: ipa in params 1044 * @session_id: vdev id 1045 * 1046 * Pass Vdev id to IPA, IPA metadata order is changed and vdev id 1047 * is stored at higher nibble so, no shift is required. 
1048 * 1049 * Return: none 1050 */ 1051 static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in, 1052 uint8_t session_id) 1053 { 1054 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id); 1055 } 1056 #else 1057 static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in, 1058 uint8_t session_id) 1059 { 1060 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16); 1061 } 1062 #endif 1063 1064 static inline void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc, 1065 struct dp_ipa_resources *res) 1066 { 1067 struct hal_srng *wbm_srng = (struct hal_srng *) 1068 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 1069 1070 hal_srng_dst_init_hp(soc->hal_soc, wbm_srng, 1071 res->tx_comp_doorbell_vaddr); 1072 } 1073 1074 static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc, 1075 struct dp_ipa_resources *ipa_res) 1076 { 1077 struct hal_srng *wbm_srng = (struct hal_srng *) 1078 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 1079 1080 hal_srng_dst_set_hp_paddr_confirm(wbm_srng, 1081 ipa_res->tx_comp_doorbell_paddr); 1082 1083 dp_info("paddr %pK vaddr %pK", 1084 (void *)ipa_res->tx_comp_doorbell_paddr, 1085 (void *)ipa_res->tx_comp_doorbell_vaddr); 1086 } 1087 1088 #ifdef IPA_SET_RESET_TX_DB_PA 1089 static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc, 1090 struct dp_ipa_resources *ipa_res) 1091 { 1092 hal_ring_handle_t wbm_srng = 1093 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 1094 qdf_dma_addr_t hp_addr; 1095 1096 if (!wbm_srng) 1097 return QDF_STATUS_E_FAILURE; 1098 1099 hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr; 1100 1101 hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr); 1102 1103 dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr); 1104 1105 return QDF_STATUS_SUCCESS; 1106 } 1107 #endif /* IPA_SET_RESET_TX_DB_PA */ 1108 1109 #endif /* IPA_WDI3_TX_TWO_PIPES */ 1110 1111 /** 1112 * dp_tx_ipa_uc_detach - Free autonomy TX resources 1113 * @soc: 
data path instance 1114 * @pdev: core txrx pdev context 1115 * 1116 * Free allocated TX buffers with WBM SRNG 1117 * 1118 * Return: none 1119 */ 1120 static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) 1121 { 1122 int idx; 1123 qdf_nbuf_t nbuf; 1124 struct dp_ipa_resources *ipa_res; 1125 1126 for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) { 1127 nbuf = (qdf_nbuf_t) 1128 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx]; 1129 if (!nbuf) 1130 continue; 1131 qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL); 1132 qdf_mem_dp_tx_skb_cnt_dec(); 1133 qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf)); 1134 qdf_nbuf_free(nbuf); 1135 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] = 1136 (void *)NULL; 1137 } 1138 1139 qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned); 1140 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL; 1141 1142 ipa_res = &pdev->ipa_resource; 1143 1144 qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable); 1145 qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable); 1146 } 1147 1148 /** 1149 * dp_rx_ipa_uc_detach - free autonomy RX resources 1150 * @soc: data path instance 1151 * @pdev: core txrx pdev context 1152 * 1153 * This function will detach DP RX into main device context 1154 * will free DP Rx resources. 1155 * 1156 * Return: none 1157 */ 1158 static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) 1159 { 1160 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 1161 1162 qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable); 1163 qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable); 1164 } 1165 1166 /* 1167 * dp_rx_alt_ipa_uc_detach - free autonomy RX resources 1168 * @soc: data path instance 1169 * @pdev: core txrx pdev context 1170 * 1171 * This function will detach DP RX into main device context 1172 * will free DP Rx resources. 
1173 * 1174 * Return: none 1175 */ 1176 #ifdef IPA_WDI3_VLAN_SUPPORT 1177 static void dp_rx_alt_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) 1178 { 1179 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 1180 1181 if (!wlan_ipa_is_vlan_enabled()) 1182 return; 1183 1184 qdf_mem_free_sgtable(&ipa_res->rx_alt_rdy_ring.sgtable); 1185 qdf_mem_free_sgtable(&ipa_res->rx_alt_refill_ring.sgtable); 1186 } 1187 #else 1188 static inline 1189 void dp_rx_alt_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) 1190 { } 1191 #endif 1192 1193 int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) 1194 { 1195 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 1196 return QDF_STATUS_SUCCESS; 1197 1198 /* TX resource detach */ 1199 dp_tx_ipa_uc_detach(soc, pdev); 1200 1201 /* Cleanup 2nd TX pipe resources */ 1202 dp_ipa_tx_alt_pool_detach(soc, pdev); 1203 1204 /* RX resource detach */ 1205 dp_rx_ipa_uc_detach(soc, pdev); 1206 1207 /* Cleanup 2nd RX pipe resources */ 1208 dp_rx_alt_ipa_uc_detach(soc, pdev); 1209 1210 return QDF_STATUS_SUCCESS; /* success */ 1211 } 1212 1213 /** 1214 * dp_tx_ipa_uc_attach - Allocate autonomy TX resources 1215 * @soc: data path instance 1216 * @pdev: Physical device handle 1217 * 1218 * Allocate TX buffer from non-cacheable memory 1219 * Attache allocated TX buffers with WBM SRNG 1220 * 1221 * Return: int 1222 */ 1223 static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev) 1224 { 1225 uint32_t tx_buffer_count; 1226 uint32_t ring_base_align = 8; 1227 qdf_dma_addr_t buffer_paddr; 1228 struct hal_srng *wbm_srng = (struct hal_srng *) 1229 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 1230 struct hal_srng_params srng_params; 1231 void *ring_entry; 1232 int num_entries; 1233 qdf_nbuf_t nbuf; 1234 int retval = QDF_STATUS_SUCCESS; 1235 int max_alloc_count = 0; 1236 uint32_t wbm_bm_id; 1237 1238 /* 1239 * Uncomment when dp_ops_cfg.cfg_attach is implemented 1240 * unsigned int uc_tx_buf_sz = 1241 * 
dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev); 1242 */ 1243 unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT; 1244 unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1; 1245 1246 wbm_bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, 1247 IPA_TCL_DATA_RING_IDX); 1248 1249 hal_get_srng_params(soc->hal_soc, hal_srng_to_hal_ring_handle(wbm_srng), 1250 &srng_params); 1251 num_entries = srng_params.num_entries; 1252 1253 max_alloc_count = 1254 num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES; 1255 if (max_alloc_count <= 0) { 1256 dp_err("incorrect value for buffer count %u", max_alloc_count); 1257 return -EINVAL; 1258 } 1259 1260 dp_info("requested %d buffers to be posted to wbm ring", 1261 max_alloc_count); 1262 1263 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = 1264 qdf_mem_malloc(num_entries * 1265 sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned)); 1266 if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) { 1267 dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail"); 1268 return -ENOMEM; 1269 } 1270 1271 hal_srng_access_start_unlocked(soc->hal_soc, 1272 hal_srng_to_hal_ring_handle(wbm_srng)); 1273 1274 /* 1275 * Allocate Tx buffers as many as possible. 
1276 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty 1277 * Populate Tx buffers into WBM2IPA ring 1278 * This initial buffer population will simulate H/W as source ring, 1279 * and update HP 1280 */ 1281 for (tx_buffer_count = 0; 1282 tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) { 1283 nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE); 1284 if (!nbuf) 1285 break; 1286 1287 ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc, 1288 hal_srng_to_hal_ring_handle(wbm_srng)); 1289 if (!ring_entry) { 1290 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 1291 "%s: Failed to get WBM ring entry", 1292 __func__); 1293 qdf_nbuf_free(nbuf); 1294 break; 1295 } 1296 1297 qdf_nbuf_map_single(soc->osdev, nbuf, 1298 QDF_DMA_BIDIRECTIONAL); 1299 buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); 1300 qdf_mem_dp_tx_skb_cnt_inc(); 1301 qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf)); 1302 1303 /* 1304 * TODO - KIWI code can directly call the be handler 1305 * instead of hal soc ops. 1306 */ 1307 hal_rxdma_buff_addr_info_set(soc->hal_soc, ring_entry, 1308 buffer_paddr, 0, wbm_bm_id); 1309 1310 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count] 1311 = (void *)nbuf; 1312 } 1313 1314 hal_srng_access_end_unlocked(soc->hal_soc, 1315 hal_srng_to_hal_ring_handle(wbm_srng)); 1316 1317 soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count; 1318 1319 if (tx_buffer_count) { 1320 dp_info("IPA WDI TX buffer: %d allocated", tx_buffer_count); 1321 } else { 1322 dp_err("No IPA WDI TX buffer allocated!"); 1323 qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned); 1324 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL; 1325 retval = -ENOMEM; 1326 } 1327 1328 return retval; 1329 } 1330 1331 /** 1332 * dp_rx_ipa_uc_attach - Allocate autonomy RX resources 1333 * @soc: data path instance 1334 * @pdev: core txrx pdev context 1335 * 1336 * This function will attach a DP RX instance into the main 1337 * device (SOC) context. 
1338 * 1339 * Return: QDF_STATUS_SUCCESS: success 1340 * QDF_STATUS_E_RESOURCES: Error return 1341 */ 1342 static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev) 1343 { 1344 return QDF_STATUS_SUCCESS; 1345 } 1346 1347 int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev) 1348 { 1349 int error; 1350 1351 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 1352 return QDF_STATUS_SUCCESS; 1353 1354 /* TX resource attach */ 1355 error = dp_tx_ipa_uc_attach(soc, pdev); 1356 if (error) { 1357 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1358 "%s: DP IPA UC TX attach fail code %d", 1359 __func__, error); 1360 return error; 1361 } 1362 1363 /* Setup 2nd TX pipe */ 1364 error = dp_ipa_tx_alt_pool_attach(soc); 1365 if (error) { 1366 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1367 "%s: DP IPA TX pool2 attach fail code %d", 1368 __func__, error); 1369 dp_tx_ipa_uc_detach(soc, pdev); 1370 return error; 1371 } 1372 1373 /* RX resource attach */ 1374 error = dp_rx_ipa_uc_attach(soc, pdev); 1375 if (error) { 1376 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1377 "%s: DP IPA UC RX attach fail code %d", 1378 __func__, error); 1379 dp_ipa_tx_alt_pool_detach(soc, pdev); 1380 dp_tx_ipa_uc_detach(soc, pdev); 1381 return error; 1382 } 1383 1384 return QDF_STATUS_SUCCESS; /* success */ 1385 } 1386 1387 #ifdef IPA_WDI3_VLAN_SUPPORT 1388 /* 1389 * dp_ipa_rx_alt_ring_resource_setup() - setup IPA 2nd RX ring resources 1390 * @soc: data path SoC handle 1391 * @pdev: data path pdev handle 1392 * 1393 * Return: none 1394 */ 1395 static 1396 void dp_ipa_rx_alt_ring_resource_setup(struct dp_soc *soc, struct dp_pdev *pdev) 1397 { 1398 struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc; 1399 struct hal_srng *hal_srng; 1400 struct hal_srng_params srng_params; 1401 unsigned long addr_offset, dev_base_paddr; 1402 qdf_dma_addr_t hp_addr; 1403 1404 if (!wlan_ipa_is_vlan_enabled()) 1405 return; 1406 1407 dev_base_paddr = 1408 (unsigned long) 1409 ((struct 
hif_softc *)(hal_soc->hif_handle))->mem_pa; 1410 1411 /* IPA REO_DEST Ring - HAL_SRNG_REO2SW3 */ 1412 hal_srng = (struct hal_srng *) 1413 soc->reo_dest_ring[IPA_ALT_REO_DEST_RING_IDX].hal_srng; 1414 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 1415 hal_srng_to_hal_ring_handle(hal_srng), 1416 &srng_params); 1417 1418 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_paddr = 1419 srng_params.ring_base_paddr; 1420 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_vaddr = 1421 srng_params.ring_base_vaddr; 1422 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_size = 1423 (srng_params.num_entries * srng_params.entry_size) << 2; 1424 addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) - 1425 (unsigned long)(hal_soc->dev_base_addr); 1426 soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr = 1427 (qdf_dma_addr_t)(addr_offset + dev_base_paddr); 1428 1429 dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", 1430 (unsigned int)addr_offset, 1431 (unsigned int)dev_base_paddr, 1432 (unsigned int)(soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr), 1433 (void *)soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_paddr, 1434 (void *)soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_vaddr, 1435 srng_params.num_entries, 1436 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_size); 1437 1438 hal_srng = (struct hal_srng *) 1439 pdev->rx_refill_buf_ring3.hal_srng; 1440 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 1441 hal_srng_to_hal_ring_handle(hal_srng), 1442 &srng_params); 1443 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_paddr = 1444 srng_params.ring_base_paddr; 1445 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_vaddr = 1446 srng_params.ring_base_vaddr; 1447 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_size = 1448 (srng_params.num_entries * srng_params.entry_size) << 2; 1449 hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc), 1450 hal_srng_to_hal_ring_handle(hal_srng)); 1451 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr = 1452 
qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr); 1453 1454 dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", 1455 (unsigned int)(soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr), 1456 (void *)soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_paddr, 1457 (void *)soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_vaddr, 1458 srng_params.num_entries, 1459 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_size); 1460 } 1461 #else 1462 static inline 1463 void dp_ipa_rx_alt_ring_resource_setup(struct dp_soc *soc, struct dp_pdev *pdev) 1464 { } 1465 #endif 1466 /* 1467 * dp_ipa_ring_resource_setup() - setup IPA ring resources 1468 * @soc: data path SoC handle 1469 * 1470 * Return: none 1471 */ 1472 int dp_ipa_ring_resource_setup(struct dp_soc *soc, 1473 struct dp_pdev *pdev) 1474 { 1475 struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc; 1476 struct hal_srng *hal_srng; 1477 struct hal_srng_params srng_params; 1478 qdf_dma_addr_t hp_addr; 1479 unsigned long addr_offset, dev_base_paddr; 1480 uint32_t ix0; 1481 uint8_t ix0_map[8]; 1482 1483 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 1484 return QDF_STATUS_SUCCESS; 1485 1486 /* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */ 1487 hal_srng = (struct hal_srng *) 1488 soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng; 1489 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 1490 hal_srng_to_hal_ring_handle(hal_srng), 1491 &srng_params); 1492 1493 soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr = 1494 srng_params.ring_base_paddr; 1495 soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr = 1496 srng_params.ring_base_vaddr; 1497 soc->ipa_uc_tx_rsc.ipa_tcl_ring_size = 1498 (srng_params.num_entries * srng_params.entry_size) << 2; 1499 /* 1500 * For the register backed memory addresses, use the scn->mem_pa to 1501 * calculate the physical address of the shadow registers 1502 */ 1503 dev_base_paddr = 1504 (unsigned long) 1505 ((struct hif_softc *)(hal_soc->hif_handle))->mem_pa; 1506 addr_offset = (unsigned 
long)(hal_srng->u.src_ring.hp_addr) - 1507 (unsigned long)(hal_soc->dev_base_addr); 1508 soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr = 1509 (qdf_dma_addr_t)(addr_offset + dev_base_paddr); 1510 1511 dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", 1512 (unsigned int)addr_offset, 1513 (unsigned int)dev_base_paddr, 1514 (unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr), 1515 (void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr, 1516 (void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr, 1517 srng_params.num_entries, 1518 soc->ipa_uc_tx_rsc.ipa_tcl_ring_size); 1519 1520 /* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */ 1521 hal_srng = (struct hal_srng *) 1522 soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; 1523 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 1524 hal_srng_to_hal_ring_handle(hal_srng), 1525 &srng_params); 1526 1527 soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr = 1528 srng_params.ring_base_paddr; 1529 soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr = 1530 srng_params.ring_base_vaddr; 1531 soc->ipa_uc_tx_rsc.ipa_wbm_ring_size = 1532 (srng_params.num_entries * srng_params.entry_size) << 2; 1533 soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr = 1534 hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc), 1535 hal_srng_to_hal_ring_handle(hal_srng)); 1536 addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) - 1537 (unsigned long)(hal_soc->dev_base_addr); 1538 soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr = 1539 (qdf_dma_addr_t)(addr_offset + dev_base_paddr); 1540 1541 dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)", 1542 (unsigned int)addr_offset, 1543 (unsigned int)dev_base_paddr, 1544 (unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr), 1545 (void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr, 1546 (void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr, 1547 srng_params.num_entries, 1548 soc->ipa_uc_tx_rsc.ipa_wbm_ring_size); 1549 1550 
dp_ipa_tx_alt_ring_resource_setup(soc); 1551 1552 /* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */ 1553 hal_srng = (struct hal_srng *) 1554 soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng; 1555 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 1556 hal_srng_to_hal_ring_handle(hal_srng), 1557 &srng_params); 1558 1559 soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr = 1560 srng_params.ring_base_paddr; 1561 soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr = 1562 srng_params.ring_base_vaddr; 1563 soc->ipa_uc_rx_rsc.ipa_reo_ring_size = 1564 (srng_params.num_entries * srng_params.entry_size) << 2; 1565 addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) - 1566 (unsigned long)(hal_soc->dev_base_addr); 1567 soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr = 1568 (qdf_dma_addr_t)(addr_offset + dev_base_paddr); 1569 1570 dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", 1571 (unsigned int)addr_offset, 1572 (unsigned int)dev_base_paddr, 1573 (unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr), 1574 (void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr, 1575 (void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr, 1576 srng_params.num_entries, 1577 soc->ipa_uc_rx_rsc.ipa_reo_ring_size); 1578 1579 hal_srng = (struct hal_srng *) 1580 pdev->rx_refill_buf_ring2.hal_srng; 1581 hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), 1582 hal_srng_to_hal_ring_handle(hal_srng), 1583 &srng_params); 1584 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr = 1585 srng_params.ring_base_paddr; 1586 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr = 1587 srng_params.ring_base_vaddr; 1588 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size = 1589 (srng_params.num_entries * srng_params.entry_size) << 2; 1590 hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc), 1591 hal_srng_to_hal_ring_handle(hal_srng)); 1592 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr = 1593 qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr); 1594 1595 dp_info("IPA 
REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", 1596 (unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr), 1597 (void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr, 1598 (void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr, 1599 srng_params.num_entries, 1600 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size); 1601 1602 /* 1603 * Set DEST_RING_MAPPING_4 to SW2 as default value for 1604 * DESTINATION_RING_CTRL_IX_0. 1605 */ 1606 ix0_map[0] = REO_REMAP_SW1; 1607 ix0_map[1] = REO_REMAP_SW1; 1608 ix0_map[2] = REO_REMAP_SW2; 1609 ix0_map[3] = REO_REMAP_SW3; 1610 ix0_map[4] = REO_REMAP_SW2; 1611 ix0_map[5] = REO_REMAP_RELEASE; 1612 ix0_map[6] = REO_REMAP_FW; 1613 ix0_map[7] = REO_REMAP_FW; 1614 1615 ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0, 1616 ix0_map); 1617 1618 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, NULL, NULL); 1619 1620 dp_ipa_rx_alt_ring_resource_setup(soc, pdev); 1621 return 0; 1622 } 1623 1624 #ifdef IPA_WDI3_VLAN_SUPPORT 1625 /* 1626 * dp_ipa_rx_alt_ring_get_resource() - get IPA 2nd RX ring resources 1627 * @pdev: data path pdev handle 1628 * 1629 * Return: Success if resourece is found 1630 */ 1631 static QDF_STATUS dp_ipa_rx_alt_ring_get_resource(struct dp_pdev *pdev) 1632 { 1633 struct dp_soc *soc = pdev->soc; 1634 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 1635 1636 if (!wlan_ipa_is_vlan_enabled()) 1637 return QDF_STATUS_SUCCESS; 1638 1639 dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_alt_rdy_ring, 1640 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_vaddr, 1641 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_paddr, 1642 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_size); 1643 1644 dp_ipa_get_shared_mem_info( 1645 soc->osdev, &ipa_res->rx_alt_refill_ring, 1646 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_vaddr, 1647 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_paddr, 1648 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_size); 1649 1650 if 
(!qdf_mem_get_dma_addr(soc->osdev, 1651 &ipa_res->rx_alt_rdy_ring.mem_info) || 1652 !qdf_mem_get_dma_addr(soc->osdev, 1653 &ipa_res->rx_alt_refill_ring.mem_info)) 1654 return QDF_STATUS_E_FAILURE; 1655 1656 return QDF_STATUS_SUCCESS; 1657 } 1658 #else 1659 static inline QDF_STATUS dp_ipa_rx_alt_ring_get_resource(struct dp_pdev *pdev) 1660 { 1661 return QDF_STATUS_SUCCESS; 1662 } 1663 #endif 1664 1665 QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 1666 { 1667 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1668 struct dp_pdev *pdev = 1669 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1670 struct dp_ipa_resources *ipa_res; 1671 1672 if (!pdev) { 1673 dp_err("Invalid instance"); 1674 return QDF_STATUS_E_FAILURE; 1675 } 1676 1677 ipa_res = &pdev->ipa_resource; 1678 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 1679 return QDF_STATUS_SUCCESS; 1680 1681 ipa_res->tx_num_alloc_buffer = 1682 (uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; 1683 1684 dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring, 1685 soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr, 1686 soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr, 1687 soc->ipa_uc_tx_rsc.ipa_tcl_ring_size); 1688 1689 dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring, 1690 soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr, 1691 soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr, 1692 soc->ipa_uc_tx_rsc.ipa_wbm_ring_size); 1693 1694 dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring, 1695 soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr, 1696 soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr, 1697 soc->ipa_uc_rx_rsc.ipa_reo_ring_size); 1698 1699 dp_ipa_get_shared_mem_info( 1700 soc->osdev, &ipa_res->rx_refill_ring, 1701 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr, 1702 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr, 1703 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size); 1704 1705 if (!qdf_mem_get_dma_addr(soc->osdev, &ipa_res->tx_ring.mem_info) || 1706 !qdf_mem_get_dma_addr(soc->osdev, 1707 
&ipa_res->tx_comp_ring.mem_info) || 1708 !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info) || 1709 !qdf_mem_get_dma_addr(soc->osdev, 1710 &ipa_res->rx_refill_ring.mem_info)) 1711 return QDF_STATUS_E_FAILURE; 1712 1713 if (dp_ipa_tx_alt_ring_get_resource(pdev)) 1714 return QDF_STATUS_E_FAILURE; 1715 1716 if (dp_ipa_rx_alt_ring_get_resource(pdev)) 1717 return QDF_STATUS_E_FAILURE; 1718 1719 return QDF_STATUS_SUCCESS; 1720 } 1721 1722 #ifdef IPA_SET_RESET_TX_DB_PA 1723 #define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res) 1724 #else 1725 #define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res) \ 1726 dp_ipa_set_tx_doorbell_paddr(soc, ipa_res) 1727 #endif 1728 1729 #ifdef IPA_WDI3_VLAN_SUPPORT 1730 /* 1731 * dp_ipa_map_rx_alt_ring_doorbell_paddr() - Map 2nd rx ring doorbell paddr 1732 * @pdev: data path pdev handle 1733 * 1734 * Return: none 1735 */ 1736 static void dp_ipa_map_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev) 1737 { 1738 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 1739 uint32_t rx_ready_doorbell_dmaaddr; 1740 struct dp_soc *soc = pdev->soc; 1741 struct hal_srng *reo_srng = (struct hal_srng *) 1742 soc->reo_dest_ring[IPA_ALT_REO_DEST_RING_IDX].hal_srng; 1743 int ret = 0; 1744 1745 if (!wlan_ipa_is_vlan_enabled()) 1746 return; 1747 1748 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 1749 ret = pld_smmu_map(soc->osdev->dev, 1750 ipa_res->rx_alt_ready_doorbell_paddr, 1751 &rx_ready_doorbell_dmaaddr, 1752 sizeof(uint32_t)); 1753 ipa_res->rx_alt_ready_doorbell_paddr = 1754 rx_ready_doorbell_dmaaddr; 1755 qdf_assert_always(!ret); 1756 } 1757 1758 hal_srng_dst_set_hp_paddr_confirm(reo_srng, 1759 ipa_res->rx_alt_ready_doorbell_paddr); 1760 } 1761 1762 /* 1763 * dp_ipa_unmap_rx_alt_ring_doorbell_paddr() - Unmap 2nd rx ring doorbell paddr 1764 * @pdev: data path pdev handle 1765 * 1766 * Return: none 1767 */ 1768 static void dp_ipa_unmap_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev) 1769 { 1770 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; 1771 
struct dp_soc *soc = pdev->soc; 1772 int ret = 0; 1773 1774 if (!wlan_ipa_is_vlan_enabled()) 1775 return; 1776 1777 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) 1778 return; 1779 1780 ret = pld_smmu_unmap(soc->osdev->dev, 1781 ipa_res->rx_alt_ready_doorbell_paddr, 1782 sizeof(uint32_t)); 1783 qdf_assert_always(!ret); 1784 } 1785 #else 1786 static inline void dp_ipa_map_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev) 1787 { } 1788 1789 static inline void dp_ipa_unmap_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev) 1790 { } 1791 #endif 1792 1793 QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 1794 { 1795 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1796 struct dp_pdev *pdev = 1797 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1798 struct dp_ipa_resources *ipa_res; 1799 struct hal_srng *reo_srng = (struct hal_srng *) 1800 soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng; 1801 1802 if (!pdev) { 1803 dp_err("Invalid instance"); 1804 return QDF_STATUS_E_FAILURE; 1805 } 1806 1807 ipa_res = &pdev->ipa_resource; 1808 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 1809 return QDF_STATUS_SUCCESS; 1810 1811 dp_ipa_map_ring_doorbell_paddr(pdev); 1812 dp_ipa_map_rx_alt_ring_doorbell_paddr(pdev); 1813 1814 DP_IPA_SET_TX_DB_PADDR(soc, ipa_res); 1815 1816 /* 1817 * For RX, REO module on Napier/Hastings does reordering on incoming 1818 * Ethernet packets and writes one or more descriptors to REO2IPA Rx 1819 * ring.It then updates the ring’s Write/Head ptr and rings a doorbell 1820 * to IPA. 1821 * Set the doorbell addr for the REO ring. 
1822 */ 1823 hal_srng_dst_set_hp_paddr_confirm(reo_srng, 1824 ipa_res->rx_ready_doorbell_paddr); 1825 return QDF_STATUS_SUCCESS; 1826 } 1827 1828 QDF_STATUS dp_ipa_iounmap_doorbell_vaddr(struct cdp_soc_t *soc_hdl, 1829 uint8_t pdev_id) 1830 { 1831 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1832 struct dp_pdev *pdev = 1833 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1834 struct dp_ipa_resources *ipa_res; 1835 1836 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 1837 return QDF_STATUS_SUCCESS; 1838 1839 if (!pdev) { 1840 dp_err("Invalid instance"); 1841 return QDF_STATUS_E_FAILURE; 1842 } 1843 1844 ipa_res = &pdev->ipa_resource; 1845 if (!ipa_res->is_db_ddr_mapped) 1846 iounmap(ipa_res->tx_comp_doorbell_vaddr); 1847 1848 return QDF_STATUS_SUCCESS; 1849 } 1850 1851 QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 1852 uint8_t *op_msg) 1853 { 1854 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1855 struct dp_pdev *pdev = 1856 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1857 1858 if (!pdev) { 1859 dp_err("Invalid instance"); 1860 return QDF_STATUS_E_FAILURE; 1861 } 1862 1863 if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx)) 1864 return QDF_STATUS_SUCCESS; 1865 1866 if (pdev->ipa_uc_op_cb) { 1867 pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt); 1868 } else { 1869 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1870 "%s: IPA callback function is not registered", __func__); 1871 qdf_mem_free(op_msg); 1872 return QDF_STATUS_E_FAILURE; 1873 } 1874 1875 return QDF_STATUS_SUCCESS; 1876 } 1877 1878 QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 1879 ipa_uc_op_cb_type op_cb, 1880 void *usr_ctxt) 1881 { 1882 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1883 struct dp_pdev *pdev = 1884 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1885 1886 if (!pdev) { 1887 dp_err("Invalid instance"); 1888 return QDF_STATUS_E_FAILURE; 1889 } 1890 1891 if 
(!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx)) 1892 return QDF_STATUS_SUCCESS; 1893 1894 pdev->ipa_uc_op_cb = op_cb; 1895 pdev->usr_ctxt = usr_ctxt; 1896 1897 return QDF_STATUS_SUCCESS; 1898 } 1899 1900 void dp_ipa_deregister_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 1901 { 1902 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1903 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1904 1905 if (!pdev) { 1906 dp_err("Invalid instance"); 1907 return; 1908 } 1909 1910 dp_debug("Deregister OP handler callback"); 1911 pdev->ipa_uc_op_cb = NULL; 1912 pdev->usr_ctxt = NULL; 1913 } 1914 1915 QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 1916 { 1917 /* TBD */ 1918 return QDF_STATUS_SUCCESS; 1919 } 1920 1921 /** 1922 * dp_tx_send_ipa_data_frame() - send IPA data frame 1923 * @soc_hdl: datapath soc handle 1924 * @vdev_id: id of the virtual device 1925 * @skb: skb to transmit 1926 * 1927 * Return: skb/ NULL is for success 1928 */ 1929 qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 1930 qdf_nbuf_t skb) 1931 { 1932 qdf_nbuf_t ret; 1933 1934 /* Terminate the (single-element) list of tx frames */ 1935 qdf_nbuf_set_next(skb, NULL); 1936 ret = dp_tx_send(soc_hdl, vdev_id, skb); 1937 if (ret) { 1938 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1939 "%s: Failed to tx", __func__); 1940 return ret; 1941 } 1942 1943 return NULL; 1944 } 1945 1946 #ifdef QCA_IPA_LL_TX_FLOW_CONTROL 1947 /** 1948 * dp_ipa_is_target_ready() - check if target is ready or not 1949 * @soc: datapath soc handle 1950 * 1951 * Return: true if target is ready 1952 */ 1953 static inline 1954 bool dp_ipa_is_target_ready(struct dp_soc *soc) 1955 { 1956 if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET) 1957 return false; 1958 else 1959 return true; 1960 } 1961 #else 1962 static inline 1963 bool dp_ipa_is_target_ready(struct dp_soc *soc) 1964 { 1965 return true; 1966 } 1967 #endif 1968 1969 QDF_STATUS 
dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 1970 { 1971 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1972 struct dp_pdev *pdev = 1973 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1974 uint32_t ix0; 1975 uint32_t ix2; 1976 uint8_t ix_map[8]; 1977 1978 if (!pdev) { 1979 dp_err("Invalid instance"); 1980 return QDF_STATUS_E_FAILURE; 1981 } 1982 1983 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 1984 return QDF_STATUS_SUCCESS; 1985 1986 if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle))) 1987 return QDF_STATUS_E_AGAIN; 1988 1989 if (!dp_ipa_is_target_ready(soc)) 1990 return QDF_STATUS_E_AGAIN; 1991 1992 /* Call HAL API to remap REO rings to REO2IPA ring */ 1993 ix_map[0] = REO_REMAP_SW1; 1994 ix_map[1] = REO_REMAP_SW4; 1995 ix_map[2] = REO_REMAP_SW1; 1996 if (wlan_ipa_is_vlan_enabled()) 1997 ix_map[3] = REO_REMAP_SW3; 1998 else 1999 ix_map[3] = REO_REMAP_SW4; 2000 ix_map[4] = REO_REMAP_SW4; 2001 ix_map[5] = REO_REMAP_RELEASE; 2002 ix_map[6] = REO_REMAP_FW; 2003 ix_map[7] = REO_REMAP_FW; 2004 2005 ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0, 2006 ix_map); 2007 2008 if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) { 2009 ix_map[0] = REO_REMAP_SW4; 2010 ix_map[1] = REO_REMAP_SW4; 2011 ix_map[2] = REO_REMAP_SW4; 2012 ix_map[3] = REO_REMAP_SW4; 2013 ix_map[4] = REO_REMAP_SW4; 2014 ix_map[5] = REO_REMAP_SW4; 2015 ix_map[6] = REO_REMAP_SW4; 2016 ix_map[7] = REO_REMAP_SW4; 2017 2018 ix2 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX2, 2019 ix_map); 2020 2021 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, 2022 &ix2, &ix2); 2023 dp_ipa_reo_remap_history_add(ix0, ix2, ix2); 2024 } else { 2025 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, 2026 NULL, NULL); 2027 dp_ipa_reo_remap_history_add(ix0, 0, 0); 2028 } 2029 2030 return QDF_STATUS_SUCCESS; 2031 } 2032 2033 QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 2034 { 2035 struct dp_soc *soc = 
		cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint8_t ix0_map[8];
	uint32_t ix0;
	uint32_t ix1;
	uint32_t ix2;
	uint32_t ix3;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* Do not touch REO registers while the target is down/resetting */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	if (!dp_ipa_is_target_ready(soc))
		return QDF_STATUS_E_AGAIN;

	/* Default (non-IPA) REO destination map */
	ix0_map[0] = REO_REMAP_SW1;
	ix0_map[1] = REO_REMAP_SW1;
	ix0_map[2] = REO_REMAP_SW2;
	ix0_map[3] = REO_REMAP_SW3;
	ix0_map[4] = REO_REMAP_SW2;
	ix0_map[5] = REO_REMAP_RELEASE;
	ix0_map[6] = REO_REMAP_FW;
	ix0_map[7] = REO_REMAP_FW;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0,
				    ix0_map);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		/* Restore the default hash-based remap values (ix1 unused) */
		dp_reo_remap_config(soc, &ix1, &ix2, &ix3);

		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix3);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix3);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}

/* This should be configurable per H/W configuration enable status */
#define L3_HEADER_PADDING	2

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
	defined(CONFIG_IPA_WDI_UNIFIED_API)

#if !defined(QCA_LL_TX_FLOW_CONTROL_V2) && !defined(QCA_IPA_LL_TX_FLOW_CONTROL)
/* Copy the per-interface MCC sys-pipe params into the WDI connect request */
static inline void dp_setup_mcc_sys_pipes(
		qdf_ipa_sys_connect_params_t *sys_in,
		qdf_ipa_wdi_conn_in_params_t *pipe_in)
{
	int i = 0;
	/* Setup MCC sys pipe */
	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) =
			DP_IPA_MAX_IFACE;
	for (i = 0; i < DP_IPA_MAX_IFACE; i++)
		memcpy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[i],
		       &sys_in[i], sizeof(qdf_ipa_sys_connect_params_t));
}
#else
/* With LL Tx flow control no MCC sys pipes are needed */
static inline void dp_setup_mcc_sys_pipes(
		qdf_ipa_sys_connect_params_t *sys_in,
		qdf_ipa_wdi_conn_in_params_t *pipe_in)
{
	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0;
}
#endif

/*
 * dp_ipa_wdi_tx_params() - Fill WDI Tx pipe setup info (non-SMMU)
 * @soc: data path soc handle
 * @ipa_res: ipa resource pointer
 * @tx: pipe setup info to fill
 * @over_gsi: flag for IPA offload over gsi
 *
 * Transfer ring = WBM Tx completion ring, event ring = TCL ring.
 */
static void dp_ipa_wdi_tx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *tx,
				 bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_num_alloc_buffer;

	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
}

static void dp_ipa_wdi_rx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *rx,
				 bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN2_PROD;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN1_PROD;

	/* Transfer ring = REO2IPA ring, event ring = Rx refill ring */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);

	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false;

	/* Skip the Rx TLVs plus the 2-byte L3 alignment padding */
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) =
		soc->rx_pkt_tlv_size + L3_HEADER_PADDING;
}

/*
 * dp_ipa_wdi_tx_smmu_params() - Fill WDI Tx pipe setup info (SMMU S1 enabled)
 * @soc: data path soc handle
 * @ipa_res: ipa resource pointer
 * @tx_smmu: smmu pipe setup info to fill
 * @over_gsi: flag for IPA offload over gsi
 * @hdl: ipa registered handle (selects the IPA client for MLO/second soc)
 */
static void
dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu,
			  bool over_gsi,
			  qdf_ipa_wdi_hdl_t hdl)
{
	if (over_gsi) {
		if (hdl == DP_IPA_HDL_FIRST)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
				IPA_CLIENT_WLAN2_CONS;
		else if (hdl == DP_IPA_HDL_SECOND)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
				IPA_CLIENT_WLAN4_CONS;
	} else {
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN1_CONS;
	}

	/* SMMU mode hands the ring to IPA as a scatter-gather table */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
		     &ipa_res->tx_comp_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
		     &ipa_res->tx_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;

	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
		ipa_res->tx_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

}

/*
 * dp_ipa_wdi_rx_smmu_params() - Fill WDI Rx pipe setup info (SMMU S1 enabled)
 * @soc: data path soc handle
 * @ipa_res: ipa resource pointer
 * @rx_smmu: smmu pipe setup info to fill
 * @over_gsi: flag for IPA offload over gsi
 * @hdl: ipa registered handle (selects the IPA client for MLO/second soc)
 */
static void
dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
			  bool over_gsi,
			  qdf_ipa_wdi_hdl_t hdl)
{
	if (over_gsi) {
		if (hdl == DP_IPA_HDL_FIRST)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
				IPA_CLIENT_WLAN2_PROD;
		else if (hdl == DP_IPA_HDL_SECOND)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
				IPA_CLIENT_WLAN3_PROD;
	} else {
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
			IPA_CLIENT_WLAN1_PROD;
	}

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
		     &ipa_res->rx_rdy_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
		     &ipa_res->rx_refill_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;

	/* Skip the Rx TLVs plus the 2-byte L3 alignment padding */
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
		soc->rx_pkt_tlv_size + L3_HEADER_PADDING;
}

#ifdef IPA_WDI3_VLAN_SUPPORT
/*
 * dp_ipa_wdi_rx_alt_pipe_smmu_params() - Setup 2nd rx pipe smmu params
 * @soc: data path soc handle
 * @ipa_res: ipa resource pointer
 * @rx_smmu: smmu pipe info handle
 * @over_gsi: flag for IPA offload over gsi
 * @hdl: ipa registered handle
 *
 * Return: none
 */
static void
dp_ipa_wdi_rx_alt_pipe_smmu_params(struct dp_soc *soc,
				   struct dp_ipa_resources *ipa_res,
				   qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
				   bool over_gsi,
				   qdf_ipa_wdi_hdl_t hdl)
{
	/* The alternate pipe only exists when VLAN offload is enabled */
	if (!wlan_ipa_is_vlan_enabled())
		return;

	if (over_gsi) {
		if (hdl == DP_IPA_HDL_FIRST)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
				IPA_CLIENT_WLAN2_PROD1;
		else if (hdl == DP_IPA_HDL_SECOND)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
				IPA_CLIENT_WLAN3_PROD1;
	} else {
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
			IPA_CLIENT_WLAN1_PROD;
	}

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
		     &ipa_res->rx_alt_rdy_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_alt_rdy_ring.mem_info);
	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
		     &ipa_res->rx_alt_refill_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_alt_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;

	/* Skip the Rx TLVs plus the 2-byte L3 alignment padding */
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
		soc->rx_pkt_tlv_size + L3_HEADER_PADDING;
}

/*
 * dp_ipa_wdi_rx_alt_pipe_params() - Setup 2nd rx pipe params (non-SMMU)
 * @soc: data path soc handle
 * @ipa_res: ipa resource pointer
 * @rx: pipe info handle
 * @over_gsi: flag for IPA offload over gsi
 * @hdl: ipa registered handle
 *
 * Return: none
 */
static void dp_ipa_wdi_rx_alt_pipe_params(struct dp_soc *soc,
					  struct dp_ipa_resources *ipa_res,
					  qdf_ipa_wdi_pipe_setup_info_t *rx,
					  bool over_gsi,
					  qdf_ipa_wdi_hdl_t hdl)
{
	/* The alternate pipe only exists when VLAN offload is enabled */
	if (!wlan_ipa_is_vlan_enabled())
		return;

	if (over_gsi) {
		if (hdl == DP_IPA_HDL_FIRST)
			QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
				IPA_CLIENT_WLAN2_PROD1;
		else if (hdl == DP_IPA_HDL_SECOND)
			QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
				IPA_CLIENT_WLAN3_PROD1;
	} else {
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
			IPA_CLIENT_WLAN1_PROD;
	}

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_alt_rdy_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
qdf_mem_get_dma_size(soc->osdev, 2383 &ipa_res->rx_alt_rdy_ring.mem_info); 2384 2385 /* REO Tail Pointer Address */ 2386 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) = 2387 soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr; 2388 QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true; 2389 2390 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) = 2391 qdf_mem_get_dma_addr(soc->osdev, 2392 &ipa_res->rx_alt_refill_ring.mem_info); 2393 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) = 2394 qdf_mem_get_dma_size(soc->osdev, 2395 &ipa_res->rx_alt_refill_ring.mem_info); 2396 2397 /* FW Head Pointer Address */ 2398 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) = 2399 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr; 2400 QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false; 2401 2402 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = 2403 soc->rx_pkt_tlv_size + L3_HEADER_PADDING; 2404 } 2405 2406 /* 2407 * dp_ipa_setup_rx_alt_pipe() - Setup 2nd rx pipe for IPA offload 2408 * @soc: data path soc handle 2409 * @res: ipa resource pointer 2410 * @in: pipe in handle 2411 * @over_gsi: flag for IPA offload over gsi 2412 * @hdl: ipa registered handle 2413 * 2414 * Return: none 2415 */ 2416 static void dp_ipa_setup_rx_alt_pipe(struct dp_soc *soc, 2417 struct dp_ipa_resources *res, 2418 qdf_ipa_wdi_conn_in_params_t *in, 2419 bool over_gsi, 2420 qdf_ipa_wdi_hdl_t hdl) 2421 { 2422 qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu = NULL; 2423 qdf_ipa_wdi_pipe_setup_info_t *rx = NULL; 2424 qdf_ipa_ep_cfg_t *rx_cfg; 2425 2426 if (!wlan_ipa_is_vlan_enabled()) 2427 return; 2428 2429 QDF_IPA_WDI_CONN_IN_PARAMS_IS_RX1_USED(in) = true; 2430 if (qdf_mem_smmu_s1_enabled(soc->osdev)) { 2431 rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_ALT_SMMU(in); 2432 rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu); 2433 dp_ipa_wdi_rx_alt_pipe_smmu_params(soc, res, rx_smmu, 2434 over_gsi, hdl); 2435 } else { 2436 rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_ALT(in); 2437 rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx); 2438 
dp_ipa_wdi_rx_alt_pipe_params(soc, res, rx, over_gsi, hdl); 2439 } 2440 2441 QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT; 2442 /* Update with wds len(96) + 4 if wds support is enabled */ 2443 if (ucfg_ipa_is_wds_enabled()) 2444 QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN_AST_VLAN; 2445 else 2446 QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN; 2447 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1; 2448 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0; 2449 QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0; 2450 QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0; 2451 QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1; 2452 QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC; 2453 QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true; 2454 } 2455 2456 /* 2457 * dp_ipa_set_rx_alt_pipe_db() - Setup 2nd rx pipe doorbell 2458 * @res: ipa resource pointer 2459 * @out: pipe out handle 2460 * 2461 * Return: none 2462 */ 2463 static void dp_ipa_set_rx_alt_pipe_db(struct dp_ipa_resources *res, 2464 qdf_ipa_wdi_conn_out_params_t *out) 2465 { 2466 if (!wlan_ipa_is_vlan_enabled()) 2467 return; 2468 2469 res->rx_alt_ready_doorbell_paddr = 2470 QDF_IPA_WDI_CONN_OUT_PARAMS_RX_ALT_UC_DB_PA(out); 2471 dp_debug("Setting DB 0x%x for RX alt pipe", 2472 res->rx_alt_ready_doorbell_paddr); 2473 } 2474 #else 2475 static inline 2476 void dp_ipa_setup_rx_alt_pipe(struct dp_soc *soc, 2477 struct dp_ipa_resources *res, 2478 qdf_ipa_wdi_conn_in_params_t *in, 2479 bool over_gsi, 2480 qdf_ipa_wdi_hdl_t hdl) 2481 { } 2482 2483 static inline 2484 void dp_ipa_set_rx_alt_pipe_db(struct dp_ipa_resources *res, 2485 qdf_ipa_wdi_conn_out_params_t *out) 2486 { } 2487 #endif 2488 2489 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2490 void *ipa_i2w_cb, void *ipa_w2i_cb, 2491 void *ipa_wdi_meter_notifier_cb, 2492 uint32_t ipa_desc_size, void *ipa_priv, 2493 bool is_rm_enabled, uint32_t *tx_pipe_handle, 2494 uint32_t *rx_pipe_handle, bool is_smmu_enabled, 2495 
			qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi,
			qdf_ipa_wdi_hdl_t hdl, qdf_ipa_wdi_hdl_t id,
			void *ipa_ast_notify_cb)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;
	qdf_ipa_ep_cfg_t *tx_cfg;
	qdf_ipa_ep_cfg_t *rx_cfg;
	qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
	qdf_ipa_wdi_pipe_setup_info_t *rx = NULL;
	qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu;
	qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu = NULL;
	qdf_ipa_wdi_conn_in_params_t *pipe_in = NULL;
	qdf_ipa_wdi_conn_out_params_t pipe_out;
	int ret;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* conn-in params are large; heap-allocate to keep the stack small */
	pipe_in = qdf_mem_malloc(sizeof(*pipe_in));
	if (!pipe_in)
		return QDF_STATUS_E_NOMEM;

	qdf_mem_zero(&pipe_out, sizeof(pipe_out));

	if (is_smmu_enabled)
		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = true;
	else
		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = false;

	dp_setup_mcc_sys_pipes(sys_in, pipe_in);

	/* TX PIPE */
	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) {
		tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(pipe_in);
		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
	} else {
		tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(pipe_in);
		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx);
	}

	QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
	QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
	QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;

	/**
	 * Transfer Ring: WBM Ring
	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
	 * Event Ring: TCL ring
	 * Event Ring Doorbell PA: TCL Head Pointer Address
	 */
	if (is_smmu_enabled)
		dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi, id);
	else
		dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi);

	dp_ipa_setup_tx_alt_pipe(soc, ipa_res, pipe_in);

	/* RX PIPE */
	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) {
		rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(pipe_in);
		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
	} else {
		rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(pipe_in);
		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx);
	}

	QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT;
	/* Rx header is longer when the AST (wds) metadata is prepended */
	if (ucfg_ipa_is_wds_enabled())
		QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN_AST;
	else
		QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN;

	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
	QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;

	/**
	 * Transfer Ring: REO Ring
	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
	 * Event Ring: FW ring
	 * Event Ring Doorbell PA: FW Head Pointer Address
	 */
	if (is_smmu_enabled)
		dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi, id);
	else
		dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi);

	/* setup 2nd rx pipe */
	dp_ipa_setup_rx_alt_pipe(soc, ipa_res, pipe_in, over_gsi, id);

	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(pipe_in) = ipa_w2i_cb;
	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(pipe_in) = ipa_priv;
	QDF_IPA_WDI_CONN_IN_PARAMS_HANDLE(pipe_in) = hdl;
	dp_ipa_ast_notify_cb(pipe_in, ipa_ast_notify_cb);

	/* Connect WDI IPA PIPEs */
	ret = qdf_ipa_wdi_conn_pipes(pipe_in, &pipe_out);

	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
			  __func__, ret);
		qdf_mem_free(pipe_in);
		return QDF_STATUS_E_FAILURE;
	}

	/* IPA uC Doorbell registers */
	dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x",
		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));

	dp_ipa_set_pipe_db(ipa_res, &pipe_out);
	dp_ipa_set_rx_alt_pipe_db(ipa_res, &pipe_out);

	ipa_res->is_db_ddr_mapped =
		QDF_IPA_WDI_CONN_OUT_PARAMS_IS_DB_DDR_MAPPED(&pipe_out);

	soc->ipa_first_tx_db_access = true;
	qdf_mem_free(pipe_in);

	qdf_spinlock_create(&soc->ipa_rx_buf_map_lock);
	soc->ipa_rx_buf_map_lock_initialized = true;

	return QDF_STATUS_SUCCESS;
}

#ifdef IPA_WDI3_VLAN_SUPPORT
/*
 * dp_ipa_set_rx1_used() - Set rx1 used flag for 2nd rx offload ring
 * @in: pipe in handle
 *
 * Return: none
 */
static inline
void dp_ipa_set_rx1_used(qdf_ipa_wdi_reg_intf_in_params_t *in)
{
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_RX1_USED(in) = true;
}

/*
 * dp_ipa_set_v4_vlan_hdr() - Set v4 vlan hdr
 * @in: pipe in handle
 * @hdr: pointer to hdr
 *
 * Return: none
 */
static inline
void dp_ipa_set_v4_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in,
			    qdf_ipa_wdi_hdr_info_t *hdr)
{
	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(in)[IPA_IP_v4_VLAN]),
		     hdr, sizeof(qdf_ipa_wdi_hdr_info_t));
}

/*
 * dp_ipa_set_v6_vlan_hdr() - Set v6 vlan hdr
 * @in: pipe in handle
 * @hdr: pointer to hdr
 *
 * Return: none
 */
static inline
void dp_ipa_set_v6_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in,
			    qdf_ipa_wdi_hdr_info_t *hdr)
{
	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(in)[IPA_IP_v6_VLAN]),
		     hdr, sizeof(qdf_ipa_wdi_hdr_info_t));
}
#else
/* Stubs: no-ops when VLAN offload support is not compiled in */
static inline
void dp_ipa_set_rx1_used(qdf_ipa_wdi_reg_intf_in_params_t *in)
{ }

static inline
void dp_ipa_set_v4_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in,
			    qdf_ipa_wdi_hdr_info_t *hdr)
{ }

static inline
void dp_ipa_set_v6_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in,
			    qdf_ipa_wdi_hdr_info_t *hdr)
{ }
#endif

#ifdef IPA_WDS_EASYMESH_FEATURE
/**
 * dp_ipa_set_wdi_hdr_type() - Set wdi hdr type for IPA
 * @hdr_info: Header info
 *
 * Picks the AST (wds) Ethernet-II header type when wds is enabled.
 *
 * Return: None
 */
static inline void
dp_ipa_set_wdi_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info)
{
	if (ucfg_ipa_is_wds_enabled())
		QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) =
			IPA_HDR_L2_ETHERNET_II_AST;
	else
		QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) =
			IPA_HDR_L2_ETHERNET_II;
}
#else
static inline void
dp_ipa_set_wdi_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info)
{
	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) = IPA_HDR_L2_ETHERNET_II;
}
#endif

#ifdef IPA_WDI3_VLAN_SUPPORT
/**
 * dp_ipa_set_wdi_vlan_hdr_type() - Set wdi vlan hdr type for IPA
 * @hdr_info: Header info
 *
 * Picks the AST (wds) 802.1Q header type when wds is enabled.
 *
 * Return: None
 */
static inline void
dp_ipa_set_wdi_vlan_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info)
{
	if (ucfg_ipa_is_wds_enabled())
		QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) =
			IPA_HDR_L2_802_1Q_AST;
	else
		QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) =
			IPA_HDR_L2_802_1Q;
}
#else
static inline void
dp_ipa_set_wdi_vlan_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info)
{ }
#endif

/**
 * dp_ipa_setup_iface() - Setup IPA header
and register interface 2746 * @ifname: Interface name 2747 * @mac_addr: Interface MAC address 2748 * @prod_client: IPA prod client type 2749 * @cons_client: IPA cons client type 2750 * @session_id: Session ID 2751 * @is_ipv6_enabled: Is IPV6 enabled or not 2752 * @hdl: IPA handle 2753 * 2754 * Return: QDF_STATUS 2755 */ 2756 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr, 2757 qdf_ipa_client_type_t prod_client, 2758 qdf_ipa_client_type_t cons_client, 2759 uint8_t session_id, bool is_ipv6_enabled, 2760 qdf_ipa_wdi_hdl_t hdl) 2761 { 2762 qdf_ipa_wdi_reg_intf_in_params_t in; 2763 qdf_ipa_wdi_hdr_info_t hdr_info; 2764 struct dp_ipa_uc_tx_hdr uc_tx_hdr; 2765 struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6; 2766 struct dp_ipa_uc_tx_vlan_hdr uc_tx_vlan_hdr; 2767 struct dp_ipa_uc_tx_vlan_hdr uc_tx_vlan_hdr_v6; 2768 int ret = -EINVAL; 2769 2770 qdf_mem_zero(&in, sizeof(qdf_ipa_wdi_reg_intf_in_params_t)); 2771 2772 dp_debug("Add Partial hdr: %s, "QDF_MAC_ADDR_FMT, ifname, 2773 QDF_MAC_ADDR_REF(mac_addr)); 2774 qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 2775 qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr); 2776 2777 /* IPV4 header */ 2778 uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP); 2779 2780 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr; 2781 QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN; 2782 dp_ipa_set_wdi_hdr_type(&hdr_info); 2783 2784 QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) = 2785 DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET; 2786 2787 QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname; 2788 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]), 2789 &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 2790 QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client; 2791 QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1; 2792 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = WLAN_IPA_META_DATA_MASK; 2793 QDF_IPA_WDI_REG_INTF_IN_PARAMS_HANDLE(&in) = hdl; 2794 
dp_ipa_setup_iface_session_id(&in, session_id); 2795 dp_debug("registering for session_id: %u", session_id); 2796 2797 /* IPV6 header */ 2798 if (is_ipv6_enabled) { 2799 qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr, 2800 DP_IPA_UC_WLAN_TX_HDR_LEN); 2801 uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6); 2802 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6; 2803 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]), 2804 &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 2805 } 2806 2807 if (wlan_ipa_is_vlan_enabled()) { 2808 /* Add vlan specific headers if vlan supporti is enabled */ 2809 qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 2810 dp_ipa_set_rx1_used(&in); 2811 qdf_ether_addr_copy(uc_tx_vlan_hdr.eth.h_source, mac_addr); 2812 /* IPV4 Vlan header */ 2813 uc_tx_vlan_hdr.eth.h_vlan_proto = qdf_htons(ETH_P_8021Q); 2814 uc_tx_vlan_hdr.eth.h_vlan_encapsulated_proto = qdf_htons(ETH_P_IP); 2815 2816 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = 2817 (uint8_t *)&uc_tx_vlan_hdr; 2818 QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = 2819 DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN; 2820 dp_ipa_set_wdi_vlan_hdr_type(&hdr_info); 2821 2822 QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) = 2823 DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET; 2824 2825 dp_ipa_set_v4_vlan_hdr(&in, &hdr_info); 2826 2827 /* IPV6 Vlan header */ 2828 if (is_ipv6_enabled) { 2829 qdf_mem_copy(&uc_tx_vlan_hdr_v6, &uc_tx_vlan_hdr, 2830 DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN); 2831 uc_tx_vlan_hdr_v6.eth.h_vlan_proto = 2832 qdf_htons(ETH_P_8021Q); 2833 uc_tx_vlan_hdr_v6.eth.h_vlan_encapsulated_proto = 2834 qdf_htons(ETH_P_IPV6); 2835 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = 2836 (uint8_t *)&uc_tx_vlan_hdr_v6; 2837 dp_ipa_set_v6_vlan_hdr(&in, &hdr_info); 2838 } 2839 } 2840 2841 ret = qdf_ipa_wdi_reg_intf(&in); 2842 if (ret) { 2843 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2844 "%s: ipa_wdi_reg_intf: register IPA interface falied: ret=%d", 2845 __func__, ret); 2846 return QDF_STATUS_E_FAILURE; 2847 } 2848 2849 
return QDF_STATUS_SUCCESS; 2850 } 2851 2852 #else /* !CONFIG_IPA_WDI_UNIFIED_API */ 2853 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2854 void *ipa_i2w_cb, void *ipa_w2i_cb, 2855 void *ipa_wdi_meter_notifier_cb, 2856 uint32_t ipa_desc_size, void *ipa_priv, 2857 bool is_rm_enabled, uint32_t *tx_pipe_handle, 2858 uint32_t *rx_pipe_handle) 2859 { 2860 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 2861 struct dp_pdev *pdev = 2862 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2863 struct dp_ipa_resources *ipa_res; 2864 qdf_ipa_wdi_pipe_setup_info_t *tx; 2865 qdf_ipa_wdi_pipe_setup_info_t *rx; 2866 qdf_ipa_wdi_conn_in_params_t pipe_in; 2867 qdf_ipa_wdi_conn_out_params_t pipe_out; 2868 struct tcl_data_cmd *tcl_desc_ptr; 2869 uint8_t *desc_addr; 2870 uint32_t desc_size; 2871 int ret; 2872 2873 if (!pdev) { 2874 dp_err("Invalid instance"); 2875 return QDF_STATUS_E_FAILURE; 2876 } 2877 2878 ipa_res = &pdev->ipa_resource; 2879 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) 2880 return QDF_STATUS_SUCCESS; 2881 2882 qdf_mem_zero(&tx, sizeof(qdf_ipa_wdi_pipe_setup_info_t)); 2883 qdf_mem_zero(&rx, sizeof(qdf_ipa_wdi_pipe_setup_info_t)); 2884 qdf_mem_zero(&pipe_in, sizeof(pipe_in)); 2885 qdf_mem_zero(&pipe_out, sizeof(pipe_out)); 2886 2887 /* TX PIPE */ 2888 /** 2889 * Transfer Ring: WBM Ring 2890 * Transfer Ring Doorbell PA: WBM Tail Pointer Address 2891 * Event Ring: TCL ring 2892 * Event Ring Doorbell PA: TCL Head Pointer Address 2893 */ 2894 tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in); 2895 QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT; 2896 QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN; 2897 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0; 2898 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0; 2899 QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0; 2900 QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC; 2901 QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true; 2902 QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = 
IPA_CLIENT_WLAN1_CONS; 2903 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) = 2904 ipa_res->tx_comp_ring_base_paddr; 2905 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) = 2906 ipa_res->tx_comp_ring_size; 2907 /* WBM Tail Pointer Address */ 2908 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) = 2909 soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr; 2910 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) = 2911 ipa_res->tx_ring_base_paddr; 2912 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size; 2913 /* TCL Head Pointer Address */ 2914 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) = 2915 soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr; 2916 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) = 2917 ipa_res->tx_num_alloc_buffer; 2918 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0; 2919 2920 /* Preprogram TCL descriptor */ 2921 desc_addr = 2922 (uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx); 2923 desc_size = sizeof(struct tcl_data_cmd); 2924 HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size); 2925 tcl_desc_ptr = (struct tcl_data_cmd *) 2926 (QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1); 2927 tcl_desc_ptr->buf_addr_info.return_buffer_manager = 2928 HAL_RX_BUF_RBM_SW2_BM; 2929 tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */ 2930 tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET; 2931 tcl_desc_ptr->packet_offset = 2; /* padding for alignment */ 2932 2933 /* RX PIPE */ 2934 /** 2935 * Transfer Ring: REO Ring 2936 * Transfer Ring Doorbell PA: REO Tail Pointer Address 2937 * Event Ring: FW ring 2938 * Event Ring Doorbell PA: FW Head Pointer Address 2939 */ 2940 rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in); 2941 QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT; 2942 QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN; 2943 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0; 2944 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0; 2945 QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0; 2946 
QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0; 2947 QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1; 2948 QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC; 2949 QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true; 2950 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD; 2951 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) = 2952 ipa_res->rx_rdy_ring_base_paddr; 2953 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) = 2954 ipa_res->rx_rdy_ring_size; 2955 /* REO Tail Pointer Address */ 2956 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) = 2957 soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr; 2958 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) = 2959 ipa_res->rx_refill_ring_base_paddr; 2960 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) = 2961 ipa_res->rx_refill_ring_size; 2962 /* FW Head Pointer Address */ 2963 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) = 2964 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr; 2965 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = soc->rx_pkt_tlv_size + 2966 L3_HEADER_PADDING; 2967 QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb; 2968 QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv; 2969 2970 /* Connect WDI IPA PIPE */ 2971 ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out); 2972 if (ret) { 2973 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2974 "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d", 2975 __func__, ret); 2976 return QDF_STATUS_E_FAILURE; 2977 } 2978 2979 /* IPA uC Doorbell registers */ 2980 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 2981 "%s: Tx DB PA=0x%x, Rx DB PA=0x%x", 2982 __func__, 2983 (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out), 2984 (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out)); 2985 2986 ipa_res->tx_comp_doorbell_paddr = 2987 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out); 2988 ipa_res->tx_comp_doorbell_vaddr = 2989 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out); 2990 ipa_res->rx_ready_doorbell_paddr = 2991 
QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out); 2992 2993 soc->ipa_first_tx_db_access = true; 2994 2995 qdf_spinlock_create(&soc->ipa_rx_buf_map_lock); 2996 soc->ipa_rx_buf_map_lock_initialized = true; 2997 2998 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 2999 "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK", 3000 __func__, 3001 "transfer_ring_base_pa", 3002 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx), 3003 "transfer_ring_size", 3004 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx), 3005 "transfer_ring_doorbell_pa", 3006 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx), 3007 "event_ring_base_pa", 3008 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx), 3009 "event_ring_size", 3010 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx), 3011 "event_ring_doorbell_pa", 3012 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx), 3013 "num_pkt_buffers", 3014 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx), 3015 "tx_comp_doorbell_paddr", 3016 (void *)ipa_res->tx_comp_doorbell_paddr); 3017 3018 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 3019 "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK", 3020 __func__, 3021 "transfer_ring_base_pa", 3022 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx), 3023 "transfer_ring_size", 3024 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx), 3025 "transfer_ring_doorbell_pa", 3026 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx), 3027 "event_ring_base_pa", 3028 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx), 3029 "event_ring_size", 3030 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx), 3031 "event_ring_doorbell_pa", 3032 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx), 3033 "num_pkt_buffers", 3034 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx), 3035 "tx_comp_doorbell_paddr", 3036 (void *)ipa_res->rx_ready_doorbell_paddr); 3037 3038 return QDF_STATUS_SUCCESS; 3039 } 3040 3041 /** 3042 * dp_ipa_setup_iface() - Setup IPA header and register 
interface 3043 * @ifname: Interface name 3044 * @mac_addr: Interface MAC address 3045 * @prod_client: IPA prod client type 3046 * @cons_client: IPA cons client type 3047 * @session_id: Session ID 3048 * @is_ipv6_enabled: Is IPV6 enabled or not 3049 * @hdl: IPA handle 3050 * 3051 * Return: QDF_STATUS 3052 */ 3053 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr, 3054 qdf_ipa_client_type_t prod_client, 3055 qdf_ipa_client_type_t cons_client, 3056 uint8_t session_id, bool is_ipv6_enabled, 3057 qdf_ipa_wdi_hdl_t hdl) 3058 { 3059 qdf_ipa_wdi_reg_intf_in_params_t in; 3060 qdf_ipa_wdi_hdr_info_t hdr_info; 3061 struct dp_ipa_uc_tx_hdr uc_tx_hdr; 3062 struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6; 3063 int ret = -EINVAL; 3064 3065 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 3066 "%s: Add Partial hdr: %s, "QDF_MAC_ADDR_FMT, 3067 __func__, ifname, QDF_MAC_ADDR_REF(mac_addr)); 3068 3069 qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 3070 qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr); 3071 3072 /* IPV4 header */ 3073 uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP); 3074 3075 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr; 3076 QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN; 3077 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II; 3078 QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) = 3079 DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET; 3080 3081 QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname; 3082 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]), 3083 &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 3084 QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1; 3085 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) = 3086 htonl(session_id << 16); 3087 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000); 3088 3089 /* IPV6 header */ 3090 if (is_ipv6_enabled) { 3091 qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr, 3092 DP_IPA_UC_WLAN_TX_HDR_LEN); 3093 uc_tx_hdr_v6.eth.h_proto = 
qdf_htons(ETH_P_IPV6); 3094 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6; 3095 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]), 3096 &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); 3097 } 3098 3099 ret = qdf_ipa_wdi_reg_intf(&in); 3100 if (ret) { 3101 dp_err("ipa_wdi_reg_intf: register IPA interface falied: ret=%d", 3102 ret); 3103 return QDF_STATUS_E_FAILURE; 3104 } 3105 3106 return QDF_STATUS_SUCCESS; 3107 } 3108 3109 #endif /* CONFIG_IPA_WDI_UNIFIED_API */ 3110 3111 /** 3112 * dp_ipa_cleanup() - Disconnect IPA pipes 3113 * @soc_hdl: dp soc handle 3114 * @pdev_id: dp pdev id 3115 * @tx_pipe_handle: Tx pipe handle 3116 * @rx_pipe_handle: Rx pipe handle 3117 * @hdl: IPA handle 3118 * 3119 * Return: QDF_STATUS 3120 */ 3121 QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3122 uint32_t tx_pipe_handle, uint32_t rx_pipe_handle, 3123 qdf_ipa_wdi_hdl_t hdl) 3124 { 3125 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3126 QDF_STATUS status = QDF_STATUS_SUCCESS; 3127 struct dp_pdev *pdev; 3128 int ret; 3129 3130 ret = qdf_ipa_wdi_disconn_pipes(hdl); 3131 if (ret) { 3132 dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d", 3133 ret); 3134 status = QDF_STATUS_E_FAILURE; 3135 } 3136 3137 if (soc->ipa_rx_buf_map_lock_initialized) { 3138 qdf_spinlock_destroy(&soc->ipa_rx_buf_map_lock); 3139 soc->ipa_rx_buf_map_lock_initialized = false; 3140 } 3141 3142 pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 3143 if (qdf_unlikely(!pdev)) { 3144 dp_err_rl("Invalid pdev for pdev_id %d", pdev_id); 3145 status = QDF_STATUS_E_FAILURE; 3146 goto exit; 3147 } 3148 3149 dp_ipa_unmap_ring_doorbell_paddr(pdev); 3150 dp_ipa_unmap_rx_alt_ring_doorbell_paddr(pdev); 3151 exit: 3152 return status; 3153 } 3154 3155 /** 3156 * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface 3157 * @ifname: Interface name 3158 * @is_ipv6_enabled: Is IPV6 enabled or not 3159 * @hdl: IPA handle 3160 * 3161 * Return: 
QDF_STATUS 3162 */ 3163 QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled, 3164 qdf_ipa_wdi_hdl_t hdl) 3165 { 3166 int ret; 3167 3168 ret = qdf_ipa_wdi_dereg_intf(ifname, hdl); 3169 if (ret) { 3170 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3171 "%s: ipa_wdi_dereg_intf: IPA pipe deregistration failed: ret=%d", 3172 __func__, ret); 3173 return QDF_STATUS_E_FAILURE; 3174 } 3175 3176 return QDF_STATUS_SUCCESS; 3177 } 3178 3179 #ifdef IPA_SET_RESET_TX_DB_PA 3180 #define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res) \ 3181 dp_ipa_set_tx_doorbell_paddr((soc), (ipa_res)) 3182 #define DP_IPA_RESET_TX_DB_PA(soc, ipa_res) \ 3183 dp_ipa_reset_tx_doorbell_pa((soc), (ipa_res)) 3184 #else 3185 #define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res) 3186 #define DP_IPA_RESET_TX_DB_PA(soc, ipa_res) 3187 #endif 3188 3189 QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3190 qdf_ipa_wdi_hdl_t hdl) 3191 { 3192 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3193 struct dp_pdev *pdev = 3194 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 3195 struct dp_ipa_resources *ipa_res; 3196 QDF_STATUS result; 3197 3198 if (!pdev) { 3199 dp_err("Invalid instance"); 3200 return QDF_STATUS_E_FAILURE; 3201 } 3202 3203 ipa_res = &pdev->ipa_resource; 3204 3205 qdf_atomic_set(&soc->ipa_pipes_enabled, 1); 3206 DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res); 3207 dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true); 3208 3209 result = qdf_ipa_wdi_enable_pipes(hdl); 3210 if (result) { 3211 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3212 "%s: Enable WDI PIPE fail, code %d", 3213 __func__, result); 3214 qdf_atomic_set(&soc->ipa_pipes_enabled, 0); 3215 DP_IPA_RESET_TX_DB_PA(soc, ipa_res); 3216 dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false); 3217 return QDF_STATUS_E_FAILURE; 3218 } 3219 3220 if (soc->ipa_first_tx_db_access) { 3221 dp_ipa_tx_comp_ring_init_hp(soc, ipa_res); 3222 soc->ipa_first_tx_db_access = false; 3223 } 3224 3225 return QDF_STATUS_SUCCESS; 
3226 } 3227 3228 QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3229 qdf_ipa_wdi_hdl_t hdl) 3230 { 3231 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3232 struct dp_pdev *pdev = 3233 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 3234 QDF_STATUS result; 3235 struct dp_ipa_resources *ipa_res; 3236 3237 if (!pdev) { 3238 dp_err("Invalid instance"); 3239 return QDF_STATUS_E_FAILURE; 3240 } 3241 3242 ipa_res = &pdev->ipa_resource; 3243 3244 qdf_sleep(TX_COMP_DRAIN_WAIT_TIMEOUT_MS); 3245 /* 3246 * Reset the tx completion doorbell address before invoking IPA disable 3247 * pipes API to ensure that there is no access to IPA tx doorbell 3248 * address post disable pipes. 3249 */ 3250 DP_IPA_RESET_TX_DB_PA(soc, ipa_res); 3251 3252 result = qdf_ipa_wdi_disable_pipes(hdl); 3253 if (result) { 3254 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3255 "%s: Disable WDI PIPE fail, code %d", 3256 __func__, result); 3257 qdf_assert_always(0); 3258 return QDF_STATUS_E_FAILURE; 3259 } 3260 3261 qdf_atomic_set(&soc->ipa_pipes_enabled, 0); 3262 dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false); 3263 3264 return result ? 
QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS; 3265 } 3266 3267 /** 3268 * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates 3269 * @client: Client type 3270 * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps) 3271 * @hdl: IPA handle 3272 * 3273 * Return: QDF_STATUS 3274 */ 3275 QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps, 3276 qdf_ipa_wdi_hdl_t hdl) 3277 { 3278 qdf_ipa_wdi_perf_profile_t profile; 3279 QDF_STATUS result; 3280 3281 profile.client = client; 3282 profile.max_supported_bw_mbps = max_supported_bw_mbps; 3283 3284 result = qdf_ipa_wdi_set_perf_profile(hdl, &profile); 3285 if (result) { 3286 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3287 "%s: ipa_wdi_set_perf_profile fail, code %d", 3288 __func__, result); 3289 return QDF_STATUS_E_FAILURE; 3290 } 3291 3292 return QDF_STATUS_SUCCESS; 3293 } 3294 3295 /** 3296 * dp_ipa_intrabss_send - send IPA RX intra-bss frames 3297 * @pdev: pdev 3298 * @vdev: vdev 3299 * @nbuf: skb 3300 * 3301 * Return: nbuf if TX fails and NULL if TX succeeds 3302 */ 3303 static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev, 3304 struct dp_vdev *vdev, 3305 qdf_nbuf_t nbuf) 3306 { 3307 struct dp_peer *vdev_peer; 3308 uint16_t len; 3309 3310 vdev_peer = dp_vdev_bss_peer_ref_n_get(pdev->soc, vdev, DP_MOD_ID_IPA); 3311 if (qdf_unlikely(!vdev_peer)) 3312 return nbuf; 3313 3314 if (qdf_unlikely(!vdev_peer->txrx_peer)) { 3315 dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA); 3316 return nbuf; 3317 } 3318 3319 qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb)); 3320 len = qdf_nbuf_len(nbuf); 3321 3322 if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) { 3323 DP_PEER_PER_PKT_STATS_INC_PKT(vdev_peer->txrx_peer, 3324 rx.intra_bss.fail, 1, len); 3325 dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA); 3326 return nbuf; 3327 } 3328 3329 DP_PEER_PER_PKT_STATS_INC_PKT(vdev_peer->txrx_peer, 3330 rx.intra_bss.pkts, 1, len); 3331 dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA); 
3332 return NULL; 3333 } 3334 3335 #ifdef IPA_WDS_EASYMESH_FEATURE 3336 /** 3337 * dp_ipa_peer_check() - Check for peer for given mac 3338 * @soc: dp soc object 3339 * @peer_mac_addr: peer mac address 3340 * @vdev_id: vdev id 3341 * 3342 * Return: true if peer is found, else false 3343 */ 3344 static inline bool dp_ipa_peer_check(struct dp_soc *soc, 3345 uint8_t *peer_mac_addr, uint8_t vdev_id) 3346 { 3347 struct dp_ast_entry *ast_entry = NULL; 3348 struct dp_peer *peer = NULL; 3349 3350 qdf_spin_lock_bh(&soc->ast_lock); 3351 ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr); 3352 3353 if ((!ast_entry) || 3354 (ast_entry->delete_in_progress && !ast_entry->callback)) { 3355 qdf_spin_unlock_bh(&soc->ast_lock); 3356 return false; 3357 } 3358 3359 peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id, 3360 DP_MOD_ID_AST); 3361 3362 if (!peer) { 3363 qdf_spin_unlock_bh(&soc->ast_lock); 3364 return false; 3365 } else { 3366 if (peer->vdev->vdev_id == vdev_id) { 3367 dp_peer_unref_delete(peer, DP_MOD_ID_IPA); 3368 qdf_spin_unlock_bh(&soc->ast_lock); 3369 return true; 3370 } 3371 dp_peer_unref_delete(peer, DP_MOD_ID_IPA); 3372 qdf_spin_unlock_bh(&soc->ast_lock); 3373 return false; 3374 } 3375 } 3376 #else 3377 static inline bool dp_ipa_peer_check(struct dp_soc *soc, 3378 uint8_t *peer_mac_addr, uint8_t vdev_id) 3379 { 3380 struct dp_peer *peer = NULL; 3381 3382 peer = dp_peer_find_hash_find(soc, peer_mac_addr, 0, vdev_id, 3383 DP_MOD_ID_IPA); 3384 if (!peer) { 3385 return false; 3386 } else { 3387 dp_peer_unref_delete(peer, DP_MOD_ID_IPA); 3388 return true; 3389 } 3390 } 3391 #endif 3392 3393 bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 3394 qdf_nbuf_t nbuf, bool *fwd_success) 3395 { 3396 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3397 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 3398 DP_MOD_ID_IPA); 3399 struct dp_pdev *pdev; 3400 qdf_nbuf_t nbuf_copy; 3401 uint8_t da_is_bcmc; 3402 struct ethhdr *eh; 3403 bool status 
= false;

	*fwd_success = false; /* set default as failure */

	/*
	 * WDI 3.0 skb->cb[] info from IPA driver
	 * skb->cb[0] = vdev_id
	 * skb->cb[1].bit#1 = da_is_bcmc
	 */
	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;

	if (qdf_unlikely(!vdev))
		return false;

	pdev = vdev->pdev;
	if (qdf_unlikely(!pdev))
		goto out;

	/* no fwd for station mode and just pass up to stack */
	if (vdev->opmode == wlan_op_mode_sta)
		goto out;

	if (da_is_bcmc) {
		/* Forward a copy; the original still goes up to the stack */
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			goto out;

		if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy))
			qdf_nbuf_free(nbuf_copy);
		else
			*fwd_success = true;

		/* return false to pass original pkt up to stack */
		goto out;
	}

	eh = (struct ethhdr *)qdf_nbuf_data(nbuf);

	/* Destined to our own BSS MAC: deliver locally, do not forward */
	if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
		goto out;

	/* Both DA and SA must be known peers on this vdev to forward */
	if (!dp_ipa_peer_check(soc, eh->h_dest, vdev->vdev_id))
		goto out;

	if (!dp_ipa_peer_check(soc, eh->h_source, vdev->vdev_id))
		goto out;

	/*
	 * In intra-bss forwarding scenario, skb is allocated by IPA driver.
	 * Need to add skb to internal tracking table to avoid nbuf memory
	 * leak check for unallocated skb.
	 */
	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);

	if (dp_ipa_intrabss_send(pdev, vdev, nbuf))
		qdf_nbuf_free(nbuf);
	else
		*fwd_success = true;

	/* status true: nbuf consumed here, caller must not pass it up */
	status = true;
out:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_IPA);
	return status;
}

#ifdef MDM_PLATFORM
bool dp_ipa_is_mdm_platform(void)
{
	return true;
}
#else
bool dp_ipa_is_mdm_platform(void)
{
	return false;
}
#endif

/**
 * dp_ipa_frag_nbuf_linearize - linearize nbuf for IPA
 * @soc: soc
 * @nbuf: source skb
 *
 * Copies a fragmented (ext-list) nbuf into one freshly allocated linear
 * buffer laid out as [rx TLVs][2-byte L3 pad][payload]; frees the
 * original on success.
 *
 * Return: new nbuf if success and otherwise NULL
 */
static qdf_nbuf_t dp_ipa_frag_nbuf_linearize(struct dp_soc *soc,
					     qdf_nbuf_t nbuf)
{
	uint8_t *src_nbuf_data;
	uint8_t *dst_nbuf_data;
	qdf_nbuf_t dst_nbuf;
	qdf_nbuf_t temp_nbuf = nbuf;
	uint32_t nbuf_len = qdf_nbuf_len(nbuf);
	bool is_nbuf_head = true;
	uint32_t copy_len = 0;

	dst_nbuf = qdf_nbuf_alloc(soc->osdev, RX_DATA_BUFFER_SIZE,
				  RX_BUFFER_RESERVATION,
				  RX_DATA_BUFFER_ALIGNMENT, FALSE);

	if (!dst_nbuf) {
		dp_err_rl("nbuf allocate fail");
		return NULL;
	}

	/* Total written is nbuf_len + pad; reject if it cannot fit */
	if ((nbuf_len + L3_HEADER_PADDING) > RX_DATA_BUFFER_SIZE) {
		qdf_nbuf_free(dst_nbuf);
		dp_err_rl("nbuf is jumbo data");
		return NULL;
	}

	/* prepare to copy all data into new skb */
	dst_nbuf_data = qdf_nbuf_data(dst_nbuf);
	while (temp_nbuf) {
		src_nbuf_data = qdf_nbuf_data(temp_nbuf);
		/* first head nbuf */
		if (is_nbuf_head) {
			/* TLVs go first, then the pad, then head payload */
			qdf_mem_copy(dst_nbuf_data, src_nbuf_data,
				     soc->rx_pkt_tlv_size);
			/* leave extra 2 bytes L3_HEADER_PADDING */
			dst_nbuf_data += (soc->rx_pkt_tlv_size +
					  L3_HEADER_PADDING);
			src_nbuf_data += soc->rx_pkt_tlv_size;
			copy_len = qdf_nbuf_headlen(temp_nbuf) -
						soc->rx_pkt_tlv_size;
			temp_nbuf = qdf_nbuf_get_ext_list(temp_nbuf);
			is_nbuf_head =
false;
		} else {
			/* Fragment nbufs: copy their full length verbatim */
			copy_len = qdf_nbuf_len(temp_nbuf);
			temp_nbuf = qdf_nbuf_queue_next(temp_nbuf);
		}
		qdf_mem_copy(dst_nbuf_data, src_nbuf_data, copy_len);
		dst_nbuf_data += copy_len;
	}

	qdf_nbuf_set_len(dst_nbuf, nbuf_len);
	/* copy is done, free original nbuf */
	qdf_nbuf_free(nbuf);

	return dst_nbuf;
}

/**
 * dp_ipa_handle_rx_reo_reinject - Handle RX REO reinject skb buffer
 * @soc: soc
 * @nbuf: skb
 *
 * Linearizes fragmented skbs before REO reinject when IPA pipes are
 * active; otherwise passes the skb through untouched.
 *
 * Return: nbuf if success and otherwise NULL
 */
qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf)
{

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return nbuf;

	/* WLAN IPA is run-time disabled */
	if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
		return nbuf;

	/* Non-fragmented skbs need no linearization */
	if (!qdf_nbuf_is_frag(nbuf))
		return nbuf;

	/* linearize skb for IPA */
	return dp_ipa_frag_nbuf_linearize(soc, nbuf);
}

/**
 * dp_ipa_tx_buf_smmu_mapping() - SMMU-map Tx buffer pools for IPA
 * @soc_hdl: dp soc handle
 * @pdev_id: dp pdev id
 *
 * Maps the primary and alternate Tx buffer pools; unmaps the primary
 * again if mapping the alternate pool fails.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_tx_buf_smmu_mapping(
	struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	QDF_STATUS ret;

	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("%s invalid instance", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
		/* No stage-1 SMMU: physical addresses are used directly */
		dp_debug("SMMU S1 disabled");
		return QDF_STATUS_SUCCESS;
	}
	ret = __dp_ipa_tx_buf_smmu_mapping(soc, pdev, true);
	if (ret)
		return ret;

	ret = dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, true);
	if (ret)
		/* Roll back the primary mapping on alt-pool failure */
		__dp_ipa_tx_buf_smmu_mapping(soc, pdev, false);

	return ret;
}

/**
 * dp_ipa_tx_buf_smmu_unmapping() - SMMU-unmap Tx buffer pools for IPA
 * @soc_hdl: dp soc handle
 * @pdev_id: dp pdev id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(
	struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("%s invalid instance", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
		dp_debug("SMMU S1 disabled");
		return QDF_STATUS_SUCCESS;
	}

	/* Attempt both unmaps; fail if either pool fails to unmap */
	if (__dp_ipa_tx_buf_smmu_mapping(soc, pdev, false) ||
	    dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, false))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

#ifdef IPA_WDS_EASYMESH_FEATURE
/**
 * dp_ipa_ast_create() - Create/learn a WDS AST entry from IPA exception data
 * @soc_hdl: dp soc handle
 * @data: AST info (skb, TA peer id, AD4-valid and first-msdu flags)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_ast_create(struct cdp_soc_t *soc_hdl,
			     qdf_ipa_ast_info_type_t *data)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	uint8_t *rx_tlv_hdr;
	struct dp_peer *peer;
	struct hal_rx_msdu_metadata msdu_metadata;
	qdf_ipa_ast_info_type_t *ast_info;

	if (!data) {
		dp_err("Data is NULL !!!");
		return QDF_STATUS_E_FAILURE;
	}
	ast_info = data;

	/* skb data still starts at the rx TLV header at this point */
	rx_tlv_hdr = qdf_nbuf_data(ast_info->skb);
	peer = dp_peer_get_ref_by_id(soc, ast_info->ta_peer_id,
				     DP_MOD_ID_IPA);
	if (!peer) {
		dp_err("Peer is NULL !!!!");
		return QDF_STATUS_E_FAILURE;
	}

	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);

	dp_rx_ipa_wds_srcport_learn(soc, peer, ast_info->skb, msdu_metadata,
				    ast_info->mac_addr_ad4_valid,
				    ast_info->first_msdu_in_mpdu_flag);

	dp_peer_unref_delete(peer, DP_MOD_ID_IPA);

	return QDF_STATUS_SUCCESS;
}
#endif
#endif