/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <dp_internal.h>
#include <dp_htt.h>
#include "dp_be.h"
#include "dp_be_tx.h"
#include "dp_be_rx.h"
#include <hal_be_api.h>

qdf_size_t dp_get_context_size_be(enum dp_context_type context_type)
{
	switch (context_type) {
	case DP_CONTEXT_TYPE_SOC:
		return sizeof(struct dp_soc_be);
	case DP_CONTEXT_TYPE_PDEV:
		return sizeof(struct dp_pdev_be);
	case DP_CONTEXT_TYPE_VDEV:
		return sizeof(struct dp_vdev_be);
	case DP_CONTEXT_TYPE_PEER:
		return sizeof(struct dp_peer_be);
	default:
		return 0;
	}
}

#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
/**
 * dp_cc_reg_cfg_init() - initialize and configure the HW cookie
 *			  conversion register
 * @soc: SOC handle
 * @cc_ctx: cookie conversion context pointer
 * @is_4k_align: page address is 4K aligned
 *
 * Return: None
 */
static void dp_cc_reg_cfg_init(struct dp_soc *soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx,
			       bool is_4k_align)
{
	struct hal_hw_cc_config cc_cfg = { 0 };

	if (!soc->wlan_cfg_ctx->hw_cc_enabled) {
		dp_info("INI skip HW CC register setting");
		return;
	}

	cc_cfg.lut_base_addr_31_0 = cc_ctx->cmem_base;
	cc_cfg.cc_global_en = true;
	cc_cfg.page_4k_align = is_4k_align;
	cc_cfg.cookie_offset_msb = DP_CC_DESC_ID_SPT_VA_OS_MSB;
	cc_cfg.cookie_page_msb = DP_CC_DESC_ID_PPT_PAGE_OS_MSB;
	/* bit 36 must be set so that HW treats this as a CMEM address */
	cc_cfg.lut_base_addr_39_32 = 0x10;

	cc_cfg.wbm2sw6_cc_en = 1;
	cc_cfg.wbm2sw5_cc_en = 1;
	cc_cfg.wbm2sw4_cc_en = 1;
	cc_cfg.wbm2sw3_cc_en = 1;
	cc_cfg.wbm2sw2_cc_en = 1;
	cc_cfg.wbm2sw1_cc_en = 1;
	cc_cfg.wbm2sw0_cc_en = 1;
	cc_cfg.wbm2fw_cc_en = 0;

	hal_cookie_conversion_reg_cfg_be(soc->hal_soc, &cc_cfg);
}

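/*
 * Illustrative sketch (not part of the driver): how HW splits a SW buffer
 * cookie into a PPT page index and an SPT entry offset for the two-level
 * lookup that dp_cc_reg_cfg_init() above enables. The field widths below
 * are assumptions chosen for the example, not the values of the
 * DP_CC_DESC_ID_* macros.
 */
#if 0	/* illustrative example only */
#include <stdint.h>

#define EX_SPT_VA_BITS		9	/* assumed entry-offset field width */
#define EX_PPT_PAGE_BITS	11	/* assumed page-index field width */

static void ex_cookie_split(uint32_t cookie,
			    uint32_t *ppt_page, uint32_t *spt_offset)
{
	/* low bits index the entry within an SPT page */
	*spt_offset = cookie & ((1 << EX_SPT_VA_BITS) - 1);
	/* next bits select the SPT page via the PPT in CMEM */
	*ppt_page = (cookie >> EX_SPT_VA_BITS) &
			((1 << EX_PPT_PAGE_BITS) - 1);
}
#endif
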
/**
 * dp_hw_cc_cmem_write() - DP wrapper function for CMEM buffer writing
 * @hal_soc_hdl: HAL SOC handle
 * @offset: CMEM address
 * @value: value to write
 *
 * Return: None
 */
static inline void dp_hw_cc_cmem_write(hal_soc_handle_t hal_soc_hdl,
				       uint32_t offset,
				       uint32_t value)
{
	hal_cmem_write(hal_soc_hdl, offset, value);
}

/**
 * dp_hw_cc_cmem_addr_init() - Check and initialize CMEM base address for
 *			       HW cookie conversion
 * @soc: SOC handle
 * @cc_ctx: cookie conversion context pointer
 *
 * Return: 0 in case of success, else error value
 */
static inline QDF_STATUS dp_hw_cc_cmem_addr_init(
				struct dp_soc *soc,
				struct dp_hw_cookie_conversion_t *cc_ctx)
{
	/* get CMEM for cookie conversion */
	if (soc->cmem_size < DP_CC_PPT_MEM_SIZE) {
		dp_err("cmem_size %llu bytes < 4K", soc->cmem_size);
		return QDF_STATUS_E_RESOURCES;
	}
	cc_ctx->cmem_base = (uint32_t)(soc->cmem_base +
				       DP_CC_MEM_OFFSET_IN_CMEM);

	return QDF_STATUS_SUCCESS;
}

#else

static inline void dp_cc_reg_cfg_init(struct dp_soc *soc,
				      struct dp_hw_cookie_conversion_t *cc_ctx,
				      bool is_4k_align)
{
}

static inline void dp_hw_cc_cmem_write(hal_soc_handle_t hal_soc_hdl,
				       uint32_t offset,
				       uint32_t value)
{
}

static inline QDF_STATUS dp_hw_cc_cmem_addr_init(
				struct dp_soc *soc,
				struct dp_hw_cookie_conversion_t *cc_ctx)
{
	return QDF_STATUS_SUCCESS;
}
#endif

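/*
 * Illustrative sketch (not part of the driver): the SPT page estimate done
 * in dp_hw_cookie_conversion_attach() below is integer division of the
 * worst-case descriptor count by the entries per SPT page, clamped to the
 * PPT capacity. It assumes the configured maxima divide evenly; a general
 * sizing would round up, as dp_cc_spt_page_desc_alloc() does. The numeric
 * values here are example assumptions, not the real DP_CC_* macro values.
 */
#if 0	/* illustrative example only */
#include <stdint.h>

static uint32_t ex_num_spt_pages(uint32_t max_desc,
				 uint32_t entries_per_page,	/* e.g. 512 */
				 uint32_t ppt_max_entries)	/* e.g. 1024 */
{
	uint32_t pages = max_desc / entries_per_page;

	return pages <= ppt_max_entries ? pages : ppt_max_entries;
}
#endif
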
static QDF_STATUS dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc)
{
	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
	uint32_t max_tx_rx_desc_num, num_spt_pages, i = 0;
	struct dp_spt_page_desc *spt_desc;
	struct qdf_mem_dma_page_t *dma_page;
	QDF_STATUS qdf_status;

	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
		return QDF_STATUS_SUCCESS;

	qdf_status = dp_hw_cc_cmem_addr_init(soc, cc_ctx);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
		return qdf_status;

	/* estimate how many SPT DDR pages are needed */
	max_tx_rx_desc_num = WLAN_CFG_NUM_TX_DESC_MAX * MAX_TXDESC_POOLS +
			WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX * MAX_RXDESC_POOLS;
	num_spt_pages = max_tx_rx_desc_num / DP_CC_SPT_PAGE_MAX_ENTRIES;
	num_spt_pages = num_spt_pages <= DP_CC_PPT_MAX_ENTRIES ?
			num_spt_pages : DP_CC_PPT_MAX_ENTRIES;
	dp_info("num_spt_pages needed %d", num_spt_pages);

	dp_desc_multi_pages_mem_alloc(soc, DP_HW_CC_SPT_PAGE_TYPE,
				      &cc_ctx->page_pool, qdf_page_size,
				      num_spt_pages, 0, false);
	if (!cc_ctx->page_pool.dma_pages) {
		dp_err("spt ddr pages allocation failed");
		return QDF_STATUS_E_RESOURCES;
	}
	cc_ctx->page_desc_base = qdf_mem_malloc(
			num_spt_pages * sizeof(struct dp_spt_page_desc));
	if (!cc_ctx->page_desc_base) {
		dp_err("spt page descs allocation failed");
		goto fail_0;
	}

	/* initialize the page descriptors */
	spt_desc = cc_ctx->page_desc_base;
	dma_page = cc_ctx->page_pool.dma_pages;
	while (i < num_spt_pages) {
		/* check if the page address is 4K aligned */
		if (qdf_unlikely(dma_page[i].page_p_addr & 0xFFF)) {
			dp_err("non-4k aligned pages addr %pK",
			       (void *)dma_page[i].page_p_addr);
			goto fail_1;
		}

		spt_desc[i].page_v_addr =
			dma_page[i].page_v_addr_start;
		spt_desc[i].page_p_addr =
			dma_page[i].page_p_addr;
		i++;
	}

	cc_ctx->total_page_num = num_spt_pages;
	qdf_spinlock_create(&cc_ctx->cc_lock);

	return QDF_STATUS_SUCCESS;
fail_1:
	qdf_mem_free(cc_ctx->page_desc_base);
fail_0:
	dp_desc_multi_pages_mem_free(soc, DP_HW_CC_SPT_PAGE_TYPE,
				     &cc_ctx->page_pool, 0, false);

	return QDF_STATUS_E_FAILURE;
}

static QDF_STATUS dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc)
{
	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;

	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
		return QDF_STATUS_SUCCESS;

	qdf_mem_free(cc_ctx->page_desc_base);
	dp_desc_multi_pages_mem_free(soc, DP_HW_CC_SPT_PAGE_TYPE,
				     &cc_ctx->page_pool, 0, false);
	qdf_spinlock_destroy(&cc_ctx->cc_lock);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc)
{
	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
	uint32_t i = 0;
	struct dp_spt_page_desc *spt_desc;

	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
		return QDF_STATUS_SUCCESS;

	if (!cc_ctx->total_page_num) {
		dp_err("total page num is 0");
		return QDF_STATUS_E_INVAL;
	}

	spt_desc = cc_ctx->page_desc_base;
	while (i < cc_ctx->total_page_num) {
		/* write the page PA into the CMEM PPT entry */
		dp_hw_cc_cmem_write(soc->hal_soc,
				    (cc_ctx->cmem_base +
				     i * DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED),
				    (spt_desc[i].page_p_addr >>
				     DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED));

		spt_desc[i].ppt_index = i;
		spt_desc[i].avail_entry_index = 0;
		/* link the page descriptors */
		if ((i + 1) != cc_ctx->total_page_num)
			spt_desc[i].next = &spt_desc[i + 1];
		else
			spt_desc[i].next = NULL;
		i++;
	}

	cc_ctx->page_desc_freelist = cc_ctx->page_desc_base;
	cc_ctx->free_page_num = cc_ctx->total_page_num;

	/* write the WBM/REO cookie conversion CFG register */
	dp_cc_reg_cfg_init(soc, cc_ctx, true);

	return QDF_STATUS_SUCCESS;
}

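/*
 * Illustrative sketch (not part of the driver): a software mirror of the
 * lookup HW can perform once dp_hw_cookie_conversion_init() above has
 * populated the PPT in CMEM. Each PPT entry stores the 4K-aligned SPT page
 * PA with its low bits stripped; HW appends those bits back, then indexes
 * into the SPT page with the cookie's entry offset to fetch the descriptor
 * VA. All names and types here are hypothetical.
 */
#if 0	/* illustrative example only */
#include <stdint.h>

#define EX_PAGE_SHIFT	12	/* 4K-aligned SPT pages */

static uint64_t ex_cookie_to_va(uint64_t *ppt,	    /* page idx -> PA >> 12 */
				uint64_t **spt_va,  /* page idx -> page VA */
				uint32_t ppt_page, uint32_t spt_offset)
{
	/* HW: PPT entry + re-appended zero bits = SPT page physical address */
	uint64_t spt_page_pa = ppt[ppt_page] << EX_PAGE_SHIFT;

	(void)spt_page_pa;	/* HW walks the PA; this SW mirror uses VAs */
	return spt_va[ppt_page][spt_offset];
}
#endif
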
static QDF_STATUS dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc)
{
	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;

	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
		return QDF_STATUS_SUCCESS;

	cc_ctx->page_desc_freelist = NULL;
	cc_ctx->free_page_num = 0;

	return QDF_STATUS_SUCCESS;
}

uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
				   struct dp_spt_page_desc **list_head,
				   struct dp_spt_page_desc **list_tail,
				   uint16_t num_desc)
{
	uint16_t num_pages, count;
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;

	num_pages = (num_desc / DP_CC_SPT_PAGE_MAX_ENTRIES) +
			(num_desc % DP_CC_SPT_PAGE_MAX_ENTRIES ? 1 : 0);

	if (num_pages > cc_ctx->free_page_num) {
		dp_err("fail: num_pages required %d > free_page_num %d",
		       num_pages,
		       cc_ctx->free_page_num);
		return 0;
	}

	qdf_spin_lock_bh(&cc_ctx->cc_lock);

	*list_head = *list_tail = cc_ctx->page_desc_freelist;
	for (count = 0; count < num_pages; count++) {
		if (qdf_unlikely(!cc_ctx->page_desc_freelist)) {
			cc_ctx->page_desc_freelist = *list_head;
			*list_head = *list_tail = NULL;
			qdf_spin_unlock_bh(&cc_ctx->cc_lock);
			return 0;
		}
		*list_tail = cc_ctx->page_desc_freelist;
		cc_ctx->page_desc_freelist = cc_ctx->page_desc_freelist->next;
	}
	(*list_tail)->next = NULL;
	cc_ctx->free_page_num -= count;

	qdf_spin_unlock_bh(&cc_ctx->cc_lock);

	return count;
}

void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
			      struct dp_spt_page_desc **list_head,
			      struct dp_spt_page_desc **list_tail,
			      uint16_t page_nums)
{
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
	struct dp_spt_page_desc *temp_list = NULL;

	qdf_spin_lock_bh(&cc_ctx->cc_lock);

	temp_list = cc_ctx->page_desc_freelist;
	cc_ctx->page_desc_freelist = *list_head;
	(*list_tail)->next = temp_list;
	cc_ctx->free_page_num += page_nums;
	*list_tail = NULL;
	*list_head = NULL;

	qdf_spin_unlock_bh(&cc_ctx->cc_lock);
}

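/*
 * Illustrative sketch (not part of the driver): the expected pairing of
 * dp_cc_spt_page_desc_alloc()/dp_cc_spt_page_desc_free() as a descriptor
 * pool might use it. The caller and the descriptor count are placeholders.
 */
#if 0	/* illustrative example only */
static void ex_spt_page_usage(struct dp_soc_be *be_soc)
{
	struct dp_spt_page_desc *head = NULL, *tail = NULL;
	uint16_t got;

	/* reserve enough SPT pages to cover 1024 descriptors */
	got = dp_cc_spt_page_desc_alloc(be_soc, &head, &tail, 1024);
	if (!got)
		return;		/* freelist exhausted */

	/* ... populate SPT entries with descriptor VAs via head..tail ... */

	/* return the same chain; 'got' pages go back to the freelist */
	dp_cc_spt_page_desc_free(be_soc, &head, &tail, got);
}
#endif
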
static QDF_STATUS dp_soc_attach_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();

	qdf_status = dp_tx_init_bank_profiles(be_soc);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
		return qdf_status;

	/* cookie conversion */
	qdf_status = dp_hw_cookie_conversion_attach(be_soc);

	return qdf_status;
}

static QDF_STATUS dp_soc_detach_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	dp_tx_deinit_bank_profiles(be_soc);

	dp_hw_cookie_conversion_detach(be_soc);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_init_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	qdf_status = dp_hw_cookie_conversion_init(be_soc);

	return qdf_status;
}

static QDF_STATUS dp_soc_deinit_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	dp_hw_cookie_conversion_deinit(be_soc);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_pdev_attach_be(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_pdev_detach_be(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_attach_be(struct dp_soc *soc, struct dp_vdev *vdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);

	/* Needs to be enabled after bring-up */
	be_vdev->vdev_id_check_en = false;

	if (be_vdev->bank_id == DP_BE_INVALID_BANK_ID) {
		QDF_BUG(0);
		return QDF_STATUS_E_FAULT;
	}
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_detach_be(struct dp_soc *soc, struct dp_vdev *vdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	dp_tx_put_bank_profile(be_soc, be_vdev);
	return QDF_STATUS_SUCCESS;
}

qdf_size_t dp_get_soc_context_size_be(void)
{
	return sizeof(struct dp_soc_be);
}

/**
 * dp_rxdma_ring_sel_cfg_be() - Setup RXDMA ring config
 * @soc: Common DP soc handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rxdma_ring_sel_cfg_be(struct dp_soc *soc)
{
	int i;
	int mac_id;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	struct dp_srng *rx_mac_srng;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/*
	 * In the Beryllium chipset, msdu_start, mpdu_end
	 * and rx_attn are part of msdu_end/mpdu_start
	 */
	htt_tlv_filter.msdu_start = 0;
	htt_tlv_filter.mpdu_end = 0;
	htt_tlv_filter.attention = 0;
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.packet_header = 1;

	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 0;

	htt_tlv_filter.fp_mgmt_filter = 0;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
					 FILTER_DATA_MCAST |
					 FILTER_DATA_DATA);
	htt_tlv_filter.mo_mgmt_filter = 0;
	htt_tlv_filter.mo_ctrl_filter = 0;
	htt_tlv_filter.mo_data_filter = 0;
	htt_tlv_filter.md_data_filter = 0;

	htt_tlv_filter.offset_valid = true;

	/* Not subscribing to mpdu_end, msdu_start and rx_attn */
	htt_tlv_filter.rx_mpdu_end_offset = 0;
	htt_tlv_filter.rx_msdu_start_offset = 0;
	htt_tlv_filter.rx_attn_offset = 0;

	htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
	htt_tlv_filter.rx_header_offset =
				hal_rx_pkt_tlv_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_mpdu_start_offset =
				hal_rx_mpdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_end_offset =
				hal_rx_msdu_end_offset_get(soc->hal_soc);

	dp_info("TLV subscription\n"
		"msdu_start %d, mpdu_end %d, attention %d\n"
		"mpdu_start %d, msdu_end %d, pkt_hdr %d, pkt %d\n"
		"TLV offsets\n"
		"msdu_start %d, mpdu_end %d, attention %d\n"
		"mpdu_start %d, msdu_end %d, pkt_hdr %d, pkt %d\n",
		htt_tlv_filter.msdu_start,
		htt_tlv_filter.mpdu_end,
		htt_tlv_filter.attention,
		htt_tlv_filter.mpdu_start,
		htt_tlv_filter.msdu_end,
		htt_tlv_filter.packet_header,
		htt_tlv_filter.packet,
		htt_tlv_filter.rx_msdu_start_offset,
		htt_tlv_filter.rx_mpdu_end_offset,
		htt_tlv_filter.rx_attn_offset,
		htt_tlv_filter.rx_mpdu_start_offset,
		htt_tlv_filter.rx_msdu_end_offset,
		htt_tlv_filter.rx_header_offset,
		htt_tlv_filter.rx_packet_offset);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev =
				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
			/*
			 * Obtain lmac id from pdev to access the LMAC ring
			 * in soc context
			 */
			int lmac_id =
				dp_get_lmac_id_for_pdev_id(soc, mac_id,
							   pdev->pdev_id);

			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					    rx_mac_srng->hal_srng,
					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
					    &htt_tlv_filter);
		}
	}
	return status;
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_service_near_full_srngs_be() - Main bottom half callback for the
 *				     near-full IRQs
 * @soc: Datapath SoC handle
 * @int_ctx: Interrupt context
 * @dp_budget: Budget of the work that can be done in the bottom half
 *
 * Return: work done in the handler
 */
static uint32_t
dp_service_near_full_srngs_be(struct dp_soc *soc, struct dp_intr *int_ctx,
			      uint32_t dp_budget)
{
	int ring = 0;
	int budget = dp_budget;
	uint32_t work_done = 0;
	uint32_t remaining_quota = dp_budget;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	int tx_ring_near_full_mask = int_ctx->tx_ring_near_full_mask;
	int rx_near_full_grp_1_mask = int_ctx->rx_near_full_grp_1_mask;
	int rx_near_full_grp_2_mask = int_ctx->rx_near_full_grp_2_mask;
	int rx_near_full_mask = rx_near_full_grp_1_mask |
				rx_near_full_grp_2_mask;

	dp_verbose_debug("rx_ring_near_full 0x%x tx_ring_near_full 0x%x",
			 rx_near_full_mask,
			 tx_ring_near_full_mask);

	if (rx_near_full_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_near_full_mask & (1 << ring)))
				continue;

			work_done = dp_rx_nf_process(int_ctx,
					soc->reo_dest_ring[ring].hal_srng,
					ring, remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_near_full_masks[ring]++;
				dp_verbose_debug("rx NF mask 0x%x ring %d, work_done %d budget %d",
						 rx_near_full_mask, ring,
						 work_done,
						 budget);
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	if (tx_ring_near_full_mask) {
		for (ring = 0; ring < MAX_TCL_DATA_RINGS; ring++) {
			if (!(tx_ring_near_full_mask & (1 << ring)))
				continue;

			work_done = dp_tx_comp_nf_handler(int_ctx, soc,
					soc->tx_comp_ring[ring].hal_srng,
					ring, remaining_quota);
			if (work_done) {
				intr_stats->num_tx_comp_ring_near_full_masks[ring]++;
				dp_verbose_debug("tx NF mask 0x%x ring %d, work_done %d budget %d",
						 tx_ring_near_full_mask, ring,
						 work_done, budget);
				budget -= work_done;
				if (budget <= 0)
					break;
				remaining_quota = budget;
			}
		}
	}

	intr_stats->num_near_full_masks++;

budget_done:
	return dp_budget - budget;
}

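/*
 * Illustrative sketch (not part of the driver): the reap-within-budget
 * pattern used by dp_service_near_full_srngs_be() above, reduced to its
 * core. Generic code; 'process' stands in for dp_rx_nf_process() or
 * dp_tx_comp_nf_handler(), and all names here are hypothetical.
 */
#if 0	/* illustrative example only */
#include <stdint.h>

static uint32_t ex_service_mask(uint32_t mask, int num_rings, int budget,
				uint32_t (*process)(int ring, int quota))
{
	int total = budget;
	int ring;

	for (ring = 0; ring < num_rings; ring++) {
		if (!(mask & (1U << ring)))
			continue;

		/* each ring may consume at most the remaining budget */
		budget -= process(ring, budget);
		if (budget <= 0)
			break;
	}
	/* work done = how much of the budget was consumed */
	return total - budget;
}
#endif
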
/**
 * dp_srng_test_and_update_nf_params_be() - Check if the srng is in near-full
 *					    state and set the reap limit
 *					    accordingly
 * @soc: Datapath soc handle
 * @dp_srng: Datapath handle for SRNG
 * @max_reap_limit: [Output Param] max reap limit to be set as per the
 *		    srng near-full state
 *
 * Return: 1, if the srng is in near-full state
 *	   0, if the srng is not in near-full state
 */
static int
dp_srng_test_and_update_nf_params_be(struct dp_soc *soc,
				     struct dp_srng *dp_srng,
				     int *max_reap_limit)
{
	return _dp_srng_test_and_update_nf_params(soc, dp_srng,
						  max_reap_limit);
}

/**
 * dp_init_near_full_arch_ops_be() - Initialize the arch ops handler for the
 *				     near-full IRQ handling operations
 * @arch_ops: arch ops handle
 *
 * Return: none
 */
static inline void
dp_init_near_full_arch_ops_be(struct dp_arch_ops *arch_ops)
{
	arch_ops->dp_service_near_full_srngs = dp_service_near_full_srngs_be;
	arch_ops->dp_srng_test_and_update_nf_params =
					dp_srng_test_and_update_nf_params_be;
}

#else
static inline void
dp_init_near_full_arch_ops_be(struct dp_arch_ops *arch_ops)
{
}
#endif

void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
{
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_be;
	arch_ops->dp_rx_process = dp_rx_process_be;
	arch_ops->tx_comp_get_params_from_hal_desc =
		dp_tx_comp_get_params_from_hal_desc_be;
	arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_be;
	arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_be;
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_be;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_be;
	arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
		dp_wbm_get_rx_desc_from_hal_desc_be;
#endif
	arch_ops->txrx_get_context_size = dp_get_context_size_be;
	arch_ops->dp_rx_desc_cookie_2_va =
		dp_rx_desc_cookie_2_va_be;

	arch_ops->txrx_soc_attach = dp_soc_attach_be;
	arch_ops->txrx_soc_detach = dp_soc_detach_be;
	arch_ops->txrx_soc_init = dp_soc_init_be;
	arch_ops->txrx_soc_deinit = dp_soc_deinit_be;
	arch_ops->txrx_pdev_attach = dp_pdev_attach_be;
	arch_ops->txrx_pdev_detach = dp_pdev_detach_be;
	arch_ops->txrx_vdev_attach = dp_vdev_attach_be;
	arch_ops->txrx_vdev_detach = dp_vdev_detach_be;
	arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_be;

	dp_init_near_full_arch_ops_be(arch_ops);
}
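
/*
 * Illustrative sketch (not part of the driver): how common datapath code is
 * expected to dispatch through the ops table that dp_initialize_arch_ops_be()
 * fills in, instead of calling BE functions directly. It assumes the ops
 * table lives at soc->arch_ops; the caller shown here is hypothetical.
 */
#if 0	/* illustrative example only */
static void ex_arch_ops_dispatch(struct dp_soc *soc)
{
	/* BE-specific context size, resolved without a direct BE call */
	qdf_size_t soc_sz =
		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_SOC);

	(void)soc_sz;
	soc->arch_ops.txrx_soc_init(soc);
}
#endif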