/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"
#include "dp_tx.h"
#include "dp_li_tx.h"
#include "dp_tx_desc.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_li_api.h>
#include <hal_li_tx.h>
#include "dp_peer.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_li.h"

/* Maps cdp_sec_type values to the HW encrypt-type encoding; defined elsewhere */
extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];

/**
 * dp_tx_comp_get_params_from_hal_desc_li() - Look up the SW Tx descriptor
 *	matching a HAL Tx completion descriptor and fill its peer_id.
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL Tx completion (WBM release) descriptor
 * @r_tx_desc: out param; set to the SW Tx descriptor found from the
 *	pool/page/offset fields packed into the HW descriptor id
 *
 * Asserts (qdf_assert_always) if the pool id recorded in the found
 * descriptor does not match the pool id decoded from the HW id, since
 * that indicates a corrupted or stale completion.
 */
void dp_tx_comp_get_params_from_hal_desc_li(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
	uint8_t pool_id;
	uint32_t tx_desc_id;

	/* The 32-bit desc id packs pool, page and offset fields */
	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
		DP_TX_DESC_ID_POOL_OS;

	/* Find Tx descriptor */
	*r_tx_desc = dp_tx_desc_find(soc, pool_id,
				     (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				     DP_TX_DESC_ID_PAGE_OS,
				     (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				     DP_TX_DESC_ID_OFFSET_OS);
	/* Pool id is not matching. Error */
	if ((*r_tx_desc)->pool_id != pool_id) {
		dp_tx_comp_alert("Tx Comp pool id %d not matched %d",
				 pool_id, (*r_tx_desc)->pool_id);

		qdf_assert_always(0);
	}

	(*r_tx_desc)->peer_id = hal_tx_comp_get_peer_id(tx_comp_hal_desc);
}

/**
 * dp_tx_process_mec_notify_li() - Handle a FW2WBM MEC-notify completion
 * @soc: DP soc handle
 * @status: HTT completion words (cast to uint32_t[] internally)
 *
 * Extracts the vdev id from the HTT status words, takes a vdev reference
 * and forwards the notification to dp_tx_mec_handler(). Silently returns
 * if the vdev id is out of range or the vdev is gone.
 */
static inline
void dp_tx_process_mec_notify_li(struct dp_soc *soc, uint8_t *status)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;
	uint32_t *htt_desc = (uint32_t *)status;

	/*
	 * Get vdev id from HTT status word in case of MEC
	 * notification
	 */
	vdev_id = HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(htt_desc[3]);
	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
				     DP_MOD_ID_HTT_COMP);
	if (!vdev)
		return;
	dp_tx_mec_handler(vdev, status);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}

/**
 * dp_tx_process_htt_completion_li() - Process an HTT (FW2WBM) Tx completion
 * @soc: DP soc handle
 * @tx_desc: SW Tx descriptor the completion refers to
 * @status: raw HTT completion words from the WBM release ring
 * @ring_id: Tx completion ring id (used for per-ring TID stats)
 *
 * Dispatches on the FW2WBM tx_status: OK/DROP/TTL are converted into a
 * hal_tx_completion_status and fed through the normal completion path
 * (stats + desc release); REINJECT and INSPECT are handed to their
 * dedicated handlers; MEC_NOTIFY is processed without touching tx_desc
 * at all (see comment below); anything else frees the buffer and
 * releases the descriptor via the release_tx_desc path.
 */
void dp_tx_process_htt_completion_li(struct dp_soc *soc,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t *status,
				     uint8_t ring_id)
{
	uint8_t tx_status;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = NULL;
	struct hal_tx_completion_status ts = {0};
	uint32_t *htt_desc = (uint32_t *)status;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	struct htt_soc *htt_handle;
	uint8_t vdev_id;

	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
	htt_handle = (struct htt_soc *)soc->htt_handle;
	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);

	/*
	 * There can be scenario where WBM consuming descriptor enqueued
	 * from TQM2WBM first and TQM completion can happen before MEC
	 * notification comes from FW2WBM. Avoid access any field of tx
	 * descriptor in case of MEC notify.
	 */
	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY)
		return dp_tx_process_mec_notify_li(soc, status);

	/*
	 * If the descriptor is already freed in vdev_detach,
	 * continue to next descriptor
	 */
	if (qdf_unlikely(!tx_desc->flags)) {
		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
				   tx_desc->id);
		return;
	}

	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

	pdev = tx_desc->pdev;

	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

	qdf_assert(tx_desc->pdev);

	vdev_id = tx_desc->vdev_id;
	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
				     DP_MOD_ID_HTT_COMP);

	if (qdf_unlikely(!vdev)) {
		dp_tx_comp_info_rl("Unable to get vdev ref %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

	switch (tx_status) {
	case HTT_TX_FW2WBM_TX_STATUS_OK:
	case HTT_TX_FW2WBM_TX_STATUS_DROP:
	case HTT_TX_FW2WBM_TX_STATUS_TTL:
	{
		uint8_t tid;

		/* peer_id/tid are only valid when FW set the VALID bit */
		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
			ts.peer_id =
				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
						htt_desc[2]);
			ts.tid =
				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
						htt_desc[2]);
		} else {
			ts.peer_id = HTT_INVALID_PEER;
			ts.tid = HTT_INVALID_TID;
		}
		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
		ts.ppdu_id =
			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
					htt_desc[1]);
		ts.ack_frame_rssi =
			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
					htt_desc[1]);

		ts.tsf = htt_desc[3];
		ts.first_msdu = 1;
		ts.last_msdu = 1;
		/* DROP and TTL both map to the "remove command" TQM status */
		ts.status = (tx_status == HTT_TX_FW2WBM_TX_STATUS_OK ?
			     HAL_TX_TQM_RR_FRAME_ACKED :
			     HAL_TX_TQM_RR_REM_CMD_REM);
		tid = ts.tid;
		/* Clamp out-of-range TIDs into the last data-TID bucket */
		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
			tid = CDP_MAX_DATA_TIDS - 1;

		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

		if (qdf_unlikely(pdev->delay_stats_flag) ||
		    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)))
			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
		if (tx_status < CDP_MAX_TX_HTT_STATUS)
			tid_stats->htt_status_cnt[tx_status]++;

		txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
						       &txrx_ref_handle,
						       DP_MOD_ID_HTT_COMP);
		if (qdf_likely(txrx_peer)) {
			DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1,
						   qdf_nbuf_len(tx_desc->nbuf));
			if (tx_status != HTT_TX_FW2WBM_TX_STATUS_OK)
				DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
		}

		/* NOTE(review): txrx_peer may be NULL here; the callees are
		 * expected to tolerate that — confirm against their contracts.
		 */
		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
					     ring_id);
		dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);

		if (qdf_likely(txrx_peer))
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_HTT_COMP);

		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
	{
		uint8_t reinject_reason;

		reinject_reason =
			HTT_TX_WBM_COMPLETION_V2_REINJECT_REASON_GET(
					htt_desc[0]);
		dp_tx_reinject_handler(soc, vdev, tx_desc,
				       status, reinject_reason);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
	{
		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
	{
		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
		goto release_tx_desc;
	}
	default:
		dp_tx_comp_err("Invalid HTT tx_status %d\n",
			       tx_status);
		goto release_tx_desc;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
	return;

release_tx_desc:
	/* Error path: free the nbuf, return the descriptor to its pool, and
	 * drop the vdev reference if one was taken above.
	 */
	dp_tx_comp_free_buf(soc, tx_desc, false);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
/*
 * dp_tx_get_rbm_id_li() - Get the RBM ID for data transmission completion.
 * @soc: DP soc structure pointer
 * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
 *
 * Return: return buffer manager (RBM) ID — not a ring handle; the value
 *         programmed into the Tx descriptor so WBM routes the completion
 *         back to the right SW ring.
 */
#ifdef IPA_OFFLOAD
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return (ring_id + soc->wbm_sw0_bm_id);
}
#else
#ifndef QCA_DP_ENABLE_TX_COMP_RING4
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	/* Ring 0 maps to SW2; rings 1..n map to SW0 + (ring_id - 1) */
	return (ring_id ? HAL_WBM_SW0_BM_ID + (ring_id - 1) :
		HAL_WBM_SW2_BM_ID);
}
#else
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	if (ring_id == soc->num_tcl_data_rings)
		return HAL_WBM_SW4_BM_ID(soc->wbm_sw0_bm_id);
	return (ring_id + HAL_WBM_SW0_BM_ID(soc->wbm_sw0_bm_id));
}
#endif
#endif
#else
#ifdef TX_MULTI_TCL
#ifdef IPA_OFFLOAD
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	if (soc->wlan_cfg_ctx->ipa_enabled)
		return (ring_id + soc->wbm_sw0_bm_id);

	return soc->wlan_cfg_ctx->tcl_wbm_map_array[ring_id].wbm_rbm_id;
}
#else
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return soc->wlan_cfg_ctx->tcl_wbm_map_array[ring_id].wbm_rbm_id;
}
#endif
#else
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return (ring_id + soc->wbm_sw0_bm_id);
}
#endif
#endif

#if defined(CLEAR_SW2TCL_CONSUMED_DESC)
/**
 * dp_tx_clear_consumed_hw_descs - Reset all the consumed Tx ring descs to 0
 *
 * @soc: DP soc handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: void
 */
static inline
void dp_tx_clear_consumed_hw_descs(struct dp_soc *soc,
				   hal_ring_handle_t hal_ring_hdl)
{
	void *desc = hal_srng_src_get_next_consumed(soc->hal_soc, hal_ring_hdl);

	/* Walk every already-consumed SW2TCL descriptor and zero it */
	while (desc) {
		hal_tx_desc_clear(desc);
		desc = hal_srng_src_get_next_consumed(soc->hal_soc,
						      hal_ring_hdl);
	}
}

#else
static inline
void dp_tx_clear_consumed_hw_descs(struct dp_soc *soc,
				   hal_ring_handle_t hal_ring_hdl)
{
}
#endif /* CLEAR_SW2TCL_CONSUMED_DESC */

#ifdef WLAN_CONFIG_TX_DELAY
/* Compute HW Tx delay in microseconds from the completion timestamp,
 * using the vdev's delta_tsf correction.
 */
static inline
QDF_STATUS dp_tx_compute_hw_delay_li(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return dp_tx_compute_hw_delay_us(ts, vdev->delta_tsf, delay_us);
}
#else
/* Stub when Tx delay accounting is compiled out; *delay_us is untouched */
static inline
QDF_STATUS dp_tx_compute_hw_delay_li(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef CONFIG_SAWF
/**
 * dp_sawf_config_li - Configure sawf specific fields in tcl
 *
 * @soc: DP soc handle
 * @hal_tx_desc_cached: tx descriptor
 * @fw_metadata: in/out FW metadata word, updated via dp_sawf_tcl_cmd()
 * @vdev_id: vdev id
 * @nbuf: skb buffer
 *
 * Return: void
 */
static inline
void dp_sawf_config_li(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
		       uint16_t *fw_metadata, uint16_t vdev_id,
		       qdf_nbuf_t nbuf)
{
	uint8_t q_id = 0;
	uint32_t search_index;

	if (!wlan_cfg_get_sawf_config(soc->wlan_cfg_ctx))
		return;

	q_id = dp_sawf_queue_id_get(nbuf);
	if (q_id == DP_SAWF_DEFAULT_Q_INVALID)
		return;

	dp_sawf_tcl_cmd(fw_metadata, nbuf);

	search_index = dp_sawf_get_search_index(soc, nbuf, vdev_id,
						q_id);
	/* Low bits of the SAWF queue id double as the HLOS TID */
	hal_tx_desc_set_hlos_tid(hal_tx_desc_cached,
				 (q_id & (CDP_DATA_TID_MAX - 1)));
	hal_tx_desc_set_search_type_li(soc->hal_soc, hal_tx_desc_cached,
				       HAL_TX_ADDR_INDEX_SEARCH);
	hal_tx_desc_set_search_index_li(soc->hal_soc, hal_tx_desc_cached,
					search_index);
}
#else
static inline
void dp_sawf_config_li(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
		       uint16_t *fw_metadata, uint16_t vdev_id,
		       qdf_nbuf_t nbuf)
{
}

#define dp_sawf_tx_enqueue_peer_stats(soc, tx_desc)
#define dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc)
#endif

/**
 * dp_tx_hw_enqueue_li() - Fill a cached TCL descriptor and enqueue the frame
 *	to the hardware Tx (TCL) ring.
 * @soc: DP soc handle
 * @vdev: vdev the frame is transmitted on
 * @tx_desc: SW Tx descriptor (dma_addr/length/flags already populated)
 * @fw_metadata: metadata word passed to FW via the TCL descriptor
 * @tx_exc_metadata: optional exception-path overrides (may be NULL);
 *	only sec_type is consulted here
 * @msdu_info: per-MSDU info (tx queue/ring selection, tid)
 *
 * Return: QDF_STATUS_SUCCESS on enqueue; QDF_STATUS_E_RESOURCES when the
 *	desc id is invalid, ring access fails, or the TCL ring is full.
 */
QDF_STATUS
dp_tx_hw_enqueue_li(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info)
{
	void *hal_tx_desc;
	uint32_t *hal_tx_desc_cached;
	int coalesce = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	uint8_t ring_id = tx_q->ring_id & DP_TX_QUEUE_MASK;
	uint8_t tid = msdu_info->tid;

	/*
	 * Setting it initialization statically here to avoid
	 * a memset call jump with qdf_mem_set call
	 */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };

	/* Exception-path sec_type overrides the vdev's, when provided */
	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
			tx_exc_metadata->sec_type : vdev->sec_type);

	/* Return Buffer Manager ID */
	uint8_t bm_id = dp_tx_get_rbm_id_li(soc, ring_id);

	hal_ring_handle_t hal_ring_hdl = NULL;

	QDF_STATUS status = QDF_STATUS_E_RESOURCES;

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	hal_tx_desc_cached = (void *)cached_desc;

	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 tx_desc->dma_addr, bm_id, tx_desc->id,
				 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
	hal_tx_desc_set_lmac_id_li(soc->hal_soc, hal_tx_desc_cached,
				   vdev->lmac_id);
	hal_tx_desc_set_search_type_li(soc->hal_soc, hal_tx_desc_cached,
				       vdev->search_type);
	hal_tx_desc_set_search_index_li(soc->hal_soc, hal_tx_desc_cached,
					vdev->bss_ast_idx);
	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
					  vdev->dscp_tid_map_id);

	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));

	/* SAWF tagging may override TID/search fields set above */
	if (dp_sawf_tag_valid_get(tx_desc->nbuf)) {
		dp_sawf_config_li(soc, hal_tx_desc_cached, &fw_metadata,
				  vdev->vdev_id, tx_desc->nbuf);
		dp_sawf_tx_enqueue_peer_stats(soc, tx_desc);
	}

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
					  vdev->hal_desc_addr_search_flags);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration*/
	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
					QDF_NBUF_TX_CKSUM_TCP_UDP) ||
	      qdf_nbuf_is_tso(tx_desc->nbuf)) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

	if (!dp_tx_desc_set_ktimestamp(vdev, tx_desc))
		dp_tx_desc_set_timestamp(tx_desc);

	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
			 tx_desc->length,
			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
			 tx_desc->id);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RING Access Failed -- %pK",
			  __func__, __LINE__, hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
		return status;
	}

	dp_tx_clear_consumed_hw_descs(soc, hal_ring_hdl);

	/* Sync cached descriptor with HW */

	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
	/* Coalescing may defer the ring-doorbell write; see the access-end
	 * wrapper below which consumes 'coalesce'.
	 */
	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
					    msdu_info, ring_id);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
	DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
	dp_tx_update_stats(soc, tx_desc, ring_id);
	status = QDF_STATUS_SUCCESS;

	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
				 hal_ring_hdl, soc, ring_id);

ring_access_fail:
	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
			     qdf_get_log_timestamp(), tx_desc->nbuf);

	return status;
}

/**
 * dp_tx_desc_pool_init_li() - Assign ids to every descriptor on the pool
 *	freelist for the LI target.
 * @soc: DP soc handle
 * @num_elem: number of descriptors in the pool (not used in the walk;
 *	the freelist is traversed to its end)
 * @pool_id: pool whose freelist is initialized
 *
 * Each descriptor id packs pool/page/offset so completions can be mapped
 * back via dp_tx_desc_find(); also stamps the free-magic pattern.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
QDF_STATUS dp_tx_desc_pool_init_li(struct dp_soc *soc,
				   uint32_t num_elem,
				   uint8_t pool_id)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint16_t num_desc_per_page;

	tx_desc_pool = &soc->tx_desc[pool_id];
	tx_desc = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
	while (tx_desc) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc->id = id;
		tx_desc->pool_id = pool_id;
		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
		tx_desc = tx_desc->next;
		count++;
	}

	return QDF_STATUS_SUCCESS;
}

/* No LI-specific pool deinit work; intentionally empty */
void dp_tx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id)
{
}

/* Public wrapper around the (possibly stubbed) HW delay computation */
QDF_STATUS dp_tx_compute_tx_delay_li(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return dp_tx_compute_hw_delay_li(soc, vdev, ts, delay_us);
}