/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"
#include "dp_tx.h"
#include "dp_li_tx.h"
#include "dp_tx_desc.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_li_api.h>
#include <hal_li_tx.h>
#include "dp_peer.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_li.h"

extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];

void dp_tx_comp_get_params_from_hal_desc_li(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
	uint8_t pool_id;
	uint32_t tx_desc_id;
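	/*
	 * The descriptor ID carried in the completion packs the
	 * descriptor's location: pool id, page within the pool and
	 * offset within the page, each recovered below with its
	 * DP_TX_DESC_ID_* mask/shift pair.
	 */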
	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
		DP_TX_DESC_ID_POOL_OS;

	/* Find Tx descriptor */
	*r_tx_desc = dp_tx_desc_find(soc, pool_id,
				     (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				     DP_TX_DESC_ID_PAGE_OS,
				     (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				     DP_TX_DESC_ID_OFFSET_OS);
	/* Pool id does not match. Error */
	if ((*r_tx_desc)->pool_id != pool_id) {
		dp_tx_comp_alert("Tx Comp pool id %d not matched %d",
				 pool_id, (*r_tx_desc)->pool_id);

		qdf_assert_always(0);
	}

	(*r_tx_desc)->peer_id = hal_tx_comp_get_peer_id(tx_comp_hal_desc);
}

static inline
void dp_tx_process_mec_notify_li(struct dp_soc *soc, uint8_t *status)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;
	uint32_t *htt_desc = (uint32_t *)status;

	/*
	 * Get vdev id from HTT status word in case of MEC
	 * notification
	 */
	vdev_id = HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(htt_desc[3]);
	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
				     DP_MOD_ID_HTT_COMP);
	if (!vdev)
		return;
	dp_tx_mec_handler(vdev, status);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}

void dp_tx_process_htt_completion_li(struct dp_soc *soc,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t *status,
				     uint8_t ring_id)
{
	uint8_t tx_status;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = NULL;
	struct hal_tx_completion_status ts = {0};
	uint32_t *htt_desc = (uint32_t *)status;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	struct htt_soc *htt_handle;
	uint8_t vdev_id;

	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
	htt_handle = (struct htt_soc *)soc->htt_handle;
	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);

	/*
	 * There can be a scenario where the WBM consumes a descriptor
	 * enqueued from TQM2WBM first, so the TQM completion can happen
	 * before the MEC notification comes from FW2WBM. Avoid accessing
	 * any field of the tx descriptor in case of a MEC notify.
	 */
	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY)
		return dp_tx_process_mec_notify_li(soc, status);

	/*
	 * If the descriptor is already freed in vdev_detach,
	 * continue to next descriptor
	 */
	if (qdf_unlikely(!tx_desc->flags)) {
		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
				   tx_desc->id);
		return;
	}

	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

	pdev = tx_desc->pdev;

	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

	qdf_assert(tx_desc->pdev);

	vdev_id = tx_desc->vdev_id;
	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
				     DP_MOD_ID_HTT_COMP);

	if (qdf_unlikely(!vdev)) {
		dp_tx_comp_info_rl("Unable to get vdev ref %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

	switch (tx_status) {
	case HTT_TX_FW2WBM_TX_STATUS_OK:
	case HTT_TX_FW2WBM_TX_STATUS_DROP:
	case HTT_TX_FW2WBM_TX_STATUS_TTL:
	{
		uint8_t tid;
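		/*
		 * Repack the HTT WBM completion words into a
		 * hal_tx_completion_status so the common completion
		 * handlers invoked below can be reused for FW-returned
		 * frames.
		 */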
		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
			ts.peer_id =
				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
						htt_desc[2]);
			ts.tid =
				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
						htt_desc[2]);
		} else {
			ts.peer_id = HTT_INVALID_PEER;
			ts.tid = HTT_INVALID_TID;
		}
		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
		ts.ppdu_id =
			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
					htt_desc[1]);
		ts.ack_frame_rssi =
			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
					htt_desc[1]);

		ts.tsf = htt_desc[3];
		ts.first_msdu = 1;
		ts.last_msdu = 1;
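		/*
		 * Both DROP and TTL statuses map to the generic
		 * "remove command" TQM release reason; only OK is
		 * reported as a successful ACK.
		 */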
		ts.status = (tx_status == HTT_TX_FW2WBM_TX_STATUS_OK ?
			     HAL_TX_TQM_RR_FRAME_ACKED :
			     HAL_TX_TQM_RR_REM_CMD_REM);
		tid = ts.tid;
		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
			tid = CDP_MAX_DATA_TIDS - 1;

		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

		if (qdf_unlikely(pdev->delay_stats_flag) ||
		    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)))
			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
		if (tx_status < CDP_MAX_TX_HTT_STATUS)
			tid_stats->htt_status_cnt[tx_status]++;

		txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
						       &txrx_ref_handle,
						       DP_MOD_ID_HTT_COMP);
		if (qdf_likely(txrx_peer)) {
			DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1,
						   qdf_nbuf_len(tx_desc->nbuf));
			if (tx_status != HTT_TX_FW2WBM_TX_STATUS_OK)
				DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
		}

		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
					     ring_id);
		dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);

		if (qdf_likely(txrx_peer))
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_HTT_COMP);

		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
	{
		uint8_t reinject_reason;

		reinject_reason =
			HTT_TX_WBM_COMPLETION_V2_REINJECT_REASON_GET(
					htt_desc[0]);
		dp_tx_reinject_handler(soc, vdev, tx_desc,
				       status, reinject_reason);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
	{
		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
	{
		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
		goto release_tx_desc;
	}
	default:
		dp_tx_comp_err("Invalid HTT tx_status %d\n",
			       tx_status);
		goto release_tx_desc;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
	return;

release_tx_desc:
	dp_tx_comp_free_buf(soc, tx_desc);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}
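/*
 * The RBM (return buffer manager) ID programmed into the TCL descriptor
 * selects the WBM release ring on which the Tx completion is delivered.
 * The mapping from ring_id to RBM ID depends on the multiqueue, IPA
 * offload and multi-TCL build configuration, hence the variants below.
 */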
#ifdef QCA_OL_TX_MULTIQ_SUPPORT
/**
 * dp_tx_get_rbm_id_li() - Get the RBM ID for data transmission completion.
 * @soc: DP soc structure pointer
 * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
 *
 * Return: RBM ID corresponding to the given ring_id
 */
#ifdef IPA_OFFLOAD
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return (ring_id + soc->wbm_sw0_bm_id);
}
#else
#ifndef QCA_DP_ENABLE_TX_COMP_RING4
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return (ring_id ? HAL_WBM_SW0_BM_ID + (ring_id - 1) :
			  HAL_WBM_SW2_BM_ID);
}
#else
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	if (ring_id == soc->num_tcl_data_rings)
		return HAL_WBM_SW4_BM_ID(soc->wbm_sw0_bm_id);
	return (ring_id + HAL_WBM_SW0_BM_ID(soc->wbm_sw0_bm_id));
}
#endif
#endif
#else
#ifdef TX_MULTI_TCL
#ifdef IPA_OFFLOAD
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	if (soc->wlan_cfg_ctx->ipa_enabled)
		return (ring_id + soc->wbm_sw0_bm_id);

	return soc->wlan_cfg_ctx->tcl_wbm_map_array[ring_id].wbm_rbm_id;
}
#else
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return soc->wlan_cfg_ctx->tcl_wbm_map_array[ring_id].wbm_rbm_id;
}
#endif
#else
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return (ring_id + soc->wbm_sw0_bm_id);
}
#endif
#endif

#if defined(CLEAR_SW2TCL_CONSUMED_DESC)
/**
 * dp_tx_clear_consumed_hw_descs - Reset all the consumed Tx ring descs to 0
 *
 * @soc: DP soc handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: void
 */
static inline
void dp_tx_clear_consumed_hw_descs(struct dp_soc *soc,
				   hal_ring_handle_t hal_ring_hdl)
{
	void *desc = hal_srng_src_get_next_consumed(soc->hal_soc, hal_ring_hdl);

	while (desc) {
		hal_tx_desc_clear(desc);
		desc = hal_srng_src_get_next_consumed(soc->hal_soc,
						      hal_ring_hdl);
	}
}

#else
static inline
void dp_tx_clear_consumed_hw_descs(struct dp_soc *soc,
				   hal_ring_handle_t hal_ring_hdl)
{
}
#endif /* CLEAR_SW2TCL_CONSUMED_DESC */

#ifdef CONFIG_SAWF
/**
 * dp_sawf_config_li - Configure SAWF specific fields in TCL
 *
 * @soc: DP soc handle
 * @hal_tx_desc_cached: tx descriptor
 * @fw_metadata: pointer to the FW metadata to be set in the descriptor
 * @vdev_id: vdev id
 * @nbuf: skb buffer
 *
 * Return: void
 */
static inline
void dp_sawf_config_li(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
		       uint16_t *fw_metadata, uint16_t vdev_id,
		       qdf_nbuf_t nbuf)
{
	uint8_t q_id = 0;
	uint32_t search_index;

	if (!wlan_cfg_get_sawf_config(soc->wlan_cfg_ctx))
		return;

	q_id = dp_sawf_queue_id_get(nbuf);
	if (q_id == DP_SAWF_DEFAULT_Q_INVALID)
		return;

	dp_sawf_tcl_cmd(fw_metadata, nbuf);

	search_index = dp_sawf_get_search_index(soc, nbuf, vdev_id,
						q_id);
	hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, (q_id & 0x7));
	hal_tx_desc_set_search_type_li(soc->hal_soc, hal_tx_desc_cached,
				       HAL_TX_ADDR_INDEX_SEARCH);
	hal_tx_desc_set_search_index_li(soc->hal_soc, hal_tx_desc_cached,
					search_index);
}

static inline
QDF_STATUS dp_tx_compute_hw_delay_li(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return dp_tx_compute_hw_delay_us(ts, vdev->delta_tsf, delay_us);
}
#else
static inline
void dp_sawf_config_li(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
		       uint16_t *fw_metadata, uint16_t vdev_id,
		       qdf_nbuf_t nbuf)
{
}

static inline
QDF_STATUS dp_tx_compute_hw_delay_li(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return QDF_STATUS_SUCCESS;
}

#define dp_sawf_tx_enqueue_peer_stats(soc, tx_desc)
#define dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc)
#endif
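/*
 * dp_tx_hw_enqueue_li() builds the TCL descriptor in a cached copy on
 * the stack first and only syncs it into the SW2TCL ring slot once a
 * slot has been claimed, keeping the time spent inside the ring access
 * window short.
 */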
QDF_STATUS
dp_tx_hw_enqueue_li(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info)
{
	void *hal_tx_desc;
	uint32_t *hal_tx_desc_cached;
	int coalesce = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	uint8_t ring_id = tx_q->ring_id & DP_TX_QUEUE_MASK;
	uint8_t tid = msdu_info->tid;

	/*
	 * Zero-initialize the descriptor statically here to avoid the
	 * call/jump into memset that a qdf_mem_set() would incur
	 */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };

	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
			tx_exc_metadata->sec_type : vdev->sec_type);

	/* Return Buffer Manager ID */
	uint8_t bm_id = dp_tx_get_rbm_id_li(soc, ring_id);

	hal_ring_handle_t hal_ring_hdl = NULL;

	QDF_STATUS status = QDF_STATUS_E_RESOURCES;

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	hal_tx_desc_cached = (void *)cached_desc;

	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 tx_desc->dma_addr, bm_id, tx_desc->id,
				 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
	hal_tx_desc_set_lmac_id_li(soc->hal_soc, hal_tx_desc_cached,
				   vdev->lmac_id);
	hal_tx_desc_set_search_type_li(soc->hal_soc, hal_tx_desc_cached,
				       vdev->search_type);
	hal_tx_desc_set_search_index_li(soc->hal_soc, hal_tx_desc_cached,
					vdev->bss_ast_idx);
	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
					  vdev->dscp_tid_map_id);

	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));

	if (dp_sawf_tag_valid_get(tx_desc->nbuf)) {
		dp_sawf_config_li(soc, hal_tx_desc_cached, &fw_metadata,
				  vdev->vdev_id, tx_desc->nbuf);
		dp_sawf_tx_enqueue_peer_stats(soc, tx_desc);
	}

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
					  vdev->hal_desc_addr_search_flags);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
				   QDF_NBUF_TX_CKSUM_TCP_UDP) ||
	    qdf_nbuf_is_tso(tx_desc->nbuf)) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

	if (!dp_tx_desc_set_ktimestamp(vdev, tx_desc))
		dp_tx_desc_set_timestamp(tx_desc);

	dp_verbose_debug("length:%d, type = %d, dma_addr %llx, offset %d desc id %u",
			 tx_desc->length,
			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
			 tx_desc->id);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RING Access Failed -- %pK",
			  __func__, __LINE__, hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
		return status;
	}

	dp_tx_clear_consumed_hw_descs(soc, hal_ring_hdl);

	/* Sync cached descriptor with HW */

	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
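	/*
	 * Coalescing, where supported, allows the TCL ring doorbell
	 * update to be deferred and batched with later enqueues; the
	 * decision taken here is honoured by
	 * dp_tx_ring_access_end_wrapper() below.
	 */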
	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
					    msdu_info, ring_id);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
	DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
	dp_tx_update_stats(soc, tx_desc, ring_id);
	status = QDF_STATUS_SUCCESS;

	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
				 hal_ring_hdl, soc);

ring_access_fail:
	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
			     qdf_get_log_timestamp(), tx_desc->nbuf);

	return status;
}

QDF_STATUS dp_tx_desc_pool_init_li(struct dp_soc *soc,
				   uint32_t num_elem,
				   uint8_t pool_id)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint16_t num_desc_per_page;

	tx_desc_pool = &soc->tx_desc[pool_id];
	tx_desc = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
	while (tx_desc) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc->id = id;
		tx_desc->pool_id = pool_id;
		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
		tx_desc = tx_desc->next;
		count++;
	}

	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id)
{
}

QDF_STATUS dp_tx_compute_tx_delay_li(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return dp_tx_compute_hw_delay_li(soc, vdev, ts, delay_us);
}