/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include "qdf_module.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#include "dp_hist.h"
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <dp_swlm.h>
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "cdp_txrx_cmn_reg.h"
#ifdef CONFIG_SAWF
#include <dp_sawf.h>
#endif

/* Flag to skip CCE classify when mesh or tid override enabled */
#define DP_TX_SKIP_CCE_CLASSIFY \
	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)

/* TODO Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject*/
#define DP_INVALID_PEER 0XFFFE

#define DP_RETRY_COUNT 7

/*
 * TCL metadata accessors: the V2 metadata layout is selected at build time
 * via QCA_DP_TX_FW_METADATA_V2; callers always use the DP_* names.
 */
#ifdef QCA_DP_TX_FW_METADATA_V2
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
#else
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
	HTT_TCL_METADATA_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
	HTT_TCL_METADATA_TYPE_VDEV_BASED
#endif

/* mapping between hal encrypt type and cdp_sec_type */
uint8_t sec_type_map[MAX_CDP_SEC_TYPE] =
{HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
	HAL_TX_ENCRYPT_TYPE_WEP_128,
	HAL_TX_ENCRYPT_TYPE_WEP_104,
	HAL_TX_ENCRYPT_TYPE_WEP_40,
	HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
	HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
	HAL_TX_ENCRYPT_TYPE_WAPI,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
	HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
qdf_export_symbol(sec_type_map);

#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * dp_update_tx_desc_stats - Update the increase or decrease in
 * outstanding tx desc count
 * values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
	int32_t tx_descs_cnt =
		qdf_atomic_read(&pdev->num_tx_outstanding);
	/* Track the high-water mark of outstanding descriptors */
	if (pdev->tx_descs_max < tx_descs_cnt)
		pdev->tx_descs_max = tx_descs_cnt;
	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
				   pdev->tx_descs_max);
}

#else /* CONFIG_WLAN_SYSFS_MEM_STATS */

static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */

#ifdef QCA_TX_LIMIT_CHECK
/**
 * dp_tx_limit_check - Check if allocated tx descriptors reached
 * soc max limit and pdev max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* soc-wide limit first, then the per-pdev limit */
	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
			soc->num_tx_allowed) {
		dp_tx_info("queued packets are more than max tx, drop the frame");
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return true;
	}

	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_tx_allowed) {
		dp_tx_info("queued packets are more than max tx, drop the frame");
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_outstand.num, 1);
		return true;
	}
	return false;
}

/**
 * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
 * reached soc max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_exception) >=
			soc->num_msdu_exception_desc) {
		dp_info("exc packets are more than max drop the exc pkt");
		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
		return true;
	}

	return false;
}

/**
 * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_inc(&pdev->num_tx_outstanding);
	qdf_atomic_inc(&soc->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

/**
 * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_dec(&pdev->num_tx_outstanding);
	qdf_atomic_dec(&soc->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

#else //QCA_TX_LIMIT_CHECK
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	return false;
}

static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	return false;
}

/* Without limit checking only the per-pdev counter is maintained */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
#endif //QCA_TX_LIMIT_CHECK

#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
/* Map tx descriptor flags to the history event to be recorded */
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
	enum dp_tx_event_type type;

	if (flags & DP_TX_DESC_FLAG_FLUSH)
		type = DP_TX_DESC_FLUSH;
	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
		type = DP_TX_COMP_UNMAP_ERR;
	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
		type = DP_TX_COMP_UNMAP;
	else
		type = DP_TX_DESC_UNMAP;

	return type;
}

/*
 * Record one tx descriptor event in the appropriate history ring:
 * completion events go to tx_comp_history, enqueue/map events to
 * tx_tcl_history.
 */
static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
		       qdf_nbuf_t skb, uint32_t sw_cookie,
		       enum dp_tx_event_type type)
{
	struct dp_tx_desc_event *entry;
	uint32_t idx;

	if (qdf_unlikely(!soc->tx_tcl_history || !soc->tx_comp_history))
		return;

	switch (type) {
	case DP_TX_COMP_UNMAP:
	case DP_TX_COMP_UNMAP_ERR:
	case DP_TX_COMP_MSDU_EXT:
		idx = dp_history_get_next_index(&soc->tx_comp_history->index,
						DP_TX_COMP_HISTORY_SIZE);
		entry = &soc->tx_comp_history->entry[idx];
		break;
	case DP_TX_DESC_MAP:
	case DP_TX_DESC_UNMAP:
	case DP_TX_DESC_COOKIE:
	case DP_TX_DESC_FLUSH:
		idx = dp_history_get_next_index(&soc->tx_tcl_history->index,
						DP_TX_TCL_HISTORY_SIZE);
		entry = &soc->tx_tcl_history->entry[idx];
		break;
	default:
		dp_info_rl("Invalid dp_tx_event_type: %d", type);
		return;
	}

	entry->skb = skb;
	entry->paddr = paddr;
	entry->sw_cookie = sw_cookie;
	entry->type = type;
	entry->ts = qdf_get_log_timestamp();
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  qdf_nbuf_t skb, uint32_t sw_cookie,
			  enum dp_tx_event_type type)
{
	int i;

	/*
	 * Frags are recorded from index 1; frag 0 is recorded only for the
	 * last segment, with a sentinel cookie.
	 * NOTE(review): presumably frag 0 is a header buffer shared across
	 * segments - confirm against qdf TSO segmentation.
	 */
	for (i = 1; i < tso_seg->seg.num_frags; i++) {
		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
				       skb, sw_cookie, type);
	}

	if (!tso_seg->next)
		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
				       skb, 0xFFFFFFFF, type);
}

/* Walk the whole TSO segment list and record each segment's frags */
static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
		      qdf_nbuf_t skb, uint32_t sw_cookie,
		      enum dp_tx_event_type type)
{
	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
	uint32_t num_segs = tso_info.num_segs;

	while (num_segs) {
		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
		curr_seg = curr_seg->next;
		num_segs--;
	}
}

#else
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
	return DP_TX_DESC_INVAL_EVT;
}

static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
		       qdf_nbuf_t skb, uint32_t sw_cookie,
		       enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  qdf_nbuf_t skb, uint32_t sw_cookie,
			  enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
		      qdf_nbuf_t skb, uint32_t sw_cookie,
		      enum dp_tx_event_type type)
{
}
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */

static int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc);

/**
 * dp_is_tput_high() - Check if throughput is high
 *
 * @soc - core txrx main context
 *
 * The current function is based of the RTPM tput policy variable where RTPM is
 * avoided based on throughput.
 */
static inline int dp_is_tput_high(struct dp_soc *soc)
{
	return dp_get_rtpm_tput_policy_requirement(soc);
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc - core txrx main context
 * @seg_desc - tso segment descriptor
 * @num_seg_desc - tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	TSO_DEBUG("%s: Unmap the tso segment", __func__);
	if (qdf_unlikely(!seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else {
		bool is_last_seg;
		/* no tso segment left to do dma unmap */
		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
			return;

		/* last remaining segment triggers the full nbuf unmap */
		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
			      true : false;
		qdf_nbuf_unmap_tso_segment(soc->osdev,
					   seg_desc, is_last_seg);
		num_seg_desc->num_seg.tso_cmn_num_seg--;
	}
}

/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 *
 * @soc - soc device handle
 * @tx_desc - Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
		/* NOTE(review): message likely meant "TSO desc is NULL!" */
		dp_tx_err("SO desc is NULL!");
		qdf_assert(0);
	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
		dp_tx_err("TSO num desc is NULL!");
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *)tx_desc->
			msdu_ext_desc->tso_num_desc;

		/* Add the tso num segment into the free list */
		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					    tx_desc->msdu_ext_desc->
					    tso_num_desc);
			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
		}

		/* Add the tso segment into the free list*/
		dp_tx_tso_desc_free(soc,
				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
				    tso_desc);
		tx_desc->msdu_ext_desc->tso_desc = NULL;
	}
}
#else
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)

{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc : Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return:
 */
void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	struct dp_soc *soc;
	uint8_t comp_status = 0;

	qdf_assert(pdev);

	soc = pdev->soc;

	dp_tx_outstanding_dec(pdev);

	/* Release any extension descriptor resources (TSO/ME) first */
	if (tx_desc->msdu_ext_desc) {
		if (tx_desc->frm_type == dp_tx_frm_tso)
			dp_tx_tso_desc_release(soc, tx_desc);

		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
			dp_tx_me_free_buf(tx_desc->pdev,
					  tx_desc->msdu_ext_desc->me_buffer);

		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
	}

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&soc->num_tx_exception);

	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
	    tx_desc->buffer_src)
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
							     soc->hal_soc);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
		    tx_desc->id, comp_status,
		    qdf_atomic_read(&pdev->num_tx_outstanding));

	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
	return;
}

/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently needs this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 *
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					  struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t *meta_data = msdu_info->meta_data;
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *)meta_data;

	uint8_t htt_desc_size;

	/* Size rounded of multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;

	uint8_t *hdr = NULL;

	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
							   meta_data[0])) {
		/* Grow headroom if it cannot hold the aligned metadata */
		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
				 htt_desc_size_aligned)) {
			nbuf = qdf_nbuf_realloc_headroom(nbuf,
							 htt_desc_size_aligned);
			if (!nbuf) {
				/*
				 * qdf_nbuf_realloc_headroom won't do skb_clone
				 * as skb_realloc_headroom does. so, no free is
				 * needed here.
				 */
				DP_STATS_INC(vdev,
					     tx_i.dropped.headroom_insufficient,
					     1);
				qdf_print(" %s[%d] skb_realloc_headroom failed",
					  __func__, __LINE__);
				return 0;
			}
		}
		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (!hdr) {
			dp_tx_err("Error in filling HTT metadata");

			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);

	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* Todo - Add support for DSRC */
	}

	return htt_desc_size_aligned;
}

/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;

	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags = *(uint32_t *)&tso_seg->tso_flags;

	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
					tso_seg->tso_flags.ip_len);

	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

	/* Program each fragment's DMA address/length into the descriptor */
	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
		uint32_t lo = 0;
		uint32_t hi = 0;

		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
				  (tso_seg->tso_frags[num_frag].length));

		qdf_dmaaddr_to_32s(
			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
					   tso_seg->tso_frags[num_frag].length);
	}

	return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	return;
}
#endif

#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_desc_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 *
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 * Return - void
 */
static void dp_tx_free_tso_num_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_num_seg_elem_t *free_num_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_num_seg_elem_t *next_num_seg;

	while (free_num_seg) {
		next_num_seg = free_num_seg->next;
		dp_tso_num_seg_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_num_seg);
		free_num_seg = next_num_seg;
	}
}

/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
 *                              do dma unmap for each segment
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return - void
 */
static void dp_tx_unmap_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	struct qdf_tso_seg_elem_t *next_seg;

	if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
		return;
	}

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
		free_seg = next_seg;
	}
}

#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx: Retrieve the tso packet id
 * @pdev - pdev handle
 *
 * Return: id
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	uint32_t stats_idx;

	/* Monotonic counter wrapped into the stats array range */
	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
			% CDP_MAX_TSO_PACKETS);
	return stats_idx;
}
#else
static int dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	return 0;
}
#endif /* FEATURE_TSO_STATS */

/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *                                   free the tso segments descriptor and
 *                                   tso num segments descriptor
 *
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return - void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
					  struct dp_tx_msdu_info_s *msdu_info,
					  bool tso_seg_unmap)
{
	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
	struct qdf_tso_num_seg_elem_t *tso_num_desc =
		tso_info->tso_num_seg_list;

	/* do dma unmap for each segment */
	if (tso_seg_unmap)
		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

	/* free all tso number segment descriptor though looks only have 1 */
	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

	/* free all tso segment descriptor */
	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}

/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *tso_seg;
	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_pdev *pdev = vdev->pdev;
	struct qdf_tso_info_t *tso_info;
	struct qdf_tso_num_seg_elem_t *tso_num_seg;
	tso_info = &msdu_info->u.tso_info;
	tso_info->curr_seg = NULL;
	tso_info->tso_seg_list = NULL;
	tso_info->num_segs = num_seg;
	msdu_info->frm_type = dp_tx_frm_tso;
	tso_info->tso_num_seg_list = NULL;

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	/* Pre-allocate one segment descriptor per TSO segment */
	while (num_seg) {
		tso_seg = dp_tx_tso_desc_alloc(
				soc, msdu_info->tx_queue.desc_pool_id);
		if (tso_seg) {
			tso_seg->next = tso_info->tso_seg_list;
			tso_info->tso_seg_list = tso_seg;
			num_seg--;
		} else {
			dp_err_rl("Failed to alloc tso seg desc");
			DP_STATS_INC_PKT(vdev->pdev,
					 tso_stats.tso_no_mem_dropped, 1,
					 qdf_nbuf_len(msdu));
			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);

			return QDF_STATUS_E_NOMEM;
		}
	}

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	tso_num_seg = dp_tso_num_seg_alloc(soc,
					   msdu_info->tx_queue.desc_pool_id);

	if (tso_num_seg) {
		tso_num_seg->next = tso_info->tso_num_seg_list;
		tso_info->tso_num_seg_list = tso_num_seg;
	} else {
		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
			 __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);

		return QDF_STATUS_E_NOMEM;
	}

	msdu_info->num_seg =
		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
		  msdu_info->num_seg);

	if (!(msdu_info->num_seg)) {
		/*
		 * Free allocated TSO seg desc and number seg desc,
		 * do unmap for segments if dma map has done.
		 */
		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);

		return QDF_STATUS_E_INVAL;
	}
	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
			      msdu, 0, DP_TX_DESC_MAP);

	tso_info->curr_seg = tso_info->tso_seg_list;

	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
			     msdu, msdu_info->num_seg);
	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
				    tso_info->msdu_stats_idx);
	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	return QDF_STATUS_E_NOMEM;
}
#endif

/* meta_data buffer must be large enough to hold the HTT ext2 descriptor */
QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
			 sizeof(struct htt_tx_msdu_desc_ext2_t)));

/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return:
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
	uint8_t i;
	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
	struct dp_tx_seg_info_s *seg_info;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_soc *soc = vdev->pdev->soc;

	/* Allocate an extension descriptor */
	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

	if (!msdu_ext_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	/* Mesh exception frames carry HTT metadata after the HAL desc */
	if (msdu_info->exception_fw &&
	    qdf_unlikely(vdev->mesh_vdev)) {
		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
			     &msdu_info->meta_data[0],
			     sizeof(struct htt_tx_msdu_desc_ext2_t));
		qdf_atomic_inc(&soc->num_tx_exception);
		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
	}

	switch (msdu_info->frm_type) {
	case dp_tx_frm_sg:
	case dp_tx_frm_me:
	case dp_tx_frm_raw:
		seg_info = msdu_info->u.sg_info.curr_seg;
		/* Update the buffer pointers in MSDU Extension Descriptor */
		for (i = 0; i < seg_info->frag_cnt; i++) {
			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
						   seg_info->frags[i].paddr_lo,
						   seg_info->frags[i].paddr_hi,
						   seg_info->frags[i].len);
		}

		break;

	case dp_tx_frm_tso:
		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
					   &cached_ext_desc[0]);
		break;

	default:
		break;
	}

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

	/* Sync the cached descriptor into the DMA-visible memory */
	hal_tx_ext_desc_sync(&cached_ext_desc[0],
			     msdu_ext_desc->vaddr);

	return msdu_ext_desc;
}

/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 *
 * @soc: DP soc handle
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(struct dp_soc *soc,
			    qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
}
#else
static void dp_tx_trace_pkt(struct dp_soc *soc,
			    qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
	/* Skip tracing under high throughput to avoid overhead */
	if (dp_is_tput_high(soc))
		return;

	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
	DPTRACE(qdf_dp_trace_ptr(skb,
				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(skb),
				 sizeof(qdf_nbuf_data(skb)),
				 msdu_id, vdev_id, 0));

	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
				      msdu_id, QDF_TX));
}
#endif

#ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
/**
 * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
 *				      exception by the upper layer (OS_IF)
 * @soc: DP soc handle
 * @nbuf: packet to be transmitted
 *
 * Returns: 1 if the packet is marked as exception,
 *	    0, if the packet is not marked as exception.
 */
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
}
#else
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
	return 0;
}
#endif

/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: MSDU info carrying metadata to the fw
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
		struct dp_tx_msdu_info_s *msdu_info,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	uint8_t align_pad;
	uint8_t is_exception = 0;
	uint8_t htt_hdr_size;
	struct dp_tx_desc_s *tx_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (dp_tx_limit_check(vdev))
		return NULL;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
		return NULL;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = dp_tx_frm_std;
	/* Exception-path encap type overrides the vdev default if valid */
	tx_desc->tx_encap_type = ((tx_exc_metadata &&
		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
	tx_desc->vdev_id = vdev->vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->msdu_ext_desc = NULL;
	tx_desc->pkt_offset = 0;
	tx_desc->length = qdf_nbuf_headlen(nbuf);

	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);

	if (qdf_unlikely(vdev->multipass_en)) {
		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
			goto failure;
	}

	/* Packets marked by upper layer (OS-IF) to be sent to FW */
	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
		is_exception = 1;
	/*
	 * For special modes (vdev_type == ocb or mesh), data frames should be
	 * transmitted using varying transmit parameters (tx spec) which include
	 * transmit rate, power, priority, channel, channel bandwidth , nss etc.
	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
	 * These frames are sent as exception packets to firmware.
	 *
	 * HW requirement is that metadata should always point to a
	 * 8-byte aligned address. So we add alignment pad to start of buffer.
	 * HTT Metadata should be ensured to be multiple of 8-bytes,
	 * to get 8-byte aligned start address along with align_pad added
	 *
	 *  |-----------------------------|
	 *  |                             |
	 *  |-----------------------------| <-----Buffer Pointer Address given
	 *  |                             |  ^    in HW descriptor (aligned)
	 *  |       HTT Metadata          |  |
	 *  |                             |  |
	 *  |                             |  | Packet Offset given in descriptor
	 *  |                             |  |
	 *  |-----------------------------|  |
	 *  |       Alignment Pad         |  v
	 *  |-----------------------------| <----- Actual buffer start address
	 *  |        SKB Data             |           (Unaligned)
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |-----------------------------|
	 */
	if (qdf_unlikely((msdu_info->exception_fw)) ||
	    (vdev->opmode == wlan_op_mode_ocb) ||
	    (tx_exc_metadata &&
	     tx_exc_metadata->is_tx_sniffer)) {
		align_pad = ((unsigned long)qdf_nbuf_data(nbuf)) & 0x7;

		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
			DP_STATS_INC(vdev,
				     tx_i.dropped.headroom_insufficient, 1);
			goto failure;
		}

		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
			dp_tx_err("qdf_nbuf_push_head failed");
			goto failure;
		}

		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
							  msdu_info);
		if (htt_hdr_size == 0)
			goto failure;

		/* length excludes the pad + metadata consumed by pkt_offset */
		tx_desc->length = qdf_nbuf_headlen(nbuf);
		tx_desc->pkt_offset = align_pad + htt_hdr_size;
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		is_exception = 1;
		tx_desc->length -= tx_desc->pkt_offset;
	}

#if !TQM_BYPASS_WAR
	if (is_exception || tx_exc_metadata)
#endif
	{
		/* Temporary WAR due to TQM VP issues */
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		qdf_atomic_inc(&soc->num_tx_exception);
	}

	return tx_desc;

failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}

/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
 * @desc_pool_id : Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (dp_tx_limit_check(vdev))
		return NULL;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (!tx_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}
	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = msdu_info->frm_type;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev_id = vdev->vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;

	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);

	/* Handle scattered frames - TSO/SG/ME */
	/* Allocate and prepare an extension descriptor for scattered frames */
	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev,
msdu_info, desc_pool_id); 1222 if (!msdu_ext_desc) { 1223 dp_tx_info("Tx Extension Descriptor Alloc Fail"); 1224 goto failure; 1225 } 1226 1227 #if TQM_BYPASS_WAR 1228 /* Temporary WAR due to TQM VP issues */ 1229 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; 1230 qdf_atomic_inc(&soc->num_tx_exception); 1231 #endif 1232 if (qdf_unlikely(msdu_info->exception_fw)) 1233 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; 1234 1235 tx_desc->msdu_ext_desc = msdu_ext_desc; 1236 tx_desc->flags |= DP_TX_DESC_FLAG_FRAG; 1237 1238 msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg; 1239 msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list; 1240 1241 tx_desc->dma_addr = msdu_ext_desc->paddr; 1242 1243 if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID) 1244 tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA; 1245 else 1246 tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES; 1247 1248 return tx_desc; 1249 failure: 1250 dp_tx_desc_release(tx_desc, desc_pool_id); 1251 return NULL; 1252 } 1253 1254 /** 1255 * dp_tx_prepare_raw() - Prepare RAW packet TX 1256 * @vdev: DP vdev handle 1257 * @nbuf: buffer pointer 1258 * @seg_info: Pointer to Segment info Descriptor to be prepared 1259 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension 1260 * descriptor 1261 * 1262 * Return: 1263 */ 1264 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1265 struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info) 1266 { 1267 qdf_nbuf_t curr_nbuf = NULL; 1268 uint16_t total_len = 0; 1269 qdf_dma_addr_t paddr; 1270 int32_t i; 1271 int32_t mapped_buf_num = 0; 1272 1273 struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info; 1274 qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data; 1275 1276 DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf)); 1277 1278 /* Continue only if frames are of DATA type */ 1279 if (!DP_FRAME_IS_DATA(qos_wh)) { 1280 DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1); 1281 
dp_tx_debug("Pkt. recd is of not data type"); 1282 goto error; 1283 } 1284 /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */ 1285 if (vdev->raw_mode_war && 1286 (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) && 1287 (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU)) 1288 qos_wh->i_fc[1] |= IEEE80211_FC1_WEP; 1289 1290 for (curr_nbuf = nbuf, i = 0; curr_nbuf; 1291 curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) { 1292 /* 1293 * Number of nbuf's must not exceed the size of the frags 1294 * array in seg_info. 1295 */ 1296 if (i >= DP_TX_MAX_NUM_FRAGS) { 1297 dp_err_rl("nbuf cnt exceeds the max number of segs"); 1298 DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1); 1299 goto error; 1300 } 1301 if (QDF_STATUS_SUCCESS != 1302 qdf_nbuf_map_nbytes_single(vdev->osdev, 1303 curr_nbuf, 1304 QDF_DMA_TO_DEVICE, 1305 curr_nbuf->len)) { 1306 dp_tx_err("%s dma map error ", __func__); 1307 DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1); 1308 goto error; 1309 } 1310 /* Update the count of mapped nbuf's */ 1311 mapped_buf_num++; 1312 paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0); 1313 seg_info->frags[i].paddr_lo = paddr; 1314 seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32); 1315 seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf); 1316 seg_info->frags[i].vaddr = (void *) curr_nbuf; 1317 total_len += qdf_nbuf_len(curr_nbuf); 1318 } 1319 1320 seg_info->frag_cnt = i; 1321 seg_info->total_len = total_len; 1322 seg_info->next = NULL; 1323 1324 sg_info->curr_seg = seg_info; 1325 1326 msdu_info->frm_type = dp_tx_frm_raw; 1327 msdu_info->num_seg = 1; 1328 1329 return nbuf; 1330 1331 error: 1332 i = 0; 1333 while (nbuf) { 1334 curr_nbuf = nbuf; 1335 if (i < mapped_buf_num) { 1336 qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf, 1337 QDF_DMA_TO_DEVICE, 1338 curr_nbuf->len); 1339 i++; 1340 } 1341 nbuf = qdf_nbuf_next(nbuf); 1342 qdf_nbuf_free(curr_nbuf); 1343 } 1344 return NULL; 1345 1346 } 1347 1348 /** 1349 * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to 
RAW frame. 1350 * @soc: DP soc handle 1351 * @nbuf: Buffer pointer 1352 * 1353 * unmap the chain of nbufs that belong to this RAW frame. 1354 * 1355 * Return: None 1356 */ 1357 static void dp_tx_raw_prepare_unset(struct dp_soc *soc, 1358 qdf_nbuf_t nbuf) 1359 { 1360 qdf_nbuf_t cur_nbuf = nbuf; 1361 1362 do { 1363 qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf, 1364 QDF_DMA_TO_DEVICE, 1365 cur_nbuf->len); 1366 cur_nbuf = qdf_nbuf_next(cur_nbuf); 1367 } while (cur_nbuf); 1368 } 1369 1370 #ifdef VDEV_PEER_PROTOCOL_COUNT 1371 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl, 1372 qdf_nbuf_t nbuf) 1373 { 1374 qdf_nbuf_t nbuf_local; 1375 struct dp_vdev *vdev_local = vdev_hdl; 1376 1377 do { 1378 if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) 1379 break; 1380 nbuf_local = nbuf; 1381 if (qdf_unlikely(((vdev_local)->tx_encap_type) == 1382 htt_cmn_pkt_type_raw)) 1383 break; 1384 else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local)))) 1385 break; 1386 else if (qdf_nbuf_is_tso((nbuf_local))) 1387 break; 1388 dp_vdev_peer_stats_update_protocol_cnt((vdev_local), 1389 (nbuf_local), 1390 NULL, 1, 0); 1391 } while (0); 1392 } 1393 #endif 1394 1395 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR 1396 /** 1397 * dp_tx_update_stats() - Update soc level tx stats 1398 * @soc: DP soc handle 1399 * @tx_desc: TX descriptor reference 1400 * @ring_id: TCL ring id 1401 * 1402 * Returns: none 1403 */ 1404 void dp_tx_update_stats(struct dp_soc *soc, 1405 struct dp_tx_desc_s *tx_desc, 1406 uint8_t ring_id) 1407 { 1408 uint32_t stats_len = 0; 1409 1410 if (tx_desc->frm_type == dp_tx_frm_tso) 1411 stats_len = tx_desc->msdu_ext_desc->tso_desc->seg.total_len; 1412 else 1413 stats_len = qdf_nbuf_len(tx_desc->nbuf); 1414 1415 DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len); 1416 } 1417 1418 int 1419 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev, 1420 struct dp_tx_desc_s *tx_desc, 1421 uint8_t tid, 1422 struct dp_tx_msdu_info_s *msdu_info, 
1423 uint8_t ring_id) 1424 { 1425 struct dp_swlm *swlm = &soc->swlm; 1426 union swlm_data swlm_query_data; 1427 struct dp_swlm_tcl_data tcl_data; 1428 QDF_STATUS status; 1429 int ret; 1430 1431 if (!swlm->is_enabled) 1432 return msdu_info->skip_hp_update; 1433 1434 tcl_data.nbuf = tx_desc->nbuf; 1435 tcl_data.tid = tid; 1436 tcl_data.ring_id = ring_id; 1437 if (tx_desc->frm_type == dp_tx_frm_tso) { 1438 tcl_data.pkt_len = 1439 tx_desc->msdu_ext_desc->tso_desc->seg.total_len; 1440 } else { 1441 tcl_data.pkt_len = qdf_nbuf_len(tx_desc->nbuf); 1442 } 1443 tcl_data.num_ll_connections = vdev->num_latency_critical_conn; 1444 swlm_query_data.tcl_data = &tcl_data; 1445 1446 status = dp_swlm_tcl_pre_check(soc, &tcl_data); 1447 if (QDF_IS_STATUS_ERROR(status)) { 1448 dp_swlm_tcl_reset_session_data(soc, ring_id); 1449 DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1); 1450 return 0; 1451 } 1452 1453 ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data); 1454 if (ret) { 1455 DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1); 1456 } else { 1457 DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1); 1458 } 1459 1460 return ret; 1461 } 1462 1463 void 1464 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl, 1465 int coalesce) 1466 { 1467 if (coalesce) 1468 dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl); 1469 else 1470 dp_tx_hal_ring_access_end(soc, hal_ring_hdl); 1471 } 1472 1473 static inline void 1474 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info) 1475 { 1476 if (((i + 1) < msdu_info->num_seg)) 1477 msdu_info->skip_hp_update = 1; 1478 else 1479 msdu_info->skip_hp_update = 0; 1480 } 1481 1482 static inline void 1483 dp_flush_tcp_hp(struct dp_soc *soc, uint8_t ring_id) 1484 { 1485 hal_ring_handle_t hal_ring_hdl = 1486 dp_tx_get_hal_ring_hdl(soc, ring_id); 1487 1488 if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) { 1489 dp_err("Fillmore: SRNG access start failed"); 1490 return; 1491 } 1492 1493 
dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0); 1494 } 1495 1496 static inline void 1497 dp_tx_check_and_flush_hp(struct dp_soc *soc, 1498 QDF_STATUS status, 1499 struct dp_tx_msdu_info_s *msdu_info) 1500 { 1501 if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) { 1502 dp_flush_tcp_hp(soc, 1503 (msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK)); 1504 } 1505 } 1506 #else 1507 static inline void 1508 dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info) 1509 { 1510 } 1511 1512 static inline void 1513 dp_tx_check_and_flush_hp(struct dp_soc *soc, 1514 QDF_STATUS status, 1515 struct dp_tx_msdu_info_s *msdu_info) 1516 { 1517 } 1518 #endif 1519 1520 #ifdef FEATURE_RUNTIME_PM 1521 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc) 1522 { 1523 return qdf_atomic_read(&soc->rtpm_high_tput_flag); 1524 } 1525 /** 1526 * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end 1527 * @soc: Datapath soc handle 1528 * @hal_ring_hdl: HAL ring handle 1529 * @coalesce: Coalesce the current write or not 1530 * 1531 * Wrapper for HAL ring access end for data transmission for 1532 * FEATURE_RUNTIME_PM 1533 * 1534 * Returns: none 1535 */ 1536 void 1537 dp_tx_ring_access_end_wrapper(struct dp_soc *soc, 1538 hal_ring_handle_t hal_ring_hdl, 1539 int coalesce) 1540 { 1541 int ret; 1542 1543 /* 1544 * Avoid runtime get and put APIs under high throughput scenarios. 
1545 */ 1546 if (dp_get_rtpm_tput_policy_requirement(soc)) { 1547 dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce); 1548 return; 1549 } 1550 1551 ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP); 1552 if (QDF_IS_STATUS_SUCCESS(ret)) { 1553 if (hif_system_pm_state_check(soc->hif_handle)) { 1554 dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl); 1555 hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); 1556 hal_srng_inc_flush_cnt(hal_ring_hdl); 1557 } else { 1558 dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce); 1559 } 1560 hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP); 1561 } else { 1562 dp_runtime_get(soc); 1563 dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl); 1564 hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); 1565 qdf_atomic_inc(&soc->tx_pending_rtpm); 1566 hal_srng_inc_flush_cnt(hal_ring_hdl); 1567 dp_runtime_put(soc); 1568 } 1569 } 1570 #else 1571 1572 #ifdef DP_POWER_SAVE 1573 void 1574 dp_tx_ring_access_end_wrapper(struct dp_soc *soc, 1575 hal_ring_handle_t hal_ring_hdl, 1576 int coalesce) 1577 { 1578 if (hif_system_pm_state_check(soc->hif_handle)) { 1579 dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl); 1580 hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); 1581 hal_srng_inc_flush_cnt(hal_ring_hdl); 1582 } else { 1583 dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce); 1584 } 1585 } 1586 #endif 1587 1588 static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc) 1589 { 1590 return 0; 1591 } 1592 #endif 1593 1594 /** 1595 * dp_tx_get_tid() - Obtain TID to be used for this frame 1596 * @vdev: DP vdev handle 1597 * @nbuf: skb 1598 * 1599 * Extract the DSCP or PCP information from frame and map into TID value. 
1600 * 1601 * Return: void 1602 */ 1603 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1604 struct dp_tx_msdu_info_s *msdu_info) 1605 { 1606 uint8_t tos = 0, dscp_tid_override = 0; 1607 uint8_t *hdr_ptr, *L3datap; 1608 uint8_t is_mcast = 0; 1609 qdf_ether_header_t *eh = NULL; 1610 qdf_ethervlan_header_t *evh = NULL; 1611 uint16_t ether_type; 1612 qdf_llc_t *llcHdr; 1613 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev; 1614 1615 DP_TX_TID_OVERRIDE(msdu_info, nbuf); 1616 if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { 1617 eh = (qdf_ether_header_t *)nbuf->data; 1618 hdr_ptr = (uint8_t *)(eh->ether_dhost); 1619 L3datap = hdr_ptr + sizeof(qdf_ether_header_t); 1620 } else { 1621 qdf_dot3_qosframe_t *qos_wh = 1622 (qdf_dot3_qosframe_t *) nbuf->data; 1623 msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ? 1624 qos_wh->i_qos[0] & DP_QOS_TID : 0; 1625 return; 1626 } 1627 1628 is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr); 1629 ether_type = eh->ether_type; 1630 1631 llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t)); 1632 /* 1633 * Check if packet is dot3 or eth2 type. 
1634 */ 1635 if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) { 1636 ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE + 1637 sizeof(*llcHdr)); 1638 1639 if (ether_type == htons(ETHERTYPE_VLAN)) { 1640 L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) + 1641 sizeof(*llcHdr); 1642 ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE 1643 + sizeof(*llcHdr) + 1644 sizeof(qdf_net_vlanhdr_t)); 1645 } else { 1646 L3datap = hdr_ptr + sizeof(qdf_ether_header_t) + 1647 sizeof(*llcHdr); 1648 } 1649 } else { 1650 if (ether_type == htons(ETHERTYPE_VLAN)) { 1651 evh = (qdf_ethervlan_header_t *) eh; 1652 ether_type = evh->ether_type; 1653 L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t); 1654 } 1655 } 1656 1657 /* 1658 * Find priority from IP TOS DSCP field 1659 */ 1660 if (qdf_nbuf_is_ipv4_pkt(nbuf)) { 1661 qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap; 1662 if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) { 1663 /* Only for unicast frames */ 1664 if (!is_mcast) { 1665 /* send it on VO queue */ 1666 msdu_info->tid = DP_VO_TID; 1667 } 1668 } else { 1669 /* 1670 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7 1671 * from TOS byte. 
1672 */ 1673 tos = ip->ip_tos; 1674 dscp_tid_override = 1; 1675 1676 } 1677 } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) { 1678 /* TODO 1679 * use flowlabel 1680 *igmpmld cases to be handled in phase 2 1681 */ 1682 unsigned long ver_pri_flowlabel; 1683 unsigned long pri; 1684 ver_pri_flowlabel = *(unsigned long *) L3datap; 1685 pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >> 1686 DP_IPV6_PRIORITY_SHIFT; 1687 tos = pri; 1688 dscp_tid_override = 1; 1689 } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) 1690 msdu_info->tid = DP_VO_TID; 1691 else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) { 1692 /* Only for unicast frames */ 1693 if (!is_mcast) { 1694 /* send ucast arp on VO queue */ 1695 msdu_info->tid = DP_VO_TID; 1696 } 1697 } 1698 1699 /* 1700 * Assign all MCAST packets to BE 1701 */ 1702 if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { 1703 if (is_mcast) { 1704 tos = 0; 1705 dscp_tid_override = 1; 1706 } 1707 } 1708 1709 if (dscp_tid_override == 1) { 1710 tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK; 1711 msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos]; 1712 } 1713 1714 if (msdu_info->tid >= CDP_MAX_DATA_TIDS) 1715 msdu_info->tid = CDP_MAX_DATA_TIDS - 1; 1716 1717 return; 1718 } 1719 1720 /** 1721 * dp_tx_classify_tid() - Obtain TID to be used for this frame 1722 * @vdev: DP vdev handle 1723 * @nbuf: skb 1724 * 1725 * Software based TID classification is required when more than 2 DSCP-TID 1726 * mapping tables are needed. 1727 * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2. 1728 * 1729 * Return: void 1730 */ 1731 static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1732 struct dp_tx_msdu_info_s *msdu_info) 1733 { 1734 DP_TX_TID_OVERRIDE(msdu_info, nbuf); 1735 1736 /* 1737 * skip_sw_tid_classification flag will set in below cases- 1738 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map 1739 * 2. hlos_tid_override enabled for vdev 1740 * 3. 
mesh mode enabled for vdev 1741 */ 1742 if (qdf_likely(vdev->skip_sw_tid_classification)) { 1743 /* Update tid in msdu_info from skb priority */ 1744 if (qdf_unlikely(vdev->skip_sw_tid_classification 1745 & DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) { 1746 uint32_t tid = qdf_nbuf_get_priority(nbuf); 1747 1748 if (tid == DP_TX_INVALID_QOS_TAG) 1749 return; 1750 1751 msdu_info->tid = tid; 1752 return; 1753 } 1754 return; 1755 } 1756 1757 dp_tx_get_tid(vdev, nbuf, msdu_info); 1758 } 1759 1760 #ifdef FEATURE_WLAN_TDLS 1761 /** 1762 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame 1763 * @soc: datapath SOC 1764 * @vdev: datapath vdev 1765 * @tx_desc: TX descriptor 1766 * 1767 * Return: None 1768 */ 1769 static void dp_tx_update_tdls_flags(struct dp_soc *soc, 1770 struct dp_vdev *vdev, 1771 struct dp_tx_desc_s *tx_desc) 1772 { 1773 if (vdev) { 1774 if (vdev->is_tdls_frame) { 1775 tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME; 1776 vdev->is_tdls_frame = false; 1777 } 1778 } 1779 } 1780 1781 static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc) 1782 { 1783 uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX; 1784 1785 switch (soc->arch_id) { 1786 case CDP_ARCH_TYPE_LI: 1787 tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]); 1788 break; 1789 1790 case CDP_ARCH_TYPE_BE: 1791 tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]); 1792 break; 1793 1794 default: 1795 dp_err("Incorrect CDP_ARCH %d", soc->arch_id); 1796 QDF_BUG(0); 1797 } 1798 1799 return tx_status; 1800 } 1801 1802 /** 1803 * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer 1804 * @soc: dp_soc handle 1805 * @tx_desc: TX descriptor 1806 * @vdev: datapath vdev handle 1807 * 1808 * Return: None 1809 */ 1810 static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc, 1811 struct dp_tx_desc_s *tx_desc) 1812 { 1813 uint8_t tx_status = 0; 1814 uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN]; 1815 1816 qdf_nbuf_t nbuf = tx_desc->nbuf; 
1817 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, 1818 DP_MOD_ID_TDLS); 1819 1820 if (qdf_unlikely(!vdev)) { 1821 dp_err_rl("vdev is null!"); 1822 goto error; 1823 } 1824 1825 hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status); 1826 tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status); 1827 dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status); 1828 1829 if (vdev->tx_non_std_data_callback.func) { 1830 qdf_nbuf_set_next(nbuf, NULL); 1831 vdev->tx_non_std_data_callback.func( 1832 vdev->tx_non_std_data_callback.ctxt, 1833 nbuf, tx_status); 1834 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS); 1835 return; 1836 } else { 1837 dp_err_rl("callback func is null"); 1838 } 1839 1840 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS); 1841 error: 1842 qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE); 1843 qdf_nbuf_free(nbuf); 1844 } 1845 1846 /** 1847 * dp_tx_msdu_single_map() - do nbuf map 1848 * @vdev: DP vdev handle 1849 * @tx_desc: DP TX descriptor pointer 1850 * @nbuf: skb pointer 1851 * 1852 * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap 1853 * operation done in other component. 
1854 * 1855 * Return: QDF_STATUS 1856 */ 1857 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev, 1858 struct dp_tx_desc_s *tx_desc, 1859 qdf_nbuf_t nbuf) 1860 { 1861 if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME))) 1862 return qdf_nbuf_map_nbytes_single(vdev->osdev, 1863 nbuf, 1864 QDF_DMA_TO_DEVICE, 1865 nbuf->len); 1866 else 1867 return qdf_nbuf_map_single(vdev->osdev, nbuf, 1868 QDF_DMA_TO_DEVICE); 1869 } 1870 #else 1871 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc, 1872 struct dp_vdev *vdev, 1873 struct dp_tx_desc_s *tx_desc) 1874 { 1875 } 1876 1877 static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc, 1878 struct dp_tx_desc_s *tx_desc) 1879 { 1880 } 1881 1882 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev, 1883 struct dp_tx_desc_s *tx_desc, 1884 qdf_nbuf_t nbuf) 1885 { 1886 return qdf_nbuf_map_nbytes_single(vdev->osdev, 1887 nbuf, 1888 QDF_DMA_TO_DEVICE, 1889 nbuf->len); 1890 } 1891 #endif 1892 1893 static inline 1894 qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev, 1895 struct dp_tx_desc_s *tx_desc, 1896 qdf_nbuf_t nbuf) 1897 { 1898 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 1899 1900 ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf); 1901 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) 1902 return 0; 1903 1904 return qdf_nbuf_mapped_paddr_get(nbuf); 1905 } 1906 1907 static inline 1908 void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc) 1909 { 1910 qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev, 1911 desc->nbuf, 1912 desc->dma_addr, 1913 QDF_DMA_TO_DEVICE, 1914 desc->length); 1915 } 1916 1917 #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 1918 static inline 1919 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev, 1920 struct dp_tx_desc_s *tx_desc, 1921 qdf_nbuf_t nbuf) 1922 { 1923 if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_SIMPLE)) { 1924 qdf_nbuf_dma_clean_range((void *)nbuf->data, 1925 (void *)(nbuf->data + nbuf->len)); 
1926 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data); 1927 } else { 1928 return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf); 1929 } 1930 } 1931 1932 static inline 1933 void dp_tx_nbuf_unmap(struct dp_soc *soc, 1934 struct dp_tx_desc_s *desc) 1935 { 1936 if (qdf_unlikely(!(desc->flags & DP_TX_DESC_FLAG_SIMPLE))) 1937 return dp_tx_nbuf_unmap_regular(soc, desc); 1938 } 1939 #else 1940 static inline 1941 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev, 1942 struct dp_tx_desc_s *tx_desc, 1943 qdf_nbuf_t nbuf) 1944 { 1945 return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf); 1946 } 1947 1948 static inline 1949 void dp_tx_nbuf_unmap(struct dp_soc *soc, 1950 struct dp_tx_desc_s *desc) 1951 { 1952 return dp_tx_nbuf_unmap_regular(soc, desc); 1953 } 1954 #endif 1955 1956 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO) 1957 static inline 1958 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc) 1959 { 1960 dp_tx_nbuf_unmap(soc, desc); 1961 desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE; 1962 } 1963 1964 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc) 1965 { 1966 if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE))) 1967 dp_tx_nbuf_unmap(soc, desc); 1968 } 1969 #else 1970 static inline 1971 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc) 1972 { 1973 } 1974 1975 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc) 1976 { 1977 dp_tx_nbuf_unmap(soc, desc); 1978 } 1979 #endif 1980 1981 #ifdef MESH_MODE_SUPPORT 1982 /** 1983 * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP 1984 * @soc: datapath SOC 1985 * @vdev: datapath vdev 1986 * @tx_desc: TX descriptor 1987 * 1988 * Return: None 1989 */ 1990 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc, 1991 struct dp_vdev *vdev, 1992 struct dp_tx_desc_s *tx_desc) 1993 { 1994 if (qdf_unlikely(vdev->mesh_vdev)) 1995 tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE; 1996 } 1997 1998 /** 1999 * 
dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer 2000 * @soc: dp_soc handle 2001 * @tx_desc: TX descriptor 2002 * @vdev: datapath vdev handle 2003 * 2004 * Return: None 2005 */ 2006 static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc, 2007 struct dp_tx_desc_s *tx_desc) 2008 { 2009 qdf_nbuf_t nbuf = tx_desc->nbuf; 2010 struct dp_vdev *vdev = NULL; 2011 2012 if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) { 2013 qdf_nbuf_free(nbuf); 2014 DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1); 2015 } else { 2016 vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, 2017 DP_MOD_ID_MESH); 2018 if (vdev && vdev->osif_tx_free_ext) 2019 vdev->osif_tx_free_ext((nbuf)); 2020 else 2021 qdf_nbuf_free(nbuf); 2022 2023 if (vdev) 2024 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH); 2025 } 2026 } 2027 #else 2028 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc, 2029 struct dp_vdev *vdev, 2030 struct dp_tx_desc_s *tx_desc) 2031 { 2032 } 2033 2034 static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc, 2035 struct dp_tx_desc_s *tx_desc) 2036 { 2037 } 2038 #endif 2039 2040 /** 2041 * dp_tx_frame_is_drop() - checks if the packet is loopback 2042 * @vdev: DP vdev handle 2043 * @nbuf: skb 2044 * 2045 * Return: 1 if frame needs to be dropped else 0 2046 */ 2047 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac) 2048 { 2049 struct dp_pdev *pdev = NULL; 2050 struct dp_ast_entry *src_ast_entry = NULL; 2051 struct dp_ast_entry *dst_ast_entry = NULL; 2052 struct dp_soc *soc = NULL; 2053 2054 qdf_assert(vdev); 2055 pdev = vdev->pdev; 2056 qdf_assert(pdev); 2057 soc = pdev->soc; 2058 2059 dst_ast_entry = dp_peer_ast_hash_find_by_pdevid 2060 (soc, dstmac, vdev->pdev->pdev_id); 2061 2062 src_ast_entry = dp_peer_ast_hash_find_by_pdevid 2063 (soc, srcmac, vdev->pdev->pdev_id); 2064 if (dst_ast_entry && src_ast_entry) { 2065 if (dst_ast_entry->peer_id == 2066 src_ast_entry->peer_id) 2067 return 1; 2068 } 2069 2070 return 0; 2071 } 2072 
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_MCAST_MLO)
/* Reserved peer id carried by MLO multicast frames that are reinjected
 * for transmission; checked in dp_tx_update_mcast_param().
 */
#define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
/* Offset added to the vdev id to derive the MLO multicast vdev id */
#define DP_MLO_VDEV_ID_OFFSET 0x80

/**
 * dp_tx_bypass_reinjection() - mark a reinjected descriptor as FW-bound
 * @soc: DP soc handle
 * @tx_desc: Tx descriptor for the reinjected frame
 *
 * Sets DP_TX_DESC_FLAG_TO_FW (only once) so the frame is treated as an
 * exception-path frame, and accounts for it in soc->num_tx_exception.
 */
static inline void
dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
	if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) {
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		qdf_atomic_inc(&soc->num_tx_exception);
	}
}

/**
 * dp_tx_update_mcast_param() - build TCL metadata for MLO mcast reinject
 * @peer_id: peer id of the frame; DP_MLO_MCAST_REINJECT_PEER_ID selects
 *	     the global-sequence-number-based metadata format
 * @htt_tcl_metadata: pointer to the TCL metadata word to be rebuilt
 * @vdev: DP vdev handle
 * @msdu_info: MSDU info; gsn is consumed and vdev_id is updated here
 */
static inline void
dp_tx_update_mcast_param(uint16_t peer_id,
			 uint16_t *htt_tcl_metadata,
			 struct dp_vdev *vdev,
			 struct dp_tx_msdu_info_s *msdu_info)
{
	if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
		*htt_tcl_metadata = 0;
		DP_TX_TCL_METADATA_TYPE_SET(
				*htt_tcl_metadata,
				HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
		HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
						    msdu_info->gsn);

		/* address the MLO mcast vdev rather than this vdev */
		msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
		if (qdf_unlikely(vdev->nawds_enabled))
			HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET(
							*htt_tcl_metadata, 1);
	} else {
		msdu_info->vdev_id = vdev->vdev_id;
	}
}
#else
/* Stub variants when MLO multicast support is not compiled in */
static inline void
dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}

static inline void
dp_tx_update_mcast_param(uint16_t peer_id,
			 uint16_t *htt_tcl_metadata,
			 struct dp_vdev *vdev,
			 struct dp_tx_msdu_info_s *msdu_info)
{
}
#endif
/**
 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info (TID, Tx queue, metadata for fw, exception flag)
 * @peer_id: peer_id of the peer in case of NAWDS frames
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	QDF_STATUS status;
	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
	uint16_t htt_tcl_metadata = 0;
	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
	uint8_t tid = msdu_info->tid;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	qdf_dma_addr_t paddr;

	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
					    msdu_info, tx_exc_metadata);
	if (!tx_desc) {
		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
			  vdev, tx_q->desc_pool_id);
		drop_code = TX_DESC_ERR;
		goto fail_return;
	}

	dp_tx_update_tdls_flags(soc, vdev, tx_desc);

	/*
	 * Select TCL metadata: vdev-based for invalid peer id (host
	 * inspected), peer-based for a valid NAWDS peer id, otherwise the
	 * vdev default.
	 */
	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
		htt_tcl_metadata = vdev->htt_tcl_metadata;
		DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
		DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
					    DP_TCL_METADATA_TYPE_PEER_BASED);
		DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
					       peer_id);
		dp_tx_bypass_reinjection(soc, tx_desc);
	} else
		htt_tcl_metadata = vdev->htt_tcl_metadata;

	if (msdu_info->exception_fw)
		DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);

	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
					 !pdev->enhanced_stats_en);

	dp_tx_update_mesh_flags(soc, vdev, tx_desc);

	/* DMA-map the frame; descriptor must be released on failure */
	paddr = dp_tx_nbuf_map(vdev, tx_desc, nbuf);
	if (!paddr) {
		/* Handle failure */
		dp_err("qdf_nbuf_map failed");
		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
		drop_code = TX_DMA_MAP_ERR;
		goto release_desc;
	}

	tx_desc->dma_addr = paddr;
	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
			       tx_desc->id, DP_TX_DESC_MAP);
	dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
	/* Enqueue the Tx MSDU descriptor to HW for transmit */
	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
					     htt_tcl_metadata,
					     tx_exc_metadata, msdu_info);

	if (status != QDF_STATUS_SUCCESS) {
		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
			     tx_desc, tx_q->ring_id);
		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
				       tx_desc->id, DP_TX_DESC_UNMAP);
		/* undo the DMA map done above before releasing the desc */
		dp_tx_nbuf_unmap(soc, tx_desc);
		drop_code = TX_HW_ENQUEUE;
		goto release_desc;
	}

	return NULL;

release_desc:
	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);

fail_return:
	/* account the drop in the per-TID SW drop stats */
	dp_tx_get_tid(vdev, nbuf, msdu_info);
	tid_stats = &pdev->stats.tid_stats.
		    tid_tx_stats[tx_q->ring_id][tid];
	tid_stats->swdrop_cnt[drop_code]++;
	return nbuf;
}

/**
 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
 * @soc: Soc handle
 * @desc: software Tx descriptor to be processed
 *
 * Return: none
 */
void dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	qdf_nbuf_t nbuf = desc->nbuf;
	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);

	/* nbuf already freed in vdev detach path */
	if (!nbuf)
		return;

	/* If it is TDLS mgmt, don't unmap or free the frame */
	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
		return dp_non_std_htt_tx_comp_free_buff(soc, desc);

	/* 0 : MSDU buffer, 1 : MLE */
	if (desc->msdu_ext_desc) {
		/* TSO free */
		if (hal_tx_ext_desc_get_tso_enable(
					desc->msdu_ext_desc->vaddr)) {
			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
					       desc->id, DP_TX_COMP_MSDU_EXT);
			dp_tx_tso_seg_history_add(soc,
						  desc->msdu_ext_desc->tso_desc,
						  desc->nbuf, desc->id, type);
			/* unmap each TSO seg before freeing the nbuf */
			dp_tx_tso_unmap_segment(soc,
						desc->msdu_ext_desc->tso_desc,
						desc->msdu_ext_desc->
						tso_num_desc);
			qdf_nbuf_free(nbuf);
			return;
		}

		if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
			void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
			qdf_dma_addr_t iova;
			uint32_t frag_len;
			uint32_t i;

			/* linear part of the SG frame */
			qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
						     QDF_DMA_TO_DEVICE,
						     qdf_nbuf_headlen(nbuf));

			/* frag 0 is the linear part unmapped above */
			for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
				hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
							      &iova,
							      &frag_len);
				if (!iova || !frag_len)
					break;

				qdf_mem_unmap_page(soc->osdev, iova, frag_len,
						   QDF_DMA_TO_DEVICE);
			}

			qdf_nbuf_free(nbuf);
			return;
		}
	}
	/* If it's ME frame, dont unmap the
cloned nbuf's */
	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
		goto nbuf_free;

	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
	dp_tx_unmap(soc, desc);

	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
		return dp_mesh_tx_comp_free_buff(soc, desc);
nbuf_free:
	qdf_nbuf_free(nbuf);
}

/**
 * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
 * @soc: DP soc handle
 * @nbuf: skb
 * @msdu_info: MSDU info holding the current SG segment descriptor
 *
 * Unmaps the linear portion of @nbuf and then every mapped fragment
 * (frag 0 is the linear part, so the loop starts at index 1).
 *
 * Return: None
 */
static inline void
dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
		   struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t cur_idx;
	struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;

	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
				     qdf_nbuf_headlen(nbuf));

	for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
		qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
				   (seg->frags[cur_idx].paddr_lo | ((uint64_t)
				   seg->frags[cur_idx].paddr_hi) << 32),
				   seg->frags[cur_idx].len,
				   QDF_DMA_TO_DEVICE);
}

/**
 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 *
 * Prepare descriptors for multiple MSDUs (TSO segments / SG frames /
 * ME replicas) and enqueue them to TCL one segment at a time.
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#if QDF_LOCK_STATS
noinline
#else
#endif
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t i;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	/* NOTE(review): never set true in this function as visible here */
	bool is_cce_classified = false;
	QDF_STATUS status;
	uint16_t htt_tcl_metadata = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;

	if (msdu_info->frm_type == dp_tx_frm_me)
		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;

	i = 0;
	/* Print statement to track i and num_seg */
	/*
	 * For each segment (maps to 1 MSDU) , prepare software and hardware
	 * descriptors using information in msdu_info
	 */
	while (i < msdu_info->num_seg) {
		/*
		 * Setup Tx descriptor for an MSDU, and MSDU extension
		 * descriptor
		 */
		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
					     tx_q->desc_pool_id);

		if (!tx_desc) {
			if (msdu_info->frm_type == dp_tx_frm_me) {
				prep_desc_fail++;
				dp_tx_me_free_buf(pdev,
					(void *)(msdu_info->u.sg_info
						.curr_seg->frags[0].vaddr));
				if (prep_desc_fail == msdu_info->num_seg) {
					/*
					 * Unmap is needed only if descriptor
					 * preparation failed for all segments.
					 */
					qdf_nbuf_unmap(soc->osdev,
						       msdu_info->u.sg_info.
						       curr_seg->nbuf,
						       QDF_DMA_TO_DEVICE);
				}
				/*
				 * Free the nbuf for the current segment
				 * and make it point to the next in the list.
				 * For me, there are as many segments as there
				 * are no of clients.
				 */
				qdf_nbuf_free(msdu_info->u.sg_info
					      .curr_seg->nbuf);
				if (msdu_info->u.sg_info.curr_seg->next) {
					msdu_info->u.sg_info.curr_seg =
						msdu_info->u.sg_info
						.curr_seg->next;
					nbuf = msdu_info->u.sg_info
					       .curr_seg->nbuf;
				}
				i++;
				continue;
			}

			if (msdu_info->frm_type == dp_tx_frm_tso) {
				dp_tx_tso_seg_history_add(
						soc,
						msdu_info->u.tso_info.curr_seg,
						nbuf, 0, DP_TX_DESC_UNMAP);
				dp_tx_tso_unmap_segment(soc,
							msdu_info->u.tso_info.
							curr_seg,
							msdu_info->u.tso_info.
							tso_num_seg_list);

				if (msdu_info->u.tso_info.curr_seg->next) {
					msdu_info->u.tso_info.curr_seg =
					msdu_info->u.tso_info.curr_seg->next;
					i++;
					continue;
				}
			}

			if (msdu_info->frm_type == dp_tx_frm_sg)
				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);

			goto done;
		}

		if (msdu_info->frm_type == dp_tx_frm_me) {
			tx_desc->msdu_ext_desc->me_buffer =
				(struct dp_tx_me_buf_t *)msdu_info->
				u.sg_info.curr_seg->frags[0].vaddr;
			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
		}

		if (is_cce_classified)
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

		htt_tcl_metadata = vdev->htt_tcl_metadata;
		if (msdu_info->exception_fw) {
			DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
		}

		dp_tx_is_hp_update_required(i, msdu_info);

		/*
		 * For frames with multiple segments (TSO, ME), jump to next
		 * segment.
		 */
		if (msdu_info->frm_type == dp_tx_frm_tso) {
			if (msdu_info->u.tso_info.curr_seg->next) {
				msdu_info->u.tso_info.curr_seg =
					msdu_info->u.tso_info.curr_seg->next;

				/*
				 * If this is a jumbo nbuf, then increment the
				 * number of nbuf users for each additional
				 * segment of the msdu. This will ensure that
				 * the skb is freed only after receiving tx
				 * completion for all segments of an nbuf
				 */
				qdf_nbuf_inc_users(nbuf);

				/* Check with MCL if this is needed */
				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
				 */
			}
		}

		dp_tx_update_mcast_param(DP_INVALID_PEER,
					 &htt_tcl_metadata,
					 vdev,
					 msdu_info);
		/*
		 * Enqueue the Tx MSDU descriptor to HW for transmit
		 */
		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
						     htt_tcl_metadata,
						     NULL, msdu_info);

		dp_tx_check_and_flush_hp(soc, status, msdu_info);

		if (status != QDF_STATUS_SUCCESS) {
			dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
				   tx_desc, tx_q->ring_id);

			dp_tx_get_tid(vdev, nbuf, msdu_info);
			tid_stats = &pdev->stats.tid_stats.
				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;

			if (msdu_info->frm_type == dp_tx_frm_me) {
				hw_enq_fail++;
				if (hw_enq_fail == msdu_info->num_seg) {
					/*
					 * Unmap is needed only if enqueue
					 * failed for all segments.
					 */
					qdf_nbuf_unmap(soc->osdev,
						       msdu_info->u.sg_info.
						       curr_seg->nbuf,
						       QDF_DMA_TO_DEVICE);
				}
				/*
				 * Free the nbuf for the current segment
				 * and make it point to the next in the list.
				 * For me, there are as many segments as there
				 * are no of clients.
				 */
				qdf_nbuf_free(msdu_info->u.sg_info
					      .curr_seg->nbuf);
				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
				if (msdu_info->u.sg_info.curr_seg->next) {
					msdu_info->u.sg_info.curr_seg =
						msdu_info->u.sg_info
						.curr_seg->next;
					nbuf = msdu_info->u.sg_info
					       .curr_seg->nbuf;
				} else
					break;
				i++;
				continue;
			}

			/*
			 * For TSO frames, the nbuf users increment done for
			 * the current segment has to be reverted, since the
			 * hw enqueue for this segment failed
			 */
			if (msdu_info->frm_type == dp_tx_frm_tso &&
			    msdu_info->u.tso_info.curr_seg) {
				/*
				 * unmap and free current,
				 * retransmit remaining segments
				 */
				dp_tx_comp_free_buf(soc, tx_desc);
				i++;
				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
				continue;
			}

			if (msdu_info->frm_type == dp_tx_frm_sg)
				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);

			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
			goto done;
		}

		/*
		 * TODO
		 * if tso_info structure can be modified to have curr_seg
		 * as first element, following 2 blocks of code (for TSO and SG)
		 * can be combined into 1
		 */

		/*
		 * For Multicast-Unicast converted packets,
		 * each converted frame (for a client) is represented as
		 * 1 segment
		 */
		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
		    (msdu_info->frm_type == dp_tx_frm_me)) {
			if (msdu_info->u.sg_info.curr_seg->next) {
				msdu_info->u.sg_info.curr_seg =
					msdu_info->u.sg_info.curr_seg->next;
				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
			} else
				break;
		}
		i++;
	}

	nbuf = NULL;

done:
	return nbuf;
}

/**
 * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
 *                     for SG frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU
info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t cur_frag, nr_frags, i;
	qdf_dma_addr_t paddr;
	struct dp_tx_sg_info_s *sg_info;

	sg_info = &msdu_info->u.sg_info;
	nr_frags = qdf_nbuf_get_nr_frags(nbuf);

	/* map the linear (head) portion first; frag 0 records it */
	if (QDF_STATUS_SUCCESS !=
		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
					   QDF_DMA_TO_DEVICE,
					   qdf_nbuf_headlen(nbuf))) {
		dp_tx_err("dma map error");
		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
	seg_info->frags[0].paddr_lo = paddr;
	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
	seg_info->frags[0].vaddr = (void *) nbuf;

	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
		if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
							    nbuf, 0,
							    QDF_DMA_TO_DEVICE,
							    cur_frag)) {
			dp_tx_err("frag dma map error");
			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
			/* unwind all maps done so far */
			goto map_err;
		}

		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
		seg_info->frags[cur_frag + 1].paddr_hi =
			((uint64_t) paddr) >> 32;
		seg_info->frags[cur_frag + 1].len =
			qdf_nbuf_get_frag_size(nbuf, cur_frag);
	}

	seg_info->frag_cnt = (cur_frag + 1);
	seg_info->total_len = qdf_nbuf_len(nbuf);
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_sg;
	msdu_info->num_seg = 1;

	return nbuf;
map_err:
	/* restore paddr into nbuf before calling unmap */
	qdf_nbuf_mapped_paddr_set(nbuf,
				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
				  ((uint64_t)
				  seg_info->frags[0].paddr_hi) << 32));
	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
				     QDF_DMA_TO_DEVICE,
				     seg_info->frags[0].len);
	for (i = 1; i <= cur_frag; i++) {
		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
				   (seg_info->frags[i].paddr_lo | ((uint64_t)
				   seg_info->frags[i].paddr_hi) << 32),
				   seg_info->frags[i].len,
				   QDF_DMA_TO_DEVICE);
	}
	qdf_nbuf_free(nbuf);
	return NULL;
}

/**
 * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
 * @vdev: DP vdev handle
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
 *
 * Marks the MSDU as standalone/host-opaque-valid and stashes the PPDU
 * cookie in the fw metadata; also flags the frame for the fw exception
 * path and as a tx-sniffer frame.
 *
 * Return: none
 */
static
void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
				    struct dp_tx_msdu_info_s *msdu_info,
				    uint16_t ppdu_cookie)
{
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
				(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
				(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
				(msdu_info->meta_data[6], ppdu_cookie);

	msdu_info->exception_fw = 1;
	msdu_info->is_tx_sniffer = 1;
}

#ifdef MESH_MODE_SUPPORT

/**
 * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
 *				    and prepare msdu_info for mesh frames.
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on failure,
 *         nbuf when extracted successfully
 */
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					struct dp_tx_msdu_info_s *msdu_info)
{
	struct meta_hdr_s *mhdr;
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	/* non-mesh-info frames only need the meta header stripped */
	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
		msdu_info->exception_fw = 0;
		goto remove_meta_hdr;
	}

	msdu_info->exception_fw = 1;

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	meta_data->host_tx_desc_pool = 1;
	meta_data->update_peer_cache = 1;
	meta_data->learning_frame = 1;

	/* fixed-rate parameters apply only when auto-rate is off */
	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
		meta_data->power = mhdr->power;

		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
		meta_data->retry_limit = mhdr->rate_info[0].max_tries;

		meta_data->dyn_bw = 1;

		meta_data->valid_pwr = 1;
		meta_data->valid_mcs_mask = 1;
		meta_data->valid_nss_mask = 1;
		meta_data->valid_preamble_type = 1;
		meta_data->valid_retries = 1;
		meta_data->valid_bw_info = 1;
	}

	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
		meta_data->encrypt_type = 0;
		meta_data->valid_encrypt_type = 1;
		meta_data->learning_frame = 0;
	}

	meta_data->valid_key_flags = 1;
	meta_data->key_flags = (mhdr->keyix & 0x3);

remove_meta_hdr:
	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
		dp_tx_err("qdf_nbuf_pull_head failed");
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	msdu_info->tid = qdf_nbuf_get_priority(nbuf);

	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
		   " tid %d to_fw %d",
		   msdu_info->meta_data[0],
		   msdu_info->meta_data[1],
		   msdu_info->meta_data[2],
		   msdu_info->meta_data[3],
		   msdu_info->meta_data[4],
		   msdu_info->meta_data[5],
		   msdu_info->tid, msdu_info->exception_fw);

	return nbuf;
}
#else
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					struct dp_tx_msdu_info_s *msdu_info)
{
	return nbuf;
}

#endif

/**
 * dp_check_exc_metadata() - Checks if parameters are valid
 * @tx_exc: holds all exception path parameters
 *
 * Intra-BSS-forwarded frames are always accepted; otherwise the TID,
 * encap type, security type and sniffer PPDU cookie are range-checked.
 *
 * Return: true when all the parameters are valid else false
 */
static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
{
	bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
			    HTT_INVALID_TID);
	bool invalid_encap_type =
			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
			       tx_exc->ppdu_cookie == 0);

	if (tx_exc->is_intrabss_fwd)
		return true;

	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
	    invalid_cookie) {
		return false;
	}

	return true;
}

#ifdef ATH_SUPPORT_IQUE
/**
 * dp_tx_mcast_enhance() - Multicast enhancement on TX
 * @vdev: vdev handle
 * @nbuf: skb
 *
 * Return: true when the caller should continue normal transmission,
 *         false when the frame was consumed by the ME/IGMP-ME path
 */
static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	qdf_ether_header_t *eh;

	/* Mcast to Ucast Conversion*/
	if (qdf_likely(!vdev->mcast_enhancement_en))
		return true;

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
		dp_verbose_debug("Mcast frm for ME %pK", vdev);
		qdf_nbuf_set_next(nbuf, NULL);

		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
				 qdf_nbuf_len(nbuf));
		/* frame consumed by the ME path; stop normal tx */
		if (dp_tx_prepare_send_me(vdev, nbuf) ==
				QDF_STATUS_SUCCESS) {
			return false;
		}

		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
					QDF_STATUS_SUCCESS) {
				return false;
			}
		}
	}

	return true;
}
#else
static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	return true;
}
#endif

/**
 * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
 * @nbuf: qdf_nbuf_t
 * @vdev: struct dp_vdev *
 *
 * Allow packet for processing only if it is for peer client which is
 * connected with same vap. Drop packet if client is connected to
 * different vap.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
{
	struct dp_ast_entry *dst_ast_entry = NULL;
	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	/* mcast/bcast frames are not peer-directed; always allowed */
	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
		return QDF_STATUS_SUCCESS;

	/* AST lookup must be done under the soc ast lock */
	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
							eh->ether_dhost,
							vdev->vdev_id);

	/* If there is no ast entry, return failure */
	if (qdf_unlikely(!dst_ast_entry)) {
		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
		return QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_nawds_handler() - NAWDS handler
 *
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @msdu_info: msdu_info required to create HTT metadata
 * @nbuf: skb
 * @sa_peer_id: source peer id (used to skip intra-bss reflections)
 *
 * This API transmits a clone of the multicast frame, peer-addressed,
 * to every NAWDS-enabled (non-bss, primary-link) peer on the vdev.
 *
 * Return: none
 */
static inline
void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_msdu_info_s *msdu_info,
			 qdf_nbuf_t nbuf, uint16_t sa_peer_id)
{
	struct dp_peer *peer = NULL;
	qdf_nbuf_t nbuf_clone = NULL;
	uint16_t peer_id = DP_INVALID_PEER;
	struct dp_txrx_peer *txrx_peer;

	/* This check avoids pkt forwarding which is entered
	 * in the ast table but still doesn't have valid peerid.
	 */
	if (sa_peer_id == HTT_INVALID_PEER)
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		txrx_peer = dp_get_txrx_peer(peer);
		if (!txrx_peer)
			continue;

		if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
			peer_id = peer->peer_id;

			if (!dp_peer_is_primary_link_peer(peer))
				continue;

			/* Multicast packets needs to be
			 * dropped in case of intra bss forwarding
			 */
			if (sa_peer_id == txrx_peer->peer_id) {
				dp_tx_debug("multicast packet");
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  tx.nawds_mcast_drop,
							  1);
				continue;
			}

			nbuf_clone = qdf_nbuf_clone(nbuf);

			if (!nbuf_clone) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("nbuf clone failed"));
				break;
			}

			/* returns the clone back on send failure */
			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
							    msdu_info, peer_id,
							    NULL);

			if (nbuf_clone) {
				dp_tx_debug("pkt send failed");
				qdf_nbuf_free(nbuf_clone);
			} else {
				if (peer_id != DP_INVALID_PEER)
					DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
								      tx.nawds_mcast,
								      1, qdf_nbuf_len(nbuf));
			}
		}
	}

	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}

/**
 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through fw
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		     qdf_nbuf_t nbuf,
		     struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	qdf_ether_header_t *eh = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TX_EXCEPTION);

	if (qdf_unlikely(!vdev))
		goto fail;

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));

	if (!tx_exc_metadata)
		goto fail;

	msdu_info.tid = tx_exc_metadata->tid;
	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
			 QDF_MAC_ADDR_REF(nbuf->data));

	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
		dp_tx_err("Invalid parameters in exception path");
		goto fail;
	}

	/* Basic sanity checks for unsupported packets */

	/* MESH mode */
	if (qdf_unlikely(vdev->mesh_vdev)) {
		dp_tx_err("Mesh mode is not supported in exception path");
		goto fail;
	}

	/*
	 * Classify the frame and call corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO , SG, ME, or Raw)
	 * into MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		dp_verbose_debug("TSO frame %pK", vdev);
		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
				 qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
					 qdf_nbuf_len(nbuf));
			goto fail;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		struct dp_tx_seg_info_s seg_info = {0};

		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
		if (!nbuf)
			goto fail;

		dp_verbose_debug("non-TSO SG frame %pK", vdev);

		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				 qdf_nbuf_len(nbuf));

		goto send_multiple;
	}

	/* NOTE(review): qdf_likely() on the sniffer check looks inverted
	 * (sniffer frames should be rare) — confirm; hint only, no
	 * functional impact.
	 */
	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
				 qdf_nbuf_len(nbuf));

		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
					       tx_exc_metadata->ppdu_cookie);
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports upto 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) {
		if (qdf_unlikely(vdev->nawds_enabled)) {
			/*
			 * This is a multicast packet
			 */
			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
					    tx_exc_metadata->peer_id);
			DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
					 1, qdf_nbuf_len(nbuf));
		}

		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
					      DP_INVALID_PEER, NULL);
	} else {
		/*
		 * Check exception descriptors
		 */
		if (dp_tx_exception_limit_check(vdev))
			goto fail;

		/*  Single linear frame */
		/*
		 * If nbuf is a simple linear frame, use send_single function to
		 * prepare direct-buffer type TCL descriptor and enqueue to TCL
		 * SRNG. There is no need to setup a MSDU extension descriptor.
		 */
		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
					      tx_exc_metadata->peer_id,
					      tx_exc_metadata);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

fail:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
	dp_verbose_debug("pkt send failed");
	return nbuf;
}

/**
 * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
 * in exception path in special case to avoid regular exception path chk.
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through fw
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
				   uint8_t vdev_id, qdf_nbuf_t nbuf,
				   struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TX_EXCEPTION);

	if (qdf_unlikely(!vdev))
		goto fail;

	/* drop unicast frames destined to a peer on a different vap */
	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
			== QDF_STATUS_E_FAILURE)) {
		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
		goto fail;
	}

	/* Unref count as it will again be taken inside dp_tx_exception */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);

	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);

fail:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
	dp_verbose_debug("pkt send failed");
	return nbuf;
}

/**
 * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
 * @soc_hdl: DP soc handle
 * @vdev_id: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#ifdef MESH_MODE_SUPPORT
qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			   qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t nbuf_mesh = NULL;
	qdf_nbuf_t nbuf_clone = NULL;
	struct dp_vdev *vdev;
	uint8_t no_enc_frame = 0;

	nbuf_mesh = qdf_nbuf_unshare(nbuf);
	if (!nbuf_mesh) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_unshare failed");
		return nbuf;
	}

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"vdev is NULL for vdev_id %d", vdev_id);
		/* NOTE(review): returns the original nbuf here; if unshare
		 * produced a new buffer, nbuf_mesh looks leaked — confirm.
		 */
		return nbuf;
	}

	nbuf = nbuf_mesh;

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if ((vdev->sec_type != cdp_sec_type_none) &&
			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
		no_enc_frame = 1;

	if (mhdr->flags & METAHDR_FLAG_NOQOS)
		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);

	/* for info-updated encrypted frames, also send a clone to fw */
	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
		       !no_enc_frame) {
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (!nbuf_clone) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_clone failed");
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
			return nbuf;
		}
		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
	}

	if (nbuf_clone) {
		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
		} else {
			qdf_nbuf_free(nbuf_clone);
		}
	}

	if (no_enc_frame)
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
	else
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);

	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
	if ((!nbuf) && no_enc_frame) {
		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
	return nbuf;
}

#else

qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
			   qdf_nbuf_t nbuf)
{
	return dp_tx_send(soc, vdev_id, nbuf);
}

#endif

#ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
/* Prefetch nbuf length and data pointer cachelines ahead of use */
static inline
void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
{
	/* Warm the cache lines holding nbuf length and data pointer
	 * ahead of the send-single fast path.
	 */
	if (nbuf) {
		qdf_prefetch(&nbuf->len);
		qdf_prefetch(&nbuf->data);
	}
}
#else
static inline
void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
{
}
#endif

/**
 * dp_tx_send() - Transmit a frame on a given VAP
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
 * cases
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	uint16_t peer_id = HTT_INVALID_PEER;
	/*
	 * doing a memzero is causing additional function call overhead
	 * so doing static stack clearing
	 */
	struct dp_tx_msdu_info_s msdu_info = {0};
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return nbuf;

	/*
	 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
	 * it in this per packet path.
	 *
	 * As in this path vdev memory is already protected with netdev
	 * tx lock
	 */
	vdev = soc->vdev_id_map[vdev_id];
	if (qdf_unlikely(!vdev))
		return nbuf;

	/*
	 * Set Default Host TID value to invalid TID
	 * (TID override disabled)
	 */
	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_headlen(nbuf));

	/* Mesh frames carry a meta header that must be stripped and
	 * folded into msdu_info before normal classification.
	 */
	if (qdf_unlikely(vdev->mesh_vdev)) {
		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
								    &msdu_info);
		if (!nbuf_mesh) {
			dp_verbose_debug("Extracting mesh metadata failed");
			return nbuf;
		}
		nbuf = nbuf_mesh;
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * TCL H/W supports 2 DSCP-TID mapping tables.
	 *  Table 1 - Default DSCP-TID mapping table
	 *  Table 2 - 1 DSCP-TID override table
	 *
	 * If we need a different DSCP-TID mapping for this vap,
	 * call tid_classify to extract DSCP/ToS from frame and
	 * map to a TID and store in msdu_info. This is later used
	 * to fill in TCL Input descriptor (per-packet TID override).
	 */
	dp_tx_classify_tid(vdev, nbuf, &msdu_info);

	/*
	 * Classify the frame and call corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO , SG, ME, or Raw)
	 * into MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		dp_verbose_debug("TSO frame %pK", vdev);
		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
				 qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
					 qdf_nbuf_len(nbuf));
			return nbuf;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		/* Too many fragments for the SG descriptor: linearize and
		 * fall through to the single-buffer path instead.
		 */
		if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
			if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
				return nbuf;
		} else {
			struct dp_tx_seg_info_s seg_info = {0};

			nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
						&msdu_info);
			if (!nbuf)
				return NULL;

			dp_verbose_debug("non-TSO SG frame %pK", vdev);

			DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
					 qdf_nbuf_len(nbuf));

			goto send_multiple;
		}
	}

	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
		return NULL;

	/* RAW */
	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
		struct dp_tx_seg_info_s seg_info = {0};

		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
		if (!nbuf)
			return NULL;

		dp_verbose_debug("Raw frame %pK", vdev);

		goto send_multiple;

	}

	/* NAWDS: replicate multicast to NAWDS peers; source peer id is
	 * looked up from the AST (unless AST offload is in use).
	 */
	if (qdf_unlikely(vdev->nawds_enabled)) {
		qdf_ether_header_t *eh = (qdf_ether_header_t *)
					  qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			uint16_t sa_peer_id = DP_INVALID_PEER;

			if (!soc->ast_offload_support) {
				struct dp_ast_entry *ast_entry = NULL;

				qdf_spin_lock_bh(&soc->ast_lock);
				ast_entry = dp_peer_ast_hash_find_by_pdevid
					(soc,
					 (uint8_t *)(eh->ether_shost),
					 vdev->pdev->pdev_id);
				if (ast_entry)
					sa_peer_id = ast_entry->peer_id;
				qdf_spin_unlock_bh(&soc->ast_lock);
			}

			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
					    sa_peer_id);
		}
		peer_id = DP_INVALID_PEER;
		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
				 1, qdf_nbuf_len(nbuf));
	}

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	dp_tx_prefetch_nbuf_data(nbuf);
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);

	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	/* Raw frames that failed need their prepared fraglist undone */
	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);

	return nbuf;
}

/**
 * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
 * case to avoid check in perpkt path.
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
 * with special condition to avoid per pkt check in dp_tx_send
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
				    uint8_t vdev_id, qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return nbuf;

	/*
	 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
	 * it in this per packet path.
	 *
	 * As in this path vdev memory is already protected with netdev
	 * tx lock
	 */
	vdev = soc->vdev_id_map[vdev_id];
	if (qdf_unlikely(!vdev))
		return nbuf;

	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
			 == QDF_STATUS_E_FAILURE)) {
		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
		return nbuf;
	}

	return dp_tx_send(soc_hdl, vdev_id, nbuf);
}

#ifdef UMAC_SUPPORT_PROXY_ARP
/**
 * dp_tx_proxy_arp() - Tx proxy arp handler
 * @vdev: datapath vdev handle
 * @nbuf: sk buffer
 *
 * Return: status
 */
static inline
int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	if (vdev->osif_proxy_arp)
		return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);

	/*
	 * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
	 * osif_proxy_arp has a valid function pointer assigned
	 * to it
	 */
	dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");

	return QDF_STATUS_NOT_INITIALIZED;
}
#else
/**
 * dp_tx_proxy_arp() - Tx proxy arp handler
 * @vdev: datapath vdev handle
 * @nbuf: sk buffer
 *
 * This function always returns QDF_STATUS_SUCCESS when
 * UMAC_SUPPORT_PROXY_ARP is not defined.
 *
 * Return: status
 */
static inline
int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
#ifdef WLAN_MCAST_MLO
/* Handle MLO-mcast reinjects: hand the frame to the arch-specific
 * mcast handler and release the Tx descriptor. Returns true when the
 * reinject was consumed here.
 */
static bool
dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
		       struct dp_tx_desc_s *tx_desc,
		       qdf_nbuf_t nbuf,
		       uint8_t reinject_reason)
{
	if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
		if (soc->arch_ops.dp_tx_mcast_handler)
			soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);

		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
		return true;
	}

	return false;
}
#else /* WLAN_MCAST_MLO */
static inline bool
dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
		       struct dp_tx_desc_s *tx_desc,
		       qdf_nbuf_t nbuf,
		       uint8_t reinject_reason)
{
	return false;
}
#endif /* WLAN_MCAST_MLO */
#else
static inline bool
dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
		       struct dp_tx_desc_s *tx_desc,
		       qdf_nbuf_t nbuf,
		       uint8_t reinject_reason)
{
	return false;
}
#endif

/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @soc: datapath soc handle
 * @vdev: datapath vdev handle
 * @tx_desc: software descriptor head pointer
 * @status : Tx completion status from HTT descriptor
 * @reinject_reason : reinject reason from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * Return: none
 */
void dp_tx_reinject_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_tx_desc_s *tx_desc,
			    uint8_t *status,
			    uint8_t reinject_reason)
{
	struct dp_peer *peer = NULL;
	uint32_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_tx_msdu_info_s msdu_info;
#ifdef WDS_VENDOR_EXTENSION
	int is_mcast = 0, is_ucast = 0;
	int num_peers_3addr = 0;
	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
#endif
	struct dp_txrx_peer *txrx_peer;

	qdf_assert(vdev);

	dp_tx_debug("Tx reinject path");

	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
			 qdf_nbuf_len(tx_desc->nbuf));

	/* MLO mcast reinjects are fully consumed by this helper */
	if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
		return;

#ifdef WDS_VENDOR_EXTENSION
	/* Non-raw encap: frame still carries an 802.11 4-addr header,
	 * so the mcast bit is read from addr1; raw uses the ethernet DA.
	 */
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
	} else {
		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
	}
	is_ucast = !is_mcast;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		txrx_peer = dp_get_txrx_peer(peer);

		if (!txrx_peer || txrx_peer->bss_peer)
			continue;

		/* Detect wds peers that use 3-addr framing for mcast.
		 * if there are any, the bss_peer is used to send the
		 * the mcast frame using 3-addr format. all wds enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
		 */
		if (!txrx_peer->wds_enabled ||
		    !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
			num_peers_3addr = 1;
			break;
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
#endif

	if (qdf_unlikely(vdev->mesh_vdev)) {
		/* NOTE(review): DP_TX_FREE_SINGLE_BUF releases tx_desc->nbuf,
		 * yet qdf_nbuf_free(nbuf) below runs on the same buffer for
		 * this branch too — looks like a double free for mesh vdevs;
		 * confirm against the macro definition.
		 */
		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
	} else {
		qdf_spin_lock_bh(&vdev->peer_list_lock);
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			txrx_peer = dp_get_txrx_peer(peer);
			if (!txrx_peer)
				continue;

			if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
#ifdef WDS_VENDOR_EXTENSION
			/*
			 * . if 3-addr STA, then send on BSS Peer
			 * . if Peer WDS enabled and accept 4-addr mcast,
			 * send mcast on that peer only
			 * . if Peer WDS enabled and accept 4-addr ucast,
			 * send ucast on that peer only
			 */
			((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
			 (txrx_peer->wds_enabled &&
			  ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
			   (is_ucast &&
			    txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
#else
			    (txrx_peer->bss_peer &&
			     (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
#endif
				peer_id = DP_INVALID_PEER;

				/* Each matching peer gets its own copy */
				nbuf_copy = qdf_nbuf_copy(nbuf);

				if (!nbuf_copy) {
					dp_tx_debug("nbuf copy failed");
					break;
				}
				qdf_mem_zero(&msdu_info, sizeof(msdu_info));
				dp_tx_get_queue(vdev, nbuf,
						&msdu_info.tx_queue);

				nbuf_copy = dp_tx_send_msdu_single(vdev,
								   nbuf_copy,
								   &msdu_info,
								   peer_id,
								   NULL);

				/* non-NULL return means the send failed */
				if (nbuf_copy) {
					dp_tx_debug("pkt send failed");
					qdf_nbuf_free(nbuf_copy);
				}
			}
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
	}

	qdf_nbuf_free(nbuf);

	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}

/**
 * dp_tx_inspect_handler() - Tx Inspect Handler
 * @soc: datapath soc handle
 * @vdev: datapath vdev handle
 * @tx_desc: software descriptor head pointer
 * @status : Tx completion status from HTT descriptor
 *
 * Handles Tx frames sent back to Host for inspection
 * (ProxyARP)
 *
 * Return: none
 */
void dp_tx_inspect_handler(struct dp_soc *soc,
			   struct dp_vdev *vdev,
			   struct dp_tx_desc_s *tx_desc,
			   uint8_t *status)
{

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx inspect path",
		  __func__);

	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
			 qdf_nbuf_len(tx_desc->nbuf));

	/* Inspected frames are not retransmitted; unmap/free the buffer
	 * and return the descriptor to its pool.
	 */
	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}

#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
 *				in mesh meta header
 * @tx_desc: software descriptor head pointer
 * @ts: pointer to tx completion stats
 * Return: none
 */
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
					 struct hal_tx_completion_status *ts)
{
	qdf_nbuf_t netbuf = tx_desc->nbuf;

	/* Restore the head room consumed at enqueue (pkt_offset) so the
	 * mesh meta header area is addressable again; only valid when
	 * no MSDU extension descriptor was used.
	 */
	if (!tx_desc->msdu_ext_desc) {
		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "netbuf %pK offset %d",
				  netbuf, tx_desc->pkt_offset);
			return;
		}
	}
}
#else
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
					 struct hal_tx_completion_status *ts)
{
}
#endif

#ifdef CONFIG_SAWF
/* Thin wrapper forwarding Tx-completion info to the SAWF stats module */
static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
					 struct dp_vdev *vdev,
					 struct dp_txrx_peer *txrx_peer,
					 struct dp_tx_desc_s *tx_desc,
					 struct hal_tx_completion_status *ts,
					 uint8_t tid)
{
	dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
					   ts, tid);
}
#else
static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
					 struct dp_vdev *vdev,
					 struct dp_txrx_peer *txrx_peer,
dp_txrx_peer *txrx_peer, 3808 struct dp_tx_desc_s *tx_desc, 3809 struct hal_tx_completion_status *ts, 3810 uint8_t tid) 3811 { 3812 } 3813 3814 #endif 3815 3816 #ifdef QCA_PEER_EXT_STATS 3817 /* 3818 * dp_tx_compute_tid_delay() - Compute per TID delay 3819 * @stats: Per TID delay stats 3820 * @tx_desc: Software Tx descriptor 3821 * 3822 * Compute the software enqueue and hw enqueue delays and 3823 * update the respective histograms 3824 * 3825 * Return: void 3826 */ 3827 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats, 3828 struct dp_tx_desc_s *tx_desc) 3829 { 3830 struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay; 3831 int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue; 3832 uint32_t sw_enqueue_delay, fwhw_transmit_delay; 3833 3834 current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get()); 3835 timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf); 3836 timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp); 3837 sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress); 3838 fwhw_transmit_delay = (uint32_t)(current_timestamp - 3839 timestamp_hw_enqueue); 3840 3841 /* 3842 * Update the Tx software enqueue delay and HW enque-Completion delay. 3843 */ 3844 dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay); 3845 dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay); 3846 } 3847 3848 /* 3849 * dp_tx_update_peer_delay_stats() - Update the peer delay stats 3850 * @txrx_peer: DP peer context 3851 * @tx_desc: Tx software descriptor 3852 * @tid: Transmission ID 3853 * @ring_id: Rx CPU context ID/CPU_ID 3854 * 3855 * Update the peer extended stats. These are enhanced other 3856 * delay stats per msdu level. 
3857 * 3858 * Return: void 3859 */ 3860 static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer, 3861 struct dp_tx_desc_s *tx_desc, 3862 uint8_t tid, uint8_t ring_id) 3863 { 3864 struct dp_pdev *pdev = txrx_peer->vdev->pdev; 3865 struct dp_soc *soc = NULL; 3866 struct dp_peer_delay_stats *delay_stats = NULL; 3867 3868 soc = pdev->soc; 3869 if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))) 3870 return; 3871 3872 delay_stats = txrx_peer->delay_stats; 3873 3874 qdf_assert(delay_stats); 3875 qdf_assert(ring < CDP_MAX_TXRX_CTX); 3876 3877 /* 3878 * For non-TID packets use the TID 9 3879 */ 3880 if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) 3881 tid = CDP_MAX_DATA_TIDS - 1; 3882 3883 dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id], 3884 tx_desc); 3885 } 3886 #else 3887 static inline void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer, 3888 struct dp_tx_desc_s *tx_desc, 3889 uint8_t tid, uint8_t ring_id) 3890 { 3891 } 3892 #endif 3893 3894 #ifdef HW_TX_DELAY_STATS_ENABLE 3895 /** 3896 * dp_update_tx_delay_stats() - update the delay stats 3897 * @vdev: vdev handle 3898 * @delay: delay in ms or us based on the flag delay_in_us 3899 * @tid: tid value 3900 * @mode: type of tx delay mode 3901 * @ring id: ring number 3902 * @delay_in_us: flag to indicate whether the delay is in ms or us 3903 * 3904 * Return: none 3905 */ 3906 static inline 3907 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid, 3908 uint8_t mode, uint8_t ring_id, bool delay_in_us) 3909 { 3910 struct cdp_tid_tx_stats *tstats = 3911 &vdev->stats.tid_tx_stats[ring_id][tid]; 3912 3913 dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id, 3914 delay_in_us); 3915 } 3916 #else 3917 static inline 3918 void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid, 3919 uint8_t mode, uint8_t ring_id, bool delay_in_us) 3920 { 3921 struct cdp_tid_tx_stats *tstats = 3922 
	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
			      delay_in_us);
}
#endif

/**
 * dp_tx_compute_delay() - Compute and fill in all timestamps
 *				to pass in correct fields
 *
 * @vdev: pdev handle
 * @tx_desc: tx descriptor
 * @tid: tid value
 * @ring_id: TCL or WBM ring number for transmit path
 * Return: none
 */
void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
			 uint8_t tid, uint8_t ring_id)
{
	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
	uint32_t fwhw_transmit_delay_us;

	/* Nothing to do unless either pdev-level or vdev-level delay
	 * accounting is switched on.
	 */
	if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
	    qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
		return;

	if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
		fwhw_transmit_delay_us =
			qdf_ktime_to_us(qdf_ktime_real_get()) -
			qdf_ktime_to_us(tx_desc->timestamp);

		/*
		 * Delay between packet enqueued to HW and Tx completion in us
		 */
		dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
					 CDP_DELAY_STATS_FW_HW_TRANSMIT,
					 ring_id, true);
		/*
		 * For MCL, only enqueue to completion delay is required
		 * so return if the vdev flag is enabled.
		 */
		return;
	}

	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
	fwhw_transmit_delay = (uint32_t)(current_timestamp -
					 timestamp_hw_enqueue);

	/*
	 * Delay between packet enqueued to HW and Tx completion in ms
	 */
	dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
				 CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
				 false);

	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
	interframe_delay = (uint32_t)(timestamp_ingress -
				      vdev->prev_tx_enq_tstamp);

	/*
	 * Delay in software enqueue
	 */
	dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
				 CDP_DELAY_STATS_SW_ENQ, ring_id,
				 false);

	/*
	 * Update interframe delay stats calculated at hardstart receive point.
	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
	 * interframe delay will not be calculated correctly for 1st frame.
	 * On the other side, this will help in avoiding extra per packet check
	 * of !vdev->prev_tx_enq_tstamp.
	 */
	dp_update_tx_delay_stats(vdev, interframe_delay, tid,
				 CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
				 false);
	vdev->prev_tx_enq_tstamp = timestamp_ingress;
}

#ifdef DISABLE_DP_STATS
static
inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
				   struct dp_txrx_peer *txrx_peer)
{
}
#else
/* Classify the un-acked frame by protocol subtype (via DP trace) and
 * bump the matching per-peer no-ack counter.
 */
static inline void
dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
{
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;

	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
	if (subtype != QDF_PROTO_INVALID)
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
					  1);
}
#endif

#ifndef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
 *
 * @ts: Tx completion status
 * @txrx_peer: datapath txrx_peer handle
 *
 * Return: void
 */
static inline void
dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
			     struct dp_txrx_peer *txrx_peer)
{
	uint8_t mcs, pkt_type, dst_mcs_idx;
	uint8_t retry_threshold = txrx_peer->mpdu_retry_threshold;

	mcs = ts->mcs;
	pkt_type = ts->pkt_type;
	/* do HW to SW pkt type conversion */
	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
		    hal_2_dp_pkt_type_map[pkt_type]);
	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
		DP_PEER_EXTD_STATS_INC(txrx_peer,
				       tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
				       1);

	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1);
	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi);
	DP_PEER_EXTD_STATS_INC(txrx_peer,
			       tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc);
	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc);
	/* transmit_cnt > 1 means HW retried this MSDU/MPDU */
	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1);
	if (ts->first_msdu) {
		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
					ts->transmit_cnt > 1);

		if (!retry_threshold)
			return;
		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
					qdf_do_div(ts->transmit_cnt,
						   retry_threshold),
					ts->transmit_cnt > retry_threshold);
	}
}
#else
static inline void
dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
			     struct dp_txrx_peer *txrx_peer)
{
}
#endif

/**
 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
 *				per wbm ring
 *
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @txrx_peer: txrx peer handle
 * @ring_id: ring number
 *
 * Return: None
 */
static inline void
dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
			struct hal_tx_completion_status *ts,
			struct dp_txrx_peer *txrx_peer, uint8_t ring_id)
{
	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
	uint8_t tid = ts->tid;
	uint32_t length;
	struct cdp_tid_tx_stats *tid_stats;

	if (!pdev)
		return;

	/* Non-data TIDs are folded into the last data TID bucket */
	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
		tid = CDP_MAX_DATA_TIDS - 1;

	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
		dp_err_rl("Release source:%d is not from TQM", ts->release_src);
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1);
		return;
	}

	length = qdf_nbuf_len(tx_desc->nbuf);
	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);

	if (qdf_unlikely(pdev->delay_stats_flag) ||
	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
		dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);

	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
		tid_stats->tqm_status_cnt[ts->status]++;
	}

	/* Success path: record retry/amsdu/extended rate stats and return */
	if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
					   ts->transmit_cnt > 1);

		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
					   1, ts->transmit_cnt > 2);

		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma);

		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
					   ts->msdu_part_of_amsdu);
		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
					   !ts->msdu_part_of_amsdu);

		txrx_peer->stats.per_pkt_stats.tx.last_tx_ts =
							qdf_system_ticks();

		dp_tx_update_peer_extd_stats(ts, txrx_peer);

		return;
	}

	/*
	 * tx_failed is ideally supposed to be updated from HTT ppdu
	 * completion stats. But in IPQ807X/IPQ6018 chipsets owing to
	 * hw limitation there are no completions for failed cases.
	 * Hence updating tx_failed from data path. Please note that
	 * if tx_failed is fixed to be from ppdu, then this has to be
	 * removed
	 */
	DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);

	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
				   ts->transmit_cnt > DP_RETRY_COUNT);
	dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer);

	/* Map each TQM release reason onto its drop counter */
	if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1);
	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
					      length);
	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1);
	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1);
	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1);
	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1);
	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1);
	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.fw_rem_queue_disable, 1);
	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.fw_rem_no_match, 1);
	} else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.drop_threshold, 1);
	} else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.drop_link_desc_na, 1);
	} else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.invalid_drop, 1);
	} else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.mcast_vdev_drop, 1);
	} else {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1);
	}
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_flow_pool_lock() - take flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc,
			  struct dp_tx_desc_s *tx_desc)
{
	struct dp_tx_desc_pool_s *pool;
	uint8_t desc_pool_id;

	desc_pool_id = tx_desc->pool_id;
	pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
}

/**
 * dp_tx_flow_pool_unlock() - release flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc,
			    struct dp_tx_desc_s *tx_desc)
{
	struct dp_tx_desc_pool_s *pool;
	uint8_t desc_pool_id;

	desc_pool_id = tx_desc->pool_id;
	pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}

static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_notify_completion() - Notify tx completion for this desc
 * @soc: core txrx main context
 * @vdev: datapath vdev handle
 * @tx_desc: tx desc
 * @netbuf: buffer
 * @status: tx status
 *
 * Return: none
 */
static inline void dp_tx_notify_completion(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc,
					   qdf_nbuf_t netbuf,
					   uint8_t status)
{
	void *osif_dev;
	ol_txrx_completion_fp tx_compl_cbk = NULL;
	/* Frame reached HW; OK bit is added below only if TQM acked it */
	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);

	qdf_assert(tx_desc);

	if (!vdev ||
	    !vdev->osif_vdev) {
		return;
	}

	osif_dev = vdev->osif_vdev;
	tx_compl_cbk = vdev->tx_comp;

	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
		flag |= BIT(QDF_TX_RX_STATUS_OK);

	if (tx_compl_cbk)
		tx_compl_cbk(netbuf, osif_dev, flag);
}

/** dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @txrx_peer: DP txrx peer context
 * @tid: tid value
 * @txdesc_ts: timestamp from txdesc
 * @ppdu_id: ppdu id
 *
 * Return: none
 */
#ifdef FEATURE_PERPKT_INFO
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
					       struct dp_txrx_peer *txrx_peer,
					       uint8_t tid,
					       uint64_t txdesc_ts,
					       uint32_t ppdu_id)
{
	uint64_t delta_ms;
	struct cdp_tx_sojourn_stats *sojourn_stats;
	struct dp_peer *primary_link_peer = NULL;
	struct dp_soc *link_peer_soc = NULL;

	if (qdf_unlikely(!pdev->enhanced_stats_en))
		return;

	if (qdf_unlikely(tid == HTT_INVALID_TID ||
			 tid >= CDP_DATA_TID_MAX))
		return;

	if (qdf_unlikely(!pdev->sojourn_buf))
		return;

	primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
							   txrx_peer->peer_id,
							   DP_MOD_ID_TX_COMP);

	if (qdf_unlikely(!primary_link_peer))
		return;

	/* pdev->sojourn_buf is a shared scratch nbuf used to publish
	 * one TID's values per WDI event.
	 */
	sojourn_stats = (struct cdp_tx_sojourn_stats *)
		qdf_nbuf_data(pdev->sojourn_buf);

	link_peer_soc = primary_link_peer->vdev->pdev->soc;
	sojourn_stats->cookie = (void *)
			dp_monitor_peer_get_peerstats_ctx(link_peer_soc,
							  primary_link_peer);

	delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
		   txdesc_ts;
	qdf_ewma_tx_lag_add(&txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid],
			    delta_ms);
	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
	sojourn_stats->num_msdus[tid] = 1;
	sojourn_stats->avg_sojourn_msdu[tid].internal =
		txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
			     pdev->sojourn_buf, HTT_INVALID_PEER,
			     WDI_NO_VAL, pdev->pdev_id);
	/* Clear the scratch buffer entries after the event is delivered */
	sojourn_stats->sum_sojourn_msdu[tid] = 0;
	sojourn_stats->num_msdus[tid] = 0;
	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;

	dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
}
#else
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
					       struct dp_txrx_peer *txrx_peer,
					       uint8_t tid,
					       uint64_t txdesc_ts,
					       uint32_t ppdu_id)
{
}
#endif

#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
 * @soc: dp_soc handle
 * @desc: Tx Descriptor
 * @ts: HAL Tx completion descriptor contents
 *
 * This function is used to send tx completion to packet capture
 */
void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				       struct dp_tx_desc_s *desc,
				       struct hal_tx_completion_status *ts)
{
	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
			     desc, ts->peer_id,
			     WDI_NO_VAL, desc->pdev->pdev_id);
}
#endif

/**
 * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
 * @soc: DP Soc handle
 * @desc: software Tx descriptor
 * @ts : Tx completion status from HAL/HTT descriptor
 * @txrx_peer: datapath txrx peer handle
 *
 * Return: none
 */
void
dp_tx_comp_process_desc(struct dp_soc *soc,
			struct dp_tx_desc_s *desc,
			struct hal_tx_completion_status *ts,
			struct dp_txrx_peer *txrx_peer)
{
	uint64_t time_latency = 0;
	uint16_t peer_id = DP_INVALID_PEER_ID;

	/*
	 * m_copy/tx_capture modes are not supported for
	 * scatter gather packets
	 */
	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
qdf_ktime_to_ms(desc->timestamp)); 4404 } 4405 4406 dp_send_completion_to_pkt_capture(soc, desc, ts); 4407 4408 if (dp_tx_pkt_tracepoints_enabled()) 4409 qdf_trace_dp_packet(desc->nbuf, QDF_TX, 4410 desc->msdu_ext_desc ? 4411 desc->msdu_ext_desc->tso_desc : NULL, 4412 qdf_ktime_to_ms(desc->timestamp)); 4413 4414 if (!(desc->msdu_ext_desc)) { 4415 dp_tx_enh_unmap(soc, desc); 4416 if (txrx_peer) 4417 peer_id = txrx_peer->peer_id; 4418 4419 if (QDF_STATUS_SUCCESS == 4420 dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) { 4421 return; 4422 } 4423 4424 if (QDF_STATUS_SUCCESS == 4425 dp_get_completion_indication_for_stack(soc, 4426 desc->pdev, 4427 txrx_peer, ts, 4428 desc->nbuf, 4429 time_latency)) { 4430 dp_send_completion_to_stack(soc, 4431 desc->pdev, 4432 ts->peer_id, 4433 ts->ppdu_id, 4434 desc->nbuf); 4435 return; 4436 } 4437 } 4438 4439 desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX; 4440 dp_tx_comp_free_buf(soc, desc); 4441 } 4442 4443 #ifdef DISABLE_DP_STATS 4444 /** 4445 * dp_tx_update_connectivity_stats() - update tx connectivity stats 4446 * @soc: core txrx main context 4447 * @tx_desc: tx desc 4448 * @status: tx status 4449 * 4450 * Return: none 4451 */ 4452 static inline 4453 void dp_tx_update_connectivity_stats(struct dp_soc *soc, 4454 struct dp_vdev *vdev, 4455 struct dp_tx_desc_s *tx_desc, 4456 uint8_t status) 4457 { 4458 } 4459 #else 4460 static inline 4461 void dp_tx_update_connectivity_stats(struct dp_soc *soc, 4462 struct dp_vdev *vdev, 4463 struct dp_tx_desc_s *tx_desc, 4464 uint8_t status) 4465 { 4466 void *osif_dev; 4467 ol_txrx_stats_rx_fp stats_cbk; 4468 uint8_t pkt_type; 4469 4470 qdf_assert(tx_desc); 4471 4472 if (!vdev || 4473 !vdev->osif_vdev || 4474 !vdev->stats_cb) 4475 return; 4476 4477 osif_dev = vdev->osif_vdev; 4478 stats_cbk = vdev->stats_cb; 4479 4480 stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type); 4481 if (status == HAL_TX_TQM_RR_FRAME_ACKED) 4482 stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT, 
4483 &pkt_type); 4484 } 4485 #endif 4486 4487 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(CONFIG_SAWF) 4488 QDF_STATUS 4489 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts, 4490 uint32_t delta_tsf, 4491 uint32_t *delay_us) 4492 { 4493 uint32_t buffer_ts; 4494 uint32_t delay; 4495 4496 if (!delay_us) 4497 return QDF_STATUS_E_INVAL; 4498 4499 /* Tx_rate_stats_info_valid is 0 and tsf is invalid then */ 4500 if (!ts->valid) 4501 return QDF_STATUS_E_INVAL; 4502 4503 /* buffer_timestamp is in units of 1024 us and is [31:13] of 4504 * WBM_RELEASE_RING_4. After left shift 10 bits, it's 4505 * valid up to 29 bits. 4506 */ 4507 buffer_ts = ts->buffer_timestamp << 10; 4508 4509 delay = ts->tsf - buffer_ts - delta_tsf; 4510 delay &= 0x1FFFFFFF; /* mask 29 BITS */ 4511 if (delay > 0x1000000) { 4512 dp_info_rl("----------------------\n" 4513 "Tx completion status:\n" 4514 "----------------------\n" 4515 "release_src = %d\n" 4516 "ppdu_id = 0x%x\n" 4517 "release_reason = %d\n" 4518 "tsf = %u (0x%x)\n" 4519 "buffer_timestamp = %u (0x%x)\n" 4520 "delta_tsf = %u (0x%x)\n", 4521 ts->release_src, ts->ppdu_id, ts->status, 4522 ts->tsf, ts->tsf, ts->buffer_timestamp, 4523 ts->buffer_timestamp, delta_tsf, delta_tsf); 4524 return QDF_STATUS_E_FAILURE; 4525 } 4526 4527 *delay_us = delay; 4528 4529 return QDF_STATUS_SUCCESS; 4530 } 4531 4532 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 4533 uint32_t delta_tsf) 4534 { 4535 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 4536 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 4537 DP_MOD_ID_CDP); 4538 4539 if (!vdev) { 4540 dp_err_rl("vdev %d does not exist", vdev_id); 4541 return; 4542 } 4543 4544 vdev->delta_tsf = delta_tsf; 4545 dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf); 4546 4547 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 4548 } 4549 #endif 4550 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY 4551 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl, 4552 uint8_t 
vdev_id, bool enable) 4553 { 4554 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 4555 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 4556 DP_MOD_ID_CDP); 4557 4558 if (!vdev) { 4559 dp_err_rl("vdev %d does not exist", vdev_id); 4560 return QDF_STATUS_E_FAILURE; 4561 } 4562 4563 qdf_atomic_set(&vdev->ul_delay_report, enable); 4564 4565 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 4566 4567 return QDF_STATUS_SUCCESS; 4568 } 4569 4570 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 4571 uint32_t *val) 4572 { 4573 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 4574 struct dp_vdev *vdev; 4575 uint32_t delay_accum; 4576 uint32_t pkts_accum; 4577 4578 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP); 4579 if (!vdev) { 4580 dp_err_rl("vdev %d does not exist", vdev_id); 4581 return QDF_STATUS_E_FAILURE; 4582 } 4583 4584 if (!qdf_atomic_read(&vdev->ul_delay_report)) { 4585 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 4586 return QDF_STATUS_E_FAILURE; 4587 } 4588 4589 /* Average uplink delay based on current accumulated values */ 4590 delay_accum = qdf_atomic_read(&vdev->ul_delay_accum); 4591 pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum); 4592 4593 *val = delay_accum / pkts_accum; 4594 dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val, 4595 delay_accum, pkts_accum); 4596 4597 /* Reset accumulated values to 0 */ 4598 qdf_atomic_set(&vdev->ul_delay_accum, 0); 4599 qdf_atomic_set(&vdev->ul_pkts_accum, 0); 4600 4601 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 4602 4603 return QDF_STATUS_SUCCESS; 4604 } 4605 4606 static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev, 4607 struct hal_tx_completion_status *ts) 4608 { 4609 uint32_t ul_delay; 4610 4611 if (qdf_unlikely(!vdev)) { 4612 dp_info_rl("vdev is null or delete in progrss"); 4613 return; 4614 } 4615 4616 if (!qdf_atomic_read(&vdev->ul_delay_report)) 4617 return; 4618 4619 if 
(QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts, 4620 vdev->delta_tsf, 4621 &ul_delay))) 4622 return; 4623 4624 ul_delay /= 1000; /* in unit of ms */ 4625 4626 qdf_atomic_add(ul_delay, &vdev->ul_delay_accum); 4627 qdf_atomic_inc(&vdev->ul_pkts_accum); 4628 } 4629 #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */ 4630 static inline 4631 void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev, 4632 struct hal_tx_completion_status *ts) 4633 { 4634 } 4635 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */ 4636 4637 /** 4638 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info 4639 * @soc: DP soc handle 4640 * @tx_desc: software descriptor head pointer 4641 * @ts: Tx completion status 4642 * @txrx_peer: txrx peer handle 4643 * @ring_id: ring number 4644 * 4645 * Return: none 4646 */ 4647 void dp_tx_comp_process_tx_status(struct dp_soc *soc, 4648 struct dp_tx_desc_s *tx_desc, 4649 struct hal_tx_completion_status *ts, 4650 struct dp_txrx_peer *txrx_peer, 4651 uint8_t ring_id) 4652 { 4653 uint32_t length; 4654 qdf_ether_header_t *eh; 4655 struct dp_vdev *vdev = NULL; 4656 qdf_nbuf_t nbuf = tx_desc->nbuf; 4657 enum qdf_dp_tx_rx_status dp_status; 4658 4659 if (!nbuf) { 4660 dp_info_rl("invalid tx descriptor. 
nbuf NULL"); 4661 goto out; 4662 } 4663 4664 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 4665 length = qdf_nbuf_len(nbuf); 4666 4667 dp_status = dp_tx_hw_to_qdf(ts->status); 4668 DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf, 4669 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD, 4670 QDF_TRACE_DEFAULT_PDEV_ID, 4671 qdf_nbuf_data_addr(nbuf), 4672 sizeof(qdf_nbuf_data(nbuf)), 4673 tx_desc->id, ts->status, dp_status)); 4674 4675 dp_tx_comp_debug("-------------------- \n" 4676 "Tx Completion Stats: \n" 4677 "-------------------- \n" 4678 "ack_frame_rssi = %d \n" 4679 "first_msdu = %d \n" 4680 "last_msdu = %d \n" 4681 "msdu_part_of_amsdu = %d \n" 4682 "rate_stats valid = %d \n" 4683 "bw = %d \n" 4684 "pkt_type = %d \n" 4685 "stbc = %d \n" 4686 "ldpc = %d \n" 4687 "sgi = %d \n" 4688 "mcs = %d \n" 4689 "ofdma = %d \n" 4690 "tones_in_ru = %d \n" 4691 "tsf = %d \n" 4692 "ppdu_id = %d \n" 4693 "transmit_cnt = %d \n" 4694 "tid = %d \n" 4695 "peer_id = %d\n" 4696 "tx_status = %d\n", 4697 ts->ack_frame_rssi, ts->first_msdu, 4698 ts->last_msdu, ts->msdu_part_of_amsdu, 4699 ts->valid, ts->bw, ts->pkt_type, ts->stbc, 4700 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma, 4701 ts->tones_in_ru, ts->tsf, ts->ppdu_id, 4702 ts->transmit_cnt, ts->tid, ts->peer_id, 4703 ts->status); 4704 4705 /* Update SoC level stats */ 4706 DP_STATS_INCC(soc, tx.dropped_fw_removed, 1, 4707 (ts->status == HAL_TX_TQM_RR_REM_CMD_REM)); 4708 4709 if (!txrx_peer) { 4710 dp_info_rl("peer is null or deletion in progress"); 4711 DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length); 4712 goto out; 4713 } 4714 vdev = txrx_peer->vdev; 4715 4716 dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status); 4717 dp_tx_update_uplink_delay(soc, vdev, ts); 4718 4719 /* check tx complete notification */ 4720 if (qdf_nbuf_tx_notify_comp_get(nbuf)) 4721 dp_tx_notify_completion(soc, vdev, tx_desc, 4722 nbuf, ts->status); 4723 4724 /* Update per-packet stats for mesh mode */ 4725 if (qdf_unlikely(vdev->mesh_vdev) && 4726 !(tx_desc->flags & 
DP_TX_DESC_FLAG_TO_FW)) 4727 dp_tx_comp_fill_tx_completion_stats(tx_desc, ts); 4728 4729 /* Update peer level stats */ 4730 if (qdf_unlikely(txrx_peer->bss_peer && 4731 vdev->opmode == wlan_op_mode_ap)) { 4732 if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) { 4733 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1, 4734 length); 4735 4736 if (txrx_peer->vdev->tx_encap_type == 4737 htt_cmn_pkt_type_ethernet && 4738 QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { 4739 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, 4740 tx.bcast, 1, 4741 length); 4742 } 4743 } 4744 } else { 4745 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length); 4746 if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) { 4747 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success, 4748 1, length); 4749 if (qdf_unlikely(txrx_peer->in_twt)) { 4750 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, 4751 tx.tx_success_twt, 4752 1, length); 4753 } 4754 } 4755 } 4756 4757 dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id); 4758 dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts->tid, ring_id); 4759 dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc, 4760 ts, ts->tid); 4761 dp_tx_send_pktlog(soc, vdev->pdev, nbuf, dp_status); 4762 4763 #ifdef QCA_SUPPORT_RDK_STATS 4764 if (soc->peerstats_enabled) 4765 dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid, 4766 qdf_ktime_to_ms(tx_desc->timestamp), 4767 ts->ppdu_id); 4768 #endif 4769 4770 out: 4771 return; 4772 } 4773 4774 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \ 4775 defined(QCA_ENHANCED_STATS_SUPPORT) 4776 /* 4777 * dp_tx_update_peer_basic_stats(): Update peer basic stats 4778 * @txrx_peer: Datapath txrx_peer handle 4779 * @length: Length of the packet 4780 * @tx_status: Tx status from TQM/FW 4781 * @update: enhanced flag value present in dp_pdev 4782 * 4783 * Return: none 4784 */ 4785 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer, 4786 uint32_t length, uint8_t tx_status, 4787 bool update) 4788 { 4789 if 
((!txrx_peer->hw_txrx_stats_en) || update) { 4790 DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length); 4791 4792 if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED) 4793 DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1); 4794 } 4795 } 4796 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) 4797 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer, 4798 uint32_t length, uint8_t tx_status, 4799 bool update) 4800 { 4801 if (!peer->hw_txrx_stats_en) { 4802 DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length); 4803 4804 if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED) 4805 DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1); 4806 } 4807 } 4808 4809 #else 4810 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer, 4811 uint32_t length, uint8_t tx_status, 4812 bool update) 4813 { 4814 DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length); 4815 4816 if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED) 4817 DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1); 4818 } 4819 #endif 4820 4821 /* 4822 * dp_tx_prefetch_next_nbuf_data(): Prefetch nbuf and nbuf data 4823 * @nbuf: skb buffer 4824 * 4825 * Return: none 4826 */ 4827 #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH 4828 static inline 4829 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next) 4830 { 4831 qdf_nbuf_t nbuf = NULL; 4832 4833 if (next) 4834 nbuf = next->nbuf; 4835 if (nbuf) { 4836 /* prefetch skb->next and first few bytes of skb->cb */ 4837 qdf_prefetch(nbuf); 4838 /* prefetch skb fields present in different cachelines */ 4839 qdf_prefetch(&nbuf->len); 4840 qdf_prefetch(&nbuf->users); 4841 qdf_prefetch(skb_end_pointer(nbuf)); 4842 } 4843 } 4844 #else 4845 static inline 4846 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next) 4847 { 4848 } 4849 #endif 4850 4851 /** 4852 * dp_tx_mcast_reinject_handler() - Tx reinjected multicast packets handler 4853 * @soc: core txrx main context 4854 * @desc: software descriptor 4855 * 4856 * Return: true when packet is reinjected 4857 */ 4858 #if 
defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \ 4859 defined(WLAN_MCAST_MLO) 4860 static inline bool 4861 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc) 4862 { 4863 struct dp_vdev *vdev = NULL; 4864 4865 if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) { 4866 if (!soc->arch_ops.dp_tx_mcast_handler) 4867 return false; 4868 4869 vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id, 4870 DP_MOD_ID_REINJECT); 4871 4872 if (qdf_unlikely(!vdev)) { 4873 dp_tx_comp_info_rl("Unable to get vdev ref %d", 4874 desc->id); 4875 return false; 4876 } 4877 DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1, 4878 qdf_nbuf_len(desc->nbuf)); 4879 soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf); 4880 dp_tx_desc_release(desc, desc->pool_id); 4881 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT); 4882 return true; 4883 } 4884 4885 return false; 4886 } 4887 #else 4888 static inline bool 4889 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc) 4890 { 4891 return false; 4892 } 4893 #endif 4894 4895 /** 4896 * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler 4897 * @soc: core txrx main context 4898 * @comp_head: software descriptor head pointer 4899 * @ring_id: ring number 4900 * 4901 * This function will process batch of descriptors reaped by dp_tx_comp_handler 4902 * and release the software descriptors after processing is complete 4903 * 4904 * Return: none 4905 */ 4906 static void 4907 dp_tx_comp_process_desc_list(struct dp_soc *soc, 4908 struct dp_tx_desc_s *comp_head, uint8_t ring_id) 4909 { 4910 struct dp_tx_desc_s *desc; 4911 struct dp_tx_desc_s *next; 4912 struct hal_tx_completion_status ts; 4913 struct dp_txrx_peer *txrx_peer = NULL; 4914 uint16_t peer_id = DP_INVALID_PEER; 4915 dp_txrx_ref_handle txrx_ref_handle = NULL; 4916 4917 desc = comp_head; 4918 4919 while (desc) { 4920 next = desc->next; 4921 dp_tx_prefetch_next_nbuf_data(next); 4922 4923 if (peer_id != desc->peer_id) { 
4924 if (txrx_peer) 4925 dp_txrx_peer_unref_delete(txrx_ref_handle, 4926 DP_MOD_ID_TX_COMP); 4927 peer_id = desc->peer_id; 4928 txrx_peer = 4929 dp_txrx_peer_get_ref_by_id(soc, peer_id, 4930 &txrx_ref_handle, 4931 DP_MOD_ID_TX_COMP); 4932 } 4933 4934 if (dp_tx_mcast_reinject_handler(soc, desc)) { 4935 desc = next; 4936 continue; 4937 } 4938 if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) { 4939 struct dp_pdev *pdev = desc->pdev; 4940 4941 if (qdf_likely(txrx_peer)) 4942 dp_tx_update_peer_basic_stats(txrx_peer, 4943 desc->length, 4944 desc->tx_status, 4945 false); 4946 qdf_assert(pdev); 4947 dp_tx_outstanding_dec(pdev); 4948 4949 /* 4950 * Calling a QDF WRAPPER here is creating signifcant 4951 * performance impact so avoided the wrapper call here 4952 */ 4953 dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, 4954 desc->id, DP_TX_COMP_UNMAP); 4955 dp_tx_nbuf_unmap(soc, desc); 4956 qdf_nbuf_free_simple(desc->nbuf); 4957 dp_tx_desc_free(soc, desc, desc->pool_id); 4958 desc = next; 4959 continue; 4960 } 4961 4962 hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc); 4963 4964 dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer, 4965 ring_id); 4966 4967 dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer); 4968 4969 dp_tx_desc_release(desc, desc->pool_id); 4970 desc = next; 4971 } 4972 if (txrx_peer) 4973 dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP); 4974 } 4975 4976 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT 4977 static inline 4978 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped, 4979 int max_reap_limit) 4980 { 4981 bool limit_hit = false; 4982 4983 limit_hit = 4984 (num_reaped >= max_reap_limit) ? 
true : false; 4985 4986 if (limit_hit) 4987 DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1); 4988 4989 return limit_hit; 4990 } 4991 4992 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc) 4993 { 4994 return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check; 4995 } 4996 4997 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc) 4998 { 4999 struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx; 5000 5001 return cfg->tx_comp_loop_pkt_limit; 5002 } 5003 #else 5004 static inline 5005 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped, 5006 int max_reap_limit) 5007 { 5008 return false; 5009 } 5010 5011 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc) 5012 { 5013 return false; 5014 } 5015 5016 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc) 5017 { 5018 return 0; 5019 } 5020 #endif 5021 5022 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ 5023 static inline int 5024 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng, 5025 int *max_reap_limit) 5026 { 5027 return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng, 5028 max_reap_limit); 5029 } 5030 #else 5031 static inline int 5032 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng, 5033 int *max_reap_limit) 5034 { 5035 return 0; 5036 } 5037 #endif 5038 5039 #ifdef DP_TX_TRACKING 5040 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc) 5041 { 5042 if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) && 5043 (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) { 5044 dp_err_rl("tx_desc %u is corrupted", tx_desc->id); 5045 qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK); 5046 } 5047 } 5048 #endif 5049 5050 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc, 5051 hal_ring_handle_t hal_ring_hdl, uint8_t ring_id, 5052 uint32_t quota) 5053 { 5054 void *tx_comp_hal_desc; 5055 void *last_prefetched_hw_desc = NULL; 5056 struct dp_tx_desc_s *last_prefetched_sw_desc 
= NULL; 5057 hal_soc_handle_t hal_soc; 5058 uint8_t buffer_src; 5059 struct dp_tx_desc_s *tx_desc = NULL; 5060 struct dp_tx_desc_s *head_desc = NULL; 5061 struct dp_tx_desc_s *tail_desc = NULL; 5062 uint32_t num_processed = 0; 5063 uint32_t count; 5064 uint32_t num_avail_for_reap = 0; 5065 bool force_break = false; 5066 struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id]; 5067 int max_reap_limit, ring_near_full; 5068 5069 DP_HIST_INIT(); 5070 5071 more_data: 5072 5073 hal_soc = soc->hal_soc; 5074 /* Re-initialize local variables to be re-used */ 5075 head_desc = NULL; 5076 tail_desc = NULL; 5077 count = 0; 5078 max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc); 5079 5080 ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring, 5081 &max_reap_limit); 5082 5083 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { 5084 dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl); 5085 return 0; 5086 } 5087 5088 num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0); 5089 5090 if (num_avail_for_reap >= quota) 5091 num_avail_for_reap = quota; 5092 5093 dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap); 5094 last_prefetched_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl, 5095 num_avail_for_reap); 5096 5097 /* Find head descriptor from completion ring */ 5098 while (qdf_likely(num_avail_for_reap--)) { 5099 5100 tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl); 5101 if (qdf_unlikely(!tx_comp_hal_desc)) 5102 break; 5103 buffer_src = hal_tx_comp_get_buffer_source(hal_soc, 5104 tx_comp_hal_desc); 5105 5106 /* If this buffer was not released by TQM or FW, then it is not 5107 * Tx completion indication, assert */ 5108 if (qdf_unlikely(buffer_src != 5109 HAL_TX_COMP_RELEASE_SOURCE_TQM) && 5110 (qdf_unlikely(buffer_src != 5111 HAL_TX_COMP_RELEASE_SOURCE_FW))) { 5112 uint8_t wbm_internal_error; 5113 5114 dp_err_rl( 5115 "Tx comp release_src != TQM | FW but from %d", 5116 buffer_src); 5117 
hal_dump_comp_desc(tx_comp_hal_desc); 5118 DP_STATS_INC(soc, tx.invalid_release_source, 1); 5119 5120 /* When WBM sees NULL buffer_addr_info in any of 5121 * ingress rings it sends an error indication, 5122 * with wbm_internal_error=1, to a specific ring. 5123 * The WBM2SW ring used to indicate these errors is 5124 * fixed in HW, and that ring is being used as Tx 5125 * completion ring. These errors are not related to 5126 * Tx completions, and should just be ignored 5127 */ 5128 wbm_internal_error = hal_get_wbm_internal_error( 5129 hal_soc, 5130 tx_comp_hal_desc); 5131 5132 if (wbm_internal_error) { 5133 dp_err_rl("Tx comp wbm_internal_error!!"); 5134 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1); 5135 5136 if (HAL_TX_COMP_RELEASE_SOURCE_REO == 5137 buffer_src) 5138 dp_handle_wbm_internal_error( 5139 soc, 5140 tx_comp_hal_desc, 5141 hal_tx_comp_get_buffer_type( 5142 tx_comp_hal_desc)); 5143 5144 } else { 5145 dp_err_rl("Tx comp wbm_internal_error false"); 5146 DP_STATS_INC(soc, tx.non_wbm_internal_err, 1); 5147 } 5148 continue; 5149 } 5150 5151 soc->arch_ops.tx_comp_get_params_from_hal_desc(soc, 5152 tx_comp_hal_desc, 5153 &tx_desc); 5154 if (!tx_desc) { 5155 dp_err("unable to retrieve tx_desc!"); 5156 QDF_BUG(0); 5157 continue; 5158 } 5159 tx_desc->buffer_src = buffer_src; 5160 /* 5161 * If the release source is FW, process the HTT status 5162 */ 5163 if (qdf_unlikely(buffer_src == 5164 HAL_TX_COMP_RELEASE_SOURCE_FW)) { 5165 uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN]; 5166 5167 hal_tx_comp_get_htt_desc(tx_comp_hal_desc, 5168 htt_tx_status); 5169 /* Collect hw completion contents */ 5170 hal_tx_comp_desc_sync(tx_comp_hal_desc, 5171 &tx_desc->comp, 1); 5172 soc->arch_ops.dp_tx_process_htt_completion( 5173 soc, 5174 tx_desc, 5175 htt_tx_status, 5176 ring_id); 5177 } else { 5178 tx_desc->tx_status = 5179 hal_tx_comp_get_tx_status(tx_comp_hal_desc); 5180 tx_desc->buffer_src = buffer_src; 5181 /* 5182 * If the fast completion mode is enabled 
extended 5183 * metadata from descriptor is not copied 5184 */ 5185 if (qdf_likely(tx_desc->flags & 5186 DP_TX_DESC_FLAG_SIMPLE)) 5187 goto add_to_pool; 5188 5189 /* 5190 * If the descriptor is already freed in vdev_detach, 5191 * continue to next descriptor 5192 */ 5193 if (qdf_unlikely 5194 ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) && 5195 !tx_desc->flags)) { 5196 dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d", 5197 tx_desc->id); 5198 DP_STATS_INC(soc, tx.tx_comp_exception, 1); 5199 dp_tx_desc_check_corruption(tx_desc); 5200 continue; 5201 } 5202 5203 if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) { 5204 dp_tx_comp_info_rl("pdev in down state %d", 5205 tx_desc->id); 5206 tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR; 5207 dp_tx_comp_free_buf(soc, tx_desc); 5208 dp_tx_desc_release(tx_desc, tx_desc->pool_id); 5209 goto next_desc; 5210 } 5211 5212 if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) || 5213 !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) { 5214 dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d", 5215 tx_desc->flags, tx_desc->id); 5216 qdf_assert_always(0); 5217 } 5218 5219 /* Collect hw completion contents */ 5220 hal_tx_comp_desc_sync(tx_comp_hal_desc, 5221 &tx_desc->comp, 1); 5222 add_to_pool: 5223 DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id); 5224 5225 /* First ring descriptor on the cycle */ 5226 if (!head_desc) { 5227 head_desc = tx_desc; 5228 tail_desc = tx_desc; 5229 } 5230 5231 tail_desc->next = tx_desc; 5232 tx_desc->next = NULL; 5233 tail_desc = tx_desc; 5234 } 5235 next_desc: 5236 num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK); 5237 5238 /* 5239 * Processed packet count is more than given quota 5240 * stop to processing 5241 */ 5242 5243 count++; 5244 5245 dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc, 5246 num_avail_for_reap, 5247 hal_ring_hdl, 5248 &last_prefetched_hw_desc, 5249 &last_prefetched_sw_desc); 5250 5251 if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit)) 5252 break; 5253 } 5254 5255 
dp_srng_access_end(int_ctx, soc, hal_ring_hdl); 5256 5257 /* Process the reaped descriptors */ 5258 if (head_desc) 5259 dp_tx_comp_process_desc_list(soc, head_desc, ring_id); 5260 5261 DP_STATS_INC(soc, tx.tx_comp[ring_id], count); 5262 5263 /* 5264 * If we are processing in near-full condition, there are 3 scenario 5265 * 1) Ring entries has reached critical state 5266 * 2) Ring entries are still near high threshold 5267 * 3) Ring entries are below the safe level 5268 * 5269 * One more loop will move te state to normal processing and yield 5270 */ 5271 if (ring_near_full) 5272 goto more_data; 5273 5274 if (dp_tx_comp_enable_eol_data_check(soc)) { 5275 5276 if (num_processed >= quota) 5277 force_break = true; 5278 5279 if (!force_break && 5280 hal_srng_dst_peek_sync_locked(soc->hal_soc, 5281 hal_ring_hdl)) { 5282 DP_STATS_INC(soc, tx.hp_oos2, 1); 5283 if (!hif_exec_should_yield(soc->hif_handle, 5284 int_ctx->dp_intr_id)) 5285 goto more_data; 5286 } 5287 } 5288 DP_TX_HIST_STATS_PER_PDEV(); 5289 5290 return num_processed; 5291 } 5292 5293 #ifdef FEATURE_WLAN_TDLS 5294 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 5295 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list) 5296 { 5297 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5298 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 5299 DP_MOD_ID_TDLS); 5300 5301 if (!vdev) { 5302 dp_err("vdev handle for id %d is NULL", vdev_id); 5303 return NULL; 5304 } 5305 5306 if (tx_spec & OL_TX_SPEC_NO_FREE) 5307 vdev->is_tdls_frame = true; 5308 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS); 5309 5310 return dp_tx_send(soc_hdl, vdev_id, msdu_list); 5311 } 5312 #endif 5313 5314 /** 5315 * dp_tx_vdev_attach() - attach vdev to dp tx 5316 * @vdev: virtual device instance 5317 * 5318 * Return: QDF_STATUS_SUCCESS: success 5319 * QDF_STATUS_E_RESOURCES: Error return 5320 */ 5321 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev) 5322 { 5323 int pdev_id; 5324 /* 5325 * Fill HTT TCL Metadata with Vdev ID 
and MAC ID 5326 */ 5327 DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata, 5328 DP_TCL_METADATA_TYPE_VDEV_BASED); 5329 5330 DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata, 5331 vdev->vdev_id); 5332 5333 pdev_id = 5334 dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc, 5335 vdev->pdev->pdev_id); 5336 DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id); 5337 5338 /* 5339 * Set HTT Extension Valid bit to 0 by default 5340 */ 5341 DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0); 5342 5343 dp_tx_vdev_update_search_flags(vdev); 5344 5345 return QDF_STATUS_SUCCESS; 5346 } 5347 5348 #ifndef FEATURE_WDS 5349 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev) 5350 { 5351 return false; 5352 } 5353 #endif 5354 5355 /** 5356 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode 5357 * @vdev: virtual device instance 5358 * 5359 * Return: void 5360 * 5361 */ 5362 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev) 5363 { 5364 struct dp_soc *soc = vdev->pdev->soc; 5365 5366 /* 5367 * Enable both AddrY (SA based search) and AddrX (Da based search) 5368 * for TDLS link 5369 * 5370 * Enable AddrY (SA based search) only for non-WDS STA and 5371 * ProxySTA VAP (in HKv1) modes. 
 *
 * In all other VAP modes, only DA based search should be
 * enabled
 */
	if (vdev->opmode == wlan_op_mode_sta &&
	    vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if ((vdev->opmode == wlan_op_mode_sta) &&
		 !dp_tx_da_search_override(vdev))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;

	/* Non-TDLS STA uses the soc-configured STA search policy;
	 * everything else falls back to the HW default search type.
	 */
	if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
		vdev->search_type = soc->sta_mode_search_policy;
	else
		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
}

/**
 * dp_is_tx_desc_flush_match() - check whether a TX desc belongs to the
 *				 vdev/pdev that is being flushed
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance, may be NULL
 * @tx_desc: TX desc to be checked
 *
 * Return: true if @tx_desc is allocated and associated to @vdev, or to
 *	   @pdev when @vdev is NULL; false otherwise
 */
static inline bool
dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
			  struct dp_vdev *vdev,
			  struct dp_tx_desc_s *tx_desc)
{
	/* Only descriptors currently allocated are flush candidates */
	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
		return false;

	/*
	 * if vdev is given, then only check whether desc
	 * vdev match. if vdev is NULL, then check whether
	 * desc pdev match.
	 */
	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
		(tx_desc->pdev == pdev);
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_desc_flush() - release resources associated
 *                      to TX Desc
 *
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance
 * NULL: no specific Vdev is required and check all allocated TX desc
 * on this pdev.
 * Non-NULL: only check the allocated TX Desc associated to this Vdev.
 *
 * @force_free:
 * true: flush the TX desc.
 * false: only reset the Vdev in each allocated TX desc
 * that associated to current Vdev.
 *
 * This function will go through the TX desc pool to flush
 * the outstanding TX data or reset Vdev to NULL in associated TX
 * Desc.
 */
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	/* Resetting vdev in descriptors needs a vdev to match against */
	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		/* Skip pools that were never brought up or already torn down */
		if (!(tx_desc_pool->pool_size) ||
		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
		    !(tx_desc_pool->desc_pages.cacheable_pages))
			continue;

		/*
		 * Add flow pool lock protection in case pool is freed
		 * due to all tx_desc is recycled when handle TX completion.
		 * this is not necessary when do force flush as:
		 * a. double lock will happen if dp_tx_desc_release is
		 *    also trying to acquire it.
		 * b. dp interrupt has been disabled before do force TX desc
		 *    flush in dp_pdev_deinit().
		 */
		if (!force_free)
			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
		num_desc = tx_desc_pool->pool_size;
		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;

			/* Pool pages may vanish mid-scan; stop this pool */
			if (qdf_unlikely(!(tx_desc_pool->
					 desc_pages.cacheable_pages)))
				break;

			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				/*
				 * Free TX desc if force free is
				 * required, otherwise only reset vdev
				 * in this TX desc.
				 */
				if (force_free) {
					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
				}
			}
		}
		if (!force_free)
			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
	}
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
/**
 * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
 *
 * @soc: Handle to DP soc structure
 * @tx_desc: pointer of one TX desc
 * @desc_pool_id: TX Desc pool id
 */
static inline void
dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		      uint8_t desc_pool_id)
{
	/* Serialize against concurrent descriptor alloc/free on this pool */
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev_id = DP_INVALID_VDEV_ID;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}

/**
 * dp_tx_desc_flush() - release resources associated to TX Desc
 *
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance
 * NULL: no specific Vdev is required and check all allocated TX desc
 * on this pdev.
 * Non-NULL: only check the allocated TX Desc associated to this Vdev.
 *
 * @force_free:
 * true: flush the TX desc.
 * false: only reset the Vdev in each allocated TX desc
 * that associated to current Vdev.
 *
 * This function will go through the TX desc pool to flush
 * the outstanding TX data or reset Vdev to NULL in associated TX
 * Desc.
 */
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i, num_pool;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	/* Resetting vdev in descriptors needs a vdev to match against */
	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		/* Skip pools whose descriptor pages were never allocated */
		if (!tx_desc_pool->desc_pages.cacheable_pages)
			continue;

		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;
			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				if (force_free) {
					tx_desc->flags
						|= DP_TX_DESC_FLAG_FLUSH;
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					dp_tx_desc_reset_vdev(soc, tx_desc,
							      i);
				}
			}
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;

	/* Reset TX desc associated to this Vdev as NULL */
	dp_tx_desc_flush(pdev, vdev, false);

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   int num_desc)
{
	uint8_t i;

	/* Only prepare pool bookkeeping; descriptors come from flow pools */
	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return QDF_STATUS_SUCCESS;
}

/* No-op: per-flow pools are initialized on demand */
static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  uint32_t num_desc)
{
	return QDF_STATUS_SUCCESS;
}

/* No-op: per-flow pools are de-initialized on flow delete */
static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   uint32_t num_desc)
{
	uint8_t i, count;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool alloc %d failed %pK"),
				  i, soc);
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	/* Unwind only the pools that were successfully allocated */
	for (count = 0; count < i; count++)
		dp_tx_desc_pool_free(soc, count);

	return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  uint32_t num_desc)
{
	uint8_t i;
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool init %d failed %pK"),
				  i, soc);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_deinit(soc, i);
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_free(soc, i);
}

#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void
dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_free(soc, num_pool);
	dp_tx_tso_num_seg_pool_free(soc, num_pool);
}

/**
 * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
 * @soc: core txrx main context
 *
 * This function frees all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
	dp_tx_ext_desc_pool_free(soc, num_pool);
	dp_tx_delete_static_pools(soc, num_pool);
}

/**
 * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
 * @soc: core txrx main context
 *
 * This function de-initializes all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
	dp_tx_deinit_static_pools(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
					 uint8_t num_pool,
					 uint32_t num_desc)
{
	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool alloc %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Initialize TSO descriptor pools
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */

QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
					uint8_t num_pool,
					uint32_t num_desc)
{
	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool alloc %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/** 5789 * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory 5790 * @soc: core txrx main context 5791 * 5792 * This function allocates memory for following descriptor pools 5793 * 1. regular sw tx descriptor pools (static pools) 5794 * 2. TX extension descriptor pools (ME, RAW, TSO etc...) 5795 * 3. TSO descriptor pools 5796 * 5797 * Return: QDF_STATUS_SUCCESS: success 5798 * QDF_STATUS_E_RESOURCES: Error return 5799 */ 5800 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc) 5801 { 5802 uint8_t num_pool; 5803 uint32_t num_desc; 5804 uint32_t num_ext_desc; 5805 5806 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); 5807 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); 5808 num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx); 5809 5810 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 5811 "%s Tx Desc Alloc num_pool = %d, descs = %d", 5812 __func__, num_pool, num_desc); 5813 5814 if ((num_pool > MAX_TXDESC_POOLS) || 5815 (num_desc > WLAN_CFG_NUM_TX_DESC_MAX)) 5816 goto fail1; 5817 5818 if (dp_tx_alloc_static_pools(soc, num_pool, num_desc)) 5819 goto fail1; 5820 5821 if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc)) 5822 goto fail2; 5823 5824 if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx)) 5825 return QDF_STATUS_SUCCESS; 5826 5827 if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc)) 5828 goto fail3; 5829 5830 return QDF_STATUS_SUCCESS; 5831 5832 fail3: 5833 dp_tx_ext_desc_pool_free(soc, num_pool); 5834 fail2: 5835 dp_tx_delete_static_pools(soc, num_pool); 5836 fail1: 5837 return QDF_STATUS_E_RESOURCES; 5838 } 5839 5840 /** 5841 * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools 5842 * @soc: core txrx main context 5843 * 5844 * This function initializes the following TX descriptor pools 5845 * 1. regular sw tx descriptor pools (static pools) 5846 * 2. TX extension descriptor pools (ME, RAW, TSO etc...) 5847 * 3. 
TSO descriptor pools 5848 * 5849 * Return: QDF_STATUS_SUCCESS: success 5850 * QDF_STATUS_E_RESOURCES: Error return 5851 */ 5852 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc) 5853 { 5854 uint8_t num_pool; 5855 uint32_t num_desc; 5856 uint32_t num_ext_desc; 5857 5858 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); 5859 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); 5860 num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx); 5861 5862 if (dp_tx_init_static_pools(soc, num_pool, num_desc)) 5863 goto fail1; 5864 5865 if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc)) 5866 goto fail2; 5867 5868 if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx)) 5869 return QDF_STATUS_SUCCESS; 5870 5871 if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc)) 5872 goto fail3; 5873 5874 dp_tx_flow_control_init(soc); 5875 soc->process_tx_status = CONFIG_PROCESS_TX_STATUS; 5876 return QDF_STATUS_SUCCESS; 5877 5878 fail3: 5879 dp_tx_ext_desc_pool_deinit(soc, num_pool); 5880 fail2: 5881 dp_tx_deinit_static_pools(soc, num_pool); 5882 fail1: 5883 return QDF_STATUS_E_RESOURCES; 5884 } 5885 5886 /** 5887 * dp_tso_soc_attach() - Allocate and initialize TSO descriptors 5888 * @txrx_soc: dp soc handle 5889 * 5890 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 5891 * QDF_STATUS_E_FAILURE 5892 */ 5893 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc) 5894 { 5895 struct dp_soc *soc = (struct dp_soc *)txrx_soc; 5896 uint8_t num_pool; 5897 uint32_t num_desc; 5898 uint32_t num_ext_desc; 5899 5900 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); 5901 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); 5902 num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx); 5903 5904 if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc)) 5905 return QDF_STATUS_E_FAILURE; 5906 5907 if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc)) 5908 return QDF_STATUS_E_FAILURE; 5909 5910 return QDF_STATUS_SUCCESS; 5911 } 
5912 5913 /** 5914 * dp_tso_soc_detach() - de-initialize and free the TSO descriptors 5915 * @txrx_soc: dp soc handle 5916 * 5917 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 5918 */ 5919 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc) 5920 { 5921 struct dp_soc *soc = (struct dp_soc *)txrx_soc; 5922 uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); 5923 5924 dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool); 5925 dp_tx_tso_cmn_desc_pool_free(soc, num_pool); 5926 5927 return QDF_STATUS_SUCCESS; 5928 } 5929 5930