/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#include "dp_hist.h"
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <dp_swlm.h>
#endif

/* Flag to skip CCE classify when mesh or tid override enabled */
#define DP_TX_SKIP_CCE_CLASSIFY \
	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)

/* TODO: Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject */
#define DP_INVALID_PEER 0XFFFE

/* mapping between hal encrypt type and cdp_sec_type */
#define MAX_CDP_SEC_TYPE 12
static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
	HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
	HAL_TX_ENCRYPT_TYPE_WEP_128,
	HAL_TX_ENCRYPT_TYPE_WEP_104,
	HAL_TX_ENCRYPT_TYPE_WEP_40,
	HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
	HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
	HAL_TX_ENCRYPT_TYPE_WAPI,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
	HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};

#ifdef QCA_TX_LIMIT_CHECK
/**
 * dp_tx_limit_check - Check if allocated tx descriptors reached
 * soc max limit and pdev max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
			soc->num_tx_allowed) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: queued packets are more than max tx, drop the frame",
			  __func__);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return true;
	}

	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_tx_allowed) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: queued packets are more than max tx, drop the frame",
			  __func__);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return true;
	}
	return false;
}

/**
 * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
 * reached soc max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_exception) >=
			soc->num_msdu_exception_desc) {
		dp_info("exc packets are more than max drop the exc pkt");
		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
		return true;
	}

	return false;
}

/**
 * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_inc(&pdev->num_tx_outstanding);
	qdf_atomic_inc(&soc->num_tx_outstanding);
}

/**
 * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_dec(&pdev->num_tx_outstanding);
	qdf_atomic_dec(&soc->num_tx_outstanding);
}

#else //QCA_TX_LIMIT_CHECK
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	return false;
}

static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	return false;
}

static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	qdf_atomic_inc(&pdev->num_tx_outstanding);
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	qdf_atomic_dec(&pdev->num_tx_outstanding);
}
#endif //QCA_TX_LIMIT_CHECK
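/*
 * Usage sketch (illustrative, not part of the driver): the limit check and
 * the outstanding counters bracket the lifetime of a software Tx descriptor,
 * mirroring what dp_tx_prepare_desc_single() below does on allocation and
 * what dp_tx_desc_release() does on free:
 *
 *	if (dp_tx_limit_check(vdev))
 *		return NULL;
 *	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *	if (tx_desc)
 *		dp_tx_outstanding_inc(pdev);
 *	...
 *	dp_tx_outstanding_dec(pdev);	/. in dp_tx_desc_release() ./
 */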
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc: core txrx main context
 * @seg_desc: tso segment descriptor
 * @num_seg_desc: tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	TSO_DEBUG("%s: Unmap the tso segment", __func__);
	if (qdf_unlikely(!seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else {
		bool is_last_seg;
		/* no tso segment left to do dma unmap */
		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
			return;

		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
			      true : false;
		qdf_nbuf_unmap_tso_segment(soc->osdev,
					   seg_desc, is_last_seg);
		num_seg_desc->num_seg.tso_cmn_num_seg--;
	}
}

/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 *
 * @soc: soc device handle
 * @tx_desc: Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(!tx_desc->tso_desc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d TSO desc is NULL!",
			  __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d TSO num desc is NULL!",
			  __func__, __LINE__);
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;

		/* Add the tso num segment into the free list */
		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					    tx_desc->tso_num_desc);
			tx_desc->tso_num_desc = NULL;
			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
		}

		/* Add the tso segment into the free list */
		dp_tx_tso_desc_free(soc,
				    tx_desc->pool_id, tx_desc->tso_desc);
		tx_desc->tso_desc = NULL;
	}
}
#else
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
}
#endif
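/*
 * Bookkeeping sketch (illustrative): each TSO nbuf carries one
 * qdf_tso_num_seg_elem_t whose tso_cmn_num_seg counts the segments still
 * mapped, plus one qdf_tso_seg_elem_t per segment. The unmap above runs once
 * per segment; the last call passes is_last_seg so the shared mapping can be
 * torn down:
 *
 *	3 segments -> unmap(seg0), unmap(seg1), unmap(seg2, is_last_seg=true)
 *	tso_cmn_num_seg: 3 -> 2 -> 1 -> 0 (num-seg desc freed once it hits 0)
 */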
/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc: Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: None
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	struct dp_soc *soc;
	uint8_t comp_status = 0;

	qdf_assert(pdev);

	soc = pdev->soc;

	dp_tx_outstanding_dec(pdev);

	if (tx_desc->frm_type == dp_tx_frm_tso)
		dp_tx_tso_desc_release(soc, tx_desc);

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&soc->num_tx_exception);

	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
	    hal_tx_comp_get_buffer_source(&tx_desc->comp))
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
							     soc->hal_soc);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "Tx Completion Release desc %d status %d outstanding %d",
		  tx_desc->id, comp_status,
		  qdf_atomic_read(&pdev->num_tx_outstanding));

	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
	return;
}

/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					  struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t *meta_data = msdu_info->meta_data;
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
				(struct htt_tx_msdu_desc_ext2_t *)meta_data;

	uint8_t htt_desc_size;

	/* Size rounded up to a multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;

	uint8_t *hdr = NULL;

	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
							   meta_data[0])) {
		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
				 htt_desc_size_aligned)) {
			nbuf = qdf_nbuf_realloc_headroom(nbuf,
							 htt_desc_size_aligned);
			if (!nbuf) {
				/*
				 * qdf_nbuf_realloc_headroom won't do skb_clone
				 * as skb_realloc_headroom does. So, no free is
				 * needed here.
				 */
				DP_STATS_INC(vdev,
					     tx_i.dropped.headroom_insufficient,
					     1);
				qdf_print(" %s[%d] skb_realloc_headroom failed",
					  __func__, __LINE__);
				return 0;
			}
		}
		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (!hdr) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Error in filling HTT metadata");

			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);

	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* Todo - Add support for DSRC */
	}

	return htt_desc_size_aligned;
}
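/*
 * Worked example (illustrative): the 8-byte round-up used above is
 * (size + 7) & ~0x7, so e.g.
 *
 *	htt_desc_size = 18  ->  htt_desc_size_aligned = 24
 *	htt_desc_size = 24  ->  htt_desc_size_aligned = 24
 *
 * The aligned size is pushed onto the nbuf but only htt_desc_size bytes of
 * metadata are copied; the trailing pad bytes are left as-is.
 */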
/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;

	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags = *(uint32_t *) &tso_seg->tso_flags;

	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
					tso_seg->tso_flags.ip_len);

	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
		uint32_t lo = 0;
		uint32_t hi = 0;

		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
				  (tso_seg->tso_frags[num_frag].length));

		qdf_dmaaddr_to_32s(
			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
					   tso_seg->tso_frags[num_frag].length);
	}

	return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	return;
}
#endif

#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_desc_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 *
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_num_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_num_seg_elem_t *free_num_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_num_seg_elem_t *next_num_seg;

	while (free_num_seg) {
		next_num_seg = free_num_seg->next;
		dp_tso_num_seg_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_num_seg);
		free_num_seg = next_num_seg;
	}
}

/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
 *                              do dma unmap for each segment
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return: void
 */
static void dp_tx_unmap_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	struct qdf_tso_seg_elem_t *next_seg;

	if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
		return;
	}

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
		free_seg = next_seg;
	}
}

#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx() - Retrieve the tso packet id
 * @pdev: pdev handle
 *
 * Return: id
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	uint32_t stats_idx;

	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
		     % CDP_MAX_TSO_PACKETS);
	return stats_idx;
}
#else
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	return 0;
}
#endif /* FEATURE_TSO_STATS */
/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *                                   free the tso segments descriptor and
 *                                   tso num segments descriptor
 *
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return: void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
					  struct dp_tx_msdu_info_s *msdu_info,
					  bool tso_seg_unmap)
{
	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
	struct qdf_tso_num_seg_elem_t *tso_num_desc =
					      tso_info->tso_num_seg_list;

	/* do dma unmap for each segment */
	if (tso_seg_unmap)
		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

	/* free all tso number segment descriptors (the list holds only 1) */
	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

	/* free all tso segment descriptors */
	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}

/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *tso_seg;
	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_pdev *pdev = vdev->pdev;
	struct qdf_tso_info_t *tso_info;
	struct qdf_tso_num_seg_elem_t *tso_num_seg;

	tso_info = &msdu_info->u.tso_info;
	tso_info->curr_seg = NULL;
	tso_info->tso_seg_list = NULL;
	tso_info->num_segs = num_seg;
	msdu_info->frm_type = dp_tx_frm_tso;
	tso_info->tso_num_seg_list = NULL;

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	while (num_seg) {
		tso_seg = dp_tx_tso_desc_alloc(
				soc, msdu_info->tx_queue.desc_pool_id);
		if (tso_seg) {
			tso_seg->next = tso_info->tso_seg_list;
			tso_info->tso_seg_list = tso_seg;
			num_seg--;
		} else {
			dp_err_rl("Failed to alloc tso seg desc");
			DP_STATS_INC_PKT(vdev->pdev,
					 tso_stats.tso_no_mem_dropped, 1,
					 qdf_nbuf_len(msdu));
			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);

			return QDF_STATUS_E_NOMEM;
		}
	}

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	tso_num_seg = dp_tso_num_seg_alloc(soc,
					   msdu_info->tx_queue.desc_pool_id);

	if (tso_num_seg) {
		tso_num_seg->next = tso_info->tso_num_seg_list;
		tso_info->tso_num_seg_list = tso_num_seg;
	} else {
		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
			 __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);

		return QDF_STATUS_E_NOMEM;
	}

	msdu_info->num_seg =
		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
		  msdu_info->num_seg);

	if (!(msdu_info->num_seg)) {
		/*
		 * Free allocated TSO seg desc and number seg desc,
		 * do unmap for segments if DMA map has been done.
		 */
		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);

		return QDF_STATUS_E_INVAL;
	}

	tso_info->curr_seg = tso_info->tso_seg_list;

	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
			     msdu, msdu_info->num_seg);
	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
				    tso_info->msdu_stats_idx);
	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	return QDF_STATUS_E_NOMEM;
}
#endif
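/*
 * Flow sketch (illustrative) for a 3-segment jumbo msdu:
 *
 *	dp_tx_prepare_tso()
 *	  -> alloc 3 x qdf_tso_seg_elem_t  (pushed onto tso_seg_list)
 *	  -> alloc 1 x qdf_tso_num_seg_elem_t
 *	  -> qdf_nbuf_get_tso_info()       (DMA maps frags, fills segments)
 *	  -> curr_seg = head of tso_seg_list
 *
 * On any allocation failure the partial lists are torn down with
 * dp_tx_free_remaining_tso_desc(..., false); after a failed
 * qdf_nbuf_get_tso_info() the teardown passes true so already-mapped
 * segments are also DMA-unmapped.
 */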
QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
			 sizeof(struct htt_tx_msdu_desc_ext2_t)));

/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success, NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
	uint8_t i;
	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
	struct dp_tx_seg_info_s *seg_info;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_soc *soc = vdev->pdev->soc;

	/* Allocate an extension descriptor */
	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

	if (!msdu_ext_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	if (msdu_info->exception_fw &&
	    qdf_unlikely(vdev->mesh_vdev)) {
		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
			     &msdu_info->meta_data[0],
			     sizeof(struct htt_tx_msdu_desc_ext2_t));
		qdf_atomic_inc(&soc->num_tx_exception);
		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
	}

	switch (msdu_info->frm_type) {
	case dp_tx_frm_sg:
	case dp_tx_frm_me:
	case dp_tx_frm_raw:
		seg_info = msdu_info->u.sg_info.curr_seg;
		/* Update the buffer pointers in MSDU Extension Descriptor */
		for (i = 0; i < seg_info->frag_cnt; i++) {
			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
				seg_info->frags[i].paddr_lo,
				seg_info->frags[i].paddr_hi,
				seg_info->frags[i].len);
		}

		break;

	case dp_tx_frm_tso:
		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
					   &cached_ext_desc[0]);
		break;

	default:
		break;
	}

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

	hal_tx_ext_desc_sync(&cached_ext_desc[0],
			     msdu_ext_desc->vaddr);

	return msdu_ext_desc;
}
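/*
 * Design note (illustrative): the extension descriptor is first composed in
 * cached_ext_desc[] on the stack and only then copied out with
 * hal_tx_ext_desc_sync() to the descriptor's DMA-able memory at
 * msdu_ext_desc->vaddr. Building in cached stack memory avoids repeated
 * writes to uncached descriptor memory while the fields are being filled.
 */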
/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 *
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
}
#else
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
	DPTRACE(qdf_dp_trace_ptr(skb,
				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(skb),
				 sizeof(qdf_nbuf_data(skb)),
				 msdu_id, vdev_id));

	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
				      msdu_id, QDF_TX));
}
#endif
/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: MSDU info with metadata for the fw
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
		struct dp_tx_msdu_info_s *msdu_info,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	uint8_t align_pad;
	uint8_t is_exception = 0;
	uint8_t htt_hdr_size;
	struct dp_tx_desc_s *tx_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (dp_tx_limit_check(vdev))
		return NULL;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = ((tx_exc_metadata &&
		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
	tx_desc->vdev_id = vdev->vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->msdu_ext_desc = NULL;
	tx_desc->pkt_offset = 0;

	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

	if (qdf_unlikely(vdev->multipass_en)) {
		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
			goto failure;
	}

	/*
	 * For special modes (vdev_type == ocb or mesh), data frames should be
	 * transmitted using varying transmit parameters (tx spec) which
	 * include transmit rate, power, priority, channel, channel bandwidth,
	 * nss etc. These are filled in HTT MSDU descriptor and sent in frame
	 * pre-header. These frames are sent as exception packets to firmware.
	 *
	 * HW requirement is that metadata should always point to a
	 * 8-byte aligned address. So we add alignment pad to start of buffer.
	 * HTT Metadata should be ensured to be multiple of 8-bytes,
	 *  to get 8-byte aligned start address along with align_pad added
	 *
	 *  |-----------------------------|
	 *  |                             |
	 *  |-----------------------------| <-----Buffer Pointer Address given
	 *  |                             |  ^    in HW descriptor (aligned)
	 *  |       HTT Metadata          |  |
	 *  |                             |  |
	 *  |                             |  | Packet Offset given in descriptor
	 *  |                             |  |
	 *  |-----------------------------|  |
	 *  |       Alignment Pad         |  v
	 *  |-----------------------------| <----- Actual buffer start address
	 *  |        SKB Data             |           (Unaligned)
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |-----------------------------|
	 */
	if (qdf_unlikely((msdu_info->exception_fw)) ||
	    (vdev->opmode == wlan_op_mode_ocb) ||
	    (tx_exc_metadata &&
	     tx_exc_metadata->is_tx_sniffer)) {
		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;

		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
			DP_STATS_INC(vdev,
				     tx_i.dropped.headroom_insufficient, 1);
			goto failure;
		}

		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "qdf_nbuf_push_head failed");
			goto failure;
		}

		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
							  msdu_info);
		if (htt_hdr_size == 0)
			goto failure;
		tx_desc->pkt_offset = align_pad + htt_hdr_size;
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		is_exception = 1;
	}

#if !TQM_BYPASS_WAR
	if (is_exception || tx_exc_metadata)
#endif
	{
		/* Temporary WAR due to TQM VP issues */
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		qdf_atomic_inc(&soc->num_tx_exception);
	}

	return tx_desc;

failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}
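/*
 * Worked example (illustrative) of the exception-path layout above: if
 * qdf_nbuf_data(nbuf) is 0x...1005, then align_pad = 0x1005 & 0x7 = 5, and
 * pushing 5 bytes moves the head to 0x...1000 (8-byte aligned). With a
 * 24-byte aligned HTT metadata pushed next, the HW buffer pointer lands at
 * 0x...0FE8 and pkt_offset = 5 + 24 = 29 tells the target where the
 * original frame starts.
 */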
/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
 *                        frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (dp_tx_limit_check(vdev))
		return NULL;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (!tx_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = msdu_info->frm_type;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev_id = vdev->vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;
	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

	/* Handle scattered frames - TSO/SG/ME */
	/* Allocate and prepare an extension descriptor for scattered frames */
	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
	if (!msdu_ext_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s Tx Extension Descriptor Alloc Fail",
			  __func__);
		goto failure;
	}

#if TQM_BYPASS_WAR
	/* Temporary WAR due to TQM VP issues */
	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
	qdf_atomic_inc(&soc->num_tx_exception);
#endif
	if (qdf_unlikely(msdu_info->exception_fw))
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

	tx_desc->msdu_ext_desc = msdu_ext_desc;
	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

	return tx_desc;
failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}
/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 *
 * Return: nbuf on success, NULL on failure
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	qdf_nbuf_t curr_nbuf = NULL;
	uint16_t total_len = 0;
	qdf_dma_addr_t paddr;
	int32_t i;
	int32_t mapped_buf_num = 0;

	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;

	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

	/* Continue only if frames are of DATA type */
	if (!DP_FRAME_IS_DATA(qos_wh)) {
		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "Pkt recd is not of data type");
		goto error;
	}
	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
	if (vdev->raw_mode_war &&
	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;

	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
	     curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
		if (QDF_STATUS_SUCCESS !=
			qdf_nbuf_map_nbytes_single(vdev->osdev,
						   curr_nbuf,
						   QDF_DMA_TO_DEVICE,
						   curr_nbuf->len)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s dma map error ", __func__);
			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
			mapped_buf_num = i;
			goto error;
		}

		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
		seg_info->frags[i].paddr_lo = paddr;
		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
		seg_info->frags[i].vaddr = (void *) curr_nbuf;
		total_len += qdf_nbuf_len(curr_nbuf);
	}

	seg_info->frag_cnt = i;
	seg_info->total_len = total_len;
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_raw;
	msdu_info->num_seg = 1;

	return nbuf;

error:
	i = 0;
	while (nbuf) {
		curr_nbuf = nbuf;
		if (i < mapped_buf_num) {
			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
						     QDF_DMA_TO_DEVICE,
						     curr_nbuf->len);
			i++;
		}
		nbuf = qdf_nbuf_next(nbuf);
		qdf_nbuf_free(curr_nbuf);
	}
	return NULL;
}
/**
 * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
 * @soc: DP soc handle
 * @nbuf: Buffer pointer
 *
 * unmap the chain of nbufs that belong to this RAW frame.
 *
 * Return: None
 */
static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
				    qdf_nbuf_t nbuf)
{
	qdf_nbuf_t cur_nbuf = nbuf;

	do {
		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
					     QDF_DMA_TO_DEVICE,
					     cur_nbuf->len);
		cur_nbuf = qdf_nbuf_next(cur_nbuf);
	} while (cur_nbuf);
}

#ifdef VDEV_PEER_PROTOCOL_COUNT
#define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, nbuf) \
{ \
	qdf_nbuf_t nbuf_local; \
	struct dp_vdev *vdev_local = vdev_hdl; \
	do { \
		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
			break; \
		nbuf_local = nbuf; \
		if (qdf_unlikely(((vdev_local)->tx_encap_type) == \
				 htt_cmn_pkt_type_raw)) \
			break; \
		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local)))) \
			break; \
		else if (qdf_nbuf_is_tso((nbuf_local))) \
			break; \
		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
						       (nbuf_local), \
						       NULL, 1, 0); \
	} while (0); \
}
#else
#define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, skb)
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_tx_update_stats() - Update soc level tx stats
 * @soc: DP soc handle
 * @nbuf: packet being transmitted
 *
 * Return: none
 */
static inline void dp_tx_update_stats(struct dp_soc *soc,
				      qdf_nbuf_t nbuf)
{
	DP_STATS_INC_PKT(soc, tx.egress, 1, qdf_nbuf_len(nbuf));
}

/**
 * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
 * @soc: Datapath soc handle
 * @vdev: DP vdev handle
 * @tx_desc: tx packet descriptor
 * @tid: TID for pkt transmission
 *
 * Return: 1, if coalescing is to be done
 *         0, if coalescing is not to be done
 */
static inline int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid)
{
	struct dp_swlm *swlm = &soc->swlm;
	union swlm_data swlm_query_data;
	struct dp_swlm_tcl_data tcl_data;
	QDF_STATUS status;
	int ret;

	if (qdf_unlikely(!swlm->is_enabled))
		return 0;

	tcl_data.nbuf = tx_desc->nbuf;
	tcl_data.tid = tid;
	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
	swlm_query_data.tcl_data = &tcl_data;

	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_swlm_tcl_reset_session_data(soc);
		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
		return 0;
	}

	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
	if (ret) {
		DP_STATS_INC(swlm, tcl.coalesce_success, 1);
	} else {
		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
	}

	return ret;
}

/**
 * dp_tx_ring_access_end() - HAL ring access end for data transmission
 * @soc: Datapath soc handle
 * @hal_ring_hdl: HAL ring handle
 * @coalesce: Coalesce the current write or not
 *
 * Return: none
 */
static inline void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce)
{
	if (coalesce)
		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
	else
		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}

#else
static inline void dp_tx_update_stats(struct dp_soc *soc,
				      qdf_nbuf_t nbuf)
{
}

static inline int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid)
{
	return 0;
}

static inline void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce)
{
	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}

#endif
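/*
 * Usage sketch (illustrative), as in dp_tx_hw_enqueue() below: when SWLM
 * decides to coalesce, the TCL head-pointer update is deferred so several
 * descriptor writes can be pushed to HW with a single register write:
 *
 *	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid);
 *	...
 *	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
 */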
/**
 * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
 * @soc: DP Soc Handle
 * @vdev: DP vdev handle
 * @tx_desc: Tx Descriptor Handle
 * @tid: TID from HLOS for overriding default DSCP-TID mapping
 * @fw_metadata: Metadata to send to Target Firmware along with frame
 * @ring_id: Ring ID of H/W ring to which we enqueue the packet
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Gets the next free TCL HW DMA descriptor and sets up required parameters
 * from software Tx descriptor
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
				   uint16_t fw_metadata, uint8_t ring_id,
				   struct cdp_tx_exception_metadata
					*tx_exc_metadata)
{
	uint8_t type;
	void *hal_tx_desc;
	uint32_t *hal_tx_desc_cached;
	int coalesce = 0;

	/*
	 * Initialize the descriptor statically here to avoid a jump to
	 * memset() via the qdf_mem_set() call
	 */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };

	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
			tx_exc_metadata->sec_type : vdev->sec_type);

	/* Return Buffer Manager ID */
	uint8_t bm_id = dp_tx_get_rbm_id(soc, ring_id);

	hal_ring_handle_t hal_ring_hdl = NULL;

	QDF_STATUS status = QDF_STATUS_E_RESOURCES;

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	hal_tx_desc_cached = (void *) cached_desc;

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
		type = HAL_TX_BUF_TYPE_EXT_DESC;
		tx_desc->dma_addr = tx_desc->msdu_ext_desc->paddr;

		if (tx_desc->msdu_ext_desc->flags &
		    DP_TX_EXT_DESC_FLAG_METADATA_VALID)
			tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
		else
			tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
	} else {
		tx_desc->length = qdf_nbuf_len(tx_desc->nbuf) -
					tx_desc->pkt_offset;
		type = HAL_TX_BUF_TYPE_BUFFER;
		tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
	}

	qdf_assert_always(tx_desc->dma_addr);

	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 tx_desc->dma_addr, bm_id, tx_desc->id,
				 type);
	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
				vdev->lmac_id);
	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
				    vdev->search_type);
	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
				     vdev->bss_ast_idx);
	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
					  vdev->dscp_tid_map_id);

	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
					  vdev->hal_desc_addr_search_flags);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if (vdev->csum_enabled &&
	    ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
	     || qdf_nbuf_is_tso(tx_desc->nbuf))) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
			 soc->wlan_cfg_ctx)))
		tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());

	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
			 tx_desc->length, type, (uint64_t)tx_desc->dma_addr,
			 tx_desc->pkt_offset, tx_desc->id);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RING Access Failed -- %pK",
			  __func__, __LINE__, hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		return status;
	}

	/* Sync cached descriptor with HW */

	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
	dp_tx_update_stats(soc, tx_desc->nbuf);
	status = QDF_STATUS_SUCCESS;

ring_access_fail:
	if (hif_pm_runtime_get(soc->hif_handle,
			       RTPM_ID_DW_TX_HW_ENQUEUE) == 0) {
		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
		hif_pm_runtime_put(soc->hif_handle,
				   RTPM_ID_DW_TX_HW_ENQUEUE);
	} else {
		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	return status;
}
/**
 * dp_cce_classify() - Classify the frame based on CCE rules
 * @vdev: DP vdev handle
 * @nbuf: skb
 *
 * Classify frames based on CCE rules
 *
 * Return: bool (true if classified, else false)
 */
static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	qdf_ether_header_t *eh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	qdf_nbuf_t nbuf_clone = NULL;
	qdf_dot3_qosframe_t *qos_wh = NULL;

	if (qdf_likely(vdev->skip_sw_tid_classification)) {
		/*
		 * In case of mesh packets or hlos tid override enabled,
		 * don't do any classification
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification
					& DP_TX_SKIP_CCE_CLASSIFY))
			return false;
	}

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		ether_type = eh->ether_type;
		llcHdr = (qdf_llc_t *)(nbuf->data +
					sizeof(qdf_ether_header_t));
	} else {
		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
		/* For encrypted packets don't do any classification */
		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
			return false;

		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
			if (qdf_unlikely(
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {

				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_4ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
						QDF_IEEE80211_4ADDR_HDR_LEN);
			} else {
				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_3ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
						QDF_IEEE80211_3ADDR_HDR_LEN);
			}

			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
					 && (ether_type ==
					     qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {

				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
				return true;
			}
		}

		return false;
	}

	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
		ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
				sizeof(*llcHdr));
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (qdf_unlikely(nbuf_clone)) {
			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));

			if (ether_type == htons(ETHERTYPE_VLAN)) {
				qdf_nbuf_pull_head(nbuf_clone,
						   sizeof(qdf_net_vlanhdr_t));
			}
		}
	} else {
		if (ether_type == htons(ETHERTYPE_VLAN)) {
			nbuf_clone = qdf_nbuf_clone(nbuf);
			if (qdf_unlikely(nbuf_clone)) {
				qdf_nbuf_pull_head(nbuf_clone,
						   sizeof(qdf_net_vlanhdr_t));
			}
		}
	}

	if (qdf_unlikely(nbuf_clone))
		nbuf = nbuf_clone;

	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
			 || qdf_nbuf_is_ipv4_arp_pkt(nbuf)
			 || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
			 || qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
			 || (qdf_nbuf_is_ipv4_pkt(nbuf)
			     && qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
			 || (qdf_nbuf_is_ipv6_pkt(nbuf) &&
			     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
		if (qdf_unlikely(nbuf_clone))
			qdf_nbuf_free(nbuf_clone);
		return true;
	}

	if (qdf_unlikely(nbuf_clone))
		qdf_nbuf_free(nbuf_clone);

	return false;
}
/**
 * dp_tx_get_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: msdu descriptor in which the TID is filled
 *
 * Extract the DSCP or PCP information from the frame and map it into the TID
 * value.
 *
 * Return: void
 */
static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			  struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t tos = 0, dscp_tid_override = 0;
	uint8_t *hdr_ptr, *L3datap;
	uint8_t is_mcast = 0;
	qdf_ether_header_t *eh = NULL;
	qdf_ethervlan_header_t *evh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (qdf_ether_header_t *)nbuf->data;
		hdr_ptr = (uint8_t *)(eh->ether_dhost);
		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
	} else {
		qdf_dot3_qosframe_t *qos_wh =
			(qdf_dot3_qosframe_t *) nbuf->data;
		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
			qos_wh->i_qos[0] & DP_QOS_TID : 0;
		return;
	}

	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
	ether_type = eh->ether_type;

	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
	/*
	 * Check if packet is dot3 or eth2 type.
	 */
	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
				sizeof(*llcHdr));

		if (ether_type == htons(ETHERTYPE_VLAN)) {
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
				sizeof(*llcHdr);
			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
					+ sizeof(*llcHdr) +
					sizeof(qdf_net_vlanhdr_t));
		} else {
			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
				sizeof(*llcHdr);
		}
	} else {
		if (ether_type == htons(ETHERTYPE_VLAN)) {
			evh = (qdf_ethervlan_header_t *) eh;
			ether_type = evh->ether_type;
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
		}
	}

	/*
	 * Find priority from IP TOS DSCP field
	 */
	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;

		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
			/* Only for unicast frames */
			if (!is_mcast) {
				/* send it on VO queue */
				msdu_info->tid = DP_VO_TID;
			}
		} else {
			/*
			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
			 * from TOS byte.
			 */
			tos = ip->ip_tos;
			dscp_tid_override = 1;
		}
	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
		/*
		 * TODO: use flowlabel
		 * igmp/mld cases to be handled in phase 2
		 */
		unsigned long ver_pri_flowlabel;
		unsigned long pri;

		ver_pri_flowlabel = *(unsigned long *) L3datap;
		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
			DP_IPV6_PRIORITY_SHIFT;
		tos = pri;
		dscp_tid_override = 1;
	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
		msdu_info->tid = DP_VO_TID;
	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
		/* Only for unicast frames */
		if (!is_mcast) {
			/* send ucast arp on VO queue */
			msdu_info->tid = DP_VO_TID;
		}
	}

	/*
	 * Assign all MCAST packets to BE
	 */
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		if (is_mcast) {
			tos = 0;
			dscp_tid_override = 1;
		}
	}

	if (dscp_tid_override == 1) {
		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
	}

	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;

	return;
}
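/*
 * Worked example (illustrative, assuming DP_IP_DSCP_SHIFT == 2 and
 * DP_IP_DSCP_MASK == 0x3f, which is what the ECN-exclusion comment above
 * implies): an IPv4 TOS byte of 0xB8 (DSCP 46, EF) yields
 *
 *	tos = (0xB8 >> 2) & 0x3f = 46
 *	msdu_info->tid = pdev->dscp_tid_map[map_id][46]
 *
 * i.e. the two ECN bits are dropped and the 6-bit DSCP indexes the per-pdev
 * DSCP-to-TID table selected by the vdev.
 */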
/**
 * dp_tx_classify_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: msdu descriptor in which the TID is filled
 *
 * Software based TID classification is required when more than 2 DSCP-TID
 * mapping tables are needed.
 * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
 *
 * Return: void
 */
static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				      struct dp_tx_msdu_info_s *msdu_info)
{
	DP_TX_TID_OVERRIDE(msdu_info, nbuf);

	/*
	 * skip_sw_tid_classification flag will be set in the below cases:
	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
	 * 2. hlos_tid_override enabled for vdev
	 * 3. mesh mode enabled for vdev
	 */
	if (qdf_likely(vdev->skip_sw_tid_classification)) {
		/* Update tid in msdu_info from skb priority */
		if (qdf_unlikely(vdev->skip_sw_tid_classification
				 & DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
			msdu_info->tid = qdf_nbuf_get_priority(nbuf);
			return;
		}
		return;
	}

	dp_tx_get_tid(vdev, nbuf, msdu_info);
}
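/*
 * Usage note (illustrative; assumes the qdf_nbuf_set_priority() counterpart
 * of qdf_nbuf_get_priority() used above): with HLOS TID override enabled,
 * the host stack chooses the TID directly via the skb priority, e.g.
 *
 *	qdf_nbuf_set_priority(nbuf, 6);	// upper layer marks voice
 *	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
 *	// msdu_info.tid == 6; no DSCP lookup is performed
 */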
#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
 * @soc: datapath SOC
 * @vdev: datapath vdev
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_tx_update_tdls_flags(struct dp_soc *soc,
				    struct dp_vdev *vdev,
				    struct dp_tx_desc_s *tx_desc)
{
	if (vdev) {
		if (vdev->is_tdls_frame) {
			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
			vdev->is_tdls_frame = false;
		}
	}
}

/**
 * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
 * @soc: dp_soc handle
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
					 struct dp_tx_desc_s *tx_desc)
{
	struct hal_tx_completion_status ts = {0};
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
						     DP_MOD_ID_TDLS);

	if (qdf_unlikely(!vdev)) {
		dp_err_rl("vdev is null!");
		goto error;
	}

	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
	if (vdev->tx_non_std_data_callback.func) {
		qdf_nbuf_set_next(nbuf, NULL);
		vdev->tx_non_std_data_callback.func(
				vdev->tx_non_std_data_callback.ctxt,
				nbuf, ts.status);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
		return;
	} else {
		dp_err_rl("callback func is null");
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
error:
	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(nbuf);
}

/**
 * dp_tx_msdu_single_map() - do nbuf map
 * @vdev: DP vdev handle
 * @tx_desc: DP TX descriptor pointer
 * @nbuf: skb pointer
 *
 * For TDLS frames, use qdf_nbuf_map_single() to align with the unmap
 * operation done in other components.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
					       struct dp_tx_desc_s *tx_desc,
					       qdf_nbuf_t nbuf)
{
	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
		return qdf_nbuf_map_nbytes_single(vdev->osdev,
						  nbuf,
						  QDF_DMA_TO_DEVICE,
						  nbuf->len);
	else
		return qdf_nbuf_map_single(vdev->osdev, nbuf,
					   QDF_DMA_TO_DEVICE);
}
#else
static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc)
{
}

static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
						struct dp_tx_desc_s *tx_desc)
{
}

static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
					       struct dp_tx_desc_s *tx_desc,
					       qdf_nbuf_t nbuf)
{
	return qdf_nbuf_map_nbytes_single(vdev->osdev,
					  nbuf,
					  QDF_DMA_TO_DEVICE,
					  nbuf->len);
}
#endif
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
 * @soc: datapath SOC
 * @vdev: datapath vdev
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc)
{
	if (qdf_unlikely(vdev->mesh_vdev))
		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
}

/**
 * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
 * @soc: dp_soc handle
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
					     struct dp_tx_desc_s *tx_desc)
{
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	struct dp_vdev *vdev = NULL;

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
	} else {
		vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
					     DP_MOD_ID_MESH);
		if (vdev && vdev->osif_tx_free_ext)
			vdev->osif_tx_free_ext((nbuf));
		else
			qdf_nbuf_free(nbuf);

		if (vdev)
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
	}
}
#else
static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc)
{
}

static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
					     struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_frame_is_drop() - checks if the packet is loopback
 * @vdev: DP vdev handle
 * @srcmac: source MAC address
 * @dstmac: destination MAC address
 *
 * Return: 1 if frame needs to be dropped else 0
 */
int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
{
	struct dp_pdev *pdev = NULL;
	struct dp_ast_entry *src_ast_entry = NULL;
	struct dp_ast_entry *dst_ast_entry = NULL;
	struct dp_soc *soc = NULL;

	qdf_assert(vdev);
	pdev = vdev->pdev;
	qdf_assert(pdev);
	soc = pdev->soc;

	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc, dstmac, vdev->pdev->pdev_id);

	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc, srcmac, vdev->pdev->pdev_id);
	if (dst_ast_entry && src_ast_entry) {
		if (dst_ast_entry->peer_id ==
			src_ast_entry->peer_id)
			return 1;
	}

	return 0;
}
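/*
 * Example (illustrative): if both the source and destination MAC resolve to
 * AST entries owned by the same peer_id, the frame would only loop back to
 * its sender (e.g. a client bridging traffic to itself through the AP), so
 * the caller drops it instead of transmitting.
 */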
/**
 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU information (TID, fw metadata, Tx queue to be used)
 * @peer_id: peer_id of the peer in case of NAWDS frames
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	QDF_STATUS status;
	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
	uint16_t htt_tcl_metadata = 0;
	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
	uint8_t tid = msdu_info->tid;
	struct cdp_tid_tx_stats *tid_stats = NULL;

	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
					    msdu_info, tx_exc_metadata);
	if (!tx_desc) {
		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
			  vdev, tx_q->desc_pool_id);
		drop_code = TX_DESC_ERR;
		goto fail_return;
	}

	if (qdf_unlikely(soc->cce_disable)) {
		if (dp_cce_classify(vdev, nbuf) == true) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			tid = DP_VO_TID;
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		}
	}

	dp_tx_update_tdls_flags(soc, vdev, tx_desc);

	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
		htt_tcl_metadata = vdev->htt_tcl_metadata;
		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
					     HTT_TCL_METADATA_TYPE_PEER_BASED);
		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
						peer_id);
	} else
		htt_tcl_metadata = vdev->htt_tcl_metadata;

	if (msdu_info->exception_fw)
		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);

	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
					 !pdev->enhanced_stats_en);

	dp_tx_update_mesh_flags(soc, vdev, tx_desc);

	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
			 dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
		/* Handle failure */
		dp_err("qdf_nbuf_map failed");
		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
		drop_code = TX_DMA_MAP_ERR;
		goto release_desc;
	}

	/* Enqueue the Tx MSDU descriptor to HW for transmit */
	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
				  htt_tcl_metadata, tx_q->ring_id,
				  tx_exc_metadata);

	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
			  __func__, tx_desc, tx_q->ring_id);
		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
					     QDF_DMA_TO_DEVICE,
					     nbuf->len);
		drop_code = TX_HW_ENQUEUE;
		goto release_desc;
	}

	return NULL;

release_desc:
	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);

fail_return:
	dp_tx_get_tid(vdev, nbuf, msdu_info);
	tid_stats = &pdev->stats.tid_stats.
		    tid_tx_stats[tx_q->ring_id][tid];
	tid_stats->swdrop_cnt[drop_code]++;
	return nbuf;
}
1972 tid_tx_stats[tx_q->ring_id][tid]; 1973 tid_stats->swdrop_cnt[drop_code]++; 1974 return nbuf; 1975 } 1976 1977 /** 1978 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs 1979 * @vdev: DP vdev handle 1980 * @nbuf: skb 1981 * @msdu_info: MSDU info to be setup in MSDU extension descriptor 1982 * 1983 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL 1984 * 1985 * Return: NULL on success, 1986 * nbuf when it fails to send 1987 */ 1988 #if QDF_LOCK_STATS 1989 noinline 1990 #else 1991 #endif 1992 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1993 struct dp_tx_msdu_info_s *msdu_info) 1994 { 1995 uint32_t i; 1996 struct dp_pdev *pdev = vdev->pdev; 1997 struct dp_soc *soc = pdev->soc; 1998 struct dp_tx_desc_s *tx_desc; 1999 bool is_cce_classified = false; 2000 QDF_STATUS status; 2001 uint16_t htt_tcl_metadata = 0; 2002 struct dp_tx_queue *tx_q = &msdu_info->tx_queue; 2003 struct cdp_tid_tx_stats *tid_stats = NULL; 2004 2005 if (qdf_unlikely(soc->cce_disable)) { 2006 is_cce_classified = dp_cce_classify(vdev, nbuf); 2007 if (is_cce_classified) { 2008 DP_STATS_INC(vdev, tx_i.cce_classified, 1); 2009 msdu_info->tid = DP_VO_TID; 2010 } 2011 } 2012 2013 if (msdu_info->frm_type == dp_tx_frm_me) 2014 nbuf = msdu_info->u.sg_info.curr_seg->nbuf; 2015 2016 i = 0; 2017 /* Print statement to track i and num_seg */ 2018 /* 2019 * For each segment (maps to 1 MSDU) , prepare software and hardware 2020 * descriptors using information in msdu_info 2021 */ 2022 while (i < msdu_info->num_seg) { 2023 /* 2024 * Setup Tx descriptor for an MSDU, and MSDU extension 2025 * descriptor 2026 */ 2027 tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info, 2028 tx_q->desc_pool_id); 2029 2030 if (!tx_desc) { 2031 if (msdu_info->frm_type == dp_tx_frm_me) { 2032 dp_tx_me_free_buf(pdev, 2033 (void *)(msdu_info->u.sg_info 2034 .curr_seg->frags[0].vaddr)); 2035 i++; 2036 continue; 2037 } 2038 goto done; 2039 } 2040 2041 if (msdu_info->frm_type == dp_tx_frm_me) { 2042 tx_desc->me_buffer = 2043 msdu_info->u.sg_info.curr_seg->frags[0].vaddr; 2044 tx_desc->flags |= DP_TX_DESC_FLAG_ME; 2045 } 2046 2047 if (is_cce_classified) 2048 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; 2049 2050 htt_tcl_metadata = vdev->htt_tcl_metadata; 2051 if (msdu_info->exception_fw) { 2052 HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1); 2053 } 2054 2055 /* 2056 * Enqueue the Tx MSDU descriptor to HW for transmit 2057 */ 2058 status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid, 2059 htt_tcl_metadata, tx_q->ring_id, NULL); 2060 2061 if (status != QDF_STATUS_SUCCESS) { 2062 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2063 "%s Tx_hw_enqueue Fail tx_desc %pK queue %d", 2064 __func__, tx_desc, tx_q->ring_id); 2065 2066 dp_tx_get_tid(vdev, nbuf, msdu_info); 2067 tid_stats = &pdev->stats.tid_stats. 2068 tid_tx_stats[tx_q->ring_id][msdu_info->tid]; 2069 tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++; 2070 2071 dp_tx_desc_release(tx_desc, tx_q->desc_pool_id); 2072 if (msdu_info->frm_type == dp_tx_frm_me) { 2073 i++; 2074 continue; 2075 } 2076 goto done; 2077 } 2078 2079 /* 2080 * TODO 2081 * if tso_info structure can be modified to have curr_seg 2082 * as first element, following 2 blocks of code (for TSO and SG) 2083 * can be combined into 1 2084 */ 2085 2086 /* 2087 * For frames with multiple segments (TSO, ME), jump to next 2088 * segment. 
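 * For TSO the same jumbo nbuf backs every segment, so only the segment
 * pointer advances and the nbuf user count is bumped per extra segment;
 * for SG/ME the nbuf itself is switched to the next segment's buffer.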
2089 */
2090 if (msdu_info->frm_type == dp_tx_frm_tso) {
2091 if (msdu_info->u.tso_info.curr_seg->next) {
2092 msdu_info->u.tso_info.curr_seg =
2093 msdu_info->u.tso_info.curr_seg->next;
2094
2095 /*
2096 * If this is a jumbo nbuf, then increment the number of
2097 * nbuf users for each additional segment of the msdu.
2098 * This will ensure that the skb is freed only after
2099 * receiving tx completion for all segments of an nbuf
2100 */
2101 qdf_nbuf_inc_users(nbuf);
2102
2103 /* Check with MCL if this is needed */
2104 /* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
2105 }
2106 }
2107
2108 /*
2109 * For Multicast-Unicast converted packets,
2110 * each converted frame (for a client) is represented as
2111 * 1 segment
2112 */
2113 if ((msdu_info->frm_type == dp_tx_frm_sg) ||
2114 (msdu_info->frm_type == dp_tx_frm_me)) {
2115 if (msdu_info->u.sg_info.curr_seg->next) {
2116 msdu_info->u.sg_info.curr_seg =
2117 msdu_info->u.sg_info.curr_seg->next;
2118 nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
2119 }
2120 }
2121 i++;
2122 }
2123
2124 nbuf = NULL;
2125
2126 done:
2127 return nbuf;
2128 }
2129
2130 /**
2131 * dp_tx_prepare_sg() - Extract SG info from NBUF and prepare msdu_info
2132 * for SG frames
2133 * @vdev: DP vdev handle
2134 * @nbuf: skb
2135 * @seg_info: Pointer to Segment info Descriptor to be prepared
2136 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2137 *
2138 * Return: nbuf on success,
2139 * NULL on failure (the nbuf is freed)
2140 */
2141 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2142 struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
2143 {
2144 uint32_t cur_frag, nr_frags, i;
2145 qdf_dma_addr_t paddr;
2146 struct dp_tx_sg_info_s *sg_info;
2147
2148 sg_info = &msdu_info->u.sg_info;
2149 nr_frags = qdf_nbuf_get_nr_frags(nbuf);
2150
2151 if (QDF_STATUS_SUCCESS !=
2152 qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
2153 QDF_DMA_TO_DEVICE,
2154 qdf_nbuf_headlen(nbuf))) {
2155 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2156 "dma map error");
2157 DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2158
2159 qdf_nbuf_free(nbuf);
2160 return NULL;
2161 }
2162
2163 paddr = qdf_nbuf_mapped_paddr_get(nbuf);
2164 seg_info->frags[0].paddr_lo = paddr;
2165 seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
2166 seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
2167 seg_info->frags[0].vaddr = (void *) nbuf;
2168
2169 for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
2170 if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
2171 nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
2172 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2173 "frag dma map error");
2174 DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
2175 goto map_err;
2176 }
2177
2178 paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
2179 seg_info->frags[cur_frag + 1].paddr_lo = paddr;
2180 seg_info->frags[cur_frag + 1].paddr_hi =
2181 ((uint64_t) paddr) >> 32;
2182 seg_info->frags[cur_frag + 1].len =
2183 qdf_nbuf_get_frag_size(nbuf, cur_frag);
2184 }
2185
2186 seg_info->frag_cnt = (cur_frag + 1);
2187 seg_info->total_len = qdf_nbuf_len(nbuf);
2188 seg_info->next = NULL;
2189
2190 sg_info->curr_seg = seg_info;
2191
2192 msdu_info->frm_type = dp_tx_frm_sg;
2193 msdu_info->num_seg = 1;
2194
2195 return nbuf;
2196 map_err:
2197 /* restore paddr into nbuf before calling unmap */
2198 qdf_nbuf_mapped_paddr_set(nbuf,
2199 (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
2200 ((uint64_t)
2201 seg_info->frags[0].paddr_hi) << 32));
2202
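/*
 * Unwind the mappings in order: first the head buffer that was mapped
 * with qdf_nbuf_map_nbytes_single(), then every page fragment that was
 * successfully mapped before the failing one.
 */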
qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
2203 QDF_DMA_TO_DEVICE,
2204 seg_info->frags[0].len);
2205 for (i = 1; i <= cur_frag; i++) {
2206 qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
2207 (seg_info->frags[i].paddr_lo | ((uint64_t)
2208 seg_info->frags[i].paddr_hi) << 32),
2209 seg_info->frags[i].len,
2210 QDF_DMA_TO_DEVICE);
2211 }
2212 qdf_nbuf_free(nbuf);
2213 return NULL;
2214 }
2215
2216 /**
2217 * dp_tx_add_tx_sniffer_meta_data() - Add tx_sniffer meta hdr info
2218 * @vdev: DP vdev handle
2219 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2220 * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
2221 *
2222 * Return: None
2223 */
2225 static
2226 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
2227 struct dp_tx_msdu_info_s *msdu_info,
2228 uint16_t ppdu_cookie)
2229 {
2230 struct htt_tx_msdu_desc_ext2_t *meta_data =
2231 (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2232
2233 qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2234
2235 HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
2236 (msdu_info->meta_data[5], 1);
2237 HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
2238 (msdu_info->meta_data[5], 1);
2239 HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
2240 (msdu_info->meta_data[6], ppdu_cookie);
2241
2242 msdu_info->exception_fw = 1;
2243 msdu_info->is_tx_sniffer = 1;
2244 }
2245
2246 #ifdef MESH_MODE_SUPPORT
2247
2248 /**
2249 * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
2250 * and prepare msdu_info for mesh frames.
2251 * @vdev: DP vdev handle
2252 * @nbuf: skb
2253 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
2254 *
2255 * Return: NULL on failure,
2256 * nbuf when extracted successfully
2257 */
2258 static
2259 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2260 struct dp_tx_msdu_info_s *msdu_info)
2261 {
2262 struct meta_hdr_s *mhdr;
2263 struct htt_tx_msdu_desc_ext2_t *meta_data =
2264 (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
2265
2266 mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
2267
2268 if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
2269 msdu_info->exception_fw = 0;
2270 goto remove_meta_hdr;
2271 }
2272
2273 msdu_info->exception_fw = 1;
2274
2275 qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
2276
2277 meta_data->host_tx_desc_pool = 1;
2278 meta_data->update_peer_cache = 1;
2279 meta_data->learning_frame = 1;
2280
2281 if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
2282 meta_data->power = mhdr->power;
2283
2284 meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
2285 meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
2286 meta_data->pream_type = mhdr->rate_info[0].preamble_type;
2287 meta_data->retry_limit = mhdr->rate_info[0].max_tries;
2288
2289 meta_data->dyn_bw = 1;
2290
2291 meta_data->valid_pwr = 1;
2292 meta_data->valid_mcs_mask = 1;
2293 meta_data->valid_nss_mask = 1;
2294 meta_data->valid_preamble_type = 1;
2295 meta_data->valid_retries = 1;
2296 meta_data->valid_bw_info = 1;
2297 }
2298
2299 if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
2300 meta_data->encrypt_type = 0;
2301 meta_data->valid_encrypt_type = 1;
2302 meta_data->learning_frame = 0;
2303 }
2304
2305 meta_data->valid_key_flags = 1;
2306 meta_data->key_flags = (mhdr->keyix & 0x3);
2307
2308 remove_meta_hdr:
2309 if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
2310
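/* the nbuf is too short to strip the mesh meta header; drop the frame */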
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2311 "qdf_nbuf_pull_head failed");
2312 qdf_nbuf_free(nbuf);
2313 return NULL;
2314 }
2315
2316 msdu_info->tid = qdf_nbuf_get_priority(nbuf);
2317
2318 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2319 "%s, Meta hdr %0x %0x %0x %0x %0x %0x"
2320 " tid %d to_fw %d",
2321 __func__, msdu_info->meta_data[0],
2322 msdu_info->meta_data[1],
2323 msdu_info->meta_data[2],
2324 msdu_info->meta_data[3],
2325 msdu_info->meta_data[4],
2326 msdu_info->meta_data[5],
2327 msdu_info->tid, msdu_info->exception_fw);
2328
2329 return nbuf;
2330 }
2331 #else
2332 static
2333 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
2334 struct dp_tx_msdu_info_s *msdu_info)
2335 {
2336 return nbuf;
2337 }
2338
2339 #endif
2340
2341 /**
2342 * dp_check_exc_metadata() - Checks if parameters are valid
2343 * @tx_exc: holds all exception path parameters
2344 *
2345 * Return: true when all the parameters are valid, else false
2346 *
2347 */
2348 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
2349 {
2350 bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
2351 HTT_INVALID_TID);
2352 bool invalid_encap_type =
2353 (tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
2354 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
2355 bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
2356 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
2357 bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
2358 tx_exc->ppdu_cookie == 0);
2359
2360 if (invalid_tid || invalid_encap_type || invalid_sec_type ||
2361 invalid_cookie) {
2362 return false;
2363 }
2364
2365 return true;
2366 }
2367
2368 /**
2369 * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
2370 * @nbuf: qdf_nbuf_t
2371 * @vdev: struct dp_vdev *
2372 *
2373 * Allow the packet to be processed only if its destination peer is
2374 * connected to this same vap. Drop the packet if the peer is connected to a
2375 * different vap.
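 * Multicast and broadcast destinations are always allowed; unicast
 * destinations require a matching AST entry on this vdev. A sketch of the
 * intended caller pattern, as used by dp_tx_send_vdev_id_check() below:
 *
 *	if (dp_tx_per_pkt_vdev_id_check(nbuf, vdev) == QDF_STATUS_E_FAILURE) {
 *		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
 *		return nbuf;
 *	}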
2376 *
2377 * Return: QDF_STATUS
2378 */
2379 static inline QDF_STATUS
2380 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
2381 {
2382 struct dp_ast_entry *dst_ast_entry = NULL;
2383 qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2384
2385 if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
2386 DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
2387 return QDF_STATUS_SUCCESS;
2388
2389 qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
2390 dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
2391 eh->ether_dhost,
2392 vdev->vdev_id);
2393
2394 /* If there is no ast entry, return failure */
2395 if (qdf_unlikely(!dst_ast_entry)) {
2396 qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
2397 return QDF_STATUS_E_FAILURE;
2398 }
2399 qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
2400
2401 return QDF_STATUS_SUCCESS;
2402 }
2403
2404 /**
2405 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
2406 * @soc_hdl: DP soc handle
2407 * @vdev_id: id of DP vdev handle
2408 * @nbuf: skb
2409 * @tx_exc_metadata: Handle that holds exception path meta data
2410 *
2411 * Entry point for Core Tx layer (DP_TX) invoked from
2412 * hard_start_xmit in OSIF/HDD to transmit frames through fw
2413 *
2414 * Return: NULL on success,
2415 * nbuf when it fails to send
2416 */
2417 qdf_nbuf_t
2418 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2419 qdf_nbuf_t nbuf,
2420 struct cdp_tx_exception_metadata *tx_exc_metadata)
2421 {
2422 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2423 qdf_ether_header_t *eh = NULL;
2424 struct dp_tx_msdu_info_s msdu_info;
2425 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2426 DP_MOD_ID_TX_EXCEPTION);
2427
2428 if (qdf_unlikely(!vdev))
2429 goto fail;
2430
2431 qdf_mem_zero(&msdu_info, sizeof(msdu_info));
2432
2433 if (!tx_exc_metadata)
2434 goto fail;
2435
2436 msdu_info.tid = tx_exc_metadata->tid;
2437
2438 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2439 dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
2440 QDF_MAC_ADDR_REF(nbuf->data));
2441
2442 DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2443
2444 if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
2445 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2446 "Invalid parameters in exception path");
2447 goto fail;
2448 }
2449
2450 /* Basic sanity checks for unsupported packets */
2451
2452 /* MESH mode */
2453 if (qdf_unlikely(vdev->mesh_vdev)) {
2454 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2455 "Mesh mode is not supported in exception path");
2456 goto fail;
2457 }
2458
2459 /* TSO or SG */
2460 if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
2461 qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2462 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2463 "TSO and SG are not supported in exception path");
2464
2465 goto fail;
2466 }
2467
2468 /* RAW */
2469 if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
2470 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2471 "Raw frame is not supported in exception path");
2472 goto fail;
2473 }
2474
2475
2476 /* Mcast enhancement */
2477 if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2478 if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2479 !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2480 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2481 "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
2482 }
2483 }
2484
2485 if (qdf_unlikely(tx_exc_metadata->is_tx_sniffer)) {
2486 DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd,
1,
2487 qdf_nbuf_len(nbuf));
2488
2489 dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
2490 tx_exc_metadata->ppdu_cookie);
2491 }
2492
2493 /*
2494 * Get HW Queue to use for this frame.
2495 * TCL supports up to 4 DMA rings, out of which 3 rings are
2496 * dedicated for data and 1 for command.
2497 * "queue_id" maps to one hardware ring.
2498 * With each ring, we also associate a unique Tx descriptor pool
2499 * to minimize lock contention for these resources.
2500 */
2501 dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2502
2503 /*
2504 * Check exception descriptors
2505 */
2506 if (dp_tx_exception_limit_check(vdev))
2507 goto fail;
2508
2509 /* Single linear frame */
2510 /*
2511 * If nbuf is a simple linear frame, use send_single function to
2512 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2513 * SRNG. There is no need to setup a MSDU extension descriptor.
2514 */
2515 nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
2516 tx_exc_metadata->peer_id, tx_exc_metadata);
2517
2518 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2519 return nbuf;
2520
2521 fail:
2522 if (vdev)
2523 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2524 dp_verbose_debug("pkt send failed");
2525 return nbuf;
2526 }
2527
2528 /**
2529 * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
2530 * in exception path in special case to avoid regular exception path check.
2531 * @soc_hdl: DP soc handle
2532 * @vdev_id: id of DP vdev handle
2533 * @nbuf: skb
2534 * @tx_exc_metadata: Handle that holds exception path meta data
2535 *
2536 * Entry point for Core Tx layer (DP_TX) invoked from
2537 * hard_start_xmit in OSIF/HDD to transmit frames through fw
2538 *
2539 * Return: NULL on success,
2540 * nbuf when it fails to send
2541 */
2542 qdf_nbuf_t
2543 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
2544 uint8_t vdev_id, qdf_nbuf_t nbuf,
2545 struct cdp_tx_exception_metadata *tx_exc_metadata)
2546 {
2547 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2548 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2549 DP_MOD_ID_TX_EXCEPTION);
2550
2551 if (qdf_unlikely(!vdev))
2552 goto fail;
2553
2554 if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
2555 == QDF_STATUS_E_FAILURE)) {
2556 DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
2557 goto fail;
2558 }
2559
2560 /* Drop the reference; it will again be taken inside dp_tx_send_exception() */
2561 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2562
2563 return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
2564
2565 fail:
2566 if (vdev)
2567 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
2568 dp_verbose_debug("pkt send failed");
2569 return nbuf;
2570 }
2571
2572 /**
2573 * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
2574 * @soc_hdl: DP soc handle
2575 * @vdev_id: id of DP vdev handle
2576 * @nbuf: skb
2577 *
2578 * Entry point for Core Tx layer (DP_TX) invoked from
2579 * hard_start_xmit in OSIF/HDD
2580 *
2581 * Return: NULL on success,
2582 * nbuf when it fails to send
2583 */
2584 #ifdef MESH_MODE_SUPPORT
2585 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2586 qdf_nbuf_t nbuf)
2587 {
2588 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2589 struct meta_hdr_s *mhdr;
2590 qdf_nbuf_t nbuf_mesh = NULL;
2591 qdf_nbuf_t nbuf_clone = NULL;
2592 struct dp_vdev *vdev;
2593 uint8_t no_enc_frame = 0;
2594
2595 nbuf_mesh = qdf_nbuf_unshare(nbuf);
2596 if (!nbuf_mesh) {
2597 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2598
"qdf_nbuf_unshare failed"); 2599 return nbuf; 2600 } 2601 2602 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH); 2603 if (!vdev) { 2604 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2605 "vdev is NULL for vdev_id %d", vdev_id); 2606 return nbuf; 2607 } 2608 2609 nbuf = nbuf_mesh; 2610 2611 mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf); 2612 2613 if ((vdev->sec_type != cdp_sec_type_none) && 2614 (mhdr->flags & METAHDR_FLAG_NOENCRYPT)) 2615 no_enc_frame = 1; 2616 2617 if (mhdr->flags & METAHDR_FLAG_NOQOS) 2618 qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST); 2619 2620 if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) && 2621 !no_enc_frame) { 2622 nbuf_clone = qdf_nbuf_clone(nbuf); 2623 if (!nbuf_clone) { 2624 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2625 "qdf_nbuf_clone failed"); 2626 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH); 2627 return nbuf; 2628 } 2629 qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO); 2630 } 2631 2632 if (nbuf_clone) { 2633 if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) { 2634 DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1); 2635 } else { 2636 qdf_nbuf_free(nbuf_clone); 2637 } 2638 } 2639 2640 if (no_enc_frame) 2641 qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO); 2642 else 2643 qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID); 2644 2645 nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf); 2646 if ((!nbuf) && no_enc_frame) { 2647 DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1); 2648 } 2649 2650 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH); 2651 return nbuf; 2652 } 2653 2654 #else 2655 2656 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id, 2657 qdf_nbuf_t nbuf) 2658 { 2659 return dp_tx_send(soc, vdev_id, nbuf); 2660 } 2661 2662 #endif 2663 2664 /** 2665 * dp_tx_nawds_handler() - NAWDS handler 2666 * 2667 * @soc: DP soc handle 2668 * @vdev_id: id of DP vdev handle 2669 * @msdu_info: msdu_info required to create HTT metadata 2670 * @nbuf: skb 2671 * 2672 * This API transfers the multicast frames with the peer id 2673 * on NAWDS enabled peer. 
2675 *
2676 * Return: none
2677 */
2678 static inline
2679 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
2680 struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
2681 {
2682 struct dp_peer *peer = NULL;
2683 qdf_nbuf_t nbuf_clone = NULL;
2684 uint16_t peer_id = DP_INVALID_PEER;
2685 uint16_t sa_peer_id = DP_INVALID_PEER;
2686 struct dp_ast_entry *ast_entry = NULL;
2687 qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2688
2689 if (qdf_nbuf_get_tx_ftype(nbuf) == CB_FTYPE_INTRABSS_FWD) {
2690 qdf_spin_lock_bh(&soc->ast_lock);
2691
2692 ast_entry = dp_peer_ast_hash_find_by_pdevid
2693 (soc,
2694 (uint8_t *)(eh->ether_shost),
2695 vdev->pdev->pdev_id);
2696
2697 if (ast_entry)
2698 sa_peer_id = ast_entry->peer_id;
2699 qdf_spin_unlock_bh(&soc->ast_lock);
2700 }
2701
2702 qdf_spin_lock_bh(&vdev->peer_list_lock);
2703 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2704 if (!peer->bss_peer && peer->nawds_enabled) {
2705 peer_id = peer->peer_id;
2706 /* Multicast packets need to be
2707 * dropped in case of intra-bss forwarding
2708 */
2709 if (sa_peer_id == peer->peer_id) {
2710 QDF_TRACE(QDF_MODULE_ID_DP,
2711 QDF_TRACE_LEVEL_DEBUG,
2712 " %s: multicast packet", __func__);
2713 DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
2714 continue;
2715 }
2716 nbuf_clone = qdf_nbuf_clone(nbuf);
2717
2718 if (!nbuf_clone) {
2719 QDF_TRACE(QDF_MODULE_ID_DP,
2720 QDF_TRACE_LEVEL_ERROR,
2721 FL("nbuf clone failed"));
2722 break;
2723 }
2724
2725 nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
2726 msdu_info, peer_id,
2727 NULL);
2728
2729 if (nbuf_clone) {
2730 QDF_TRACE(QDF_MODULE_ID_DP,
2731 QDF_TRACE_LEVEL_DEBUG,
2732 FL("pkt send failed"));
2733 qdf_nbuf_free(nbuf_clone);
2734 } else {
2735 if (peer_id != DP_INVALID_PEER)
2736 DP_STATS_INC_PKT(peer, tx.nawds_mcast,
2737 1, qdf_nbuf_len(nbuf));
2738 }
2739 }
2740 }
2741
2742 qdf_spin_unlock_bh(&vdev->peer_list_lock);
2743 }
2744
2745 /**
2746 * dp_tx_send() - Transmit a frame on a given VAP
2747 * @soc_hdl: DP soc handle
2748 * @vdev_id: id of DP vdev handle
2749 * @nbuf: skb
2750 *
2751 * Entry point for Core Tx layer (DP_TX) invoked from
2752 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
2753 * cases
2754 *
2755 * Return: NULL on success,
2756 * nbuf when it fails to send
2757 */
2758 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2759 qdf_nbuf_t nbuf)
2760 {
2761 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2762 uint16_t peer_id = HTT_INVALID_PEER;
2763 /*
2764 * a full memzero would incur an additional function call per packet,
2765 * so the structure is zero-initialized on the stack instead
2766 */
2767 struct dp_tx_msdu_info_s msdu_info = {0};
2768 struct dp_vdev *vdev = NULL;
2769
2770 if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
2771 return nbuf;
2772
2773 /*
2774 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
2775 * it in the per packet path.
2776 *
2777 * In this path the vdev memory is already protected by the netdev
2778 * tx lock
2779 */
2780 vdev = soc->vdev_id_map[vdev_id];
2781 if (qdf_unlikely(!vdev))
2782 return nbuf;
2783
2784 dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
2785 QDF_MAC_ADDR_REF(nbuf->data));
2786
2787 /*
2788 * Set Default Host TID value to invalid TID
2789 * (TID override disabled)
2790 */
2791 msdu_info.tid = HTT_TX_EXT_TID_INVALID;
2792 DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
2793
2794 if (qdf_unlikely(vdev->mesh_vdev)) {
2795 qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
2796 &msdu_info);
2797 if (!nbuf_mesh) {
2798 dp_verbose_debug("Extracting mesh metadata failed");
2799 return nbuf;
2800 }
2801 nbuf = nbuf_mesh;
2802 }
2803
2804 /*
2805 * Get HW Queue to use for this frame.
2806 * TCL supports up to 4 DMA rings, out of which 3 rings are
2807 * dedicated for data and 1 for command.
2808 * "queue_id" maps to one hardware ring.
2809 * With each ring, we also associate a unique Tx descriptor pool
2810 * to minimize lock contention for these resources.
2811 */
2812 dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
2813
2814 /*
2815 * TCL H/W supports 2 DSCP-TID mapping tables.
2816 * Table 1 - Default DSCP-TID mapping table
2817 * Table 2 - 1 DSCP-TID override table
2818 *
2819 * If we need a different DSCP-TID mapping for this vap,
2820 * call tid_classify to extract DSCP/ToS from frame and
2821 * map to a TID and store in msdu_info. This is later used
2822 * to fill in TCL Input descriptor (per-packet TID override).
2823 */
2824 dp_tx_classify_tid(vdev, nbuf, &msdu_info);
2825
2826 /*
2827 * Classify the frame and call corresponding
2828 * "prepare" function which extracts the segment (TSO)
2829 * and fragmentation information (for TSO, SG, ME, or Raw)
2830 * into MSDU_INFO structure which is later used to fill
2831 * SW and HW descriptors.
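 * Dispatch below: TSO frames go through dp_tx_prepare_tso(), non-linear
 * frames through dp_tx_prepare_sg(), raw frames through
 * dp_tx_prepare_raw(); everything else is sent as a single linear MSDU.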
2832 */
2833 if (qdf_nbuf_is_tso(nbuf)) {
2834 dp_verbose_debug("TSO frame %pK", vdev);
2835 DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
2836 qdf_nbuf_len(nbuf));
2837
2838 if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
2839 DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
2840 qdf_nbuf_len(nbuf));
2841 return nbuf;
2842 }
2843
2844 goto send_multiple;
2845 }
2846
2847 /* SG */
2848 if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
2849 struct dp_tx_seg_info_s seg_info = {0};
2850
2851 nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
2852 if (!nbuf)
2853 return NULL;
2854
2855 dp_verbose_debug("non-TSO SG frame %pK", vdev);
2856
2857 DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
2858 qdf_nbuf_len(nbuf));
2859
2860 goto send_multiple;
2861 }
2862
2863 #ifdef ATH_SUPPORT_IQUE
2864 /* Mcast to Ucast Conversion */
2865 if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
2866 qdf_ether_header_t *eh = (qdf_ether_header_t *)
2867 qdf_nbuf_data(nbuf);
2868 if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
2869 !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
2870 dp_verbose_debug("Mcast frm for ME %pK", vdev);
2871
2872 DP_STATS_INC_PKT(vdev,
2873 tx_i.mcast_en.mcast_pkt, 1,
2874 qdf_nbuf_len(nbuf));
2875 if (dp_tx_prepare_send_me(vdev, nbuf) ==
2876 QDF_STATUS_SUCCESS) {
2877 return NULL;
2878 }
2879
2880 if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
2881 if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
2882 QDF_STATUS_SUCCESS) {
2883 return NULL;
2884 }
2885 }
2886 }
2887 }
2888 #endif
2889
2890 /* RAW */
2891 if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
2892 struct dp_tx_seg_info_s seg_info = {0};
2893
2894 nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
2895 if (!nbuf)
2896 return NULL;
2897
2898 dp_verbose_debug("Raw frame %pK", vdev);
2899
2900 goto send_multiple;
2901
2902 }
2903
2904 if (qdf_unlikely(vdev->nawds_enabled)) {
2905 qdf_ether_header_t *eh = (qdf_ether_header_t *)
2906 qdf_nbuf_data(nbuf);
2907 if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
2908 dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);
2909
2910 peer_id = DP_INVALID_PEER;
2911 DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2912 1, qdf_nbuf_len(nbuf));
2913 }
2914
2915 /* Single linear frame */
2916 /*
2917 * If nbuf is a simple linear frame, use send_single function to
2918 * prepare direct-buffer type TCL descriptor and enqueue to TCL
2919 * SRNG. There is no need to setup a MSDU extension descriptor.
2920 */
2921 nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
2922
2923 return nbuf;
2924
2925 send_multiple:
2926 nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
2927
2928 if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
2929 dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
2930
2931 return nbuf;
2932 }
2933
2934 /**
2935 * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
2936 * case to avoid check in per packet path.
2937 * @soc: DP soc handle 2938 * @vdev_id: id of DP vdev handle 2939 * @nbuf: skb 2940 * 2941 * Entry point for Core Tx layer (DP_TX) invoked from 2942 * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send 2943 * with special condition to avoid per pkt check in dp_tx_send 2944 * 2945 * Return: NULL on success, 2946 * nbuf when it fails to send 2947 */ 2948 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl, 2949 uint8_t vdev_id, qdf_nbuf_t nbuf) 2950 { 2951 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 2952 struct dp_vdev *vdev = NULL; 2953 2954 if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT)) 2955 return nbuf; 2956 2957 /* 2958 * dp_vdev_get_ref_by_id does does a atomic operation avoid using 2959 * this in per packet path. 2960 * 2961 * As in this path vdev memory is already protected with netdev 2962 * tx lock 2963 */ 2964 vdev = soc->vdev_id_map[vdev_id]; 2965 if (qdf_unlikely(!vdev)) 2966 return nbuf; 2967 2968 if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev) 2969 == QDF_STATUS_E_FAILURE)) { 2970 DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1); 2971 return nbuf; 2972 } 2973 2974 return dp_tx_send(soc_hdl, vdev_id, nbuf); 2975 } 2976 2977 /** 2978 * dp_tx_reinject_handler() - Tx Reinject Handler 2979 * @soc: datapath soc handle 2980 * @vdev: datapath vdev handle 2981 * @tx_desc: software descriptor head pointer 2982 * @status : Tx completion status from HTT descriptor 2983 * 2984 * This function reinjects frames back to Target. 2985 * Todo - Host queue needs to be added 2986 * 2987 * Return: none 2988 */ 2989 static 2990 void dp_tx_reinject_handler(struct dp_soc *soc, 2991 struct dp_vdev *vdev, 2992 struct dp_tx_desc_s *tx_desc, 2993 uint8_t *status) 2994 { 2995 struct dp_peer *peer = NULL; 2996 uint32_t peer_id = HTT_INVALID_PEER; 2997 qdf_nbuf_t nbuf = tx_desc->nbuf; 2998 qdf_nbuf_t nbuf_copy = NULL; 2999 struct dp_tx_msdu_info_s msdu_info; 3000 #ifdef WDS_VENDOR_EXTENSION 3001 int is_mcast = 0, is_ucast = 0; 3002 int num_peers_3addr = 0; 3003 qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf)); 3004 struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf)); 3005 #endif 3006 3007 qdf_assert(vdev); 3008 3009 qdf_mem_zero(&msdu_info, sizeof(msdu_info)); 3010 3011 dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue); 3012 3013 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 3014 "%s Tx reinject path", __func__); 3015 3016 DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1, 3017 qdf_nbuf_len(tx_desc->nbuf)); 3018 3019 #ifdef WDS_VENDOR_EXTENSION 3020 if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { 3021 is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0; 3022 } else { 3023 is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0; 3024 } 3025 is_ucast = !is_mcast; 3026 3027 qdf_spin_lock_bh(&vdev->peer_list_lock); 3028 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { 3029 if (peer->bss_peer) 3030 continue; 3031 3032 /* Detect wds peers that use 3-addr framing for mcast. 3033 * if there are any, the bss_peer is used to send the 3034 * the mcast frame using 3-addr format. all wds enabled 3035 * peers that use 4-addr framing for mcast frames will 3036 * be duplicated and sent as 4-addr frames below. 
3037 */ 3038 if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) { 3039 num_peers_3addr = 1; 3040 break; 3041 } 3042 } 3043 qdf_spin_unlock_bh(&vdev->peer_list_lock); 3044 #endif 3045 3046 if (qdf_unlikely(vdev->mesh_vdev)) { 3047 DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf); 3048 } else { 3049 qdf_spin_lock_bh(&vdev->peer_list_lock); 3050 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { 3051 if ((peer->peer_id != HTT_INVALID_PEER) && 3052 #ifdef WDS_VENDOR_EXTENSION 3053 /* 3054 * . if 3-addr STA, then send on BSS Peer 3055 * . if Peer WDS enabled and accept 4-addr mcast, 3056 * send mcast on that peer only 3057 * . if Peer WDS enabled and accept 4-addr ucast, 3058 * send ucast on that peer only 3059 */ 3060 ((peer->bss_peer && num_peers_3addr && is_mcast) || 3061 (peer->wds_enabled && 3062 ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) || 3063 (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) { 3064 #else 3065 ((peer->bss_peer && 3066 !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))))) { 3067 #endif 3068 peer_id = DP_INVALID_PEER; 3069 3070 nbuf_copy = qdf_nbuf_copy(nbuf); 3071 3072 if (!nbuf_copy) { 3073 QDF_TRACE(QDF_MODULE_ID_DP, 3074 QDF_TRACE_LEVEL_DEBUG, 3075 FL("nbuf copy failed")); 3076 break; 3077 } 3078 3079 nbuf_copy = dp_tx_send_msdu_single(vdev, 3080 nbuf_copy, 3081 &msdu_info, 3082 peer_id, 3083 NULL); 3084 3085 if (nbuf_copy) { 3086 QDF_TRACE(QDF_MODULE_ID_DP, 3087 QDF_TRACE_LEVEL_DEBUG, 3088 FL("pkt send failed")); 3089 qdf_nbuf_free(nbuf_copy); 3090 } 3091 } 3092 } 3093 qdf_spin_unlock_bh(&vdev->peer_list_lock); 3094 } 3095 3096 qdf_nbuf_free(nbuf); 3097 3098 dp_tx_desc_release(tx_desc, tx_desc->pool_id); 3099 } 3100 3101 /** 3102 * dp_tx_inspect_handler() - Tx Inspect Handler 3103 * @soc: datapath soc handle 3104 * @vdev: datapath vdev handle 3105 * @tx_desc: software descriptor head pointer 3106 * @status : Tx completion status from HTT descriptor 3107 * 3108 * Handles Tx frames sent back to Host for inspection 3109 * (ProxyARP) 3110 * 3111 * Return: none 3112 */ 3113 static void dp_tx_inspect_handler(struct dp_soc *soc, 3114 struct dp_vdev *vdev, 3115 struct dp_tx_desc_s *tx_desc, 3116 uint8_t *status) 3117 { 3118 3119 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3120 "%s Tx inspect path", 3121 __func__); 3122 3123 DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1, 3124 qdf_nbuf_len(tx_desc->nbuf)); 3125 3126 DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf); 3127 dp_tx_desc_release(tx_desc, tx_desc->pool_id); 3128 } 3129 3130 #ifdef FEATURE_PERPKT_INFO 3131 /** 3132 * dp_get_completion_indication_for_stack() - send completion to stack 3133 * @soc : dp_soc handle 3134 * @pdev: dp_pdev handle 3135 * @peer: dp peer handle 3136 * @ts: transmit completion status structure 3137 * @netbuf: Buffer pointer for free 3138 * 3139 * This function is used for indication whether buffer needs to be 3140 * sent to stack for freeing or not 3141 */ 3142 QDF_STATUS 3143 dp_get_completion_indication_for_stack(struct dp_soc *soc, 3144 struct dp_pdev *pdev, 3145 struct dp_peer *peer, 3146 struct hal_tx_completion_status *ts, 3147 qdf_nbuf_t netbuf, 3148 uint64_t time_latency) 3149 { 3150 struct tx_capture_hdr *ppdu_hdr; 3151 uint16_t peer_id = ts->peer_id; 3152 uint32_t ppdu_id = ts->ppdu_id; 3153 uint8_t first_msdu = ts->first_msdu; 3154 uint8_t last_msdu = ts->last_msdu; 3155 uint32_t txcap_hdr_size = sizeof(struct tx_capture_hdr); 3156 3157 if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode && 3158 !pdev->latency_capture_enable)) 3159 return 
QDF_STATUS_E_NOSUPPORT; 3160 3161 if (!peer) { 3162 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3163 FL("Peer Invalid")); 3164 return QDF_STATUS_E_INVAL; 3165 } 3166 3167 if (pdev->mcopy_mode) { 3168 /* If mcopy is enabled and mcopy_mode is M_COPY deliver 1st MSDU 3169 * per PPDU. If mcopy_mode is M_COPY_EXTENDED deliver 1st MSDU 3170 * for each MPDU 3171 */ 3172 if (pdev->mcopy_mode == M_COPY) { 3173 if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) && 3174 (pdev->m_copy_id.tx_peer_id == peer_id)) { 3175 return QDF_STATUS_E_INVAL; 3176 } 3177 } 3178 3179 if (!first_msdu) 3180 return QDF_STATUS_E_INVAL; 3181 3182 pdev->m_copy_id.tx_ppdu_id = ppdu_id; 3183 pdev->m_copy_id.tx_peer_id = peer_id; 3184 } 3185 3186 if (qdf_unlikely(qdf_nbuf_headroom(netbuf) < txcap_hdr_size)) { 3187 netbuf = qdf_nbuf_realloc_headroom(netbuf, txcap_hdr_size); 3188 if (!netbuf) { 3189 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3190 FL("No headroom")); 3191 return QDF_STATUS_E_NOMEM; 3192 } 3193 } 3194 3195 if (!qdf_nbuf_push_head(netbuf, txcap_hdr_size)) { 3196 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3197 FL("No headroom")); 3198 return QDF_STATUS_E_NOMEM; 3199 } 3200 3201 ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf); 3202 qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw, 3203 QDF_MAC_ADDR_SIZE); 3204 qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw, 3205 QDF_MAC_ADDR_SIZE); 3206 ppdu_hdr->ppdu_id = ppdu_id; 3207 ppdu_hdr->peer_id = peer_id; 3208 ppdu_hdr->first_msdu = first_msdu; 3209 ppdu_hdr->last_msdu = last_msdu; 3210 if (qdf_unlikely(pdev->latency_capture_enable)) { 3211 ppdu_hdr->tsf = ts->tsf; 3212 ppdu_hdr->time_latency = time_latency; 3213 } 3214 3215 return QDF_STATUS_SUCCESS; 3216 } 3217 3218 3219 /** 3220 * dp_send_completion_to_stack() - send completion to stack 3221 * @soc : dp_soc handle 3222 * @pdev: dp_pdev handle 3223 * @peer_id: peer_id of the peer for which completion came 3224 * @ppdu_id: ppdu_id 3225 * @netbuf: Buffer pointer for free 3226 * 3227 * This function is used to send completion to stack 3228 * to free buffer 3229 */ 3230 void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev, 3231 uint16_t peer_id, uint32_t ppdu_id, 3232 qdf_nbuf_t netbuf) 3233 { 3234 dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc, 3235 netbuf, peer_id, 3236 WDI_NO_VAL, pdev->pdev_id); 3237 } 3238 #else 3239 static QDF_STATUS 3240 dp_get_completion_indication_for_stack(struct dp_soc *soc, 3241 struct dp_pdev *pdev, 3242 struct dp_peer *peer, 3243 struct hal_tx_completion_status *ts, 3244 qdf_nbuf_t netbuf, 3245 uint64_t time_latency) 3246 { 3247 return QDF_STATUS_E_NOSUPPORT; 3248 } 3249 3250 static void 3251 dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev, 3252 uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf) 3253 { 3254 } 3255 #endif 3256 3257 /** 3258 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor 3259 * @soc: Soc handle 3260 * @desc: software Tx descriptor to be processed 3261 * 3262 * Return: none 3263 */ 3264 static inline void dp_tx_comp_free_buf(struct dp_soc *soc, 3265 struct dp_tx_desc_s *desc) 3266 { 3267 qdf_nbuf_t nbuf = desc->nbuf; 3268 3269 /* nbuf already freed in vdev detach path */ 3270 if (!nbuf) 3271 return; 3272 3273 /* If it is TDLS mgmt, don't unmap or free the frame */ 3274 if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) 3275 return dp_non_std_tx_comp_free_buff(soc, desc); 3276 3277 /* 0 : MSDU buffer, 1 : MLE */ 3278 if (desc->msdu_ext_desc) { 3279 /* TSO free */ 3280 if 
(hal_tx_ext_desc_get_tso_enable(
3281 desc->msdu_ext_desc->vaddr)) {
3282 /* unmap each TSO seg before freeing the nbuf */
3283 dp_tx_tso_unmap_segment(soc, desc->tso_desc,
3284 desc->tso_num_desc);
3285 qdf_nbuf_free(nbuf);
3286 return;
3287 }
3288 }
3289
3290 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
3291 QDF_DMA_TO_DEVICE, nbuf->len);
3292
3293 if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
3294 return dp_mesh_tx_comp_free_buff(soc, desc);
3295
3296 qdf_nbuf_free(nbuf);
3297 }
3298
3299 #ifdef MESH_MODE_SUPPORT
3300 /**
3301 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
3302 * in mesh meta header
3303 * @tx_desc: software descriptor head pointer
3304 * @ts: pointer to tx completion stats
3305 * Return: none
3306 */
3307 static
3308 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3309 struct hal_tx_completion_status *ts)
3310 {
3311 struct meta_hdr_s *mhdr;
3312 qdf_nbuf_t netbuf = tx_desc->nbuf;
3313
3314 if (!tx_desc->msdu_ext_desc) {
3315 if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
3316 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3317 "netbuf %pK offset %d",
3318 netbuf, tx_desc->pkt_offset);
3319 return;
3320 }
3321 }
3322 if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
3323 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3324 "netbuf %pK offset %lu", netbuf,
3325 sizeof(struct meta_hdr_s));
3326 return;
3327 }
3328
3329 mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
3330 mhdr->rssi = ts->ack_frame_rssi;
3331 mhdr->band = tx_desc->pdev->operating_channel.band;
3332 mhdr->channel = tx_desc->pdev->operating_channel.num;
3333 }
3334
3335 #else
3336 static
3337 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
3338 struct hal_tx_completion_status *ts)
3339 {
3340 }
3341
3342 #endif
3343
3344 #ifdef QCA_PEER_EXT_STATS
3345 /**
3346 * dp_tx_compute_tid_delay() - Compute per TID delay
3347 * @stats: Per TID delay stats
3348 * @tx_desc: Software Tx descriptor
3349 *
3350 * Compute the software enqueue and hw enqueue delays and
3351 * update the respective histograms
3352 *
3353 * Return: void
3354 */
3355 static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
3356 struct dp_tx_desc_s *tx_desc)
3357 {
3358 struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
3359 int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3360 uint32_t sw_enqueue_delay, fwhw_transmit_delay;
3361
3362 current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3363 timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3364 timestamp_hw_enqueue = tx_desc->timestamp;
3365 sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3366 fwhw_transmit_delay = (uint32_t)(current_timestamp -
3367 timestamp_hw_enqueue);
3368
3369 /*
3370 * Update the Tx software enqueue delay and the HW enqueue-to-completion delay.
3371 */
3372 dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
3373 dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
3374 }
3375
3376 /**
3377 * dp_tx_update_peer_ext_stats() - Update the peer extended stats
3378 * @peer: DP peer context
3379 * @tx_desc: Tx software descriptor
3380 * @tid: Transmission ID
3381 * @ring_id: CPU context ID of the Tx completion ring
3382 *
3383 * Update the peer extended stats. These are enhanced
3384 * delay stats tracked at the msdu level.
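 * Specifically, the software enqueue and HW enqueue-to-completion delay
 * histograms, per TID and per completion ring, updated via
 * dp_tx_compute_tid_delay().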
3385 *
3386 * Return: void
3387 */
3388 static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
3389 struct dp_tx_desc_s *tx_desc,
3390 uint8_t tid, uint8_t ring_id)
3391 {
3392 struct dp_pdev *pdev = peer->vdev->pdev;
3393 struct dp_soc *soc = NULL;
3394 struct cdp_peer_ext_stats *pext_stats = NULL;
3395
3396 soc = pdev->soc;
3397 if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
3398 return;
3399
3400 pext_stats = peer->pext_stats;
3401
3402 qdf_assert(pext_stats);
3403 qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
3404
3405 /*
3406 * For non-TID packets use the highest data TID (CDP_MAX_DATA_TIDS - 1)
3407 */
3408 if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
3409 tid = CDP_MAX_DATA_TIDS - 1;
3410
3411 dp_tx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
3412 tx_desc);
3413 }
3414 #else
3415 static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
3416 struct dp_tx_desc_s *tx_desc,
3417 uint8_t tid, uint8_t ring_id)
3418 {
3419 }
3420 #endif
3421
3422 /**
3423 * dp_tx_compute_delay() - Compute and update the delay stats from the
3424 * timestamps recorded for this frame
3425 *
3426 * @vdev: vdev handle
3427 * @tx_desc: tx descriptor
3428 * @tid: tid value
3429 * @ring_id: TCL or WBM ring number for transmit path
3430 * Return: none
3431 */
3432 static void dp_tx_compute_delay(struct dp_vdev *vdev,
3433 struct dp_tx_desc_s *tx_desc,
3434 uint8_t tid, uint8_t ring_id)
3435 {
3436 int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
3437 uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
3438
3439 if (qdf_likely(!vdev->pdev->delay_stats_flag))
3440 return;
3441
3442 current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
3443 timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
3444 timestamp_hw_enqueue = tx_desc->timestamp;
3445 sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
3446 fwhw_transmit_delay = (uint32_t)(current_timestamp -
3447 timestamp_hw_enqueue);
3448 interframe_delay = (uint32_t)(timestamp_ingress -
3449 vdev->prev_tx_enq_tstamp);
3450
3451 /*
3452 * Delay in software enqueue
3453 */
3454 dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
3455 CDP_DELAY_STATS_SW_ENQ, ring_id);
3456 /*
3457 * Delay between packet enqueued to HW and Tx completion
3458 */
3459 dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
3460 CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
3461
3462 /*
3463 * Update interframe delay stats calculated at hardstart receive point.
3464 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
3465 * interframe delay will not be calculated correctly for the 1st frame.
3466 * On the other hand, this avoids an extra per packet check
3467 * of !vdev->prev_tx_enq_tstamp.
3468 */ 3469 dp_update_delay_stats(vdev->pdev, interframe_delay, tid, 3470 CDP_DELAY_STATS_TX_INTERFRAME, ring_id); 3471 vdev->prev_tx_enq_tstamp = timestamp_ingress; 3472 } 3473 3474 #ifdef DISABLE_DP_STATS 3475 static 3476 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer) 3477 { 3478 } 3479 #else 3480 static 3481 inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer) 3482 { 3483 enum qdf_proto_subtype subtype = QDF_PROTO_INVALID; 3484 3485 DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype)); 3486 if (subtype != QDF_PROTO_INVALID) 3487 DP_STATS_INC(peer, tx.no_ack_count[subtype], 1); 3488 } 3489 #endif 3490 3491 /** 3492 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications 3493 * per wbm ring 3494 * 3495 * @tx_desc: software descriptor head pointer 3496 * @ts: Tx completion status 3497 * @peer: peer handle 3498 * @ring_id: ring number 3499 * 3500 * Return: None 3501 */ 3502 static inline void 3503 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc, 3504 struct hal_tx_completion_status *ts, 3505 struct dp_peer *peer, uint8_t ring_id) 3506 { 3507 struct dp_pdev *pdev = peer->vdev->pdev; 3508 struct dp_soc *soc = NULL; 3509 uint8_t mcs, pkt_type; 3510 uint8_t tid = ts->tid; 3511 uint32_t length; 3512 struct cdp_tid_tx_stats *tid_stats; 3513 3514 if (!pdev) 3515 return; 3516 3517 if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) 3518 tid = CDP_MAX_DATA_TIDS - 1; 3519 3520 tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid]; 3521 soc = pdev->soc; 3522 3523 mcs = ts->mcs; 3524 pkt_type = ts->pkt_type; 3525 3526 if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) { 3527 dp_err("Release source is not from TQM"); 3528 return; 3529 } 3530 3531 length = qdf_nbuf_len(tx_desc->nbuf); 3532 DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length); 3533 3534 if (qdf_unlikely(pdev->delay_stats_flag)) 3535 dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id); 3536 DP_STATS_INCC(peer, tx.dropped.age_out, 1, 3537 (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED)); 3538 3539 DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length, 3540 (ts->status == HAL_TX_TQM_RR_REM_CMD_REM)); 3541 3542 DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1, 3543 (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX)); 3544 3545 DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1, 3546 (ts->status == HAL_TX_TQM_RR_REM_CMD_TX)); 3547 3548 DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1, 3549 (ts->status == HAL_TX_TQM_RR_FW_REASON1)); 3550 3551 DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1, 3552 (ts->status == HAL_TX_TQM_RR_FW_REASON2)); 3553 3554 DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1, 3555 (ts->status == HAL_TX_TQM_RR_FW_REASON3)); 3556 3557 /* 3558 * tx_failed is ideally supposed to be updated from HTT ppdu completion 3559 * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there 3560 * are no completions for failed cases. Hence updating tx_failed from 3561 * data path. 
Please note that if tx_failed is fixed to be from ppdu, 3562 * then this has to be removed 3563 */ 3564 peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num + 3565 peer->stats.tx.dropped.fw_rem_notx + 3566 peer->stats.tx.dropped.fw_rem_tx + 3567 peer->stats.tx.dropped.age_out + 3568 peer->stats.tx.dropped.fw_reason1 + 3569 peer->stats.tx.dropped.fw_reason2 + 3570 peer->stats.tx.dropped.fw_reason3; 3571 3572 if (ts->status < CDP_MAX_TX_TQM_STATUS) { 3573 tid_stats->tqm_status_cnt[ts->status]++; 3574 } 3575 3576 if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) { 3577 dp_update_no_ack_stats(tx_desc->nbuf, peer); 3578 return; 3579 } 3580 3581 DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma); 3582 3583 DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu); 3584 DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu); 3585 3586 /* 3587 * Following Rate Statistics are updated from HTT PPDU events from FW. 3588 * Return from here if HTT PPDU events are enabled. 3589 */ 3590 if (!(soc->process_tx_status)) 3591 return; 3592 3593 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 3594 ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A))); 3595 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 3596 ((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A))); 3597 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 3598 ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B))); 3599 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 3600 ((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B))); 3601 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 3602 ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N))); 3603 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 3604 ((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N))); 3605 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 3606 ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC))); 3607 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 3608 ((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC))); 3609 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 3610 ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX))); 3611 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 3612 ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX))); 3613 3614 DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1); 3615 DP_STATS_INC(peer, tx.bw[ts->bw], 1); 3616 DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi); 3617 DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1); 3618 DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc); 3619 DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc); 3620 DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1); 3621 3622 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE 3623 dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, 3624 &peer->stats, ts->peer_id, 3625 UPDATE_PEER_STATS, pdev->pdev_id); 3626 #endif 3627 } 3628 3629 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 3630 /** 3631 * dp_tx_flow_pool_lock() - take flow pool lock 3632 * @soc: core txrx main context 3633 * @tx_desc: tx desc 3634 * 3635 * Return: None 3636 */ 3637 static inline 3638 void dp_tx_flow_pool_lock(struct dp_soc *soc, 3639 struct dp_tx_desc_s *tx_desc) 3640 { 3641 struct dp_tx_desc_pool_s *pool; 3642 uint8_t desc_pool_id; 3643 3644 desc_pool_id = tx_desc->pool_id; 3645 pool = &soc->tx_desc[desc_pool_id]; 3646 3647 qdf_spin_lock_bh(&pool->flow_pool_lock); 3648 } 3649 3650 /** 3651 * dp_tx_flow_pool_unlock() - release flow pool lock 3652 * @soc: core txrx main context 3653 * @tx_desc: tx 
desc 3654 * 3655 * Return: None 3656 */ 3657 static inline 3658 void dp_tx_flow_pool_unlock(struct dp_soc *soc, 3659 struct dp_tx_desc_s *tx_desc) 3660 { 3661 struct dp_tx_desc_pool_s *pool; 3662 uint8_t desc_pool_id; 3663 3664 desc_pool_id = tx_desc->pool_id; 3665 pool = &soc->tx_desc[desc_pool_id]; 3666 3667 qdf_spin_unlock_bh(&pool->flow_pool_lock); 3668 } 3669 #else 3670 static inline 3671 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) 3672 { 3673 } 3674 3675 static inline 3676 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) 3677 { 3678 } 3679 #endif 3680 3681 /** 3682 * dp_tx_notify_completion() - Notify tx completion for this desc 3683 * @soc: core txrx main context 3684 * @vdev: datapath vdev handle 3685 * @tx_desc: tx desc 3686 * @netbuf: buffer 3687 * @status: tx status 3688 * 3689 * Return: none 3690 */ 3691 static inline void dp_tx_notify_completion(struct dp_soc *soc, 3692 struct dp_vdev *vdev, 3693 struct dp_tx_desc_s *tx_desc, 3694 qdf_nbuf_t netbuf, 3695 uint8_t status) 3696 { 3697 void *osif_dev; 3698 ol_txrx_completion_fp tx_compl_cbk = NULL; 3699 uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC); 3700 3701 qdf_assert(tx_desc); 3702 3703 dp_tx_flow_pool_lock(soc, tx_desc); 3704 3705 if (!vdev || 3706 !vdev->osif_vdev) { 3707 dp_tx_flow_pool_unlock(soc, tx_desc); 3708 return; 3709 } 3710 3711 osif_dev = vdev->osif_vdev; 3712 tx_compl_cbk = vdev->tx_comp; 3713 dp_tx_flow_pool_unlock(soc, tx_desc); 3714 3715 if (status == HAL_TX_TQM_RR_FRAME_ACKED) 3716 flag |= BIT(QDF_TX_RX_STATUS_OK); 3717 3718 if (tx_compl_cbk) 3719 tx_compl_cbk(netbuf, osif_dev, flag); 3720 } 3721 3722 /** dp_tx_sojourn_stats_process() - Collect sojourn stats 3723 * @pdev: pdev handle 3724 * @tid: tid value 3725 * @txdesc_ts: timestamp from txdesc 3726 * @ppdu_id: ppdu id 3727 * 3728 * Return: none 3729 */ 3730 #ifdef FEATURE_PERPKT_INFO 3731 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev, 3732 struct dp_peer *peer, 3733 uint8_t tid, 3734 uint64_t txdesc_ts, 3735 uint32_t ppdu_id) 3736 { 3737 uint64_t delta_ms; 3738 struct cdp_tx_sojourn_stats *sojourn_stats; 3739 3740 if (qdf_unlikely(pdev->enhanced_stats_en == 0)) 3741 return; 3742 3743 if (qdf_unlikely(tid == HTT_INVALID_TID || 3744 tid >= CDP_DATA_TID_MAX)) 3745 return; 3746 3747 if (qdf_unlikely(!pdev->sojourn_buf)) 3748 return; 3749 3750 sojourn_stats = (struct cdp_tx_sojourn_stats *) 3751 qdf_nbuf_data(pdev->sojourn_buf); 3752 3753 sojourn_stats->cookie = (void *)peer->rdkstats_ctx; 3754 3755 delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) - 3756 txdesc_ts; 3757 qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid], 3758 delta_ms); 3759 sojourn_stats->sum_sojourn_msdu[tid] = delta_ms; 3760 sojourn_stats->num_msdus[tid] = 1; 3761 sojourn_stats->avg_sojourn_msdu[tid].internal = 3762 peer->avg_sojourn_msdu[tid].internal; 3763 dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc, 3764 pdev->sojourn_buf, HTT_INVALID_PEER, 3765 WDI_NO_VAL, pdev->pdev_id); 3766 sojourn_stats->sum_sojourn_msdu[tid] = 0; 3767 sojourn_stats->num_msdus[tid] = 0; 3768 sojourn_stats->avg_sojourn_msdu[tid].internal = 0; 3769 } 3770 #else 3771 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev, 3772 struct dp_peer *peer, 3773 uint8_t tid, 3774 uint64_t txdesc_ts, 3775 uint32_t ppdu_id) 3776 { 3777 } 3778 #endif 3779 3780 /** 3781 * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf 3782 * @soc: DP Soc handle 3783 * @tx_desc: software Tx descriptor 3784 * @ts : Tx 
completion status from HAL/HTT descriptor 3785 * 3786 * Return: none 3787 */ 3788 static inline void 3789 dp_tx_comp_process_desc(struct dp_soc *soc, 3790 struct dp_tx_desc_s *desc, 3791 struct hal_tx_completion_status *ts, 3792 struct dp_peer *peer) 3793 { 3794 uint64_t time_latency = 0; 3795 /* 3796 * m_copy/tx_capture modes are not supported for 3797 * scatter gather packets 3798 */ 3799 if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) { 3800 time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) - 3801 desc->timestamp); 3802 } 3803 if (!(desc->msdu_ext_desc)) { 3804 if (QDF_STATUS_SUCCESS == 3805 dp_tx_add_to_comp_queue(soc, desc, ts, peer)) { 3806 return; 3807 } 3808 3809 if (QDF_STATUS_SUCCESS == 3810 dp_get_completion_indication_for_stack(soc, 3811 desc->pdev, 3812 peer, ts, 3813 desc->nbuf, 3814 time_latency)) { 3815 qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf, 3816 QDF_DMA_TO_DEVICE, 3817 desc->nbuf->len); 3818 dp_send_completion_to_stack(soc, 3819 desc->pdev, 3820 ts->peer_id, 3821 ts->ppdu_id, 3822 desc->nbuf); 3823 return; 3824 } 3825 } 3826 3827 dp_tx_comp_free_buf(soc, desc); 3828 } 3829 3830 #ifdef DISABLE_DP_STATS 3831 /** 3832 * dp_tx_update_connectivity_stats() - update tx connectivity stats 3833 * @soc: core txrx main context 3834 * @tx_desc: tx desc 3835 * @status: tx status 3836 * 3837 * Return: none 3838 */ 3839 static inline 3840 void dp_tx_update_connectivity_stats(struct dp_soc *soc, 3841 struct dp_vdev *vdev, 3842 struct dp_tx_desc_s *tx_desc, 3843 uint8_t status) 3844 { 3845 } 3846 #else 3847 static inline 3848 void dp_tx_update_connectivity_stats(struct dp_soc *soc, 3849 struct dp_vdev *vdev, 3850 struct dp_tx_desc_s *tx_desc, 3851 uint8_t status) 3852 { 3853 void *osif_dev; 3854 ol_txrx_stats_rx_fp stats_cbk; 3855 uint8_t pkt_type; 3856 3857 qdf_assert(tx_desc); 3858 3859 if (!vdev || 3860 !vdev->osif_vdev || 3861 !vdev->stats_cb) 3862 return; 3863 3864 osif_dev = vdev->osif_vdev; 3865 stats_cbk = vdev->stats_cb; 3866 3867 stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type); 3868 if (status == HAL_TX_TQM_RR_FRAME_ACKED) 3869 stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT, 3870 &pkt_type); 3871 } 3872 #endif 3873 3874 /** 3875 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info 3876 * @soc: DP soc handle 3877 * @tx_desc: software descriptor head pointer 3878 * @ts: Tx completion status 3879 * @peer: peer handle 3880 * @ring_id: ring number 3881 * 3882 * Return: none 3883 */ 3884 static inline 3885 void dp_tx_comp_process_tx_status(struct dp_soc *soc, 3886 struct dp_tx_desc_s *tx_desc, 3887 struct hal_tx_completion_status *ts, 3888 struct dp_peer *peer, uint8_t ring_id) 3889 { 3890 uint32_t length; 3891 qdf_ether_header_t *eh; 3892 struct dp_vdev *vdev = NULL; 3893 qdf_nbuf_t nbuf = tx_desc->nbuf; 3894 uint8_t dp_status; 3895 3896 if (!nbuf) { 3897 dp_info_rl("invalid tx descriptor. 
nbuf NULL"); 3898 goto out; 3899 } 3900 3901 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 3902 length = qdf_nbuf_len(nbuf); 3903 3904 dp_status = qdf_dp_get_status_from_htt(ts->status); 3905 DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf, 3906 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD, 3907 QDF_TRACE_DEFAULT_PDEV_ID, 3908 qdf_nbuf_data_addr(nbuf), 3909 sizeof(qdf_nbuf_data(nbuf)), 3910 tx_desc->id, 3911 dp_status)); 3912 3913 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 3914 "-------------------- \n" 3915 "Tx Completion Stats: \n" 3916 "-------------------- \n" 3917 "ack_frame_rssi = %d \n" 3918 "first_msdu = %d \n" 3919 "last_msdu = %d \n" 3920 "msdu_part_of_amsdu = %d \n" 3921 "rate_stats valid = %d \n" 3922 "bw = %d \n" 3923 "pkt_type = %d \n" 3924 "stbc = %d \n" 3925 "ldpc = %d \n" 3926 "sgi = %d \n" 3927 "mcs = %d \n" 3928 "ofdma = %d \n" 3929 "tones_in_ru = %d \n" 3930 "tsf = %d \n" 3931 "ppdu_id = %d \n" 3932 "transmit_cnt = %d \n" 3933 "tid = %d \n" 3934 "peer_id = %d\n", 3935 ts->ack_frame_rssi, ts->first_msdu, 3936 ts->last_msdu, ts->msdu_part_of_amsdu, 3937 ts->valid, ts->bw, ts->pkt_type, ts->stbc, 3938 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma, 3939 ts->tones_in_ru, ts->tsf, ts->ppdu_id, 3940 ts->transmit_cnt, ts->tid, ts->peer_id); 3941 3942 /* Update SoC level stats */ 3943 DP_STATS_INCC(soc, tx.dropped_fw_removed, 1, 3944 (ts->status == HAL_TX_TQM_RR_REM_CMD_REM)); 3945 3946 if (!peer) { 3947 dp_err_rl("peer is null or deletion in progress"); 3948 DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length); 3949 goto out; 3950 } 3951 vdev = peer->vdev; 3952 3953 dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status); 3954 3955 /* Update per-packet stats for mesh mode */ 3956 if (qdf_unlikely(vdev->mesh_vdev) && 3957 !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) 3958 dp_tx_comp_fill_tx_completion_stats(tx_desc, ts); 3959 3960 /* Update peer level stats */ 3961 if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) { 3962 if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) { 3963 DP_STATS_INC_PKT(peer, tx.mcast, 1, length); 3964 3965 if ((peer->vdev->tx_encap_type == 3966 htt_cmn_pkt_type_ethernet) && 3967 QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { 3968 DP_STATS_INC_PKT(peer, tx.bcast, 1, length); 3969 } 3970 } 3971 } else { 3972 DP_STATS_INC_PKT(peer, tx.ucast, 1, length); 3973 if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) { 3974 DP_STATS_INC_PKT(peer, tx.tx_success, 1, length); 3975 if (qdf_unlikely(peer->in_twt)) { 3976 DP_STATS_INC_PKT(peer, 3977 tx.tx_success_twt, 3978 1, length); 3979 } 3980 } 3981 } 3982 3983 dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id); 3984 dp_tx_update_peer_ext_stats(peer, tx_desc, ts->tid, ring_id); 3985 3986 #ifdef QCA_SUPPORT_RDK_STATS 3987 if (soc->rdkstats_enabled) 3988 dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid, 3989 tx_desc->timestamp, 3990 ts->ppdu_id); 3991 #endif 3992 3993 out: 3994 return; 3995 } 3996 /** 3997 * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler 3998 * @soc: core txrx main context 3999 * @comp_head: software descriptor head pointer 4000 * @ring_id: ring number 4001 * 4002 * This function will process batch of descriptors reaped by dp_tx_comp_handler 4003 * and release the software descriptors after processing is complete 4004 * 4005 * Return: none 4006 */ 4007 static void 4008 dp_tx_comp_process_desc_list(struct dp_soc *soc, 4009 struct dp_tx_desc_s *comp_head, uint8_t ring_id) 4010 { 4011 struct dp_tx_desc_s *desc; 4012 struct dp_tx_desc_s *next; 4013 struct 
/**
 * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
 * @soc: core txrx main context
 * @comp_head: software descriptor head pointer
 * @ring_id: ring number
 *
 * This function will process batch of descriptors reaped by dp_tx_comp_handler
 * and release the software descriptors after processing is complete
 *
 * Return: none
 */
static void
dp_tx_comp_process_desc_list(struct dp_soc *soc,
			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
{
	struct dp_tx_desc_s *desc;
	struct dp_tx_desc_s *next;
	struct hal_tx_completion_status ts;
	struct dp_peer *peer = NULL;
	uint16_t peer_id = DP_INVALID_PEER;
	qdf_nbuf_t netbuf;

	desc = comp_head;

	while (desc) {
		if (peer_id != desc->peer_id) {
			if (peer)
				dp_peer_unref_delete(peer,
						     DP_MOD_ID_TX_COMP);
			peer_id = desc->peer_id;
			peer = dp_peer_get_ref_by_id(soc, peer_id,
						     DP_MOD_ID_TX_COMP);
		}
		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
			struct dp_pdev *pdev = desc->pdev;

			if (qdf_likely(peer)) {
				/*
				 * Increment peer statistics
				 * Minimal statistics update done here
				 */
				DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
						 desc->length);

				if (desc->tx_status !=
				    HAL_TX_TQM_RR_FRAME_ACKED)
					DP_STATS_INC(peer, tx.tx_failed, 1);
			}

			qdf_assert(pdev);
			dp_tx_outstanding_dec(pdev);

			/*
			 * Calling a QDF wrapper here would have a significant
			 * performance impact, so the wrapper call is avoided
			 * on this hot path.
			 */
			next = desc->next;
			qdf_mem_unmap_nbytes_single(soc->osdev,
						    desc->dma_addr,
						    QDF_DMA_TO_DEVICE,
						    desc->length);
			qdf_nbuf_free(desc->nbuf);
			dp_tx_desc_free(soc, desc, desc->pool_id);
			desc = next;
			continue;
		}
		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);

		dp_tx_comp_process_tx_status(soc, desc, &ts, peer, ring_id);

		netbuf = desc->nbuf;
		/* check tx complete notification */
		if (peer &&
		    QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf))
			dp_tx_notify_completion(soc, peer->vdev, desc,
						netbuf, ts.status);

		dp_tx_comp_process_desc(soc, desc, &ts, peer);

		next = desc->next;

		dp_tx_desc_release(desc, desc->pool_id);
		desc = next;
	}
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
}
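/*
 * Completion batches are typically runs of descriptors for the same
 * peer, so the loop above caches one peer reference across consecutive
 * descriptors with an unchanged peer_id instead of taking and dropping
 * a reference per packet. A minimal sketch of the pattern, with
 * hypothetical get()/put() helpers standing in for
 * dp_peer_get_ref_by_id()/dp_peer_unref_delete():
 */
#if 0
	uint16_t cached_id = DP_INVALID_PEER;
	struct dp_peer *cached = NULL;

	for (desc = head; desc; desc = desc->next) {
		if (cached_id != desc->peer_id) {
			if (cached)
				put(cached);		/* drop old ref */
			cached_id = desc->peer_id;
			cached = get(cached_id);	/* may be NULL */
		}
		/* ... use 'cached' for this descriptor ... */
	}
	if (cached)
		put(cached);				/* drop final ref */
#endif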
/**
 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 * @ring_id: ring number
 *
 * This function will process HTT Tx indication messages from Target
 *
 * Return: none
 */
static
void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status,
				  uint8_t ring_id)
{
	uint8_t tx_status;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_soc *soc;
	struct hal_tx_completion_status ts = {0};
	uint32_t *htt_desc = (uint32_t *)status;
	struct dp_peer *peer;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	struct htt_soc *htt_handle;

	/*
	 * If the descriptor is already freed in vdev_detach,
	 * continue to next descriptor
	 */
	if ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) && !tx_desc->flags) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_INFO,
			  "Descriptor freed in vdev_detach %d",
			  tx_desc->id);
		return;
	}

	pdev = tx_desc->pdev;
	soc = pdev->soc;

	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_INFO,
			  "pdev in down state %d",
			  tx_desc->id);
		dp_tx_comp_free_buf(soc, tx_desc);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
		return;
	}

	qdf_assert(tx_desc->pdev);

	vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
				     DP_MOD_ID_HTT_COMP);

	if (!vdev)
		return;
	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
	htt_handle = (struct htt_soc *)soc->htt_handle;
	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);

	switch (tx_status) {
	case HTT_TX_FW2WBM_TX_STATUS_OK:
	case HTT_TX_FW2WBM_TX_STATUS_DROP:
	case HTT_TX_FW2WBM_TX_STATUS_TTL:
	{
		uint8_t tid;
		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
			ts.peer_id =
				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
						htt_desc[2]);
			ts.tid =
				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
						htt_desc[2]);
		} else {
			ts.peer_id = HTT_INVALID_PEER;
			ts.tid = HTT_INVALID_TID;
		}
		ts.ppdu_id =
			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
					htt_desc[1]);
		ts.ack_frame_rssi =
			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
					htt_desc[1]);

		ts.tsf = htt_desc[3];
		ts.first_msdu = 1;
		ts.last_msdu = 1;
		tid = ts.tid;
		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
			tid = CDP_MAX_DATA_TIDS - 1;

		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

		if (qdf_unlikely(pdev->delay_stats_flag))
			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
		if (tx_status < CDP_MAX_TX_HTT_STATUS)
			tid_stats->htt_status_cnt[tx_status]++;

		peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
					     DP_MOD_ID_HTT_COMP);

		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);

		if (qdf_likely(peer))
			dp_peer_unref_delete(peer, DP_MOD_ID_HTT_COMP);

		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
	{
		dp_tx_reinject_handler(soc, vdev, tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
	{
		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
	{
		dp_tx_mec_handler(vdev, status);
		break;
	}
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s Invalid HTT tx_status %d\n",
			  __func__, tx_status);
		break;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
	bool limit_hit = false;
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	limit_hit = (num_reaped >= cfg->tx_comp_loop_pkt_limit);

	if (limit_hit)
		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);

	return limit_hit;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
}
#else
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
	return false;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
	return false;
}
#endif
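/*
 * Illustrative note on the limit above: within one pass of
 * dp_tx_comp_handler() below, the reap burst is bounded by both the
 * caller's quota and tx_comp_loop_pkt_limit. A worked example with
 * hypothetical numbers:
 */
#if 0
	uint32_t quota = 64;	/* budget handed in by the scheduler */
	uint32_t limit = 32;	/* cfg->tx_comp_loop_pkt_limit */

	/* at most min(quota, limit) descriptors are reaped before the
	 * handler either exits or loops back via the 'more_data' label
	 */
	uint32_t burst = QDF_MIN(quota, limit);
#endif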
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
			    uint32_t quota)
{
	void *tx_comp_hal_desc;
	uint8_t buffer_src;
	uint8_t pool_id;
	uint32_t tx_desc_id;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t num_processed = 0;
	uint32_t count;
	uint32_t num_avail_for_reap = 0;
	bool force_break = false;

	DP_HIST_INIT();

more_data:
	/* Re-initialize local variables to be re-used */
	head_desc = NULL;
	tail_desc = NULL;
	count = 0;

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		return 0;
	}

	num_avail_for_reap = hal_srng_dst_num_valid(soc->hal_soc, hal_ring_hdl, 0);

	if (num_avail_for_reap >= quota)
		num_avail_for_reap = quota;

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);

	/* Find head descriptor from completion ring */
	while (qdf_likely(num_avail_for_reap)) {

		tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
		if (qdf_unlikely(!tx_comp_hal_desc))
			break;

		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);

		/* If this buffer was not released by TQM or FW, then it is
		 * not a Tx completion indication; assert.
		 */
		if (qdf_unlikely(buffer_src !=
				 HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
		    (qdf_unlikely(buffer_src !=
				  HAL_TX_COMP_RELEASE_SOURCE_FW))) {
			uint8_t wbm_internal_error;

			dp_err_rl(
				"Tx comp release_src != TQM | FW but from %d",
				buffer_src);
			hal_dump_comp_desc(tx_comp_hal_desc);
			DP_STATS_INC(soc, tx.invalid_release_source, 1);

			/* When WBM sees NULL buffer_addr_info in any of
			 * ingress rings it sends an error indication,
			 * with wbm_internal_error=1, to a specific ring.
			 * The WBM2SW ring used to indicate these errors is
			 * fixed in HW, and that ring is being used as Tx
			 * completion ring. These errors are not related to
			 * Tx completions, and should just be ignored
			 */
			wbm_internal_error = hal_get_wbm_internal_error(
							soc->hal_soc,
							tx_comp_hal_desc);

			if (wbm_internal_error) {
				dp_err_rl("Tx comp wbm_internal_error!!");
				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);

				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
				    buffer_src)
					dp_handle_wbm_internal_error(
						soc,
						tx_comp_hal_desc,
						hal_tx_comp_get_buffer_type(
							tx_comp_hal_desc));

			} else {
				dp_err_rl("Tx comp wbm_internal_error false");
				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
			}
			continue;
		}

		/* Get descriptor id */
		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;

		/* Find Tx descriptor */
		tx_desc = dp_tx_desc_find(soc, pool_id,
					  (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
					  DP_TX_DESC_ID_PAGE_OS,
					  (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
					  DP_TX_DESC_ID_OFFSET_OS);

		/*
		 * If the release source is FW, process the HTT status
		 */
		if (qdf_unlikely(buffer_src ==
				 HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
						 htt_tx_status);
			dp_tx_process_htt_completion(tx_desc,
						     htt_tx_status, ring_id);
		} else {
			tx_desc->peer_id =
				hal_tx_comp_get_peer_id(tx_comp_hal_desc);
			tx_desc->tx_status =
				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
			/*
			 * If the fast completion mode is enabled extended
			 * metadata from descriptor is not copied
			 */
			if (qdf_likely(tx_desc->flags &
				       DP_TX_DESC_FLAG_SIMPLE))
				goto add_to_pool;

			/*
			 * If the descriptor is already freed in vdev_detach,
			 * continue to next descriptor
			 */
			if (qdf_unlikely
			    ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
			     !tx_desc->flags)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO,
					  "Descriptor freed in vdev_detach %d",
					  tx_desc_id);
				continue;
			}

			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO,
					  "pdev in down state %d",
					  tx_desc_id);

				dp_tx_comp_free_buf(soc, tx_desc);
				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
				goto next_desc;
			}
			/* Pool id is not matching. Error */
			if (tx_desc->pool_id != pool_id) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_FATAL,
					  "Tx Comp pool id %d not matched %d",
					  pool_id, tx_desc->pool_id);

				qdf_assert_always(0);
			}

			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
			    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_FATAL,
					  "Txdesc invalid, flgs = %x, id = %d",
					  tx_desc->flags, tx_desc_id);
				qdf_assert_always(0);
			}

			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					      &tx_desc->comp, 1);
add_to_pool:
			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

			/* First ring descriptor on the cycle */
			if (!head_desc) {
				head_desc = tx_desc;
				tail_desc = tx_desc;
			}

			tail_desc->next = tx_desc;
			tx_desc->next = NULL;
			tail_desc = tx_desc;
		}
next_desc:
		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);

		/*
		 * Stop processing once the count of processed packets
		 * exceeds the given quota.
		 */
		count++;

		if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
			break;
	}

	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	/* Process the reaped descriptors */
	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

	if (dp_tx_comp_enable_eol_data_check(soc)) {

		if (num_processed >= quota)
			force_break = true;

		if (!force_break &&
		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
						  hal_ring_hdl)) {
			DP_STATS_INC(soc, tx.hp_oos2, 1);
			if (!hif_exec_should_yield(soc->hif_handle,
						   int_ctx->dp_intr_id))
				goto more_data;
		}
	}
	DP_TX_HIST_STATS_PER_PDEV();

	return num_processed;
}

#ifdef FEATURE_WLAN_TDLS
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TDLS);

	if (!vdev) {
		dp_err("vdev handle for id %d is NULL", vdev_id);
		return NULL;
	}

	if (tx_spec & OL_TX_SPEC_NO_FREE)
		vdev->is_tdls_frame = true;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);

	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
}
#endif

static void dp_tx_vdev_update_feature_flags(struct dp_vdev *vdev)
{
	struct wlan_cfg_dp_soc_ctxt *cfg;

	struct dp_soc *soc;

	soc = vdev->pdev->soc;
	if (!soc)
		return;

	cfg = soc->wlan_cfg_ctx;
	if (!cfg)
		return;

	if (vdev->opmode == wlan_op_mode_ndi)
		vdev->csum_enabled = wlan_cfg_get_nan_checksum_offload(cfg);
	else if ((vdev->subtype == wlan_op_subtype_p2p_device) ||
		 (vdev->subtype == wlan_op_subtype_p2p_cli) ||
		 (vdev->subtype == wlan_op_subtype_p2p_go))
		vdev->csum_enabled = wlan_cfg_get_p2p_checksum_offload(cfg);
	else
		vdev->csum_enabled = wlan_cfg_get_checksum_offload(cfg);
}
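/*
 * Illustrative sketch of how the per-vdev csum_enabled flag chosen
 * above would typically gate checksum offload on the Tx path. The
 * helper name is hypothetical; only vdev->csum_enabled comes from this
 * file, and the qdf_nbuf checksum query is assumed to report what the
 * network stack requested for the frame.
 */
#if 0
static inline bool example_request_hw_cksum(struct dp_vdev *vdev,
					    qdf_nbuf_t nbuf)
{
	/* offload only when both the config derived in
	 * dp_tx_vdev_update_feature_flags() and the stack ask for it
	 */
	return vdev->csum_enabled &&
	       (qdf_nbuf_get_tx_cksum(nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP_IP);
}
#endif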
/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	int pdev_id;
	/*
	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
	 */
	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
				     HTT_TCL_METADATA_TYPE_VDEV_BASED);

	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
					vdev->vdev_id);

	pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
						       vdev->pdev->pdev_id);
	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);

	/*
	 * Set HTT Extension Valid bit to 0 by default
	 */
	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

	dp_tx_vdev_update_search_flags(vdev);

	dp_tx_vdev_update_feature_flags(vdev);

	return QDF_STATUS_SUCCESS;
}

#ifndef FEATURE_WDS
static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
{
	return false;
}
#endif

/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 *
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
	struct dp_soc *soc = vdev->pdev->soc;

	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for a TDLS link.
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP (in HKv1) modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled.
	 */
	if (vdev->opmode == wlan_op_mode_sta &&
	    vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if ((vdev->opmode == wlan_op_mode_sta) &&
		 !dp_tx_da_search_override(vdev))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;

	/* Set search type only when peer map v2 messaging is enabled
	 * as we will have the search index (AST hash) only when v2 is
	 * enabled
	 */
	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
	else
		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
}

static inline bool
dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
			  struct dp_vdev *vdev,
			  struct dp_tx_desc_s *tx_desc)
{
	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
		return false;

	/*
	 * If a vdev is given, match descriptors against that vdev;
	 * if vdev is NULL, match against the pdev instead.
	 */
	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
		(tx_desc->pdev == pdev);
}
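/*
 * Usage sketch for the match helper above (illustrative): the
 * dp_tx_desc_flush() implementations below use it so that a
 * vdev-scoped sweep only touches that vdev's descriptors, while a
 * pdev-scoped sweep (vdev == NULL) covers every descriptor on the
 * pdev. The pairing of arguments shown here mirrors the call sites
 * visible in this file:
 */
#if 0
	/* vdev detach: only reset this vdev's outstanding descriptors */
	dp_tx_desc_flush(pdev, vdev, false);

	/* pdev teardown: force-free every descriptor still allocated */
	dp_tx_desc_flush(pdev, NULL, true);
#endif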
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_desc_flush() - release resources associated
 *                      to TX Desc
 *
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance
 *        NULL: no specific Vdev is required and check all allocated TX desc
 *        on this pdev.
 *        Non-NULL: only check the allocated TX Desc associated to this Vdev.
 *
 * @force_free:
 *        true: flush the TX desc.
 *        false: only reset the Vdev in each allocated TX desc
 *        that is associated to the current Vdev.
 *
 * This function will go through the TX desc pool to flush
 * the outstanding TX data or reset the Vdev to NULL in the associated TX
 * Desc.
 */
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!(tx_desc_pool->pool_size) ||
		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
		    !(tx_desc_pool->desc_pages.cacheable_pages))
			continue;

		/*
		 * Take the flow pool lock in case the pool is freed
		 * because all of its tx_desc get recycled while TX
		 * completions are handled. The lock is not needed for a
		 * force flush because:
		 * a. taking it would deadlock if dp_tx_desc_release()
		 *    is also trying to acquire it, and
		 * b. dp interrupts are already disabled before the force
		 *    TX desc flush done in dp_pdev_deinit().
		 */
		if (!force_free)
			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
		num_desc = tx_desc_pool->pool_size;
		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;

			if (qdf_unlikely(!(tx_desc_pool->
					   desc_pages.cacheable_pages)))
				break;

			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				/*
				 * Free the TX desc if force free is
				 * required, otherwise only reset the vdev
				 * in this TX desc.
				 */
				if (force_free) {
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
				}
			}
		}
		if (!force_free)
			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
	}
}
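/*
 * Why force_free skips flow_pool_lock (sketch): per the comment above,
 * dp_tx_desc_release() acquires the same pool lock internally, so
 * holding it across the force-free path would self-deadlock.
 * Illustrative shape of the sequence this avoids:
 */
#if 0
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	/* ... */
	dp_tx_desc_release(tx_desc, pool_id);	/* re-takes the same lock */
	/* ... */
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
#endif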
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/**
 * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
 *
 * @soc: Handle to DP soc structure
 * @tx_desc: pointer of one TX desc
 * @desc_pool_id: TX Desc pool id
 */
static inline void
dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		      uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev_id = DP_INVALID_VDEV_ID;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}

void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i, num_pool;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!tx_desc_pool->desc_pages.cacheable_pages)
			continue;

		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;
			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				if (force_free) {
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					dp_tx_desc_reset_vdev(soc, tx_desc,
							      i);
				}
			}
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;

	/* Reset TX desc associated to this Vdev as NULL */
	dp_tx_desc_flush(pdev, vdev, false);
	dp_tx_vdev_multipass_deinit(vdev);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_pdev_init() - initialize pdev for dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Initialize Flow control counters */
	qdf_atomic_init(&pdev->num_tx_outstanding);

	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		/* Initialize descriptors in TCL Ring */
		hal_tx_init_data_ring(soc->hal_soc,
				      soc->tcl_data_ring[pdev->pdev_id].hal_srng);
	}

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  int num_desc)
{
	return QDF_STATUS_SUCCESS;
}
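/*
 * Lifecycle sketch (illustrative): under QCA_LL_TX_FLOW_CONTROL_V2 the
 * "static pool" hooks above only set up per-pool locks and mark pools
 * FLOW_POOL_INACTIVE; real pools are created on demand by the flow
 * control code. The soc-level pairing therefore reduces to:
 */
#if 0
	/* attach path */
	dp_tx_alloc_static_pools(soc, num_pool, num_desc); /* locks + state */
	dp_tx_init_static_pools(soc, num_pool, num_desc);  /* no-op here */

	/* detach path (counterparts just below) */
	dp_tx_deinit_static_pools(soc, num_pool);          /* no-op here */
	dp_tx_delete_static_pools(soc, num_pool);          /* destroy locks */
#endif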
static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   int num_desc)
{
	uint8_t i, count;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool alloc %d failed %pK"),
				  i, soc);
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (count = 0; count < i; count++)
		dp_tx_desc_pool_free(soc, count);

	return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool init %d failed %pK"),
				  i, soc);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_deinit(soc, i);
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_free(soc, i);
}

#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_free(soc, num_pool);
	dp_tx_tso_num_seg_pool_free(soc, num_pool);
}

/**
 * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
 * @soc: core txrx main context
 *
 * This function frees all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
	dp_tx_ext_desc_pool_free(soc, num_pool);
	dp_tx_delete_static_pools(soc, num_pool);
}
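/*
 * Symmetry sketch (illustrative): the teardown above releases pools in
 * the reverse of the allocation order used by
 * dp_soc_tx_desc_sw_pools_alloc() further below (static -> ext -> TSO
 * on alloc, TSO -> ext -> static on free):
 */
#if 0
	/* attach */
	dp_soc_tx_desc_sw_pools_alloc(soc);
	dp_soc_tx_desc_sw_pools_init(soc);

	/* detach */
	dp_soc_tx_desc_sw_pools_deinit(soc);
	dp_soc_tx_desc_sw_pools_free(soc);
#endif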
/**
 * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
 * @soc: core txrx main context
 *
 * This function de-initializes all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
	dp_tx_deinit_static_pools(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
					 uint8_t num_pool,
					 uint16_t num_desc)
{
	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool alloc %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Initialize TSO descriptor pools
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
					uint8_t num_pool,
					uint16_t num_desc)
{
	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool init %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
 * @soc: core txrx main context
 *
 * This function allocates memory for following descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
		  __func__, num_pool, num_desc);

	if ((num_pool > MAX_TXDESC_POOLS) ||
	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
		goto fail1;

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail3;

	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_free(soc, num_pool);
fail2:
	dp_tx_delete_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}

/**
 * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
 * @soc: core txrx main context
 *
 * This function initializes the following TX descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail3;

	dp_tx_flow_control_init(soc);
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
fail2:
	dp_tx_deinit_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}
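/*
 * Note on the fail3/fail2/fail1 ladders above (illustrative): each
 * label undoes exactly the steps that succeeded before the failure,
 * in reverse order, which is the usual staged-init unwind pattern.
 * Shape of the pattern with hypothetical step/undo helpers:
 */
#if 0
	if (step_a(soc))
		goto fail1;		/* nothing to undo */
	if (step_b(soc))
		goto fail2;		/* undo step_a */
	if (step_c(soc))
		goto fail3;		/* undo step_b, then step_a */

	return QDF_STATUS_SUCCESS;

fail3:
	undo_b(soc);
fail2:
	undo_a(soc);
fail1:
	return QDF_STATUS_E_RESOURCES;
#endif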
/**
 * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *                      QDF_STATUS_E_FAILURE
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);

	return QDF_STATUS_SUCCESS;
}
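/*
 * Usage sketch (illustrative): when TSO descriptor attach is deferred
 * (wlan_cfg_is_tso_desc_attach_defer() above), the CDP-level pair
 * below is expected to bracket TSO enablement at runtime. The exact
 * trigger point is an assumption and is not defined in this file:
 */
#if 0
	if (dp_tso_soc_attach(cdp_soc) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	/* ... TSO traffic flows ... */

	dp_tso_soc_detach(cdp_soc);
#endif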