1 /* 2 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "htt.h" 20 #include "dp_htt.h" 21 #include "hal_hw_headers.h" 22 #include "dp_tx.h" 23 #include "dp_tx_desc.h" 24 #include "dp_peer.h" 25 #include "dp_types.h" 26 #include "hal_tx.h" 27 #include "qdf_mem.h" 28 #include "qdf_nbuf.h" 29 #include "qdf_net_types.h" 30 #include <wlan_cfg.h> 31 #if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO) 32 #include "if_meta_hdr.h" 33 #endif 34 #include "enet.h" 35 #include "dp_internal.h" 36 #ifdef FEATURE_WDS 37 #include "dp_txrx_wds.h" 38 #endif 39 #ifdef ATH_SUPPORT_IQUE 40 #include "dp_txrx_me.h" 41 #endif 42 43 44 /* TODO Add support in TSO */ 45 #define DP_DESC_NUM_FRAG(x) 0 46 47 /* disable TQM_BYPASS */ 48 #define TQM_BYPASS_WAR 0 49 50 /* invalid peer id for reinject*/ 51 #define DP_INVALID_PEER 0XFFFE 52 53 /*mapping between hal encrypt type and cdp_sec_type*/ 54 #define MAX_CDP_SEC_TYPE 12 55 static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = { 56 HAL_TX_ENCRYPT_TYPE_NO_CIPHER, 57 HAL_TX_ENCRYPT_TYPE_WEP_128, 58 HAL_TX_ENCRYPT_TYPE_WEP_104, 59 HAL_TX_ENCRYPT_TYPE_WEP_40, 60 HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC, 61 HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC, 62 HAL_TX_ENCRYPT_TYPE_AES_CCMP_128, 63 HAL_TX_ENCRYPT_TYPE_WAPI, 64 HAL_TX_ENCRYPT_TYPE_AES_CCMP_256, 65 HAL_TX_ENCRYPT_TYPE_AES_GCMP_128, 66 HAL_TX_ENCRYPT_TYPE_AES_GCMP_256, 67 HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4}; 68 69 #ifdef QCA_TX_LIMIT_CHECK 70 /** 71 * dp_tx_limit_check - Check if allocated tx descriptors reached 72 * soc max limit and pdev max limit 73 * @vdev: DP vdev handle 74 * 75 * Return: true if allocated tx descriptors reached max configured value, else 76 * false 77 */ 78 static inline bool 79 dp_tx_limit_check(struct dp_vdev *vdev) 80 { 81 struct dp_pdev *pdev = vdev->pdev; 82 struct dp_soc *soc = pdev->soc; 83 84 if (qdf_atomic_read(&soc->num_tx_outstanding) >= 85 soc->num_tx_allowed) { 86 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 87 "%s: queued packets are more than max tx, drop the frame", 88 __func__); 89 DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1); 90 return true; 91 } 92 93 if (qdf_atomic_read(&pdev->num_tx_outstanding) >= 94 pdev->num_tx_allowed) { 95 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 96 "%s: queued packets are more than max tx, drop the frame", 97 __func__); 98 DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1); 99 return true; 100 } 101 return false; 102 } 103 104 /** 105 * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc 106 * @vdev: DP pdev handle 107 * 108 * Return: void 109 */ 110 static inline void 111 dp_tx_outstanding_inc(struct dp_pdev *pdev) 112 { 113 struct dp_soc *soc = pdev->soc; 114 115 qdf_atomic_inc(&pdev->num_tx_outstanding); 116 qdf_atomic_inc(&soc->num_tx_outstanding); 117 } 118 119 /** 120 * dp_tx_outstanding__dec - Decrement outstanding tx desc values on pdev and soc 121 * @vdev: DP pdev handle 122 * 123 * Return: void 124 */ 125 static inline void 126 dp_tx_outstanding_dec(struct dp_pdev *pdev) 127 { 128 struct dp_soc *soc = pdev->soc; 129 130 qdf_atomic_dec(&pdev->num_tx_outstanding); 131 qdf_atomic_dec(&soc->num_tx_outstanding); 132 } 133 134 #else //QCA_TX_LIMIT_CHECK 135 static inline bool 136 dp_tx_limit_check(struct dp_vdev *vdev) 137 { 138 return false; 139 } 140 141 static inline void 142 dp_tx_outstanding_inc(struct dp_pdev *pdev) 143 { 144 qdf_atomic_inc(&pdev->num_tx_outstanding); 145 } 146 147 static inline void 148 dp_tx_outstanding_dec(struct dp_pdev *pdev) 149 { 150 qdf_atomic_dec(&pdev->num_tx_outstanding); 151 } 152 #endif //QCA_TX_LIMIT_CHECK 153 154 #if defined(FEATURE_TSO) 155 /** 156 * dp_tx_tso_unmap_segment() - Unmap TSO segment 157 * 158 * @soc - core txrx main context 159 * @seg_desc - tso segment descriptor 160 * @num_seg_desc - tso number segment descriptor 161 */ 162 static void dp_tx_tso_unmap_segment( 163 struct dp_soc *soc, 164 struct qdf_tso_seg_elem_t *seg_desc, 165 struct qdf_tso_num_seg_elem_t *num_seg_desc) 166 { 167 TSO_DEBUG("%s: Unmap the tso segment", __func__); 168 if (qdf_unlikely(!seg_desc)) { 169 DP_TRACE(ERROR, "%s %d TSO desc is NULL!", 170 __func__, __LINE__); 171 qdf_assert(0); 172 } else if (qdf_unlikely(!num_seg_desc)) { 173 DP_TRACE(ERROR, "%s %d TSO num desc is NULL!", 174 __func__, __LINE__); 175 qdf_assert(0); 176 } else { 177 bool is_last_seg; 178 /* no tso segment left to do dma unmap */ 179 if (num_seg_desc->num_seg.tso_cmn_num_seg < 1) 180 return; 181 182 is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ? 183 true : false; 184 qdf_nbuf_unmap_tso_segment(soc->osdev, 185 seg_desc, is_last_seg); 186 num_seg_desc->num_seg.tso_cmn_num_seg--; 187 } 188 } 189 190 /** 191 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg 192 * back to the freelist 193 * 194 * @soc - soc device handle 195 * @tx_desc - Tx software descriptor 196 */ 197 static void dp_tx_tso_desc_release(struct dp_soc *soc, 198 struct dp_tx_desc_s *tx_desc) 199 { 200 TSO_DEBUG("%s: Free the tso descriptor", __func__); 201 if (qdf_unlikely(!tx_desc->tso_desc)) { 202 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 203 "%s %d TSO desc is NULL!", 204 __func__, __LINE__); 205 qdf_assert(0); 206 } else if (qdf_unlikely(!tx_desc->tso_num_desc)) { 207 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 208 "%s %d TSO num desc is NULL!", 209 __func__, __LINE__); 210 qdf_assert(0); 211 } else { 212 struct qdf_tso_num_seg_elem_t *tso_num_desc = 213 (struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc; 214 215 /* Add the tso num segment into the free list */ 216 if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) { 217 dp_tso_num_seg_free(soc, tx_desc->pool_id, 218 tx_desc->tso_num_desc); 219 tx_desc->tso_num_desc = NULL; 220 DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1); 221 } 222 223 /* Add the tso segment into the free list*/ 224 dp_tx_tso_desc_free(soc, 225 tx_desc->pool_id, tx_desc->tso_desc); 226 tx_desc->tso_desc = NULL; 227 } 228 } 229 #else 230 static void dp_tx_tso_unmap_segment( 231 struct dp_soc *soc, 232 struct qdf_tso_seg_elem_t *seg_desc, 233 struct qdf_tso_num_seg_elem_t *num_seg_desc) 234 235 { 236 } 237 238 static void dp_tx_tso_desc_release(struct dp_soc *soc, 239 struct dp_tx_desc_s *tx_desc) 240 { 241 } 242 #endif 243 /** 244 * dp_tx_desc_release() - Release Tx Descriptor 245 * @tx_desc : Tx Descriptor 246 * @desc_pool_id: Descriptor Pool ID 247 * 248 * Deallocate all resources attached to Tx descriptor and free the Tx 249 * descriptor. 250 * 251 * Return: 252 */ 253 static void 254 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id) 255 { 256 struct dp_pdev *pdev = tx_desc->pdev; 257 struct dp_soc *soc; 258 uint8_t comp_status = 0; 259 260 qdf_assert(pdev); 261 262 soc = pdev->soc; 263 264 if (tx_desc->frm_type == dp_tx_frm_tso) 265 dp_tx_tso_desc_release(soc, tx_desc); 266 267 if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) 268 dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id); 269 270 if (tx_desc->flags & DP_TX_DESC_FLAG_ME) 271 dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer); 272 273 dp_tx_outstanding_dec(pdev); 274 275 if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) 276 qdf_atomic_dec(&pdev->num_tx_exception); 277 278 if (HAL_TX_COMP_RELEASE_SOURCE_TQM == 279 hal_tx_comp_get_buffer_source(&tx_desc->comp)) 280 comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp, 281 soc->hal_soc); 282 else 283 comp_status = HAL_TX_COMP_RELEASE_REASON_FW; 284 285 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 286 "Tx Completion Release desc %d status %d outstanding %d", 287 tx_desc->id, comp_status, 288 qdf_atomic_read(&pdev->num_tx_outstanding)); 289 290 dp_tx_desc_free(soc, tx_desc, desc_pool_id); 291 return; 292 } 293 294 /** 295 * dp_tx_htt_metadata_prepare() - Prepare HTT metadata for special frames 296 * @vdev: DP vdev Handle 297 * @nbuf: skb 298 * @msdu_info: msdu_info required to create HTT metadata 299 * 300 * Prepares and fills HTT metadata in the frame pre-header for special frames 301 * that should be transmitted using varying transmit parameters. 302 * There are 2 VDEV modes that currently needs this special metadata - 303 * 1) Mesh Mode 304 * 2) DSRC Mode 305 * 306 * Return: HTT metadata size 307 * 308 */ 309 static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 310 struct dp_tx_msdu_info_s *msdu_info) 311 { 312 uint32_t *meta_data = msdu_info->meta_data; 313 struct htt_tx_msdu_desc_ext2_t *desc_ext = 314 (struct htt_tx_msdu_desc_ext2_t *) meta_data; 315 316 uint8_t htt_desc_size; 317 318 /* Size rounded of multiple of 8 bytes */ 319 uint8_t htt_desc_size_aligned; 320 321 uint8_t *hdr = NULL; 322 323 /* 324 * Metadata - HTT MSDU Extension header 325 */ 326 htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t); 327 htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7; 328 329 if (vdev->mesh_vdev || msdu_info->is_tx_sniffer || 330 HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info-> 331 meta_data[0])) { 332 if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < 333 htt_desc_size_aligned)) { 334 nbuf = qdf_nbuf_realloc_headroom(nbuf, 335 htt_desc_size_aligned); 336 if (!nbuf) { 337 /* 338 * qdf_nbuf_realloc_headroom won't do skb_clone 339 * as skb_realloc_headroom does. so, no free is 340 * needed here. 341 */ 342 DP_STATS_INC(vdev, 343 tx_i.dropped.headroom_insufficient, 344 1); 345 qdf_print(" %s[%d] skb_realloc_headroom failed", 346 __func__, __LINE__); 347 return 0; 348 } 349 } 350 /* Fill and add HTT metaheader */ 351 hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned); 352 if (!hdr) { 353 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 354 "Error in filling HTT metadata"); 355 356 return 0; 357 } 358 qdf_mem_copy(hdr, desc_ext, htt_desc_size); 359 360 } else if (vdev->opmode == wlan_op_mode_ocb) { 361 /* Todo - Add support for DSRC */ 362 } 363 364 return htt_desc_size_aligned; 365 } 366 367 /** 368 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO 369 * @tso_seg: TSO segment to process 370 * @ext_desc: Pointer to MSDU extension descriptor 371 * 372 * Return: void 373 */ 374 #if defined(FEATURE_TSO) 375 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg, 376 void *ext_desc) 377 { 378 uint8_t num_frag; 379 uint32_t tso_flags; 380 381 /* 382 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN), 383 * tcp_flag_mask 384 * 385 * Checksum enable flags are set in TCL descriptor and not in Extension 386 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor) 387 */ 388 tso_flags = *(uint32_t *) &tso_seg->tso_flags; 389 390 hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags); 391 392 hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len, 393 tso_seg->tso_flags.ip_len); 394 395 hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num); 396 hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id); 397 398 399 for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) { 400 uint32_t lo = 0; 401 uint32_t hi = 0; 402 403 qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) && 404 (tso_seg->tso_frags[num_frag].length)); 405 406 qdf_dmaaddr_to_32s( 407 tso_seg->tso_frags[num_frag].paddr, &lo, &hi); 408 hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi, 409 tso_seg->tso_frags[num_frag].length); 410 } 411 412 return; 413 } 414 #else 415 static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg, 416 void *ext_desc) 417 { 418 return; 419 } 420 #endif 421 422 #if defined(FEATURE_TSO) 423 /** 424 * dp_tx_free_tso_seg_list() - Loop through the tso segments 425 * allocated and free them 426 * 427 * @soc: soc handle 428 * @free_seg: list of tso segments 429 * @msdu_info: msdu descriptor 430 * 431 * Return - void 432 */ 433 static void dp_tx_free_tso_seg_list( 434 struct dp_soc *soc, 435 struct qdf_tso_seg_elem_t *free_seg, 436 struct dp_tx_msdu_info_s *msdu_info) 437 { 438 struct qdf_tso_seg_elem_t *next_seg; 439 440 while (free_seg) { 441 next_seg = free_seg->next; 442 dp_tx_tso_desc_free(soc, 443 msdu_info->tx_queue.desc_pool_id, 444 free_seg); 445 free_seg = next_seg; 446 } 447 } 448 449 /** 450 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments 451 * allocated and free them 452 * 453 * @soc: soc handle 454 * @free_num_seg: list of tso number segments 455 * @msdu_info: msdu descriptor 456 * Return - void 457 */ 458 static void dp_tx_free_tso_num_seg_list( 459 struct dp_soc *soc, 460 struct qdf_tso_num_seg_elem_t *free_num_seg, 461 struct dp_tx_msdu_info_s *msdu_info) 462 { 463 struct qdf_tso_num_seg_elem_t *next_num_seg; 464 465 while (free_num_seg) { 466 next_num_seg = free_num_seg->next; 467 dp_tso_num_seg_free(soc, 468 msdu_info->tx_queue.desc_pool_id, 469 free_num_seg); 470 free_num_seg = next_num_seg; 471 } 472 } 473 474 /** 475 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments 476 * do dma unmap for each segment 477 * 478 * @soc: soc handle 479 * @free_seg: list of tso segments 480 * @num_seg_desc: tso number segment descriptor 481 * 482 * Return - void 483 */ 484 static void dp_tx_unmap_tso_seg_list( 485 struct dp_soc *soc, 486 struct qdf_tso_seg_elem_t *free_seg, 487 struct qdf_tso_num_seg_elem_t *num_seg_desc) 488 { 489 struct qdf_tso_seg_elem_t *next_seg; 490 491 if (qdf_unlikely(!num_seg_desc)) { 492 DP_TRACE(ERROR, "TSO number seg desc is NULL!"); 493 return; 494 } 495 496 while (free_seg) { 497 next_seg = free_seg->next; 498 dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc); 499 free_seg = next_seg; 500 } 501 } 502 503 #ifdef FEATURE_TSO_STATS 504 /** 505 * dp_tso_get_stats_idx: Retrieve the tso packet id 506 * @pdev - pdev handle 507 * 508 * Return: id 509 */ 510 static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev) 511 { 512 uint32_t stats_idx; 513 514 stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx)) 515 % CDP_MAX_TSO_PACKETS); 516 return stats_idx; 517 } 518 #else 519 static int dp_tso_get_stats_idx(struct dp_pdev *pdev) 520 { 521 return 0; 522 } 523 #endif /* FEATURE_TSO_STATS */ 524 525 /** 526 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any, 527 * free the tso segments descriptor and 528 * tso num segments descriptor 529 * 530 * @soc: soc handle 531 * @msdu_info: msdu descriptor 532 * @tso_seg_unmap: flag to show if dma unmap is necessary 533 * 534 * Return - void 535 */ 536 static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc, 537 struct dp_tx_msdu_info_s *msdu_info, 538 bool tso_seg_unmap) 539 { 540 struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info; 541 struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list; 542 struct qdf_tso_num_seg_elem_t *tso_num_desc = 543 tso_info->tso_num_seg_list; 544 545 /* do dma unmap for each segment */ 546 if (tso_seg_unmap) 547 dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc); 548 549 /* free all tso number segment descriptor though looks only have 1 */ 550 dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info); 551 552 /* free all tso segment descriptor */ 553 dp_tx_free_tso_seg_list(soc, free_seg, msdu_info); 554 } 555 556 /** 557 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info 558 * @vdev: virtual device handle 559 * @msdu: network buffer 560 * @msdu_info: meta data associated with the msdu 561 * 562 * Return: QDF_STATUS_SUCCESS success 563 */ 564 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev, 565 qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info) 566 { 567 struct qdf_tso_seg_elem_t *tso_seg; 568 int num_seg = qdf_nbuf_get_tso_num_seg(msdu); 569 struct dp_soc *soc = vdev->pdev->soc; 570 struct dp_pdev *pdev = vdev->pdev; 571 struct qdf_tso_info_t *tso_info; 572 struct qdf_tso_num_seg_elem_t *tso_num_seg; 573 tso_info = &msdu_info->u.tso_info; 574 tso_info->curr_seg = NULL; 575 tso_info->tso_seg_list = NULL; 576 tso_info->num_segs = num_seg; 577 msdu_info->frm_type = dp_tx_frm_tso; 578 tso_info->tso_num_seg_list = NULL; 579 580 TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg); 581 582 while (num_seg) { 583 tso_seg = dp_tx_tso_desc_alloc( 584 soc, msdu_info->tx_queue.desc_pool_id); 585 if (tso_seg) { 586 tso_seg->next = tso_info->tso_seg_list; 587 tso_info->tso_seg_list = tso_seg; 588 num_seg--; 589 } else { 590 dp_err_rl("Failed to alloc tso seg desc"); 591 DP_STATS_INC_PKT(vdev->pdev, 592 tso_stats.tso_no_mem_dropped, 1, 593 qdf_nbuf_len(msdu)); 594 dp_tx_free_remaining_tso_desc(soc, msdu_info, false); 595 596 return QDF_STATUS_E_NOMEM; 597 } 598 } 599 600 TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg); 601 602 tso_num_seg = dp_tso_num_seg_alloc(soc, 603 msdu_info->tx_queue.desc_pool_id); 604 605 if (tso_num_seg) { 606 tso_num_seg->next = tso_info->tso_num_seg_list; 607 tso_info->tso_num_seg_list = tso_num_seg; 608 } else { 609 DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc", 610 __func__); 611 dp_tx_free_remaining_tso_desc(soc, msdu_info, false); 612 613 return QDF_STATUS_E_NOMEM; 614 } 615 616 msdu_info->num_seg = 617 qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info); 618 619 TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__, 620 msdu_info->num_seg); 621 622 if (!(msdu_info->num_seg)) { 623 /* 624 * Free allocated TSO seg desc and number seg desc, 625 * do unmap for segments if dma map has done. 626 */ 627 DP_TRACE(ERROR, "%s: Failed to get tso info", __func__); 628 dp_tx_free_remaining_tso_desc(soc, msdu_info, true); 629 630 return QDF_STATUS_E_INVAL; 631 } 632 633 tso_info->curr_seg = tso_info->tso_seg_list; 634 635 tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev); 636 dp_tso_packet_update(pdev, tso_info->msdu_stats_idx, 637 msdu, msdu_info->num_seg); 638 dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list, 639 tso_info->msdu_stats_idx); 640 dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg); 641 return QDF_STATUS_SUCCESS; 642 } 643 #else 644 static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev, 645 qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info) 646 { 647 return QDF_STATUS_E_NOMEM; 648 } 649 #endif 650 651 /** 652 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor 653 * @vdev: DP Vdev handle 654 * @msdu_info: MSDU info to be setup in MSDU extension descriptor 655 * @desc_pool_id: Descriptor Pool ID 656 * 657 * Return: 658 */ 659 static 660 struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev, 661 struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id) 662 { 663 uint8_t i; 664 uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA]; 665 struct dp_tx_seg_info_s *seg_info; 666 struct dp_tx_ext_desc_elem_s *msdu_ext_desc; 667 struct dp_soc *soc = vdev->pdev->soc; 668 669 /* Allocate an extension descriptor */ 670 msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id); 671 qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA); 672 673 if (!msdu_ext_desc) { 674 DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1); 675 return NULL; 676 } 677 678 if (msdu_info->exception_fw && 679 qdf_unlikely(vdev->mesh_vdev)) { 680 qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES], 681 &msdu_info->meta_data[0], 682 sizeof(struct htt_tx_msdu_desc_ext2_t)); 683 qdf_atomic_inc(&vdev->pdev->num_tx_exception); 684 } 685 686 switch (msdu_info->frm_type) { 687 case dp_tx_frm_sg: 688 case dp_tx_frm_me: 689 case dp_tx_frm_raw: 690 seg_info = msdu_info->u.sg_info.curr_seg; 691 /* Update the buffer pointers in MSDU Extension Descriptor */ 692 for (i = 0; i < seg_info->frag_cnt; i++) { 693 hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i, 694 seg_info->frags[i].paddr_lo, 695 seg_info->frags[i].paddr_hi, 696 seg_info->frags[i].len); 697 } 698 699 break; 700 701 case dp_tx_frm_tso: 702 dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg, 703 &cached_ext_desc[0]); 704 break; 705 706 707 default: 708 break; 709 } 710 711 QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 712 cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA); 713 714 hal_tx_ext_desc_sync(&cached_ext_desc[0], 715 msdu_ext_desc->vaddr); 716 717 return msdu_ext_desc; 718 } 719 720 /** 721 * dp_tx_trace_pkt() - Trace TX packet at DP layer 722 * 723 * @skb: skb to be traced 724 * @msdu_id: msdu_id of the packet 725 * @vdev_id: vdev_id of the packet 726 * 727 * Return: None 728 */ 729 static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id, 730 uint8_t vdev_id) 731 { 732 QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK; 733 QDF_NBUF_CB_TX_DP_TRACE(skb) = 1; 734 DPTRACE(qdf_dp_trace_ptr(skb, 735 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD, 736 QDF_TRACE_DEFAULT_PDEV_ID, 737 qdf_nbuf_data_addr(skb), 738 sizeof(qdf_nbuf_data(skb)), 739 msdu_id, vdev_id)); 740 741 qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID); 742 743 DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID, 744 QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD, 745 msdu_id, QDF_TX)); 746 } 747 748 /** 749 * dp_tx_desc_prepare_single - Allocate and prepare Tx descriptor 750 * @vdev: DP vdev handle 751 * @nbuf: skb 752 * @desc_pool_id: Descriptor pool ID 753 * @meta_data: Metadata to the fw 754 * @tx_exc_metadata: Handle that holds exception path metadata 755 * Allocate and prepare Tx descriptor with msdu information. 756 * 757 * Return: Pointer to Tx Descriptor on success, 758 * NULL on failure 759 */ 760 static 761 struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev, 762 qdf_nbuf_t nbuf, uint8_t desc_pool_id, 763 struct dp_tx_msdu_info_s *msdu_info, 764 struct cdp_tx_exception_metadata *tx_exc_metadata) 765 { 766 uint8_t align_pad; 767 uint8_t is_exception = 0; 768 uint8_t htt_hdr_size; 769 qdf_ether_header_t *eh; 770 struct dp_tx_desc_s *tx_desc; 771 struct dp_pdev *pdev = vdev->pdev; 772 struct dp_soc *soc = pdev->soc; 773 774 if (dp_tx_limit_check(vdev)) 775 return NULL; 776 777 /* Allocate software Tx descriptor */ 778 tx_desc = dp_tx_desc_alloc(soc, desc_pool_id); 779 if (qdf_unlikely(!tx_desc)) { 780 DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1); 781 return NULL; 782 } 783 784 dp_tx_outstanding_inc(pdev); 785 786 /* Initialize the SW tx descriptor */ 787 tx_desc->nbuf = nbuf; 788 tx_desc->frm_type = dp_tx_frm_std; 789 tx_desc->tx_encap_type = ((tx_exc_metadata && 790 (tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ? 791 tx_exc_metadata->tx_encap_type : vdev->tx_encap_type); 792 tx_desc->vdev = vdev; 793 tx_desc->pdev = pdev; 794 tx_desc->msdu_ext_desc = NULL; 795 tx_desc->pkt_offset = 0; 796 797 dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id); 798 799 if (qdf_unlikely(vdev->multipass_en)) { 800 if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info)) 801 goto failure; 802 } 803 804 /* 805 * For special modes (vdev_type == ocb or mesh), data frames should be 806 * transmitted using varying transmit parameters (tx spec) which include 807 * transmit rate, power, priority, channel, channel bandwidth , nss etc. 808 * These are filled in HTT MSDU descriptor and sent in frame pre-header. 809 * These frames are sent as exception packets to firmware. 810 * 811 * HW requirement is that metadata should always point to a 812 * 8-byte aligned address. So we add alignment pad to start of buffer. 813 * HTT Metadata should be ensured to be multiple of 8-bytes, 814 * to get 8-byte aligned start address along with align_pad added 815 * 816 * |-----------------------------| 817 * | | 818 * |-----------------------------| <-----Buffer Pointer Address given 819 * | | ^ in HW descriptor (aligned) 820 * | HTT Metadata | | 821 * | | | 822 * | | | Packet Offset given in descriptor 823 * | | | 824 * |-----------------------------| | 825 * | Alignment Pad | v 826 * |-----------------------------| <----- Actual buffer start address 827 * | SKB Data | (Unaligned) 828 * | | 829 * | | 830 * | | 831 * | | 832 * | | 833 * |-----------------------------| 834 */ 835 if (qdf_unlikely((msdu_info->exception_fw)) || 836 (vdev->opmode == wlan_op_mode_ocb) || 837 (tx_exc_metadata && 838 tx_exc_metadata->is_tx_sniffer)) { 839 align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7; 840 841 if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) { 842 DP_STATS_INC(vdev, 843 tx_i.dropped.headroom_insufficient, 1); 844 goto failure; 845 } 846 847 if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) { 848 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 849 "qdf_nbuf_push_head failed"); 850 goto failure; 851 } 852 853 htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf, 854 msdu_info); 855 if (htt_hdr_size == 0) 856 goto failure; 857 tx_desc->pkt_offset = align_pad + htt_hdr_size; 858 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; 859 is_exception = 1; 860 } 861 862 if (qdf_unlikely(QDF_STATUS_SUCCESS != 863 qdf_nbuf_map(soc->osdev, nbuf, 864 QDF_DMA_TO_DEVICE))) { 865 /* Handle failure */ 866 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 867 "qdf_nbuf_map failed"); 868 DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1); 869 goto failure; 870 } 871 872 if (qdf_unlikely(vdev->nawds_enabled)) { 873 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 874 if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) { 875 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; 876 is_exception = 1; 877 } 878 } 879 880 #if !TQM_BYPASS_WAR 881 if (is_exception || tx_exc_metadata) 882 #endif 883 { 884 /* Temporary WAR due to TQM VP issues */ 885 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; 886 qdf_atomic_inc(&pdev->num_tx_exception); 887 } 888 889 return tx_desc; 890 891 failure: 892 dp_tx_desc_release(tx_desc, desc_pool_id); 893 return NULL; 894 } 895 896 /** 897 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame 898 * @vdev: DP vdev handle 899 * @nbuf: skb 900 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor 901 * @desc_pool_id : Descriptor Pool ID 902 * 903 * Allocate and prepare Tx descriptor with msdu and fragment descritor 904 * information. For frames wth fragments, allocate and prepare 905 * an MSDU extension descriptor 906 * 907 * Return: Pointer to Tx Descriptor on success, 908 * NULL on failure 909 */ 910 static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev, 911 qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info, 912 uint8_t desc_pool_id) 913 { 914 struct dp_tx_desc_s *tx_desc; 915 struct dp_tx_ext_desc_elem_s *msdu_ext_desc; 916 struct dp_pdev *pdev = vdev->pdev; 917 struct dp_soc *soc = pdev->soc; 918 919 if (dp_tx_limit_check(vdev)) 920 return NULL; 921 922 /* Allocate software Tx descriptor */ 923 tx_desc = dp_tx_desc_alloc(soc, desc_pool_id); 924 if (!tx_desc) { 925 DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1); 926 return NULL; 927 } 928 929 dp_tx_outstanding_inc(pdev); 930 931 /* Initialize the SW tx descriptor */ 932 tx_desc->nbuf = nbuf; 933 tx_desc->frm_type = msdu_info->frm_type; 934 tx_desc->tx_encap_type = vdev->tx_encap_type; 935 tx_desc->vdev = vdev; 936 tx_desc->pdev = pdev; 937 tx_desc->pkt_offset = 0; 938 tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg; 939 tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list; 940 941 dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id); 942 943 /* Handle scattered frames - TSO/SG/ME */ 944 /* Allocate and prepare an extension descriptor for scattered frames */ 945 msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id); 946 if (!msdu_ext_desc) { 947 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 948 "%s Tx Extension Descriptor Alloc Fail", 949 __func__); 950 goto failure; 951 } 952 953 #if TQM_BYPASS_WAR 954 /* Temporary WAR due to TQM VP issues */ 955 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; 956 qdf_atomic_inc(&pdev->num_tx_exception); 957 #endif 958 if (qdf_unlikely(msdu_info->exception_fw)) 959 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; 960 961 tx_desc->msdu_ext_desc = msdu_ext_desc; 962 tx_desc->flags |= DP_TX_DESC_FLAG_FRAG; 963 964 return tx_desc; 965 failure: 966 dp_tx_desc_release(tx_desc, desc_pool_id); 967 return NULL; 968 } 969 970 /** 971 * dp_tx_prepare_raw() - Prepare RAW packet TX 972 * @vdev: DP vdev handle 973 * @nbuf: buffer pointer 974 * @seg_info: Pointer to Segment info Descriptor to be prepared 975 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension 976 * descriptor 977 * 978 * Return: 979 */ 980 static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 981 struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info) 982 { 983 qdf_nbuf_t curr_nbuf = NULL; 984 uint16_t total_len = 0; 985 qdf_dma_addr_t paddr; 986 int32_t i; 987 int32_t mapped_buf_num = 0; 988 989 struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info; 990 qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data; 991 992 DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf)); 993 994 /* Continue only if frames are of DATA type */ 995 if (!DP_FRAME_IS_DATA(qos_wh)) { 996 DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1); 997 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 998 "Pkt. recd is of not data type"); 999 goto error; 1000 } 1001 /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */ 1002 if (vdev->raw_mode_war && 1003 (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) && 1004 (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU)) 1005 qos_wh->i_fc[1] |= IEEE80211_FC1_WEP; 1006 1007 for (curr_nbuf = nbuf, i = 0; curr_nbuf; 1008 curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) { 1009 1010 if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf, 1011 QDF_DMA_TO_DEVICE)) { 1012 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1013 "%s dma map error ", __func__); 1014 DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1); 1015 mapped_buf_num = i; 1016 goto error; 1017 } 1018 1019 paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0); 1020 seg_info->frags[i].paddr_lo = paddr; 1021 seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32); 1022 seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf); 1023 seg_info->frags[i].vaddr = (void *) curr_nbuf; 1024 total_len += qdf_nbuf_len(curr_nbuf); 1025 } 1026 1027 seg_info->frag_cnt = i; 1028 seg_info->total_len = total_len; 1029 seg_info->next = NULL; 1030 1031 sg_info->curr_seg = seg_info; 1032 1033 msdu_info->frm_type = dp_tx_frm_raw; 1034 msdu_info->num_seg = 1; 1035 1036 return nbuf; 1037 1038 error: 1039 i = 0; 1040 while (nbuf) { 1041 curr_nbuf = nbuf; 1042 if (i < mapped_buf_num) { 1043 qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE); 1044 i++; 1045 } 1046 nbuf = qdf_nbuf_next(nbuf); 1047 qdf_nbuf_free(curr_nbuf); 1048 } 1049 return NULL; 1050 1051 } 1052 1053 /** 1054 * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame. 1055 * @soc: DP soc handle 1056 * @nbuf: Buffer pointer 1057 * 1058 * unmap the chain of nbufs that belong to this RAW frame. 1059 * 1060 * Return: None 1061 */ 1062 static void dp_tx_raw_prepare_unset(struct dp_soc *soc, 1063 qdf_nbuf_t nbuf) 1064 { 1065 qdf_nbuf_t cur_nbuf = nbuf; 1066 1067 do { 1068 qdf_nbuf_unmap(soc->osdev, cur_nbuf, QDF_DMA_TO_DEVICE); 1069 cur_nbuf = qdf_nbuf_next(cur_nbuf); 1070 } while (cur_nbuf); 1071 } 1072 1073 /** 1074 * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit 1075 * @soc: DP Soc Handle 1076 * @vdev: DP vdev handle 1077 * @tx_desc: Tx Descriptor Handle 1078 * @tid: TID from HLOS for overriding default DSCP-TID mapping 1079 * @fw_metadata: Metadata to send to Target Firmware along with frame 1080 * @ring_id: Ring ID of H/W ring to which we enqueue the packet 1081 * @tx_exc_metadata: Handle that holds exception path meta data 1082 * 1083 * Gets the next free TCL HW DMA descriptor and sets up required parameters 1084 * from software Tx descriptor 1085 * 1086 * Return: 1087 */ 1088 static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev, 1089 struct dp_tx_desc_s *tx_desc, uint8_t tid, 1090 uint16_t fw_metadata, uint8_t ring_id, 1091 struct cdp_tx_exception_metadata 1092 *tx_exc_metadata) 1093 { 1094 uint8_t type; 1095 uint16_t length; 1096 void *hal_tx_desc, *hal_tx_desc_cached; 1097 qdf_dma_addr_t dma_addr; 1098 uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES]; 1099 1100 enum cdp_sec_type sec_type = ((tx_exc_metadata && 1101 tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ? 1102 tx_exc_metadata->sec_type : vdev->sec_type); 1103 1104 /* Return Buffer Manager ID */ 1105 uint8_t bm_id = ring_id; 1106 hal_ring_handle_t hal_ring_hdl = soc->tcl_data_ring[ring_id].hal_srng; 1107 1108 hal_tx_desc_cached = (void *) cached_desc; 1109 qdf_mem_zero(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES); 1110 1111 if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) { 1112 length = HAL_TX_EXT_DESC_WITH_META_DATA; 1113 type = HAL_TX_BUF_TYPE_EXT_DESC; 1114 dma_addr = tx_desc->msdu_ext_desc->paddr; 1115 } else { 1116 length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset; 1117 type = HAL_TX_BUF_TYPE_BUFFER; 1118 dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf); 1119 } 1120 1121 qdf_assert_always(dma_addr); 1122 1123 hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata); 1124 hal_tx_desc_set_buf_addr(hal_tx_desc_cached, 1125 dma_addr, bm_id, tx_desc->id, 1126 type, soc->hal_soc); 1127 1128 if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) 1129 return QDF_STATUS_E_RESOURCES; 1130 1131 hal_tx_desc_set_buf_length(hal_tx_desc_cached, length); 1132 hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset); 1133 hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type); 1134 hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached, 1135 vdev->pdev->lmac_id); 1136 hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached, 1137 vdev->search_type); 1138 hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached, 1139 vdev->bss_ast_idx); 1140 hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached, 1141 vdev->dscp_tid_map_id); 1142 hal_tx_desc_set_encrypt_type(hal_tx_desc_cached, 1143 sec_type_map[sec_type]); 1144 hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached, 1145 (vdev->bss_ast_hash & 0xF)); 1146 1147 dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u", 1148 length, type, (uint64_t)dma_addr, 1149 tx_desc->pkt_offset, tx_desc->id); 1150 1151 if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) 1152 hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1); 1153 1154 hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached, 1155 vdev->hal_desc_addr_search_flags); 1156 1157 /* verify checksum offload configuration*/ 1158 if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) && 1159 ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP) 1160 || qdf_nbuf_is_tso(tx_desc->nbuf))) { 1161 hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1); 1162 hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1); 1163 } 1164 1165 if (tid != HTT_TX_EXT_TID_INVALID) 1166 hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid); 1167 1168 if (tx_desc->flags & DP_TX_DESC_FLAG_MESH) 1169 hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1); 1170 1171 1172 tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get()); 1173 /* Sync cached descriptor with HW */ 1174 hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl); 1175 1176 if (!hal_tx_desc) { 1177 dp_verbose_debug("TCL ring full ring_id:%d", ring_id); 1178 DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1); 1179 DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1); 1180 return QDF_STATUS_E_RESOURCES; 1181 } 1182 1183 tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX; 1184 1185 hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc); 1186 DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length); 1187 1188 return QDF_STATUS_SUCCESS; 1189 } 1190 1191 1192 /** 1193 * dp_cce_classify() - Classify the frame based on CCE rules 1194 * @vdev: DP vdev handle 1195 * @nbuf: skb 1196 * 1197 * Classify frames based on CCE rules 1198 * Return: bool( true if classified, 1199 * else false) 1200 */ 1201 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 1202 { 1203 qdf_ether_header_t *eh = NULL; 1204 uint16_t ether_type; 1205 qdf_llc_t *llcHdr; 1206 qdf_nbuf_t nbuf_clone = NULL; 1207 qdf_dot3_qosframe_t *qos_wh = NULL; 1208 1209 /* for mesh packets don't do any classification */ 1210 if (qdf_unlikely(vdev->mesh_vdev)) 1211 return false; 1212 1213 if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { 1214 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 1215 ether_type = eh->ether_type; 1216 llcHdr = (qdf_llc_t *)(nbuf->data + 1217 sizeof(qdf_ether_header_t)); 1218 } else { 1219 qos_wh = (qdf_dot3_qosframe_t *) nbuf->data; 1220 /* For encrypted packets don't do any classification */ 1221 if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP)) 1222 return false; 1223 1224 if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) { 1225 if (qdf_unlikely( 1226 qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS && 1227 qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) { 1228 1229 ether_type = *(uint16_t *)(nbuf->data 1230 + QDF_IEEE80211_4ADDR_HDR_LEN 1231 + sizeof(qdf_llc_t) 1232 - sizeof(ether_type)); 1233 llcHdr = (qdf_llc_t *)(nbuf->data + 1234 QDF_IEEE80211_4ADDR_HDR_LEN); 1235 } else { 1236 ether_type = *(uint16_t *)(nbuf->data 1237 + QDF_IEEE80211_3ADDR_HDR_LEN 1238 + sizeof(qdf_llc_t) 1239 - sizeof(ether_type)); 1240 llcHdr = (qdf_llc_t *)(nbuf->data + 1241 QDF_IEEE80211_3ADDR_HDR_LEN); 1242 } 1243 1244 if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr) 1245 && (ether_type == 1246 qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) { 1247 1248 DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1); 1249 return true; 1250 } 1251 } 1252 1253 return false; 1254 } 1255 1256 if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) { 1257 ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE + 1258 sizeof(*llcHdr)); 1259 nbuf_clone = qdf_nbuf_clone(nbuf); 1260 if (qdf_unlikely(nbuf_clone)) { 1261 qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr)); 1262 1263 if (ether_type == htons(ETHERTYPE_VLAN)) { 1264 qdf_nbuf_pull_head(nbuf_clone, 1265 sizeof(qdf_net_vlanhdr_t)); 1266 } 1267 } 1268 } else { 1269 if (ether_type == htons(ETHERTYPE_VLAN)) { 1270 nbuf_clone = qdf_nbuf_clone(nbuf); 1271 if (qdf_unlikely(nbuf_clone)) { 1272 qdf_nbuf_pull_head(nbuf_clone, 1273 sizeof(qdf_net_vlanhdr_t)); 1274 } 1275 } 1276 } 1277 1278 if (qdf_unlikely(nbuf_clone)) 1279 nbuf = nbuf_clone; 1280 1281 1282 if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) 1283 || qdf_nbuf_is_ipv4_arp_pkt(nbuf) 1284 || qdf_nbuf_is_ipv4_wapi_pkt(nbuf) 1285 || qdf_nbuf_is_ipv4_tdls_pkt(nbuf) 1286 || (qdf_nbuf_is_ipv4_pkt(nbuf) 1287 && qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) 1288 || (qdf_nbuf_is_ipv6_pkt(nbuf) && 1289 qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) { 1290 if (qdf_unlikely(nbuf_clone)) 1291 qdf_nbuf_free(nbuf_clone); 1292 return true; 1293 } 1294 1295 if (qdf_unlikely(nbuf_clone)) 1296 qdf_nbuf_free(nbuf_clone); 1297 1298 return false; 1299 } 1300 1301 /** 1302 * dp_tx_get_tid() - Obtain TID to be used for this frame 1303 * @vdev: DP vdev handle 1304 * @nbuf: skb 1305 * 1306 * Extract the DSCP or PCP information from frame and map into TID value. 1307 * 1308 * Return: void 1309 */ 1310 static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1311 struct dp_tx_msdu_info_s *msdu_info) 1312 { 1313 uint8_t tos = 0, dscp_tid_override = 0; 1314 uint8_t *hdr_ptr, *L3datap; 1315 uint8_t is_mcast = 0; 1316 qdf_ether_header_t *eh = NULL; 1317 qdf_ethervlan_header_t *evh = NULL; 1318 uint16_t ether_type; 1319 qdf_llc_t *llcHdr; 1320 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev; 1321 1322 DP_TX_TID_OVERRIDE(msdu_info, nbuf); 1323 if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { 1324 eh = (qdf_ether_header_t *)nbuf->data; 1325 hdr_ptr = eh->ether_dhost; 1326 L3datap = hdr_ptr + sizeof(qdf_ether_header_t); 1327 } else { 1328 qdf_dot3_qosframe_t *qos_wh = 1329 (qdf_dot3_qosframe_t *) nbuf->data; 1330 msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ? 1331 qos_wh->i_qos[0] & DP_QOS_TID : 0; 1332 return; 1333 } 1334 1335 is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr); 1336 ether_type = eh->ether_type; 1337 1338 llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t)); 1339 /* 1340 * Check if packet is dot3 or eth2 type. 1341 */ 1342 if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) { 1343 ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE + 1344 sizeof(*llcHdr)); 1345 1346 if (ether_type == htons(ETHERTYPE_VLAN)) { 1347 L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) + 1348 sizeof(*llcHdr); 1349 ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE 1350 + sizeof(*llcHdr) + 1351 sizeof(qdf_net_vlanhdr_t)); 1352 } else { 1353 L3datap = hdr_ptr + sizeof(qdf_ether_header_t) + 1354 sizeof(*llcHdr); 1355 } 1356 } else { 1357 if (ether_type == htons(ETHERTYPE_VLAN)) { 1358 evh = (qdf_ethervlan_header_t *) eh; 1359 ether_type = evh->ether_type; 1360 L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t); 1361 } 1362 } 1363 1364 /* 1365 * Find priority from IP TOS DSCP field 1366 */ 1367 if (qdf_nbuf_is_ipv4_pkt(nbuf)) { 1368 qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap; 1369 if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) { 1370 /* Only for unicast frames */ 1371 if (!is_mcast) { 1372 /* send it on VO queue */ 1373 msdu_info->tid = DP_VO_TID; 1374 } 1375 } else { 1376 /* 1377 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7 1378 * from TOS byte. 1379 */ 1380 tos = ip->ip_tos; 1381 dscp_tid_override = 1; 1382 1383 } 1384 } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) { 1385 /* TODO 1386 * use flowlabel 1387 *igmpmld cases to be handled in phase 2 1388 */ 1389 unsigned long ver_pri_flowlabel; 1390 unsigned long pri; 1391 ver_pri_flowlabel = *(unsigned long *) L3datap; 1392 pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >> 1393 DP_IPV6_PRIORITY_SHIFT; 1394 tos = pri; 1395 dscp_tid_override = 1; 1396 } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) 1397 msdu_info->tid = DP_VO_TID; 1398 else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) { 1399 /* Only for unicast frames */ 1400 if (!is_mcast) { 1401 /* send ucast arp on VO queue */ 1402 msdu_info->tid = DP_VO_TID; 1403 } 1404 } 1405 1406 /* 1407 * Assign all MCAST packets to BE 1408 */ 1409 if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { 1410 if (is_mcast) { 1411 tos = 0; 1412 dscp_tid_override = 1; 1413 } 1414 } 1415 1416 if (dscp_tid_override == 1) { 1417 tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK; 1418 msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos]; 1419 } 1420 1421 if (msdu_info->tid >= CDP_MAX_DATA_TIDS) 1422 msdu_info->tid = CDP_MAX_DATA_TIDS - 1; 1423 1424 return; 1425 } 1426 1427 /** 1428 * dp_tx_classify_tid() - Obtain TID to be used for this frame 1429 * @vdev: DP vdev handle 1430 * @nbuf: skb 1431 * 1432 * Software based TID classification is required when more than 2 DSCP-TID 1433 * mapping tables are needed. 1434 * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2. 1435 * 1436 * Return: void 1437 */ 1438 static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1439 struct dp_tx_msdu_info_s *msdu_info) 1440 { 1441 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev; 1442 1443 DP_TX_TID_OVERRIDE(msdu_info, nbuf); 1444 1445 if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map) 1446 return; 1447 1448 /* for mesh packets don't do any classification */ 1449 if (qdf_unlikely(vdev->mesh_vdev)) 1450 return; 1451 1452 dp_tx_get_tid(vdev, nbuf, msdu_info); 1453 } 1454 1455 #ifdef FEATURE_WLAN_TDLS 1456 /** 1457 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame 1458 * @tx_desc: TX descriptor 1459 * 1460 * Return: None 1461 */ 1462 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc) 1463 { 1464 if (tx_desc->vdev) { 1465 if (tx_desc->vdev->is_tdls_frame) { 1466 tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME; 1467 tx_desc->vdev->is_tdls_frame = false; 1468 } 1469 } 1470 } 1471 1472 /** 1473 * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer 1474 * @tx_desc: TX descriptor 1475 * @vdev: datapath vdev handle 1476 * 1477 * Return: None 1478 */ 1479 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc, 1480 struct dp_vdev *vdev) 1481 { 1482 struct hal_tx_completion_status ts = {0}; 1483 qdf_nbuf_t nbuf = tx_desc->nbuf; 1484 1485 if (qdf_unlikely(!vdev)) { 1486 dp_err("vdev is null!"); 1487 return; 1488 } 1489 1490 hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc); 1491 if (vdev->tx_non_std_data_callback.func) { 1492 qdf_nbuf_set_next(tx_desc->nbuf, NULL); 1493 vdev->tx_non_std_data_callback.func( 1494 vdev->tx_non_std_data_callback.ctxt, 1495 nbuf, ts.status); 1496 return; 1497 } 1498 } 1499 #else 1500 static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc) 1501 { 1502 } 1503 1504 static inline void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc, 1505 struct dp_vdev *vdev) 1506 { 1507 } 1508 #endif 1509 1510 /** 1511 * dp_tx_frame_is_drop() - checks if the packet is loopback 1512 * @vdev: DP vdev handle 1513 * @nbuf: skb 1514 * 1515 * Return: 1 if frame needs to be dropped else 0 1516 */ 1517 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac) 1518 { 1519 struct dp_pdev *pdev = NULL; 1520 struct dp_ast_entry *src_ast_entry = NULL; 1521 struct dp_ast_entry *dst_ast_entry = NULL; 1522 struct dp_soc *soc = NULL; 1523 1524 qdf_assert(vdev); 1525 pdev = vdev->pdev; 1526 qdf_assert(pdev); 1527 soc = pdev->soc; 1528 1529 dst_ast_entry = dp_peer_ast_hash_find_by_pdevid 1530 (soc, dstmac, vdev->pdev->pdev_id); 1531 1532 src_ast_entry = dp_peer_ast_hash_find_by_pdevid 1533 (soc, srcmac, vdev->pdev->pdev_id); 1534 if (dst_ast_entry && src_ast_entry) { 1535 if (dst_ast_entry->peer->peer_ids[0] == 1536 src_ast_entry->peer->peer_ids[0]) 1537 return 1; 1538 } 1539 1540 return 0; 1541 } 1542 1543 /** 1544 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL 1545 * @vdev: DP vdev handle 1546 * @nbuf: skb 1547 * @tid: TID from HLOS for overriding default DSCP-TID mapping 1548 * @meta_data: Metadata to the fw 1549 * @tx_q: Tx queue to be used for this Tx frame 1550 * @peer_id: peer_id of the peer in case of NAWDS frames 1551 * @tx_exc_metadata: Handle that holds exception path metadata 1552 * 1553 * Return: NULL on success, 1554 * nbuf when it fails to send 1555 */ 1556 qdf_nbuf_t 1557 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1558 struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id, 1559 struct cdp_tx_exception_metadata *tx_exc_metadata) 1560 { 1561 struct dp_pdev *pdev = vdev->pdev; 1562 struct dp_soc *soc = pdev->soc; 1563 struct dp_tx_desc_s *tx_desc; 1564 QDF_STATUS status; 1565 struct dp_tx_queue *tx_q = &(msdu_info->tx_queue); 1566 hal_ring_handle_t hal_ring_hdl = 1567 soc->tcl_data_ring[tx_q->ring_id].hal_srng; 1568 uint16_t htt_tcl_metadata = 0; 1569 uint8_t tid = msdu_info->tid; 1570 struct cdp_tid_tx_stats *tid_stats = NULL; 1571 1572 /* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */ 1573 tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id, 1574 msdu_info, tx_exc_metadata); 1575 if (!tx_desc) { 1576 dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d", 1577 vdev, tx_q->desc_pool_id); 1578 dp_tx_get_tid(vdev, nbuf, msdu_info); 1579 tid_stats = &pdev->stats.tid_stats. 1580 tid_tx_stats[tx_q->ring_id][msdu_info->tid]; 1581 tid_stats->swdrop_cnt[TX_DESC_ERR]++; 1582 return nbuf; 1583 } 1584 1585 if (qdf_unlikely(soc->cce_disable)) { 1586 if (dp_cce_classify(vdev, nbuf) == true) { 1587 DP_STATS_INC(vdev, tx_i.cce_classified, 1); 1588 tid = DP_VO_TID; 1589 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; 1590 } 1591 } 1592 1593 dp_tx_update_tdls_flags(tx_desc); 1594 1595 if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_ring_hdl))) { 1596 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1597 "%s %d : HAL RING Access Failed -- %pK", 1598 __func__, __LINE__, hal_ring_hdl); 1599 dp_tx_get_tid(vdev, nbuf, msdu_info); 1600 tid_stats = &pdev->stats.tid_stats. 1601 tid_tx_stats[tx_q->ring_id][tid]; 1602 tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++; 1603 DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1); 1604 dp_tx_desc_release(tx_desc, tx_q->desc_pool_id); 1605 qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE); 1606 goto fail_return; 1607 } 1608 1609 if (qdf_unlikely(peer_id == DP_INVALID_PEER)) { 1610 htt_tcl_metadata = vdev->htt_tcl_metadata; 1611 HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1); 1612 } else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) { 1613 HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata, 1614 HTT_TCL_METADATA_TYPE_PEER_BASED); 1615 HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata, 1616 peer_id); 1617 } else 1618 htt_tcl_metadata = vdev->htt_tcl_metadata; 1619 1620 1621 if (msdu_info->exception_fw) { 1622 HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1); 1623 } 1624 1625 /* Enqueue the Tx MSDU descriptor to HW for transmit */ 1626 status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid, 1627 htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata); 1628 1629 if (status != QDF_STATUS_SUCCESS) { 1630 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1631 "%s Tx_hw_enqueue Fail tx_desc %pK queue %d", 1632 __func__, tx_desc, tx_q->ring_id); 1633 dp_tx_get_tid(vdev, nbuf, msdu_info); 1634 tid_stats = &pdev->stats.tid_stats. 1635 tid_tx_stats[tx_q->ring_id][tid]; 1636 tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++; 1637 dp_tx_desc_release(tx_desc, tx_q->desc_pool_id); 1638 qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE); 1639 goto fail_return; 1640 } 1641 1642 nbuf = NULL; 1643 1644 fail_return: 1645 if (hif_pm_runtime_get(soc->hif_handle) == 0) { 1646 hal_srng_access_end(soc->hal_soc, hal_ring_hdl); 1647 hif_pm_runtime_put(soc->hif_handle); 1648 } else { 1649 hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl); 1650 hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); 1651 hal_srng_inc_flush_cnt(hal_ring_hdl); 1652 } 1653 1654 return nbuf; 1655 } 1656 1657 /** 1658 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs 1659 * @vdev: DP vdev handle 1660 * @nbuf: skb 1661 * @msdu_info: MSDU info to be setup in MSDU extension descriptor 1662 * 1663 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL 1664 * 1665 * Return: NULL on success, 1666 * nbuf when it fails to send 1667 */ 1668 #if QDF_LOCK_STATS 1669 noinline 1670 #else 1671 #endif 1672 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1673 struct dp_tx_msdu_info_s *msdu_info) 1674 { 1675 uint8_t i; 1676 struct dp_pdev *pdev = vdev->pdev; 1677 struct dp_soc *soc = pdev->soc; 1678 struct dp_tx_desc_s *tx_desc; 1679 bool is_cce_classified = false; 1680 QDF_STATUS status; 1681 uint16_t htt_tcl_metadata = 0; 1682 struct dp_tx_queue *tx_q = &msdu_info->tx_queue; 1683 hal_ring_handle_t hal_ring_hdl = 1684 soc->tcl_data_ring[tx_q->ring_id].hal_srng; 1685 struct cdp_tid_tx_stats *tid_stats = NULL; 1686 1687 if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_ring_hdl))) { 1688 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1689 "%s %d : HAL RING Access Failed -- %pK", 1690 __func__, __LINE__, hal_ring_hdl); 1691 dp_tx_get_tid(vdev, nbuf, msdu_info); 1692 tid_stats = &pdev->stats.tid_stats. 1693 tid_tx_stats[tx_q->ring_id][msdu_info->tid]; 1694 tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++; 1695 DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1); 1696 return nbuf; 1697 } 1698 1699 if (qdf_unlikely(soc->cce_disable)) { 1700 is_cce_classified = dp_cce_classify(vdev, nbuf); 1701 if (is_cce_classified) { 1702 DP_STATS_INC(vdev, tx_i.cce_classified, 1); 1703 msdu_info->tid = DP_VO_TID; 1704 } 1705 } 1706 1707 if (msdu_info->frm_type == dp_tx_frm_me) 1708 nbuf = msdu_info->u.sg_info.curr_seg->nbuf; 1709 1710 i = 0; 1711 /* Print statement to track i and num_seg */ 1712 /* 1713 * For each segment (maps to 1 MSDU) , prepare software and hardware 1714 * descriptors using information in msdu_info 1715 */ 1716 while (i < msdu_info->num_seg) { 1717 /* 1718 * Setup Tx descriptor for an MSDU, and MSDU extension 1719 * descriptor 1720 */ 1721 tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info, 1722 tx_q->desc_pool_id); 1723 1724 if (!tx_desc) { 1725 if (msdu_info->frm_type == dp_tx_frm_me) { 1726 dp_tx_me_free_buf(pdev, 1727 (void *)(msdu_info->u.sg_info 1728 .curr_seg->frags[0].vaddr)); 1729 i++; 1730 continue; 1731 } 1732 goto done; 1733 } 1734 1735 if (msdu_info->frm_type == dp_tx_frm_me) { 1736 tx_desc->me_buffer = 1737 msdu_info->u.sg_info.curr_seg->frags[0].vaddr; 1738 tx_desc->flags |= DP_TX_DESC_FLAG_ME; 1739 } 1740 1741 if (is_cce_classified) 1742 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; 1743 1744 htt_tcl_metadata = vdev->htt_tcl_metadata; 1745 if (msdu_info->exception_fw) { 1746 HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1); 1747 } 1748 1749 /* 1750 * Enqueue the Tx MSDU descriptor to HW for transmit 1751 */ 1752 status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid, 1753 htt_tcl_metadata, tx_q->ring_id, NULL); 1754 1755 if (status != QDF_STATUS_SUCCESS) { 1756 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1757 "%s Tx_hw_enqueue Fail tx_desc %pK queue %d", 1758 __func__, tx_desc, tx_q->ring_id); 1759 1760 dp_tx_get_tid(vdev, nbuf, msdu_info); 1761 tid_stats = &pdev->stats.tid_stats. 1762 tid_tx_stats[tx_q->ring_id][msdu_info->tid]; 1763 tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++; 1764 1765 dp_tx_desc_release(tx_desc, tx_q->desc_pool_id); 1766 if (msdu_info->frm_type == dp_tx_frm_me) { 1767 i++; 1768 continue; 1769 } 1770 goto done; 1771 } 1772 1773 /* 1774 * TODO 1775 * if tso_info structure can be modified to have curr_seg 1776 * as first element, following 2 blocks of code (for TSO and SG) 1777 * can be combined into 1 1778 */ 1779 1780 /* 1781 * For frames with multiple segments (TSO, ME), jump to next 1782 * segment. 1783 */ 1784 if (msdu_info->frm_type == dp_tx_frm_tso) { 1785 if (msdu_info->u.tso_info.curr_seg->next) { 1786 msdu_info->u.tso_info.curr_seg = 1787 msdu_info->u.tso_info.curr_seg->next; 1788 1789 /* 1790 * If this is a jumbo nbuf, then increment the number of 1791 * nbuf users for each additional segment of the msdu. 1792 * This will ensure that the skb is freed only after 1793 * receiving tx completion for all segments of an nbuf 1794 */ 1795 qdf_nbuf_inc_users(nbuf); 1796 1797 /* Check with MCL if this is needed */ 1798 /* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */ 1799 } 1800 } 1801 1802 /* 1803 * For Multicast-Unicast converted packets, 1804 * each converted frame (for a client) is represented as 1805 * 1 segment 1806 */ 1807 if ((msdu_info->frm_type == dp_tx_frm_sg) || 1808 (msdu_info->frm_type == dp_tx_frm_me)) { 1809 if (msdu_info->u.sg_info.curr_seg->next) { 1810 msdu_info->u.sg_info.curr_seg = 1811 msdu_info->u.sg_info.curr_seg->next; 1812 nbuf = msdu_info->u.sg_info.curr_seg->nbuf; 1813 } 1814 } 1815 i++; 1816 } 1817 1818 nbuf = NULL; 1819 1820 done: 1821 if (hif_pm_runtime_get(soc->hif_handle) == 0) { 1822 hal_srng_access_end(soc->hal_soc, hal_ring_hdl); 1823 hif_pm_runtime_put(soc->hif_handle); 1824 } else { 1825 hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl); 1826 hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); 1827 hal_srng_inc_flush_cnt(hal_ring_hdl); 1828 } 1829 1830 return nbuf; 1831 } 1832 1833 /** 1834 * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info 1835 * for SG frames 1836 * @vdev: DP vdev handle 1837 * @nbuf: skb 1838 * @seg_info: Pointer to Segment info Descriptor to be prepared 1839 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc. 1840 * 1841 * Return: NULL on success, 1842 * nbuf when it fails to send 1843 */ 1844 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1845 struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info) 1846 { 1847 uint32_t cur_frag, nr_frags; 1848 qdf_dma_addr_t paddr; 1849 struct dp_tx_sg_info_s *sg_info; 1850 1851 sg_info = &msdu_info->u.sg_info; 1852 nr_frags = qdf_nbuf_get_nr_frags(nbuf); 1853 1854 if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf, 1855 QDF_DMA_TO_DEVICE)) { 1856 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1857 "dma map error"); 1858 DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1); 1859 1860 qdf_nbuf_free(nbuf); 1861 return NULL; 1862 } 1863 1864 paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); 1865 seg_info->frags[0].paddr_lo = paddr; 1866 seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32; 1867 seg_info->frags[0].len = qdf_nbuf_headlen(nbuf); 1868 seg_info->frags[0].vaddr = (void *) nbuf; 1869 1870 for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) { 1871 if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev, 1872 nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) { 1873 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1874 "frag dma map error"); 1875 DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1); 1876 qdf_nbuf_free(nbuf); 1877 return NULL; 1878 } 1879 1880 paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); 1881 seg_info->frags[cur_frag + 1].paddr_lo = paddr; 1882 seg_info->frags[cur_frag + 1].paddr_hi = 1883 ((uint64_t) paddr) >> 32; 1884 seg_info->frags[cur_frag + 1].len = 1885 qdf_nbuf_get_frag_size(nbuf, cur_frag); 1886 } 1887 1888 seg_info->frag_cnt = (cur_frag + 1); 1889 seg_info->total_len = qdf_nbuf_len(nbuf); 1890 seg_info->next = NULL; 1891 1892 sg_info->curr_seg = seg_info; 1893 1894 msdu_info->frm_type = dp_tx_frm_sg; 1895 msdu_info->num_seg = 1; 1896 1897 return nbuf; 1898 } 1899 1900 /** 1901 * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info 1902 * @vdev: DP vdev handle 1903 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc. 1904 * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions 1905 * 1906 * Return: NULL on failure, 1907 * nbuf when extracted successfully 1908 */ 1909 static 1910 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev, 1911 struct dp_tx_msdu_info_s *msdu_info, 1912 uint16_t ppdu_cookie) 1913 { 1914 struct htt_tx_msdu_desc_ext2_t *meta_data = 1915 (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0]; 1916 1917 qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t)); 1918 1919 HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET 1920 (msdu_info->meta_data[5], 1); 1921 HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET 1922 (msdu_info->meta_data[5], 1); 1923 HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET 1924 (msdu_info->meta_data[6], ppdu_cookie); 1925 1926 msdu_info->exception_fw = 1; 1927 msdu_info->is_tx_sniffer = 1; 1928 } 1929 1930 #ifdef MESH_MODE_SUPPORT 1931 1932 /** 1933 * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf 1934 and prepare msdu_info for mesh frames. 1935 * @vdev: DP vdev handle 1936 * @nbuf: skb 1937 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc. 1938 * 1939 * Return: NULL on failure, 1940 * nbuf when extracted successfully 1941 */ 1942 static 1943 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1944 struct dp_tx_msdu_info_s *msdu_info) 1945 { 1946 struct meta_hdr_s *mhdr; 1947 struct htt_tx_msdu_desc_ext2_t *meta_data = 1948 (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0]; 1949 1950 mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf); 1951 1952 if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) { 1953 msdu_info->exception_fw = 0; 1954 goto remove_meta_hdr; 1955 } 1956 1957 msdu_info->exception_fw = 1; 1958 1959 qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t)); 1960 1961 meta_data->host_tx_desc_pool = 1; 1962 meta_data->update_peer_cache = 1; 1963 meta_data->learning_frame = 1; 1964 1965 if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) { 1966 meta_data->power = mhdr->power; 1967 1968 meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs; 1969 meta_data->nss_mask = 1 << mhdr->rate_info[0].nss; 1970 meta_data->pream_type = mhdr->rate_info[0].preamble_type; 1971 meta_data->retry_limit = mhdr->rate_info[0].max_tries; 1972 1973 meta_data->dyn_bw = 1; 1974 1975 meta_data->valid_pwr = 1; 1976 meta_data->valid_mcs_mask = 1; 1977 meta_data->valid_nss_mask = 1; 1978 meta_data->valid_preamble_type = 1; 1979 meta_data->valid_retries = 1; 1980 meta_data->valid_bw_info = 1; 1981 } 1982 1983 if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) { 1984 meta_data->encrypt_type = 0; 1985 meta_data->valid_encrypt_type = 1; 1986 meta_data->learning_frame = 0; 1987 } 1988 1989 meta_data->valid_key_flags = 1; 1990 meta_data->key_flags = (mhdr->keyix & 0x3); 1991 1992 remove_meta_hdr: 1993 if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) { 1994 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1995 "qdf_nbuf_pull_head failed"); 1996 qdf_nbuf_free(nbuf); 1997 return NULL; 1998 } 1999 2000 msdu_info->tid = qdf_nbuf_get_priority(nbuf); 2001 2002 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, 2003 "%s , Meta hdr %0x %0x %0x %0x %0x %0x" 2004 " tid %d to_fw %d", 2005 __func__, msdu_info->meta_data[0], 2006 msdu_info->meta_data[1], 2007 msdu_info->meta_data[2], 2008 msdu_info->meta_data[3], 2009 msdu_info->meta_data[4], 2010 msdu_info->meta_data[5], 2011 msdu_info->tid, msdu_info->exception_fw); 2012 2013 return nbuf; 2014 } 2015 #else 2016 static 2017 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 2018 struct dp_tx_msdu_info_s *msdu_info) 2019 { 2020 return nbuf; 2021 } 2022 2023 #endif 2024 2025 /** 2026 * dp_check_exc_metadata() - Checks if parameters are valid 2027 * @tx_exc - holds all exception path parameters 2028 * 2029 * Returns true when all the parameters are valid else false 2030 * 2031 */ 2032 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc) 2033 { 2034 bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != 2035 HTT_INVALID_TID); 2036 bool invalid_encap_type = 2037 (tx_exc->tx_encap_type > htt_cmn_pkt_num_types && 2038 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE); 2039 bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types && 2040 tx_exc->sec_type != CDP_INVALID_SEC_TYPE); 2041 bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 && 2042 tx_exc->ppdu_cookie == 0); 2043 2044 if (invalid_tid || invalid_encap_type || invalid_sec_type || 2045 invalid_cookie) { 2046 return false; 2047 } 2048 2049 return true; 2050 } 2051 2052 /** 2053 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path 2054 * @vap_dev: DP vdev handle 2055 * @nbuf: skb 2056 * @tx_exc_metadata: Handle that holds exception path meta data 2057 * 2058 * Entry point for Core Tx layer (DP_TX) invoked from 2059 * hard_start_xmit in OSIF/HDD to transmit frames through fw 2060 * 2061 * Return: NULL on success, 2062 * nbuf when it fails to send 2063 */ 2064 qdf_nbuf_t 2065 dp_tx_send_exception(struct cdp_vdev *vap_dev, qdf_nbuf_t nbuf, 2066 struct cdp_tx_exception_metadata *tx_exc_metadata) 2067 { 2068 qdf_ether_header_t *eh = NULL; 2069 struct dp_vdev *vdev = (struct dp_vdev *) vap_dev; 2070 struct dp_tx_msdu_info_s msdu_info; 2071 2072 qdf_mem_zero(&msdu_info, sizeof(msdu_info)); 2073 2074 if (!tx_exc_metadata) 2075 goto fail; 2076 2077 msdu_info.tid = tx_exc_metadata->tid; 2078 2079 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 2080 dp_verbose_debug("skb %pM", nbuf->data); 2081 2082 DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf)); 2083 2084 if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) { 2085 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2086 "Invalid parameters in exception path"); 2087 goto fail; 2088 } 2089 2090 /* Basic sanity checks for unsupported packets */ 2091 2092 /* MESH mode */ 2093 if (qdf_unlikely(vdev->mesh_vdev)) { 2094 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2095 "Mesh mode is not supported in exception path"); 2096 goto fail; 2097 } 2098 2099 /* TSO or SG */ 2100 if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) || 2101 qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) { 2102 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2103 "TSO and SG are not supported in exception path"); 2104 2105 goto fail; 2106 } 2107 2108 /* RAW */ 2109 if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) { 2110 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2111 "Raw frame is not supported in exception path"); 2112 goto fail; 2113 } 2114 2115 2116 /* Mcast enhancement*/ 2117 if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) { 2118 if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) && 2119 !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) { 2120 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2121 "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW"); 2122 } 2123 } 2124 2125 if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) { 2126 DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1, 2127 qdf_nbuf_len(nbuf)); 2128 2129 dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info, 2130 tx_exc_metadata->ppdu_cookie); 2131 } 2132 2133 /* 2134 * Get HW Queue to use for this frame. 2135 * TCL supports upto 4 DMA rings, out of which 3 rings are 2136 * dedicated for data and 1 for command. 2137 * "queue_id" maps to one hardware ring. 2138 * With each ring, we also associate a unique Tx descriptor pool 2139 * to minimize lock contention for these resources. 2140 */ 2141 dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue); 2142 2143 /* Single linear frame */ 2144 /* 2145 * If nbuf is a simple linear frame, use send_single function to 2146 * prepare direct-buffer type TCL descriptor and enqueue to TCL 2147 * SRNG. There is no need to setup a MSDU extension descriptor. 2148 */ 2149 nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, 2150 tx_exc_metadata->peer_id, tx_exc_metadata); 2151 2152 return nbuf; 2153 2154 fail: 2155 dp_verbose_debug("pkt send failed"); 2156 return nbuf; 2157 } 2158 2159 /** 2160 * dp_tx_send_mesh() - Transmit mesh frame on a given VAP 2161 * @soc: DP soc handle 2162 * @vdev_id: DP vdev handle 2163 * @nbuf: skb 2164 * 2165 * Entry point for Core Tx layer (DP_TX) invoked from 2166 * hard_start_xmit in OSIF/HDD 2167 * 2168 * Return: NULL on success, 2169 * nbuf when it fails to send 2170 */ 2171 #ifdef MESH_MODE_SUPPORT 2172 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id, 2173 qdf_nbuf_t nbuf) 2174 { 2175 struct meta_hdr_s *mhdr; 2176 qdf_nbuf_t nbuf_mesh = NULL; 2177 qdf_nbuf_t nbuf_clone = NULL; 2178 struct dp_vdev *vdev; 2179 uint8_t no_enc_frame = 0; 2180 2181 nbuf_mesh = qdf_nbuf_unshare(nbuf); 2182 if (!nbuf_mesh) { 2183 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2184 "qdf_nbuf_unshare failed"); 2185 return nbuf; 2186 } 2187 2188 vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, 2189 vdev_id); 2190 if (!vdev) { 2191 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2192 "vdev is NULL for vdev_id %d", vdev_id); 2193 return nbuf; 2194 } 2195 2196 nbuf = nbuf_mesh; 2197 2198 mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf); 2199 2200 if ((vdev->sec_type != cdp_sec_type_none) && 2201 (mhdr->flags & METAHDR_FLAG_NOENCRYPT)) 2202 no_enc_frame = 1; 2203 2204 if (mhdr->flags & METAHDR_FLAG_NOQOS) 2205 qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST); 2206 2207 if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) && 2208 !no_enc_frame) { 2209 nbuf_clone = qdf_nbuf_clone(nbuf); 2210 if (!nbuf_clone) { 2211 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2212 "qdf_nbuf_clone failed"); 2213 return nbuf; 2214 } 2215 qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO); 2216 } 2217 2218 if (nbuf_clone) { 2219 if (!dp_tx_send(soc, vdev_id, nbuf_clone)) { 2220 DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1); 2221 } else { 2222 qdf_nbuf_free(nbuf_clone); 2223 } 2224 } 2225 2226 if (no_enc_frame) 2227 qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO); 2228 else 2229 qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID); 2230 2231 nbuf = dp_tx_send(soc, vdev_id, nbuf); 2232 if ((!nbuf) && no_enc_frame) { 2233 DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1); 2234 } 2235 2236 return nbuf; 2237 } 2238 2239 #else 2240 2241 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id, 2242 qdf_nbuf_t nbuf) 2243 { 2244 return dp_tx_send(soc, vdev_id, nbuf); 2245 } 2246 2247 #endif 2248 2249 /** 2250 * dp_tx_send() - Transmit a frame on a given VAP 2251 * @soc: DP soc handle 2252 * @vdev_id: id of DP vdev handle 2253 * @nbuf: skb 2254 * 2255 * Entry point for Core Tx layer (DP_TX) invoked from 2256 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding 2257 * cases 2258 * 2259 * Return: NULL on success, 2260 * nbuf when it fails to send 2261 */ 2262 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf) 2263 { 2264 qdf_ether_header_t *eh = NULL; 2265 struct dp_tx_msdu_info_s msdu_info; 2266 struct dp_tx_seg_info_s seg_info; 2267 uint16_t peer_id = HTT_INVALID_PEER; 2268 qdf_nbuf_t nbuf_mesh = NULL; 2269 struct dp_vdev *vdev = 2270 dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, 2271 vdev_id); 2272 2273 if (qdf_unlikely(!vdev)) 2274 return nbuf; 2275 2276 qdf_mem_zero(&msdu_info, sizeof(msdu_info)); 2277 qdf_mem_zero(&seg_info, sizeof(seg_info)); 2278 2279 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 2280 2281 dp_verbose_debug("skb %pM", nbuf->data); 2282 2283 /* 2284 * Set Default Host TID value to invalid TID 2285 * (TID override disabled) 2286 */ 2287 msdu_info.tid = HTT_TX_EXT_TID_INVALID; 2288 DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf)); 2289 2290 if (qdf_unlikely(vdev->mesh_vdev)) { 2291 nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf, 2292 &msdu_info); 2293 if (!nbuf_mesh) { 2294 dp_verbose_debug("Extracting mesh metadata failed"); 2295 return nbuf; 2296 } 2297 nbuf = nbuf_mesh; 2298 } 2299 2300 /* 2301 * Get HW Queue to use for this frame. 2302 * TCL supports upto 4 DMA rings, out of which 3 rings are 2303 * dedicated for data and 1 for command. 2304 * "queue_id" maps to one hardware ring. 2305 * With each ring, we also associate a unique Tx descriptor pool 2306 * to minimize lock contention for these resources. 2307 */ 2308 dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue); 2309 2310 /* 2311 * TCL H/W supports 2 DSCP-TID mapping tables. 2312 * Table 1 - Default DSCP-TID mapping table 2313 * Table 2 - 1 DSCP-TID override table 2314 * 2315 * If we need a different DSCP-TID mapping for this vap, 2316 * call tid_classify to extract DSCP/ToS from frame and 2317 * map to a TID and store in msdu_info. This is later used 2318 * to fill in TCL Input descriptor (per-packet TID override). 2319 */ 2320 dp_tx_classify_tid(vdev, nbuf, &msdu_info); 2321 2322 /* 2323 * Classify the frame and call corresponding 2324 * "prepare" function which extracts the segment (TSO) 2325 * and fragmentation information (for TSO , SG, ME, or Raw) 2326 * into MSDU_INFO structure which is later used to fill 2327 * SW and HW descriptors. 2328 */ 2329 if (qdf_nbuf_is_tso(nbuf)) { 2330 dp_verbose_debug("TSO frame %pK", vdev); 2331 DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1, 2332 qdf_nbuf_len(nbuf)); 2333 2334 if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) { 2335 DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1, 2336 qdf_nbuf_len(nbuf)); 2337 return nbuf; 2338 } 2339 2340 goto send_multiple; 2341 } 2342 2343 /* SG */ 2344 if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) { 2345 nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info); 2346 2347 if (!nbuf) 2348 return NULL; 2349 2350 dp_verbose_debug("non-TSO SG frame %pK", vdev); 2351 2352 DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1, 2353 qdf_nbuf_len(nbuf)); 2354 2355 goto send_multiple; 2356 } 2357 2358 #ifdef ATH_SUPPORT_IQUE 2359 /* Mcast to Ucast Conversion*/ 2360 if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) { 2361 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 2362 if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) && 2363 !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) { 2364 dp_verbose_debug("Mcast frm for ME %pK", vdev); 2365 2366 DP_STATS_INC_PKT(vdev, 2367 tx_i.mcast_en.mcast_pkt, 1, 2368 qdf_nbuf_len(nbuf)); 2369 if (dp_tx_prepare_send_me(vdev, nbuf) == 2370 QDF_STATUS_SUCCESS) { 2371 return NULL; 2372 } 2373 } 2374 } 2375 #endif 2376 2377 /* RAW */ 2378 if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) { 2379 nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info); 2380 if (!nbuf) 2381 return NULL; 2382 2383 dp_verbose_debug("Raw frame %pK", vdev); 2384 2385 goto send_multiple; 2386 2387 } 2388 2389 /* Single linear frame */ 2390 /* 2391 * If nbuf is a simple linear frame, use send_single function to 2392 * prepare direct-buffer type TCL descriptor and enqueue to TCL 2393 * SRNG. There is no need to setup a MSDU extension descriptor. 2394 */ 2395 nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL); 2396 2397 return nbuf; 2398 2399 send_multiple: 2400 nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info); 2401 2402 if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw)) 2403 dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf); 2404 2405 return nbuf; 2406 } 2407 2408 /** 2409 * dp_tx_reinject_handler() - Tx Reinject Handler 2410 * @tx_desc: software descriptor head pointer 2411 * @status : Tx completion status from HTT descriptor 2412 * 2413 * This function reinjects frames back to Target. 2414 * Todo - Host queue needs to be added 2415 * 2416 * Return: none 2417 */ 2418 static 2419 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status) 2420 { 2421 struct dp_vdev *vdev; 2422 struct dp_peer *peer = NULL; 2423 uint32_t peer_id = HTT_INVALID_PEER; 2424 qdf_nbuf_t nbuf = tx_desc->nbuf; 2425 qdf_nbuf_t nbuf_copy = NULL; 2426 struct dp_tx_msdu_info_s msdu_info; 2427 struct dp_peer *sa_peer = NULL; 2428 struct dp_ast_entry *ast_entry = NULL; 2429 struct dp_soc *soc = NULL; 2430 qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 2431 #ifdef WDS_VENDOR_EXTENSION 2432 int is_mcast = 0, is_ucast = 0; 2433 int num_peers_3addr = 0; 2434 qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf)); 2435 struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf)); 2436 #endif 2437 2438 vdev = tx_desc->vdev; 2439 soc = vdev->pdev->soc; 2440 2441 qdf_assert(vdev); 2442 2443 qdf_mem_zero(&msdu_info, sizeof(msdu_info)); 2444 2445 dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue); 2446 2447 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 2448 "%s Tx reinject path", __func__); 2449 2450 DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1, 2451 qdf_nbuf_len(tx_desc->nbuf)); 2452 2453 qdf_spin_lock_bh(&(soc->ast_lock)); 2454 2455 ast_entry = dp_peer_ast_hash_find_by_pdevid 2456 (soc, 2457 (uint8_t *)(eh->ether_shost), 2458 vdev->pdev->pdev_id); 2459 2460 if (ast_entry) 2461 sa_peer = ast_entry->peer; 2462 2463 qdf_spin_unlock_bh(&(soc->ast_lock)); 2464 2465 #ifdef WDS_VENDOR_EXTENSION 2466 if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { 2467 is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0; 2468 } else { 2469 is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0; 2470 } 2471 is_ucast = !is_mcast; 2472 2473 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { 2474 if (peer->bss_peer) 2475 continue; 2476 2477 /* Detect wds peers that use 3-addr framing for mcast. 2478 * if there are any, the bss_peer is used to send the 2479 * the mcast frame using 3-addr format. all wds enabled 2480 * peers that use 4-addr framing for mcast frames will 2481 * be duplicated and sent as 4-addr frames below. 2482 */ 2483 if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) { 2484 num_peers_3addr = 1; 2485 break; 2486 } 2487 } 2488 #endif 2489 2490 if (qdf_unlikely(vdev->mesh_vdev)) { 2491 DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf); 2492 } else { 2493 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { 2494 if ((peer->peer_ids[0] != HTT_INVALID_PEER) && 2495 #ifdef WDS_VENDOR_EXTENSION 2496 /* 2497 * . if 3-addr STA, then send on BSS Peer 2498 * . if Peer WDS enabled and accept 4-addr mcast, 2499 * send mcast on that peer only 2500 * . if Peer WDS enabled and accept 4-addr ucast, 2501 * send ucast on that peer only 2502 */ 2503 ((peer->bss_peer && num_peers_3addr && is_mcast) || 2504 (peer->wds_enabled && 2505 ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) || 2506 (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) { 2507 #else 2508 ((peer->bss_peer && 2509 !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) || 2510 peer->nawds_enabled)) { 2511 #endif 2512 peer_id = DP_INVALID_PEER; 2513 2514 if (peer->nawds_enabled) { 2515 peer_id = peer->peer_ids[0]; 2516 if (sa_peer == peer) { 2517 QDF_TRACE( 2518 QDF_MODULE_ID_DP, 2519 QDF_TRACE_LEVEL_DEBUG, 2520 " %s: multicast packet", 2521 __func__); 2522 DP_STATS_INC(peer, 2523 tx.nawds_mcast_drop, 1); 2524 continue; 2525 } 2526 } 2527 2528 nbuf_copy = qdf_nbuf_copy(nbuf); 2529 2530 if (!nbuf_copy) { 2531 QDF_TRACE(QDF_MODULE_ID_DP, 2532 QDF_TRACE_LEVEL_DEBUG, 2533 FL("nbuf copy failed")); 2534 break; 2535 } 2536 2537 nbuf_copy = dp_tx_send_msdu_single(vdev, 2538 nbuf_copy, 2539 &msdu_info, 2540 peer_id, 2541 NULL); 2542 2543 if (nbuf_copy) { 2544 QDF_TRACE(QDF_MODULE_ID_DP, 2545 QDF_TRACE_LEVEL_DEBUG, 2546 FL("pkt send failed")); 2547 qdf_nbuf_free(nbuf_copy); 2548 } else { 2549 if (peer_id != DP_INVALID_PEER) 2550 DP_STATS_INC_PKT(peer, 2551 tx.nawds_mcast, 2552 1, qdf_nbuf_len(nbuf)); 2553 } 2554 } 2555 } 2556 } 2557 2558 if (vdev->nawds_enabled) { 2559 peer_id = DP_INVALID_PEER; 2560 2561 DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast, 2562 1, qdf_nbuf_len(nbuf)); 2563 2564 nbuf = dp_tx_send_msdu_single(vdev, 2565 nbuf, 2566 &msdu_info, 2567 peer_id, NULL); 2568 2569 if (nbuf) { 2570 QDF_TRACE(QDF_MODULE_ID_DP, 2571 QDF_TRACE_LEVEL_DEBUG, 2572 FL("pkt send failed")); 2573 qdf_nbuf_free(nbuf); 2574 } 2575 } else 2576 qdf_nbuf_free(nbuf); 2577 2578 dp_tx_desc_release(tx_desc, tx_desc->pool_id); 2579 } 2580 2581 /** 2582 * dp_tx_inspect_handler() - Tx Inspect Handler 2583 * @tx_desc: software descriptor head pointer 2584 * @status : Tx completion status from HTT descriptor 2585 * 2586 * Handles Tx frames sent back to Host for inspection 2587 * (ProxyARP) 2588 * 2589 * Return: none 2590 */ 2591 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status) 2592 { 2593 2594 struct dp_soc *soc; 2595 struct dp_pdev *pdev = tx_desc->pdev; 2596 2597 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 2598 "%s Tx inspect path", 2599 __func__); 2600 2601 qdf_assert(pdev); 2602 2603 soc = pdev->soc; 2604 2605 DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1, 2606 qdf_nbuf_len(tx_desc->nbuf)); 2607 2608 DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf); 2609 dp_tx_desc_release(tx_desc, tx_desc->pool_id); 2610 } 2611 2612 #ifdef FEATURE_PERPKT_INFO 2613 /** 2614 * dp_get_completion_indication_for_stack() - send completion to stack 2615 * @soc : dp_soc handle 2616 * @pdev: dp_pdev handle 2617 * @peer: dp peer handle 2618 * @ts: transmit completion status structure 2619 * @netbuf: Buffer pointer for free 2620 * 2621 * This function is used for indication whether buffer needs to be 2622 * sent to stack for freeing or not 2623 */ 2624 QDF_STATUS 2625 dp_get_completion_indication_for_stack(struct dp_soc *soc, 2626 struct dp_pdev *pdev, 2627 struct dp_peer *peer, 2628 struct hal_tx_completion_status *ts, 2629 qdf_nbuf_t netbuf, 2630 uint64_t time_latency) 2631 { 2632 struct tx_capture_hdr *ppdu_hdr; 2633 uint16_t peer_id = ts->peer_id; 2634 uint32_t ppdu_id = ts->ppdu_id; 2635 uint8_t first_msdu = ts->first_msdu; 2636 uint8_t last_msdu = ts->last_msdu; 2637 2638 if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode && 2639 !pdev->latency_capture_enable)) 2640 return QDF_STATUS_E_NOSUPPORT; 2641 2642 if (!peer) { 2643 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2644 FL("Peer Invalid")); 2645 return QDF_STATUS_E_INVAL; 2646 } 2647 2648 if (pdev->mcopy_mode) { 2649 if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) && 2650 (pdev->m_copy_id.tx_peer_id == peer_id)) { 2651 return QDF_STATUS_E_INVAL; 2652 } 2653 2654 pdev->m_copy_id.tx_ppdu_id = ppdu_id; 2655 pdev->m_copy_id.tx_peer_id = peer_id; 2656 } 2657 2658 if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) { 2659 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2660 FL("No headroom")); 2661 return QDF_STATUS_E_NOMEM; 2662 } 2663 2664 ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf); 2665 qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw, 2666 QDF_MAC_ADDR_SIZE); 2667 qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw, 2668 QDF_MAC_ADDR_SIZE); 2669 ppdu_hdr->ppdu_id = ppdu_id; 2670 ppdu_hdr->peer_id = peer_id; 2671 ppdu_hdr->first_msdu = first_msdu; 2672 ppdu_hdr->last_msdu = last_msdu; 2673 if (qdf_unlikely(pdev->latency_capture_enable)) { 2674 ppdu_hdr->tsf = ts->tsf; 2675 ppdu_hdr->time_latency = time_latency; 2676 } 2677 2678 return QDF_STATUS_SUCCESS; 2679 } 2680 2681 2682 /** 2683 * dp_send_completion_to_stack() - send completion to stack 2684 * @soc : dp_soc handle 2685 * @pdev: dp_pdev handle 2686 * @peer_id: peer_id of the peer for which completion came 2687 * @ppdu_id: ppdu_id 2688 * @netbuf: Buffer pointer for free 2689 * 2690 * This function is used to send completion to stack 2691 * to free buffer 2692 */ 2693 void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev, 2694 uint16_t peer_id, uint32_t ppdu_id, 2695 qdf_nbuf_t netbuf) 2696 { 2697 dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc, 2698 netbuf, peer_id, 2699 WDI_NO_VAL, pdev->pdev_id); 2700 } 2701 #else 2702 static QDF_STATUS 2703 dp_get_completion_indication_for_stack(struct dp_soc *soc, 2704 struct dp_pdev *pdev, 2705 struct dp_peer *peer, 2706 struct hal_tx_completion_status *ts, 2707 qdf_nbuf_t netbuf, 2708 uint64_t time_latency) 2709 { 2710 return QDF_STATUS_E_NOSUPPORT; 2711 } 2712 2713 static void 2714 dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev, 2715 uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf) 2716 { 2717 } 2718 #endif 2719 2720 /** 2721 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor 2722 * @soc: Soc handle 2723 * @desc: software Tx descriptor to be processed 2724 * 2725 * Return: none 2726 */ 2727 static inline void dp_tx_comp_free_buf(struct dp_soc *soc, 2728 struct dp_tx_desc_s *desc) 2729 { 2730 struct dp_vdev *vdev = desc->vdev; 2731 qdf_nbuf_t nbuf = desc->nbuf; 2732 2733 /* nbuf already freed in vdev detach path */ 2734 if (!nbuf) 2735 return; 2736 2737 /* If it is TDLS mgmt, don't unmap or free the frame */ 2738 if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) 2739 return dp_non_std_tx_comp_free_buff(desc, vdev); 2740 2741 /* 0 : MSDU buffer, 1 : MLE */ 2742 if (desc->msdu_ext_desc) { 2743 /* TSO free */ 2744 if (hal_tx_ext_desc_get_tso_enable( 2745 desc->msdu_ext_desc->vaddr)) { 2746 /* unmap eash TSO seg before free the nbuf */ 2747 dp_tx_tso_unmap_segment(soc, desc->tso_desc, 2748 desc->tso_num_desc); 2749 qdf_nbuf_free(nbuf); 2750 return; 2751 } 2752 } 2753 2754 qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE); 2755 2756 if (qdf_unlikely(!vdev)) { 2757 qdf_nbuf_free(nbuf); 2758 return; 2759 } 2760 2761 if (qdf_likely(!vdev->mesh_vdev)) 2762 qdf_nbuf_free(nbuf); 2763 else { 2764 if (desc->flags & DP_TX_DESC_FLAG_TO_FW) { 2765 qdf_nbuf_free(nbuf); 2766 DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1); 2767 } else 2768 vdev->osif_tx_free_ext((nbuf)); 2769 } 2770 } 2771 2772 #ifdef MESH_MODE_SUPPORT 2773 /** 2774 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats 2775 * in mesh meta header 2776 * @tx_desc: software descriptor head pointer 2777 * @ts: pointer to tx completion stats 2778 * Return: none 2779 */ 2780 static 2781 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc, 2782 struct hal_tx_completion_status *ts) 2783 { 2784 struct meta_hdr_s *mhdr; 2785 qdf_nbuf_t netbuf = tx_desc->nbuf; 2786 2787 if (!tx_desc->msdu_ext_desc) { 2788 if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) { 2789 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2790 "netbuf %pK offset %d", 2791 netbuf, tx_desc->pkt_offset); 2792 return; 2793 } 2794 } 2795 if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) { 2796 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2797 "netbuf %pK offset %lu", netbuf, 2798 sizeof(struct meta_hdr_s)); 2799 return; 2800 } 2801 2802 mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf); 2803 mhdr->rssi = ts->ack_frame_rssi; 2804 mhdr->channel = tx_desc->pdev->operating_channel; 2805 } 2806 2807 #else 2808 static 2809 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc, 2810 struct hal_tx_completion_status *ts) 2811 { 2812 } 2813 2814 #endif 2815 2816 /** 2817 * dp_tx_compute_delay() - Compute and fill in all timestamps 2818 * to pass in correct fields 2819 * 2820 * @vdev: pdev handle 2821 * @tx_desc: tx descriptor 2822 * @tid: tid value 2823 * @ring_id: TCL or WBM ring number for transmit path 2824 * Return: none 2825 */ 2826 static void dp_tx_compute_delay(struct dp_vdev *vdev, 2827 struct dp_tx_desc_s *tx_desc, 2828 uint8_t tid, uint8_t ring_id) 2829 { 2830 int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue; 2831 uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay; 2832 2833 if (qdf_likely(!vdev->pdev->delay_stats_flag)) 2834 return; 2835 2836 current_timestamp = qdf_ktime_to_ms(qdf_ktime_get()); 2837 timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf); 2838 timestamp_hw_enqueue = tx_desc->timestamp; 2839 sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress); 2840 fwhw_transmit_delay = (uint32_t)(current_timestamp - 2841 timestamp_hw_enqueue); 2842 interframe_delay = (uint32_t)(timestamp_ingress - 2843 vdev->prev_tx_enq_tstamp); 2844 2845 /* 2846 * Delay in software enqueue 2847 */ 2848 dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid, 2849 CDP_DELAY_STATS_SW_ENQ, ring_id); 2850 /* 2851 * Delay between packet enqueued to HW and Tx completion 2852 */ 2853 dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid, 2854 CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id); 2855 2856 /* 2857 * Update interframe delay stats calculated at hardstart receive point. 2858 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so 2859 * interframe delay will not be calculate correctly for 1st frame. 2860 * On the other side, this will help in avoiding extra per packet check 2861 * of !vdev->prev_tx_enq_tstamp. 2862 */ 2863 dp_update_delay_stats(vdev->pdev, interframe_delay, tid, 2864 CDP_DELAY_STATS_TX_INTERFRAME, ring_id); 2865 vdev->prev_tx_enq_tstamp = timestamp_ingress; 2866 } 2867 2868 /** 2869 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications 2870 * per wbm ring 2871 * 2872 * @tx_desc: software descriptor head pointer 2873 * @ts: Tx completion status 2874 * @peer: peer handle 2875 * @ring_id: ring number 2876 * 2877 * Return: None 2878 */ 2879 static inline void 2880 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc, 2881 struct hal_tx_completion_status *ts, 2882 struct dp_peer *peer, uint8_t ring_id) 2883 { 2884 struct dp_pdev *pdev = peer->vdev->pdev; 2885 struct dp_soc *soc = NULL; 2886 uint8_t mcs, pkt_type; 2887 uint8_t tid = ts->tid; 2888 uint32_t length; 2889 struct cdp_tid_tx_stats *tid_stats; 2890 2891 if (!pdev) 2892 return; 2893 2894 if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) 2895 tid = CDP_MAX_DATA_TIDS - 1; 2896 2897 tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid]; 2898 soc = pdev->soc; 2899 2900 mcs = ts->mcs; 2901 pkt_type = ts->pkt_type; 2902 2903 if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) { 2904 dp_err("Release source is not from TQM"); 2905 return; 2906 } 2907 2908 length = qdf_nbuf_len(tx_desc->nbuf); 2909 DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length); 2910 2911 if (qdf_unlikely(pdev->delay_stats_flag)) 2912 dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id); 2913 DP_STATS_INCC(peer, tx.dropped.age_out, 1, 2914 (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED)); 2915 2916 DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length, 2917 (ts->status == HAL_TX_TQM_RR_REM_CMD_REM)); 2918 2919 DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1, 2920 (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX)); 2921 2922 DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1, 2923 (ts->status == HAL_TX_TQM_RR_REM_CMD_TX)); 2924 2925 DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1, 2926 (ts->status == HAL_TX_TQM_RR_FW_REASON1)); 2927 2928 DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1, 2929 (ts->status == HAL_TX_TQM_RR_FW_REASON2)); 2930 2931 DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1, 2932 (ts->status == HAL_TX_TQM_RR_FW_REASON3)); 2933 2934 /* 2935 * tx_failed is ideally supposed to be updated from HTT ppdu completion 2936 * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there 2937 * are no completions for failed cases. Hence updating tx_failed from 2938 * data path. Please note that if tx_failed is fixed to be from ppdu, 2939 * then this has to be removed 2940 */ 2941 peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num + 2942 peer->stats.tx.dropped.fw_rem_notx + 2943 peer->stats.tx.dropped.fw_rem_tx + 2944 peer->stats.tx.dropped.age_out + 2945 peer->stats.tx.dropped.fw_reason1 + 2946 peer->stats.tx.dropped.fw_reason2 + 2947 peer->stats.tx.dropped.fw_reason3; 2948 2949 if (ts->status < CDP_MAX_TX_TQM_STATUS) { 2950 tid_stats->tqm_status_cnt[ts->status]++; 2951 } 2952 2953 if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) { 2954 return; 2955 } 2956 2957 DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma); 2958 2959 DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu); 2960 DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu); 2961 2962 /* 2963 * Following Rate Statistics are updated from HTT PPDU events from FW. 2964 * Return from here if HTT PPDU events are enabled. 2965 */ 2966 if (!(soc->process_tx_status)) 2967 return; 2968 2969 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 2970 ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A))); 2971 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 2972 ((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A))); 2973 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 2974 ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B))); 2975 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 2976 ((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B))); 2977 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 2978 ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N))); 2979 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 2980 ((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N))); 2981 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 2982 ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC))); 2983 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 2984 ((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC))); 2985 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 2986 ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX))); 2987 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 2988 ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX))); 2989 2990 DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1); 2991 DP_STATS_INC(peer, tx.bw[ts->bw], 1); 2992 DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi); 2993 DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1); 2994 DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc); 2995 DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc); 2996 DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1); 2997 2998 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE 2999 dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, 3000 &peer->stats, ts->peer_id, 3001 UPDATE_PEER_STATS, pdev->pdev_id); 3002 #endif 3003 } 3004 3005 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 3006 /** 3007 * dp_tx_flow_pool_lock() - take flow pool lock 3008 * @soc: core txrx main context 3009 * @tx_desc: tx desc 3010 * 3011 * Return: None 3012 */ 3013 static inline 3014 void dp_tx_flow_pool_lock(struct dp_soc *soc, 3015 struct dp_tx_desc_s *tx_desc) 3016 { 3017 struct dp_tx_desc_pool_s *pool; 3018 uint8_t desc_pool_id; 3019 3020 desc_pool_id = tx_desc->pool_id; 3021 pool = &soc->tx_desc[desc_pool_id]; 3022 3023 qdf_spin_lock_bh(&pool->flow_pool_lock); 3024 } 3025 3026 /** 3027 * dp_tx_flow_pool_unlock() - release flow pool lock 3028 * @soc: core txrx main context 3029 * @tx_desc: tx desc 3030 * 3031 * Return: None 3032 */ 3033 static inline 3034 void dp_tx_flow_pool_unlock(struct dp_soc *soc, 3035 struct dp_tx_desc_s *tx_desc) 3036 { 3037 struct dp_tx_desc_pool_s *pool; 3038 uint8_t desc_pool_id; 3039 3040 desc_pool_id = tx_desc->pool_id; 3041 pool = &soc->tx_desc[desc_pool_id]; 3042 3043 qdf_spin_unlock_bh(&pool->flow_pool_lock); 3044 } 3045 #else 3046 static inline 3047 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) 3048 { 3049 } 3050 3051 static inline 3052 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) 3053 { 3054 } 3055 #endif 3056 3057 /** 3058 * dp_tx_notify_completion() - Notify tx completion for this desc 3059 * @soc: core txrx main context 3060 * @tx_desc: tx desc 3061 * @netbuf: buffer 3062 * 3063 * Return: none 3064 */ 3065 static inline void dp_tx_notify_completion(struct dp_soc *soc, 3066 struct dp_tx_desc_s *tx_desc, 3067 qdf_nbuf_t netbuf) 3068 { 3069 void *osif_dev; 3070 ol_txrx_completion_fp tx_compl_cbk = NULL; 3071 3072 qdf_assert(tx_desc); 3073 3074 dp_tx_flow_pool_lock(soc, tx_desc); 3075 3076 if (!tx_desc->vdev || 3077 !tx_desc->vdev->osif_vdev) { 3078 dp_tx_flow_pool_unlock(soc, tx_desc); 3079 return; 3080 } 3081 3082 osif_dev = tx_desc->vdev->osif_vdev; 3083 tx_compl_cbk = tx_desc->vdev->tx_comp; 3084 dp_tx_flow_pool_unlock(soc, tx_desc); 3085 3086 if (tx_compl_cbk) 3087 tx_compl_cbk(netbuf, osif_dev); 3088 } 3089 3090 /** dp_tx_sojourn_stats_process() - Collect sojourn stats 3091 * @pdev: pdev handle 3092 * @tid: tid value 3093 * @txdesc_ts: timestamp from txdesc 3094 * @ppdu_id: ppdu id 3095 * 3096 * Return: none 3097 */ 3098 #ifdef FEATURE_PERPKT_INFO 3099 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev, 3100 struct dp_peer *peer, 3101 uint8_t tid, 3102 uint64_t txdesc_ts, 3103 uint32_t ppdu_id) 3104 { 3105 uint64_t delta_ms; 3106 struct cdp_tx_sojourn_stats *sojourn_stats; 3107 3108 if (qdf_unlikely(pdev->enhanced_stats_en == 0)) 3109 return; 3110 3111 if (qdf_unlikely(tid == HTT_INVALID_TID || 3112 tid >= CDP_DATA_TID_MAX)) 3113 return; 3114 3115 if (qdf_unlikely(!pdev->sojourn_buf)) 3116 return; 3117 3118 sojourn_stats = (struct cdp_tx_sojourn_stats *) 3119 qdf_nbuf_data(pdev->sojourn_buf); 3120 3121 sojourn_stats->cookie = (void *)peer->wlanstats_ctx; 3122 3123 delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) - 3124 txdesc_ts; 3125 qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid], 3126 delta_ms); 3127 sojourn_stats->sum_sojourn_msdu[tid] = delta_ms; 3128 sojourn_stats->num_msdus[tid] = 1; 3129 sojourn_stats->avg_sojourn_msdu[tid].internal = 3130 peer->avg_sojourn_msdu[tid].internal; 3131 dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc, 3132 pdev->sojourn_buf, HTT_INVALID_PEER, 3133 WDI_NO_VAL, pdev->pdev_id); 3134 sojourn_stats->sum_sojourn_msdu[tid] = 0; 3135 sojourn_stats->num_msdus[tid] = 0; 3136 sojourn_stats->avg_sojourn_msdu[tid].internal = 0; 3137 } 3138 #else 3139 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev, 3140 uint8_t tid, 3141 uint64_t txdesc_ts, 3142 uint32_t ppdu_id) 3143 { 3144 } 3145 #endif 3146 3147 /** 3148 * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf 3149 * @soc: DP Soc handle 3150 * @tx_desc: software Tx descriptor 3151 * @ts : Tx completion status from HAL/HTT descriptor 3152 * 3153 * Return: none 3154 */ 3155 static inline void 3156 dp_tx_comp_process_desc(struct dp_soc *soc, 3157 struct dp_tx_desc_s *desc, 3158 struct hal_tx_completion_status *ts, 3159 struct dp_peer *peer) 3160 { 3161 uint64_t time_latency = 0; 3162 /* 3163 * m_copy/tx_capture modes are not supported for 3164 * scatter gather packets 3165 */ 3166 if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) { 3167 time_latency = (qdf_ktime_to_ms(qdf_ktime_get()) - 3168 desc->timestamp); 3169 } 3170 if (!(desc->msdu_ext_desc)) { 3171 if (QDF_STATUS_SUCCESS == 3172 dp_tx_add_to_comp_queue(soc, desc, ts, peer)) { 3173 return; 3174 } 3175 3176 if (QDF_STATUS_SUCCESS == 3177 dp_get_completion_indication_for_stack(soc, 3178 desc->pdev, 3179 peer, ts, 3180 desc->nbuf, 3181 time_latency)) { 3182 qdf_nbuf_unmap(soc->osdev, desc->nbuf, 3183 QDF_DMA_TO_DEVICE); 3184 dp_send_completion_to_stack(soc, 3185 desc->pdev, 3186 ts->peer_id, 3187 ts->ppdu_id, 3188 desc->nbuf); 3189 return; 3190 } 3191 } 3192 3193 dp_tx_comp_free_buf(soc, desc); 3194 } 3195 3196 /** 3197 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info 3198 * @tx_desc: software descriptor head pointer 3199 * @ts: Tx completion status 3200 * @peer: peer handle 3201 * @ring_id: ring number 3202 * 3203 * Return: none 3204 */ 3205 static inline 3206 void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc, 3207 struct hal_tx_completion_status *ts, 3208 struct dp_peer *peer, uint8_t ring_id) 3209 { 3210 uint32_t length; 3211 qdf_ether_header_t *eh; 3212 struct dp_soc *soc = NULL; 3213 struct dp_vdev *vdev = tx_desc->vdev; 3214 qdf_nbuf_t nbuf = tx_desc->nbuf; 3215 3216 if (!vdev || !nbuf) { 3217 dp_info_rl("invalid tx descriptor. vdev or nbuf NULL"); 3218 goto out; 3219 } 3220 3221 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 3222 3223 DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf, 3224 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD, 3225 QDF_TRACE_DEFAULT_PDEV_ID, 3226 qdf_nbuf_data_addr(nbuf), 3227 sizeof(qdf_nbuf_data(nbuf)), 3228 tx_desc->id, 3229 ts->status)); 3230 3231 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 3232 "-------------------- \n" 3233 "Tx Completion Stats: \n" 3234 "-------------------- \n" 3235 "ack_frame_rssi = %d \n" 3236 "first_msdu = %d \n" 3237 "last_msdu = %d \n" 3238 "msdu_part_of_amsdu = %d \n" 3239 "rate_stats valid = %d \n" 3240 "bw = %d \n" 3241 "pkt_type = %d \n" 3242 "stbc = %d \n" 3243 "ldpc = %d \n" 3244 "sgi = %d \n" 3245 "mcs = %d \n" 3246 "ofdma = %d \n" 3247 "tones_in_ru = %d \n" 3248 "tsf = %d \n" 3249 "ppdu_id = %d \n" 3250 "transmit_cnt = %d \n" 3251 "tid = %d \n" 3252 "peer_id = %d\n", 3253 ts->ack_frame_rssi, ts->first_msdu, 3254 ts->last_msdu, ts->msdu_part_of_amsdu, 3255 ts->valid, ts->bw, ts->pkt_type, ts->stbc, 3256 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma, 3257 ts->tones_in_ru, ts->tsf, ts->ppdu_id, 3258 ts->transmit_cnt, ts->tid, ts->peer_id); 3259 3260 soc = vdev->pdev->soc; 3261 3262 /* Update SoC level stats */ 3263 DP_STATS_INCC(soc, tx.dropped_fw_removed, 1, 3264 (ts->status == HAL_TX_TQM_RR_REM_CMD_REM)); 3265 3266 /* Update per-packet stats for mesh mode */ 3267 if (qdf_unlikely(vdev->mesh_vdev) && 3268 !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) 3269 dp_tx_comp_fill_tx_completion_stats(tx_desc, ts); 3270 3271 length = qdf_nbuf_len(nbuf); 3272 /* Update peer level stats */ 3273 if (!peer) { 3274 QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, 3275 "peer is null or deletion in progress"); 3276 DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length); 3277 goto out; 3278 } 3279 3280 if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) { 3281 if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) { 3282 DP_STATS_INC_PKT(peer, tx.mcast, 1, length); 3283 3284 if ((peer->vdev->tx_encap_type == 3285 htt_cmn_pkt_type_ethernet) && 3286 QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { 3287 DP_STATS_INC_PKT(peer, tx.bcast, 1, length); 3288 } 3289 } 3290 } else { 3291 DP_STATS_INC_PKT(peer, tx.ucast, 1, length); 3292 if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) 3293 DP_STATS_INC_PKT(peer, tx.tx_success, 1, length); 3294 } 3295 3296 dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id); 3297 3298 #ifdef QCA_SUPPORT_RDK_STATS 3299 if (soc->wlanstats_enabled) 3300 dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid, 3301 tx_desc->timestamp, 3302 ts->ppdu_id); 3303 #endif 3304 3305 out: 3306 return; 3307 } 3308 /** 3309 * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler 3310 * @soc: core txrx main context 3311 * @comp_head: software descriptor head pointer 3312 * @ring_id: ring number 3313 * 3314 * This function will process batch of descriptors reaped by dp_tx_comp_handler 3315 * and release the software descriptors after processing is complete 3316 * 3317 * Return: none 3318 */ 3319 static void 3320 dp_tx_comp_process_desc_list(struct dp_soc *soc, 3321 struct dp_tx_desc_s *comp_head, uint8_t ring_id) 3322 { 3323 struct dp_tx_desc_s *desc; 3324 struct dp_tx_desc_s *next; 3325 struct hal_tx_completion_status ts = {0}; 3326 struct dp_peer *peer; 3327 qdf_nbuf_t netbuf; 3328 3329 desc = comp_head; 3330 3331 while (desc) { 3332 hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc); 3333 peer = dp_peer_find_by_id(soc, ts.peer_id); 3334 dp_tx_comp_process_tx_status(desc, &ts, peer, ring_id); 3335 3336 netbuf = desc->nbuf; 3337 /* check tx complete notification */ 3338 if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf)) 3339 dp_tx_notify_completion(soc, desc, netbuf); 3340 3341 dp_tx_comp_process_desc(soc, desc, &ts, peer); 3342 3343 if (peer) 3344 dp_peer_unref_del_find_by_id(peer); 3345 3346 next = desc->next; 3347 3348 dp_tx_desc_release(desc, desc->pool_id); 3349 desc = next; 3350 } 3351 3352 } 3353 3354 /** 3355 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler 3356 * @tx_desc: software descriptor head pointer 3357 * @status : Tx completion status from HTT descriptor 3358 * @ring_id: ring number 3359 * 3360 * This function will process HTT Tx indication messages from Target 3361 * 3362 * Return: none 3363 */ 3364 static 3365 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status, 3366 uint8_t ring_id) 3367 { 3368 uint8_t tx_status; 3369 struct dp_pdev *pdev; 3370 struct dp_vdev *vdev; 3371 struct dp_soc *soc; 3372 struct hal_tx_completion_status ts = {0}; 3373 uint32_t *htt_desc = (uint32_t *)status; 3374 struct dp_peer *peer; 3375 struct cdp_tid_tx_stats *tid_stats = NULL; 3376 struct htt_soc *htt_handle; 3377 3378 qdf_assert(tx_desc->pdev); 3379 3380 pdev = tx_desc->pdev; 3381 vdev = tx_desc->vdev; 3382 soc = pdev->soc; 3383 3384 if (!vdev) 3385 return; 3386 tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]); 3387 htt_handle = (struct htt_soc *)soc->htt_handle; 3388 htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status); 3389 3390 switch (tx_status) { 3391 case HTT_TX_FW2WBM_TX_STATUS_OK: 3392 case HTT_TX_FW2WBM_TX_STATUS_DROP: 3393 case HTT_TX_FW2WBM_TX_STATUS_TTL: 3394 { 3395 uint8_t tid; 3396 if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) { 3397 ts.peer_id = 3398 HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET( 3399 htt_desc[2]); 3400 ts.tid = 3401 HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET( 3402 htt_desc[2]); 3403 } else { 3404 ts.peer_id = HTT_INVALID_PEER; 3405 ts.tid = HTT_INVALID_TID; 3406 } 3407 ts.ppdu_id = 3408 HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET( 3409 htt_desc[1]); 3410 ts.ack_frame_rssi = 3411 HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET( 3412 htt_desc[1]); 3413 3414 ts.first_msdu = 1; 3415 ts.last_msdu = 1; 3416 tid = ts.tid; 3417 if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) 3418 tid = CDP_MAX_DATA_TIDS - 1; 3419 3420 tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid]; 3421 3422 if (qdf_unlikely(pdev->delay_stats_flag)) 3423 dp_tx_compute_delay(vdev, tx_desc, tid, ring_id); 3424 if (tx_status < CDP_MAX_TX_HTT_STATUS) { 3425 tid_stats->htt_status_cnt[tx_status]++; 3426 } 3427 3428 peer = dp_peer_find_by_id(soc, ts.peer_id); 3429 3430 if (qdf_likely(peer)) 3431 dp_peer_unref_del_find_by_id(peer); 3432 3433 dp_tx_comp_process_tx_status(tx_desc, &ts, peer, ring_id); 3434 dp_tx_comp_process_desc(soc, tx_desc, &ts, peer); 3435 dp_tx_desc_release(tx_desc, tx_desc->pool_id); 3436 3437 break; 3438 } 3439 case HTT_TX_FW2WBM_TX_STATUS_REINJECT: 3440 { 3441 dp_tx_reinject_handler(tx_desc, status); 3442 break; 3443 } 3444 case HTT_TX_FW2WBM_TX_STATUS_INSPECT: 3445 { 3446 dp_tx_inspect_handler(tx_desc, status); 3447 break; 3448 } 3449 case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY: 3450 { 3451 dp_tx_mec_handler(vdev, status); 3452 break; 3453 } 3454 default: 3455 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 3456 "%s Invalid HTT tx_status %d\n", 3457 __func__, tx_status); 3458 break; 3459 } 3460 } 3461 3462 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT 3463 static inline 3464 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped) 3465 { 3466 bool limit_hit = false; 3467 struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx; 3468 3469 limit_hit = 3470 (num_reaped >= cfg->tx_comp_loop_pkt_limit) ? true : false; 3471 3472 if (limit_hit) 3473 DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1); 3474 3475 return limit_hit; 3476 } 3477 3478 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc) 3479 { 3480 return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check; 3481 } 3482 #else 3483 static inline 3484 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped) 3485 { 3486 return false; 3487 } 3488 3489 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc) 3490 { 3491 return false; 3492 } 3493 #endif 3494 3495 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc, 3496 hal_ring_handle_t hal_ring_hdl, uint8_t ring_id, 3497 uint32_t quota) 3498 { 3499 void *tx_comp_hal_desc; 3500 uint8_t buffer_src; 3501 uint8_t pool_id; 3502 uint32_t tx_desc_id; 3503 struct dp_tx_desc_s *tx_desc = NULL; 3504 struct dp_tx_desc_s *head_desc = NULL; 3505 struct dp_tx_desc_s *tail_desc = NULL; 3506 uint32_t num_processed = 0; 3507 uint32_t count = 0; 3508 bool force_break = false; 3509 3510 DP_HIST_INIT(); 3511 3512 more_data: 3513 /* Re-initialize local variables to be re-used */ 3514 head_desc = NULL; 3515 tail_desc = NULL; 3516 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { 3517 dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl); 3518 return 0; 3519 } 3520 3521 /* Find head descriptor from completion ring */ 3522 while (qdf_likely(tx_comp_hal_desc = 3523 hal_srng_dst_get_next(soc->hal_soc, hal_ring_hdl))) { 3524 3525 buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc); 3526 3527 /* If this buffer was not released by TQM or FW, then it is not 3528 * Tx completion indication, assert */ 3529 if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) && 3530 (buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) { 3531 uint8_t wbm_internal_error; 3532 3533 dp_err_rl( 3534 "Tx comp release_src != TQM | FW but from %d", 3535 buffer_src); 3536 hal_dump_comp_desc(tx_comp_hal_desc); 3537 DP_STATS_INC(soc, tx.invalid_release_source, 1); 3538 3539 /* When WBM sees NULL buffer_addr_info in any of 3540 * ingress rings it sends an error indication, 3541 * with wbm_internal_error=1, to a specific ring. 3542 * The WBM2SW ring used to indicate these errors is 3543 * fixed in HW, and that ring is being used as Tx 3544 * completion ring. These errors are not related to 3545 * Tx completions, and should just be ignored 3546 */ 3547 3548 wbm_internal_error = 3549 hal_get_wbm_internal_error(tx_comp_hal_desc); 3550 3551 if (wbm_internal_error) { 3552 dp_err_rl("Tx comp wbm_internal_error!!"); 3553 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1); 3554 3555 if (HAL_TX_COMP_RELEASE_SOURCE_REO == 3556 buffer_src) 3557 dp_handle_wbm_internal_error( 3558 soc, 3559 tx_comp_hal_desc, 3560 hal_tx_comp_get_buffer_type( 3561 tx_comp_hal_desc)); 3562 3563 } else { 3564 dp_err_rl("Tx comp wbm_internal_error false"); 3565 DP_STATS_INC(soc, tx.non_wbm_internal_err, 1); 3566 } 3567 continue; 3568 } 3569 3570 /* Get descriptor id */ 3571 tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc); 3572 pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >> 3573 DP_TX_DESC_ID_POOL_OS; 3574 3575 /* Find Tx descriptor */ 3576 tx_desc = dp_tx_desc_find(soc, pool_id, 3577 (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >> 3578 DP_TX_DESC_ID_PAGE_OS, 3579 (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >> 3580 DP_TX_DESC_ID_OFFSET_OS); 3581 3582 /* 3583 * If the descriptor is already freed in vdev_detach, 3584 * continue to next descriptor 3585 */ 3586 if (!tx_desc->vdev && !tx_desc->flags) { 3587 QDF_TRACE(QDF_MODULE_ID_DP, 3588 QDF_TRACE_LEVEL_INFO, 3589 "Descriptor freed in vdev_detach %d", 3590 tx_desc_id); 3591 3592 num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK); 3593 count++; 3594 continue; 3595 } 3596 3597 if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) { 3598 QDF_TRACE(QDF_MODULE_ID_DP, 3599 QDF_TRACE_LEVEL_INFO, 3600 "pdev in down state %d", 3601 tx_desc_id); 3602 3603 num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK); 3604 count++; 3605 3606 dp_tx_comp_free_buf(soc, tx_desc); 3607 dp_tx_desc_release(tx_desc, tx_desc->pool_id); 3608 continue; 3609 } 3610 3611 /* 3612 * If the release source is FW, process the HTT status 3613 */ 3614 if (qdf_unlikely(buffer_src == 3615 HAL_TX_COMP_RELEASE_SOURCE_FW)) { 3616 uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN]; 3617 hal_tx_comp_get_htt_desc(tx_comp_hal_desc, 3618 htt_tx_status); 3619 dp_tx_process_htt_completion(tx_desc, 3620 htt_tx_status, ring_id); 3621 } else { 3622 /* Pool id is not matching. Error */ 3623 if (tx_desc->pool_id != pool_id) { 3624 QDF_TRACE(QDF_MODULE_ID_DP, 3625 QDF_TRACE_LEVEL_FATAL, 3626 "Tx Comp pool id %d not matched %d", 3627 pool_id, tx_desc->pool_id); 3628 3629 qdf_assert_always(0); 3630 } 3631 3632 if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) || 3633 !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) { 3634 QDF_TRACE(QDF_MODULE_ID_DP, 3635 QDF_TRACE_LEVEL_FATAL, 3636 "Txdesc invalid, flgs = %x,id = %d", 3637 tx_desc->flags, tx_desc_id); 3638 qdf_assert_always(0); 3639 } 3640 3641 /* First ring descriptor on the cycle */ 3642 if (!head_desc) { 3643 head_desc = tx_desc; 3644 tail_desc = tx_desc; 3645 } 3646 3647 tail_desc->next = tx_desc; 3648 tx_desc->next = NULL; 3649 tail_desc = tx_desc; 3650 3651 DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id); 3652 3653 /* Collect hw completion contents */ 3654 hal_tx_comp_desc_sync(tx_comp_hal_desc, 3655 &tx_desc->comp, 1); 3656 3657 } 3658 3659 num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK); 3660 3661 /* 3662 * Processed packet count is more than given quota 3663 * stop to processing 3664 */ 3665 if (num_processed >= quota) { 3666 force_break = true; 3667 break; 3668 } 3669 3670 count++; 3671 3672 if (dp_tx_comp_loop_pkt_limit_hit(soc, count)) 3673 break; 3674 } 3675 3676 dp_srng_access_end(int_ctx, soc, hal_ring_hdl); 3677 3678 /* Process the reaped descriptors */ 3679 if (head_desc) 3680 dp_tx_comp_process_desc_list(soc, head_desc, ring_id); 3681 3682 if (dp_tx_comp_enable_eol_data_check(soc)) { 3683 if (!force_break && 3684 hal_srng_dst_peek_sync_locked(soc->hal_soc, 3685 hal_ring_hdl)) { 3686 DP_STATS_INC(soc, tx.hp_oos2, 1); 3687 if (!hif_exec_should_yield(soc->hif_handle, 3688 int_ctx->dp_intr_id)) 3689 goto more_data; 3690 } 3691 } 3692 DP_TX_HIST_STATS_PER_PDEV(); 3693 3694 return num_processed; 3695 } 3696 3697 #ifdef FEATURE_WLAN_TDLS 3698 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 3699 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list) 3700 { 3701 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3702 struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); 3703 3704 if (!vdev) { 3705 dp_err("vdev handle for id %d is NULL", vdev_id); 3706 return NULL; 3707 } 3708 3709 if (tx_spec & OL_TX_SPEC_NO_FREE) 3710 vdev->is_tdls_frame = true; 3711 3712 return dp_tx_send(soc_hdl, vdev_id, msdu_list); 3713 } 3714 #endif 3715 3716 /** 3717 * dp_tx_vdev_attach() - attach vdev to dp tx 3718 * @vdev: virtual device instance 3719 * 3720 * Return: QDF_STATUS_SUCCESS: success 3721 * QDF_STATUS_E_RESOURCES: Error return 3722 */ 3723 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev) 3724 { 3725 /* 3726 * Fill HTT TCL Metadata with Vdev ID and MAC ID 3727 */ 3728 HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata, 3729 HTT_TCL_METADATA_TYPE_VDEV_BASED); 3730 3731 HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata, 3732 vdev->vdev_id); 3733 3734 HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, 3735 DP_SW2HW_MACID(vdev->pdev->pdev_id)); 3736 3737 /* 3738 * Set HTT Extension Valid bit to 0 by default 3739 */ 3740 HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0); 3741 3742 dp_tx_vdev_update_search_flags(vdev); 3743 3744 return QDF_STATUS_SUCCESS; 3745 } 3746 3747 #ifndef FEATURE_WDS 3748 static inline bool dp_tx_da_search_override(struct dp_vdev *vdev) 3749 { 3750 return false; 3751 } 3752 #endif 3753 3754 /** 3755 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode 3756 * @vdev: virtual device instance 3757 * 3758 * Return: void 3759 * 3760 */ 3761 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev) 3762 { 3763 struct dp_soc *soc = vdev->pdev->soc; 3764 3765 /* 3766 * Enable both AddrY (SA based search) and AddrX (Da based search) 3767 * for TDLS link 3768 * 3769 * Enable AddrY (SA based search) only for non-WDS STA and 3770 * ProxySTA VAP (in HKv1) modes. 3771 * 3772 * In all other VAP modes, only DA based search should be 3773 * enabled 3774 */ 3775 if (vdev->opmode == wlan_op_mode_sta && 3776 vdev->tdls_link_connected) 3777 vdev->hal_desc_addr_search_flags = 3778 (HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN); 3779 else if ((vdev->opmode == wlan_op_mode_sta) && 3780 !dp_tx_da_search_override(vdev)) 3781 vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN; 3782 else 3783 vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN; 3784 3785 /* Set search type only when peer map v2 messaging is enabled 3786 * as we will have the search index (AST hash) only when v2 is 3787 * enabled 3788 */ 3789 if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta) 3790 vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH; 3791 else 3792 vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT; 3793 } 3794 3795 static inline bool 3796 dp_is_tx_desc_flush_match(struct dp_pdev *pdev, 3797 struct dp_vdev *vdev, 3798 struct dp_tx_desc_s *tx_desc) 3799 { 3800 if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED))) 3801 return false; 3802 3803 /* 3804 * if vdev is given, then only check whether desc 3805 * vdev match. if vdev is NULL, then check whether 3806 * desc pdev match. 3807 */ 3808 return vdev ? (tx_desc->vdev == vdev) : (tx_desc->pdev == pdev); 3809 } 3810 3811 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 3812 /** 3813 * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc 3814 * 3815 * @soc: Handle to DP SoC structure 3816 * @tx_desc: pointer of one TX desc 3817 * @desc_pool_id: TX Desc pool id 3818 */ 3819 static inline void 3820 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, 3821 uint8_t desc_pool_id) 3822 { 3823 struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id]; 3824 3825 qdf_spin_lock_bh(&pool->flow_pool_lock); 3826 3827 tx_desc->vdev = NULL; 3828 3829 qdf_spin_unlock_bh(&pool->flow_pool_lock); 3830 } 3831 3832 /** 3833 * dp_tx_desc_flush() - release resources associated 3834 * to TX Desc 3835 * 3836 * @dp_pdev: Handle to DP pdev structure 3837 * @vdev: virtual device instance 3838 * NULL: no specific Vdev is required and check all allcated TX desc 3839 * on this pdev. 3840 * Non-NULL: only check the allocated TX Desc associated to this Vdev. 3841 * 3842 * @force_free: 3843 * true: flush the TX desc. 3844 * false: only reset the Vdev in each allocated TX desc 3845 * that associated to current Vdev. 3846 * 3847 * This function will go through the TX desc pool to flush 3848 * the outstanding TX data or reset Vdev to NULL in associated TX 3849 * Desc. 3850 */ 3851 static void dp_tx_desc_flush(struct dp_pdev *pdev, 3852 struct dp_vdev *vdev, 3853 bool force_free) 3854 { 3855 uint8_t i; 3856 uint32_t j; 3857 uint32_t num_desc, page_id, offset; 3858 uint16_t num_desc_per_page; 3859 struct dp_soc *soc = pdev->soc; 3860 struct dp_tx_desc_s *tx_desc = NULL; 3861 struct dp_tx_desc_pool_s *tx_desc_pool = NULL; 3862 3863 if (!vdev && !force_free) { 3864 dp_err("Reset TX desc vdev, Vdev param is required!"); 3865 return; 3866 } 3867 3868 for (i = 0; i < MAX_TXDESC_POOLS; i++) { 3869 tx_desc_pool = &soc->tx_desc[i]; 3870 if (!(tx_desc_pool->pool_size) || 3871 IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) || 3872 !(tx_desc_pool->desc_pages.cacheable_pages)) 3873 continue; 3874 3875 num_desc = tx_desc_pool->pool_size; 3876 num_desc_per_page = 3877 tx_desc_pool->desc_pages.num_element_per_page; 3878 for (j = 0; j < num_desc; j++) { 3879 page_id = j / num_desc_per_page; 3880 offset = j % num_desc_per_page; 3881 3882 if (qdf_unlikely(!(tx_desc_pool-> 3883 desc_pages.cacheable_pages))) 3884 break; 3885 3886 tx_desc = dp_tx_desc_find(soc, i, page_id, offset); 3887 3888 if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) { 3889 /* 3890 * Free TX desc if force free is 3891 * required, otherwise only reset vdev 3892 * in this TX desc. 3893 */ 3894 if (force_free) { 3895 dp_tx_comp_free_buf(soc, tx_desc); 3896 dp_tx_desc_release(tx_desc, i); 3897 } else { 3898 dp_tx_desc_reset_vdev(soc, tx_desc, 3899 i); 3900 } 3901 } 3902 } 3903 } 3904 } 3905 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */ 3906 3907 static inline void 3908 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, 3909 uint8_t desc_pool_id) 3910 { 3911 TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock); 3912 3913 tx_desc->vdev = NULL; 3914 3915 TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); 3916 } 3917 3918 static void dp_tx_desc_flush(struct dp_pdev *pdev, 3919 struct dp_vdev *vdev, 3920 bool force_free) 3921 { 3922 uint8_t i, num_pool; 3923 uint32_t j; 3924 uint32_t num_desc, page_id, offset; 3925 uint16_t num_desc_per_page; 3926 struct dp_soc *soc = pdev->soc; 3927 struct dp_tx_desc_s *tx_desc = NULL; 3928 struct dp_tx_desc_pool_s *tx_desc_pool = NULL; 3929 3930 if (!vdev && !force_free) { 3931 dp_err("Reset TX desc vdev, Vdev param is required!"); 3932 return; 3933 } 3934 3935 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); 3936 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); 3937 3938 for (i = 0; i < num_pool; i++) { 3939 tx_desc_pool = &soc->tx_desc[i]; 3940 if (!tx_desc_pool->desc_pages.cacheable_pages) 3941 continue; 3942 3943 num_desc_per_page = 3944 tx_desc_pool->desc_pages.num_element_per_page; 3945 for (j = 0; j < num_desc; j++) { 3946 page_id = j / num_desc_per_page; 3947 offset = j % num_desc_per_page; 3948 tx_desc = dp_tx_desc_find(soc, i, page_id, offset); 3949 3950 if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) { 3951 if (force_free) { 3952 dp_tx_comp_free_buf(soc, tx_desc); 3953 dp_tx_desc_release(tx_desc, i); 3954 } else { 3955 dp_tx_desc_reset_vdev(soc, tx_desc, 3956 i); 3957 } 3958 } 3959 } 3960 } 3961 } 3962 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */ 3963 3964 /** 3965 * dp_tx_vdev_detach() - detach vdev from dp tx 3966 * @vdev: virtual device instance 3967 * 3968 * Return: QDF_STATUS_SUCCESS: success 3969 * QDF_STATUS_E_RESOURCES: Error return 3970 */ 3971 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev) 3972 { 3973 struct dp_pdev *pdev = vdev->pdev; 3974 3975 /* Reset TX desc associated to this Vdev as NULL */ 3976 dp_tx_desc_flush(pdev, vdev, false); 3977 dp_tx_vdev_multipass_deinit(vdev); 3978 3979 return QDF_STATUS_SUCCESS; 3980 } 3981 3982 /** 3983 * dp_tx_pdev_attach() - attach pdev to dp tx 3984 * @pdev: physical device instance 3985 * 3986 * Return: QDF_STATUS_SUCCESS: success 3987 * QDF_STATUS_E_RESOURCES: Error return 3988 */ 3989 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev) 3990 { 3991 struct dp_soc *soc = pdev->soc; 3992 3993 /* Initialize Flow control counters */ 3994 qdf_atomic_init(&pdev->num_tx_exception); 3995 qdf_atomic_init(&pdev->num_tx_outstanding); 3996 3997 if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { 3998 /* Initialize descriptors in TCL Ring */ 3999 hal_tx_init_data_ring(soc->hal_soc, 4000 soc->tcl_data_ring[pdev->pdev_id].hal_srng); 4001 } 4002 4003 return QDF_STATUS_SUCCESS; 4004 } 4005 4006 /** 4007 * dp_tx_pdev_detach() - detach pdev from dp tx 4008 * @pdev: physical device instance 4009 * 4010 * Return: QDF_STATUS_SUCCESS: success 4011 * QDF_STATUS_E_RESOURCES: Error return 4012 */ 4013 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev) 4014 { 4015 /* flush TX outstanding data per pdev */ 4016 dp_tx_desc_flush(pdev, NULL, true); 4017 dp_tx_me_exit(pdev); 4018 return QDF_STATUS_SUCCESS; 4019 } 4020 4021 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 4022 /* Pools will be allocated dynamically */ 4023 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool, 4024 int num_desc) 4025 { 4026 uint8_t i; 4027 4028 for (i = 0; i < num_pool; i++) { 4029 qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock); 4030 soc->tx_desc[i].status = FLOW_POOL_INACTIVE; 4031 } 4032 4033 return 0; 4034 } 4035 4036 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool) 4037 { 4038 uint8_t i; 4039 4040 for (i = 0; i < num_pool; i++) 4041 qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock); 4042 } 4043 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */ 4044 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool, 4045 int num_desc) 4046 { 4047 uint8_t i; 4048 4049 /* Allocate software Tx descriptor pools */ 4050 for (i = 0; i < num_pool; i++) { 4051 if (dp_tx_desc_pool_alloc(soc, i, num_desc)) { 4052 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 4053 "%s Tx Desc Pool alloc %d failed %pK", 4054 __func__, i, soc); 4055 return ENOMEM; 4056 } 4057 } 4058 return 0; 4059 } 4060 4061 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool) 4062 { 4063 uint8_t i; 4064 4065 for (i = 0; i < num_pool; i++) { 4066 qdf_assert_always(!soc->tx_desc[i].num_allocated); 4067 if (dp_tx_desc_pool_free(soc, i)) { 4068 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 4069 "%s Tx Desc Pool Free failed", __func__); 4070 } 4071 } 4072 } 4073 4074 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */ 4075 4076 #ifndef QCA_MEM_ATTACH_ON_WIFI3 4077 /** 4078 * dp_tso_attach_wifi3() - TSO attach handler 4079 * @txrx_soc: Opaque Dp handle 4080 * 4081 * Reserve TSO descriptor buffers 4082 * 4083 * Return: QDF_STATUS_E_FAILURE on failure or 4084 * QDF_STATUS_SUCCESS on success 4085 */ 4086 static 4087 QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc) 4088 { 4089 return dp_tso_soc_attach(txrx_soc); 4090 } 4091 4092 /** 4093 * dp_tso_detach_wifi3() - TSO Detach handler 4094 * @txrx_soc: Opaque Dp handle 4095 * 4096 * Deallocate TSO descriptor buffers 4097 * 4098 * Return: QDF_STATUS_E_FAILURE on failure or 4099 * QDF_STATUS_SUCCESS on success 4100 */ 4101 static 4102 QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc) 4103 { 4104 return dp_tso_soc_detach(txrx_soc); 4105 } 4106 #else 4107 static 4108 QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc) 4109 { 4110 return QDF_STATUS_SUCCESS; 4111 } 4112 4113 static 4114 QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc) 4115 { 4116 return QDF_STATUS_SUCCESS; 4117 } 4118 #endif 4119 4120 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc) 4121 { 4122 struct dp_soc *soc = (struct dp_soc *)txrx_soc; 4123 uint8_t i; 4124 uint8_t num_pool; 4125 uint32_t num_desc; 4126 4127 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); 4128 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); 4129 4130 for (i = 0; i < num_pool; i++) 4131 dp_tx_tso_desc_pool_free(soc, i); 4132 4133 dp_info("%s TSO Desc Pool %d Free descs = %d", 4134 __func__, num_pool, num_desc); 4135 4136 for (i = 0; i < num_pool; i++) 4137 dp_tx_tso_num_seg_pool_free(soc, i); 4138 4139 dp_info("%s TSO Num of seg Desc Pool %d Free descs = %d", 4140 __func__, num_pool, num_desc); 4141 4142 return QDF_STATUS_SUCCESS; 4143 } 4144 4145 /** 4146 * dp_tso_attach() - TSO attach handler 4147 * @txrx_soc: Opaque Dp handle 4148 * 4149 * Reserve TSO descriptor buffers 4150 * 4151 * Return: QDF_STATUS_E_FAILURE on failure or 4152 * QDF_STATUS_SUCCESS on success 4153 */ 4154 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc) 4155 { 4156 struct dp_soc *soc = (struct dp_soc *)txrx_soc; 4157 uint8_t i; 4158 uint8_t num_pool; 4159 uint32_t num_desc; 4160 4161 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); 4162 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); 4163 4164 for (i = 0; i < num_pool; i++) { 4165 if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) { 4166 dp_err("TSO Desc Pool alloc %d failed %pK", 4167 i, soc); 4168 4169 return QDF_STATUS_E_FAILURE; 4170 } 4171 } 4172 4173 dp_info("%s TSO Desc Alloc %d, descs = %d", 4174 __func__, num_pool, num_desc); 4175 4176 for (i = 0; i < num_pool; i++) { 4177 if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) { 4178 dp_err("TSO Num of seg Pool alloc %d failed %pK", 4179 i, soc); 4180 4181 return QDF_STATUS_E_FAILURE; 4182 } 4183 } 4184 return QDF_STATUS_SUCCESS; 4185 } 4186 4187 /** 4188 * dp_tx_soc_detach() - detach soc from dp tx 4189 * @soc: core txrx main context 4190 * 4191 * This function will detach dp tx into main device context 4192 * will free dp tx resource and initialize resources 4193 * 4194 * Return: QDF_STATUS_SUCCESS: success 4195 * QDF_STATUS_E_RESOURCES: Error return 4196 */ 4197 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc) 4198 { 4199 uint8_t num_pool; 4200 uint16_t num_desc; 4201 uint16_t num_ext_desc; 4202 uint8_t i; 4203 QDF_STATUS status = QDF_STATUS_SUCCESS; 4204 4205 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); 4206 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); 4207 num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx); 4208 4209 dp_tx_flow_control_deinit(soc); 4210 dp_tx_delete_static_pools(soc, num_pool); 4211 4212 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 4213 "%s Tx Desc Pool Free num_pool = %d, descs = %d", 4214 __func__, num_pool, num_desc); 4215 4216 for (i = 0; i < num_pool; i++) { 4217 if (dp_tx_ext_desc_pool_free(soc, i)) { 4218 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 4219 "%s Tx Ext Desc Pool Free failed", 4220 __func__); 4221 return QDF_STATUS_E_RESOURCES; 4222 } 4223 } 4224 4225 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 4226 "%s MSDU Ext Desc Pool %d Free descs = %d", 4227 __func__, num_pool, num_ext_desc); 4228 4229 status = dp_tso_detach_wifi3(soc); 4230 if (status != QDF_STATUS_SUCCESS) 4231 return status; 4232 4233 return QDF_STATUS_SUCCESS; 4234 } 4235 4236 /** 4237 * dp_tx_soc_attach() - attach soc to dp tx 4238 * @soc: core txrx main context 4239 * 4240 * This function will attach dp tx into main device context 4241 * will allocate dp tx resource and initialize resources 4242 * 4243 * Return: QDF_STATUS_SUCCESS: success 4244 * QDF_STATUS_E_RESOURCES: Error return 4245 */ 4246 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc) 4247 { 4248 uint8_t i; 4249 uint8_t num_pool; 4250 uint32_t num_desc; 4251 uint32_t num_ext_desc; 4252 QDF_STATUS status = QDF_STATUS_SUCCESS; 4253 4254 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); 4255 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); 4256 num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx); 4257 4258 if (num_pool > MAX_TXDESC_POOLS) 4259 goto fail; 4260 4261 if (dp_tx_alloc_static_pools(soc, num_pool, num_desc)) 4262 goto fail; 4263 4264 dp_tx_flow_control_init(soc); 4265 4266 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 4267 "%s Tx Desc Alloc num_pool = %d, descs = %d", 4268 __func__, num_pool, num_desc); 4269 4270 /* Allocate extension tx descriptor pools */ 4271 for (i = 0; i < num_pool; i++) { 4272 if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) { 4273 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 4274 "MSDU Ext Desc Pool alloc %d failed %pK", 4275 i, soc); 4276 4277 goto fail; 4278 } 4279 } 4280 4281 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 4282 "%s MSDU Ext Desc Alloc %d, descs = %d", 4283 __func__, num_pool, num_ext_desc); 4284 4285 status = dp_tso_attach_wifi3((void *)soc); 4286 if (status != QDF_STATUS_SUCCESS) 4287 goto fail; 4288 4289 4290 /* Initialize descriptors in TCL Rings */ 4291 if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { 4292 for (i = 0; i < soc->num_tcl_data_rings; i++) { 4293 hal_tx_init_data_ring(soc->hal_soc, 4294 soc->tcl_data_ring[i].hal_srng); 4295 } 4296 } 4297 4298 /* 4299 * todo - Add a runtime config option to enable this. 4300 */ 4301 /* 4302 * Due to multiple issues on NPR EMU, enable it selectively 4303 * only for NPR EMU, should be removed, once NPR platforms 4304 * are stable. 4305 */ 4306 soc->process_tx_status = CONFIG_PROCESS_TX_STATUS; 4307 4308 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 4309 "%s HAL Tx init Success", __func__); 4310 4311 return QDF_STATUS_SUCCESS; 4312 4313 fail: 4314 /* Detach will take care of freeing only allocated resources */ 4315 dp_tx_soc_detach(soc); 4316 return QDF_STATUS_E_RESOURCES; 4317 } 4318