/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include "qdf_module.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#include "dp_hist.h"
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <wlan_dp_swlm.h>
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "cdp_txrx_cmn_reg.h"
#ifdef CONFIG_SAWF
#include <dp_sawf.h>
#endif

/* Flag to skip CCE classify when mesh or tid override enabled */
#define DP_TX_SKIP_CCE_CLASSIFY \
    (DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)

/* TODO: Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject */
#define DP_INVALID_PEER 0xFFFE

#define DP_RETRY_COUNT 7
#ifdef WLAN_PEER_JITTER
#define DP_AVG_JITTER_WEIGHT_DENOM 4
#define DP_AVG_DELAY_WEIGHT_DENOM 3
#endif

#ifdef QCA_DP_TX_FW_METADATA_V2
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
    HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
    HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
    HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
    HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
    HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
    HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
    HTT_TCL_METADATA_V2_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
    HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
#else
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
    HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
    HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
    HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
    HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
    HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
    HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
    HTT_TCL_METADATA_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
    HTT_TCL_METADATA_TYPE_VDEV_BASED
#endif

/* Mapping between cdp_sec_type (index) and hal encrypt type (value) */
uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
                                          HAL_TX_ENCRYPT_TYPE_WEP_128,
                                          HAL_TX_ENCRYPT_TYPE_WEP_104,
                                          HAL_TX_ENCRYPT_TYPE_WEP_40,
                                          HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
                                          HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
                                          HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
                                          HAL_TX_ENCRYPT_TYPE_WAPI,
                                          HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
                                          HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
                                          HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
                                          HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};

qdf_export_symbol(sec_type_map);

#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
    enum dp_tx_event_type type;

    if (flags & DP_TX_DESC_FLAG_FLUSH)
        type = DP_TX_DESC_FLUSH;
    else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
        type = DP_TX_COMP_UNMAP_ERR;
    else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
        type = DP_TX_COMP_UNMAP;
    else
        type = DP_TX_DESC_UNMAP;

    return type;
}

static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
                       qdf_nbuf_t skb, uint32_t sw_cookie,
                       enum dp_tx_event_type type)
{
    struct dp_tx_tcl_history *tx_tcl_history = &soc->tx_tcl_history;
    struct dp_tx_comp_history *tx_comp_history = &soc->tx_comp_history;
    struct dp_tx_desc_event *entry;
    uint32_t idx;
    uint16_t slot;

    switch (type) {
    case DP_TX_COMP_UNMAP:
    case DP_TX_COMP_UNMAP_ERR:
    case DP_TX_COMP_MSDU_EXT:
        if (qdf_unlikely(!tx_comp_history->allocated))
            return;

        dp_get_frag_hist_next_atomic_idx(&tx_comp_history->index, &idx,
                                         &slot,
                                         DP_TX_COMP_HIST_SLOT_SHIFT,
                                         DP_TX_COMP_HIST_PER_SLOT_MAX,
                                         DP_TX_COMP_HISTORY_SIZE);
        entry = &tx_comp_history->entry[slot][idx];
        break;
    case DP_TX_DESC_MAP:
    case DP_TX_DESC_UNMAP:
    case DP_TX_DESC_COOKIE:
    case DP_TX_DESC_FLUSH:
        if (qdf_unlikely(!tx_tcl_history->allocated))
            return;

        dp_get_frag_hist_next_atomic_idx(&tx_tcl_history->index, &idx,
                                         &slot,
                                         DP_TX_TCL_HIST_SLOT_SHIFT,
                                         DP_TX_TCL_HIST_PER_SLOT_MAX,
                                         DP_TX_TCL_HISTORY_SIZE);
        entry = &tx_tcl_history->entry[slot][idx];
        break;
    default:
        dp_info_rl("Invalid dp_tx_event_type: %d", type);
        return;
    }

    entry->skb = skb;
    entry->paddr = paddr;
    entry->sw_cookie = sw_cookie;
    entry->type = type;
    entry->ts = qdf_get_log_timestamp();
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
                          struct qdf_tso_seg_elem_t *tso_seg,
                          qdf_nbuf_t skb, uint32_t sw_cookie,
                          enum dp_tx_event_type type)
{
    int i;

    for (i = 1; i < tso_seg->seg.num_frags; i++) {
        dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
                               skb, sw_cookie, type);
    }

    if (!tso_seg->next)
        dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
                               skb, 0xFFFFFFFF, type);
}
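/*
 * Illustrative note (not driver logic): dp_tx_get_event_type() above
 * classifies a descriptor's flags into the history event that is recorded.
 * For example, a descriptor completed by HW carries
 * DP_TX_DESC_FLAG_COMPLETED_TX and is logged as DP_TX_COMP_UNMAP:
 *
 *   enum dp_tx_event_type ev =
 *           dp_tx_get_event_type(DP_TX_DESC_FLAG_COMPLETED_TX);
 *   // ev == DP_TX_COMP_UNMAP, recorded in soc->tx_comp_history
 */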
static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
                      qdf_nbuf_t skb, uint32_t sw_cookie,
                      enum dp_tx_event_type type)
{
    struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
    uint32_t num_segs = tso_info.num_segs;

    while (num_segs) {
        dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
        curr_seg = curr_seg->next;
        num_segs--;
    }
}

#else
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
    return DP_TX_DESC_INVAL_EVT;
}

static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
                       qdf_nbuf_t skb, uint32_t sw_cookie,
                       enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
                          struct qdf_tso_seg_elem_t *tso_seg,
                          qdf_nbuf_t skb, uint32_t sw_cookie,
                          enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
                      qdf_nbuf_t skb, uint32_t sw_cookie,
                      enum dp_tx_event_type type)
{
}
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */

static int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc);

/**
 * dp_is_tput_high() - Check if throughput is high
 * @soc: core txrx main context
 *
 * The current function is based on the RTPM tput policy variable, where
 * RTPM is avoided based on throughput.
 *
 * Return: non-zero if throughput is high, 0 otherwise
 */
static inline int dp_is_tput_high(struct dp_soc *soc)
{
    return dp_get_rtpm_tput_policy_requirement(soc);
}
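/*
 * Illustrative usage sketch (not driver logic): dp_is_tput_high() is used
 * as a cheap gate for optional per-packet work, e.g. in dp_tx_trace_pkt()
 * further below:
 *
 *   if (dp_is_tput_high(soc))
 *           return;  // skip per-packet tracing under high throughput
 */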
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 * @soc: core txrx main context
 * @seg_desc: tso segment descriptor
 * @num_seg_desc: tso number-of-segments descriptor
 */
static void dp_tx_tso_unmap_segment(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *seg_desc,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
    TSO_DEBUG("%s: Unmap the tso segment", __func__);
    if (qdf_unlikely(!seg_desc)) {
        DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
                 __func__, __LINE__);
        qdf_assert(0);
    } else if (qdf_unlikely(!num_seg_desc)) {
        DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
                 __func__, __LINE__);
        qdf_assert(0);
    } else {
        bool is_last_seg;

        /* no tso segment left to do dma unmap */
        if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
            return;

        is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
                      true : false;
        qdf_nbuf_unmap_tso_segment(soc->osdev,
                                   seg_desc, is_last_seg);
        num_seg_desc->num_seg.tso_cmn_num_seg--;
    }
}

/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 * @soc: soc device handle
 * @tx_desc: Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
                                   struct dp_tx_desc_s *tx_desc)
{
    TSO_DEBUG("%s: Free the tso descriptor", __func__);
    if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
        dp_tx_err("TSO desc is NULL!");
        qdf_assert(0);
    } else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
        dp_tx_err("TSO num desc is NULL!");
        qdf_assert(0);
    } else {
        struct qdf_tso_num_seg_elem_t *tso_num_desc =
            (struct qdf_tso_num_seg_elem_t *)tx_desc->
            msdu_ext_desc->tso_num_desc;

        /* Add the tso num segment into the free list */
        if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
            dp_tso_num_seg_free(soc, tx_desc->pool_id,
                                tx_desc->msdu_ext_desc->
                                tso_num_desc);
            tx_desc->msdu_ext_desc->tso_num_desc = NULL;
            DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
        }

        /* Add the tso segment into the free list */
        dp_tx_tso_desc_free(soc,
                            tx_desc->pool_id, tx_desc->msdu_ext_desc->
                            tso_desc);
        tx_desc->msdu_ext_desc->tso_desc = NULL;
    }
}
#else
static void dp_tx_tso_unmap_segment(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *seg_desc,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
                                   struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc: Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: None
 */
void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
    struct dp_pdev *pdev = tx_desc->pdev;
    struct dp_soc *soc;
    uint8_t comp_status = 0;

    qdf_assert(pdev);

    soc = pdev->soc;

    dp_tx_outstanding_dec(pdev);

    if (tx_desc->msdu_ext_desc) {
        if (tx_desc->frm_type == dp_tx_frm_tso)
            dp_tx_tso_desc_release(soc, tx_desc);

        if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
            dp_tx_me_free_buf(tx_desc->pdev,
                              tx_desc->msdu_ext_desc->me_buffer);

        dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
    }

    if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
        qdf_atomic_dec(&soc->num_tx_exception);

    if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
        tx_desc->buffer_src)
        comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
                                                     soc->hal_soc);
    else
        comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

    dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
                tx_desc->id, comp_status,
                qdf_atomic_read(&pdev->num_tx_outstanding));

    dp_tx_desc_free(soc, tx_desc, desc_pool_id);
    return;
}
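/*
 * Worked example of the release path above for a TSO frame split into three
 * segments: each completion calls dp_tx_desc_release(), which returns the
 * per-segment TSO descriptor via dp_tx_tso_desc_release(); only when
 * tso_cmn_num_seg drops to 0 (last segment) is the shared tso_num_desc
 * returned to its free list and tso_stats.tso_comp incremented.
 */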
/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                                          struct dp_tx_msdu_info_s *msdu_info)
{
    uint32_t *meta_data = msdu_info->meta_data;
    struct htt_tx_msdu_desc_ext2_t *desc_ext =
        (struct htt_tx_msdu_desc_ext2_t *)meta_data;

    uint8_t htt_desc_size;

    /* Size rounded off to a multiple of 8 bytes */
    uint8_t htt_desc_size_aligned;

    uint8_t *hdr = NULL;

    /*
     * Metadata - HTT MSDU Extension header
     */
    htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
    htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

    if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
        HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
                                                       meta_data[0]) ||
        msdu_info->exception_fw) {
        if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
                         htt_desc_size_aligned)) {
            nbuf = qdf_nbuf_realloc_headroom(nbuf,
                                             htt_desc_size_aligned);
            if (!nbuf) {
                /*
                 * qdf_nbuf_realloc_headroom won't do skb_clone
                 * as skb_realloc_headroom does, so no free is
                 * needed here.
                 */
                DP_STATS_INC(vdev,
                             tx_i.dropped.headroom_insufficient,
                             1);
                qdf_print(" %s[%d] skb_realloc_headroom failed",
                          __func__, __LINE__);
                return 0;
            }
        }
        /* Fill and add HTT metaheader */
        hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
        if (!hdr) {
            dp_tx_err("Error in filling HTT metadata");

            return 0;
        }
        qdf_mem_copy(hdr, desc_ext, htt_desc_size);
    } else if (vdev->opmode == wlan_op_mode_ocb) {
        /* Todo - Add support for DSRC */
    }

    return htt_desc_size_aligned;
}

/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
                                       void *ext_desc)
{
    uint8_t num_frag;
    uint32_t tso_flags;

    /*
     * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
     * tcp_flag_mask
     *
     * Checksum enable flags are set in TCL descriptor and not in Extension
     * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
     */
    tso_flags = *(uint32_t *)&tso_seg->tso_flags;

    hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

    hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
                                    tso_seg->tso_flags.ip_len);

    hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
    hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

    for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
        uint32_t lo = 0;
        uint32_t hi = 0;

        qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
                          (tso_seg->tso_frags[num_frag].length));

        qdf_dmaaddr_to_32s(
                tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
        hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
                                   tso_seg->tso_frags[num_frag].length);
    }

    return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
                                       void *ext_desc)
{
    return;
}
#endif
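/*
 * Illustrative arithmetic for the 8-byte rounding used in
 * dp_tx_prepare_htt_metadata() above, assuming a hypothetical 37-byte
 * htt_tx_msdu_desc_ext2_t:
 *
 *   htt_desc_size_aligned = (37 + 7) & ~0x7 = 40
 *
 * so the metadata pushed in front of the payload always occupies a whole
 * number of 8-byte words, keeping the buffer start 8-byte aligned as HW
 * requires.
 */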
#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *free_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *next_seg;

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tx_tso_desc_free(soc,
                            msdu_info->tx_queue.desc_pool_id,
                            free_seg);
        free_seg = next_seg;
    }
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_num_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_num_seg_elem_t *free_num_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_num_seg_elem_t *next_num_seg;

    while (free_num_seg) {
        next_num_seg = free_num_seg->next;
        dp_tso_num_seg_free(soc,
                            msdu_info->tx_queue.desc_pool_id,
                            free_num_seg);
        free_num_seg = next_num_seg;
    }
}

/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
 *                              and do dma unmap for each segment
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number-of-segments descriptor
 *
 * Return: void
 */
static void dp_tx_unmap_tso_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *free_seg,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
    struct qdf_tso_seg_elem_t *next_seg;

    if (qdf_unlikely(!num_seg_desc)) {
        DP_TRACE(ERROR, "TSO number seg desc is NULL!");
        return;
    }

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
        free_seg = next_seg;
    }
}

#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx() - Retrieve the tso packet id
 * @pdev: pdev handle
 *
 * Return: id
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
    uint32_t stats_idx;

    stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
                 % CDP_MAX_TSO_PACKETS);
    return stats_idx;
}
#else
static int dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
    return 0;
}
#endif /* FEATURE_TSO_STATS */

/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *                                   free the tso segments descriptor and
 *                                   tso num segments descriptor
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return: void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
                                          struct dp_tx_msdu_info_s *msdu_info,
                                          bool tso_seg_unmap)
{
    struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
    struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
    struct qdf_tso_num_seg_elem_t *tso_num_desc =
        tso_info->tso_num_seg_list;

    /* do dma unmap for each segment */
    if (tso_seg_unmap)
        dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

    /* free all tso number segment descriptors (normally only one) */
    dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

    /* free all tso segment descriptors */
    dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}
/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM or
 *         QDF_STATUS_E_INVAL on failure
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
        qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *tso_seg;
    int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
    struct dp_soc *soc = vdev->pdev->soc;
    struct dp_pdev *pdev = vdev->pdev;
    struct qdf_tso_info_t *tso_info;
    struct qdf_tso_num_seg_elem_t *tso_num_seg;

    tso_info = &msdu_info->u.tso_info;
    tso_info->curr_seg = NULL;
    tso_info->tso_seg_list = NULL;
    tso_info->num_segs = num_seg;
    msdu_info->frm_type = dp_tx_frm_tso;
    tso_info->tso_num_seg_list = NULL;

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    while (num_seg) {
        tso_seg = dp_tx_tso_desc_alloc(
                soc, msdu_info->tx_queue.desc_pool_id);
        if (tso_seg) {
            tso_seg->next = tso_info->tso_seg_list;
            tso_info->tso_seg_list = tso_seg;
            num_seg--;
        } else {
            dp_err_rl("Failed to alloc tso seg desc");
            DP_STATS_INC_PKT(vdev->pdev,
                             tso_stats.tso_no_mem_dropped, 1,
                             qdf_nbuf_len(msdu));
            dp_tx_free_remaining_tso_desc(soc, msdu_info, false);

            return QDF_STATUS_E_NOMEM;
        }
    }

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    tso_num_seg = dp_tso_num_seg_alloc(soc,
                                       msdu_info->tx_queue.desc_pool_id);

    if (tso_num_seg) {
        tso_num_seg->next = tso_info->tso_num_seg_list;
        tso_info->tso_num_seg_list = tso_num_seg;
    } else {
        DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
                 __func__);
        dp_tx_free_remaining_tso_desc(soc, msdu_info, false);

        return QDF_STATUS_E_NOMEM;
    }

    msdu_info->num_seg =
        qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

    TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
              msdu_info->num_seg);
    if (!(msdu_info->num_seg)) {
        /*
         * Free allocated TSO seg desc and number seg desc,
         * do unmap for segments if DMA map has been done.
         */
        DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
        dp_tx_free_remaining_tso_desc(soc, msdu_info, true);

        return QDF_STATUS_E_INVAL;
    }

    dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
                          msdu, 0, DP_TX_DESC_MAP);

    tso_info->curr_seg = tso_info->tso_seg_list;

    tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
    dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
                         msdu, msdu_info->num_seg);
    dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
                                tso_info->msdu_stats_idx);
    dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);

    return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
        qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
    return QDF_STATUS_E_NOMEM;
}
#endif

QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
                        (DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
                         sizeof(struct htt_tx_msdu_desc_ext2_t)));

/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success, NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
        struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
    uint8_t i;
    uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
    struct dp_tx_seg_info_s *seg_info;
    struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
    struct dp_soc *soc = vdev->pdev->soc;

    /* Allocate an extension descriptor */
    msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
    qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

    if (!msdu_ext_desc) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return NULL;
    }

    if (msdu_info->exception_fw &&
        qdf_unlikely(vdev->mesh_vdev)) {
        qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
                     &msdu_info->meta_data[0],
                     sizeof(struct htt_tx_msdu_desc_ext2_t));
        qdf_atomic_inc(&soc->num_tx_exception);
        msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
    }

    switch (msdu_info->frm_type) {
    case dp_tx_frm_sg:
    case dp_tx_frm_me:
    case dp_tx_frm_raw:
        seg_info = msdu_info->u.sg_info.curr_seg;
        /* Update the buffer pointers in MSDU Extension Descriptor */
        for (i = 0; i < seg_info->frag_cnt; i++) {
            hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
                                       seg_info->frags[i].paddr_lo,
                                       seg_info->frags[i].paddr_hi,
                                       seg_info->frags[i].len);
        }

        break;

    case dp_tx_frm_tso:
        dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
                                   &cached_ext_desc[0]);
        break;

    default:
        break;
    }

    QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                       cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

    hal_tx_ext_desc_sync(&cached_ext_desc[0],
                         msdu_ext_desc->vaddr);

    return msdu_ext_desc;
}
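/*
 * Summary of dp_tx_prepare_ext_desc() above: for SG/ME/RAW frames the
 * fragment list comes from msdu_info->u.sg_info and each fragment's
 * address/length pair is programmed into the extension descriptor; for TSO
 * the current segment (msdu_info->u.tso_info.curr_seg) is programmed via
 * dp_tx_prepare_tso_ext_desc(). The locally cached descriptor is then
 * written out to the HW-visible memory with hal_tx_ext_desc_sync().
 */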
/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 * @soc: DP soc handle
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(struct dp_soc *soc,
                            qdf_nbuf_t skb, uint16_t msdu_id,
                            uint8_t vdev_id)
{
}
#else
static void dp_tx_trace_pkt(struct dp_soc *soc,
                            qdf_nbuf_t skb, uint16_t msdu_id,
                            uint8_t vdev_id)
{
    if (dp_is_tput_high(soc))
        return;

    QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
    QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
    DPTRACE(qdf_dp_trace_ptr(skb,
                             QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
                             QDF_TRACE_DEFAULT_PDEV_ID,
                             qdf_nbuf_data_addr(skb),
                             sizeof(qdf_nbuf_data(skb)),
                             msdu_id, vdev_id, 0));

    qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

    DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
                                  QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
                                  msdu_id, QDF_TX));
}
#endif

#ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
/**
 * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
 *                                    exception by the upper layer (OS_IF)
 * @soc: DP soc handle
 * @nbuf: packet to be transmitted
 *
 * Return: 1 if the packet is marked as exception,
 *         0 if the packet is not marked as exception.
 */
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
                                                 qdf_nbuf_t nbuf)
{
    return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
}
#else
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
                                                 qdf_nbuf_t nbuf)
{
    return 0;
}
#endif

#ifdef DP_TRAFFIC_END_INDICATION
/**
 * dp_tx_get_traffic_end_indication_pkt() - Allocate and prepare packet to send
 *                                          as indication to fw to inform that
 *                                          data stream has ended
 * @vdev: DP vdev handle
 * @nbuf: original buffer from network stack
 *
 * Return: NULL on failure,
 *         nbuf on success
 */
static inline qdf_nbuf_t
dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
                                     qdf_nbuf_t nbuf)
{
    /* Packet length should be enough to copy up to the L3 header */
    uint8_t end_nbuf_len = 64;
    uint8_t htt_desc_size_aligned;
    uint8_t htt_desc_size;
    qdf_nbuf_t end_nbuf;

    if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
                     QDF_NBUF_CB_PACKET_TYPE_END_INDICATION)) {
        htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
        htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

        end_nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q);
        if (!end_nbuf) {
            end_nbuf = qdf_nbuf_alloc(NULL,
                                      (htt_desc_size_aligned +
                                       end_nbuf_len),
                                      htt_desc_size_aligned,
                                      8, false);
            if (!end_nbuf) {
                dp_err("Packet allocation failed");
                goto out;
            }
        } else {
            qdf_nbuf_reset(end_nbuf, htt_desc_size_aligned, 8);
        }
        qdf_mem_copy(qdf_nbuf_data(end_nbuf), qdf_nbuf_data(nbuf),
                     end_nbuf_len);
        qdf_nbuf_set_pktlen(end_nbuf, end_nbuf_len);

        return end_nbuf;
    }
out:
    return NULL;
}
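/*
 * Buffer layout note for the end-indication packet allocated above: the
 * nbuf reserves htt_desc_size_aligned bytes of headroom (later consumed by
 * dp_tx_prepare_htt_metadata() on the exception path), followed by the
 * first end_nbuf_len (64) bytes copied from the original frame, which is
 * enough to cover headers up to L3.
 */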
/**
 * dp_tx_send_traffic_end_indication_pkt() - Send indication packet to FW
 *                                           via exception path.
 * @vdev: DP vdev handle
 * @end_nbuf: skb to send as indication
 * @msdu_info: msdu_info of original nbuf
 * @peer_id: peer id
 *
 * Return: None
 */
static inline void
dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
                                      qdf_nbuf_t end_nbuf,
                                      struct dp_tx_msdu_info_s *msdu_info,
                                      uint16_t peer_id)
{
    struct dp_tx_msdu_info_s e_msdu_info = {0};
    qdf_nbuf_t nbuf;
    struct htt_tx_msdu_desc_ext2_t *desc_ext =
        (struct htt_tx_msdu_desc_ext2_t *)(e_msdu_info.meta_data);

    e_msdu_info.tx_queue = msdu_info->tx_queue;
    e_msdu_info.tid = msdu_info->tid;
    e_msdu_info.exception_fw = 1;
    desc_ext->host_tx_desc_pool = 1;
    desc_ext->traffic_end_indication = 1;
    nbuf = dp_tx_send_msdu_single(vdev, end_nbuf, &e_msdu_info,
                                  peer_id, NULL);
    if (nbuf) {
        dp_err("Traffic end indication packet tx failed");
        qdf_nbuf_free(nbuf);
    }
}

/**
 * dp_tx_traffic_end_indication_set_desc_flag() - Set tx descriptor flag to
 *                                                mark it as a traffic end
 *                                                indication packet.
 * @tx_desc: Tx descriptor pointer
 * @msdu_info: msdu_info structure pointer
 *
 * Return: None
 */
static inline void
dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
                                           struct dp_tx_msdu_info_s *msdu_info)
{
    struct htt_tx_msdu_desc_ext2_t *desc_ext =
        (struct htt_tx_msdu_desc_ext2_t *)(msdu_info->meta_data);

    if (qdf_unlikely(desc_ext->traffic_end_indication))
        tx_desc->flags |= DP_TX_DESC_FLAG_TRAFFIC_END_IND;
}

/**
 * dp_tx_traffic_end_indication_enq_ind_pkt() - Enqueue the packet instead of
 *                                              freeing it, if it is associated
 *                                              with a traffic-end-indication
 *                                              flagged descriptor.
 * @soc: dp soc handle
 * @desc: Tx descriptor pointer
 * @nbuf: buffer pointer
 *
 * Return: True if packet gets enqueued else false
 */
static bool
dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
                                         struct dp_tx_desc_s *desc,
                                         qdf_nbuf_t nbuf)
{
    struct dp_vdev *vdev = NULL;

    if (qdf_unlikely((desc->flags &
                      DP_TX_DESC_FLAG_TRAFFIC_END_IND) != 0)) {
        vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
                                     DP_MOD_ID_TX_COMP);
        if (vdev) {
            qdf_nbuf_queue_add(&vdev->end_ind_pkt_q, nbuf);
            dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_COMP);
            return true;
        }
    }
    return false;
}

/**
 * dp_tx_traffic_end_indication_is_enabled() - get the feature
 *                                             enable/disable status
 * @vdev: dp vdev handle
 *
 * Return: True if the feature is enabled else false
 */
static inline bool
dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
{
    return qdf_unlikely(vdev->traffic_end_ind_en);
}

static inline qdf_nbuf_t
dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                               struct dp_tx_msdu_info_s *msdu_info,
                               uint16_t peer_id, qdf_nbuf_t end_nbuf)
{
    if (dp_tx_traffic_end_indication_is_enabled(vdev))
        end_nbuf = dp_tx_get_traffic_end_indication_pkt(vdev, nbuf);

    nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);

    if (qdf_unlikely(end_nbuf))
        dp_tx_send_traffic_end_indication_pkt(vdev, end_nbuf,
                                              msdu_info, peer_id);
    return nbuf;
}
#else
static inline qdf_nbuf_t
dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
                                     qdf_nbuf_t nbuf)
{
    return NULL;
}

static inline void
dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
                                      qdf_nbuf_t end_nbuf,
                                      struct dp_tx_msdu_info_s *msdu_info,
                                      uint16_t peer_id)
{}

static inline void
dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
                                           struct dp_tx_msdu_info_s *msdu_info)
{}

static inline bool
dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
                                         struct dp_tx_desc_s *desc,
                                         qdf_nbuf_t nbuf)
{
    return false;
}

static inline bool
dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
{
    return false;
}

static inline qdf_nbuf_t
dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                               struct dp_tx_msdu_info_s *msdu_info,
                               uint16_t peer_id, qdf_nbuf_t end_nbuf)
{
    return dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
}
#endif

#if defined(QCA_SUPPORT_WDS_EXTENDED)
static bool
dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
                             struct cdp_tx_exception_metadata *tx_exc_metadata)
{
    if (soc->features.wds_ext_ast_override_enable &&
        tx_exc_metadata && tx_exc_metadata->is_wds_extended)
        return true;

    return false;
}
#else
static bool
dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
                             struct cdp_tx_exception_metadata *tx_exc_metadata)
{
    return false;
}
#endif
/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: MSDU info carrying metadata for the fw
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
        qdf_nbuf_t nbuf, uint8_t desc_pool_id,
        struct dp_tx_msdu_info_s *msdu_info,
        struct cdp_tx_exception_metadata *tx_exc_metadata)
{
    uint8_t align_pad;
    uint8_t is_exception = 0;
    uint8_t htt_hdr_size;
    struct dp_tx_desc_s *tx_desc;
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (dp_tx_limit_check(vdev, nbuf))
        return NULL;

    /* Allocate software Tx descriptor */
    tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

    if (qdf_unlikely(!tx_desc)) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
        return NULL;
    }

    dp_tx_outstanding_inc(pdev);

    /* Initialize the SW tx descriptor */
    tx_desc->nbuf = nbuf;
    tx_desc->frm_type = dp_tx_frm_std;
    tx_desc->tx_encap_type = ((tx_exc_metadata &&
        (tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
        tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
    tx_desc->vdev_id = vdev->vdev_id;
    tx_desc->pdev = pdev;
    tx_desc->msdu_ext_desc = NULL;
    tx_desc->pkt_offset = 0;
    tx_desc->length = qdf_nbuf_headlen(nbuf);
    tx_desc->shinfo_addr = skb_end_pointer(nbuf);

    dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);

    if (qdf_unlikely(vdev->multipass_en)) {
        if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
            goto failure;
    }

    /* Packets marked by upper layer (OS-IF) to be sent to FW */
    if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
        is_exception = 1;

    /*
     * For BE chipsets, if WDS extension AST override is enabled, do not
     * mark the frame to FW in the descriptor; AST-index-based search is
     * used instead.
     */
    if (dp_tx_is_wds_ast_override_en(soc, tx_exc_metadata))
        return tx_desc;

    /*
     * For special modes (vdev_type == ocb or mesh), data frames should be
     * transmitted using varying transmit parameters (tx spec) which include
     * transmit rate, power, priority, channel, channel bandwidth, nss etc.
     * These are filled in HTT MSDU descriptor and sent in frame pre-header.
     * These frames are sent as exception packets to firmware.
     *
     * HW requirement is that metadata should always point to a
     * 8-byte aligned address. So we add alignment pad to start of buffer.
     * HTT Metadata should be ensured to be multiple of 8-bytes,
     * to get 8-byte aligned start address along with align_pad added
     *
     *  |-----------------------------|
     *  |                             |
     *  |-----------------------------| <-----Buffer Pointer Address given
     *  |                             |  ^    in HW descriptor (aligned)
     *  |       HTT Metadata          |  |
     *  |                             |  |
     *  |                             |  | Packet Offset given in descriptor
     *  |                             |  |
     *  |-----------------------------|  |
     *  |       Alignment Pad         |  v
     *  |-----------------------------| <----- Actual buffer start address
     *  |         SKB Data            |           (Unaligned)
     *  |                             |
     *  |                             |
     *  |                             |
     *  |                             |
     *  |                             |
     *  |-----------------------------|
     */
    if (qdf_unlikely((msdu_info->exception_fw)) ||
        (vdev->opmode == wlan_op_mode_ocb) ||
        (tx_exc_metadata &&
         tx_exc_metadata->is_tx_sniffer)) {
        align_pad = ((unsigned long)qdf_nbuf_data(nbuf)) & 0x7;

        if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
            DP_STATS_INC(vdev,
                         tx_i.dropped.headroom_insufficient, 1);
            goto failure;
        }

        if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
            dp_tx_err("qdf_nbuf_push_head failed");
            goto failure;
        }

        htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
                                                  msdu_info);
        if (htt_hdr_size == 0)
            goto failure;

        tx_desc->length = qdf_nbuf_headlen(nbuf);
        tx_desc->pkt_offset = align_pad + htt_hdr_size;
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
        dp_tx_traffic_end_indication_set_desc_flag(tx_desc,
                                                   msdu_info);
        is_exception = 1;
        tx_desc->length -= tx_desc->pkt_offset;
    }

#if !TQM_BYPASS_WAR
    if (is_exception || tx_exc_metadata)
#endif
    {
        /* Temporary WAR due to TQM VP issues */
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
        qdf_atomic_inc(&soc->num_tx_exception);
    }

    return tx_desc;

failure:
    dp_tx_desc_release(tx_desc, desc_pool_id);
    return NULL;
}
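/*
 * Worked example of the offset math above: if qdf_nbuf_data() ends in 0x5,
 * align_pad = 5 bytes are pushed first (making the data pointer 8-byte
 * aligned), followed by the 8-byte-aligned HTT metadata of htt_hdr_size
 * bytes. The descriptor then carries
 *
 *   pkt_offset = align_pad + htt_hdr_size
 *
 * and tx_desc->length is reduced by pkt_offset so it reflects only the
 * original payload, while the buffer pointer handed to HW stays aligned.
 */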
/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for a
 *                        multisegment frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
        qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
        uint8_t desc_pool_id)
{
    struct dp_tx_desc_s *tx_desc;
    struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (dp_tx_limit_check(vdev, nbuf))
        return NULL;

    /* Allocate software Tx descriptor */
    tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
    if (!tx_desc) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return NULL;
    }

    dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
                              nbuf, tx_desc->id, DP_TX_DESC_COOKIE);

    dp_tx_outstanding_inc(pdev);

    /* Initialize the SW tx descriptor */
    tx_desc->nbuf = nbuf;
    tx_desc->frm_type = msdu_info->frm_type;
    tx_desc->tx_encap_type = vdev->tx_encap_type;
    tx_desc->vdev_id = vdev->vdev_id;
    tx_desc->pdev = pdev;
    tx_desc->pkt_offset = 0;

    dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);

    /* Handle scattered frames - TSO/SG/ME */
    /* Allocate and prepare an extension descriptor for scattered frames */
    msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
    if (!msdu_ext_desc) {
        dp_tx_info("Tx Extension Descriptor Alloc Fail");
        goto failure;
    }

#if TQM_BYPASS_WAR
    /* Temporary WAR due to TQM VP issues */
    tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
    qdf_atomic_inc(&soc->num_tx_exception);
#endif
    if (qdf_unlikely(msdu_info->exception_fw))
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

    tx_desc->msdu_ext_desc = msdu_ext_desc;
    tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

    msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
    msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

    tx_desc->dma_addr = msdu_ext_desc->paddr;

    if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
        tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
    else
        tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;

    return tx_desc;
failure:
    dp_tx_desc_release(tx_desc, desc_pool_id);
    return NULL;
}

/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 *
 * Return: nbuf on success, NULL on failure
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
        struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
    qdf_nbuf_t curr_nbuf = NULL;
    uint16_t total_len = 0;
    qdf_dma_addr_t paddr;
    int32_t i;
    int32_t mapped_buf_num = 0;

    struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
    qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *)nbuf->data;

    DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
    /* Continue only if frames are of DATA type */
    if (!DP_FRAME_IS_DATA(qos_wh)) {
        DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
        dp_tx_debug("Pkt recd is not of data type");
        goto error;
    }

    /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
    if (vdev->raw_mode_war &&
        (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
        (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
        qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;

    for (curr_nbuf = nbuf, i = 0; curr_nbuf;
         curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
        /*
         * Number of nbuf's must not exceed the size of the frags
         * array in seg_info.
         */
        if (i >= DP_TX_MAX_NUM_FRAGS) {
            dp_err_rl("nbuf cnt exceeds the max number of segs");
            DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
            goto error;
        }
        if (QDF_STATUS_SUCCESS !=
            qdf_nbuf_map_nbytes_single(vdev->osdev,
                                       curr_nbuf,
                                       QDF_DMA_TO_DEVICE,
                                       curr_nbuf->len)) {
            dp_tx_err("%s dma map error", __func__);
            DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
            goto error;
        }
        /* Update the count of mapped nbuf's */
        mapped_buf_num++;
        paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
        seg_info->frags[i].paddr_lo = paddr;
        seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
        seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
        seg_info->frags[i].vaddr = (void *)curr_nbuf;
        total_len += qdf_nbuf_len(curr_nbuf);
    }

    seg_info->frag_cnt = i;
    seg_info->total_len = total_len;
    seg_info->next = NULL;

    sg_info->curr_seg = seg_info;

    msdu_info->frm_type = dp_tx_frm_raw;
    msdu_info->num_seg = 1;

    return nbuf;

error:
    i = 0;
    while (nbuf) {
        curr_nbuf = nbuf;
        if (i < mapped_buf_num) {
            qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
                                         QDF_DMA_TO_DEVICE,
                                         curr_nbuf->len);
            i++;
        }
        nbuf = qdf_nbuf_next(nbuf);
        qdf_nbuf_free(curr_nbuf);
    }
    return NULL;
}

/**
 * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
 * @soc: DP soc handle
 * @nbuf: Buffer pointer
 *
 * Unmap the chain of nbufs that belong to this RAW frame.
 *
 * Return: None
 */
static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
                                    qdf_nbuf_t nbuf)
{
    qdf_nbuf_t cur_nbuf = nbuf;

    do {
        qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
                                     QDF_DMA_TO_DEVICE,
                                     cur_nbuf->len);
        cur_nbuf = qdf_nbuf_next(cur_nbuf);
    } while (cur_nbuf);
}

#ifdef VDEV_PEER_PROTOCOL_COUNT
void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
                                               qdf_nbuf_t nbuf)
{
    qdf_nbuf_t nbuf_local;
    struct dp_vdev *vdev_local = vdev_hdl;

    do {
        if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
            break;
        nbuf_local = nbuf;
        if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
                         htt_cmn_pkt_type_raw))
            break;
        else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
            break;
        else if (qdf_nbuf_is_tso((nbuf_local)))
            break;
        dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
                                               (nbuf_local),
                                               NULL, 1, 0);
    } while (0);
}
#endif

#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_tx_update_stats() - Update soc level tx stats
 * @soc: DP soc handle
 * @tx_desc: TX descriptor reference
 * @ring_id: TCL ring id
 *
 * Return: none
 */
void dp_tx_update_stats(struct dp_soc *soc,
                        struct dp_tx_desc_s *tx_desc,
                        uint8_t ring_id)
{
    uint32_t stats_len = dp_tx_get_pkt_len(tx_desc);

    DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len);
}

int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
                         struct dp_tx_desc_s *tx_desc,
                         uint8_t tid,
                         struct dp_tx_msdu_info_s *msdu_info,
                         uint8_t ring_id)
{
    struct dp_swlm *swlm = &soc->swlm;
    union swlm_data swlm_query_data;
    struct dp_swlm_tcl_data tcl_data;
    QDF_STATUS status;
    int ret;

    if (!swlm->is_enabled)
        return msdu_info->skip_hp_update;

    tcl_data.nbuf = tx_desc->nbuf;
    tcl_data.tid = tid;
    tcl_data.ring_id = ring_id;
    tcl_data.pkt_len = dp_tx_get_pkt_len(tx_desc);
    tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
    swlm_query_data.tcl_data = &tcl_data;

    status = dp_swlm_tcl_pre_check(soc, &tcl_data);
    if (QDF_IS_STATUS_ERROR(status)) {
        dp_swlm_tcl_reset_session_data(soc, ring_id);
        DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
        return 0;
    }

    ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
    if (ret) {
        DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1);
    } else {
        DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
    }

    return ret;
}

void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
                      int coalesce)
{
    if (coalesce)
        dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
    else
        dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}

static inline void
dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
{
    if (((i + 1) < msdu_info->num_seg))
        msdu_info->skip_hp_update = 1;
    else
        msdu_info->skip_hp_update = 0;
}
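/*
 * Illustrative note: for a multi-segment MSDU with num_seg == 3,
 * dp_tx_is_hp_update_required() sets skip_hp_update = 1 for segments 0 and
 * 1 and clears it for the final segment, so the TCL head pointer is written
 * to HW only once per MSDU unless an error forces an early flush via
 * dp_tx_check_and_flush_hp() below.
 */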
static inline void
dp_flush_tcp_hp(struct dp_soc *soc, uint8_t ring_id)
{
    hal_ring_handle_t hal_ring_hdl =
        dp_tx_get_hal_ring_hdl(soc, ring_id);

    if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
        dp_err("Fillmore: SRNG access start failed");
        return;
    }

    dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
}

static inline void
dp_tx_check_and_flush_hp(struct dp_soc *soc,
                         QDF_STATUS status,
                         struct dp_tx_msdu_info_s *msdu_info)
{
    if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) {
        dp_flush_tcp_hp(soc,
                        (msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK));
    }
}
#else
static inline void
dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
{
}

static inline void
dp_tx_check_and_flush_hp(struct dp_soc *soc,
                         QDF_STATUS status,
                         struct dp_tx_msdu_info_s *msdu_info)
{
}
#endif

#ifdef FEATURE_RUNTIME_PM
static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
{
    int ret;

    ret = qdf_atomic_read(&soc->rtpm_high_tput_flag) &&
          (hif_rtpm_get_state() <= HIF_RTPM_STATE_ON);
    return ret;
}

/**
 * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
 * @soc: Datapath soc handle
 * @hal_ring_hdl: HAL ring handle
 * @coalesce: Coalesce the current write or not
 *
 * Wrapper for HAL ring access end for data transmission for
 * FEATURE_RUNTIME_PM
 *
 * Return: none
 */
void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
                              hal_ring_handle_t hal_ring_hdl,
                              int coalesce)
{
    int ret;

    /*
     * Avoid runtime get and put APIs under high throughput scenarios.
     */
    if (dp_get_rtpm_tput_policy_requirement(soc)) {
        dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
        return;
    }

    ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
    if (QDF_IS_STATUS_SUCCESS(ret)) {
        if (hif_system_pm_state_check(soc->hif_handle)) {
            dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
            hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
            hal_srng_inc_flush_cnt(hal_ring_hdl);
        } else {
            dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
        }
        hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
    } else {
        dp_runtime_get(soc);
        dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
        hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
        qdf_atomic_inc(&soc->tx_pending_rtpm);
        hal_srng_inc_flush_cnt(hal_ring_hdl);
        dp_runtime_put(soc);
    }
}
#else

#ifdef DP_POWER_SAVE
void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
                              hal_ring_handle_t hal_ring_hdl,
                              int coalesce)
{
    if (hif_system_pm_state_check(soc->hif_handle)) {
        dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
        hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
        hal_srng_inc_flush_cnt(hal_ring_hdl);
    } else {
        dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
    }
}
#endif

static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
{
    return 0;
}
#endif
/**
 * dp_tx_get_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: msdu_info structure in which the TID is filled
 *
 * Extract the DSCP or PCP information from the frame and map it into a TID
 * value.
 *
 * Return: void
 */
static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                          struct dp_tx_msdu_info_s *msdu_info)
{
    uint8_t tos = 0, dscp_tid_override = 0;
    uint8_t *hdr_ptr, *L3datap;
    uint8_t is_mcast = 0;
    qdf_ether_header_t *eh = NULL;
    qdf_ethervlan_header_t *evh = NULL;
    uint16_t ether_type;
    qdf_llc_t *llcHdr;
    struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

    DP_TX_TID_OVERRIDE(msdu_info, nbuf);

    if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
        eh = (qdf_ether_header_t *)nbuf->data;
        hdr_ptr = (uint8_t *)(eh->ether_dhost);
        L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
    } else {
        qdf_dot3_qosframe_t *qos_wh =
            (qdf_dot3_qosframe_t *)nbuf->data;
        msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
            qos_wh->i_qos[0] & DP_QOS_TID : 0;
        return;
    }

    is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
    ether_type = eh->ether_type;

    llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
    /*
     * Check if packet is dot3 or eth2 type.
     */
    if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
        ether_type = (uint16_t)*(nbuf->data + 2 * QDF_MAC_ADDR_SIZE +
                                 sizeof(*llcHdr));

        if (ether_type == htons(ETHERTYPE_VLAN)) {
            L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
                      sizeof(*llcHdr);
            ether_type = (uint16_t)*(nbuf->data + 2 * QDF_MAC_ADDR_SIZE
                                     + sizeof(*llcHdr) +
                                     sizeof(qdf_net_vlanhdr_t));
        } else {
            L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
                      sizeof(*llcHdr);
        }
    } else {
        if (ether_type == htons(ETHERTYPE_VLAN)) {
            evh = (qdf_ethervlan_header_t *)eh;
            ether_type = evh->ether_type;
            L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
        }
    }

    /*
     * Find priority from IP TOS DSCP field
     */
    if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
        qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *)L3datap;

        if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
            /* Only for unicast frames */
            if (!is_mcast) {
                /* send it on VO queue */
                msdu_info->tid = DP_VO_TID;
            }
        } else {
            /*
             * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
             * from the TOS byte.
             */
            tos = ip->ip_tos;
            dscp_tid_override = 1;
        }
    } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
        /*
         * TODO: use flow label; IGMP/MLD cases to be handled in
         * phase 2.
         */
        unsigned long ver_pri_flowlabel;
        unsigned long pri;

        ver_pri_flowlabel = *(unsigned long *)L3datap;
        pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
              DP_IPV6_PRIORITY_SHIFT;
        tos = pri;
        dscp_tid_override = 1;
    } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
        msdu_info->tid = DP_VO_TID;
    else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
        /* Only for unicast frames */
        if (!is_mcast) {
            /* send ucast arp on VO queue */
            msdu_info->tid = DP_VO_TID;
        }
    }

    /*
     * Assign all MCAST packets to BE
     */
    if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
        if (is_mcast) {
            tos = 0;
            dscp_tid_override = 1;
        }
    }

    if (dscp_tid_override == 1) {
        tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
        msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
    }

    if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
        msdu_info->tid = CDP_MAX_DATA_TIDS - 1;

    return;
}
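/*
 * Worked example of the DSCP mapping above (assuming DP_IP_DSCP_SHIFT == 2
 * and DP_IP_DSCP_MASK == 0x3F, i.e. DSCP is bits 2-7 of the TOS byte, as
 * the comment in dp_tx_get_tid() implies): for ip_tos = 0xB8 (DSCP 46,
 * "EF"), tos = (0xB8 >> 2) & 0x3F = 46, and the TID comes from
 * pdev->dscp_tid_map[vdev->dscp_tid_map_id][46].
 */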
/**
 * dp_tx_classify_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: msdu_info structure in which the TID is filled
 *
 * Software based TID classification is required when more than 2 DSCP-TID
 * mapping tables are needed.
 * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
 *
 * Return: void
 */
static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                                      struct dp_tx_msdu_info_s *msdu_info)
{
    DP_TX_TID_OVERRIDE(msdu_info, nbuf);

    /*
     * The skip_sw_tid_classification flag will be set in the below cases:
     * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
     * 2. hlos_tid_override enabled for vdev
     * 3. mesh mode enabled for vdev
     */
    if (qdf_likely(vdev->skip_sw_tid_classification)) {
        /* Update tid in msdu_info from skb priority */
        if (qdf_unlikely(vdev->skip_sw_tid_classification
                         & DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
            uint32_t tid = qdf_nbuf_get_priority(nbuf);

            if (tid == DP_TX_INVALID_QOS_TAG)
                return;

            msdu_info->tid = tid;
            return;
        }
        return;
    }

    dp_tx_get_tid(vdev, nbuf, msdu_info);
}

#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
 * @soc: datapath SOC
 * @vdev: datapath vdev
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_tx_update_tdls_flags(struct dp_soc *soc,
                                    struct dp_vdev *vdev,
                                    struct dp_tx_desc_s *tx_desc)
{
    if (vdev) {
        if (vdev->is_tdls_frame) {
            tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
            vdev->is_tdls_frame = false;
        }
    }
}

static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
{
    uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;

    switch (soc->arch_id) {
    case CDP_ARCH_TYPE_LI:
        tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
        break;

    case CDP_ARCH_TYPE_BE:
        tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
        break;

    default:
        dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
        QDF_BUG(0);
    }

    return tx_status;
}

/**
 * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer
 * @soc: dp_soc handle
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
                                             struct dp_tx_desc_s *tx_desc)
{
    uint8_t tx_status = 0;
    uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];

    qdf_nbuf_t nbuf = tx_desc->nbuf;
    struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
                                                 DP_MOD_ID_TDLS);

    if (qdf_unlikely(!vdev)) {
        dp_err_rl("vdev is null!");
        goto error;
    }

    hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
    tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
    dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);

    if (vdev->tx_non_std_data_callback.func) {
        qdf_nbuf_set_next(nbuf, NULL);
        vdev->tx_non_std_data_callback.func(
                vdev->tx_non_std_data_callback.ctxt,
                nbuf, tx_status);
        dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
        return;
    } else {
        dp_err_rl("callback func is null");
    }

    dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
error:
    qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
    qdf_nbuf_free(nbuf);
}
1956 * 1957 * Return: QDF_STATUS 1958 */ 1959 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev, 1960 struct dp_tx_desc_s *tx_desc, 1961 qdf_nbuf_t nbuf) 1962 { 1963 if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME))) 1964 return qdf_nbuf_map_nbytes_single(vdev->osdev, 1965 nbuf, 1966 QDF_DMA_TO_DEVICE, 1967 nbuf->len); 1968 else 1969 return qdf_nbuf_map_single(vdev->osdev, nbuf, 1970 QDF_DMA_TO_DEVICE); 1971 } 1972 #else 1973 static inline void dp_tx_update_tdls_flags(struct dp_soc *soc, 1974 struct dp_vdev *vdev, 1975 struct dp_tx_desc_s *tx_desc) 1976 { 1977 } 1978 1979 static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc, 1980 struct dp_tx_desc_s *tx_desc) 1981 { 1982 } 1983 1984 static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev, 1985 struct dp_tx_desc_s *tx_desc, 1986 qdf_nbuf_t nbuf) 1987 { 1988 return qdf_nbuf_map_nbytes_single(vdev->osdev, 1989 nbuf, 1990 QDF_DMA_TO_DEVICE, 1991 nbuf->len); 1992 } 1993 #endif 1994 1995 static inline 1996 qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev, 1997 struct dp_tx_desc_s *tx_desc, 1998 qdf_nbuf_t nbuf) 1999 { 2000 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 2001 2002 ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf); 2003 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) 2004 return 0; 2005 2006 return qdf_nbuf_mapped_paddr_get(nbuf); 2007 } 2008 2009 static inline 2010 void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc) 2011 { 2012 qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev, 2013 desc->nbuf, 2014 desc->dma_addr, 2015 QDF_DMA_TO_DEVICE, 2016 desc->length); 2017 } 2018 2019 #ifdef QCA_DP_TX_RMNET_OPTIMIZATION 2020 static inline bool 2021 is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info) 2022 { 2023 struct net_device *ingress_dev; 2024 skb_frag_t *frag; 2025 uint16_t buf_len = 0; 2026 uint16_t linear_data_len = 0; 2027 uint8_t *payload_addr = NULL; 2028 2029 ingress_dev = dev_get_by_index(dev_net(nbuf->dev), nbuf->skb_iif); 2030 /* dev_get_by_index() may return NULL if the ingress netdev is
 * already gone; bail out instead of dereferencing it */
 if (qdf_unlikely(!ingress_dev)) return false; 2031 if ((ingress_dev->priv_flags & IFF_PHONY_HEADROOM)) { 2032 dev_put(ingress_dev); 2033 frag = &(skb_shinfo(nbuf)->frags[0]); 2034 buf_len = skb_frag_size(frag); 2035 payload_addr = (uint8_t *)skb_frag_address(frag); 2036 linear_data_len = skb_headlen(nbuf); 2037 2038 buf_len += linear_data_len; 2039 payload_addr = payload_addr - linear_data_len; 2040 memcpy(payload_addr, nbuf->data, linear_data_len); 2041 2042 msdu_info->frm_type = dp_tx_frm_rmnet; 2043 msdu_info->buf_len = buf_len; 2044 msdu_info->payload_addr = payload_addr; 2045 2046 return true; 2047 } 2048 dev_put(ingress_dev); 2049 return false; 2050 } 2051 2052 static inline 2053 qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info, 2054 struct dp_tx_desc_s *tx_desc) 2055 { 2056 qdf_dma_addr_t paddr; 2057 2058 paddr = (qdf_dma_addr_t)qdf_mem_virt_to_phys(msdu_info->payload_addr); 2059 tx_desc->length = msdu_info->buf_len; 2060 2061 qdf_nbuf_dma_clean_range((void *)msdu_info->payload_addr, 2062 (void *)(msdu_info->payload_addr + 2063 msdu_info->buf_len)); 2064 2065 tx_desc->flags |= DP_TX_DESC_FLAG_RMNET; 2066 return paddr; 2067 } 2068 #else 2069 static inline bool 2070 is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info) 2071 { 2072 return false; 2073 } 2074 2075 static inline 2076 qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info, 2077 struct dp_tx_desc_s *tx_desc) 2078 { 2079 return 0; 2080 } 2081 #endif 2082 2083 #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 2084
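/*
 * A hedged sketch of the idea behind the no-map fast path compiled in
 * below: for simple frames the full DMA map is skipped, the payload is
 * cache-cleaned and the physical address is derived directly, e.g.
 *
 *   qdf_nbuf_dma_clean_range((void *)nbuf->data,
 *                            (void *)(nbuf->data + nbuf->len));
 *   paddr = (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
 *
 * This assumes the target has a direct IOVA-to-PA mapping, which is
 * presumably why BUILD_X86 is excluded from the guard above.
 */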
static inline 2085 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev, 2086 struct dp_tx_desc_s *tx_desc, 2087 qdf_nbuf_t nbuf) 2088 { 2089 if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_SIMPLE)) { 2090 qdf_nbuf_dma_clean_range((void *)nbuf->data, 2091 (void *)(nbuf->data + nbuf->len)); 2092 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data); 2093 } else { 2094 return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf); 2095 } 2096 } 2097 2098 static inline 2099 void dp_tx_nbuf_unmap(struct dp_soc *soc, 2100 struct dp_tx_desc_s *desc) 2101 { 2102 if (qdf_unlikely(!(desc->flags & 2103 (DP_TX_DESC_FLAG_SIMPLE | DP_TX_DESC_FLAG_RMNET)))) 2104 return dp_tx_nbuf_unmap_regular(soc, desc); 2105 } 2106 #else 2107 static inline 2108 qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev, 2109 struct dp_tx_desc_s *tx_desc, 2110 qdf_nbuf_t nbuf) 2111 { 2112 return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf); 2113 } 2114 2115 static inline 2116 void dp_tx_nbuf_unmap(struct dp_soc *soc, 2117 struct dp_tx_desc_s *desc) 2118 { 2119 return dp_tx_nbuf_unmap_regular(soc, desc); 2120 } 2121 #endif 2122 2123 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO) 2124 static inline 2125 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc) 2126 { 2127 dp_tx_nbuf_unmap(soc, desc); 2128 desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE; 2129 } 2130 2131 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc) 2132 { 2133 if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE))) 2134 dp_tx_nbuf_unmap(soc, desc); 2135 } 2136 #else 2137 static inline 2138 void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc) 2139 { 2140 } 2141 2142 static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc) 2143 { 2144 dp_tx_nbuf_unmap(soc, desc); 2145 } 2146 #endif 2147 2148 #ifdef MESH_MODE_SUPPORT 2149 /** 2150 * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP 2151 * @soc: datapath SOC 2152 * @vdev: datapath vdev 2153 * @tx_desc: TX descriptor 2154 * 2155 * Return: None 2156 */ 2157 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc, 2158 struct dp_vdev *vdev, 2159 struct dp_tx_desc_s *tx_desc) 2160 { 2161 if (qdf_unlikely(vdev->mesh_vdev)) 2162 tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE; 2163 } 2164 2165 /** 2166 * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer 2167 * @soc: dp_soc handle 2168 * @tx_desc: TX descriptor 2169 * @delayed_free: delay the nbuf free 2170 * 2171 * Return: nbuf to be freed later 2172 */ 2173 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc, 2174 struct dp_tx_desc_s *tx_desc, 2175 bool delayed_free) 2176 { 2177 qdf_nbuf_t nbuf = tx_desc->nbuf; 2178 struct dp_vdev *vdev = NULL; 2179 2180 vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH); 2181 if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) { 2182 if (vdev) 2183 DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1); 2184 2185 if (delayed_free) 2186 return nbuf; 2187 2188 qdf_nbuf_free(nbuf); 2189 } else { 2190 if (vdev && vdev->osif_tx_free_ext) { 2191 vdev->osif_tx_free_ext((nbuf)); 2192 } else { 2193 if (delayed_free) 2194 return nbuf; 2195 2196 qdf_nbuf_free(nbuf); 2197 } 2198 } 2199 2200 if (vdev) 2201 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH); 2202 2203 return NULL; 2204 } 2205 #else 2206 static inline void dp_tx_update_mesh_flags(struct dp_soc *soc, 2207 struct dp_vdev *vdev, 2208 struct dp_tx_desc_s *tx_desc) 2209 { 2210 } 2211 2212 static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2213 struct dp_tx_desc_s *tx_desc, 2214 bool delayed_free) 2215 { 2216 return NULL; 2217 } 2218 #endif 2219 2220 /** 2221 * dp_tx_frame_is_drop() - checks if the packet is a loopback (source and destination resolve to the same peer) 2222 * @vdev: DP vdev handle 2223 * @srcmac: source MAC address 2224 * @dstmac: destination MAC address 2225 * Return: 1 if frame needs to be dropped else 0 2226 */ 2227 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac) 2228 { 2229 struct dp_pdev *pdev = NULL; 2230 struct dp_ast_entry *src_ast_entry = NULL; 2231 struct dp_ast_entry *dst_ast_entry = NULL; 2232 struct dp_soc *soc = NULL; 2233 2234 qdf_assert(vdev); 2235 pdev = vdev->pdev; 2236 qdf_assert(pdev); 2237 soc = pdev->soc; 2238 2239 dst_ast_entry = dp_peer_ast_hash_find_by_pdevid 2240 (soc, dstmac, vdev->pdev->pdev_id); 2241 2242 src_ast_entry = dp_peer_ast_hash_find_by_pdevid 2243 (soc, srcmac, vdev->pdev->pdev_id); 2244 if (dst_ast_entry && src_ast_entry) { 2245 if (dst_ast_entry->peer_id == 2246 src_ast_entry->peer_id) 2247 return 1; 2248 } 2249 2250 return 0; 2251 } 2252 2253 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \ 2254 defined(WLAN_MCAST_MLO) 2255 /* MLO peer id for reinject */ 2256 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD 2257 /* MLO vdev id inc offset */ 2258 #define DP_MLO_VDEV_ID_OFFSET 0x80 2259 2260 static inline void 2261 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) 2262 { 2263 if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) { 2264 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; 2265 qdf_atomic_inc(&soc->num_tx_exception); 2266 } 2267 } 2268 2269 static inline void 2270 dp_tx_update_mcast_param(uint16_t peer_id, 2271 uint16_t *htt_tcl_metadata, 2272 struct dp_vdev *vdev, 2273 struct dp_tx_msdu_info_s *msdu_info) 2274 { 2275 if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) { 2276 *htt_tcl_metadata = 0; 2277 DP_TX_TCL_METADATA_TYPE_SET( 2278 *htt_tcl_metadata, 2279 HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED); 2280 HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata, 2281 msdu_info->gsn); 2282 2283 msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET; 2284 if (qdf_unlikely(vdev->nawds_enabled)) 2285 HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET( 2286 *htt_tcl_metadata, 1); 2287 } else { 2288 msdu_info->vdev_id = vdev->vdev_id; 2289 } 2290 } 2291 #else 2292 static inline void 2293 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) 2294 { 2295 } 2296 2297 static inline void 2298 dp_tx_update_mcast_param(uint16_t peer_id, 2299 uint16_t *htt_tcl_metadata, 2300 struct dp_vdev *vdev, 2301 struct dp_tx_msdu_info_s *msdu_info) 2302 { 2303 } 2304 #endif 2305 2306 #ifdef DP_TX_SW_DROP_STATS_INC 2307 static void tx_sw_drop_stats_inc(struct dp_pdev *pdev, 2308 qdf_nbuf_t nbuf, 2309 enum cdp_tx_sw_drop drop_code) 2310 { 2311 /* EAPOL Drop stats */ 2312 if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) { 2313 switch (drop_code) { 2314 case TX_DESC_ERR: 2315 DP_STATS_INC(pdev, eap_drop_stats.tx_desc_err, 1); 2316 break; 2317 case TX_HAL_RING_ACCESS_ERR: 2318 DP_STATS_INC(pdev, 2319 eap_drop_stats.tx_hal_ring_access_err, 1); 2320 break; 2321 case TX_DMA_MAP_ERR: 2322 DP_STATS_INC(pdev, eap_drop_stats.tx_dma_map_err, 1); 2323 break; 2324 case TX_HW_ENQUEUE: 2325 DP_STATS_INC(pdev, eap_drop_stats.tx_hw_enqueue, 1); 2326 break; 2327 case TX_SW_ENQUEUE: 2328 DP_STATS_INC(pdev, eap_drop_stats.tx_sw_enqueue, 1); 2329 break; 2330 default: 2331 dp_info_rl("Invalid eapol_drop code: %d", drop_code); 2332 break; 2333 } 2334 } 2335 } 2336 #else 2337 static void tx_sw_drop_stats_inc(struct dp_pdev *pdev, 2338 qdf_nbuf_t
nbuf, 2339 enum cdp_tx_sw_drop drop_code) 2340 { 2341 } 2342 #endif 2343 2344 /** 2345 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL 2346 * @vdev: DP vdev handle 2347 * @nbuf: skb 2348 * @msdu_info: MSDU info holding the TID (from HLOS, for overriding the 2349 * default DSCP-TID mapping), the metadata for the fw and the 2350 * Tx queue to be used for this Tx frame 2351 * @peer_id: peer_id of the peer in case of NAWDS frames 2352 * @tx_exc_metadata: Handle that holds exception path metadata 2353 * 2354 * Return: NULL on success, 2355 * nbuf when it fails to send 2356 */ 2357 qdf_nbuf_t 2358 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 2359 struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id, 2360 struct cdp_tx_exception_metadata *tx_exc_metadata) 2361 { 2362 struct dp_pdev *pdev = vdev->pdev; 2363 struct dp_soc *soc = pdev->soc; 2364 struct dp_tx_desc_s *tx_desc; 2365 QDF_STATUS status; 2366 struct dp_tx_queue *tx_q = &(msdu_info->tx_queue); 2367 uint16_t htt_tcl_metadata = 0; 2368 enum cdp_tx_sw_drop drop_code = TX_MAX_DROP; 2369 uint8_t tid = msdu_info->tid; 2370 struct cdp_tid_tx_stats *tid_stats = NULL; 2371 qdf_dma_addr_t paddr; 2372 2373 /* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */ 2374 tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id, 2375 msdu_info, tx_exc_metadata); 2376 if (!tx_desc) { 2377 dp_err_rl("Tx_desc prepare Fail vdev_id %d vdev %pK queue %d", 2378 vdev->vdev_id, vdev, tx_q->desc_pool_id); 2379 drop_code = TX_DESC_ERR; 2380 goto fail_return; 2381 } 2382 2383 dp_tx_update_tdls_flags(soc, vdev, tx_desc); 2384 2385 if (qdf_unlikely(peer_id == DP_INVALID_PEER)) { 2386 htt_tcl_metadata = vdev->htt_tcl_metadata; 2387 DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1); 2388 } else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) { 2389 DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata, 2390 DP_TCL_METADATA_TYPE_PEER_BASED); 2391 DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata, 2392 peer_id); 2393 dp_tx_bypass_reinjection(soc, tx_desc); 2394 } else 2395 htt_tcl_metadata = vdev->htt_tcl_metadata; 2396 2397 if (msdu_info->exception_fw) 2398 DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1); 2399 2400 dp_tx_desc_update_fast_comp_flag(soc, tx_desc, 2401 !pdev->enhanced_stats_en); 2402 2403 dp_tx_update_mesh_flags(soc, vdev, tx_desc); 2404 2405 if (qdf_unlikely(msdu_info->frm_type == dp_tx_frm_rmnet)) 2406 paddr = dp_tx_rmnet_nbuf_map(msdu_info, tx_desc); 2407 else 2408 paddr = dp_tx_nbuf_map(vdev, tx_desc, nbuf); 2409 2410 if (!paddr) { 2411 /* Handle failure */ 2412 dp_err("qdf_nbuf_map failed"); 2413 DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1); 2414 drop_code = TX_DMA_MAP_ERR; 2415 goto release_desc; 2416 } 2417 2418 tx_desc->dma_addr = paddr; 2419 dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf, 2420 tx_desc->id, DP_TX_DESC_MAP); 2421 dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info); 2422 /* Enqueue the Tx MSDU descriptor to HW for transmit */ 2423 status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc, 2424 htt_tcl_metadata, 2425 tx_exc_metadata, msdu_info); 2426 2427 if (status != QDF_STATUS_SUCCESS) { 2428 dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d", 2429 tx_desc, tx_q->ring_id); 2430 dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf, 2431 tx_desc->id, DP_TX_DESC_UNMAP); 2432 dp_tx_nbuf_unmap(soc, tx_desc); 2433 drop_code = TX_HW_ENQUEUE; 2434 goto release_desc; 2435 } 2436 2437 tx_sw_drop_stats_inc(pdev, nbuf, drop_code); 2438 return NULL; 2439 2440 release_desc: 2441
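/* Error path: return the descriptor to its pool. fail_return below
 * additionally re-resolves the TID (for stats indexing) and bumps the
 * per-ring, per-TID software drop counter for drop_code. */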
dp_tx_desc_release(tx_desc, tx_q->desc_pool_id); 2442 2443 fail_return: 2444 dp_tx_get_tid(vdev, nbuf, msdu_info); 2445 tx_sw_drop_stats_inc(pdev, nbuf, drop_code); 2446 tid_stats = &pdev->stats.tid_stats. 2447 tid_tx_stats[tx_q->ring_id][tid]; 2448 tid_stats->swdrop_cnt[drop_code]++; 2449 return nbuf; 2450 } 2451 2452 /** 2453 * dp_tdls_tx_comp_free_buff() - Free non std buffer when TDLS flag is set 2454 * @soc: Soc handle 2455 * @desc: software Tx descriptor to be processed 2456 * 2457 * Return: 0 if the buffer was consumed here, 1 if the caller should free it 2458 */ 2459 #ifdef FEATURE_WLAN_TDLS 2460 static inline int 2461 dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc) 2462 { 2463 /* If it is TDLS mgmt, don't unmap or free the frame */ 2464 if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) { 2465 dp_non_std_htt_tx_comp_free_buff(soc, desc); 2466 return 0; 2467 } 2468 return 1; 2469 } 2470 #else 2471 static inline int 2472 dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc) 2473 { 2474 return 1; 2475 } 2476 #endif 2477 2478 /** 2479 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor 2480 * @soc: Soc handle 2481 * @desc: software Tx descriptor to be processed 2482 * @delayed_free: defer freeing of nbuf 2483 * 2484 * Return: nbuf to be freed later 2485 */ 2486 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc, 2487 bool delayed_free) 2488 { 2489 qdf_nbuf_t nbuf = desc->nbuf; 2490 enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags); 2491 2492 /* nbuf already freed in vdev detach path */ 2493 if (!nbuf) 2494 return NULL; 2495 2496 if (!dp_tdls_tx_comp_free_buff(soc, desc)) 2497 return NULL; 2498 2499 /* 0 : MSDU buffer, 1 : MLE */ 2500 if (desc->msdu_ext_desc) { 2501 /* TSO free */ 2502 if (hal_tx_ext_desc_get_tso_enable( 2503 desc->msdu_ext_desc->vaddr)) { 2504 dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, 2505 desc->id, DP_TX_COMP_MSDU_EXT); 2506 dp_tx_tso_seg_history_add(soc, 2507 desc->msdu_ext_desc->tso_desc, 2508 desc->nbuf, desc->id, type); 2509 /* unmap each TSO seg before freeing the nbuf */ 2510 dp_tx_tso_unmap_segment(soc, 2511 desc->msdu_ext_desc->tso_desc, 2512 desc->msdu_ext_desc-> 2513 tso_num_desc); 2514 goto nbuf_free; 2515 } 2516 2517 if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) { 2518 void *msdu_ext_desc = desc->msdu_ext_desc->vaddr; 2519 qdf_dma_addr_t iova; 2520 uint32_t frag_len; 2521 uint32_t i; 2522 2523 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, 2524 QDF_DMA_TO_DEVICE, 2525 qdf_nbuf_headlen(nbuf)); 2526 2527 for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) { 2528 hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i, 2529 &iova, 2530 &frag_len); 2531 if (!iova || !frag_len) 2532 break; 2533 2534 qdf_mem_unmap_page(soc->osdev, iova, frag_len, 2535 QDF_DMA_TO_DEVICE); 2536 } 2537 2538 goto nbuf_free; 2539 } 2540 } 2541 /* If it's an ME frame, don't unmap the cloned nbufs */ 2542 if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf)) 2543 goto nbuf_free; 2544 2545 dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type); 2546 dp_tx_unmap(soc, desc); 2547 2548 if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE) 2549 return dp_mesh_tx_comp_free_buff(soc, desc, delayed_free); 2550 2551 if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf)) 2552 return NULL; 2553 2554 nbuf_free: 2555 if (delayed_free) 2556 return nbuf; 2557 2558 qdf_nbuf_free(nbuf); 2559 2560 return NULL; 2561 } 2562 2563 /** 2564 * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments 2565 * @soc: DP soc handle 2566 * @nbuf: skb
2567 * @msdu_info: MSDU info 2568 * 2569 * Return: None 2570 */ 2571 static inline void 2572 dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf, 2573 struct dp_tx_msdu_info_s *msdu_info) 2574 { 2575 uint32_t cur_idx; 2576 struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg; 2577 2578 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE, 2579 qdf_nbuf_headlen(nbuf)); 2580 2581 for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++) 2582 qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t) 2583 (seg->frags[cur_idx].paddr_lo | ((uint64_t) 2584 seg->frags[cur_idx].paddr_hi) << 32), 2585 seg->frags[cur_idx].len, 2586 QDF_DMA_TO_DEVICE); 2587 } 2588 2589 /** 2590 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs 2591 * @vdev: DP vdev handle 2592 * @nbuf: skb 2593 * @msdu_info: MSDU info to be setup in MSDU extension descriptor 2594 * 2595 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL 2596 * 2597 * Return: NULL on success, 2598 * nbuf when it fails to send 2599 */ 2600 #if QDF_LOCK_STATS 2601 noinline 2602 #else 2603 #endif 2604 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 2605 struct dp_tx_msdu_info_s *msdu_info) 2606 { 2607 uint32_t i; 2608 struct dp_pdev *pdev = vdev->pdev; 2609 struct dp_soc *soc = pdev->soc; 2610 struct dp_tx_desc_s *tx_desc; 2611 bool is_cce_classified = false; 2612 QDF_STATUS status; 2613 uint16_t htt_tcl_metadata = 0; 2614 struct dp_tx_queue *tx_q = &msdu_info->tx_queue; 2615 struct cdp_tid_tx_stats *tid_stats = NULL; 2616 uint8_t prep_desc_fail = 0, hw_enq_fail = 0; 2617 2618 if (msdu_info->frm_type == dp_tx_frm_me) 2619 nbuf = msdu_info->u.sg_info.curr_seg->nbuf; 2620 2621 i = 0; 2622 /* i tracks the number of segments processed against num_seg */ 2623 /* 2624 * For each segment (maps to 1 MSDU), prepare software and hardware 2625 * descriptors using information in msdu_info 2626 */ 2627 while (i < msdu_info->num_seg) { 2628 /* 2629 * Setup Tx descriptor for an MSDU, and MSDU extension 2630 * descriptor 2631 */ 2632 tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info, 2633 tx_q->desc_pool_id); 2634 2635 if (!tx_desc) { 2636 if (msdu_info->frm_type == dp_tx_frm_me) { 2637 prep_desc_fail++; 2638 dp_tx_me_free_buf(pdev, 2639 (void *)(msdu_info->u.sg_info 2640 .curr_seg->frags[0].vaddr)); 2641 if (prep_desc_fail == msdu_info->num_seg) { 2642 /* 2643 * Unmap is needed only if descriptor 2644 * preparation failed for all segments. 2645 */ 2646 qdf_nbuf_unmap(soc->osdev, 2647 msdu_info->u.sg_info. 2648 curr_seg->nbuf, 2649 QDF_DMA_TO_DEVICE); 2650 } 2651 /* 2652 * Free the nbuf for the current segment 2653 * and make it point to the next in the list. 2654 * For ME, there are as many segments as there 2655 * are clients. 2656 */ 2657 qdf_nbuf_free(msdu_info->u.sg_info 2658 .curr_seg->nbuf); 2659 if (msdu_info->u.sg_info.curr_seg->next) { 2660 msdu_info->u.sg_info.curr_seg = 2661 msdu_info->u.sg_info 2662 .curr_seg->next; 2663 nbuf = msdu_info->u.sg_info 2664 .curr_seg->nbuf; 2665 } 2666 i++; 2667 continue; 2668 } 2669 2670 if (msdu_info->frm_type == dp_tx_frm_tso) { 2671 dp_tx_tso_seg_history_add( 2672 soc, 2673 msdu_info->u.tso_info.curr_seg, 2674 nbuf, 0, DP_TX_DESC_UNMAP); 2675 dp_tx_tso_unmap_segment(soc, 2676 msdu_info->u.tso_info. 2677 curr_seg, 2678 msdu_info->u.tso_info.
2679 tso_num_seg_list); 2680 2681 if (msdu_info->u.tso_info.curr_seg->next) { 2682 msdu_info->u.tso_info.curr_seg = 2683 msdu_info->u.tso_info.curr_seg->next; 2684 i++; 2685 continue; 2686 } 2687 } 2688 2689 if (msdu_info->frm_type == dp_tx_frm_sg) 2690 dp_tx_sg_unmap_buf(soc, nbuf, msdu_info); 2691 2692 goto done; 2693 } 2694 2695 if (msdu_info->frm_type == dp_tx_frm_me) { 2696 tx_desc->msdu_ext_desc->me_buffer = 2697 (struct dp_tx_me_buf_t *)msdu_info-> 2698 u.sg_info.curr_seg->frags[0].vaddr; 2699 tx_desc->flags |= DP_TX_DESC_FLAG_ME; 2700 } 2701 2702 if (is_cce_classified) 2703 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; 2704 2705 htt_tcl_metadata = vdev->htt_tcl_metadata; 2706 if (msdu_info->exception_fw) { 2707 DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1); 2708 } 2709 2710 dp_tx_is_hp_update_required(i, msdu_info); 2711 2712 /* 2713 * For frames with multiple segments (TSO, ME), jump to next 2714 * segment. 2715 */ 2716 if (msdu_info->frm_type == dp_tx_frm_tso) { 2717 if (msdu_info->u.tso_info.curr_seg->next) { 2718 msdu_info->u.tso_info.curr_seg = 2719 msdu_info->u.tso_info.curr_seg->next; 2720 2721 /* 2722 * If this is a jumbo nbuf, then increment the 2723 * number of nbuf users for each additional 2724 * segment of the msdu. This will ensure that 2725 * the skb is freed only after receiving tx 2726 * completion for all segments of an nbuf 2727 */ 2728 qdf_nbuf_inc_users(nbuf); 2729 2730 /* Check with MCL if this is needed */ 2731 /* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; 2732 */ 2733 } 2734 } 2735 2736 dp_tx_update_mcast_param(DP_INVALID_PEER, 2737 &htt_tcl_metadata, 2738 vdev, 2739 msdu_info); 2740 /* 2741 * Enqueue the Tx MSDU descriptor to HW for transmit 2742 */ 2743 status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc, 2744 htt_tcl_metadata, 2745 NULL, msdu_info); 2746 2747 dp_tx_check_and_flush_hp(soc, status, msdu_info); 2748 2749 if (status != QDF_STATUS_SUCCESS) { 2750 dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d", 2751 tx_desc, tx_q->ring_id); 2752 2753 dp_tx_get_tid(vdev, nbuf, msdu_info); 2754 tid_stats = &pdev->stats.tid_stats. 2755 tid_tx_stats[tx_q->ring_id][msdu_info->tid]; 2756 tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++; 2757 2758 if (msdu_info->frm_type == dp_tx_frm_me) { 2759 hw_enq_fail++; 2760 if (hw_enq_fail == msdu_info->num_seg) { 2761 /* 2762 * Unmap is needed only if enqueue 2763 * failed for all segments. 2764 */ 2765 qdf_nbuf_unmap(soc->osdev, 2766 msdu_info->u.sg_info. 2767 curr_seg->nbuf, 2768 QDF_DMA_TO_DEVICE); 2769 } 2770 /* 2771 * Free the nbuf for the current segment 2772 * and make it point to the next in the list. 2773 * For ME, there are as many segments as there 2774 * are clients.
2775 */ 2776 qdf_nbuf_free(msdu_info->u.sg_info 2777 .curr_seg->nbuf); 2778 dp_tx_desc_release(tx_desc, tx_q->desc_pool_id); 2779 if (msdu_info->u.sg_info.curr_seg->next) { 2780 msdu_info->u.sg_info.curr_seg = 2781 msdu_info->u.sg_info 2782 .curr_seg->next; 2783 nbuf = msdu_info->u.sg_info 2784 .curr_seg->nbuf; 2785 } else 2786 break; 2787 i++; 2788 continue; 2789 } 2790 2791 /* 2792 * For TSO frames, the nbuf users increment done for 2793 * the current segment has to be reverted, since the 2794 * hw enqueue for this segment failed 2795 */ 2796 if (msdu_info->frm_type == dp_tx_frm_tso && 2797 msdu_info->u.tso_info.curr_seg) { 2798 /* 2799 * unmap and free current, 2800 * retransmit remaining segments 2801 */ 2802 dp_tx_comp_free_buf(soc, tx_desc, false); 2803 i++; 2804 dp_tx_desc_release(tx_desc, tx_q->desc_pool_id); 2805 continue; 2806 } 2807 2808 if (msdu_info->frm_type == dp_tx_frm_sg) 2809 dp_tx_sg_unmap_buf(soc, nbuf, msdu_info); 2810 2811 dp_tx_desc_release(tx_desc, tx_q->desc_pool_id); 2812 goto done; 2813 } 2814 2815 /* 2816 * TODO 2817 * if tso_info structure can be modified to have curr_seg 2818 * as first element, following 2 blocks of code (for TSO and SG) 2819 * can be combined into 1 2820 */ 2821 2822 /* 2823 * For Multicast-Unicast converted packets, 2824 * each converted frame (for a client) is represented as 2825 * 1 segment 2826 */ 2827 if ((msdu_info->frm_type == dp_tx_frm_sg) || 2828 (msdu_info->frm_type == dp_tx_frm_me)) { 2829 if (msdu_info->u.sg_info.curr_seg->next) { 2830 msdu_info->u.sg_info.curr_seg = 2831 msdu_info->u.sg_info.curr_seg->next; 2832 nbuf = msdu_info->u.sg_info.curr_seg->nbuf; 2833 } else 2834 break; 2835 } 2836 i++; 2837 } 2838 2839 nbuf = NULL; 2840 2841 done: 2842 return nbuf; 2843 } 2844 2845 /** 2846 * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info 2847 * for SG frames 2848 * @vdev: DP vdev handle 2849 * @nbuf: skb 2850 * @seg_info: Pointer to Segment info Descriptor to be prepared 2851 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc. 
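 *
 * Each fragment's DMA address is stored split across two 32-bit words;
 * a sketch of the packing used by this function and its unmap peers:
 *
 *   seg_info->frags[i].paddr_lo = (uint32_t)paddr;
 *   seg_info->frags[i].paddr_hi = ((uint64_t)paddr) >> 32;
 *
 * and the reassembly on unmap:
 *
 *   paddr = frags[i].paddr_lo | (((uint64_t)frags[i].paddr_hi) << 32);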
2852 * 2853 * Return: NULL on success, 2854 * nbuf when it fails to send 2855 */ 2856 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 2857 struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info) 2858 { 2859 uint32_t cur_frag, nr_frags, i; 2860 qdf_dma_addr_t paddr; 2861 struct dp_tx_sg_info_s *sg_info; 2862 2863 sg_info = &msdu_info->u.sg_info; 2864 nr_frags = qdf_nbuf_get_nr_frags(nbuf); 2865 2866 if (QDF_STATUS_SUCCESS != 2867 qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf, 2868 QDF_DMA_TO_DEVICE, 2869 qdf_nbuf_headlen(nbuf))) { 2870 dp_tx_err("dma map error"); 2871 DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1); 2872 qdf_nbuf_free(nbuf); 2873 return NULL; 2874 } 2875 2876 paddr = qdf_nbuf_mapped_paddr_get(nbuf); 2877 seg_info->frags[0].paddr_lo = paddr; 2878 seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32; 2879 seg_info->frags[0].len = qdf_nbuf_headlen(nbuf); 2880 seg_info->frags[0].vaddr = (void *) nbuf; 2881 2882 for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) { 2883 if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev, 2884 nbuf, 0, 2885 QDF_DMA_TO_DEVICE, 2886 cur_frag)) { 2887 dp_tx_err("frag dma map error"); 2888 DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1); 2889 goto map_err; 2890 } 2891 2892 paddr = qdf_nbuf_get_tx_frag_paddr(nbuf); 2893 seg_info->frags[cur_frag + 1].paddr_lo = paddr; 2894 seg_info->frags[cur_frag + 1].paddr_hi = 2895 ((uint64_t) paddr) >> 32; 2896 seg_info->frags[cur_frag + 1].len = 2897 qdf_nbuf_get_frag_size(nbuf, cur_frag); 2898 } 2899 2900 seg_info->frag_cnt = (cur_frag + 1); 2901 seg_info->total_len = qdf_nbuf_len(nbuf); 2902 seg_info->next = NULL; 2903 2904 sg_info->curr_seg = seg_info; 2905 2906 msdu_info->frm_type = dp_tx_frm_sg; 2907 msdu_info->num_seg = 1; 2908 2909 return nbuf; 2910 map_err: 2911 /* restore paddr into nbuf before calling unmap */ 2912 qdf_nbuf_mapped_paddr_set(nbuf, 2913 (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo | 2914 ((uint64_t) 2915 seg_info->frags[0].paddr_hi) << 32)); 2916 qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf, 2917 QDF_DMA_TO_DEVICE, 2918 seg_info->frags[0].len); 2919 for (i = 1; i <= cur_frag; i++) { 2920 qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t) 2921 (seg_info->frags[i].paddr_lo | ((uint64_t) 2922 seg_info->frags[i].paddr_hi) << 32), 2923 seg_info->frags[i].len, 2924 QDF_DMA_TO_DEVICE); 2925 } 2926 qdf_nbuf_free(nbuf); 2927 return NULL; 2928 } 2929 2930 /** 2931 * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info 2932 * @vdev: DP vdev handle 2933 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc. 
2934 * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions 2935 * 2936 * Return: None 2937 * 2938 */ 2939 static 2940 void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev, 2941 struct dp_tx_msdu_info_s *msdu_info, 2942 uint16_t ppdu_cookie) 2943 { 2944 struct htt_tx_msdu_desc_ext2_t *meta_data = 2945 (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0]; 2946 2947 qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t)); 2948 2949 HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET 2950 (msdu_info->meta_data[5], 1); 2951 HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET 2952 (msdu_info->meta_data[5], 1); 2953 HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET 2954 (msdu_info->meta_data[6], ppdu_cookie); 2955 2956 msdu_info->exception_fw = 1; 2957 msdu_info->is_tx_sniffer = 1; 2958 } 2959 2960 #ifdef MESH_MODE_SUPPORT 2961 2962 /** 2963 * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf 2964 * and prepare msdu_info for mesh frames 2965 * @vdev: DP vdev handle 2966 * @nbuf: skb 2967 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc. 2968 * 2969 * Return: NULL on failure, 2970 * nbuf when extracted successfully 2971 */ 2972 static 2973 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 2974 struct dp_tx_msdu_info_s *msdu_info) 2975 { 2976 struct meta_hdr_s *mhdr; 2977 struct htt_tx_msdu_desc_ext2_t *meta_data = 2978 (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0]; 2979 2980 mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf); 2981 2982 if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) { 2983 msdu_info->exception_fw = 0; 2984 goto remove_meta_hdr; 2985 } 2986 2987 msdu_info->exception_fw = 1; 2988 2989 qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t)); 2990 2991 meta_data->host_tx_desc_pool = 1; 2992 meta_data->update_peer_cache = 1; 2993 meta_data->learning_frame = 1; 2994 2995 if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) { 2996 meta_data->power = mhdr->power; 2997 2998 meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs; 2999 meta_data->nss_mask = 1 << mhdr->rate_info[0].nss; 3000 meta_data->pream_type = mhdr->rate_info[0].preamble_type; 3001 meta_data->retry_limit = mhdr->rate_info[0].max_tries; 3002 3003 meta_data->dyn_bw = 1; 3004 3005 meta_data->valid_pwr = 1; 3006 meta_data->valid_mcs_mask = 1; 3007 meta_data->valid_nss_mask = 1; 3008 meta_data->valid_preamble_type = 1; 3009 meta_data->valid_retries = 1; 3010 meta_data->valid_bw_info = 1; 3011 } 3012 3013 if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) { 3014 meta_data->encrypt_type = 0; 3015 meta_data->valid_encrypt_type = 1; 3016 meta_data->learning_frame = 0; 3017 } 3018 3019 meta_data->valid_key_flags = 1; 3020 meta_data->key_flags = (mhdr->keyix & 0x3); 3021 3022 remove_meta_hdr: 3023 if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) { 3024 dp_tx_err("qdf_nbuf_pull_head failed"); 3025 qdf_nbuf_free(nbuf); 3026 return NULL; 3027 } 3028 3029 msdu_info->tid = qdf_nbuf_get_priority(nbuf); 3030 3031 dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x" 3032 " tid %d to_fw %d", 3033 msdu_info->meta_data[0], 3034 msdu_info->meta_data[1], 3035 msdu_info->meta_data[2], 3036 msdu_info->meta_data[3], 3037 msdu_info->meta_data[4], 3038 msdu_info->meta_data[5], 3039 msdu_info->tid, msdu_info->exception_fw); 3040 3041 return nbuf; 3042 } 3043 #else 3044 static 3045 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 3046 struct
dp_tx_msdu_info_s *msdu_info) 3047 { 3048 return nbuf; 3049 } 3050 3051 #endif 3052 3053 /** 3054 * dp_check_exc_metadata() - Checks if parameters are valid 3055 * @tx_exc: holds all exception path parameters 3056 * 3057 * Return: true when all the parameters are valid, else false 3058 * 3059 */ 3060 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc) 3061 { 3062 bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid != 3063 HTT_INVALID_TID); 3064 bool invalid_encap_type = 3065 (tx_exc->tx_encap_type > htt_cmn_pkt_num_types && 3066 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE); 3067 bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types && 3068 tx_exc->sec_type != CDP_INVALID_SEC_TYPE); 3069 bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 && 3070 tx_exc->ppdu_cookie == 0); 3071 3072 if (tx_exc->is_intrabss_fwd) 3073 return true; 3074 3075 if (invalid_tid || invalid_encap_type || invalid_sec_type || 3076 invalid_cookie) { 3077 return false; 3078 } 3079 3080 return true; 3081 } 3082 3083 #ifdef ATH_SUPPORT_IQUE 3084 /** 3085 * dp_tx_mcast_enhance() - Multicast enhancement on TX 3086 * @vdev: vdev handle 3087 * @nbuf: skb 3088 * 3089 * Return: true on success, 3090 * false on failure 3091 */ 3092 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 3093 { 3094 qdf_ether_header_t *eh; 3095 3096 /* Mcast to Ucast Conversion */ 3097 if (qdf_likely(!vdev->mcast_enhancement_en)) 3098 return true; 3099 3100 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 3101 if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) && 3102 !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) { 3103 dp_verbose_debug("Mcast frm for ME %pK", vdev); 3104 qdf_nbuf_set_next(nbuf, NULL); 3105 3106 DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1, 3107 qdf_nbuf_len(nbuf)); 3108 if (dp_tx_prepare_send_me(vdev, nbuf) == 3109 QDF_STATUS_SUCCESS) { 3110 return false; 3111 } 3112 3113 if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) { 3114 if (dp_tx_prepare_send_igmp_me(vdev, nbuf) == 3115 QDF_STATUS_SUCCESS) { 3116 return false; 3117 } 3118 } 3119 } 3120 3121 return true; 3122 } 3123 #else 3124 static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 3125 { 3126 return true; 3127 } 3128 #endif 3129 3130 #ifdef QCA_SUPPORT_WDS_EXTENDED 3131 /** 3132 * dp_tx_mcast_drop() - Drop mcast frame if drop_tx_mcast is set in WDS_EXT 3133 * @vdev: vdev handle 3134 * @nbuf: skb 3135 * 3136 * Return: true if frame is dropped, false otherwise 3137 */ 3138 static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 3139 { 3140 /* Drop tx mcast and WDS Extended feature check */ 3141 if (qdf_unlikely((vdev->drop_tx_mcast) && (vdev->wds_ext_enabled))) { 3142 qdf_ether_header_t *eh = (qdf_ether_header_t *) 3143 qdf_nbuf_data(nbuf); 3144 if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) { 3145 DP_STATS_INC(vdev, tx_i.dropped.tx_mcast_drop, 1); 3146 return true; 3147 } 3148 } 3149 3150 return false; 3151 } 3152 #else 3153 static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 3154 { 3155 return false; 3156 } 3157 #endif 3158 /** 3159 * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame 3160 * @nbuf: qdf_nbuf_t 3161 * @vdev: struct dp_vdev * 3162 * 3163 * Allow the packet for processing only if it is destined to a peer 3164 * client connected to the same vap. Drop the packet if the client is 3165 * connected to a different vap.
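 *
 * The check reduces to a destination-AST lookup scoped to this vdev
 * (a summary of the body below, not additional behavior):
 *
 *   dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, eh->ether_dhost,
 *                                                   vdev->vdev_id);
 *   if (!dst_ast_entry)
 *       return QDF_STATUS_E_FAILURE;   (client belongs to another vap)
 *
 * Multicast and broadcast destinations are always allowed through.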
3166 * 3167 * Return: QDF_STATUS 3168 */ 3169 static inline QDF_STATUS 3170 dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev) 3171 { 3172 struct dp_ast_entry *dst_ast_entry = NULL; 3173 qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 3174 3175 if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) || 3176 DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) 3177 return QDF_STATUS_SUCCESS; 3178 3179 qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock); 3180 dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc, 3181 eh->ether_dhost, 3182 vdev->vdev_id); 3183 3184 /* If there is no ast entry, return failure */ 3185 if (qdf_unlikely(!dst_ast_entry)) { 3186 qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock); 3187 return QDF_STATUS_E_FAILURE; 3188 } 3189 qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock); 3190 3191 return QDF_STATUS_SUCCESS; 3192 } 3193 3194 /** 3195 * dp_tx_nawds_handler() - NAWDS handler 3196 * 3197 * @soc: DP soc handle 3198 * @vdev: DP vdev handle 3199 * @msdu_info: msdu_info required to create HTT metadata 3200 * @nbuf: skb 3201 * @sa_peer_id: id of the source peer (used to skip intra-bss copies) 3202 * 3203 * This function transmits a copy of the multicast frame to each 3204 * NAWDS-enabled peer, tagged with that peer's id. 3205 * 3206 * Return: none 3207 */ 3208 static inline 3209 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev, 3210 struct dp_tx_msdu_info_s *msdu_info, 3211 qdf_nbuf_t nbuf, uint16_t sa_peer_id) 3212 { 3213 struct dp_peer *peer = NULL; 3214 qdf_nbuf_t nbuf_clone = NULL; 3215 uint16_t peer_id = DP_INVALID_PEER; 3216 struct dp_txrx_peer *txrx_peer; 3217 3218 /* This check avoids forwarding to a peer that has an entry 3219 * in the ast table but no valid peer id yet. 3220 */ 3221 if (sa_peer_id == HTT_INVALID_PEER) 3222 return; 3223 3224 qdf_spin_lock_bh(&vdev->peer_list_lock); 3225 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { 3226 txrx_peer = dp_get_txrx_peer(peer); 3227 if (!txrx_peer) 3228 continue; 3229 3230 if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) { 3231 peer_id = peer->peer_id; 3232 3233 if (!dp_peer_is_primary_link_peer(peer)) 3234 continue; 3235 3236 /* Multicast packets need to be dropped 3237 * in the intra-bss forwarding case 3238 */ 3239 if (sa_peer_id == txrx_peer->peer_id) { 3240 dp_tx_debug("multicast packet"); 3241 DP_PEER_PER_PKT_STATS_INC(txrx_peer, 3242 tx.nawds_mcast_drop, 3243 1); 3244 continue; 3245 } 3246 3247 nbuf_clone = qdf_nbuf_clone(nbuf); 3248 3249 if (!nbuf_clone) { 3250 QDF_TRACE(QDF_MODULE_ID_DP, 3251 QDF_TRACE_LEVEL_ERROR, 3252 FL("nbuf clone failed")); 3253 break; 3254 } 3255 3256 nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone, 3257 msdu_info, peer_id, 3258 NULL); 3259 3260 if (nbuf_clone) { 3261 dp_tx_debug("pkt send failed"); 3262 qdf_nbuf_free(nbuf_clone); 3263 } else { 3264 if (peer_id != DP_INVALID_PEER) 3265 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, 3266 tx.nawds_mcast, 3267 1, qdf_nbuf_len(nbuf)); 3268 } 3269 } 3270 } 3271 3272 qdf_spin_unlock_bh(&vdev->peer_list_lock); 3273 } 3274 3275 /** 3276 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path 3277 * @soc: DP soc handle 3278 * @vdev_id: id of DP vdev handle 3279 * @nbuf: skb 3280 * @tx_exc_metadata: Handle that holds exception path meta data 3281 * 3282 * Entry point for Core Tx layer (DP_TX) invoked from 3283 * hard_start_xmit in OSIF/HDD to transmit frames through fw 3284 * 3285 * Return: NULL on success, 3286 * nbuf when it fails to send 3287 */ 3288 qdf_nbuf_t 3289 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 3290 qdf_nbuf_t nbuf, 3291 struct
cdp_tx_exception_metadata *tx_exc_metadata) 3292 { 3293 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3294 struct dp_tx_msdu_info_s msdu_info; 3295 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 3296 DP_MOD_ID_TX_EXCEPTION); 3297 3298 if (qdf_unlikely(!vdev)) 3299 goto fail; 3300 3301 qdf_mem_zero(&msdu_info, sizeof(msdu_info)); 3302 3303 if (!tx_exc_metadata) 3304 goto fail; 3305 3306 msdu_info.tid = tx_exc_metadata->tid; 3307 dp_verbose_debug("skb "QDF_MAC_ADDR_FMT, 3308 QDF_MAC_ADDR_REF(nbuf->data)); 3309 3310 DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf)); 3311 3312 if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) { 3313 dp_tx_err("Invalid parameters in exception path"); 3314 goto fail; 3315 } 3316 3317 /* for peer based metadata check if peer is valid */ 3318 if (tx_exc_metadata->peer_id != CDP_INVALID_PEER) { 3319 struct dp_peer *peer = NULL; 3320 3321 peer = dp_peer_get_ref_by_id(vdev->pdev->soc, 3322 tx_exc_metadata->peer_id, 3323 DP_MOD_ID_TX_EXCEPTION); 3324 if (qdf_unlikely(!peer)) { 3325 DP_STATS_INC(vdev, 3326 tx_i.dropped.invalid_peer_id_in_exc_path, 3327 1); 3328 goto fail; 3329 } 3330 dp_peer_unref_delete(peer, DP_MOD_ID_TX_EXCEPTION); 3331 } 3332 /* Basic sanity checks for unsupported packets */ 3333 3334 /* MESH mode */ 3335 if (qdf_unlikely(vdev->mesh_vdev)) { 3336 dp_tx_err("Mesh mode is not supported in exception path"); 3337 goto fail; 3338 } 3339 3340 /* 3341 * Classify the frame and call corresponding 3342 * "prepare" function which extracts the segment (TSO) 3343 * and fragmentation information (for TSO, SG, ME, or Raw) 3344 * into MSDU_INFO structure which is later used to fill 3345 * SW and HW descriptors. 3346 */ 3347 if (qdf_nbuf_is_tso(nbuf)) { 3348 dp_verbose_debug("TSO frame %pK", vdev); 3349 DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1, 3350 qdf_nbuf_len(nbuf)); 3351 3352 if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) { 3353 DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1, 3354 qdf_nbuf_len(nbuf)); 3355 goto fail; 3356 } 3357 3358 DP_STATS_INC(vdev, tx_i.rcvd.num, msdu_info.num_seg - 1); 3359 3360 goto send_multiple; 3361 } 3362 3363 /* SG */ 3364 if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) { 3365 struct dp_tx_seg_info_s seg_info = {0}; 3366 3367 nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info); 3368 if (!nbuf) 3369 goto fail; 3370 3371 dp_verbose_debug("non-TSO SG frame %pK", vdev); 3372 3373 DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1, 3374 qdf_nbuf_len(nbuf)); 3375 3376 goto send_multiple; 3377 } 3378 3379 if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) { 3380 DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1, 3381 qdf_nbuf_len(nbuf)); 3382 3383 dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info, 3384 tx_exc_metadata->ppdu_cookie); 3385 } 3386 3387 /* 3388 * Get HW Queue to use for this frame. 3389 * TCL supports up to 4 DMA rings, out of which 3 rings are 3390 * dedicated for data and 1 for command. 3391 * "queue_id" maps to one hardware ring. 3392 * With each ring, we also associate a unique Tx descriptor pool 3393 * to minimize lock contention for these resources.
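 *
 * Illustrative only (the real mapping lives in dp_tx_get_queue(), not
 * shown here): a scheme in this spirit would be
 *
 *   queue->ring_id      = cpu_id % 3;      (3 data rings assumed)
 *   queue->desc_pool_id = queue->ring_id;
 *
 * so each ring owns a private descriptor pool and concurrent cores
 * rarely contend on the same pool lock.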
3394 */ 3395 dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue); 3396 3397 if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) { 3398 if (qdf_unlikely(vdev->nawds_enabled)) { 3399 /* 3400 * This is a multicast packet 3401 */ 3402 dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf, 3403 tx_exc_metadata->peer_id); 3404 DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast, 3405 1, qdf_nbuf_len(nbuf)); 3406 } 3407 3408 nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, 3409 DP_INVALID_PEER, NULL); 3410 } else { 3411 /* 3412 * Check exception descriptors 3413 */ 3414 if (dp_tx_exception_limit_check(vdev)) 3415 goto fail; 3416 3417 /* Single linear frame */ 3418 /* 3419 * If nbuf is a simple linear frame, use send_single function to 3420 * prepare direct-buffer type TCL descriptor and enqueue to TCL 3421 * SRNG. There is no need to setup a MSDU extension descriptor. 3422 */ 3423 nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, 3424 tx_exc_metadata->peer_id, 3425 tx_exc_metadata); 3426 } 3427 3428 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION); 3429 return nbuf; 3430 3431 send_multiple: 3432 nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info); 3433 3434 fail: 3435 if (vdev) 3436 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION); 3437 dp_verbose_debug("pkt send failed"); 3438 return nbuf; 3439 } 3440 3441 /** 3442 * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP 3443 * in the exception path, in a special case that avoids the regular 3444 * exception path check. 3445 * @soc: DP soc handle 3446 * @vdev_id: id of DP vdev handle 3447 * @nbuf: skb 3448 * @tx_exc_metadata: Handle that holds exception path meta data 3449 * 3450 * Entry point for Core Tx layer (DP_TX) invoked from 3451 * hard_start_xmit in OSIF/HDD to transmit frames through fw 3452 * 3453 * Return: NULL on success, 3454 * nbuf when it fails to send 3455 */ 3456 qdf_nbuf_t 3457 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl, 3458 uint8_t vdev_id, qdf_nbuf_t nbuf, 3459 struct cdp_tx_exception_metadata *tx_exc_metadata) 3460 { 3461 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3462 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 3463 DP_MOD_ID_TX_EXCEPTION); 3464 3465 if (qdf_unlikely(!vdev)) 3466 goto fail; 3467 3468 if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev) 3469 == QDF_STATUS_E_FAILURE)) { 3470 DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1); 3471 goto fail; 3472 } 3473 3474 /* Release the reference; it will be taken again inside dp_tx_send_exception() */ 3475 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION); 3476 3477 return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata); 3478 3479 fail: 3480 if (vdev) 3481 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION); 3482 dp_verbose_debug("pkt send failed"); 3483 return nbuf; 3484 } 3485 3486 /** 3487 * dp_tx_send_mesh() - Transmit mesh frame on a given VAP 3488 * @soc: DP soc handle 3489 * @vdev_id: DP vdev handle 3490 * @nbuf: skb 3491 * 3492 * Entry point for Core Tx layer (DP_TX) invoked from 3493 * hard_start_xmit in OSIF/HDD 3494 * 3495 * Return: NULL on success, 3496 * nbuf when it fails to send 3497 */ 3498 #ifdef MESH_MODE_SUPPORT 3499 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 3500 qdf_nbuf_t nbuf) 3501 { 3502 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3503 struct meta_hdr_s *mhdr; 3504 qdf_nbuf_t nbuf_mesh = NULL; 3505 qdf_nbuf_t nbuf_clone = NULL; 3506 struct dp_vdev *vdev; 3507 uint8_t no_enc_frame = 0; 3508 3509 nbuf_mesh = qdf_nbuf_unshare(nbuf); 3510 if (!nbuf_mesh)
{ 3510 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3511 "qdf_nbuf_unshare failed"); 3512 return nbuf; 3513 } 3514 3515 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH); 3516 if (!vdev) { 3517 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3518 "vdev is NULL for vdev_id %d", vdev_id); 3519 return nbuf; 3520 } 3521 3522 nbuf = nbuf_mesh; 3523 3524 mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf); 3525 3526 if ((vdev->sec_type != cdp_sec_type_none) && 3527 (mhdr->flags & METAHDR_FLAG_NOENCRYPT)) 3528 no_enc_frame = 1; 3529 3530 if (mhdr->flags & METAHDR_FLAG_NOQOS) 3531 qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST); 3532 3533 if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) && 3534 !no_enc_frame) { 3535 nbuf_clone = qdf_nbuf_clone(nbuf); 3536 if (!nbuf_clone) { 3537 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3538 "qdf_nbuf_clone failed"); 3539 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH); 3540 return nbuf; 3541 } 3542 qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO); 3543 } 3544 3545 if (nbuf_clone) { 3546 if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) { 3547 DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1); 3548 } else { 3549 qdf_nbuf_free(nbuf_clone); 3550 } 3551 } 3552 3553 if (no_enc_frame) 3554 qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO); 3555 else 3556 qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID); 3557 3558 nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf); 3559 if ((!nbuf) && no_enc_frame) { 3560 DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1); 3561 } 3562 3563 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH); 3564 return nbuf; 3565 } 3566 3567 #else 3568 3569 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id, 3570 qdf_nbuf_t nbuf) 3571 { 3572 return dp_tx_send(soc, vdev_id, nbuf); 3573 } 3574 3575 #endif 3576 3577 #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH 3578 static inline 3579 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf) 3580 { 3581 if (nbuf) { 3582 qdf_prefetch(&nbuf->len); 3583 qdf_prefetch(&nbuf->data); 3584 } 3585 } 3586 #else 3587 static inline 3588 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf) 3589 { 3590 } 3591 #endif 3592 3593 #ifdef DP_UMAC_HW_RESET_SUPPORT 3594 /* 3595 * dp_tx_drop() - Drop the frame on a given VAP 3596 * @soc: DP soc handle 3597 * @vdev_id: id of DP vdev handle 3598 * @nbuf: skb 3599 * 3600 * Drop all the incoming packets 3601 * 3602 * Return: nbuf 3603 * 3604 */ 3605 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 3606 qdf_nbuf_t nbuf) 3607 { 3608 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3609 struct dp_vdev *vdev = NULL; 3610 3611 vdev = soc->vdev_id_map[vdev_id]; 3612 if (qdf_unlikely(!vdev)) 3613 return nbuf; 3614 3615 DP_STATS_INC(vdev, tx_i.dropped.drop_ingress, 1); 3616 return nbuf; 3617 } 3618 3619 /* 3620 * dp_tx_exc_drop() - Drop the frame on a given VAP 3621 * @soc: DP soc handle 3622 * @vdev_id: id of DP vdev handle 3623 * @nbuf: skb 3624 * @tx_exc_metadata: Handle that holds exception path meta data 3625 * 3626 * Drop all the incoming packets 3627 * 3628 * Return: nbuf 3629 * 3630 */ 3631 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 3632 qdf_nbuf_t nbuf, 3633 struct cdp_tx_exception_metadata *tx_exc_metadata) 3634 { 3635 return dp_tx_drop(soc_hdl, vdev_id, nbuf); 3636 } 3637 #endif 3638 3639 #ifdef FEATURE_DIRECT_LINK 3640 /* 3641 * dp_vdev_tx_mark_to_fw() - Mark to_fw bit for the tx packet 3642 * @nbuf: skb 3643 * @vdev: DP vdev handle 3644 * 3645 * Return: None 3646 */ 3647 static inline void 
dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev) 3648 { 3649 if (qdf_unlikely(vdev->to_fw)) 3650 QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf) = 1; 3651 } 3652 #else 3653 static inline void dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev) 3654 { 3655 } 3656 #endif 3657 3658 /* 3659 * dp_tx_send() - Transmit a frame on a given VAP 3660 * @soc: DP soc handle 3661 * @vdev_id: id of DP vdev handle 3662 * @nbuf: skb 3663 * 3664 * Entry point for Core Tx layer (DP_TX) invoked from 3665 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-vap forwarding 3666 * cases 3667 * 3668 * Return: NULL on success, 3669 * nbuf when it fails to send 3670 */ 3671 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 3672 qdf_nbuf_t nbuf) 3673 { 3674 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3675 uint16_t peer_id = HTT_INVALID_PEER; 3676 /* 3677 * a memzero causes additional function call overhead, so clear 3678 * the structure via static initialization instead 3679 */ 3680 struct dp_tx_msdu_info_s msdu_info = {0}; 3681 struct dp_vdev *vdev = NULL; 3682 qdf_nbuf_t end_nbuf = NULL; 3683 3684 if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT)) 3685 return nbuf; 3686 3687 /* 3688 * dp_vdev_get_ref_by_id does an atomic operation; avoid using 3689 * it in the per-packet path. 3690 * 3691 * As in this path vdev memory is already protected with netdev 3692 * tx lock 3693 */ 3694 vdev = soc->vdev_id_map[vdev_id]; 3695 if (qdf_unlikely(!vdev)) 3696 return nbuf; 3697 3698 dp_vdev_tx_mark_to_fw(nbuf, vdev); 3699 3700 /* 3701 * Set Default Host TID value to invalid TID 3702 * (TID override disabled) 3703 */ 3704 msdu_info.tid = HTT_TX_EXT_TID_INVALID; 3705 DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf)); 3706 3707 if (qdf_unlikely(vdev->mesh_vdev)) { 3708 qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf, 3709 &msdu_info); 3710 if (!nbuf_mesh) { 3711 dp_verbose_debug("Extracting mesh metadata failed"); 3712 return nbuf; 3713 } 3714 nbuf = nbuf_mesh; 3715 } 3716 3717 /* 3718 * Get HW Queue to use for this frame. 3719 * TCL supports up to 4 DMA rings, out of which 3 rings are 3720 * dedicated for data and 1 for command. 3721 * "queue_id" maps to one hardware ring. 3722 * With each ring, we also associate a unique Tx descriptor pool 3723 * to minimize lock contention for these resources. 3724 */ 3725 dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue); 3726 DP_STATS_INC(vdev, tx_i.rcvd_per_core[msdu_info.tx_queue.desc_pool_id], 3727 1); 3728 3729 /* 3730 * TCL H/W supports 2 DSCP-TID mapping tables. 3731 * Table 1 - Default DSCP-TID mapping table 3732 * Table 2 - 1 DSCP-TID override table 3733 * 3734 * If we need a different DSCP-TID mapping for this vap, 3735 * call tid_classify to extract DSCP/ToS from frame and 3736 * map to a TID and store in msdu_info. This is later used 3737 * to fill in TCL Input descriptor (per-packet TID override). 3738 */ 3739 dp_tx_classify_tid(vdev, nbuf, &msdu_info); 3740 3741 /* 3742 * Classify the frame and call corresponding 3743 * "prepare" function which extracts the segment (TSO) 3744 * and fragmentation information (for TSO, SG, ME, or Raw) 3745 * into MSDU_INFO structure which is later used to fill 3746 * SW and HW descriptors.
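 *
 * For example (a sketch of the dispatch that follows): a TSO nbuf goes
 * through dp_tx_prepare_tso() and dp_tx_send_msdu_multiple(); a
 * non-linear SG nbuf through dp_tx_prepare_sg(); raw 802.11 frames
 * through dp_tx_prepare_raw(); and a simple linear frame falls through
 * to the send_single path with no MSDU extension descriptor.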
3747 */ 3748 if (qdf_nbuf_is_tso(nbuf)) { 3749 dp_verbose_debug("TSO frame %pK", vdev); 3750 DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1, 3751 qdf_nbuf_len(nbuf)); 3752 3753 if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) { 3754 DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1, 3755 qdf_nbuf_len(nbuf)); 3756 return nbuf; 3757 } 3758 3759 DP_STATS_INC(vdev, tx_i.rcvd.num, msdu_info.num_seg - 1); 3760 3761 goto send_multiple; 3762 } 3763 3764 /* SG */ 3765 if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) { 3766 if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) { 3767 if (qdf_unlikely(qdf_nbuf_linearize(nbuf))) 3768 return nbuf; 3769 } else { 3770 struct dp_tx_seg_info_s seg_info = {0}; 3771 3772 if (qdf_unlikely(is_nbuf_frm_rmnet(nbuf, &msdu_info))) 3773 goto send_single; 3774 3775 nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, 3776 &msdu_info); 3777 if (!nbuf) 3778 return NULL; 3779 3780 dp_verbose_debug("non-TSO SG frame %pK", vdev); 3781 3782 DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1, 3783 qdf_nbuf_len(nbuf)); 3784 3785 goto send_multiple; 3786 } 3787 } 3788 3789 if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf))) 3790 return NULL; 3791 3792 if (qdf_unlikely(dp_tx_mcast_drop(vdev, nbuf))) 3793 return nbuf; 3794 3795 /* RAW */ 3796 if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) { 3797 struct dp_tx_seg_info_s seg_info = {0}; 3798 3799 nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info); 3800 if (!nbuf) 3801 return NULL; 3802 3803 dp_verbose_debug("Raw frame %pK", vdev); 3804 3805 goto send_multiple; 3806 3807 } 3808 3809 if (qdf_unlikely(vdev->nawds_enabled)) { 3810 qdf_ether_header_t *eh = (qdf_ether_header_t *) 3811 qdf_nbuf_data(nbuf); 3812 if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) { 3813 uint16_t sa_peer_id = DP_INVALID_PEER; 3814 3815 if (!soc->ast_offload_support) { 3816 struct dp_ast_entry *ast_entry = NULL; 3817 3818 qdf_spin_lock_bh(&soc->ast_lock); 3819 ast_entry = dp_peer_ast_hash_find_by_pdevid 3820 (soc, 3821 (uint8_t *)(eh->ether_shost), 3822 vdev->pdev->pdev_id); 3823 if (ast_entry) 3824 sa_peer_id = ast_entry->peer_id; 3825 qdf_spin_unlock_bh(&soc->ast_lock); 3826 } 3827 3828 dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf, 3829 sa_peer_id); 3830 } 3831 peer_id = DP_INVALID_PEER; 3832 DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast, 3833 1, qdf_nbuf_len(nbuf)); 3834 } 3835 3836 send_single: 3837 /* Single linear frame */ 3838 /* 3839 * If nbuf is a simple linear frame, use send_single function to 3840 * prepare direct-buffer type TCL descriptor and enqueue to TCL 3841 * SRNG. There is no need to setup a MSDU extension descriptor. 3842 */ 3843 dp_tx_prefetch_nbuf_data(nbuf); 3844 3845 nbuf = dp_tx_send_msdu_single_wrapper(vdev, nbuf, &msdu_info, 3846 peer_id, end_nbuf); 3847 return nbuf; 3848 3849 send_multiple: 3850 nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info); 3851 3852 if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw)) 3853 dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf); 3854 3855 return nbuf; 3856 } 3857 3858 /** 3859 * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in a special 3860 * case to avoid the per-pkt check.
3861 * @soc: DP soc handle 3862 * @vdev_id: id of DP vdev handle 3863 * @nbuf: skb 3864 * 3865 * Entry point for Core Tx layer (DP_TX) invoked from 3866 * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send 3867 * with special condition to avoid per pkt check in dp_tx_send 3868 * 3869 * Return: NULL on success, 3870 * nbuf when it fails to send 3871 */ 3872 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl, 3873 uint8_t vdev_id, qdf_nbuf_t nbuf) 3874 { 3875 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 3876 struct dp_vdev *vdev = NULL; 3877 3878 if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT)) 3879 return nbuf; 3880 3881 /* 3882 * dp_vdev_get_ref_by_id does an atomic operation; avoid using 3883 * it in the per-packet path. 3884 * 3885 * As in this path vdev memory is already protected with netdev 3886 * tx lock 3887 */ 3888 vdev = soc->vdev_id_map[vdev_id]; 3889 if (qdf_unlikely(!vdev)) 3890 return nbuf; 3891 3892 if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev) 3893 == QDF_STATUS_E_FAILURE)) { 3894 DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1); 3895 return nbuf; 3896 } 3897 3898 return dp_tx_send(soc_hdl, vdev_id, nbuf); 3899 } 3900 3901 #ifdef UMAC_SUPPORT_PROXY_ARP 3902 /** 3903 * dp_tx_proxy_arp() - Tx proxy arp handler 3904 * @vdev: datapath vdev handle 3905 * @nbuf: sk buffer 3906 * 3907 * Return: status 3908 */ 3909 static inline 3910 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 3911 { 3912 if (vdev->osif_proxy_arp) 3913 return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf); 3914 3915 /* 3916 * when UMAC_SUPPORT_PROXY_ARP is defined, we expect 3917 * osif_proxy_arp has a valid function pointer assigned 3918 * to it 3919 */ 3920 dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n"); 3921 3922 return QDF_STATUS_NOT_INITIALIZED; 3923 } 3924 #else 3925 /** 3926 * dp_tx_proxy_arp() - Tx proxy arp handler 3927 * @vdev: datapath vdev handle 3928 * @nbuf: sk buffer 3929 * 3930 * This function always returns success when UMAC_SUPPORT_PROXY_ARP 3931 * is not defined.
 *
 * Return: status
 */
static inline
int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
#ifdef WLAN_MCAST_MLO
static bool
dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
		       struct dp_tx_desc_s *tx_desc,
		       qdf_nbuf_t nbuf,
		       uint8_t reinject_reason)
{
	if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
		if (soc->arch_ops.dp_tx_mcast_handler)
			soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);

		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
		return true;
	}

	return false;
}
#else /* WLAN_MCAST_MLO */
static inline bool
dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
		       struct dp_tx_desc_s *tx_desc,
		       qdf_nbuf_t nbuf,
		       uint8_t reinject_reason)
{
	return false;
}
#endif /* WLAN_MCAST_MLO */
#else
static inline bool
dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
		       struct dp_tx_desc_s *tx_desc,
		       qdf_nbuf_t nbuf,
		       uint8_t reinject_reason)
{
	return false;
}
#endif

/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @soc: datapath soc handle
 * @vdev: datapath vdev handle
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 * @reinject_reason: reinject reason from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * Return: none
 */
void dp_tx_reinject_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_tx_desc_s *tx_desc,
			    uint8_t *status,
			    uint8_t reinject_reason)
{
	struct dp_peer *peer = NULL;
	uint32_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_tx_msdu_info_s msdu_info;
#ifdef WDS_VENDOR_EXTENSION
	int is_mcast = 0, is_ucast = 0;
	int num_peers_3addr = 0;
	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
#endif
	struct dp_txrx_peer *txrx_peer;

	qdf_assert(vdev);

	dp_tx_debug("Tx reinject path");

	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
			 qdf_nbuf_len(tx_desc->nbuf));

	if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
		return;

#ifdef WDS_VENDOR_EXTENSION
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
	} else {
		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
	}
	is_ucast = !is_mcast;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		txrx_peer = dp_get_txrx_peer(peer);

		if (!txrx_peer || txrx_peer->bss_peer)
			continue;

		/* Detect wds peers that use 3-addr framing for mcast;
		 * if there are any, the bss_peer is used to send the
		 * mcast frame using 3-addr format. All wds enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
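		 *
		 * In short, one reinjected copy goes out per peer when
		 * (mirroring the checks in the transmit loop below):
		 *   bss_peer && num_peers_3addr && is_mcast       -> 3-addr copy
		 *   wds_enabled && wds_tx_mcast_4addr && is_mcast -> 4-addr copy
		 *   wds_enabled && wds_tx_ucast_4addr && is_ucast -> 4-addr copy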
4043 */ 4044 if (!txrx_peer->wds_enabled || 4045 !txrx_peer->wds_ecm.wds_tx_mcast_4addr) { 4046 num_peers_3addr = 1; 4047 break; 4048 } 4049 } 4050 qdf_spin_unlock_bh(&vdev->peer_list_lock); 4051 #endif 4052 4053 if (qdf_unlikely(vdev->mesh_vdev)) { 4054 DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf); 4055 } else { 4056 qdf_spin_lock_bh(&vdev->peer_list_lock); 4057 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { 4058 txrx_peer = dp_get_txrx_peer(peer); 4059 if (!txrx_peer) 4060 continue; 4061 4062 if ((txrx_peer->peer_id != HTT_INVALID_PEER) && 4063 #ifdef WDS_VENDOR_EXTENSION 4064 /* 4065 * . if 3-addr STA, then send on BSS Peer 4066 * . if Peer WDS enabled and accept 4-addr mcast, 4067 * send mcast on that peer only 4068 * . if Peer WDS enabled and accept 4-addr ucast, 4069 * send ucast on that peer only 4070 */ 4071 ((txrx_peer->bss_peer && num_peers_3addr && is_mcast) || 4072 (txrx_peer->wds_enabled && 4073 ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) || 4074 (is_ucast && 4075 txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) { 4076 #else 4077 (txrx_peer->bss_peer && 4078 (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) { 4079 #endif 4080 peer_id = DP_INVALID_PEER; 4081 4082 nbuf_copy = qdf_nbuf_copy(nbuf); 4083 4084 if (!nbuf_copy) { 4085 dp_tx_debug("nbuf copy failed"); 4086 break; 4087 } 4088 qdf_mem_zero(&msdu_info, sizeof(msdu_info)); 4089 dp_tx_get_queue(vdev, nbuf, 4090 &msdu_info.tx_queue); 4091 4092 nbuf_copy = dp_tx_send_msdu_single(vdev, 4093 nbuf_copy, 4094 &msdu_info, 4095 peer_id, 4096 NULL); 4097 4098 if (nbuf_copy) { 4099 dp_tx_debug("pkt send failed"); 4100 qdf_nbuf_free(nbuf_copy); 4101 } 4102 } 4103 } 4104 qdf_spin_unlock_bh(&vdev->peer_list_lock); 4105 4106 qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf, 4107 QDF_DMA_TO_DEVICE, nbuf->len); 4108 qdf_nbuf_free(nbuf); 4109 } 4110 4111 dp_tx_desc_release(tx_desc, tx_desc->pool_id); 4112 } 4113 4114 /** 4115 * dp_tx_inspect_handler() - Tx Inspect Handler 4116 * @soc: datapath soc handle 4117 * @vdev: datapath vdev handle 4118 * @tx_desc: software descriptor head pointer 4119 * @status : Tx completion status from HTT descriptor 4120 * 4121 * Handles Tx frames sent back to Host for inspection 4122 * (ProxyARP) 4123 * 4124 * Return: none 4125 */ 4126 void dp_tx_inspect_handler(struct dp_soc *soc, 4127 struct dp_vdev *vdev, 4128 struct dp_tx_desc_s *tx_desc, 4129 uint8_t *status) 4130 { 4131 4132 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 4133 "%s Tx inspect path", 4134 __func__); 4135 4136 DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1, 4137 qdf_nbuf_len(tx_desc->nbuf)); 4138 4139 DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf); 4140 dp_tx_desc_release(tx_desc, tx_desc->pool_id); 4141 } 4142 4143 #ifdef MESH_MODE_SUPPORT 4144 /** 4145 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats 4146 * in mesh meta header 4147 * @tx_desc: software descriptor head pointer 4148 * @ts: pointer to tx completion stats 4149 * Return: none 4150 */ 4151 static 4152 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc, 4153 struct hal_tx_completion_status *ts) 4154 { 4155 qdf_nbuf_t netbuf = tx_desc->nbuf; 4156 4157 if (!tx_desc->msdu_ext_desc) { 4158 if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) { 4159 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 4160 "netbuf %pK offset %d", 4161 netbuf, tx_desc->pkt_offset); 4162 return; 4163 } 4164 } 4165 } 4166 4167 #else 4168 static 4169 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc, 4170 struct 
hal_tx_completion_status *ts)
{
}

#endif

#ifdef CONFIG_SAWF
static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
					 struct dp_vdev *vdev,
					 struct dp_txrx_peer *txrx_peer,
					 struct dp_tx_desc_s *tx_desc,
					 struct hal_tx_completion_status *ts,
					 uint8_t tid)
{
	dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
					   ts, tid);
}

static void dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
				    uint32_t nw_delay,
				    uint32_t sw_delay,
				    uint32_t hw_delay)
{
	dp_peer_tid_delay_avg(tx_delay,
			      nw_delay,
			      sw_delay,
			      hw_delay);
}
#else
static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
					 struct dp_vdev *vdev,
					 struct dp_txrx_peer *txrx_peer,
					 struct dp_tx_desc_s *tx_desc,
					 struct hal_tx_completion_status *ts,
					 uint8_t tid)
{
}

static inline void
dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
			uint32_t nw_delay, uint32_t sw_delay,
			uint32_t hw_delay)
{
}
#endif

#ifdef QCA_PEER_EXT_STATS
#ifdef WLAN_CONFIG_TX_DELAY
static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
				    struct dp_tx_desc_s *tx_desc,
				    struct hal_tx_completion_status *ts,
				    struct dp_vdev *vdev)
{
	struct dp_soc *soc = vdev->pdev->soc;
	struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
	int64_t timestamp_ingress, timestamp_hw_enqueue;
	uint32_t sw_enqueue_delay, fwhw_transmit_delay = 0;

	if (!ts->valid)
		return;

	timestamp_ingress = qdf_nbuf_get_timestamp_us(tx_desc->nbuf);
	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);

	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);

	if (soc->arch_ops.dp_tx_compute_hw_delay)
		if (!soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
							  &fwhw_transmit_delay))
			dp_hist_update_stats(&tx_delay->hwtx_delay,
					     fwhw_transmit_delay);

	dp_tx_compute_delay_avg(tx_delay, 0, sw_enqueue_delay,
				fwhw_transmit_delay);
}
#else
/*
 * dp_tx_compute_tid_delay() - Compute per TID delay
 * @stats: Per TID delay stats
 * @tx_desc: Software Tx descriptor
 * @ts: Tx completion status
 * @vdev: vdev handle
 *
 * Compute the software enqueue and hw enqueue delays and
 * update the respective histograms
 *
 * Return: void
 */
static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
				    struct dp_tx_desc_s *tx_desc,
				    struct hal_tx_completion_status *ts,
				    struct dp_vdev *vdev)
{
	struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
	uint32_t sw_enqueue_delay, fwhw_transmit_delay;

	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
	fwhw_transmit_delay = (uint32_t)(current_timestamp -
					 timestamp_hw_enqueue);

	/*
	 * Update the Tx software enqueue delay and the HW
	 * enqueue-to-completion delay.
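	 *
	 * Worked example (ms units, timestamps illustrative): a frame
	 * that ingressed at t=100, was enqueued to HW at t=103 and
	 * completes now at t=110 yields sw_enqueue_delay = 3 (103 - 100)
	 * and fwhw_transmit_delay = 7 (110 - 103); each value is binned
	 * into its respective histogram below.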
	 */
	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
}
#endif

/*
 * dp_tx_update_peer_delay_stats() - Update the peer delay stats
 * @txrx_peer: DP peer context
 * @tx_desc: Tx software descriptor
 * @ts: Tx completion status
 * @ring_id: ring number
 *
 * Update the peer extended stats. These are enhanced delay stats
 * maintained at per-msdu level.
 *
 * Return: void
 */
static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
					  struct dp_tx_desc_s *tx_desc,
					  struct hal_tx_completion_status *ts,
					  uint8_t ring_id)
{
	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
	struct dp_soc *soc = NULL;
	struct dp_peer_delay_stats *delay_stats = NULL;
	uint8_t tid;

	soc = pdev->soc;
	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
		return;

	if (!txrx_peer->delay_stats)
		return;

	tid = ts->tid;
	delay_stats = txrx_peer->delay_stats;

	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);

	/*
	 * For non-TID packets use the highest data TID
	 * (CDP_MAX_DATA_TIDS - 1)
	 */
	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
		tid = CDP_MAX_DATA_TIDS - 1;

	dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
				tx_desc, ts, txrx_peer->vdev);
}
#else
static inline
void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
				   struct dp_tx_desc_s *tx_desc,
				   struct hal_tx_completion_status *ts,
				   uint8_t ring_id)
{
}
#endif

#ifdef WLAN_PEER_JITTER
/*
 * dp_tx_jitter_get_avg_jitter() - compute the average jitter
 * @curr_delay: Current delay
 * @prev_delay: Previous delay
 * @avg_jitter: Average Jitter
 * Return: Newly Computed Average Jitter
 */
static uint32_t dp_tx_jitter_get_avg_jitter(uint32_t curr_delay,
					    uint32_t prev_delay,
					    uint32_t avg_jitter)
{
	uint32_t curr_jitter;
	int32_t jitter_diff;

	curr_jitter = qdf_abs(curr_delay - prev_delay);
	if (!avg_jitter)
		return curr_jitter;

	jitter_diff = curr_jitter - avg_jitter;
	if (jitter_diff < 0)
		avg_jitter = avg_jitter -
			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
	else
		avg_jitter = avg_jitter +
			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);

	return avg_jitter;
}

/*
 * dp_tx_jitter_get_avg_delay() - compute the average delay
 * @curr_delay: Current delay
 * @avg_delay: Average delay
 * Return: Newly Computed Average Delay
 */
static uint32_t dp_tx_jitter_get_avg_delay(uint32_t curr_delay,
					   uint32_t avg_delay)
{
	int32_t delay_diff;

	if (!avg_delay)
		return curr_delay;

	delay_diff = curr_delay - avg_delay;
	if (delay_diff < 0)
		avg_delay = avg_delay - (qdf_abs(delay_diff) >>
					 DP_AVG_DELAY_WEIGHT_DENOM);
	else
		avg_delay = avg_delay + (qdf_abs(delay_diff) >>
					 DP_AVG_DELAY_WEIGHT_DENOM);

	return avg_delay;
}

#ifdef WLAN_CONFIG_TX_DELAY
/*
 * dp_tx_compute_cur_delay() - get the current delay
 * @soc: soc handle
 * @vdev: vdev structure for data path state
 * @ts: Tx completion status
 * @curr_delay: current delay
 * @tx_desc: tx descriptor
 * Return: QDF_STATUS
 */
static
QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
				   struct dp_vdev *vdev,
				   struct hal_tx_completion_status *ts,
				   uint32_t *curr_delay,
				   struct dp_tx_desc_s *tx_desc)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;

	if (soc->arch_ops.dp_tx_compute_hw_delay)
		status = soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
							      curr_delay);
	return status;
}
#else
static
QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
				   struct dp_vdev *vdev,
				   struct hal_tx_completion_status *ts,
				   uint32_t *curr_delay,
				   struct dp_tx_desc_s *tx_desc)
{
	int64_t current_timestamp, timestamp_hw_enqueue;

	current_timestamp = qdf_ktime_to_us(qdf_ktime_real_get());
	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
	*curr_delay = (uint32_t)(current_timestamp - timestamp_hw_enqueue);

	return QDF_STATUS_SUCCESS;
}
#endif

/* dp_tx_compute_tid_jitter() - compute per tid per ring jitter
 * @jitter: per tid per ring jitter stats
 * @ts: Tx completion status
 * @vdev: vdev structure for data path state
 * @tx_desc: tx descriptor
 * Return: void
 */
static void dp_tx_compute_tid_jitter(struct cdp_peer_tid_stats *jitter,
				     struct hal_tx_completion_status *ts,
				     struct dp_vdev *vdev,
				     struct dp_tx_desc_s *tx_desc)
{
	uint32_t curr_delay, avg_delay, avg_jitter, prev_delay;
	struct dp_soc *soc = vdev->pdev->soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;

	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
		jitter->tx_drop += 1;
		return;
	}

	status = dp_tx_compute_cur_delay(soc, vdev, ts, &curr_delay,
					 tx_desc);

	if (QDF_IS_STATUS_SUCCESS(status)) {
		avg_delay = jitter->tx_avg_delay;
		avg_jitter = jitter->tx_avg_jitter;
		prev_delay = jitter->tx_prev_delay;
		avg_jitter = dp_tx_jitter_get_avg_jitter(curr_delay,
							 prev_delay,
							 avg_jitter);
		avg_delay = dp_tx_jitter_get_avg_delay(curr_delay, avg_delay);
		jitter->tx_avg_delay = avg_delay;
		jitter->tx_avg_jitter = avg_jitter;
		jitter->tx_prev_delay = curr_delay;
		jitter->tx_total_success += 1;
	} else if (status == QDF_STATUS_E_FAILURE) {
		jitter->tx_avg_err += 1;
	}
}

/* dp_tx_update_peer_jitter_stats() - Update the peer jitter stats
 * @txrx_peer: DP peer context
 * @tx_desc: Tx software descriptor
 * @ts: Tx completion status
 * @ring_id: ring number
 * Return: void
 */
static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
					   struct dp_tx_desc_s *tx_desc,
					   struct hal_tx_completion_status *ts,
					   uint8_t ring_id)
{
	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct cdp_peer_tid_stats *jitter_stats = NULL;
	uint8_t tid;
	struct cdp_peer_tid_stats *tid_stats = NULL;

	if (qdf_likely(!wlan_cfg_is_peer_jitter_stats_enabled(soc->wlan_cfg_ctx)))
		return;

	tid = ts->tid;
	jitter_stats = txrx_peer->jitter_stats;
	qdf_assert_always(jitter_stats);
	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
	/*
	 * For non-TID packets use the highest data TID
	 * (CDP_MAX_DATA_TIDS - 1)
	 */
	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
		tid = CDP_MAX_DATA_TIDS - 1;

	tid_stats = &jitter_stats[tid * CDP_MAX_TXRX_CTX + ring_id];
	dp_tx_compute_tid_jitter(tid_stats,
				 ts, txrx_peer->vdev, tx_desc);
}
#else
static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
					   struct dp_tx_desc_s *tx_desc,
					   struct hal_tx_completion_status *ts,
					   uint8_t ring_id)
{
}
#endif

#ifdef HW_TX_DELAY_STATS_ENABLE
/**
 * dp_update_tx_delay_stats() - update the delay stats
 * @vdev: vdev handle
 * @delay: delay in ms or us based on the flag delay_in_us
 * @tid: tid value
 * @mode: type of tx delay mode
 * @ring_id: ring number
 * @delay_in_us: flag to indicate whether the delay is in ms or us
 *
 * Return: none
 */
static inline
void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
{
	struct cdp_tid_tx_stats *tstats =
		&vdev->stats.tid_tx_stats[ring_id][tid];

	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
			      delay_in_us);
}
#else
static inline
void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
{
	struct cdp_tid_tx_stats *tstats =
		&vdev->pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
			      delay_in_us);
}
#endif

/**
 * dp_tx_compute_delay() - Compute the SW enqueue, HW transmit and
 *			   interframe delays for a completed frame
 * @vdev: vdev handle
 * @tx_desc: tx descriptor
 * @tid: tid value
 * @ring_id: TCL or WBM ring number for transmit path
 *
 * Return: none
 */
void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
			 uint8_t tid, uint8_t ring_id)
{
	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
	uint32_t fwhw_transmit_delay_us;

	if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
	    qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
		return;

	if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
		fwhw_transmit_delay_us =
			qdf_ktime_to_us(qdf_ktime_real_get()) -
			qdf_ktime_to_us(tx_desc->timestamp);

		/*
		 * Delay between packet enqueued to HW and Tx completion in us
		 */
		dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
					 CDP_DELAY_STATS_FW_HW_TRANSMIT,
					 ring_id, true);
		/*
		 * For MCL, only enqueue to completion delay is required
		 * so return if the vdev flag is enabled.
		 */
		return;
	}

	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
	fwhw_transmit_delay = (uint32_t)(current_timestamp -
					 timestamp_hw_enqueue);

	if (!timestamp_hw_enqueue)
		return;
	/*
	 * Delay between packet enqueued to HW and Tx completion in ms
	 */
	dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
				 CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
				 false);

	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
	interframe_delay = (uint32_t)(timestamp_ingress -
				      vdev->prev_tx_enq_tstamp);

	/*
	 * Delay in software enqueue
	 */
	dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
				 CDP_DELAY_STATS_SW_ENQ, ring_id,
				 false);

	/*
	 * Update interframe delay stats calculated at hardstart receive point.
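	 * For example, if the previous frame hit hardstart at t=100 ms and
	 * the current frame at t=112 ms, interframe_delay is 12 ms.
	 *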
	 * Value of vdev->prev_tx_enq_tstamp will be 0 for the 1st frame, so
	 * the interframe delay will not be calculated correctly for the 1st
	 * frame. On the other hand, this helps avoid an extra per-packet
	 * check of !vdev->prev_tx_enq_tstamp.
	 */
	dp_update_tx_delay_stats(vdev, interframe_delay, tid,
				 CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
				 false);
	vdev->prev_tx_enq_tstamp = timestamp_ingress;
}

#ifdef DISABLE_DP_STATS
static inline
void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
			    struct dp_txrx_peer *txrx_peer)
{
}
#else
static inline void
dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
{
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;

	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
	if (subtype != QDF_PROTO_INVALID)
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
					  1);
}
#endif

#ifndef QCA_ENHANCED_STATS_SUPPORT
#ifdef DP_PEER_EXTENDED_API
static inline uint8_t
dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
{
	return txrx_peer->mpdu_retry_threshold;
}
#else
static inline uint8_t
dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
{
	return 0;
}
#endif

/**
 * dp_tx_update_peer_extd_stats() - Update Tx extended path stats for peer
 *
 * @ts: Tx completion status
 * @txrx_peer: datapath txrx_peer handle
 *
 * Return: void
 */
static inline void
dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
			     struct dp_txrx_peer *txrx_peer)
{
	uint8_t mcs, pkt_type, dst_mcs_idx;
	uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);

	mcs = ts->mcs;
	pkt_type = ts->pkt_type;
	/* do HW to SW pkt type conversion */
	pkt_type = (pkt_type >= HAL_DOT11_MAX ?
DOT11_MAX : 4684 hal_2_dp_pkt_type_map[pkt_type]); 4685 4686 dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs); 4687 if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx) 4688 DP_PEER_EXTD_STATS_INC(txrx_peer, 4689 tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx], 4690 1); 4691 4692 DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1); 4693 DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1); 4694 DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi); 4695 DP_PEER_EXTD_STATS_INC(txrx_peer, 4696 tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1); 4697 DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc); 4698 DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc); 4699 DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1); 4700 if (ts->first_msdu) { 4701 DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1, 4702 ts->transmit_cnt > 1); 4703 4704 if (!retry_threshold) 4705 return; 4706 DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries, 4707 qdf_do_div(ts->transmit_cnt, 4708 retry_threshold), 4709 ts->transmit_cnt > retry_threshold); 4710 } 4711 } 4712 #else 4713 static inline void 4714 dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts, 4715 struct dp_txrx_peer *txrx_peer) 4716 { 4717 } 4718 #endif 4719 4720 /** 4721 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications 4722 * per wbm ring 4723 * 4724 * @tx_desc: software descriptor head pointer 4725 * @ts: Tx completion status 4726 * @peer: peer handle 4727 * @ring_id: ring number 4728 * 4729 * Return: None 4730 */ 4731 static inline void 4732 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc, 4733 struct hal_tx_completion_status *ts, 4734 struct dp_txrx_peer *txrx_peer, uint8_t ring_id) 4735 { 4736 struct dp_pdev *pdev = txrx_peer->vdev->pdev; 4737 uint8_t tid = ts->tid; 4738 uint32_t length; 4739 struct cdp_tid_tx_stats *tid_stats; 4740 4741 if (!pdev) 4742 return; 4743 4744 if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) 4745 tid = CDP_MAX_DATA_TIDS - 1; 4746 4747 tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid]; 4748 4749 if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) { 4750 dp_err_rl("Release source:%d is not from TQM", ts->release_src); 4751 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1); 4752 return; 4753 } 4754 4755 length = qdf_nbuf_len(tx_desc->nbuf); 4756 DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length); 4757 4758 if (qdf_unlikely(pdev->delay_stats_flag) || 4759 qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev))) 4760 dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id); 4761 4762 if (ts->status < CDP_MAX_TX_TQM_STATUS) { 4763 tid_stats->tqm_status_cnt[ts->status]++; 4764 } 4765 4766 if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) { 4767 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1, 4768 ts->transmit_cnt > 1); 4769 4770 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count, 4771 1, ts->transmit_cnt > 2); 4772 4773 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma); 4774 4775 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1, 4776 ts->msdu_part_of_amsdu); 4777 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1, 4778 !ts->msdu_part_of_amsdu); 4779 4780 txrx_peer->stats.per_pkt_stats.tx.last_tx_ts = 4781 qdf_system_ticks(); 4782 4783 dp_tx_update_peer_extd_stats(ts, txrx_peer); 4784 4785 return; 4786 } 4787 4788 /* 4789 * tx_failed is ideally supposed to be updated from HTT ppdu 4790 * completion stats. 
But in IPQ807X/IPQ6018 chipsets owing to 4791 * hw limitation there are no completions for failed cases. 4792 * Hence updating tx_failed from data path. Please note that 4793 * if tx_failed is fixed to be from ppdu, then this has to be 4794 * removed 4795 */ 4796 DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1); 4797 4798 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1, 4799 ts->transmit_cnt > DP_RETRY_COUNT); 4800 dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer); 4801 4802 if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) { 4803 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1); 4804 } else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) { 4805 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1, 4806 length); 4807 } else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) { 4808 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1); 4809 } else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) { 4810 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1); 4811 } else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) { 4812 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1); 4813 } else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) { 4814 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1); 4815 } else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) { 4816 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1); 4817 } else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) { 4818 DP_PEER_PER_PKT_STATS_INC(txrx_peer, 4819 tx.dropped.fw_rem_queue_disable, 1); 4820 } else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) { 4821 DP_PEER_PER_PKT_STATS_INC(txrx_peer, 4822 tx.dropped.fw_rem_no_match, 1); 4823 } else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) { 4824 DP_PEER_PER_PKT_STATS_INC(txrx_peer, 4825 tx.dropped.drop_threshold, 1); 4826 } else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) { 4827 DP_PEER_PER_PKT_STATS_INC(txrx_peer, 4828 tx.dropped.drop_link_desc_na, 1); 4829 } else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) { 4830 DP_PEER_PER_PKT_STATS_INC(txrx_peer, 4831 tx.dropped.invalid_drop, 1); 4832 } else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) { 4833 DP_PEER_PER_PKT_STATS_INC(txrx_peer, 4834 tx.dropped.mcast_vdev_drop, 1); 4835 } else { 4836 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1); 4837 } 4838 } 4839 4840 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 4841 /** 4842 * dp_tx_flow_pool_lock() - take flow pool lock 4843 * @soc: core txrx main context 4844 * @tx_desc: tx desc 4845 * 4846 * Return: None 4847 */ 4848 static inline 4849 void dp_tx_flow_pool_lock(struct dp_soc *soc, 4850 struct dp_tx_desc_s *tx_desc) 4851 { 4852 struct dp_tx_desc_pool_s *pool; 4853 uint8_t desc_pool_id; 4854 4855 desc_pool_id = tx_desc->pool_id; 4856 pool = &soc->tx_desc[desc_pool_id]; 4857 4858 qdf_spin_lock_bh(&pool->flow_pool_lock); 4859 } 4860 4861 /** 4862 * dp_tx_flow_pool_unlock() - release flow pool lock 4863 * @soc: core txrx main context 4864 * @tx_desc: tx desc 4865 * 4866 * Return: None 4867 */ 4868 static inline 4869 void dp_tx_flow_pool_unlock(struct dp_soc *soc, 4870 struct dp_tx_desc_s *tx_desc) 4871 { 4872 struct dp_tx_desc_pool_s *pool; 4873 uint8_t desc_pool_id; 4874 4875 desc_pool_id = tx_desc->pool_id; 4876 pool = &soc->tx_desc[desc_pool_id]; 4877 4878 qdf_spin_unlock_bh(&pool->flow_pool_lock); 4879 } 4880 #else 4881 static inline 4882 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) 4883 { 4884 } 4885 4886 static inline 4887 void 
dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) 4888 { 4889 } 4890 #endif 4891 4892 /** 4893 * dp_tx_notify_completion() - Notify tx completion for this desc 4894 * @soc: core txrx main context 4895 * @vdev: datapath vdev handle 4896 * @tx_desc: tx desc 4897 * @netbuf: buffer 4898 * @status: tx status 4899 * 4900 * Return: none 4901 */ 4902 static inline void dp_tx_notify_completion(struct dp_soc *soc, 4903 struct dp_vdev *vdev, 4904 struct dp_tx_desc_s *tx_desc, 4905 qdf_nbuf_t netbuf, 4906 uint8_t status) 4907 { 4908 void *osif_dev; 4909 ol_txrx_completion_fp tx_compl_cbk = NULL; 4910 uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC); 4911 4912 qdf_assert(tx_desc); 4913 4914 if (!vdev || 4915 !vdev->osif_vdev) { 4916 return; 4917 } 4918 4919 osif_dev = vdev->osif_vdev; 4920 tx_compl_cbk = vdev->tx_comp; 4921 4922 if (status == HAL_TX_TQM_RR_FRAME_ACKED) 4923 flag |= BIT(QDF_TX_RX_STATUS_OK); 4924 4925 if (tx_compl_cbk) 4926 tx_compl_cbk(netbuf, osif_dev, flag); 4927 } 4928 4929 /** dp_tx_sojourn_stats_process() - Collect sojourn stats 4930 * @pdev: pdev handle 4931 * @tid: tid value 4932 * @txdesc_ts: timestamp from txdesc 4933 * @ppdu_id: ppdu id 4934 * 4935 * Return: none 4936 */ 4937 #ifdef FEATURE_PERPKT_INFO 4938 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev, 4939 struct dp_txrx_peer *txrx_peer, 4940 uint8_t tid, 4941 uint64_t txdesc_ts, 4942 uint32_t ppdu_id) 4943 { 4944 uint64_t delta_ms; 4945 struct cdp_tx_sojourn_stats *sojourn_stats; 4946 struct dp_peer *primary_link_peer = NULL; 4947 struct dp_soc *link_peer_soc = NULL; 4948 4949 if (qdf_unlikely(!pdev->enhanced_stats_en)) 4950 return; 4951 4952 if (qdf_unlikely(tid == HTT_INVALID_TID || 4953 tid >= CDP_DATA_TID_MAX)) 4954 return; 4955 4956 if (qdf_unlikely(!pdev->sojourn_buf)) 4957 return; 4958 4959 primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc, 4960 txrx_peer->peer_id, 4961 DP_MOD_ID_TX_COMP); 4962 4963 if (qdf_unlikely(!primary_link_peer)) 4964 return; 4965 4966 sojourn_stats = (struct cdp_tx_sojourn_stats *) 4967 qdf_nbuf_data(pdev->sojourn_buf); 4968 4969 link_peer_soc = primary_link_peer->vdev->pdev->soc; 4970 sojourn_stats->cookie = (void *) 4971 dp_monitor_peer_get_peerstats_ctx(link_peer_soc, 4972 primary_link_peer); 4973 4974 delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) - 4975 txdesc_ts; 4976 qdf_ewma_tx_lag_add(&txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid], 4977 delta_ms); 4978 sojourn_stats->sum_sojourn_msdu[tid] = delta_ms; 4979 sojourn_stats->num_msdus[tid] = 1; 4980 sojourn_stats->avg_sojourn_msdu[tid].internal = 4981 txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid].internal; 4982 dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc, 4983 pdev->sojourn_buf, HTT_INVALID_PEER, 4984 WDI_NO_VAL, pdev->pdev_id); 4985 sojourn_stats->sum_sojourn_msdu[tid] = 0; 4986 sojourn_stats->num_msdus[tid] = 0; 4987 sojourn_stats->avg_sojourn_msdu[tid].internal = 0; 4988 4989 dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP); 4990 } 4991 #else 4992 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev, 4993 struct dp_txrx_peer *txrx_peer, 4994 uint8_t tid, 4995 uint64_t txdesc_ts, 4996 uint32_t ppdu_id) 4997 { 4998 } 4999 #endif 5000 5001 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2 5002 /** 5003 * dp_send_completion_to_pkt_capture() - send tx completion to packet capture 5004 * @soc: dp_soc handle 5005 * @desc: Tx Descriptor 5006 * @ts: HAL Tx completion descriptor contents 5007 * 5008 * This function is used to send tx 
completion to packet capture 5009 */ 5010 void dp_send_completion_to_pkt_capture(struct dp_soc *soc, 5011 struct dp_tx_desc_s *desc, 5012 struct hal_tx_completion_status *ts) 5013 { 5014 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc, 5015 desc, ts->peer_id, 5016 WDI_NO_VAL, desc->pdev->pdev_id); 5017 } 5018 #endif 5019 5020 /** 5021 * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf 5022 * @soc: DP Soc handle 5023 * @tx_desc: software Tx descriptor 5024 * @ts : Tx completion status from HAL/HTT descriptor 5025 * 5026 * Return: none 5027 */ 5028 void 5029 dp_tx_comp_process_desc(struct dp_soc *soc, 5030 struct dp_tx_desc_s *desc, 5031 struct hal_tx_completion_status *ts, 5032 struct dp_txrx_peer *txrx_peer) 5033 { 5034 uint64_t time_latency = 0; 5035 uint16_t peer_id = DP_INVALID_PEER_ID; 5036 5037 /* 5038 * m_copy/tx_capture modes are not supported for 5039 * scatter gather packets 5040 */ 5041 if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) { 5042 time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) - 5043 qdf_ktime_to_ms(desc->timestamp)); 5044 } 5045 5046 dp_send_completion_to_pkt_capture(soc, desc, ts); 5047 5048 if (dp_tx_pkt_tracepoints_enabled()) 5049 qdf_trace_dp_packet(desc->nbuf, QDF_TX, 5050 desc->msdu_ext_desc ? 5051 desc->msdu_ext_desc->tso_desc : NULL, 5052 qdf_ktime_to_ms(desc->timestamp)); 5053 5054 if (!(desc->msdu_ext_desc)) { 5055 dp_tx_enh_unmap(soc, desc); 5056 if (txrx_peer) 5057 peer_id = txrx_peer->peer_id; 5058 5059 if (QDF_STATUS_SUCCESS == 5060 dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) { 5061 return; 5062 } 5063 5064 if (QDF_STATUS_SUCCESS == 5065 dp_get_completion_indication_for_stack(soc, 5066 desc->pdev, 5067 txrx_peer, ts, 5068 desc->nbuf, 5069 time_latency)) { 5070 dp_send_completion_to_stack(soc, 5071 desc->pdev, 5072 ts->peer_id, 5073 ts->ppdu_id, 5074 desc->nbuf); 5075 return; 5076 } 5077 } 5078 5079 desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX; 5080 dp_tx_comp_free_buf(soc, desc, false); 5081 } 5082 5083 #ifdef DISABLE_DP_STATS 5084 /** 5085 * dp_tx_update_connectivity_stats() - update tx connectivity stats 5086 * @soc: core txrx main context 5087 * @tx_desc: tx desc 5088 * @status: tx status 5089 * 5090 * Return: none 5091 */ 5092 static inline 5093 void dp_tx_update_connectivity_stats(struct dp_soc *soc, 5094 struct dp_vdev *vdev, 5095 struct dp_tx_desc_s *tx_desc, 5096 uint8_t status) 5097 { 5098 } 5099 #else 5100 static inline 5101 void dp_tx_update_connectivity_stats(struct dp_soc *soc, 5102 struct dp_vdev *vdev, 5103 struct dp_tx_desc_s *tx_desc, 5104 uint8_t status) 5105 { 5106 void *osif_dev; 5107 ol_txrx_stats_rx_fp stats_cbk; 5108 uint8_t pkt_type; 5109 5110 qdf_assert(tx_desc); 5111 5112 if (!vdev || 5113 !vdev->osif_vdev || 5114 !vdev->stats_cb) 5115 return; 5116 5117 osif_dev = vdev->osif_vdev; 5118 stats_cbk = vdev->stats_cb; 5119 5120 stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type); 5121 if (status == HAL_TX_TQM_RR_FRAME_ACKED) 5122 stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT, 5123 &pkt_type); 5124 } 5125 #endif 5126 5127 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY) 5128 /* Mask for bit29 ~ bit31 */ 5129 #define DP_TX_TS_BIT29_31_MASK 0xE0000000 5130 /* Timestamp value (unit us) if bit29 is set */ 5131 #define DP_TX_TS_BIT29_SET_VALUE BIT(29) 5132 /** 5133 * dp_tx_adjust_enqueue_buffer_ts() - adjust the enqueue buffer_timestamp 5134 * @ack_ts: OTA ack timestamp, unit us. 
 * @enqueue_ts: timestamp when TCL enqueued the TX data to TQM, unit us.
 * @base_delta_ts: base timestamp delta for ack_ts and enqueue_ts
 *
 * This function restores the upper 3 bits (bit29 ~ bit31) of the
 * buffer_timestamp in the wbm2sw ring entry. Currently buffer_timestamp
 * can only hold up to 0x7FFFF * 1024 us (29 bits), so if the timestamp
 * exceeds 0x7FFFF * 1024 us, bit29 ~ bit31 are lost.
 *
 * Return: the adjusted buffer_timestamp value
 */
static inline
uint32_t dp_tx_adjust_enqueue_buffer_ts(uint32_t ack_ts,
					uint32_t enqueue_ts,
					uint32_t base_delta_ts)
{
	uint32_t ack_buffer_ts;
	uint32_t ack_buffer_ts_bit29_31;
	uint32_t adjusted_enqueue_ts;

	/* corresponding buffer_timestamp value when receive OTA Ack */
	ack_buffer_ts = ack_ts - base_delta_ts;
	ack_buffer_ts_bit29_31 = ack_buffer_ts & DP_TX_TS_BIT29_31_MASK;

	/* restore the bit29 ~ bit31 value */
	adjusted_enqueue_ts = ack_buffer_ts_bit29_31 | enqueue_ts;

	/*
	 * If the actual enqueue_ts value occupied 29 bits only, and that
	 * enqueue_ts value plus the real UL delay overflows 29 bits, then
	 * bit-29 should not be marked, otherwise an extra 0x20000000 us is
	 * added to enqueue_ts.
	 */
	if (qdf_unlikely(adjusted_enqueue_ts > ack_buffer_ts))
		adjusted_enqueue_ts -= DP_TX_TS_BIT29_SET_VALUE;

	return adjusted_enqueue_ts;
}

QDF_STATUS
dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
			  uint32_t delta_tsf,
			  uint32_t *delay_us)
{
	uint32_t buffer_ts;
	uint32_t delay;

	if (!delay_us)
		return QDF_STATUS_E_INVAL;

	/* if tx_rate_stats_info_valid is 0, the tsf is invalid */
	if (!ts->valid)
		return QDF_STATUS_E_INVAL;

	/* buffer_timestamp is in units of 1024 us and is [31:13] of
	 * WBM_RELEASE_RING_4. After left shift 10 bits, it's
	 * valid up to 29 bits.
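	 *
	 * Worked example (illustrative numbers, base_delta_ts = 0): for an
	 * OTA ack at ts->tsf = 0x21000000 us, bits 29..31 of the ack time
	 * are 0x20000000; OR-ing them into a truncated 29-bit enqueue
	 * timestamp of 0x00FF0000 restores it to 0x20FF0000 before the
	 * delay subtraction below.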
5191 */ 5192 buffer_ts = ts->buffer_timestamp << 10; 5193 buffer_ts = dp_tx_adjust_enqueue_buffer_ts(ts->tsf, 5194 buffer_ts, delta_tsf); 5195 5196 delay = ts->tsf - buffer_ts - delta_tsf; 5197 5198 if (qdf_unlikely(delay & 0x80000000)) { 5199 dp_err_rl("delay = 0x%x (-ve)\n" 5200 "release_src = %d\n" 5201 "ppdu_id = 0x%x\n" 5202 "peer_id = 0x%x\n" 5203 "tid = 0x%x\n" 5204 "release_reason = %d\n" 5205 "tsf = %u (0x%x)\n" 5206 "buffer_timestamp = %u (0x%x)\n" 5207 "delta_tsf = %u (0x%x)\n", 5208 delay, ts->release_src, ts->ppdu_id, ts->peer_id, 5209 ts->tid, ts->status, ts->tsf, ts->tsf, 5210 ts->buffer_timestamp, ts->buffer_timestamp, 5211 delta_tsf, delta_tsf); 5212 5213 delay = 0; 5214 goto end; 5215 } 5216 5217 delay &= 0x1FFFFFFF; /* mask 29 BITS */ 5218 if (delay > 0x1000000) { 5219 dp_info_rl("----------------------\n" 5220 "Tx completion status:\n" 5221 "----------------------\n" 5222 "release_src = %d\n" 5223 "ppdu_id = 0x%x\n" 5224 "release_reason = %d\n" 5225 "tsf = %u (0x%x)\n" 5226 "buffer_timestamp = %u (0x%x)\n" 5227 "delta_tsf = %u (0x%x)\n", 5228 ts->release_src, ts->ppdu_id, ts->status, 5229 ts->tsf, ts->tsf, ts->buffer_timestamp, 5230 ts->buffer_timestamp, delta_tsf, delta_tsf); 5231 return QDF_STATUS_E_FAILURE; 5232 } 5233 5234 5235 end: 5236 *delay_us = delay; 5237 5238 return QDF_STATUS_SUCCESS; 5239 } 5240 5241 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 5242 uint32_t delta_tsf) 5243 { 5244 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5245 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 5246 DP_MOD_ID_CDP); 5247 5248 if (!vdev) { 5249 dp_err_rl("vdev %d does not exist", vdev_id); 5250 return; 5251 } 5252 5253 vdev->delta_tsf = delta_tsf; 5254 dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf); 5255 5256 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 5257 } 5258 #endif 5259 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY 5260 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl, 5261 uint8_t vdev_id, bool enable) 5262 { 5263 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5264 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 5265 DP_MOD_ID_CDP); 5266 5267 if (!vdev) { 5268 dp_err_rl("vdev %d does not exist", vdev_id); 5269 return QDF_STATUS_E_FAILURE; 5270 } 5271 5272 qdf_atomic_set(&vdev->ul_delay_report, enable); 5273 5274 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 5275 5276 return QDF_STATUS_SUCCESS; 5277 } 5278 5279 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 5280 uint32_t *val) 5281 { 5282 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5283 struct dp_vdev *vdev; 5284 uint32_t delay_accum; 5285 uint32_t pkts_accum; 5286 5287 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP); 5288 if (!vdev) { 5289 dp_err_rl("vdev %d does not exist", vdev_id); 5290 return QDF_STATUS_E_FAILURE; 5291 } 5292 5293 if (!qdf_atomic_read(&vdev->ul_delay_report)) { 5294 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 5295 return QDF_STATUS_E_FAILURE; 5296 } 5297 5298 /* Average uplink delay based on current accumulated values */ 5299 delay_accum = qdf_atomic_read(&vdev->ul_delay_accum); 5300 pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum); 5301 5302 *val = delay_accum / pkts_accum; 5303 dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val, 5304 delay_accum, pkts_accum); 5305 5306 /* Reset accumulated values to 0 */ 5307 qdf_atomic_set(&vdev->ul_delay_accum, 0); 5308 qdf_atomic_set(&vdev->ul_pkts_accum, 0); 5309 5310 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 5311 5312 
return QDF_STATUS_SUCCESS; 5313 } 5314 5315 static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev, 5316 struct hal_tx_completion_status *ts) 5317 { 5318 uint32_t ul_delay; 5319 5320 if (qdf_unlikely(!vdev)) { 5321 dp_info_rl("vdev is null or delete in progress"); 5322 return; 5323 } 5324 5325 if (!qdf_atomic_read(&vdev->ul_delay_report)) 5326 return; 5327 5328 if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts, 5329 vdev->delta_tsf, 5330 &ul_delay))) 5331 return; 5332 5333 ul_delay /= 1000; /* in unit of ms */ 5334 5335 qdf_atomic_add(ul_delay, &vdev->ul_delay_accum); 5336 qdf_atomic_inc(&vdev->ul_pkts_accum); 5337 } 5338 #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */ 5339 static inline 5340 void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev, 5341 struct hal_tx_completion_status *ts) 5342 { 5343 } 5344 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */ 5345 5346 /** 5347 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info 5348 * @soc: DP soc handle 5349 * @tx_desc: software descriptor head pointer 5350 * @ts: Tx completion status 5351 * @txrx_peer: txrx peer handle 5352 * @ring_id: ring number 5353 * 5354 * Return: none 5355 */ 5356 void dp_tx_comp_process_tx_status(struct dp_soc *soc, 5357 struct dp_tx_desc_s *tx_desc, 5358 struct hal_tx_completion_status *ts, 5359 struct dp_txrx_peer *txrx_peer, 5360 uint8_t ring_id) 5361 { 5362 uint32_t length; 5363 qdf_ether_header_t *eh; 5364 struct dp_vdev *vdev = NULL; 5365 qdf_nbuf_t nbuf = tx_desc->nbuf; 5366 enum qdf_dp_tx_rx_status dp_status; 5367 5368 if (!nbuf) { 5369 dp_info_rl("invalid tx descriptor. nbuf NULL"); 5370 goto out; 5371 } 5372 5373 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 5374 length = dp_tx_get_pkt_len(tx_desc); 5375 5376 dp_status = dp_tx_hw_to_qdf(ts->status); 5377 DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf, 5378 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD, 5379 QDF_TRACE_DEFAULT_PDEV_ID, 5380 qdf_nbuf_data_addr(nbuf), 5381 sizeof(qdf_nbuf_data(nbuf)), 5382 tx_desc->id, ts->status, dp_status)); 5383 5384 dp_tx_comp_debug("-------------------- \n" 5385 "Tx Completion Stats: \n" 5386 "-------------------- \n" 5387 "ack_frame_rssi = %d \n" 5388 "first_msdu = %d \n" 5389 "last_msdu = %d \n" 5390 "msdu_part_of_amsdu = %d \n" 5391 "rate_stats valid = %d \n" 5392 "bw = %d \n" 5393 "pkt_type = %d \n" 5394 "stbc = %d \n" 5395 "ldpc = %d \n" 5396 "sgi = %d \n" 5397 "mcs = %d \n" 5398 "ofdma = %d \n" 5399 "tones_in_ru = %d \n" 5400 "tsf = %d \n" 5401 "ppdu_id = %d \n" 5402 "transmit_cnt = %d \n" 5403 "tid = %d \n" 5404 "peer_id = %d\n" 5405 "tx_status = %d\n", 5406 ts->ack_frame_rssi, ts->first_msdu, 5407 ts->last_msdu, ts->msdu_part_of_amsdu, 5408 ts->valid, ts->bw, ts->pkt_type, ts->stbc, 5409 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma, 5410 ts->tones_in_ru, ts->tsf, ts->ppdu_id, 5411 ts->transmit_cnt, ts->tid, ts->peer_id, 5412 ts->status); 5413 5414 /* Update SoC level stats */ 5415 DP_STATS_INCC(soc, tx.dropped_fw_removed, 1, 5416 (ts->status == HAL_TX_TQM_RR_REM_CMD_REM)); 5417 5418 if (!txrx_peer) { 5419 dp_info_rl("peer is null or deletion in progress"); 5420 DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length); 5421 goto out; 5422 } 5423 vdev = txrx_peer->vdev; 5424 5425 dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status); 5426 dp_tx_update_uplink_delay(soc, vdev, ts); 5427 5428 /* check tx complete notification */ 5429 if (qdf_nbuf_tx_notify_comp_get(nbuf)) 5430 dp_tx_notify_completion(soc, vdev, tx_desc, 5431 nbuf, ts->status); 5432 5433 /* Update per-packet 
stats for mesh mode */ 5434 if (qdf_unlikely(vdev->mesh_vdev) && 5435 !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) 5436 dp_tx_comp_fill_tx_completion_stats(tx_desc, ts); 5437 5438 /* Update peer level stats */ 5439 if (qdf_unlikely(txrx_peer->bss_peer && 5440 vdev->opmode == wlan_op_mode_ap)) { 5441 if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) { 5442 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1, 5443 length); 5444 5445 if (txrx_peer->vdev->tx_encap_type == 5446 htt_cmn_pkt_type_ethernet && 5447 QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { 5448 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, 5449 tx.bcast, 1, 5450 length); 5451 } 5452 } 5453 } else { 5454 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length); 5455 if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) { 5456 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success, 5457 1, length); 5458 if (qdf_unlikely(txrx_peer->in_twt)) { 5459 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, 5460 tx.tx_success_twt, 5461 1, length); 5462 } 5463 } 5464 } 5465 5466 dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id); 5467 dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id); 5468 dp_tx_update_peer_jitter_stats(txrx_peer, tx_desc, ts, ring_id); 5469 dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc, 5470 ts, ts->tid); 5471 dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status); 5472 5473 #ifdef QCA_SUPPORT_RDK_STATS 5474 if (soc->peerstats_enabled) 5475 dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid, 5476 qdf_ktime_to_ms(tx_desc->timestamp), 5477 ts->ppdu_id); 5478 #endif 5479 5480 out: 5481 return; 5482 } 5483 5484 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \ 5485 defined(QCA_ENHANCED_STATS_SUPPORT) 5486 /* 5487 * dp_tx_update_peer_basic_stats(): Update peer basic stats 5488 * @txrx_peer: Datapath txrx_peer handle 5489 * @length: Length of the packet 5490 * @tx_status: Tx status from TQM/FW 5491 * @update: enhanced flag value present in dp_pdev 5492 * 5493 * Return: none 5494 */ 5495 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer, 5496 uint32_t length, uint8_t tx_status, 5497 bool update) 5498 { 5499 if (update || (!txrx_peer->hw_txrx_stats_en)) { 5500 DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length); 5501 5502 if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED) 5503 DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1); 5504 } 5505 } 5506 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) 5507 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer, 5508 uint32_t length, uint8_t tx_status, 5509 bool update) 5510 { 5511 if (!txrx_peer->hw_txrx_stats_en) { 5512 DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length); 5513 5514 if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED) 5515 DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1); 5516 } 5517 } 5518 5519 #else 5520 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer, 5521 uint32_t length, uint8_t tx_status, 5522 bool update) 5523 { 5524 DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length); 5525 5526 if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED) 5527 DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1); 5528 } 5529 #endif 5530 5531 /* 5532 * dp_tx_prefetch_next_nbuf_data(): Prefetch nbuf and nbuf data 5533 * @nbuf: skb buffer 5534 * 5535 * Return: none 5536 */ 5537 #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH 5538 static inline 5539 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next) 5540 { 5541 qdf_nbuf_t nbuf = NULL; 5542 5543 if (next) 5544 nbuf = next->nbuf; 5545 if (nbuf) 5546 qdf_prefetch(nbuf); 5547 } 5548 #else 
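/* stub: prefetching is a no-op when QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
 * is not enabled in the build
 */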
5549 static inline 5550 void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next) 5551 { 5552 } 5553 #endif 5554 5555 /** 5556 * dp_tx_mcast_reinject_handler() - Tx reinjected multicast packets handler 5557 * @soc: core txrx main context 5558 * @desc: software descriptor 5559 * 5560 * Return: true when packet is reinjected 5561 */ 5562 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \ 5563 defined(WLAN_MCAST_MLO) 5564 static inline bool 5565 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc) 5566 { 5567 struct dp_vdev *vdev = NULL; 5568 5569 if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) { 5570 if (!soc->arch_ops.dp_tx_mcast_handler || 5571 !soc->arch_ops.dp_tx_is_mcast_primary) 5572 return false; 5573 5574 vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id, 5575 DP_MOD_ID_REINJECT); 5576 5577 if (qdf_unlikely(!vdev)) { 5578 dp_tx_comp_info_rl("Unable to get vdev ref %d", 5579 desc->id); 5580 return false; 5581 } 5582 5583 if (!(soc->arch_ops.dp_tx_is_mcast_primary(soc, vdev))) { 5584 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT); 5585 return false; 5586 } 5587 DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1, 5588 qdf_nbuf_len(desc->nbuf)); 5589 soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf); 5590 dp_tx_desc_release(desc, desc->pool_id); 5591 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT); 5592 return true; 5593 } 5594 5595 return false; 5596 } 5597 #else 5598 static inline bool 5599 dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc) 5600 { 5601 return false; 5602 } 5603 #endif 5604 5605 #ifdef QCA_DP_TX_NBUF_LIST_FREE 5606 static inline void 5607 dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head) 5608 { 5609 qdf_nbuf_queue_head_init(nbuf_queue_head); 5610 } 5611 5612 static inline void 5613 dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head, 5614 struct dp_tx_desc_s *desc) 5615 { 5616 qdf_nbuf_t nbuf = NULL; 5617 5618 nbuf = desc->nbuf; 5619 if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_FAST)) 5620 qdf_nbuf_dev_queue_head(nbuf_queue_head, nbuf); 5621 else 5622 qdf_nbuf_free(nbuf); 5623 } 5624 5625 static inline void 5626 dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head) 5627 { 5628 qdf_nbuf_dev_kfree_list(nbuf_queue_head); 5629 } 5630 #else 5631 static inline void 5632 dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head) 5633 { 5634 } 5635 5636 static inline void 5637 dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head, 5638 struct dp_tx_desc_s *desc) 5639 { 5640 qdf_nbuf_free(desc->nbuf); 5641 } 5642 5643 static inline void 5644 dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head) 5645 { 5646 } 5647 #endif 5648 5649 /** 5650 * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler 5651 * @soc: core txrx main context 5652 * @comp_head: software descriptor head pointer 5653 * @ring_id: ring number 5654 * 5655 * This function will process batch of descriptors reaped by dp_tx_comp_handler 5656 * and release the software descriptors after processing is complete 5657 * 5658 * Return: none 5659 */ 5660 void 5661 dp_tx_comp_process_desc_list(struct dp_soc *soc, 5662 struct dp_tx_desc_s *comp_head, uint8_t ring_id) 5663 { 5664 struct dp_tx_desc_s *desc; 5665 struct dp_tx_desc_s *next; 5666 struct hal_tx_completion_status ts; 5667 struct dp_txrx_peer *txrx_peer = NULL; 5668 uint16_t peer_id = DP_INVALID_PEER; 5669 dp_txrx_ref_handle txrx_ref_handle = NULL; 5670 qdf_nbuf_queue_head_t h; 5671 5672 desc 
= comp_head; 5673 5674 dp_tx_nbuf_queue_head_init(&h); 5675 5676 while (desc) { 5677 next = desc->next; 5678 dp_tx_prefetch_next_nbuf_data(next); 5679 5680 if (peer_id != desc->peer_id) { 5681 if (txrx_peer) 5682 dp_txrx_peer_unref_delete(txrx_ref_handle, 5683 DP_MOD_ID_TX_COMP); 5684 peer_id = desc->peer_id; 5685 txrx_peer = 5686 dp_txrx_peer_get_ref_by_id(soc, peer_id, 5687 &txrx_ref_handle, 5688 DP_MOD_ID_TX_COMP); 5689 } 5690 5691 if (dp_tx_mcast_reinject_handler(soc, desc)) { 5692 desc = next; 5693 continue; 5694 } 5695 5696 if (desc->flags & DP_TX_DESC_FLAG_PPEDS) { 5697 if (qdf_likely(txrx_peer)) 5698 dp_tx_update_peer_basic_stats(txrx_peer, 5699 desc->length, 5700 desc->tx_status, 5701 false); 5702 dp_tx_nbuf_dev_queue_free(&h, desc); 5703 dp_ppeds_tx_desc_free(soc, desc); 5704 desc = next; 5705 continue; 5706 } 5707 5708 if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) { 5709 struct dp_pdev *pdev = desc->pdev; 5710 5711 if (qdf_likely(txrx_peer)) 5712 dp_tx_update_peer_basic_stats(txrx_peer, 5713 desc->length, 5714 desc->tx_status, 5715 false); 5716 qdf_assert(pdev); 5717 dp_tx_outstanding_dec(pdev); 5718 5719 /* 5720 * Calling a QDF WRAPPER here is creating significant 5721 * performance impact so avoided the wrapper call here 5722 */ 5723 dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, 5724 desc->id, DP_TX_COMP_UNMAP); 5725 dp_tx_nbuf_unmap(soc, desc); 5726 dp_tx_nbuf_dev_queue_free(&h, desc); 5727 dp_tx_desc_free(soc, desc, desc->pool_id); 5728 desc = next; 5729 continue; 5730 } 5731 5732 hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc); 5733 5734 dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer, 5735 ring_id); 5736 5737 dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer); 5738 5739 dp_tx_desc_release(desc, desc->pool_id); 5740 desc = next; 5741 } 5742 dp_tx_nbuf_dev_kfree_list(&h); 5743 if (txrx_peer) 5744 dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP); 5745 } 5746 5747 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT 5748 static inline 5749 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped, 5750 int max_reap_limit) 5751 { 5752 bool limit_hit = false; 5753 5754 limit_hit = 5755 (num_reaped >= max_reap_limit) ? 
true : false; 5756 5757 if (limit_hit) 5758 DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1); 5759 5760 return limit_hit; 5761 } 5762 5763 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc) 5764 { 5765 return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check; 5766 } 5767 5768 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc) 5769 { 5770 struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx; 5771 5772 return cfg->tx_comp_loop_pkt_limit; 5773 } 5774 #else 5775 static inline 5776 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped, 5777 int max_reap_limit) 5778 { 5779 return false; 5780 } 5781 5782 static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc) 5783 { 5784 return false; 5785 } 5786 5787 static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc) 5788 { 5789 return 0; 5790 } 5791 #endif 5792 5793 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ 5794 static inline int 5795 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng, 5796 int *max_reap_limit) 5797 { 5798 return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng, 5799 max_reap_limit); 5800 } 5801 #else 5802 static inline int 5803 dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng, 5804 int *max_reap_limit) 5805 { 5806 return 0; 5807 } 5808 #endif 5809 5810 #ifdef DP_TX_TRACKING 5811 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc) 5812 { 5813 if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) && 5814 (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) { 5815 dp_err_rl("tx_desc %u is corrupted", tx_desc->id); 5816 qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK); 5817 } 5818 } 5819 #endif 5820 5821 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc, 5822 hal_ring_handle_t hal_ring_hdl, uint8_t ring_id, 5823 uint32_t quota) 5824 { 5825 void *tx_comp_hal_desc; 5826 void *last_prefetched_hw_desc = NULL; 5827 struct dp_tx_desc_s *last_prefetched_sw_desc = NULL; 5828 hal_soc_handle_t hal_soc; 5829 uint8_t buffer_src; 5830 struct dp_tx_desc_s *tx_desc = NULL; 5831 struct dp_tx_desc_s *head_desc = NULL; 5832 struct dp_tx_desc_s *tail_desc = NULL; 5833 uint32_t num_processed = 0; 5834 uint32_t count; 5835 uint32_t num_avail_for_reap = 0; 5836 bool force_break = false; 5837 struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id]; 5838 int max_reap_limit, ring_near_full; 5839 uint32_t num_entries; 5840 5841 DP_HIST_INIT(); 5842 5843 num_entries = hal_srng_get_num_entries(soc->hal_soc, hal_ring_hdl); 5844 5845 more_data: 5846 5847 hal_soc = soc->hal_soc; 5848 /* Re-initialize local variables to be re-used */ 5849 head_desc = NULL; 5850 tail_desc = NULL; 5851 count = 0; 5852 max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc); 5853 5854 ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring, 5855 &max_reap_limit); 5856 5857 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { 5858 dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl); 5859 return 0; 5860 } 5861 5862 if (!num_avail_for_reap) 5863 num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, 5864 hal_ring_hdl, 0); 5865 5866 if (num_avail_for_reap >= quota) 5867 num_avail_for_reap = quota; 5868 5869 dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap); 5870 last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc, 5871 hal_ring_hdl, 5872 num_avail_for_reap); 5873 5874 /* Find head descriptor from completion ring */ 5875 while (qdf_likely(num_avail_for_reap--)) { 5876 5877 
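		/*
		 * Each pass reaps one WBM2SW completion entry: validate the
		 * release source (TQM/FW), map the HAL cookie back to the SW
		 * Tx descriptor, then either hand FW releases to the HTT
		 * completion path or queue the descriptor for TQM completion
		 * processing.
		 */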
		tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
		if (qdf_unlikely(!tx_comp_hal_desc))
			break;
		buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
							   tx_comp_hal_desc);

		/* If this buffer was not released by TQM or FW, then it is
		 * not a Tx completion indication; assert.
		 */
		if (qdf_unlikely(buffer_src !=
				 HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
		    (qdf_unlikely(buffer_src !=
				  HAL_TX_COMP_RELEASE_SOURCE_FW))) {
			uint8_t wbm_internal_error;

			dp_err_rl(
				"Tx comp release_src != TQM | FW but from %d",
				buffer_src);
			hal_dump_comp_desc(tx_comp_hal_desc);
			DP_STATS_INC(soc, tx.invalid_release_source, 1);

			/* When WBM sees a NULL buffer_addr_info in any of
			 * the ingress rings it sends an error indication,
			 * with wbm_internal_error=1, to a specific ring.
			 * The WBM2SW ring used to indicate these errors is
			 * fixed in HW, and that ring is being used as the Tx
			 * completion ring. These errors are not related to
			 * Tx completions, and should just be ignored.
			 */
			wbm_internal_error = hal_get_wbm_internal_error(
							hal_soc,
							tx_comp_hal_desc);

			if (wbm_internal_error) {
				dp_err_rl("Tx comp wbm_internal_error!!");
				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);

				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
								buffer_src)
					dp_handle_wbm_internal_error(
						soc,
						tx_comp_hal_desc,
						hal_tx_comp_get_buffer_type(
							tx_comp_hal_desc));

			} else {
				dp_err_rl("Tx comp wbm_internal_error false");
				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
			}
			continue;
		}

		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
							tx_comp_hal_desc,
							&tx_desc);
		if (qdf_unlikely(!tx_desc)) {
			dp_err("unable to retrieve tx_desc!");
			hal_dump_comp_desc(tx_comp_hal_desc);
			DP_STATS_INC(soc, tx.invalid_tx_comp_desc, 1);
			QDF_BUG(0);
			continue;
		}
		tx_desc->buffer_src = buffer_src;

		if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
			goto add_to_pool2;

		/*
		 * If the release source is FW, process the HTT status
		 */
		if (qdf_unlikely(buffer_src ==
					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];

			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
						 htt_tx_status);
			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					      &tx_desc->comp, 1);
			soc->arch_ops.dp_tx_process_htt_completion(
							soc,
							tx_desc,
							htt_tx_status,
							ring_id);
		} else {
			tx_desc->tx_status =
				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
			tx_desc->buffer_src = buffer_src;
			/*
			 * If the fast completion mode is enabled, the
			 * extended metadata from the descriptor is not copied
			 */
			if (qdf_likely(tx_desc->flags &
						DP_TX_DESC_FLAG_SIMPLE))
				goto add_to_pool;

			/*
			 * If the descriptor was already freed in vdev_detach,
			 * continue to the next descriptor
			 */
			if (qdf_unlikely
			    ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
			     !tx_desc->flags)) {
				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
						   tx_desc->id);
				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
				dp_tx_desc_check_corruption(tx_desc);
				continue;
			}

			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
				dp_tx_comp_info_rl("pdev in down state %d",
						   tx_desc->id);
				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
				dp_tx_comp_free_buf(soc, tx_desc, false);
				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
				goto next_desc;
			}
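			/* Sanity: a descriptor reaped here must still be
			 * marked allocated and queued to HW; anything else
			 * points at a double completion or a corrupted
			 * descriptor, hence the hard assert below.
			 */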
			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
			    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
				dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
						 tx_desc->flags, tx_desc->id);
				qdf_assert_always(0);
			}

			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					      &tx_desc->comp, 1);
add_to_pool:
			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

add_to_pool2:
			/* First ring descriptor on the cycle */
			if (!head_desc) {
				head_desc = tx_desc;
				tail_desc = tx_desc;
			}

			tail_desc->next = tx_desc;
			tx_desc->next = NULL;
			tail_desc = tx_desc;
		}
next_desc:
		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);

		/*
		 * Stop processing once the processed packet count exceeds
		 * the given quota or the loop packet limit
		 */

		count++;

		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
					       num_avail_for_reap,
					       hal_ring_hdl,
					       &last_prefetched_hw_desc,
					       &last_prefetched_sw_desc);

		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
			break;
	}

	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	/* Process the reaped descriptors */
	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

	DP_STATS_INC(soc, tx.tx_comp[ring_id], count);

	/*
	 * If we are processing in near-full condition, there are 3 scenarios:
	 * 1) Ring entries have reached critical state
	 * 2) Ring entries are still near the high threshold
	 * 3) Ring entries are below the safe level
	 *
	 * One more loop will move the state to normal processing and yield
	 */
	if (ring_near_full)
		goto more_data;

	if (dp_tx_comp_enable_eol_data_check(soc)) {

		if (num_processed >= quota)
			force_break = true;

		if (!force_break &&
		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
						  hal_ring_hdl)) {
			DP_STATS_INC(soc, tx.hp_oos2, 1);
			if (!hif_exec_should_yield(soc->hif_handle,
						   int_ctx->dp_intr_id))
				goto more_data;

			num_avail_for_reap =
				hal_srng_dst_num_valid_locked(soc->hal_soc,
							      hal_ring_hdl,
							      true);
			if (qdf_unlikely(num_entries &&
					 (num_avail_for_reap >=
					  num_entries >> 1))) {
				DP_STATS_INC(soc, tx.near_full, 1);
				goto more_data;
			}
		}
	}
	DP_TX_HIST_STATS_PER_PDEV();

	return num_processed;
}
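#if 0
/*
 * Illustrative sketch only (not compiled): a hypothetical caller draining
 * one Tx completion ring with a NAPI-style budget. The ring id and budget
 * values here are assumptions, not taken from this driver.
 */
static uint32_t example_drain_tx_comp_ring(struct dp_intr *int_ctx,
					   struct dp_soc *soc)
{
	uint8_t ring_id = 0;	/* WBM2SW ring serving as Tx completion ring */
	uint32_t budget = 64;	/* quota handed down by the scheduler */

	/* Returns the processed count used for budget accounting */
	return dp_tx_comp_handler(int_ctx, soc,
				  soc->tx_comp_ring[ring_id].hal_srng,
				  ring_id, budget);
}
#endif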
#ifdef FEATURE_WLAN_TDLS
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TDLS);

	if (!vdev) {
		dp_err("vdev handle for id %d is NULL", vdev_id);
		return NULL;
	}

	if (tx_spec & OL_TX_SPEC_NO_FREE)
		vdev->is_tdls_frame = true;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);

	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
}
#endif

/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	int pdev_id;
	/*
	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
	 */
	DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
				    DP_TCL_METADATA_TYPE_VDEV_BASED);

	DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
				       vdev->vdev_id);

	pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
						       vdev->pdev->pdev_id);
	DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);

	/*
	 * Set HTT Extension Valid bit to 0 by default
	 */
	DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

	dp_tx_vdev_update_search_flags(vdev);

	return QDF_STATUS_SUCCESS;
}

#ifndef FEATURE_WDS
static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
{
	return false;
}
#endif

/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
	struct dp_soc *soc = vdev->pdev->soc;

	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for a TDLS link.
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP (in HKv1) modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled.
	 */
	if (vdev->opmode == wlan_op_mode_sta &&
	    vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if ((vdev->opmode == wlan_op_mode_sta) &&
		 !dp_tx_da_search_override(vdev))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;

	if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
		vdev->search_type = soc->sta_mode_search_policy;
	else
		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
}

static inline bool
dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
			  struct dp_vdev *vdev,
			  struct dp_tx_desc_s *tx_desc)
{
	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
		return false;

	/*
	 * If a vdev is given, only check whether the descriptor's vdev
	 * matches. If vdev is NULL, check whether the descriptor's pdev
	 * matches.
	 */
	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
		(tx_desc->pdev == pdev);
}
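#if 0
/*
 * Illustrative sketch only (not compiled): the two intended uses of
 * dp_tx_desc_flush() below, mirroring dp_tx_vdev_detach() and a pdev
 * teardown path such as dp_pdev_deinit().
 */
/* On vdev detach: only clear the vdev reference in matching descriptors */
dp_tx_desc_flush(pdev, vdev, false);

/* On pdev teardown: force-free every outstanding descriptor on the pdev */
dp_tx_desc_flush(pdev, NULL, true);
#endif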
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_desc_flush() - release resources associated
 *                      with a TX Desc
 *
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance
 * NULL: no specific Vdev is required and check all allocated TX desc
 * on this pdev.
 * Non-NULL: only check the allocated TX Desc associated with this Vdev.
 *
 * @force_free:
 * true: flush the TX desc.
 * false: only reset the Vdev in each allocated TX desc
 * associated with the current Vdev.
 *
 * This function will go through the TX desc pool to flush
 * the outstanding TX data or reset the Vdev to NULL in the associated
 * TX Desc.
 */
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!(tx_desc_pool->pool_size) ||
		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
		    !(tx_desc_pool->desc_pages.cacheable_pages))
			continue;

		/*
		 * Take the flow pool lock to protect against the pool being
		 * freed when all tx_desc are recycled while handling TX
		 * completions. This is not necessary for a force flush, as:
		 * a. a deadlock would occur if dp_tx_desc_release is also
		 *    trying to acquire the lock, and
		 * b. dp interrupts have already been disabled before the
		 *    force TX desc flush in dp_pdev_deinit().
		 */
		if (!force_free)
			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
		num_desc = tx_desc_pool->pool_size;
		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;

			if (qdf_unlikely(!(tx_desc_pool->
					 desc_pages.cacheable_pages)))
				break;

			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				/*
				 * Free the TX desc if a force free is
				 * required, otherwise only reset the vdev
				 * in this TX desc.
				 */
				if (force_free) {
					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
					dp_tx_comp_free_buf(soc, tx_desc,
							    false);
					dp_tx_desc_release(tx_desc, i);
				} else {
					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
				}
			}
		}
		if (!force_free)
			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
	}
}
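/*
 * Note: with flow control V2 the walk above covers all MAX_TXDESC_POOLS
 * entries and skips inactive pools; the non-V2 variant below instead
 * iterates only the wlan_cfg-configured pool count and depth, and does
 * not need the flow pool lock.
 */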
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/**
 * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
 *
 * @soc: Handle to DP soc structure
 * @tx_desc: pointer to one TX desc
 * @desc_pool_id: TX Desc pool id
 */
static inline void
dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		      uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev_id = DP_INVALID_VDEV_ID;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}

void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i, num_pool;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!tx_desc_pool->desc_pages.cacheable_pages)
			continue;

		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;
			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				if (force_free) {
					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
					dp_tx_comp_free_buf(soc, tx_desc,
							    false);
					dp_tx_desc_release(tx_desc, i);
				} else {
					dp_tx_desc_reset_vdev(soc, tx_desc,
							      i);
				}
			}
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;

	/* Reset the TX desc associated with this Vdev to NULL */
	dp_tx_desc_flush(pdev, vdev, false);

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   uint32_t num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  uint32_t num_desc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
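/*
 * With flow control V2 the descriptor pools themselves are created on
 * demand by the flow pool code, so the hooks above only manage the
 * per-pool lock and state; the #else branch below carves every pool out
 * of memory up front instead.
 */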
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   uint32_t num_desc)
{
	uint8_t i, count;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool alloc %d failed %pK"),
				  i, soc);
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (count = 0; count < i; count++)
		dp_tx_desc_pool_free(soc, count);

	return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  uint32_t num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool init %d failed %pK"),
				  i, soc);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_deinit(soc, i);
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_free(soc, i);
}

#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
static void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
static void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_free(soc, num_pool);
	dp_tx_tso_num_seg_pool_free(soc, num_pool);
}

/**
 * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
 * @soc: core txrx main context
 *
 * This function frees all tx related descriptors as below:
 * 1. Regular TX descriptors (static pools)
 * 2. Extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
	dp_tx_ext_desc_pool_free(soc, num_pool);
	dp_tx_delete_static_pools(soc, num_pool);
}
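#if 0
/*
 * Illustrative sketch only (not compiled): the expected pairing of the
 * soc-level pool helpers in this file across attach/detach. The ordering
 * shown is an assumption based on the alloc/init and deinit/free split.
 */
dp_soc_tx_desc_sw_pools_alloc(soc);	/* attach: reserve pool memory */
dp_soc_tx_desc_sw_pools_init(soc);	/* init: set up descriptors/locks */
/* ... data path runs ... */
dp_soc_tx_desc_sw_pools_deinit(soc);	/* deinit: tear down descriptors */
dp_soc_tx_desc_sw_pools_free(soc);	/* detach: release pool memory */
#endif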
/**
 * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
 * @soc: core txrx main context
 *
 * This function de-initializes all tx related descriptors as below:
 * 1. Regular TX descriptors (static pools)
 * 2. Extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
	dp_tx_deinit_static_pools(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
						uint8_t num_pool,
						uint32_t num_desc)
{
	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool alloc %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Initialize TSO descriptor pools
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
					       uint8_t num_pool,
					       uint32_t num_desc)
{
	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool init %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}
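#if 0
/*
 * Illustrative sketch only (not compiled): pairing the TSO common pool
 * allocator with its init step and unwinding the allocation when init
 * fails. The unwind call is an assumption; dp_tso_soc_attach() below
 * simply returns on failure.
 */
if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
	return QDF_STATUS_E_FAILURE;

if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc)) {
	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
	return QDF_STATUS_E_FAILURE;
}
#endif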
/**
 * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
 * @soc: core txrx main context
 *
 * This function allocates memory for the following descriptor pools:
 * 1. Regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
		  __func__, num_pool, num_desc);

	if ((num_pool > MAX_TXDESC_POOLS) ||
	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
		goto fail1;

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail3;

	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_free(soc, num_pool);
fail2:
	dp_tx_delete_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}

/**
 * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
 * @soc: core txrx main context
 *
 * This function initializes the following TX descriptor pools:
 * 1. Regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail3;

	dp_tx_flow_control_init(soc);
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
fail2:
	dp_tx_deinit_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}

/**
 * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAILURE
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
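#if 0
/*
 * Illustrative sketch only (not compiled): when TSO descriptor attach is
 * deferred via wlan_cfg_is_tso_desc_attach_defer(), the TSO pools are
 * skipped at soc alloc/init time and a later caller is expected to attach
 * them through dp_tso_soc_attach(). The cast assumes cdp_soc_t is the
 * first member of struct dp_soc.
 */
if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
	status = dp_tso_soc_attach((struct cdp_soc_t *)soc);
#endif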
/**
 * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);

	return QDF_STATUS_SUCCESS;
}

#ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
void dp_pkt_add_timestamp(struct dp_vdev *vdev,
			  enum qdf_pkt_timestamp_index index, uint64_t time,
			  qdf_nbuf_t nbuf)
{
	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
		uint64_t tsf_time;

		if (vdev->get_tsf_time) {
			vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
			qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
		}
	}
}

void dp_pkt_get_timestamp(uint64_t *time)
{
	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
		*time = qdf_get_log_timestamp();
}
#endif
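#if 0
/*
 * Illustrative sketch only (not compiled): stamping a TSF-correlated
 * timestamp on an egress frame around the hardware enqueue point. The
 * index value QDF_PKT_TX_DRIVER_ENTRY is an assumption for the sketch.
 */
uint64_t time;

dp_pkt_get_timestamp(&time);
dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_ENTRY, time, nbuf);
#endif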