/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include <wlan_cfg.h>
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif

/* Mask applied to the skb queue mapping when selecting a Tx pool/ring */
#define DP_TX_QUEUE_MASK 0x3

/* TODO Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject*/
#define DP_INVALID_PEER 0XFFFE

/*
 * Mapping between hal encrypt type and cdp_sec_type:
 * indexed by a cdp_sec_type value, yields the matching
 * HAL_TX_ENCRYPT_TYPE_* programmed into the TCL descriptor.
 */
#define MAX_CDP_SEC_TYPE 12
static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
					HAL_TX_ENCRYPT_TYPE_WEP_128,
					HAL_TX_ENCRYPT_TYPE_WEP_104,
					HAL_TX_ENCRYPT_TYPE_WEP_40,
					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
					HAL_TX_ENCRYPT_TYPE_WAPI,
					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};

/**
 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @queue: queue ids container for nbuf
 *
 * TX packet queue has 2 instances, software descriptors id and dma ring id
 * Based on tx feature and hardware configuration queue id combination could be
 * different.
 * For example -
 * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id
 * With no XPS,lock based resource protection, Descriptor pool ids are different
 * for each vdev, dma ring id will be same as single pdev id
 *
 * Return: None
 */
#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* Per-CPU queues: both pool and ring are derived from the skb's
	 * queue mapping, bounded by DP_TX_QUEUE_MASK. */
	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
	queue->desc_pool_id = queue_offset;
	queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s, pool_id:%d ring_id: %d",
			__func__, queue->desc_pool_id, queue->ring_id);

	return;
}
#else /* QCA_OL_TX_MULTIQ_SUPPORT */
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id: lock-based mode uses per-vdev pool and per-pdev ring */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	queue->ring_id = DP_TX_GET_RING_ID(vdev);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s, pool_id:%d ring_id: %d",
			__func__, queue->desc_pool_id, queue->ring_id);

	return;
}
#endif

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc - core txrx main context
 * @tx_desc - Tx software descriptor
 *
 * DMA-unmaps the TSO segment attached to @tx_desc. The shared
 * tso_cmn_num_seg counter is decremented; the segment is unmapped as
 * "last" only when this was the final outstanding segment.
 */
static void dp_tx_tso_unmap_segment(struct dp_soc *soc,
		struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Unmap the tso segment", __func__);
	if (qdf_unlikely(!tx_desc->tso_desc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d TSO desc is NULL!",
			  __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d TSO num desc is NULL!",
			  __func__, __LINE__);
		qdf_assert(0);
	} else {
		bool is_last_seg;
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;

		/* Only the final segment of the jumbo packet triggers the
		 * "last segment" unmap semantics. */
		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1)
			is_last_seg = false;
		else
			is_last_seg = true;
		tso_num_desc->num_seg.tso_cmn_num_seg--;
		qdf_nbuf_unmap_tso_segment(soc->osdev,
					   tx_desc->tso_desc, is_last_seg);
	}
}

/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 *
 * @soc - soc device handle
 * @tx_desc - Tx software descriptor
 *
 * Returns the per-packet common "num seg" element to its free list once
 * its counter reaches zero, then returns the segment element itself.
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
		struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(!tx_desc->tso_desc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d TSO desc is NULL!",
			  __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d TSO num desc is NULL!",
			  __func__, __LINE__);
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;

		/* Add the tso num segment into the free list */
		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					    tx_desc->tso_num_desc);
			tx_desc->tso_num_desc = NULL;
		}

		/* Add the tso segment into the free list*/
		dp_tx_tso_desc_free(soc,
				    tx_desc->pool_id, tx_desc->tso_desc);
		tx_desc->tso_desc = NULL;
	}
}
#else
/* FEATURE_TSO disabled: no-op stubs so callers need no ifdefs */
static void dp_tx_tso_unmap_segment(struct dp_soc *soc,
		struct dp_tx_desc_s *tx_desc)

{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
		struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc : Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor (TSO segments,
 * MSDU extension descriptor, ME buffer), update the flow-control
 * counters, and free the Tx descriptor back to @desc_pool_id.
 *
 * Return: None
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	struct dp_soc *soc;
	uint8_t comp_status = 0;

	qdf_assert(pdev);

	soc = pdev->soc;

	if (tx_desc->frm_type == dp_tx_frm_tso)
		dp_tx_tso_desc_release(soc, tx_desc);

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);

	qdf_atomic_dec(&pdev->num_tx_outstanding);

	/* Exception (to-FW) frames were counted at enqueue time */
	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&pdev->num_tx_exception);

	/* Release reason is only meaningful for TQM-sourced completions;
	 * everything else is attributed to FW. */
	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
	    hal_tx_comp_get_buffer_source(&tx_desc->comp))
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
							     soc->hal_soc);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "Tx Completion Release desc %d status %d outstanding %d",
		  tx_desc->id, comp_status,
		  qdf_atomic_read(&pdev->num_tx_outstanding));

	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
	return;
}

/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @meta_data: filled htt_tx_msdu_desc_ext2_t to copy into the pre-header
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently needs this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size (8-byte aligned), or 0 on failure / when no
 *         metadata was added
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		uint32_t *meta_data)
{
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *) meta_data;

	uint8_t htt_desc_size;

	/* Size rounded of multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;

	uint8_t *hdr = NULL;

	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

	if (vdev->mesh_vdev) {
		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
				 htt_desc_size_aligned)) {
			DP_STATS_INC(vdev,
				     tx_i.dropped.headroom_insufficient, 1);
			return 0;
		}
		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (hdr == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Error in filling HTT metadata");

			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);

	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* Todo - Add support for DSRC */
	}

	return htt_desc_size_aligned;
}

/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
		void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;

	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags = *(uint32_t *) &tso_seg->tso_flags;

	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
					tso_seg->tso_flags.ip_len);

	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);


	/* Program one buffer pointer per fragment of this segment */
	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
		uint32_t lo = 0;
		uint32_t hi = 0;

		qdf_dmaaddr_to_32s(
			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
			tso_seg->tso_frags[num_frag].length);
	}

	return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
		void *ext_desc)
{
	return;
}
#endif

#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg() - Loop through the tso segments
 *                        allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_seg(struct dp_soc *soc,
	struct qdf_tso_seg_elem_t *free_seg,
	struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *next_seg;

	/* Walk the singly linked list, returning each element to the pool */
	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_desc_free(soc,
			msdu_info->tx_queue.desc_pool_id,
			free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_free_tso_num_seg() - Loop through the tso num segments
 *                            allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso num segments
 * @msdu_info: msdu descriptor
 * Return - void
 */
static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
	struct qdf_tso_num_seg_elem_t *free_seg,
	struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_num_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tso_num_seg_free(soc,
			msdu_info->tx_queue.desc_pool_id,
			free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Allocates one segment element per TSO segment plus one common
 * "num seg" element, then fills them via qdf_nbuf_get_tso_info().
 * On any failure all allocated elements are returned to their pools.
 *
 * Return: QDF_STATUS_SUCCESS success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *tso_seg;
	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
	struct dp_soc *soc = vdev->pdev->soc;
	struct qdf_tso_info_t *tso_info;
	struct qdf_tso_num_seg_elem_t *tso_num_seg;

	tso_info = &msdu_info->u.tso_info;
	tso_info->curr_seg = NULL;
	tso_info->tso_seg_list = NULL;
	tso_info->num_segs = num_seg;
	msdu_info->frm_type = dp_tx_frm_tso;
	tso_info->tso_num_seg_list = NULL;

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	/* Build a linked list of num_seg segment elements */
	while (num_seg) {
		tso_seg = dp_tx_tso_desc_alloc(
				soc, msdu_info->tx_queue.desc_pool_id);
		if (tso_seg) {
			tso_seg->next = tso_info->tso_seg_list;
			tso_info->tso_seg_list = tso_seg;
			num_seg--;
		} else {
			struct qdf_tso_seg_elem_t *free_seg =
				tso_info->tso_seg_list;

			dp_tx_free_tso_seg(soc, free_seg, msdu_info);

			return QDF_STATUS_E_NOMEM;
		}
	}

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	tso_num_seg = dp_tso_num_seg_alloc(soc,
			msdu_info->tx_queue.desc_pool_id);

	if (tso_num_seg) {
		tso_num_seg->next = tso_info->tso_num_seg_list;
		tso_info->tso_num_seg_list = tso_num_seg;
	} else {
		/* Bug: free tso_num_seg and tso_seg */
		/* Free the already allocated num of segments */
		struct qdf_tso_seg_elem_t *free_seg =
			tso_info->tso_seg_list;

		TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
			  __func__);
		dp_tx_free_tso_seg(soc, free_seg, msdu_info);

		return QDF_STATUS_E_NOMEM;
	}

	msdu_info->num_seg =
		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
		  msdu_info->num_seg);

	if (!(msdu_info->num_seg)) {
		dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
		dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
				       msdu_info);
		return QDF_STATUS_E_INVAL;
	}

	tso_info->curr_seg = tso_info->tso_seg_list;

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	return QDF_STATUS_E_NOMEM;
}
#endif

/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Builds the extension descriptor contents in a cached (stack) buffer,
 * then syncs it into the allocated descriptor's memory.
 *
 * Return: allocated extension descriptor, or NULL on allocation failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
	uint8_t i;
	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
	struct dp_tx_seg_info_s *seg_info;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_soc *soc = vdev->pdev->soc;

	/* Allocate an extension descriptor */
	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

	if (!msdu_ext_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	/* Mesh exception frames carry HTT metadata after the HAL header */
	if (msdu_info->exception_fw &&
	    qdf_unlikely(vdev->mesh_vdev)) {
		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
				&msdu_info->meta_data[0],
				sizeof(struct htt_tx_msdu_desc_ext2_t));
		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
	}

	switch (msdu_info->frm_type) {
	case dp_tx_frm_sg:
	case dp_tx_frm_me:
	case dp_tx_frm_raw:
		seg_info = msdu_info->u.sg_info.curr_seg;
		/* Update the buffer pointers in MSDU Extension Descriptor */
		for (i = 0; i < seg_info->frag_cnt; i++) {
			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
				seg_info->frags[i].paddr_lo,
				seg_info->frags[i].paddr_hi,
				seg_info->frags[i].len);
		}

		break;

	case dp_tx_frm_tso:
		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
				&cached_ext_desc[0]);
		break;


	default:
		break;
	}

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

	hal_tx_ext_desc_sync(&cached_ext_desc[0],
			     msdu_ext_desc->vaddr);

	return msdu_ext_desc;
}

/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 *
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Marks the skb as a DP-traced data packet and records pointer/data
 * trace events for it.
 *
 * Return: None
 */
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
	DPTRACE(qdf_dp_trace_ptr(skb,
				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(skb),
				 sizeof(qdf_nbuf_data(skb)),
				 msdu_id, vdev_id));

	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
				      msdu_id, QDF_TX));
}

/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: MSDU info carrying metadata for the fw
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
		struct dp_tx_msdu_info_s *msdu_info,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	uint8_t align_pad;
	uint8_t is_exception = 0;
	uint8_t htt_hdr_size;
	struct ether_header *eh;
	struct dp_tx_desc_s *tx_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	/* Flow control/Congestion Control counters */
	qdf_atomic_inc(&pdev->num_tx_outstanding);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = (tx_exc_metadata ?
			tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
	tx_desc->vdev = vdev;
	tx_desc->pdev = pdev;
	tx_desc->msdu_ext_desc = NULL;
	tx_desc->pkt_offset = 0;

	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

	/* Reset the control block */
	qdf_nbuf_reset_ctxt(nbuf);

	/*
	 * For special modes (vdev_type == ocb or mesh), data frames should be
	 * transmitted using varying transmit parameters (tx spec) which include
	 * transmit rate, power, priority, channel, channel bandwidth , nss etc.
	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
	 * These frames are sent as exception packets to firmware.
	 *
	 * HW requirement is that metadata should always point to a
	 * 8-byte aligned address. So we add alignment pad to start of buffer.
	 * HTT Metadata should be ensured to be multiple of 8-bytes,
	 *  to get 8-byte aligned start address along with align_pad added
	 *
	 *  |-----------------------------|
	 *  |                             |
	 *  |-----------------------------| <-----Buffer Pointer Address given
	 *  |                             |  ^    in HW descriptor (aligned)
	 *  |    HTT Metadata             |  |
	 *  |                             |  |
	 *  |                             |  | Packet Offset given in descriptor
	 *  |                             |  |
	 *  |-----------------------------|  |
	 *  |       Alignment Pad         |  v
	 *  |-----------------------------| <----- Actual buffer start address
	 *  |        SKB Data             |           (Unaligned)
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |-----------------------------|
	 */
	if (qdf_unlikely((msdu_info->exception_fw)) ||
				(vdev->opmode == wlan_op_mode_ocb)) {
		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;

		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
			DP_STATS_INC(vdev,
				     tx_i.dropped.headroom_insufficient, 1);
			goto failure;
		}

		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "qdf_nbuf_push_head failed");
			goto failure;
		}

		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
				msdu_info->meta_data);
		if (htt_hdr_size == 0)
			goto failure;
		tx_desc->pkt_offset = align_pad + htt_hdr_size;
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		is_exception = 1;
	}

	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
				qdf_nbuf_map(soc->osdev, nbuf,
					QDF_DMA_TO_DEVICE))) {
		/* Handle failure */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "qdf_nbuf_map failed");
		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
		goto failure;
	}

	/* NAWDS: multicast data frames also go to firmware as exceptions */
	if (qdf_unlikely(vdev->nawds_enabled)) {
		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
			is_exception = 1;
		}
	}

#if !TQM_BYPASS_WAR
	if (is_exception || tx_exc_metadata)
#endif
	{
		/* Temporary WAR due to TQM VP issues */
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		qdf_atomic_inc(&pdev->num_tx_exception);
	}

	return tx_desc;

failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}

/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
 * @desc_pool_id : Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descritor
 * information. For frames wth fragments, allocate and prepare
 * an MSDU extension descriptor
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (!tx_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	/* Flow control/Congestion Control counters */
	qdf_atomic_inc(&pdev->num_tx_outstanding);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = msdu_info->frm_type;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev = vdev;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;
	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

	/* Reset the control block */
	qdf_nbuf_reset_ctxt(nbuf);

	/* Handle scattered frames - TSO/SG/ME */
	/* Allocate and prepare an extension descriptor for scattered frames */
	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
	if (!msdu_ext_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s Tx Extension Descriptor Alloc Fail",
			  __func__);
		goto failure;
	}

#if TQM_BYPASS_WAR
	/* Temporary WAR due to TQM VP issues */
	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
	qdf_atomic_inc(&pdev->num_tx_exception);
#endif
	if (qdf_unlikely(msdu_info->exception_fw))
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

	tx_desc->msdu_ext_desc = msdu_ext_desc;
	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

	return tx_desc;
failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}

/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 *
 * DMA-maps each nbuf in the chain and records its fragment address/length
 * in @seg_info; the whole chain becomes one raw segment.
 *
 * Return: @nbuf on success, NULL on mapping failure (chain is unmapped
 *         and freed)
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	qdf_nbuf_t curr_nbuf = NULL;
	uint16_t total_len = 0;
	qdf_dma_addr_t paddr;
	int32_t i;
	int32_t mapped_buf_num = 0;

	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;

	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
	if (vdev->raw_mode_war &&
	    (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS))
		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;

	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {

		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
					QDF_DMA_TO_DEVICE)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s dma map error ", __func__);
			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
			mapped_buf_num = i;
			goto error;
		}

		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
		seg_info->frags[i].paddr_lo = paddr;
		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
		seg_info->frags[i].vaddr = (void *) curr_nbuf;
		total_len += qdf_nbuf_len(curr_nbuf);
	}

	seg_info->frag_cnt = i;
	seg_info->total_len = total_len;
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_raw;
	msdu_info->num_seg = 1;

	return nbuf;

error:
	/* Unmap only the first mapped_buf_num buffers (the rest were never
	 * mapped); every nbuf in the chain is freed. */
	i = 0;
	while (nbuf) {
		curr_nbuf = nbuf;
		if (i < mapped_buf_num) {
			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
			i++;
		}
		nbuf = qdf_nbuf_next(nbuf);
		qdf_nbuf_free(curr_nbuf);
	}
	return NULL;

}

/**
 * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
 * @soc: DP Soc Handle
 * @vdev: DP vdev handle
 * @tx_desc: Tx Descriptor Handle
 * @tid: TID from HLOS for overriding default DSCP-TID mapping
 * @fw_metadata: Metadata to send to Target Firmware along with frame
 * @ring_id: Ring ID of H/W ring to which we enqueue the packet
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Gets the next free TCL HW DMA descriptor and sets up required parameters
 * from software Tx descriptor
 *
 * Return: QDF_STATUS_SUCCESS on enqueue; QDF_STATUS_E_RESOURCES on an
 *         invalid descriptor id or a full TCL ring
 */
static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
				   uint16_t fw_metadata, uint8_t ring_id,
				   struct cdp_tx_exception_metadata
					*tx_exc_metadata)
{
	uint8_t type;
	uint16_t length;
	void *hal_tx_desc, *hal_tx_desc_cached;
	qdf_dma_addr_t dma_addr;
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];

	enum cdp_sec_type sec_type = (tx_exc_metadata ?
			tx_exc_metadata->sec_type : vdev->sec_type);

	/* Return Buffer Manager ID */
	uint8_t bm_id = ring_id;
	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;

	/* Build the descriptor on the stack first, then sync to the ring */
	hal_tx_desc_cached = (void *) cached_desc;
	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
		length = HAL_TX_EXT_DESC_WITH_META_DATA;
		type = HAL_TX_BUF_TYPE_EXT_DESC;
		dma_addr = tx_desc->msdu_ext_desc->paddr;
	} else {
		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
		type = HAL_TX_BUF_TYPE_BUFFER;
		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
	}

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
			dma_addr, bm_id, tx_desc->id,
			type, soc->hal_soc);

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id))
		return QDF_STATUS_E_RESOURCES;

	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
				vdev->pdev->lmac_id);
	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
				    vdev->search_type);
	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
				     vdev->bss_ast_hash);
	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
					  vdev->dscp_tid_map_id);
	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
		__func__, length, type, (uint64_t)dma_addr,
		tx_desc->pkt_offset, tx_desc->id);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
			vdev->hal_desc_addr_search_flags);

	/* verify checksum offload configuration*/
	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
		|| qdf_nbuf_is_tso(tx_desc->nbuf)))  {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);


	/* Sync cached descriptor with HW */
	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);

	if (!hal_tx_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s TCL ring full ring_id:%d", __func__, ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		return QDF_STATUS_E_RESOURCES;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;

	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);

	return QDF_STATUS_SUCCESS;
}


/**
 * dp_cce_classify() - Classify the frame based on CCE rules
 * @vdev: DP vdev handle
 * @nbuf: skb
 *
 * Classify frames based on CCE rules
 * Return: bool( true if classified,
 *	 else false)
 */
static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct ether_header *eh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	qdf_nbuf_t nbuf_clone = NULL;
	qdf_dot3_qosframe_t *qos_wh = NULL;

	/* for mesh packets don't do any classification */
	if (qdf_unlikely(vdev->mesh_vdev))
		return false;

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
		ether_type = eh->ether_type;
		llcHdr = (qdf_llc_t *)(nbuf->data +
					sizeof(struct ether_header));
	} else {
		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
		/* For encrypted packets don't do any classification */
		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
			return false;

		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
			/* 4-address (WDS) frames have a longer 802.11 header
			 * than 3-address frames; locate LLC/type accordingly */
			if (qdf_unlikely(
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {

				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_4ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
					QDF_IEEE80211_4ADDR_HDR_LEN);
			} else {
				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_3ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
					QDF_IEEE80211_3ADDR_HDR_LEN);
			}

			/* Raw mode: only EAPOL over SNAP is CCE-classified */
			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
				&& (ether_type ==
				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {

				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
				return true;
			}
		}

		return false;
	}

	/* Non-raw path: strip LLC/SNAP and/or VLAN headers on a clone so the
	 * qdf_nbuf_is_* parsers below see a plain Ethernet-II payload. */
	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
				sizeof(*llcHdr));
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (qdf_unlikely(nbuf_clone)) {
			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));

			if (ether_type == htons(ETHERTYPE_8021Q)) {
				qdf_nbuf_pull_head(nbuf_clone,
						sizeof(qdf_net_vlanhdr_t));
			}
		}
	} else {
		if (ether_type == htons(ETHERTYPE_8021Q)) {
			nbuf_clone = qdf_nbuf_clone(nbuf);
			if (qdf_unlikely(nbuf_clone)) {
				qdf_nbuf_pull_head(nbuf_clone,
						sizeof(qdf_net_vlanhdr_t));
			}
		}
	}

	if (qdf_unlikely(nbuf_clone))
		nbuf = nbuf_clone;


	/* Control-plane packet types that CCE handles */
	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
		if (qdf_unlikely(nbuf_clone != NULL))
			qdf_nbuf_free(nbuf_clone);
		return true;
	}

	if (qdf_unlikely(nbuf_clone != NULL))
		qdf_nbuf_free(nbuf_clone);

	return false;
}

1145 qos_wh->i_qos[0] & DP_QOS_TID : 0; 1146 return; 1147 } 1148 1149 is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr); 1150 ether_type = eh->ether_type; 1151 1152 llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(struct ether_header)); 1153 /* 1154 * Check if packet is dot3 or eth2 type. 1155 */ 1156 if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) { 1157 ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN + 1158 sizeof(*llcHdr)); 1159 1160 if (ether_type == htons(ETHERTYPE_8021Q)) { 1161 L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) + 1162 sizeof(*llcHdr); 1163 ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN 1164 + sizeof(*llcHdr) + 1165 sizeof(qdf_net_vlanhdr_t)); 1166 } else { 1167 L3datap = hdr_ptr + sizeof(struct ether_header) + 1168 sizeof(*llcHdr); 1169 } 1170 } else { 1171 if (ether_type == htons(ETHERTYPE_8021Q)) { 1172 evh = (qdf_ethervlan_header_t *) eh; 1173 ether_type = evh->ether_type; 1174 L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t); 1175 } 1176 } 1177 1178 /* 1179 * Find priority from IP TOS DSCP field 1180 */ 1181 if (qdf_nbuf_is_ipv4_pkt(nbuf)) { 1182 qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap; 1183 if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) { 1184 /* Only for unicast frames */ 1185 if (!is_mcast) { 1186 /* send it on VO queue */ 1187 msdu_info->tid = DP_VO_TID; 1188 } 1189 } else { 1190 /* 1191 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7 1192 * from TOS byte. 
1193 */ 1194 tos = ip->ip_tos; 1195 dscp_tid_override = 1; 1196 1197 } 1198 } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) { 1199 /* TODO 1200 * use flowlabel 1201 *igmpmld cases to be handled in phase 2 1202 */ 1203 unsigned long ver_pri_flowlabel; 1204 unsigned long pri; 1205 ver_pri_flowlabel = *(unsigned long *) L3datap; 1206 pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >> 1207 DP_IPV6_PRIORITY_SHIFT; 1208 tos = pri; 1209 dscp_tid_override = 1; 1210 } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) 1211 msdu_info->tid = DP_VO_TID; 1212 else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) { 1213 /* Only for unicast frames */ 1214 if (!is_mcast) { 1215 /* send ucast arp on VO queue */ 1216 msdu_info->tid = DP_VO_TID; 1217 } 1218 } 1219 1220 /* 1221 * Assign all MCAST packets to BE 1222 */ 1223 if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { 1224 if (is_mcast) { 1225 tos = 0; 1226 dscp_tid_override = 1; 1227 } 1228 } 1229 1230 if (dscp_tid_override == 1) { 1231 tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK; 1232 msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos]; 1233 } 1234 return; 1235 } 1236 1237 #ifdef CONVERGED_TDLS_ENABLE 1238 /** 1239 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame 1240 * @tx_desc: TX descriptor 1241 * 1242 * Return: None 1243 */ 1244 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc) 1245 { 1246 if (tx_desc->vdev) { 1247 if (tx_desc->vdev->is_tdls_frame) 1248 tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME; 1249 tx_desc->vdev->is_tdls_frame = false; 1250 } 1251 } 1252 1253 /** 1254 * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer 1255 * @tx_desc: TX descriptor 1256 * @vdev: datapath vdev handle 1257 * 1258 * Return: None 1259 */ 1260 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc, 1261 struct dp_vdev *vdev) 1262 { 1263 struct hal_tx_completion_status ts = {0}; 1264 qdf_nbuf_t nbuf = tx_desc->nbuf; 1265 1266 
hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc); 1267 if (vdev->tx_non_std_data_callback.func) { 1268 qdf_nbuf_set_next(tx_desc->nbuf, NULL); 1269 vdev->tx_non_std_data_callback.func( 1270 vdev->tx_non_std_data_callback.ctxt, 1271 nbuf, ts.status); 1272 return; 1273 } 1274 } 1275 #endif 1276 1277 /** 1278 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL 1279 * @vdev: DP vdev handle 1280 * @nbuf: skb 1281 * @tid: TID from HLOS for overriding default DSCP-TID mapping 1282 * @meta_data: Metadata to the fw 1283 * @tx_q: Tx queue to be used for this Tx frame 1284 * @peer_id: peer_id of the peer in case of NAWDS frames 1285 * @tx_exc_metadata: Handle that holds exception path metadata 1286 * 1287 * Return: NULL on success, 1288 * nbuf when it fails to send 1289 */ 1290 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1291 struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id, 1292 struct cdp_tx_exception_metadata *tx_exc_metadata) 1293 { 1294 struct dp_pdev *pdev = vdev->pdev; 1295 struct dp_soc *soc = pdev->soc; 1296 struct dp_tx_desc_s *tx_desc; 1297 QDF_STATUS status; 1298 struct dp_tx_queue *tx_q = &(msdu_info->tx_queue); 1299 void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng; 1300 uint16_t htt_tcl_metadata = 0; 1301 uint8_t tid = msdu_info->tid; 1302 1303 /* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */ 1304 tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id, 1305 msdu_info, tx_exc_metadata); 1306 if (!tx_desc) { 1307 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1308 "%s Tx_desc prepare Fail vdev %pK queue %d", 1309 __func__, vdev, tx_q->desc_pool_id); 1310 return nbuf; 1311 } 1312 1313 if (qdf_unlikely(soc->cce_disable)) { 1314 if (dp_cce_classify(vdev, nbuf) == true) { 1315 DP_STATS_INC(vdev, tx_i.cce_classified, 1); 1316 tid = DP_VO_TID; 1317 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; 1318 } 1319 } 1320 1321 
dp_tx_update_tdls_flags(tx_desc); 1322 1323 if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) { 1324 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1325 "%s %d : HAL RING Access Failed -- %pK", 1326 __func__, __LINE__, hal_srng); 1327 DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1); 1328 dp_tx_desc_release(tx_desc, tx_q->desc_pool_id); 1329 goto fail_return; 1330 } 1331 1332 if (qdf_unlikely(peer_id == DP_INVALID_PEER)) { 1333 htt_tcl_metadata = vdev->htt_tcl_metadata; 1334 HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1); 1335 } else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) { 1336 HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata, 1337 HTT_TCL_METADATA_TYPE_PEER_BASED); 1338 HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata, 1339 peer_id); 1340 } else 1341 htt_tcl_metadata = vdev->htt_tcl_metadata; 1342 1343 1344 if (msdu_info->exception_fw) { 1345 HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1); 1346 } 1347 1348 /* Enqueue the Tx MSDU descriptor to HW for transmit */ 1349 status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid, 1350 htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata); 1351 1352 if (status != QDF_STATUS_SUCCESS) { 1353 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1354 "%s Tx_hw_enqueue Fail tx_desc %pK queue %d", 1355 __func__, tx_desc, tx_q->ring_id); 1356 dp_tx_desc_release(tx_desc, tx_q->desc_pool_id); 1357 goto fail_return; 1358 } 1359 1360 nbuf = NULL; 1361 1362 fail_return: 1363 if (hif_pm_runtime_get(soc->hif_handle) == 0) { 1364 hal_srng_access_end(soc->hal_soc, hal_srng); 1365 hif_pm_runtime_put(soc->hif_handle); 1366 } else { 1367 hal_srng_access_end_reap(soc->hal_soc, hal_srng); 1368 } 1369 1370 return nbuf; 1371 } 1372 1373 /** 1374 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs 1375 * @vdev: DP vdev handle 1376 * @nbuf: skb 1377 * @msdu_info: MSDU info to be setup in MSDU extension descriptor 1378 * 1379 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL 1380 * 1381 * 
Return: NULL on success, 1382 * nbuf when it fails to send 1383 */ 1384 #if QDF_LOCK_STATS 1385 static noinline 1386 #else 1387 static 1388 #endif 1389 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1390 struct dp_tx_msdu_info_s *msdu_info) 1391 { 1392 uint8_t i; 1393 struct dp_pdev *pdev = vdev->pdev; 1394 struct dp_soc *soc = pdev->soc; 1395 struct dp_tx_desc_s *tx_desc; 1396 bool is_cce_classified = false; 1397 QDF_STATUS status; 1398 uint16_t htt_tcl_metadata = 0; 1399 1400 struct dp_tx_queue *tx_q = &msdu_info->tx_queue; 1401 void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng; 1402 1403 if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) { 1404 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1405 "%s %d : HAL RING Access Failed -- %pK", 1406 __func__, __LINE__, hal_srng); 1407 DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1); 1408 return nbuf; 1409 } 1410 1411 if (qdf_unlikely(soc->cce_disable)) { 1412 is_cce_classified = dp_cce_classify(vdev, nbuf); 1413 if (is_cce_classified) { 1414 DP_STATS_INC(vdev, tx_i.cce_classified, 1); 1415 msdu_info->tid = DP_VO_TID; 1416 } 1417 } 1418 1419 if (msdu_info->frm_type == dp_tx_frm_me) 1420 nbuf = msdu_info->u.sg_info.curr_seg->nbuf; 1421 1422 i = 0; 1423 /* Print statement to track i and num_seg */ 1424 /* 1425 * For each segment (maps to 1 MSDU) , prepare software and hardware 1426 * descriptors using information in msdu_info 1427 */ 1428 while (i < msdu_info->num_seg) { 1429 /* 1430 * Setup Tx descriptor for an MSDU, and MSDU extension 1431 * descriptor 1432 */ 1433 tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info, 1434 tx_q->desc_pool_id); 1435 1436 if (!tx_desc) { 1437 if (msdu_info->frm_type == dp_tx_frm_me) { 1438 dp_tx_me_free_buf(pdev, 1439 (void *)(msdu_info->u.sg_info 1440 .curr_seg->frags[0].vaddr)); 1441 } 1442 goto done; 1443 } 1444 1445 if (msdu_info->frm_type == dp_tx_frm_me) { 1446 tx_desc->me_buffer = 1447 
msdu_info->u.sg_info.curr_seg->frags[0].vaddr; 1448 tx_desc->flags |= DP_TX_DESC_FLAG_ME; 1449 } 1450 1451 if (is_cce_classified) 1452 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; 1453 1454 htt_tcl_metadata = vdev->htt_tcl_metadata; 1455 if (msdu_info->exception_fw) { 1456 HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1); 1457 } 1458 1459 /* 1460 * Enqueue the Tx MSDU descriptor to HW for transmit 1461 */ 1462 status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid, 1463 htt_tcl_metadata, tx_q->ring_id, NULL); 1464 1465 if (status != QDF_STATUS_SUCCESS) { 1466 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1467 "%s Tx_hw_enqueue Fail tx_desc %pK queue %d", 1468 __func__, tx_desc, tx_q->ring_id); 1469 1470 if (tx_desc->flags & DP_TX_DESC_FLAG_ME) 1471 dp_tx_me_free_buf(pdev, tx_desc->me_buffer); 1472 1473 dp_tx_desc_release(tx_desc, tx_q->desc_pool_id); 1474 goto done; 1475 } 1476 1477 /* 1478 * TODO 1479 * if tso_info structure can be modified to have curr_seg 1480 * as first element, following 2 blocks of code (for TSO and SG) 1481 * can be combined into 1 1482 */ 1483 1484 /* 1485 * For frames with multiple segments (TSO, ME), jump to next 1486 * segment. 1487 */ 1488 if (msdu_info->frm_type == dp_tx_frm_tso) { 1489 if (msdu_info->u.tso_info.curr_seg->next) { 1490 msdu_info->u.tso_info.curr_seg = 1491 msdu_info->u.tso_info.curr_seg->next; 1492 1493 /* 1494 * If this is a jumbo nbuf, then increment the number of 1495 * nbuf users for each additional segment of the msdu. 
1496 * This will ensure that the skb is freed only after 1497 * receiving tx completion for all segments of an nbuf 1498 */ 1499 qdf_nbuf_inc_users(nbuf); 1500 1501 /* Check with MCL if this is needed */ 1502 /* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */ 1503 } 1504 } 1505 1506 /* 1507 * For Multicast-Unicast converted packets, 1508 * each converted frame (for a client) is represented as 1509 * 1 segment 1510 */ 1511 if ((msdu_info->frm_type == dp_tx_frm_sg) || 1512 (msdu_info->frm_type == dp_tx_frm_me)) { 1513 if (msdu_info->u.sg_info.curr_seg->next) { 1514 msdu_info->u.sg_info.curr_seg = 1515 msdu_info->u.sg_info.curr_seg->next; 1516 nbuf = msdu_info->u.sg_info.curr_seg->nbuf; 1517 } 1518 } 1519 i++; 1520 } 1521 1522 nbuf = NULL; 1523 1524 done: 1525 if (hif_pm_runtime_get(soc->hif_handle) == 0) { 1526 hal_srng_access_end(soc->hal_soc, hal_srng); 1527 hif_pm_runtime_put(soc->hif_handle); 1528 } else { 1529 hal_srng_access_end_reap(soc->hal_soc, hal_srng); 1530 } 1531 1532 return nbuf; 1533 } 1534 1535 /** 1536 * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info 1537 * for SG frames 1538 * @vdev: DP vdev handle 1539 * @nbuf: skb 1540 * @seg_info: Pointer to Segment info Descriptor to be prepared 1541 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc. 
1542 * 1543 * Return: NULL on success, 1544 * nbuf when it fails to send 1545 */ 1546 static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1547 struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info) 1548 { 1549 uint32_t cur_frag, nr_frags; 1550 qdf_dma_addr_t paddr; 1551 struct dp_tx_sg_info_s *sg_info; 1552 1553 sg_info = &msdu_info->u.sg_info; 1554 nr_frags = qdf_nbuf_get_nr_frags(nbuf); 1555 1556 if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf, 1557 QDF_DMA_TO_DEVICE)) { 1558 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1559 "dma map error"); 1560 DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1); 1561 1562 qdf_nbuf_free(nbuf); 1563 return NULL; 1564 } 1565 1566 paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); 1567 seg_info->frags[0].paddr_lo = paddr; 1568 seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32; 1569 seg_info->frags[0].len = qdf_nbuf_headlen(nbuf); 1570 seg_info->frags[0].vaddr = (void *) nbuf; 1571 1572 for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) { 1573 if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev, 1574 nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) { 1575 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1576 "frag dma map error"); 1577 DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1); 1578 qdf_nbuf_free(nbuf); 1579 return NULL; 1580 } 1581 1582 paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); 1583 seg_info->frags[cur_frag + 1].paddr_lo = paddr; 1584 seg_info->frags[cur_frag + 1].paddr_hi = 1585 ((uint64_t) paddr) >> 32; 1586 seg_info->frags[cur_frag + 1].len = 1587 qdf_nbuf_get_frag_size(nbuf, cur_frag); 1588 } 1589 1590 seg_info->frag_cnt = (cur_frag + 1); 1591 seg_info->total_len = qdf_nbuf_len(nbuf); 1592 seg_info->next = NULL; 1593 1594 sg_info->curr_seg = seg_info; 1595 1596 msdu_info->frm_type = dp_tx_frm_sg; 1597 msdu_info->num_seg = 1; 1598 1599 return nbuf; 1600 } 1601 1602 #ifdef MESH_MODE_SUPPORT 1603 1604 /** 1605 * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf 1606 
and prepare msdu_info for mesh frames. 1607 * @vdev: DP vdev handle 1608 * @nbuf: skb 1609 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc. 1610 * 1611 * Return: NULL on failure, 1612 * nbuf when extracted successfully 1613 */ 1614 static 1615 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1616 struct dp_tx_msdu_info_s *msdu_info) 1617 { 1618 struct meta_hdr_s *mhdr; 1619 struct htt_tx_msdu_desc_ext2_t *meta_data = 1620 (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0]; 1621 1622 mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf); 1623 1624 if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) { 1625 msdu_info->exception_fw = 0; 1626 goto remove_meta_hdr; 1627 } 1628 1629 msdu_info->exception_fw = 1; 1630 1631 qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0); 1632 1633 meta_data->host_tx_desc_pool = 1; 1634 meta_data->update_peer_cache = 1; 1635 meta_data->learning_frame = 1; 1636 1637 if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) { 1638 meta_data->power = mhdr->power; 1639 1640 meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs; 1641 meta_data->nss_mask = 1 << mhdr->rate_info[0].nss; 1642 meta_data->pream_type = mhdr->rate_info[0].preamble_type; 1643 meta_data->retry_limit = mhdr->rate_info[0].max_tries; 1644 1645 meta_data->dyn_bw = 1; 1646 1647 meta_data->valid_pwr = 1; 1648 meta_data->valid_mcs_mask = 1; 1649 meta_data->valid_nss_mask = 1; 1650 meta_data->valid_preamble_type = 1; 1651 meta_data->valid_retries = 1; 1652 meta_data->valid_bw_info = 1; 1653 } 1654 1655 if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) { 1656 meta_data->encrypt_type = 0; 1657 meta_data->valid_encrypt_type = 1; 1658 meta_data->learning_frame = 0; 1659 } 1660 1661 meta_data->valid_key_flags = 1; 1662 meta_data->key_flags = (mhdr->keyix & 0x3); 1663 1664 remove_meta_hdr: 1665 if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) { 1666 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1667 
"qdf_nbuf_pull_head failed"); 1668 qdf_nbuf_free(nbuf); 1669 return NULL; 1670 } 1671 1672 if (mhdr->flags & METAHDR_FLAG_NOQOS) 1673 msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST; 1674 else 1675 msdu_info->tid = qdf_nbuf_get_priority(nbuf); 1676 1677 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, 1678 "%s , Meta hdr %0x %0x %0x %0x %0x %0x" 1679 " tid %d to_fw %d", 1680 __func__, msdu_info->meta_data[0], 1681 msdu_info->meta_data[1], 1682 msdu_info->meta_data[2], 1683 msdu_info->meta_data[3], 1684 msdu_info->meta_data[4], 1685 msdu_info->meta_data[5], 1686 msdu_info->tid, msdu_info->exception_fw); 1687 1688 return nbuf; 1689 } 1690 #else 1691 static 1692 qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1693 struct dp_tx_msdu_info_s *msdu_info) 1694 { 1695 return nbuf; 1696 } 1697 1698 #endif 1699 1700 #ifdef DP_FEATURE_NAWDS_TX 1701 /** 1702 * dp_tx_prepare_nawds(): Tramit NAWDS frames 1703 * @vdev: dp_vdev handle 1704 * @nbuf: skb 1705 * @tid: TID from HLOS for overriding default DSCP-TID mapping 1706 * @tx_q: Tx queue to be used for this Tx frame 1707 * @meta_data: Meta date for mesh 1708 * @peer_id: peer_id of the peer in case of NAWDS frames 1709 * 1710 * return: NULL on success nbuf on failure 1711 */ 1712 static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1713 struct dp_tx_msdu_info_s *msdu_info) 1714 { 1715 struct dp_peer *peer = NULL; 1716 struct dp_soc *soc = vdev->pdev->soc; 1717 struct dp_ast_entry *ast_entry = NULL; 1718 struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf); 1719 uint16_t peer_id = HTT_INVALID_PEER; 1720 1721 struct dp_peer *sa_peer = NULL; 1722 qdf_nbuf_t nbuf_copy; 1723 1724 qdf_spin_lock_bh(&(soc->ast_lock)); 1725 ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost)); 1726 1727 if (ast_entry) 1728 sa_peer = ast_entry->peer; 1729 1730 qdf_spin_unlock_bh(&(soc->ast_lock)); 1731 1732 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) 
{ 1733 if ((peer->peer_ids[0] != HTT_INVALID_PEER) && 1734 (peer->nawds_enabled)) { 1735 if (sa_peer == peer) { 1736 QDF_TRACE(QDF_MODULE_ID_DP, 1737 QDF_TRACE_LEVEL_DEBUG, 1738 " %s: broadcast multicast packet", 1739 __func__); 1740 DP_STATS_INC(peer, tx.nawds_mcast_drop, 1); 1741 continue; 1742 } 1743 1744 nbuf_copy = qdf_nbuf_copy(nbuf); 1745 if (!nbuf_copy) { 1746 QDF_TRACE(QDF_MODULE_ID_DP, 1747 QDF_TRACE_LEVEL_ERROR, 1748 "nbuf copy failed"); 1749 } 1750 1751 peer_id = peer->peer_ids[0]; 1752 nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy, 1753 msdu_info, peer_id, NULL); 1754 if (nbuf_copy != NULL) { 1755 qdf_nbuf_free(nbuf_copy); 1756 continue; 1757 } 1758 DP_STATS_INC_PKT(peer, tx.nawds_mcast, 1759 1, qdf_nbuf_len(nbuf)); 1760 } 1761 } 1762 if (peer_id == HTT_INVALID_PEER) 1763 return nbuf; 1764 1765 return NULL; 1766 } 1767 #endif 1768 1769 /** 1770 * dp_check_exc_metadata() - Checks if parameters are valid 1771 * @tx_exc - holds all exception path parameters 1772 * 1773 * Returns true when all the parameters are valid else false 1774 * 1775 */ 1776 static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc) 1777 { 1778 if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) || 1779 tx_exc->tx_encap_type > htt_cmn_pkt_num_types || 1780 tx_exc->sec_type > cdp_num_sec_types) { 1781 return false; 1782 } 1783 1784 return true; 1785 } 1786 1787 /** 1788 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path 1789 * @vap_dev: DP vdev handle 1790 * @nbuf: skb 1791 * @tx_exc_metadata: Handle that holds exception path meta data 1792 * 1793 * Entry point for Core Tx layer (DP_TX) invoked from 1794 * hard_start_xmit in OSIF/HDD to transmit frames through fw 1795 * 1796 * Return: NULL on success, 1797 * nbuf when it fails to send 1798 */ 1799 qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf, 1800 struct cdp_tx_exception_metadata *tx_exc_metadata) 1801 { 1802 struct ether_header *eh = NULL; 1803 struct 
dp_vdev *vdev = (struct dp_vdev *) vap_dev; 1804 struct dp_tx_msdu_info_s msdu_info; 1805 1806 qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0); 1807 1808 msdu_info.tid = tx_exc_metadata->tid; 1809 1810 eh = (struct ether_header *)qdf_nbuf_data(nbuf); 1811 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 1812 "%s , skb %pM", 1813 __func__, nbuf->data); 1814 1815 DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf)); 1816 1817 if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) { 1818 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1819 "Invalid parameters in exception path"); 1820 goto fail; 1821 } 1822 1823 /* Basic sanity checks for unsupported packets */ 1824 1825 /* MESH mode */ 1826 if (qdf_unlikely(vdev->mesh_vdev)) { 1827 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1828 "Mesh mode is not supported in exception path"); 1829 goto fail; 1830 } 1831 1832 /* TSO or SG */ 1833 if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) || 1834 qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) { 1835 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1836 "TSO and SG are not supported in exception path"); 1837 1838 goto fail; 1839 } 1840 1841 /* RAW */ 1842 if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) { 1843 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1844 "Raw frame is not supported in exception path"); 1845 goto fail; 1846 } 1847 1848 1849 /* Mcast enhancement*/ 1850 if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) { 1851 if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) { 1852 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1853 "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW"); 1854 } 1855 } 1856 1857 /* 1858 * Get HW Queue to use for this frame. 1859 * TCL supports upto 4 DMA rings, out of which 3 rings are 1860 * dedicated for data and 1 for command. 1861 * "queue_id" maps to one hardware ring. 
1862 * With each ring, we also associate a unique Tx descriptor pool 1863 * to minimize lock contention for these resources. 1864 */ 1865 dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue); 1866 1867 /* Single linear frame */ 1868 /* 1869 * If nbuf is a simple linear frame, use send_single function to 1870 * prepare direct-buffer type TCL descriptor and enqueue to TCL 1871 * SRNG. There is no need to setup a MSDU extension descriptor. 1872 */ 1873 nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, 1874 tx_exc_metadata->peer_id, tx_exc_metadata); 1875 1876 return nbuf; 1877 1878 fail: 1879 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 1880 "pkt send failed"); 1881 return nbuf; 1882 } 1883 1884 /** 1885 * dp_tx_send_mesh() - Transmit mesh frame on a given VAP 1886 * @vap_dev: DP vdev handle 1887 * @nbuf: skb 1888 * 1889 * Entry point for Core Tx layer (DP_TX) invoked from 1890 * hard_start_xmit in OSIF/HDD 1891 * 1892 * Return: NULL on success, 1893 * nbuf when it fails to send 1894 */ 1895 #ifdef MESH_MODE_SUPPORT 1896 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf) 1897 { 1898 struct meta_hdr_s *mhdr; 1899 qdf_nbuf_t nbuf_mesh = NULL; 1900 qdf_nbuf_t nbuf_clone = NULL; 1901 struct dp_vdev *vdev = (struct dp_vdev *) vap_dev; 1902 uint8_t no_enc_frame = 0; 1903 1904 nbuf_mesh = qdf_nbuf_unshare(nbuf); 1905 if (nbuf_mesh == NULL) { 1906 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1907 "qdf_nbuf_unshare failed"); 1908 return nbuf; 1909 } 1910 nbuf = nbuf_mesh; 1911 1912 mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf); 1913 1914 if ((vdev->sec_type != cdp_sec_type_none) && 1915 (mhdr->flags & METAHDR_FLAG_NOENCRYPT)) 1916 no_enc_frame = 1; 1917 1918 if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) && 1919 !no_enc_frame) { 1920 nbuf_clone = qdf_nbuf_clone(nbuf); 1921 if (nbuf_clone == NULL) { 1922 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1923 "qdf_nbuf_clone failed"); 1924 return nbuf; 1925 } 1926 qdf_nbuf_set_tx_ftype(nbuf_clone, 
CB_FTYPE_MESH_TX_INFO); 1927 } 1928 1929 if (nbuf_clone) { 1930 if (!dp_tx_send(vap_dev, nbuf_clone)) { 1931 DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1); 1932 } else { 1933 qdf_nbuf_free(nbuf_clone); 1934 } 1935 } 1936 1937 if (no_enc_frame) 1938 qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO); 1939 else 1940 qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID); 1941 1942 nbuf = dp_tx_send(vap_dev, nbuf); 1943 if ((nbuf == NULL) && no_enc_frame) { 1944 DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1); 1945 } 1946 1947 return nbuf; 1948 } 1949 1950 #else 1951 1952 qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf) 1953 { 1954 return dp_tx_send(vap_dev, nbuf); 1955 } 1956 1957 #endif 1958 1959 /** 1960 * dp_tx_send() - Transmit a frame on a given VAP 1961 * @vap_dev: DP vdev handle 1962 * @nbuf: skb 1963 * 1964 * Entry point for Core Tx layer (DP_TX) invoked from 1965 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding 1966 * cases 1967 * 1968 * Return: NULL on success, 1969 * nbuf when it fails to send 1970 */ 1971 qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf) 1972 { 1973 struct ether_header *eh = NULL; 1974 struct dp_tx_msdu_info_s msdu_info; 1975 struct dp_tx_seg_info_s seg_info; 1976 struct dp_vdev *vdev = (struct dp_vdev *) vap_dev; 1977 uint16_t peer_id = HTT_INVALID_PEER; 1978 qdf_nbuf_t nbuf_mesh = NULL; 1979 1980 qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0); 1981 qdf_mem_set(&seg_info, sizeof(seg_info), 0x0); 1982 1983 eh = (struct ether_header *)qdf_nbuf_data(nbuf); 1984 1985 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 1986 "%s , skb %pM", 1987 __func__, nbuf->data); 1988 1989 /* 1990 * Set Default Host TID value to invalid TID 1991 * (TID override disabled) 1992 */ 1993 msdu_info.tid = HTT_TX_EXT_TID_INVALID; 1994 DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf)); 1995 1996 if (qdf_unlikely(vdev->mesh_vdev)) { 1997 nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf, 1998 &msdu_info); 1999 if (nbuf_mesh 
== NULL) { 2000 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 2001 "Extracting mesh metadata failed"); 2002 return nbuf; 2003 } 2004 nbuf = nbuf_mesh; 2005 } 2006 2007 /* 2008 * Get HW Queue to use for this frame. 2009 * TCL supports upto 4 DMA rings, out of which 3 rings are 2010 * dedicated for data and 1 for command. 2011 * "queue_id" maps to one hardware ring. 2012 * With each ring, we also associate a unique Tx descriptor pool 2013 * to minimize lock contention for these resources. 2014 */ 2015 dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue); 2016 2017 /* 2018 * TCL H/W supports 2 DSCP-TID mapping tables. 2019 * Table 1 - Default DSCP-TID mapping table 2020 * Table 2 - 1 DSCP-TID override table 2021 * 2022 * If we need a different DSCP-TID mapping for this vap, 2023 * call tid_classify to extract DSCP/ToS from frame and 2024 * map to a TID and store in msdu_info. This is later used 2025 * to fill in TCL Input descriptor (per-packet TID override). 2026 */ 2027 dp_tx_classify_tid(vdev, nbuf, &msdu_info); 2028 2029 /* 2030 * Classify the frame and call corresponding 2031 * "prepare" function which extracts the segment (TSO) 2032 * and fragmentation information (for TSO , SG, ME, or Raw) 2033 * into MSDU_INFO structure which is later used to fill 2034 * SW and HW descriptors. 
2035 */ 2036 if (qdf_nbuf_is_tso(nbuf)) { 2037 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 2038 "%s TSO frame %pK", __func__, vdev); 2039 DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1, 2040 qdf_nbuf_len(nbuf)); 2041 2042 if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) { 2043 DP_STATS_INC_PKT(vdev, tx_i.tso.dropped_host, 1, 2044 qdf_nbuf_len(nbuf)); 2045 return nbuf; 2046 } 2047 2048 goto send_multiple; 2049 } 2050 2051 /* SG */ 2052 if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) { 2053 nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info); 2054 2055 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 2056 "%s non-TSO SG frame %pK", __func__, vdev); 2057 2058 DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1, 2059 qdf_nbuf_len(nbuf)); 2060 2061 goto send_multiple; 2062 } 2063 2064 #ifdef ATH_SUPPORT_IQUE 2065 /* Mcast to Ucast Conversion*/ 2066 if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) { 2067 eh = (struct ether_header *)qdf_nbuf_data(nbuf); 2068 if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) { 2069 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 2070 "%s Mcast frm for ME %pK", __func__, vdev); 2071 2072 DP_STATS_INC_PKT(vdev, 2073 tx_i.mcast_en.mcast_pkt, 1, 2074 qdf_nbuf_len(nbuf)); 2075 if (dp_tx_prepare_send_me(vdev, nbuf) == 2076 QDF_STATUS_SUCCESS) { 2077 return NULL; 2078 } 2079 } 2080 } 2081 #endif 2082 2083 /* RAW */ 2084 if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) { 2085 nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info); 2086 if (nbuf == NULL) 2087 return NULL; 2088 2089 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 2090 "%s Raw frame %pK", __func__, vdev); 2091 2092 goto send_multiple; 2093 2094 } 2095 2096 /* Single linear frame */ 2097 /* 2098 * If nbuf is a simple linear frame, use send_single function to 2099 * prepare direct-buffer type TCL descriptor and enqueue to TCL 2100 * SRNG. There is no need to setup a MSDU extension descriptor. 
2101 */ 2102 nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL); 2103 2104 return nbuf; 2105 2106 send_multiple: 2107 nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info); 2108 2109 return nbuf; 2110 } 2111 2112 /** 2113 * dp_tx_reinject_handler() - Tx Reinject Handler 2114 * @tx_desc: software descriptor head pointer 2115 * @status : Tx completion status from HTT descriptor 2116 * 2117 * This function reinjects frames back to Target. 2118 * Todo - Host queue needs to be added 2119 * 2120 * Return: none 2121 */ 2122 static 2123 void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status) 2124 { 2125 struct dp_vdev *vdev; 2126 struct dp_peer *peer = NULL; 2127 uint32_t peer_id = HTT_INVALID_PEER; 2128 qdf_nbuf_t nbuf = tx_desc->nbuf; 2129 qdf_nbuf_t nbuf_copy = NULL; 2130 struct dp_tx_msdu_info_s msdu_info; 2131 struct dp_peer *sa_peer = NULL; 2132 struct dp_ast_entry *ast_entry = NULL; 2133 struct dp_soc *soc = NULL; 2134 struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf); 2135 #ifdef WDS_VENDOR_EXTENSION 2136 int is_mcast = 0, is_ucast = 0; 2137 int num_peers_3addr = 0; 2138 struct ether_header *eth_hdr = (struct ether_header *)(qdf_nbuf_data(nbuf)); 2139 struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf)); 2140 #endif 2141 2142 vdev = tx_desc->vdev; 2143 soc = vdev->pdev->soc; 2144 2145 qdf_assert(vdev); 2146 2147 qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0); 2148 2149 dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue); 2150 2151 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 2152 "%s Tx reinject path", __func__); 2153 2154 DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1, 2155 qdf_nbuf_len(tx_desc->nbuf)); 2156 2157 qdf_spin_lock_bh(&(soc->ast_lock)); 2158 2159 ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost)); 2160 2161 if (ast_entry) 2162 sa_peer = ast_entry->peer; 2163 2164 qdf_spin_unlock_bh(&(soc->ast_lock)); 2165 2166 #ifdef WDS_VENDOR_EXTENSION 
2167 if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { 2168 is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0; 2169 } else { 2170 is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0; 2171 } 2172 is_ucast = !is_mcast; 2173 2174 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { 2175 if (peer->bss_peer) 2176 continue; 2177 2178 /* Detect wds peers that use 3-addr framing for mcast. 2179 * if there are any, the bss_peer is used to send the 2180 * the mcast frame using 3-addr format. all wds enabled 2181 * peers that use 4-addr framing for mcast frames will 2182 * be duplicated and sent as 4-addr frames below. 2183 */ 2184 if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) { 2185 num_peers_3addr = 1; 2186 break; 2187 } 2188 } 2189 #endif 2190 2191 if (qdf_unlikely(vdev->mesh_vdev)) { 2192 DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf); 2193 } else { 2194 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { 2195 if ((peer->peer_ids[0] != HTT_INVALID_PEER) && 2196 #ifdef WDS_VENDOR_EXTENSION 2197 /* 2198 * . if 3-addr STA, then send on BSS Peer 2199 * . if Peer WDS enabled and accept 4-addr mcast, 2200 * send mcast on that peer only 2201 * . 
if Peer WDS enabled and accept 4-addr ucast, 2202 * send ucast on that peer only 2203 */ 2204 ((peer->bss_peer && num_peers_3addr && is_mcast) || 2205 (peer->wds_enabled && 2206 ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) || 2207 (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) { 2208 #else 2209 ((peer->bss_peer && 2210 !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) || 2211 peer->nawds_enabled)) { 2212 #endif 2213 peer_id = DP_INVALID_PEER; 2214 2215 if (peer->nawds_enabled) { 2216 peer_id = peer->peer_ids[0]; 2217 if (sa_peer == peer) { 2218 QDF_TRACE( 2219 QDF_MODULE_ID_DP, 2220 QDF_TRACE_LEVEL_DEBUG, 2221 " %s: multicast packet", 2222 __func__); 2223 DP_STATS_INC(peer, 2224 tx.nawds_mcast_drop, 1); 2225 continue; 2226 } 2227 } 2228 2229 nbuf_copy = qdf_nbuf_copy(nbuf); 2230 2231 if (!nbuf_copy) { 2232 QDF_TRACE(QDF_MODULE_ID_DP, 2233 QDF_TRACE_LEVEL_DEBUG, 2234 FL("nbuf copy failed")); 2235 break; 2236 } 2237 2238 nbuf_copy = dp_tx_send_msdu_single(vdev, 2239 nbuf_copy, 2240 &msdu_info, 2241 peer_id, 2242 NULL); 2243 2244 if (nbuf_copy) { 2245 QDF_TRACE(QDF_MODULE_ID_DP, 2246 QDF_TRACE_LEVEL_DEBUG, 2247 FL("pkt send failed")); 2248 qdf_nbuf_free(nbuf_copy); 2249 } else { 2250 if (peer_id != DP_INVALID_PEER) 2251 DP_STATS_INC_PKT(peer, 2252 tx.nawds_mcast, 2253 1, qdf_nbuf_len(nbuf)); 2254 } 2255 } 2256 } 2257 } 2258 2259 if (vdev->nawds_enabled) { 2260 peer_id = DP_INVALID_PEER; 2261 2262 DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast, 2263 1, qdf_nbuf_len(nbuf)); 2264 2265 nbuf = dp_tx_send_msdu_single(vdev, 2266 nbuf, 2267 &msdu_info, 2268 peer_id, NULL); 2269 2270 if (nbuf) { 2271 QDF_TRACE(QDF_MODULE_ID_DP, 2272 QDF_TRACE_LEVEL_DEBUG, 2273 FL("pkt send failed")); 2274 qdf_nbuf_free(nbuf); 2275 } 2276 } else 2277 qdf_nbuf_free(nbuf); 2278 2279 dp_tx_desc_release(tx_desc, tx_desc->pool_id); 2280 } 2281 2282 /** 2283 * dp_tx_inspect_handler() - Tx Inspect Handler 2284 * @tx_desc: software descriptor head pointer 2285 * @status : Tx completion status from HTT 
descriptor 2286 * 2287 * Handles Tx frames sent back to Host for inspection 2288 * (ProxyARP) 2289 * 2290 * Return: none 2291 */ 2292 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status) 2293 { 2294 2295 struct dp_soc *soc; 2296 struct dp_pdev *pdev = tx_desc->pdev; 2297 2298 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 2299 "%s Tx inspect path", 2300 __func__); 2301 2302 qdf_assert(pdev); 2303 2304 soc = pdev->soc; 2305 2306 DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1, 2307 qdf_nbuf_len(tx_desc->nbuf)); 2308 2309 DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf); 2310 dp_tx_desc_release(tx_desc, tx_desc->pool_id); 2311 } 2312 2313 #ifdef FEATURE_PERPKT_INFO 2314 /** 2315 * dp_get_completion_indication_for_stack() - send completion to stack 2316 * @soc : dp_soc handle 2317 * @pdev: dp_pdev handle 2318 * @peer: dp peer handle 2319 * @peer_id: peer_id of the peer for which completion came 2320 * @ppdu_id: ppdu_id 2321 * @first_msdu: first msdu 2322 * @last_msdu: last msdu 2323 * @netbuf: Buffer pointer for free 2324 * 2325 * This function is used for indication whether buffer needs to be 2326 * send to stack for free or not 2327 */ 2328 QDF_STATUS 2329 dp_get_completion_indication_for_stack(struct dp_soc *soc, 2330 struct dp_pdev *pdev, 2331 struct dp_peer *peer, uint16_t peer_id, 2332 uint32_t ppdu_id, uint8_t first_msdu, 2333 uint8_t last_msdu, qdf_nbuf_t netbuf) 2334 { 2335 struct tx_capture_hdr *ppdu_hdr; 2336 struct ether_header *eh; 2337 2338 if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode)) 2339 return QDF_STATUS_E_NOSUPPORT; 2340 2341 if (!peer) { 2342 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2343 FL("Peer Invalid")); 2344 return QDF_STATUS_E_INVAL; 2345 } 2346 2347 if (pdev->mcopy_mode) { 2348 if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) && 2349 (pdev->m_copy_id.tx_peer_id == peer_id)) { 2350 return QDF_STATUS_E_INVAL; 2351 } 2352 2353 pdev->m_copy_id.tx_ppdu_id = ppdu_id; 2354 
pdev->m_copy_id.tx_peer_id = peer_id; 2355 } 2356 2357 eh = (struct ether_header *)qdf_nbuf_data(netbuf); 2358 2359 if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) { 2360 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2361 FL("No headroom")); 2362 return QDF_STATUS_E_NOMEM; 2363 } 2364 2365 ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf); 2366 qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw, 2367 IEEE80211_ADDR_LEN); 2368 if (peer->bss_peer) { 2369 qdf_mem_copy(ppdu_hdr->ra, eh->ether_dhost, IEEE80211_ADDR_LEN); 2370 } else { 2371 qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw, 2372 IEEE80211_ADDR_LEN); 2373 } 2374 2375 ppdu_hdr->ppdu_id = ppdu_id; 2376 ppdu_hdr->peer_id = peer_id; 2377 ppdu_hdr->first_msdu = first_msdu; 2378 ppdu_hdr->last_msdu = last_msdu; 2379 2380 return QDF_STATUS_SUCCESS; 2381 } 2382 2383 2384 /** 2385 * dp_send_completion_to_stack() - send completion to stack 2386 * @soc : dp_soc handle 2387 * @pdev: dp_pdev handle 2388 * @peer_id: peer_id of the peer for which completion came 2389 * @ppdu_id: ppdu_id 2390 * @netbuf: Buffer pointer for free 2391 * 2392 * This function is used to send completion to stack 2393 * to free buffer 2394 */ 2395 void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev, 2396 uint16_t peer_id, uint32_t ppdu_id, 2397 qdf_nbuf_t netbuf) 2398 { 2399 dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc, 2400 netbuf, peer_id, 2401 WDI_NO_VAL, pdev->pdev_id); 2402 } 2403 #else 2404 static QDF_STATUS 2405 dp_get_completion_indication_for_stack(struct dp_soc *soc, 2406 struct dp_pdev *pdev, 2407 struct dp_peer *peer, uint16_t peer_id, 2408 uint32_t ppdu_id, uint8_t first_msdu, 2409 uint8_t last_msdu, qdf_nbuf_t netbuf) 2410 { 2411 return QDF_STATUS_E_NOSUPPORT; 2412 } 2413 2414 static void 2415 dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev, 2416 uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf) 2417 { 2418 } 2419 #endif 2420 2421 /** 2422 * 
dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor 2423 * @soc: Soc handle 2424 * @desc: software Tx descriptor to be processed 2425 * 2426 * Return: none 2427 */ 2428 static inline void dp_tx_comp_free_buf(struct dp_soc *soc, 2429 struct dp_tx_desc_s *desc) 2430 { 2431 struct dp_vdev *vdev = desc->vdev; 2432 qdf_nbuf_t nbuf = desc->nbuf; 2433 2434 /* If it is TDLS mgmt, don't unmap or free the frame */ 2435 if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) 2436 return dp_non_std_tx_comp_free_buff(desc, vdev); 2437 2438 /* 0 : MSDU buffer, 1 : MLE */ 2439 if (desc->msdu_ext_desc) { 2440 /* TSO free */ 2441 if (hal_tx_ext_desc_get_tso_enable( 2442 desc->msdu_ext_desc->vaddr)) { 2443 /* unmap eash TSO seg before free the nbuf */ 2444 dp_tx_tso_unmap_segment(soc, desc); 2445 qdf_nbuf_free(nbuf); 2446 return; 2447 } 2448 } 2449 2450 qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE); 2451 2452 if (qdf_likely(!vdev->mesh_vdev)) 2453 qdf_nbuf_free(nbuf); 2454 else { 2455 if (desc->flags & DP_TX_DESC_FLAG_TO_FW) { 2456 qdf_nbuf_free(nbuf); 2457 DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1); 2458 } else 2459 vdev->osif_tx_free_ext((nbuf)); 2460 } 2461 } 2462 2463 /** 2464 * dp_tx_mec_handler() - Tx MEC Notify Handler 2465 * @vdev: pointer to dp dev handler 2466 * @status : Tx completion status from HTT descriptor 2467 * 2468 * Handles MEC notify event sent from fw to Host 2469 * 2470 * Return: none 2471 */ 2472 #ifdef FEATURE_WDS 2473 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status) 2474 { 2475 2476 struct dp_soc *soc; 2477 uint32_t flags = IEEE80211_NODE_F_WDS_HM; 2478 struct dp_peer *peer; 2479 uint8_t mac_addr[DP_MAC_ADDR_LEN], i; 2480 2481 if (!vdev->wds_enabled) 2482 return; 2483 2484 /* MEC required only in STA mode */ 2485 if (vdev->opmode != wlan_op_mode_sta) 2486 return; 2487 2488 soc = vdev->pdev->soc; 2489 qdf_spin_lock_bh(&soc->peer_ref_mutex); 2490 peer = TAILQ_FIRST(&vdev->peer_list); 2491 qdf_spin_unlock_bh(&soc->peer_ref_mutex); 
2492 2493 if (!peer) { 2494 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 2495 FL("peer is NULL")); 2496 return; 2497 } 2498 2499 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 2500 "%s Tx MEC Handler", 2501 __func__); 2502 2503 for (i = 0; i < DP_MAC_ADDR_LEN; i++) 2504 mac_addr[(DP_MAC_ADDR_LEN - 1) - i] = 2505 status[(DP_MAC_ADDR_LEN - 2) + i]; 2506 2507 if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN)) 2508 dp_peer_add_ast(soc, 2509 peer, 2510 mac_addr, 2511 CDP_TXRX_AST_TYPE_MEC, 2512 flags); 2513 } 2514 #endif 2515 2516 /** 2517 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler 2518 * @tx_desc: software descriptor head pointer 2519 * @status : Tx completion status from HTT descriptor 2520 * 2521 * This function will process HTT Tx indication messages from Target 2522 * 2523 * Return: none 2524 */ 2525 static 2526 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status) 2527 { 2528 uint8_t tx_status; 2529 struct dp_pdev *pdev; 2530 struct dp_vdev *vdev; 2531 struct dp_soc *soc; 2532 uint32_t *htt_status_word = (uint32_t *) status; 2533 2534 qdf_assert(tx_desc->pdev); 2535 2536 pdev = tx_desc->pdev; 2537 vdev = tx_desc->vdev; 2538 soc = pdev->soc; 2539 2540 tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]); 2541 2542 switch (tx_status) { 2543 case HTT_TX_FW2WBM_TX_STATUS_OK: 2544 case HTT_TX_FW2WBM_TX_STATUS_DROP: 2545 case HTT_TX_FW2WBM_TX_STATUS_TTL: 2546 { 2547 dp_tx_comp_free_buf(soc, tx_desc); 2548 dp_tx_desc_release(tx_desc, tx_desc->pool_id); 2549 break; 2550 } 2551 case HTT_TX_FW2WBM_TX_STATUS_REINJECT: 2552 { 2553 dp_tx_reinject_handler(tx_desc, status); 2554 break; 2555 } 2556 case HTT_TX_FW2WBM_TX_STATUS_INSPECT: 2557 { 2558 dp_tx_inspect_handler(tx_desc, status); 2559 break; 2560 } 2561 case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY: 2562 { 2563 dp_tx_mec_handler(vdev, status); 2564 break; 2565 } 2566 default: 2567 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 
2568 "%s Invalid HTT tx_status %d", 2569 __func__, tx_status); 2570 break; 2571 } 2572 } 2573 2574 #ifdef MESH_MODE_SUPPORT 2575 /** 2576 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats 2577 * in mesh meta header 2578 * @tx_desc: software descriptor head pointer 2579 * @ts: pointer to tx completion stats 2580 * Return: none 2581 */ 2582 static 2583 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc, 2584 struct hal_tx_completion_status *ts) 2585 { 2586 struct meta_hdr_s *mhdr; 2587 qdf_nbuf_t netbuf = tx_desc->nbuf; 2588 2589 if (!tx_desc->msdu_ext_desc) { 2590 if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) { 2591 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2592 "netbuf %pK offset %d", 2593 netbuf, tx_desc->pkt_offset); 2594 return; 2595 } 2596 } 2597 if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) { 2598 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2599 "netbuf %pK offset %d", netbuf, 2600 sizeof(struct meta_hdr_s)); 2601 return; 2602 } 2603 2604 mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf); 2605 mhdr->rssi = ts->ack_frame_rssi; 2606 mhdr->channel = tx_desc->pdev->operating_channel; 2607 } 2608 2609 #else 2610 static 2611 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc, 2612 struct hal_tx_completion_status *ts) 2613 { 2614 } 2615 2616 #endif 2617 2618 /** 2619 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications 2620 * @peer: Handle to DP peer 2621 * @ts: pointer to HAL Tx completion stats 2622 * @length: MSDU length 2623 * 2624 * Return: None 2625 */ 2626 static void dp_tx_update_peer_stats(struct dp_peer *peer, 2627 struct hal_tx_completion_status *ts, uint32_t length) 2628 { 2629 struct dp_pdev *pdev = peer->vdev->pdev; 2630 struct dp_soc *soc = pdev->soc; 2631 uint8_t mcs, pkt_type; 2632 2633 mcs = ts->mcs; 2634 pkt_type = ts->pkt_type; 2635 2636 if (!ts->release_src == HAL_TX_COMP_RELEASE_SOURCE_TQM) 2637 return; 
2638 2639 if (peer->bss_peer) { 2640 DP_STATS_INC_PKT(peer, tx.mcast, 1, length); 2641 } else { 2642 DP_STATS_INC_PKT(peer, tx.ucast, 1, length); 2643 } 2644 2645 DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length); 2646 2647 DP_STATS_INCC_PKT(peer, tx.tx_success, 1, length, 2648 (ts->status == HAL_TX_TQM_RR_FRAME_ACKED)); 2649 2650 DP_STATS_INCC(peer, tx.dropped.age_out, 1, 2651 (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED)); 2652 2653 DP_STATS_INCC(peer, tx.dropped.fw_rem, 1, 2654 (ts->status == HAL_TX_TQM_RR_REM_CMD_REM)); 2655 2656 DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1, 2657 (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX)); 2658 2659 DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1, 2660 (ts->status == HAL_TX_TQM_RR_REM_CMD_TX)); 2661 2662 DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1, 2663 (ts->status == HAL_TX_TQM_RR_FW_REASON1)); 2664 2665 DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1, 2666 (ts->status == HAL_TX_TQM_RR_FW_REASON2)); 2667 2668 DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1, 2669 (ts->status == HAL_TX_TQM_RR_FW_REASON3)); 2670 2671 if (!ts->status == HAL_TX_TQM_RR_FRAME_ACKED) 2672 return; 2673 2674 DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma); 2675 2676 DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu); 2677 DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu); 2678 2679 if (!(soc->process_tx_status)) 2680 return; 2681 2682 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 2683 ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A))); 2684 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 2685 ((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A))); 2686 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 2687 ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B))); 2688 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 2689 ((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B))); 2690 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 2691 ((mcs >= MAX_MCS_11A) && (pkt_type == 
DOT11_N))); 2692 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 2693 ((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N))); 2694 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 2695 ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC))); 2696 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 2697 ((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC))); 2698 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 2699 ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX))); 2700 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 2701 ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX))); 2702 DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1); 2703 DP_STATS_INC(peer, tx.bw[ts->bw], 1); 2704 DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi); 2705 DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1); 2706 DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc); 2707 DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc); 2708 DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1); 2709 2710 if (soc->cdp_soc.ol_ops->update_dp_stats) { 2711 soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev, 2712 &peer->stats, ts->peer_id, 2713 UPDATE_PEER_STATS); 2714 } 2715 } 2716 2717 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 2718 /** 2719 * dp_tx_flow_pool_lock() - take flow pool lock 2720 * @soc: core txrx main context 2721 * @tx_desc: tx desc 2722 * 2723 * Return: None 2724 */ 2725 static inline 2726 void dp_tx_flow_pool_lock(struct dp_soc *soc, 2727 struct dp_tx_desc_s *tx_desc) 2728 { 2729 struct dp_tx_desc_pool_s *pool; 2730 uint8_t desc_pool_id; 2731 2732 desc_pool_id = tx_desc->pool_id; 2733 pool = &soc->tx_desc[desc_pool_id]; 2734 2735 qdf_spin_lock_bh(&pool->flow_pool_lock); 2736 } 2737 2738 /** 2739 * dp_tx_flow_pool_unlock() - release flow pool lock 2740 * @soc: core txrx main context 2741 * @tx_desc: tx desc 2742 * 2743 * Return: None 2744 */ 2745 static inline 2746 void dp_tx_flow_pool_unlock(struct dp_soc *soc, 2747 struct dp_tx_desc_s *tx_desc) 
2748 { 2749 struct dp_tx_desc_pool_s *pool; 2750 uint8_t desc_pool_id; 2751 2752 desc_pool_id = tx_desc->pool_id; 2753 pool = &soc->tx_desc[desc_pool_id]; 2754 2755 qdf_spin_unlock_bh(&pool->flow_pool_lock); 2756 } 2757 #else 2758 static inline 2759 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) 2760 { 2761 } 2762 2763 static inline 2764 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) 2765 { 2766 } 2767 #endif 2768 2769 /** 2770 * dp_tx_notify_completion() - Notify tx completion for this desc 2771 * @soc: core txrx main context 2772 * @tx_desc: tx desc 2773 * @netbuf: buffer 2774 * 2775 * Return: none 2776 */ 2777 static inline void dp_tx_notify_completion(struct dp_soc *soc, 2778 struct dp_tx_desc_s *tx_desc, 2779 qdf_nbuf_t netbuf) 2780 { 2781 void *osif_dev; 2782 ol_txrx_completion_fp tx_compl_cbk = NULL; 2783 2784 qdf_assert(tx_desc); 2785 2786 dp_tx_flow_pool_lock(soc, tx_desc); 2787 2788 if (!tx_desc->vdev || 2789 !tx_desc->vdev->osif_vdev) { 2790 dp_tx_flow_pool_unlock(soc, tx_desc); 2791 return; 2792 } 2793 2794 osif_dev = tx_desc->vdev->osif_vdev; 2795 tx_compl_cbk = tx_desc->vdev->tx_comp; 2796 dp_tx_flow_pool_unlock(soc, tx_desc); 2797 2798 if (tx_compl_cbk) 2799 tx_compl_cbk(netbuf, osif_dev); 2800 } 2801 2802 /** dp_tx_sojourn_stats_process() - Collect sojourn stats 2803 * @pdev: pdev handle 2804 * @tid: tid value 2805 * @txdesc_ts: timestamp from txdesc 2806 * @ppdu_id: ppdu id 2807 * 2808 * Return: none 2809 */ 2810 #ifdef FEATURE_PERPKT_INFO 2811 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev, 2812 uint8_t tid, 2813 uint64_t txdesc_ts, 2814 uint32_t ppdu_id) 2815 { 2816 uint64_t delta_ms; 2817 struct cdp_tx_sojourn_stats *sojourn_stats; 2818 2819 if (pdev->enhanced_stats_en == 0) 2820 return; 2821 2822 if (pdev->sojourn_stats.ppdu_seq_id == 0) 2823 pdev->sojourn_stats.ppdu_seq_id = ppdu_id; 2824 2825 if (ppdu_id != pdev->sojourn_stats.ppdu_seq_id) { 2826 if 
(!pdev->sojourn_buf) 2827 return; 2828 2829 sojourn_stats = (struct cdp_tx_sojourn_stats *) 2830 qdf_nbuf_data(pdev->sojourn_buf); 2831 2832 qdf_mem_copy(sojourn_stats, &pdev->sojourn_stats, 2833 sizeof(struct cdp_tx_sojourn_stats)); 2834 2835 qdf_mem_zero(&pdev->sojourn_stats, 2836 sizeof(struct cdp_tx_sojourn_stats)); 2837 2838 dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc, 2839 pdev->sojourn_buf, HTT_INVALID_PEER, 2840 WDI_NO_VAL, pdev->pdev_id); 2841 2842 pdev->sojourn_stats.ppdu_seq_id = ppdu_id; 2843 } 2844 2845 if (tid == HTT_INVALID_TID) 2846 return; 2847 2848 delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) - 2849 txdesc_ts; 2850 qdf_ewma_tx_lag_add(&pdev->sojourn_stats.avg_sojourn_msdu[tid], 2851 delta_ms); 2852 pdev->sojourn_stats.sum_sojourn_msdu[tid] += delta_ms; 2853 pdev->sojourn_stats.num_msdus[tid]++; 2854 } 2855 #else 2856 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev, 2857 uint8_t tid, 2858 uint64_t txdesc_ts, 2859 uint32_t ppdu_id) 2860 { 2861 } 2862 #endif 2863 2864 /** 2865 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info 2866 * @tx_desc: software descriptor head pointer 2867 * @length: packet length 2868 * @peer: peer handle 2869 * 2870 * Return: none 2871 */ 2872 static inline 2873 void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc, 2874 uint32_t length, struct dp_peer *peer) 2875 { 2876 struct hal_tx_completion_status ts = {0}; 2877 struct dp_soc *soc = NULL; 2878 struct dp_vdev *vdev = tx_desc->vdev; 2879 struct ether_header *eh = 2880 (struct ether_header *)qdf_nbuf_data(tx_desc->nbuf); 2881 2882 if (!vdev) { 2883 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 2884 "invalid vdev"); 2885 goto out; 2886 } 2887 2888 hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc); 2889 2890 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 2891 "-------------------- \n" 2892 "Tx Completion Stats: \n" 2893 "-------------------- \n" 2894 "ack_frame_rssi = %d \n" 2895 
"first_msdu = %d \n" 2896 "last_msdu = %d \n" 2897 "msdu_part_of_amsdu = %d \n" 2898 "rate_stats valid = %d \n" 2899 "bw = %d \n" 2900 "pkt_type = %d \n" 2901 "stbc = %d \n" 2902 "ldpc = %d \n" 2903 "sgi = %d \n" 2904 "mcs = %d \n" 2905 "ofdma = %d \n" 2906 "tones_in_ru = %d \n" 2907 "tsf = %d \n" 2908 "ppdu_id = %d \n" 2909 "transmit_cnt = %d \n" 2910 "tid = %d \n" 2911 "peer_id = %d ", 2912 ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu, 2913 ts.msdu_part_of_amsdu, ts.valid, ts.bw, 2914 ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi, 2915 ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf, 2916 ts.ppdu_id, ts.transmit_cnt, ts.tid, 2917 ts.peer_id); 2918 2919 soc = vdev->pdev->soc; 2920 2921 /* Update SoC level stats */ 2922 DP_STATS_INCC(soc, tx.dropped_fw_removed, 1, 2923 (ts.status == HAL_TX_TQM_RR_REM_CMD_REM)); 2924 2925 /* Update per-packet stats */ 2926 if (qdf_unlikely(vdev->mesh_vdev) && 2927 !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) 2928 dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts); 2929 2930 /* Update peer level stats */ 2931 if (!peer) { 2932 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 2933 "invalid peer"); 2934 DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length); 2935 goto out; 2936 } 2937 2938 if (qdf_likely(peer->vdev->tx_encap_type == 2939 htt_cmn_pkt_type_ethernet)) { 2940 if (peer->bss_peer && IEEE80211_IS_BROADCAST(eh->ether_dhost)) 2941 DP_STATS_INC_PKT(peer, tx.bcast, 1, length); 2942 } 2943 2944 dp_tx_sojourn_stats_process(vdev->pdev, ts.tid, 2945 tx_desc->timestamp, 2946 ts.ppdu_id); 2947 2948 dp_tx_update_peer_stats(peer, &ts, length); 2949 2950 out: 2951 return; 2952 } 2953 /** 2954 * dp_tx_comp_process_desc() - Tx complete software descriptor handler 2955 * @soc: core txrx main context 2956 * @comp_head: software descriptor head pointer 2957 * 2958 * This function will process batch of descriptors reaped by dp_tx_comp_handler 2959 * and release the software descriptors after processing is complete 2960 * 2961 * Return: none 2962 */ 2963 static 
void dp_tx_comp_process_desc(struct dp_soc *soc, 2964 struct dp_tx_desc_s *comp_head) 2965 { 2966 struct dp_tx_desc_s *desc; 2967 struct dp_tx_desc_s *next; 2968 struct hal_tx_completion_status ts = {0}; 2969 uint32_t length; 2970 struct dp_peer *peer; 2971 2972 DP_HIST_INIT(); 2973 desc = comp_head; 2974 2975 while (desc) { 2976 hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc); 2977 peer = dp_peer_find_by_id(soc, ts.peer_id); 2978 length = qdf_nbuf_len(desc->nbuf); 2979 2980 /* check tx completion notification */ 2981 if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(desc->nbuf)) 2982 dp_tx_notify_completion(soc, desc, desc->nbuf); 2983 2984 dp_tx_comp_process_tx_status(desc, length, peer); 2985 2986 DPTRACE(qdf_dp_trace_ptr 2987 (desc->nbuf, 2988 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD, 2989 QDF_TRACE_DEFAULT_PDEV_ID, 2990 qdf_nbuf_data_addr(desc->nbuf), 2991 sizeof(qdf_nbuf_data(desc->nbuf)), 2992 desc->id, ts.status) 2993 ); 2994 2995 /*currently m_copy/tx_capture is not supported for scatter gather packets*/ 2996 if (!(desc->msdu_ext_desc) && 2997 (dp_get_completion_indication_for_stack(soc, desc->pdev, 2998 peer, ts.peer_id, ts.ppdu_id, 2999 ts.first_msdu, ts.last_msdu, 3000 desc->nbuf) == QDF_STATUS_SUCCESS)) { 3001 qdf_nbuf_unmap(soc->osdev, desc->nbuf, 3002 QDF_DMA_TO_DEVICE); 3003 3004 dp_send_completion_to_stack(soc, desc->pdev, ts.peer_id, 3005 ts.ppdu_id, desc->nbuf); 3006 } else { 3007 dp_tx_comp_free_buf(soc, desc); 3008 } 3009 3010 if (peer) 3011 dp_peer_unref_del_find_by_id(peer); 3012 3013 DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id); 3014 3015 next = desc->next; 3016 3017 dp_tx_desc_release(desc, desc->pool_id); 3018 desc = next; 3019 } 3020 3021 DP_TX_HIST_STATS_PER_PDEV(); 3022 } 3023 3024 /** 3025 * dp_tx_comp_handler() - Tx completion handler 3026 * @soc: core txrx main context 3027 * @ring_id: completion ring id 3028 * @quota: No. 
of packets/descriptors that can be serviced in one loop 3029 * 3030 * This function will collect hardware release ring element contents and 3031 * handle descriptor contents. Based on contents, free packet or handle error 3032 * conditions 3033 * 3034 * Return: none 3035 */ 3036 uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota) 3037 { 3038 void *tx_comp_hal_desc; 3039 uint8_t buffer_src; 3040 uint8_t pool_id; 3041 uint32_t tx_desc_id; 3042 struct dp_tx_desc_s *tx_desc = NULL; 3043 struct dp_tx_desc_s *head_desc = NULL; 3044 struct dp_tx_desc_s *tail_desc = NULL; 3045 uint32_t num_processed; 3046 uint32_t count; 3047 3048 if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) { 3049 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3050 "%s %d : HAL RING Access Failed -- %pK", 3051 __func__, __LINE__, hal_srng); 3052 return 0; 3053 } 3054 3055 num_processed = 0; 3056 count = 0; 3057 3058 /* Find head descriptor from completion ring */ 3059 while (qdf_likely(tx_comp_hal_desc = 3060 hal_srng_dst_get_next(soc->hal_soc, hal_srng))) { 3061 3062 buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc); 3063 3064 /* If this buffer was not released by TQM or FW, then it is not 3065 * Tx completion indication, assert */ 3066 if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) && 3067 (buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) { 3068 3069 QDF_TRACE(QDF_MODULE_ID_DP, 3070 QDF_TRACE_LEVEL_FATAL, 3071 "Tx comp release_src != TQM | FW"); 3072 3073 qdf_assert_always(0); 3074 } 3075 3076 /* Get descriptor id */ 3077 tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc); 3078 pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >> 3079 DP_TX_DESC_ID_POOL_OS; 3080 3081 if (!dp_tx_is_desc_id_valid(soc, tx_desc_id)) 3082 continue; 3083 3084 /* Find Tx descriptor */ 3085 tx_desc = dp_tx_desc_find(soc, pool_id, 3086 (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >> 3087 DP_TX_DESC_ID_PAGE_OS, 3088 (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >> 3089 
DP_TX_DESC_ID_OFFSET_OS); 3090 3091 /* 3092 * If the release source is FW, process the HTT status 3093 */ 3094 if (qdf_unlikely(buffer_src == 3095 HAL_TX_COMP_RELEASE_SOURCE_FW)) { 3096 uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN]; 3097 hal_tx_comp_get_htt_desc(tx_comp_hal_desc, 3098 htt_tx_status); 3099 dp_tx_process_htt_completion(tx_desc, 3100 htt_tx_status); 3101 } else { 3102 /* Pool id is not matching. Error */ 3103 if (tx_desc->pool_id != pool_id) { 3104 QDF_TRACE(QDF_MODULE_ID_DP, 3105 QDF_TRACE_LEVEL_FATAL, 3106 "Tx Comp pool id %d not matched %d", 3107 pool_id, tx_desc->pool_id); 3108 3109 qdf_assert_always(0); 3110 } 3111 3112 if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) || 3113 !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) { 3114 QDF_TRACE(QDF_MODULE_ID_DP, 3115 QDF_TRACE_LEVEL_FATAL, 3116 "Txdesc invalid, flgs = %x,id = %d", 3117 tx_desc->flags, tx_desc_id); 3118 qdf_assert_always(0); 3119 } 3120 3121 /* First ring descriptor on the cycle */ 3122 if (!head_desc) { 3123 head_desc = tx_desc; 3124 tail_desc = tx_desc; 3125 } 3126 3127 tail_desc->next = tx_desc; 3128 tx_desc->next = NULL; 3129 tail_desc = tx_desc; 3130 3131 /* Collect hw completion contents */ 3132 hal_tx_comp_desc_sync(tx_comp_hal_desc, 3133 &tx_desc->comp, 1); 3134 3135 } 3136 3137 num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK); 3138 3139 /* 3140 * Processed packet count is more than given quota 3141 * stop to processing 3142 */ 3143 if ((num_processed >= quota)) 3144 break; 3145 3146 count++; 3147 } 3148 3149 hal_srng_access_end(soc->hal_soc, hal_srng); 3150 3151 /* Process the reaped descriptors */ 3152 if (head_desc) 3153 dp_tx_comp_process_desc(soc, head_desc); 3154 3155 return num_processed; 3156 } 3157 3158 #ifdef CONVERGED_TDLS_ENABLE 3159 /** 3160 * dp_tx_non_std() - Allow the control-path SW to send data frames 3161 * 3162 * @data_vdev - which vdev should transmit the tx data frames 3163 * @tx_spec - what non-standard handling to apply to the tx data frames 
3164 * @msdu_list - NULL-terminated list of tx MSDUs 3165 * 3166 * Return: NULL on success, 3167 * nbuf when it fails to send 3168 */ 3169 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle, 3170 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list) 3171 { 3172 struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle; 3173 3174 if (tx_spec & OL_TX_SPEC_NO_FREE) 3175 vdev->is_tdls_frame = true; 3176 return dp_tx_send(vdev_handle, msdu_list); 3177 } 3178 #endif 3179 3180 /** 3181 * dp_tx_vdev_attach() - attach vdev to dp tx 3182 * @vdev: virtual device instance 3183 * 3184 * Return: QDF_STATUS_SUCCESS: success 3185 * QDF_STATUS_E_RESOURCES: Error return 3186 */ 3187 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev) 3188 { 3189 /* 3190 * Fill HTT TCL Metadata with Vdev ID and MAC ID 3191 */ 3192 HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata, 3193 HTT_TCL_METADATA_TYPE_VDEV_BASED); 3194 3195 HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata, 3196 vdev->vdev_id); 3197 3198 HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, 3199 DP_SW2HW_MACID(vdev->pdev->pdev_id)); 3200 3201 /* 3202 * Set HTT Extension Valid bit to 0 by default 3203 */ 3204 HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0); 3205 3206 dp_tx_vdev_update_search_flags(vdev); 3207 3208 return QDF_STATUS_SUCCESS; 3209 } 3210 3211 /** 3212 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode 3213 * @vdev: virtual device instance 3214 * 3215 * Return: void 3216 * 3217 */ 3218 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev) 3219 { 3220 struct dp_soc *soc = vdev->pdev->soc; 3221 3222 /* 3223 * Enable both AddrY (SA based search) and AddrX (Da based search) 3224 * for TDLS link 3225 * 3226 * Enable AddrY (SA based search) only for non-WDS STA and 3227 * ProxySTA VAP modes. 
3228 * 3229 * In all other VAP modes, only DA based search should be 3230 * enabled 3231 */ 3232 if (vdev->opmode == wlan_op_mode_sta && 3233 vdev->tdls_link_connected) 3234 vdev->hal_desc_addr_search_flags = 3235 (HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN); 3236 else if ((vdev->opmode == wlan_op_mode_sta && 3237 (!vdev->wds_enabled || vdev->proxysta_vdev))) 3238 vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN; 3239 else 3240 vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN; 3241 3242 /* Set search type only when peer map v2 messaging is enabled 3243 * as we will have the search index (AST hash) only when v2 is 3244 * enabled 3245 */ 3246 if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta) 3247 vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH; 3248 else 3249 vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT; 3250 } 3251 3252 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 3253 static void dp_tx_desc_flush(struct dp_vdev *vdev) 3254 { 3255 } 3256 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */ 3257 3258 /* dp_tx_desc_flush() - release resources associated 3259 * to tx_desc 3260 * @vdev: virtual device instance 3261 * 3262 * This function will free all outstanding Tx buffers, 3263 * including ME buffer for which either free during 3264 * completion didn't happened or completion is not 3265 * received. 
3266 */ 3267 static void dp_tx_desc_flush(struct dp_vdev *vdev) 3268 { 3269 uint8_t i, num_pool; 3270 uint32_t j; 3271 uint32_t num_desc; 3272 struct dp_soc *soc = vdev->pdev->soc; 3273 struct dp_tx_desc_s *tx_desc = NULL; 3274 struct dp_tx_desc_pool_s *tx_desc_pool = NULL; 3275 3276 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); 3277 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); 3278 3279 for (i = 0; i < num_pool; i++) { 3280 for (j = 0; j < num_desc; j++) { 3281 tx_desc_pool = &((soc)->tx_desc[(i)]); 3282 if (tx_desc_pool && 3283 tx_desc_pool->desc_pages.cacheable_pages) { 3284 tx_desc = dp_tx_desc_find(soc, i, 3285 (j & DP_TX_DESC_ID_PAGE_MASK) >> 3286 DP_TX_DESC_ID_PAGE_OS, 3287 (j & DP_TX_DESC_ID_OFFSET_MASK) >> 3288 DP_TX_DESC_ID_OFFSET_OS); 3289 3290 if (tx_desc && (tx_desc->vdev == vdev) && 3291 (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) { 3292 dp_tx_comp_free_buf(soc, tx_desc); 3293 dp_tx_desc_release(tx_desc, i); 3294 } 3295 } 3296 } 3297 } 3298 } 3299 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */ 3300 3301 /** 3302 * dp_tx_vdev_detach() - detach vdev from dp tx 3303 * @vdev: virtual device instance 3304 * 3305 * Return: QDF_STATUS_SUCCESS: success 3306 * QDF_STATUS_E_RESOURCES: Error return 3307 */ 3308 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev) 3309 { 3310 dp_tx_desc_flush(vdev); 3311 return QDF_STATUS_SUCCESS; 3312 } 3313 3314 /** 3315 * dp_tx_pdev_attach() - attach pdev to dp tx 3316 * @pdev: physical device instance 3317 * 3318 * Return: QDF_STATUS_SUCCESS: success 3319 * QDF_STATUS_E_RESOURCES: Error return 3320 */ 3321 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev) 3322 { 3323 struct dp_soc *soc = pdev->soc; 3324 3325 /* Initialize Flow control counters */ 3326 qdf_atomic_init(&pdev->num_tx_exception); 3327 qdf_atomic_init(&pdev->num_tx_outstanding); 3328 3329 if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { 3330 /* Initialize descriptors in TCL Ring */ 3331 hal_tx_init_data_ring(soc->hal_soc, 3332 
soc->tcl_data_ring[pdev->pdev_id].hal_srng); 3333 } 3334 3335 return QDF_STATUS_SUCCESS; 3336 } 3337 3338 /** 3339 * dp_tx_pdev_detach() - detach pdev from dp tx 3340 * @pdev: physical device instance 3341 * 3342 * Return: QDF_STATUS_SUCCESS: success 3343 * QDF_STATUS_E_RESOURCES: Error return 3344 */ 3345 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev) 3346 { 3347 dp_tx_me_exit(pdev); 3348 return QDF_STATUS_SUCCESS; 3349 } 3350 3351 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 3352 /* Pools will be allocated dynamically */ 3353 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool, 3354 int num_desc) 3355 { 3356 uint8_t i; 3357 3358 for (i = 0; i < num_pool; i++) { 3359 qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock); 3360 soc->tx_desc[i].status = FLOW_POOL_INACTIVE; 3361 } 3362 3363 return 0; 3364 } 3365 3366 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool) 3367 { 3368 uint8_t i; 3369 3370 for (i = 0; i < num_pool; i++) 3371 qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock); 3372 } 3373 #else /* QCA_LL_TX_FLOW_CONTROL_V2! 
*/ 3374 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool, 3375 int num_desc) 3376 { 3377 uint8_t i; 3378 3379 /* Allocate software Tx descriptor pools */ 3380 for (i = 0; i < num_pool; i++) { 3381 if (dp_tx_desc_pool_alloc(soc, i, num_desc)) { 3382 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3383 "%s Tx Desc Pool alloc %d failed %pK", 3384 __func__, i, soc); 3385 return ENOMEM; 3386 } 3387 } 3388 return 0; 3389 } 3390 3391 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool) 3392 { 3393 uint8_t i; 3394 3395 for (i = 0; i < num_pool; i++) { 3396 qdf_assert_always(!soc->tx_desc[i].num_allocated); 3397 if (dp_tx_desc_pool_free(soc, i)) { 3398 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3399 "%s Tx Desc Pool Free failed", __func__); 3400 } 3401 } 3402 } 3403 3404 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */ 3405 3406 /** 3407 * dp_tx_soc_detach() - detach soc from dp tx 3408 * @soc: core txrx main context 3409 * 3410 * This function will detach dp tx into main device context 3411 * will free dp tx resource and initialize resources 3412 * 3413 * Return: QDF_STATUS_SUCCESS: success 3414 * QDF_STATUS_E_RESOURCES: Error return 3415 */ 3416 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc) 3417 { 3418 uint8_t num_pool; 3419 uint16_t num_desc; 3420 uint16_t num_ext_desc; 3421 uint8_t i; 3422 3423 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); 3424 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); 3425 num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx); 3426 3427 dp_tx_flow_control_deinit(soc); 3428 dp_tx_delete_static_pools(soc, num_pool); 3429 3430 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3431 "%s Tx Desc Pool Free num_pool = %d, descs = %d", 3432 __func__, num_pool, num_desc); 3433 3434 for (i = 0; i < num_pool; i++) { 3435 if (dp_tx_ext_desc_pool_free(soc, i)) { 3436 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3437 "%s Tx Ext Desc Pool Free failed", 3438 __func__); 3439 return 
QDF_STATUS_E_RESOURCES; 3440 } 3441 } 3442 3443 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3444 "%s MSDU Ext Desc Pool %d Free descs = %d", 3445 __func__, num_pool, num_ext_desc); 3446 3447 for (i = 0; i < num_pool; i++) { 3448 dp_tx_tso_desc_pool_free(soc, i); 3449 } 3450 3451 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3452 "%s TSO Desc Pool %d Free descs = %d", 3453 __func__, num_pool, num_desc); 3454 3455 3456 for (i = 0; i < num_pool; i++) 3457 dp_tx_tso_num_seg_pool_free(soc, i); 3458 3459 3460 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3461 "%s TSO Num of seg Desc Pool %d Free descs = %d", 3462 __func__, num_pool, num_desc); 3463 3464 return QDF_STATUS_SUCCESS; 3465 } 3466 3467 /** 3468 * dp_tx_soc_attach() - attach soc to dp tx 3469 * @soc: core txrx main context 3470 * 3471 * This function will attach dp tx into main device context 3472 * will allocate dp tx resource and initialize resources 3473 * 3474 * Return: QDF_STATUS_SUCCESS: success 3475 * QDF_STATUS_E_RESOURCES: Error return 3476 */ 3477 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc) 3478 { 3479 uint8_t i; 3480 uint8_t num_pool; 3481 uint32_t num_desc; 3482 uint32_t num_ext_desc; 3483 3484 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); 3485 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); 3486 num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx); 3487 3488 if (dp_tx_alloc_static_pools(soc, num_pool, num_desc)) 3489 goto fail; 3490 3491 dp_tx_flow_control_init(soc); 3492 3493 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3494 "%s Tx Desc Alloc num_pool = %d, descs = %d", 3495 __func__, num_pool, num_desc); 3496 3497 /* Allocate extension tx descriptor pools */ 3498 for (i = 0; i < num_pool; i++) { 3499 if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) { 3500 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3501 "MSDU Ext Desc Pool alloc %d failed %pK", 3502 i, soc); 3503 3504 goto fail; 3505 } 3506 } 3507 3508 QDF_TRACE(QDF_MODULE_ID_DP, 
QDF_TRACE_LEVEL_INFO, 3509 "%s MSDU Ext Desc Alloc %d, descs = %d", 3510 __func__, num_pool, num_ext_desc); 3511 3512 for (i = 0; i < num_pool; i++) { 3513 if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) { 3514 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3515 "TSO Desc Pool alloc %d failed %pK", 3516 i, soc); 3517 3518 goto fail; 3519 } 3520 } 3521 3522 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3523 "%s TSO Desc Alloc %d, descs = %d", 3524 __func__, num_pool, num_desc); 3525 3526 for (i = 0; i < num_pool; i++) { 3527 if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) { 3528 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3529 "TSO Num of seg Pool alloc %d failed %pK", 3530 i, soc); 3531 3532 goto fail; 3533 } 3534 } 3535 3536 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3537 "%s TSO Num of seg pool Alloc %d, descs = %d", 3538 __func__, num_pool, num_desc); 3539 3540 /* Initialize descriptors in TCL Rings */ 3541 if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { 3542 for (i = 0; i < soc->num_tcl_data_rings; i++) { 3543 hal_tx_init_data_ring(soc->hal_soc, 3544 soc->tcl_data_ring[i].hal_srng); 3545 } 3546 } 3547 3548 /* 3549 * todo - Add a runtime config option to enable this. 3550 */ 3551 /* 3552 * Due to multiple issues on NPR EMU, enable it selectively 3553 * only for NPR EMU, should be removed, once NPR platforms 3554 * are stable. 
3555 */ 3556 soc->process_tx_status = CONFIG_PROCESS_TX_STATUS; 3557 3558 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3559 "%s HAL Tx init Success", __func__); 3560 3561 return QDF_STATUS_SUCCESS; 3562 3563 fail: 3564 /* Detach will take care of freeing only allocated resources */ 3565 dp_tx_soc_detach(soc); 3566 return QDF_STATUS_E_RESOURCES; 3567 } 3568 3569 /* 3570 * dp_tx_me_mem_free(): Function to free allocated memory in mcast enahncement 3571 * pdev: pointer to DP PDEV structure 3572 * seg_info_head: Pointer to the head of list 3573 * 3574 * return: void 3575 */ 3576 static void dp_tx_me_mem_free(struct dp_pdev *pdev, 3577 struct dp_tx_seg_info_s *seg_info_head) 3578 { 3579 struct dp_tx_me_buf_t *mc_uc_buf; 3580 struct dp_tx_seg_info_s *seg_info_new = NULL; 3581 qdf_nbuf_t nbuf = NULL; 3582 uint64_t phy_addr; 3583 3584 while (seg_info_head) { 3585 nbuf = seg_info_head->nbuf; 3586 mc_uc_buf = (struct dp_tx_me_buf_t *) 3587 seg_info_head->frags[0].vaddr; 3588 phy_addr = seg_info_head->frags[0].paddr_hi; 3589 phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo; 3590 qdf_mem_unmap_nbytes_single(pdev->soc->osdev, 3591 phy_addr, 3592 QDF_DMA_TO_DEVICE , DP_MAC_ADDR_LEN); 3593 dp_tx_me_free_buf(pdev, mc_uc_buf); 3594 qdf_nbuf_free(nbuf); 3595 seg_info_new = seg_info_head; 3596 seg_info_head = seg_info_head->next; 3597 qdf_mem_free(seg_info_new); 3598 } 3599 } 3600 3601 /** 3602 * dp_tx_me_send_convert_ucast(): function to convert multicast to unicast 3603 * @vdev: DP VDEV handle 3604 * @nbuf: Multicast nbuf 3605 * @newmac: Table of the clients to which packets have to be sent 3606 * @new_mac_cnt: No of clients 3607 * 3608 * return: no of converted packets 3609 */ 3610 uint16_t 3611 dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf, 3612 uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt) 3613 { 3614 struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle; 3615 struct dp_pdev *pdev = vdev->pdev; 3616 struct ether_header 
*eh; 3617 uint8_t *data; 3618 uint16_t len; 3619 3620 /* reference to frame dst addr */ 3621 uint8_t *dstmac; 3622 /* copy of original frame src addr */ 3623 uint8_t srcmac[DP_MAC_ADDR_LEN]; 3624 3625 /* local index into newmac */ 3626 uint8_t new_mac_idx = 0; 3627 struct dp_tx_me_buf_t *mc_uc_buf; 3628 qdf_nbuf_t nbuf_clone; 3629 struct dp_tx_msdu_info_s msdu_info; 3630 struct dp_tx_seg_info_s *seg_info_head = NULL; 3631 struct dp_tx_seg_info_s *seg_info_tail = NULL; 3632 struct dp_tx_seg_info_s *seg_info_new; 3633 struct dp_tx_frag_info_s data_frag; 3634 qdf_dma_addr_t paddr_data; 3635 qdf_dma_addr_t paddr_mcbuf = 0; 3636 uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0}; 3637 QDF_STATUS status; 3638 3639 qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0); 3640 3641 dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue); 3642 3643 eh = (struct ether_header *) nbuf; 3644 qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN); 3645 3646 len = qdf_nbuf_len(nbuf); 3647 3648 data = qdf_nbuf_data(nbuf); 3649 3650 status = qdf_nbuf_map(vdev->osdev, nbuf, 3651 QDF_DMA_TO_DEVICE); 3652 3653 if (status) { 3654 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3655 "Mapping failure Error:%d", status); 3656 DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1); 3657 qdf_nbuf_free(nbuf); 3658 return 1; 3659 } 3660 3661 paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN; 3662 3663 /*preparing data fragment*/ 3664 data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN; 3665 data_frag.paddr_lo = (uint32_t)paddr_data; 3666 data_frag.paddr_hi = (((uint64_t) paddr_data) >> 32); 3667 data_frag.len = len - DP_MAC_ADDR_LEN; 3668 3669 for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) { 3670 dstmac = newmac[new_mac_idx]; 3671 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3672 "added mac addr (%pM)", dstmac); 3673 3674 /* Check for NULL Mac Address */ 3675 if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN)) 3676 continue; 3677 3678 /* frame to self 
mac. skip */ 3679 if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN)) 3680 continue; 3681 3682 /* 3683 * TODO: optimize to avoid malloc in per-packet path 3684 * For eg. seg_pool can be made part of vdev structure 3685 */ 3686 seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new)); 3687 3688 if (!seg_info_new) { 3689 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3690 "alloc failed"); 3691 DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1); 3692 goto fail_seg_alloc; 3693 } 3694 3695 mc_uc_buf = dp_tx_me_alloc_buf(pdev); 3696 if (mc_uc_buf == NULL) 3697 goto fail_buf_alloc; 3698 3699 /* 3700 * TODO: Check if we need to clone the nbuf 3701 * Or can we just use the reference for all cases 3702 */ 3703 if (new_mac_idx < (new_mac_cnt - 1)) { 3704 nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf); 3705 if (nbuf_clone == NULL) { 3706 DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1); 3707 goto fail_clone; 3708 } 3709 } else { 3710 /* 3711 * Update the ref 3712 * to account for frame sent without cloning 3713 */ 3714 qdf_nbuf_ref(nbuf); 3715 nbuf_clone = nbuf; 3716 } 3717 3718 qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN); 3719 3720 status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data, 3721 QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN, 3722 &paddr_mcbuf); 3723 3724 if (status) { 3725 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3726 "Mapping failure Error:%d", status); 3727 DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1); 3728 goto fail_map; 3729 } 3730 3731 seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf; 3732 seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf; 3733 seg_info_new->frags[0].paddr_hi = 3734 ((uint64_t) paddr_mcbuf >> 32); 3735 seg_info_new->frags[0].len = DP_MAC_ADDR_LEN; 3736 3737 seg_info_new->frags[1] = data_frag; 3738 seg_info_new->nbuf = nbuf_clone; 3739 seg_info_new->frag_cnt = 2; 3740 seg_info_new->total_len = len; 3741 3742 seg_info_new->next = NULL; 3743 3744 if (seg_info_head == NULL) 3745 seg_info_head = seg_info_new; 
3746 else 3747 seg_info_tail->next = seg_info_new; 3748 3749 seg_info_tail = seg_info_new; 3750 } 3751 3752 if (!seg_info_head) { 3753 goto free_return; 3754 } 3755 3756 msdu_info.u.sg_info.curr_seg = seg_info_head; 3757 msdu_info.num_seg = new_mac_cnt; 3758 msdu_info.frm_type = dp_tx_frm_me; 3759 3760 DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt); 3761 dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info); 3762 3763 while (seg_info_head->next) { 3764 seg_info_new = seg_info_head; 3765 seg_info_head = seg_info_head->next; 3766 qdf_mem_free(seg_info_new); 3767 } 3768 qdf_mem_free(seg_info_head); 3769 3770 qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE); 3771 qdf_nbuf_free(nbuf); 3772 return new_mac_cnt; 3773 3774 fail_map: 3775 qdf_nbuf_free(nbuf_clone); 3776 3777 fail_clone: 3778 dp_tx_me_free_buf(pdev, mc_uc_buf); 3779 3780 fail_buf_alloc: 3781 qdf_mem_free(seg_info_new); 3782 3783 fail_seg_alloc: 3784 dp_tx_me_mem_free(pdev, seg_info_head); 3785 3786 free_return: 3787 qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE); 3788 qdf_nbuf_free(nbuf); 3789 return 1; 3790 } 3791 3792