/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include <wlan_cfg.h>
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif

#define DP_TX_QUEUE_MASK 0x3

/* TODO: Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject */
#define DP_INVALID_PEER 0XFFFE

/* mapping between hal encrypt type and cdp_sec_type */
#define MAX_CDP_SEC_TYPE 12
static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
	HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
	HAL_TX_ENCRYPT_TYPE_WEP_128,
	HAL_TX_ENCRYPT_TYPE_WEP_104,
	HAL_TX_ENCRYPT_TYPE_WEP_40,
	HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
	HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
	HAL_TX_ENCRYPT_TYPE_WAPI,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
	HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
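
/*
 * Illustrative note (assumption: sec_type_map is indexed by the standard
 * cdp_sec_type enum ordering): a lookup such as
 * sec_type_map[cdp_sec_type_aes_ccmp] would yield
 * HAL_TX_ENCRYPT_TYPE_AES_CCMP_128. See the use in dp_tx_hw_enqueue().
 */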

/**
 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @queue: queue ids container for nbuf
 *
 * The TX packet queue has two components: a software descriptor pool id and
 * a DMA ring id. Depending on Tx features and hardware configuration, the
 * combination of queue ids can differ.
 * For example:
 * With XPS enabled, all TX descriptor pools and DMA rings are assigned per
 * CPU id.
 * With no XPS and lock based resource protection, descriptor pool ids are
 * different for each vdev, and the DMA ring id is the same as the single
 * pdev id.
 *
 * Return: None
 */
#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
	queue->desc_pool_id = queue_offset;
	queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s, pool_id:%d ring_id: %d",
			__func__, queue->desc_pool_id, queue->ring_id);

	return;
}
#else /* QCA_OL_TX_MULTIQ_SUPPORT */
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	queue->ring_id = DP_TX_GET_RING_ID(vdev);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s, pool_id:%d ring_id: %d",
			__func__, queue->desc_pool_id, queue->ring_id);

	return;
}
#endif
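
/*
 * Worked example (multiqueue case): with DP_TX_QUEUE_MASK == 0x3, an nbuf
 * whose queue_mapping is 5 selects queue_offset = 5 & 0x3 = 1, i.e.
 * descriptor pool 1 and the hardware ring stored in soc->tx_ring_map[1].
 */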

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_release() - Release the tso segment
 *                            after unmapping all the fragments
 * @soc: soc handle
 * @tx_desc: Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
		struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s %d TSO desc is NULL!",
				__func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(tx_desc->tso_num_desc == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s %d TSO common info is NULL!",
				__func__, __LINE__);
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *) tx_desc->tso_num_desc;

		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1) {
			tso_num_desc->num_seg.tso_cmn_num_seg--;
			qdf_nbuf_unmap_tso_segment(soc->osdev,
					tx_desc->tso_desc, false);
		} else {
			tso_num_desc->num_seg.tso_cmn_num_seg--;
			qdf_assert(tso_num_desc->num_seg.tso_cmn_num_seg == 0);
			qdf_nbuf_unmap_tso_segment(soc->osdev,
					tx_desc->tso_desc, true);
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					tx_desc->tso_num_desc);
			tx_desc->tso_num_desc = NULL;
		}
		dp_tx_tso_desc_free(soc,
				tx_desc->pool_id, tx_desc->tso_desc);
		tx_desc->tso_desc = NULL;
	}
}
#else
static void dp_tx_tso_desc_release(struct dp_soc *soc,
		struct dp_tx_desc_s *tx_desc)
{
	return;
}
#endif

/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc: Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: None
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	struct dp_soc *soc;
	uint8_t comp_status = 0;

	qdf_assert(pdev);

	soc = pdev->soc;

	if (tx_desc->frm_type == dp_tx_frm_tso)
		dp_tx_tso_desc_release(soc, tx_desc);

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);

	qdf_atomic_dec(&pdev->num_tx_outstanding);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&pdev->num_tx_exception);

	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
			hal_tx_comp_get_buffer_source(&tx_desc->comp))
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		"Tx Completion Release desc %d status %d outstanding %d",
		tx_desc->id, comp_status,
		qdf_atomic_read(&pdev->num_tx_outstanding));

	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
	return;
}
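
/*
 * Illustrative note on the TSO bookkeeping in dp_tx_tso_desc_release()
 * above: for a jumbo nbuf split into, say, 3 TSO segments,
 * tso_cmn_num_seg starts at 3; the first two releases only decrement it
 * and unmap with is_last_seg == false, while the third release unmaps
 * with is_last_seg == true and returns the shared
 * qdf_tso_num_seg_elem_t to its pool.
 */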

/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @meta_data: Metadata to the fw
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 *
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		uint32_t *meta_data)
{
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *) meta_data;

	uint8_t htt_desc_size;

	/* Size rounded up to a multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;

	uint8_t *hdr = NULL;

	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

	if (vdev->mesh_vdev) {

		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (hdr == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"Error in filling HTT metadata\n");

			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);

	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* Todo - Add support for DSRC */
	}

	return htt_desc_size_aligned;
}
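
/*
 * Worked example of the rounding above: if
 * sizeof(struct htt_tx_msdu_desc_ext2_t) were 34 bytes (illustrative
 * value only), then (34 + 7) & ~0x7 == 40, so 40 bytes are pushed in
 * front of the frame and the 6 trailing pad bytes are left untouched.
 */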

/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
		void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;

	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags = *(uint32_t *) &tso_seg->tso_flags;

	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
			tso_seg->tso_flags.ip_len);

	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);


	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
		uint32_t lo = 0;
		uint32_t hi = 0;

		qdf_dmaaddr_to_32s(
			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
			tso_seg->tso_frags[num_frag].length);
	}

	return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
		void *ext_desc)
{
	return;
}
#endif

#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg() - Loop through the tso segments
 *                        allocated and free them
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_seg(struct dp_soc *soc,
	struct qdf_tso_seg_elem_t *free_seg,
	struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_desc_free(soc,
			msdu_info->tx_queue.desc_pool_id,
			free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_free_tso_num_seg() - Loop through the tso num segments
 *                            allocated and free them
 * @soc: soc handle
 * @free_seg: list of tso num segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
	struct qdf_tso_num_seg_elem_t *free_seg,
	struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_num_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tso_num_seg_free(soc,
			msdu_info->tx_queue.desc_pool_id,
			free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *tso_seg;
	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
	struct dp_soc *soc = vdev->pdev->soc;
	struct qdf_tso_info_t *tso_info;
	struct qdf_tso_num_seg_elem_t *tso_num_seg;

	tso_info = &msdu_info->u.tso_info;
	tso_info->curr_seg = NULL;
	tso_info->tso_seg_list = NULL;
	tso_info->num_segs = num_seg;
	msdu_info->frm_type = dp_tx_frm_tso;
	tso_info->tso_num_seg_list = NULL;

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	while (num_seg) {
		tso_seg = dp_tx_tso_desc_alloc(
				soc, msdu_info->tx_queue.desc_pool_id);
		if (tso_seg) {
			tso_seg->next = tso_info->tso_seg_list;
			tso_info->tso_seg_list = tso_seg;
			num_seg--;
		} else {
			struct qdf_tso_seg_elem_t *free_seg =
				tso_info->tso_seg_list;

			dp_tx_free_tso_seg(soc, free_seg, msdu_info);

			return QDF_STATUS_E_NOMEM;
		}
	}

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	tso_num_seg = dp_tso_num_seg_alloc(soc,
			msdu_info->tx_queue.desc_pool_id);

	if (tso_num_seg) {
		tso_num_seg->next = tso_info->tso_num_seg_list;
		tso_info->tso_num_seg_list = tso_num_seg;
	} else {
		/* Free the already allocated tso segments */
		struct qdf_tso_seg_elem_t *free_seg =
			tso_info->tso_seg_list;

		TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
			__func__);
		dp_tx_free_tso_seg(soc, free_seg, msdu_info);

		return QDF_STATUS_E_NOMEM;
	}

	msdu_info->num_seg =
		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
			msdu_info->num_seg);

	if (!(msdu_info->num_seg)) {
		dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
		dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
				msdu_info);
		return QDF_STATUS_E_INVAL;
	}

	tso_info->curr_seg = tso_info->tso_seg_list;

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	return QDF_STATUS_E_NOMEM;
}
#endif
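
/*
 * Illustrative note on qdf_dmaaddr_to_32s() used in
 * dp_tx_prepare_tso_ext_desc() above: it splits a qdf_dma_addr_t into
 * two 32-bit halves for the hardware descriptor, e.g. a DMA address of
 * 0x1_23456789 yields lo = 0x23456789 and hi = 0x1.
 */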

/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
	struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
	uint8_t i;
	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
	struct dp_tx_seg_info_s *seg_info;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_soc *soc = vdev->pdev->soc;

	/* Allocate an extension descriptor */
	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

	if (!msdu_ext_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
		return NULL;
	}

	if (msdu_info->exception_fw &&
			qdf_unlikely(vdev->mesh_vdev)) {
		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
				&msdu_info->meta_data[0],
				sizeof(struct htt_tx_msdu_desc_ext2_t));
		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
	}

	switch (msdu_info->frm_type) {
	case dp_tx_frm_sg:
	case dp_tx_frm_me:
	case dp_tx_frm_raw:
		seg_info = msdu_info->u.sg_info.curr_seg;
		/* Update the buffer pointers in MSDU Extension Descriptor */
		for (i = 0; i < seg_info->frag_cnt; i++) {
			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
				seg_info->frags[i].paddr_lo,
				seg_info->frags[i].paddr_hi,
				seg_info->frags[i].len);
		}

		break;

	case dp_tx_frm_tso:
		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
				&cached_ext_desc[0]);
		break;


	default:
		break;
	}

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

	hal_tx_ext_desc_sync(&cached_ext_desc[0],
			msdu_ext_desc->vaddr);

	return msdu_ext_desc;
}

/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: MSDU information (includes metadata to the fw)
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
		struct dp_tx_msdu_info_s *msdu_info,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	uint8_t align_pad;
	uint8_t is_exception = 0;
	uint8_t htt_hdr_size;
	struct ether_header *eh;
	struct dp_tx_desc_s *tx_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
		return NULL;
	}

	/* Flow control/Congestion Control counters */
	qdf_atomic_inc(&pdev->num_tx_outstanding);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = (tx_exc_metadata ?
			tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
	tx_desc->vdev = vdev;
	tx_desc->pdev = pdev;
	tx_desc->msdu_ext_desc = NULL;
	tx_desc->pkt_offset = 0;

	/*
	 * For special modes (vdev_type == ocb or mesh), data frames should be
	 * transmitted using varying transmit parameters (tx spec) which include
	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
	 * These frames are sent as exception packets to firmware.
	 *
	 * HW requirement is that metadata should always point to a
	 * 8-byte aligned address. So we add alignment pad to start of buffer.
	 * HTT Metadata should be ensured to be multiple of 8-bytes,
	 * to get 8-byte aligned start address along with align_pad added
	 *
	 *  |-----------------------------|
	 *  |                             |
	 *  |-----------------------------| <-----Buffer Pointer Address given
	 *  |                             |  ^    in HW descriptor (aligned)
	 *  |       HTT Metadata          |  |
	 *  |                             |  |
	 *  |                             |  | Packet Offset given in descriptor
	 *  |                             |  |
	 *  |-----------------------------|  |
	 *  |       Alignment Pad         |  v
	 *  |-----------------------------| <----- Actual buffer start address
	 *  |        SKB Data             |        (Unaligned)
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |-----------------------------|
	 */
	if (qdf_unlikely((msdu_info->exception_fw)) ||
				(vdev->opmode == wlan_op_mode_ocb)) {
		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"qdf_nbuf_push_head failed\n");
			goto failure;
		}

		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
				msdu_info->meta_data);
		if (htt_hdr_size == 0)
			goto failure;
		tx_desc->pkt_offset = align_pad + htt_hdr_size;
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		is_exception = 1;
	}

	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
				qdf_nbuf_map(soc->osdev, nbuf,
					QDF_DMA_TO_DEVICE))) {
		/* Handle failure */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_map failed\n");
		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
		goto failure;
	}

	if (qdf_unlikely(vdev->nawds_enabled)) {
		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
			is_exception = 1;
		}
	}

#if !TQM_BYPASS_WAR
	if (is_exception || tx_exc_metadata)
#endif
	{
		/* Temporary WAR due to TQM VP issues */
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		qdf_atomic_inc(&pdev->num_tx_exception);
	}

	return tx_desc;

failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}
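
/*
 * Worked example of the exception-path padding above: if qdf_nbuf_data()
 * returns an address ending in ...0x5, align_pad = 0x5 & 0x7 = 5, so the
 * head is pushed by 5 bytes down to an 8-byte aligned address. The HTT
 * metadata (itself padded to a multiple of 8) is then pushed in front,
 * and pkt_offset = align_pad + htt_hdr_size tells hardware where the
 * original frame starts.
 */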

/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (!tx_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
		return NULL;
	}

	/* Flow control/Congestion Control counters */
	qdf_atomic_inc(&pdev->num_tx_outstanding);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = msdu_info->frm_type;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev = vdev;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;
	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

	/* Handle scattered frames - TSO/SG/ME */
	/* Allocate and prepare an extension descriptor for scattered frames */
	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
	if (!msdu_ext_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"%s Tx Extension Descriptor Alloc Fail\n",
				__func__);
		goto failure;
	}

#if TQM_BYPASS_WAR
	/* Temporary WAR due to TQM VP issues */
	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
	qdf_atomic_inc(&pdev->num_tx_exception);
#endif
	if (qdf_unlikely(msdu_info->exception_fw))
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

	tx_desc->msdu_ext_desc = msdu_ext_desc;
	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

	return tx_desc;
failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}

/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 *
 * Return: nbuf on success, NULL on failure
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	qdf_nbuf_t curr_nbuf = NULL;
	uint16_t total_len = 0;
	qdf_dma_addr_t paddr;
	int32_t i;
	int32_t mapped_buf_num = 0;

	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;

	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
	if (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;

	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {

		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
					QDF_DMA_TO_DEVICE)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s dma map error\n", __func__);
			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
			mapped_buf_num = i;
			goto error;
		}

		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
		seg_info->frags[i].paddr_lo = paddr;
		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
		seg_info->frags[i].vaddr = (void *) curr_nbuf;
		total_len += qdf_nbuf_len(curr_nbuf);
	}

	seg_info->frag_cnt = i;
	seg_info->total_len = total_len;
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_raw;
	msdu_info->num_seg = 1;

	return nbuf;

error:
	i = 0;
	while (nbuf) {
		curr_nbuf = nbuf;
		if (i < mapped_buf_num) {
			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
			i++;
		}
		nbuf = qdf_nbuf_next(nbuf);
		qdf_nbuf_free(curr_nbuf);
	}
	return NULL;

}
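
/*
 * Illustrative note: in RAW mode an A-MSDU arrives as a chain of nbufs.
 * For a chain of three buffers, the loop above fills seg_info->frags[0..2]
 * (one DMA-mapped fragment per nbuf) and sets frag_cnt = 3, so the whole
 * chain is described by a single segment with one extension descriptor.
 */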

/**
 * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
 * @soc: DP Soc Handle
 * @vdev: DP vdev handle
 * @tx_desc: Tx Descriptor Handle
 * @tid: TID from HLOS for overriding default DSCP-TID mapping
 * @fw_metadata: Metadata to send to Target Firmware along with frame
 * @ring_id: Ring ID of H/W ring to which we enqueue the packet
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Gets the next free TCL HW DMA descriptor and sets up required parameters
 * from software Tx descriptor
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *         QDF_STATUS_E_RESOURCES when the TCL ring is full
 */
static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
		struct dp_tx_desc_s *tx_desc, uint8_t tid,
		uint16_t fw_metadata, uint8_t ring_id,
		struct cdp_tx_exception_metadata
			*tx_exc_metadata)
{
	uint8_t type;
	uint16_t length;
	void *hal_tx_desc, *hal_tx_desc_cached;
	qdf_dma_addr_t dma_addr;
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];

	enum cdp_sec_type sec_type = (tx_exc_metadata ?
			tx_exc_metadata->sec_type : vdev->sec_type);

	/* Return Buffer Manager ID */
	uint8_t bm_id = ring_id;
	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;

	hal_tx_desc_cached = (void *) cached_desc;
	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
		length = HAL_TX_EXT_DESC_WITH_META_DATA;
		type = HAL_TX_BUF_TYPE_EXT_DESC;
		dma_addr = tx_desc->msdu_ext_desc->paddr;
	} else {
		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
		type = HAL_TX_BUF_TYPE_BUFFER;
		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
	}

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
			dma_addr, bm_id, tx_desc->id, type);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_lmac_id(hal_tx_desc_cached,
			HAL_TX_DESC_DEFAULT_LMAC_ID);
	hal_tx_desc_set_dscp_tid_table_id(hal_tx_desc_cached,
			vdev->dscp_tid_map_id);
	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
			sec_type_map[sec_type]);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s length:%d, type = %d, dma_addr %llx, offset %d desc id %u",
			__func__, length, type, (uint64_t)dma_addr,
			tx_desc->pkt_offset, tx_desc->id);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
			vdev->hal_desc_addr_search_flags);

	/* verify checksum offload configuration */
	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
		|| qdf_nbuf_is_tso(tx_desc->nbuf))) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);


	/* Sync cached descriptor with HW */
	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);

	if (!hal_tx_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s TCL ring full ring_id:%d\n", __func__, ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		return QDF_STATUS_E_RESOURCES;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;

	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);

	/*
	 * If one packet is enqueued in HW, PM usage count needs to be
	 * incremented by one to prevent future runtime suspend. This
	 * should be tied with the success of enqueuing. It will be
	 * decremented after the packet has been sent.
	 */
	hif_pm_runtime_get_noresume(soc->hif_handle);

	return QDF_STATUS_SUCCESS;
}


/**
 * dp_cce_classify() - Classify the frame based on CCE rules
 * @vdev: DP vdev handle
 * @nbuf: skb
 *
 * Classify frames based on CCE rules
 *
 * Return: bool (true if classified, else false)
 */
static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct ether_header *eh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	qdf_nbuf_t nbuf_clone = NULL;
	qdf_dot3_qosframe_t *qos_wh = NULL;

	/* for mesh packets don't do any classification */
	if (qdf_unlikely(vdev->mesh_vdev))
		return false;

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
		ether_type = eh->ether_type;
		llcHdr = (qdf_llc_t *)(nbuf->data +
					sizeof(struct ether_header));
	} else {
		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;

		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
			if (qdf_unlikely(
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {

				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_4ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
						QDF_IEEE80211_4ADDR_HDR_LEN);
			} else {
				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_3ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
						QDF_IEEE80211_3ADDR_HDR_LEN);
			}

			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
					&& (ether_type ==
					qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {

				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
				return true;
			}
		}

		return false;
	}

	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
				sizeof(*llcHdr));
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (qdf_unlikely(nbuf_clone)) {
			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));

			if (ether_type == htons(ETHERTYPE_8021Q)) {
				qdf_nbuf_pull_head(nbuf_clone,
						sizeof(qdf_net_vlanhdr_t));
			}
		}
	} else {
		if (ether_type == htons(ETHERTYPE_8021Q)) {
			nbuf_clone = qdf_nbuf_clone(nbuf);
			if (qdf_unlikely(nbuf_clone)) {
				qdf_nbuf_pull_head(nbuf_clone,
						sizeof(qdf_net_vlanhdr_t));
			}
		}
	}

	if (qdf_unlikely(nbuf_clone))
		nbuf = nbuf_clone;


	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
		if (qdf_unlikely(nbuf_clone != NULL))
			qdf_nbuf_free(nbuf_clone);
		return true;
	}

	if (qdf_unlikely(nbuf_clone != NULL))
		qdf_nbuf_free(nbuf_clone);

	return false;
}

/**
 * dp_tx_classify_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info in which the extracted TID is stored
 *
 * Extract the DSCP or PCP information from the frame and map it into a TID
 * value. Software based TID classification is required when more than 2
 * DSCP-TID mapping tables are needed.
 * Hardware supports 2 DSCP-TID mapping tables.
 *
 * Return: void
 */
static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t tos = 0, dscp_tid_override = 0;
	uint8_t *hdr_ptr, *L3datap;
	uint8_t is_mcast = 0;
	struct ether_header *eh = NULL;
	qdf_ethervlan_header_t *evh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	DP_TX_TID_OVERRIDE(msdu_info, nbuf);

	if (vdev->dscp_tid_map_id <= 1)
		return;

	/* for mesh packets don't do any classification */
	if (qdf_unlikely(vdev->mesh_vdev))
		return;

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (struct ether_header *) nbuf->data;
		hdr_ptr = eh->ether_dhost;
		L3datap = hdr_ptr + sizeof(struct ether_header);
	} else {
		qdf_dot3_qosframe_t *qos_wh =
			(qdf_dot3_qosframe_t *) nbuf->data;
		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
			qos_wh->i_qos[0] & DP_QOS_TID : 0;
		return;
	}

	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
	ether_type = eh->ether_type;

	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(struct ether_header));
	/*
	 * Check if packet is dot3 or eth2 type.
	 */
	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
		ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
				sizeof(*llcHdr));

		if (ether_type == htons(ETHERTYPE_8021Q)) {
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
					sizeof(*llcHdr);
			ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
					+ sizeof(*llcHdr) +
					sizeof(qdf_net_vlanhdr_t));
		} else {
			L3datap = hdr_ptr + sizeof(struct ether_header) +
				sizeof(*llcHdr);
		}
	} else {
		if (ether_type == htons(ETHERTYPE_8021Q)) {
			evh = (qdf_ethervlan_header_t *) eh;
			ether_type = evh->ether_type;
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
		}
	}

	/*
	 * Find priority from IP TOS DSCP field
	 */
	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
			/* Only for unicast frames */
			if (!is_mcast) {
				/* send it on VO queue */
				msdu_info->tid = DP_VO_TID;
			}
		} else {
			/*
			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
			 * from TOS byte.
			 */
			tos = ip->ip_tos;
			dscp_tid_override = 1;

		}
	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
		/* TODO
		 * use flowlabel
		 * igmpmld cases to be handled in phase 2
		 */
		unsigned long ver_pri_flowlabel;
		unsigned long pri;
		ver_pri_flowlabel = *(unsigned long *) L3datap;
		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
			DP_IPV6_PRIORITY_SHIFT;
		tos = pri;
		dscp_tid_override = 1;
	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
		msdu_info->tid = DP_VO_TID;
	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
		/* Only for unicast frames */
		if (!is_mcast) {
			/* send ucast arp on VO queue */
			msdu_info->tid = DP_VO_TID;
		}
	}

	/*
	 * Assign all MCAST packets to BE
	 */
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		if (is_mcast) {
			tos = 0;
			dscp_tid_override = 1;
		}
	}

	if (dscp_tid_override == 1) {
		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
	}
	return;
}
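
/*
 * Worked example of the final mapping above (assuming the usual
 * DP_IP_DSCP_SHIFT of 2 and DP_IP_DSCP_MASK of 0x3f): an IPv4 packet
 * with TOS 0xB8 (DSCP 46, Expedited Forwarding) gives
 * (0xB8 >> 2) & 0x3f == 46, so the TID is read from
 * pdev->dscp_tid_map[vdev->dscp_tid_map_id][46].
 */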

#ifdef CONVERGED_TDLS_ENABLE
/**
 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
{
	if (tx_desc->vdev) {
		if (tx_desc->vdev->is_tdls_frame)
			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
		tx_desc->vdev->is_tdls_frame = false;
	}
}

/**
 * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
 * @tx_desc: TX descriptor
 * @vdev: datapath vdev handle
 *
 * Return: None
 */
static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
					 struct dp_vdev *vdev)
{
	struct hal_tx_completion_status ts = {0};
	qdf_nbuf_t nbuf = tx_desc->nbuf;

	hal_tx_comp_get_status(&tx_desc->comp, &ts);
	if (vdev->tx_non_std_data_callback.func) {
		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
		vdev->tx_non_std_data_callback.func(
				vdev->tx_non_std_data_callback.ctxt,
				nbuf, ts.status);
		return;
	}
}
#else
/*
 * Stub so that the unconditional call in dp_tx_send_msdu_single() compiles
 * when CONVERGED_TDLS_ENABLE is not defined.
 */
static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU information (TID, Tx queue, metadata to the fw, ...)
 * @peer_id: peer_id of the peer in case of NAWDS frames
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	QDF_STATUS status;
	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
	uint16_t htt_tcl_metadata = 0;
	uint8_t tid = msdu_info->tid;

	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
			msdu_info, tx_exc_metadata);
	if (!tx_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Tx_desc prepare Fail vdev %pK queue %d\n",
			  __func__, vdev, tx_q->desc_pool_id);
		return nbuf;
	}

	if (qdf_unlikely(soc->cce_disable)) {
		if (dp_cce_classify(vdev, nbuf) == true) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			tid = DP_VO_TID;
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		}
	}

	dp_tx_update_tdls_flags(tx_desc);

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s %d : HAL RING Access Failed -- %pK\n",
				__func__, __LINE__, hal_srng);
		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
		goto fail_return;
	}

	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
		htt_tcl_metadata = vdev->htt_tcl_metadata;
		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
				HTT_TCL_METADATA_TYPE_PEER_BASED);
		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
				peer_id);
	} else
		htt_tcl_metadata = vdev->htt_tcl_metadata;


	if (msdu_info->exception_fw) {
		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
	}

	/* Enqueue the Tx MSDU descriptor to HW for transmit */
	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);

	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
			  __func__, tx_desc, tx_q->ring_id);
		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
		goto fail_return;
	}

	nbuf = NULL;

fail_return:
	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
		hal_srng_access_end(soc->hal_soc, hal_srng);
		hif_pm_runtime_put(soc->hif_handle);
	} else {
		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
	}

	return nbuf;
}
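
/*
 * Illustrative note on the metadata selection in dp_tx_send_msdu_single()
 * above: for a NAWDS frame addressed to a specific peer (peer_id neither
 * DP_INVALID_PEER nor HTT_INVALID_PEER), the TCL metadata is switched to
 * peer-based routing, e.g. peer_id 7 yields
 * TYPE = HTT_TCL_METADATA_TYPE_PEER_BASED and PEER_ID = 7; all other
 * frames reuse the vdev-based default vdev->htt_tcl_metadata.
 */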

/**
 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 *
 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#if QDF_LOCK_STATS
static noinline
#else
static
#endif
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t i;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	bool is_cce_classified = false;
	QDF_STATUS status;
	uint16_t htt_tcl_metadata = 0;

	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s %d : HAL RING Access Failed -- %pK\n",
				__func__, __LINE__, hal_srng);
		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
		return nbuf;
	}

	if (qdf_unlikely(soc->cce_disable)) {
		is_cce_classified = dp_cce_classify(vdev, nbuf);
		if (is_cce_classified) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			msdu_info->tid = DP_VO_TID;
		}
	}

	if (msdu_info->frm_type == dp_tx_frm_me)
		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;

	i = 0;
	/* Print statement to track i and num_seg */
	/*
	 * For each segment (maps to 1 MSDU), prepare software and hardware
	 * descriptors using information in msdu_info
	 */
	while (i < msdu_info->num_seg) {
		/*
		 * Setup Tx descriptor for an MSDU, and MSDU extension
		 * descriptor
		 */
		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
				tx_q->desc_pool_id);

		if (!tx_desc) {
			if (msdu_info->frm_type == dp_tx_frm_me) {
				dp_tx_me_free_buf(pdev,
					(void *)(msdu_info->u.sg_info
						.curr_seg->frags[0].vaddr));
			}
			goto done;
		}

		if (msdu_info->frm_type == dp_tx_frm_me) {
			tx_desc->me_buffer =
				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
		}

		if (is_cce_classified)
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

		htt_tcl_metadata = vdev->htt_tcl_metadata;
		if (msdu_info->exception_fw) {
			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
		}

		/*
		 * Enqueue the Tx MSDU descriptor to HW for transmit
		 */
		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
			htt_tcl_metadata, tx_q->ring_id, NULL);

		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
				  __func__, tx_desc, tx_q->ring_id);

			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);

			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
			goto done;
		}

		/*
		 * TODO
		 * if tso_info structure can be modified to have curr_seg
		 * as first element, following 2 blocks of code (for TSO and SG)
		 * can be combined into 1
		 */

		/*
		 * For frames with multiple segments (TSO, ME), jump to next
		 * segment.
		 */
		if (msdu_info->frm_type == dp_tx_frm_tso) {
			if (msdu_info->u.tso_info.curr_seg->next) {
				msdu_info->u.tso_info.curr_seg =
					msdu_info->u.tso_info.curr_seg->next;

				/*
				 * If this is a jumbo nbuf, then increment the
				 * number of nbuf users for each additional
				 * segment of the msdu. This will ensure that
				 * the skb is freed only after receiving tx
				 * completion for all segments of an nbuf
				 */
				qdf_nbuf_inc_users(nbuf);

				/* Check with MCL if this is needed */
				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
			}
		}

		/*
		 * For Multicast-Unicast converted packets,
		 * each converted frame (for a client) is represented as
		 * 1 segment
		 */
		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
				(msdu_info->frm_type == dp_tx_frm_me)) {
			if (msdu_info->u.sg_info.curr_seg->next) {
				msdu_info->u.sg_info.curr_seg =
					msdu_info->u.sg_info.curr_seg->next;
				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
			}
		}
		i++;
	}

	nbuf = NULL;

done:
	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
		hal_srng_access_end(soc->hal_soc, hal_srng);
		hif_pm_runtime_put(soc->hif_handle);
	} else {
		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
	}

	return nbuf;
}
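
/*
 * Illustrative note on the refcounting above: a TSO nbuf split into N
 * segments has its user count incremented N-1 times (once per segment
 * that still has a successor), so together with the original reference
 * the skb is freed only after all N tx completions have been processed.
 */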

/**
 * dp_tx_prepare_sg() - Extract SG info from NBUF and prepare msdu_info
 *                      for SG frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t cur_frag, nr_frags;
	qdf_dma_addr_t paddr;
	struct dp_tx_sg_info_s *sg_info;

	sg_info = &msdu_info->u.sg_info;
	nr_frags = qdf_nbuf_get_nr_frags(nbuf);

	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
				QDF_DMA_TO_DEVICE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"dma map error\n");
		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);

		qdf_nbuf_free(nbuf);
		return NULL;
	}

	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
	seg_info->frags[0].paddr_lo = paddr;
	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
	seg_info->frags[0].vaddr = (void *) nbuf;

	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"frag dma map error\n");
			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
			qdf_nbuf_free(nbuf);
			return NULL;
		}

		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
		seg_info->frags[cur_frag + 1].paddr_hi =
			((uint64_t) paddr) >> 32;
		seg_info->frags[cur_frag + 1].len =
			qdf_nbuf_get_frag_size(nbuf, cur_frag);
	}

	seg_info->frag_cnt = (cur_frag + 1);
	seg_info->total_len = qdf_nbuf_len(nbuf);
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_sg;
	msdu_info->num_seg = 1;

	return nbuf;
}
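
/*
 * Illustrative note: for a non-linear nbuf with a linear head and two
 * page fragments, the code above produces frags[0] for the head
 * (len = qdf_nbuf_headlen()) and frags[1..2] for the page fragments,
 * giving frag_cnt = 3 within a single segment.
 */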

#ifdef MESH_MODE_SUPPORT

/**
 * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
 *                                  and prepare msdu_info for mesh frames.
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on failure,
 *         nbuf when extracted successfully
 */
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				struct dp_tx_msdu_info_s *msdu_info)
{
	struct meta_hdr_s *mhdr;
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
		msdu_info->exception_fw = 0;
		goto remove_meta_hdr;
	}

	msdu_info->exception_fw = 1;

	qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0);

	meta_data->host_tx_desc_pool = 1;
	meta_data->update_peer_cache = 1;
	meta_data->learning_frame = 1;

	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
		meta_data->power = mhdr->power;

		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
		meta_data->retry_limit = mhdr->rate_info[0].max_tries;

		meta_data->dyn_bw = 1;

		meta_data->valid_pwr = 1;
		meta_data->valid_mcs_mask = 1;
		meta_data->valid_nss_mask = 1;
		meta_data->valid_preamble_type = 1;
		meta_data->valid_retries = 1;
		meta_data->valid_bw_info = 1;
	}

	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
		meta_data->encrypt_type = 0;
		meta_data->valid_encrypt_type = 1;
		meta_data->learning_frame = 0;
	}

	meta_data->valid_key_flags = 1;
	meta_data->key_flags = (mhdr->keyix & 0x3);

remove_meta_hdr:
	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_pull_head failed\n");
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	if (mhdr->flags & METAHDR_FLAG_NOQOS)
		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
	else
		msdu_info->tid = qdf_nbuf_get_priority(nbuf);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
			" tid %d to_fw %d\n",
			__func__, msdu_info->meta_data[0],
			msdu_info->meta_data[1],
			msdu_info->meta_data[2],
			msdu_info->meta_data[3],
			msdu_info->meta_data[4],
			msdu_info->meta_data[5],
			msdu_info->tid, msdu_info->exception_fw);

	return nbuf;
}
#else
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				struct dp_tx_msdu_info_s *msdu_info)
{
	return nbuf;
}

#endif
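
/*
 * Worked example of the fixed-rate encoding above: with
 * METAHDR_FLAG_AUTO_RATE cleared, mcs 7 and nss 2 in rate_info[0]
 * produce mcs_mask = 1 << 7 = 0x80 and nss_mask = 1 << 2 = 0x4, i.e.
 * one-hot masks selecting the rate the firmware should use.
 */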

#ifdef DP_FEATURE_NAWDS_TX
/**
 * dp_tx_prepare_nawds() - Transmit NAWDS frames
 * @vdev: dp_vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU information (TID, Tx queue, meta data for mesh, ...)
 *
 * Return: NULL on success, nbuf on failure
 */
static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct dp_peer *peer = NULL;
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_ast_entry *ast_entry = NULL;
	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
	uint16_t peer_id = HTT_INVALID_PEER;

	struct dp_peer *sa_peer = NULL;
	qdf_nbuf_t nbuf_copy;

	qdf_spin_lock_bh(&(soc->ast_lock));
	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));

	if (ast_entry)
		sa_peer = ast_entry->peer;

	qdf_spin_unlock_bh(&(soc->ast_lock));

	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
				(peer->nawds_enabled)) {
			if (sa_peer == peer) {
				QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						" %s: broadcast multicast packet",
						__func__);
				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
				continue;
			}

			nbuf_copy = qdf_nbuf_copy(nbuf);
			if (!nbuf_copy) {
				QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_ERROR,
						"nbuf copy failed");
				/* Do not send a NULL nbuf down the Tx path */
				break;
			}

			peer_id = peer->peer_ids[0];
			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
					msdu_info, peer_id, NULL);
			if (nbuf_copy != NULL) {
				qdf_nbuf_free(nbuf_copy);
				continue;
			}
			DP_STATS_INC_PKT(peer, tx.nawds_mcast,
					1, qdf_nbuf_len(nbuf));
		}
	}
	if (peer_id == HTT_INVALID_PEER)
		return nbuf;

	return NULL;
}
#endif

/**
 * dp_check_exc_metadata() - Checks if parameters are valid
 * @tx_exc: holds all exception path parameters
 *
 * Return: true when all the parameters are valid, else false
 *
 */
static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
{
	if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
	    tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
	    tx_exc->sec_type > cdp_num_sec_types) {
		return false;
	}

	return true;
}
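
/*
 * Minimal usage sketch (illustrative only; values are assumptions, not a
 * prescribed configuration): a caller of dp_tx_send_exception() below is
 * expected to fill the metadata so that it passes the checks above, e.g.
 *
 *	struct cdp_tx_exception_metadata meta = {0};
 *
 *	meta.peer_id = HTT_INVALID_PEER;	// no peer-based routing
 *	meta.tid = HTT_INVALID_TID;		// keep default DSCP-TID mapping
 *	meta.tx_encap_type = htt_cmn_pkt_type_ethernet;
 *	meta.sec_type = cdp_sec_type_none;
 *	nbuf = dp_tx_send_exception(vdev_handle, nbuf, &meta);
 */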

/**
 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through fw
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct ether_header *eh = NULL;
	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
	struct dp_tx_msdu_info_s msdu_info;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);

	msdu_info.tid = tx_exc_metadata->tid;

	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s , skb %pM",
			__func__, nbuf->data);

	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Invalid parameters in exception path");
		goto fail;
	}

	/* Basic sanity checks for unsupported packets */

	/* MESH mode */
	if (qdf_unlikely(vdev->mesh_vdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Mesh mode is not supported in exception path");
		goto fail;
	}

	/* TSO or SG */
	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "TSO and SG are not supported in exception path");

		goto fail;
	}

	/* RAW */
	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Raw frame is not supported in exception path");
		goto fail;
	}


	/* Mcast enhancement */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW\n");
		}
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/* Reset the control block */
	qdf_nbuf_reset_ctxt(nbuf);

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
			tx_exc_metadata->peer_id, tx_exc_metadata);

	return nbuf;

fail:
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"pkt send failed");
	return nbuf;
}

/**
 * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#ifdef MESH_MODE_SUPPORT
qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t nbuf_mesh = NULL;
	qdf_nbuf_t nbuf_clone = NULL;
	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
	uint8_t no_enc_frame = 0;

	nbuf_mesh = qdf_nbuf_unshare(nbuf);
	if (nbuf_mesh == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_unshare failed\n");
		return nbuf;
	}
	nbuf = nbuf_mesh;

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if ((vdev->sec_type != cdp_sec_type_none) &&
			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
		no_enc_frame = 1;

	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
			!no_enc_frame) {
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (nbuf_clone == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_clone failed\n");
			return nbuf;
		}
		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
	}

	if (nbuf_clone) {
		if (!dp_tx_send(vap_dev, nbuf_clone)) {
			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
		} else
			qdf_nbuf_free(nbuf_clone);
	}

	if (no_enc_frame)
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
	else
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);

	nbuf = dp_tx_send(vap_dev, nbuf);
	if ((nbuf == NULL) && no_enc_frame) {
		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
	}

	return nbuf;
}

#else

qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
{
	return dp_tx_send(vap_dev, nbuf);
}

#endif
layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-VAP forwarding
 * cases
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
{
	struct ether_header *eh = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s seg_info;
	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
	uint16_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf_mesh = NULL;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);

	eh = (struct ether_header *)qdf_nbuf_data(nbuf);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s, skb %pM",
			__func__, nbuf->data);

	/*
	 * Set Default Host TID value to invalid TID
	 * (TID override disabled)
	 */
	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(vdev->mesh_vdev)) {
		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
								&msdu_info);
		if (nbuf_mesh == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					"Extracting mesh metadata failed\n");
			return nbuf;
		}
		nbuf = nbuf_mesh;
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * TCL H/W supports 2 DSCP-TID mapping tables.
	 * Table 1 - Default DSCP-TID mapping table
	 * Table 2 - One DSCP-TID override table
	 *
	 * If we need a different DSCP-TID mapping for this vap,
	 * call tid_classify to extract DSCP/ToS from frame and
	 * map to a TID and store in msdu_info. This is later used
	 * to fill in TCL Input descriptor (per-packet TID override).
	 */
	dp_tx_classify_tid(vdev, nbuf, &msdu_info);

	/* Reset the control block */
	qdf_nbuf_reset_ctxt(nbuf);

	/*
	 * Classify the frame and call corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
	 * into MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
 */
	if (qdf_nbuf_is_tso(nbuf)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s TSO frame %pK\n", __func__, vdev);
		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
				qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			DP_STATS_INC(vdev, tx_i.tso.dropped_host, 1);
			return nbuf;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s non-TSO SG frame %pK\n", __func__, vdev);

		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				qdf_nbuf_len(nbuf));

		goto send_multiple;
	}

#ifdef ATH_SUPPORT_IQUE
	/* Mcast to Ucast Conversion */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					"%s Mcast frm for ME %pK\n",
					__func__, vdev);

			DP_STATS_INC_PKT(vdev,
					tx_i.mcast_en.mcast_pkt, 1,
					qdf_nbuf_len(nbuf));
			if (dp_tx_prepare_send_me(vdev, nbuf) ==
					QDF_STATUS_SUCCESS) {
				return NULL;
			}
		}
	}
#endif

	/* RAW */
	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
		if (nbuf == NULL)
			return NULL;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s Raw frame %pK\n", __func__, vdev);

		goto send_multiple;

	}

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);

	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	return nbuf;
}
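
/*
 * Illustrative sketch (added commentary, not part of the original driver):
 * a hypothetical OSIF transmit handler would call dp_tx_send() and free the
 * skb only when the DP layer hands it back, since a NULL return means the
 * frame was consumed. The names "osif_dev" and "osif_hard_start_xmit" are
 * assumptions for illustration only:
 *
 *	static int osif_hard_start_xmit(struct sk_buff *skb,
 *					struct osif_dev *osdev)
 *	{
 *		qdf_nbuf_t ret;
 *
 *		ret = dp_tx_send(osdev->dp_vdev_handle, (qdf_nbuf_t)skb);
 *		if (ret) {
 *			// DP could not accept the frame; drop it here
 *			qdf_nbuf_free(ret);
 *		}
 *		return 0;
 *	}
 */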

/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * Return: none
 */
static
void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_vdev *vdev;
	struct dp_peer *peer = NULL;
	uint32_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_peer *sa_peer = NULL;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = NULL;
	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
#ifdef WDS_VENDOR_EXTENSION
	int is_mcast = 0, is_ucast = 0;
	int num_peers_3addr = 0;
	struct ether_header *eth_hdr = (struct ether_header *)(qdf_nbuf_data(nbuf));
	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
#endif

	vdev = tx_desc->vdev;
	qdf_assert(vdev);

	soc = vdev->pdev->soc;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s Tx reinject path\n", __func__);

	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
			qdf_nbuf_len(tx_desc->nbuf));

	qdf_spin_lock_bh(&(soc->ast_lock));

	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));

	if (ast_entry)
		sa_peer = ast_entry->peer;

	qdf_spin_unlock_bh(&(soc->ast_lock));

#ifdef WDS_VENDOR_EXTENSION
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
	} else {
		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
	}
	is_ucast = !is_mcast;

	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (peer->bss_peer)
			continue;

		/* Detect wds peers that use 3-addr framing for mcast.
		 * if there are any, the bss_peer is used to send the
		 * mcast frame using 3-addr format. all wds enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
		 */
		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
			num_peers_3addr = 1;
			break;
		}
	}
#endif

	if (qdf_unlikely(vdev->mesh_vdev)) {
		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
	} else {
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
#ifdef WDS_VENDOR_EXTENSION
			/*
			 * . if 3-addr STA, then send on BSS Peer
			 * . if Peer WDS enabled and accept 4-addr mcast,
			 *   send mcast on that peer only
			 * .
if Peer WDS enabled and accept 4-addr ucast, 2114 * send ucast on that peer only 2115 */ 2116 ((peer->bss_peer && num_peers_3addr && is_mcast) || 2117 (peer->wds_enabled && 2118 ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) || 2119 (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) { 2120 #else 2121 ((peer->bss_peer && 2122 !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) || 2123 peer->nawds_enabled)) { 2124 #endif 2125 peer_id = DP_INVALID_PEER; 2126 2127 if (peer->nawds_enabled) { 2128 peer_id = peer->peer_ids[0]; 2129 if (sa_peer == peer) { 2130 QDF_TRACE( 2131 QDF_MODULE_ID_DP, 2132 QDF_TRACE_LEVEL_DEBUG, 2133 " %s: multicast packet", 2134 __func__); 2135 DP_STATS_INC(peer, 2136 tx.nawds_mcast_drop, 1); 2137 continue; 2138 } 2139 } 2140 2141 nbuf_copy = qdf_nbuf_copy(nbuf); 2142 2143 if (!nbuf_copy) { 2144 QDF_TRACE(QDF_MODULE_ID_DP, 2145 QDF_TRACE_LEVEL_DEBUG, 2146 FL("nbuf copy failed")); 2147 break; 2148 } 2149 2150 nbuf_copy = dp_tx_send_msdu_single(vdev, 2151 nbuf_copy, 2152 &msdu_info, 2153 peer_id, 2154 NULL); 2155 2156 if (nbuf_copy) { 2157 QDF_TRACE(QDF_MODULE_ID_DP, 2158 QDF_TRACE_LEVEL_DEBUG, 2159 FL("pkt send failed")); 2160 qdf_nbuf_free(nbuf_copy); 2161 } else { 2162 if (peer_id != DP_INVALID_PEER) 2163 DP_STATS_INC_PKT(peer, 2164 tx.nawds_mcast, 2165 1, qdf_nbuf_len(nbuf)); 2166 } 2167 } 2168 } 2169 } 2170 2171 if (vdev->nawds_enabled) { 2172 peer_id = DP_INVALID_PEER; 2173 2174 DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast, 2175 1, qdf_nbuf_len(nbuf)); 2176 2177 nbuf = dp_tx_send_msdu_single(vdev, 2178 nbuf, 2179 &msdu_info, 2180 peer_id, NULL); 2181 2182 if (nbuf) { 2183 QDF_TRACE(QDF_MODULE_ID_DP, 2184 QDF_TRACE_LEVEL_DEBUG, 2185 FL("pkt send failed")); 2186 qdf_nbuf_free(nbuf); 2187 } 2188 } else 2189 qdf_nbuf_free(nbuf); 2190 2191 dp_tx_desc_release(tx_desc, tx_desc->pool_id); 2192 } 2193 2194 /** 2195 * dp_tx_inspect_handler() - Tx Inspect Handler 2196 * @tx_desc: software descriptor head pointer 2197 * @status : Tx completion status from HTT descriptor 2198 * 2199 * Handles Tx frames sent back to Host for inspection 2200 * (ProxyARP) 2201 * 2202 * Return: none 2203 */ 2204 static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status) 2205 { 2206 2207 struct dp_soc *soc; 2208 struct dp_pdev *pdev = tx_desc->pdev; 2209 2210 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 2211 "%s Tx inspect path\n", 2212 __func__); 2213 2214 qdf_assert(pdev); 2215 2216 soc = pdev->soc; 2217 2218 DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1, 2219 qdf_nbuf_len(tx_desc->nbuf)); 2220 2221 DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf); 2222 dp_tx_desc_release(tx_desc, tx_desc->pool_id); 2223 } 2224 2225 #ifdef FEATURE_PERPKT_INFO 2226 /** 2227 * dp_get_completion_indication_for_stack() - send completion to stack 2228 * @soc : dp_soc handle 2229 * @pdev: dp_pdev handle 2230 * @peer_id: peer_id of the peer for which completion came 2231 * @ppdu_id: ppdu_id 2232 * @first_msdu: first msdu 2233 * @last_msdu: last msdu 2234 * @netbuf: Buffer pointer for free 2235 * 2236 * This function is used for indication whether buffer needs to be 2237 * send to stack for free or not 2238 */ 2239 QDF_STATUS 2240 dp_get_completion_indication_for_stack(struct dp_soc *soc, struct dp_pdev *pdev, 2241 uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu, 2242 uint8_t last_msdu, qdf_nbuf_t netbuf) 2243 { 2244 struct tx_capture_hdr *ppdu_hdr; 2245 struct dp_peer *peer = NULL; 2246 2247 if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode)) 2248 return QDF_STATUS_E_NOSUPPORT; 2249 
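	/*
	 * Note (added commentary): in m_copy mode only one completion per
	 * (ppdu_id, peer_id) pair is forwarded to the stack; the ids of
	 * the last indication are cached in pdev->m_copy_id below and
	 * repeats are rejected with QDF_STATUS_E_INVAL.
	 */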
2250 peer = (peer_id == HTT_INVALID_PEER) ? NULL : 2251 dp_peer_find_by_id(soc, peer_id); 2252 2253 if (!peer) { 2254 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2255 FL("Peer Invalid")); 2256 return QDF_STATUS_E_INVAL; 2257 } 2258 2259 if (pdev->mcopy_mode) { 2260 if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) && 2261 (pdev->m_copy_id.tx_peer_id == peer_id)) { 2262 return QDF_STATUS_E_INVAL; 2263 } 2264 2265 pdev->m_copy_id.tx_ppdu_id = ppdu_id; 2266 pdev->m_copy_id.tx_peer_id = peer_id; 2267 } 2268 2269 if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) { 2270 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2271 FL("No headroom")); 2272 return QDF_STATUS_E_NOMEM; 2273 } 2274 2275 ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf); 2276 qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw, 2277 IEEE80211_ADDR_LEN); 2278 ppdu_hdr->ppdu_id = ppdu_id; 2279 qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw, 2280 IEEE80211_ADDR_LEN); 2281 ppdu_hdr->peer_id = peer_id; 2282 ppdu_hdr->first_msdu = first_msdu; 2283 ppdu_hdr->last_msdu = last_msdu; 2284 2285 return QDF_STATUS_SUCCESS; 2286 } 2287 2288 2289 /** 2290 * dp_send_completion_to_stack() - send completion to stack 2291 * @soc : dp_soc handle 2292 * @pdev: dp_pdev handle 2293 * @peer_id: peer_id of the peer for which completion came 2294 * @ppdu_id: ppdu_id 2295 * @netbuf: Buffer pointer for free 2296 * 2297 * This function is used to send completion to stack 2298 * to free buffer 2299 */ 2300 void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev, 2301 uint16_t peer_id, uint32_t ppdu_id, 2302 qdf_nbuf_t netbuf) 2303 { 2304 dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc, 2305 netbuf, peer_id, 2306 WDI_NO_VAL, pdev->pdev_id); 2307 } 2308 #else 2309 static QDF_STATUS 2310 dp_get_completion_indication_for_stack(struct dp_soc *soc, struct dp_pdev *pdev, 2311 uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu, 2312 uint8_t last_msdu, qdf_nbuf_t netbuf) 2313 { 2314 return QDF_STATUS_E_NOSUPPORT; 2315 } 2316 2317 static void 2318 dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev, 2319 uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf) 2320 { 2321 } 2322 #endif 2323 2324 /** 2325 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor 2326 * @soc: Soc handle 2327 * @desc: software Tx descriptor to be processed 2328 * 2329 * Return: none 2330 */ 2331 static inline void dp_tx_comp_free_buf(struct dp_soc *soc, 2332 struct dp_tx_desc_s *desc) 2333 { 2334 struct dp_vdev *vdev = desc->vdev; 2335 qdf_nbuf_t nbuf = desc->nbuf; 2336 2337 /* If it is TDLS mgmt, don't unmap or free the frame */ 2338 if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) 2339 return dp_non_std_tx_comp_free_buff(desc, vdev); 2340 2341 /* 0 : MSDU buffer, 1 : MLE */ 2342 if (desc->msdu_ext_desc) { 2343 /* TSO free */ 2344 if (hal_tx_ext_desc_get_tso_enable( 2345 desc->msdu_ext_desc->vaddr)) { 2346 /* If remaining number of segment is 0 2347 * actual TSO may unmap and free */ 2348 if (qdf_nbuf_get_users(nbuf) == 1) 2349 __qdf_nbuf_unmap_single(soc->osdev, 2350 nbuf, 2351 QDF_DMA_TO_DEVICE); 2352 2353 qdf_nbuf_free(nbuf); 2354 return; 2355 } 2356 } 2357 2358 qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE); 2359 2360 if (qdf_likely(!vdev->mesh_vdev)) 2361 qdf_nbuf_free(nbuf); 2362 else { 2363 if (desc->flags & DP_TX_DESC_FLAG_TO_FW) { 2364 qdf_nbuf_free(nbuf); 2365 DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1); 2366 } else 2367 vdev->osif_tx_free_ext((nbuf)); 2368 } 2369 } 2370 2371 /** 2372 * 
dp_tx_mec_handler() - Tx MEC Notify Handler 2373 * @vdev: pointer to dp dev handler 2374 * @status : Tx completion status from HTT descriptor 2375 * 2376 * Handles MEC notify event sent from fw to Host 2377 * 2378 * Return: none 2379 */ 2380 #ifdef FEATURE_WDS 2381 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status) 2382 { 2383 2384 struct dp_soc *soc; 2385 uint32_t flags = IEEE80211_NODE_F_WDS_HM; 2386 struct dp_peer *peer; 2387 uint8_t mac_addr[DP_MAC_ADDR_LEN], i; 2388 2389 if (!vdev->wds_enabled) 2390 return; 2391 2392 soc = vdev->pdev->soc; 2393 qdf_spin_lock_bh(&soc->peer_ref_mutex); 2394 peer = TAILQ_FIRST(&vdev->peer_list); 2395 qdf_spin_unlock_bh(&soc->peer_ref_mutex); 2396 2397 if (!peer) { 2398 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 2399 FL("peer is NULL")); 2400 return; 2401 } 2402 2403 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 2404 "%s Tx MEC Handler\n", 2405 __func__); 2406 2407 for (i = 0; i < DP_MAC_ADDR_LEN; i++) 2408 mac_addr[(DP_MAC_ADDR_LEN - 1) - i] = 2409 status[(DP_MAC_ADDR_LEN - 2) + i]; 2410 2411 if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN)) 2412 dp_peer_add_ast(soc, 2413 peer, 2414 mac_addr, 2415 CDP_TXRX_AST_TYPE_MEC, 2416 flags); 2417 } 2418 #endif 2419 2420 /** 2421 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler 2422 * @tx_desc: software descriptor head pointer 2423 * @status : Tx completion status from HTT descriptor 2424 * 2425 * This function will process HTT Tx indication messages from Target 2426 * 2427 * Return: none 2428 */ 2429 static 2430 void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status) 2431 { 2432 uint8_t tx_status; 2433 struct dp_pdev *pdev; 2434 struct dp_vdev *vdev; 2435 struct dp_soc *soc; 2436 uint32_t *htt_status_word = (uint32_t *) status; 2437 2438 qdf_assert(tx_desc->pdev); 2439 2440 pdev = tx_desc->pdev; 2441 vdev = tx_desc->vdev; 2442 soc = pdev->soc; 2443 2444 tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]); 2445 2446 switch (tx_status) { 2447 case HTT_TX_FW2WBM_TX_STATUS_OK: 2448 case HTT_TX_FW2WBM_TX_STATUS_DROP: 2449 case HTT_TX_FW2WBM_TX_STATUS_TTL: 2450 { 2451 dp_tx_comp_free_buf(soc, tx_desc); 2452 dp_tx_desc_release(tx_desc, tx_desc->pool_id); 2453 break; 2454 } 2455 case HTT_TX_FW2WBM_TX_STATUS_REINJECT: 2456 { 2457 dp_tx_reinject_handler(tx_desc, status); 2458 break; 2459 } 2460 case HTT_TX_FW2WBM_TX_STATUS_INSPECT: 2461 { 2462 dp_tx_inspect_handler(tx_desc, status); 2463 break; 2464 } 2465 case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY: 2466 { 2467 dp_tx_mec_handler(vdev, status); 2468 break; 2469 } 2470 default: 2471 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 2472 "%s Invalid HTT tx_status %d\n", 2473 __func__, tx_status); 2474 break; 2475 } 2476 } 2477 2478 #ifdef MESH_MODE_SUPPORT 2479 /** 2480 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats 2481 * in mesh meta header 2482 * @tx_desc: software descriptor head pointer 2483 * @ts: pointer to tx completion stats 2484 * Return: none 2485 */ 2486 static 2487 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc, 2488 struct hal_tx_completion_status *ts) 2489 { 2490 struct meta_hdr_s *mhdr; 2491 qdf_nbuf_t netbuf = tx_desc->nbuf; 2492 2493 if (!tx_desc->msdu_ext_desc) { 2494 if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) { 2495 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 2496 "netbuf %pK offset %d\n", 2497 netbuf, tx_desc->pkt_offset); 2498 return; 2499 } 2500 } 2501 if 
(qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"netbuf %pK offset %zu\n", netbuf,
			sizeof(struct meta_hdr_s));
		return;
	}

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
	mhdr->rssi = ts->ack_frame_rssi;
	mhdr->channel = tx_desc->pdev->operating_channel;
}

#else
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
		struct hal_tx_completion_status *ts)
{
}

#endif

/**
 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
 * @peer: Handle to DP peer
 * @ts: pointer to HAL Tx completion stats
 * @length: MSDU length
 *
 * Return: None
 */
static void dp_tx_update_peer_stats(struct dp_peer *peer,
		struct hal_tx_completion_status *ts, uint32_t length)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	uint8_t mcs, pkt_type;

	mcs = ts->mcs;
	pkt_type = ts->pkt_type;

	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM)
		return;

	if (peer->bss_peer) {
		DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
	} else {
		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
		}
		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
	}

	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));

	DP_STATS_INCC(peer, tx.dropped.fw_rem, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_TX));

	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
			(ts->status == HAL_TX_TQM_RR_FW_REASON1));

	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
			(ts->status == HAL_TX_TQM_RR_FW_REASON2));

	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
			(ts->status == HAL_TX_TQM_RR_FW_REASON3));

	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED)
		return;

	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);

	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);

	if (!(soc->process_tx_status))
		return;

	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS -
1], 1, 2601 ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX))); 2602 DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, 2603 ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX))); 2604 DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1); 2605 DP_STATS_INC(peer, tx.bw[ts->bw], 1); 2606 DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi); 2607 DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1); 2608 DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc); 2609 DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc); 2610 DP_STATS_INC_PKT(peer, tx.tx_success, 1, length); 2611 DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1); 2612 2613 if (soc->cdp_soc.ol_ops->update_dp_stats) { 2614 soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev, 2615 &peer->stats, ts->peer_id, 2616 UPDATE_PEER_STATS); 2617 } 2618 } 2619 2620 /** 2621 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info 2622 * @tx_desc: software descriptor head pointer 2623 * @length: packet length 2624 * 2625 * Return: none 2626 */ 2627 static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc, 2628 uint32_t length) 2629 { 2630 struct hal_tx_completion_status ts; 2631 struct dp_soc *soc = NULL; 2632 struct dp_vdev *vdev = tx_desc->vdev; 2633 struct dp_peer *peer = NULL; 2634 struct ether_header *eh = 2635 (struct ether_header *)qdf_nbuf_data(tx_desc->nbuf); 2636 2637 hal_tx_comp_get_status(&tx_desc->comp, &ts); 2638 2639 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 2640 "-------------------- \n" 2641 "Tx Completion Stats: \n" 2642 "-------------------- \n" 2643 "ack_frame_rssi = %d \n" 2644 "first_msdu = %d \n" 2645 "last_msdu = %d \n" 2646 "msdu_part_of_amsdu = %d \n" 2647 "rate_stats valid = %d \n" 2648 "bw = %d \n" 2649 "pkt_type = %d \n" 2650 "stbc = %d \n" 2651 "ldpc = %d \n" 2652 "sgi = %d \n" 2653 "mcs = %d \n" 2654 "ofdma = %d \n" 2655 "tones_in_ru = %d \n" 2656 "tsf = %d \n" 2657 "ppdu_id = %d \n" 2658 "transmit_cnt = %d \n" 2659 "tid = %d \n" 2660 "peer_id = %d \n", 2661 ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu, 2662 ts.msdu_part_of_amsdu, ts.valid, ts.bw, 2663 ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi, 2664 ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf, 2665 ts.ppdu_id, ts.transmit_cnt, ts.tid, 2666 ts.peer_id); 2667 2668 if (!vdev) { 2669 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 2670 "invalid vdev"); 2671 goto out; 2672 } 2673 2674 soc = vdev->pdev->soc; 2675 2676 /* Update SoC level stats */ 2677 DP_STATS_INCC(soc, tx.dropped_fw_removed, 1, 2678 (ts.status == HAL_TX_TQM_RR_REM_CMD_REM)); 2679 2680 /* Update per-packet stats */ 2681 if (qdf_unlikely(vdev->mesh_vdev) && 2682 !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) 2683 dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts); 2684 2685 /* Update peer level stats */ 2686 peer = dp_peer_find_by_id(soc, ts.peer_id); 2687 if (!peer) { 2688 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 2689 "invalid peer"); 2690 DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length); 2691 goto out; 2692 } 2693 2694 if (qdf_likely(peer->vdev->tx_encap_type == 2695 htt_cmn_pkt_type_ethernet)) { 2696 if (peer->bss_peer && IEEE80211_IS_BROADCAST(eh->ether_dhost)) 2697 DP_STATS_INC_PKT(peer, tx.bcast, 1, length); 2698 } 2699 2700 dp_tx_update_peer_stats(peer, &ts, length); 2701 2702 out: 2703 return; 2704 } 2705 2706 /** 2707 * dp_tx_comp_process_desc() - Tx complete software descriptor handler 2708 * @soc: core txrx main context 2709 * @comp_head: software descriptor head pointer 2710 * 2711 * This function will process batch of descriptors reaped by 
dp_tx_comp_handler
 * and release the software descriptors after processing is complete
 *
 * Return: none
 */
static void dp_tx_comp_process_desc(struct dp_soc *soc,
		struct dp_tx_desc_s *comp_head)
{
	struct dp_tx_desc_s *desc;
	struct dp_tx_desc_s *next;
	struct hal_tx_completion_status ts = {0};
	uint32_t length;
	struct dp_peer *peer;

	DP_HIST_INIT();
	desc = comp_head;

	while (desc) {
		hal_tx_comp_get_status(&desc->comp, &ts);
		peer = dp_peer_find_by_id(soc, ts.peer_id);
		length = qdf_nbuf_len(desc->nbuf);

		dp_tx_comp_process_tx_status(desc, length);

		/*
		 * Currently m_copy/tx_capture is not supported for
		 * scatter-gather packets.
		 */
		if (!(desc->msdu_ext_desc) &&
		    (dp_get_completion_indication_for_stack(soc,
				desc->pdev, ts.peer_id, ts.ppdu_id,
				ts.first_msdu, ts.last_msdu,
				desc->nbuf) == QDF_STATUS_SUCCESS)) {
			qdf_nbuf_unmap(soc->osdev, desc->nbuf,
					QDF_DMA_TO_DEVICE);

			dp_send_completion_to_stack(soc, desc->pdev,
					ts.peer_id, ts.ppdu_id, desc->nbuf);
		} else {
			dp_tx_comp_free_buf(soc, desc);
		}

		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);

		next = desc->next;
		dp_tx_desc_release(desc, desc->pool_id);
		desc = next;
	}
	DP_TX_HIST_STATS_PER_PDEV();
}
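
/*
 * Illustrative sketch (added commentary, not part of the original driver):
 * dp_tx_comp_handler() below is normally driven from the interrupt/NAPI
 * service path. A hypothetical caller, with "ring" and "budget" supplied
 * by the scheduling layer, might look like:
 *
 *	uint32_t work_done;
 *
 *	work_done = dp_tx_comp_handler(soc,
 *				       soc->tx_comp_ring[ring].hal_srng,
 *				       budget);
 *
 * The return value is the number of completions processed, which the
 * caller can subtract from its remaining budget.
 */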
/**
 * dp_tx_comp_handler() - Tx completion handler
 * @soc: core txrx main context
 * @hal_srng: HAL SRNG handle of the Tx completion ring
 * @quota: No. of packets/descriptors that can be serviced in one loop
 *
 * This function will collect hardware release ring element contents and
 * handle descriptor contents. Based on contents, free packet or handle error
 * conditions
 *
 * Return: number of completions processed
 */
uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
{
	void *tx_comp_hal_desc;
	uint8_t buffer_src;
	uint8_t pool_id;
	uint32_t tx_desc_id;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t num_processed;
	uint32_t count;

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s %d : HAL RING Access Failed -- %pK\n",
				__func__, __LINE__, hal_srng);
		return 0;
	}

	num_processed = 0;
	count = 0;

	/* Find head descriptor from completion ring */
	while (qdf_likely(tx_comp_hal_desc =
			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {

		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);

		/* If this buffer was not released by TQM or FW, then it is
		 * not a Tx completion indication, assert */
		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {

			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_FATAL,
					"Tx comp release_src != TQM | FW");

			qdf_assert_always(0);
		}

		/* Get descriptor id */
		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
				DP_TX_DESC_ID_POOL_OS;

		/* Pool ID is out of range. Error */
		if (pool_id >= wlan_cfg_get_num_tx_desc_pool(
				soc->wlan_cfg_ctx)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_FATAL,
					"Tx Comp pool id %d not valid",
					pool_id);

			qdf_assert_always(0);
		}

		/* Find Tx descriptor */
		tx_desc = dp_tx_desc_find(soc, pool_id,
				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS,
				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS);

		/*
		 * If the release source is FW, process the HTT status
		 */
		if (qdf_unlikely(buffer_src ==
					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
					htt_tx_status);
			dp_tx_process_htt_completion(tx_desc,
					htt_tx_status);
		} else {
			/* Pool id is not matching. Error */
			if (tx_desc->pool_id != pool_id) {
				QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_FATAL,
						"Tx Comp pool id %d not matched %d",
						pool_id, tx_desc->pool_id);

				qdf_assert_always(0);
			}

			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_FATAL,
						"Txdesc invalid, flags = %x, id = %d",
						tx_desc->flags, tx_desc_id);
				qdf_assert_always(0);
			}

			/* First ring descriptor on the cycle */
			if (!head_desc) {
				head_desc = tx_desc;
				tail_desc = tx_desc;
			}

			tail_desc->next = tx_desc;
			tx_desc->next = NULL;
			tail_desc = tx_desc;

			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					&tx_desc->comp, 1);

		}

		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
		/* Decrement PM usage count if the packet has been sent. */
		hif_pm_runtime_put(soc->hif_handle);

		/*
		 * If the processed packet count reaches the given quota,
		 * stop processing
		 */
		if (num_processed >= quota)
			break;

		count++;
	}

	hal_srng_access_end(soc->hal_soc, hal_srng);

	/* Process the reaped descriptors */
	if (head_desc)
		dp_tx_comp_process_desc(soc, head_desc);

	return num_processed;
}

#ifdef CONVERGED_TDLS_ENABLE
/**
 * dp_tx_non_std() - Allow the control-path SW to send data frames
 *
 * @vdev_handle - which vdev should transmit the tx data frames
 * @tx_spec - what non-standard handling to apply to the tx data frames
 * @msdu_list - NULL-terminated list of tx MSDUs
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
		enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;

	if (tx_spec & OL_TX_SPEC_NO_FREE)
		vdev->is_tdls_frame = true;
	return dp_tx_send(vdev_handle, msdu_list);
}
#endif

/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	/*
	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
	 */
	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
			HTT_TCL_METADATA_TYPE_VDEV_BASED);

	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
			vdev->vdev_id);

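	/*
	 * Note (added commentary): at this point htt_tcl_metadata carries
	 * the metadata type and the vdev id; the PDEV (MAC) id written
	 * below completes the word that identifies this vdev to firmware.
	 */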
HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, 2943 DP_SW2HW_MACID(vdev->pdev->pdev_id)); 2944 2945 /* 2946 * Set HTT Extension Valid bit to 0 by default 2947 */ 2948 HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0); 2949 2950 dp_tx_vdev_update_search_flags(vdev); 2951 2952 return QDF_STATUS_SUCCESS; 2953 } 2954 2955 /** 2956 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode 2957 * @vdev: virtual device instance 2958 * 2959 * Return: void 2960 * 2961 */ 2962 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev) 2963 { 2964 /* 2965 * Enable both AddrY (SA based search) and AddrX (Da based search) 2966 * for TDLS link 2967 * 2968 * Enable AddrY (SA based search) only for non-WDS STA and 2969 * ProxySTA VAP modes. 2970 * 2971 * In all other VAP modes, only DA based search should be 2972 * enabled 2973 */ 2974 if (vdev->opmode == wlan_op_mode_sta && 2975 vdev->tdls_link_connected) 2976 vdev->hal_desc_addr_search_flags = 2977 (HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN); 2978 else if ((vdev->opmode == wlan_op_mode_sta && 2979 (!vdev->wds_enabled || vdev->proxysta_vdev))) 2980 vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN; 2981 else 2982 vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN; 2983 } 2984 2985 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 2986 static void dp_tx_desc_flush(struct dp_vdev *vdev) 2987 { 2988 } 2989 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */ 2990 2991 /* dp_tx_desc_flush() - release resources associated 2992 * to tx_desc 2993 * @vdev: virtual device instance 2994 * 2995 * This function will free all outstanding Tx buffers, 2996 * including ME buffer for which either free during 2997 * completion didn't happened or completion is not 2998 * received. 2999 */ 3000 static void dp_tx_desc_flush(struct dp_vdev *vdev) 3001 { 3002 uint8_t i, num_pool; 3003 uint32_t j; 3004 uint32_t num_desc; 3005 struct dp_soc *soc = vdev->pdev->soc; 3006 struct dp_tx_desc_s *tx_desc = NULL; 3007 struct dp_tx_desc_pool_s *tx_desc_pool = NULL; 3008 3009 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); 3010 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); 3011 3012 for (i = 0; i < num_pool; i++) { 3013 for (j = 0; j < num_desc; j++) { 3014 tx_desc_pool = &((soc)->tx_desc[(i)]); 3015 if (tx_desc_pool && 3016 tx_desc_pool->desc_pages.cacheable_pages) { 3017 tx_desc = dp_tx_desc_find(soc, i, 3018 (j & DP_TX_DESC_ID_PAGE_MASK) >> 3019 DP_TX_DESC_ID_PAGE_OS, 3020 (j & DP_TX_DESC_ID_OFFSET_MASK) >> 3021 DP_TX_DESC_ID_OFFSET_OS); 3022 3023 if (tx_desc && (tx_desc->vdev == vdev) && 3024 (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) { 3025 dp_tx_comp_free_buf(soc, tx_desc); 3026 dp_tx_desc_release(tx_desc, i); 3027 } 3028 } 3029 } 3030 } 3031 } 3032 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */ 3033 3034 /** 3035 * dp_tx_vdev_detach() - detach vdev from dp tx 3036 * @vdev: virtual device instance 3037 * 3038 * Return: QDF_STATUS_SUCCESS: success 3039 * QDF_STATUS_E_RESOURCES: Error return 3040 */ 3041 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev) 3042 { 3043 dp_tx_desc_flush(vdev); 3044 return QDF_STATUS_SUCCESS; 3045 } 3046 3047 /** 3048 * dp_tx_pdev_attach() - attach pdev to dp tx 3049 * @pdev: physical device instance 3050 * 3051 * Return: QDF_STATUS_SUCCESS: success 3052 * QDF_STATUS_E_RESOURCES: Error return 3053 */ 3054 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev) 3055 { 3056 struct dp_soc *soc = pdev->soc; 3057 3058 /* Initialize Flow control counters */ 3059 qdf_atomic_init(&pdev->num_tx_exception); 3060 
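	/*
	 * Note (added commentary): num_tx_exception above counts in-flight
	 * frames sent via the firmware exception path, while
	 * num_tx_outstanding below tracks Tx descriptors currently
	 * outstanding with the target for this pdev.
	 */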
qdf_atomic_init(&pdev->num_tx_outstanding); 3061 3062 if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { 3063 /* Initialize descriptors in TCL Ring */ 3064 hal_tx_init_data_ring(soc->hal_soc, 3065 soc->tcl_data_ring[pdev->pdev_id].hal_srng); 3066 } 3067 3068 return QDF_STATUS_SUCCESS; 3069 } 3070 3071 /** 3072 * dp_tx_pdev_detach() - detach pdev from dp tx 3073 * @pdev: physical device instance 3074 * 3075 * Return: QDF_STATUS_SUCCESS: success 3076 * QDF_STATUS_E_RESOURCES: Error return 3077 */ 3078 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev) 3079 { 3080 dp_tx_me_exit(pdev); 3081 return QDF_STATUS_SUCCESS; 3082 } 3083 3084 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 3085 /* Pools will be allocated dynamically */ 3086 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool, 3087 int num_desc) 3088 { 3089 uint8_t i; 3090 3091 for (i = 0; i < num_pool; i++) { 3092 qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock); 3093 soc->tx_desc[i].status = FLOW_POOL_INACTIVE; 3094 } 3095 3096 return 0; 3097 } 3098 3099 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool) 3100 { 3101 uint8_t i; 3102 3103 for (i = 0; i < num_pool; i++) 3104 qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock); 3105 } 3106 #else /* QCA_LL_TX_FLOW_CONTROL_V2! */ 3107 static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool, 3108 int num_desc) 3109 { 3110 uint8_t i; 3111 3112 /* Allocate software Tx descriptor pools */ 3113 for (i = 0; i < num_pool; i++) { 3114 if (dp_tx_desc_pool_alloc(soc, i, num_desc)) { 3115 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3116 "%s Tx Desc Pool alloc %d failed %pK\n", 3117 __func__, i, soc); 3118 return ENOMEM; 3119 } 3120 } 3121 return 0; 3122 } 3123 3124 static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool) 3125 { 3126 uint8_t i; 3127 3128 for (i = 0; i < num_pool; i++) { 3129 qdf_assert_always(!soc->tx_desc[i].num_allocated); 3130 if (dp_tx_desc_pool_free(soc, i)) { 3131 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3132 "%s Tx Desc Pool Free failed\n", __func__); 3133 } 3134 } 3135 } 3136 3137 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */ 3138 3139 /** 3140 * dp_tx_soc_detach() - detach soc from dp tx 3141 * @soc: core txrx main context 3142 * 3143 * This function will detach dp tx into main device context 3144 * will free dp tx resource and initialize resources 3145 * 3146 * Return: QDF_STATUS_SUCCESS: success 3147 * QDF_STATUS_E_RESOURCES: Error return 3148 */ 3149 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc) 3150 { 3151 uint8_t num_pool; 3152 uint16_t num_desc; 3153 uint16_t num_ext_desc; 3154 uint8_t i; 3155 3156 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); 3157 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); 3158 num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx); 3159 3160 dp_tx_flow_control_deinit(soc); 3161 dp_tx_delete_static_pools(soc, num_pool); 3162 3163 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3164 "%s Tx Desc Pool Free num_pool = %d, descs = %d\n", 3165 __func__, num_pool, num_desc); 3166 3167 for (i = 0; i < num_pool; i++) { 3168 if (dp_tx_ext_desc_pool_free(soc, i)) { 3169 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3170 "%s Tx Ext Desc Pool Free failed\n", 3171 __func__); 3172 return QDF_STATUS_E_RESOURCES; 3173 } 3174 } 3175 3176 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3177 "%s MSDU Ext Desc Pool %d Free descs = %d\n", 3178 __func__, num_pool, num_ext_desc); 3179 3180 for (i = 0; i < num_pool; i++) { 3181 dp_tx_tso_desc_pool_free(soc, i); 3182 
} 3183 3184 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3185 "%s TSO Desc Pool %d Free descs = %d\n", 3186 __func__, num_pool, num_desc); 3187 3188 3189 for (i = 0; i < num_pool; i++) 3190 dp_tx_tso_num_seg_pool_free(soc, i); 3191 3192 3193 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3194 "%s TSO Num of seg Desc Pool %d Free descs = %d\n", 3195 __func__, num_pool, num_desc); 3196 3197 return QDF_STATUS_SUCCESS; 3198 } 3199 3200 /** 3201 * dp_tx_soc_attach() - attach soc to dp tx 3202 * @soc: core txrx main context 3203 * 3204 * This function will attach dp tx into main device context 3205 * will allocate dp tx resource and initialize resources 3206 * 3207 * Return: QDF_STATUS_SUCCESS: success 3208 * QDF_STATUS_E_RESOURCES: Error return 3209 */ 3210 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc) 3211 { 3212 uint8_t i; 3213 uint8_t num_pool; 3214 uint32_t num_desc; 3215 uint32_t num_ext_desc; 3216 3217 num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); 3218 num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); 3219 num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx); 3220 3221 if (dp_tx_alloc_static_pools(soc, num_pool, num_desc)) 3222 goto fail; 3223 3224 dp_tx_flow_control_init(soc); 3225 3226 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3227 "%s Tx Desc Alloc num_pool = %d, descs = %d\n", 3228 __func__, num_pool, num_desc); 3229 3230 /* Allocate extension tx descriptor pools */ 3231 for (i = 0; i < num_pool; i++) { 3232 if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) { 3233 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3234 "MSDU Ext Desc Pool alloc %d failed %pK\n", 3235 i, soc); 3236 3237 goto fail; 3238 } 3239 } 3240 3241 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3242 "%s MSDU Ext Desc Alloc %d, descs = %d\n", 3243 __func__, num_pool, num_ext_desc); 3244 3245 for (i = 0; i < num_pool; i++) { 3246 if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) { 3247 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3248 "TSO Desc Pool alloc %d failed %pK\n", 3249 i, soc); 3250 3251 goto fail; 3252 } 3253 } 3254 3255 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3256 "%s TSO Desc Alloc %d, descs = %d\n", 3257 __func__, num_pool, num_desc); 3258 3259 for (i = 0; i < num_pool; i++) { 3260 if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) { 3261 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3262 "TSO Num of seg Pool alloc %d failed %pK\n", 3263 i, soc); 3264 3265 goto fail; 3266 } 3267 } 3268 3269 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3270 "%s TSO Num of seg pool Alloc %d, descs = %d\n", 3271 __func__, num_pool, num_desc); 3272 3273 /* Initialize descriptors in TCL Rings */ 3274 if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { 3275 for (i = 0; i < soc->num_tcl_data_rings; i++) { 3276 hal_tx_init_data_ring(soc->hal_soc, 3277 soc->tcl_data_ring[i].hal_srng); 3278 } 3279 } 3280 3281 /* 3282 * todo - Add a runtime config option to enable this. 3283 */ 3284 /* 3285 * Due to multiple issues on NPR EMU, enable it selectively 3286 * only for NPR EMU, should be removed, once NPR platforms 3287 * are stable. 
 */
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s HAL Tx init Success\n", __func__);

	return QDF_STATUS_SUCCESS;

fail:
	/* Detach will take care of freeing only allocated resources */
	dp_tx_soc_detach(soc);
	return QDF_STATUS_E_RESOURCES;
}

/*
 * dp_tx_me_mem_free(): Function to free allocated memory in mcast enhancement
 * @pdev: pointer to DP PDEV structure
 * @seg_info_head: Pointer to the head of list
 *
 * return: void
 */
static void dp_tx_me_mem_free(struct dp_pdev *pdev,
		struct dp_tx_seg_info_s *seg_info_head)
{
	struct dp_tx_me_buf_t *mc_uc_buf;
	struct dp_tx_seg_info_s *seg_info_new = NULL;
	qdf_nbuf_t nbuf = NULL;
	uint64_t phy_addr;

	while (seg_info_head) {
		nbuf = seg_info_head->nbuf;
		mc_uc_buf = (struct dp_tx_me_buf_t *)
			seg_info_head->frags[0].vaddr;
		phy_addr = seg_info_head->frags[0].paddr_hi;
		phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
				phy_addr,
				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
		dp_tx_me_free_buf(pdev, mc_uc_buf);
		qdf_nbuf_free(nbuf);
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
}

/**
 * dp_tx_me_send_convert_ucast(): function to convert multicast to unicast
 * @vdev_handle: DP VDEV handle
 * @nbuf: Multicast nbuf
 * @newmac: Table of the clients to which packets have to be sent
 * @new_mac_cnt: No of clients
 *
 * return: no of converted packets
 */
uint16_t
dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
{
	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct ether_header *eh;
	uint8_t *data;
	uint16_t len;

	/* reference to frame dst addr */
	uint8_t *dstmac;
	/* copy of original frame src addr */
	uint8_t srcmac[DP_MAC_ADDR_LEN];

	/* local index into newmac */
	uint8_t new_mac_idx = 0;
	struct dp_tx_me_buf_t *mc_uc_buf;
	qdf_nbuf_t nbuf_clone;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s *seg_info_head = NULL;
	struct dp_tx_seg_info_s *seg_info_tail = NULL;
	struct dp_tx_seg_info_s *seg_info_new;
	struct dp_tx_frag_info_s data_frag;
	qdf_dma_addr_t paddr_data;
	qdf_dma_addr_t paddr_mcbuf = 0;
	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
	QDF_STATUS status;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);

	len = qdf_nbuf_len(nbuf);

	data = qdf_nbuf_data(nbuf);

	status = qdf_nbuf_map(vdev->osdev, nbuf,
			QDF_DMA_TO_DEVICE);

	if (status) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Mapping failure Error:%d", status);
		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
		qdf_nbuf_free(nbuf);
		return 1;
	}

	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;

	/* Preparing data fragment */
	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
	data_frag.paddr_lo = (uint32_t)paddr_data;
data_frag.paddr_hi = (((uint64_t) paddr_data) >> 32); 3400 data_frag.len = len - DP_MAC_ADDR_LEN; 3401 3402 for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) { 3403 dstmac = newmac[new_mac_idx]; 3404 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 3405 "added mac addr (%pM)", dstmac); 3406 3407 /* Check for NULL Mac Address */ 3408 if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN)) 3409 continue; 3410 3411 /* frame to self mac. skip */ 3412 if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN)) 3413 continue; 3414 3415 /* 3416 * TODO: optimize to avoid malloc in per-packet path 3417 * For eg. seg_pool can be made part of vdev structure 3418 */ 3419 seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new)); 3420 3421 if (!seg_info_new) { 3422 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3423 "alloc failed"); 3424 DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1); 3425 goto fail_seg_alloc; 3426 } 3427 3428 mc_uc_buf = dp_tx_me_alloc_buf(pdev); 3429 if (mc_uc_buf == NULL) 3430 goto fail_buf_alloc; 3431 3432 /* 3433 * TODO: Check if we need to clone the nbuf 3434 * Or can we just use the reference for all cases 3435 */ 3436 if (new_mac_idx < (new_mac_cnt - 1)) { 3437 nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf); 3438 if (nbuf_clone == NULL) { 3439 DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1); 3440 goto fail_clone; 3441 } 3442 } else { 3443 /* 3444 * Update the ref 3445 * to account for frame sent without cloning 3446 */ 3447 qdf_nbuf_ref(nbuf); 3448 nbuf_clone = nbuf; 3449 } 3450 3451 qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN); 3452 3453 status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data, 3454 QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN, 3455 &paddr_mcbuf); 3456 3457 if (status) { 3458 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 3459 "Mapping failure Error:%d", status); 3460 DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1); 3461 goto fail_map; 3462 } 3463 3464 seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf; 3465 seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf; 3466 seg_info_new->frags[0].paddr_hi = 3467 ((uint64_t) paddr_mcbuf >> 32); 3468 seg_info_new->frags[0].len = DP_MAC_ADDR_LEN; 3469 3470 seg_info_new->frags[1] = data_frag; 3471 seg_info_new->nbuf = nbuf_clone; 3472 seg_info_new->frag_cnt = 2; 3473 seg_info_new->total_len = len; 3474 3475 seg_info_new->next = NULL; 3476 3477 if (seg_info_head == NULL) 3478 seg_info_head = seg_info_new; 3479 else 3480 seg_info_tail->next = seg_info_new; 3481 3482 seg_info_tail = seg_info_new; 3483 } 3484 3485 if (!seg_info_head) { 3486 goto free_return; 3487 } 3488 3489 msdu_info.u.sg_info.curr_seg = seg_info_head; 3490 msdu_info.num_seg = new_mac_cnt; 3491 msdu_info.frm_type = dp_tx_frm_me; 3492 3493 DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt); 3494 dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info); 3495 3496 while (seg_info_head->next) { 3497 seg_info_new = seg_info_head; 3498 seg_info_head = seg_info_head->next; 3499 qdf_mem_free(seg_info_new); 3500 } 3501 qdf_mem_free(seg_info_head); 3502 3503 qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE); 3504 qdf_nbuf_free(nbuf); 3505 return new_mac_cnt; 3506 3507 fail_map: 3508 qdf_nbuf_free(nbuf_clone); 3509 3510 fail_clone: 3511 dp_tx_me_free_buf(pdev, mc_uc_buf); 3512 3513 fail_buf_alloc: 3514 qdf_mem_free(seg_info_new); 3515 3516 fail_seg_alloc: 3517 dp_tx_me_mem_free(pdev, seg_info_head); 3518 3519 free_return: 3520 qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE); 3521 qdf_nbuf_free(nbuf); 3522 return 1; 3523 } 
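
/*
 * Illustrative sketch (added commentary, not part of the original driver):
 * a hypothetical multicast-enhancement caller converting one multicast
 * frame into unicast copies for two known clients. "client_macs" and
 * "mcast_nbuf" are assumed to be provided by the caller; the original
 * nbuf is consumed by the call.
 *
 *	uint8_t client_macs[2][DP_MAC_ADDR_LEN] = {
 *		{ 0x00, 0x03, 0x7f, 0x01, 0x02, 0x03 },
 *		{ 0x00, 0x03, 0x7f, 0x04, 0x05, 0x06 },
 *	};
 *	uint16_t nsent;
 *
 *	nsent = dp_tx_me_send_convert_ucast((struct cdp_vdev *)vdev,
 *					    mcast_nbuf, client_macs, 2);
 */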