/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include <wlan_cfg.h>
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#define DP_TX_GET_DESC_POOL_ID(vdev) qdf_get_cpu()
#define DP_TX_GET_RING_ID(vdev) vdev->pdev->soc->tx_ring_map[qdf_get_cpu()]
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */

/* TODO: Add support for TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* Disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* Invalid peer id used for reinject */
#define DP_INVALID_PEER 0xFFFE

/* Mapping between hal encrypt type and cdp_sec_type */
#define MAX_CDP_SEC_TYPE 12
static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
					HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
					HAL_TX_ENCRYPT_TYPE_WEP_128,
					HAL_TX_ENCRYPT_TYPE_WEP_104,
					HAL_TX_ENCRYPT_TYPE_WEP_40,
					HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
					HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
					HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
					HAL_TX_ENCRYPT_TYPE_WAPI,
					HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
					HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
					HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
					HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};

/**
 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
 * @vdev: DP virtual device handle
 * @nbuf: buffer pointer
 * @queue: queue ids container for nbuf
 *
 * A Tx queue is identified by two ids: a software descriptor pool id and
 * a DMA ring id. Depending on the Tx feature set and hardware
 * configuration, the id combination can differ. For example:
 * - With XPS enabled, all Tx descriptor pools and DMA rings are assigned
 *   per CPU id.
 * - With no XPS (lock based resource protection), descriptor pool ids are
 *   different for each vdev, while the DMA ring id is the single pdev id.
 * (See the illustrative table below.)
 *
 * Return: None
 */
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	queue->ring_id = DP_TX_GET_RING_ID(vdev);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s, pool_id:%d ring_id: %d",
			__func__, queue->desc_pool_id, queue->ring_id);

	return;
}
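/*
 * Illustrative summary of the build-time mappings above (assumes a
 * multi-core host; the id shown is whichever of vdev_id/pdev_id/CPU id
 * the corresponding macro expands to):
 *
 *	Build configuration			desc_pool_id	ring_id
 *	TX_PER_PDEV_DESC_POOL + FC_V2		vdev_id		pdev_id
 *	TX_PER_PDEV_DESC_POOL			pdev_id		pdev_id
 *	TX_PER_VDEV_DESC_POOL			vdev_id		pdev_id
 *	default (XPS style)			CPU id		tx_ring_map[cpu]
 */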
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_release() - Release the tso segment
 *                            after unmapping all the fragments
 * @soc: device SoC handle
 * @tx_desc: Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
		struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s %d TSO desc is NULL!",
				__func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(tx_desc->tso_num_desc == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s %d TSO common info is NULL!",
				__func__, __LINE__);
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;

		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1) {
			tso_num_desc->num_seg.tso_cmn_num_seg--;
			qdf_nbuf_unmap_tso_segment(soc->osdev,
					tx_desc->tso_desc, false);
		} else {
			tso_num_desc->num_seg.tso_cmn_num_seg--;
			qdf_assert(tso_num_desc->num_seg.tso_cmn_num_seg == 0);
			qdf_nbuf_unmap_tso_segment(soc->osdev,
					tx_desc->tso_desc, true);
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					tx_desc->tso_num_desc);
			tx_desc->tso_num_desc = NULL;
		}
		dp_tx_tso_desc_free(soc,
				tx_desc->pool_id, tx_desc->tso_desc);
		tx_desc->tso_desc = NULL;
	}
}
#else
static void dp_tx_tso_desc_release(struct dp_soc *soc,
		struct dp_tx_desc_s *tx_desc)
{
	return;
}
#endif
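/*
 * Note on the release logic above: tso_cmn_num_seg acts as a reference
 * count shared by all segments of one TSO jumbo nbuf. Only the release of
 * the last segment passes is_last_seg = true to
 * qdf_nbuf_unmap_tso_segment() and frees the common num-seg descriptor;
 * earlier releases merely decrement the count and unmap their own segment.
 */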
/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc: Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: None
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	struct dp_soc *soc;
	uint8_t comp_status = 0;

	qdf_assert(pdev);

	soc = pdev->soc;

	if (tx_desc->frm_type == dp_tx_frm_tso)
		dp_tx_tso_desc_release(soc, tx_desc);

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);

	qdf_atomic_dec(&pdev->num_tx_outstanding);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&pdev->num_tx_exception);

	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
			hal_tx_comp_get_buffer_source(&tx_desc->comp))
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		"Tx Completion Release desc %d status %d outstanding %d",
		tx_desc->id, comp_status,
		qdf_atomic_read(&pdev->num_tx_outstanding));

	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
	return;
}

/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @meta_data: Metadata to be sent to the firmware
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata:
 * 1) Mesh Mode
 * 2) DSRC Mode
 *
 * Return: HTT metadata size
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		uint32_t *meta_data)
{
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *)meta_data;

	uint8_t htt_desc_size;

	/* Size rounded up to a multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;

	uint8_t *hdr = NULL;

	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);

	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

	if (vdev->mesh_vdev) {

		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (hdr == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"Error in filling HTT metadata\n");

			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);

	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* Todo - Add support for DSRC */
	}

	return htt_desc_size_aligned;
}
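/*
 * Worked example of the 8-byte rounding above (the value 34 is purely
 * illustrative; the real size is sizeof(struct htt_tx_msdu_desc_ext2_t)):
 *
 *	htt_desc_size         = 34
 *	htt_desc_size_aligned = (34 + 7) & ~0x7 = 40
 *
 * so qdf_nbuf_push_head() always reserves a multiple of 8 bytes in front
 * of the frame, keeping the metadata start 8-byte aligned as the hardware
 * requires.
 */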
/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
		void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;

	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags = *(uint32_t *) &tso_seg->tso_flags;

	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
			tso_seg->tso_flags.ip_len);

	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
		uint32_t lo = 0;
		uint32_t hi = 0;

		qdf_dmaaddr_to_32s(
			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
			tso_seg->tso_frags[num_frag].length);
	}

	return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
		void *ext_desc)
{
	return;
}
#endif

#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg() - Loop through the tso segments
 *                        allocated and free them
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_seg(struct dp_soc *soc,
	struct qdf_tso_seg_elem_t *free_seg,
	struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_desc_free(soc,
			msdu_info->tx_queue.desc_pool_id,
			free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_free_tso_num_seg() - Loop through the tso num segments
 *                            allocated and free them
 * @soc: soc handle
 * @free_seg: list of tso num segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
	struct qdf_tso_num_seg_elem_t *free_seg,
	struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_num_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tso_num_seg_free(soc,
			msdu_info->tx_queue.desc_pool_id,
			free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *         QDF_STATUS_E_NOMEM or QDF_STATUS_E_INVAL on failure
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *tso_seg;
	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
	struct dp_soc *soc = vdev->pdev->soc;
	struct qdf_tso_info_t *tso_info;
	struct qdf_tso_num_seg_elem_t *tso_num_seg;

	tso_info = &msdu_info->u.tso_info;
	tso_info->curr_seg = NULL;
	tso_info->tso_seg_list = NULL;
	tso_info->num_segs = num_seg;
	msdu_info->frm_type = dp_tx_frm_tso;
	tso_info->tso_num_seg_list = NULL;

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	while (num_seg) {
		tso_seg = dp_tx_tso_desc_alloc(
				soc, msdu_info->tx_queue.desc_pool_id);
		if (tso_seg) {
			tso_seg->next = tso_info->tso_seg_list;
			tso_info->tso_seg_list = tso_seg;
			num_seg--;
		} else {
			struct qdf_tso_seg_elem_t *free_seg =
				tso_info->tso_seg_list;

			dp_tx_free_tso_seg(soc, free_seg, msdu_info);

			return QDF_STATUS_E_NOMEM;
		}
	}

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	tso_num_seg = dp_tso_num_seg_alloc(soc,
			msdu_info->tx_queue.desc_pool_id);

	if (tso_num_seg) {
		tso_num_seg->next = tso_info->tso_num_seg_list;
		tso_info->tso_num_seg_list = tso_num_seg;
	} else {
		/*
		 * Num-seg element alloc failed: free the segment
		 * descriptors allocated so far
		 */
		struct qdf_tso_seg_elem_t *free_seg =
			tso_info->tso_seg_list;

		TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
				__func__);
		dp_tx_free_tso_seg(soc, free_seg, msdu_info);

		return QDF_STATUS_E_NOMEM;
	}

	msdu_info->num_seg =
		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
			msdu_info->num_seg);

	if (!(msdu_info->num_seg)) {
		dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
		dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
				msdu_info);
		return QDF_STATUS_E_INVAL;
	}

	tso_info->curr_seg = tso_info->tso_seg_list;

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	return QDF_STATUS_E_NOMEM;
}
#endif
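/*
 * Allocation/rollback contract in dp_tx_prepare_tso() above: descriptors
 * are pushed onto tso_seg_list as they are allocated, so on any failure
 * the partially built list can be handed back to dp_tx_free_tso_seg()
 * (and the num-seg element, once allocated, to dp_tx_free_tso_num_seg())
 * to undo everything done so far.
 */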
/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
	struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
	uint8_t i;
	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
	struct dp_tx_seg_info_s *seg_info;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_soc *soc = vdev->pdev->soc;

	/* Allocate an extension descriptor */
	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

	if (!msdu_ext_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
		return NULL;
	}

	if (msdu_info->exception_fw &&
			qdf_unlikely(vdev->mesh_vdev)) {
		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
				&msdu_info->meta_data[0],
				sizeof(struct htt_tx_msdu_desc_ext2_t));
		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
		HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);
	}

	switch (msdu_info->frm_type) {
	case dp_tx_frm_sg:
	case dp_tx_frm_me:
	case dp_tx_frm_raw:
		seg_info = msdu_info->u.sg_info.curr_seg;
		/* Update the buffer pointers in MSDU Extension Descriptor */
		for (i = 0; i < seg_info->frag_cnt; i++) {
			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
				seg_info->frags[i].paddr_lo,
				seg_info->frags[i].paddr_hi,
				seg_info->frags[i].len);
		}

		break;

	case dp_tx_frm_tso:
		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
				&cached_ext_desc[0]);
		break;

	default:
		break;
	}

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

	hal_tx_ext_desc_sync(&cached_ext_desc[0],
			msdu_ext_desc->vaddr);

	return msdu_ext_desc;
}
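/*
 * Note the write pattern used above: the extension descriptor is first
 * composed in cached_ext_desc[] on the stack and then copied out in one
 * shot with hal_tx_ext_desc_sync(), rather than being written field by
 * field into the (typically uncached) descriptor memory behind
 * msdu_ext_desc->vaddr.
 */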
/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: MSDU info containing metadata for the firmware
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
		struct dp_tx_msdu_info_s *msdu_info,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	uint8_t align_pad;
	uint8_t is_exception = 0;
	uint8_t htt_hdr_size;
	struct ether_header *eh;
	struct dp_tx_desc_s *tx_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
		return NULL;
	}

	/* Flow control/Congestion Control counters */
	qdf_atomic_inc(&pdev->num_tx_outstanding);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = (tx_exc_metadata ?
			tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
	tx_desc->vdev = vdev;
	tx_desc->pdev = pdev;
	tx_desc->msdu_ext_desc = NULL;
	tx_desc->pkt_offset = 0;

	/*
	 * For special modes (vdev_type == ocb or mesh), data frames should be
	 * transmitted using varying transmit parameters (tx spec) which
	 * include transmit rate, power, priority, channel, channel bandwidth,
	 * nss etc. These are filled in the HTT MSDU descriptor and sent in
	 * the frame pre-header. Such frames are sent as exception packets to
	 * the firmware.
	 *
	 * The HW requirement is that the metadata should always start at an
	 * 8-byte aligned address, so an alignment pad is added before the
	 * start of the buffer. HTT metadata must itself be a multiple of
	 * 8 bytes so that, together with align_pad, the buffer pointer stays
	 * 8-byte aligned:
	 *
	 *  |-----------------------------|
	 *  |                             |
	 *  |-----------------------------| <-----Buffer Pointer Address given
	 *  |                             |  ^    in HW descriptor (aligned)
	 *  |        HTT Metadata         |  |
	 *  |                             |  |
	 *  |                             |  | Packet Offset given in descriptor
	 *  |                             |  |
	 *  |-----------------------------|  |
	 *  |        Alignment Pad        |  v
	 *  |-----------------------------| <----- Actual buffer start address
	 *  |         SKB Data            |        (Unaligned)
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |-----------------------------|
	 */
	if (qdf_unlikely((msdu_info->exception_fw)) ||
				(vdev->opmode == wlan_op_mode_ocb)) {
		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"qdf_nbuf_push_head failed\n");
			goto failure;
		}

		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
				msdu_info->meta_data);
		if (htt_hdr_size == 0)
			goto failure;
		tx_desc->pkt_offset = align_pad + htt_hdr_size;
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		is_exception = 1;
	}

	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
				qdf_nbuf_map(soc->osdev, nbuf,
					QDF_DMA_TO_DEVICE))) {
		/* Handle failure */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_map failed\n");
		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
		goto failure;
	}

	if (qdf_unlikely(vdev->nawds_enabled)) {
		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
			is_exception = 1;
		}
	}

#if !TQM_BYPASS_WAR
	if (is_exception || tx_exc_metadata)
#endif
	{
		/* Temporary WAR due to TQM VP issues */
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		qdf_atomic_inc(&pdev->num_tx_exception);
	}

	return tx_desc;

failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}
/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for a
 *                        multisegment frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (!tx_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
		return NULL;
	}

	/* Flow control/Congestion Control counters */
	qdf_atomic_inc(&pdev->num_tx_outstanding);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = msdu_info->frm_type;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev = vdev;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;
	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

	/* Handle scattered frames - TSO/SG/ME */
	/* Allocate and prepare an extension descriptor for scattered frames */
	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
	if (!msdu_ext_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"%s Tx Extension Descriptor Alloc Fail\n",
				__func__);
		goto failure;
	}

#if TQM_BYPASS_WAR
	/* Temporary WAR due to TQM VP issues */
	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
	qdf_atomic_inc(&pdev->num_tx_exception);
#endif
	if (qdf_unlikely(msdu_info->exception_fw))
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

	tx_desc->msdu_ext_desc = msdu_ext_desc;
	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

	return tx_desc;
failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}
/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 *
 * Return: nbuf on success,
 *         NULL on DMA mapping failure
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	qdf_nbuf_t curr_nbuf = NULL;
	uint16_t total_len = 0;
	qdf_dma_addr_t paddr;
	int32_t i;
	int32_t mapped_buf_num = 0;

	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;

	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
	if (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;

	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {

		if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
					QDF_DMA_TO_DEVICE)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s dma map error\n", __func__);
			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
			mapped_buf_num = i;
			goto error;
		}

		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
		seg_info->frags[i].paddr_lo = paddr;
		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
		seg_info->frags[i].vaddr = (void *) curr_nbuf;
		total_len += qdf_nbuf_len(curr_nbuf);
	}

	seg_info->frag_cnt = i;
	seg_info->total_len = total_len;
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_raw;
	msdu_info->num_seg = 1;

	return nbuf;

error:
	i = 0;
	while (nbuf) {
		curr_nbuf = nbuf;
		if (i < mapped_buf_num) {
			/* Unmap only the buffers mapped before the failure */
			qdf_nbuf_unmap(vdev->osdev, curr_nbuf,
					QDF_DMA_TO_DEVICE);
			i++;
		}
		nbuf = qdf_nbuf_next(nbuf);
		qdf_nbuf_free(curr_nbuf);
	}
	return NULL;
}
/**
 * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
 * @soc: DP Soc Handle
 * @vdev: DP vdev handle
 * @tx_desc: Tx Descriptor Handle
 * @tid: TID from HLOS for overriding default DSCP-TID mapping
 * @fw_metadata: Metadata to send to Target Firmware along with frame
 * @ring_id: Ring ID of H/W ring to which we enqueue the packet
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Gets the next free TCL HW DMA descriptor and sets up required parameters
 * from software Tx descriptor
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *         QDF_STATUS_E_RESOURCES when the TCL ring is full
 */
static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
				   uint16_t fw_metadata, uint8_t ring_id,
				   struct cdp_tx_exception_metadata
					*tx_exc_metadata)
{
	uint8_t type;
	uint16_t length;
	void *hal_tx_desc, *hal_tx_desc_cached;
	qdf_dma_addr_t dma_addr;
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];

	enum cdp_sec_type sec_type = (tx_exc_metadata ?
			tx_exc_metadata->sec_type : vdev->sec_type);

	/* Return Buffer Manager ID */
	uint8_t bm_id = ring_id;
	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;

	hal_tx_desc_cached = (void *) cached_desc;
	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
		length = HAL_TX_EXT_DESC_WITH_META_DATA;
		type = HAL_TX_BUF_TYPE_EXT_DESC;
		dma_addr = tx_desc->msdu_ext_desc->paddr;
	} else {
		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
		type = HAL_TX_BUF_TYPE_BUFFER;
		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
	}

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
			dma_addr, bm_id, tx_desc->id, type);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_lmac_id(hal_tx_desc_cached,
			HAL_TX_DESC_DEFAULT_LMAC_ID);
	hal_tx_desc_set_dscp_tid_table_id(hal_tx_desc_cached,
			vdev->dscp_tid_map_id);
	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
			sec_type_map[sec_type]);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
			__func__, length, type, (uint64_t)dma_addr,
			tx_desc->pkt_offset, tx_desc->id);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
			vdev->hal_desc_addr_search_flags);

	/* Verify checksum offload configuration */
	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
		|| qdf_nbuf_is_tso(tx_desc->nbuf))) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);

	/* Sync cached descriptor with HW */
	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);

	if (!hal_tx_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s TCL ring full ring_id:%d\n", __func__, ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		return QDF_STATUS_E_RESOURCES;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;

	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);

	/*
	 * If one packet is enqueued in HW, PM usage count needs to be
	 * incremented by one to prevent future runtime suspend. This
	 * should be tied with the success of enqueuing. It will be
	 * decremented after the packet has been sent.
	 */
	hif_pm_runtime_get_noresume(soc->hif_handle);

	return QDF_STATUS_SUCCESS;
}
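/*
 * The hif_pm_runtime_get_noresume() above is balanced by a
 * hif_pm_runtime_put() once the Tx completion for this frame has been
 * processed, so the target cannot runtime-suspend while packets are
 * still in flight.
 */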
901 */ 902 hif_pm_runtime_get_noresume(soc->hif_handle); 903 904 return QDF_STATUS_SUCCESS; 905 } 906 907 908 /** 909 * dp_cce_classify() - Classify the frame based on CCE rules 910 * @vdev: DP vdev handle 911 * @nbuf: skb 912 * 913 * Classify frames based on CCE rules 914 * Return: bool( true if classified, 915 * else false) 916 */ 917 static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 918 { 919 struct ether_header *eh = NULL; 920 uint16_t ether_type; 921 qdf_llc_t *llcHdr; 922 qdf_nbuf_t nbuf_clone = NULL; 923 qdf_dot3_qosframe_t *qos_wh = NULL; 924 925 /* for mesh packets don't do any classification */ 926 if (qdf_unlikely(vdev->mesh_vdev)) 927 return false; 928 929 if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { 930 eh = (struct ether_header *) qdf_nbuf_data(nbuf); 931 ether_type = eh->ether_type; 932 llcHdr = (qdf_llc_t *)(nbuf->data + 933 sizeof(struct ether_header)); 934 } else { 935 qos_wh = (qdf_dot3_qosframe_t *) nbuf->data; 936 937 if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) { 938 if (qdf_unlikely( 939 qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS && 940 qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) { 941 942 ether_type = *(uint16_t *)(nbuf->data 943 + QDF_IEEE80211_4ADDR_HDR_LEN 944 + sizeof(qdf_llc_t) 945 - sizeof(ether_type)); 946 llcHdr = (qdf_llc_t *)(nbuf->data + 947 QDF_IEEE80211_4ADDR_HDR_LEN); 948 } else { 949 ether_type = *(uint16_t *)(nbuf->data 950 + QDF_IEEE80211_3ADDR_HDR_LEN 951 + sizeof(qdf_llc_t) 952 - sizeof(ether_type)); 953 llcHdr = (qdf_llc_t *)(nbuf->data + 954 QDF_IEEE80211_3ADDR_HDR_LEN); 955 } 956 957 if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr) 958 && (ether_type == 959 qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) { 960 961 DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1); 962 return true; 963 } 964 } 965 966 return false; 967 } 968 969 if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) { 970 ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN + 971 sizeof(*llcHdr)); 972 nbuf_clone = qdf_nbuf_clone(nbuf); 973 qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr)); 974 975 if (ether_type == htons(ETHERTYPE_8021Q)) { 976 qdf_nbuf_pull_head(nbuf_clone, 977 sizeof(qdf_net_vlanhdr_t)); 978 } 979 } else { 980 if (ether_type == htons(ETHERTYPE_8021Q)) { 981 nbuf_clone = qdf_nbuf_clone(nbuf); 982 qdf_nbuf_pull_head(nbuf_clone, 983 sizeof(qdf_net_vlanhdr_t)); 984 } 985 } 986 987 if (qdf_unlikely(nbuf_clone)) 988 nbuf = nbuf_clone; 989 990 991 if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) 992 || qdf_nbuf_is_ipv4_arp_pkt(nbuf) 993 || qdf_nbuf_is_ipv4_wapi_pkt(nbuf) 994 || qdf_nbuf_is_ipv4_tdls_pkt(nbuf) 995 || (qdf_nbuf_is_ipv4_pkt(nbuf) 996 && qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) 997 || (qdf_nbuf_is_ipv6_pkt(nbuf) && 998 qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) { 999 if (qdf_unlikely(nbuf_clone != NULL)) 1000 qdf_nbuf_free(nbuf_clone); 1001 return true; 1002 } 1003 1004 if (qdf_unlikely(nbuf_clone != NULL)) 1005 qdf_nbuf_free(nbuf_clone); 1006 1007 return false; 1008 } 1009 1010 /** 1011 * dp_tx_classify_tid() - Obtain TID to be used for this frame 1012 * @vdev: DP vdev handle 1013 * @nbuf: skb 1014 * 1015 * Extract the DSCP or PCP information from frame and map into TID value. 1016 * Software based TID classification is required when more than 2 DSCP-TID 1017 * mapping tables are needed. 
/**
 * dp_tx_classify_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info in which the extracted TID is stored
 *
 * Extract the DSCP or PCP information from the frame and map it into a TID
 * value. Software based TID classification is required when more than 2
 * DSCP-TID mapping tables are needed; hardware supports only 2 DSCP-TID
 * mapping tables.
 *
 * Return: void
 */
static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			       struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t tos = 0, dscp_tid_override = 0;
	uint8_t *hdr_ptr, *L3datap;
	uint8_t is_mcast = 0;
	struct ether_header *eh = NULL;
	qdf_ethervlan_header_t *evh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	/* for mesh packets don't do any classification */
	if (qdf_unlikely(vdev->mesh_vdev))
		return;

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (struct ether_header *) nbuf->data;
		hdr_ptr = eh->ether_dhost;
		L3datap = hdr_ptr + sizeof(struct ether_header);
	} else {
		qdf_dot3_qosframe_t *qos_wh =
			(qdf_dot3_qosframe_t *) nbuf->data;
		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
			qos_wh->i_qos[0] & DP_QOS_TID : 0;
		return;
	}

	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
	ether_type = eh->ether_type;

	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(struct ether_header));
	/*
	 * Check if packet is dot3 or eth2 type.
	 */
	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
		ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
				sizeof(*llcHdr));

		if (ether_type == htons(ETHERTYPE_8021Q)) {
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
					sizeof(*llcHdr);
			ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
					+ sizeof(*llcHdr) +
					sizeof(qdf_net_vlanhdr_t));
		} else {
			L3datap = hdr_ptr + sizeof(struct ether_header) +
				sizeof(*llcHdr);
		}
	} else {
		if (ether_type == htons(ETHERTYPE_8021Q)) {
			evh = (qdf_ethervlan_header_t *) eh;
			ether_type = evh->ether_type;
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
		}
	}

	/*
	 * Find priority from IP TOS DSCP field
	 */
	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
			/* Only for unicast frames */
			if (!is_mcast) {
				/* send it on VO queue */
				msdu_info->tid = DP_VO_TID;
			}
		} else {
			/*
			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
			 * from TOS byte.
			 */
			tos = ip->ip_tos;
			dscp_tid_override = 1;
		}
	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
		/*
		 * TODO: use flowlabel
		 * IGMP/MLD cases to be handled in phase 2
		 */
		unsigned long ver_pri_flowlabel;
		unsigned long pri;
		ver_pri_flowlabel = *(unsigned long *) L3datap;
		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
			DP_IPV6_PRIORITY_SHIFT;
		tos = pri;
		dscp_tid_override = 1;
	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
		msdu_info->tid = DP_VO_TID;
	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
		/* Only for unicast frames */
		if (!is_mcast) {
			/* send ucast arp on VO queue */
			msdu_info->tid = DP_VO_TID;
		}
	}

	/*
	 * Assign all MCAST packets to BE
	 */
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		if (is_mcast) {
			tos = 0;
			dscp_tid_override = 1;
		}
	}

	if (dscp_tid_override == 1) {
		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
	}
	return;
}
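/*
 * Worked example of the DSCP extraction above, assuming the usual values
 * DP_IP_DSCP_SHIFT = 2 and DP_IP_DSCP_MASK = 0x3f (both defined outside
 * this file): an IPv4 TOS byte of 0xB8 (DSCP 46, "EF" voice) yields
 *
 *	tos = (0xB8 >> 2) & 0x3f = 46
 *
 * which is then looked up in pdev->dscp_tid_map[map_id][46] to obtain the
 * TID programmed into the TCL descriptor.
 */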
1094 */ 1095 tos = ip->ip_tos; 1096 dscp_tid_override = 1; 1097 1098 } 1099 } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) { 1100 /* TODO 1101 * use flowlabel 1102 *igmpmld cases to be handled in phase 2 1103 */ 1104 unsigned long ver_pri_flowlabel; 1105 unsigned long pri; 1106 ver_pri_flowlabel = *(unsigned long *) L3datap; 1107 pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >> 1108 DP_IPV6_PRIORITY_SHIFT; 1109 tos = pri; 1110 dscp_tid_override = 1; 1111 } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) 1112 msdu_info->tid = DP_VO_TID; 1113 else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) { 1114 /* Only for unicast frames */ 1115 if (!is_mcast) { 1116 /* send ucast arp on VO queue */ 1117 msdu_info->tid = DP_VO_TID; 1118 } 1119 } 1120 1121 /* 1122 * Assign all MCAST packets to BE 1123 */ 1124 if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { 1125 if (is_mcast) { 1126 tos = 0; 1127 dscp_tid_override = 1; 1128 } 1129 } 1130 1131 if (dscp_tid_override == 1) { 1132 tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK; 1133 msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos]; 1134 } 1135 return; 1136 } 1137 1138 #ifdef CONVERGED_TDLS_ENABLE 1139 /** 1140 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame 1141 * @tx_desc: TX descriptor 1142 * 1143 * Return: None 1144 */ 1145 static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc) 1146 { 1147 if (tx_desc->vdev) { 1148 if (tx_desc->vdev->is_tdls_frame) 1149 tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME; 1150 tx_desc->vdev->is_tdls_frame = false; 1151 } 1152 } 1153 1154 /** 1155 * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer 1156 * @tx_desc: TX descriptor 1157 * @vdev: datapath vdev handle 1158 * 1159 * Return: None 1160 */ 1161 static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc, 1162 struct dp_vdev *vdev) 1163 { 1164 struct hal_tx_completion_status ts = {0}; 1165 qdf_nbuf_t nbuf = tx_desc->nbuf; 1166 1167 hal_tx_comp_get_status(&tx_desc->comp, &ts); 1168 if (vdev->tx_non_std_data_callback.func) { 1169 qdf_nbuf_set_next(tx_desc->nbuf, NULL); 1170 vdev->tx_non_std_data_callback.func( 1171 vdev->tx_non_std_data_callback.ctxt, 1172 nbuf, ts.status); 1173 return; 1174 } 1175 } 1176 #endif 1177 1178 /** 1179 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL 1180 * @vdev: DP vdev handle 1181 * @nbuf: skb 1182 * @tid: TID from HLOS for overriding default DSCP-TID mapping 1183 * @meta_data: Metadata to the fw 1184 * @tx_q: Tx queue to be used for this Tx frame 1185 * @peer_id: peer_id of the peer in case of NAWDS frames 1186 * @tx_exc_metadata: Handle that holds exception path metadata 1187 * 1188 * Return: NULL on success, 1189 * nbuf when it fails to send 1190 */ 1191 static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1192 struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id, 1193 struct cdp_tx_exception_metadata *tx_exc_metadata) 1194 { 1195 struct dp_pdev *pdev = vdev->pdev; 1196 struct dp_soc *soc = pdev->soc; 1197 struct dp_tx_desc_s *tx_desc; 1198 QDF_STATUS status; 1199 struct dp_tx_queue *tx_q = &(msdu_info->tx_queue); 1200 void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng; 1201 uint16_t htt_tcl_metadata = 0; 1202 uint8_t tid = msdu_info->tid; 1203 1204 HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 0); 1205 /* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */ 1206 tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id, 1207 msdu_info, tx_exc_metadata); 1208 if 
static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	QDF_STATUS status;
	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
	uint16_t htt_tcl_metadata = 0;
	uint8_t tid = msdu_info->tid;

	HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 0);
	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
			msdu_info, tx_exc_metadata);
	if (!tx_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Tx_desc prepare Fail vdev %pK queue %d\n",
			  __func__, vdev, tx_q->desc_pool_id);
		return nbuf;
	}

	if (qdf_unlikely(soc->cce_disable)) {
		if (dp_cce_classify(vdev, nbuf) == true) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			tid = DP_VO_TID;
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		}
	}

	dp_tx_update_tdls_flags(tx_desc);

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s %d : HAL RING Access Failed -- %pK\n",
				__func__, __LINE__, hal_srng);
		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
		goto fail_return;
	}

	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
		htt_tcl_metadata = vdev->htt_tcl_metadata;
		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
				HTT_TCL_METADATA_TYPE_PEER_BASED);
		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
				peer_id);
	} else
		htt_tcl_metadata = vdev->htt_tcl_metadata;

	/* Enqueue the Tx MSDU descriptor to HW for transmit */
	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);

	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
			  __func__, tx_desc, tx_q->ring_id);
		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
		goto fail_return;
	}

	nbuf = NULL;

fail_return:
	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
		hal_srng_access_end(soc->hal_soc, hal_srng);
		hif_pm_runtime_put(soc->hif_handle);
	} else {
		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
	}

	return nbuf;
}
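/*
 * Note on the epilogue above (as suggested by the QDF/HAL naming): when
 * the runtime-PM get succeeds, the ring access is closed normally and the
 * hardware head pointer is updated; when it fails, hal_srng_access_end_reap()
 * finishes only the software side of the ring access, deferring the
 * hardware update so no registers are touched while the bus may be
 * suspended.
 */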
/**
 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 *
 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#if QDF_LOCK_STATS
static noinline
#else
static
#endif
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t i;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	bool is_cce_classified = false;
	QDF_STATUS status;

	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s %d : HAL RING Access Failed -- %pK\n",
				__func__, __LINE__, hal_srng);
		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
		return nbuf;
	}

	if (qdf_unlikely(soc->cce_disable)) {
		is_cce_classified = dp_cce_classify(vdev, nbuf);
		if (is_cce_classified) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			msdu_info->tid = DP_VO_TID;
		}
	}

	if (msdu_info->frm_type == dp_tx_frm_me)
		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;

	i = 0;
	/* Print statement to track i and num_seg */
	/*
	 * For each segment (maps to 1 MSDU), prepare software and hardware
	 * descriptors using information in msdu_info
	 */
	while (i < msdu_info->num_seg) {
		/*
		 * Setup Tx descriptor for an MSDU, and MSDU extension
		 * descriptor
		 */
		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
				tx_q->desc_pool_id);

		if (!tx_desc) {
			if (msdu_info->frm_type == dp_tx_frm_me) {
				dp_tx_me_free_buf(pdev,
					(void *)(msdu_info->u.sg_info
						.curr_seg->frags[0].vaddr));
			}
			goto done;
		}

		if (msdu_info->frm_type == dp_tx_frm_me) {
			tx_desc->me_buffer =
				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
		}

		if (is_cce_classified)
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

		/*
		 * Enqueue the Tx MSDU descriptor to HW for transmit
		 */
		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
			vdev->htt_tcl_metadata, tx_q->ring_id, NULL);

		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
				  __func__, tx_desc, tx_q->ring_id);

			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);

			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
			goto done;
		}

		/*
		 * TODO
		 * if tso_info structure can be modified to have curr_seg
		 * as first element, following 2 blocks of code (for TSO and SG)
		 * can be combined into 1
		 */

		/*
		 * For frames with multiple segments (TSO, ME), jump to next
		 * segment.
		 */
		if (msdu_info->frm_type == dp_tx_frm_tso) {
			if (msdu_info->u.tso_info.curr_seg->next) {
				msdu_info->u.tso_info.curr_seg =
					msdu_info->u.tso_info.curr_seg->next;

				/*
				 * If this is a jumbo nbuf, then increment the
				 * number of nbuf users for each additional
				 * segment of the msdu. This will ensure that
				 * the skb is freed only after receiving tx
				 * completion for all segments of an nbuf
				 */
				qdf_nbuf_inc_users(nbuf);

				/* Check with MCL if this is needed */
				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
			}
		}

		/*
		 * For Multicast-Unicast converted packets,
		 * each converted frame (for a client) is represented as
		 * 1 segment
		 */
		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
				(msdu_info->frm_type == dp_tx_frm_me)) {
			if (msdu_info->u.sg_info.curr_seg->next) {
				msdu_info->u.sg_info.curr_seg =
					msdu_info->u.sg_info.curr_seg->next;
				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
			}
		}
		i++;
	}

	nbuf = NULL;

done:
	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
		hal_srng_access_end(soc->hal_soc, hal_srng);
		hif_pm_runtime_put(soc->hif_handle);
	} else {
		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
	}

	return nbuf;
}
/**
 * dp_tx_prepare_sg() - Extract SG info from NBUF and prepare msdu_info
 *                      for SG frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: nbuf on success,
 *         NULL on DMA mapping failure
 */
static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t cur_frag, nr_frags;
	qdf_dma_addr_t paddr;
	struct dp_tx_sg_info_s *sg_info;

	sg_info = &msdu_info->u.sg_info;
	nr_frags = qdf_nbuf_get_nr_frags(nbuf);

	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
				QDF_DMA_TO_DEVICE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"dma map error\n");
		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);

		qdf_nbuf_free(nbuf);
		return NULL;
	}

	/* Fragment 0 describes the linear (head) part of the nbuf */
	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
	seg_info->frags[0].paddr_lo = paddr;
	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
	seg_info->frags[0].vaddr = (void *) nbuf;

	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"frag dma map error\n");
			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
			qdf_nbuf_free(nbuf);
			return NULL;
		}

		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
		seg_info->frags[cur_frag + 1].paddr_hi =
			((uint64_t) paddr) >> 32;
		seg_info->frags[cur_frag + 1].len =
			qdf_nbuf_get_frag_size(nbuf, cur_frag);
	}

	seg_info->frag_cnt = (cur_frag + 1);
	seg_info->total_len = qdf_nbuf_len(nbuf);
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_sg;
	msdu_info->num_seg = 1;

	return nbuf;
}
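/*
 * Fragment addresses above are carried as two 32-bit halves
 * (paddr_lo / paddr_hi = paddr >> 32) because the MSDU extension
 * descriptor stores each buffer pointer as two 32-bit words;
 * hal_tx_ext_desc_set_buffer() consumes them in exactly that form
 * (see dp_tx_prepare_ext_desc() earlier in this file).
 */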
#ifdef MESH_MODE_SUPPORT

/**
 * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
 *                                  and prepare msdu_info for mesh frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on failure,
 *         nbuf when extracted successfully
 */
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				struct dp_tx_msdu_info_s *msdu_info)
{
	struct meta_hdr_s *mhdr;
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
		msdu_info->exception_fw = 0;
		goto remove_meta_hdr;
	}

	msdu_info->exception_fw = 1;

	qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0);

	meta_data->host_tx_desc_pool = 1;
	meta_data->update_peer_cache = 1;
	meta_data->learning_frame = 1;

	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
		meta_data->power = mhdr->power;

		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
		meta_data->retry_limit = mhdr->rate_info[0].max_tries;

		meta_data->dyn_bw = 1;

		meta_data->valid_pwr = 1;
		meta_data->valid_mcs_mask = 1;
		meta_data->valid_nss_mask = 1;
		meta_data->valid_preamble_type = 1;
		meta_data->valid_retries = 1;
		meta_data->valid_bw_info = 1;
	}

	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
		meta_data->encrypt_type = 0;
		meta_data->valid_encrypt_type = 1;
		meta_data->learning_frame = 0;
	}

	meta_data->valid_key_flags = 1;
	meta_data->key_flags = (mhdr->keyix & 0x3);

remove_meta_hdr:
	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_pull_head failed\n");
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	if (mhdr->flags & METAHDR_FLAG_NOQOS)
		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
	else
		msdu_info->tid = qdf_nbuf_get_priority(nbuf);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
			" tid %d to_fw %d\n",
			__func__, msdu_info->meta_data[0],
			msdu_info->meta_data[1],
			msdu_info->meta_data[2],
			msdu_info->meta_data[3],
			msdu_info->meta_data[4],
			msdu_info->meta_data[5],
			msdu_info->tid, msdu_info->exception_fw);

	return nbuf;
}
#else
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				struct dp_tx_msdu_info_s *msdu_info)
{
	return nbuf;
}

#endif
#ifdef DP_FEATURE_NAWDS_TX
/**
 * dp_tx_prepare_nawds() - Transmit NAWDS (multicast) frames
 * @vdev: dp_vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info (TID, queue and firmware metadata for this frame)
 *
 * Return: NULL on success,
 *         nbuf on failure
 */
static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct dp_peer *peer = NULL;
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_ast_entry *ast_entry = NULL;
	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
	uint16_t peer_id = HTT_INVALID_PEER;

	struct dp_peer *sa_peer = NULL;
	qdf_nbuf_t nbuf_copy;

	qdf_spin_lock_bh(&(soc->ast_lock));
	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));

	if (ast_entry)
		sa_peer = ast_entry->peer;

	qdf_spin_unlock_bh(&(soc->ast_lock));

	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
				(peer->nawds_enabled)) {
			if (sa_peer == peer) {
				QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						" %s: broadcast multicast packet",
						__func__);
				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
				continue;
			}

			nbuf_copy = qdf_nbuf_copy(nbuf);
			if (!nbuf_copy) {
				QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_ERROR,
						"nbuf copy failed");
				/* Stop instead of sending a NULL nbuf down */
				break;
			}

			peer_id = peer->peer_ids[0];
			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
					msdu_info, peer_id, NULL);
			if (nbuf_copy != NULL) {
				qdf_nbuf_free(nbuf_copy);
				continue;
			}
			DP_STATS_INC_PKT(peer, tx.nawds_mcast,
					1, qdf_nbuf_len(nbuf));
		}
	}
	if (peer_id == HTT_INVALID_PEER)
		return nbuf;

	return NULL;
}
#endif

/**
 * dp_check_exc_metadata() - Checks if parameters are valid
 * @tx_exc: holds all exception path parameters
 *
 * Return: true when all the parameters are valid,
 *         false otherwise
 */
static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
{
	if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
	    tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
	    tx_exc->sec_type > cdp_num_sec_types) {
		return false;
	}

	return true;
}
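#if 0
/*
 * Illustrative caller sketch, deliberately compiled out. The fields and
 * macros below follow their use elsewhere in this file; the
 * htt_cmn_pkt_type_ethernet value and the particular combination shown
 * are assumptions for illustration, not a prescribed configuration.
 */
static qdf_nbuf_t dp_tx_send_exception_example(void *vap_dev, qdf_nbuf_t nbuf)
{
	struct cdp_tx_exception_metadata tx_exc = {0};

	tx_exc.tid = HTT_INVALID_TID;		/* no TID override */
	tx_exc.tx_encap_type = htt_cmn_pkt_type_ethernet; /* assumed enum */
	tx_exc.sec_type = cdp_sec_type_none;
	tx_exc.peer_id = HTT_INVALID_PEER;	/* no peer override */

	/* dp_tx_send_exception() returns NULL on success, nbuf on failure */
	return dp_tx_send_exception(vap_dev, nbuf, &tx_exc);
}
#endif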
/**
 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through the firmware
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct ether_header *eh = NULL;
	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
	struct dp_tx_msdu_info_s msdu_info;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);

	msdu_info.tid = tx_exc_metadata->tid;

	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s , skb %pM",
			__func__, nbuf->data);

	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Invalid parameters in exception path");
		goto fail;
	}

	/* Basic sanity checks for unsupported packets */

	/* MESH mode */
	if (qdf_unlikely(vdev->mesh_vdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Mesh mode is not supported in exception path");
		goto fail;
	}

	/* TSO or SG */
	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "TSO and SG are not supported in exception path");

		goto fail;
	}

	/* RAW */
	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Raw frame is not supported in exception path");
		goto fail;
	}

	/* Mcast enhancement */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW\n");
		}
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/* Reset the control block */
	qdf_nbuf_reset_ctxt(nbuf);

	/*
	 * Single linear frame:
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare a direct-buffer type TCL descriptor and enqueue it to the
	 * TCL SRNG. There is no need to setup an MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
			tx_exc_metadata->peer_id, tx_exc_metadata);

	return nbuf;

fail:
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"pkt send failed");
	return nbuf;
}

/**
 * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#ifdef MESH_MODE_SUPPORT
qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t nbuf_mesh = NULL;
	qdf_nbuf_t nbuf_clone = NULL;
	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
	uint8_t no_enc_frame = 0;

	nbuf_mesh = qdf_nbuf_unshare(nbuf);
	if (nbuf_mesh == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_unshare failed\n");
		return nbuf;
	}
	nbuf = nbuf_mesh;

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if ((vdev->sec_type != cdp_sec_type_none) &&
			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
		no_enc_frame = 1;

	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
			!no_enc_frame) {
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (nbuf_clone == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_clone failed\n");
			return nbuf;
		}
		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
	}

	if (nbuf_clone) {
		if (!dp_tx_send(vap_dev, nbuf_clone)) {
			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
		} else {
			qdf_nbuf_free(nbuf_clone);
		}
	}

	if (no_enc_frame)
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
	else
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);

	nbuf = dp_tx_send(vap_dev, nbuf);
	if ((nbuf == NULL) && no_enc_frame) {
		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
	}

	return nbuf;
}

#else

qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
{
	return dp_tx_send(vap_dev, nbuf);
}

#endif
/**
 * dp_tx_send() - Transmit a frame on a given VAP
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap
 * forwarding cases
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
{
	struct ether_header *eh = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s seg_info;
	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
	uint16_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf_mesh = NULL;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);

	eh = (struct ether_header *)qdf_nbuf_data(nbuf);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s, skb %pM", __func__, nbuf->data);

	/*
	 * Set Default Host TID value to invalid TID
	 * (TID override disabled)
	 */
	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(vdev->mesh_vdev)) {
		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
				&msdu_info);
		if (nbuf_mesh == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					"Extracting mesh metadata failed\n");
			return nbuf;
		}
		nbuf = nbuf_mesh;
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * TCL H/W supports 2 DSCP-TID mapping tables.
	 *  Table 1 - Default DSCP-TID mapping table
	 *  Table 2 - 1 DSCP-TID override table
	 *
	 * If we need a different DSCP-TID mapping for this vap,
	 * call tid_classify to extract DSCP/ToS from the frame and
	 * map it to a TID, stored in msdu_info. This is later used
	 * to fill in the TCL Input descriptor (per-packet TID override).
	 */
	if (vdev->dscp_tid_map_id > 1)
		dp_tx_classify_tid(vdev, nbuf, &msdu_info);
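	/*
	 * Illustrative note: DSCP is the upper 6 bits of the IPv4 ToS /
	 * IPv6 Traffic Class byte, so a classifier along the lines of the
	 * sketch below feeds the override table. The table contents are
	 * configuration dependent and the names (ip_hdr, dscp_tid_map)
	 * are hypothetical; the mapping shown (EF -> a voice TID) is
	 * only an example.
	 *
	 *	uint8_t tos = ip_hdr->tos;
	 *	uint8_t dscp = tos >> 2;	// e.g. 46 (Expedited Forwarding)
	 *	msdu_info.tid = dscp_tid_map[vdev->dscp_tid_map_id][dscp];
	 */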
	/* Reset the control block */
	qdf_nbuf_reset_ctxt(nbuf);

	/*
	 * Classify the frame and call the corresponding
	 * "prepare" function, which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
	 * into the MSDU_INFO structure, which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s TSO frame %pK\n", __func__, vdev);
		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
				qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			DP_STATS_INC(vdev, tx_i.tso.dropped_host, 1);
			return nbuf;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s non-TSO SG frame %pK\n", __func__, vdev);

		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				qdf_nbuf_len(nbuf));

		goto send_multiple;
	}

#ifdef ATH_SUPPORT_IQUE
	/* Mcast to Ucast Conversion */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					"%s Mcast frm for ME %pK\n", __func__, vdev);

			DP_STATS_INC_PKT(vdev,
					tx_i.mcast_en.mcast_pkt, 1,
					qdf_nbuf_len(nbuf));
			if (dp_tx_prepare_send_me(vdev, nbuf) ==
					QDF_STATUS_SUCCESS) {
				return NULL;
			}
		}
	}
#endif

	/* RAW */
	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
		if (nbuf == NULL)
			return NULL;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s Raw frame %pK\n", __func__, vdev);

		goto send_multiple;
	}

	/*
	 * Single linear frame:
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare a direct-buffer type TCL descriptor and enqueue it to
	 * the TCL SRNG. There is no need to set up an MSDU extension
	 * descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);

	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	return nbuf;
}
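/*
 * Illustrative note: each "prepare" function above hands
 * dp_tx_send_msdu_multiple() a NULL-terminated chain of
 * dp_tx_seg_info_s entries via msdu_info (see the ME path in
 * dp_tx_me_send_convert_ucast() below for a concrete producer).
 * A two-segment chain is built roughly like this sketch:
 *
 *	struct dp_tx_seg_info_s seg[2];
 *
 *	seg[0].nbuf = nbuf;
 *	seg[0].frag_cnt = 1;
 *	seg[0].next = &seg[1];
 *	seg[1].next = NULL;		// terminates the walk
 *	msdu_info.u.sg_info.curr_seg = &seg[0];
 *	msdu_info.num_seg = 2;
 */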
/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * Return: none
 */
static
void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_vdev *vdev;
	struct dp_peer *peer = NULL;
	uint32_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_peer *sa_peer = NULL;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = NULL;
	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
#ifdef WDS_VENDOR_EXTENSION
	int is_mcast = 0, is_ucast = 0;
	int num_peers_3addr = 0;
	struct ether_header *eth_hdr =
		(struct ether_header *)(qdf_nbuf_data(nbuf));
	struct ieee80211_frame_addr4 *wh =
		(struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
#endif

	vdev = tx_desc->vdev;
	qdf_assert(vdev);
	soc = vdev->pdev->soc;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s Tx reinject path\n", __func__);

	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
			qdf_nbuf_len(tx_desc->nbuf));

	qdf_spin_lock_bh(&(soc->ast_lock));

	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));

	if (ast_entry)
		sa_peer = ast_entry->peer;

	qdf_spin_unlock_bh(&(soc->ast_lock));

#ifdef WDS_VENDOR_EXTENSION
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
	} else {
		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
	}
	is_ucast = !is_mcast;

	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (peer->bss_peer)
			continue;

		/* Detect wds peers that use 3-addr framing for mcast;
		 * if there are any, the bss_peer is used to send the
		 * mcast frame using 3-addr format. All wds enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
		 */
		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
			num_peers_3addr = 1;
			break;
		}
	}
#endif
	if (qdf_unlikely(vdev->mesh_vdev)) {
		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
	} else {
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
#ifdef WDS_VENDOR_EXTENSION
			/*
			 * . if 3-addr STA, then send on BSS Peer
			 * . if Peer WDS enabled and accepts 4-addr mcast,
			 *   send mcast on that peer only
			 * . if Peer WDS enabled and accepts 4-addr ucast,
			 *   send ucast on that peer only
			 */
				((peer->bss_peer && num_peers_3addr && is_mcast) ||
				 (peer->wds_enabled &&
				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
#else
				((peer->bss_peer &&
				  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
				 peer->nawds_enabled)) {
#endif
				peer_id = DP_INVALID_PEER;

				if (peer->nawds_enabled) {
					peer_id = peer->peer_ids[0];
					if (sa_peer == peer) {
						QDF_TRACE(QDF_MODULE_ID_DP,
							QDF_TRACE_LEVEL_DEBUG,
							"%s: multicast packet",
							__func__);
						DP_STATS_INC(peer,
							tx.nawds_mcast_drop, 1);
						continue;
					}
				}

				nbuf_copy = qdf_nbuf_copy(nbuf);

				if (!nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						FL("nbuf copy failed"));
					break;
				}

				nbuf_copy = dp_tx_send_msdu_single(vdev,
						nbuf_copy,
						&msdu_info,
						peer_id,
						NULL);

				if (nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						FL("pkt send failed"));
					qdf_nbuf_free(nbuf_copy);
				} else {
					if (peer_id != DP_INVALID_PEER)
						DP_STATS_INC_PKT(peer,
							tx.nawds_mcast,
							1, qdf_nbuf_len(nbuf));
				}
			}
		}
	}

	if (vdev->nawds_enabled) {
		peer_id = DP_INVALID_PEER;

		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
				1, qdf_nbuf_len(nbuf));

		nbuf = dp_tx_send_msdu_single(vdev,
				nbuf,
				&msdu_info,
				peer_id, NULL);

		if (nbuf) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_DEBUG,
				FL("pkt send failed"));
			qdf_nbuf_free(nbuf);
		}
	} else {
		qdf_nbuf_free(nbuf);
	}

	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}

/**
 * dp_tx_inspect_handler() - Tx Inspect Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * Handles Tx frames sent back to Host for inspection
 * (ProxyARP)
 *
 * Return: none
 */
static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev = tx_desc->pdev;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s Tx inspect path\n", __func__);

	qdf_assert(pdev);

	soc = pdev->soc;

	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
			qdf_nbuf_len(tx_desc->nbuf));

	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}
#ifdef FEATURE_PERPKT_INFO
/**
 * dp_get_completion_indication_for_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @first_msdu: first msdu
 * @last_msdu: last msdu
 * @netbuf: Buffer pointer for free
 *
 * This function decides whether the buffer needs to be sent to the
 * stack for freeing or not.
 */
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc, struct dp_pdev *pdev,
		uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
		uint8_t last_msdu, qdf_nbuf_t netbuf)
{
	struct tx_capture_hdr *ppdu_hdr;
	struct dp_peer *peer = NULL;

	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode))
		return QDF_STATUS_E_NOSUPPORT;

	peer = (peer_id == HTT_INVALID_PEER) ? NULL :
			dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Peer Invalid"));
		return QDF_STATUS_E_INVAL;
	}

	if (pdev->mcopy_mode) {
		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
			(pdev->m_copy_id.tx_peer_id == peer_id)) {
			return QDF_STATUS_E_INVAL;
		}

		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
		pdev->m_copy_id.tx_peer_id = peer_id;
	}

	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("No headroom"));
		return QDF_STATUS_E_NOMEM;
	}

	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
			IEEE80211_ADDR_LEN);
	ppdu_hdr->ppdu_id = ppdu_id;
	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
			IEEE80211_ADDR_LEN);
	ppdu_hdr->peer_id = peer_id;
	ppdu_hdr->first_msdu = first_msdu;
	ppdu_hdr->last_msdu = last_msdu;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_send_completion_to_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @netbuf: Buffer pointer for free
 *
 * This function is used to send completion to stack
 * to free buffer
 */
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
		uint16_t peer_id, uint32_t ppdu_id,
		qdf_nbuf_t netbuf)
{
	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
			netbuf, peer_id,
			WDI_NO_VAL, pdev->pdev_id);
}
#else
static QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc, struct dp_pdev *pdev,
		uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
		uint8_t last_msdu, qdf_nbuf_t netbuf)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static void
dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
		uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
{
}
#endif
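/*
 * Illustrative note: after dp_get_completion_indication_for_stack()
 * succeeds, the netbuf handed to dp_send_completion_to_stack() carries
 * the capture header immediately before the original frame:
 *
 *	+-----------------------+-----------------------+
 *	| struct tx_capture_hdr | original Tx frame ... |
 *	+-----------------------+-----------------------+
 *
 * so a WDI_EVENT_TX_DATA consumer could peel it off with something
 * like the (hypothetical) sketch below:
 *
 *	struct tx_capture_hdr *hdr =
 *		(struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
 *	uint16_t peer_id = hdr->peer_id;
 *	qdf_nbuf_pull_head(netbuf, sizeof(*hdr));	// frame data follows
 */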
/**
 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
 * @soc: Soc handle
 * @desc: software Tx descriptor to be processed
 *
 * Return: none
 */
static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
		struct dp_tx_desc_s *desc)
{
	struct dp_vdev *vdev = desc->vdev;
	qdf_nbuf_t nbuf = desc->nbuf;

	/* If it is TDLS mgmt, don't unmap or free the frame */
	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
		return dp_non_std_tx_comp_free_buff(desc, vdev);

	/* 0 : MSDU buffer, 1 : MLE */
	if (desc->msdu_ext_desc) {
		/* TSO free */
		if (hal_tx_ext_desc_get_tso_enable(
					desc->msdu_ext_desc->vaddr)) {
			/* If the remaining number of segments is 0,
			 * actual TSO may unmap and free */
			if (qdf_nbuf_get_users(nbuf) == 1)
				__qdf_nbuf_unmap_single(soc->osdev,
						nbuf,
						QDF_DMA_TO_DEVICE);

			qdf_nbuf_free(nbuf);
			return;
		}
	}

	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);

	if (qdf_likely(!vdev->mesh_vdev)) {
		qdf_nbuf_free(nbuf);
	} else {
		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
		} else {
			vdev->osif_tx_free_ext((nbuf));
		}
	}
}

/**
 * dp_tx_mec_handler() - Tx MEC Notify Handler
 * @vdev: pointer to dp dev handler
 * @status: Tx completion status from HTT descriptor
 *
 * Handles MEC notify event sent from fw to Host
 *
 * Return: none
 */
#ifdef FEATURE_WDS
void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
	struct dp_soc *soc;
	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
	struct dp_peer *peer;
	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;

	if (!vdev->wds_enabled)
		return;

	soc = vdev->pdev->soc;
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	peer = TAILQ_FIRST(&vdev->peer_list);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("peer is NULL"));
		return;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s Tx MEC Handler\n", __func__);

	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
			status[(DP_MAC_ADDR_LEN - 2) + i];

	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN))
		dp_peer_add_ast(soc,
				peer,
				mac_addr,
				CDP_TXRX_AST_TYPE_MEC,
				flags);
}
#else
static void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
}
#endif

/**
 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * This function will process HTT Tx indication messages from Target
 *
 * Return: none
 */
static
void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	uint8_t tx_status;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_soc *soc;
	uint32_t *htt_status_word = (uint32_t *) status;

	qdf_assert(tx_desc->pdev);

	pdev = tx_desc->pdev;
	vdev = tx_desc->vdev;
	soc = pdev->soc;

	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]);

	switch (tx_status) {
	case HTT_TX_FW2WBM_TX_STATUS_OK:
	case HTT_TX_FW2WBM_TX_STATUS_DROP:
	case HTT_TX_FW2WBM_TX_STATUS_TTL:
	{
		dp_tx_comp_free_buf(soc, tx_desc);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
	{
		dp_tx_reinject_handler(tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
	{
		dp_tx_inspect_handler(tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
	{
		dp_tx_mec_handler(vdev, status);
		break;
	}
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s Invalid HTT tx_status %d\n",
				__func__, tx_status);
		break;
	}
}
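/*
 * Illustrative note: the HTT completion handed to the function above is
 * a word array, and the status is a bitfield of word 0. The GET macro
 * expands to a conventional mask-and-shift; the STATUS_SHIFT/STATUS_MASK
 * names below are placeholders, the real field definitions live in
 * htt.h.
 *
 *	uint32_t word0 = htt_status_word[0];
 *	uint8_t tx_status = (word0 >> STATUS_SHIFT) & STATUS_MASK;
 */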
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
 *                                         in mesh meta header
 * @tx_desc: software descriptor head pointer
 * @ts: pointer to tx completion stats
 *
 * Return: none
 */
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
		struct hal_tx_completion_status *ts)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t netbuf = tx_desc->nbuf;

	if (!tx_desc->msdu_ext_desc) {
		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"netbuf %pK offset %d\n",
					netbuf, tx_desc->pkt_offset);
			return;
		}
	}
	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"netbuf %pK offset %zu\n", netbuf,
				sizeof(struct meta_hdr_s));
		return;
	}

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
	mhdr->rssi = ts->ack_frame_rssi;
	mhdr->channel = tx_desc->pdev->operating_channel;
}

#else
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
		struct hal_tx_completion_status *ts)
{
}

#endif

/**
 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
 * @peer: Handle to DP peer
 * @ts: pointer to HAL Tx completion stats
 * @length: MSDU length
 *
 * Return: None
 */
static void dp_tx_update_peer_stats(struct dp_peer *peer,
		struct hal_tx_completion_status *ts, uint32_t length)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	uint8_t mcs, pkt_type;

	mcs = ts->mcs;
	pkt_type = ts->pkt_type;

	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM)
		return;

	if (peer->bss_peer) {
		DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
		DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
	} else {
		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
			DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
		}
	}

	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));

	DP_STATS_INCC(peer, tx.dropped.fw_rem, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_TX));

	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
			(ts->status == HAL_TX_TQM_RR_FW_REASON1));

	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
			(ts->status == HAL_TX_TQM_RR_FW_REASON2));

	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
			(ts->status == HAL_TX_TQM_RR_FW_REASON3));

	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED)
		return;

	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);

	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);

	if (!(soc->process_tx_status))
		return;
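	/*
	 * The per-rate table below buckets each completion by
	 * (pkt_type, mcs): an in-range MCS increments its own counter,
	 * while an MCS at or above the cap for that PHY type falls into
	 * the shared overflow bucket mcs_count[MAX_MCS - 1]. A sketch of
	 * the same logic without the DP_STATS_INCC macro:
	 *
	 *	if (pkt_type == DOT11_A) {
	 *		if (mcs < MAX_MCS_11A)
	 *			peer->stats.tx.pkt_type[DOT11_A].mcs_count[mcs]++;
	 *		else
	 *			peer->stats.tx.pkt_type[DOT11_A].mcs_count[MAX_MCS - 1]++;
	 *	}
	 */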
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
	DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);

	if (soc->cdp_soc.ol_ops->update_dp_stats) {
		soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
				&peer->stats, ts->peer_id,
				UPDATE_PEER_STATS);
	}
}

/**
 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
 * @tx_desc: software descriptor head pointer
 * @length: packet length
 *
 * Return: none
 */
static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
		uint32_t length)
{
	struct hal_tx_completion_status ts;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = tx_desc->vdev;
	struct dp_peer *peer = NULL;
	struct ether_header *eh =
		(struct ether_header *)qdf_nbuf_data(tx_desc->nbuf);

	hal_tx_comp_get_status(&tx_desc->comp, &ts);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"--------------------\n"
			"Tx Completion Stats:\n"
			"--------------------\n"
			"ack_frame_rssi = %d\n"
			"first_msdu = %d\n"
			"last_msdu = %d\n"
			"msdu_part_of_amsdu = %d\n"
			"rate_stats valid = %d\n"
			"bw = %d\n"
			"pkt_type = %d\n"
			"stbc = %d\n"
			"ldpc = %d\n"
			"sgi = %d\n"
			"mcs = %d\n"
			"ofdma = %d\n"
			"tones_in_ru = %d\n"
			"tsf = %d\n"
			"ppdu_id = %d\n"
			"transmit_cnt = %d\n"
			"tid = %d\n"
			"peer_id = %d\n",
			ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
			ts.msdu_part_of_amsdu, ts.valid, ts.bw,
			ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
			ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf,
			ts.ppdu_id, ts.transmit_cnt, ts.tid,
			ts.peer_id);

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"invalid vdev");
		goto out;
	}

	soc = vdev->pdev->soc;

	/* Update SoC level stats */
	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
			(ts.status == HAL_TX_TQM_RR_REM_CMD_REM));

	/* Update per-packet stats */
	if (qdf_unlikely(vdev->mesh_vdev) &&
			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
		dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);

	/* Update peer level stats */
	peer = dp_peer_find_by_id(soc, ts.peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"invalid peer");
		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
		goto out;
	}

	if (qdf_likely(peer->vdev->tx_encap_type ==
				htt_cmn_pkt_type_ethernet)) {
		if (peer->bss_peer && IEEE80211_IS_BROADCAST(eh->ether_dhost))
			DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
	}

	dp_tx_update_peer_stats(peer, &ts, length);

out:
	return;
}
/**
 * dp_tx_comp_process_desc() - Tx complete software descriptor handler
 * @soc: core txrx main context
 * @comp_head: software descriptor head pointer
 *
 * This function will process a batch of descriptors reaped by
 * dp_tx_comp_handler and release the software descriptors after
 * processing is complete.
 *
 * Return: none
 */
static void dp_tx_comp_process_desc(struct dp_soc *soc,
		struct dp_tx_desc_s *comp_head)
{
	struct dp_tx_desc_s *desc;
	struct dp_tx_desc_s *next;
	struct hal_tx_completion_status ts = {0};
	uint32_t length;
	struct dp_peer *peer;

	DP_HIST_INIT();
	desc = comp_head;

	while (desc) {
		hal_tx_comp_get_status(&desc->comp, &ts);
		peer = dp_peer_find_by_id(soc, ts.peer_id);
		length = qdf_nbuf_len(desc->nbuf);

		dp_tx_comp_process_tx_status(desc, length);

		/* Currently m_copy/tx_capture is not supported for
		 * scatter-gather packets */
		if (!(desc->msdu_ext_desc) &&
			(dp_get_completion_indication_for_stack(soc,
				desc->pdev, ts.peer_id, ts.ppdu_id,
				ts.first_msdu, ts.last_msdu,
				desc->nbuf) == QDF_STATUS_SUCCESS)) {
			qdf_nbuf_unmap(soc->osdev, desc->nbuf,
					QDF_DMA_TO_DEVICE);

			dp_send_completion_to_stack(soc, desc->pdev,
					ts.peer_id, ts.ppdu_id, desc->nbuf);
		} else {
			dp_tx_comp_free_buf(soc, desc);
		}

		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);

		next = desc->next;
		dp_tx_desc_release(desc, desc->pool_id);
		desc = next;
	}
	DP_TX_HIST_STATS_PER_PDEV();
}

/**
 * dp_tx_comp_handler() - Tx completion handler
 * @soc: core txrx main context
 * @hal_srng: completion ring
 * @quota: No. of packets/descriptors that can be serviced in one loop
 *
 * This function will collect hardware release ring element contents and
 * handle descriptor contents. Based on contents, free packet or handle
 * error conditions
 *
 * Return: number of processed packets
 */
uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
{
	void *tx_comp_hal_desc;
	uint8_t buffer_src;
	uint8_t pool_id;
	uint32_t tx_desc_id;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t num_processed;
	uint32_t count;

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s %d : HAL RING Access Failed -- %pK\n",
				__func__, __LINE__, hal_srng);
		return 0;
	}

	num_processed = 0;
	count = 0;

	/* Find head descriptor from completion ring */
	while (qdf_likely(tx_comp_hal_desc =
			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {

		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);

		/* If this buffer was not released by TQM or FW, then it is
		 * not a Tx completion indication; assert */
		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
			(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {

			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_FATAL,
					"Tx comp release_src != TQM | FW");

			qdf_assert_always(0);
		}

		/* Get descriptor id */
		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
				DP_TX_DESC_ID_POOL_OS;
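		/*
		 * Illustrative note: the cookie reported by hardware packs
		 * pool, page, and page-offset fields that dp_tx_desc_find()
		 * unpacks again below; the DP_TX_DESC_ID_* masks/shifts in
		 * dp_tx_desc.h define the real field widths. Sketch of the
		 * decode (field positions here are examples only):
		 *
		 *	tx_desc_id bits: [pool | page | offset]
		 *	pool   = (id & POOL_MASK)   >> POOL_OS;
		 *	page   = (id & PAGE_MASK)   >> PAGE_OS;
		 *	offset = (id & OFFSET_MASK) >> OFFSET_OS;
		 */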
		/* Pool ID is out of range. Error */
		if (pool_id > wlan_cfg_get_num_tx_desc_pool(
				soc->wlan_cfg_ctx)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_FATAL,
					"Tx Comp pool id %d not valid",
					pool_id);

			qdf_assert_always(0);
		}

		/* Find Tx descriptor */
		tx_desc = dp_tx_desc_find(soc, pool_id,
				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS,
				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS);

		/*
		 * If the release source is FW, process the HTT status
		 */
		if (qdf_unlikely(buffer_src ==
					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
					htt_tx_status);
			dp_tx_process_htt_completion(tx_desc,
					htt_tx_status);
		} else {
			/* Pool id does not match. Error */
			if (tx_desc->pool_id != pool_id) {
				QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_FATAL,
						"Tx Comp pool id %d not matched %d",
						pool_id, tx_desc->pool_id);

				qdf_assert_always(0);
			}

			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_FATAL,
						"Txdesc invalid, flgs = %x, id = %d",
						tx_desc->flags, tx_desc_id);
				qdf_assert_always(0);
			}

			/* First ring descriptor on the cycle */
			if (!head_desc) {
				head_desc = tx_desc;
				tail_desc = tx_desc;
			}

			tail_desc->next = tx_desc;
			tx_desc->next = NULL;
			tail_desc = tx_desc;

			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					&tx_desc->comp, 1);

		}

		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
		/* Decrement PM usage count if the packet has been sent. */
		hif_pm_runtime_put(soc->hif_handle);

		/*
		 * Stop processing once the processed packet count reaches
		 * the given quota.
		 */
		if ((num_processed >= quota))
			break;

		count++;
	}

	hal_srng_access_end(soc->hal_soc, hal_srng);

	/* Process the reaped descriptors */
	if (head_desc)
		dp_tx_comp_process_desc(soc, head_desc);

	return num_processed;
}

#ifdef CONVERGED_TDLS_ENABLE
/**
 * dp_tx_non_std() - Allow the control-path SW to send data frames
 * @vdev_handle: which vdev should transmit the tx data frames
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
		enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;

	if (tx_spec & OL_TX_SPEC_NO_FREE)
		vdev->is_tdls_frame = true;
	return dp_tx_send(vdev_handle, msdu_list);
}
#endif
/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	/*
	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
	 */
	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
			HTT_TCL_METADATA_TYPE_VDEV_BASED);

	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
			vdev->vdev_id);

	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
			DP_SW2HW_MACID(vdev->pdev->pdev_id));

	/*
	 * Set HTT Extension Valid bit to 0 by default
	 */
	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

	dp_tx_vdev_update_search_flags(vdev);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for a TDLS link.
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled.
	 */
	if (vdev->opmode == wlan_op_mode_sta &&
			vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if ((vdev->opmode == wlan_op_mode_sta &&
			(!vdev->wds_enabled || vdev->proxysta_vdev)))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
}
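/*
 * Illustrative summary of dp_tx_vdev_update_search_flags(): the three
 * branches above reduce to the following truth table.
 *
 *	opmode / link state            resulting search flags
 *	----------------------------   ------------------------------
 *	STA with TDLS link             ADDRX_EN | ADDRY_EN
 *	STA, non-WDS or ProxySTA       ADDRY_EN (SA based search)
 *	all other VAP modes            ADDRX_EN (DA based search)
 */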
/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_pdev_attach() - attach pdev to dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Initialize Flow control counters */
	qdf_atomic_init(&pdev->num_tx_exception);
	qdf_atomic_init(&pdev->num_tx_outstanding);

	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		/* Initialize descriptors in TCL Ring */
		hal_tx_init_data_ring(soc->hal_soc,
				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
	}

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
		int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return 0;
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}

static void dp_tx_desc_flush(struct dp_pdev *pdev)
{
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
		int num_desc)
{
	uint8_t i;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"%s Tx Desc Pool alloc %d failed %pK\n",
					__func__, i, soc);
			return ENOMEM;
		}
	}
	return 0;
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_free(soc, i)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
					"%s Tx Desc Pool Free failed\n",
					__func__);
		}
	}
}

/**
 * dp_tx_desc_flush() - release resources associated to tx_desc
 * @pdev: physical device instance
 *
 * This function will free all outstanding Tx buffers,
 * including ME buffers for which either the free during
 * completion didn't happen or the completion was never
 * received.
 */
static void dp_tx_desc_flush(struct dp_pdev *pdev)
{
	uint8_t i, num_pool;
	uint32_t j;
	uint32_t num_desc;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		for (j = 0; j < num_desc; j++) {
			tx_desc_pool = &((soc)->tx_desc[(i)]);
			if (tx_desc_pool &&
				tx_desc_pool->desc_pages.cacheable_pages) {
				tx_desc = dp_tx_desc_find(soc, i,
					(j & DP_TX_DESC_ID_PAGE_MASK) >>
					DP_TX_DESC_ID_PAGE_OS,
					(j & DP_TX_DESC_ID_OFFSET_MASK) >>
					DP_TX_DESC_ID_OFFSET_OS);

				if (tx_desc && (tx_desc->pdev == pdev) &&
					(tx_desc->flags &
					 DP_TX_DESC_FLAG_ALLOCATED)) {
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				}
			}
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/**
 * dp_tx_pdev_detach() - detach pdev from dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
{
	dp_tx_desc_flush(pdev);
	dp_tx_me_exit(pdev);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_soc_detach() - detach soc from dp tx
 * @soc: core txrx main context
 *
 * This function will detach dp tx from the main device context;
 * it will free dp tx resources and deinitialize them.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint16_t num_desc;
	uint16_t num_ext_desc;
	uint8_t i;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_delete_static_pools(soc, num_pool);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s Tx Desc Pool Free num_pool = %d, descs = %d\n",
			__func__, num_pool, num_desc);

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_ext_desc_pool_free(soc, i)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
					"%s Tx Ext Desc Pool Free failed\n",
					__func__);
			return QDF_STATUS_E_RESOURCES;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s MSDU Ext Desc Pool %d Free descs = %d\n",
			__func__, num_pool, num_ext_desc);

	for (i = 0; i < num_pool; i++)
		dp_tx_tso_desc_pool_free(soc, i);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s TSO Desc Pool %d Free descs = %d\n",
			__func__, num_pool, num_desc);

	for (i = 0; i < num_pool; i++)
		dp_tx_tso_num_seg_pool_free(soc, i);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s TSO Num of seg Desc Pool %d Free descs = %d\n",
			__func__, num_pool, num_desc);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_soc_attach() - attach soc to dp tx
 * @soc: core txrx main context
 *
 * This function will attach dp tx to the main device context;
 * it will allocate dp tx resources and initialize them.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
{
	uint8_t i;
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		goto fail;

	dp_tx_flow_control_init(soc);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s Tx Desc Alloc num_pool = %d, descs = %d\n",
			__func__, num_pool, num_desc);

	/* Allocate extension tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"MSDU Ext Desc Pool alloc %d failed %pK\n",
					i, soc);

			goto fail;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s MSDU Ext Desc Alloc %d, descs = %d\n",
			__func__, num_pool, num_ext_desc);

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"TSO Desc Pool alloc %d failed %pK\n",
					i, soc);

			goto fail;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s TSO Desc Alloc %d, descs = %d\n",
			__func__, num_pool, num_desc);

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"TSO Num of seg Pool alloc %d failed %pK\n",
					i, soc);

			goto fail;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s TSO Num of seg pool Alloc %d, descs = %d\n",
			__func__, num_pool, num_desc);

	/* Initialize descriptors in TCL Rings */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			hal_tx_init_data_ring(soc->hal_soc,
					soc->tcl_data_ring[i].hal_srng);
		}
	}

	/*
	 * todo - Add a runtime config option to enable this.
	 */
	/*
	 * Due to multiple issues on NPR EMU, enable it selectively
	 * only for NPR EMU; this should be removed once NPR platforms
	 * are stable.
	 */
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s HAL Tx init Success\n", __func__);

	return QDF_STATUS_SUCCESS;

fail:
	/* Detach will take care of freeing only allocated resources */
	dp_tx_soc_detach(soc);
	return QDF_STATUS_E_RESOURCES;
}

/**
 * dp_tx_me_mem_free() - Free memory allocated for mcast enhancement
 * @pdev: pointer to DP PDEV structure
 * @seg_info_head: Pointer to the head of list
 *
 * Return: void
 */
static void dp_tx_me_mem_free(struct dp_pdev *pdev,
		struct dp_tx_seg_info_s *seg_info_head)
{
	struct dp_tx_me_buf_t *mc_uc_buf;
	struct dp_tx_seg_info_s *seg_info_new = NULL;
	qdf_nbuf_t nbuf = NULL;
	uint64_t phy_addr;

	while (seg_info_head) {
		nbuf = seg_info_head->nbuf;
		mc_uc_buf = (struct dp_tx_me_buf_t *)
			seg_info_head->frags[0].vaddr;
		phy_addr = seg_info_head->frags[0].paddr_hi;
		phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
				phy_addr,
				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
		dp_tx_me_free_buf(pdev, mc_uc_buf);
		qdf_nbuf_free(nbuf);
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
}

/**
 * dp_tx_me_send_convert_ucast() - function to convert multicast to unicast
 * @vdev_handle: DP VDEV handle
 * @nbuf: Multicast nbuf
 * @newmac: Table of the clients to which packets have to be sent
 * @new_mac_cnt: No. of clients
 *
 * Return: no. of converted packets
 */
uint16_t
dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
{
	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct ether_header *eh;
	uint8_t *data;
	uint16_t len;

	/* reference to frame dst addr */
	uint8_t *dstmac;
	/* copy of original frame src addr */
	uint8_t srcmac[DP_MAC_ADDR_LEN];

	/* local index into newmac */
	uint8_t new_mac_idx = 0;
	struct dp_tx_me_buf_t *mc_uc_buf;
	qdf_nbuf_t nbuf_clone;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s *seg_info_head = NULL;
	struct dp_tx_seg_info_s *seg_info_tail = NULL;
	struct dp_tx_seg_info_s *seg_info_new;
	struct dp_tx_frag_info_s data_frag;
	qdf_dma_addr_t paddr_data;
	qdf_dma_addr_t paddr_mcbuf = 0;
	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
	QDF_STATUS status;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);

	len = qdf_nbuf_len(nbuf);

	data = qdf_nbuf_data(nbuf);

	status = qdf_nbuf_map(vdev->osdev, nbuf,
			QDF_DMA_TO_DEVICE);

	if (status) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Mapping failure Error:%d", status);
		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
		qdf_nbuf_free(nbuf);
		return 1;
	}

	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;

	/* preparing data fragment */
	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
	data_frag.paddr_lo = (uint32_t)paddr_data;
	data_frag.paddr_hi = (((uint64_t) paddr_data) >> 32);
	data_frag.len = len - DP_MAC_ADDR_LEN;
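	/*
	 * Illustrative note: each converted unicast frame below is sent
	 * as a two-fragment MSDU, so only the 6-byte destination MAC is
	 * duplicated per client while the payload is shared:
	 *
	 *	frags[0]: mc_uc_buf (new DA, DP_MAC_ADDR_LEN bytes)
	 *	frags[1]: original frame data past the DA (data_frag)
	 */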
	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
		dstmac = newmac[new_mac_idx];
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"added mac addr (%pM)", dstmac);

		/* Check for NULL Mac Address */
		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
			continue;

		/* frame to self mac. skip */
		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
			continue;

		/*
		 * TODO: optimize to avoid malloc in per-packet path
		 * For eg. seg_pool can be made part of vdev structure
		 */
		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));

		if (!seg_info_new) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"alloc failed");
			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
			goto fail_seg_alloc;
		}

		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
		if (mc_uc_buf == NULL)
			goto fail_buf_alloc;

		/*
		 * TODO: Check if we need to clone the nbuf
		 * Or can we just use the reference for all cases
		 */
		if (new_mac_idx < (new_mac_cnt - 1)) {
			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
			if (nbuf_clone == NULL) {
				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
				goto fail_clone;
			}
		} else {
			/*
			 * Update the ref
			 * to account for frame sent without cloning
			 */
			qdf_nbuf_ref(nbuf);
			nbuf_clone = nbuf;
		}

		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);

		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
				&paddr_mcbuf);

		if (status) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"Mapping failure Error:%d", status);
			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
			goto fail_map;
		}

		seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
		seg_info_new->frags[0].paddr_hi =
			((uint64_t) paddr_mcbuf >> 32);
		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;

		seg_info_new->frags[1] = data_frag;
		seg_info_new->nbuf = nbuf_clone;
		seg_info_new->frag_cnt = 2;
		seg_info_new->total_len = len;

		seg_info_new->next = NULL;

		if (seg_info_head == NULL)
			seg_info_head = seg_info_new;
		else
			seg_info_tail->next = seg_info_new;

		seg_info_tail = seg_info_new;
	}

	if (!seg_info_head)
		goto free_return;

	msdu_info.u.sg_info.curr_seg = seg_info_head;
	msdu_info.num_seg = new_mac_cnt;
	msdu_info.frm_type = dp_tx_frm_me;

	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	while (seg_info_head->next) {
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
	qdf_mem_free(seg_info_head);

	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(nbuf);
	return new_mac_cnt;

fail_map:
	qdf_nbuf_free(nbuf_clone);

fail_clone:
	dp_tx_me_free_buf(pdev, mc_uc_buf);

fail_buf_alloc:
	qdf_mem_free(seg_info_new);

fail_seg_alloc:
	dp_tx_me_mem_free(pdev, seg_info_head);

free_return:
	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(nbuf);
	return 1;
}