1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include "cdp_txrx_cmn_struct.h" 21 #include "dp_types.h" 22 #include "dp_tx.h" 23 #include "dp_be_tx.h" 24 #include "dp_tx_desc.h" 25 #include "hal_tx.h" 26 #include <hal_be_api.h> 27 #include <hal_be_tx.h> 28 #include <dp_htt.h> 29 #ifdef FEATURE_WDS 30 #include "dp_txrx_wds.h" 31 #endif 32 33 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1) 34 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_mutex_create(lock) 35 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_mutex_destroy(lock) 36 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_mutex_acquire(lock) 37 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_mutex_release(lock) 38 #else 39 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_spinlock_create(lock) 40 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock) 41 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_spin_lock_bh(lock) 42 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_spin_unlock_bh(lock) 43 #endif 44 45 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) 46 #ifdef WLAN_MCAST_MLO 47 /* MLO peer id for reinject*/ 48 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD 
/* Max 12-bit MLO multicast group sequence number (GSN); the vdev seq_num
 * wraps back to 0 once it exceeds this value (see the mcast send paths).
 */
#define MAX_GSN_NUM 0x0FFF

#ifdef QCA_MULTIPASS_SUPPORT
/* vlan lookup results used by the MLO multipass path */
#define INVALID_VLAN_ID 0xFFFF
#define MULTIPASS_WITH_VLAN_ID 0xFFFE
/**
 * struct dp_mlo_mpass_buf - Multipass buffer
 * @vlan_id: vlan_id of frame
 * @nbuf: pointer to skb buf
 */
struct dp_mlo_mpass_buf {
	uint16_t vlan_id;
	qdf_nbuf_t nbuf;
};
#endif /* QCA_MULTIPASS_SUPPORT */
#endif /* WLAN_MCAST_MLO */
#endif /* WLAN_FEATURE_11BE_MLO && WLAN_MLO_MULTI_CHIP */

/*
 * Beryllium tx completions reuse the V2 HTT WBM field layout; alias the
 * V3 accessor names used in this file onto the V2 bit-field getters.
 */
#define DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(_var) \
	HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_VALID_GET(_var) \
	HTT_TX_WBM_COMPLETION_V2_VALID_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(_var) \
	HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(_var) \
	HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(_var) \
	HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(_var) \
	HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(_var)

extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];

#ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
/**
 * dp_tx_comp_get_peer_id() - extract the sw peer id from a tx completion
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL tx completion descriptor
 *
 * Reduced-width variant: the completion carries a narrow peer_id field
 * plus an ml_peer_valid flag; rebuild the full sw peer id by placing the
 * ml_peer_valid bit at soc->peer_id_shift.
 *
 * Return: sw peer id of the completing peer
 */
static inline uint16_t dp_tx_comp_get_peer_id(struct dp_soc *soc,
					      void *tx_comp_hal_desc)
{
	uint16_t peer_id = hal_tx_comp_get_peer_id(tx_comp_hal_desc);
	struct dp_tx_comp_peer_id *tx_peer_id =
			(struct dp_tx_comp_peer_id *)&peer_id;

	return (tx_peer_id->peer_id |
		(tx_peer_id->ml_peer_valid << soc->peer_id_shift));
}
#else
/* Combine ml_peer_valid and peer_id field */
#define DP_BE_TX_COMP_PEER_ID_MASK 0x00003fff
#define DP_BE_TX_COMP_PEER_ID_SHIFT 0

/**
 * dp_tx_comp_get_peer_id() - extract the sw peer id from a tx completion
 * @soc: DP soc handle (unused in this variant)
 * @tx_comp_hal_desc: HAL tx completion descriptor
 *
 * Full-width variant: ml_peer_valid and peer_id share the masked field,
 * so the masked/shifted value is the sw peer id as-is.
 *
 * Return: sw peer id of the completing peer
 */
static inline uint16_t dp_tx_comp_get_peer_id(struct dp_soc *soc,
					      void *tx_comp_hal_desc)
{
	uint16_t peer_id = hal_tx_comp_get_peer_id(tx_comp_hal_desc);

	return ((peer_id & DP_BE_TX_COMP_PEER_ID_MASK) >>
		DP_BE_TX_COMP_PEER_ID_SHIFT);
}
#endif

#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
/**
 * dp_tx_comp_get_params_from_hal_desc_be() - recover sw tx descriptor
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL tx completion descriptor
 * @r_tx_desc: [out] sw tx descriptor recovered from the completion
 *
 * If HW cookie conversion already ran on this completion, read the
 * descriptor VA straight from the ring entry; otherwise fall back to a
 * sw cookie lookup by descriptor id. Also records the completing peer id
 * in the recovered descriptor.
 */
void
dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
				       void *tx_comp_hal_desc,
				       struct dp_tx_desc_s **r_tx_desc)
{
	uint32_t tx_desc_id;

	if (qdf_likely(
		hal_tx_comp_get_cookie_convert_done(tx_comp_hal_desc))) {
		/* HW cookie conversion done */
		*r_tx_desc = (struct dp_tx_desc_s *)
			hal_tx_comp_get_desc_va(tx_comp_hal_desc);
	} else {
		/* SW do cookie conversion to VA */
		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
		*r_tx_desc =
		(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
	}

	if (*r_tx_desc)
		(*r_tx_desc)->peer_id = dp_tx_comp_get_peer_id(soc,
							       tx_comp_hal_desc);
}
#else
/* HW cookie conversion is unconditional in this config: the completion
 * always carries the descriptor VA directly.
 */
void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
	*r_tx_desc = (struct dp_tx_desc_s *)
		hal_tx_comp_get_desc_va(tx_comp_hal_desc);

	if (*r_tx_desc)
		(*r_tx_desc)->peer_id = dp_tx_comp_get_peer_id(soc,
							       tx_comp_hal_desc);
}
#endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
#else

/* No HW cookie conversion: always derive the descriptor VA from the sw
 * cookie (descriptor id) carried in the completion.
 */
void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
	uint32_t tx_desc_id;

	/* SW do cookie conversion to VA */
	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
	*r_tx_desc =
	(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);

	if (*r_tx_desc)
		(*r_tx_desc)->peer_id = dp_tx_comp_get_peer_id(soc,
							       tx_comp_hal_desc);
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION */

/**
 * dp_tx_process_mec_notify_be() - handle a FW2WBM MEC-notify completion
 * @soc: DP soc handle
 * @status: HTT status words from the WBM release descriptor
 *
 * Extracts the vdev id from the HTT status, takes a vdev reference and
 * forwards the notification to the MEC handler. Only legal when FW MEC
 * offload is disabled (asserted below).
 */
static inline
void dp_tx_process_mec_notify_be(struct dp_soc *soc, uint8_t *status)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;
	uint32_t *htt_desc = (uint32_t *)status;

	qdf_assert_always(!soc->mec_fw_offload);

	/*
	 * Get vdev id from HTT status word in case of MEC
	 * notification
	 */
	vdev_id = DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(htt_desc[4]);
	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
				     DP_MOD_ID_HTT_COMP);
	if (!vdev)
		return;
	dp_tx_mec_handler(vdev, status);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}

/**
 * dp_tx_process_htt_completion_be() - process an HTT (FW2WBM) tx completion
 * @soc: DP soc handle
 * @tx_desc: sw tx descriptor associated with the completion
 * @status: HTT status words from the WBM release descriptor
 * @ring_id: tx completion ring id (used for per-ring stats)
 *
 * Dispatches on the FW tx status: OK/DROP/TTL update per-peer/per-tid
 * stats and release the descriptor; REINJECT and INSPECT are handed to
 * their dedicated handlers; MEC-notify is handled before touching any
 * tx_desc field at all (the descriptor may not be valid in that case).
 */
void dp_tx_process_htt_completion_be(struct dp_soc *soc,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t *status,
				     uint8_t ring_id)
{
	uint8_t tx_status;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = NULL;
	struct hal_tx_completion_status ts = {0};
	uint32_t *htt_desc = (uint32_t *)status;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	struct htt_soc *htt_handle;
	uint8_t vdev_id;

	tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
	htt_handle = (struct htt_soc *)soc->htt_handle;
	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);

	/*
	 * There can be scenario where WBM consuming descriptor enqueued
	 * from TQM2WBM first and TQM completion can happen before MEC
	 * notification comes from FW2WBM. Avoid access any field of tx
	 * descriptor in case of MEC notify.
	 */
	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY)
		return dp_tx_process_mec_notify_be(soc, status);

	/*
	 * If the descriptor is already freed in vdev_detach,
	 * continue to next descriptor
	 */
	if (qdf_unlikely(!tx_desc->flags)) {
		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
				   tx_desc->id);
		return;
	}

	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

	pdev = tx_desc->pdev;

	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

	qdf_assert(tx_desc->pdev);

	vdev_id = tx_desc->vdev_id;
	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
				     DP_MOD_ID_HTT_COMP);

	if (qdf_unlikely(!vdev)) {
		dp_tx_comp_info_rl("Unable to get vdev ref %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

	switch (tx_status) {
	case HTT_TX_FW2WBM_TX_STATUS_OK:
	case HTT_TX_FW2WBM_TX_STATUS_DROP:
	case HTT_TX_FW2WBM_TX_STATUS_TTL:
	{
		uint8_t tid;

		/* peer_id/tid are only meaningful when FW marked them valid */
		if (DP_TX_WBM_COMPLETION_V3_VALID_GET(htt_desc[3])) {
			ts.peer_id =
				DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(
						htt_desc[3]);
			ts.tid =
				DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(
						htt_desc[3]);
		} else {
			ts.peer_id = HTT_INVALID_PEER;
			ts.tid = HTT_INVALID_TID;
		}
		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
		ts.ppdu_id =
			DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(
					htt_desc[2]);
		ts.ack_frame_rssi =
			DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(
					htt_desc[2]);

		ts.tsf = htt_desc[4];
		ts.first_msdu = 1;
		ts.last_msdu = 1;
		ts.status = (tx_status == HTT_TX_FW2WBM_TX_STATUS_OK ?
			     HAL_TX_TQM_RR_FRAME_ACKED :
			     HAL_TX_TQM_RR_REM_CMD_REM);
		tid = ts.tid;
		/* clamp invalid tids into the last data-tid stats bucket */
		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
			tid = CDP_MAX_DATA_TIDS - 1;

		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

		if (qdf_unlikely(pdev->delay_stats_flag) ||
		    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)))
			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
		if (tx_status < CDP_MAX_TX_HTT_STATUS)
			tid_stats->htt_status_cnt[tx_status]++;

		txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
						       &txrx_ref_handle,
						       DP_MOD_ID_HTT_COMP);
		if (qdf_likely(txrx_peer))
			dp_tx_update_peer_basic_stats(
						txrx_peer,
						qdf_nbuf_len(tx_desc->nbuf),
						tx_status,
						pdev->enhanced_stats_en);

		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
					     ring_id);
		dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);

		/* release the peer ref taken above, after all stats use */
		if (qdf_likely(txrx_peer))
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_HTT_COMP);

		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
	{
		uint8_t reinject_reason;

		reinject_reason =
			HTT_TX_WBM_COMPLETION_V3_REINJECT_REASON_GET(
								htt_desc[1]);
		dp_tx_reinject_handler(soc, vdev, tx_desc,
				       status, reinject_reason);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
	{
		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
	{
		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
		goto release_tx_desc;
	}
	default:
		dp_tx_comp_err("Invalid HTT tx_status %d\n",
			       tx_status);
		goto release_tx_desc;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
	return;

release_tx_desc:
	dp_tx_comp_free_buf(soc, tx_desc, false);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}
355 356 #ifdef QCA_OL_TX_MULTIQ_SUPPORT 357 #ifdef DP_TX_IMPLICIT_RBM_MAPPING 358 /* 359 * dp_tx_get_rbm_id()- Get the RBM ID for data transmission completion. 360 * @dp_soc - DP soc structure pointer 361 * @ring_id - Transmit Queue/ring_id to be used when XPS is enabled 362 * 363 * Return - RBM ID corresponding to TCL ring_id 364 */ 365 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc, 366 uint8_t ring_id) 367 { 368 return 0; 369 } 370 #else 371 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc, 372 uint8_t ring_id) 373 { 374 return (ring_id ? soc->wbm_sw0_bm_id + (ring_id - 1) : 375 HAL_WBM_SW2_BM_ID(soc->wbm_sw0_bm_id)); 376 } 377 #endif /*DP_TX_IMPLICIT_RBM_MAPPING*/ 378 #else 379 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc, 380 uint8_t tcl_index) 381 { 382 uint8_t rbm; 383 384 rbm = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_index); 385 dp_verbose_debug("tcl_id %u rbm %u", tcl_index, rbm); 386 return rbm; 387 } 388 #endif 389 #ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES 390 391 /* 392 * dp_tx_set_min_rates_for_critical_frames()- sets min-rates for critical pkts 393 * @dp_soc - DP soc structure pointer 394 * @hal_tx_desc - HAL descriptor where fields are set 395 * nbuf - skb to be considered for min rates 396 * 397 * The function relies on upper layers to set QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL 398 * and uses it to determine if the frame is critical. For a critical frame, 399 * flow override bits are set to classify the frame into HW's high priority 400 * queue. The HW will pick pre-configured min rates for such packets. 401 * 402 * Return - None 403 */ 404 static void 405 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc, 406 uint32_t *hal_tx_desc, 407 qdf_nbuf_t nbuf) 408 { 409 /* 410 * Critical frames should be queued to the high priority queue for the TID on 411 * on which they are sent out (for the concerned peer). 412 * FW is using HTT_MSDU_Q_IDX 2 for HOL (high priority) queue. 
413 * htt_msdu_idx = (2 * who_classify_info_sel) + flow_override 414 * Hence, using who_classify_info_sel = 1, flow_override = 0 to select 415 * HOL queue. 416 */ 417 if (QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(nbuf)) { 418 hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1); 419 hal_tx_desc_set_flow_override(hal_tx_desc, 0); 420 hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1); 421 hal_tx_desc_set_tx_notify_frame(hal_tx_desc, 422 TX_SEMI_HARD_NOTIFY_E); 423 } 424 } 425 #else 426 static inline void 427 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc, 428 uint32_t *hal_tx_desc_cached, 429 qdf_nbuf_t nbuf) 430 { 431 } 432 #endif 433 434 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \ 435 defined(WLAN_MCAST_MLO) 436 #ifdef QCA_MULTIPASS_SUPPORT 437 /** 438 * dp_tx_mlo_mcast_multipass_lookup() - lookup vlan_id in mpass peer list 439 * @be_vdev: Handle to DP be_vdev structure 440 * @ptnr_vdev: DP ptnr_vdev handle 441 * @arg: pointer to dp_mlo_mpass_ buf 442 * 443 * Return: None 444 */ 445 static void 446 dp_tx_mlo_mcast_multipass_lookup(struct dp_vdev_be *be_vdev, 447 struct dp_vdev *ptnr_vdev, 448 void *arg) 449 { 450 struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg; 451 struct dp_txrx_peer *txrx_peer = NULL; 452 struct vlan_ethhdr *veh = NULL; 453 qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(ptr->nbuf); 454 uint16_t vlan_id = 0; 455 bool not_vlan = ((ptnr_vdev->tx_encap_type == htt_cmn_pkt_type_raw) || 456 (htons(eh->ether_type) != ETH_P_8021Q)); 457 458 if (qdf_unlikely(not_vlan)) 459 return; 460 veh = (struct vlan_ethhdr *)eh; 461 vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK); 462 463 qdf_spin_lock_bh(&ptnr_vdev->mpass_peer_mutex); 464 TAILQ_FOREACH(txrx_peer, &ptnr_vdev->mpass_peer_list, 465 mpass_peer_list_elem) { 466 if (vlan_id == txrx_peer->vlan_id) { 467 qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex); 468 ptr->vlan_id = vlan_id; 469 return; 470 } 471 } 472 
qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex); 473 } 474 475 /** 476 * dp_tx_mlo_mcast_multipass_send() - send multipass MLO Mcast packets 477 * @be_vdev: Handle to DP be_vdev structure 478 * @ptnr_vdev: DP ptnr_vdev handle 479 * @arg: pointer to dp_mlo_mpass_ buf 480 * 481 * Return: None 482 */ 483 static void 484 dp_tx_mlo_mcast_multipass_send(struct dp_vdev_be *be_vdev, 485 struct dp_vdev *ptnr_vdev, 486 void *arg) 487 { 488 struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg; 489 struct dp_tx_msdu_info_s msdu_info; 490 struct dp_vdev_be *be_ptnr_vdev = NULL; 491 qdf_nbuf_t nbuf_clone; 492 uint16_t group_key = 0; 493 494 be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev); 495 if (be_vdev != be_ptnr_vdev) { 496 nbuf_clone = qdf_nbuf_clone(ptr->nbuf); 497 if (qdf_unlikely(!nbuf_clone)) { 498 dp_tx_debug("nbuf clone failed"); 499 return; 500 } 501 } else { 502 nbuf_clone = ptr->nbuf; 503 } 504 qdf_mem_zero(&msdu_info, sizeof(msdu_info)); 505 dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue); 506 msdu_info.gsn = be_vdev->seq_num; 507 be_ptnr_vdev->seq_num = be_vdev->seq_num; 508 509 if (ptr->vlan_id == MULTIPASS_WITH_VLAN_ID) { 510 msdu_info.tid = HTT_TX_EXT_TID_INVALID; 511 HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET( 512 msdu_info.meta_data[0], 1); 513 } else { 514 /* return when vlan map is not initialized */ 515 if (!ptnr_vdev->iv_vlan_map) 516 return; 517 group_key = ptnr_vdev->iv_vlan_map[ptr->vlan_id]; 518 519 /* 520 * If group key is not installed, drop the frame. 
521 */ 522 523 if (!group_key) 524 return; 525 526 dp_tx_remove_vlan_tag(ptnr_vdev, nbuf_clone); 527 dp_tx_add_groupkey_metadata(ptnr_vdev, &msdu_info, group_key); 528 msdu_info.exception_fw = 1; 529 } 530 531 nbuf_clone = dp_tx_send_msdu_single( 532 ptnr_vdev, 533 nbuf_clone, 534 &msdu_info, 535 DP_MLO_MCAST_REINJECT_PEER_ID, 536 NULL); 537 if (qdf_unlikely(nbuf_clone)) { 538 dp_info("pkt send failed"); 539 qdf_nbuf_free(nbuf_clone); 540 return; 541 } 542 } 543 544 /** 545 * dp_tx_mlo_mcast_multipass_handler - If frame needs multipass processing 546 * @soc: DP soc handle 547 * @vdev: DP vdev handle 548 * @nbuf: nbuf to be enqueued 549 * 550 * Return: true if handling is done else false 551 */ 552 static bool 553 dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc, 554 struct dp_vdev *vdev, 555 qdf_nbuf_t nbuf) 556 { 557 struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev); 558 struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc); 559 qdf_nbuf_t nbuf_copy = NULL; 560 struct dp_mlo_mpass_buf mpass_buf; 561 562 memset(&mpass_buf, 0, sizeof(struct dp_mlo_mpass_buf)); 563 mpass_buf.vlan_id = INVALID_VLAN_ID; 564 mpass_buf.nbuf = nbuf; 565 566 dp_tx_mlo_mcast_multipass_lookup(be_vdev, vdev, &mpass_buf); 567 if (mpass_buf.vlan_id == INVALID_VLAN_ID) { 568 dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev, 569 dp_tx_mlo_mcast_multipass_lookup, 570 &mpass_buf, DP_MOD_ID_TX); 571 /* 572 * Do not drop the frame when vlan_id doesn't match. 573 * Send the frame as it is. 574 */ 575 if (mpass_buf.vlan_id == INVALID_VLAN_ID) 576 return false; 577 } 578 579 /* AP can have classic clients, special clients & 580 * classic repeaters. 581 * 1. Classic clients & special client: 582 * Remove vlan header, find corresponding group key 583 * index, fill in metaheader and enqueue multicast 584 * frame to TCL. 585 * 2. Classic repeater: 586 * Pass through to classic repeater with vlan tag 587 * intact without any group key index. 
Hardware 588 * will know which key to use to send frame to 589 * repeater. 590 */ 591 nbuf_copy = qdf_nbuf_copy(nbuf); 592 593 /* 594 * Send multicast frame to special peers even 595 * if pass through to classic repeater fails. 596 */ 597 if (nbuf_copy) { 598 struct dp_mlo_mpass_buf mpass_buf_copy = {0}; 599 600 mpass_buf_copy.vlan_id = MULTIPASS_WITH_VLAN_ID; 601 mpass_buf_copy.nbuf = nbuf_copy; 602 /* send frame on partner vdevs */ 603 dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev, 604 dp_tx_mlo_mcast_multipass_send, 605 &mpass_buf_copy, DP_MOD_ID_TX); 606 607 /* send frame on mcast primary vdev */ 608 dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf_copy); 609 610 if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM)) 611 be_vdev->seq_num = 0; 612 else 613 be_vdev->seq_num++; 614 } 615 616 dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev, 617 dp_tx_mlo_mcast_multipass_send, 618 &mpass_buf, DP_MOD_ID_TX); 619 dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf); 620 621 if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM)) 622 be_vdev->seq_num = 0; 623 else 624 be_vdev->seq_num++; 625 626 return true; 627 } 628 #else 629 static bool 630 dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc, struct dp_vdev *vdev, 631 qdf_nbuf_t nbuf) 632 { 633 return false; 634 } 635 #endif 636 637 void dp_tx_mcast_mlo_reinject_routing_set(struct dp_soc *soc, void *arg) 638 { 639 hal_soc_handle_t hal_soc = soc->hal_soc; 640 uint8_t *cmd = (uint8_t *)arg; 641 642 if (*cmd) 643 hal_tx_mcast_mlo_reinject_routing_set( 644 hal_soc, 645 HAL_TX_MCAST_MLO_REINJECT_TQM_NOTIFY); 646 else 647 hal_tx_mcast_mlo_reinject_routing_set( 648 hal_soc, 649 HAL_TX_MCAST_MLO_REINJECT_FW_NOTIFY); 650 } 651 652 void 653 dp_tx_mlo_mcast_pkt_send(struct dp_vdev_be *be_vdev, 654 struct dp_vdev *ptnr_vdev, 655 void *arg) 656 { 657 qdf_nbuf_t nbuf = (qdf_nbuf_t)arg; 658 qdf_nbuf_t nbuf_clone; 659 struct dp_vdev_be *be_ptnr_vdev = NULL; 660 struct dp_tx_msdu_info_s msdu_info; 661 662 be_ptnr_vdev = 
dp_get_be_vdev_from_dp_vdev(ptnr_vdev); 663 if (be_vdev != be_ptnr_vdev) { 664 nbuf_clone = qdf_nbuf_clone(nbuf); 665 if (qdf_unlikely(!nbuf_clone)) { 666 dp_tx_debug("nbuf clone failed"); 667 return; 668 } 669 } else { 670 nbuf_clone = nbuf; 671 } 672 673 qdf_mem_zero(&msdu_info, sizeof(msdu_info)); 674 dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue); 675 msdu_info.gsn = be_vdev->seq_num; 676 be_ptnr_vdev->seq_num = be_vdev->seq_num; 677 678 nbuf_clone = dp_tx_send_msdu_single( 679 ptnr_vdev, 680 nbuf_clone, 681 &msdu_info, 682 DP_MLO_MCAST_REINJECT_PEER_ID, 683 NULL); 684 if (qdf_unlikely(nbuf_clone)) { 685 dp_info("pkt send failed"); 686 qdf_nbuf_free(nbuf_clone); 687 return; 688 } 689 } 690 691 static inline void 692 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached, 693 struct dp_vdev *vdev, 694 struct dp_tx_msdu_info_s *msdu_info) 695 { 696 hal_tx_desc_set_vdev_id(hal_tx_desc_cached, msdu_info->vdev_id); 697 } 698 699 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc, 700 struct dp_vdev *vdev, 701 qdf_nbuf_t nbuf) 702 { 703 struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev); 704 struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc); 705 706 if (qdf_unlikely(vdev->multipass_en) && 707 dp_tx_mlo_mcast_multipass_handler(soc, vdev, nbuf)) 708 return; 709 /* send frame on partner vdevs */ 710 dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev, 711 dp_tx_mlo_mcast_pkt_send, 712 nbuf, DP_MOD_ID_REINJECT); 713 714 /* send frame on mcast primary vdev */ 715 dp_tx_mlo_mcast_pkt_send(be_vdev, vdev, nbuf); 716 717 if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM)) 718 be_vdev->seq_num = 0; 719 else 720 be_vdev->seq_num++; 721 } 722 #else 723 static inline void 724 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached, 725 struct dp_vdev *vdev, 726 struct dp_tx_msdu_info_s *msdu_info) 727 { 728 hal_tx_desc_set_vdev_id(hal_tx_desc_cached, vdev->vdev_id); 729 } 730 #endif 731 #if defined(WLAN_FEATURE_11BE_MLO) && 
!defined(WLAN_MLO_MULTI_CHIP) && \ 732 !defined(WLAN_MCAST_MLO) 733 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc, 734 struct dp_vdev *vdev, 735 qdf_nbuf_t nbuf) 736 { 737 } 738 #endif 739 740 #ifdef CONFIG_SAWF 741 void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached, 742 uint16_t *fw_metadata, qdf_nbuf_t nbuf) 743 { 744 uint8_t q_id = 0; 745 746 if (!wlan_cfg_get_sawf_config(soc->wlan_cfg_ctx)) 747 return; 748 749 dp_sawf_tcl_cmd(fw_metadata, nbuf); 750 q_id = dp_sawf_queue_id_get(nbuf); 751 752 if (q_id == DP_SAWF_DEFAULT_Q_INVALID) 753 return; 754 hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, DP_TX_HLOS_TID_GET(q_id)); 755 hal_tx_desc_set_flow_override_enable(hal_tx_desc_cached, 756 DP_TX_FLOW_OVERRIDE_ENABLE); 757 hal_tx_desc_set_flow_override(hal_tx_desc_cached, 758 DP_TX_FLOW_OVERRIDE_GET(q_id)); 759 hal_tx_desc_set_who_classify_info_sel(hal_tx_desc_cached, 760 DP_TX_WHO_CLFY_INF_SEL_GET(q_id)); 761 } 762 763 #else 764 765 static inline 766 void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached, 767 uint16_t *fw_metadata, qdf_nbuf_t nbuf) 768 { 769 } 770 771 static inline 772 QDF_STATUS dp_sawf_tx_enqueue_peer_stats(struct dp_soc *soc, 773 struct dp_tx_desc_s *tx_desc) 774 { 775 return QDF_STATUS_SUCCESS; 776 } 777 778 static inline 779 QDF_STATUS dp_sawf_tx_enqueue_fail_peer_stats(struct dp_soc *soc, 780 struct dp_tx_desc_s *tx_desc) 781 { 782 return QDF_STATUS_SUCCESS; 783 } 784 #endif 785 786 QDF_STATUS 787 dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev, 788 struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata, 789 struct cdp_tx_exception_metadata *tx_exc_metadata, 790 struct dp_tx_msdu_info_s *msdu_info) 791 { 792 void *hal_tx_desc; 793 uint32_t *hal_tx_desc_cached; 794 int coalesce = 0; 795 struct dp_tx_queue *tx_q = &msdu_info->tx_queue; 796 uint8_t ring_id = tx_q->ring_id; 797 uint8_t tid = msdu_info->tid; 798 struct dp_vdev_be *be_vdev; 799 uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 }; 800 
uint8_t bm_id = dp_tx_get_rbm_id_be(soc, ring_id); 801 hal_ring_handle_t hal_ring_hdl = NULL; 802 QDF_STATUS status = QDF_STATUS_E_RESOURCES; 803 uint8_t num_desc_bytes = HAL_TX_DESC_LEN_BYTES; 804 805 be_vdev = dp_get_be_vdev_from_dp_vdev(vdev); 806 807 if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) { 808 dp_err_rl("Invalid tx desc id:%d", tx_desc->id); 809 return QDF_STATUS_E_RESOURCES; 810 } 811 812 if (qdf_unlikely(tx_exc_metadata)) { 813 qdf_assert_always((tx_exc_metadata->tx_encap_type == 814 CDP_INVALID_TX_ENCAP_TYPE) || 815 (tx_exc_metadata->tx_encap_type == 816 vdev->tx_encap_type)); 817 818 if (tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw) 819 qdf_assert_always((tx_exc_metadata->sec_type == 820 CDP_INVALID_SEC_TYPE) || 821 tx_exc_metadata->sec_type == 822 vdev->sec_type); 823 } 824 825 hal_tx_desc_cached = (void *)cached_desc; 826 827 if (dp_sawf_tag_valid_get(tx_desc->nbuf)) { 828 dp_sawf_config_be(soc, hal_tx_desc_cached, 829 &fw_metadata, tx_desc->nbuf); 830 dp_sawf_tx_enqueue_peer_stats(soc, tx_desc); 831 } 832 833 hal_tx_desc_set_buf_addr_be(soc->hal_soc, hal_tx_desc_cached, 834 tx_desc->dma_addr, bm_id, tx_desc->id, 835 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)); 836 hal_tx_desc_set_lmac_id_be(soc->hal_soc, hal_tx_desc_cached, 837 vdev->lmac_id); 838 839 hal_tx_desc_set_search_index_be(soc->hal_soc, hal_tx_desc_cached, 840 vdev->bss_ast_idx); 841 /* 842 * Bank_ID is used as DSCP_TABLE number in beryllium 843 * So there is no explicit field used for DSCP_TID_TABLE_NUM. 
844 */ 845 846 hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached, 847 (vdev->bss_ast_hash & 0xF)); 848 849 hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata); 850 hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length); 851 hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset); 852 853 if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) 854 hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1); 855 856 /* verify checksum offload configuration*/ 857 if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == 858 QDF_NBUF_TX_CKSUM_TCP_UDP) || 859 qdf_nbuf_is_tso(tx_desc->nbuf)) { 860 hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1); 861 hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1); 862 } 863 864 hal_tx_desc_set_bank_id(hal_tx_desc_cached, vdev->bank_id); 865 866 dp_tx_vdev_id_set_hal_tx_desc(hal_tx_desc_cached, vdev, msdu_info); 867 868 if (tid != HTT_TX_EXT_TID_INVALID) 869 hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid); 870 871 dp_tx_set_min_rates_for_critical_frames(soc, hal_tx_desc_cached, 872 tx_desc->nbuf); 873 dp_tx_desc_set_ktimestamp(vdev, tx_desc); 874 875 hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id); 876 877 if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) { 878 dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl); 879 DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1); 880 DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1); 881 dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc); 882 return status; 883 } 884 885 hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl); 886 if (qdf_unlikely(!hal_tx_desc)) { 887 dp_verbose_debug("TCL ring full ring_id:%d", ring_id); 888 DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1); 889 DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1); 890 dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc); 891 goto ring_access_fail; 892 } 893 894 tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX; 895 dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf); 896 897 /* Sync cached 
descriptor with HW */ 898 hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc, num_desc_bytes); 899 900 coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid, 901 msdu_info, ring_id); 902 903 DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length); 904 DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1); 905 dp_tx_update_stats(soc, tx_desc, ring_id); 906 status = QDF_STATUS_SUCCESS; 907 908 dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached, 909 hal_ring_hdl, soc); 910 911 ring_access_fail: 912 dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce); 913 dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT, 914 qdf_get_log_timestamp(), tx_desc->nbuf); 915 return status; 916 } 917 918 QDF_STATUS dp_tx_init_bank_profiles(struct dp_soc_be *be_soc) 919 { 920 int i, num_tcl_banks; 921 922 num_tcl_banks = hal_tx_get_num_tcl_banks(be_soc->soc.hal_soc); 923 924 qdf_assert_always(num_tcl_banks); 925 be_soc->num_bank_profiles = num_tcl_banks; 926 927 be_soc->bank_profiles = qdf_mem_malloc(num_tcl_banks * 928 sizeof(*be_soc->bank_profiles)); 929 if (!be_soc->bank_profiles) { 930 dp_err("unable to allocate memory for DP TX Profiles!"); 931 return QDF_STATUS_E_NOMEM; 932 } 933 934 DP_TX_BANK_LOCK_CREATE(&be_soc->tx_bank_lock); 935 936 for (i = 0; i < num_tcl_banks; i++) { 937 be_soc->bank_profiles[i].is_configured = false; 938 qdf_atomic_init(&be_soc->bank_profiles[i].ref_count); 939 } 940 dp_info("initialized %u bank profiles", be_soc->num_bank_profiles); 941 return QDF_STATUS_SUCCESS; 942 } 943 944 void dp_tx_deinit_bank_profiles(struct dp_soc_be *be_soc) 945 { 946 qdf_mem_free(be_soc->bank_profiles); 947 DP_TX_BANK_LOCK_DESTROY(&be_soc->tx_bank_lock); 948 } 949 950 static 951 void dp_tx_get_vdev_bank_config(struct dp_vdev_be *be_vdev, 952 union hal_tx_bank_config *bank_config) 953 { 954 struct dp_vdev *vdev = &be_vdev->vdev; 955 956 bank_config->epd = 0; 957 958 bank_config->encap_type = vdev->tx_encap_type; 959 960 /* Only valid for raw frames. 
Needs work for RAW mode */ 961 if (vdev->tx_encap_type == htt_cmn_pkt_type_raw) { 962 bank_config->encrypt_type = sec_type_map[vdev->sec_type]; 963 } else { 964 bank_config->encrypt_type = 0; 965 } 966 967 bank_config->src_buffer_swap = 0; 968 bank_config->link_meta_swap = 0; 969 970 if ((vdev->search_type == HAL_TX_ADDR_INDEX_SEARCH) && 971 vdev->opmode == wlan_op_mode_sta) { 972 bank_config->index_lookup_enable = 1; 973 bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_MEC_NOTIFY; 974 bank_config->addrx_en = 0; 975 bank_config->addry_en = 0; 976 } else { 977 bank_config->index_lookup_enable = 0; 978 bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION; 979 bank_config->addrx_en = 980 (vdev->hal_desc_addr_search_flags & 981 HAL_TX_DESC_ADDRX_EN) ? 1 : 0; 982 bank_config->addry_en = 983 (vdev->hal_desc_addr_search_flags & 984 HAL_TX_DESC_ADDRY_EN) ? 1 : 0; 985 } 986 987 bank_config->mesh_enable = vdev->mesh_vdev ? 1 : 0; 988 989 bank_config->dscp_tid_map_id = vdev->dscp_tid_map_id; 990 991 /* Disabling vdev id check for now. Needs revist. 
*/ 992 bank_config->vdev_id_check_en = be_vdev->vdev_id_check_en; 993 994 bank_config->pmac_id = vdev->lmac_id; 995 } 996 997 int dp_tx_get_bank_profile(struct dp_soc_be *be_soc, 998 struct dp_vdev_be *be_vdev) 999 { 1000 char *temp_str = ""; 1001 bool found_match = false; 1002 int bank_id = DP_BE_INVALID_BANK_ID; 1003 int i; 1004 int unconfigured_slot = DP_BE_INVALID_BANK_ID; 1005 int zero_ref_count_slot = DP_BE_INVALID_BANK_ID; 1006 union hal_tx_bank_config vdev_config = {0}; 1007 1008 /* convert vdev params into hal_tx_bank_config */ 1009 dp_tx_get_vdev_bank_config(be_vdev, &vdev_config); 1010 1011 DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock); 1012 /* go over all banks and find a matching/unconfigured/unsed bank */ 1013 for (i = 0; i < be_soc->num_bank_profiles; i++) { 1014 if (be_soc->bank_profiles[i].is_configured && 1015 (be_soc->bank_profiles[i].bank_config.val ^ 1016 vdev_config.val) == 0) { 1017 found_match = true; 1018 break; 1019 } 1020 1021 if (unconfigured_slot == DP_BE_INVALID_BANK_ID && 1022 !be_soc->bank_profiles[i].is_configured) 1023 unconfigured_slot = i; 1024 else if (zero_ref_count_slot == DP_BE_INVALID_BANK_ID && 1025 !qdf_atomic_read(&be_soc->bank_profiles[i].ref_count)) 1026 zero_ref_count_slot = i; 1027 } 1028 1029 if (found_match) { 1030 temp_str = "matching"; 1031 bank_id = i; 1032 goto inc_ref_and_return; 1033 } 1034 if (unconfigured_slot != DP_BE_INVALID_BANK_ID) { 1035 temp_str = "unconfigured"; 1036 bank_id = unconfigured_slot; 1037 goto configure_and_return; 1038 } 1039 if (zero_ref_count_slot != DP_BE_INVALID_BANK_ID) { 1040 temp_str = "zero_ref_count"; 1041 bank_id = zero_ref_count_slot; 1042 } 1043 if (bank_id == DP_BE_INVALID_BANK_ID) { 1044 dp_alert("unable to find TX bank!"); 1045 QDF_BUG(0); 1046 return bank_id; 1047 } 1048 1049 configure_and_return: 1050 be_soc->bank_profiles[bank_id].is_configured = true; 1051 be_soc->bank_profiles[bank_id].bank_config.val = vdev_config.val; 1052 
hal_tx_populate_bank_register(be_soc->soc.hal_soc, 1053 &be_soc->bank_profiles[bank_id].bank_config, 1054 bank_id); 1055 inc_ref_and_return: 1056 qdf_atomic_inc(&be_soc->bank_profiles[bank_id].ref_count); 1057 DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock); 1058 1059 dp_info("found %s slot at index %d, input:0x%x match:0x%x ref_count %u", 1060 temp_str, bank_id, vdev_config.val, 1061 be_soc->bank_profiles[bank_id].bank_config.val, 1062 qdf_atomic_read(&be_soc->bank_profiles[bank_id].ref_count)); 1063 1064 dp_info("epd:%x encap:%x encryp:%x src_buf_swap:%x link_meta_swap:%x addrx_en:%x addry_en:%x mesh_en:%x vdev_id_check:%x pmac_id:%x mcast_pkt_ctrl:%x", 1065 be_soc->bank_profiles[bank_id].bank_config.epd, 1066 be_soc->bank_profiles[bank_id].bank_config.encap_type, 1067 be_soc->bank_profiles[bank_id].bank_config.encrypt_type, 1068 be_soc->bank_profiles[bank_id].bank_config.src_buffer_swap, 1069 be_soc->bank_profiles[bank_id].bank_config.link_meta_swap, 1070 be_soc->bank_profiles[bank_id].bank_config.addrx_en, 1071 be_soc->bank_profiles[bank_id].bank_config.addry_en, 1072 be_soc->bank_profiles[bank_id].bank_config.mesh_enable, 1073 be_soc->bank_profiles[bank_id].bank_config.vdev_id_check_en, 1074 be_soc->bank_profiles[bank_id].bank_config.pmac_id, 1075 be_soc->bank_profiles[bank_id].bank_config.mcast_pkt_ctrl); 1076 1077 return bank_id; 1078 } 1079 1080 void dp_tx_put_bank_profile(struct dp_soc_be *be_soc, 1081 struct dp_vdev_be *be_vdev) 1082 { 1083 DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock); 1084 qdf_atomic_dec(&be_soc->bank_profiles[be_vdev->bank_id].ref_count); 1085 DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock); 1086 } 1087 1088 void dp_tx_update_bank_profile(struct dp_soc_be *be_soc, 1089 struct dp_vdev_be *be_vdev) 1090 { 1091 dp_tx_put_bank_profile(be_soc, be_vdev); 1092 be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev); 1093 be_vdev->vdev.bank_id = be_vdev->bank_id; 1094 } 1095 1096 QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc, 1097 
				   uint32_t num_elem,
				   uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	struct dp_hw_cookie_conversion_t *cc_ctx;
	struct dp_soc_be *be_soc;
	struct dp_spt_page_desc *page_desc;
	struct dp_tx_desc_s *tx_desc;
	uint32_t ppt_idx = 0;
	uint32_t avail_entry_index = 0;

	if (!num_elem) {
		dp_err("desc_num 0 !!");
		return QDF_STATUS_E_FAILURE;
	}

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	tx_desc_pool = &soc->tx_desc[pool_id];
	cc_ctx = &be_soc->tx_cc_ctx[pool_id];

	/* Walk the pool freelist, registering each descriptor's VA in a
	 * secondary page table (SPT) entry of the cookie-conversion context
	 * and deriving the descriptor id from the (page, entry) pair.
	 */
	tx_desc = tx_desc_pool->freelist;
	page_desc = &cc_ctx->page_desc_base[0];
	while (tx_desc) {
		/* entry index wrapped to 0: advance to the next SPT page */
		if (avail_entry_index == 0) {
			if (ppt_idx >= cc_ctx->total_page_num) {
				dp_alert("insufficient secondary page tables");
				qdf_assert_always(0);
			}
			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
		}

		/* put each TX Desc VA to SPT pages and
		 * get corresponding ID
		 */
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 avail_entry_index,
					 tx_desc);
		tx_desc->id =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       avail_entry_index);
		tx_desc->pool_id = pool_id;
		/* tag the descriptor as free (sanity/debug marker) */
		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
		tx_desc = tx_desc->next;
		avail_entry_index = (avail_entry_index + 1) &
					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_desc_pool_deinit_be() - clear cookie-conversion state for a TX pool
 * @soc: DP SOC handle
 * @tx_desc_pool: TX descriptor pool (not referenced here; kept for API
 *                symmetry with the init path)
 * @pool_id: pool index into tx_cc_ctx
 *
 * Zeroes every SPT page of the pool's cookie-conversion context so stale
 * descriptor VAs can no longer be resolved after the pool is torn down.
 */
void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id)
{
	struct dp_spt_page_desc *page_desc;
	struct dp_soc_be *be_soc;
	int i = 0;
	struct dp_hw_cookie_conversion_t *cc_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->tx_cc_ctx[pool_id];

	for (i = 0; i < cc_ctx->total_page_num; i++) {
		page_desc = &cc_ctx->page_desc_base[i];
		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
	}
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_tx_comp_nf_handler() - TX completion ring near-full handler
 * @int_ctx: interrupt context
 * @soc: DP SOC handle
 * @hal_ring_hdl: HAL ring handle (not used in this handler)
 * @ring_id: TX completion ring index
 * @quota: processing quota (not used in this handler)
 *
 * Latches the near_full flag on the ring once its fill level reaches
 * DP_SRNG_THRESH_NEAR_FULL; the actual drain happens elsewhere.
 *
 * Return: 0 when below the near-full threshold, else 1
 */
uint32_t dp_tx_comp_nf_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			       hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
			       uint32_t quota)
{
	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
	uint32_t work_done = 0;

	if (dp_srng_get_near_full_level(soc, tx_comp_ring) <
			DP_SRNG_THRESH_NEAR_FULL)
		return 0;

	qdf_atomic_set(&tx_comp_ring->near_full, 1);
	work_done++;

	return work_done;
}
#endif

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_CONFIG_TX_DELAY)
/* extract the HW link id field embedded in the PPDU id */
#define PPDUID_GET_HW_LINK_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))

#define HW_TX_DELAY_MAX                       0x1000000
#define TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US    10
#define HW_TX_DELAY_MASK                      0x1FFFFFFF
/* left-shift by 10 suggests buffer_timestamp is reported in 1024 us
 * units - NOTE(review): confirm against the HW completion descriptor spec
 */
#define TX_COMPL_BUFFER_TSTAMP_US(TSTAMP) \
	(((TSTAMP) << TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US) & \
	 HW_TX_DELAY_MASK)

static inline
QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
				      struct dp_vdev *vdev,
				      struct hal_tx_completion_status *ts,
				      uint32_t *delay_us)
{
	uint32_t ppdu_id;
	uint8_t link_id_offset, link_id_bits;
	uint8_t hw_link_id;
	uint32_t msdu_tqm_enqueue_tstamp_us, final_msdu_tqm_enqueue_tstamp_us;
	uint32_t msdu_compl_tsf_tstamp_us, final_msdu_compl_tsf_tstamp_us;
	uint32_t delay;
	int32_t delta_tsf2, delta_tqm;

	if (!ts->valid)
		return QDF_STATUS_E_INVAL;

	link_id_offset = soc->link_id_offset;
	link_id_bits = soc->link_id_bits;
	ppdu_id = ts->ppdu_id;
	hw_link_id = PPDUID_GET_HW_LINK_ID(ppdu_id, link_id_offset,
					   link_id_bits);

	/* TQM enqueue timestamp (converted to us) vs completion TSF */
	msdu_tqm_enqueue_tstamp_us =
		TX_COMPL_BUFFER_TSTAMP_US(ts->buffer_timestamp);
	msdu_compl_tsf_tstamp_us = ts->tsf;

	delta_tsf2 = dp_mlo_get_delta_tsf2_wrt_mlo_offset(soc, hw_link_id);
	delta_tqm = dp_mlo_get_delta_tqm_wrt_mlo_offset(soc);

	/* normalize both timestamps to the common MLO time base; all
	 * arithmetic wraps modulo HW_TX_DELAY_MASK + 1
	 */
	final_msdu_tqm_enqueue_tstamp_us = (msdu_tqm_enqueue_tstamp_us +
					    delta_tqm) & HW_TX_DELAY_MASK;

	final_msdu_compl_tsf_tstamp_us = (msdu_compl_tsf_tstamp_us +
					  delta_tsf2) & HW_TX_DELAY_MASK;

	delay = (final_msdu_compl_tsf_tstamp_us -
		 final_msdu_tqm_enqueue_tstamp_us) & HW_TX_DELAY_MASK;

	/* delays beyond HW_TX_DELAY_MAX are treated as bogus */
	if (delay > HW_TX_DELAY_MAX)
		return QDF_STATUS_E_FAILURE;

	if (delay_us)
		*delay_us = delay;

	return QDF_STATUS_SUCCESS;
}
#else
/* stub when MLO TX-delay accounting is compiled out: reports success and
 * leaves *delay_us untouched
 */
static inline
QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
				      struct dp_vdev *vdev,
				      struct hal_tx_completion_status *ts,
				      uint32_t *delay_us)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/* BE-arch hook used by common TX completion code to compute the HW
 * transmit delay in microseconds
 */
QDF_STATUS dp_tx_compute_tx_delay_be(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return dp_mlo_compute_hw_delay_us(soc, vdev, ts, delay_us);
}

/* Fast-path "map": no IOMMU mapping, just a cache clean (dsb deferred,
 * see qdf_dsb() in dp_tx_fast_send_be) and a VA-to-PA conversion.
 * NOTE(review): the fixed 256-byte clean assumes everything HW parses
 * sits within the first 256 bytes of the frame - confirm for all encaps.
 */
static inline
qdf_dma_addr_t dp_tx_nbuf_map_be(struct dp_vdev *vdev,
				 struct dp_tx_desc_s *tx_desc,
				 qdf_nbuf_t nbuf)
{
	qdf_nbuf_dma_clean_range_no_dsb((void *)nbuf->data,
					(void *)(nbuf->data + 256));

	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}

/* Fast-path unmap counterpart: intentionally empty since the map side
 * performed no IOMMU mapping
 */
static inline
void dp_tx_nbuf_unmap_be(struct dp_soc *soc,
			 struct dp_tx_desc_s *desc)
{
}

/**
 * dp_tx_fast_send_be() - Transmit a frame on a given VAP
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
 * cases
 *
 * Return: NULL on success,
 * nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct dp_tx_desc_s *tx_desc;
	uint16_t desc_pool_id;
	uint16_t pkt_len;
	qdf_dma_addr_t paddr;
	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
	hal_ring_handle_t hal_ring_hdl = NULL;
	uint32_t *hal_tx_desc_cached;
	void *hal_tx_desc;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return nbuf;

	vdev = soc->vdev_id_map[vdev_id];
	if (qdf_unlikely(!vdev))
		return nbuf;

	/* descriptor pool / TCL ring selected from the skb queue mapping */
	desc_pool_id = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;

	pkt_len = qdf_nbuf_headlen(nbuf);
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, pkt_len);
	DP_STATS_INC(vdev, tx_i.rcvd_in_fast_xmit_flow, 1);
	DP_STATS_INC(vdev, tx_i.rcvd_per_core[desc_pool_id], 1);

	pdev = vdev->pdev;
	if (dp_tx_limit_check(vdev))
		return nbuf;

	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
		return nbuf;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->shinfo_addr = skb_end_pointer(nbuf);
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev_id = vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;
	tx_desc->length = pkt_len;
	tx_desc->flags |= DP_TX_DESC_FLAG_SIMPLE;

	paddr = dp_tx_nbuf_map_be(vdev, tx_desc, nbuf);
	if (!paddr) {
		/* Handle failure */
		dp_err("qdf_nbuf_map failed");
		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
		goto release_desc;
	}

	tx_desc->dma_addr = paddr;

	/* build the HW TCL data command in a stack-cached buffer first;
	 * words 0-1: buffer address + SW cookie (descriptor id)
	 */
	hal_tx_desc_cached = (void *)cached_desc;
	hal_tx_desc_cached[0] = (uint32_t)tx_desc->dma_addr;
	hal_tx_desc_cached[1] = tx_desc->id <<
		TCL_DATA_CMD_BUF_ADDR_INFO_SW_BUFFER_COOKIE_LSB;

	/* bank_id */
	hal_tx_desc_cached[2] = vdev->bank_id << TCL_DATA_CMD_BANK_ID_LSB;
	hal_tx_desc_cached[3] = vdev->htt_tcl_metadata <<
		TCL_DATA_CMD_TCL_CMD_NUMBER_LSB;

	hal_tx_desc_cached[4] = tx_desc->length;
	/* l3 and l4 checksum enable */
	hal_tx_desc_cached[4] |= DP_TX_L3_L4_CSUM_ENABLE <<
		TCL_DATA_CMD_IPV4_CHECKSUM_EN_LSB;

	hal_tx_desc_cached[5] = vdev->lmac_id << TCL_DATA_CMD_PMAC_ID_LSB;
	hal_tx_desc_cached[5] |= vdev->vdev_id << TCL_DATA_CMD_VDEV_ID_LSB;

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, desc_pool_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto ring_access_fail2;
	}

	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", desc_pool_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;

	/* Sync cached descriptor with HW */
	qdf_mem_copy(hal_tx_desc, hal_tx_desc_cached, DP_TX_FAST_DESC_SIZE);
	/* data-sync barrier; also completes the cache clean deferred by
	 * dp_tx_nbuf_map_be() (which used the no-dsb variant)
	 */
	qdf_dsb();

	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
	DP_STATS_INC(soc, tx.tcl_enq[desc_pool_id], 1);
	status = QDF_STATUS_SUCCESS;

ring_access_fail:
	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);

ring_access_fail2:
	if (status != QDF_STATUS_SUCCESS) {
		dp_tx_nbuf_unmap_be(soc, tx_desc);
		goto release_desc;
	}

	return NULL;

release_desc:
	dp_tx_desc_release(tx_desc, desc_pool_id);

	return nbuf;
}